Annotation of src/sys/kern/subr_pool.c, Revision 1.112
1.112 ! bjh21 1: /* $NetBSD: subr_pool.c,v 1.111 2006/01/26 15:07:25 christos Exp $ */
1.1 pk 2:
3: /*-
1.43 thorpej 4: * Copyright (c) 1997, 1999, 2000 The NetBSD Foundation, Inc.
1.1 pk 5: * All rights reserved.
6: *
7: * This code is derived from software contributed to The NetBSD Foundation
1.20 thorpej 8: * by Paul Kranenburg; by Jason R. Thorpe of the Numerical Aerospace
9: * Simulation Facility, NASA Ames Research Center.
1.1 pk 10: *
11: * Redistribution and use in source and binary forms, with or without
12: * modification, are permitted provided that the following conditions
13: * are met:
14: * 1. Redistributions of source code must retain the above copyright
15: * notice, this list of conditions and the following disclaimer.
16: * 2. Redistributions in binary form must reproduce the above copyright
17: * notice, this list of conditions and the following disclaimer in the
18: * documentation and/or other materials provided with the distribution.
19: * 3. All advertising materials mentioning features or use of this software
20: * must display the following acknowledgement:
1.13 christos 21: * This product includes software developed by the NetBSD
22: * Foundation, Inc. and its contributors.
1.1 pk 23: * 4. Neither the name of The NetBSD Foundation nor the names of its
24: * contributors may be used to endorse or promote products derived
25: * from this software without specific prior written permission.
26: *
27: * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28: * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29: * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30: * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31: * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32: * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33: * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34: * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35: * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36: * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37: * POSSIBILITY OF SUCH DAMAGE.
38: */
1.64 lukem 39:
40: #include <sys/cdefs.h>
1.112 ! bjh21 41: __KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.111 2006/01/26 15:07:25 christos Exp $");
1.24 scottr 42:
1.25 thorpej 43: #include "opt_pool.h"
1.24 scottr 44: #include "opt_poollog.h"
1.28 thorpej 45: #include "opt_lockdebug.h"
1.1 pk 46:
47: #include <sys/param.h>
48: #include <sys/systm.h>
49: #include <sys/proc.h>
50: #include <sys/errno.h>
51: #include <sys/kernel.h>
52: #include <sys/malloc.h>
53: #include <sys/lock.h>
54: #include <sys/pool.h>
1.20 thorpej 55: #include <sys/syslog.h>
1.3 pk 56:
57: #include <uvm/uvm.h>
58:
1.1 pk 59: /*
60: * Pool resource management utility.
1.3 pk 61: *
1.88 chs 62: * Memory is allocated in pages which are split into pieces according to
63: * the pool item size. Each page is kept on one of three lists in the
64: * pool structure: `pr_emptypages', `pr_fullpages' and `pr_partpages',
65: * for empty, full and partially-full pages respectively. The individual
66: * pool items are on a linked list headed by `ph_itemlist' in each page
67: * header. The memory for building the page list is either taken from
68: * the allocated pages themselves (for small pool items) or taken from
69: * an internal pool of page headers (`phpool').
1.1 pk 70: */
71:
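/*
 * Illustrative usage sketch ("struct foo" and "foopool" are hypothetical
 * names): a client declares a pool, initializes it once, and then
 * allocates and frees fixed-size items at the appropriate spl:
 *
 *	static struct pool foopool;
 *
 *	pool_init(&foopool, sizeof(struct foo), 0, 0, 0, "foopl", NULL);
 *	...
 *	struct foo *f = pool_get(&foopool, PR_WAITOK);
 *	...
 *	pool_put(&foopool, f);
 */
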
1.3 pk 72: /* List of all pools */
1.102 chs 73: LIST_HEAD(,pool) pool_head = LIST_HEAD_INITIALIZER(pool_head);
1.3 pk 74:
75: /* Private pool for page header structures */
1.97 yamt 76: #define PHPOOL_MAX 8
77: static struct pool phpool[PHPOOL_MAX];
78: #define PHPOOL_FREELIST_NELEM(idx) (((idx) == 0) ? 0 : (1 << (idx)))
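/*
 * With PHPOOL_MAX == 8, the on-header freelists thus hold 0 entries
 * (index 0: plain headers with no freelist) and then 2, 4, 8, 16, 32,
 * 64 and 128 entries for indices 1..7.
 */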
1.3 pk 79:
1.62 bjh21 80: #ifdef POOL_SUBPAGE
81: /* Pool of subpages for use by normal pools. */
82: static struct pool psppool;
83: #endif
84:
1.98 yamt 85: static void *pool_page_alloc_meta(struct pool *, int);
86: static void pool_page_free_meta(struct pool *, void *);
87:
88: /* allocator for pool metadata */
89: static struct pool_allocator pool_allocator_meta = {
90: pool_page_alloc_meta, pool_page_free_meta
91: };
92:
1.3 pk 93: /* # of seconds to retain page after last use */
94: int pool_inactive_time = 10;
95:
96: /* Next candidate for drainage (see pool_drain()) */
1.23 thorpej 97: static struct pool *drainpp;
98:
99: /* This spin lock protects both pool_head and drainpp. */
100: struct simplelock pool_head_slock = SIMPLELOCK_INITIALIZER;
1.3 pk 101:
1.99 yamt 102: typedef uint8_t pool_item_freelist_t;
103:
1.3 pk 104: struct pool_item_header {
105: /* Page headers */
1.88 chs 106: LIST_ENTRY(pool_item_header)
1.3 pk 107: ph_pagelist; /* pool page list */
1.88 chs 108: SPLAY_ENTRY(pool_item_header)
109: ph_node; /* Off-page page headers */
1.3 pk 110: caddr_t ph_page; /* this page's address */
111: struct timeval ph_time; /* last referenced */
1.97 yamt 112: union {
113: /* !PR_NOTOUCH */
114: struct {
1.102 chs 115: LIST_HEAD(, pool_item)
1.97 yamt 116: phu_itemlist; /* chunk list for this page */
117: } phu_normal;
118: /* PR_NOTOUCH */
119: struct {
120: uint16_t
121: phu_off; /* start offset in page */
1.99 yamt 122: pool_item_freelist_t
1.97 yamt 123: phu_firstfree; /* first free item */
1.99 yamt 124: /*
125: * XXX it might be better to use
126: * a simple bitmap and ffs(3)
127: */
1.97 yamt 128: } phu_notouch;
129: } ph_u;
130: uint16_t ph_nmissing; /* # of chunks in use */
1.3 pk 131: };
1.97 yamt 132: #define ph_itemlist ph_u.phu_normal.phu_itemlist
133: #define ph_off ph_u.phu_notouch.phu_off
134: #define ph_firstfree ph_u.phu_notouch.phu_firstfree
1.3 pk 135:
1.1 pk 136: struct pool_item {
1.3 pk 137: #ifdef DIAGNOSTIC
1.82 thorpej 138: u_int pi_magic;
1.33 chs 139: #endif
1.82 thorpej 140: #define PI_MAGIC 0xdeadbeefU
1.3 pk 141: /* Other entries use only this list entry */
1.102 chs 142: LIST_ENTRY(pool_item) pi_list;
1.3 pk 143: };
144:
1.53 thorpej 145: #define POOL_NEEDS_CATCHUP(pp) \
146: ((pp)->pr_nitems < (pp)->pr_minitems)
147:
1.43 thorpej 148: /*
149: * Pool cache management.
150: *
151: * Pool caches provide a way for constructed objects to be cached by the
152: * pool subsystem. This can lead to performance improvements by avoiding
              153:  * needless object construction/destruction; destruction is deferred
              154:  * until absolutely necessary.
155: *
156: * Caches are grouped into cache groups. Each cache group references
157: * up to 16 constructed objects. When a cache allocates an object
158: * from the pool, it calls the object's constructor and places it into
159: * a cache group. When a cache group frees an object back to the pool,
160: * it first calls the object's destructor. This allows the object to
161: * persist in constructed form while freed to the cache.
162: *
163: * Multiple caches may exist for each pool. This allows a single
164: * object type to have multiple constructed forms. The pool references
165: * each cache, so that when a pool is drained by the pagedaemon, it can
166: * drain each individual cache as well. Each time a cache is drained,
167: * the most idle cache group is freed to the pool in its entirety.
168: *
              169:  * Pool caches are laid on top of pools. By layering them, we can avoid
170: * the complexity of cache management for pools which would not benefit
171: * from it.
172: */
173:
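/*
 * Illustrative cache usage sketch (hypothetical names; a sketch of the
 * pool_cache interface declared in <sys/pool.h>, layered over the
 * "foopool" from the sketch above):
 *
 *	static struct pool_cache foocache;
 *
 *	pool_cache_init(&foocache, &foopool, foo_ctor, foo_dtor, NULL);
 *	struct foo *f = pool_cache_get(&foocache, PR_WAITOK);
 *	...
 *	pool_cache_put(&foocache, f);
 */
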
174: /* The cache group pool. */
175: static struct pool pcgpool;
1.3 pk 176:
1.102 chs 177: static void pool_cache_reclaim(struct pool_cache *, struct pool_pagelist *,
178: struct pool_cache_grouplist *);
179: static void pcg_grouplist_free(struct pool_cache_grouplist *);
1.3 pk 180:
1.42 thorpej 181: static int pool_catchup(struct pool *);
1.55 thorpej 182: static void pool_prime_page(struct pool *, caddr_t,
183: struct pool_item_header *);
1.88 chs 184: static void pool_update_curpage(struct pool *);
1.66 thorpej 185:
186: void *pool_allocator_alloc(struct pool *, int);
187: void pool_allocator_free(struct pool *, void *);
1.3 pk 188:
1.97 yamt 189: static void pool_print_pagelist(struct pool *, struct pool_pagelist *,
1.88 chs 190: void (*)(const char *, ...));
1.42 thorpej 191: static void pool_print1(struct pool *, const char *,
192: void (*)(const char *, ...));
1.3 pk 193:
1.88 chs 194: static int pool_chk_page(struct pool *, const char *,
195: struct pool_item_header *);
196:
1.3 pk 197: /*
1.52 thorpej 198: * Pool log entry. An array of these is allocated in pool_init().
1.3 pk 199: */
200: struct pool_log {
201: const char *pl_file;
202: long pl_line;
203: int pl_action;
1.25 thorpej 204: #define PRLOG_GET 1
205: #define PRLOG_PUT 2
1.3 pk 206: void *pl_addr;
1.1 pk 207: };
208:
1.86 matt 209: #ifdef POOL_DIAGNOSTIC
1.3 pk 210: /* Number of entries in pool log buffers */
1.17 thorpej 211: #ifndef POOL_LOGSIZE
212: #define POOL_LOGSIZE 10
213: #endif
214:
215: int pool_logsize = POOL_LOGSIZE;
1.1 pk 216:
1.110 perry 217: static inline void
1.42 thorpej 218: pr_log(struct pool *pp, void *v, int action, const char *file, long line)
1.3 pk 219: {
220: int n = pp->pr_curlogentry;
221: struct pool_log *pl;
222:
1.20 thorpej 223: if ((pp->pr_roflags & PR_LOGGING) == 0)
1.3 pk 224: return;
225:
226: /*
227: * Fill in the current entry. Wrap around and overwrite
228: * the oldest entry if necessary.
229: */
230: pl = &pp->pr_log[n];
231: pl->pl_file = file;
232: pl->pl_line = line;
233: pl->pl_action = action;
234: pl->pl_addr = v;
235: if (++n >= pp->pr_logsize)
236: n = 0;
237: pp->pr_curlogentry = n;
238: }
239:
240: static void
1.42 thorpej 241: pr_printlog(struct pool *pp, struct pool_item *pi,
242: void (*pr)(const char *, ...))
1.3 pk 243: {
244: int i = pp->pr_logsize;
245: int n = pp->pr_curlogentry;
246:
1.20 thorpej 247: if ((pp->pr_roflags & PR_LOGGING) == 0)
1.3 pk 248: return;
249:
250: /*
251: * Print all entries in this pool's log.
252: */
253: while (i-- > 0) {
254: struct pool_log *pl = &pp->pr_log[n];
255: if (pl->pl_action != 0) {
1.25 thorpej 256: if (pi == NULL || pi == pl->pl_addr) {
257: (*pr)("\tlog entry %d:\n", i);
258: (*pr)("\t\taction = %s, addr = %p\n",
259: pl->pl_action == PRLOG_GET ? "get" : "put",
260: pl->pl_addr);
261: (*pr)("\t\tfile: %s at line %lu\n",
262: pl->pl_file, pl->pl_line);
263: }
1.3 pk 264: }
265: if (++n >= pp->pr_logsize)
266: n = 0;
267: }
268: }
1.25 thorpej 269:
1.110 perry 270: static inline void
1.42 thorpej 271: pr_enter(struct pool *pp, const char *file, long line)
1.25 thorpej 272: {
273:
1.34 thorpej 274: if (__predict_false(pp->pr_entered_file != NULL)) {
1.25 thorpej 275: printf("pool %s: reentrancy at file %s line %ld\n",
276: pp->pr_wchan, file, line);
277: printf(" previous entry at file %s line %ld\n",
278: pp->pr_entered_file, pp->pr_entered_line);
279: panic("pr_enter");
280: }
281:
282: pp->pr_entered_file = file;
283: pp->pr_entered_line = line;
284: }
285:
1.110 perry 286: static inline void
1.42 thorpej 287: pr_leave(struct pool *pp)
1.25 thorpej 288: {
289:
1.34 thorpej 290: if (__predict_false(pp->pr_entered_file == NULL)) {
1.25 thorpej 291: printf("pool %s not entered?\n", pp->pr_wchan);
292: panic("pr_leave");
293: }
294:
295: pp->pr_entered_file = NULL;
296: pp->pr_entered_line = 0;
297: }
298:
1.110 perry 299: static inline void
1.42 thorpej 300: pr_enter_check(struct pool *pp, void (*pr)(const char *, ...))
1.25 thorpej 301: {
302:
303: if (pp->pr_entered_file != NULL)
304: (*pr)("\n\tcurrently entered from file %s line %ld\n",
305: pp->pr_entered_file, pp->pr_entered_line);
306: }
1.3 pk 307: #else
1.25 thorpej 308: #define pr_log(pp, v, action, file, line)
309: #define pr_printlog(pp, pi, pr)
310: #define pr_enter(pp, file, line)
311: #define pr_leave(pp)
312: #define pr_enter_check(pp, pr)
1.59 thorpej 313: #endif /* POOL_DIAGNOSTIC */
1.3 pk 314:
1.110 perry 315: static inline int
1.97 yamt 316: pr_item_notouch_index(const struct pool *pp, const struct pool_item_header *ph,
317: const void *v)
318: {
319: const char *cp = v;
320: int idx;
321:
322: KASSERT(pp->pr_roflags & PR_NOTOUCH);
323: idx = (cp - ph->ph_page - ph->ph_off) / pp->pr_size;
324: KASSERT(idx < pp->pr_itemsperpage);
325: return idx;
326: }
327:
1.99 yamt 328: #define PR_FREELIST_ALIGN(p) \
329: roundup((uintptr_t)(p), sizeof(pool_item_freelist_t))
330: #define PR_FREELIST(ph) ((pool_item_freelist_t *)PR_FREELIST_ALIGN((ph) + 1))
331: #define PR_INDEX_USED ((pool_item_freelist_t)-1)
332: #define PR_INDEX_EOL ((pool_item_freelist_t)-2)
1.97 yamt 333:
1.110 perry 334: static inline void
1.97 yamt 335: pr_item_notouch_put(const struct pool *pp, struct pool_item_header *ph,
336: void *obj)
337: {
338: int idx = pr_item_notouch_index(pp, ph, obj);
1.99 yamt 339: pool_item_freelist_t *freelist = PR_FREELIST(ph);
1.97 yamt 340:
341: KASSERT(freelist[idx] == PR_INDEX_USED);
342: freelist[idx] = ph->ph_firstfree;
343: ph->ph_firstfree = idx;
344: }
345:
1.110 perry 346: static inline void *
1.97 yamt 347: pr_item_notouch_get(const struct pool *pp, struct pool_item_header *ph)
348: {
349: int idx = ph->ph_firstfree;
1.99 yamt 350: pool_item_freelist_t *freelist = PR_FREELIST(ph);
1.97 yamt 351:
352: KASSERT(freelist[idx] != PR_INDEX_USED);
353: ph->ph_firstfree = freelist[idx];
354: freelist[idx] = PR_INDEX_USED;
355:
356: return ph->ph_page + ph->ph_off + idx * pp->pr_size;
357: }
358:
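/*
 * Worked example of the index freelist: on a freshly primed PR_NOTOUCH
 * page (see pool_prime_page()) the freelist reads {1, 2, ..., n-1,
 * PR_INDEX_EOL} and ph_firstfree == 0.  The first get hands out item 0
 * at ph_page + ph_off, marks slot 0 PR_INDEX_USED and advances
 * ph_firstfree to 1; putting that item back pushes index 0 onto the
 * head of the list again.
 */
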
1.110 perry 359: static inline int
1.88 chs 360: phtree_compare(struct pool_item_header *a, struct pool_item_header *b)
361: {
362: if (a->ph_page < b->ph_page)
363: return (-1);
364: else if (a->ph_page > b->ph_page)
365: return (1);
366: else
367: return (0);
368: }
369:
370: SPLAY_PROTOTYPE(phtree, pool_item_header, ph_node, phtree_compare);
371: SPLAY_GENERATE(phtree, pool_item_header, ph_node, phtree_compare);
372:
1.3 pk 373: /*
374: * Return the pool page header based on page address.
375: */
1.110 perry 376: static inline struct pool_item_header *
1.42 thorpej 377: pr_find_pagehead(struct pool *pp, caddr_t page)
1.3 pk 378: {
1.88 chs 379: struct pool_item_header *ph, tmp;
1.3 pk 380:
1.20 thorpej 381: if ((pp->pr_roflags & PR_PHINPAGE) != 0)
1.3 pk 382: return ((struct pool_item_header *)(page + pp->pr_phoffset));
383:
1.88 chs 384: tmp.ph_page = page;
385: ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp);
386: return ph;
1.3 pk 387: }
388:
1.101 thorpej 389: static void
390: pr_pagelist_free(struct pool *pp, struct pool_pagelist *pq)
391: {
392: struct pool_item_header *ph;
393: int s;
394:
395: while ((ph = LIST_FIRST(pq)) != NULL) {
396: LIST_REMOVE(ph, ph_pagelist);
397: pool_allocator_free(pp, ph->ph_page);
398: if ((pp->pr_roflags & PR_PHINPAGE) == 0) {
399: s = splvm();
400: pool_put(pp->pr_phpool, ph);
401: splx(s);
402: }
403: }
404: }
405:
1.3 pk 406: /*
407: * Remove a page from the pool.
408: */
1.110 perry 409: static inline void
1.61 chs 410: pr_rmpage(struct pool *pp, struct pool_item_header *ph,
411: struct pool_pagelist *pq)
1.3 pk 412: {
413:
1.101 thorpej 414: LOCK_ASSERT(simple_lock_held(&pp->pr_slock));
1.91 yamt 415:
1.3 pk 416: /*
1.7 thorpej 417: * If the page was idle, decrement the idle page count.
1.3 pk 418: */
1.6 thorpej 419: if (ph->ph_nmissing == 0) {
420: #ifdef DIAGNOSTIC
421: if (pp->pr_nidle == 0)
422: panic("pr_rmpage: nidle inconsistent");
1.20 thorpej 423: if (pp->pr_nitems < pp->pr_itemsperpage)
424: panic("pr_rmpage: nitems inconsistent");
1.6 thorpej 425: #endif
426: pp->pr_nidle--;
427: }
1.7 thorpej 428:
1.20 thorpej 429: pp->pr_nitems -= pp->pr_itemsperpage;
430:
1.7 thorpej 431: /*
1.101 thorpej 432: * Unlink the page from the pool and queue it for release.
1.7 thorpej 433: */
1.88 chs 434: LIST_REMOVE(ph, ph_pagelist);
1.91 yamt 435: if ((pp->pr_roflags & PR_PHINPAGE) == 0)
436: SPLAY_REMOVE(phtree, &pp->pr_phtree, ph);
1.101 thorpej 437: LIST_INSERT_HEAD(pq, ph, ph_pagelist);
438:
1.7 thorpej 439: pp->pr_npages--;
440: pp->pr_npagefree++;
1.6 thorpej 441:
1.88 chs 442: pool_update_curpage(pp);
1.3 pk 443: }
444:
445: /*
1.94 simonb 446: * Initialize all the pools listed in the "pools" link set.
447: */
448: void
449: link_pool_init(void)
450: {
451: __link_set_decl(pools, struct link_pool_init);
452: struct link_pool_init * const *pi;
453:
454: __link_set_foreach(pi, pools)
455: pool_init((*pi)->pp, (*pi)->size, (*pi)->align,
456: (*pi)->align_offset, (*pi)->flags, (*pi)->wchan,
457: (*pi)->palloc);
458: }
459:
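/*
 * Entries reach the "pools" link set through the POOL_INIT() helper in
 * <sys/pool.h>; a sketch, with "foopool" again a hypothetical name:
 *
 *	POOL_INIT(foopool, sizeof(struct foo), 0, 0, 0, "foopl", NULL);
 *
 * link_pool_init() then runs pool_init() on each such entry during
 * early bootstrap, before malloc() is usable.
 */
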
460: /*
1.3 pk 461: * Initialize the given pool resource structure.
462: *
463: * We export this routine to allow other kernel parts to declare
464: * static pools that must be initialized before malloc() is available.
465: */
466: void
1.42 thorpej 467: pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
1.66 thorpej 468: const char *wchan, struct pool_allocator *palloc)
1.3 pk 469: {
1.88 chs 470: int off, slack;
1.92 enami 471: size_t trysize, phsize;
1.93 dbj 472: int s;
1.3 pk 473:
1.99 yamt 474: KASSERT((1UL << (CHAR_BIT * sizeof(pool_item_freelist_t))) - 2 >=
475: PHPOOL_FREELIST_NELEM(PHPOOL_MAX - 1));
476:
1.25 thorpej 477: #ifdef POOL_DIAGNOSTIC
478: /*
479: * Always log if POOL_DIAGNOSTIC is defined.
480: */
481: if (pool_logsize != 0)
482: flags |= PR_LOGGING;
483: #endif
484:
1.66 thorpej 485: if (palloc == NULL)
486: palloc = &pool_allocator_kmem;
1.112 ! bjh21 487: #ifdef POOL_SUBPAGE
! 488: if (size > palloc->pa_pagesz) {
! 489: if (palloc == &pool_allocator_kmem)
! 490: palloc = &pool_allocator_kmem_fullpage;
! 491: else if (palloc == &pool_allocator_nointr)
! 492: palloc = &pool_allocator_nointr_fullpage;
! 493: }
1.66 thorpej 494: #endif /* POOL_SUBPAGE */
495: if ((palloc->pa_flags & PA_INITIALIZED) == 0) {
1.112 ! bjh21 496: if (palloc->pa_pagesz == 0)
1.66 thorpej 497: palloc->pa_pagesz = PAGE_SIZE;
498:
499: TAILQ_INIT(&palloc->pa_list);
500:
501: simple_lock_init(&palloc->pa_slock);
502: palloc->pa_pagemask = ~(palloc->pa_pagesz - 1);
503: palloc->pa_pageshift = ffs(palloc->pa_pagesz) - 1;
504: palloc->pa_flags |= PA_INITIALIZED;
1.4 thorpej 505: }
1.3 pk 506:
507: if (align == 0)
508: align = ALIGN(1);
1.14 thorpej 509:
510: if (size < sizeof(struct pool_item))
511: size = sizeof(struct pool_item);
1.3 pk 512:
1.78 thorpej 513: size = roundup(size, align);
1.66 thorpej 514: #ifdef DIAGNOSTIC
515: if (size > palloc->pa_pagesz)
1.35 pk 516: panic("pool_init: pool item size (%lu) too large",
517: (u_long)size);
1.66 thorpej 518: #endif
1.35 pk 519:
1.3 pk 520: /*
521: * Initialize the pool structure.
522: */
1.88 chs 523: LIST_INIT(&pp->pr_emptypages);
524: LIST_INIT(&pp->pr_fullpages);
525: LIST_INIT(&pp->pr_partpages);
1.102 chs 526: LIST_INIT(&pp->pr_cachelist);
1.3 pk 527: pp->pr_curpage = NULL;
528: pp->pr_npages = 0;
529: pp->pr_minitems = 0;
530: pp->pr_minpages = 0;
531: pp->pr_maxpages = UINT_MAX;
1.20 thorpej 532: pp->pr_roflags = flags;
533: pp->pr_flags = 0;
1.35 pk 534: pp->pr_size = size;
1.3 pk 535: pp->pr_align = align;
536: pp->pr_wchan = wchan;
1.66 thorpej 537: pp->pr_alloc = palloc;
1.20 thorpej 538: pp->pr_nitems = 0;
539: pp->pr_nout = 0;
540: pp->pr_hardlimit = UINT_MAX;
541: pp->pr_hardlimit_warning = NULL;
1.31 thorpej 542: pp->pr_hardlimit_ratecap.tv_sec = 0;
543: pp->pr_hardlimit_ratecap.tv_usec = 0;
544: pp->pr_hardlimit_warning_last.tv_sec = 0;
545: pp->pr_hardlimit_warning_last.tv_usec = 0;
1.68 thorpej 546: pp->pr_drain_hook = NULL;
547: pp->pr_drain_hook_arg = NULL;
1.3 pk 548:
549: /*
550: * Decide whether to put the page header off page to avoid
1.92 enami    551: 	 * wasting too large a part of the page, or when the item is too big.
              552: 	 * Off-page page headers go on a splay tree, so we can match
              553: 	 * a returned item with its header based on the page address.
              554: 	 * We use 1/16 of the page size and about 8 times the header
              555: 	 * size as the threshold (XXX: tune)
556: *
557: * However, we'll put the header into the page if we can put
558: * it without wasting any items.
559: *
560: * Silently enforce `0 <= ioff < align'.
1.3 pk 561: */
1.92 enami 562: pp->pr_itemoffset = ioff %= align;
563: /* See the comment below about reserved bytes. */
564: trysize = palloc->pa_pagesz - ((align - ioff) % align);
565: phsize = ALIGN(sizeof(struct pool_item_header));
1.97 yamt 566: if ((pp->pr_roflags & PR_NOTOUCH) == 0 &&
567: (pp->pr_size < MIN(palloc->pa_pagesz / 16, phsize << 3) ||
568: trysize / pp->pr_size == (trysize - phsize) / pp->pr_size)) {
1.3 pk 569: /* Use the end of the page for the page header */
1.20 thorpej 570: pp->pr_roflags |= PR_PHINPAGE;
1.92 enami 571: pp->pr_phoffset = off = palloc->pa_pagesz - phsize;
1.2 pk 572: } else {
1.3 pk 573: /* The page header will be taken from our page header pool */
574: pp->pr_phoffset = 0;
1.66 thorpej 575: off = palloc->pa_pagesz;
1.88 chs 576: SPLAY_INIT(&pp->pr_phtree);
1.2 pk 577: }
1.1 pk 578:
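	/*
	 * A sketch with assumed numbers (4 KB pages, a header of roughly
	 * 48 bytes): a 64-byte item keeps its header in-page, since
	 * 64 < MIN(4096/16, 48 << 3); a 2 KB item takes its header from
	 * phpool, as an in-page header would cost the page a whole item.
	 */
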
1.3 pk 579: /*
580: * Alignment is to take place at `ioff' within the item. This means
581: * we must reserve up to `align - 1' bytes on the page to allow
582: * appropriate positioning of each item.
583: */
584: pp->pr_itemsperpage = (off - ((align - ioff) % align)) / pp->pr_size;
1.43 thorpej 585: KASSERT(pp->pr_itemsperpage != 0);
1.97 yamt 586: if ((pp->pr_roflags & PR_NOTOUCH)) {
587: int idx;
588:
589: for (idx = 0; pp->pr_itemsperpage > PHPOOL_FREELIST_NELEM(idx);
590: idx++) {
591: /* nothing */
592: }
593: if (idx >= PHPOOL_MAX) {
594: /*
              595: 	 * if you see this panic, consider tweaking
596: * PHPOOL_MAX and PHPOOL_FREELIST_NELEM.
597: */
598: panic("%s: too large itemsperpage(%d) for PR_NOTOUCH",
599: pp->pr_wchan, pp->pr_itemsperpage);
600: }
601: pp->pr_phpool = &phpool[idx];
602: } else if ((pp->pr_roflags & PR_PHINPAGE) == 0) {
603: pp->pr_phpool = &phpool[0];
604: }
605: #if defined(DIAGNOSTIC)
606: else {
607: pp->pr_phpool = NULL;
608: }
609: #endif
1.3 pk 610:
611: /*
612: * Use the slack between the chunks and the page header
613: * for "cache coloring".
614: */
615: slack = off - pp->pr_itemsperpage * pp->pr_size;
616: pp->pr_maxcolor = (slack / align) * align;
617: pp->pr_curcolor = 0;
618:
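	/*
	 * Example with assumed numbers: 100 bytes of slack and an align of
	 * 16 give pr_maxcolor == 96, so successive pages start their items
	 * at offsets 0, 16, ..., 96 before wrapping, spreading the items
	 * of different pages across cache lines.
	 */
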
619: pp->pr_nget = 0;
620: pp->pr_nfail = 0;
621: pp->pr_nput = 0;
622: pp->pr_npagealloc = 0;
623: pp->pr_npagefree = 0;
1.1 pk 624: pp->pr_hiwat = 0;
1.8 thorpej 625: pp->pr_nidle = 0;
1.3 pk 626:
1.59 thorpej 627: #ifdef POOL_DIAGNOSTIC
1.25 thorpej 628: if (flags & PR_LOGGING) {
629: if (kmem_map == NULL ||
630: (pp->pr_log = malloc(pool_logsize * sizeof(struct pool_log),
631: M_TEMP, M_NOWAIT)) == NULL)
1.20 thorpej 632: pp->pr_roflags &= ~PR_LOGGING;
1.3 pk 633: pp->pr_curlogentry = 0;
634: pp->pr_logsize = pool_logsize;
635: }
1.59 thorpej 636: #endif
1.25 thorpej 637:
638: pp->pr_entered_file = NULL;
639: pp->pr_entered_line = 0;
1.3 pk 640:
1.21 thorpej 641: simple_lock_init(&pp->pr_slock);
1.1 pk 642:
1.3 pk 643: /*
1.43 thorpej 644: * Initialize private page header pool and cache magazine pool if we
645: * haven't done so yet.
1.23 thorpej 646: * XXX LOCKING.
1.3 pk 647: */
1.97 yamt 648: if (phpool[0].pr_size == 0) {
649: int idx;
650: for (idx = 0; idx < PHPOOL_MAX; idx++) {
651: static char phpool_names[PHPOOL_MAX][6+1+6+1];
652: int nelem;
653: size_t sz;
654:
655: nelem = PHPOOL_FREELIST_NELEM(idx);
656: snprintf(phpool_names[idx], sizeof(phpool_names[idx]),
657: "phpool-%d", nelem);
658: sz = sizeof(struct pool_item_header);
659: if (nelem) {
660: sz = PR_FREELIST_ALIGN(sz)
1.99 yamt 661: + nelem * sizeof(pool_item_freelist_t);
1.97 yamt 662: }
663: pool_init(&phpool[idx], sz, 0, 0, 0,
1.98 yamt 664: phpool_names[idx], &pool_allocator_meta);
1.97 yamt 665: }
1.62 bjh21 666: #ifdef POOL_SUBPAGE
667: pool_init(&psppool, POOL_SUBPAGE, POOL_SUBPAGE, 0,
1.98 yamt 668: PR_RECURSIVE, "psppool", &pool_allocator_meta);
1.62 bjh21 669: #endif
1.43 thorpej 670: pool_init(&pcgpool, sizeof(struct pool_cache_group), 0, 0,
1.98 yamt 671: 0, "pcgpool", &pool_allocator_meta);
1.1 pk 672: }
673:
1.23 thorpej 674: /* Insert into the list of all pools. */
675: simple_lock(&pool_head_slock);
1.102 chs 676: LIST_INSERT_HEAD(&pool_head, pp, pr_poollist);
1.23 thorpej 677: simple_unlock(&pool_head_slock);
1.66 thorpej 678:
679: /* Insert this into the list of pools using this allocator. */
1.93 dbj 680: s = splvm();
1.66 thorpej 681: simple_lock(&palloc->pa_slock);
682: TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list);
683: simple_unlock(&palloc->pa_slock);
1.93 dbj 684: splx(s);
1.1 pk 685: }
686:
687: /*
              688:  * De-commission a pool resource.
689: */
690: void
1.42 thorpej 691: pool_destroy(struct pool *pp)
1.1 pk 692: {
1.101 thorpej 693: struct pool_pagelist pq;
1.3 pk 694: struct pool_item_header *ph;
1.93 dbj 695: int s;
1.43 thorpej 696:
1.101 thorpej 697: /* Remove from global pool list */
698: simple_lock(&pool_head_slock);
1.102 chs 699: LIST_REMOVE(pp, pr_poollist);
1.101 thorpej 700: if (drainpp == pp)
701: drainpp = NULL;
702: simple_unlock(&pool_head_slock);
703:
704: /* Remove this pool from its allocator's list of pools. */
1.93 dbj 705: s = splvm();
1.66 thorpej 706: simple_lock(&pp->pr_alloc->pa_slock);
707: TAILQ_REMOVE(&pp->pr_alloc->pa_list, pp, pr_alloc_list);
708: simple_unlock(&pp->pr_alloc->pa_slock);
1.93 dbj 709: splx(s);
1.66 thorpej 710:
1.101 thorpej 711: s = splvm();
712: simple_lock(&pp->pr_slock);
713:
1.102 chs 714: KASSERT(LIST_EMPTY(&pp->pr_cachelist));
1.3 pk 715:
716: #ifdef DIAGNOSTIC
1.20 thorpej 717: if (pp->pr_nout != 0) {
1.25 thorpej 718: pr_printlog(pp, NULL, printf);
1.80 provos 719: panic("pool_destroy: pool busy: still out: %u",
1.20 thorpej 720: pp->pr_nout);
1.3 pk 721: }
722: #endif
1.1 pk 723:
1.101 thorpej 724: KASSERT(LIST_EMPTY(&pp->pr_fullpages));
725: KASSERT(LIST_EMPTY(&pp->pr_partpages));
726:
1.3 pk 727: /* Remove all pages */
1.101 thorpej 728: LIST_INIT(&pq);
1.88 chs 729: while ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
1.101 thorpej 730: pr_rmpage(pp, ph, &pq);
731:
732: simple_unlock(&pp->pr_slock);
733: splx(s);
1.3 pk 734:
1.101 thorpej 735: pr_pagelist_free(pp, &pq);
1.3 pk 736:
1.59 thorpej 737: #ifdef POOL_DIAGNOSTIC
1.20 thorpej 738: if ((pp->pr_roflags & PR_LOGGING) != 0)
1.3 pk 739: free(pp->pr_log, M_TEMP);
1.59 thorpej 740: #endif
1.1 pk 741: }
742:
1.68 thorpej 743: void
744: pool_set_drain_hook(struct pool *pp, void (*fn)(void *, int), void *arg)
745: {
746:
747: /* XXX no locking -- must be used just after pool_init() */
748: #ifdef DIAGNOSTIC
749: if (pp->pr_drain_hook != NULL)
750: panic("pool_set_drain_hook(%s): already set", pp->pr_wchan);
751: #endif
752: pp->pr_drain_hook = fn;
753: pp->pr_drain_hook_arg = arg;
754: }
755:
1.88 chs 756: static struct pool_item_header *
1.55 thorpej 757: pool_alloc_item_header(struct pool *pp, caddr_t storage, int flags)
758: {
759: struct pool_item_header *ph;
760: int s;
761:
762: LOCK_ASSERT(simple_lock_held(&pp->pr_slock) == 0);
763:
764: if ((pp->pr_roflags & PR_PHINPAGE) != 0)
765: ph = (struct pool_item_header *) (storage + pp->pr_phoffset);
766: else {
1.85 pk 767: s = splvm();
1.97 yamt 768: ph = pool_get(pp->pr_phpool, flags);
1.55 thorpej 769: splx(s);
770: }
771:
772: return (ph);
773: }
1.1 pk 774:
775: /*
1.3 pk 776: * Grab an item from the pool; must be called at appropriate spl level
1.1 pk 777: */
1.3 pk 778: void *
1.59 thorpej 779: #ifdef POOL_DIAGNOSTIC
1.42 thorpej 780: _pool_get(struct pool *pp, int flags, const char *file, long line)
1.56 sommerfe 781: #else
782: pool_get(struct pool *pp, int flags)
783: #endif
1.1 pk 784: {
785: struct pool_item *pi;
1.3 pk 786: struct pool_item_header *ph;
1.55 thorpej 787: void *v;
1.1 pk 788:
1.2 pk 789: #ifdef DIAGNOSTIC
1.95 atatat 790: if (__predict_false(pp->pr_itemsperpage == 0))
791: panic("pool_get: pool %p: pr_itemsperpage is zero, "
792: "pool not initialized?", pp);
1.84 thorpej 793: if (__predict_false(curlwp == NULL && doing_shutdown == 0 &&
1.37 sommerfe 794: (flags & PR_WAITOK) != 0))
1.77 matt 795: panic("pool_get: %s: must have NOWAIT", pp->pr_wchan);
1.58 thorpej 796:
1.102 chs 797: #endif /* DIAGNOSTIC */
1.58 thorpej 798: #ifdef LOCKDEBUG
799: if (flags & PR_WAITOK)
800: simple_lock_only_held(NULL, "pool_get(PR_WAITOK)");
1.102 chs 801: SCHED_ASSERT_UNLOCKED();
1.56 sommerfe 802: #endif
1.1 pk 803:
1.21 thorpej 804: simple_lock(&pp->pr_slock);
1.25 thorpej 805: pr_enter(pp, file, line);
1.20 thorpej 806:
807: startover:
808: /*
809: * Check to see if we've reached the hard limit. If we have,
810: * and we can wait, then wait until an item has been returned to
811: * the pool.
812: */
813: #ifdef DIAGNOSTIC
1.34 thorpej 814: if (__predict_false(pp->pr_nout > pp->pr_hardlimit)) {
1.25 thorpej 815: pr_leave(pp);
1.21 thorpej 816: simple_unlock(&pp->pr_slock);
1.20 thorpej 817: panic("pool_get: %s: crossed hard limit", pp->pr_wchan);
818: }
819: #endif
1.34 thorpej 820: if (__predict_false(pp->pr_nout == pp->pr_hardlimit)) {
1.68 thorpej 821: if (pp->pr_drain_hook != NULL) {
822: /*
823: * Since the drain hook is going to free things
824: * back to the pool, unlock, call the hook, re-lock,
825: * and check the hardlimit condition again.
826: */
827: pr_leave(pp);
828: simple_unlock(&pp->pr_slock);
829: (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags);
830: simple_lock(&pp->pr_slock);
831: pr_enter(pp, file, line);
832: if (pp->pr_nout < pp->pr_hardlimit)
833: goto startover;
834: }
835:
1.29 sommerfe 836: if ((flags & PR_WAITOK) && !(flags & PR_LIMITFAIL)) {
1.20 thorpej 837: /*
838: * XXX: A warning isn't logged in this case. Should
839: * it be?
840: */
841: pp->pr_flags |= PR_WANTED;
1.25 thorpej 842: pr_leave(pp);
1.40 sommerfe 843: ltsleep(pp, PSWP, pp->pr_wchan, 0, &pp->pr_slock);
1.25 thorpej 844: pr_enter(pp, file, line);
1.20 thorpej 845: goto startover;
846: }
1.31 thorpej 847:
848: /*
849: * Log a message that the hard limit has been hit.
850: */
851: if (pp->pr_hardlimit_warning != NULL &&
852: ratecheck(&pp->pr_hardlimit_warning_last,
853: &pp->pr_hardlimit_ratecap))
854: log(LOG_ERR, "%s\n", pp->pr_hardlimit_warning);
1.21 thorpej 855:
856: pp->pr_nfail++;
857:
1.25 thorpej 858: pr_leave(pp);
1.21 thorpej 859: simple_unlock(&pp->pr_slock);
1.20 thorpej 860: return (NULL);
861: }
862:
1.3 pk 863: /*
864: * The convention we use is that if `curpage' is not NULL, then
              865: 	 * it points at a page with at least one free item. In particular,
              866: 	 * `curpage' never points at a full page; such pages are kept on
              867: 	 * the `pr_fullpages' list until an item is returned to them.
868: */
1.20 thorpej 869: if ((ph = pp->pr_curpage) == NULL) {
870: #ifdef DIAGNOSTIC
871: if (pp->pr_nitems != 0) {
1.21 thorpej 872: simple_unlock(&pp->pr_slock);
1.20 thorpej 873: printf("pool_get: %s: curpage NULL, nitems %u\n",
874: pp->pr_wchan, pp->pr_nitems);
1.80 provos 875: panic("pool_get: nitems inconsistent");
1.20 thorpej 876: }
877: #endif
878:
1.21 thorpej 879: /*
880: * Call the back-end page allocator for more memory.
881: * Release the pool lock, as the back-end page allocator
882: * may block.
883: */
1.25 thorpej 884: pr_leave(pp);
1.21 thorpej 885: simple_unlock(&pp->pr_slock);
1.66 thorpej 886: v = pool_allocator_alloc(pp, flags);
1.55 thorpej 887: if (__predict_true(v != NULL))
888: ph = pool_alloc_item_header(pp, v, flags);
1.15 pk 889:
1.55 thorpej 890: if (__predict_false(v == NULL || ph == NULL)) {
891: if (v != NULL)
1.66 thorpej 892: pool_allocator_free(pp, v);
1.55 thorpej 893:
1.91 yamt 894: simple_lock(&pp->pr_slock);
895: pr_enter(pp, file, line);
896:
1.21 thorpej 897: /*
1.55 thorpej 898: * We were unable to allocate a page or item
899: * header, but we released the lock during
900: * allocation, so perhaps items were freed
901: * back to the pool. Check for this case.
1.21 thorpej 902: */
903: if (pp->pr_curpage != NULL)
904: goto startover;
1.15 pk 905:
1.3 pk 906: if ((flags & PR_WAITOK) == 0) {
907: pp->pr_nfail++;
1.25 thorpej 908: pr_leave(pp);
1.21 thorpej 909: simple_unlock(&pp->pr_slock);
1.1 pk 910: return (NULL);
1.3 pk 911: }
912:
1.15 pk 913: /*
914: * Wait for items to be returned to this pool.
1.21 thorpej 915: *
1.109 christos 916: 	 * Wake up once a second and try again,
917: * as the check in pool_cache_put_paddr() is racy.
1.15 pk 918: */
1.1 pk 919: pp->pr_flags |= PR_WANTED;
1.66 thorpej 920: /* PA_WANTED is already set on the allocator. */
1.25 thorpej 921: pr_leave(pp);
1.109 christos 922: ltsleep(pp, PSWP, pp->pr_wchan, hz, &pp->pr_slock);
1.25 thorpej 923: pr_enter(pp, file, line);
1.20 thorpej 924: goto startover;
1.1 pk 925: }
1.3 pk 926:
1.15 pk 927: /* We have more memory; add it to the pool */
1.91 yamt 928: simple_lock(&pp->pr_slock);
929: pr_enter(pp, file, line);
1.55 thorpej 930: pool_prime_page(pp, v, ph);
1.15 pk 931: pp->pr_npagealloc++;
932:
1.20 thorpej 933: /* Start the allocation process over. */
934: goto startover;
1.3 pk 935: }
1.97 yamt 936: if (pp->pr_roflags & PR_NOTOUCH) {
937: #ifdef DIAGNOSTIC
938: if (__predict_false(ph->ph_nmissing == pp->pr_itemsperpage)) {
939: pr_leave(pp);
940: simple_unlock(&pp->pr_slock);
941: panic("pool_get: %s: page empty", pp->pr_wchan);
942: }
943: #endif
944: v = pr_item_notouch_get(pp, ph);
945: #ifdef POOL_DIAGNOSTIC
946: pr_log(pp, v, PRLOG_GET, file, line);
947: #endif
948: } else {
1.102 chs 949: v = pi = LIST_FIRST(&ph->ph_itemlist);
1.97 yamt 950: if (__predict_false(v == NULL)) {
951: pr_leave(pp);
952: simple_unlock(&pp->pr_slock);
953: panic("pool_get: %s: page empty", pp->pr_wchan);
954: }
1.20 thorpej 955: #ifdef DIAGNOSTIC
1.97 yamt 956: if (__predict_false(pp->pr_nitems == 0)) {
957: pr_leave(pp);
958: simple_unlock(&pp->pr_slock);
959: printf("pool_get: %s: items on itemlist, nitems %u\n",
960: pp->pr_wchan, pp->pr_nitems);
961: panic("pool_get: nitems inconsistent");
962: }
1.65 enami 963: #endif
1.56 sommerfe 964:
1.65 enami 965: #ifdef POOL_DIAGNOSTIC
1.97 yamt 966: pr_log(pp, v, PRLOG_GET, file, line);
1.65 enami 967: #endif
1.3 pk 968:
1.65 enami 969: #ifdef DIAGNOSTIC
1.97 yamt 970: if (__predict_false(pi->pi_magic != PI_MAGIC)) {
971: pr_printlog(pp, pi, printf);
972: panic("pool_get(%s): free list modified: "
973: "magic=%x; page %p; item addr %p\n",
974: pp->pr_wchan, pi->pi_magic, ph->ph_page, pi);
975: }
1.3 pk 976: #endif
977:
1.97 yamt 978: /*
979: * Remove from item list.
980: */
1.102 chs 981: LIST_REMOVE(pi, pi_list);
1.97 yamt 982: }
1.20 thorpej 983: pp->pr_nitems--;
984: pp->pr_nout++;
1.6 thorpej 985: if (ph->ph_nmissing == 0) {
986: #ifdef DIAGNOSTIC
1.34 thorpej 987: if (__predict_false(pp->pr_nidle == 0))
1.6 thorpej 988: panic("pool_get: nidle inconsistent");
989: #endif
990: pp->pr_nidle--;
1.88 chs 991:
992: /*
993: * This page was previously empty. Move it to the list of
994: * partially-full pages. This page is already curpage.
995: */
996: LIST_REMOVE(ph, ph_pagelist);
997: LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
1.6 thorpej 998: }
1.3 pk 999: ph->ph_nmissing++;
1.97 yamt 1000: if (ph->ph_nmissing == pp->pr_itemsperpage) {
1.21 thorpej 1001: #ifdef DIAGNOSTIC
1.97 yamt 1002: if (__predict_false((pp->pr_roflags & PR_NOTOUCH) == 0 &&
1.102 chs 1003: !LIST_EMPTY(&ph->ph_itemlist))) {
1.25 thorpej 1004: pr_leave(pp);
1.21 thorpej 1005: simple_unlock(&pp->pr_slock);
1006: panic("pool_get: %s: nmissing inconsistent",
1007: pp->pr_wchan);
1008: }
1009: #endif
1.3 pk 1010: /*
1.88 chs 1011: * This page is now full. Move it to the full list
1012: * and select a new current page.
1.3 pk 1013: */
1.88 chs 1014: LIST_REMOVE(ph, ph_pagelist);
1015: LIST_INSERT_HEAD(&pp->pr_fullpages, ph, ph_pagelist);
1016: pool_update_curpage(pp);
1.1 pk 1017: }
1.3 pk 1018:
1019: pp->pr_nget++;
1.111 christos 1020: pr_leave(pp);
1.20 thorpej 1021:
1022: /*
1023: * If we have a low water mark and we are now below that low
1024: * water mark, add more items to the pool.
1025: */
1.53 thorpej 1026: if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
1.20 thorpej 1027: /*
1028: * XXX: Should we log a warning? Should we set up a timeout
1029: * to try again in a second or so? The latter could break
1030: * a caller's assumptions about interrupt protection, etc.
1031: */
1032: }
1033:
1.21 thorpej 1034: simple_unlock(&pp->pr_slock);
1.1 pk 1035: return (v);
1036: }
1037:
1038: /*
1.43 thorpej 1039: * Internal version of pool_put(). Pool is already locked/entered.
1.1 pk 1040: */
1.43 thorpej 1041: static void
1.101 thorpej 1042: pool_do_put(struct pool *pp, void *v, struct pool_pagelist *pq)
1.1 pk 1043: {
1044: struct pool_item *pi = v;
1.3 pk 1045: struct pool_item_header *ph;
1046: caddr_t page;
1.21 thorpej 1047: int s;
1.3 pk 1048:
1.61 chs 1049: LOCK_ASSERT(simple_lock_held(&pp->pr_slock));
1.102 chs 1050: SCHED_ASSERT_UNLOCKED();
1.61 chs 1051:
1.66 thorpej 1052: page = (caddr_t)((u_long)v & pp->pr_alloc->pa_pagemask);
1.1 pk 1053:
1.30 thorpej 1054: #ifdef DIAGNOSTIC
1.34 thorpej 1055: if (__predict_false(pp->pr_nout == 0)) {
1.30 thorpej 1056: printf("pool %s: putting with none out\n",
1057: pp->pr_wchan);
1058: panic("pool_put");
1059: }
1060: #endif
1.3 pk 1061:
1.34 thorpej 1062: if (__predict_false((ph = pr_find_pagehead(pp, page)) == NULL)) {
1.25 thorpej 1063: pr_printlog(pp, NULL, printf);
1.3 pk 1064: panic("pool_put: %s: page header missing", pp->pr_wchan);
1065: }
1.28 thorpej 1066:
1067: #ifdef LOCKDEBUG
1068: /*
1069: * Check if we're freeing a locked simple lock.
1070: */
1071: simple_lock_freecheck((caddr_t)pi, ((caddr_t)pi) + pp->pr_size);
1072: #endif
1.3 pk 1073:
1074: /*
1075: * Return to item list.
1076: */
1.97 yamt 1077: if (pp->pr_roflags & PR_NOTOUCH) {
1078: pr_item_notouch_put(pp, ph, v);
1079: } else {
1.2 pk 1080: #ifdef DIAGNOSTIC
1.97 yamt 1081: pi->pi_magic = PI_MAGIC;
1.3 pk 1082: #endif
1.32 chs 1083: #ifdef DEBUG
1.97 yamt 1084: {
1085: int i, *ip = v;
1.32 chs 1086:
1.97 yamt 1087: for (i = 0; i < pp->pr_size / sizeof(int); i++) {
1088: *ip++ = PI_MAGIC;
1089: }
1.32 chs 1090: }
1091: #endif
1092:
1.102 chs 1093: LIST_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
1.97 yamt 1094: }
1.79 thorpej 1095: KDASSERT(ph->ph_nmissing != 0);
1.3 pk 1096: ph->ph_nmissing--;
1097: pp->pr_nput++;
1.20 thorpej 1098: pp->pr_nitems++;
1099: pp->pr_nout--;
1.3 pk 1100:
1101: /* Cancel "pool empty" condition if it exists */
1102: if (pp->pr_curpage == NULL)
1103: pp->pr_curpage = ph;
1104:
1105: if (pp->pr_flags & PR_WANTED) {
1106: pp->pr_flags &= ~PR_WANTED;
1.15 pk 1107: if (ph->ph_nmissing == 0)
1108: pp->pr_nidle++;
1.3 pk 1109: wakeup((caddr_t)pp);
1110: return;
1111: }
1112:
1113: /*
1.88 chs 1114: * If this page is now empty, do one of two things:
1.21 thorpej 1115: *
1.88 chs 1116: * (1) If we have more pages than the page high water mark,
1.96 thorpej 1117: * free the page back to the system. ONLY CONSIDER
1.90 thorpej 1118: * FREEING BACK A PAGE IF WE HAVE MORE THAN OUR MINIMUM PAGE
1119: * CLAIM.
1.21 thorpej 1120: *
1.88 chs 1121: * (2) Otherwise, move the page to the empty page list.
1122: *
1123: * Either way, select a new current page (so we use a partially-full
1124: * page if one is available).
1.3 pk 1125: */
1126: if (ph->ph_nmissing == 0) {
1.6 thorpej 1127: pp->pr_nidle++;
1.90 thorpej 1128: if (pp->pr_npages > pp->pr_minpages &&
1129: (pp->pr_npages > pp->pr_maxpages ||
1130: (pp->pr_alloc->pa_flags & PA_WANT) != 0)) {
1.101 thorpej 1131: pr_rmpage(pp, ph, pq);
1.3 pk 1132: } else {
1.88 chs 1133: LIST_REMOVE(ph, ph_pagelist);
1134: LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);
1.3 pk 1135:
1.21 thorpej 1136: /*
1137: * Update the timestamp on the page. A page must
1138: * be idle for some period of time before it can
1139: * be reclaimed by the pagedaemon. This minimizes
1140: * ping-pong'ing for memory.
1141: */
1142: s = splclock();
1143: ph->ph_time = mono_time;
1144: splx(s);
1.1 pk 1145: }
1.88 chs 1146: pool_update_curpage(pp);
1.1 pk 1147: }
1.88 chs 1148:
1.21 thorpej 1149: /*
1.88 chs 1150: * If the page was previously completely full, move it to the
1151: * partially-full list and make it the current page. The next
1152: * allocation will get the item from this page, instead of
1153: * further fragmenting the pool.
1.21 thorpej 1154: */
1155: else if (ph->ph_nmissing == (pp->pr_itemsperpage - 1)) {
1.88 chs 1156: LIST_REMOVE(ph, ph_pagelist);
1157: LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
1.21 thorpej 1158: pp->pr_curpage = ph;
1159: }
1.43 thorpej 1160: }
1161:
1162: /*
1163: * Return resource to the pool; must be called at appropriate spl level
1164: */
1.59 thorpej 1165: #ifdef POOL_DIAGNOSTIC
1.43 thorpej 1166: void
1167: _pool_put(struct pool *pp, void *v, const char *file, long line)
1168: {
1.101 thorpej 1169: struct pool_pagelist pq;
1170:
1171: LIST_INIT(&pq);
1.43 thorpej 1172:
1173: simple_lock(&pp->pr_slock);
1174: pr_enter(pp, file, line);
1175:
1.56 sommerfe 1176: pr_log(pp, v, PRLOG_PUT, file, line);
1177:
1.101 thorpej 1178: pool_do_put(pp, v, &pq);
1.21 thorpej 1179:
1.25 thorpej 1180: pr_leave(pp);
1.21 thorpej 1181: simple_unlock(&pp->pr_slock);
1.101 thorpej 1182:
1.102 chs 1183: pr_pagelist_free(pp, &pq);
1.1 pk 1184: }
1.57 sommerfe 1185: #undef pool_put
1.59 thorpej 1186: #endif /* POOL_DIAGNOSTIC */
1.1 pk 1187:
1.56 sommerfe 1188: void
1189: pool_put(struct pool *pp, void *v)
1190: {
1.101 thorpej 1191: struct pool_pagelist pq;
1192:
1193: LIST_INIT(&pq);
1.56 sommerfe 1194:
1195: simple_lock(&pp->pr_slock);
1.101 thorpej 1196: pool_do_put(pp, v, &pq);
1197: simple_unlock(&pp->pr_slock);
1.56 sommerfe 1198:
1.102 chs 1199: pr_pagelist_free(pp, &pq);
1.56 sommerfe 1200: }
1.57 sommerfe 1201:
1.59 thorpej 1202: #ifdef POOL_DIAGNOSTIC
1.57 sommerfe 1203: #define pool_put(h, v) _pool_put((h), (v), __FILE__, __LINE__)
1.56 sommerfe 1204: #endif
1.74 thorpej 1205:
1206: /*
1207: * Add N items to the pool.
1208: */
1209: int
1210: pool_prime(struct pool *pp, int n)
1211: {
1.83 scw 1212: struct pool_item_header *ph = NULL;
1.74 thorpej 1213: caddr_t cp;
1.75 simonb 1214: int newpages;
1.74 thorpej 1215:
1216: simple_lock(&pp->pr_slock);
1217:
1218: newpages = roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1219:
1220: while (newpages-- > 0) {
1221: simple_unlock(&pp->pr_slock);
1222: cp = pool_allocator_alloc(pp, PR_NOWAIT);
1223: if (__predict_true(cp != NULL))
1224: ph = pool_alloc_item_header(pp, cp, PR_NOWAIT);
1225:
1226: if (__predict_false(cp == NULL || ph == NULL)) {
1227: if (cp != NULL)
1228: pool_allocator_free(pp, cp);
1.91 yamt 1229: simple_lock(&pp->pr_slock);
1.74 thorpej 1230: break;
1231: }
1232:
1.91 yamt 1233: simple_lock(&pp->pr_slock);
1.74 thorpej 1234: pool_prime_page(pp, cp, ph);
1235: pp->pr_npagealloc++;
1236: pp->pr_minpages++;
1237: }
1238:
1239: if (pp->pr_minpages >= pp->pr_maxpages)
1240: pp->pr_maxpages = pp->pr_minpages + 1; /* XXX */
1241:
1242: simple_unlock(&pp->pr_slock);
1243: return (0);
1244: }
1.55 thorpej 1245:
1246: /*
1.3 pk 1247: * Add a page worth of items to the pool.
1.21 thorpej 1248: *
1249: * Note, we must be called with the pool descriptor LOCKED.
1.3 pk 1250: */
1.55 thorpej 1251: static void
1252: pool_prime_page(struct pool *pp, caddr_t storage, struct pool_item_header *ph)
1.3 pk 1253: {
1254: struct pool_item *pi;
1255: caddr_t cp = storage;
1256: unsigned int align = pp->pr_align;
1257: unsigned int ioff = pp->pr_itemoffset;
1.55 thorpej 1258: int n;
1.89 yamt 1259: int s;
1.36 pk 1260:
1.91 yamt 1261: LOCK_ASSERT(simple_lock_held(&pp->pr_slock));
1262:
1.66 thorpej 1263: #ifdef DIAGNOSTIC
1264: if (((u_long)cp & (pp->pr_alloc->pa_pagesz - 1)) != 0)
1.36 pk 1265: panic("pool_prime_page: %s: unaligned page", pp->pr_wchan);
1.66 thorpej 1266: #endif
1.3 pk 1267:
1268: /*
1269: * Insert page header.
1270: */
1.88 chs 1271: LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);
1.102 chs 1272: LIST_INIT(&ph->ph_itemlist);
1.3 pk 1273: ph->ph_page = storage;
1274: ph->ph_nmissing = 0;
1.89 yamt 1275: s = splclock();
1276: ph->ph_time = mono_time;
1277: splx(s);
1.88 chs 1278: if ((pp->pr_roflags & PR_PHINPAGE) == 0)
1279: SPLAY_INSERT(phtree, &pp->pr_phtree, ph);
1.3 pk 1280:
1.6 thorpej 1281: pp->pr_nidle++;
1282:
1.3 pk 1283: /*
1284: * Color this page.
1285: */
1286: cp = (caddr_t)(cp + pp->pr_curcolor);
1287: if ((pp->pr_curcolor += align) > pp->pr_maxcolor)
1288: pp->pr_curcolor = 0;
1289:
1290: /*
              1291: 	 * Adjust storage to apply alignment to `pr_itemoffset' in each item.
1292: */
1293: if (ioff != 0)
1294: cp = (caddr_t)(cp + (align - ioff));
1295:
1296: /*
1297: * Insert remaining chunks on the bucket list.
1298: */
1299: n = pp->pr_itemsperpage;
1.20 thorpej 1300: pp->pr_nitems += n;
1.3 pk 1301:
1.97 yamt 1302: if (pp->pr_roflags & PR_NOTOUCH) {
1.99 yamt 1303: pool_item_freelist_t *freelist = PR_FREELIST(ph);
1.97 yamt 1304: int i;
1305:
1.99 yamt 1306: ph->ph_off = cp - storage;
1.97 yamt 1307: ph->ph_firstfree = 0;
1308: for (i = 0; i < n - 1; i++)
1309: freelist[i] = i + 1;
1310: freelist[n - 1] = PR_INDEX_EOL;
1311: } else {
1312: while (n--) {
1313: pi = (struct pool_item *)cp;
1.78 thorpej 1314:
1.97 yamt 1315: KASSERT(((((vaddr_t)pi) + ioff) & (align - 1)) == 0);
1.3 pk 1316:
1.97 yamt 1317: /* Insert on page list */
1.102 chs 1318: LIST_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
1.3 pk 1319: #ifdef DIAGNOSTIC
1.97 yamt 1320: pi->pi_magic = PI_MAGIC;
1.3 pk 1321: #endif
1.97 yamt 1322: cp = (caddr_t)(cp + pp->pr_size);
1323: }
1.3 pk 1324: }
1325:
1326: /*
1327: * If the pool was depleted, point at the new page.
1328: */
1329: if (pp->pr_curpage == NULL)
1330: pp->pr_curpage = ph;
1331:
1332: if (++pp->pr_npages > pp->pr_hiwat)
1333: pp->pr_hiwat = pp->pr_npages;
1334: }
1335:
1.20 thorpej 1336: /*
1.52 thorpej  1337:  * Used by pool_get() when nitems drops below the low water mark. It
1.88 chs      1338:  * replenishes the pool to bring pr_nitems back up to that mark.
1.20 thorpej  1339:  *
1.21 thorpej  1340:  * Note 1: we never wait for memory here; we let the caller decide what to do.
1.20 thorpej  1341:  *
1.73 thorpej  1342:  * Note 2: we must be called with the pool already locked, and we return
1.20 thorpej 1343: * with it locked.
1344: */
1345: static int
1.42 thorpej 1346: pool_catchup(struct pool *pp)
1.20 thorpej 1347: {
1.83 scw 1348: struct pool_item_header *ph = NULL;
1.20 thorpej 1349: caddr_t cp;
1350: int error = 0;
1351:
1.54 thorpej 1352: while (POOL_NEEDS_CATCHUP(pp)) {
1.20 thorpej 1353: /*
1.21 thorpej 1354: * Call the page back-end allocator for more memory.
1355: *
1356: * XXX: We never wait, so should we bother unlocking
1357: * the pool descriptor?
1.20 thorpej 1358: */
1.21 thorpej 1359: simple_unlock(&pp->pr_slock);
1.66 thorpej 1360: cp = pool_allocator_alloc(pp, PR_NOWAIT);
1.55 thorpej 1361: if (__predict_true(cp != NULL))
1362: ph = pool_alloc_item_header(pp, cp, PR_NOWAIT);
1363: if (__predict_false(cp == NULL || ph == NULL)) {
1364: if (cp != NULL)
1.66 thorpej 1365: pool_allocator_free(pp, cp);
1.20 thorpej 1366: error = ENOMEM;
1.91 yamt 1367: simple_lock(&pp->pr_slock);
1.20 thorpej 1368: break;
1369: }
1.91 yamt 1370: simple_lock(&pp->pr_slock);
1.55 thorpej 1371: pool_prime_page(pp, cp, ph);
1.26 thorpej 1372: pp->pr_npagealloc++;
1.20 thorpej 1373: }
1374:
1375: return (error);
1376: }
1377:
1.88 chs 1378: static void
1379: pool_update_curpage(struct pool *pp)
1380: {
1381:
1382: pp->pr_curpage = LIST_FIRST(&pp->pr_partpages);
1383: if (pp->pr_curpage == NULL) {
1384: pp->pr_curpage = LIST_FIRST(&pp->pr_emptypages);
1385: }
1386: }
1387:
1.3 pk 1388: void
1.42 thorpej 1389: pool_setlowat(struct pool *pp, int n)
1.3 pk 1390: {
1.15 pk 1391:
1.21 thorpej 1392: simple_lock(&pp->pr_slock);
1393:
1.3 pk 1394: pp->pr_minitems = n;
1.15 pk 1395: pp->pr_minpages = (n == 0)
1396: ? 0
1.18 thorpej 1397: : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1.20 thorpej 1398:
1399: /* Make sure we're caught up with the newly-set low water mark. */
1.75 simonb 1400: if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
1.20 thorpej 1401: /*
1402: * XXX: Should we log a warning? Should we set up a timeout
1403: * to try again in a second or so? The latter could break
1404: * a caller's assumptions about interrupt protection, etc.
1405: */
1406: }
1.21 thorpej 1407:
1408: simple_unlock(&pp->pr_slock);
1.3 pk 1409: }
1410:
1411: void
1.42 thorpej 1412: pool_sethiwat(struct pool *pp, int n)
1.3 pk 1413: {
1.15 pk 1414:
1.21 thorpej 1415: simple_lock(&pp->pr_slock);
1416:
1.15 pk 1417: pp->pr_maxpages = (n == 0)
1418: ? 0
1.18 thorpej 1419: : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1.21 thorpej 1420:
1421: simple_unlock(&pp->pr_slock);
1.3 pk 1422: }
1423:
1.20 thorpej 1424: void
1.42 thorpej 1425: pool_sethardlimit(struct pool *pp, int n, const char *warnmess, int ratecap)
1.20 thorpej 1426: {
1427:
1.21 thorpej 1428: simple_lock(&pp->pr_slock);
1.20 thorpej 1429:
1430: pp->pr_hardlimit = n;
1431: pp->pr_hardlimit_warning = warnmess;
1.31 thorpej 1432: pp->pr_hardlimit_ratecap.tv_sec = ratecap;
1433: pp->pr_hardlimit_warning_last.tv_sec = 0;
1434: pp->pr_hardlimit_warning_last.tv_usec = 0;
1.20 thorpej 1435:
1436: /*
1.21 thorpej 1437: * In-line version of pool_sethiwat(), because we don't want to
1438: * release the lock.
1.20 thorpej 1439: */
1440: pp->pr_maxpages = (n == 0)
1441: ? 0
1442: : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1.21 thorpej 1443:
1444: simple_unlock(&pp->pr_slock);
1.20 thorpej 1445: }
1.3 pk 1446:
1447: /*
1448: * Release all complete pages that have not been used recently.
1449: */
1.66 thorpej 1450: int
1.59 thorpej 1451: #ifdef POOL_DIAGNOSTIC
1.42 thorpej 1452: _pool_reclaim(struct pool *pp, const char *file, long line)
1.56 sommerfe 1453: #else
1454: pool_reclaim(struct pool *pp)
1455: #endif
1.3 pk 1456: {
1457: struct pool_item_header *ph, *phnext;
1.43 thorpej 1458: struct pool_cache *pc;
1.61 chs 1459: struct pool_pagelist pq;
1.102 chs 1460: struct pool_cache_grouplist pcgl;
1461: struct timeval curtime, diff;
1.21 thorpej 1462: int s;
1.3 pk 1463:
1.68 thorpej 1464: if (pp->pr_drain_hook != NULL) {
1465: /*
1466: * The drain hook must be called with the pool unlocked.
1467: */
1468: (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, PR_NOWAIT);
1469: }
1470:
1.21 thorpej 1471: if (simple_lock_try(&pp->pr_slock) == 0)
1.66 thorpej 1472: return (0);
1.25 thorpej 1473: pr_enter(pp, file, line);
1.68 thorpej 1474:
1.88 chs 1475: LIST_INIT(&pq);
1.102 chs 1476: LIST_INIT(&pcgl);
1.3 pk 1477:
1.43 thorpej 1478: /*
1479: * Reclaim items from the pool's caches.
1480: */
1.102 chs 1481: LIST_FOREACH(pc, &pp->pr_cachelist, pc_poollist)
1482: pool_cache_reclaim(pc, &pq, &pcgl);
1.43 thorpej 1483:
1.21 thorpej 1484: s = splclock();
1485: curtime = mono_time;
1486: splx(s);
1487:
1.88 chs 1488: for (ph = LIST_FIRST(&pp->pr_emptypages); ph != NULL; ph = phnext) {
1489: phnext = LIST_NEXT(ph, ph_pagelist);
1.3 pk 1490:
1491: /* Check our minimum page claim */
1492: if (pp->pr_npages <= pp->pr_minpages)
1493: break;
1494:
1.88 chs 1495: KASSERT(ph->ph_nmissing == 0);
1496: timersub(&curtime, &ph->ph_time, &diff);
1497: if (diff.tv_sec < pool_inactive_time)
1498: continue;
1.21 thorpej 1499:
1.88 chs 1500: /*
1501: * If freeing this page would put us below
1502: * the low water mark, stop now.
1503: */
1504: if ((pp->pr_nitems - pp->pr_itemsperpage) <
1505: pp->pr_minitems)
1506: break;
1.21 thorpej 1507:
1.88 chs 1508: pr_rmpage(pp, ph, &pq);
1.3 pk 1509: }
1510:
1.25 thorpej 1511: pr_leave(pp);
1.21 thorpej 1512: simple_unlock(&pp->pr_slock);
1.102 chs 1513: if (LIST_EMPTY(&pq) && LIST_EMPTY(&pcgl))
1514: return 0;
1.66 thorpej 1515:
1.101 thorpej 1516: pr_pagelist_free(pp, &pq);
1.102 chs 1517: pcg_grouplist_free(&pcgl);
1.66 thorpej 1518: return (1);
1.3 pk 1519: }
1520:
1521: /*
1522: * Drain pools, one at a time.
1.21 thorpej 1523: *
1524: * Note, we must never be called from an interrupt context.
1.3 pk 1525: */
1526: void
1.42 thorpej 1527: pool_drain(void *arg)
1.3 pk 1528: {
1529: struct pool *pp;
1.23 thorpej 1530: int s;
1.3 pk 1531:
1.61 chs 1532: pp = NULL;
1.49 thorpej 1533: s = splvm();
1.23 thorpej 1534: simple_lock(&pool_head_slock);
1.61 chs 1535: if (drainpp == NULL) {
1.102 chs 1536: drainpp = LIST_FIRST(&pool_head);
1.61 chs 1537: }
1538: if (drainpp) {
1539: pp = drainpp;
1.102 chs 1540: drainpp = LIST_NEXT(pp, pr_poollist);
1.61 chs 1541: }
1542: simple_unlock(&pool_head_slock);
1.63 chs 1543: pool_reclaim(pp);
1.61 chs 1544: splx(s);
1.3 pk 1545: }
1546:
1547: /*
1548: * Diagnostic helpers.
1549: */
1550: void
1.42 thorpej 1551: pool_print(struct pool *pp, const char *modif)
1.21 thorpej 1552: {
1553: int s;
1554:
1.49 thorpej 1555: s = splvm();
1.25 thorpej 1556: if (simple_lock_try(&pp->pr_slock) == 0) {
1557: printf("pool %s is locked; try again later\n",
1558: pp->pr_wchan);
1559: splx(s);
1560: return;
1561: }
1562: pool_print1(pp, modif, printf);
1.21 thorpej 1563: simple_unlock(&pp->pr_slock);
1564: splx(s);
1565: }
1566:
1.25 thorpej 1567: void
1.108 yamt 1568: pool_printall(const char *modif, void (*pr)(const char *, ...))
1569: {
1570: struct pool *pp;
1571:
1572: if (simple_lock_try(&pool_head_slock) == 0) {
1573: (*pr)("WARNING: pool_head_slock is locked\n");
1574: } else {
1575: simple_unlock(&pool_head_slock);
1576: }
1577:
1578: LIST_FOREACH(pp, &pool_head, pr_poollist) {
1579: pool_printit(pp, modif, pr);
1580: }
1581: }
1582:
1583: void
1.42 thorpej 1584: pool_printit(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
1.25 thorpej 1585: {
1586:
1587: if (pp == NULL) {
1588: (*pr)("Must specify a pool to print.\n");
1589: return;
1590: }
1591:
1592: /*
1593: * Called from DDB; interrupts should be blocked, and all
1594: * other processors should be paused. We can skip locking
1595: * the pool in this case.
1596: *
1597: * We do a simple_lock_try() just to print the lock
1598: * status, however.
1599: */
1600:
1601: if (simple_lock_try(&pp->pr_slock) == 0)
1602: (*pr)("WARNING: pool %s is locked\n", pp->pr_wchan);
1603: else
1.107 yamt 1604: simple_unlock(&pp->pr_slock);
1.25 thorpej 1605:
1606: pool_print1(pp, modif, pr);
1607: }
1608:
1.21 thorpej 1609: static void
1.97 yamt 1610: pool_print_pagelist(struct pool *pp, struct pool_pagelist *pl,
1611: void (*pr)(const char *, ...))
1.88 chs 1612: {
1613: struct pool_item_header *ph;
1614: #ifdef DIAGNOSTIC
1615: struct pool_item *pi;
1616: #endif
1617:
1618: LIST_FOREACH(ph, pl, ph_pagelist) {
1619: (*pr)("\t\tpage %p, nmissing %d, time %lu,%lu\n",
1620: ph->ph_page, ph->ph_nmissing,
1621: (u_long)ph->ph_time.tv_sec,
1622: (u_long)ph->ph_time.tv_usec);
1623: #ifdef DIAGNOSTIC
1.97 yamt 1624: if (!(pp->pr_roflags & PR_NOTOUCH)) {
1.102 chs 1625: LIST_FOREACH(pi, &ph->ph_itemlist, pi_list) {
1.97 yamt 1626: if (pi->pi_magic != PI_MAGIC) {
1627: (*pr)("\t\t\titem %p, magic 0x%x\n",
1628: pi, pi->pi_magic);
1629: }
1.88 chs 1630: }
1631: }
1632: #endif
1633: }
1634: }
1635:
1636: static void
1.42 thorpej 1637: pool_print1(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
1.3 pk 1638: {
1.25 thorpej 1639: struct pool_item_header *ph;
1.44 thorpej 1640: struct pool_cache *pc;
1641: struct pool_cache_group *pcg;
1642: int i, print_log = 0, print_pagelist = 0, print_cache = 0;
1.25 thorpej 1643: char c;
1644:
1645: while ((c = *modif++) != '\0') {
1646: if (c == 'l')
1647: print_log = 1;
1648: if (c == 'p')
1649: print_pagelist = 1;
1.44 thorpej 1650: if (c == 'c')
1651: print_cache = 1;
1.25 thorpej 1652: }
1653:
1654: (*pr)("POOL %s: size %u, align %u, ioff %u, roflags 0x%08x\n",
1655: pp->pr_wchan, pp->pr_size, pp->pr_align, pp->pr_itemoffset,
1656: pp->pr_roflags);
1.66 thorpej 1657: (*pr)("\talloc %p\n", pp->pr_alloc);
1.25 thorpej 1658: (*pr)("\tminitems %u, minpages %u, maxpages %u, npages %u\n",
1659: pp->pr_minitems, pp->pr_minpages, pp->pr_maxpages, pp->pr_npages);
1660: (*pr)("\titemsperpage %u, nitems %u, nout %u, hardlimit %u\n",
1661: pp->pr_itemsperpage, pp->pr_nitems, pp->pr_nout, pp->pr_hardlimit);
1662:
1663: (*pr)("\n\tnget %lu, nfail %lu, nput %lu\n",
1664: pp->pr_nget, pp->pr_nfail, pp->pr_nput);
1665: (*pr)("\tnpagealloc %lu, npagefree %lu, hiwat %u, nidle %lu\n",
1666: pp->pr_npagealloc, pp->pr_npagefree, pp->pr_hiwat, pp->pr_nidle);
1667:
1668: if (print_pagelist == 0)
1669: goto skip_pagelist;
1670:
1.88 chs 1671: if ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
1672: (*pr)("\n\tempty page list:\n");
1.97 yamt 1673: pool_print_pagelist(pp, &pp->pr_emptypages, pr);
1.88 chs 1674: if ((ph = LIST_FIRST(&pp->pr_fullpages)) != NULL)
1675: (*pr)("\n\tfull page list:\n");
1.97 yamt 1676: pool_print_pagelist(pp, &pp->pr_fullpages, pr);
1.88 chs 1677: if ((ph = LIST_FIRST(&pp->pr_partpages)) != NULL)
1678: (*pr)("\n\tpartial-page list:\n");
1.97 yamt 1679: pool_print_pagelist(pp, &pp->pr_partpages, pr);
1.88 chs 1680:
1.25 thorpej 1681: if (pp->pr_curpage == NULL)
1682: (*pr)("\tno current page\n");
1683: else
1684: (*pr)("\tcurpage %p\n", pp->pr_curpage->ph_page);
1685:
1686: skip_pagelist:
1687: if (print_log == 0)
1688: goto skip_log;
1689:
1690: (*pr)("\n");
1691: if ((pp->pr_roflags & PR_LOGGING) == 0)
1692: (*pr)("\tno log\n");
1693: else
1694: pr_printlog(pp, NULL, pr);
1.3 pk 1695:
1.25 thorpej 1696: skip_log:
1.44 thorpej 1697: if (print_cache == 0)
1698: goto skip_cache;
1699:
1.102 chs 1700: #define PR_GROUPLIST(pcg) \
1701: (*pr)("\t\tgroup %p: avail %d\n", pcg, pcg->pcg_avail); \
1702: for (i = 0; i < PCG_NOBJECTS; i++) { \
1703: if (pcg->pcg_objects[i].pcgo_pa != \
1704: POOL_PADDR_INVALID) { \
1705: (*pr)("\t\t\t%p, 0x%llx\n", \
1706: pcg->pcg_objects[i].pcgo_va, \
1707: (unsigned long long) \
1708: pcg->pcg_objects[i].pcgo_pa); \
1709: } else { \
1710: (*pr)("\t\t\t%p\n", \
1711: pcg->pcg_objects[i].pcgo_va); \
1712: } \
1713: }
1714:
1715: LIST_FOREACH(pc, &pp->pr_cachelist, pc_poollist) {
1.103 chs 1716: (*pr)("\tcache %p\n", pc);
1.48 thorpej 1717: (*pr)("\t hits %lu misses %lu ngroups %lu nitems %lu\n",
1718: pc->pc_hits, pc->pc_misses, pc->pc_ngroups, pc->pc_nitems);
1.102 chs 1719: (*pr)("\t full groups:\n");
1.103 chs 1720: LIST_FOREACH(pcg, &pc->pc_fullgroups, pcg_list) {
1.102 chs 1721: PR_GROUPLIST(pcg);
1.103 chs 1722: }
1.102 chs 1723: (*pr)("\t partial groups:\n");
1.103 chs 1724: LIST_FOREACH(pcg, &pc->pc_partgroups, pcg_list) {
1.102 chs 1725: PR_GROUPLIST(pcg);
1.103 chs 1726: }
1.102 chs 1727: (*pr)("\t empty groups:\n");
1.103 chs 1728: LIST_FOREACH(pcg, &pc->pc_emptygroups, pcg_list) {
1.102 chs 1729: PR_GROUPLIST(pcg);
1.103 chs 1730: }
1.44 thorpej 1731: }
1.102 chs 1732: #undef PR_GROUPLIST
1.44 thorpej 1733:
1734: skip_cache:
1.88 chs 1735: pr_enter_check(pp, pr);
1736: }
1737:
1738: static int
1739: pool_chk_page(struct pool *pp, const char *label, struct pool_item_header *ph)
1740: {
1741: struct pool_item *pi;
1742: caddr_t page;
1743: int n;
1744:
1745: page = (caddr_t)((u_long)ph & pp->pr_alloc->pa_pagemask);
1746: if (page != ph->ph_page &&
1747: (pp->pr_roflags & PR_PHINPAGE) != 0) {
1748: if (label != NULL)
1749: printf("%s: ", label);
1750: printf("pool(%p:%s): page inconsistency: page %p;"
1751: " at page head addr %p (p %p)\n", pp,
1752: pp->pr_wchan, ph->ph_page,
1753: ph, page);
1754: return 1;
1755: }
1.3 pk 1756:
1.97 yamt 1757: if ((pp->pr_roflags & PR_NOTOUCH) != 0)
1758: return 0;
1759:
1.102 chs 1760: for (pi = LIST_FIRST(&ph->ph_itemlist), n = 0;
1.88 chs 1761: pi != NULL;
1.102 chs 1762: pi = LIST_NEXT(pi,pi_list), n++) {
1.88 chs 1763:
1764: #ifdef DIAGNOSTIC
1765: if (pi->pi_magic != PI_MAGIC) {
1766: if (label != NULL)
1767: printf("%s: ", label);
1768: printf("pool(%s): free list modified: magic=%x;"
1769: " page %p; item ordinal %d;"
1770: " addr %p (p %p)\n",
1771: pp->pr_wchan, pi->pi_magic, ph->ph_page,
1772: n, pi, page);
1773: panic("pool");
1774: }
1775: #endif
1776: page =
1777: (caddr_t)((u_long)pi & pp->pr_alloc->pa_pagemask);
1778: if (page == ph->ph_page)
1779: continue;
1780:
1781: if (label != NULL)
1782: printf("%s: ", label);
1783: printf("pool(%p:%s): page inconsistency: page %p;"
1784: " item ordinal %d; addr %p (p %p)\n", pp,
1785: pp->pr_wchan, ph->ph_page,
1786: n, pi, page);
1787: return 1;
1788: }
1789: return 0;
1.3 pk 1790: }
1791:
1.88 chs 1792:
1.3 pk 1793: int
1.42 thorpej 1794: pool_chk(struct pool *pp, const char *label)
1.3 pk 1795: {
1796: struct pool_item_header *ph;
1797: int r = 0;
1798:
1.21 thorpej 1799: simple_lock(&pp->pr_slock);
1.88 chs 1800: LIST_FOREACH(ph, &pp->pr_emptypages, ph_pagelist) {
1801: r = pool_chk_page(pp, label, ph);
1802: if (r) {
1803: goto out;
1804: }
1805: }
1806: LIST_FOREACH(ph, &pp->pr_fullpages, ph_pagelist) {
1807: r = pool_chk_page(pp, label, ph);
1808: if (r) {
1.3 pk 1809: goto out;
1810: }
1.88 chs 1811: }
1812: LIST_FOREACH(ph, &pp->pr_partpages, ph_pagelist) {
1813: r = pool_chk_page(pp, label, ph);
1814: if (r) {
1.3 pk 1815: goto out;
1816: }
1817: }
1.88 chs 1818:
1.3 pk 1819: out:
1.21 thorpej 1820: simple_unlock(&pp->pr_slock);
1.3 pk 1821: return (r);
1.43 thorpej 1822: }
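
/*
 * A minimal sketch of how a consistency check might be invoked from a
 * subsystem's own DIAGNOSTIC code paths (the "foo" names are
 * hypothetical):
 *
 *	if (pool_chk(&foo_pool, __func__) != 0)
 *		panic("foo: pool corrupt");
 *
 * pool_chk() returns non-zero as soon as one inconsistent page is found.
 */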
1823:
1824: /*
1825: * pool_cache_init:
1826: *
1827: * Initialize a pool cache.
1828: *
1829: * NOTE: If the pool must be protected from interrupts, we expect
1830: * to be called at the appropriate interrupt priority level.
1831: */
1832: void
1833: pool_cache_init(struct pool_cache *pc, struct pool *pp,
1834: int (*ctor)(void *, void *, int),
1835: void (*dtor)(void *, void *),
1836: void *arg)
1837: {
1838:
1.102 chs 1839: LIST_INIT(&pc->pc_emptygroups);
1840: LIST_INIT(&pc->pc_fullgroups);
1841: LIST_INIT(&pc->pc_partgroups);
1.43 thorpej 1842: simple_lock_init(&pc->pc_slock);
1843:
1844: pc->pc_pool = pp;
1845:
1846: pc->pc_ctor = ctor;
1847: pc->pc_dtor = dtor;
1848: pc->pc_arg = arg;
1849:
1.48 thorpej 1850: pc->pc_hits = 0;
1851: pc->pc_misses = 0;
1852:
1853: pc->pc_ngroups = 0;
1854:
1855: pc->pc_nitems = 0;
1856:
1.43 thorpej 1857: simple_lock(&pp->pr_slock);
1.102 chs 1858: LIST_INSERT_HEAD(&pp->pr_cachelist, pc, pc_poollist);
1.43 thorpej 1859: simple_unlock(&pp->pr_slock);
1860: }
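
/*
 * A minimal setup sketch; the "foo" names are hypothetical and the
 * constructor/destructor bodies are elided:
 *
 *	static struct pool foo_pool;
 *	static struct pool_cache foo_cache;
 *
 *	static int
 *	foo_ctor(void *arg, void *object, int flags)
 *	{
 *		... initialize the object; return 0 on success ...
 *	}
 *
 *	static void
 *	foo_dtor(void *arg, void *object)
 *	{
 *		... release whatever foo_ctor() set up ...
 *	}
 *
 *	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0, "foopl",
 *	    &pool_allocator_nointr);
 *	pool_cache_init(&foo_cache, &foo_pool, foo_ctor, foo_dtor, NULL);
 */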
1861:
1862: /*
1863: * pool_cache_destroy:
1864: *
1865: * Destroy a pool cache.
1866: */
1867: void
1868: pool_cache_destroy(struct pool_cache *pc)
1869: {
1870: struct pool *pp = pc->pc_pool;
1871:
1872: /* First, invalidate the entire cache. */
1873: pool_cache_invalidate(pc);
1874:
1875: /* ...and remove it from the pool's cache list. */
1876: simple_lock(&pp->pr_slock);
1.102 chs 1877: LIST_REMOVE(pc, pc_poollist);
1.43 thorpej 1878: simple_unlock(&pp->pr_slock);
1879: }
1880:
1.110 perry 1881: static inline void *
1.87 thorpej 1882: pcg_get(struct pool_cache_group *pcg, paddr_t *pap)
1.43 thorpej 1883: {
1884: void *object;
1885: u_int idx;
1886:
1887: KASSERT(pcg->pcg_avail <= PCG_NOBJECTS);
1.45 thorpej 1888: KASSERT(pcg->pcg_avail != 0);
1.43 thorpej 1889: idx = --pcg->pcg_avail;
1890:
1.87 thorpej 1891: KASSERT(pcg->pcg_objects[idx].pcgo_va != NULL);
1892: object = pcg->pcg_objects[idx].pcgo_va;
1893: if (pap != NULL)
1894: *pap = pcg->pcg_objects[idx].pcgo_pa;
1895: pcg->pcg_objects[idx].pcgo_va = NULL;
1.43 thorpej 1896:
1897: return (object);
1898: }
1899:
1.110 perry 1900: static inline void
1.87 thorpej 1901: pcg_put(struct pool_cache_group *pcg, void *object, paddr_t pa)
1.43 thorpej 1902: {
1903: u_int idx;
1904:
1905: KASSERT(pcg->pcg_avail < PCG_NOBJECTS);
1906: idx = pcg->pcg_avail++;
1907:
1.87 thorpej 1908: KASSERT(pcg->pcg_objects[idx].pcgo_va == NULL);
1909: pcg->pcg_objects[idx].pcgo_va = object;
1910: pcg->pcg_objects[idx].pcgo_pa = pa;
1.43 thorpej 1911: }
1912:
1.102 chs 1913: static void
1914: pcg_grouplist_free(struct pool_cache_grouplist *pcgl)
1915: {
1916: struct pool_cache_group *pcg;
1917: int s;
1918:
1919: s = splvm();
1920: while ((pcg = LIST_FIRST(pcgl)) != NULL) {
1921: LIST_REMOVE(pcg, pcg_list);
1922: pool_put(&pcgpool, pcg);
1923: }
1924: splx(s);
1925: }
1926:
1.43 thorpej 1927: /*
1.87 thorpej 1928: * pool_cache_get{,_paddr}:
1.43 thorpej 1929: *
1.87 thorpej 1930: * Get an object from a pool cache (optionally returning
1931: * the physical address of the object).
1.43 thorpej 1932: */
1933: void *
1.87 thorpej 1934: pool_cache_get_paddr(struct pool_cache *pc, int flags, paddr_t *pap)
1.43 thorpej 1935: {
1936: struct pool_cache_group *pcg;
1937: void *object;
1.58 thorpej 1938:
1939: #ifdef LOCKDEBUG
1940: if (flags & PR_WAITOK)
1941: simple_lock_only_held(NULL, "pool_cache_get(PR_WAITOK)");
1942: #endif
1.43 thorpej 1943:
1944: simple_lock(&pc->pc_slock);
1945:
1.102 chs 1946: pcg = LIST_FIRST(&pc->pc_partgroups);
1947: if (pcg == NULL) {
1948: pcg = LIST_FIRST(&pc->pc_fullgroups);
1949: if (pcg != NULL) {
1950: LIST_REMOVE(pcg, pcg_list);
1951: LIST_INSERT_HEAD(&pc->pc_partgroups, pcg, pcg_list);
1.43 thorpej 1952: }
1.102 chs 1953: }
1954: if (pcg == NULL) {
1.43 thorpej 1955:
1956: /*
1957: * No groups with any available objects. Allocate
1958: * a new object, construct it, and return it to
1959: * the caller. We will allocate a group, if necessary,
1960: * when the object is freed back to the cache.
1961: */
1.48 thorpej 1962: pc->pc_misses++;
1.43 thorpej 1963: simple_unlock(&pc->pc_slock);
1964: object = pool_get(pc->pc_pool, flags);
1965: if (object != NULL && pc->pc_ctor != NULL) {
1966: if ((*pc->pc_ctor)(pc->pc_arg, object, flags) != 0) {
1967: pool_put(pc->pc_pool, object);
1968: return (NULL);
1969: }
1970: }
1.87 thorpej 1971: if (object != NULL && pap != NULL) {
1972: #ifdef POOL_VTOPHYS
1973: *pap = POOL_VTOPHYS(object);
1974: #else
1975: *pap = POOL_PADDR_INVALID;
1976: #endif
1977: }
1.43 thorpej 1978: return (object);
1979: }
1980:
1.48 thorpej 1981: pc->pc_hits++;
1982: pc->pc_nitems--;
1.87 thorpej 1983: object = pcg_get(pcg, pap);
1.43 thorpej 1984:
1.102 chs 1985: if (pcg->pcg_avail == 0) {
1986: LIST_REMOVE(pcg, pcg_list);
1987: LIST_INSERT_HEAD(&pc->pc_emptygroups, pcg, pcg_list);
1988: }
1.43 thorpej 1989: simple_unlock(&pc->pc_slock);
1990:
1991: return (object);
1992: }
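
/*
 * An illustrative caller (hypothetical names); pool_cache_get() is the
 * common form, behaving as if a NULL paddr_t pointer were passed:
 *
 *	paddr_t pa;
 *	void *obj;
 *
 *	obj = pool_cache_get_paddr(&foo_cache, PR_WAITOK, &pa);
 *	if (obj != NULL) {
 *		... pa holds the object's physical address, or
 *		... POOL_PADDR_INVALID if it could not be determined ...
 *		pool_cache_put_paddr(&foo_cache, obj, pa);
 *	}
 */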
1993:
1994: /*
1.87 thorpej 1995: * pool_cache_put{,_paddr}:
1.43 thorpej 1996: *
1.87 thorpej 1997: * Put an object back to the pool cache (optionally caching the
1998: * physical address of the object).
1.43 thorpej 1999: */
2000: void
1.87 thorpej 2001: pool_cache_put_paddr(struct pool_cache *pc, void *object, paddr_t pa)
1.43 thorpej 2002: {
2003: struct pool_cache_group *pcg;
1.60 thorpej 2004: int s;
1.43 thorpej 2005:
1.109 christos 2006: if (__predict_false((pc->pc_pool->pr_flags & PR_WANTED) != 0)) {
2007: goto destruct;
2008: }
2009:
1.43 thorpej 2010: simple_lock(&pc->pc_slock);
2011:
1.102 chs 2012: pcg = LIST_FIRST(&pc->pc_partgroups);
2013: if (pcg == NULL) {
2014: pcg = LIST_FIRST(&pc->pc_emptygroups);
2015: if (pcg != NULL) {
2016: LIST_REMOVE(pcg, pcg_list);
2017: LIST_INSERT_HEAD(&pc->pc_partgroups, pcg, pcg_list);
1.43 thorpej 2018: }
1.102 chs 2019: }
2020: if (pcg == NULL) {
1.43 thorpej 2021:
2022: /*
2023: 		 * No group available to receive the object.  Attempt to
1.47 thorpej 2024: 		 * allocate a new group.
1.43 thorpej 2025: */
1.47 thorpej 2026: simple_unlock(&pc->pc_slock);
1.60 thorpej 2027: s = splvm();
1.43 thorpej 2028: pcg = pool_get(&pcgpool, PR_NOWAIT);
1.60 thorpej 2029: splx(s);
1.102 chs 2030: if (pcg == NULL) {
1.109 christos 2031: destruct:
1.102 chs 2032:
2033: /*
2034: * Unable to allocate a cache group; destruct the object
2035: * and free it back to the pool.
2036: */
2037: pool_cache_destruct_object(pc, object);
2038: return;
1.43 thorpej 2039: }
1.102 chs 2040: memset(pcg, 0, sizeof(*pcg));
2041: simple_lock(&pc->pc_slock);
2042: pc->pc_ngroups++;
2043: LIST_INSERT_HEAD(&pc->pc_partgroups, pcg, pcg_list);
1.43 thorpej 2044: }
2045:
1.48 thorpej 2046: pc->pc_nitems++;
1.87 thorpej 2047: pcg_put(pcg, object, pa);
1.43 thorpej 2048:
1.102 chs 2049: if (pcg->pcg_avail == PCG_NOBJECTS) {
2050: LIST_REMOVE(pcg, pcg_list);
2051: LIST_INSERT_HEAD(&pc->pc_fullgroups, pcg, pcg_list);
2052: }
1.43 thorpej 2053: simple_unlock(&pc->pc_slock);
1.51 thorpej 2054: }
2055:
2056: /*
2057: * pool_cache_destruct_object:
2058: *
2059: * Force destruction of an object and its release back into
2060: * the pool.
2061: */
2062: void
2063: pool_cache_destruct_object(struct pool_cache *pc, void *object)
2064: {
2065:
2066: if (pc->pc_dtor != NULL)
2067: (*pc->pc_dtor)(pc->pc_arg, object);
2068: pool_put(pc->pc_pool, object);
1.43 thorpej 2069: }
2070:
1.102 chs 2071: static void
1.106 christos 2072: pool_do_cache_invalidate_grouplist(struct pool_cache_grouplist *pcgsl,
1.105 christos 2073: struct pool_cache *pc, struct pool_pagelist *pq,
1.106 christos 2074: struct pool_cache_grouplist *pcgdl)
1.102 chs 2075: {
1.106 christos 2076: struct pool_cache_group *pcg, *npcg;
1.102 chs 2077: void *object;
2078:
1.106 christos 2079: for (pcg = LIST_FIRST(pcgsl); pcg != NULL; pcg = npcg) {
1.102 chs 2080: npcg = LIST_NEXT(pcg, pcg_list);
2081: while (pcg->pcg_avail != 0) {
2082: pc->pc_nitems--;
2083: object = pcg_get(pcg, NULL);
2084: if (pc->pc_dtor != NULL)
2085: (*pc->pc_dtor)(pc->pc_arg, object);
2086: pool_do_put(pc->pc_pool, object, pq);
2087: }
1.103 chs 2088: pc->pc_ngroups--;
1.102 chs 2089: LIST_REMOVE(pcg, pcg_list);
1.106 christos 2090: LIST_INSERT_HEAD(pcgdl, pcg, pcg_list);
1.102 chs 2091: }
1.105 christos 2092: }
2093:
2094: static void
2095: pool_do_cache_invalidate(struct pool_cache *pc, struct pool_pagelist *pq,
2096: struct pool_cache_grouplist *pcgl)
2097: {
2098:
2099: LOCK_ASSERT(simple_lock_held(&pc->pc_slock));
2100: LOCK_ASSERT(simple_lock_held(&pc->pc_pool->pr_slock));
2101:
1.106 christos 2102: pool_do_cache_invalidate_grouplist(&pc->pc_fullgroups, pc, pq, pcgl);
2103: pool_do_cache_invalidate_grouplist(&pc->pc_partgroups, pc, pq, pcgl);
1.103 chs 2104:
2105: KASSERT(LIST_EMPTY(&pc->pc_partgroups));
2106: KASSERT(LIST_EMPTY(&pc->pc_fullgroups));
2107: KASSERT(pc->pc_nitems == 0);
1.102 chs 2108: }
2109:
1.43 thorpej 2110: /*
1.101 thorpej 2111: * pool_cache_invalidate:
1.43 thorpej 2112: *
1.101 thorpej 2113: * Invalidate a pool cache (destruct and release all of the
2114: * cached objects).
1.43 thorpej 2115: */
1.101 thorpej 2116: void
2117: pool_cache_invalidate(struct pool_cache *pc)
1.43 thorpej 2118: {
1.101 thorpej 2119: struct pool_pagelist pq;
1.102 chs 2120: struct pool_cache_grouplist pcgl;
1.101 thorpej 2121:
2122: LIST_INIT(&pq);
1.102 chs 2123: LIST_INIT(&pcgl);
1.101 thorpej 2124:
2125: simple_lock(&pc->pc_slock);
2126: simple_lock(&pc->pc_pool->pr_slock);
1.43 thorpej 2127:
1.102 chs 2128: pool_do_cache_invalidate(pc, &pq, &pcgl);
1.43 thorpej 2129:
1.101 thorpej 2130: simple_unlock(&pc->pc_pool->pr_slock);
2131: simple_unlock(&pc->pc_slock);
1.43 thorpej 2132:
1.102 chs 2133: pr_pagelist_free(pc->pc_pool, &pq);
2134: pcg_grouplist_free(&pcgl);
1.43 thorpej 2135: }
2136:
2137: /*
2138: * pool_cache_reclaim:
2139: *
2140: * Reclaim a pool cache for pool_reclaim().
2141: */
2142: static void
1.102 chs 2143: pool_cache_reclaim(struct pool_cache *pc, struct pool_pagelist *pq,
2144: struct pool_cache_grouplist *pcgl)
1.43 thorpej 2145: {
1.101 thorpej 2146:
2147: /*
2148: * We're locking in the wrong order (normally pool_cache -> pool,
2149: * but the pool is already locked when we get here), so we have
2150: * to use trylock. If we can't lock the pool_cache, it's not really
2151: * a big deal here.
2152: */
2153: if (simple_lock_try(&pc->pc_slock) == 0)
2154: return;
2155:
1.102 chs 2156: pool_do_cache_invalidate(pc, pq, pcgl);
1.43 thorpej 2157:
2158: simple_unlock(&pc->pc_slock);
1.3 pk 2159: }
1.66 thorpej 2160:
2161: /*
2162: * Pool backend allocators.
2163: *
2164: * Each pool has a backend allocator that handles allocation, deallocation,
2165: * and any additional draining that might be needed.
2166: *
2167: * We provide two standard allocators:
2168: *
2169: * pool_allocator_kmem - the default when no allocator is specified
2170: *
2171: * pool_allocator_nointr - used for pools that will not be accessed
2172: * in interrupt context.
2173: */
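
/*
 * A sketch of what a client-supplied back-end allocator might look like
 * (the "foo" names are hypothetical); a page size of 0 in the third
 * member selects the default page size:
 *
 *	static void *foo_page_alloc(struct pool *, int);
 *	static void foo_page_free(struct pool *, void *);
 *
 *	struct pool_allocator foo_allocator = {
 *		foo_page_alloc, foo_page_free, 0,
 *	};
 */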
2174: void *pool_page_alloc(struct pool *, int);
2175: void pool_page_free(struct pool *, void *);
2176:
1.112 ! bjh21 2177: #ifdef POOL_SUBPAGE
! 2178: struct pool_allocator pool_allocator_kmem_fullpage = {
! 2179: pool_page_alloc, pool_page_free, 0,
! 2180: };
! 2181: #else
1.66 thorpej 2182: struct pool_allocator pool_allocator_kmem = {
2183: pool_page_alloc, pool_page_free, 0,
2184: };
1.112 ! bjh21 2185: #endif
1.66 thorpej 2186:
2187: void *pool_page_alloc_nointr(struct pool *, int);
2188: void pool_page_free_nointr(struct pool *, void *);
2189:
1.112 ! bjh21 2190: #ifdef POOL_SUBPAGE
! 2191: struct pool_allocator pool_allocator_nointr_fullpage = {
! 2192: pool_page_alloc_nointr, pool_page_free_nointr, 0,
! 2193: };
! 2194: #else
1.66 thorpej 2195: struct pool_allocator pool_allocator_nointr = {
2196: pool_page_alloc_nointr, pool_page_free_nointr, 0,
2197: };
1.112 ! bjh21 2198: #endif
1.66 thorpej 2199:
2200: #ifdef POOL_SUBPAGE
2201: void *pool_subpage_alloc(struct pool *, int);
2202: void pool_subpage_free(struct pool *, void *);
2203:
1.112 ! bjh21 2204: struct pool_allocator pool_allocator_kmem = {
! 2205: pool_subpage_alloc, pool_subpage_free, POOL_SUBPAGE,
! 2206: };
! 2207:
! 2208: void *pool_subpage_alloc_nointr(struct pool *, int);
! 2209: void pool_subpage_free_nointr(struct pool *, void *);
! 2210:
! 2211: struct pool_allocator pool_allocator_nointr = {
! 2212: pool_subpage_alloc, pool_subpage_free, POOL_SUBPAGE,
1.66 thorpej 2213: };
2214: #endif /* POOL_SUBPAGE */
2215:
2216: /*
2217: * We have at least three different resources for the same allocation and
2218: * each resource can be depleted. First, we have the ready elements in the
2219: * pool. Then we have the resource (typically a vm_map) for this allocator.
2220:  * Finally, we have physical memory.  A wait on any one of these could be
2221:  * ended by freeing any of the others, but the kernel doesn't support
2222:  * sleeping on multiple wait channels, so we have to employ another strategy.
2223: *
2224: * The caller sleeps on the pool (so that it can be awakened when an item
2225: * is returned to the pool), but we set PA_WANT on the allocator. When a
2226: * page is returned to the allocator and PA_WANT is set, pool_allocator_free
2227: * will wake up all sleeping pools belonging to this allocator.
2228: *
2229: * XXX Thundering herd.
2230: */
2231: void *
2232: pool_allocator_alloc(struct pool *org, int flags)
2233: {
2234: struct pool_allocator *pa = org->pr_alloc;
2235: struct pool *pp, *start;
2236: int s, freed;
2237: void *res;
2238:
1.91 yamt 2239: LOCK_ASSERT(!simple_lock_held(&org->pr_slock));
2240:
1.66 thorpej 2241: do {
2242: if ((res = (*pa->pa_alloc)(org, flags)) != NULL)
2243: return (res);
1.68 thorpej 2244: if ((flags & PR_WAITOK) == 0) {
2245: /*
2246: 			 * We only run the drain hook here if PR_NOWAIT.
2247: * In other cases, the hook will be run in
2248: * pool_reclaim().
2249: */
2250: if (org->pr_drain_hook != NULL) {
2251: (*org->pr_drain_hook)(org->pr_drain_hook_arg,
2252: flags);
2253: if ((res = (*pa->pa_alloc)(org, flags)) != NULL)
2254: return (res);
2255: }
1.66 thorpej 2256: break;
1.68 thorpej 2257: }
1.66 thorpej 2258:
2259: /*
1.109 christos 2260: 		 * Drain all pools that use this allocator.
2261: * We do this to reclaim VA space.
1.66 thorpej 2262: * pa_alloc is responsible for waiting for
2263: * physical memory.
2264: *
2265: 		 * XXX We risk looping forever if someone calls
2266: 		 * pool_destroy on "start".  But there is no
2267: * other way to have potentially sleeping pool_reclaim,
2268: * non-sleeping locks on pool_allocator, and some
2269: * stirring of drained pools in the allocator.
1.68 thorpej 2270: *
2271: * XXX Maybe we should use pool_head_slock for locking
2272: * the allocators?
1.66 thorpej 2273: */
2274: freed = 0;
2275:
2276: s = splvm();
2277: simple_lock(&pa->pa_slock);
2278: pp = start = TAILQ_FIRST(&pa->pa_list);
2279: do {
2280: TAILQ_REMOVE(&pa->pa_list, pp, pr_alloc_list);
2281: TAILQ_INSERT_TAIL(&pa->pa_list, pp, pr_alloc_list);
1.73 thorpej 2282: simple_unlock(&pa->pa_slock);
1.66 thorpej 2283: freed = pool_reclaim(pp);
1.73 thorpej 2284: simple_lock(&pa->pa_slock);
1.66 thorpej 2285: } while ((pp = TAILQ_FIRST(&pa->pa_list)) != start &&
2286: freed == 0);
2287:
2288: if (freed == 0) {
2289: /*
2290: 			 * We set PA_WANT here; the caller will most likely
2291: * sleep waiting for pages (if not, this won't hurt
2292: * that much), and there is no way to set this in
2293: * the caller without violating locking order.
2294: */
2295: pa->pa_flags |= PA_WANT;
2296: }
2297: simple_unlock(&pa->pa_slock);
2298: splx(s);
2299: } while (freed);
2300: return (NULL);
2301: }
2302:
2303: void
2304: pool_allocator_free(struct pool *pp, void *v)
2305: {
2306: struct pool_allocator *pa = pp->pr_alloc;
2307: int s;
2308:
1.91 yamt 2309: LOCK_ASSERT(!simple_lock_held(&pp->pr_slock));
2310:
1.66 thorpej 2311: (*pa->pa_free)(pp, v);
2312:
2313: s = splvm();
2314: simple_lock(&pa->pa_slock);
2315: if ((pa->pa_flags & PA_WANT) == 0) {
2316: simple_unlock(&pa->pa_slock);
2317: splx(s);
2318: return;
2319: }
2320:
2321: TAILQ_FOREACH(pp, &pa->pa_list, pr_alloc_list) {
2322: simple_lock(&pp->pr_slock);
2323: if ((pp->pr_flags & PR_WANTED) != 0) {
2324: pp->pr_flags &= ~PR_WANTED;
2325: wakeup(pp);
2326: }
1.69 thorpej 2327: simple_unlock(&pp->pr_slock);
1.66 thorpej 2328: }
2329: pa->pa_flags &= ~PA_WANT;
2330: simple_unlock(&pa->pa_slock);
2331: splx(s);
2332: }
2333:
2334: void *
2335: pool_page_alloc(struct pool *pp, int flags)
2336: {
2337: boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
2338:
1.100 yamt 2339: return ((void *) uvm_km_alloc_poolpage_cache(kmem_map, waitok));
1.66 thorpej 2340: }
2341:
2342: void
2343: pool_page_free(struct pool *pp, void *v)
2344: {
2345:
1.98 yamt 2346: uvm_km_free_poolpage_cache(kmem_map, (vaddr_t) v);
2347: }
2348:
2349: static void *
2350: pool_page_alloc_meta(struct pool *pp, int flags)
2351: {
2352: boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
2353:
1.100 yamt 2354: return ((void *) uvm_km_alloc_poolpage(kmem_map, waitok));
1.98 yamt 2355: }
2356:
2357: static void
2358: pool_page_free_meta(struct pool *pp, void *v)
2359: {
2360:
1.100 yamt 2361: uvm_km_free_poolpage(kmem_map, (vaddr_t) v);
1.66 thorpej 2362: }
2363:
2364: #ifdef POOL_SUBPAGE
2365: /* Sub-page allocator, for machines with large hardware pages. */
2366: void *
2367: pool_subpage_alloc(struct pool *pp, int flags)
2368: {
1.93 dbj 2369: void *v;
2370: int s;
2371: s = splvm();
2372: v = pool_get(&psppool, flags);
2373: splx(s);
2374: return v;
1.66 thorpej 2375: }
2376:
2377: void
2378: pool_subpage_free(struct pool *pp, void *v)
2379: {
1.93 dbj 2380: int s;
2381: s = splvm();
1.66 thorpej 2382: pool_put(&psppool, v);
1.93 dbj 2383: splx(s);
1.66 thorpej 2384: }
2385:
2386: /* We don't provide a real nointr allocator. Maybe later. */
2387: void *
1.112 ! bjh21 2388: pool_subpage_alloc_nointr(struct pool *pp, int flags)
1.66 thorpej 2389: {
2390:
2391: return (pool_subpage_alloc(pp, flags));
2392: }
2393:
2394: void
1.112 ! bjh21 2395: pool_subpage_free_nointr(struct pool *pp, void *v)
1.66 thorpej 2396: {
2397:
2398: pool_subpage_free(pp, v);
2399: }
1.112 ! bjh21 2400: #endif /* POOL_SUBPAGE */
1.66 thorpej 2401: void *
2402: pool_page_alloc_nointr(struct pool *pp, int flags)
2403: {
2404: boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
2405:
1.100 yamt 2406: return ((void *) uvm_km_alloc_poolpage_cache(kernel_map, waitok));
1.66 thorpej 2407: }
2408:
2409: void
2410: pool_page_free_nointr(struct pool *pp, void *v)
2411: {
2412:
1.98 yamt 2413: uvm_km_free_poolpage_cache(kernel_map, (vaddr_t) v);
1.66 thorpej 2414: }