Annotation of src/sys/kern/subr_pool.c, Revision 1.119
1.119 ! yamt 1: /* $NetBSD$ */
1.1 pk 2:
3: /*-
1.43 thorpej 4: * Copyright (c) 1997, 1999, 2000 The NetBSD Foundation, Inc.
1.1 pk 5: * All rights reserved.
6: *
7: * This code is derived from software contributed to The NetBSD Foundation
1.20 thorpej 8: * by Paul Kranenburg; by Jason R. Thorpe of the Numerical Aerospace
9: * Simulation Facility, NASA Ames Research Center.
1.1 pk 10: *
11: * Redistribution and use in source and binary forms, with or without
12: * modification, are permitted provided that the following conditions
13: * are met:
14: * 1. Redistributions of source code must retain the above copyright
15: * notice, this list of conditions and the following disclaimer.
16: * 2. Redistributions in binary form must reproduce the above copyright
17: * notice, this list of conditions and the following disclaimer in the
18: * documentation and/or other materials provided with the distribution.
19: * 3. All advertising materials mentioning features or use of this software
20: * must display the following acknowledgement:
1.13 christos 21: * This product includes software developed by the NetBSD
22: * Foundation, Inc. and its contributors.
1.1 pk 23: * 4. Neither the name of The NetBSD Foundation nor the names of its
24: * contributors may be used to endorse or promote products derived
25: * from this software without specific prior written permission.
26: *
27: * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28: * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29: * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30: * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31: * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32: * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33: * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34: * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35: * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36: * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37: * POSSIBILITY OF SUCH DAMAGE.
38: */
1.64 lukem 39:
40: #include <sys/cdefs.h>
1.119 ! yamt 41: __KERNEL_RCSID(0, "$NetBSD$");
1.24 scottr 42:
1.25 thorpej 43: #include "opt_pool.h"
1.24 scottr 44: #include "opt_poollog.h"
1.28 thorpej 45: #include "opt_lockdebug.h"
1.1 pk 46:
47: #include <sys/param.h>
48: #include <sys/systm.h>
49: #include <sys/proc.h>
50: #include <sys/errno.h>
51: #include <sys/kernel.h>
52: #include <sys/malloc.h>
53: #include <sys/lock.h>
54: #include <sys/pool.h>
1.20 thorpej 55: #include <sys/syslog.h>
1.3 pk 56:
57: #include <uvm/uvm.h>
58:
1.1 pk 59: /*
60: * Pool resource management utility.
1.3 pk 61: *
1.88 chs 62: * Memory is allocated in pages which are split into pieces according to
63: * the pool item size. Each page is kept on one of three lists in the
64: * pool structure: `pr_emptypages', `pr_fullpages' and `pr_partpages',
65: * for empty, full and partially-full pages respectively. The individual
66: * pool items are on a linked list headed by `ph_itemlist' in each page
67: * header. The memory for building the page list is either taken from
68: * the allocated pages themselves (for small pool items) or taken from
69: * an internal pool of page headers (`phpool').
1.1 pk 70: */
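/*
 * Illustrative sketch (not part of the original file): a typical
 * client declares a static pool for its item type and allocates from
 * it with pool_get()/pool_put().  `struct foo' and `foo_pool' are
 * hypothetical names; a NULL allocator argument to pool_init()
 * selects the default pool_allocator_kmem.
 *
 *	static struct pool foo_pool;
 *
 *	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0,
 *	    "foopl", NULL);
 *
 *	struct foo *f = pool_get(&foo_pool, PR_WAITOK);
 *	... use f ...
 *	pool_put(&foo_pool, f);
 */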
71:
1.3 pk 72: /* List of all pools */
1.102 chs 73: LIST_HEAD(,pool) pool_head = LIST_HEAD_INITIALIZER(pool_head);
1.3 pk 74:
75: /* Private pool for page header structures */
1.97 yamt 76: #define PHPOOL_MAX 8
77: static struct pool phpool[PHPOOL_MAX];
78: #define PHPOOL_FREELIST_NELEM(idx) (((idx) == 0) ? 0 : (1 << (idx)))
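/*
 * phpool[0] backs pools that don't need an item freelist; phpool[idx]
 * for idx > 0 appends a freelist of 2^idx entries to each header, so
 * a PR_NOTOUCH pool with up to 2^idx items per page draws its headers
 * from phpool[idx] (see the selection loop in pool_init()).
 */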
1.3 pk 79:
1.62 bjh21 80: #ifdef POOL_SUBPAGE
81: /* Pool of subpages for use by normal pools. */
82: static struct pool psppool;
83: #endif
84:
1.117 yamt 85: static SLIST_HEAD(, pool_allocator) pa_deferinitq =
86: SLIST_HEAD_INITIALIZER(pa_deferinitq);
87:
1.98 yamt 88: static void *pool_page_alloc_meta(struct pool *, int);
89: static void pool_page_free_meta(struct pool *, void *);
90:
91: /* allocator for pool metadata */
92: static struct pool_allocator pool_allocator_meta = {
1.117 yamt 93: pool_page_alloc_meta, pool_page_free_meta,
94: .pa_backingmapptr = &kmem_map,
1.98 yamt 95: };
96:
1.3 pk 97: /* # of seconds to retain page after last use */
98: int pool_inactive_time = 10;
99:
100: /* Next candidate for drainage (see pool_drain()) */
1.23 thorpej 101: static struct pool *drainpp;
102:
103: /* This spin lock protects both pool_head and drainpp. */
104: struct simplelock pool_head_slock = SIMPLELOCK_INITIALIZER;
1.3 pk 105:
1.99 yamt 106: typedef uint8_t pool_item_freelist_t;
107:
1.3 pk 108: struct pool_item_header {
109: /* Page headers */
1.88 chs 110: LIST_ENTRY(pool_item_header)
1.3 pk 111: ph_pagelist; /* pool page list */
1.88 chs 112: SPLAY_ENTRY(pool_item_header)
113: ph_node; /* Off-page page headers */
1.3 pk 114: caddr_t ph_page; /* this page's address */
115: struct timeval ph_time; /* last referenced */
1.97 yamt 116: union {
117: /* !PR_NOTOUCH */
118: struct {
1.102 chs 119: LIST_HEAD(, pool_item)
1.97 yamt 120: phu_itemlist; /* chunk list for this page */
121: } phu_normal;
122: /* PR_NOTOUCH */
123: struct {
124: uint16_t
125: phu_off; /* start offset in page */
1.99 yamt 126: pool_item_freelist_t
1.97 yamt 127: phu_firstfree; /* first free item */
1.99 yamt 128: /*
129: * XXX it might be better to use
130: * a simple bitmap and ffs(3)
131: */
1.97 yamt 132: } phu_notouch;
133: } ph_u;
134: uint16_t ph_nmissing; /* # of chunks in use */
1.3 pk 135: };
1.97 yamt 136: #define ph_itemlist ph_u.phu_normal.phu_itemlist
137: #define ph_off ph_u.phu_notouch.phu_off
138: #define ph_firstfree ph_u.phu_notouch.phu_firstfree
1.3 pk 139:
1.1 pk 140: struct pool_item {
1.3 pk 141: #ifdef DIAGNOSTIC
1.82 thorpej 142: u_int pi_magic;
1.33 chs 143: #endif
1.82 thorpej 144: #define PI_MAGIC 0xdeadbeefU
1.3 pk 145: /* Other entries use only this list entry */
1.102 chs 146: LIST_ENTRY(pool_item) pi_list;
1.3 pk 147: };
148:
1.53 thorpej 149: #define POOL_NEEDS_CATCHUP(pp) \
150: ((pp)->pr_nitems < (pp)->pr_minitems)
151:
1.43 thorpej 152: /*
153: * Pool cache management.
154: *
155: * Pool caches provide a way for constructed objects to be cached by the
156: * pool subsystem. This can lead to performance improvements by avoiding
 157: 	 * needless object construction/destruction; destruction is deferred
 158: 	 * until absolutely necessary.
159: *
160: * Caches are grouped into cache groups. Each cache group references
161: * up to 16 constructed objects. When a cache allocates an object
162: * from the pool, it calls the object's constructor and places it into
163: * a cache group. When a cache group frees an object back to the pool,
164: * it first calls the object's destructor. This allows the object to
165: * persist in constructed form while freed to the cache.
166: *
167: * Multiple caches may exist for each pool. This allows a single
168: * object type to have multiple constructed forms. The pool references
169: * each cache, so that when a pool is drained by the pagedaemon, it can
170: * drain each individual cache as well. Each time a cache is drained,
171: * the most idle cache group is freed to the pool in its entirety.
172: *
 173: 	 * Pool caches are laid on top of pools. By layering them, we can avoid
174: * the complexity of cache management for pools which would not benefit
175: * from it.
176: */
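/*
 * Illustrative sketch (not part of the original file), assuming the
 * pool_cache_init()/pool_cache_get()/pool_cache_put() interface
 * declared in <sys/pool.h>; `foo_ctor' and `foo_dtor' are
 * hypothetical constructor/destructor callbacks:
 *
 *	static struct pool_cache foo_cache;
 *
 *	pool_cache_init(&foo_cache, &foo_pool, foo_ctor, foo_dtor, NULL);
 *
 *	struct foo *f = pool_cache_get(&foo_cache, PR_WAITOK);
 *	... use f ...
 *	pool_cache_put(&foo_cache, f);
 *
 * The put leaves the object constructed in the cache; foo_dtor runs
 * only when a cache group is eventually drained back to the pool.
 */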
177:
178: /* The cache group pool. */
179: static struct pool pcgpool;
1.3 pk 180:
1.102 chs 181: static void pool_cache_reclaim(struct pool_cache *, struct pool_pagelist *,
182: struct pool_cache_grouplist *);
183: static void pcg_grouplist_free(struct pool_cache_grouplist *);
1.3 pk 184:
1.42 thorpej 185: static int pool_catchup(struct pool *);
1.55 thorpej 186: static void pool_prime_page(struct pool *, caddr_t,
187: struct pool_item_header *);
1.88 chs 188: static void pool_update_curpage(struct pool *);
1.66 thorpej 189:
1.113 yamt 190: static int pool_grow(struct pool *, int);
1.117 yamt 191: static void *pool_allocator_alloc(struct pool *, int);
192: static void pool_allocator_free(struct pool *, void *);
1.3 pk 193:
1.97 yamt 194: static void pool_print_pagelist(struct pool *, struct pool_pagelist *,
1.88 chs 195: void (*)(const char *, ...));
1.42 thorpej 196: static void pool_print1(struct pool *, const char *,
197: void (*)(const char *, ...));
1.3 pk 198:
1.88 chs 199: static int pool_chk_page(struct pool *, const char *,
200: struct pool_item_header *);
201:
1.3 pk 202: /*
1.52 thorpej 203: * Pool log entry. An array of these is allocated in pool_init().
1.3 pk 204: */
205: struct pool_log {
206: const char *pl_file;
207: long pl_line;
208: int pl_action;
1.25 thorpej 209: #define PRLOG_GET 1
210: #define PRLOG_PUT 2
1.3 pk 211: void *pl_addr;
1.1 pk 212: };
213:
1.86 matt 214: #ifdef POOL_DIAGNOSTIC
1.3 pk 215: /* Number of entries in pool log buffers */
1.17 thorpej 216: #ifndef POOL_LOGSIZE
217: #define POOL_LOGSIZE 10
218: #endif
219:
220: int pool_logsize = POOL_LOGSIZE;
1.1 pk 221:
1.110 perry 222: static inline void
1.42 thorpej 223: pr_log(struct pool *pp, void *v, int action, const char *file, long line)
1.3 pk 224: {
225: int n = pp->pr_curlogentry;
226: struct pool_log *pl;
227:
1.20 thorpej 228: if ((pp->pr_roflags & PR_LOGGING) == 0)
1.3 pk 229: return;
230:
231: /*
232: * Fill in the current entry. Wrap around and overwrite
233: * the oldest entry if necessary.
234: */
235: pl = &pp->pr_log[n];
236: pl->pl_file = file;
237: pl->pl_line = line;
238: pl->pl_action = action;
239: pl->pl_addr = v;
240: if (++n >= pp->pr_logsize)
241: n = 0;
242: pp->pr_curlogentry = n;
243: }
244:
245: static void
1.42 thorpej 246: pr_printlog(struct pool *pp, struct pool_item *pi,
247: void (*pr)(const char *, ...))
1.3 pk 248: {
249: int i = pp->pr_logsize;
250: int n = pp->pr_curlogentry;
251:
1.20 thorpej 252: if ((pp->pr_roflags & PR_LOGGING) == 0)
1.3 pk 253: return;
254:
255: /*
256: * Print all entries in this pool's log.
257: */
258: while (i-- > 0) {
259: struct pool_log *pl = &pp->pr_log[n];
260: if (pl->pl_action != 0) {
1.25 thorpej 261: if (pi == NULL || pi == pl->pl_addr) {
262: (*pr)("\tlog entry %d:\n", i);
263: (*pr)("\t\taction = %s, addr = %p\n",
264: pl->pl_action == PRLOG_GET ? "get" : "put",
265: pl->pl_addr);
266: (*pr)("\t\tfile: %s at line %lu\n",
267: pl->pl_file, pl->pl_line);
268: }
1.3 pk 269: }
270: if (++n >= pp->pr_logsize)
271: n = 0;
272: }
273: }
1.25 thorpej 274:
1.110 perry 275: static inline void
1.42 thorpej 276: pr_enter(struct pool *pp, const char *file, long line)
1.25 thorpej 277: {
278:
1.34 thorpej 279: if (__predict_false(pp->pr_entered_file != NULL)) {
1.25 thorpej 280: printf("pool %s: reentrancy at file %s line %ld\n",
281: pp->pr_wchan, file, line);
282: printf(" previous entry at file %s line %ld\n",
283: pp->pr_entered_file, pp->pr_entered_line);
284: panic("pr_enter");
285: }
286:
287: pp->pr_entered_file = file;
288: pp->pr_entered_line = line;
289: }
290:
1.110 perry 291: static inline void
1.42 thorpej 292: pr_leave(struct pool *pp)
1.25 thorpej 293: {
294:
1.34 thorpej 295: if (__predict_false(pp->pr_entered_file == NULL)) {
1.25 thorpej 296: printf("pool %s not entered?\n", pp->pr_wchan);
297: panic("pr_leave");
298: }
299:
300: pp->pr_entered_file = NULL;
301: pp->pr_entered_line = 0;
302: }
303:
1.110 perry 304: static inline void
1.42 thorpej 305: pr_enter_check(struct pool *pp, void (*pr)(const char *, ...))
1.25 thorpej 306: {
307:
308: if (pp->pr_entered_file != NULL)
309: (*pr)("\n\tcurrently entered from file %s line %ld\n",
310: pp->pr_entered_file, pp->pr_entered_line);
311: }
1.3 pk 312: #else
1.25 thorpej 313: #define pr_log(pp, v, action, file, line)
314: #define pr_printlog(pp, pi, pr)
315: #define pr_enter(pp, file, line)
316: #define pr_leave(pp)
317: #define pr_enter_check(pp, pr)
1.59 thorpej 318: #endif /* POOL_DIAGNOSTIC */
1.3 pk 319:
1.110 perry 320: static inline int
1.97 yamt 321: pr_item_notouch_index(const struct pool *pp, const struct pool_item_header *ph,
322: const void *v)
323: {
324: const char *cp = v;
325: int idx;
326:
327: KASSERT(pp->pr_roflags & PR_NOTOUCH);
328: idx = (cp - ph->ph_page - ph->ph_off) / pp->pr_size;
329: KASSERT(idx < pp->pr_itemsperpage);
330: return idx;
331: }
332:
1.99 yamt 333: #define PR_FREELIST_ALIGN(p) \
334: roundup((uintptr_t)(p), sizeof(pool_item_freelist_t))
335: #define PR_FREELIST(ph) ((pool_item_freelist_t *)PR_FREELIST_ALIGN((ph) + 1))
336: #define PR_INDEX_USED ((pool_item_freelist_t)-1)
337: #define PR_INDEX_EOL ((pool_item_freelist_t)-2)
1.97 yamt 338:
1.110 perry 339: static inline void
1.97 yamt 340: pr_item_notouch_put(const struct pool *pp, struct pool_item_header *ph,
341: void *obj)
342: {
343: int idx = pr_item_notouch_index(pp, ph, obj);
1.99 yamt 344: pool_item_freelist_t *freelist = PR_FREELIST(ph);
1.97 yamt 345:
346: KASSERT(freelist[idx] == PR_INDEX_USED);
347: freelist[idx] = ph->ph_firstfree;
348: ph->ph_firstfree = idx;
349: }
350:
1.110 perry 351: static inline void *
1.97 yamt 352: pr_item_notouch_get(const struct pool *pp, struct pool_item_header *ph)
353: {
354: int idx = ph->ph_firstfree;
1.99 yamt 355: pool_item_freelist_t *freelist = PR_FREELIST(ph);
1.97 yamt 356:
357: KASSERT(freelist[idx] != PR_INDEX_USED);
358: ph->ph_firstfree = freelist[idx];
359: freelist[idx] = PR_INDEX_USED;
360:
361: return ph->ph_page + ph->ph_off + idx * pp->pr_size;
362: }
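/*
 * Illustration of the index-linked freelist: with 4 items per page,
 * the freelist array (stored just past the page header) starts out as
 * { 1, 2, 3, PR_INDEX_EOL } with ph_firstfree = 0.  A get of item 0
 * advances ph_firstfree to 1 and marks slot 0 PR_INDEX_USED; the
 * matching put pushes index 0 back onto the head of the list.
 */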
363:
1.110 perry 364: static inline int
1.88 chs 365: phtree_compare(struct pool_item_header *a, struct pool_item_header *b)
366: {
367: if (a->ph_page < b->ph_page)
368: return (-1);
369: else if (a->ph_page > b->ph_page)
370: return (1);
371: else
372: return (0);
373: }
374:
375: SPLAY_PROTOTYPE(phtree, pool_item_header, ph_node, phtree_compare);
376: SPLAY_GENERATE(phtree, pool_item_header, ph_node, phtree_compare);
377:
1.3 pk 378: /*
379: * Return the pool page header based on page address.
380: */
1.110 perry 381: static inline struct pool_item_header *
1.42 thorpej 382: pr_find_pagehead(struct pool *pp, caddr_t page)
1.3 pk 383: {
1.88 chs 384: struct pool_item_header *ph, tmp;
1.3 pk 385:
1.20 thorpej 386: if ((pp->pr_roflags & PR_PHINPAGE) != 0)
1.3 pk 387: return ((struct pool_item_header *)(page + pp->pr_phoffset));
388:
1.88 chs 389: tmp.ph_page = page;
390: ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp);
391: return ph;
1.3 pk 392: }
393:
1.101 thorpej 394: static void
395: pr_pagelist_free(struct pool *pp, struct pool_pagelist *pq)
396: {
397: struct pool_item_header *ph;
398: int s;
399:
400: while ((ph = LIST_FIRST(pq)) != NULL) {
401: LIST_REMOVE(ph, ph_pagelist);
402: pool_allocator_free(pp, ph->ph_page);
403: if ((pp->pr_roflags & PR_PHINPAGE) == 0) {
404: s = splvm();
405: pool_put(pp->pr_phpool, ph);
406: splx(s);
407: }
408: }
409: }
410:
1.3 pk 411: /*
412: * Remove a page from the pool.
413: */
1.110 perry 414: static inline void
1.61 chs 415: pr_rmpage(struct pool *pp, struct pool_item_header *ph,
416: struct pool_pagelist *pq)
1.3 pk 417: {
418:
1.101 thorpej 419: LOCK_ASSERT(simple_lock_held(&pp->pr_slock));
1.91 yamt 420:
1.3 pk 421: /*
1.7 thorpej 422: * If the page was idle, decrement the idle page count.
1.3 pk 423: */
1.6 thorpej 424: if (ph->ph_nmissing == 0) {
425: #ifdef DIAGNOSTIC
426: if (pp->pr_nidle == 0)
427: panic("pr_rmpage: nidle inconsistent");
1.20 thorpej 428: if (pp->pr_nitems < pp->pr_itemsperpage)
429: panic("pr_rmpage: nitems inconsistent");
1.6 thorpej 430: #endif
431: pp->pr_nidle--;
432: }
1.7 thorpej 433:
1.20 thorpej 434: pp->pr_nitems -= pp->pr_itemsperpage;
435:
1.7 thorpej 436: /*
1.101 thorpej 437: * Unlink the page from the pool and queue it for release.
1.7 thorpej 438: */
1.88 chs 439: LIST_REMOVE(ph, ph_pagelist);
1.91 yamt 440: if ((pp->pr_roflags & PR_PHINPAGE) == 0)
441: SPLAY_REMOVE(phtree, &pp->pr_phtree, ph);
1.101 thorpej 442: LIST_INSERT_HEAD(pq, ph, ph_pagelist);
443:
1.7 thorpej 444: pp->pr_npages--;
445: pp->pr_npagefree++;
1.6 thorpej 446:
1.88 chs 447: pool_update_curpage(pp);
1.3 pk 448: }
449:
1.117 yamt 450: static boolean_t
451: pa_starved_p(struct pool_allocator *pa)
452: {
453:
454: if (pa->pa_backingmap != NULL) {
455: return vm_map_starved_p(pa->pa_backingmap);
456: }
457: return FALSE;
458: }
459:
460: static int
461: pool_reclaim_callback(struct callback_entry *ce, void *obj, void *arg)
462: {
463: struct pool *pp = obj;
464: struct pool_allocator *pa = pp->pr_alloc;
465:
466: KASSERT(&pp->pr_reclaimerentry == ce);
467: pool_reclaim(pp);
468: if (!pa_starved_p(pa)) {
469: return CALLBACK_CHAIN_ABORT;
470: }
471: return CALLBACK_CHAIN_CONTINUE;
472: }
473:
474: static void
475: pool_reclaim_register(struct pool *pp)
476: {
477: struct vm_map *map = pp->pr_alloc->pa_backingmap;
478: int s;
479:
480: if (map == NULL) {
481: return;
482: }
483:
484: s = splvm(); /* not necessary for INTRSAFE maps, but don't care. */
485: callback_register(&vm_map_to_kernel(map)->vmk_reclaim_callback,
486: &pp->pr_reclaimerentry, pp, pool_reclaim_callback);
487: splx(s);
488: }
489:
490: static void
491: pool_reclaim_unregister(struct pool *pp)
492: {
493: struct vm_map *map = pp->pr_alloc->pa_backingmap;
494: int s;
495:
496: if (map == NULL) {
497: return;
498: }
499:
500: s = splvm(); /* not necessary for INTRSAFE maps, but don't care. */
501: callback_unregister(&vm_map_to_kernel(map)->vmk_reclaim_callback,
502: &pp->pr_reclaimerentry);
503: splx(s);
504: }
505:
506: static void
507: pa_reclaim_register(struct pool_allocator *pa)
508: {
509: struct vm_map *map = *pa->pa_backingmapptr;
510: struct pool *pp;
511:
512: KASSERT(pa->pa_backingmap == NULL);
513: if (map == NULL) {
514: SLIST_INSERT_HEAD(&pa_deferinitq, pa, pa_q);
515: return;
516: }
517: pa->pa_backingmap = map;
518: TAILQ_FOREACH(pp, &pa->pa_list, pr_alloc_list) {
519: pool_reclaim_register(pp);
520: }
521: }
522:
1.3 pk 523: /*
1.94 simonb 524: * Initialize all the pools listed in the "pools" link set.
525: */
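/*
 * Each link-set entry is a struct link_pool_init carrying the
 * pool_init() arguments used below; callers typically emit one via
 * the POOL_INIT() macro in <sys/pool.h> (assumed here) rather than
 * constructing the record by hand.
 */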
526: void
1.117 yamt 527: pool_subsystem_init(void)
1.94 simonb 528: {
1.117 yamt 529: struct pool_allocator *pa;
1.94 simonb 530: __link_set_decl(pools, struct link_pool_init);
531: struct link_pool_init * const *pi;
532:
533: __link_set_foreach(pi, pools)
534: pool_init((*pi)->pp, (*pi)->size, (*pi)->align,
535: (*pi)->align_offset, (*pi)->flags, (*pi)->wchan,
536: (*pi)->palloc);
1.117 yamt 537:
538: while ((pa = SLIST_FIRST(&pa_deferinitq)) != NULL) {
539: KASSERT(pa->pa_backingmapptr != NULL);
540: KASSERT(*pa->pa_backingmapptr != NULL);
541: SLIST_REMOVE_HEAD(&pa_deferinitq, pa_q);
542: pa_reclaim_register(pa);
543: }
1.94 simonb 544: }
545:
546: /*
1.3 pk 547: * Initialize the given pool resource structure.
548: *
549: * We export this routine to allow other kernel parts to declare
550: * static pools that must be initialized before malloc() is available.
551: */
552: void
1.42 thorpej 553: pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
1.66 thorpej 554: const char *wchan, struct pool_allocator *palloc)
1.3 pk 555: {
1.116 simonb 556: #ifdef DEBUG
557: struct pool *pp1;
558: #endif
1.92 enami 559: size_t trysize, phsize;
1.116 simonb 560: int off, slack, s;
1.3 pk 561:
1.99 yamt 562: KASSERT((1UL << (CHAR_BIT * sizeof(pool_item_freelist_t))) - 2 >=
563: PHPOOL_FREELIST_NELEM(PHPOOL_MAX - 1));
564:
1.116 simonb 565: #ifdef DEBUG
566: /*
567: * Check that the pool hasn't already been initialised and
568: * added to the list of all pools.
569: */
570: LIST_FOREACH(pp1, &pool_head, pr_poollist) {
571: if (pp == pp1)
572: panic("pool_init: pool %s already initialised",
573: wchan);
574: }
575: #endif
576:
1.25 thorpej 577: #ifdef POOL_DIAGNOSTIC
578: /*
579: * Always log if POOL_DIAGNOSTIC is defined.
580: */
581: if (pool_logsize != 0)
582: flags |= PR_LOGGING;
583: #endif
584:
1.66 thorpej 585: if (palloc == NULL)
586: palloc = &pool_allocator_kmem;
1.112 bjh21 587: #ifdef POOL_SUBPAGE
588: if (size > palloc->pa_pagesz) {
589: if (palloc == &pool_allocator_kmem)
590: palloc = &pool_allocator_kmem_fullpage;
591: else if (palloc == &pool_allocator_nointr)
592: palloc = &pool_allocator_nointr_fullpage;
593: }
1.66 thorpej 594: #endif /* POOL_SUBPAGE */
595: if ((palloc->pa_flags & PA_INITIALIZED) == 0) {
1.112 bjh21 596: if (palloc->pa_pagesz == 0)
1.66 thorpej 597: palloc->pa_pagesz = PAGE_SIZE;
598:
599: TAILQ_INIT(&palloc->pa_list);
600:
601: simple_lock_init(&palloc->pa_slock);
602: palloc->pa_pagemask = ~(palloc->pa_pagesz - 1);
603: palloc->pa_pageshift = ffs(palloc->pa_pagesz) - 1;
1.117 yamt 604:
605: if (palloc->pa_backingmapptr != NULL) {
606: pa_reclaim_register(palloc);
607: }
1.66 thorpej 608: palloc->pa_flags |= PA_INITIALIZED;
1.4 thorpej 609: }
1.3 pk 610:
611: if (align == 0)
612: align = ALIGN(1);
1.14 thorpej 613:
614: if (size < sizeof(struct pool_item))
615: size = sizeof(struct pool_item);
1.3 pk 616:
1.78 thorpej 617: size = roundup(size, align);
1.66 thorpej 618: #ifdef DIAGNOSTIC
619: if (size > palloc->pa_pagesz)
1.35 pk 620: panic("pool_init: pool item size (%lu) too large",
621: (u_long)size);
1.66 thorpej 622: #endif
1.35 pk 623:
1.3 pk 624: /*
625: * Initialize the pool structure.
626: */
1.88 chs 627: LIST_INIT(&pp->pr_emptypages);
628: LIST_INIT(&pp->pr_fullpages);
629: LIST_INIT(&pp->pr_partpages);
1.102 chs 630: LIST_INIT(&pp->pr_cachelist);
1.3 pk 631: pp->pr_curpage = NULL;
632: pp->pr_npages = 0;
633: pp->pr_minitems = 0;
634: pp->pr_minpages = 0;
635: pp->pr_maxpages = UINT_MAX;
1.20 thorpej 636: pp->pr_roflags = flags;
637: pp->pr_flags = 0;
1.35 pk 638: pp->pr_size = size;
1.3 pk 639: pp->pr_align = align;
640: pp->pr_wchan = wchan;
1.66 thorpej 641: pp->pr_alloc = palloc;
1.20 thorpej 642: pp->pr_nitems = 0;
643: pp->pr_nout = 0;
644: pp->pr_hardlimit = UINT_MAX;
645: pp->pr_hardlimit_warning = NULL;
1.31 thorpej 646: pp->pr_hardlimit_ratecap.tv_sec = 0;
647: pp->pr_hardlimit_ratecap.tv_usec = 0;
648: pp->pr_hardlimit_warning_last.tv_sec = 0;
649: pp->pr_hardlimit_warning_last.tv_usec = 0;
1.68 thorpej 650: pp->pr_drain_hook = NULL;
651: pp->pr_drain_hook_arg = NULL;
1.3 pk 652:
653: /*
 654: 	 * Decide whether to put the page header off-page, to avoid
1.92 enami 655: 	 * wasting too large a part of the page on it, or for too-big items.
 656: 	 * Off-page page headers go into a splay tree, so we can match
 657: 	 * a returned item with its header based on the page address.
 658: 	 * We use 1/16 of the page size and about 8 times the header
 659: 	 * size as the threshold (XXX: tune)
660: *
661: * However, we'll put the header into the page if we can put
662: * it without wasting any items.
663: *
664: * Silently enforce `0 <= ioff < align'.
1.3 pk 665: */
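	/*
	 * Worked example (illustrative; assumes a 4096-byte page and,
	 * for the sake of arithmetic, a header that is 48 bytes once
	 * aligned): the threshold is MIN(4096 / 16, 48 << 3) =
	 * MIN(256, 384) = 256, so an item smaller than 256 bytes gets
	 * an in-page header, provided the `trysize' test below shows
	 * that the header doesn't displace an item.  PR_NOTOUCH pools
	 * always keep their headers off-page.
	 */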
1.92 enami 666: pp->pr_itemoffset = ioff %= align;
667: /* See the comment below about reserved bytes. */
668: trysize = palloc->pa_pagesz - ((align - ioff) % align);
669: phsize = ALIGN(sizeof(struct pool_item_header));
1.97 yamt 670: if ((pp->pr_roflags & PR_NOTOUCH) == 0 &&
671: (pp->pr_size < MIN(palloc->pa_pagesz / 16, phsize << 3) ||
672: trysize / pp->pr_size == (trysize - phsize) / pp->pr_size)) {
1.3 pk 673: /* Use the end of the page for the page header */
1.20 thorpej 674: pp->pr_roflags |= PR_PHINPAGE;
1.92 enami 675: pp->pr_phoffset = off = palloc->pa_pagesz - phsize;
1.2 pk 676: } else {
1.3 pk 677: /* The page header will be taken from our page header pool */
678: pp->pr_phoffset = 0;
1.66 thorpej 679: off = palloc->pa_pagesz;
1.88 chs 680: SPLAY_INIT(&pp->pr_phtree);
1.2 pk 681: }
1.1 pk 682:
1.3 pk 683: /*
684: * Alignment is to take place at `ioff' within the item. This means
685: * we must reserve up to `align - 1' bytes on the page to allow
686: * appropriate positioning of each item.
687: */
688: pp->pr_itemsperpage = (off - ((align - ioff) % align)) / pp->pr_size;
1.43 thorpej 689: KASSERT(pp->pr_itemsperpage != 0);
1.97 yamt 690: if ((pp->pr_roflags & PR_NOTOUCH)) {
691: int idx;
692:
693: for (idx = 0; pp->pr_itemsperpage > PHPOOL_FREELIST_NELEM(idx);
694: idx++) {
695: /* nothing */
696: }
697: if (idx >= PHPOOL_MAX) {
698: /*
 699: 			 * if you see this panic, consider tweaking
700: * PHPOOL_MAX and PHPOOL_FREELIST_NELEM.
701: */
702: panic("%s: too large itemsperpage(%d) for PR_NOTOUCH",
703: pp->pr_wchan, pp->pr_itemsperpage);
704: }
705: pp->pr_phpool = &phpool[idx];
706: } else if ((pp->pr_roflags & PR_PHINPAGE) == 0) {
707: pp->pr_phpool = &phpool[0];
708: }
709: #if defined(DIAGNOSTIC)
710: else {
711: pp->pr_phpool = NULL;
712: }
713: #endif
1.3 pk 714:
715: /*
716: * Use the slack between the chunks and the page header
717: * for "cache coloring".
718: */
719: slack = off - pp->pr_itemsperpage * pp->pr_size;
720: pp->pr_maxcolor = (slack / align) * align;
721: pp->pr_curcolor = 0;
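	/*
	 * E.g. with slack 100 and align 32, pr_maxcolor is 96 and
	 * successive pages start their items at offsets 0, 32, 64,
	 * 96, 0, ... (see pool_prime_page()), spreading otherwise
	 * identically-placed items across cache lines.
	 */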
722:
723: pp->pr_nget = 0;
724: pp->pr_nfail = 0;
725: pp->pr_nput = 0;
726: pp->pr_npagealloc = 0;
727: pp->pr_npagefree = 0;
1.1 pk 728: pp->pr_hiwat = 0;
1.8 thorpej 729: pp->pr_nidle = 0;
1.3 pk 730:
1.59 thorpej 731: #ifdef POOL_DIAGNOSTIC
1.25 thorpej 732: if (flags & PR_LOGGING) {
733: if (kmem_map == NULL ||
734: (pp->pr_log = malloc(pool_logsize * sizeof(struct pool_log),
735: M_TEMP, M_NOWAIT)) == NULL)
1.20 thorpej 736: pp->pr_roflags &= ~PR_LOGGING;
1.3 pk 737: pp->pr_curlogentry = 0;
738: pp->pr_logsize = pool_logsize;
739: }
1.59 thorpej 740: #endif
1.25 thorpej 741:
742: pp->pr_entered_file = NULL;
743: pp->pr_entered_line = 0;
1.3 pk 744:
1.21 thorpej 745: simple_lock_init(&pp->pr_slock);
1.1 pk 746:
1.3 pk 747: /*
1.43 thorpej 748: * Initialize private page header pool and cache magazine pool if we
749: * haven't done so yet.
1.23 thorpej 750: * XXX LOCKING.
1.3 pk 751: */
1.97 yamt 752: if (phpool[0].pr_size == 0) {
753: int idx;
754: for (idx = 0; idx < PHPOOL_MAX; idx++) {
755: static char phpool_names[PHPOOL_MAX][6+1+6+1];
756: int nelem;
757: size_t sz;
758:
759: nelem = PHPOOL_FREELIST_NELEM(idx);
760: snprintf(phpool_names[idx], sizeof(phpool_names[idx]),
761: "phpool-%d", nelem);
762: sz = sizeof(struct pool_item_header);
763: if (nelem) {
764: sz = PR_FREELIST_ALIGN(sz)
1.99 yamt 765: + nelem * sizeof(pool_item_freelist_t);
1.97 yamt 766: }
767: pool_init(&phpool[idx], sz, 0, 0, 0,
1.98 yamt 768: phpool_names[idx], &pool_allocator_meta);
1.97 yamt 769: }
1.62 bjh21 770: #ifdef POOL_SUBPAGE
771: pool_init(&psppool, POOL_SUBPAGE, POOL_SUBPAGE, 0,
1.98 yamt 772: PR_RECURSIVE, "psppool", &pool_allocator_meta);
1.62 bjh21 773: #endif
1.43 thorpej 774: pool_init(&pcgpool, sizeof(struct pool_cache_group), 0, 0,
1.98 yamt 775: 0, "pcgpool", &pool_allocator_meta);
1.1 pk 776: }
777:
1.23 thorpej 778: /* Insert into the list of all pools. */
779: simple_lock(&pool_head_slock);
1.102 chs 780: LIST_INSERT_HEAD(&pool_head, pp, pr_poollist);
1.23 thorpej 781: simple_unlock(&pool_head_slock);
1.66 thorpej 782:
783: /* Insert this into the list of pools using this allocator. */
1.93 dbj 784: s = splvm();
1.66 thorpej 785: simple_lock(&palloc->pa_slock);
786: TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list);
787: simple_unlock(&palloc->pa_slock);
1.93 dbj 788: splx(s);
1.117 yamt 789: pool_reclaim_register(pp);
1.1 pk 790: }
791:
792: /*
 793: 	 * De-commission a pool resource.
794: */
795: void
1.42 thorpej 796: pool_destroy(struct pool *pp)
1.1 pk 797: {
1.101 thorpej 798: struct pool_pagelist pq;
1.3 pk 799: struct pool_item_header *ph;
1.93 dbj 800: int s;
1.43 thorpej 801:
1.101 thorpej 802: /* Remove from global pool list */
803: simple_lock(&pool_head_slock);
1.102 chs 804: LIST_REMOVE(pp, pr_poollist);
1.101 thorpej 805: if (drainpp == pp)
806: drainpp = NULL;
807: simple_unlock(&pool_head_slock);
808:
809: /* Remove this pool from its allocator's list of pools. */
1.117 yamt 810: pool_reclaim_unregister(pp);
1.93 dbj 811: s = splvm();
1.66 thorpej 812: simple_lock(&pp->pr_alloc->pa_slock);
813: TAILQ_REMOVE(&pp->pr_alloc->pa_list, pp, pr_alloc_list);
814: simple_unlock(&pp->pr_alloc->pa_slock);
1.93 dbj 815: splx(s);
1.66 thorpej 816:
1.101 thorpej 817: s = splvm();
818: simple_lock(&pp->pr_slock);
819:
1.102 chs 820: KASSERT(LIST_EMPTY(&pp->pr_cachelist));
1.3 pk 821:
822: #ifdef DIAGNOSTIC
1.20 thorpej 823: if (pp->pr_nout != 0) {
1.25 thorpej 824: pr_printlog(pp, NULL, printf);
1.80 provos 825: panic("pool_destroy: pool busy: still out: %u",
1.20 thorpej 826: pp->pr_nout);
1.3 pk 827: }
828: #endif
1.1 pk 829:
1.101 thorpej 830: KASSERT(LIST_EMPTY(&pp->pr_fullpages));
831: KASSERT(LIST_EMPTY(&pp->pr_partpages));
832:
1.3 pk 833: /* Remove all pages */
1.101 thorpej 834: LIST_INIT(&pq);
1.88 chs 835: while ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
1.101 thorpej 836: pr_rmpage(pp, ph, &pq);
837:
838: simple_unlock(&pp->pr_slock);
839: splx(s);
1.3 pk 840:
1.101 thorpej 841: pr_pagelist_free(pp, &pq);
1.3 pk 842:
1.59 thorpej 843: #ifdef POOL_DIAGNOSTIC
1.20 thorpej 844: if ((pp->pr_roflags & PR_LOGGING) != 0)
1.3 pk 845: free(pp->pr_log, M_TEMP);
1.59 thorpej 846: #endif
1.1 pk 847: }
848:
1.68 thorpej 849: void
850: pool_set_drain_hook(struct pool *pp, void (*fn)(void *, int), void *arg)
851: {
852:
853: /* XXX no locking -- must be used just after pool_init() */
854: #ifdef DIAGNOSTIC
855: if (pp->pr_drain_hook != NULL)
856: panic("pool_set_drain_hook(%s): already set", pp->pr_wchan);
857: #endif
858: pp->pr_drain_hook = fn;
859: pp->pr_drain_hook_arg = arg;
860: }
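/*
 * Illustrative sketch (hypothetical names): a subsystem holding a
 * private reserve of foo structures can let the pool call back into
 * it when the hard limit is hit or the pool is reclaimed.  The hook
 * receives the registered argument and a PR_WAITOK/PR_NOWAIT flag.
 *
 *	static void
 *	foo_drain(void *arg, int flags)
 *	{
 *		... release privately cached items back to foo_pool ...
 *	}
 *
 *	pool_set_drain_hook(&foo_pool, foo_drain, NULL);
 */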
861:
1.88 chs 862: static struct pool_item_header *
1.55 thorpej 863: pool_alloc_item_header(struct pool *pp, caddr_t storage, int flags)
864: {
865: struct pool_item_header *ph;
866: int s;
867:
868: LOCK_ASSERT(simple_lock_held(&pp->pr_slock) == 0);
869:
870: if ((pp->pr_roflags & PR_PHINPAGE) != 0)
871: ph = (struct pool_item_header *) (storage + pp->pr_phoffset);
872: else {
1.85 pk 873: s = splvm();
1.97 yamt 874: ph = pool_get(pp->pr_phpool, flags);
1.55 thorpej 875: splx(s);
876: }
877:
878: return (ph);
879: }
1.1 pk 880:
881: /*
1.3 pk 882: * Grab an item from the pool; must be called at appropriate spl level
1.1 pk 883: */
1.3 pk 884: void *
1.59 thorpej 885: #ifdef POOL_DIAGNOSTIC
1.42 thorpej 886: _pool_get(struct pool *pp, int flags, const char *file, long line)
1.56 sommerfe 887: #else
888: pool_get(struct pool *pp, int flags)
889: #endif
1.1 pk 890: {
891: struct pool_item *pi;
1.3 pk 892: struct pool_item_header *ph;
1.55 thorpej 893: void *v;
1.1 pk 894:
1.2 pk 895: #ifdef DIAGNOSTIC
1.95 atatat 896: if (__predict_false(pp->pr_itemsperpage == 0))
897: panic("pool_get: pool %p: pr_itemsperpage is zero, "
898: "pool not initialized?", pp);
1.84 thorpej 899: if (__predict_false(curlwp == NULL && doing_shutdown == 0 &&
1.37 sommerfe 900: (flags & PR_WAITOK) != 0))
1.77 matt 901: panic("pool_get: %s: must have NOWAIT", pp->pr_wchan);
1.58 thorpej 902:
1.102 chs 903: #endif /* DIAGNOSTIC */
1.58 thorpej 904: #ifdef LOCKDEBUG
905: if (flags & PR_WAITOK)
1.119 ! yamt 906: ASSERT_SLEEPABLE(NULL, "pool_get(PR_WAITOK)");
1.102 chs 907: SCHED_ASSERT_UNLOCKED();
1.56 sommerfe 908: #endif
1.1 pk 909:
1.21 thorpej 910: simple_lock(&pp->pr_slock);
1.25 thorpej 911: pr_enter(pp, file, line);
1.20 thorpej 912:
913: startover:
914: /*
915: * Check to see if we've reached the hard limit. If we have,
916: * and we can wait, then wait until an item has been returned to
917: * the pool.
918: */
919: #ifdef DIAGNOSTIC
1.34 thorpej 920: if (__predict_false(pp->pr_nout > pp->pr_hardlimit)) {
1.25 thorpej 921: pr_leave(pp);
1.21 thorpej 922: simple_unlock(&pp->pr_slock);
1.20 thorpej 923: panic("pool_get: %s: crossed hard limit", pp->pr_wchan);
924: }
925: #endif
1.34 thorpej 926: if (__predict_false(pp->pr_nout == pp->pr_hardlimit)) {
1.68 thorpej 927: if (pp->pr_drain_hook != NULL) {
928: /*
929: * Since the drain hook is going to free things
930: * back to the pool, unlock, call the hook, re-lock,
931: * and check the hardlimit condition again.
932: */
933: pr_leave(pp);
934: simple_unlock(&pp->pr_slock);
935: (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags);
936: simple_lock(&pp->pr_slock);
937: pr_enter(pp, file, line);
938: if (pp->pr_nout < pp->pr_hardlimit)
939: goto startover;
940: }
941:
1.29 sommerfe 942: if ((flags & PR_WAITOK) && !(flags & PR_LIMITFAIL)) {
1.20 thorpej 943: /*
944: * XXX: A warning isn't logged in this case. Should
945: * it be?
946: */
947: pp->pr_flags |= PR_WANTED;
1.25 thorpej 948: pr_leave(pp);
1.40 sommerfe 949: ltsleep(pp, PSWP, pp->pr_wchan, 0, &pp->pr_slock);
1.25 thorpej 950: pr_enter(pp, file, line);
1.20 thorpej 951: goto startover;
952: }
1.31 thorpej 953:
954: /*
955: * Log a message that the hard limit has been hit.
956: */
957: if (pp->pr_hardlimit_warning != NULL &&
958: ratecheck(&pp->pr_hardlimit_warning_last,
959: &pp->pr_hardlimit_ratecap))
960: log(LOG_ERR, "%s\n", pp->pr_hardlimit_warning);
1.21 thorpej 961:
962: pp->pr_nfail++;
963:
1.25 thorpej 964: pr_leave(pp);
1.21 thorpej 965: simple_unlock(&pp->pr_slock);
1.20 thorpej 966: return (NULL);
967: }
968:
1.3 pk 969: /*
970: * The convention we use is that if `curpage' is not NULL, then
971: * it points at a non-empty bucket. In particular, `curpage'
972: * never points at a page header which has PR_PHINPAGE set and
973: * has no items in its bucket.
974: */
1.20 thorpej 975: if ((ph = pp->pr_curpage) == NULL) {
1.113 yamt 976: int error;
977:
1.20 thorpej 978: #ifdef DIAGNOSTIC
979: if (pp->pr_nitems != 0) {
1.21 thorpej 980: simple_unlock(&pp->pr_slock);
1.20 thorpej 981: printf("pool_get: %s: curpage NULL, nitems %u\n",
982: pp->pr_wchan, pp->pr_nitems);
1.80 provos 983: panic("pool_get: nitems inconsistent");
1.20 thorpej 984: }
985: #endif
986:
1.21 thorpej 987: /*
988: * Call the back-end page allocator for more memory.
989: * Release the pool lock, as the back-end page allocator
990: * may block.
991: */
1.25 thorpej 992: pr_leave(pp);
1.113 yamt 993: error = pool_grow(pp, flags);
994: pr_enter(pp, file, line);
995: if (error != 0) {
1.21 thorpej 996: /*
1.55 thorpej 997: * We were unable to allocate a page or item
998: * header, but we released the lock during
999: * allocation, so perhaps items were freed
1000: * back to the pool. Check for this case.
1.21 thorpej 1001: */
1002: if (pp->pr_curpage != NULL)
1003: goto startover;
1.15 pk 1004:
1.117 yamt 1005: pp->pr_nfail++;
1.25 thorpej 1006: pr_leave(pp);
1.117 yamt 1007: simple_unlock(&pp->pr_slock);
1008: return (NULL);
1.1 pk 1009: }
1.3 pk 1010:
1.20 thorpej 1011: /* Start the allocation process over. */
1012: goto startover;
1.3 pk 1013: }
1.97 yamt 1014: if (pp->pr_roflags & PR_NOTOUCH) {
1015: #ifdef DIAGNOSTIC
1016: if (__predict_false(ph->ph_nmissing == pp->pr_itemsperpage)) {
1017: pr_leave(pp);
1018: simple_unlock(&pp->pr_slock);
1019: panic("pool_get: %s: page empty", pp->pr_wchan);
1020: }
1021: #endif
1022: v = pr_item_notouch_get(pp, ph);
1023: #ifdef POOL_DIAGNOSTIC
1024: pr_log(pp, v, PRLOG_GET, file, line);
1025: #endif
1026: } else {
1.102 chs 1027: v = pi = LIST_FIRST(&ph->ph_itemlist);
1.97 yamt 1028: if (__predict_false(v == NULL)) {
1029: pr_leave(pp);
1030: simple_unlock(&pp->pr_slock);
1031: panic("pool_get: %s: page empty", pp->pr_wchan);
1032: }
1.20 thorpej 1033: #ifdef DIAGNOSTIC
1.97 yamt 1034: if (__predict_false(pp->pr_nitems == 0)) {
1035: pr_leave(pp);
1036: simple_unlock(&pp->pr_slock);
1037: printf("pool_get: %s: items on itemlist, nitems %u\n",
1038: pp->pr_wchan, pp->pr_nitems);
1039: panic("pool_get: nitems inconsistent");
1040: }
1.65 enami 1041: #endif
1.56 sommerfe 1042:
1.65 enami 1043: #ifdef POOL_DIAGNOSTIC
1.97 yamt 1044: pr_log(pp, v, PRLOG_GET, file, line);
1.65 enami 1045: #endif
1.3 pk 1046:
1.65 enami 1047: #ifdef DIAGNOSTIC
1.97 yamt 1048: if (__predict_false(pi->pi_magic != PI_MAGIC)) {
1049: pr_printlog(pp, pi, printf);
1050: panic("pool_get(%s): free list modified: "
1051: "magic=%x; page %p; item addr %p\n",
1052: pp->pr_wchan, pi->pi_magic, ph->ph_page, pi);
1053: }
1.3 pk 1054: #endif
1055:
1.97 yamt 1056: /*
1057: * Remove from item list.
1058: */
1.102 chs 1059: LIST_REMOVE(pi, pi_list);
1.97 yamt 1060: }
1.20 thorpej 1061: pp->pr_nitems--;
1062: pp->pr_nout++;
1.6 thorpej 1063: if (ph->ph_nmissing == 0) {
1064: #ifdef DIAGNOSTIC
1.34 thorpej 1065: if (__predict_false(pp->pr_nidle == 0))
1.6 thorpej 1066: panic("pool_get: nidle inconsistent");
1067: #endif
1068: pp->pr_nidle--;
1.88 chs 1069:
1070: /*
1071: * This page was previously empty. Move it to the list of
1072: * partially-full pages. This page is already curpage.
1073: */
1074: LIST_REMOVE(ph, ph_pagelist);
1075: LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
1.6 thorpej 1076: }
1.3 pk 1077: ph->ph_nmissing++;
1.97 yamt 1078: if (ph->ph_nmissing == pp->pr_itemsperpage) {
1.21 thorpej 1079: #ifdef DIAGNOSTIC
1.97 yamt 1080: if (__predict_false((pp->pr_roflags & PR_NOTOUCH) == 0 &&
1.102 chs 1081: !LIST_EMPTY(&ph->ph_itemlist))) {
1.25 thorpej 1082: pr_leave(pp);
1.21 thorpej 1083: simple_unlock(&pp->pr_slock);
1084: panic("pool_get: %s: nmissing inconsistent",
1085: pp->pr_wchan);
1086: }
1087: #endif
1.3 pk 1088: /*
1.88 chs 1089: * This page is now full. Move it to the full list
1090: * and select a new current page.
1.3 pk 1091: */
1.88 chs 1092: LIST_REMOVE(ph, ph_pagelist);
1093: LIST_INSERT_HEAD(&pp->pr_fullpages, ph, ph_pagelist);
1094: pool_update_curpage(pp);
1.1 pk 1095: }
1.3 pk 1096:
1097: pp->pr_nget++;
1.111 christos 1098: pr_leave(pp);
1.20 thorpej 1099:
1100: /*
1101: * If we have a low water mark and we are now below that low
1102: * water mark, add more items to the pool.
1103: */
1.53 thorpej 1104: if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
1.20 thorpej 1105: /*
1106: * XXX: Should we log a warning? Should we set up a timeout
1107: * to try again in a second or so? The latter could break
1108: * a caller's assumptions about interrupt protection, etc.
1109: */
1110: }
1111:
1.21 thorpej 1112: simple_unlock(&pp->pr_slock);
1.1 pk 1113: return (v);
1114: }
1115:
1116: /*
1.43 thorpej 1117: * Internal version of pool_put(). Pool is already locked/entered.
1.1 pk 1118: */
1.43 thorpej 1119: static void
1.101 thorpej 1120: pool_do_put(struct pool *pp, void *v, struct pool_pagelist *pq)
1.1 pk 1121: {
1122: struct pool_item *pi = v;
1.3 pk 1123: struct pool_item_header *ph;
1124: caddr_t page;
1125:
1.61 chs 1126: LOCK_ASSERT(simple_lock_held(&pp->pr_slock));
1.102 chs 1127: SCHED_ASSERT_UNLOCKED();
1.61 chs 1128:
1.66 thorpej 1129: page = (caddr_t)((u_long)v & pp->pr_alloc->pa_pagemask);
1.1 pk 1130:
1.30 thorpej 1131: #ifdef DIAGNOSTIC
1.34 thorpej 1132: if (__predict_false(pp->pr_nout == 0)) {
1.30 thorpej 1133: printf("pool %s: putting with none out\n",
1134: pp->pr_wchan);
1135: panic("pool_put");
1136: }
1137: #endif
1.3 pk 1138:
1.34 thorpej 1139: if (__predict_false((ph = pr_find_pagehead(pp, page)) == NULL)) {
1.25 thorpej 1140: pr_printlog(pp, NULL, printf);
1.3 pk 1141: panic("pool_put: %s: page header missing", pp->pr_wchan);
1142: }
1.28 thorpej 1143:
1144: #ifdef LOCKDEBUG
1145: /*
1146: * Check if we're freeing a locked simple lock.
1147: */
1148: simple_lock_freecheck((caddr_t)pi, ((caddr_t)pi) + pp->pr_size);
1149: #endif
1.3 pk 1150:
1151: /*
1152: * Return to item list.
1153: */
1.97 yamt 1154: if (pp->pr_roflags & PR_NOTOUCH) {
1155: pr_item_notouch_put(pp, ph, v);
1156: } else {
1.2 pk 1157: #ifdef DIAGNOSTIC
1.97 yamt 1158: pi->pi_magic = PI_MAGIC;
1.3 pk 1159: #endif
1.32 chs 1160: #ifdef DEBUG
1.97 yamt 1161: {
1162: int i, *ip = v;
1.32 chs 1163:
1.97 yamt 1164: for (i = 0; i < pp->pr_size / sizeof(int); i++) {
1165: *ip++ = PI_MAGIC;
1166: }
1.32 chs 1167: }
1168: #endif
1169:
1.102 chs 1170: LIST_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
1.97 yamt 1171: }
1.79 thorpej 1172: KDASSERT(ph->ph_nmissing != 0);
1.3 pk 1173: ph->ph_nmissing--;
1174: pp->pr_nput++;
1.20 thorpej 1175: pp->pr_nitems++;
1176: pp->pr_nout--;
1.3 pk 1177:
1178: /* Cancel "pool empty" condition if it exists */
1179: if (pp->pr_curpage == NULL)
1180: pp->pr_curpage = ph;
1181:
1182: if (pp->pr_flags & PR_WANTED) {
1183: pp->pr_flags &= ~PR_WANTED;
1.15 pk 1184: if (ph->ph_nmissing == 0)
1185: pp->pr_nidle++;
1.3 pk 1186: wakeup((caddr_t)pp);
1187: return;
1188: }
1189:
1190: /*
1.88 chs 1191: * If this page is now empty, do one of two things:
1.21 thorpej 1192: *
1.88 chs 1193: * (1) If we have more pages than the page high water mark,
1.96 thorpej 1194: * free the page back to the system. ONLY CONSIDER
1.90 thorpej 1195: * FREEING BACK A PAGE IF WE HAVE MORE THAN OUR MINIMUM PAGE
1196: * CLAIM.
1.21 thorpej 1197: *
1.88 chs 1198: * (2) Otherwise, move the page to the empty page list.
1199: *
1200: * Either way, select a new current page (so we use a partially-full
1201: * page if one is available).
1.3 pk 1202: */
1203: if (ph->ph_nmissing == 0) {
1.6 thorpej 1204: pp->pr_nidle++;
1.90 thorpej 1205: if (pp->pr_npages > pp->pr_minpages &&
1206: (pp->pr_npages > pp->pr_maxpages ||
1.117 yamt 1207: pa_starved_p(pp->pr_alloc))) {
1.101 thorpej 1208: pr_rmpage(pp, ph, pq);
1.3 pk 1209: } else {
1.88 chs 1210: LIST_REMOVE(ph, ph_pagelist);
1211: LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);
1.3 pk 1212:
1.21 thorpej 1213: /*
1214: * Update the timestamp on the page. A page must
1215: * be idle for some period of time before it can
1216: * be reclaimed by the pagedaemon. This minimizes
1217: * ping-pong'ing for memory.
1218: */
1.118 kardel 1219: getmicrotime(&ph->ph_time);
1.1 pk 1220: }
1.88 chs 1221: pool_update_curpage(pp);
1.1 pk 1222: }
1.88 chs 1223:
1.21 thorpej 1224: /*
1.88 chs 1225: * If the page was previously completely full, move it to the
1226: * partially-full list and make it the current page. The next
1227: * allocation will get the item from this page, instead of
1228: * further fragmenting the pool.
1.21 thorpej 1229: */
1230: else if (ph->ph_nmissing == (pp->pr_itemsperpage - 1)) {
1.88 chs 1231: LIST_REMOVE(ph, ph_pagelist);
1232: LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
1.21 thorpej 1233: pp->pr_curpage = ph;
1234: }
1.43 thorpej 1235: }
1236:
1237: /*
1238: * Return resource to the pool; must be called at appropriate spl level
1239: */
1.59 thorpej 1240: #ifdef POOL_DIAGNOSTIC
1.43 thorpej 1241: void
1242: _pool_put(struct pool *pp, void *v, const char *file, long line)
1243: {
1.101 thorpej 1244: struct pool_pagelist pq;
1245:
1246: LIST_INIT(&pq);
1.43 thorpej 1247:
1248: simple_lock(&pp->pr_slock);
1249: pr_enter(pp, file, line);
1250:
1.56 sommerfe 1251: pr_log(pp, v, PRLOG_PUT, file, line);
1252:
1.101 thorpej 1253: pool_do_put(pp, v, &pq);
1.21 thorpej 1254:
1.25 thorpej 1255: pr_leave(pp);
1.21 thorpej 1256: simple_unlock(&pp->pr_slock);
1.101 thorpej 1257:
1.102 chs 1258: pr_pagelist_free(pp, &pq);
1.1 pk 1259: }
1.57 sommerfe 1260: #undef pool_put
1.59 thorpej 1261: #endif /* POOL_DIAGNOSTIC */
1.1 pk 1262:
1.56 sommerfe 1263: void
1264: pool_put(struct pool *pp, void *v)
1265: {
1.101 thorpej 1266: struct pool_pagelist pq;
1267:
1268: LIST_INIT(&pq);
1.56 sommerfe 1269:
1270: simple_lock(&pp->pr_slock);
1.101 thorpej 1271: pool_do_put(pp, v, &pq);
1272: simple_unlock(&pp->pr_slock);
1.56 sommerfe 1273:
1.102 chs 1274: pr_pagelist_free(pp, &pq);
1.56 sommerfe 1275: }
1.57 sommerfe 1276:
1.59 thorpej 1277: #ifdef POOL_DIAGNOSTIC
1.57 sommerfe 1278: #define pool_put(h, v) _pool_put((h), (v), __FILE__, __LINE__)
1.56 sommerfe 1279: #endif
1.74 thorpej 1280:
1281: /*
1.113 yamt 1282: * pool_grow: grow a pool by a page.
1283: *
1284: * => called with pool locked.
1285: * => unlock and relock the pool.
1286: * => return with pool locked.
1287: */
1288:
1289: static int
1290: pool_grow(struct pool *pp, int flags)
1291: {
1292: struct pool_item_header *ph = NULL;
1293: char *cp;
1294:
1295: simple_unlock(&pp->pr_slock);
1296: cp = pool_allocator_alloc(pp, flags);
1297: if (__predict_true(cp != NULL)) {
1298: ph = pool_alloc_item_header(pp, cp, flags);
1299: }
1300: if (__predict_false(cp == NULL || ph == NULL)) {
1301: if (cp != NULL) {
1302: pool_allocator_free(pp, cp);
1303: }
1304: simple_lock(&pp->pr_slock);
1305: return ENOMEM;
1306: }
1307:
1308: simple_lock(&pp->pr_slock);
1309: pool_prime_page(pp, cp, ph);
1310: pp->pr_npagealloc++;
1311: return 0;
1312: }
1313:
1314: /*
1.74 thorpej 1315: * Add N items to the pool.
1316: */
1317: int
1318: pool_prime(struct pool *pp, int n)
1319: {
1.75 simonb 1320: int newpages;
1.113 yamt 1321: int error = 0;
1.74 thorpej 1322:
1323: simple_lock(&pp->pr_slock);
1324:
1325: newpages = roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1326:
1327: while (newpages-- > 0) {
1.113 yamt 1328: error = pool_grow(pp, PR_NOWAIT);
1329: if (error) {
1.74 thorpej 1330: break;
1331: }
1332: pp->pr_minpages++;
1333: }
1334:
1335: if (pp->pr_minpages >= pp->pr_maxpages)
1336: pp->pr_maxpages = pp->pr_minpages + 1; /* XXX */
1337:
1338: simple_unlock(&pp->pr_slock);
1.113 yamt 1339: return error;
1.74 thorpej 1340: }
1.55 thorpej 1341:
1342: /*
1.3 pk 1343: * Add a page worth of items to the pool.
1.21 thorpej 1344: *
1345: * Note, we must be called with the pool descriptor LOCKED.
1.3 pk 1346: */
1.55 thorpej 1347: static void
1348: pool_prime_page(struct pool *pp, caddr_t storage, struct pool_item_header *ph)
1.3 pk 1349: {
1350: struct pool_item *pi;
1351: caddr_t cp = storage;
1352: unsigned int align = pp->pr_align;
1353: unsigned int ioff = pp->pr_itemoffset;
1.55 thorpej 1354: int n;
1.36 pk 1355:
1.91 yamt 1356: LOCK_ASSERT(simple_lock_held(&pp->pr_slock));
1357:
1.66 thorpej 1358: #ifdef DIAGNOSTIC
1359: if (((u_long)cp & (pp->pr_alloc->pa_pagesz - 1)) != 0)
1.36 pk 1360: panic("pool_prime_page: %s: unaligned page", pp->pr_wchan);
1.66 thorpej 1361: #endif
1.3 pk 1362:
1363: /*
1364: * Insert page header.
1365: */
1.88 chs 1366: LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);
1.102 chs 1367: LIST_INIT(&ph->ph_itemlist);
1.3 pk 1368: ph->ph_page = storage;
1369: ph->ph_nmissing = 0;
1.118 kardel 1370: getmicrotime(&ph->ph_time);
1.88 chs 1371: if ((pp->pr_roflags & PR_PHINPAGE) == 0)
1372: SPLAY_INSERT(phtree, &pp->pr_phtree, ph);
1.3 pk 1373:
1.6 thorpej 1374: pp->pr_nidle++;
1375:
1.3 pk 1376: /*
1377: * Color this page.
1378: */
1379: cp = (caddr_t)(cp + pp->pr_curcolor);
1380: if ((pp->pr_curcolor += align) > pp->pr_maxcolor)
1381: pp->pr_curcolor = 0;
1382:
1383: /*
 1384: 	 * Adjust storage to apply alignment to `pr_itemoffset' in each item.
1385: */
1386: if (ioff != 0)
1387: cp = (caddr_t)(cp + (align - ioff));
1388:
1389: /*
1390: * Insert remaining chunks on the bucket list.
1391: */
1392: n = pp->pr_itemsperpage;
1.20 thorpej 1393: pp->pr_nitems += n;
1.3 pk 1394:
1.97 yamt 1395: if (pp->pr_roflags & PR_NOTOUCH) {
1.99 yamt 1396: pool_item_freelist_t *freelist = PR_FREELIST(ph);
1.97 yamt 1397: int i;
1398:
1.99 yamt 1399: ph->ph_off = cp - storage;
1.97 yamt 1400: ph->ph_firstfree = 0;
1401: for (i = 0; i < n - 1; i++)
1402: freelist[i] = i + 1;
1403: freelist[n - 1] = PR_INDEX_EOL;
1404: } else {
1405: while (n--) {
1406: pi = (struct pool_item *)cp;
1.78 thorpej 1407:
1.97 yamt 1408: KASSERT(((((vaddr_t)pi) + ioff) & (align - 1)) == 0);
1.3 pk 1409:
1.97 yamt 1410: /* Insert on page list */
1.102 chs 1411: LIST_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
1.3 pk 1412: #ifdef DIAGNOSTIC
1.97 yamt 1413: pi->pi_magic = PI_MAGIC;
1.3 pk 1414: #endif
1.97 yamt 1415: cp = (caddr_t)(cp + pp->pr_size);
1416: }
1.3 pk 1417: }
1418:
1419: /*
1420: * If the pool was depleted, point at the new page.
1421: */
1422: if (pp->pr_curpage == NULL)
1423: pp->pr_curpage = ph;
1424:
1425: if (++pp->pr_npages > pp->pr_hiwat)
1426: pp->pr_hiwat = pp->pr_npages;
1427: }
1428:
1.20 thorpej 1429: /*
1.52 thorpej 1430: * Used by pool_get() when nitems drops below the low water mark, to
1.88 chs 1431: * bring pr_nitems back up to the low water mark.
1.20 thorpej 1432: *
1.21 thorpej 1433: * Note 1, we never wait for memory here, we let the caller decide what to do.
1.20 thorpej 1434: *
1.73 thorpej 1435: * Note 2, we must be called with the pool already locked, and we return
1.20 thorpej 1436: * with it locked.
1437: */
1438: static int
1.42 thorpej 1439: pool_catchup(struct pool *pp)
1.20 thorpej 1440: {
1441: int error = 0;
1442:
1.54 thorpej 1443: while (POOL_NEEDS_CATCHUP(pp)) {
1.113 yamt 1444: error = pool_grow(pp, PR_NOWAIT);
1445: if (error) {
1.20 thorpej 1446: break;
1447: }
1448: }
1.113 yamt 1449: return error;
1.20 thorpej 1450: }
1451:
1.88 chs 1452: static void
1453: pool_update_curpage(struct pool *pp)
1454: {
1455:
1456: pp->pr_curpage = LIST_FIRST(&pp->pr_partpages);
1457: if (pp->pr_curpage == NULL) {
1458: pp->pr_curpage = LIST_FIRST(&pp->pr_emptypages);
1459: }
1460: }
1461:
1.3 pk 1462: void
1.42 thorpej 1463: pool_setlowat(struct pool *pp, int n)
1.3 pk 1464: {
1.15 pk 1465:
1.21 thorpej 1466: simple_lock(&pp->pr_slock);
1467:
1.3 pk 1468: pp->pr_minitems = n;
1.15 pk 1469: pp->pr_minpages = (n == 0)
1470: ? 0
1.18 thorpej 1471: : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1.20 thorpej 1472:
1473: /* Make sure we're caught up with the newly-set low water mark. */
1.75 simonb 1474: if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
1.20 thorpej 1475: /*
1476: * XXX: Should we log a warning? Should we set up a timeout
1477: * to try again in a second or so? The latter could break
1478: * a caller's assumptions about interrupt protection, etc.
1479: */
1480: }
1.21 thorpej 1481:
1482: simple_unlock(&pp->pr_slock);
1.3 pk 1483: }
1484:
1485: void
1.42 thorpej 1486: pool_sethiwat(struct pool *pp, int n)
1.3 pk 1487: {
1.15 pk 1488:
1.21 thorpej 1489: simple_lock(&pp->pr_slock);
1490:
1.15 pk 1491: pp->pr_maxpages = (n == 0)
1492: ? 0
1.18 thorpej 1493: : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1.21 thorpej 1494:
1495: simple_unlock(&pp->pr_slock);
1.3 pk 1496: }
1497:
1.20 thorpej 1498: void
1.42 thorpej 1499: pool_sethardlimit(struct pool *pp, int n, const char *warnmess, int ratecap)
1.20 thorpej 1500: {
1501:
1.21 thorpej 1502: simple_lock(&pp->pr_slock);
1.20 thorpej 1503:
1504: pp->pr_hardlimit = n;
1505: pp->pr_hardlimit_warning = warnmess;
1.31 thorpej 1506: pp->pr_hardlimit_ratecap.tv_sec = ratecap;
1507: pp->pr_hardlimit_warning_last.tv_sec = 0;
1508: pp->pr_hardlimit_warning_last.tv_usec = 0;
1.20 thorpej 1509:
1510: /*
1.21 thorpej 1511: * In-line version of pool_sethiwat(), because we don't want to
1512: * release the lock.
1.20 thorpej 1513: */
1514: pp->pr_maxpages = (n == 0)
1515: ? 0
1516: : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1.21 thorpej 1517:
1518: simple_unlock(&pp->pr_slock);
1.20 thorpej 1519: }
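/*
 * Illustrative usage (hypothetical pool): keep at least 16 items
 * primed, and when the hard limit of 1024 items is reached, log the
 * warning message at most once every 60 seconds:
 *
 *	pool_setlowat(&foo_pool, 16);
 *	pool_sethardlimit(&foo_pool, 1024,
 *	    "WARNING: foo_pool limit reached", 60);
 */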
1.3 pk 1520:
1521: /*
1522: * Release all complete pages that have not been used recently.
1523: */
1.66 thorpej 1524: int
1.59 thorpej 1525: #ifdef POOL_DIAGNOSTIC
1.42 thorpej 1526: _pool_reclaim(struct pool *pp, const char *file, long line)
1.56 sommerfe 1527: #else
1528: pool_reclaim(struct pool *pp)
1529: #endif
1.3 pk 1530: {
1531: struct pool_item_header *ph, *phnext;
1.43 thorpej 1532: struct pool_cache *pc;
1.61 chs 1533: struct pool_pagelist pq;
1.102 chs 1534: struct pool_cache_grouplist pcgl;
1535: struct timeval curtime, diff;
1.3 pk 1536:
1.68 thorpej 1537: if (pp->pr_drain_hook != NULL) {
1538: /*
1539: * The drain hook must be called with the pool unlocked.
1540: */
1541: (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, PR_NOWAIT);
1542: }
1543:
1.21 thorpej 1544: if (simple_lock_try(&pp->pr_slock) == 0)
1.66 thorpej 1545: return (0);
1.25 thorpej 1546: pr_enter(pp, file, line);
1.68 thorpej 1547:
1.88 chs 1548: LIST_INIT(&pq);
1.102 chs 1549: LIST_INIT(&pcgl);
1.3 pk 1550:
1.43 thorpej 1551: /*
1552: * Reclaim items from the pool's caches.
1553: */
1.102 chs 1554: LIST_FOREACH(pc, &pp->pr_cachelist, pc_poollist)
1555: pool_cache_reclaim(pc, &pq, &pcgl);
1.43 thorpej 1556:
1.118 kardel 1557: getmicrotime(&curtime);
1.21 thorpej 1558:
1.88 chs 1559: for (ph = LIST_FIRST(&pp->pr_emptypages); ph != NULL; ph = phnext) {
1560: phnext = LIST_NEXT(ph, ph_pagelist);
1.3 pk 1561:
1562: /* Check our minimum page claim */
1563: if (pp->pr_npages <= pp->pr_minpages)
1564: break;
1565:
1.88 chs 1566: KASSERT(ph->ph_nmissing == 0);
1567: timersub(&curtime, &ph->ph_time, &diff);
1.117 yamt 1568: if (diff.tv_sec < pool_inactive_time
1569: && !pa_starved_p(pp->pr_alloc))
1.88 chs 1570: continue;
1.21 thorpej 1571:
1.88 chs 1572: /*
1573: * If freeing this page would put us below
1574: * the low water mark, stop now.
1575: */
1576: if ((pp->pr_nitems - pp->pr_itemsperpage) <
1577: pp->pr_minitems)
1578: break;
1.21 thorpej 1579:
1.88 chs 1580: pr_rmpage(pp, ph, &pq);
1.3 pk 1581: }
1582:
1.25 thorpej 1583: pr_leave(pp);
1.21 thorpej 1584: simple_unlock(&pp->pr_slock);
1.102 chs 1585: if (LIST_EMPTY(&pq) && LIST_EMPTY(&pcgl))
1586: return 0;
1.66 thorpej 1587:
1.101 thorpej 1588: pr_pagelist_free(pp, &pq);
1.102 chs 1589: pcg_grouplist_free(&pcgl);
1.66 thorpej 1590: return (1);
1.3 pk 1591: }
1592:
1593: /*
1594: * Drain pools, one at a time.
1.21 thorpej 1595: *
1596: * Note, we must never be called from an interrupt context.
1.3 pk 1597: */
1598: void
1.42 thorpej 1599: pool_drain(void *arg)
1.3 pk 1600: {
1601: struct pool *pp;
1.23 thorpej 1602: int s;
1.3 pk 1603:
1.61 chs 1604: pp = NULL;
1.49 thorpej 1605: s = splvm();
1.23 thorpej 1606: simple_lock(&pool_head_slock);
1.61 chs 1607: if (drainpp == NULL) {
1.102 chs 1608: drainpp = LIST_FIRST(&pool_head);
1.61 chs 1609: }
1610: if (drainpp) {
1611: pp = drainpp;
1.102 chs 1612: drainpp = LIST_NEXT(pp, pr_poollist);
1.61 chs 1613: }
1614: simple_unlock(&pool_head_slock);
1.115 christos 1615: if (pp)
1616: pool_reclaim(pp);
1.61 chs 1617: splx(s);
1.3 pk 1618: }
1619:
1620: /*
1621: * Diagnostic helpers.
1622: */
1623: void
1.42 thorpej 1624: pool_print(struct pool *pp, const char *modif)
1.21 thorpej 1625: {
1626: int s;
1627:
1.49 thorpej 1628: s = splvm();
1.25 thorpej 1629: if (simple_lock_try(&pp->pr_slock) == 0) {
1630: printf("pool %s is locked; try again later\n",
1631: pp->pr_wchan);
1632: splx(s);
1633: return;
1634: }
1635: pool_print1(pp, modif, printf);
1.21 thorpej 1636: simple_unlock(&pp->pr_slock);
1637: splx(s);
1638: }
1639:
1.25 thorpej 1640: void
1.108 yamt 1641: pool_printall(const char *modif, void (*pr)(const char *, ...))
1642: {
1643: struct pool *pp;
1644:
1645: if (simple_lock_try(&pool_head_slock) == 0) {
1646: (*pr)("WARNING: pool_head_slock is locked\n");
1647: } else {
1648: simple_unlock(&pool_head_slock);
1649: }
1650:
1651: LIST_FOREACH(pp, &pool_head, pr_poollist) {
1652: pool_printit(pp, modif, pr);
1653: }
1654: }
1655:
1656: void
1.42 thorpej 1657: pool_printit(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
1.25 thorpej 1658: {
1659:
1660: if (pp == NULL) {
1661: (*pr)("Must specify a pool to print.\n");
1662: return;
1663: }
1664:
1665: /*
1666: * Called from DDB; interrupts should be blocked, and all
1667: * other processors should be paused. We can skip locking
1668: * the pool in this case.
1669: *
1670: * We do a simple_lock_try() just to print the lock
1671: * status, however.
1672: */
1673:
1674: if (simple_lock_try(&pp->pr_slock) == 0)
1675: (*pr)("WARNING: pool %s is locked\n", pp->pr_wchan);
1676: else
1.107 yamt 1677: simple_unlock(&pp->pr_slock);
1.25 thorpej 1678:
1679: pool_print1(pp, modif, pr);
1680: }
1681:
1.21 thorpej 1682: static void
1.97 yamt 1683: pool_print_pagelist(struct pool *pp, struct pool_pagelist *pl,
1684: void (*pr)(const char *, ...))
1.88 chs 1685: {
1686: struct pool_item_header *ph;
1687: #ifdef DIAGNOSTIC
1688: struct pool_item *pi;
1689: #endif
1690:
1691: LIST_FOREACH(ph, pl, ph_pagelist) {
1692: (*pr)("\t\tpage %p, nmissing %d, time %lu,%lu\n",
1693: ph->ph_page, ph->ph_nmissing,
1694: (u_long)ph->ph_time.tv_sec,
1695: (u_long)ph->ph_time.tv_usec);
1696: #ifdef DIAGNOSTIC
1.97 yamt 1697: if (!(pp->pr_roflags & PR_NOTOUCH)) {
1.102 chs 1698: LIST_FOREACH(pi, &ph->ph_itemlist, pi_list) {
1.97 yamt 1699: if (pi->pi_magic != PI_MAGIC) {
1700: (*pr)("\t\t\titem %p, magic 0x%x\n",
1701: pi, pi->pi_magic);
1702: }
1.88 chs 1703: }
1704: }
1705: #endif
1706: }
1707: }
1708:
1709: static void
1.42 thorpej 1710: pool_print1(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
1.3 pk 1711: {
1.25 thorpej 1712: struct pool_item_header *ph;
1.44 thorpej 1713: struct pool_cache *pc;
1714: struct pool_cache_group *pcg;
1715: int i, print_log = 0, print_pagelist = 0, print_cache = 0;
1.25 thorpej 1716: char c;
1717:
1718: while ((c = *modif++) != '\0') {
1719: if (c == 'l')
1720: print_log = 1;
1721: if (c == 'p')
1722: print_pagelist = 1;
1.44 thorpej 1723: if (c == 'c')
1724: print_cache = 1;
1.25 thorpej 1725: }
1726:
1727: (*pr)("POOL %s: size %u, align %u, ioff %u, roflags 0x%08x\n",
1728: pp->pr_wchan, pp->pr_size, pp->pr_align, pp->pr_itemoffset,
1729: pp->pr_roflags);
1.66 thorpej 1730: (*pr)("\talloc %p\n", pp->pr_alloc);
1.25 thorpej 1731: (*pr)("\tminitems %u, minpages %u, maxpages %u, npages %u\n",
1732: pp->pr_minitems, pp->pr_minpages, pp->pr_maxpages, pp->pr_npages);
1733: (*pr)("\titemsperpage %u, nitems %u, nout %u, hardlimit %u\n",
1734: pp->pr_itemsperpage, pp->pr_nitems, pp->pr_nout, pp->pr_hardlimit);
1735:
1736: (*pr)("\n\tnget %lu, nfail %lu, nput %lu\n",
1737: pp->pr_nget, pp->pr_nfail, pp->pr_nput);
1738: (*pr)("\tnpagealloc %lu, npagefree %lu, hiwat %u, nidle %lu\n",
1739: pp->pr_npagealloc, pp->pr_npagefree, pp->pr_hiwat, pp->pr_nidle);
1740:
1741: if (print_pagelist == 0)
1742: goto skip_pagelist;
1743:
1.88 chs 1744: if ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
1745: (*pr)("\n\tempty page list:\n");
1.97 yamt 1746: pool_print_pagelist(pp, &pp->pr_emptypages, pr);
1.88 chs 1747: if ((ph = LIST_FIRST(&pp->pr_fullpages)) != NULL)
1748: (*pr)("\n\tfull page list:\n");
1.97 yamt 1749: pool_print_pagelist(pp, &pp->pr_fullpages, pr);
1.88 chs 1750: if ((ph = LIST_FIRST(&pp->pr_partpages)) != NULL)
1751: (*pr)("\n\tpartial-page list:\n");
1.97 yamt 1752: pool_print_pagelist(pp, &pp->pr_partpages, pr);
1.88 chs 1753:
1.25 thorpej 1754: if (pp->pr_curpage == NULL)
1755: (*pr)("\tno current page\n");
1756: else
1757: (*pr)("\tcurpage %p\n", pp->pr_curpage->ph_page);
1758:
1759: skip_pagelist:
1760: if (print_log == 0)
1761: goto skip_log;
1762:
1763: (*pr)("\n");
1764: if ((pp->pr_roflags & PR_LOGGING) == 0)
1765: (*pr)("\tno log\n");
1766: else
1767: pr_printlog(pp, NULL, pr);
1.3 pk 1768:
1.25 thorpej 1769: skip_log:
1.44 thorpej 1770: if (print_cache == 0)
1771: goto skip_cache;
1772:
1.102 chs 1773: #define PR_GROUPLIST(pcg) \
1774: (*pr)("\t\tgroup %p: avail %d\n", pcg, pcg->pcg_avail); \
1775: for (i = 0; i < PCG_NOBJECTS; i++) { \
1776: if (pcg->pcg_objects[i].pcgo_pa != \
1777: POOL_PADDR_INVALID) { \
1778: (*pr)("\t\t\t%p, 0x%llx\n", \
1779: pcg->pcg_objects[i].pcgo_va, \
1780: (unsigned long long) \
1781: pcg->pcg_objects[i].pcgo_pa); \
1782: } else { \
1783: (*pr)("\t\t\t%p\n", \
1784: pcg->pcg_objects[i].pcgo_va); \
1785: } \
1786: }
1787:
1788: LIST_FOREACH(pc, &pp->pr_cachelist, pc_poollist) {
1.103 chs 1789: (*pr)("\tcache %p\n", pc);
1.48 thorpej 1790: (*pr)("\t hits %lu misses %lu ngroups %lu nitems %lu\n",
1791: pc->pc_hits, pc->pc_misses, pc->pc_ngroups, pc->pc_nitems);
1.102 chs 1792: (*pr)("\t full groups:\n");
1.103 chs 1793: LIST_FOREACH(pcg, &pc->pc_fullgroups, pcg_list) {
1.102 chs 1794: PR_GROUPLIST(pcg);
1.103 chs 1795: }
1.102 chs 1796: (*pr)("\t partial groups:\n");
1.103 chs 1797: LIST_FOREACH(pcg, &pc->pc_partgroups, pcg_list) {
1.102 chs 1798: PR_GROUPLIST(pcg);
1.103 chs 1799: }
1.102 chs 1800: (*pr)("\t empty groups:\n");
1.103 chs 1801: LIST_FOREACH(pcg, &pc->pc_emptygroups, pcg_list) {
1.102 chs 1802: PR_GROUPLIST(pcg);
1.103 chs 1803: }
1.44 thorpej 1804: }
1.102 chs 1805: #undef PR_GROUPLIST
1.44 thorpej 1806:
1807: skip_cache:
1.88 chs 1808: pr_enter_check(pp, pr);
1809: }
1810:
1811: static int
1812: pool_chk_page(struct pool *pp, const char *label, struct pool_item_header *ph)
1813: {
1814: struct pool_item *pi;
1815: caddr_t page;
1816: int n;
1817:
1818: page = (caddr_t)((u_long)ph & pp->pr_alloc->pa_pagemask);
1819: if (page != ph->ph_page &&
1820: (pp->pr_roflags & PR_PHINPAGE) != 0) {
1821: if (label != NULL)
1822: printf("%s: ", label);
1823: printf("pool(%p:%s): page inconsistency: page %p;"
1824: " at page head addr %p (p %p)\n", pp,
1825: pp->pr_wchan, ph->ph_page,
1826: ph, page);
1827: return 1;
1828: }
1.3 pk 1829:
1.97 yamt 1830: if ((pp->pr_roflags & PR_NOTOUCH) != 0)
1831: return 0;
1832:
1.102 chs 1833: for (pi = LIST_FIRST(&ph->ph_itemlist), n = 0;
1.88 chs 1834: pi != NULL;
1.102 chs 1835: pi = LIST_NEXT(pi,pi_list), n++) {
1.88 chs 1836:
1837: #ifdef DIAGNOSTIC
1838: if (pi->pi_magic != PI_MAGIC) {
1839: if (label != NULL)
1840: printf("%s: ", label);
1841: printf("pool(%s): free list modified: magic=%x;"
1842: " page %p; item ordinal %d;"
1843: " addr %p (p %p)\n",
1844: pp->pr_wchan, pi->pi_magic, ph->ph_page,
1845: n, pi, page);
1846: panic("pool");
1847: }
1848: #endif
1849: page =
1850: (caddr_t)((u_long)pi & pp->pr_alloc->pa_pagemask);
1851: if (page == ph->ph_page)
1852: continue;
1853:
1854: if (label != NULL)
1855: printf("%s: ", label);
1856: printf("pool(%p:%s): page inconsistency: page %p;"
1857: " item ordinal %d; addr %p (p %p)\n", pp,
1858: pp->pr_wchan, ph->ph_page,
1859: n, pi, page);
1860: return 1;
1861: }
1862: return 0;
1.3 pk 1863: }
1864:
1.88 chs 1865:
1.3 pk 1866: int
1.42 thorpej 1867: pool_chk(struct pool *pp, const char *label)
1.3 pk 1868: {
1869: struct pool_item_header *ph;
1870: int r = 0;
1871:
1.21 thorpej 1872: simple_lock(&pp->pr_slock);
1.88 chs 1873: LIST_FOREACH(ph, &pp->pr_emptypages, ph_pagelist) {
1874: r = pool_chk_page(pp, label, ph);
1875: if (r) {
1876: goto out;
1877: }
1878: }
1879: LIST_FOREACH(ph, &pp->pr_fullpages, ph_pagelist) {
1880: r = pool_chk_page(pp, label, ph);
1881: if (r) {
1.3 pk 1882: goto out;
1883: }
1.88 chs 1884: }
1885: LIST_FOREACH(ph, &pp->pr_partpages, ph_pagelist) {
1886: r = pool_chk_page(pp, label, ph);
1887: if (r) {
1.3 pk 1888: goto out;
1889: }
1890: }
1.88 chs 1891:
1.3 pk 1892: out:
1.21 thorpej 1893: simple_unlock(&pp->pr_slock);
1.3 pk 1894: return (r);
1.43 thorpej 1895: }
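/*
 * Note (editorial, not part of the original source): pool_chk()
 * verifies, for every page on the empty, full and partial lists,
 * that the page header is consistent with the page address (for
 * PR_PHINPAGE pools) and that each free item lies within its page;
 * the PI_MAGIC check on free items is additionally enabled under
 * DIAGNOSTIC.
 */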
1896:
1897: /*
1898: * pool_cache_init:
1899: *
1900: * Initialize a pool cache.
1901: *
1902: * NOTE: If the pool must be protected from interrupts, we expect
1903: * to be called at the appropriate interrupt priority level.
1904: */
1905: void
1906: pool_cache_init(struct pool_cache *pc, struct pool *pp,
1907: int (*ctor)(void *, void *, int),
1908: void (*dtor)(void *, void *),
1909: void *arg)
1910: {
1911:
1.102 chs 1912: LIST_INIT(&pc->pc_emptygroups);
1913: LIST_INIT(&pc->pc_fullgroups);
1914: LIST_INIT(&pc->pc_partgroups);
1.43 thorpej 1915: simple_lock_init(&pc->pc_slock);
1916:
1917: pc->pc_pool = pp;
1918:
1919: pc->pc_ctor = ctor;
1920: pc->pc_dtor = dtor;
1921: pc->pc_arg = arg;
1922:
1.48 thorpej 1923: pc->pc_hits = 0;
1924: pc->pc_misses = 0;
1925:
1926: pc->pc_ngroups = 0;
1927:
1928: pc->pc_nitems = 0;
1929:
1.43 thorpej 1930: simple_lock(&pp->pr_slock);
1.102 chs 1931: LIST_INSERT_HEAD(&pp->pr_cachelist, pc, pc_poollist);
1.43 thorpej 1932: simple_unlock(&pp->pr_slock);
1933: }
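/*
 * Illustrative example (hypothetical "foo" subsystem, not part of
 * this file): a typical client creates a pool and layers a cache
 * with a constructor on top of it:
 *
 *	static struct pool foo_pool;
 *	static struct pool_cache foo_cache;
 *
 *	static int
 *	foo_ctor(void *arg, void *obj, int flags)
 *	{
 *		struct foo *f = obj;
 *
 *		memset(f, 0, sizeof(*f));
 *		return (0);
 *	}
 *
 *	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0, "foopl",
 *	    &pool_allocator_nointr);
 *	pool_cache_init(&foo_cache, &foo_pool, foo_ctor, NULL, NULL);
 */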
1934:
1935: /*
1936: * pool_cache_destroy:
1937: *
1938: * Destroy a pool cache.
1939: */
1940: void
1941: pool_cache_destroy(struct pool_cache *pc)
1942: {
1943: struct pool *pp = pc->pc_pool;
1944:
1945: /* First, invalidate the entire cache. */
1946: pool_cache_invalidate(pc);
1947:
1948: /* ...and remove it from the pool's cache list. */
1949: simple_lock(&pp->pr_slock);
1.102 chs 1950: LIST_REMOVE(pc, pc_poollist);
1.43 thorpej 1951: simple_unlock(&pp->pr_slock);
1952: }
1953:
1.110 perry 1954: static inline void *
1.87 thorpej 1955: pcg_get(struct pool_cache_group *pcg, paddr_t *pap)
1.43 thorpej 1956: {
1957: void *object;
1958: u_int idx;
1959:
1960: KASSERT(pcg->pcg_avail <= PCG_NOBJECTS);
1.45 thorpej 1961: KASSERT(pcg->pcg_avail != 0);
1.43 thorpej 1962: idx = --pcg->pcg_avail;
1963:
1.87 thorpej 1964: KASSERT(pcg->pcg_objects[idx].pcgo_va != NULL);
1965: object = pcg->pcg_objects[idx].pcgo_va;
1966: if (pap != NULL)
1967: *pap = pcg->pcg_objects[idx].pcgo_pa;
1968: pcg->pcg_objects[idx].pcgo_va = NULL;
1.43 thorpej 1969:
1970: return (object);
1971: }
1972:
1.110 perry 1973: static inline void
1.87 thorpej 1974: pcg_put(struct pool_cache_group *pcg, void *object, paddr_t pa)
1.43 thorpej 1975: {
1976: u_int idx;
1977:
1978: KASSERT(pcg->pcg_avail < PCG_NOBJECTS);
1979: idx = pcg->pcg_avail++;
1980:
1.87 thorpej 1981: KASSERT(pcg->pcg_objects[idx].pcgo_va == NULL);
1982: pcg->pcg_objects[idx].pcgo_va = object;
1983: pcg->pcg_objects[idx].pcgo_pa = pa;
1.43 thorpej 1984: }
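/*
 * Note (editorial, not part of the original source): a cache group
 * is a small fixed-size stack of up to PCG_NOBJECTS (va, pa) pairs;
 * pcg_put() pushes an object and pcg_get() pops one, with pcg_avail
 * as the current stack depth.
 */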
1985:
1.102 chs 1986: static void
1987: pcg_grouplist_free(struct pool_cache_grouplist *pcgl)
1988: {
1989: struct pool_cache_group *pcg;
1990: int s;
1991:
1992: s = splvm();
1993: while ((pcg = LIST_FIRST(pcgl)) != NULL) {
1994: LIST_REMOVE(pcg, pcg_list);
1995: pool_put(&pcgpool, pcg);
1996: }
1997: splx(s);
1998: }
1999:
1.43 thorpej 2000: /*
1.87 thorpej 2001: * pool_cache_get{,_paddr}:
1.43 thorpej 2002: *
1.87 thorpej 2003: * Get an object from a pool cache (optionally returning
2004: * the physical address of the object).
1.43 thorpej 2005: */
2006: void *
1.87 thorpej 2007: pool_cache_get_paddr(struct pool_cache *pc, int flags, paddr_t *pap)
1.43 thorpej 2008: {
2009: struct pool_cache_group *pcg;
2010: void *object;
1.58 thorpej 2011:
2012: #ifdef LOCKDEBUG
2013: if (flags & PR_WAITOK)
1.119 ! yamt 2014: ASSERT_SLEEPABLE(NULL, "pool_cache_get(PR_WAITOK)");
1.58 thorpej 2015: #endif
1.43 thorpej 2016:
2017: simple_lock(&pc->pc_slock);
2018:
1.102 chs 2019: pcg = LIST_FIRST(&pc->pc_partgroups);
2020: if (pcg == NULL) {
2021: pcg = LIST_FIRST(&pc->pc_fullgroups);
2022: if (pcg != NULL) {
2023: LIST_REMOVE(pcg, pcg_list);
2024: LIST_INSERT_HEAD(&pc->pc_partgroups, pcg, pcg_list);
1.43 thorpej 2025: }
1.102 chs 2026: }
2027: if (pcg == NULL) {
1.43 thorpej 2028:
2029: /*
2030: * No groups with any available objects. Allocate
2031: * a new object, construct it, and return it to
2032: * the caller. We will allocate a group, if necessary,
2033: * when the object is freed back to the cache.
2034: */
1.48 thorpej 2035: pc->pc_misses++;
1.43 thorpej 2036: simple_unlock(&pc->pc_slock);
2037: object = pool_get(pc->pc_pool, flags);
2038: if (object != NULL && pc->pc_ctor != NULL) {
2039: if ((*pc->pc_ctor)(pc->pc_arg, object, flags) != 0) {
2040: pool_put(pc->pc_pool, object);
2041: return (NULL);
2042: }
2043: }
1.87 thorpej 2044: if (object != NULL && pap != NULL) {
2045: #ifdef POOL_VTOPHYS
2046: *pap = POOL_VTOPHYS(object);
2047: #else
2048: *pap = POOL_PADDR_INVALID;
2049: #endif
2050: }
1.43 thorpej 2051: return (object);
2052: }
2053:
1.48 thorpej 2054: pc->pc_hits++;
2055: pc->pc_nitems--;
1.87 thorpej 2056: object = pcg_get(pcg, pap);
1.43 thorpej 2057:
1.102 chs 2058: if (pcg->pcg_avail == 0) {
2059: LIST_REMOVE(pcg, pcg_list);
2060: LIST_INSERT_HEAD(&pc->pc_emptygroups, pcg, pcg_list);
2061: }
1.43 thorpej 2062: simple_unlock(&pc->pc_slock);
2063:
2064: return (object);
2065: }
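/*
 * Illustrative usage (hypothetical caller, not part of this file;
 * assumes the usual pool_cache_get()/pool_cache_put() wrappers that
 * pass NULL/POOL_PADDR_INVALID for the physical address):
 *
 *	struct foo *f;
 *	paddr_t pa;
 *
 *	f = pool_cache_get(&foo_cache, PR_WAITOK);
 *	...
 *	pool_cache_put(&foo_cache, f);
 *
 *	f = pool_cache_get_paddr(&foo_cache, PR_NOWAIT, &pa);
 *	if (f == NULL)
 *		return (ENOMEM);
 */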
2066:
2067: /*
1.87 thorpej 2068: * pool_cache_put{,_paddr}:
1.43 thorpej 2069: *
1.87 thorpej 2070: * Put an object back to the pool cache (optionally caching the
2071: * physical address of the object).
1.43 thorpej 2072: */
2073: void
1.87 thorpej 2074: pool_cache_put_paddr(struct pool_cache *pc, void *object, paddr_t pa)
1.43 thorpej 2075: {
2076: struct pool_cache_group *pcg;
1.60 thorpej 2077: int s;
1.43 thorpej 2078:
1.109 christos 2079: if (__predict_false((pc->pc_pool->pr_flags & PR_WANTED) != 0)) {
2080: goto destruct;
2081: }
2082:
1.43 thorpej 2083: simple_lock(&pc->pc_slock);
2084:
1.102 chs 2085: pcg = LIST_FIRST(&pc->pc_partgroups);
2086: if (pcg == NULL) {
2087: pcg = LIST_FIRST(&pc->pc_emptygroups);
2088: if (pcg != NULL) {
2089: LIST_REMOVE(pcg, pcg_list);
2090: LIST_INSERT_HEAD(&pc->pc_partgroups, pcg, pcg_list);
1.43 thorpej 2091: }
1.102 chs 2092: }
2093: if (pcg == NULL) {
1.43 thorpej 2094:
2095: /*
2096: 		 * No group has room for the object.  Attempt to
1.47 thorpej 2097: 		 * allocate a new group.
1.43 thorpej 2098: */
1.47 thorpej 2099: simple_unlock(&pc->pc_slock);
1.60 thorpej 2100: s = splvm();
1.43 thorpej 2101: pcg = pool_get(&pcgpool, PR_NOWAIT);
1.60 thorpej 2102: splx(s);
1.102 chs 2103: if (pcg == NULL) {
1.109 christos 2104: destruct:
1.102 chs 2105:
2106: /*
2107: * Unable to allocate a cache group; destruct the object
2108: * and free it back to the pool.
2109: */
2110: pool_cache_destruct_object(pc, object);
2111: return;
1.43 thorpej 2112: }
1.102 chs 2113: memset(pcg, 0, sizeof(*pcg));
2114: simple_lock(&pc->pc_slock);
2115: pc->pc_ngroups++;
2116: LIST_INSERT_HEAD(&pc->pc_partgroups, pcg, pcg_list);
1.43 thorpej 2117: }
2118:
1.48 thorpej 2119: pc->pc_nitems++;
1.87 thorpej 2120: pcg_put(pcg, object, pa);
1.43 thorpej 2121:
1.102 chs 2122: if (pcg->pcg_avail == PCG_NOBJECTS) {
2123: LIST_REMOVE(pcg, pcg_list);
2124: LIST_INSERT_HEAD(&pc->pc_fullgroups, pcg, pcg_list);
2125: }
1.43 thorpej 2126: simple_unlock(&pc->pc_slock);
1.51 thorpej 2127: }
2128:
2129: /*
2130: * pool_cache_destruct_object:
2131: *
2132: * Force destruction of an object and its release back into
2133: * the pool.
2134: */
2135: void
2136: pool_cache_destruct_object(struct pool_cache *pc, void *object)
2137: {
2138:
2139: if (pc->pc_dtor != NULL)
2140: (*pc->pc_dtor)(pc->pc_arg, object);
2141: pool_put(pc->pc_pool, object);
1.43 thorpej 2142: }
2143:
1.102 chs 2144: static void
1.106 christos 2145: pool_do_cache_invalidate_grouplist(struct pool_cache_grouplist *pcgsl,
1.105 christos 2146: struct pool_cache *pc, struct pool_pagelist *pq,
1.106 christos 2147: struct pool_cache_grouplist *pcgdl)
1.102 chs 2148: {
1.106 christos 2149: struct pool_cache_group *pcg, *npcg;
1.102 chs 2150: void *object;
2151:
1.106 christos 2152: for (pcg = LIST_FIRST(pcgsl); pcg != NULL; pcg = npcg) {
1.102 chs 2153: npcg = LIST_NEXT(pcg, pcg_list);
2154: while (pcg->pcg_avail != 0) {
2155: pc->pc_nitems--;
2156: object = pcg_get(pcg, NULL);
2157: if (pc->pc_dtor != NULL)
2158: (*pc->pc_dtor)(pc->pc_arg, object);
2159: pool_do_put(pc->pc_pool, object, pq);
2160: }
1.103 chs 2161: pc->pc_ngroups--;
1.102 chs 2162: LIST_REMOVE(pcg, pcg_list);
1.106 christos 2163: LIST_INSERT_HEAD(pcgdl, pcg, pcg_list);
1.102 chs 2164: }
1.105 christos 2165: }
2166:
2167: static void
2168: pool_do_cache_invalidate(struct pool_cache *pc, struct pool_pagelist *pq,
2169: struct pool_cache_grouplist *pcgl)
2170: {
2171:
2172: LOCK_ASSERT(simple_lock_held(&pc->pc_slock));
2173: LOCK_ASSERT(simple_lock_held(&pc->pc_pool->pr_slock));
2174:
1.106 christos 2175: pool_do_cache_invalidate_grouplist(&pc->pc_fullgroups, pc, pq, pcgl);
2176: pool_do_cache_invalidate_grouplist(&pc->pc_partgroups, pc, pq, pcgl);
1.103 chs 2177:
2178: KASSERT(LIST_EMPTY(&pc->pc_partgroups));
2179: KASSERT(LIST_EMPTY(&pc->pc_fullgroups));
2180: KASSERT(pc->pc_nitems == 0);
1.102 chs 2181: }
2182:
1.43 thorpej 2183: /*
1.101 thorpej 2184: * pool_cache_invalidate:
1.43 thorpej 2185: *
1.101 thorpej 2186: * Invalidate a pool cache (destruct and release all of the
2187: * cached objects).
1.43 thorpej 2188: */
1.101 thorpej 2189: void
2190: pool_cache_invalidate(struct pool_cache *pc)
1.43 thorpej 2191: {
1.101 thorpej 2192: struct pool_pagelist pq;
1.102 chs 2193: struct pool_cache_grouplist pcgl;
1.101 thorpej 2194:
2195: LIST_INIT(&pq);
1.102 chs 2196: LIST_INIT(&pcgl);
1.101 thorpej 2197:
2198: simple_lock(&pc->pc_slock);
2199: simple_lock(&pc->pc_pool->pr_slock);
1.43 thorpej 2200:
1.102 chs 2201: pool_do_cache_invalidate(pc, &pq, &pcgl);
1.43 thorpej 2202:
1.101 thorpej 2203: simple_unlock(&pc->pc_pool->pr_slock);
2204: simple_unlock(&pc->pc_slock);
1.43 thorpej 2205:
1.102 chs 2206: pr_pagelist_free(pc->pc_pool, &pq);
2207: pcg_grouplist_free(&pcgl);
1.43 thorpej 2208: }
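/*
 * Note (editorial, not part of the original source): the pages and
 * cache groups released above are collected on the local `pq' and
 * `pcgl' lists while the locks are held, and only handed back to
 * the backend allocator after both locks have been dropped, so the
 * allocator is never entered with pr_slock or pc_slock held.
 */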
2209:
2210: /*
2211: * pool_cache_reclaim:
2212: *
2213: * Reclaim a pool cache for pool_reclaim().
2214: */
2215: static void
1.102 chs 2216: pool_cache_reclaim(struct pool_cache *pc, struct pool_pagelist *pq,
2217: struct pool_cache_grouplist *pcgl)
1.43 thorpej 2218: {
1.101 thorpej 2219:
2220: /*
2221: * We're locking in the wrong order (normally pool_cache -> pool,
2222: * but the pool is already locked when we get here), so we have
2223: 	 * to use trylock.  If we can't lock the pool_cache, we just
2224: 	 * skip it; missing one reclaim pass is harmless.
2225: */
2226: if (simple_lock_try(&pc->pc_slock) == 0)
2227: return;
2228:
1.102 chs 2229: pool_do_cache_invalidate(pc, pq, pcgl);
1.43 thorpej 2230:
2231: simple_unlock(&pc->pc_slock);
1.3 pk 2232: }
1.66 thorpej 2233:
2234: /*
2235: * Pool backend allocators.
2236: *
2237: * Each pool has a backend allocator that handles allocation, deallocation,
2238: * and any additional draining that might be needed.
2239: *
2240: * We provide two standard allocators:
2241: *
2242: * pool_allocator_kmem - the default when no allocator is specified
2243: *
2244: * pool_allocator_nointr - used for pools that will not be accessed
2245: * in interrupt context.
2246: */
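/*
 * Illustrative sketch (hypothetical, not part of this file): a
 * subsystem with its own backing submap could supply a custom
 * allocator by filling in pa_alloc, pa_free and pa_pagesz
 * (pa_pagesz == 0 selects the platform page size), mirroring the
 * standard allocators below; `foo_map' is assumed:
 *
 *	static void *
 *	foo_page_alloc(struct pool *pp, int flags)
 *	{
 *		boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
 *
 *		return ((void *)uvm_km_alloc_poolpage(foo_map, waitok));
 *	}
 *
 *	static void
 *	foo_page_free(struct pool *pp, void *v)
 *	{
 *
 *		uvm_km_free_poolpage(foo_map, (vaddr_t)v);
 *	}
 *
 *	struct pool_allocator foo_allocator = {
 *		foo_page_alloc, foo_page_free, 0,
 *	};
 */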
2247: void *pool_page_alloc(struct pool *, int);
2248: void pool_page_free(struct pool *, void *);
2249:
1.112 bjh21 2250: #ifdef POOL_SUBPAGE
2251: struct pool_allocator pool_allocator_kmem_fullpage = {
2252: pool_page_alloc, pool_page_free, 0,
1.117 yamt 2253: .pa_backingmapptr = &kmem_map,
1.112 bjh21 2254: };
2255: #else
1.66 thorpej 2256: struct pool_allocator pool_allocator_kmem = {
2257: pool_page_alloc, pool_page_free, 0,
1.117 yamt 2258: .pa_backingmapptr = &kmem_map,
1.66 thorpej 2259: };
1.112 bjh21 2260: #endif
1.66 thorpej 2261:
2262: void *pool_page_alloc_nointr(struct pool *, int);
2263: void pool_page_free_nointr(struct pool *, void *);
2264:
1.112 bjh21 2265: #ifdef POOL_SUBPAGE
2266: struct pool_allocator pool_allocator_nointr_fullpage = {
2267: pool_page_alloc_nointr, pool_page_free_nointr, 0,
1.117 yamt 2268: .pa_backingmapptr = &kernel_map,
1.112 bjh21 2269: };
2270: #else
1.66 thorpej 2271: struct pool_allocator pool_allocator_nointr = {
2272: pool_page_alloc_nointr, pool_page_free_nointr, 0,
1.117 yamt 2273: .pa_backingmapptr = &kernel_map,
1.66 thorpej 2274: };
1.112 bjh21 2275: #endif
1.66 thorpej 2276:
2277: #ifdef POOL_SUBPAGE
2278: void *pool_subpage_alloc(struct pool *, int);
2279: void pool_subpage_free(struct pool *, void *);
2280:
1.112 bjh21 2281: struct pool_allocator pool_allocator_kmem = {
2282: pool_subpage_alloc, pool_subpage_free, POOL_SUBPAGE,
1.117 yamt 2283: .pa_backingmapptr = &kmem_map,
1.112 bjh21 2284: };
2285:
2286: void *pool_subpage_alloc_nointr(struct pool *, int);
2287: void pool_subpage_free_nointr(struct pool *, void *);
2288:
2289: struct pool_allocator pool_allocator_nointr = {
2290: pool_subpage_alloc, pool_subpage_free, POOL_SUBPAGE,
1.117 yamt 2291: .pa_backingmapptr = &kmem_map,
1.66 thorpej 2292: };
2293: #endif /* POOL_SUBPAGE */
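/*
 * Note (editorial, not part of the original source): with
 * POOL_SUBPAGE, the default allocators hand out POOL_SUBPAGE-sized
 * chunks drawn from `psppool', itself carved out of full hardware
 * pages, while the *_fullpage variants above retain the whole-page
 * behaviour.
 */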
2294:
1.117 yamt 2295: static void *
2296: pool_allocator_alloc(struct pool *pp, int flags)
1.66 thorpej 2297: {
1.117 yamt 2298: struct pool_allocator *pa = pp->pr_alloc;
1.66 thorpej 2299: void *res;
2300:
1.117 yamt 2301: LOCK_ASSERT(!simple_lock_held(&pp->pr_slock));
1.66 thorpej 2302:
1.117 yamt 2303: res = (*pa->pa_alloc)(pp, flags);
2304: if (res == NULL && (flags & PR_WAITOK) == 0) {
1.66 thorpej 2305: /*
1.117 yamt 2306: * We only run the drain hook here if PR_NOWAIT.
2307: * In other cases, the hook will be run in
2308: * pool_reclaim().
1.66 thorpej 2309: */
1.117 yamt 2310: if (pp->pr_drain_hook != NULL) {
2311: (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags);
2312: res = (*pa->pa_alloc)(pp, flags);
1.66 thorpej 2313: }
1.117 yamt 2314: }
2315: return res;
1.66 thorpej 2316: }
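/*
 * Illustrative note (hypothetical client, not part of this file):
 * the drain hook run above is registered by the pool's owner, e.g.:
 *
 *	static void
 *	foo_drain(void *arg, int flags)
 *	{
 *
 *		... release foo's cached resources back to the pool ...
 *	}
 *
 *	pool_set_drain_hook(&foo_pool, foo_drain, NULL);
 *
 * For PR_WAITOK allocations the hook is instead run later, from
 * pool_reclaim().
 */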
2317:
1.117 yamt 2318: static void
1.66 thorpej 2319: pool_allocator_free(struct pool *pp, void *v)
2320: {
2321: struct pool_allocator *pa = pp->pr_alloc;
2322:
1.91 yamt 2323: LOCK_ASSERT(!simple_lock_held(&pp->pr_slock));
2324:
1.66 thorpej 2325: (*pa->pa_free)(pp, v);
2326: }
2327:
2328: void *
2329: pool_page_alloc(struct pool *pp, int flags)
2330: {
2331: boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
2332:
1.100 yamt 2333: return ((void *) uvm_km_alloc_poolpage_cache(kmem_map, waitok));
1.66 thorpej 2334: }
2335:
2336: void
2337: pool_page_free(struct pool *pp, void *v)
2338: {
2339:
1.98 yamt 2340: uvm_km_free_poolpage_cache(kmem_map, (vaddr_t) v);
2341: }
2342:
2343: static void *
2344: pool_page_alloc_meta(struct pool *pp, int flags)
2345: {
2346: boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
2347:
1.100 yamt 2348: return ((void *) uvm_km_alloc_poolpage(kmem_map, waitok));
1.98 yamt 2349: }
2350:
2351: static void
2352: pool_page_free_meta(struct pool *pp, void *v)
2353: {
2354:
1.100 yamt 2355: uvm_km_free_poolpage(kmem_map, (vaddr_t) v);
1.66 thorpej 2356: }
2357:
2358: #ifdef POOL_SUBPAGE
2359: /* Sub-page allocator, for machines with large hardware pages. */
2360: void *
2361: pool_subpage_alloc(struct pool *pp, int flags)
2362: {
1.93 dbj 2363: void *v;
2364: int s;
2365: s = splvm();
2366: v = pool_get(&psppool, flags);
2367: splx(s);
2368: return v;
1.66 thorpej 2369: }
2370:
2371: void
2372: pool_subpage_free(struct pool *pp, void *v)
2373: {
1.93 dbj 2374: int s;
2375: s = splvm();
1.66 thorpej 2376: pool_put(&psppool, v);
1.93 dbj 2377: splx(s);
1.66 thorpej 2378: }
2379:
2380: /* We don't provide a real nointr allocator. Maybe later. */
2381: void *
1.112 bjh21 2382: pool_subpage_alloc_nointr(struct pool *pp, int flags)
1.66 thorpej 2383: {
2384:
2385: return (pool_subpage_alloc(pp, flags));
2386: }
2387:
2388: void
1.112 bjh21 2389: pool_subpage_free_nointr(struct pool *pp, void *v)
1.66 thorpej 2390: {
2391:
2392: pool_subpage_free(pp, v);
2393: }
1.112 bjh21 2394: #endif /* POOL_SUBPAGE */
1.66 thorpej 2395: void *
2396: pool_page_alloc_nointr(struct pool *pp, int flags)
2397: {
2398: boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
2399:
1.100 yamt 2400: return ((void *) uvm_km_alloc_poolpage_cache(kernel_map, waitok));
1.66 thorpej 2401: }
2402:
2403: void
2404: pool_page_free_nointr(struct pool *pp, void *v)
2405: {
2406:
1.98 yamt 2407: uvm_km_free_poolpage_cache(kernel_map, (vaddr_t) v);
1.66 thorpej 2408: }