Annotation of src/sys/kern/subr_pool.c, Revision 1.131.2.1
1.131.2.1! matt 1: /* $NetBSD: subr_pool.c,v 1.133 2007/10/11 19:53:37 ad Exp $ */
1.1 pk 2:
3: /*-
1.125 ad 4: * Copyright (c) 1997, 1999, 2000, 2002 The NetBSD Foundation, Inc.
1.1 pk 5: * All rights reserved.
6: *
7: * This code is derived from software contributed to The NetBSD Foundation
1.20 thorpej 8: * by Paul Kranenburg; by Jason R. Thorpe of the Numerical Aerospace
9: * Simulation Facility, NASA Ames Research Center.
1.1 pk 10: *
11: * Redistribution and use in source and binary forms, with or without
12: * modification, are permitted provided that the following conditions
13: * are met:
14: * 1. Redistributions of source code must retain the above copyright
15: * notice, this list of conditions and the following disclaimer.
16: * 2. Redistributions in binary form must reproduce the above copyright
17: * notice, this list of conditions and the following disclaimer in the
18: * documentation and/or other materials provided with the distribution.
19: * 3. All advertising materials mentioning features or use of this software
20: * must display the following acknowledgement:
1.13 christos 21: * This product includes software developed by the NetBSD
22: * Foundation, Inc. and its contributors.
1.1 pk 23: * 4. Neither the name of The NetBSD Foundation nor the names of its
24: * contributors may be used to endorse or promote products derived
25: * from this software without specific prior written permission.
26: *
27: * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28: * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29: * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30: * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31: * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32: * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33: * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34: * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35: * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36: * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37: * POSSIBILITY OF SUCH DAMAGE.
38: */
1.64 lukem 39:
40: #include <sys/cdefs.h>
1.131.2.1! matt 41: __KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.133 2007/10/11 19:53:37 ad Exp $");
1.24 scottr 42:
1.25 thorpej 43: #include "opt_pool.h"
1.24 scottr 44: #include "opt_poollog.h"
1.28 thorpej 45: #include "opt_lockdebug.h"
1.1 pk 46:
47: #include <sys/param.h>
48: #include <sys/systm.h>
49: #include <sys/proc.h>
50: #include <sys/errno.h>
51: #include <sys/kernel.h>
52: #include <sys/malloc.h>
53: #include <sys/lock.h>
54: #include <sys/pool.h>
1.20 thorpej 55: #include <sys/syslog.h>
1.125 ad 56: #include <sys/debug.h>
1.3 pk 57:
58: #include <uvm/uvm.h>
59:
1.1 pk 60: /*
61: * Pool resource management utility.
1.3 pk 62: *
1.88 chs 63: * Memory is allocated in pages which are split into pieces according to
64: * the pool item size. Each page is kept on one of three lists in the
65: * pool structure: `pr_emptypages', `pr_fullpages' and `pr_partpages',
66: * for empty, full and partially-full pages respectively. The individual
67: * pool items are on a linked list headed by `ph_itemlist' in each page
68: * header. The memory for building the page list is either taken from
69: * the allocated pages themselves (for small pool items) or taken from
70: * an internal pool of page headers (`phpool').
1.1 pk 71: */
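/*
 * Editor's sketch (not part of the original source): typical use of
 * the pool API implemented in this file.  "struct frob" and
 * "frobpool" are invented names for illustration.
 *
 *	static struct pool frobpool;
 *
 *	pool_init(&frobpool, sizeof(struct frob), 0, 0, 0,
 *	    "frobpl", &pool_allocator_nointr, IPL_NONE);
 *
 *	struct frob *f = pool_get(&frobpool, PR_WAITOK);
 *	...
 *	pool_put(&frobpool, f);
 *	pool_destroy(&frobpool);
 */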
72:
1.3 pk 73: /* List of all pools */
1.102 chs 74: LIST_HEAD(,pool) pool_head = LIST_HEAD_INITIALIZER(pool_head);
1.3 pk 75:
76: /* Private pool for page header structures */
1.97 yamt 77: #define PHPOOL_MAX 8
78: static struct pool phpool[PHPOOL_MAX];
79: #define PHPOOL_FREELIST_NELEM(idx) (((idx) == 0) ? 0 : (1 << (idx)))
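/*
 * Editor's note: PHPOOL_FREELIST_NELEM(idx) evaluates to 0, 2, 4, 8,
 * ..., 128 for idx = 0..7, so phpool[0] serves ordinary off-page
 * headers and phpool[1..7] serve PR_NOTOUCH pools with increasingly
 * large per-page free lists.
 */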
1.3 pk 80:
1.62 bjh21 81: #ifdef POOL_SUBPAGE
82: /* Pool of subpages for use by normal pools. */
83: static struct pool psppool;
84: #endif
85:
1.117 yamt 86: static SLIST_HEAD(, pool_allocator) pa_deferinitq =
87: SLIST_HEAD_INITIALIZER(pa_deferinitq);
88:
1.98 yamt 89: static void *pool_page_alloc_meta(struct pool *, int);
90: static void pool_page_free_meta(struct pool *, void *);
91:
92: /* allocator for pool metadata */
93: static struct pool_allocator pool_allocator_meta = {
1.117 yamt 94: pool_page_alloc_meta, pool_page_free_meta,
95: .pa_backingmapptr = &kmem_map,
1.98 yamt 96: };
97:
1.3 pk 98: /* # of seconds to retain page after last use */
99: int pool_inactive_time = 10;
100:
101: /* Next candidate for drainage (see pool_drain()) */
1.23 thorpej 102: static struct pool *drainpp;
103:
104: /* This spin lock protects both pool_head and drainpp. */
105: struct simplelock pool_head_slock = SIMPLELOCK_INITIALIZER;
1.3 pk 106:
1.99 yamt 107: typedef uint8_t pool_item_freelist_t;
108:
1.3 pk 109: struct pool_item_header {
110: /* Page headers */
1.88 chs 111: LIST_ENTRY(pool_item_header)
1.3 pk 112: ph_pagelist; /* pool page list */
1.88 chs 113: SPLAY_ENTRY(pool_item_header)
114: ph_node; /* Off-page page headers */
1.128 christos 115: void * ph_page; /* this page's address */
1.3 pk 116: struct timeval ph_time; /* last referenced */
1.97 yamt 117: union {
118: /* !PR_NOTOUCH */
119: struct {
1.102 chs 120: LIST_HEAD(, pool_item)
1.97 yamt 121: phu_itemlist; /* chunk list for this page */
122: } phu_normal;
123: /* PR_NOTOUCH */
124: struct {
125: uint16_t
126: phu_off; /* start offset in page */
1.99 yamt 127: pool_item_freelist_t
1.97 yamt 128: phu_firstfree; /* first free item */
1.99 yamt 129: /*
130: * XXX it might be better to use
131: * a simple bitmap and ffs(3)
132: */
1.97 yamt 133: } phu_notouch;
134: } ph_u;
135: uint16_t ph_nmissing; /* # of chunks in use */
1.3 pk 136: };
1.97 yamt 137: #define ph_itemlist ph_u.phu_normal.phu_itemlist
138: #define ph_off ph_u.phu_notouch.phu_off
139: #define ph_firstfree ph_u.phu_notouch.phu_firstfree
1.3 pk 140:
1.1 pk 141: struct pool_item {
1.3 pk 142: #ifdef DIAGNOSTIC
1.82 thorpej 143: u_int pi_magic;
1.33 chs 144: #endif
1.82 thorpej 145: #define PI_MAGIC 0xdeadbeefU
1.3 pk 146: /* Other entries use only this list entry */
1.102 chs 147: LIST_ENTRY(pool_item) pi_list;
1.3 pk 148: };
149:
1.53 thorpej 150: #define POOL_NEEDS_CATCHUP(pp) \
151: ((pp)->pr_nitems < (pp)->pr_minitems)
152:
1.43 thorpej 153: /*
154: * Pool cache management.
155: *
156: * Pool caches provide a way for constructed objects to be cached by the
157: * pool subsystem. This can lead to performance improvements by avoiding
158: * needless object construction/destruction; it is deferred until absolutely
                 159:  * needless object construction/destruction; destruction is deferred
                 160:  * until absolutely necessary.
161: * Caches are grouped into cache groups. Each cache group references
162: * up to 16 constructed objects. When a cache allocates an object
163: * from the pool, it calls the object's constructor and places it into
164: * a cache group. When a cache group frees an object back to the pool,
165: * it first calls the object's destructor. This allows the object to
166: * persist in constructed form while freed to the cache.
167: *
168: * Multiple caches may exist for each pool. This allows a single
169: * object type to have multiple constructed forms. The pool references
170: * each cache, so that when a pool is drained by the pagedaemon, it can
171: * drain each individual cache as well. Each time a cache is drained,
172: * the most idle cache group is freed to the pool in its entirety.
173: *
                 174:  * Pool caches are laid on top of pools.  By layering them, we can avoid
175: * the complexity of cache management for pools which would not benefit
176: * from it.
177: */
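/*
 * Editor's sketch (not part of the original source): layering a cache
 * on a pool.  The ctor/dtor signatures follow the pool_cache_init()
 * prototype assumed for this era of the API; "frob" names are
 * invented.
 *
 *	static struct pool_cache frobcache;
 *
 *	pool_cache_init(&frobcache, &frobpool, frob_ctor, frob_dtor, NULL);
 *
 *	struct frob *f = pool_cache_get(&frobcache, PR_WAITOK);
 *	...
 *	pool_cache_put(&frobcache, f);
 *
 * The put does not destroy the object; its destructor runs only when
 * the cache group holding it is drained back to the pool.
 */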
178:
179: /* The cache group pool. */
180: static struct pool pcgpool;
1.3 pk 181:
1.102 chs 182: static void pool_cache_reclaim(struct pool_cache *, struct pool_pagelist *,
183: struct pool_cache_grouplist *);
184: static void pcg_grouplist_free(struct pool_cache_grouplist *);
1.3 pk 185:
1.42 thorpej 186: static int pool_catchup(struct pool *);
1.128 christos 187: static void pool_prime_page(struct pool *, void *,
1.55 thorpej 188: struct pool_item_header *);
1.88 chs 189: static void pool_update_curpage(struct pool *);
1.66 thorpej 190:
1.113 yamt 191: static int pool_grow(struct pool *, int);
1.117 yamt 192: static void *pool_allocator_alloc(struct pool *, int);
193: static void pool_allocator_free(struct pool *, void *);
1.3 pk 194:
1.97 yamt 195: static void pool_print_pagelist(struct pool *, struct pool_pagelist *,
1.88 chs 196: void (*)(const char *, ...));
1.42 thorpej 197: static void pool_print1(struct pool *, const char *,
198: void (*)(const char *, ...));
1.3 pk 199:
1.88 chs 200: static int pool_chk_page(struct pool *, const char *,
201: struct pool_item_header *);
202:
1.3 pk 203: /*
1.52 thorpej 204: * Pool log entry. An array of these is allocated in pool_init().
1.3 pk 205: */
206: struct pool_log {
207: const char *pl_file;
208: long pl_line;
209: int pl_action;
1.25 thorpej 210: #define PRLOG_GET 1
211: #define PRLOG_PUT 2
1.3 pk 212: void *pl_addr;
1.1 pk 213: };
214:
1.86 matt 215: #ifdef POOL_DIAGNOSTIC
1.3 pk 216: /* Number of entries in pool log buffers */
1.17 thorpej 217: #ifndef POOL_LOGSIZE
218: #define POOL_LOGSIZE 10
219: #endif
220:
221: int pool_logsize = POOL_LOGSIZE;
1.1 pk 222:
1.110 perry 223: static inline void
1.42 thorpej 224: pr_log(struct pool *pp, void *v, int action, const char *file, long line)
1.3 pk 225: {
226: int n = pp->pr_curlogentry;
227: struct pool_log *pl;
228:
1.20 thorpej 229: if ((pp->pr_roflags & PR_LOGGING) == 0)
1.3 pk 230: return;
231:
232: /*
233: * Fill in the current entry. Wrap around and overwrite
234: * the oldest entry if necessary.
235: */
236: pl = &pp->pr_log[n];
237: pl->pl_file = file;
238: pl->pl_line = line;
239: pl->pl_action = action;
240: pl->pl_addr = v;
241: if (++n >= pp->pr_logsize)
242: n = 0;
243: pp->pr_curlogentry = n;
244: }
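/*
 * Editor's note: the log is a fixed ring of pp->pr_logsize entries;
 * with the default POOL_LOGSIZE of 10, the 11th event overwrites
 * entry 0, so pr_printlog() below always reports the most recent
 * ten get/put records for the pool.
 */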
245:
246: static void
1.42 thorpej 247: pr_printlog(struct pool *pp, struct pool_item *pi,
248: void (*pr)(const char *, ...))
1.3 pk 249: {
250: int i = pp->pr_logsize;
251: int n = pp->pr_curlogentry;
252:
1.20 thorpej 253: if ((pp->pr_roflags & PR_LOGGING) == 0)
1.3 pk 254: return;
255:
256: /*
257: * Print all entries in this pool's log.
258: */
259: while (i-- > 0) {
260: struct pool_log *pl = &pp->pr_log[n];
261: if (pl->pl_action != 0) {
1.25 thorpej 262: if (pi == NULL || pi == pl->pl_addr) {
263: (*pr)("\tlog entry %d:\n", i);
264: (*pr)("\t\taction = %s, addr = %p\n",
265: pl->pl_action == PRLOG_GET ? "get" : "put",
266: pl->pl_addr);
267: (*pr)("\t\tfile: %s at line %lu\n",
268: pl->pl_file, pl->pl_line);
269: }
1.3 pk 270: }
271: if (++n >= pp->pr_logsize)
272: n = 0;
273: }
274: }
1.25 thorpej 275:
1.110 perry 276: static inline void
1.42 thorpej 277: pr_enter(struct pool *pp, const char *file, long line)
1.25 thorpej 278: {
279:
1.34 thorpej 280: if (__predict_false(pp->pr_entered_file != NULL)) {
1.25 thorpej 281: printf("pool %s: reentrancy at file %s line %ld\n",
282: pp->pr_wchan, file, line);
283: printf(" previous entry at file %s line %ld\n",
284: pp->pr_entered_file, pp->pr_entered_line);
285: panic("pr_enter");
286: }
287:
288: pp->pr_entered_file = file;
289: pp->pr_entered_line = line;
290: }
291:
1.110 perry 292: static inline void
1.42 thorpej 293: pr_leave(struct pool *pp)
1.25 thorpej 294: {
295:
1.34 thorpej 296: if (__predict_false(pp->pr_entered_file == NULL)) {
1.25 thorpej 297: printf("pool %s not entered?\n", pp->pr_wchan);
298: panic("pr_leave");
299: }
300:
301: pp->pr_entered_file = NULL;
302: pp->pr_entered_line = 0;
303: }
304:
1.110 perry 305: static inline void
1.42 thorpej 306: pr_enter_check(struct pool *pp, void (*pr)(const char *, ...))
1.25 thorpej 307: {
308:
309: if (pp->pr_entered_file != NULL)
310: (*pr)("\n\tcurrently entered from file %s line %ld\n",
311: pp->pr_entered_file, pp->pr_entered_line);
312: }
1.3 pk 313: #else
1.25 thorpej 314: #define pr_log(pp, v, action, file, line)
315: #define pr_printlog(pp, pi, pr)
316: #define pr_enter(pp, file, line)
317: #define pr_leave(pp)
318: #define pr_enter_check(pp, pr)
1.59 thorpej 319: #endif /* POOL_DIAGNOSTIC */
1.3 pk 320:
1.110 perry 321: static inline int
1.97 yamt 322: pr_item_notouch_index(const struct pool *pp, const struct pool_item_header *ph,
323: const void *v)
324: {
325: const char *cp = v;
326: int idx;
327:
328: KASSERT(pp->pr_roflags & PR_NOTOUCH);
1.128 christos 329: idx = (cp - (char *)ph->ph_page - ph->ph_off) / pp->pr_size;
1.97 yamt 330: KASSERT(idx < pp->pr_itemsperpage);
331: return idx;
332: }
333:
1.99 yamt 334: #define PR_FREELIST_ALIGN(p) \
335: roundup((uintptr_t)(p), sizeof(pool_item_freelist_t))
336: #define PR_FREELIST(ph) ((pool_item_freelist_t *)PR_FREELIST_ALIGN((ph) + 1))
337: #define PR_INDEX_USED ((pool_item_freelist_t)-1)
338: #define PR_INDEX_EOL ((pool_item_freelist_t)-2)
1.97 yamt 339:
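/*
 * Editor's note: for PR_NOTOUCH pools the free list is an array of
 * pool_item_freelist_t (uint8_t) indices stored with the off-page
 * header:
 *
 *	[struct pool_item_header | freelist[0 .. itemsperpage-1]]
 *
 * PR_FREELIST() places the array right after the header, rounded up
 * to the size of pool_item_freelist_t (a no-op while it is uint8_t).
 * Each slot holds the index of the next free item, PR_INDEX_USED
 * (0xff) for an allocated item, or PR_INDEX_EOL (0xfe) to terminate
 * the chain; ph_firstfree is the head.  With uint8_t indices, at most
 * 254 items per page are addressable (hence the KASSERT in
 * pool_init()).
 */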
1.110 perry 340: static inline void
1.97 yamt 341: pr_item_notouch_put(const struct pool *pp, struct pool_item_header *ph,
342: void *obj)
343: {
344: int idx = pr_item_notouch_index(pp, ph, obj);
1.99 yamt 345: pool_item_freelist_t *freelist = PR_FREELIST(ph);
1.97 yamt 346:
347: KASSERT(freelist[idx] == PR_INDEX_USED);
348: freelist[idx] = ph->ph_firstfree;
349: ph->ph_firstfree = idx;
350: }
351:
1.110 perry 352: static inline void *
1.97 yamt 353: pr_item_notouch_get(const struct pool *pp, struct pool_item_header *ph)
354: {
355: int idx = ph->ph_firstfree;
1.99 yamt 356: pool_item_freelist_t *freelist = PR_FREELIST(ph);
1.97 yamt 357:
358: KASSERT(freelist[idx] != PR_INDEX_USED);
359: ph->ph_firstfree = freelist[idx];
360: freelist[idx] = PR_INDEX_USED;
361:
1.128 christos 362: return (char *)ph->ph_page + ph->ph_off + idx * pp->pr_size;
1.97 yamt 363: }
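/*
 * Editor's example: with ph_firstfree = 2 and freelist[2] = 7,
 * pr_item_notouch_get() returns item 2 and advances ph_firstfree to
 * 7; a later pr_item_notouch_put() of item 2 links it back in at the
 * head of the chain.
 */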
364:
1.110 perry 365: static inline int
1.88 chs 366: phtree_compare(struct pool_item_header *a, struct pool_item_header *b)
367: {
1.121 yamt 368:
369: /*
                 370: 	 * we consider a pool_item_header with a smaller ph_page to be bigger.
371: * (this unnatural ordering is for the benefit of pr_find_pagehead.)
372: */
373:
1.88 chs 374: if (a->ph_page < b->ph_page)
1.121 yamt 375: return (1);
376: else if (a->ph_page > b->ph_page)
1.88 chs 377: return (-1);
378: else
379: return (0);
380: }
381:
382: SPLAY_PROTOTYPE(phtree, pool_item_header, ph_node, phtree_compare);
383: SPLAY_GENERATE(phtree, pool_item_header, ph_node, phtree_compare);
384:
1.3 pk 385: /*
1.121 yamt 386: * Return the pool page header based on item address.
1.3 pk 387: */
1.110 perry 388: static inline struct pool_item_header *
1.121 yamt 389: pr_find_pagehead(struct pool *pp, void *v)
1.3 pk 390: {
1.88 chs 391: struct pool_item_header *ph, tmp;
1.3 pk 392:
1.121 yamt 393: if ((pp->pr_roflags & PR_NOALIGN) != 0) {
1.128 christos 394: tmp.ph_page = (void *)(uintptr_t)v;
1.121 yamt 395: ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp);
396: if (ph == NULL) {
397: ph = SPLAY_ROOT(&pp->pr_phtree);
398: if (ph != NULL && phtree_compare(&tmp, ph) >= 0) {
399: ph = SPLAY_NEXT(phtree, &pp->pr_phtree, ph);
400: }
401: KASSERT(ph == NULL || phtree_compare(&tmp, ph) < 0);
402: }
403: } else {
1.128 christos 404: void *page =
405: (void *)((uintptr_t)v & pp->pr_alloc->pa_pagemask);
1.121 yamt 406:
407: if ((pp->pr_roflags & PR_PHINPAGE) != 0) {
1.128 christos 408: ph = (struct pool_item_header *)((char *)page + pp->pr_phoffset);
1.121 yamt 409: } else {
410: tmp.ph_page = page;
411: ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp);
412: }
413: }
1.3 pk 414:
1.121 yamt 415: KASSERT(ph == NULL || ((pp->pr_roflags & PR_PHINPAGE) != 0) ||
1.128 christos 416: ((char *)ph->ph_page <= (char *)v &&
417: (char *)v < (char *)ph->ph_page + pp->pr_alloc->pa_pagesz));
1.88 chs 418: return ph;
1.3 pk 419: }
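/*
 * Editor's example: for a page-aligned pool with pa_pagesz = 4096, an
 * item at v = 0xc1234567 maps to page 0xc1234000 via pa_pagemask;
 * with PR_PHINPAGE the header then lives at page + pr_phoffset,
 * otherwise it is found in the pr_phtree splay tree.  For PR_NOALIGN
 * pools, the descending-ph_page splay ordering above lets SPLAY_NEXT()
 * step from the search key to the header whose page contains v.
 */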
420:
1.101 thorpej 421: static void
422: pr_pagelist_free(struct pool *pp, struct pool_pagelist *pq)
423: {
424: struct pool_item_header *ph;
425: int s;
426:
427: while ((ph = LIST_FIRST(pq)) != NULL) {
428: LIST_REMOVE(ph, ph_pagelist);
429: pool_allocator_free(pp, ph->ph_page);
430: if ((pp->pr_roflags & PR_PHINPAGE) == 0) {
431: s = splvm();
432: pool_put(pp->pr_phpool, ph);
433: splx(s);
434: }
435: }
436: }
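/*
 * Editor's note: pr_rmpage() and pr_pagelist_free() form a two-phase
 * release, visible in pool_put() and pool_destroy() below: pages are
 * unlinked onto a private list while pr_slock is held, and the actual
 * frees happen only after the lock is dropped, e.g.:
 *
 *	LIST_INIT(&pq);
 *	simple_lock(&pp->pr_slock);
 *	pr_rmpage(pp, ph, &pq);
 *	simple_unlock(&pp->pr_slock);
 *	pr_pagelist_free(pp, &pq);
 */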
437:
1.3 pk 438: /*
439: * Remove a page from the pool.
440: */
1.110 perry 441: static inline void
1.61 chs 442: pr_rmpage(struct pool *pp, struct pool_item_header *ph,
443: struct pool_pagelist *pq)
1.3 pk 444: {
445:
1.101 thorpej 446: LOCK_ASSERT(simple_lock_held(&pp->pr_slock));
1.91 yamt 447:
1.3 pk 448: /*
1.7 thorpej 449: * If the page was idle, decrement the idle page count.
1.3 pk 450: */
1.6 thorpej 451: if (ph->ph_nmissing == 0) {
452: #ifdef DIAGNOSTIC
453: if (pp->pr_nidle == 0)
454: panic("pr_rmpage: nidle inconsistent");
1.20 thorpej 455: if (pp->pr_nitems < pp->pr_itemsperpage)
456: panic("pr_rmpage: nitems inconsistent");
1.6 thorpej 457: #endif
458: pp->pr_nidle--;
459: }
1.7 thorpej 460:
1.20 thorpej 461: pp->pr_nitems -= pp->pr_itemsperpage;
462:
1.7 thorpej 463: /*
1.101 thorpej 464: * Unlink the page from the pool and queue it for release.
1.7 thorpej 465: */
1.88 chs 466: LIST_REMOVE(ph, ph_pagelist);
1.91 yamt 467: if ((pp->pr_roflags & PR_PHINPAGE) == 0)
468: SPLAY_REMOVE(phtree, &pp->pr_phtree, ph);
1.101 thorpej 469: LIST_INSERT_HEAD(pq, ph, ph_pagelist);
470:
1.7 thorpej 471: pp->pr_npages--;
472: pp->pr_npagefree++;
1.6 thorpej 473:
1.88 chs 474: pool_update_curpage(pp);
1.3 pk 475: }
476:
1.126 thorpej 477: static bool
1.117 yamt 478: pa_starved_p(struct pool_allocator *pa)
479: {
480:
481: if (pa->pa_backingmap != NULL) {
482: return vm_map_starved_p(pa->pa_backingmap);
483: }
1.127 thorpej 484: return false;
1.117 yamt 485: }
486:
487: static int
1.124 yamt 488: pool_reclaim_callback(struct callback_entry *ce, void *obj, void *arg)
1.117 yamt 489: {
490: struct pool *pp = obj;
491: struct pool_allocator *pa = pp->pr_alloc;
492:
493: KASSERT(&pp->pr_reclaimerentry == ce);
494: pool_reclaim(pp);
495: if (!pa_starved_p(pa)) {
496: return CALLBACK_CHAIN_ABORT;
497: }
498: return CALLBACK_CHAIN_CONTINUE;
499: }
500:
501: static void
502: pool_reclaim_register(struct pool *pp)
503: {
504: struct vm_map *map = pp->pr_alloc->pa_backingmap;
505: int s;
506:
507: if (map == NULL) {
508: return;
509: }
510:
511: s = splvm(); /* not necessary for INTRSAFE maps, but don't care. */
512: callback_register(&vm_map_to_kernel(map)->vmk_reclaim_callback,
513: &pp->pr_reclaimerentry, pp, pool_reclaim_callback);
514: splx(s);
515: }
516:
517: static void
518: pool_reclaim_unregister(struct pool *pp)
519: {
520: struct vm_map *map = pp->pr_alloc->pa_backingmap;
521: int s;
522:
523: if (map == NULL) {
524: return;
525: }
526:
527: s = splvm(); /* not necessary for INTRSAFE maps, but don't care. */
528: callback_unregister(&vm_map_to_kernel(map)->vmk_reclaim_callback,
529: &pp->pr_reclaimerentry);
530: splx(s);
531: }
532:
533: static void
534: pa_reclaim_register(struct pool_allocator *pa)
535: {
536: struct vm_map *map = *pa->pa_backingmapptr;
537: struct pool *pp;
538:
539: KASSERT(pa->pa_backingmap == NULL);
540: if (map == NULL) {
541: SLIST_INSERT_HEAD(&pa_deferinitq, pa, pa_q);
542: return;
543: }
544: pa->pa_backingmap = map;
545: TAILQ_FOREACH(pp, &pa->pa_list, pr_alloc_list) {
546: pool_reclaim_register(pp);
547: }
548: }
549:
1.3 pk 550: /*
1.94 simonb 551: * Initialize all the pools listed in the "pools" link set.
552: */
553: void
1.117 yamt 554: pool_subsystem_init(void)
1.94 simonb 555: {
1.117 yamt 556: struct pool_allocator *pa;
1.94 simonb 557: __link_set_decl(pools, struct link_pool_init);
558: struct link_pool_init * const *pi;
559:
560: __link_set_foreach(pi, pools)
561: pool_init((*pi)->pp, (*pi)->size, (*pi)->align,
562: (*pi)->align_offset, (*pi)->flags, (*pi)->wchan,
1.129 ad 563: (*pi)->palloc, (*pi)->ipl);
1.117 yamt 564:
565: while ((pa = SLIST_FIRST(&pa_deferinitq)) != NULL) {
566: KASSERT(pa->pa_backingmapptr != NULL);
567: KASSERT(*pa->pa_backingmapptr != NULL);
568: SLIST_REMOVE_HEAD(&pa_deferinitq, pa_q);
569: pa_reclaim_register(pa);
570: }
1.94 simonb 571: }
572:
573: /*
1.3 pk 574: * Initialize the given pool resource structure.
575: *
576: * We export this routine to allow other kernel parts to declare
577: * static pools that must be initialized before malloc() is available.
578: */
579: void
1.42 thorpej 580: pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
1.129 ad 581: const char *wchan, struct pool_allocator *palloc, int ipl)
1.3 pk 582: {
1.116 simonb 583: #ifdef DEBUG
584: struct pool *pp1;
585: #endif
1.92 enami 586: size_t trysize, phsize;
1.116 simonb 587: int off, slack, s;
1.3 pk 588:
1.99 yamt 589: KASSERT((1UL << (CHAR_BIT * sizeof(pool_item_freelist_t))) - 2 >=
590: PHPOOL_FREELIST_NELEM(PHPOOL_MAX - 1));
591:
1.116 simonb 592: #ifdef DEBUG
593: /*
594: * Check that the pool hasn't already been initialised and
595: * added to the list of all pools.
596: */
597: LIST_FOREACH(pp1, &pool_head, pr_poollist) {
598: if (pp == pp1)
599: panic("pool_init: pool %s already initialised",
600: wchan);
601: }
602: #endif
603:
1.25 thorpej 604: #ifdef POOL_DIAGNOSTIC
605: /*
606: * Always log if POOL_DIAGNOSTIC is defined.
607: */
608: if (pool_logsize != 0)
609: flags |= PR_LOGGING;
610: #endif
611:
1.66 thorpej 612: if (palloc == NULL)
613: palloc = &pool_allocator_kmem;
1.112 bjh21 614: #ifdef POOL_SUBPAGE
615: if (size > palloc->pa_pagesz) {
616: if (palloc == &pool_allocator_kmem)
617: palloc = &pool_allocator_kmem_fullpage;
618: else if (palloc == &pool_allocator_nointr)
619: palloc = &pool_allocator_nointr_fullpage;
620: }
1.66 thorpej 621: #endif /* POOL_SUBPAGE */
622: if ((palloc->pa_flags & PA_INITIALIZED) == 0) {
1.112 bjh21 623: if (palloc->pa_pagesz == 0)
1.66 thorpej 624: palloc->pa_pagesz = PAGE_SIZE;
625:
626: TAILQ_INIT(&palloc->pa_list);
627:
628: simple_lock_init(&palloc->pa_slock);
629: palloc->pa_pagemask = ~(palloc->pa_pagesz - 1);
630: palloc->pa_pageshift = ffs(palloc->pa_pagesz) - 1;
1.117 yamt 631:
632: if (palloc->pa_backingmapptr != NULL) {
633: pa_reclaim_register(palloc);
634: }
1.66 thorpej 635: palloc->pa_flags |= PA_INITIALIZED;
1.4 thorpej 636: }
1.3 pk 637:
638: if (align == 0)
639: align = ALIGN(1);
1.14 thorpej 640:
1.120 yamt 641: if ((flags & PR_NOTOUCH) == 0 && size < sizeof(struct pool_item))
1.14 thorpej 642: size = sizeof(struct pool_item);
1.3 pk 643:
1.78 thorpej 644: size = roundup(size, align);
1.66 thorpej 645: #ifdef DIAGNOSTIC
646: if (size > palloc->pa_pagesz)
1.121 yamt 647: panic("pool_init: pool item size (%zu) too large", size);
1.66 thorpej 648: #endif
1.35 pk 649:
1.3 pk 650: /*
651: * Initialize the pool structure.
652: */
1.88 chs 653: LIST_INIT(&pp->pr_emptypages);
654: LIST_INIT(&pp->pr_fullpages);
655: LIST_INIT(&pp->pr_partpages);
1.102 chs 656: LIST_INIT(&pp->pr_cachelist);
1.3 pk 657: pp->pr_curpage = NULL;
658: pp->pr_npages = 0;
659: pp->pr_minitems = 0;
660: pp->pr_minpages = 0;
661: pp->pr_maxpages = UINT_MAX;
1.20 thorpej 662: pp->pr_roflags = flags;
663: pp->pr_flags = 0;
1.35 pk 664: pp->pr_size = size;
1.3 pk 665: pp->pr_align = align;
666: pp->pr_wchan = wchan;
1.66 thorpej 667: pp->pr_alloc = palloc;
1.20 thorpej 668: pp->pr_nitems = 0;
669: pp->pr_nout = 0;
670: pp->pr_hardlimit = UINT_MAX;
671: pp->pr_hardlimit_warning = NULL;
1.31 thorpej 672: pp->pr_hardlimit_ratecap.tv_sec = 0;
673: pp->pr_hardlimit_ratecap.tv_usec = 0;
674: pp->pr_hardlimit_warning_last.tv_sec = 0;
675: pp->pr_hardlimit_warning_last.tv_usec = 0;
1.68 thorpej 676: pp->pr_drain_hook = NULL;
677: pp->pr_drain_hook_arg = NULL;
1.125 ad 678: pp->pr_freecheck = NULL;
1.3 pk 679:
680: /*
681: * Decide whether to put the page header off page to avoid
1.92      enami     682: 	 * wasting too large a part of the page on it, or for too-big items.
                 683: 	 * Off-page page headers go in a splay tree, so we can match
                 684: 	 * a returned item with its header based on the page address.
                 685: 	 * We use 1/16 of the page size and about 8 times the item
                 686: 	 * size as the threshold (XXX: tune)
687: *
688: * However, we'll put the header into the page if we can put
689: * it without wasting any items.
690: *
691: * Silently enforce `0 <= ioff < align'.
1.3 pk 692: */
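/*
 * Editor's worked example: with pa_pagesz = 4096 and phsize of, say,
 * 48 bytes, a 64-byte item satisfies 64 < MIN(4096/16, 48 * 8), so
 * the header is kept in-page at pa_pagesz - phsize and PR_PHINPAGE is
 * set; a 1024-byte item fails both tests (4096/1024 != 4048/1024) and
 * gets an off-page header from phpool instead.
 */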
1.92 enami 693: pp->pr_itemoffset = ioff %= align;
694: /* See the comment below about reserved bytes. */
695: trysize = palloc->pa_pagesz - ((align - ioff) % align);
696: phsize = ALIGN(sizeof(struct pool_item_header));
1.121 yamt 697: if ((pp->pr_roflags & (PR_NOTOUCH | PR_NOALIGN)) == 0 &&
1.97 yamt 698: (pp->pr_size < MIN(palloc->pa_pagesz / 16, phsize << 3) ||
699: trysize / pp->pr_size == (trysize - phsize) / pp->pr_size)) {
1.3 pk 700: /* Use the end of the page for the page header */
1.20 thorpej 701: pp->pr_roflags |= PR_PHINPAGE;
1.92 enami 702: pp->pr_phoffset = off = palloc->pa_pagesz - phsize;
1.2 pk 703: } else {
1.3 pk 704: /* The page header will be taken from our page header pool */
705: pp->pr_phoffset = 0;
1.66 thorpej 706: off = palloc->pa_pagesz;
1.88 chs 707: SPLAY_INIT(&pp->pr_phtree);
1.2 pk 708: }
1.1 pk 709:
1.3 pk 710: /*
711: * Alignment is to take place at `ioff' within the item. This means
712: * we must reserve up to `align - 1' bytes on the page to allow
713: * appropriate positioning of each item.
714: */
715: pp->pr_itemsperpage = (off - ((align - ioff) % align)) / pp->pr_size;
1.43 thorpej 716: KASSERT(pp->pr_itemsperpage != 0);
1.97 yamt 717: if ((pp->pr_roflags & PR_NOTOUCH)) {
718: int idx;
719:
720: for (idx = 0; pp->pr_itemsperpage > PHPOOL_FREELIST_NELEM(idx);
721: idx++) {
722: /* nothing */
723: }
724: if (idx >= PHPOOL_MAX) {
725: /*
                 726: 			 * if you see this panic, consider tweaking
727: * PHPOOL_MAX and PHPOOL_FREELIST_NELEM.
728: */
729: panic("%s: too large itemsperpage(%d) for PR_NOTOUCH",
730: pp->pr_wchan, pp->pr_itemsperpage);
731: }
732: pp->pr_phpool = &phpool[idx];
733: } else if ((pp->pr_roflags & PR_PHINPAGE) == 0) {
734: pp->pr_phpool = &phpool[0];
735: }
736: #if defined(DIAGNOSTIC)
737: else {
738: pp->pr_phpool = NULL;
739: }
740: #endif
1.3 pk 741:
742: /*
743: * Use the slack between the chunks and the page header
744: * for "cache coloring".
745: */
746: slack = off - pp->pr_itemsperpage * pp->pr_size;
747: pp->pr_maxcolor = (slack / align) * align;
748: pp->pr_curcolor = 0;
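	/*
	 * Editor's worked example: with off = 4048, 63 items of 64
	 * bytes and align = 16, slack = 4048 - 4032 = 16, so
	 * pr_maxcolor = 16 and successive pages alternate their first
	 * item between offsets 0 and 16, spreading cache-line pressure.
	 */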
749:
750: pp->pr_nget = 0;
751: pp->pr_nfail = 0;
752: pp->pr_nput = 0;
753: pp->pr_npagealloc = 0;
754: pp->pr_npagefree = 0;
1.1 pk 755: pp->pr_hiwat = 0;
1.8 thorpej 756: pp->pr_nidle = 0;
1.3 pk 757:
1.59 thorpej 758: #ifdef POOL_DIAGNOSTIC
1.25 thorpej 759: if (flags & PR_LOGGING) {
760: if (kmem_map == NULL ||
761: (pp->pr_log = malloc(pool_logsize * sizeof(struct pool_log),
762: M_TEMP, M_NOWAIT)) == NULL)
1.20 thorpej 763: pp->pr_roflags &= ~PR_LOGGING;
1.3 pk 764: pp->pr_curlogentry = 0;
765: pp->pr_logsize = pool_logsize;
766: }
1.59 thorpej 767: #endif
1.25 thorpej 768:
769: pp->pr_entered_file = NULL;
770: pp->pr_entered_line = 0;
1.3 pk 771:
1.21 thorpej 772: simple_lock_init(&pp->pr_slock);
1.1 pk 773:
1.3 pk 774: /*
1.43 thorpej 775: * Initialize private page header pool and cache magazine pool if we
776: * haven't done so yet.
1.23 thorpej 777: * XXX LOCKING.
1.3 pk 778: */
1.97 yamt 779: if (phpool[0].pr_size == 0) {
780: int idx;
781: for (idx = 0; idx < PHPOOL_MAX; idx++) {
782: static char phpool_names[PHPOOL_MAX][6+1+6+1];
783: int nelem;
784: size_t sz;
785:
786: nelem = PHPOOL_FREELIST_NELEM(idx);
787: snprintf(phpool_names[idx], sizeof(phpool_names[idx]),
788: "phpool-%d", nelem);
789: sz = sizeof(struct pool_item_header);
790: if (nelem) {
791: sz = PR_FREELIST_ALIGN(sz)
1.99 yamt 792: + nelem * sizeof(pool_item_freelist_t);
1.97 yamt 793: }
794: pool_init(&phpool[idx], sz, 0, 0, 0,
1.129 ad 795: phpool_names[idx], &pool_allocator_meta, IPL_VM);
1.97 yamt 796: }
1.62 bjh21 797: #ifdef POOL_SUBPAGE
798: pool_init(&psppool, POOL_SUBPAGE, POOL_SUBPAGE, 0,
1.129 ad 799: PR_RECURSIVE, "psppool", &pool_allocator_meta, IPL_VM);
1.62 bjh21 800: #endif
1.43 thorpej 801: pool_init(&pcgpool, sizeof(struct pool_cache_group), 0, 0,
1.129 ad 802: 0, "pcgpool", &pool_allocator_meta, IPL_VM);
1.1 pk 803: }
804:
1.23 thorpej 805: /* Insert into the list of all pools. */
806: simple_lock(&pool_head_slock);
1.102 chs 807: LIST_INSERT_HEAD(&pool_head, pp, pr_poollist);
1.23 thorpej 808: simple_unlock(&pool_head_slock);
1.66 thorpej 809:
810: /* Insert this into the list of pools using this allocator. */
1.93 dbj 811: s = splvm();
1.66 thorpej 812: simple_lock(&palloc->pa_slock);
813: TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list);
814: simple_unlock(&palloc->pa_slock);
1.93 dbj 815: splx(s);
1.117 yamt 816: pool_reclaim_register(pp);
1.1 pk 817: }
818:
819: /*
                 820:  * De-commission a pool resource.
821: */
822: void
1.42 thorpej 823: pool_destroy(struct pool *pp)
1.1 pk 824: {
1.101 thorpej 825: struct pool_pagelist pq;
1.3 pk 826: struct pool_item_header *ph;
1.93 dbj 827: int s;
1.43 thorpej 828:
1.101 thorpej 829: /* Remove from global pool list */
830: simple_lock(&pool_head_slock);
1.102 chs 831: LIST_REMOVE(pp, pr_poollist);
1.101 thorpej 832: if (drainpp == pp)
833: drainpp = NULL;
834: simple_unlock(&pool_head_slock);
835:
836: /* Remove this pool from its allocator's list of pools. */
1.117 yamt 837: pool_reclaim_unregister(pp);
1.93 dbj 838: s = splvm();
1.66 thorpej 839: simple_lock(&pp->pr_alloc->pa_slock);
840: TAILQ_REMOVE(&pp->pr_alloc->pa_list, pp, pr_alloc_list);
841: simple_unlock(&pp->pr_alloc->pa_slock);
1.93 dbj 842: splx(s);
1.66 thorpej 843:
1.101 thorpej 844: s = splvm();
845: simple_lock(&pp->pr_slock);
846:
1.102 chs 847: KASSERT(LIST_EMPTY(&pp->pr_cachelist));
1.3 pk 848:
849: #ifdef DIAGNOSTIC
1.20 thorpej 850: if (pp->pr_nout != 0) {
1.25 thorpej 851: pr_printlog(pp, NULL, printf);
1.80 provos 852: panic("pool_destroy: pool busy: still out: %u",
1.20 thorpej 853: pp->pr_nout);
1.3 pk 854: }
855: #endif
1.1 pk 856:
1.101 thorpej 857: KASSERT(LIST_EMPTY(&pp->pr_fullpages));
858: KASSERT(LIST_EMPTY(&pp->pr_partpages));
859:
1.3 pk 860: /* Remove all pages */
1.101 thorpej 861: LIST_INIT(&pq);
1.88 chs 862: while ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
1.101 thorpej 863: pr_rmpage(pp, ph, &pq);
864:
865: simple_unlock(&pp->pr_slock);
866: splx(s);
1.3 pk 867:
1.101 thorpej 868: pr_pagelist_free(pp, &pq);
1.3 pk 869:
1.59 thorpej 870: #ifdef POOL_DIAGNOSTIC
1.20 thorpej 871: if ((pp->pr_roflags & PR_LOGGING) != 0)
1.3 pk 872: free(pp->pr_log, M_TEMP);
1.59 thorpej 873: #endif
1.1 pk 874: }
875:
1.68 thorpej 876: void
877: pool_set_drain_hook(struct pool *pp, void (*fn)(void *, int), void *arg)
878: {
879:
880: /* XXX no locking -- must be used just after pool_init() */
881: #ifdef DIAGNOSTIC
882: if (pp->pr_drain_hook != NULL)
883: panic("pool_set_drain_hook(%s): already set", pp->pr_wchan);
884: #endif
885: pp->pr_drain_hook = fn;
886: pp->pr_drain_hook_arg = arg;
887: }
888:
1.88 chs 889: static struct pool_item_header *
1.128 christos 890: pool_alloc_item_header(struct pool *pp, void *storage, int flags)
1.55 thorpej 891: {
892: struct pool_item_header *ph;
893: int s;
894:
895: LOCK_ASSERT(simple_lock_held(&pp->pr_slock) == 0);
896:
897: if ((pp->pr_roflags & PR_PHINPAGE) != 0)
1.128 christos 898: ph = (struct pool_item_header *) ((char *)storage + pp->pr_phoffset);
1.55 thorpej 899: else {
1.85 pk 900: s = splvm();
1.97 yamt 901: ph = pool_get(pp->pr_phpool, flags);
1.55 thorpej 902: splx(s);
903: }
904:
905: return (ph);
906: }
1.1 pk 907:
908: /*
1.3 pk 909: * Grab an item from the pool; must be called at appropriate spl level
1.1 pk 910: */
1.3 pk 911: void *
1.59 thorpej 912: #ifdef POOL_DIAGNOSTIC
1.42 thorpej 913: _pool_get(struct pool *pp, int flags, const char *file, long line)
1.56 sommerfe 914: #else
915: pool_get(struct pool *pp, int flags)
916: #endif
1.1 pk 917: {
918: struct pool_item *pi;
1.3 pk 919: struct pool_item_header *ph;
1.55 thorpej 920: void *v;
1.1 pk 921:
1.2 pk 922: #ifdef DIAGNOSTIC
1.95 atatat 923: if (__predict_false(pp->pr_itemsperpage == 0))
924: panic("pool_get: pool %p: pr_itemsperpage is zero, "
925: "pool not initialized?", pp);
1.84 thorpej 926: if (__predict_false(curlwp == NULL && doing_shutdown == 0 &&
1.37 sommerfe 927: (flags & PR_WAITOK) != 0))
1.77 matt 928: panic("pool_get: %s: must have NOWAIT", pp->pr_wchan);
1.58 thorpej 929:
1.102 chs 930: #endif /* DIAGNOSTIC */
1.58 thorpej 931: #ifdef LOCKDEBUG
932: if (flags & PR_WAITOK)
1.119 yamt 933: ASSERT_SLEEPABLE(NULL, "pool_get(PR_WAITOK)");
1.56 sommerfe 934: #endif
1.1 pk 935:
1.21 thorpej 936: simple_lock(&pp->pr_slock);
1.25 thorpej 937: pr_enter(pp, file, line);
1.20 thorpej 938:
939: startover:
940: /*
941: * Check to see if we've reached the hard limit. If we have,
942: * and we can wait, then wait until an item has been returned to
943: * the pool.
944: */
945: #ifdef DIAGNOSTIC
1.34 thorpej 946: if (__predict_false(pp->pr_nout > pp->pr_hardlimit)) {
1.25 thorpej 947: pr_leave(pp);
1.21 thorpej 948: simple_unlock(&pp->pr_slock);
1.20 thorpej 949: panic("pool_get: %s: crossed hard limit", pp->pr_wchan);
950: }
951: #endif
1.34 thorpej 952: if (__predict_false(pp->pr_nout == pp->pr_hardlimit)) {
1.68 thorpej 953: if (pp->pr_drain_hook != NULL) {
954: /*
955: * Since the drain hook is going to free things
956: * back to the pool, unlock, call the hook, re-lock,
957: * and check the hardlimit condition again.
958: */
959: pr_leave(pp);
960: simple_unlock(&pp->pr_slock);
961: (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags);
962: simple_lock(&pp->pr_slock);
963: pr_enter(pp, file, line);
964: if (pp->pr_nout < pp->pr_hardlimit)
965: goto startover;
966: }
967:
1.29 sommerfe 968: if ((flags & PR_WAITOK) && !(flags & PR_LIMITFAIL)) {
1.20 thorpej 969: /*
970: * XXX: A warning isn't logged in this case. Should
971: * it be?
972: */
973: pp->pr_flags |= PR_WANTED;
1.25 thorpej 974: pr_leave(pp);
1.40 sommerfe 975: ltsleep(pp, PSWP, pp->pr_wchan, 0, &pp->pr_slock);
1.25 thorpej 976: pr_enter(pp, file, line);
1.20 thorpej 977: goto startover;
978: }
1.31 thorpej 979:
980: /*
981: * Log a message that the hard limit has been hit.
982: */
983: if (pp->pr_hardlimit_warning != NULL &&
984: ratecheck(&pp->pr_hardlimit_warning_last,
985: &pp->pr_hardlimit_ratecap))
986: log(LOG_ERR, "%s\n", pp->pr_hardlimit_warning);
1.21 thorpej 987:
988: pp->pr_nfail++;
989:
1.25 thorpej 990: pr_leave(pp);
1.21 thorpej 991: simple_unlock(&pp->pr_slock);
1.20 thorpej 992: return (NULL);
993: }
994:
1.3 pk 995: /*
996: * The convention we use is that if `curpage' is not NULL, then
997: * it points at a non-empty bucket. In particular, `curpage'
998: * never points at a page header which has PR_PHINPAGE set and
999: * has no items in its bucket.
1000: */
1.20 thorpej 1001: if ((ph = pp->pr_curpage) == NULL) {
1.113 yamt 1002: int error;
1003:
1.20 thorpej 1004: #ifdef DIAGNOSTIC
1005: if (pp->pr_nitems != 0) {
1.21 thorpej 1006: simple_unlock(&pp->pr_slock);
1.20 thorpej 1007: printf("pool_get: %s: curpage NULL, nitems %u\n",
1008: pp->pr_wchan, pp->pr_nitems);
1.80 provos 1009: panic("pool_get: nitems inconsistent");
1.20 thorpej 1010: }
1011: #endif
1012:
1.21 thorpej 1013: /*
1014: * Call the back-end page allocator for more memory.
1015: * Release the pool lock, as the back-end page allocator
1016: * may block.
1017: */
1.25 thorpej 1018: pr_leave(pp);
1.113 yamt 1019: error = pool_grow(pp, flags);
1020: pr_enter(pp, file, line);
1021: if (error != 0) {
1.21 thorpej 1022: /*
1.55 thorpej 1023: * We were unable to allocate a page or item
1024: * header, but we released the lock during
1025: * allocation, so perhaps items were freed
1026: * back to the pool. Check for this case.
1.21 thorpej 1027: */
1028: if (pp->pr_curpage != NULL)
1029: goto startover;
1.15 pk 1030:
1.117 yamt 1031: pp->pr_nfail++;
1.25 thorpej 1032: pr_leave(pp);
1.117 yamt 1033: simple_unlock(&pp->pr_slock);
1034: return (NULL);
1.1 pk 1035: }
1.3 pk 1036:
1.20 thorpej 1037: /* Start the allocation process over. */
1038: goto startover;
1.3 pk 1039: }
1.97 yamt 1040: if (pp->pr_roflags & PR_NOTOUCH) {
1041: #ifdef DIAGNOSTIC
1042: if (__predict_false(ph->ph_nmissing == pp->pr_itemsperpage)) {
1043: pr_leave(pp);
1044: simple_unlock(&pp->pr_slock);
1045: panic("pool_get: %s: page empty", pp->pr_wchan);
1046: }
1047: #endif
1048: v = pr_item_notouch_get(pp, ph);
1049: #ifdef POOL_DIAGNOSTIC
1050: pr_log(pp, v, PRLOG_GET, file, line);
1051: #endif
1052: } else {
1.102 chs 1053: v = pi = LIST_FIRST(&ph->ph_itemlist);
1.97 yamt 1054: if (__predict_false(v == NULL)) {
1055: pr_leave(pp);
1056: simple_unlock(&pp->pr_slock);
1057: panic("pool_get: %s: page empty", pp->pr_wchan);
1058: }
1.20 thorpej 1059: #ifdef DIAGNOSTIC
1.97 yamt 1060: if (__predict_false(pp->pr_nitems == 0)) {
1061: pr_leave(pp);
1062: simple_unlock(&pp->pr_slock);
1063: printf("pool_get: %s: items on itemlist, nitems %u\n",
1064: pp->pr_wchan, pp->pr_nitems);
1065: panic("pool_get: nitems inconsistent");
1066: }
1.65 enami 1067: #endif
1.56 sommerfe 1068:
1.65 enami 1069: #ifdef POOL_DIAGNOSTIC
1.97 yamt 1070: pr_log(pp, v, PRLOG_GET, file, line);
1.65 enami 1071: #endif
1.3 pk 1072:
1.65 enami 1073: #ifdef DIAGNOSTIC
1.97 yamt 1074: if (__predict_false(pi->pi_magic != PI_MAGIC)) {
1075: pr_printlog(pp, pi, printf);
1076: panic("pool_get(%s): free list modified: "
1077: "magic=%x; page %p; item addr %p\n",
1078: pp->pr_wchan, pi->pi_magic, ph->ph_page, pi);
1079: }
1.3 pk 1080: #endif
1081:
1.97 yamt 1082: /*
1083: * Remove from item list.
1084: */
1.102 chs 1085: LIST_REMOVE(pi, pi_list);
1.97 yamt 1086: }
1.20 thorpej 1087: pp->pr_nitems--;
1088: pp->pr_nout++;
1.6 thorpej 1089: if (ph->ph_nmissing == 0) {
1090: #ifdef DIAGNOSTIC
1.34 thorpej 1091: if (__predict_false(pp->pr_nidle == 0))
1.6 thorpej 1092: panic("pool_get: nidle inconsistent");
1093: #endif
1094: pp->pr_nidle--;
1.88 chs 1095:
1096: /*
1097: * This page was previously empty. Move it to the list of
1098: * partially-full pages. This page is already curpage.
1099: */
1100: LIST_REMOVE(ph, ph_pagelist);
1101: LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
1.6 thorpej 1102: }
1.3 pk 1103: ph->ph_nmissing++;
1.97 yamt 1104: if (ph->ph_nmissing == pp->pr_itemsperpage) {
1.21 thorpej 1105: #ifdef DIAGNOSTIC
1.97 yamt 1106: if (__predict_false((pp->pr_roflags & PR_NOTOUCH) == 0 &&
1.102 chs 1107: !LIST_EMPTY(&ph->ph_itemlist))) {
1.25 thorpej 1108: pr_leave(pp);
1.21 thorpej 1109: simple_unlock(&pp->pr_slock);
1110: panic("pool_get: %s: nmissing inconsistent",
1111: pp->pr_wchan);
1112: }
1113: #endif
1.3 pk 1114: /*
1.88 chs 1115: * This page is now full. Move it to the full list
1116: * and select a new current page.
1.3 pk 1117: */
1.88 chs 1118: LIST_REMOVE(ph, ph_pagelist);
1119: LIST_INSERT_HEAD(&pp->pr_fullpages, ph, ph_pagelist);
1120: pool_update_curpage(pp);
1.1 pk 1121: }
1.3 pk 1122:
1123: pp->pr_nget++;
1.111 christos 1124: pr_leave(pp);
1.20 thorpej 1125:
1126: /*
1127: * If we have a low water mark and we are now below that low
1128: * water mark, add more items to the pool.
1129: */
1.53 thorpej 1130: if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
1.20 thorpej 1131: /*
1132: * XXX: Should we log a warning? Should we set up a timeout
1133: * to try again in a second or so? The latter could break
1134: * a caller's assumptions about interrupt protection, etc.
1135: */
1136: }
1137:
1.21 thorpej 1138: simple_unlock(&pp->pr_slock);
1.125 ad 1139: KASSERT((((vaddr_t)v + pp->pr_itemoffset) & (pp->pr_align - 1)) == 0);
1140: FREECHECK_OUT(&pp->pr_freecheck, v);
1.1 pk 1141: return (v);
1142: }
1143:
1144: /*
1.43 thorpej 1145: * Internal version of pool_put(). Pool is already locked/entered.
1.1 pk 1146: */
1.43 thorpej 1147: static void
1.101 thorpej 1148: pool_do_put(struct pool *pp, void *v, struct pool_pagelist *pq)
1.1 pk 1149: {
1150: struct pool_item *pi = v;
1.3 pk 1151: struct pool_item_header *ph;
1152:
1.61 chs 1153: LOCK_ASSERT(simple_lock_held(&pp->pr_slock));
1.125 ad 1154: FREECHECK_IN(&pp->pr_freecheck, v);
1.61 chs 1155:
1.30 thorpej 1156: #ifdef DIAGNOSTIC
1.34 thorpej 1157: if (__predict_false(pp->pr_nout == 0)) {
1.30 thorpej 1158: printf("pool %s: putting with none out\n",
1159: pp->pr_wchan);
1160: panic("pool_put");
1161: }
1162: #endif
1.3 pk 1163:
1.121 yamt 1164: if (__predict_false((ph = pr_find_pagehead(pp, v)) == NULL)) {
1.25 thorpej 1165: pr_printlog(pp, NULL, printf);
1.3 pk 1166: panic("pool_put: %s: page header missing", pp->pr_wchan);
1167: }
1.28 thorpej 1168:
1.3 pk 1169: /*
1170: * Return to item list.
1171: */
1.97 yamt 1172: if (pp->pr_roflags & PR_NOTOUCH) {
1173: pr_item_notouch_put(pp, ph, v);
1174: } else {
1.2 pk 1175: #ifdef DIAGNOSTIC
1.97 yamt 1176: pi->pi_magic = PI_MAGIC;
1.3 pk 1177: #endif
1.32 chs 1178: #ifdef DEBUG
1.97 yamt 1179: {
1180: int i, *ip = v;
1.32 chs 1181:
1.97 yamt 1182: for (i = 0; i < pp->pr_size / sizeof(int); i++) {
1183: *ip++ = PI_MAGIC;
1184: }
1.32 chs 1185: }
1186: #endif
1187:
1.102 chs 1188: LIST_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
1.97 yamt 1189: }
1.79 thorpej 1190: KDASSERT(ph->ph_nmissing != 0);
1.3 pk 1191: ph->ph_nmissing--;
1192: pp->pr_nput++;
1.20 thorpej 1193: pp->pr_nitems++;
1194: pp->pr_nout--;
1.3 pk 1195:
1196: /* Cancel "pool empty" condition if it exists */
1197: if (pp->pr_curpage == NULL)
1198: pp->pr_curpage = ph;
1199:
1200: if (pp->pr_flags & PR_WANTED) {
1201: pp->pr_flags &= ~PR_WANTED;
1.15 pk 1202: if (ph->ph_nmissing == 0)
1203: pp->pr_nidle++;
1.128 christos 1204: wakeup((void *)pp);
1.3 pk 1205: return;
1206: }
1207:
1208: /*
1.88 chs 1209: * If this page is now empty, do one of two things:
1.21 thorpej 1210: *
1.88 chs 1211: * (1) If we have more pages than the page high water mark,
1.96 thorpej 1212: * free the page back to the system. ONLY CONSIDER
1.90 thorpej 1213: * FREEING BACK A PAGE IF WE HAVE MORE THAN OUR MINIMUM PAGE
1214: * CLAIM.
1.21 thorpej 1215: *
1.88 chs 1216: * (2) Otherwise, move the page to the empty page list.
1217: *
1218: * Either way, select a new current page (so we use a partially-full
1219: * page if one is available).
1.3 pk 1220: */
1221: if (ph->ph_nmissing == 0) {
1.6 thorpej 1222: pp->pr_nidle++;
1.90 thorpej 1223: if (pp->pr_npages > pp->pr_minpages &&
1224: (pp->pr_npages > pp->pr_maxpages ||
1.117 yamt 1225: pa_starved_p(pp->pr_alloc))) {
1.101 thorpej 1226: pr_rmpage(pp, ph, pq);
1.3 pk 1227: } else {
1.88 chs 1228: LIST_REMOVE(ph, ph_pagelist);
1229: LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);
1.3 pk 1230:
1.21 thorpej 1231: /*
1232: * Update the timestamp on the page. A page must
1233: * be idle for some period of time before it can
1234: * be reclaimed by the pagedaemon. This minimizes
1235: * ping-pong'ing for memory.
1236: */
1.118 kardel 1237: getmicrotime(&ph->ph_time);
1.1 pk 1238: }
1.88 chs 1239: pool_update_curpage(pp);
1.1 pk 1240: }
1.88 chs 1241:
1.21 thorpej 1242: /*
1.88 chs 1243: * If the page was previously completely full, move it to the
1244: * partially-full list and make it the current page. The next
1245: * allocation will get the item from this page, instead of
1246: * further fragmenting the pool.
1.21 thorpej 1247: */
1248: else if (ph->ph_nmissing == (pp->pr_itemsperpage - 1)) {
1.88 chs 1249: LIST_REMOVE(ph, ph_pagelist);
1250: LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
1.21 thorpej 1251: pp->pr_curpage = ph;
1252: }
1.43 thorpej 1253: }
1254:
1255: /*
1256: * Return resource to the pool; must be called at appropriate spl level
1257: */
1.59 thorpej 1258: #ifdef POOL_DIAGNOSTIC
1.43 thorpej 1259: void
1260: _pool_put(struct pool *pp, void *v, const char *file, long line)
1261: {
1.101 thorpej 1262: struct pool_pagelist pq;
1263:
1264: LIST_INIT(&pq);
1.43 thorpej 1265:
1266: simple_lock(&pp->pr_slock);
1267: pr_enter(pp, file, line);
1268:
1.56 sommerfe 1269: pr_log(pp, v, PRLOG_PUT, file, line);
1270:
1.101 thorpej 1271: pool_do_put(pp, v, &pq);
1.21 thorpej 1272:
1.25 thorpej 1273: pr_leave(pp);
1.21 thorpej 1274: simple_unlock(&pp->pr_slock);
1.101 thorpej 1275:
1.102 chs 1276: pr_pagelist_free(pp, &pq);
1.1 pk 1277: }
1.57 sommerfe 1278: #undef pool_put
1.59 thorpej 1279: #endif /* POOL_DIAGNOSTIC */
1.1 pk 1280:
1.56 sommerfe 1281: void
1282: pool_put(struct pool *pp, void *v)
1283: {
1.101 thorpej 1284: struct pool_pagelist pq;
1285:
1286: LIST_INIT(&pq);
1.56 sommerfe 1287:
1288: simple_lock(&pp->pr_slock);
1.101 thorpej 1289: pool_do_put(pp, v, &pq);
1290: simple_unlock(&pp->pr_slock);
1.56 sommerfe 1291:
1.102 chs 1292: pr_pagelist_free(pp, &pq);
1.56 sommerfe 1293: }
1.57 sommerfe 1294:
1.59 thorpej 1295: #ifdef POOL_DIAGNOSTIC
1.57 sommerfe 1296: #define pool_put(h, v) _pool_put((h), (v), __FILE__, __LINE__)
1.56 sommerfe 1297: #endif
1.74 thorpej 1298:
1299: /*
1.113 yamt 1300: * pool_grow: grow a pool by a page.
1301: *
1302: * => called with pool locked.
1303: * => unlock and relock the pool.
1304: * => return with pool locked.
1305: */
1306:
1307: static int
1308: pool_grow(struct pool *pp, int flags)
1309: {
1310: struct pool_item_header *ph = NULL;
1311: char *cp;
1312:
1313: simple_unlock(&pp->pr_slock);
1314: cp = pool_allocator_alloc(pp, flags);
1315: if (__predict_true(cp != NULL)) {
1316: ph = pool_alloc_item_header(pp, cp, flags);
1317: }
1318: if (__predict_false(cp == NULL || ph == NULL)) {
1319: if (cp != NULL) {
1320: pool_allocator_free(pp, cp);
1321: }
1322: simple_lock(&pp->pr_slock);
1323: return ENOMEM;
1324: }
1325:
1326: simple_lock(&pp->pr_slock);
1327: pool_prime_page(pp, cp, ph);
1328: pp->pr_npagealloc++;
1329: return 0;
1330: }
1331:
1332: /*
1.74 thorpej 1333: * Add N items to the pool.
1334: */
1335: int
1336: pool_prime(struct pool *pp, int n)
1337: {
1.75 simonb 1338: int newpages;
1.113 yamt 1339: int error = 0;
1.74 thorpej 1340:
1341: simple_lock(&pp->pr_slock);
1342:
1343: newpages = roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
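	/*
	 * Editor's example: with pr_itemsperpage = 42, pool_prime(pp,
	 * 100) rounds up to 126 items and grows the pool by 3 pages.
	 */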
1344:
1345: while (newpages-- > 0) {
1.113 yamt 1346: error = pool_grow(pp, PR_NOWAIT);
1347: if (error) {
1.74 thorpej 1348: break;
1349: }
1350: pp->pr_minpages++;
1351: }
1352:
1353: if (pp->pr_minpages >= pp->pr_maxpages)
1354: pp->pr_maxpages = pp->pr_minpages + 1; /* XXX */
1355:
1356: simple_unlock(&pp->pr_slock);
1.113 yamt 1357: return error;
1.74 thorpej 1358: }
1.55 thorpej 1359:
1360: /*
1.3 pk 1361: * Add a page worth of items to the pool.
1.21 thorpej 1362: *
1363: * Note, we must be called with the pool descriptor LOCKED.
1.3 pk 1364: */
1.55 thorpej 1365: static void
1.128 christos 1366: pool_prime_page(struct pool *pp, void *storage, struct pool_item_header *ph)
1.3 pk 1367: {
1368: struct pool_item *pi;
1.128 christos 1369: void *cp = storage;
1.125 ad 1370: const unsigned int align = pp->pr_align;
1371: const unsigned int ioff = pp->pr_itemoffset;
1.55 thorpej 1372: int n;
1.36 pk 1373:
1.91 yamt 1374: LOCK_ASSERT(simple_lock_held(&pp->pr_slock));
1375:
1.66 thorpej 1376: #ifdef DIAGNOSTIC
1.121 yamt 1377: if ((pp->pr_roflags & PR_NOALIGN) == 0 &&
1378: ((uintptr_t)cp & (pp->pr_alloc->pa_pagesz - 1)) != 0)
1.36 pk 1379: panic("pool_prime_page: %s: unaligned page", pp->pr_wchan);
1.66 thorpej 1380: #endif
1.3 pk 1381:
1382: /*
1383: * Insert page header.
1384: */
1.88 chs 1385: LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);
1.102 chs 1386: LIST_INIT(&ph->ph_itemlist);
1.3 pk 1387: ph->ph_page = storage;
1388: ph->ph_nmissing = 0;
1.118 kardel 1389: getmicrotime(&ph->ph_time);
1.88 chs 1390: if ((pp->pr_roflags & PR_PHINPAGE) == 0)
1391: SPLAY_INSERT(phtree, &pp->pr_phtree, ph);
1.3 pk 1392:
1.6 thorpej 1393: pp->pr_nidle++;
1394:
1.3 pk 1395: /*
1396: * Color this page.
1397: */
1.128 christos 1398: cp = (char *)cp + pp->pr_curcolor;
1.3 pk 1399: if ((pp->pr_curcolor += align) > pp->pr_maxcolor)
1400: pp->pr_curcolor = 0;
1401:
1402: /*
                 1403: 	 * Adjust storage to apply alignment to `pr_itemoffset' in each item.
1404: */
1405: if (ioff != 0)
1.128 christos 1406: cp = (char *)cp + align - ioff;
1.3 pk 1407:
1.125 ad 1408: KASSERT((((vaddr_t)cp + ioff) & (align - 1)) == 0);
1409:
1.3 pk 1410: /*
1411: * Insert remaining chunks on the bucket list.
1412: */
1413: n = pp->pr_itemsperpage;
1.20 thorpej 1414: pp->pr_nitems += n;
1.3 pk 1415:
1.97 yamt 1416: if (pp->pr_roflags & PR_NOTOUCH) {
1.99 yamt 1417: pool_item_freelist_t *freelist = PR_FREELIST(ph);
1.97 yamt 1418: int i;
1419:
1.128 christos 1420: ph->ph_off = (char *)cp - (char *)storage;
1.97 yamt 1421: ph->ph_firstfree = 0;
1422: for (i = 0; i < n - 1; i++)
1423: freelist[i] = i + 1;
1424: freelist[n - 1] = PR_INDEX_EOL;
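		/*
		 * Editor's example: for n = 4 this builds freelist =
		 * {1, 2, 3, PR_INDEX_EOL} with ph_firstfree = 0, i.e.
		 * a chain 0 -> 1 -> 2 -> 3 -> end.
		 */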
1425: } else {
1426: while (n--) {
1427: pi = (struct pool_item *)cp;
1.78 thorpej 1428:
1.97 yamt 1429: KASSERT(((((vaddr_t)pi) + ioff) & (align - 1)) == 0);
1.3 pk 1430:
1.97 yamt 1431: /* Insert on page list */
1.102 chs 1432: LIST_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
1.3 pk 1433: #ifdef DIAGNOSTIC
1.97 yamt 1434: pi->pi_magic = PI_MAGIC;
1.3 pk 1435: #endif
1.128 christos 1436: cp = (char *)cp + pp->pr_size;
1.125 ad 1437:
1438: KASSERT((((vaddr_t)cp + ioff) & (align - 1)) == 0);
1.97 yamt 1439: }
1.3 pk 1440: }
1441:
1442: /*
1443: * If the pool was depleted, point at the new page.
1444: */
1445: if (pp->pr_curpage == NULL)
1446: pp->pr_curpage = ph;
1447:
1448: if (++pp->pr_npages > pp->pr_hiwat)
1449: pp->pr_hiwat = pp->pr_npages;
1450: }
1451:
1.20 thorpej 1452: /*
1.52      thorpej  1453:  * Used by pool_get() when nitems drops below the low water mark, to
1.88      chs      1454:  * catch pr_nitems up with the low water mark.
1.20 thorpej 1455: *
1.21 thorpej 1456: * Note 1, we never wait for memory here, we let the caller decide what to do.
1.20 thorpej 1457: *
1.73 thorpej 1458: * Note 2, we must be called with the pool already locked, and we return
1.20 thorpej 1459: * with it locked.
1460: */
1461: static int
1.42 thorpej 1462: pool_catchup(struct pool *pp)
1.20 thorpej 1463: {
1464: int error = 0;
1465:
1.54 thorpej 1466: while (POOL_NEEDS_CATCHUP(pp)) {
1.113 yamt 1467: error = pool_grow(pp, PR_NOWAIT);
1468: if (error) {
1.20 thorpej 1469: break;
1470: }
1471: }
1.113 yamt 1472: return error;
1.20 thorpej 1473: }
1474:
1.88 chs 1475: static void
1476: pool_update_curpage(struct pool *pp)
1477: {
1478:
1479: pp->pr_curpage = LIST_FIRST(&pp->pr_partpages);
1480: if (pp->pr_curpage == NULL) {
1481: pp->pr_curpage = LIST_FIRST(&pp->pr_emptypages);
1482: }
1483: }
1484:
1.3 pk 1485: void
1.42 thorpej 1486: pool_setlowat(struct pool *pp, int n)
1.3 pk 1487: {
1.15 pk 1488:
1.21 thorpej 1489: simple_lock(&pp->pr_slock);
1490:
1.3 pk 1491: pp->pr_minitems = n;
1.15 pk 1492: pp->pr_minpages = (n == 0)
1493: ? 0
1.18 thorpej 1494: : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1.20 thorpej 1495:
1496: /* Make sure we're caught up with the newly-set low water mark. */
1.75 simonb 1497: if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
1.20 thorpej 1498: /*
1499: * XXX: Should we log a warning? Should we set up a timeout
1500: * to try again in a second or so? The latter could break
1501: * a caller's assumptions about interrupt protection, etc.
1502: */
1503: }
1.21 thorpej 1504:
1505: simple_unlock(&pp->pr_slock);
1.3 pk 1506: }
1507:
1508: void
1.42 thorpej 1509: pool_sethiwat(struct pool *pp, int n)
1.3 pk 1510: {
1.15 pk 1511:
1.21 thorpej 1512: simple_lock(&pp->pr_slock);
1513:
1.15 pk 1514: pp->pr_maxpages = (n == 0)
1515: ? 0
1.18 thorpej 1516: : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1.21 thorpej 1517:
1518: simple_unlock(&pp->pr_slock);
1.3 pk 1519: }
1520:
1.20 thorpej 1521: void
1.42 thorpej 1522: pool_sethardlimit(struct pool *pp, int n, const char *warnmess, int ratecap)
1.20 thorpej 1523: {
1524:
1.21 thorpej 1525: simple_lock(&pp->pr_slock);
1.20 thorpej 1526:
1527: pp->pr_hardlimit = n;
1528: pp->pr_hardlimit_warning = warnmess;
1.31 thorpej 1529: pp->pr_hardlimit_ratecap.tv_sec = ratecap;
1530: pp->pr_hardlimit_warning_last.tv_sec = 0;
1531: pp->pr_hardlimit_warning_last.tv_usec = 0;
1.20 thorpej 1532:
1533: /*
1.21 thorpej 1534: * In-line version of pool_sethiwat(), because we don't want to
1535: * release the lock.
1.20 thorpej 1536: */
1537: pp->pr_maxpages = (n == 0)
1538: ? 0
1539: : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1.21 thorpej 1540:
1541: simple_unlock(&pp->pr_slock);
1.20 thorpej 1542: }
1.3 pk 1543:
1544: /*
1545: * Release all complete pages that have not been used recently.
1546: */
1.66 thorpej 1547: int
1.59 thorpej 1548: #ifdef POOL_DIAGNOSTIC
1.42 thorpej 1549: _pool_reclaim(struct pool *pp, const char *file, long line)
1.56 sommerfe 1550: #else
1551: pool_reclaim(struct pool *pp)
1552: #endif
1.3 pk 1553: {
1554: struct pool_item_header *ph, *phnext;
1.43 thorpej 1555: struct pool_cache *pc;
1.61 chs 1556: struct pool_pagelist pq;
1.102 chs 1557: struct pool_cache_grouplist pcgl;
1558: struct timeval curtime, diff;
1.3 pk 1559:
1.68 thorpej 1560: if (pp->pr_drain_hook != NULL) {
1561: /*
1562: * The drain hook must be called with the pool unlocked.
1563: */
1564: (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, PR_NOWAIT);
1565: }
1566:
1.21 thorpej 1567: if (simple_lock_try(&pp->pr_slock) == 0)
1.66 thorpej 1568: return (0);
1.25 thorpej 1569: pr_enter(pp, file, line);
1.68 thorpej 1570:
1.88 chs 1571: LIST_INIT(&pq);
1.102 chs 1572: LIST_INIT(&pcgl);
1.3 pk 1573:
1.43 thorpej 1574: /*
1575: * Reclaim items from the pool's caches.
1576: */
1.102 chs 1577: LIST_FOREACH(pc, &pp->pr_cachelist, pc_poollist)
1578: pool_cache_reclaim(pc, &pq, &pcgl);
1.43 thorpej 1579:
1.118 kardel 1580: getmicrotime(&curtime);
1.21 thorpej 1581:
1.88 chs 1582: for (ph = LIST_FIRST(&pp->pr_emptypages); ph != NULL; ph = phnext) {
1583: phnext = LIST_NEXT(ph, ph_pagelist);
1.3 pk 1584:
1585: /* Check our minimum page claim */
1586: if (pp->pr_npages <= pp->pr_minpages)
1587: break;
1588:
1.88 chs 1589: KASSERT(ph->ph_nmissing == 0);
1590: timersub(&curtime, &ph->ph_time, &diff);
1.117 yamt 1591: if (diff.tv_sec < pool_inactive_time
1592: && !pa_starved_p(pp->pr_alloc))
1.88 chs 1593: continue;
1.21 thorpej 1594:
1.88 chs 1595: /*
1596: * If freeing this page would put us below
1597: * the low water mark, stop now.
1598: */
1599: if ((pp->pr_nitems - pp->pr_itemsperpage) <
1600: pp->pr_minitems)
1601: break;
1.21 thorpej 1602:
1.88 chs 1603: pr_rmpage(pp, ph, &pq);
1.3 pk 1604: }
1605:
1.25 thorpej 1606: pr_leave(pp);
1.21 thorpej 1607: simple_unlock(&pp->pr_slock);
1.102 chs 1608: if (LIST_EMPTY(&pq) && LIST_EMPTY(&pcgl))
1609: return 0;
1.66 thorpej 1610:
1.101 thorpej 1611: pr_pagelist_free(pp, &pq);
1.102 chs 1612: pcg_grouplist_free(&pcgl);
1.66 thorpej 1613: return (1);
1.3 pk 1614: }
1615:
1616: /*
1617: * Drain pools, one at a time.
1.21 thorpej 1618: *
1619: * Note, we must never be called from an interrupt context.
1.131 ad 1620: *
1621: * XXX Pool can disappear while draining.
1.3 pk 1622: */
1623: void
1.124 yamt 1624: pool_drain(void *arg)
1.3 pk 1625: {
1626: struct pool *pp;
1.23 thorpej 1627: int s;
1.3 pk 1628:
1.61 chs 1629: pp = NULL;
1.49 thorpej 1630: s = splvm();
1.23 thorpej 1631: simple_lock(&pool_head_slock);
1.61 chs 1632: if (drainpp == NULL) {
1.102 chs 1633: drainpp = LIST_FIRST(&pool_head);
1.61 chs 1634: }
1635: if (drainpp) {
1636: pp = drainpp;
1.102 chs 1637: drainpp = LIST_NEXT(pp, pr_poollist);
1.61 chs 1638: }
1639: simple_unlock(&pool_head_slock);
1.115 christos 1640: if (pp)
1641: pool_reclaim(pp);
1.61 chs 1642: splx(s);
1.3 pk 1643: }
1644:
1645: /*
1646: * Diagnostic helpers.
1647: */
1648: void
1.42 thorpej 1649: pool_print(struct pool *pp, const char *modif)
1.21 thorpej 1650: {
1651: int s;
1652:
1.49 thorpej 1653: s = splvm();
1.25 thorpej 1654: if (simple_lock_try(&pp->pr_slock) == 0) {
1655: printf("pool %s is locked; try again later\n",
1656: pp->pr_wchan);
1657: splx(s);
1658: return;
1659: }
1660: pool_print1(pp, modif, printf);
1.21 thorpej 1661: simple_unlock(&pp->pr_slock);
1662: splx(s);
1663: }
1664:
1.25 thorpej 1665: void
1.108 yamt 1666: pool_printall(const char *modif, void (*pr)(const char *, ...))
1667: {
1668: struct pool *pp;
1669:
1670: if (simple_lock_try(&pool_head_slock) == 0) {
1671: (*pr)("WARNING: pool_head_slock is locked\n");
1672: } else {
1673: simple_unlock(&pool_head_slock);
1674: }
1675:
1676: LIST_FOREACH(pp, &pool_head, pr_poollist) {
1677: pool_printit(pp, modif, pr);
1678: }
1679: }
1680:
1681: void
1.42 thorpej 1682: pool_printit(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
1.25 thorpej 1683: {
1684:
1685: if (pp == NULL) {
1686: (*pr)("Must specify a pool to print.\n");
1687: return;
1688: }
1689:
1690: /*
1691: * Called from DDB; interrupts should be blocked, and all
1692: * other processors should be paused. We can skip locking
1693: * the pool in this case.
1694: *
1695: * We do a simple_lock_try() just to print the lock
1696: * status, however.
1697: */
1698:
1699: if (simple_lock_try(&pp->pr_slock) == 0)
1700: (*pr)("WARNING: pool %s is locked\n", pp->pr_wchan);
1701: else
1.107 yamt 1702: simple_unlock(&pp->pr_slock);
1.25 thorpej 1703:
1704: pool_print1(pp, modif, pr);
1705: }
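
/*
 * (Editorial sketch.)  The modifier string selects the extra detail
 * printed by pool_print1(): 'l' dumps the operation log, 'p' the page
 * lists, 'c' any caches.  A debugger hook might call it as below;
 * db_printf() is DDB's printf-style output routine.
 */
#if 0	/* example */
	pool_printit(pp, "pc", db_printf);	/* page lists + caches */
#endif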
1706:
1.21 thorpej 1707: static void
1.124 yamt 1708: pool_print_pagelist(struct pool *pp, struct pool_pagelist *pl,
1.97 yamt 1709: void (*pr)(const char *, ...))
1.88 chs 1710: {
1711: struct pool_item_header *ph;
1712: #ifdef DIAGNOSTIC
1713: struct pool_item *pi;
1714: #endif
1715:
1716: LIST_FOREACH(ph, pl, ph_pagelist) {
1717: (*pr)("\t\tpage %p, nmissing %d, time %lu,%lu\n",
1718: ph->ph_page, ph->ph_nmissing,
1719: (u_long)ph->ph_time.tv_sec,
1720: (u_long)ph->ph_time.tv_usec);
1721: #ifdef DIAGNOSTIC
1.97 yamt 1722: if (!(pp->pr_roflags & PR_NOTOUCH)) {
1.102 chs 1723: LIST_FOREACH(pi, &ph->ph_itemlist, pi_list) {
1.97 yamt 1724: if (pi->pi_magic != PI_MAGIC) {
1725: (*pr)("\t\t\titem %p, magic 0x%x\n",
1726: pi, pi->pi_magic);
1727: }
1.88 chs 1728: }
1729: }
1730: #endif
1731: }
1732: }
1733:
1734: static void
1.42 thorpej 1735: pool_print1(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
1.3 pk 1736: {
1.25 thorpej 1737: struct pool_item_header *ph;
1.44 thorpej 1738: struct pool_cache *pc;
1739: struct pool_cache_group *pcg;
1740: int i, print_log = 0, print_pagelist = 0, print_cache = 0;
1.25 thorpej 1741: char c;
1742:
1743: while ((c = *modif++) != '\0') {
1744: if (c == 'l')
1745: print_log = 1;
1746: if (c == 'p')
1747: print_pagelist = 1;
1.44 thorpej 1748: if (c == 'c')
1749: print_cache = 1;
1.25 thorpej 1750: }
1751:
1752: (*pr)("POOL %s: size %u, align %u, ioff %u, roflags 0x%08x\n",
1753: pp->pr_wchan, pp->pr_size, pp->pr_align, pp->pr_itemoffset,
1754: pp->pr_roflags);
1.66 thorpej 1755: (*pr)("\talloc %p\n", pp->pr_alloc);
1.25 thorpej 1756: (*pr)("\tminitems %u, minpages %u, maxpages %u, npages %u\n",
1757: pp->pr_minitems, pp->pr_minpages, pp->pr_maxpages, pp->pr_npages);
1758: (*pr)("\titemsperpage %u, nitems %u, nout %u, hardlimit %u\n",
1759: pp->pr_itemsperpage, pp->pr_nitems, pp->pr_nout, pp->pr_hardlimit);
1760:
1761: (*pr)("\n\tnget %lu, nfail %lu, nput %lu\n",
1762: pp->pr_nget, pp->pr_nfail, pp->pr_nput);
1763: (*pr)("\tnpagealloc %lu, npagefree %lu, hiwat %u, nidle %lu\n",
1764: pp->pr_npagealloc, pp->pr_npagefree, pp->pr_hiwat, pp->pr_nidle);
1765:
1766: if (print_pagelist == 0)
1767: goto skip_pagelist;
1768:
1.88 chs 1769: if ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
1770: (*pr)("\n\tempty page list:\n");
1.97 yamt 1771: pool_print_pagelist(pp, &pp->pr_emptypages, pr);
1.88 chs 1772: if ((ph = LIST_FIRST(&pp->pr_fullpages)) != NULL)
1773: (*pr)("\n\tfull page list:\n");
1.97 yamt 1774: pool_print_pagelist(pp, &pp->pr_fullpages, pr);
1.88 chs 1775: if ((ph = LIST_FIRST(&pp->pr_partpages)) != NULL)
1776: (*pr)("\n\tpartial-page list:\n");
1.97 yamt 1777: pool_print_pagelist(pp, &pp->pr_partpages, pr);
1.88 chs 1778:
1.25 thorpej 1779: if (pp->pr_curpage == NULL)
1780: (*pr)("\tno current page\n");
1781: else
1782: (*pr)("\tcurpage %p\n", pp->pr_curpage->ph_page);
1783:
1784: skip_pagelist:
1785: if (print_log == 0)
1786: goto skip_log;
1787:
1788: (*pr)("\n");
1789: if ((pp->pr_roflags & PR_LOGGING) == 0)
1790: (*pr)("\tno log\n");
1.122 christos 1791: else {
1.25 thorpej 1792: pr_printlog(pp, NULL, pr);
1.122 christos 1793: }
1.3 pk 1794:
1.25 thorpej 1795: skip_log:
1.44 thorpej 1796: if (print_cache == 0)
1797: goto skip_cache;
1798:
1.102 chs 1799: #define PR_GROUPLIST(pcg) \
1800: (*pr)("\t\tgroup %p: avail %d\n", pcg, pcg->pcg_avail); \
1801: for (i = 0; i < PCG_NOBJECTS; i++) { \
1802: if (pcg->pcg_objects[i].pcgo_pa != \
1803: POOL_PADDR_INVALID) { \
1804: (*pr)("\t\t\t%p, 0x%llx\n", \
1805: pcg->pcg_objects[i].pcgo_va, \
1806: (unsigned long long) \
1807: pcg->pcg_objects[i].pcgo_pa); \
1808: } else { \
1809: (*pr)("\t\t\t%p\n", \
1810: pcg->pcg_objects[i].pcgo_va); \
1811: } \
1812: }
1813:
1814: LIST_FOREACH(pc, &pp->pr_cachelist, pc_poollist) {
1.103 chs 1815: (*pr)("\tcache %p\n", pc);
1.48 thorpej 1816: (*pr)("\t hits %lu misses %lu ngroups %lu nitems %lu\n",
1817: pc->pc_hits, pc->pc_misses, pc->pc_ngroups, pc->pc_nitems);
1.102 chs 1818: (*pr)("\t full groups:\n");
1.103 chs 1819: LIST_FOREACH(pcg, &pc->pc_fullgroups, pcg_list) {
1.102 chs 1820: PR_GROUPLIST(pcg);
1.103 chs 1821: }
1.102 chs 1822: (*pr)("\t partial groups:\n");
1.103 chs 1823: LIST_FOREACH(pcg, &pc->pc_partgroups, pcg_list) {
1.102 chs 1824: PR_GROUPLIST(pcg);
1.103 chs 1825: }
1.102 chs 1826: (*pr)("\t empty groups:\n");
1.103 chs 1827: LIST_FOREACH(pcg, &pc->pc_emptygroups, pcg_list) {
1.102 chs 1828: PR_GROUPLIST(pcg);
1.103 chs 1829: }
1.44 thorpej 1830: }
1.102 chs 1831: #undef PR_GROUPLIST
1.44 thorpej 1832:
1833: skip_cache:
1.88 chs 1834: pr_enter_check(pp, pr);
1835: }
1836:
1837: static int
1838: pool_chk_page(struct pool *pp, const char *label, struct pool_item_header *ph)
1839: {
1840: struct pool_item *pi;
1.128 christos 1841: void *page;
1.88 chs 1842: int n;
1843:
1.121 yamt 1844: if ((pp->pr_roflags & PR_NOALIGN) == 0) {
1.128 christos 1845: page = (void *)((uintptr_t)ph & pp->pr_alloc->pa_pagemask);
1.121 yamt 1846: if (page != ph->ph_page &&
1847: (pp->pr_roflags & PR_PHINPAGE) != 0) {
1848: if (label != NULL)
1849: printf("%s: ", label);
1850: printf("pool(%p:%s): page inconsistency: page %p;"
1851: " at page head addr %p (p %p)\n", pp,
1852: pp->pr_wchan, ph->ph_page,
1853: ph, page);
1854: return 1;
1855: }
1.88 chs 1856: }
1.3 pk 1857:
1.97 yamt 1858: if ((pp->pr_roflags & PR_NOTOUCH) != 0)
1859: return 0;
1860:
1.102 chs 1861: for (pi = LIST_FIRST(&ph->ph_itemlist), n = 0;
1.88 chs 1862: pi != NULL;
1.102 chs 1863: pi = LIST_NEXT(pi,pi_list), n++) {
1.88 chs 1864:
1865: #ifdef DIAGNOSTIC
1866: if (pi->pi_magic != PI_MAGIC) {
1867: if (label != NULL)
1868: printf("%s: ", label);
1869: printf("pool(%s): free list modified: magic=%x;"
1.121 yamt 1870: " page %p; item ordinal %d; addr %p\n",
1.88 chs 1871: pp->pr_wchan, pi->pi_magic, ph->ph_page,
1.121 yamt 1872: n, pi);
1.88 chs 1873: panic("pool");
1874: }
1875: #endif
1.121 yamt 1876: if ((pp->pr_roflags & PR_NOALIGN) != 0) {
1877: continue;
1878: }
1.128 christos 1879: page = (void *)((uintptr_t)pi & pp->pr_alloc->pa_pagemask);
1.88 chs 1880: if (page == ph->ph_page)
1881: continue;
1882:
1883: if (label != NULL)
1884: printf("%s: ", label);
1885: printf("pool(%p:%s): page inconsistency: page %p;"
1886: " item ordinal %d; addr %p (p %p)\n", pp,
1887: pp->pr_wchan, ph->ph_page,
1888: n, pi, page);
1889: return 1;
1890: }
1891: return 0;
1.3 pk 1892: }
1893:
1.88 chs 1894:
1.3 pk 1895: int
1.42 thorpej 1896: pool_chk(struct pool *pp, const char *label)
1.3 pk 1897: {
1898: struct pool_item_header *ph;
1899: int r = 0;
1900:
1.21 thorpej 1901: simple_lock(&pp->pr_slock);
1.88 chs 1902: LIST_FOREACH(ph, &pp->pr_emptypages, ph_pagelist) {
1903: r = pool_chk_page(pp, label, ph);
1904: if (r) {
1905: goto out;
1906: }
1907: }
1908: LIST_FOREACH(ph, &pp->pr_fullpages, ph_pagelist) {
1909: r = pool_chk_page(pp, label, ph);
1910: if (r) {
1.3 pk 1911: goto out;
1912: }
1.88 chs 1913: }
1914: LIST_FOREACH(ph, &pp->pr_partpages, ph_pagelist) {
1915: r = pool_chk_page(pp, label, ph);
1916: if (r) {
1.3 pk 1917: goto out;
1918: }
1919: }
1.88 chs 1920:
1.3 pk 1921: out:
1.21 thorpej 1922: simple_unlock(&pp->pr_slock);
1.3 pk 1923: return (r);
1.43 thorpej 1924: }
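
/*
 * (Editorial sketch.)  pool_chk() walks every page header and free item
 * checking magic numbers and page-boundary consistency, so diagnostic
 * code can bracket suspect operations with calls like the one below.
 * "foo_pool" is hypothetical.
 */
#if 0	/* example */
	if (pool_chk(&foo_pool, __func__) != 0)
		panic("foo: pool corrupted");
#endif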
1925:
1926: /*
1927: * pool_cache_init:
1928: *
1929: * Initialize a pool cache.
1930: *
1931: * NOTE: If the pool must be protected from interrupts, we expect
1932: * to be called at the appropriate interrupt priority level.
1933: */
1934: void
1935: pool_cache_init(struct pool_cache *pc, struct pool *pp,
1936: int (*ctor)(void *, void *, int),
1937: void (*dtor)(void *, void *),
1938: void *arg)
1939: {
1940:
1.102 chs 1941: LIST_INIT(&pc->pc_emptygroups);
1942: LIST_INIT(&pc->pc_fullgroups);
1943: LIST_INIT(&pc->pc_partgroups);
1.43 thorpej 1944: simple_lock_init(&pc->pc_slock);
1945:
1946: pc->pc_pool = pp;
1947:
1948: pc->pc_ctor = ctor;
1949: pc->pc_dtor = dtor;
1950: pc->pc_arg = arg;
1951:
1.48 thorpej 1952: pc->pc_hits = 0;
1953: pc->pc_misses = 0;
1954:
1955: pc->pc_ngroups = 0;
1956:
1957: pc->pc_nitems = 0;
1958:
1.43 thorpej 1959: simple_lock(&pp->pr_slock);
1.102 chs 1960: LIST_INSERT_HEAD(&pp->pr_cachelist, pc, pc_poollist);
1.43 thorpej 1961: simple_unlock(&pp->pr_slock);
1962: }
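
/*
 * (Editorial sketch.)  Typical client setup, assuming the backing pool
 * has already been set up with pool_init() elsewhere.  The constructor
 * runs when an object must be allocated from the pool; returning
 * nonzero makes pool_cache_get() fail.  The destructor runs before an
 * object is finally released back to the pool.  All "foo" names are
 * hypothetical.
 */
#if 0	/* example */
static struct pool foo_pool;
static struct pool_cache foo_cache;

static int
foo_ctor(void *arg, void *obj, int flags)
{
	struct foo *f = obj;

	memset(f, 0, sizeof(*f));
	return (0);
}

static void
foo_dtor(void *arg, void *obj)
{

	/* undo whatever foo_ctor() set up */
}

	/* from the subsystem's initialization routine: */
	pool_cache_init(&foo_cache, &foo_pool, foo_ctor, foo_dtor, NULL);
#endif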
1963:
1964: /*
1965: * pool_cache_destroy:
1966: *
1967: * Destroy a pool cache.
1968: */
1969: void
1970: pool_cache_destroy(struct pool_cache *pc)
1971: {
1972: struct pool *pp = pc->pc_pool;
1973:
1974: /* First, invalidate the entire cache. */
1975: pool_cache_invalidate(pc);
1976:
1977: /* ...and remove it from the pool's cache list. */
1978: simple_lock(&pp->pr_slock);
1.102 chs 1979: LIST_REMOVE(pc, pc_poollist);
1.43 thorpej 1980: simple_unlock(&pp->pr_slock);
1981: }
1982:
1.110 perry 1983: static inline void *
1.87 thorpej 1984: pcg_get(struct pool_cache_group *pcg, paddr_t *pap)
1.43 thorpej 1985: {
1986: void *object;
1987: u_int idx;
1988:
1989: KASSERT(pcg->pcg_avail <= PCG_NOBJECTS);
1.45 thorpej 1990: KASSERT(pcg->pcg_avail != 0);
1.43 thorpej 1991: idx = --pcg->pcg_avail;
1992:
1.87 thorpej 1993: KASSERT(pcg->pcg_objects[idx].pcgo_va != NULL);
1994: object = pcg->pcg_objects[idx].pcgo_va;
1995: if (pap != NULL)
1996: *pap = pcg->pcg_objects[idx].pcgo_pa;
1997: pcg->pcg_objects[idx].pcgo_va = NULL;
1.43 thorpej 1998:
1999: return (object);
2000: }
2001:
1.110 perry 2002: static inline void
1.87 thorpej 2003: pcg_put(struct pool_cache_group *pcg, void *object, paddr_t pa)
1.43 thorpej 2004: {
2005: u_int idx;
2006:
2007: KASSERT(pcg->pcg_avail < PCG_NOBJECTS);
2008: idx = pcg->pcg_avail++;
2009:
1.87 thorpej 2010: KASSERT(pcg->pcg_objects[idx].pcgo_va == NULL);
2011: pcg->pcg_objects[idx].pcgo_va = object;
2012: pcg->pcg_objects[idx].pcgo_pa = pa;
1.43 thorpej 2013: }
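
/*
 * (Editorial sketch.)  pcg_get()/pcg_put() treat pcg_objects[] as a
 * small stack, so the most recently freed object is handed out first,
 * while it is still likely to be cache-warm:
 */
#if 0	/* example */
	pcg_put(pcg, obj, pa);			/* pcg_avail: n -> n + 1 */
	KASSERT(pcg_get(pcg, NULL) == obj);	/* LIFO: same object back */
#endif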
2014:
1.102 chs 2015: static void
2016: pcg_grouplist_free(struct pool_cache_grouplist *pcgl)
2017: {
2018: struct pool_cache_group *pcg;
2019: int s;
2020:
2021: s = splvm();
2022: while ((pcg = LIST_FIRST(pcgl)) != NULL) {
2023: LIST_REMOVE(pcg, pcg_list);
2024: pool_put(&pcgpool, pcg);
2025: }
2026: splx(s);
2027: }
2028:
1.43 thorpej 2029: /*
1.87 thorpej 2030: * pool_cache_get{,_paddr}:
1.43 thorpej 2031: *
1.87 thorpej 2032: * Get an object from a pool cache (optionally returning
2033: * the physical address of the object).
1.43 thorpej 2034: */
2035: void *
1.87 thorpej 2036: pool_cache_get_paddr(struct pool_cache *pc, int flags, paddr_t *pap)
1.43 thorpej 2037: {
2038: struct pool_cache_group *pcg;
2039: void *object;
1.58 thorpej 2040:
2041: #ifdef LOCKDEBUG
2042: if (flags & PR_WAITOK)
1.119 yamt 2043: ASSERT_SLEEPABLE(NULL, "pool_cache_get(PR_WAITOK)");
1.58 thorpej 2044: #endif
1.43 thorpej 2045:
2046: simple_lock(&pc->pc_slock);
2047:
1.102 chs 2048: pcg = LIST_FIRST(&pc->pc_partgroups);
2049: if (pcg == NULL) {
2050: pcg = LIST_FIRST(&pc->pc_fullgroups);
2051: if (pcg != NULL) {
2052: LIST_REMOVE(pcg, pcg_list);
2053: LIST_INSERT_HEAD(&pc->pc_partgroups, pcg, pcg_list);
1.43 thorpej 2054: }
1.102 chs 2055: }
2056: if (pcg == NULL) {
1.43 thorpej 2057:
2058: /*
2059: * No groups with any available objects. Allocate
2060: * a new object, construct it, and return it to
2061: * the caller. We will allocate a group, if necessary,
2062: * when the object is freed back to the cache.
2063: */
1.48 thorpej 2064: pc->pc_misses++;
1.43 thorpej 2065: simple_unlock(&pc->pc_slock);
2066: object = pool_get(pc->pc_pool, flags);
2067: if (object != NULL && pc->pc_ctor != NULL) {
2068: if ((*pc->pc_ctor)(pc->pc_arg, object, flags) != 0) {
2069: pool_put(pc->pc_pool, object);
2070: return (NULL);
2071: }
2072: }
1.125 ad 2073: KASSERT((((vaddr_t)object + pc->pc_pool->pr_itemoffset) &
2074: (pc->pc_pool->pr_align - 1)) == 0);
1.87 thorpej 2075: if (object != NULL && pap != NULL) {
2076: #ifdef POOL_VTOPHYS
2077: *pap = POOL_VTOPHYS(object);
2078: #else
2079: *pap = POOL_PADDR_INVALID;
2080: #endif
2081: }
1.125 ad 2082:
2083: FREECHECK_OUT(&pc->pc_freecheck, object);
1.43 thorpej 2084: return (object);
2085: }
2086:
1.48 thorpej 2087: pc->pc_hits++;
2088: pc->pc_nitems--;
1.87 thorpej 2089: object = pcg_get(pcg, pap);
1.43 thorpej 2090:
1.102 chs 2091: if (pcg->pcg_avail == 0) {
2092: LIST_REMOVE(pcg, pcg_list);
2093: LIST_INSERT_HEAD(&pc->pc_emptygroups, pcg, pcg_list);
2094: }
1.43 thorpej 2095: simple_unlock(&pc->pc_slock);
2096:
1.125 ad 2097: KASSERT((((vaddr_t)object + pc->pc_pool->pr_itemoffset) &
2098: (pc->pc_pool->pr_align - 1)) == 0);
2099: FREECHECK_OUT(&pc->pc_freecheck, object);
1.43 thorpej 2100: return (object);
2101: }
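
/*
 * (Editorial sketch.)  The paddr variant suits drivers that hand
 * objects to hardware: where the platform defines POOL_VTOPHYS the
 * physical address comes back for free, otherwise the caller sees
 * POOL_PADDR_INVALID and must translate some other way.  The names
 * "sc" and "hw_load_descriptor" are hypothetical.
 */
#if 0	/* example */
	paddr_t pa;
	void *desc;

	desc = pool_cache_get_paddr(&foo_cache, PR_NOWAIT, &pa);
	if (desc != NULL && pa != POOL_PADDR_INVALID)
		hw_load_descriptor(sc, desc, pa);
#endif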
2102:
2103: /*
1.87 thorpej 2104: * pool_cache_put{,_paddr}:
1.43 thorpej 2105: *
1.87 thorpej 2106: * Put an object back to the pool cache (optionally caching the
2107: * physical address of the object).
1.43 thorpej 2108: */
2109: void
1.87 thorpej 2110: pool_cache_put_paddr(struct pool_cache *pc, void *object, paddr_t pa)
1.43 thorpej 2111: {
2112: struct pool_cache_group *pcg;
1.60 thorpej 2113: int s;
1.43 thorpej 2114:
1.125 ad 2115: FREECHECK_IN(&pc->pc_freecheck, object);
2116:
1.109 christos 2117: if (__predict_false((pc->pc_pool->pr_flags & PR_WANTED) != 0)) {
2118: goto destruct;
2119: }
2120:
1.43 thorpej 2121: simple_lock(&pc->pc_slock);
2122:
1.102 chs 2123: pcg = LIST_FIRST(&pc->pc_partgroups);
2124: if (pcg == NULL) {
2125: pcg = LIST_FIRST(&pc->pc_emptygroups);
2126: if (pcg != NULL) {
2127: LIST_REMOVE(pcg, pcg_list);
2128: LIST_INSERT_HEAD(&pc->pc_partgroups, pcg, pcg_list);
1.43 thorpej 2129: }
1.102 chs 2130: }
2131: if (pcg == NULL) {
1.43 thorpej 2132:
2133: /*
              2134: 		 * No existing group has room for the object.
1.47 thorpej  2135: 		 * Attempt to allocate a new group.
1.43 thorpej 2136: */
1.47 thorpej 2137: simple_unlock(&pc->pc_slock);
1.60 thorpej 2138: s = splvm();
1.43 thorpej 2139: pcg = pool_get(&pcgpool, PR_NOWAIT);
1.60 thorpej 2140: splx(s);
1.102 chs 2141: if (pcg == NULL) {
1.109 christos 2142: destruct:
1.102 chs 2143:
2144: /*
2145: * Unable to allocate a cache group; destruct the object
2146: * and free it back to the pool.
2147: */
2148: pool_cache_destruct_object(pc, object);
2149: return;
1.43 thorpej 2150: }
1.102 chs 2151: memset(pcg, 0, sizeof(*pcg));
2152: simple_lock(&pc->pc_slock);
2153: pc->pc_ngroups++;
2154: LIST_INSERT_HEAD(&pc->pc_partgroups, pcg, pcg_list);
1.43 thorpej 2155: }
2156:
1.48 thorpej 2157: pc->pc_nitems++;
1.87 thorpej 2158: pcg_put(pcg, object, pa);
1.43 thorpej 2159:
1.102 chs 2160: if (pcg->pcg_avail == PCG_NOBJECTS) {
2161: LIST_REMOVE(pcg, pcg_list);
2162: LIST_INSERT_HEAD(&pc->pc_fullgroups, pcg, pcg_list);
2163: }
1.43 thorpej 2164: simple_unlock(&pc->pc_slock);
1.51 thorpej 2165: }
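
/*
 * (Editorial sketch.)  Callers that do not track physical addresses
 * normally use the plain pool_cache_get()/pool_cache_put() wrappers
 * (see sys/pool.h), which supply NULL and POOL_PADDR_INVALID:
 */
#if 0	/* example */
	f = pool_cache_get(&foo_cache, PR_WAITOK);
	/* ... use f ... */
	pool_cache_put(&foo_cache, f);
#endif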
2166:
2167: /*
2168: * pool_cache_destruct_object:
2169: *
2170: * Force destruction of an object and its release back into
2171: * the pool.
2172: */
2173: void
2174: pool_cache_destruct_object(struct pool_cache *pc, void *object)
2175: {
2176:
2177: if (pc->pc_dtor != NULL)
2178: (*pc->pc_dtor)(pc->pc_arg, object);
2179: pool_put(pc->pc_pool, object);
1.43 thorpej 2180: }
2181:
1.130 ad 2182: /*
2183: * pool_do_cache_invalidate_grouplist:
2184: *
2185: * Invalidate a single grouplist and destruct all objects.
2186: * XXX This is too expensive. We should swap the list then
2187: * unlock.
2188: */
1.102 chs 2189: static void
1.106 christos 2190: pool_do_cache_invalidate_grouplist(struct pool_cache_grouplist *pcgsl,
1.105 christos 2191: struct pool_cache *pc, struct pool_pagelist *pq,
1.106 christos 2192: struct pool_cache_grouplist *pcgdl)
1.102 chs 2193: {
1.130 ad 2194: struct pool_cache_group *pcg;
1.102 chs 2195: void *object;
2196:
1.130 ad 2197: LOCK_ASSERT(simple_lock_held(&pc->pc_slock));
2198: LOCK_ASSERT(simple_lock_held(&pc->pc_pool->pr_slock));
2199:
2200: while ((pcg = LIST_FIRST(pcgsl)) != NULL) {
2201: pc->pc_ngroups--;
2202: LIST_REMOVE(pcg, pcg_list);
2203: LIST_INSERT_HEAD(pcgdl, pcg, pcg_list);
2204: pc->pc_nitems -= pcg->pcg_avail;
2205: simple_unlock(&pc->pc_pool->pr_slock);
2206: simple_unlock(&pc->pc_slock);
2207:
1.102 chs 2208: while (pcg->pcg_avail != 0) {
2209: object = pcg_get(pcg, NULL);
2210: if (pc->pc_dtor != NULL)
2211: (*pc->pc_dtor)(pc->pc_arg, object);
1.130 ad 2212: simple_lock(&pc->pc_pool->pr_slock);
1.102 chs 2213: pool_do_put(pc->pc_pool, object, pq);
1.130 ad 2214: simple_unlock(&pc->pc_pool->pr_slock);
1.102 chs 2215: }
1.130 ad 2216:
2217: simple_lock(&pc->pc_slock);
2218: simple_lock(&pc->pc_pool->pr_slock);
1.102 chs 2219: }
1.105 christos 2220: }
2221:
2222: static void
2223: pool_do_cache_invalidate(struct pool_cache *pc, struct pool_pagelist *pq,
2224: struct pool_cache_grouplist *pcgl)
2225: {
2226:
2227: LOCK_ASSERT(simple_lock_held(&pc->pc_slock));
2228: LOCK_ASSERT(simple_lock_held(&pc->pc_pool->pr_slock));
2229:
1.106 christos 2230: pool_do_cache_invalidate_grouplist(&pc->pc_fullgroups, pc, pq, pcgl);
2231: pool_do_cache_invalidate_grouplist(&pc->pc_partgroups, pc, pq, pcgl);
1.103 chs 2232:
2233: KASSERT(LIST_EMPTY(&pc->pc_partgroups));
2234: KASSERT(LIST_EMPTY(&pc->pc_fullgroups));
2235: KASSERT(pc->pc_nitems == 0);
1.102 chs 2236: }
2237:
1.43 thorpej 2238: /*
1.101 thorpej 2239: * pool_cache_invalidate:
1.43 thorpej 2240: *
1.101 thorpej 2241: * Invalidate a pool cache (destruct and release all of the
2242: * cached objects).
1.43 thorpej 2243: */
1.101 thorpej 2244: void
2245: pool_cache_invalidate(struct pool_cache *pc)
1.43 thorpej 2246: {
1.101 thorpej 2247: struct pool_pagelist pq;
1.102 chs 2248: struct pool_cache_grouplist pcgl;
1.101 thorpej 2249:
2250: LIST_INIT(&pq);
1.102 chs 2251: LIST_INIT(&pcgl);
1.101 thorpej 2252:
2253: simple_lock(&pc->pc_slock);
2254: simple_lock(&pc->pc_pool->pr_slock);
1.43 thorpej 2255:
1.102 chs 2256: pool_do_cache_invalidate(pc, &pq, &pcgl);
1.43 thorpej 2257:
1.101 thorpej 2258: simple_unlock(&pc->pc_pool->pr_slock);
2259: simple_unlock(&pc->pc_slock);
1.43 thorpej 2260:
1.102 chs 2261: pr_pagelist_free(pc->pc_pool, &pq);
2262: pcg_grouplist_free(&pcgl);
1.43 thorpej 2263: }
2264:
2265: /*
2266: * pool_cache_reclaim:
2267: *
2268: * Reclaim a pool cache for pool_reclaim().
2269: */
2270: static void
1.102 chs 2271: pool_cache_reclaim(struct pool_cache *pc, struct pool_pagelist *pq,
2272: struct pool_cache_grouplist *pcgl)
1.43 thorpej 2273: {
1.101 thorpej 2274:
2275: /*
2276: * We're locking in the wrong order (normally pool_cache -> pool,
2277: * but the pool is already locked when we get here), so we have
2278: * to use trylock. If we can't lock the pool_cache, it's not really
2279: * a big deal here.
2280: */
2281: if (simple_lock_try(&pc->pc_slock) == 0)
2282: return;
2283:
1.102 chs 2284: pool_do_cache_invalidate(pc, pq, pcgl);
1.43 thorpej 2285:
2286: simple_unlock(&pc->pc_slock);
1.3 pk 2287: }
1.66 thorpej 2288:
2289: /*
2290: * Pool backend allocators.
2291: *
2292: * Each pool has a backend allocator that handles allocation, deallocation,
2293: * and any additional draining that might be needed.
2294: *
2295: * We provide two standard allocators:
2296: *
2297: * pool_allocator_kmem - the default when no allocator is specified
2298: *
2299: * pool_allocator_nointr - used for pools that will not be accessed
2300: * in interrupt context.
2301: */
2302: void *pool_page_alloc(struct pool *, int);
2303: void pool_page_free(struct pool *, void *);
2304:
1.112 bjh21 2305: #ifdef POOL_SUBPAGE
2306: struct pool_allocator pool_allocator_kmem_fullpage = {
2307: pool_page_alloc, pool_page_free, 0,
1.117 yamt 2308: .pa_backingmapptr = &kmem_map,
1.112 bjh21 2309: };
2310: #else
1.66 thorpej 2311: struct pool_allocator pool_allocator_kmem = {
2312: pool_page_alloc, pool_page_free, 0,
1.117 yamt 2313: .pa_backingmapptr = &kmem_map,
1.66 thorpej 2314: };
1.112 bjh21 2315: #endif
1.66 thorpej 2316:
2317: void *pool_page_alloc_nointr(struct pool *, int);
2318: void pool_page_free_nointr(struct pool *, void *);
2319:
1.112 bjh21 2320: #ifdef POOL_SUBPAGE
2321: struct pool_allocator pool_allocator_nointr_fullpage = {
2322: pool_page_alloc_nointr, pool_page_free_nointr, 0,
1.117 yamt 2323: .pa_backingmapptr = &kernel_map,
1.112 bjh21 2324: };
2325: #else
1.66 thorpej 2326: struct pool_allocator pool_allocator_nointr = {
2327: pool_page_alloc_nointr, pool_page_free_nointr, 0,
1.117 yamt 2328: .pa_backingmapptr = &kernel_map,
1.66 thorpej 2329: };
1.112 bjh21 2330: #endif
1.66 thorpej 2331:
2332: #ifdef POOL_SUBPAGE
2333: void *pool_subpage_alloc(struct pool *, int);
2334: void pool_subpage_free(struct pool *, void *);
2335:
1.112 bjh21 2336: struct pool_allocator pool_allocator_kmem = {
2337: pool_subpage_alloc, pool_subpage_free, POOL_SUBPAGE,
1.117 yamt 2338: .pa_backingmapptr = &kmem_map,
1.112 bjh21 2339: };
2340:
2341: void *pool_subpage_alloc_nointr(struct pool *, int);
2342: void pool_subpage_free_nointr(struct pool *, void *);
2343:
2344: struct pool_allocator pool_allocator_nointr = {
2345: pool_subpage_alloc, pool_subpage_free, POOL_SUBPAGE,
1.117 yamt 2346: .pa_backingmapptr = &kmem_map,
1.66 thorpej 2347: };
2348: #endif /* POOL_SUBPAGE */
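
/*
 * (Editorial sketch.)  A subsystem with special backing-store needs can
 * supply its own allocator: filling in pa_alloc, pa_free and pa_pagesz
 * (0 selects the platform page size) is enough, and the structure is
 * then handed to pool_init().  All "my" names are hypothetical; the
 * bodies simply reuse the uvm_km poolpage interfaces seen above.
 */
#if 0	/* example */
static void *
my_page_alloc(struct pool *pp, int flags)
{
	bool waitok = (flags & PR_WAITOK) ? true : false;

	return ((void *) uvm_km_alloc_poolpage(kmem_map, waitok));
}

static void
my_page_free(struct pool *pp, void *v)
{

	uvm_km_free_poolpage(kmem_map, (vaddr_t) v);
}

struct pool_allocator my_allocator = {
	my_page_alloc, my_page_free, 0,
};
#endif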
2349:
1.117 yamt 2350: static void *
2351: pool_allocator_alloc(struct pool *pp, int flags)
1.66 thorpej 2352: {
1.117 yamt 2353: struct pool_allocator *pa = pp->pr_alloc;
1.66 thorpej 2354: void *res;
2355:
1.117 yamt 2356: res = (*pa->pa_alloc)(pp, flags);
2357: if (res == NULL && (flags & PR_WAITOK) == 0) {
1.66 thorpej 2358: /*
1.117 yamt 2359: * We only run the drain hook here if PR_NOWAIT.
2360: * In other cases, the hook will be run in
2361: * pool_reclaim().
1.66 thorpej 2362: */
1.117 yamt 2363: if (pp->pr_drain_hook != NULL) {
2364: (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags);
2365: res = (*pa->pa_alloc)(pp, flags);
1.66 thorpej 2366: }
1.117 yamt 2367: }
2368: return res;
1.66 thorpej 2369: }
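
/*
 * (Editorial sketch.)  The drain hook consulted above is registered
 * with pool_set_drain_hook(); the mbuf code, for instance, uses it to
 * trim protocol caches when an allocation would otherwise fail.  The
 * "foo" names are hypothetical.
 */
#if 0	/* example */
static void
foo_drain(void *arg, int flags)
{

	/* Release whatever can be spared; may not sleep if PR_NOWAIT. */
}

	pool_set_drain_hook(&foo_pool, foo_drain, NULL);
#endif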
2370:
1.117 yamt 2371: static void
1.66 thorpej 2372: pool_allocator_free(struct pool *pp, void *v)
2373: {
2374: struct pool_allocator *pa = pp->pr_alloc;
2375:
2376: (*pa->pa_free)(pp, v);
2377: }
2378:
2379: void *
1.124 yamt 2380: pool_page_alloc(struct pool *pp, int flags)
1.66 thorpej 2381: {
1.127 thorpej 2382: bool waitok = (flags & PR_WAITOK) ? true : false;
1.66 thorpej 2383:
1.100 yamt 2384: return ((void *) uvm_km_alloc_poolpage_cache(kmem_map, waitok));
1.66 thorpej 2385: }
2386:
2387: void
1.124 yamt 2388: pool_page_free(struct pool *pp, void *v)
1.66 thorpej 2389: {
2390:
1.98 yamt 2391: uvm_km_free_poolpage_cache(kmem_map, (vaddr_t) v);
2392: }
2393:
2394: static void *
1.124 yamt 2395: pool_page_alloc_meta(struct pool *pp, int flags)
1.98 yamt 2396: {
1.127 thorpej 2397: bool waitok = (flags & PR_WAITOK) ? true : false;
1.98 yamt 2398:
1.100 yamt 2399: return ((void *) uvm_km_alloc_poolpage(kmem_map, waitok));
1.98 yamt 2400: }
2401:
2402: static void
1.124 yamt 2403: pool_page_free_meta(struct pool *pp, void *v)
1.98 yamt 2404: {
2405:
1.100 yamt 2406: uvm_km_free_poolpage(kmem_map, (vaddr_t) v);
1.66 thorpej 2407: }
2408:
2409: #ifdef POOL_SUBPAGE
2410: /* Sub-page allocator, for machines with large hardware pages. */
2411: void *
2412: pool_subpage_alloc(struct pool *pp, int flags)
2413: {
1.93 dbj 2414: void *v;
2415: int s;
2416: s = splvm();
2417: v = pool_get(&psppool, flags);
2418: splx(s);
2419: return v;
1.66 thorpej 2420: }
2421:
2422: void
2423: pool_subpage_free(struct pool *pp, void *v)
2424: {
1.93 dbj 2425: int s;
2426: s = splvm();
1.66 thorpej 2427: pool_put(&psppool, v);
1.93 dbj 2428: splx(s);
1.66 thorpej 2429: }
2430:
2431: /* We don't provide a real nointr allocator. Maybe later. */
2432: void *
1.112 bjh21 2433: pool_subpage_alloc_nointr(struct pool *pp, int flags)
1.66 thorpej 2434: {
2435:
2436: return (pool_subpage_alloc(pp, flags));
2437: }
2438:
2439: void
1.112 bjh21 2440: pool_subpage_free_nointr(struct pool *pp, void *v)
1.66 thorpej 2441: {
2442:
2443: pool_subpage_free(pp, v);
2444: }
1.112 bjh21 2445: #endif /* POOL_SUBPAGE */
1.66 thorpej 2446: void *
1.124 yamt 2447: pool_page_alloc_nointr(struct pool *pp, int flags)
1.66 thorpej 2448: {
1.127 thorpej 2449: bool waitok = (flags & PR_WAITOK) ? true : false;
1.66 thorpej 2450:
1.100 yamt 2451: return ((void *) uvm_km_alloc_poolpage_cache(kernel_map, waitok));
1.66 thorpej 2452: }
2453:
2454: void
1.124 yamt 2455: pool_page_free_nointr(struct pool *pp, void *v)
1.66 thorpej 2456: {
2457:
1.98 yamt 2458: uvm_km_free_poolpage_cache(kernel_map, (vaddr_t) v);
1.66 thorpej 2459: }