Annotation of src/sys/kern/subr_pool.c, Revision 1.128.2.2
1.128.2.2! ad 1: /* $NetBSD: subr_pool.c,v 1.128.2.1 2007/03/13 16:51:56 ad Exp $ */
1.1 pk 2:
3: /*-
1.128.2.2! ad 4: * Copyright (c) 1997, 1999, 2000, 2002, 2007 The NetBSD Foundation, Inc.
1.1 pk 5: * All rights reserved.
6: *
7: * This code is derived from software contributed to The NetBSD Foundation
1.20 thorpej 8: * by Paul Kranenburg; by Jason R. Thorpe of the Numerical Aerospace
9: * Simulation Facility, NASA Ames Research Center.
1.1 pk 10: *
11: * Redistribution and use in source and binary forms, with or without
12: * modification, are permitted provided that the following conditions
13: * are met:
14: * 1. Redistributions of source code must retain the above copyright
15: * notice, this list of conditions and the following disclaimer.
16: * 2. Redistributions in binary form must reproduce the above copyright
17: * notice, this list of conditions and the following disclaimer in the
18: * documentation and/or other materials provided with the distribution.
19: * 3. All advertising materials mentioning features or use of this software
20: * must display the following acknowledgement:
1.13 christos 21: * This product includes software developed by the NetBSD
22: * Foundation, Inc. and its contributors.
1.1 pk 23: * 4. Neither the name of The NetBSD Foundation nor the names of its
24: * contributors may be used to endorse or promote products derived
25: * from this software without specific prior written permission.
26: *
27: * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28: * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29: * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30: * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31: * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32: * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33: * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34: * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35: * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36: * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37: * POSSIBILITY OF SUCH DAMAGE.
38: */
1.64 lukem 39:
40: #include <sys/cdefs.h>
1.128.2.2! ad 41: __KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.128.2.1 2007/03/13 16:51:56 ad Exp $");
1.24 scottr 42:
1.25 thorpej 43: #include "opt_pool.h"
1.24 scottr 44: #include "opt_poollog.h"
1.28 thorpej 45: #include "opt_lockdebug.h"
1.1 pk 46:
47: #include <sys/param.h>
48: #include <sys/systm.h>
49: #include <sys/proc.h>
50: #include <sys/errno.h>
51: #include <sys/kernel.h>
52: #include <sys/malloc.h>
53: #include <sys/lock.h>
54: #include <sys/pool.h>
1.20 thorpej 55: #include <sys/syslog.h>
1.125 ad 56: #include <sys/debug.h>
1.3 pk 57:
58: #include <uvm/uvm.h>
59:
1.1 pk 60: /*
61: * Pool resource management utility.
1.3 pk 62: *
1.88 chs 63: * Memory is allocated in pages which are split into pieces according to
64: * the pool item size. Each page is kept on one of three lists in the
65: * pool structure: `pr_emptypages', `pr_fullpages' and `pr_partpages',
66: * for empty, full and partially-full pages respectively. The individual
67: * pool items are on a linked list headed by `ph_itemlist' in each page
68: * header. The memory for building the page list is either taken from
69: * the allocated pages themselves (for small pool items) or taken from
70: * an internal pool of page headers (`phpool').
1.1 pk 71: */
72:
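/*
 * Example (sketch, not part of the original source): typical use of the
 * interface described above.  "struct foo", "foo_pool" and the chosen
 * flags are hypothetical.
 */
#if 0
struct foo { int f_x; };
static struct pool foo_pool;

static void
foo_example(void)
{
	struct foo *f;

	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0, "foopl",
	    &pool_allocator_nointr, IPL_NONE);
	f = pool_get(&foo_pool, PR_WAITOK);	/* PR_WAITOK may sleep */
	f->f_x = 1;
	pool_put(&foo_pool, f);
	pool_destroy(&foo_pool);
}
#endif
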
1.3 pk 73: /* List of all pools */
1.102 chs 74: LIST_HEAD(,pool) pool_head = LIST_HEAD_INITIALIZER(pool_head);
1.3 pk 75:
76: /* Private pool for page header structures */
1.97 yamt 77: #define PHPOOL_MAX 8
78: static struct pool phpool[PHPOOL_MAX];
79: #define PHPOOL_FREELIST_NELEM(idx) (((idx) == 0) ? 0 : (1 << (idx)))
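/*
 * E.g. (illustrative): phpool[0] has no item freelist and backs ordinary
 * off-page headers; phpool[3] backs PR_NOTOUCH pools with up to
 * 1 << 3 == 8 items per page.
 */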
1.3 pk 80:
1.62 bjh21 81: #ifdef POOL_SUBPAGE
82: /* Pool of subpages for use by normal pools. */
83: static struct pool psppool;
84: #endif
85:
1.117 yamt 86: static SLIST_HEAD(, pool_allocator) pa_deferinitq =
87: SLIST_HEAD_INITIALIZER(pa_deferinitq);
88:
1.98 yamt 89: static void *pool_page_alloc_meta(struct pool *, int);
90: static void pool_page_free_meta(struct pool *, void *);
91:
92: /* allocator for pool metadata */
93: static struct pool_allocator pool_allocator_meta = {
1.117 yamt 94: pool_page_alloc_meta, pool_page_free_meta,
95: .pa_backingmapptr = &kmem_map,
1.98 yamt 96: };
97:
1.3 pk 98: /* # of seconds to retain page after last use */
99: int pool_inactive_time = 10;
100:
101: /* Next candidate for drainage (see pool_drain()) */
1.23 thorpej 102: static struct pool *drainpp;
103:
1.128.2.2! ad 104: /* This lock protects both pool_head and drainpp. */
! 105: static kmutex_t pool_head_lock;
1.3 pk 106:
1.99 yamt 107: typedef uint8_t pool_item_freelist_t;
108:
1.3 pk 109: struct pool_item_header {
110: /* Page headers */
1.88 chs 111: LIST_ENTRY(pool_item_header)
1.3 pk 112: ph_pagelist; /* pool page list */
1.88 chs 113: SPLAY_ENTRY(pool_item_header)
114: ph_node; /* Off-page page headers */
1.128 christos 115: void * ph_page; /* this page's address */
1.3 pk 116: struct timeval ph_time; /* last referenced */
1.97 yamt 117: union {
118: /* !PR_NOTOUCH */
119: struct {
1.102 chs 120: LIST_HEAD(, pool_item)
1.97 yamt 121: phu_itemlist; /* chunk list for this page */
122: } phu_normal;
123: /* PR_NOTOUCH */
124: struct {
125: uint16_t
126: phu_off; /* start offset in page */
1.99 yamt 127: pool_item_freelist_t
1.97 yamt 128: phu_firstfree; /* first free item */
1.99 yamt 129: /*
130: * XXX it might be better to use
131: * a simple bitmap and ffs(3)
132: */
1.97 yamt 133: } phu_notouch;
134: } ph_u;
135: uint16_t ph_nmissing; /* # of chunks in use */
1.3 pk 136: };
1.97 yamt 137: #define ph_itemlist ph_u.phu_normal.phu_itemlist
138: #define ph_off ph_u.phu_notouch.phu_off
139: #define ph_firstfree ph_u.phu_notouch.phu_firstfree
1.3 pk 140:
1.1 pk 141: struct pool_item {
1.3 pk 142: #ifdef DIAGNOSTIC
1.82 thorpej 143: u_int pi_magic;
1.33 chs 144: #endif
1.82 thorpej 145: #define PI_MAGIC 0xdeadbeefU
1.3 pk 146: /* Other entries use only this list entry */
1.102 chs 147: LIST_ENTRY(pool_item) pi_list;
1.3 pk 148: };
149:
1.53 thorpej 150: #define POOL_NEEDS_CATCHUP(pp) \
151: ((pp)->pr_nitems < (pp)->pr_minitems)
152:
1.43 thorpej 153: /*
154: * Pool cache management.
155: *
156: * Pool caches provide a way for constructed objects to be cached by the
157: * pool subsystem. This can lead to performance improvements by avoiding
158: * needless object construction/destruction; destruction is deferred
159: * until absolutely necessary.
160: *
161: * Caches are grouped into cache groups. Each cache group references
162: * up to 16 constructed objects. When a cache allocates an object
163: * from the pool, it calls the object's constructor and places it into
164: * a cache group. When a cache group frees an object back to the pool,
165: * it first calls the object's destructor. This allows the object to
166: * persist in constructed form while freed to the cache.
167: *
168: * Multiple caches may exist for each pool. This allows a single
169: * object type to have multiple constructed forms. The pool references
170: * each cache, so that when a pool is drained by the pagedaemon, it can
171: * drain each individual cache as well. Each time a cache is drained,
172: * the most idle cache group is freed to the pool in its entirety.
173: *
174: * Pool caches are layed on top of pools. By layering them, we can avoid
175: * the complexity of cache management for pools which would not benefit
176: * from it.
177: */
178:
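/*
 * Example (sketch): caching constructed objects as described above,
 * assuming this era's pool_cache interface
 * (pool_cache_init(pc, pp, ctor, dtor, arg)).  The "foo" names are
 * hypothetical; foo_pool is the pool from the sketch near the top of
 * the file, and the ctor returns 0 on success.
 */
#if 0
static struct pool_cache foo_cache;

static int
foo_ctor(void *arg, void *obj, int flags)
{
	struct foo *f = obj;

	f->f_x = 0;		/* expensive setup, done once per object */
	return 0;
}

static void
foo_dtor(void *arg, void *obj)
{
	/* undo what foo_ctor did */
}

static void
foo_cache_example(void)
{
	struct foo *f;

	pool_cache_init(&foo_cache, &foo_pool, foo_ctor, foo_dtor, NULL);
	f = pool_cache_get(&foo_cache, PR_WAITOK);
	pool_cache_put(&foo_cache, f);	/* stays constructed in the cache */
}
#endif
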
179: /* The cache group pool. */
180: static struct pool pcgpool;
1.3 pk 181:
1.102 chs 182: static void pool_cache_reclaim(struct pool_cache *, struct pool_pagelist *,
183: struct pool_cache_grouplist *);
184: static void pcg_grouplist_free(struct pool_cache_grouplist *);
1.3 pk 185:
1.42 thorpej 186: static int pool_catchup(struct pool *);
1.128 christos 187: static void pool_prime_page(struct pool *, void *,
1.55 thorpej 188: struct pool_item_header *);
1.88 chs 189: static void pool_update_curpage(struct pool *);
1.66 thorpej 190:
1.113 yamt 191: static int pool_grow(struct pool *, int);
1.117 yamt 192: static void *pool_allocator_alloc(struct pool *, int);
193: static void pool_allocator_free(struct pool *, void *);
1.3 pk 194:
1.97 yamt 195: static void pool_print_pagelist(struct pool *, struct pool_pagelist *,
1.88 chs 196: void (*)(const char *, ...));
1.42 thorpej 197: static void pool_print1(struct pool *, const char *,
198: void (*)(const char *, ...));
1.3 pk 199:
1.88 chs 200: static int pool_chk_page(struct pool *, const char *,
201: struct pool_item_header *);
202:
1.3 pk 203: /*
1.52 thorpej 204: * Pool log entry. An array of these is allocated in pool_init().
1.3 pk 205: */
206: struct pool_log {
207: const char *pl_file;
208: long pl_line;
209: int pl_action;
1.25 thorpej 210: #define PRLOG_GET 1
211: #define PRLOG_PUT 2
1.3 pk 212: void *pl_addr;
1.1 pk 213: };
214:
1.86 matt 215: #ifdef POOL_DIAGNOSTIC
1.3 pk 216: /* Number of entries in pool log buffers */
1.17 thorpej 217: #ifndef POOL_LOGSIZE
218: #define POOL_LOGSIZE 10
219: #endif
220:
221: int pool_logsize = POOL_LOGSIZE;
1.1 pk 222:
1.110 perry 223: static inline void
1.42 thorpej 224: pr_log(struct pool *pp, void *v, int action, const char *file, long line)
1.3 pk 225: {
226: int n = pp->pr_curlogentry;
227: struct pool_log *pl;
228:
1.20 thorpej 229: if ((pp->pr_roflags & PR_LOGGING) == 0)
1.3 pk 230: return;
231:
232: /*
233: * Fill in the current entry. Wrap around and overwrite
234: * the oldest entry if necessary.
235: */
236: pl = &pp->pr_log[n];
237: pl->pl_file = file;
238: pl->pl_line = line;
239: pl->pl_action = action;
240: pl->pl_addr = v;
241: if (++n >= pp->pr_logsize)
242: n = 0;
243: pp->pr_curlogentry = n;
244: }
245:
246: static void
1.42 thorpej 247: pr_printlog(struct pool *pp, struct pool_item *pi,
248: void (*pr)(const char *, ...))
1.3 pk 249: {
250: int i = pp->pr_logsize;
251: int n = pp->pr_curlogentry;
252:
1.20 thorpej 253: if ((pp->pr_roflags & PR_LOGGING) == 0)
1.3 pk 254: return;
255:
256: /*
257: * Print all entries in this pool's log.
258: */
259: while (i-- > 0) {
260: struct pool_log *pl = &pp->pr_log[n];
261: if (pl->pl_action != 0) {
1.25 thorpej 262: if (pi == NULL || pi == pl->pl_addr) {
263: (*pr)("\tlog entry %d:\n", i);
264: (*pr)("\t\taction = %s, addr = %p\n",
265: pl->pl_action == PRLOG_GET ? "get" : "put",
266: pl->pl_addr);
267: (*pr)("\t\tfile: %s at line %lu\n",
268: pl->pl_file, pl->pl_line);
269: }
1.3 pk 270: }
271: if (++n >= pp->pr_logsize)
272: n = 0;
273: }
274: }
1.25 thorpej 275:
1.110 perry 276: static inline void
1.42 thorpej 277: pr_enter(struct pool *pp, const char *file, long line)
1.25 thorpej 278: {
279:
1.34 thorpej 280: if (__predict_false(pp->pr_entered_file != NULL)) {
1.25 thorpej 281: printf("pool %s: reentrancy at file %s line %ld\n",
282: pp->pr_wchan, file, line);
283: printf(" previous entry at file %s line %ld\n",
284: pp->pr_entered_file, pp->pr_entered_line);
285: panic("pr_enter");
286: }
287:
288: pp->pr_entered_file = file;
289: pp->pr_entered_line = line;
290: }
291:
1.110 perry 292: static inline void
1.42 thorpej 293: pr_leave(struct pool *pp)
1.25 thorpej 294: {
295:
1.34 thorpej 296: if (__predict_false(pp->pr_entered_file == NULL)) {
1.25 thorpej 297: printf("pool %s not entered?\n", pp->pr_wchan);
298: panic("pr_leave");
299: }
300:
301: pp->pr_entered_file = NULL;
302: pp->pr_entered_line = 0;
303: }
304:
1.110 perry 305: static inline void
1.42 thorpej 306: pr_enter_check(struct pool *pp, void (*pr)(const char *, ...))
1.25 thorpej 307: {
308:
309: if (pp->pr_entered_file != NULL)
310: (*pr)("\n\tcurrently entered from file %s line %ld\n",
311: pp->pr_entered_file, pp->pr_entered_line);
312: }
1.3 pk 313: #else
1.25 thorpej 314: #define pr_log(pp, v, action, file, line)
315: #define pr_printlog(pp, pi, pr)
316: #define pr_enter(pp, file, line)
317: #define pr_leave(pp)
318: #define pr_enter_check(pp, pr)
1.59 thorpej 319: #endif /* POOL_DIAGNOSTIC */
1.3 pk 320:
1.110 perry 321: static inline int
1.97 yamt 322: pr_item_notouch_index(const struct pool *pp, const struct pool_item_header *ph,
323: const void *v)
324: {
325: const char *cp = v;
326: int idx;
327:
328: KASSERT(pp->pr_roflags & PR_NOTOUCH);
1.128 christos 329: idx = (cp - (char *)ph->ph_page - ph->ph_off) / pp->pr_size;
1.97 yamt 330: KASSERT(idx < pp->pr_itemsperpage);
331: return idx;
332: }
333:
1.99 yamt 334: #define PR_FREELIST_ALIGN(p) \
335: roundup((uintptr_t)(p), sizeof(pool_item_freelist_t))
336: #define PR_FREELIST(ph) ((pool_item_freelist_t *)PR_FREELIST_ALIGN((ph) + 1))
337: #define PR_INDEX_USED ((pool_item_freelist_t)-1)
338: #define PR_INDEX_EOL ((pool_item_freelist_t)-2)
1.97 yamt 339:
1.110 perry 340: static inline void
1.97 yamt 341: pr_item_notouch_put(const struct pool *pp, struct pool_item_header *ph,
342: void *obj)
343: {
344: int idx = pr_item_notouch_index(pp, ph, obj);
1.99 yamt 345: pool_item_freelist_t *freelist = PR_FREELIST(ph);
1.97 yamt 346:
347: KASSERT(freelist[idx] == PR_INDEX_USED);
348: freelist[idx] = ph->ph_firstfree;
349: ph->ph_firstfree = idx;
350: }
351:
1.110 perry 352: static inline void *
1.97 yamt 353: pr_item_notouch_get(const struct pool *pp, struct pool_item_header *ph)
354: {
355: int idx = ph->ph_firstfree;
1.99 yamt 356: pool_item_freelist_t *freelist = PR_FREELIST(ph);
1.97 yamt 357:
358: KASSERT(freelist[idx] != PR_INDEX_USED);
359: ph->ph_firstfree = freelist[idx];
360: freelist[idx] = PR_INDEX_USED;
361:
1.128 christos 362: return (char *)ph->ph_page + ph->ph_off + idx * pp->pr_size;
1.97 yamt 363: }
364:
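/*
 * Worked example (illustrative): for an off-page PR_NOTOUCH header
 * managing 4 items, the freelist array f[] sits just past the header,
 * aligned by PR_FREELIST_ALIGN.  A fresh page has ph_firstfree = 0 and
 * f[] = { 1, 2, 3, PR_INDEX_EOL }.  pr_item_notouch_get() pops index 0,
 * marks f[0] = PR_INDEX_USED and returns ph_page + ph_off + 0 * pr_size;
 * pr_item_notouch_put() pushes the index back onto the chain.
 */
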
1.110 perry 365: static inline int
1.88 chs 366: phtree_compare(struct pool_item_header *a, struct pool_item_header *b)
367: {
1.121 yamt 368:
369: /*
370: * we consider a pool_item_header with a smaller ph_page to be bigger.
371: * (this unnatural ordering is for the benefit of pr_find_pagehead.)
372: */
373:
1.88 chs 374: if (a->ph_page < b->ph_page)
1.121 yamt 375: return (1);
376: else if (a->ph_page > b->ph_page)
1.88 chs 377: return (-1);
378: else
379: return (0);
380: }
381:
382: SPLAY_PROTOTYPE(phtree, pool_item_header, ph_node, phtree_compare);
383: SPLAY_GENERATE(phtree, pool_item_header, ph_node, phtree_compare);
384:
1.3 pk 385: /*
1.121 yamt 386: * Return the pool page header based on item address.
1.3 pk 387: */
1.110 perry 388: static inline struct pool_item_header *
1.121 yamt 389: pr_find_pagehead(struct pool *pp, void *v)
1.3 pk 390: {
1.88 chs 391: struct pool_item_header *ph, tmp;
1.3 pk 392:
1.121 yamt 393: if ((pp->pr_roflags & PR_NOALIGN) != 0) {
1.128 christos 394: tmp.ph_page = (void *)(uintptr_t)v;
1.121 yamt 395: ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp);
396: if (ph == NULL) {
397: ph = SPLAY_ROOT(&pp->pr_phtree);
398: if (ph != NULL && phtree_compare(&tmp, ph) >= 0) {
399: ph = SPLAY_NEXT(phtree, &pp->pr_phtree, ph);
400: }
401: KASSERT(ph == NULL || phtree_compare(&tmp, ph) < 0);
402: }
403: } else {
1.128 christos 404: void *page =
405: (void *)((uintptr_t)v & pp->pr_alloc->pa_pagemask);
1.121 yamt 406:
407: if ((pp->pr_roflags & PR_PHINPAGE) != 0) {
1.128 christos 408: ph = (struct pool_item_header *)((char *)page + pp->pr_phoffset);
1.121 yamt 409: } else {
410: tmp.ph_page = page;
411: ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp);
412: }
413: }
1.3 pk 414:
1.121 yamt 415: KASSERT(ph == NULL || ((pp->pr_roflags & PR_PHINPAGE) != 0) ||
1.128 christos 416: ((char *)ph->ph_page <= (char *)v &&
417: (char *)v < (char *)ph->ph_page + pp->pr_alloc->pa_pagesz));
1.88 chs 418: return ph;
1.3 pk 419: }
420:
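/*
 * With PR_NOALIGN an item may point anywhere inside its page, so the
 * lookup above wants the header whose ph_page is the largest address
 * at or below v.  Because phtree_compare() sorts larger addresses as
 * "smaller", SPLAY_NEXT() after a failed exact match lands on exactly
 * that header.
 */
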
1.101 thorpej 421: static void
422: pr_pagelist_free(struct pool *pp, struct pool_pagelist *pq)
423: {
424: struct pool_item_header *ph;
425:
426: while ((ph = LIST_FIRST(pq)) != NULL) {
427: LIST_REMOVE(ph, ph_pagelist);
428: pool_allocator_free(pp, ph->ph_page);
1.128.2.2! ad 429: if ((pp->pr_roflags & PR_PHINPAGE) == 0)
1.101 thorpej 430: pool_put(pp->pr_phpool, ph);
431: }
432: }
433:
1.3 pk 434: /*
435: * Remove a page from the pool.
436: */
1.110 perry 437: static inline void
1.61 chs 438: pr_rmpage(struct pool *pp, struct pool_item_header *ph,
439: struct pool_pagelist *pq)
1.3 pk 440: {
441:
1.128.2.2! ad 442: KASSERT(mutex_owned(&pp->pr_lock));
1.91 yamt 443:
1.3 pk 444: /*
1.7 thorpej 445: * If the page was idle, decrement the idle page count.
1.3 pk 446: */
1.6 thorpej 447: if (ph->ph_nmissing == 0) {
448: #ifdef DIAGNOSTIC
449: if (pp->pr_nidle == 0)
450: panic("pr_rmpage: nidle inconsistent");
1.20 thorpej 451: if (pp->pr_nitems < pp->pr_itemsperpage)
452: panic("pr_rmpage: nitems inconsistent");
1.6 thorpej 453: #endif
454: pp->pr_nidle--;
455: }
1.7 thorpej 456:
1.20 thorpej 457: pp->pr_nitems -= pp->pr_itemsperpage;
458:
1.7 thorpej 459: /*
1.101 thorpej 460: * Unlink the page from the pool and queue it for release.
1.7 thorpej 461: */
1.88 chs 462: LIST_REMOVE(ph, ph_pagelist);
1.91 yamt 463: if ((pp->pr_roflags & PR_PHINPAGE) == 0)
464: SPLAY_REMOVE(phtree, &pp->pr_phtree, ph);
1.101 thorpej 465: LIST_INSERT_HEAD(pq, ph, ph_pagelist);
466:
1.7 thorpej 467: pp->pr_npages--;
468: pp->pr_npagefree++;
1.6 thorpej 469:
1.88 chs 470: pool_update_curpage(pp);
1.3 pk 471: }
472:
1.126 thorpej 473: static bool
1.117 yamt 474: pa_starved_p(struct pool_allocator *pa)
475: {
476:
477: if (pa->pa_backingmap != NULL) {
478: return vm_map_starved_p(pa->pa_backingmap);
479: }
1.127 thorpej 480: return false;
1.117 yamt 481: }
482:
483: static int
1.124 yamt 484: pool_reclaim_callback(struct callback_entry *ce, void *obj, void *arg)
1.117 yamt 485: {
486: struct pool *pp = obj;
487: struct pool_allocator *pa = pp->pr_alloc;
488:
489: KASSERT(&pp->pr_reclaimerentry == ce);
490: pool_reclaim(pp);
491: if (!pa_starved_p(pa)) {
492: return CALLBACK_CHAIN_ABORT;
493: }
494: return CALLBACK_CHAIN_CONTINUE;
495: }
496:
497: static void
498: pool_reclaim_register(struct pool *pp)
499: {
500: struct vm_map *map = pp->pr_alloc->pa_backingmap;
501: int s;
502:
503: if (map == NULL) {
504: return;
505: }
506:
507: s = splvm(); /* not necessary for INTRSAFE maps, but don't care. */
508: callback_register(&vm_map_to_kernel(map)->vmk_reclaim_callback,
509: &pp->pr_reclaimerentry, pp, pool_reclaim_callback);
510: splx(s);
511: }
512:
513: static void
514: pool_reclaim_unregister(struct pool *pp)
515: {
516: struct vm_map *map = pp->pr_alloc->pa_backingmap;
517: int s;
518:
519: if (map == NULL) {
520: return;
521: }
522:
523: s = splvm(); /* not necessary for INTRSAFE maps, but don't care. */
524: callback_unregister(&vm_map_to_kernel(map)->vmk_reclaim_callback,
525: &pp->pr_reclaimerentry);
526: splx(s);
527: }
528:
529: static void
530: pa_reclaim_register(struct pool_allocator *pa)
531: {
532: struct vm_map *map = *pa->pa_backingmapptr;
533: struct pool *pp;
534:
535: KASSERT(pa->pa_backingmap == NULL);
536: if (map == NULL) {
537: SLIST_INSERT_HEAD(&pa_deferinitq, pa, pa_q);
538: return;
539: }
540: pa->pa_backingmap = map;
541: TAILQ_FOREACH(pp, &pa->pa_list, pr_alloc_list) {
542: pool_reclaim_register(pp);
543: }
544: }
545:
1.3 pk 546: /*
1.94 simonb 547: * Initialize all the pools listed in the "pools" link set.
548: */
549: void
1.117 yamt 550: pool_subsystem_init(void)
1.94 simonb 551: {
1.117 yamt 552: struct pool_allocator *pa;
1.94 simonb 553: __link_set_decl(pools, struct link_pool_init);
554: struct link_pool_init * const *pi;
555:
1.128.2.2! ad 556: mutex_init(&pool_head_lock, MUTEX_DEFAULT, IPL_NONE);
! 557:
1.94 simonb 558: __link_set_foreach(pi, pools)
559: pool_init((*pi)->pp, (*pi)->size, (*pi)->align,
560: (*pi)->align_offset, (*pi)->flags, (*pi)->wchan,
1.128.2.1 ad 561: (*pi)->palloc, (*pi)->ipl);
1.117 yamt 562:
563: while ((pa = SLIST_FIRST(&pa_deferinitq)) != NULL) {
564: KASSERT(pa->pa_backingmapptr != NULL);
565: KASSERT(*pa->pa_backingmapptr != NULL);
566: SLIST_REMOVE_HEAD(&pa_deferinitq, pa_q);
567: pa_reclaim_register(pa);
568: }
1.94 simonb 569: }
570:
571: /*
1.3 pk 572: * Initialize the given pool resource structure.
573: *
574: * We export this routine to allow other kernel parts to declare
575: * static pools that must be initialized before malloc() is available.
576: */
577: void
1.42 thorpej 578: pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
1.128.2.1 ad 579: const char *wchan, struct pool_allocator *palloc, int ipl)
1.3 pk 580: {
1.116 simonb 581: #ifdef DEBUG
582: struct pool *pp1;
583: #endif
1.92 enami 584: size_t trysize, phsize;
1.128.2.2! ad 585: int off, slack;
1.3 pk 586:
1.99 yamt 587: KASSERT((1UL << (CHAR_BIT * sizeof(pool_item_freelist_t))) - 2 >=
588: PHPOOL_FREELIST_NELEM(PHPOOL_MAX - 1));
589:
1.116 simonb 590: #ifdef DEBUG
591: /*
592: * Check that the pool hasn't already been initialised and
593: * added to the list of all pools.
594: */
595: LIST_FOREACH(pp1, &pool_head, pr_poollist) {
596: if (pp == pp1)
597: panic("pool_init: pool %s already initialised",
598: wchan);
599: }
600: #endif
601:
1.25 thorpej 602: #ifdef POOL_DIAGNOSTIC
603: /*
604: * Always log if POOL_DIAGNOSTIC is defined.
605: */
606: if (pool_logsize != 0)
607: flags |= PR_LOGGING;
608: #endif
609:
1.66 thorpej 610: if (palloc == NULL)
611: palloc = &pool_allocator_kmem;
1.112 bjh21 612: #ifdef POOL_SUBPAGE
613: if (size > palloc->pa_pagesz) {
614: if (palloc == &pool_allocator_kmem)
615: palloc = &pool_allocator_kmem_fullpage;
616: else if (palloc == &pool_allocator_nointr)
617: palloc = &pool_allocator_nointr_fullpage;
618: }
1.66 thorpej 619: #endif /* POOL_SUBPAGE */
620: if ((palloc->pa_flags & PA_INITIALIZED) == 0) {
1.112 bjh21 621: if (palloc->pa_pagesz == 0)
1.66 thorpej 622: palloc->pa_pagesz = PAGE_SIZE;
623:
624: TAILQ_INIT(&palloc->pa_list);
625:
1.128.2.2! ad 626: mutex_init(&palloc->pa_lock, MUTEX_DRIVER, IPL_VM);
1.66 thorpej 627: palloc->pa_pagemask = ~(palloc->pa_pagesz - 1);
628: palloc->pa_pageshift = ffs(palloc->pa_pagesz) - 1;
1.117 yamt 629:
630: if (palloc->pa_backingmapptr != NULL) {
631: pa_reclaim_register(palloc);
632: }
1.66 thorpej 633: palloc->pa_flags |= PA_INITIALIZED;
1.4 thorpej 634: }
1.3 pk 635:
636: if (align == 0)
637: align = ALIGN(1);
1.14 thorpej 638:
1.120 yamt 639: if ((flags & PR_NOTOUCH) == 0 && size < sizeof(struct pool_item))
1.14 thorpej 640: size = sizeof(struct pool_item);
1.3 pk 641:
1.78 thorpej 642: size = roundup(size, align);
1.66 thorpej 643: #ifdef DIAGNOSTIC
644: if (size > palloc->pa_pagesz)
1.121 yamt 645: panic("pool_init: pool item size (%zu) too large", size);
1.66 thorpej 646: #endif
1.35 pk 647:
1.3 pk 648: /*
649: * Initialize the pool structure.
650: */
1.88 chs 651: LIST_INIT(&pp->pr_emptypages);
652: LIST_INIT(&pp->pr_fullpages);
653: LIST_INIT(&pp->pr_partpages);
1.102 chs 654: LIST_INIT(&pp->pr_cachelist);
1.3 pk 655: pp->pr_curpage = NULL;
656: pp->pr_npages = 0;
657: pp->pr_minitems = 0;
658: pp->pr_minpages = 0;
659: pp->pr_maxpages = UINT_MAX;
1.20 thorpej 660: pp->pr_roflags = flags;
661: pp->pr_flags = 0;
1.35 pk 662: pp->pr_size = size;
1.3 pk 663: pp->pr_align = align;
664: pp->pr_wchan = wchan;
1.66 thorpej 665: pp->pr_alloc = palloc;
1.20 thorpej 666: pp->pr_nitems = 0;
667: pp->pr_nout = 0;
668: pp->pr_hardlimit = UINT_MAX;
669: pp->pr_hardlimit_warning = NULL;
1.31 thorpej 670: pp->pr_hardlimit_ratecap.tv_sec = 0;
671: pp->pr_hardlimit_ratecap.tv_usec = 0;
672: pp->pr_hardlimit_warning_last.tv_sec = 0;
673: pp->pr_hardlimit_warning_last.tv_usec = 0;
1.68 thorpej 674: pp->pr_drain_hook = NULL;
675: pp->pr_drain_hook_arg = NULL;
1.125 ad 676: pp->pr_freecheck = NULL;
1.3 pk 677:
678: /*
679: * Decide whether to put the page header off-page, to avoid
1.92 enami 680: * wasting too large a part of the page on it for big items.
681: * Off-page page headers go into a splay tree, so we can match
682: * a returned item with its header based on the page address.
683: * We use 1/16 of the page size and about 8 times the header
684: * size as the threshold (XXX: tune).
685: *
686: * However, we'll put the header into the page if we can put
687: * it without wasting any items.
688: *
689: * Silently enforce `0 <= ioff < align'.
1.3 pk 690: */
1.92 enami 691: pp->pr_itemoffset = ioff %= align;
692: /* See the comment below about reserved bytes. */
693: trysize = palloc->pa_pagesz - ((align - ioff) % align);
694: phsize = ALIGN(sizeof(struct pool_item_header));
1.121 yamt 695: if ((pp->pr_roflags & (PR_NOTOUCH | PR_NOALIGN)) == 0 &&
1.97 yamt 696: (pp->pr_size < MIN(palloc->pa_pagesz / 16, phsize << 3) ||
697: trysize / pp->pr_size == (trysize - phsize) / pp->pr_size)) {
1.3 pk 698: /* Use the end of the page for the page header */
1.20 thorpej 699: pp->pr_roflags |= PR_PHINPAGE;
1.92 enami 700: pp->pr_phoffset = off = palloc->pa_pagesz - phsize;
1.2 pk 701: } else {
1.3 pk 702: /* The page header will be taken from our page header pool */
703: pp->pr_phoffset = 0;
1.66 thorpej 704: off = palloc->pa_pagesz;
1.88 chs 705: SPLAY_INIT(&pp->pr_phtree);
1.2 pk 706: }
1.1 pk 707:
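/*
 * Worked example (illustrative; assumes a 4096-byte page and a 48-byte
 * aligned header): a 96-byte item is below the 1/16-page threshold, so
 * the header shares the page.  A 1000-byte item fits 4 times whether or
 * not 48 bytes are reserved, so the header rides in the slack.  A
 * 2048-byte item would drop from 2 items per page to 1, so its header
 * is allocated from phpool instead.
 */
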
1.3 pk 708: /*
709: * Alignment is to take place at `ioff' within the item. This means
710: * we must reserve up to `align - 1' bytes on the page to allow
711: * appropriate positioning of each item.
712: */
713: pp->pr_itemsperpage = (off - ((align - ioff) % align)) / pp->pr_size;
1.43 thorpej 714: KASSERT(pp->pr_itemsperpage != 0);
1.97 yamt 715: if ((pp->pr_roflags & PR_NOTOUCH)) {
716: int idx;
717:
718: for (idx = 0; pp->pr_itemsperpage > PHPOOL_FREELIST_NELEM(idx);
719: idx++) {
720: /* nothing */
721: }
722: if (idx >= PHPOOL_MAX) {
723: /*
724: * if you see this panic, consider tweaking
725: * PHPOOL_MAX and PHPOOL_FREELIST_NELEM.
726: */
727: panic("%s: too large itemsperpage(%d) for PR_NOTOUCH",
728: pp->pr_wchan, pp->pr_itemsperpage);
729: }
730: pp->pr_phpool = &phpool[idx];
731: } else if ((pp->pr_roflags & PR_PHINPAGE) == 0) {
732: pp->pr_phpool = &phpool[0];
733: }
734: #if defined(DIAGNOSTIC)
735: else {
736: pp->pr_phpool = NULL;
737: }
738: #endif
1.3 pk 739:
740: /*
741: * Use the slack between the chunks and the page header
742: * for "cache coloring".
743: */
744: slack = off - pp->pr_itemsperpage * pp->pr_size;
745: pp->pr_maxcolor = (slack / align) * align;
746: pp->pr_curcolor = 0;
747:
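/*
 * Coloring example (illustrative): with 4096-byte pages, 720-byte items
 * and 16-byte alignment, 5 items leave slack = off - 5 * 720 bytes, so
 * successive pages start their items at offsets 0, 16, 32, ... up to
 * pr_maxcolor, spreading the items across different cache lines.
 */
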
748: pp->pr_nget = 0;
749: pp->pr_nfail = 0;
750: pp->pr_nput = 0;
751: pp->pr_npagealloc = 0;
752: pp->pr_npagefree = 0;
1.1 pk 753: pp->pr_hiwat = 0;
1.8 thorpej 754: pp->pr_nidle = 0;
1.3 pk 755:
1.59 thorpej 756: #ifdef POOL_DIAGNOSTIC
1.25 thorpej 757: if (flags & PR_LOGGING) {
758: if (kmem_map == NULL ||
759: (pp->pr_log = malloc(pool_logsize * sizeof(struct pool_log),
760: M_TEMP, M_NOWAIT)) == NULL)
1.20 thorpej 761: pp->pr_roflags &= ~PR_LOGGING;
1.3 pk 762: pp->pr_curlogentry = 0;
763: pp->pr_logsize = pool_logsize;
764: }
1.59 thorpej 765: #endif
1.25 thorpej 766:
767: pp->pr_entered_file = NULL;
768: pp->pr_entered_line = 0;
1.3 pk 769:
1.128.2.2! ad 770: mutex_init(&pp->pr_lock, MUTEX_DRIVER, ipl);
! 771: cv_init(&pp->pr_cv, wchan);
! 772: pp->pr_ipl = ipl;
! 773:
! 774: if (strcmp(wchan, "kmem-52") == 0) {
! 775: printf("kmem-52 initted, mutex @ %p\n", &pp->pr_lock);
! 776: printf("=> %x %x %x %x\n",
! 777: ((uint32_t *)&pp->pr_lock)[0],
! 778: ((uint32_t *)&pp->pr_lock)[1],
! 779: ((uint32_t *)&pp->pr_lock)[2],
! 780: ((uint32_t *)&pp->pr_lock)[3]);
! 781: }
1.1 pk 782:
1.3 pk 783: /*
1.43 thorpej 784: * Initialize private page header pool and cache magazine pool if we
785: * haven't done so yet.
1.23 thorpej 786: * XXX LOCKING.
1.3 pk 787: */
1.97 yamt 788: if (phpool[0].pr_size == 0) {
789: int idx;
790: for (idx = 0; idx < PHPOOL_MAX; idx++) {
791: static char phpool_names[PHPOOL_MAX][6+1+6+1];
792: int nelem;
793: size_t sz;
794:
795: nelem = PHPOOL_FREELIST_NELEM(idx);
796: snprintf(phpool_names[idx], sizeof(phpool_names[idx]),
797: "phpool-%d", nelem);
798: sz = sizeof(struct pool_item_header);
799: if (nelem) {
800: sz = PR_FREELIST_ALIGN(sz)
1.99 yamt 801: + nelem * sizeof(pool_item_freelist_t);
1.97 yamt 802: }
803: pool_init(&phpool[idx], sz, 0, 0, 0,
1.128.2.1 ad 804: phpool_names[idx], &pool_allocator_meta, IPL_VM);
1.97 yamt 805: }
1.62 bjh21 806: #ifdef POOL_SUBPAGE
807: pool_init(&psppool, POOL_SUBPAGE, POOL_SUBPAGE, 0,
1.128.2.1 ad 808: PR_RECURSIVE, "psppool", &pool_allocator_meta, IPL_VM);
1.62 bjh21 809: #endif
1.43 thorpej 810: pool_init(&pcgpool, sizeof(struct pool_cache_group), 0, 0,
1.128.2.1 ad 811: 0, "pcgpool", &pool_allocator_meta, IPL_VM);
1.1 pk 812: }
813:
1.128.2.2! ad 814: if (__predict_true(!cold)) {
! 815: /* Insert into the list of all pools. */
! 816: mutex_enter(&pool_head_lock);
! 817: LIST_INSERT_HEAD(&pool_head, pp, pr_poollist);
! 818: mutex_exit(&pool_head_lock);
! 819:
! 820: /* Insert this into the list of pools using this allocator. */
! 821: mutex_enter(&palloc->pa_lock);
! 822: TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list);
! 823: mutex_exit(&palloc->pa_lock);
! 824: } else {
! 825: LIST_INSERT_HEAD(&pool_head, pp, pr_poollist);
! 826: TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list);
! 827: }
! 828:
1.117 yamt 829: pool_reclaim_register(pp);
1.1 pk 830: }
831:
832: /*
833: * De-commission a pool resource.
834: */
835: void
1.42 thorpej 836: pool_destroy(struct pool *pp)
1.1 pk 837: {
1.101 thorpej 838: struct pool_pagelist pq;
1.3 pk 839: struct pool_item_header *ph;
1.43 thorpej 840:
1.101 thorpej 841: /* Remove from global pool list */
1.128.2.2! ad 842: mutex_enter(&pool_head_lock);
1.102 chs 843: LIST_REMOVE(pp, pr_poollist);
1.101 thorpej 844: if (drainpp == pp)
845: drainpp = NULL;
1.128.2.2! ad 846: mutex_exit(&pool_head_lock);
1.101 thorpej 847:
848: /* Remove this pool from its allocator's list of pools. */
1.117 yamt 849: pool_reclaim_unregister(pp);
1.128.2.2! ad 850: mutex_enter(&pp->pr_alloc->pa_lock);
1.66 thorpej 851: TAILQ_REMOVE(&pp->pr_alloc->pa_list, pp, pr_alloc_list);
1.128.2.2! ad 852: mutex_exit(&pp->pr_alloc->pa_lock);
1.66 thorpej 853:
1.128.2.2! ad 854: mutex_enter(&pp->pr_lock);
1.101 thorpej 855:
1.102 chs 856: KASSERT(LIST_EMPTY(&pp->pr_cachelist));
1.3 pk 857:
858: #ifdef DIAGNOSTIC
1.20 thorpej 859: if (pp->pr_nout != 0) {
1.25 thorpej 860: pr_printlog(pp, NULL, printf);
1.80 provos 861: panic("pool_destroy: pool busy: still out: %u",
1.20 thorpej 862: pp->pr_nout);
1.3 pk 863: }
864: #endif
1.1 pk 865:
1.101 thorpej 866: KASSERT(LIST_EMPTY(&pp->pr_fullpages));
867: KASSERT(LIST_EMPTY(&pp->pr_partpages));
868:
1.3 pk 869: /* Remove all pages */
1.101 thorpej 870: LIST_INIT(&pq);
1.88 chs 871: while ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
1.101 thorpej 872: pr_rmpage(pp, ph, &pq);
873:
1.128.2.2! ad 874: mutex_exit(&pp->pr_lock);
1.3 pk 875:
1.101 thorpej 876: pr_pagelist_free(pp, &pq);
1.3 pk 877:
1.59 thorpej 878: #ifdef POOL_DIAGNOSTIC
1.20 thorpej 879: if ((pp->pr_roflags & PR_LOGGING) != 0)
1.3 pk 880: free(pp->pr_log, M_TEMP);
1.59 thorpej 881: #endif
1.128.2.2! ad 882:
! 883: cv_destroy(&pp->pr_cv);
! 884: mutex_destroy(&pp->pr_lock);
1.1 pk 885: }
886:
1.68 thorpej 887: void
888: pool_set_drain_hook(struct pool *pp, void (*fn)(void *, int), void *arg)
889: {
890:
891: /* XXX no locking -- must be used just after pool_init() */
892: #ifdef DIAGNOSTIC
893: if (pp->pr_drain_hook != NULL)
894: panic("pool_set_drain_hook(%s): already set", pp->pr_wchan);
895: #endif
896: pp->pr_drain_hook = fn;
897: pp->pr_drain_hook_arg = arg;
898: }
899:
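/*
 * Example (sketch): registering a drain hook so a subsystem holding
 * private caches of pool items can release them when the pool is under
 * pressure.  "foo_drain" and "foo_setup" are hypothetical.
 */
#if 0
static void
foo_drain(void *arg, int flags)
{
	/* return privately cached foo objects to foo_pool here */
}

static void
foo_setup(void)
{

	pool_set_drain_hook(&foo_pool, foo_drain, NULL);
}
#endif
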
1.88 chs 900: static struct pool_item_header *
1.128 christos 901: pool_alloc_item_header(struct pool *pp, void *storage, int flags)
1.55 thorpej 902: {
903: struct pool_item_header *ph;
904:
905: if ((pp->pr_roflags & PR_PHINPAGE) != 0)
1.128 christos 906: ph = (struct pool_item_header *) ((char *)storage + pp->pr_phoffset);
1.128.2.2! ad 907: else
1.97 yamt 908: ph = pool_get(pp->pr_phpool, flags);
1.55 thorpej 909:
910: return (ph);
911: }
1.1 pk 912:
913: /*
1.3 pk 914: * Grab an item from the pool; must be called at appropriate spl level
1.1 pk 915: */
1.3 pk 916: void *
1.59 thorpej 917: #ifdef POOL_DIAGNOSTIC
1.42 thorpej 918: _pool_get(struct pool *pp, int flags, const char *file, long line)
1.56 sommerfe 919: #else
920: pool_get(struct pool *pp, int flags)
921: #endif
1.1 pk 922: {
923: struct pool_item *pi;
1.3 pk 924: struct pool_item_header *ph;
1.55 thorpej 925: void *v;
1.1 pk 926:
1.2 pk 927: #ifdef DIAGNOSTIC
1.95 atatat 928: if (__predict_false(pp->pr_itemsperpage == 0))
929: panic("pool_get: pool %p: pr_itemsperpage is zero, "
930: "pool not initialized?", pp);
1.84 thorpej 931: if (__predict_false(curlwp == NULL && doing_shutdown == 0 &&
1.37 sommerfe 932: (flags & PR_WAITOK) != 0))
1.77 matt 933: panic("pool_get: %s: must have NOWAIT", pp->pr_wchan);
1.58 thorpej 934:
1.102 chs 935: #endif /* DIAGNOSTIC */
1.58 thorpej 936: #ifdef LOCKDEBUG
937: if (flags & PR_WAITOK)
1.119 yamt 938: ASSERT_SLEEPABLE(NULL, "pool_get(PR_WAITOK)");
1.56 sommerfe 939: #endif
1.1 pk 940:
1.128.2.2! ad 941: mutex_enter(&pp->pr_lock);
1.25 thorpej 942: pr_enter(pp, file, line);
1.20 thorpej 943:
944: startover:
945: /*
946: * Check to see if we've reached the hard limit. If we have,
947: * and we can wait, then wait until an item has been returned to
948: * the pool.
949: */
950: #ifdef DIAGNOSTIC
1.34 thorpej 951: if (__predict_false(pp->pr_nout > pp->pr_hardlimit)) {
1.25 thorpej 952: pr_leave(pp);
1.128.2.2! ad 953: mutex_exit(&pp->pr_lock);
1.20 thorpej 954: panic("pool_get: %s: crossed hard limit", pp->pr_wchan);
955: }
956: #endif
1.34 thorpej 957: if (__predict_false(pp->pr_nout == pp->pr_hardlimit)) {
1.68 thorpej 958: if (pp->pr_drain_hook != NULL) {
959: /*
960: * Since the drain hook is going to free things
961: * back to the pool, unlock, call the hook, re-lock,
962: * and check the hardlimit condition again.
963: */
964: pr_leave(pp);
1.128.2.2! ad 965: mutex_exit(&pp->pr_lock);
1.68 thorpej 966: (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags);
1.128.2.2! ad 967: mutex_enter(&pp->pr_lock);
1.68 thorpej 968: pr_enter(pp, file, line);
969: if (pp->pr_nout < pp->pr_hardlimit)
970: goto startover;
971: }
972:
1.29 sommerfe 973: if ((flags & PR_WAITOK) && !(flags & PR_LIMITFAIL)) {
1.20 thorpej 974: /*
975: * XXX: A warning isn't logged in this case. Should
976: * it be?
977: */
978: pp->pr_flags |= PR_WANTED;
1.25 thorpej 979: pr_leave(pp);
1.128.2.2! ad 980: cv_wait(&pp->pr_cv, &pp->pr_lock);
1.25 thorpej 981: pr_enter(pp, file, line);
1.20 thorpej 982: goto startover;
983: }
1.31 thorpej 984:
985: /*
986: * Log a message that the hard limit has been hit.
987: */
988: if (pp->pr_hardlimit_warning != NULL &&
989: ratecheck(&pp->pr_hardlimit_warning_last,
990: &pp->pr_hardlimit_ratecap))
991: log(LOG_ERR, "%s\n", pp->pr_hardlimit_warning);
1.21 thorpej 992:
993: pp->pr_nfail++;
994:
1.25 thorpej 995: pr_leave(pp);
1.128.2.2! ad 996: mutex_exit(&pp->pr_lock);
1.20 thorpej 997: return (NULL);
998: }
999:
1.3 pk 1000: /*
1001: * The convention we use is that if `curpage' is not NULL, then
1002: * it points at a non-empty bucket. In particular, `curpage'
1003: * never points at a page header which has PR_PHINPAGE set and
1004: * has no items in its bucket.
1005: */
1.20 thorpej 1006: if ((ph = pp->pr_curpage) == NULL) {
1.113 yamt 1007: int error;
1008:
1.20 thorpej 1009: #ifdef DIAGNOSTIC
1010: if (pp->pr_nitems != 0) {
1.128.2.2! ad 1011: mutex_exit(&pp->pr_lock);
1.20 thorpej 1012: printf("pool_get: %s: curpage NULL, nitems %u\n",
1013: pp->pr_wchan, pp->pr_nitems);
1.80 provos 1014: panic("pool_get: nitems inconsistent");
1.20 thorpej 1015: }
1016: #endif
1017:
1.21 thorpej 1018: /*
1019: * Call the back-end page allocator for more memory.
1020: * Release the pool lock, as the back-end page allocator
1021: * may block.
1022: */
1.25 thorpej 1023: pr_leave(pp);
1.113 yamt 1024: error = pool_grow(pp, flags);
1025: pr_enter(pp, file, line);
1026: if (error != 0) {
1.21 thorpej 1027: /*
1.55 thorpej 1028: * We were unable to allocate a page or item
1029: * header, but we released the lock during
1030: * allocation, so perhaps items were freed
1031: * back to the pool. Check for this case.
1.21 thorpej 1032: */
1033: if (pp->pr_curpage != NULL)
1034: goto startover;
1.15 pk 1035:
1.117 yamt 1036: pp->pr_nfail++;
1.25 thorpej 1037: pr_leave(pp);
1.128.2.2! ad 1038: mutex_exit(&pp->pr_lock);
1.117 yamt 1039: return (NULL);
1.1 pk 1040: }
1.3 pk 1041:
1.20 thorpej 1042: /* Start the allocation process over. */
1043: goto startover;
1.3 pk 1044: }
1.97 yamt 1045: if (pp->pr_roflags & PR_NOTOUCH) {
1046: #ifdef DIAGNOSTIC
1047: if (__predict_false(ph->ph_nmissing == pp->pr_itemsperpage)) {
1048: pr_leave(pp);
1.128.2.2! ad 1049: mutex_exit(&pp->pr_lock);
1.97 yamt 1050: panic("pool_get: %s: page empty", pp->pr_wchan);
1051: }
1052: #endif
1053: v = pr_item_notouch_get(pp, ph);
1054: #ifdef POOL_DIAGNOSTIC
1055: pr_log(pp, v, PRLOG_GET, file, line);
1056: #endif
1057: } else {
1.102 chs 1058: v = pi = LIST_FIRST(&ph->ph_itemlist);
1.97 yamt 1059: if (__predict_false(v == NULL)) {
1060: pr_leave(pp);
1.128.2.2! ad 1061: mutex_exit(&pp->pr_lock);
1.97 yamt 1062: panic("pool_get: %s: page empty", pp->pr_wchan);
1063: }
1.20 thorpej 1064: #ifdef DIAGNOSTIC
1.97 yamt 1065: if (__predict_false(pp->pr_nitems == 0)) {
1066: pr_leave(pp);
1.128.2.2! ad 1067: mutex_exit(&pp->pr_lock);
1.97 yamt 1068: printf("pool_get: %s: items on itemlist, nitems %u\n",
1069: pp->pr_wchan, pp->pr_nitems);
1070: panic("pool_get: nitems inconsistent");
1071: }
1.65 enami 1072: #endif
1.56 sommerfe 1073:
1.65 enami 1074: #ifdef POOL_DIAGNOSTIC
1.97 yamt 1075: pr_log(pp, v, PRLOG_GET, file, line);
1.65 enami 1076: #endif
1.3 pk 1077:
1.65 enami 1078: #ifdef DIAGNOSTIC
1.97 yamt 1079: if (__predict_false(pi->pi_magic != PI_MAGIC)) {
1080: pr_printlog(pp, pi, printf);
1081: panic("pool_get(%s): free list modified: "
1082: "magic=%x; page %p; item addr %p\n",
1083: pp->pr_wchan, pi->pi_magic, ph->ph_page, pi);
1084: }
1.3 pk 1085: #endif
1086:
1.97 yamt 1087: /*
1088: * Remove from item list.
1089: */
1.102 chs 1090: LIST_REMOVE(pi, pi_list);
1.97 yamt 1091: }
1.20 thorpej 1092: pp->pr_nitems--;
1093: pp->pr_nout++;
1.6 thorpej 1094: if (ph->ph_nmissing == 0) {
1095: #ifdef DIAGNOSTIC
1.34 thorpej 1096: if (__predict_false(pp->pr_nidle == 0))
1.6 thorpej 1097: panic("pool_get: nidle inconsistent");
1098: #endif
1099: pp->pr_nidle--;
1.88 chs 1100:
1101: /*
1102: * This page was previously empty. Move it to the list of
1103: * partially-full pages. This page is already curpage.
1104: */
1105: LIST_REMOVE(ph, ph_pagelist);
1106: LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
1.6 thorpej 1107: }
1.3 pk 1108: ph->ph_nmissing++;
1.97 yamt 1109: if (ph->ph_nmissing == pp->pr_itemsperpage) {
1.21 thorpej 1110: #ifdef DIAGNOSTIC
1.97 yamt 1111: if (__predict_false((pp->pr_roflags & PR_NOTOUCH) == 0 &&
1.102 chs 1112: !LIST_EMPTY(&ph->ph_itemlist))) {
1.25 thorpej 1113: pr_leave(pp);
1.128.2.2! ad 1114: mutex_exit(&pp->pr_lock);
1.21 thorpej 1115: panic("pool_get: %s: nmissing inconsistent",
1116: pp->pr_wchan);
1117: }
1118: #endif
1.3 pk 1119: /*
1.88 chs 1120: * This page is now full. Move it to the full list
1121: * and select a new current page.
1.3 pk 1122: */
1.88 chs 1123: LIST_REMOVE(ph, ph_pagelist);
1124: LIST_INSERT_HEAD(&pp->pr_fullpages, ph, ph_pagelist);
1125: pool_update_curpage(pp);
1.1 pk 1126: }
1.3 pk 1127:
1128: pp->pr_nget++;
1.111 christos 1129: pr_leave(pp);
1.20 thorpej 1130:
1131: /*
1132: * If we have a low water mark and we are now below that low
1133: * water mark, add more items to the pool.
1134: */
1.53 thorpej 1135: if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
1.20 thorpej 1136: /*
1137: * XXX: Should we log a warning? Should we set up a timeout
1138: * to try again in a second or so? The latter could break
1139: * a caller's assumptions about interrupt protection, etc.
1140: */
1141: }
1142:
1.128.2.2! ad 1143: mutex_exit(&pp->pr_lock);
1.125 ad 1144: KASSERT((((vaddr_t)v + pp->pr_itemoffset) & (pp->pr_align - 1)) == 0);
1145: FREECHECK_OUT(&pp->pr_freecheck, v);
1.1 pk 1146: return (v);
1147: }
1148:
1149: /*
1.43 thorpej 1150: * Internal version of pool_put(). Pool is already locked/entered.
1.1 pk 1151: */
1.43 thorpej 1152: static void
1.101 thorpej 1153: pool_do_put(struct pool *pp, void *v, struct pool_pagelist *pq)
1.1 pk 1154: {
1155: struct pool_item *pi = v;
1.3 pk 1156: struct pool_item_header *ph;
1157:
1.128.2.2! ad 1158: KASSERT(mutex_owned(&pp->pr_lock));
1.125 ad 1159: FREECHECK_IN(&pp->pr_freecheck, v);
1.61 chs 1160:
1.30 thorpej 1161: #ifdef DIAGNOSTIC
1.34 thorpej 1162: if (__predict_false(pp->pr_nout == 0)) {
1.30 thorpej 1163: printf("pool %s: putting with none out\n",
1164: pp->pr_wchan);
1165: panic("pool_put");
1166: }
1167: #endif
1.3 pk 1168:
1.121 yamt 1169: if (__predict_false((ph = pr_find_pagehead(pp, v)) == NULL)) {
1.25 thorpej 1170: pr_printlog(pp, NULL, printf);
1.3 pk 1171: panic("pool_put: %s: page header missing", pp->pr_wchan);
1172: }
1.28 thorpej 1173:
1174: #ifdef LOCKDEBUG
1175: /*
1176: * Check if we're freeing a locked simple lock.
1177: */
1.128 christos 1178: simple_lock_freecheck(pi, (char *)pi + pp->pr_size);
1.28 thorpej 1179: #endif
1.3 pk 1180:
1181: /*
1182: * Return to item list.
1183: */
1.97 yamt 1184: if (pp->pr_roflags & PR_NOTOUCH) {
1185: pr_item_notouch_put(pp, ph, v);
1186: } else {
1.2 pk 1187: #ifdef DIAGNOSTIC
1.97 yamt 1188: pi->pi_magic = PI_MAGIC;
1.3 pk 1189: #endif
1.32 chs 1190: #ifdef DEBUG
1.97 yamt 1191: {
1192: int i, *ip = v;
1.32 chs 1193:
1.97 yamt 1194: for (i = 0; i < pp->pr_size / sizeof(int); i++) {
1195: *ip++ = PI_MAGIC;
1196: }
1.32 chs 1197: }
1198: #endif
1199:
1.102 chs 1200: LIST_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
1.97 yamt 1201: }
1.79 thorpej 1202: KDASSERT(ph->ph_nmissing != 0);
1.3 pk 1203: ph->ph_nmissing--;
1204: pp->pr_nput++;
1.20 thorpej 1205: pp->pr_nitems++;
1206: pp->pr_nout--;
1.3 pk 1207:
1208: /* Cancel "pool empty" condition if it exists */
1209: if (pp->pr_curpage == NULL)
1210: pp->pr_curpage = ph;
1211:
1212: if (pp->pr_flags & PR_WANTED) {
1213: pp->pr_flags &= ~PR_WANTED;
1.15 pk 1214: if (ph->ph_nmissing == 0)
1215: pp->pr_nidle++;
1.128 christos 1216: wakeup((void *)pp);
1.3 pk 1217: return;
1218: }
1219:
1220: /*
1.88 chs 1221: * If this page is now empty, do one of two things:
1.21 thorpej 1222: *
1.88 chs 1223: * (1) If we have more pages than the page high water mark,
1.96 thorpej 1224: * free the page back to the system. ONLY CONSIDER
1.90 thorpej 1225: * FREEING BACK A PAGE IF WE HAVE MORE THAN OUR MINIMUM PAGE
1226: * CLAIM.
1.21 thorpej 1227: *
1.88 chs 1228: * (2) Otherwise, move the page to the empty page list.
1229: *
1230: * Either way, select a new current page (so we use a partially-full
1231: * page if one is available).
1.3 pk 1232: */
1233: if (ph->ph_nmissing == 0) {
1.6 thorpej 1234: pp->pr_nidle++;
1.90 thorpej 1235: if (pp->pr_npages > pp->pr_minpages &&
1236: (pp->pr_npages > pp->pr_maxpages ||
1.117 yamt 1237: pa_starved_p(pp->pr_alloc))) {
1.101 thorpej 1238: pr_rmpage(pp, ph, pq);
1.3 pk 1239: } else {
1.88 chs 1240: LIST_REMOVE(ph, ph_pagelist);
1241: LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);
1.3 pk 1242:
1.21 thorpej 1243: /*
1244: * Update the timestamp on the page. A page must
1245: * be idle for some period of time before it can
1246: * be reclaimed by the pagedaemon. This minimizes
1247: * ping-pong'ing for memory.
1248: */
1.118 kardel 1249: getmicrotime(&ph->ph_time);
1.1 pk 1250: }
1.88 chs 1251: pool_update_curpage(pp);
1.1 pk 1252: }
1.88 chs 1253:
1.21 thorpej 1254: /*
1.88 chs 1255: * If the page was previously completely full, move it to the
1256: * partially-full list and make it the current page. The next
1257: * allocation will get the item from this page, instead of
1258: * further fragmenting the pool.
1.21 thorpej 1259: */
1260: else if (ph->ph_nmissing == (pp->pr_itemsperpage - 1)) {
1.88 chs 1261: LIST_REMOVE(ph, ph_pagelist);
1262: LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
1.21 thorpej 1263: pp->pr_curpage = ph;
1264: }
1.43 thorpej 1265: }
1266:
1267: /*
1268: * Return resource to the pool; must be called at appropriate spl level
1269: */
1.59 thorpej 1270: #ifdef POOL_DIAGNOSTIC
1.43 thorpej 1271: void
1272: _pool_put(struct pool *pp, void *v, const char *file, long line)
1273: {
1.101 thorpej 1274: struct pool_pagelist pq;
1275:
1276: LIST_INIT(&pq);
1.43 thorpej 1277:
1.128.2.2! ad 1278: mutex_enter(&pp->pr_lock);
1.43 thorpej 1279: pr_enter(pp, file, line);
1280:
1.56 sommerfe 1281: pr_log(pp, v, PRLOG_PUT, file, line);
1282:
1.101 thorpej 1283: pool_do_put(pp, v, &pq);
1.21 thorpej 1284:
1.25 thorpej 1285: pr_leave(pp);
1.128.2.2! ad 1286: mutex_exit(&pp->pr_lock);
1.101 thorpej 1287:
1.102 chs 1288: pr_pagelist_free(pp, &pq);
1.1 pk 1289: }
1.57 sommerfe 1290: #undef pool_put
1.59 thorpej 1291: #endif /* POOL_DIAGNOSTIC */
1.1 pk 1292:
1.56 sommerfe 1293: void
1294: pool_put(struct pool *pp, void *v)
1295: {
1.101 thorpej 1296: struct pool_pagelist pq;
1297:
1298: LIST_INIT(&pq);
1.56 sommerfe 1299:
1.128.2.2! ad 1300: mutex_enter(&pp->pr_lock);
1.101 thorpej 1301: pool_do_put(pp, v, &pq);
1.128.2.2! ad 1302: mutex_exit(&pp->pr_lock);
1.56 sommerfe 1303:
1.102 chs 1304: pr_pagelist_free(pp, &pq);
1.56 sommerfe 1305: }
1.57 sommerfe 1306:
1.59 thorpej 1307: #ifdef POOL_DIAGNOSTIC
1.57 sommerfe 1308: #define pool_put(h, v) _pool_put((h), (v), __FILE__, __LINE__)
1.56 sommerfe 1309: #endif
1.74 thorpej 1310:
1311: /*
1.113 yamt 1312: * pool_grow: grow a pool by a page.
1313: *
1314: * => called with pool locked.
1315: * => unlock and relock the pool.
1316: * => return with pool locked.
1317: */
1318:
1319: static int
1320: pool_grow(struct pool *pp, int flags)
1321: {
1322: struct pool_item_header *ph = NULL;
1323: char *cp;
1324:
1.128.2.2! ad 1325: mutex_exit(&pp->pr_lock);
1.113 yamt 1326: cp = pool_allocator_alloc(pp, flags);
1327: if (__predict_true(cp != NULL)) {
1328: ph = pool_alloc_item_header(pp, cp, flags);
1329: }
1330: if (__predict_false(cp == NULL || ph == NULL)) {
1331: if (cp != NULL) {
1332: pool_allocator_free(pp, cp);
1333: }
1.128.2.2! ad 1334: mutex_enter(&pp->pr_lock);
1.113 yamt 1335: return ENOMEM;
1336: }
1337:
1.128.2.2! ad 1338: mutex_enter(&pp->pr_lock);
1.113 yamt 1339: pool_prime_page(pp, cp, ph);
1340: pp->pr_npagealloc++;
1341: return 0;
1342: }
1343:
1344: /*
1.74 thorpej 1345: * Add N items to the pool.
1346: */
1347: int
1348: pool_prime(struct pool *pp, int n)
1349: {
1.75 simonb 1350: int newpages;
1.113 yamt 1351: int error = 0;
1.74 thorpej 1352:
1.128.2.2! ad 1353: mutex_enter(&pp->pr_lock);
1.74 thorpej 1354:
1355: newpages = roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1356:
1357: while (newpages-- > 0) {
1.113 yamt 1358: error = pool_grow(pp, PR_NOWAIT);
1359: if (error) {
1.74 thorpej 1360: break;
1361: }
1362: pp->pr_minpages++;
1363: }
1364:
1365: if (pp->pr_minpages >= pp->pr_maxpages)
1366: pp->pr_maxpages = pp->pr_minpages + 1; /* XXX */
1367:
1.128.2.2! ad 1368: mutex_exit(&pp->pr_lock);
1.113 yamt 1369: return error;
1.74 thorpej 1370: }
1.55 thorpej 1371:
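/*
 * pool_prime() example (illustrative): priming 100 items into a pool
 * holding 36 items per page allocates roundup(100, 36) / 36 = 3 pages,
 * i.e. 108 items, and raises pr_minpages accordingly.
 */
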
1372: /*
1.3 pk 1373: * Add a page worth of items to the pool.
1.21 thorpej 1374: *
1375: * Note, we must be called with the pool descriptor LOCKED.
1.3 pk 1376: */
1.55 thorpej 1377: static void
1.128 christos 1378: pool_prime_page(struct pool *pp, void *storage, struct pool_item_header *ph)
1.3 pk 1379: {
1380: struct pool_item *pi;
1.128 christos 1381: void *cp = storage;
1.125 ad 1382: const unsigned int align = pp->pr_align;
1383: const unsigned int ioff = pp->pr_itemoffset;
1.55 thorpej 1384: int n;
1.36 pk 1385:
1.128.2.2! ad 1386: KASSERT(mutex_owned(&pp->pr_lock));
1.91 yamt 1387:
1.66 thorpej 1388: #ifdef DIAGNOSTIC
1.121 yamt 1389: if ((pp->pr_roflags & PR_NOALIGN) == 0 &&
1390: ((uintptr_t)cp & (pp->pr_alloc->pa_pagesz - 1)) != 0)
1.36 pk 1391: panic("pool_prime_page: %s: unaligned page", pp->pr_wchan);
1.66 thorpej 1392: #endif
1.3 pk 1393:
1394: /*
1395: * Insert page header.
1396: */
1.88 chs 1397: LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);
1.102 chs 1398: LIST_INIT(&ph->ph_itemlist);
1.3 pk 1399: ph->ph_page = storage;
1400: ph->ph_nmissing = 0;
1.118 kardel 1401: getmicrotime(&ph->ph_time);
1.88 chs 1402: if ((pp->pr_roflags & PR_PHINPAGE) == 0)
1403: SPLAY_INSERT(phtree, &pp->pr_phtree, ph);
1.3 pk 1404:
1.6 thorpej 1405: pp->pr_nidle++;
1406:
1.3 pk 1407: /*
1408: * Color this page.
1409: */
1.128 christos 1410: cp = (char *)cp + pp->pr_curcolor;
1.3 pk 1411: if ((pp->pr_curcolor += align) > pp->pr_maxcolor)
1412: pp->pr_curcolor = 0;
1413:
1414: /*
1415: * Adjust storage to apply alignment to `pr_itemoffset' in each item.
1416: */
1417: if (ioff != 0)
1.128 christos 1418: cp = (char *)cp + align - ioff;
1.3 pk 1419:
1.125 ad 1420: KASSERT((((vaddr_t)cp + ioff) & (align - 1)) == 0);
1421:
1.3 pk 1422: /*
1423: * Insert remaining chunks on the bucket list.
1424: */
1425: n = pp->pr_itemsperpage;
1.20 thorpej 1426: pp->pr_nitems += n;
1.3 pk 1427:
1.97 yamt 1428: if (pp->pr_roflags & PR_NOTOUCH) {
1.99 yamt 1429: pool_item_freelist_t *freelist = PR_FREELIST(ph);
1.97 yamt 1430: int i;
1431:
1.128 christos 1432: ph->ph_off = (char *)cp - (char *)storage;
1.97 yamt 1433: ph->ph_firstfree = 0;
1434: for (i = 0; i < n - 1; i++)
1435: freelist[i] = i + 1;
1436: freelist[n - 1] = PR_INDEX_EOL;
1437: } else {
1438: while (n--) {
1439: pi = (struct pool_item *)cp;
1.78 thorpej 1440:
1.97 yamt 1441: KASSERT(((((vaddr_t)pi) + ioff) & (align - 1)) == 0);
1.3 pk 1442:
1.97 yamt 1443: /* Insert on page list */
1.102 chs 1444: LIST_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
1.3 pk 1445: #ifdef DIAGNOSTIC
1.97 yamt 1446: pi->pi_magic = PI_MAGIC;
1.3 pk 1447: #endif
1.128 christos 1448: cp = (char *)cp + pp->pr_size;
1.125 ad 1449:
1450: KASSERT((((vaddr_t)cp + ioff) & (align - 1)) == 0);
1.97 yamt 1451: }
1.3 pk 1452: }
1453:
1454: /*
1455: * If the pool was depleted, point at the new page.
1456: */
1457: if (pp->pr_curpage == NULL)
1458: pp->pr_curpage = ph;
1459:
1460: if (++pp->pr_npages > pp->pr_hiwat)
1461: pp->pr_hiwat = pp->pr_npages;
1462: }
1463:
1.20 thorpej 1464: /*
1.52 thorpej 1465: * Used by pool_get() when nitems drops below the low water mark, to
1.88 chs 1466: * catch pr_nitems back up to the low water mark.
1.20 thorpej 1467: *
1.21 thorpej 1468: * Note 1, we never wait for memory here, we let the caller decide what to do.
1.20 thorpej 1469: *
1.73 thorpej 1470: * Note 2, we must be called with the pool already locked, and we return
1.20 thorpej 1471: * with it locked.
1472: */
1473: static int
1.42 thorpej 1474: pool_catchup(struct pool *pp)
1.20 thorpej 1475: {
1476: int error = 0;
1477:
1.54 thorpej 1478: while (POOL_NEEDS_CATCHUP(pp)) {
1.113 yamt 1479: error = pool_grow(pp, PR_NOWAIT);
1480: if (error) {
1.20 thorpej 1481: break;
1482: }
1483: }
1.113 yamt 1484: return error;
1.20 thorpej 1485: }
1486:
1.88 chs 1487: static void
1488: pool_update_curpage(struct pool *pp)
1489: {
1490:
1491: pp->pr_curpage = LIST_FIRST(&pp->pr_partpages);
1492: if (pp->pr_curpage == NULL) {
1493: pp->pr_curpage = LIST_FIRST(&pp->pr_emptypages);
1494: }
1495: }
1496:
1.3 pk 1497: void
1.42 thorpej 1498: pool_setlowat(struct pool *pp, int n)
1.3 pk 1499: {
1.15 pk 1500:
1.128.2.2! ad 1501: mutex_enter(&pp->pr_lock);
1.21 thorpej 1502:
1.3 pk 1503: pp->pr_minitems = n;
1.15 pk 1504: pp->pr_minpages = (n == 0)
1505: ? 0
1.18 thorpej 1506: : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1.20 thorpej 1507:
1508: /* Make sure we're caught up with the newly-set low water mark. */
1.75 simonb 1509: if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
1.20 thorpej 1510: /*
1511: * XXX: Should we log a warning? Should we set up a timeout
1512: * to try again in a second or so? The latter could break
1513: * a caller's assumptions about interrupt protection, etc.
1514: */
1515: }
1.21 thorpej 1516:
1.128.2.2! ad 1517: mutex_exit(&pp->pr_lock);
1.3 pk 1518: }
1519:
1520: void
1.42 thorpej 1521: pool_sethiwat(struct pool *pp, int n)
1.3 pk 1522: {
1.15 pk 1523:
1.128.2.2! ad 1524: mutex_enter(&pp->pr_lock);
1.21 thorpej 1525:
1.15 pk 1526: pp->pr_maxpages = (n == 0)
1527: ? 0
1.18 thorpej 1528: : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1.21 thorpej 1529:
1.128.2.2! ad 1530: mutex_exit(&pp->pr_lock);
1.3 pk 1531: }
1532:
1.20 thorpej 1533: void
1.42 thorpej 1534: pool_sethardlimit(struct pool *pp, int n, const char *warnmess, int ratecap)
1.20 thorpej 1535: {
1536:
1.128.2.2! ad 1537: mutex_enter(&pp->pr_lock);
1.20 thorpej 1538:
1539: pp->pr_hardlimit = n;
1540: pp->pr_hardlimit_warning = warnmess;
1.31 thorpej 1541: pp->pr_hardlimit_ratecap.tv_sec = ratecap;
1542: pp->pr_hardlimit_warning_last.tv_sec = 0;
1543: pp->pr_hardlimit_warning_last.tv_usec = 0;
1.20 thorpej 1544:
1545: /*
1.21 thorpej 1546: * In-line version of pool_sethiwat(), because we don't want to
1547: * release the lock.
1.20 thorpej 1548: */
1549: pp->pr_maxpages = (n == 0)
1550: ? 0
1551: : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
1.21 thorpej 1552:
1.128.2.2! ad 1553: mutex_exit(&pp->pr_lock);
1.20 thorpej 1554: }
1.3 pk 1555:
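/*
 * Example (sketch): tuning a pool with the three knobs above.  The
 * numbers and the warning text are illustrative; n is in items for all
 * three calls.
 */
#if 0
static void
foo_tune(void)
{

	pool_setlowat(&foo_pool, 16);	/* keep at least 16 items primed */
	pool_sethiwat(&foo_pool, 128);	/* free empty pages beyond ~128 items */
	pool_sethardlimit(&foo_pool, 1024,
	    "WARNING: foo_pool limit reached", 60);	/* warn <= once a minute */
}
#endif
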
1556: /*
1557: * Release all complete pages that have not been used recently.
1558: */
1.66 thorpej 1559: int
1.59 thorpej 1560: #ifdef POOL_DIAGNOSTIC
1.42 thorpej 1561: _pool_reclaim(struct pool *pp, const char *file, long line)
1.56 sommerfe 1562: #else
1563: pool_reclaim(struct pool *pp)
1564: #endif
1.3 pk 1565: {
1566: struct pool_item_header *ph, *phnext;
1.43 thorpej 1567: struct pool_cache *pc;
1.61 chs 1568: struct pool_pagelist pq;
1.102 chs 1569: struct pool_cache_grouplist pcgl;
1570: struct timeval curtime, diff;
1.3 pk 1571:
1.68 thorpej 1572: if (pp->pr_drain_hook != NULL) {
1573: /*
1574: * The drain hook must be called with the pool unlocked.
1575: */
1576: (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, PR_NOWAIT);
1577: }
1578:
1.128.2.2! ad 1579: if (mutex_tryenter(&pp->pr_lock) == 0)
1.66 thorpej 1580: return (0);
1.25 thorpej 1581: pr_enter(pp, file, line);
1.68 thorpej 1582:
1.88 chs 1583: LIST_INIT(&pq);
1.102 chs 1584: LIST_INIT(&pcgl);
1.3 pk 1585:
1.43 thorpej 1586: /*
1587: * Reclaim items from the pool's caches.
1588: */
1.102 chs 1589: LIST_FOREACH(pc, &pp->pr_cachelist, pc_poollist)
1590: pool_cache_reclaim(pc, &pq, &pcgl);
1.43 thorpej 1591:
1.118 kardel 1592: getmicrotime(&curtime);
1.21 thorpej 1593:
1.88 chs 1594: for (ph = LIST_FIRST(&pp->pr_emptypages); ph != NULL; ph = phnext) {
1595: phnext = LIST_NEXT(ph, ph_pagelist);
1.3 pk 1596:
1597: /* Check our minimum page claim */
1598: if (pp->pr_npages <= pp->pr_minpages)
1599: break;
1600:
1.88 chs 1601: KASSERT(ph->ph_nmissing == 0);
1602: timersub(&curtime, &ph->ph_time, &diff);
1.117 yamt 1603: if (diff.tv_sec < pool_inactive_time
1604: && !pa_starved_p(pp->pr_alloc))
1.88 chs 1605: continue;
1.21 thorpej 1606:
1.88 chs 1607: /*
1608: * If freeing this page would put us below
1609: * the low water mark, stop now.
1610: */
1611: if ((pp->pr_nitems - pp->pr_itemsperpage) <
1612: pp->pr_minitems)
1613: break;
1.21 thorpej 1614:
1.88 chs 1615: pr_rmpage(pp, ph, &pq);
1.3 pk 1616: }
1617:
1.25 thorpej 1618: pr_leave(pp);
1.128.2.2! ad 1619: mutex_exit(&pp->pr_lock);
1.102 chs 1620: if (LIST_EMPTY(&pq) && LIST_EMPTY(&pcgl))
1621: return 0;
1.66 thorpej 1622:
1.101 thorpej 1623: pr_pagelist_free(pp, &pq);
1.102 chs 1624: pcg_grouplist_free(&pcgl);
1.66 thorpej 1625: return (1);
1.3 pk 1626: }
1627:
1628: /*
1629: * Drain pools, one at a time.
1.21 thorpej 1630: *
1631: * Note, we must never be called from an interrupt context.
1.3 pk 1632: */
1633: void
1.124 yamt 1634: pool_drain(void *arg)
1.3 pk 1635: {
1636: struct pool *pp;
1.23 thorpej 1637: int s;
1.3 pk 1638:
1.61 chs 1639: pp = NULL;
1.128.2.2! ad 1640: s = splvm(); /* XXX why? */
! 1641: mutex_enter(&pool_head_lock);
1.61 chs 1642: if (drainpp == NULL) {
1.102 chs 1643: drainpp = LIST_FIRST(&pool_head);
1.61 chs 1644: }
1645: if (drainpp) {
1646: pp = drainpp;
1.102 chs 1647: drainpp = LIST_NEXT(pp, pr_poollist);
1.61 chs 1648: }
1.128.2.2! ad 1649: mutex_exit(&pool_head_lock);
1.115 christos 1650: if (pp)
1651: pool_reclaim(pp);
1.61 chs 1652: splx(s);
1.3 pk 1653: }
1654:
1655: /*
1656: * Diagnostic helpers.
1657: */
1658: void
1.42 thorpej 1659: pool_print(struct pool *pp, const char *modif)
1.21 thorpej 1660: {
1661:
1.128.2.2! ad 1662: if (mutex_tryenter(&pp->pr_lock) == 0) {
1.25 thorpej 1663: printf("pool %s is locked; try again later\n",
1664: pp->pr_wchan);
1665: return;
1666: }
1667: pool_print1(pp, modif, printf);
1.128.2.2! ad 1668: mutex_exit(&pp->pr_lock);
1.21 thorpej 1669: }
1670:
1.25 thorpej 1671: void
1.108 yamt 1672: pool_printall(const char *modif, void (*pr)(const char *, ...))
1673: {
1674: struct pool *pp;
1675:
1.128.2.2! ad 1676: if (mutex_tryenter(&pool_head_lock) == 0) {
1.108     yamt     1677: 		(*pr)("WARNING: pool_head_lock is held\n");
1678: } else {
1.128.2.2! ad 1679: mutex_exit(&pool_head_lock);
1.108 yamt 1680: }
1681:
1682: LIST_FOREACH(pp, &pool_head, pr_poollist) {
1683: pool_printit(pp, modif, pr);
1684: }
1685: }
1686:
1687: void
1.42 thorpej 1688: pool_printit(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
1.25 thorpej 1689: {
1690:
1691: if (pp == NULL) {
1692: (*pr)("Must specify a pool to print.\n");
1693: return;
1694: }
1695:
1696: /*
1697: * Called from DDB; interrupts should be blocked, and all
1698: * other processors should be paused. We can skip locking
1699: * the pool in this case.
1700: *
1.128.2.2! ad 1701: * We do a mutex_tryenter() just to print the lock
1.25 thorpej 1702: * status, however.
1703: */
1704:
1.128.2.2! ad 1705: if (mutex_tryenter(&pp->pr_lock) == 0)
1.25 thorpej 1706: (*pr)("WARNING: pool %s is locked\n", pp->pr_wchan);
1707: else
1.128.2.2! ad 1708: mutex_exit(&pp->pr_lock);
1.25 thorpej 1709:
1710: pool_print1(pp, modif, pr);
1711: }
1712:
1.21 thorpej 1713: static void
1.124 yamt 1714: pool_print_pagelist(struct pool *pp, struct pool_pagelist *pl,
1.97 yamt 1715: void (*pr)(const char *, ...))
1.88 chs 1716: {
1717: struct pool_item_header *ph;
1718: #ifdef DIAGNOSTIC
1719: struct pool_item *pi;
1720: #endif
1721:
1722: LIST_FOREACH(ph, pl, ph_pagelist) {
1723: (*pr)("\t\tpage %p, nmissing %d, time %lu,%lu\n",
1724: ph->ph_page, ph->ph_nmissing,
1725: (u_long)ph->ph_time.tv_sec,
1726: (u_long)ph->ph_time.tv_usec);
1727: #ifdef DIAGNOSTIC
1.97 yamt 1728: if (!(pp->pr_roflags & PR_NOTOUCH)) {
1.102 chs 1729: LIST_FOREACH(pi, &ph->ph_itemlist, pi_list) {
1.97 yamt 1730: if (pi->pi_magic != PI_MAGIC) {
1731: (*pr)("\t\t\titem %p, magic 0x%x\n",
1732: pi, pi->pi_magic);
1733: }
1.88 chs 1734: }
1735: }
1736: #endif
1737: }
1738: }
1739:
1740: static void
1.42 thorpej 1741: pool_print1(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
1.3 pk 1742: {
1.25 thorpej 1743: struct pool_item_header *ph;
1.44 thorpej 1744: struct pool_cache *pc;
1745: struct pool_cache_group *pcg;
1746: int i, print_log = 0, print_pagelist = 0, print_cache = 0;
1.25 thorpej 1747: char c;
1748:
1749: while ((c = *modif++) != '\0') {
1750: if (c == 'l')
1751: print_log = 1;
1752: if (c == 'p')
1753: print_pagelist = 1;
1.44 thorpej 1754: if (c == 'c')
1755: print_cache = 1;
1.25 thorpej 1756: }
1757:
1758: (*pr)("POOL %s: size %u, align %u, ioff %u, roflags 0x%08x\n",
1759: pp->pr_wchan, pp->pr_size, pp->pr_align, pp->pr_itemoffset,
1760: pp->pr_roflags);
1.66 thorpej 1761: (*pr)("\talloc %p\n", pp->pr_alloc);
1.25 thorpej 1762: (*pr)("\tminitems %u, minpages %u, maxpages %u, npages %u\n",
1763: pp->pr_minitems, pp->pr_minpages, pp->pr_maxpages, pp->pr_npages);
1764: (*pr)("\titemsperpage %u, nitems %u, nout %u, hardlimit %u\n",
1765: pp->pr_itemsperpage, pp->pr_nitems, pp->pr_nout, pp->pr_hardlimit);
1766:
1767: (*pr)("\n\tnget %lu, nfail %lu, nput %lu\n",
1768: pp->pr_nget, pp->pr_nfail, pp->pr_nput);
1769: (*pr)("\tnpagealloc %lu, npagefree %lu, hiwat %u, nidle %lu\n",
1770: pp->pr_npagealloc, pp->pr_npagefree, pp->pr_hiwat, pp->pr_nidle);
1771:
1772: if (print_pagelist == 0)
1773: goto skip_pagelist;
1774:
1.88 chs 1775: if ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
1776: (*pr)("\n\tempty page list:\n");
1.97 yamt 1777: pool_print_pagelist(pp, &pp->pr_emptypages, pr);
1.88 chs 1778: if ((ph = LIST_FIRST(&pp->pr_fullpages)) != NULL)
1779: (*pr)("\n\tfull page list:\n");
1.97 yamt 1780: pool_print_pagelist(pp, &pp->pr_fullpages, pr);
1.88 chs 1781: if ((ph = LIST_FIRST(&pp->pr_partpages)) != NULL)
1782: (*pr)("\n\tpartial-page list:\n");
1.97 yamt 1783: pool_print_pagelist(pp, &pp->pr_partpages, pr);
1.88 chs 1784:
1.25 thorpej 1785: if (pp->pr_curpage == NULL)
1786: (*pr)("\tno current page\n");
1787: else
1788: (*pr)("\tcurpage %p\n", pp->pr_curpage->ph_page);
1789:
1790: skip_pagelist:
1791: if (print_log == 0)
1792: goto skip_log;
1793:
1794: (*pr)("\n");
1795: if ((pp->pr_roflags & PR_LOGGING) == 0)
1796: (*pr)("\tno log\n");
1.122 christos 1797: else {
1.25 thorpej 1798: pr_printlog(pp, NULL, pr);
1.122 christos 1799: }
1.3 pk 1800:
1.25 thorpej 1801: skip_log:
1.44 thorpej 1802: if (print_cache == 0)
1803: goto skip_cache;
1804:
1.102 chs 1805: #define PR_GROUPLIST(pcg) \
1806: (*pr)("\t\tgroup %p: avail %d\n", pcg, pcg->pcg_avail); \
1807: for (i = 0; i < PCG_NOBJECTS; i++) { \
1808: if (pcg->pcg_objects[i].pcgo_pa != \
1809: POOL_PADDR_INVALID) { \
1810: (*pr)("\t\t\t%p, 0x%llx\n", \
1811: pcg->pcg_objects[i].pcgo_va, \
1812: (unsigned long long) \
1813: pcg->pcg_objects[i].pcgo_pa); \
1814: } else { \
1815: (*pr)("\t\t\t%p\n", \
1816: pcg->pcg_objects[i].pcgo_va); \
1817: } \
1818: }
1819:
1820: LIST_FOREACH(pc, &pp->pr_cachelist, pc_poollist) {
1.103 chs 1821: (*pr)("\tcache %p\n", pc);
1.48 thorpej 1822: (*pr)("\t hits %lu misses %lu ngroups %lu nitems %lu\n",
1823: pc->pc_hits, pc->pc_misses, pc->pc_ngroups, pc->pc_nitems);
1.102 chs 1824: (*pr)("\t full groups:\n");
1.103 chs 1825: LIST_FOREACH(pcg, &pc->pc_fullgroups, pcg_list) {
1.102 chs 1826: PR_GROUPLIST(pcg);
1.103 chs 1827: }
1.102 chs 1828: (*pr)("\t partial groups:\n");
1.103 chs 1829: LIST_FOREACH(pcg, &pc->pc_partgroups, pcg_list) {
1.102 chs 1830: PR_GROUPLIST(pcg);
1.103 chs 1831: }
1.102 chs 1832: (*pr)("\t empty groups:\n");
1.103 chs 1833: LIST_FOREACH(pcg, &pc->pc_emptygroups, pcg_list) {
1.102 chs 1834: PR_GROUPLIST(pcg);
1.103 chs 1835: }
1.44 thorpej 1836: }
1.102 chs 1837: #undef PR_GROUPLIST
1.44 thorpej 1838:
1839: skip_cache:
1.88 chs 1840: pr_enter_check(pp, pr);
1841: }
1842:
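/*
 * Editorial sketch: the "modif" string parsed by pool_print1() above
 * selects extra detail: 'l' prints the alloc/free log, 'p' the three
 * page lists, 'c' the cache groups.  A debugging hook might therefore
 * do the following (foo_pool is hypothetical):
 */
#if 0	/* illustration only */
static void
foo_debug_dump(void)
{
	pool_printit(&foo_pool, "pc", printf);
}
#endif
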
1843: static int
1844: pool_chk_page(struct pool *pp, const char *label, struct pool_item_header *ph)
1845: {
1846: struct pool_item *pi;
1.128 christos 1847: void *page;
1.88 chs 1848: int n;
1849:
1.121 yamt 1850: if ((pp->pr_roflags & PR_NOALIGN) == 0) {
1.128 christos 1851: page = (void *)((uintptr_t)ph & pp->pr_alloc->pa_pagemask);
1.121 yamt 1852: if (page != ph->ph_page &&
1853: (pp->pr_roflags & PR_PHINPAGE) != 0) {
1854: if (label != NULL)
1855: printf("%s: ", label);
1856: printf("pool(%p:%s): page inconsistency: page %p;"
1857: " at page head addr %p (p %p)\n", pp,
1858: pp->pr_wchan, ph->ph_page,
1859: ph, page);
1860: return 1;
1861: }
1.88 chs 1862: }
1.3 pk 1863:
1.97 yamt 1864: if ((pp->pr_roflags & PR_NOTOUCH) != 0)
1865: return 0;
1866:
1.102 chs 1867: for (pi = LIST_FIRST(&ph->ph_itemlist), n = 0;
1.88 chs 1868: pi != NULL;
1.102 chs 1869: pi = LIST_NEXT(pi,pi_list), n++) {
1.88 chs 1870:
1871: #ifdef DIAGNOSTIC
1872: if (pi->pi_magic != PI_MAGIC) {
1873: if (label != NULL)
1874: printf("%s: ", label);
1875: printf("pool(%s): free list modified: magic=%x;"
1.121 yamt 1876: " page %p; item ordinal %d; addr %p\n",
1.88 chs 1877: pp->pr_wchan, pi->pi_magic, ph->ph_page,
1.121 yamt 1878: n, pi);
1.88 chs 1879: panic("pool");
1880: }
1881: #endif
1.121 yamt 1882: if ((pp->pr_roflags & PR_NOALIGN) != 0) {
1883: continue;
1884: }
1.128 christos 1885: page = (void *)((uintptr_t)pi & pp->pr_alloc->pa_pagemask);
1.88 chs 1886: if (page == ph->ph_page)
1887: continue;
1888:
1889: if (label != NULL)
1890: printf("%s: ", label);
1891: printf("pool(%p:%s): page inconsistency: page %p;"
1892: " item ordinal %d; addr %p (p %p)\n", pp,
1893: pp->pr_wchan, ph->ph_page,
1894: n, pi, page);
1895: return 1;
1896: }
1897: return 0;
1.3 pk 1898: }
1899:
1.88 chs 1900:
1.3 pk 1901: int
1.42 thorpej 1902: pool_chk(struct pool *pp, const char *label)
1.3 pk 1903: {
1904: struct pool_item_header *ph;
1905: int r = 0;
1906:
1.128.2.2! ad 1907: mutex_enter(&pp->pr_lock);
1.88 chs 1908: LIST_FOREACH(ph, &pp->pr_emptypages, ph_pagelist) {
1909: r = pool_chk_page(pp, label, ph);
1910: if (r) {
1911: goto out;
1912: }
1913: }
1914: LIST_FOREACH(ph, &pp->pr_fullpages, ph_pagelist) {
1915: r = pool_chk_page(pp, label, ph);
1916: if (r) {
1.3 pk 1917: goto out;
1918: }
1.88 chs 1919: }
1920: LIST_FOREACH(ph, &pp->pr_partpages, ph_pagelist) {
1921: r = pool_chk_page(pp, label, ph);
1922: if (r) {
1.3 pk 1923: goto out;
1924: }
1925: }
1.88 chs 1926:
1.3 pk 1927: out:
1.128.2.2! ad 1928: mutex_exit(&pp->pr_lock);
1.3 pk 1929: return (r);
1.43 thorpej 1930: }
1931:
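/*
 * Editorial sketch: pool_chk() above walks all three page lists and
 * returns nonzero on the first page inconsistency (under DIAGNOSTIC a
 * corrupted free-list magic number panics instead).  An assumed
 * debugging call:
 */
#if 0	/* illustration only */
static void
foo_verify(void)
{
	if (pool_chk(&foo_pool, "foo_verify") != 0)
		printf("foo_pool: page inconsistency detected\n");
}
#endif
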
1932: /*
1933: * pool_cache_init:
1934: *
1935: * Initialize a pool cache.
1936: *
1937: * NOTE: If the pool must be protected from interrupts, we expect
1938: * to be called at the appropriate interrupt priority level.
1939: */
1940: void
1941: pool_cache_init(struct pool_cache *pc, struct pool *pp,
1942: int (*ctor)(void *, void *, int),
1943: void (*dtor)(void *, void *),
1944: void *arg)
1945: {
1946:
1.102 chs 1947: LIST_INIT(&pc->pc_emptygroups);
1948: LIST_INIT(&pc->pc_fullgroups);
1949: LIST_INIT(&pc->pc_partgroups);
1.128.2.2! ad 1950: mutex_init(&pc->pc_lock, MUTEX_DRIVER, pp->pr_ipl);
1.43 thorpej 1951:
1952: pc->pc_pool = pp;
1953:
1954: pc->pc_ctor = ctor;
1955: pc->pc_dtor = dtor;
1956: pc->pc_arg = arg;
1957:
1.48 thorpej 1958: pc->pc_hits = 0;
1959: pc->pc_misses = 0;
1960:
1961: pc->pc_ngroups = 0;
1962:
1963: pc->pc_nitems = 0;
1964:
1.128.2.2! ad 1965: if (__predict_true(!cold)) {
! 1966: mutex_enter(&pp->pr_lock);
! 1967: LIST_INSERT_HEAD(&pp->pr_cachelist, pc, pc_poollist);
! 1968: mutex_exit(&pp->pr_lock);
! 1969: } else
! 1970: LIST_INSERT_HEAD(&pp->pr_cachelist, pc, pc_poollist);
1.43 thorpej 1971: }
1972:
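/*
 * Editorial sketch: a typical client layers a cache over its pool so
 * that constructed objects are recycled without re-running the
 * constructor.  All "foo" names are hypothetical; the constructor
 * returns 0 on success, as pool_cache_get_paddr() below expects.
 */
#if 0	/* illustration only */
static struct pool_cache foo_cache;	/* foo_pool as in earlier sketch */

static int
foo_ctor(void *arg, void *obj, int flags)
{
	/* One-time set-up of a freshly allocated object. */
	return 0;
}

static void
foo_dtor(void *arg, void *obj)
{
	/* Undo foo_ctor() before the memory returns to foo_pool. */
}

static void
foo_attach(void)
{
	pool_cache_init(&foo_cache, &foo_pool, foo_ctor, foo_dtor, NULL);
}
#endif
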
1973: /*
1974: * pool_cache_destroy:
1975: *
1976: * Destroy a pool cache.
1977: */
1978: void
1979: pool_cache_destroy(struct pool_cache *pc)
1980: {
1981: struct pool *pp = pc->pc_pool;
1982:
1983: /* First, invalidate the entire cache. */
1984: pool_cache_invalidate(pc);
1985:
1986: /* ...and remove it from the pool's cache list. */
1.128.2.2! ad 1987: mutex_enter(&pp->pr_lock);
1.102 chs 1988: LIST_REMOVE(pc, pc_poollist);
1.128.2.2! ad 1989: mutex_exit(&pp->pr_lock);
! 1990:
! 1991: mutex_destroy(&pc->pc_lock);
1.43 thorpej 1992: }
1993:
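/*
 * Editorial sketch: tearing down the pair from the foo_attach() sketch.
 * pool_cache_destroy() above first invalidates the cache, destructing
 * and returning every cached object, before unlinking it from the pool.
 */
#if 0	/* illustration only */
static void
foo_detach(void)
{
	pool_cache_destroy(&foo_cache);
	pool_destroy(&foo_pool);
}
#endif
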
1.110 perry 1994: static inline void *
1.87 thorpej 1995: pcg_get(struct pool_cache_group *pcg, paddr_t *pap)
1.43 thorpej 1996: {
1997: void *object;
1998: u_int idx;
1999:
2000: KASSERT(pcg->pcg_avail <= PCG_NOBJECTS);
1.45 thorpej 2001: KASSERT(pcg->pcg_avail != 0);
1.43 thorpej 2002: idx = --pcg->pcg_avail;
2003:
1.87 thorpej 2004: KASSERT(pcg->pcg_objects[idx].pcgo_va != NULL);
2005: object = pcg->pcg_objects[idx].pcgo_va;
2006: if (pap != NULL)
2007: *pap = pcg->pcg_objects[idx].pcgo_pa;
2008: pcg->pcg_objects[idx].pcgo_va = NULL;
1.43 thorpej 2009:
2010: return (object);
2011: }
2012:
1.110 perry 2013: static inline void
1.87 thorpej 2014: pcg_put(struct pool_cache_group *pcg, void *object, paddr_t pa)
1.43 thorpej 2015: {
2016: u_int idx;
2017:
2018: KASSERT(pcg->pcg_avail < PCG_NOBJECTS);
2019: idx = pcg->pcg_avail++;
2020:
1.87 thorpej 2021: KASSERT(pcg->pcg_objects[idx].pcgo_va == NULL);
2022: pcg->pcg_objects[idx].pcgo_va = object;
2023: pcg->pcg_objects[idx].pcgo_pa = pa;
1.43 thorpej 2024: }
2025:
1.102 chs 2026: static void
2027: pcg_grouplist_free(struct pool_cache_grouplist *pcgl)
2028: {
2029: struct pool_cache_group *pcg;
2030:
2031: while ((pcg = LIST_FIRST(pcgl)) != NULL) {
2032: LIST_REMOVE(pcg, pcg_list);
2033: pool_put(&pcgpool, pcg);
2034: }
2035: }
2036:
1.43 thorpej 2037: /*
1.87 thorpej 2038: * pool_cache_get{,_paddr}:
1.43 thorpej 2039: *
1.87 thorpej 2040: * Get an object from a pool cache (optionally returning
2041: * the physical address of the object).
1.43 thorpej 2042: */
2043: void *
1.87 thorpej 2044: pool_cache_get_paddr(struct pool_cache *pc, int flags, paddr_t *pap)
1.43 thorpej 2045: {
2046: struct pool_cache_group *pcg;
2047: void *object;
1.58 thorpej 2048:
2049: #ifdef LOCKDEBUG
2050: if (flags & PR_WAITOK)
1.119 yamt 2051: ASSERT_SLEEPABLE(NULL, "pool_cache_get(PR_WAITOK)");
1.58 thorpej 2052: #endif
1.43 thorpej 2053:
1.128.2.2! ad 2054: mutex_enter(&pc->pc_lock);
1.43 thorpej 2055:
1.102 chs 2056: pcg = LIST_FIRST(&pc->pc_partgroups);
2057: if (pcg == NULL) {
2058: pcg = LIST_FIRST(&pc->pc_fullgroups);
2059: if (pcg != NULL) {
2060: LIST_REMOVE(pcg, pcg_list);
2061: LIST_INSERT_HEAD(&pc->pc_partgroups, pcg, pcg_list);
1.43 thorpej 2062: }
1.102 chs 2063: }
2064: if (pcg == NULL) {
1.43 thorpej 2065:
2066: /*
2067: * No groups with any available objects. Allocate
2068: * a new object, construct it, and return it to
2069: * the caller. We will allocate a group, if necessary,
2070: * when the object is freed back to the cache.
2071: */
1.48 thorpej 2072: pc->pc_misses++;
1.128.2.2! ad 2073: mutex_exit(&pc->pc_lock);
1.43 thorpej 2074: object = pool_get(pc->pc_pool, flags);
2075: if (object != NULL && pc->pc_ctor != NULL) {
2076: if ((*pc->pc_ctor)(pc->pc_arg, object, flags) != 0) {
2077: pool_put(pc->pc_pool, object);
2078: return (NULL);
2079: }
2080: }
1.125 ad 2081: KASSERT((((vaddr_t)object + pc->pc_pool->pr_itemoffset) &
2082: (pc->pc_pool->pr_align - 1)) == 0);
1.87 thorpej 2083: if (object != NULL && pap != NULL) {
2084: #ifdef POOL_VTOPHYS
2085: *pap = POOL_VTOPHYS(object);
2086: #else
2087: *pap = POOL_PADDR_INVALID;
2088: #endif
2089: }
1.125 ad 2090:
2091: FREECHECK_OUT(&pc->pc_freecheck, object);
1.43 thorpej 2092: return (object);
2093: }
2094:
1.48 thorpej 2095: pc->pc_hits++;
2096: pc->pc_nitems--;
1.87 thorpej 2097: object = pcg_get(pcg, pap);
1.43 thorpej 2098:
1.102 chs 2099: if (pcg->pcg_avail == 0) {
2100: LIST_REMOVE(pcg, pcg_list);
2101: LIST_INSERT_HEAD(&pc->pc_emptygroups, pcg, pcg_list);
2102: }
1.128.2.2! ad 2103: mutex_exit(&pc->pc_lock);
1.43 thorpej 2104:
1.125 ad 2105: KASSERT((((vaddr_t)object + pc->pc_pool->pr_itemoffset) &
2106: (pc->pc_pool->pr_align - 1)) == 0);
2107: FREECHECK_OUT(&pc->pc_freecheck, object);
1.43 thorpej 2108: return (object);
2109: }
2110:
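/*
 * Editorial sketch: most callers do not need the physical address and
 * use the pool_cache_get() wrapper, assumed here to be the <sys/pool.h>
 * macro that passes a NULL paddr pointer to pool_cache_get_paddr().
 */
#if 0	/* illustration only */
static void *
foo_alloc(void)
{
	return pool_cache_get(&foo_cache, PR_WAITOK);
}
#endif
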
2111: /*
1.87 thorpej 2112: * pool_cache_put{,_paddr}:
1.43 thorpej 2113: *
1.87 thorpej 2114: * Put an object back to the pool cache (optionally caching the
2115: * physical address of the object).
1.43 thorpej 2116: */
2117: void
1.87 thorpej 2118: pool_cache_put_paddr(struct pool_cache *pc, void *object, paddr_t pa)
1.43 thorpej 2119: {
2120: struct pool_cache_group *pcg;
2121:
1.125 ad 2122: FREECHECK_IN(&pc->pc_freecheck, object);
2123:
1.109 christos 2124: if (__predict_false((pc->pc_pool->pr_flags & PR_WANTED) != 0)) {
2125: goto destruct;
2126: }
2127:
1.128.2.2! ad 2128: mutex_enter(&pc->pc_lock);
1.43 thorpej 2129:
1.102 chs 2130: pcg = LIST_FIRST(&pc->pc_partgroups);
2131: if (pcg == NULL) {
2132: pcg = LIST_FIRST(&pc->pc_emptygroups);
2133: if (pcg != NULL) {
2134: LIST_REMOVE(pcg, pcg_list);
2135: LIST_INSERT_HEAD(&pc->pc_partgroups, pcg, pcg_list);
1.43 thorpej 2136: }
1.102 chs 2137: }
2138: if (pcg == NULL) {
1.43 thorpej 2139:
2140: /*
                  2141: 		 * No group with a free slot to receive the object.
1.47      thorpej  2142: 		 * Attempt to allocate a new group.
1.43 thorpej 2143: */
1.128.2.2! ad 2144: mutex_exit(&pc->pc_lock);
1.43 thorpej 2145: pcg = pool_get(&pcgpool, PR_NOWAIT);
1.102 chs 2146: if (pcg == NULL) {
1.109 christos 2147: destruct:
1.102 chs 2148:
2149: /*
2150: * Unable to allocate a cache group; destruct the object
2151: * and free it back to the pool.
2152: */
2153: pool_cache_destruct_object(pc, object);
2154: return;
1.43 thorpej 2155: }
1.102 chs 2156: memset(pcg, 0, sizeof(*pcg));
1.128.2.2! ad 2157: mutex_enter(&pc->pc_lock);
1.102 chs 2158: pc->pc_ngroups++;
2159: LIST_INSERT_HEAD(&pc->pc_partgroups, pcg, pcg_list);
1.43 thorpej 2160: }
2161:
1.48 thorpej 2162: pc->pc_nitems++;
1.87 thorpej 2163: pcg_put(pcg, object, pa);
1.43 thorpej 2164:
1.102 chs 2165: if (pcg->pcg_avail == PCG_NOBJECTS) {
2166: LIST_REMOVE(pcg, pcg_list);
2167: LIST_INSERT_HEAD(&pc->pc_fullgroups, pcg, pcg_list);
2168: }
1.128.2.2! ad 2169: mutex_exit(&pc->pc_lock);
1.51 thorpej 2170: }
2171:
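/*
 * Editorial sketch: the matching release path.  pool_cache_put() is
 * assumed to be the <sys/pool.h> wrapper that passes POOL_PADDR_INVALID
 * to pool_cache_put_paddr() above.
 */
#if 0	/* illustration only */
static void
foo_free(void *obj)
{
	pool_cache_put(&foo_cache, obj);
}
#endif
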
2172: /*
2173: * pool_cache_destruct_object:
2174: *
2175: * Force destruction of an object and its release back into
2176: * the pool.
2177: */
2178: void
2179: pool_cache_destruct_object(struct pool_cache *pc, void *object)
2180: {
2181:
2182: if (pc->pc_dtor != NULL)
2183: (*pc->pc_dtor)(pc->pc_arg, object);
2184: pool_put(pc->pc_pool, object);
1.43 thorpej 2185: }
2186:
1.102 chs 2187: static void
1.106 christos 2188: pool_do_cache_invalidate_grouplist(struct pool_cache_grouplist *pcgsl,
1.105 christos 2189: struct pool_cache *pc, struct pool_pagelist *pq,
1.106 christos 2190: struct pool_cache_grouplist *pcgdl)
1.102 chs 2191: {
1.106 christos 2192: struct pool_cache_group *pcg, *npcg;
1.102 chs 2193: void *object;
2194:
1.106 christos 2195: for (pcg = LIST_FIRST(pcgsl); pcg != NULL; pcg = npcg) {
1.102 chs 2196: npcg = LIST_NEXT(pcg, pcg_list);
2197: while (pcg->pcg_avail != 0) {
2198: pc->pc_nitems--;
2199: object = pcg_get(pcg, NULL);
2200: if (pc->pc_dtor != NULL)
2201: (*pc->pc_dtor)(pc->pc_arg, object);
2202: pool_do_put(pc->pc_pool, object, pq);
2203: }
1.103 chs 2204: pc->pc_ngroups--;
1.102 chs 2205: LIST_REMOVE(pcg, pcg_list);
1.106 christos 2206: LIST_INSERT_HEAD(pcgdl, pcg, pcg_list);
1.102 chs 2207: }
1.105 christos 2208: }
2209:
2210: static void
2211: pool_do_cache_invalidate(struct pool_cache *pc, struct pool_pagelist *pq,
2212: struct pool_cache_grouplist *pcgl)
2213: {
2214:
1.128.2.2! ad 2215: KASSERT(mutex_owned(&pc->pc_lock));
! 2216: KASSERT(mutex_owned(&pc->pc_pool->pr_lock));
1.105 christos 2217:
1.106 christos 2218: pool_do_cache_invalidate_grouplist(&pc->pc_fullgroups, pc, pq, pcgl);
2219: pool_do_cache_invalidate_grouplist(&pc->pc_partgroups, pc, pq, pcgl);
1.103 chs 2220:
2221: KASSERT(LIST_EMPTY(&pc->pc_partgroups));
2222: KASSERT(LIST_EMPTY(&pc->pc_fullgroups));
2223: KASSERT(pc->pc_nitems == 0);
1.102 chs 2224: }
2225:
1.43 thorpej 2226: /*
1.101 thorpej 2227: * pool_cache_invalidate:
1.43 thorpej 2228: *
1.101 thorpej 2229: * Invalidate a pool cache (destruct and release all of the
2230: * cached objects).
1.43 thorpej 2231: */
1.101 thorpej 2232: void
2233: pool_cache_invalidate(struct pool_cache *pc)
1.43 thorpej 2234: {
1.101 thorpej 2235: struct pool_pagelist pq;
1.102 chs 2236: struct pool_cache_grouplist pcgl;
1.101 thorpej 2237:
2238: LIST_INIT(&pq);
1.102 chs 2239: LIST_INIT(&pcgl);
1.101 thorpej 2240:
1.128.2.2! ad 2241: mutex_enter(&pc->pc_lock);
! 2242: mutex_enter(&pc->pc_pool->pr_lock);
1.43 thorpej 2243:
1.102 chs 2244: pool_do_cache_invalidate(pc, &pq, &pcgl);
1.43 thorpej 2245:
1.128.2.2! ad 2246: mutex_exit(&pc->pc_pool->pr_lock);
! 2247: mutex_exit(&pc->pc_lock);
1.43 thorpej 2248:
1.102 chs 2249: pr_pagelist_free(pc->pc_pool, &pq);
2250: pcg_grouplist_free(&pcgl);
1.43 thorpej 2251: }
2252:
2253: /*
2254: * pool_cache_reclaim:
2255: *
2256: * Reclaim a pool cache for pool_reclaim().
2257: */
2258: static void
1.102 chs 2259: pool_cache_reclaim(struct pool_cache *pc, struct pool_pagelist *pq,
2260: struct pool_cache_grouplist *pcgl)
1.43 thorpej 2261: {
1.101 thorpej 2262:
2263: /*
2264: * We're locking in the wrong order (normally pool_cache -> pool,
2265: * but the pool is already locked when we get here), so we have
                  2266: 	 * to use trylock.  If we can't lock the pool_cache, that is
                  2267: 	 * harmless: its cached objects simply go unreclaimed this time.
2268: */
1.128.2.2! ad 2269: if (mutex_tryenter(&pc->pc_lock) == 0)
1.101 thorpej 2270: return;
2271:
1.102 chs 2272: pool_do_cache_invalidate(pc, pq, pcgl);
1.43 thorpej 2273:
1.128.2.2! ad 2274: mutex_exit(&pc->pc_lock);
1.3 pk 2275: }
1.66 thorpej 2276:
2277: /*
2278: * Pool backend allocators.
2279: *
2280: * Each pool has a backend allocator that handles allocation, deallocation,
2281: * and any additional draining that might be needed.
2282: *
2283: * We provide two standard allocators:
2284: *
2285: * pool_allocator_kmem - the default when no allocator is specified
2286: *
2287: * pool_allocator_nointr - used for pools that will not be accessed
2288: * in interrupt context.
2289: */
2290: void *pool_page_alloc(struct pool *, int);
2291: void pool_page_free(struct pool *, void *);
2292:
1.112 bjh21 2293: #ifdef POOL_SUBPAGE
2294: struct pool_allocator pool_allocator_kmem_fullpage = {
2295: pool_page_alloc, pool_page_free, 0,
1.117 yamt 2296: .pa_backingmapptr = &kmem_map,
1.112 bjh21 2297: };
2298: #else
1.66 thorpej 2299: struct pool_allocator pool_allocator_kmem = {
2300: pool_page_alloc, pool_page_free, 0,
1.117 yamt 2301: .pa_backingmapptr = &kmem_map,
1.66 thorpej 2302: };
1.112 bjh21 2303: #endif
1.66 thorpej 2304:
2305: void *pool_page_alloc_nointr(struct pool *, int);
2306: void pool_page_free_nointr(struct pool *, void *);
2307:
1.112 bjh21 2308: #ifdef POOL_SUBPAGE
2309: struct pool_allocator pool_allocator_nointr_fullpage = {
2310: pool_page_alloc_nointr, pool_page_free_nointr, 0,
1.117 yamt 2311: .pa_backingmapptr = &kernel_map,
1.112 bjh21 2312: };
2313: #else
1.66 thorpej 2314: struct pool_allocator pool_allocator_nointr = {
2315: pool_page_alloc_nointr, pool_page_free_nointr, 0,
1.117 yamt 2316: .pa_backingmapptr = &kernel_map,
1.66 thorpej 2317: };
1.112 bjh21 2318: #endif
1.66 thorpej 2319:
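/*
 * Editorial sketch: a pool may supply its own backend allocator by
 * passing a struct pool_allocator to pool_init().  The positional
 * initializers follow the pattern above (pa_alloc, pa_free, pa_pagesz;
 * 0 selects the default page size).  All "foo" names are hypothetical;
 * for the sketch the hooks simply delegate to the standard page
 * allocator declared above.
 */
#if 0	/* illustration only */
static void *
foo_page_alloc(struct pool *pp, int flags)
{
	return pool_page_alloc(pp, flags);
}

static void
foo_page_free(struct pool *pp, void *v)
{
	pool_page_free(pp, v);
}

static struct pool_allocator foo_allocator = {
	foo_page_alloc, foo_page_free, 0,
};
#endif
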
2320: #ifdef POOL_SUBPAGE
2321: void *pool_subpage_alloc(struct pool *, int);
2322: void pool_subpage_free(struct pool *, void *);
2323:
1.112 bjh21 2324: struct pool_allocator pool_allocator_kmem = {
2325: pool_subpage_alloc, pool_subpage_free, POOL_SUBPAGE,
1.117 yamt 2326: .pa_backingmapptr = &kmem_map,
1.112 bjh21 2327: };
2328:
2329: void *pool_subpage_alloc_nointr(struct pool *, int);
2330: void pool_subpage_free_nointr(struct pool *, void *);
2331:
2332: struct pool_allocator pool_allocator_nointr = {
2333: pool_subpage_alloc, pool_subpage_free, POOL_SUBPAGE,
1.117 yamt 2334: .pa_backingmapptr = &kmem_map,
1.66 thorpej 2335: };
2336: #endif /* POOL_SUBPAGE */
2337:
1.117 yamt 2338: static void *
2339: pool_allocator_alloc(struct pool *pp, int flags)
1.66 thorpej 2340: {
1.117 yamt 2341: struct pool_allocator *pa = pp->pr_alloc;
1.66 thorpej 2342: void *res;
2343:
1.117 yamt 2344: res = (*pa->pa_alloc)(pp, flags);
2345: if (res == NULL && (flags & PR_WAITOK) == 0) {
1.66 thorpej 2346: /*
1.117 yamt 2347: * We only run the drain hook here if PR_NOWAIT.
2348: * In other cases, the hook will be run in
2349: * pool_reclaim().
1.66 thorpej 2350: */
1.117 yamt 2351: if (pp->pr_drain_hook != NULL) {
2352: (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags);
2353: res = (*pa->pa_alloc)(pp, flags);
1.66 thorpej 2354: }
1.117 yamt 2355: }
2356: return res;
1.66 thorpej 2357: }
2358:
1.117 yamt 2359: static void
1.66 thorpej 2360: pool_allocator_free(struct pool *pp, void *v)
2361: {
2362: struct pool_allocator *pa = pp->pr_alloc;
2363:
2364: (*pa->pa_free)(pp, v);
2365: }
2366:
2367: void *
1.124 yamt 2368: pool_page_alloc(struct pool *pp, int flags)
1.66 thorpej 2369: {
1.127 thorpej 2370: bool waitok = (flags & PR_WAITOK) ? true : false;
1.66 thorpej 2371:
1.100 yamt 2372: return ((void *) uvm_km_alloc_poolpage_cache(kmem_map, waitok));
1.66 thorpej 2373: }
2374:
2375: void
1.124 yamt 2376: pool_page_free(struct pool *pp, void *v)
1.66 thorpej 2377: {
2378:
1.98 yamt 2379: uvm_km_free_poolpage_cache(kmem_map, (vaddr_t) v);
2380: }
2381:
2382: static void *
1.124 yamt 2383: pool_page_alloc_meta(struct pool *pp, int flags)
1.98 yamt 2384: {
1.127 thorpej 2385: bool waitok = (flags & PR_WAITOK) ? true : false;
1.98 yamt 2386:
1.100 yamt 2387: return ((void *) uvm_km_alloc_poolpage(kmem_map, waitok));
1.98 yamt 2388: }
2389:
2390: static void
1.124 yamt 2391: pool_page_free_meta(struct pool *pp, void *v)
1.98 yamt 2392: {
2393:
1.100 yamt 2394: uvm_km_free_poolpage(kmem_map, (vaddr_t) v);
1.66 thorpej 2395: }
2396:
2397: #ifdef POOL_SUBPAGE
2398: /* Sub-page allocator, for machines with large hardware pages. */
2399: void *
2400: pool_subpage_alloc(struct pool *pp, int flags)
2401: {
1.128.2.2! ad 2402: return pool_get(&psppool, flags);
1.66 thorpej 2403: }
2404:
2405: void
2406: pool_subpage_free(struct pool *pp, void *v)
2407: {
2408: pool_put(&psppool, v);
2409: }
2410:
2411: /* We don't provide a real nointr allocator. Maybe later. */
2412: void *
1.112 bjh21 2413: pool_subpage_alloc_nointr(struct pool *pp, int flags)
1.66 thorpej 2414: {
2415:
2416: return (pool_subpage_alloc(pp, flags));
2417: }
2418:
2419: void
1.112 bjh21 2420: pool_subpage_free_nointr(struct pool *pp, void *v)
1.66 thorpej 2421: {
2422:
2423: pool_subpage_free(pp, v);
2424: }
1.112 bjh21 2425: #endif /* POOL_SUBPAGE */
1.66 thorpej 2426: void *
1.124 yamt 2427: pool_page_alloc_nointr(struct pool *pp, int flags)
1.66 thorpej 2428: {
1.127 thorpej 2429: bool waitok = (flags & PR_WAITOK) ? true : false;
1.66 thorpej 2430:
1.100 yamt 2431: return ((void *) uvm_km_alloc_poolpage_cache(kernel_map, waitok));
1.66 thorpej 2432: }
2433:
2434: void
1.124 yamt 2435: pool_page_free_nointr(struct pool *pp, void *v)
1.66 thorpej 2436: {
2437:
1.98 yamt 2438: uvm_km_free_poolpage_cache(kernel_map, (vaddr_t) v);
1.66 thorpej 2439: }