Please note that diffs are not public domain; they are subject to the copyright notices on the relevant files. =================================================================== RCS file: /ftp/cvs/cvsroot/src/sys/kern/subr_pool.c,v rcsdiff: /ftp/cvs/cvsroot/src/sys/kern/subr_pool.c,v: warning: Unknown phrases like `commitid ...;' are present. retrieving revision 1.56 retrieving revision 1.69 diff -u -p -r1.56 -r1.69 --- src/sys/kern/subr_pool.c 2001/05/13 17:06:59 1.56 +++ src/sys/kern/subr_pool.c 2002/03/08 21:43:54 1.69 @@ -1,4 +1,4 @@ -/* $NetBSD: subr_pool.c,v 1.56 2001/05/13 17:06:59 sommerfeld Exp $ */ +/* $NetBSD: subr_pool.c,v 1.69 2002/03/08 21:43:54 thorpej Exp $ */ /*- * Copyright (c) 1997, 1999, 2000 The NetBSD Foundation, Inc. @@ -37,6 +37,9 @@ * POSSIBILITY OF SUCH DAMAGE. */ +#include +__KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.69 2002/03/08 21:43:54 thorpej Exp $"); + #include "opt_pool.h" #include "opt_poollog.h" #include "opt_lockdebug.h" @@ -70,6 +73,11 @@ TAILQ_HEAD(,pool) pool_head = TAILQ_HEAD /* Private pool for page header structures */ static struct pool phpool; +#ifdef POOL_SUBPAGE +/* Pool of subpages for use by normal pools. */ +static struct pool psppool; +#endif + /* # of seconds to retain page after last use */ int pool_inactive_time = 10; @@ -90,6 +98,7 @@ struct pool_item_header { caddr_t ph_page; /* this page's address */ struct timeval ph_time; /* last referenced */ }; +TAILQ_HEAD(pool_pagelist,pool_item_header); struct pool_item { #ifdef DIAGNOSTIC @@ -101,7 +110,8 @@ struct pool_item { }; #define PR_HASH_INDEX(pp,addr) \ - (((u_long)(addr) >> (pp)->pr_pageshift) & (PR_HASHTABSIZE - 1)) + (((u_long)(addr) >> (pp)->pr_alloc->pa_pageshift) & \ + (PR_HASHTABSIZE - 1)) #define POOL_NEEDS_CATCHUP(pp) \ ((pp)->pr_nitems < (pp)->pr_minitems) @@ -150,8 +160,9 @@ static void pool_cache_reclaim(struct po static int pool_catchup(struct pool *); static void pool_prime_page(struct pool *, caddr_t, struct pool_item_header *); -static void *pool_page_alloc(unsigned long, int, int); -static void pool_page_free(void *, unsigned long, int); + +void *pool_allocator_alloc(struct pool *, int); +void pool_allocator_free(struct pool *, void *); static void pool_print1(struct pool *, const char *, void (*)(const char *, ...)); @@ -175,7 +186,7 @@ struct pool_log { int pool_logsize = POOL_LOGSIZE; -#ifdef DIAGNOSTIC +#ifdef POOL_DIAGNOSTIC static __inline void pr_log(struct pool *pp, void *v, int action, const char *file, long line) { @@ -272,7 +283,7 @@ pr_enter_check(struct pool *pp, void (*p #define pr_enter(pp, file, line) #define pr_leave(pp) #define pr_enter_check(pp, pr) -#endif /* DIAGNOSTIC */ +#endif /* POOL_DIAGNOSTIC */ /* * Return the pool page header based on page address. @@ -298,8 +309,10 @@ pr_find_pagehead(struct pool *pp, caddr_ * Remove a page from the pool. */ static __inline void -pr_rmpage(struct pool *pp, struct pool_item_header *ph) +pr_rmpage(struct pool *pp, struct pool_item_header *ph, + struct pool_pagelist *pq) { + int s; /* * If the page was idle, decrement the idle page count. @@ -317,29 +330,30 @@ pr_rmpage(struct pool *pp, struct pool_i pp->pr_nitems -= pp->pr_itemsperpage; /* - * Unlink a page from the pool and release it. + * Unlink a page from the pool and release it (or queue it for release). 
*/ TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist); - (*pp->pr_free)(ph->ph_page, pp->pr_pagesz, pp->pr_mtype); + if (pq) { + TAILQ_INSERT_HEAD(pq, ph, ph_pagelist); + } else { + pool_allocator_free(pp, ph->ph_page); + if ((pp->pr_roflags & PR_PHINPAGE) == 0) { + LIST_REMOVE(ph, ph_hashlist); + s = splhigh(); + pool_put(&phpool, ph); + splx(s); + } + } pp->pr_npages--; pp->pr_npagefree++; - if ((pp->pr_roflags & PR_PHINPAGE) == 0) { - int s; - LIST_REMOVE(ph, ph_hashlist); - s = splhigh(); - pool_put(&phpool, ph); - splx(s); - } - if (pp->pr_curpage == ph) { /* * Find a new non-empty page header, if any. * Start search from the page head, to increase the * chance for "high water" pages to be freed. */ - for (ph = TAILQ_FIRST(&pp->pr_pagelist); ph != NULL; - ph = TAILQ_NEXT(ph, ph_pagelist)) + TAILQ_FOREACH(ph, &pp->pr_pagelist, ph_pagelist) if (TAILQ_FIRST(&ph->ph_itemlist) != NULL) break; @@ -355,10 +369,7 @@ pr_rmpage(struct pool *pp, struct pool_i */ void pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags, - const char *wchan, size_t pagesz, - void *(*alloc)(unsigned long, int, int), - void (*release)(void *, unsigned long, int), - int mtype) + const char *wchan, struct pool_allocator *palloc) { int off, slack, i; @@ -370,23 +381,43 @@ pool_init(struct pool *pp, size_t size, flags |= PR_LOGGING; #endif +#ifdef POOL_SUBPAGE + /* + * XXX We don't provide a real `nointr' back-end + * yet; all sub-pages come from a kmem back-end. + * maybe some day... + */ + if (palloc == NULL) { + extern struct pool_allocator pool_allocator_kmem_subpage; + palloc = &pool_allocator_kmem_subpage; + } /* - * Check arguments and construct default values. + * We'll assume any user-specified back-end allocator + * will deal with sub-pages, or simply don't care. */ - if (!powerof2(pagesz)) - panic("pool_init: page size invalid (%lx)\n", (u_long)pagesz); +#else + if (palloc == NULL) + palloc = &pool_allocator_kmem; +#endif /* POOL_SUBPAGE */ + if ((palloc->pa_flags & PA_INITIALIZED) == 0) { + if (palloc->pa_pagesz == 0) { +#ifdef POOL_SUBPAGE + if (palloc == &pool_allocator_kmem) + palloc->pa_pagesz = PAGE_SIZE; + else + palloc->pa_pagesz = POOL_SUBPAGE; +#else + palloc->pa_pagesz = PAGE_SIZE; +#endif /* POOL_SUBPAGE */ + } + + TAILQ_INIT(&palloc->pa_list); - if (alloc == NULL && release == NULL) { - alloc = pool_page_alloc; - release = pool_page_free; - pagesz = PAGE_SIZE; /* Rounds to PAGE_SIZE anyhow. */ - } else if ((alloc != NULL && release != NULL) == 0) { - /* If you specifiy one, must specify both. */ - panic("pool_init: must specify alloc and release together"); - } - - if (pagesz == 0) - pagesz = PAGE_SIZE; + simple_lock_init(&palloc->pa_slock); + palloc->pa_pagemask = ~(palloc->pa_pagesz - 1); + palloc->pa_pageshift = ffs(palloc->pa_pagesz) - 1; + palloc->pa_flags |= PA_INITIALIZED; + } if (align == 0) align = ALIGN(1); @@ -395,9 +426,11 @@ pool_init(struct pool *pp, size_t size, size = sizeof(struct pool_item); size = ALIGN(size); - if (size > pagesz) +#ifdef DIAGNOSTIC + if (size > palloc->pa_pagesz) panic("pool_init: pool item size (%lu) too large", (u_long)size); +#endif /* * Initialize the pool structure. 
@@ -414,12 +447,7 @@ pool_init(struct pool *pp, size_t size, pp->pr_size = size; pp->pr_align = align; pp->pr_wchan = wchan; - pp->pr_mtype = mtype; - pp->pr_alloc = alloc; - pp->pr_free = release; - pp->pr_pagesz = pagesz; - pp->pr_pagemask = ~(pagesz - 1); - pp->pr_pageshift = ffs(pagesz) - 1; + pp->pr_alloc = palloc; pp->pr_nitems = 0; pp->pr_nout = 0; pp->pr_hardlimit = UINT_MAX; @@ -428,6 +456,8 @@ pool_init(struct pool *pp, size_t size, pp->pr_hardlimit_ratecap.tv_usec = 0; pp->pr_hardlimit_warning_last.tv_sec = 0; pp->pr_hardlimit_warning_last.tv_usec = 0; + pp->pr_drain_hook = NULL; + pp->pr_drain_hook_arg = NULL; /* * Decide whether to put the page header off page to avoid @@ -436,15 +466,15 @@ pool_init(struct pool *pp, size_t size, * with its header based on the page address. * We use 1/16 of the page size as the threshold (XXX: tune) */ - if (pp->pr_size < pagesz/16) { + if (pp->pr_size < palloc->pa_pagesz/16) { /* Use the end of the page for the page header */ pp->pr_roflags |= PR_PHINPAGE; - pp->pr_phoffset = off = - pagesz - ALIGN(sizeof(struct pool_item_header)); + pp->pr_phoffset = off = palloc->pa_pagesz - + ALIGN(sizeof(struct pool_item_header)); } else { /* The page header will be taken from our page header pool */ pp->pr_phoffset = 0; - off = pagesz; + off = palloc->pa_pagesz; for (i = 0; i < PR_HASHTABSIZE; i++) { LIST_INIT(&pp->pr_hashtab[i]); } @@ -477,6 +507,7 @@ pool_init(struct pool *pp, size_t size, pp->pr_hiwat = 0; pp->pr_nidle = 0; +#ifdef POOL_DIAGNOSTIC if (flags & PR_LOGGING) { if (kmem_map == NULL || (pp->pr_log = malloc(pool_logsize * sizeof(struct pool_log), @@ -485,6 +516,7 @@ pool_init(struct pool *pp, size_t size, pp->pr_curlogentry = 0; pp->pr_logsize = pool_logsize; } +#endif pp->pr_entered_file = NULL; pp->pr_entered_line = 0; @@ -497,16 +529,28 @@ pool_init(struct pool *pp, size_t size, * XXX LOCKING. */ if (phpool.pr_size == 0) { +#ifdef POOL_SUBPAGE + pool_init(&phpool, sizeof(struct pool_item_header), 0, 0, 0, + "phpool", &pool_allocator_kmem); + pool_init(&psppool, POOL_SUBPAGE, POOL_SUBPAGE, 0, + PR_RECURSIVE, "psppool", &pool_allocator_kmem); +#else pool_init(&phpool, sizeof(struct pool_item_header), 0, 0, - 0, "phpool", 0, 0, 0, 0); + 0, "phpool", NULL); +#endif pool_init(&pcgpool, sizeof(struct pool_cache_group), 0, 0, - 0, "pcgpool", 0, 0, 0, 0); + 0, "pcgpool", NULL); } /* Insert into the list of all pools. */ simple_lock(&pool_head_slock); TAILQ_INSERT_TAIL(&pool_head, pp, pr_poollist); simple_unlock(&pool_head_slock); + + /* Insert this into the list of pools using this allocator. */ + simple_lock(&palloc->pa_slock); + TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list); + simple_unlock(&palloc->pa_slock); } /* @@ -518,6 +562,11 @@ pool_destroy(struct pool *pp) struct pool_item_header *ph; struct pool_cache *pc; + /* Locking order: pool_allocator -> pool */ + simple_lock(&pp->pr_alloc->pa_slock); + TAILQ_REMOVE(&pp->pr_alloc->pa_list, pp, pr_alloc_list); + simple_unlock(&pp->pr_alloc->pa_slock); + /* Destroy all caches for this pool. */ while ((pc = TAILQ_FIRST(&pp->pr_cachelist)) != NULL) pool_cache_destroy(pc); @@ -532,21 +581,34 @@ pool_destroy(struct pool *pp) /* Remove all pages */ if ((pp->pr_roflags & PR_STATIC) == 0) - while ((ph = pp->pr_pagelist.tqh_first) != NULL) - pr_rmpage(pp, ph); + while ((ph = TAILQ_FIRST(&pp->pr_pagelist)) != NULL) + pr_rmpage(pp, ph, NULL); /* Remove from global pool list */ simple_lock(&pool_head_slock); TAILQ_REMOVE(&pool_head, pp, pr_poollist); - /* XXX Only clear this if we were drainpp? 
*/ - drainpp = NULL; + if (drainpp == pp) { + drainpp = NULL; + } simple_unlock(&pool_head_slock); +#ifdef POOL_DIAGNOSTIC if ((pp->pr_roflags & PR_LOGGING) != 0) free(pp->pr_log, M_TEMP); +#endif +} - if (pp->pr_roflags & PR_FREEHEADER) - free(pp, M_POOL); +void +pool_set_drain_hook(struct pool *pp, void (*fn)(void *, int), void *arg) +{ + + /* XXX no locking -- must be used just after pool_init() */ +#ifdef DIAGNOSTIC + if (pp->pr_drain_hook != NULL) + panic("pool_set_drain_hook(%s): already set", pp->pr_wchan); +#endif + pp->pr_drain_hook = fn; + pp->pr_drain_hook_arg = arg; } static __inline struct pool_item_header * @@ -572,7 +634,7 @@ pool_alloc_item_header(struct pool *pp, * Grab an item from the pool; must be called at appropriate spl level */ void * -#ifdef DIAGNOSTIC +#ifdef POOL_DIAGNOSTIC _pool_get(struct pool *pp, int flags, const char *file, long line) #else pool_get(struct pool *pp, int flags) @@ -592,7 +654,12 @@ pool_get(struct pool *pp, int flags) if (__predict_false(curproc == NULL && doing_shutdown == 0 && (flags & PR_WAITOK) != 0)) panic("pool_get: must have NOWAIT"); + +#ifdef LOCKDEBUG + if (flags & PR_WAITOK) + simple_lock_only_held(NULL, "pool_get(PR_WAITOK)"); #endif +#endif /* DIAGNOSTIC */ simple_lock(&pp->pr_slock); pr_enter(pp, file, line); @@ -611,6 +678,21 @@ pool_get(struct pool *pp, int flags) } #endif if (__predict_false(pp->pr_nout == pp->pr_hardlimit)) { + if (pp->pr_drain_hook != NULL) { + /* + * Since the drain hook is going to free things + * back to the pool, unlock, call the hook, re-lock, + * and check the hardlimit condition again. + */ + pr_leave(pp); + simple_unlock(&pp->pr_slock); + (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags); + simple_lock(&pp->pr_slock); + pr_enter(pp, file, line); + if (pp->pr_nout < pp->pr_hardlimit) + goto startover; + } + if ((flags & PR_WAITOK) && !(flags & PR_LIMITFAIL)) { /* * XXX: A warning isn't logged in this case. Should @@ -631,9 +713,6 @@ pool_get(struct pool *pp, int flags) &pp->pr_hardlimit_ratecap)) log(LOG_ERR, "%s\n", pp->pr_hardlimit_warning); - if (flags & PR_URGENT) - panic("pool_get: urgent"); - pp->pr_nfail++; pr_leave(pp); @@ -664,7 +743,7 @@ pool_get(struct pool *pp, int flags) */ pr_leave(pp); simple_unlock(&pp->pr_slock); - v = (*pp->pr_alloc)(pp->pr_pagesz, flags, pp->pr_mtype); + v = pool_allocator_alloc(pp, flags); if (__predict_true(v != NULL)) ph = pool_alloc_item_header(pp, v, flags); simple_lock(&pp->pr_slock); @@ -672,7 +751,7 @@ pool_get(struct pool *pp, int flags) if (__predict_false(v == NULL || ph == NULL)) { if (v != NULL) - (*pp->pr_free)(v, pp->pr_pagesz, pp->pr_mtype); + pool_allocator_free(pp, v); /* * We were unable to allocate a page or item @@ -683,9 +762,6 @@ pool_get(struct pool *pp, int flags) if (pp->pr_curpage != NULL) goto startover; - if (flags & PR_URGENT) - panic("pool_get: urgent"); - if ((flags & PR_WAITOK) == 0) { pp->pr_nfail++; pr_leave(pp); @@ -696,15 +772,11 @@ pool_get(struct pool *pp, int flags) /* * Wait for items to be returned to this pool. * - * XXX: we actually want to wait just until - * the page allocator has memory again. Depending - * on this pool's usage, we might get stuck here - * for a long time. - * * XXX: maybe we should wake up once a second and * try again? */ pp->pr_flags |= PR_WANTED; + /* PA_WANTED is already set on the allocator. 
*/ pr_leave(pp); ltsleep(pp, PSWP, pp->pr_wchan, 0, &pp->pr_slock); pr_enter(pp, file, line); @@ -732,9 +804,13 @@ pool_get(struct pool *pp, int flags) pp->pr_wchan, pp->pr_nitems); panic("pool_get: nitems inconsistent\n"); } +#endif +#ifdef POOL_DIAGNOSTIC pr_log(pp, v, PRLOG_GET, file, line); +#endif +#ifdef DIAGNOSTIC if (__predict_false(pi->pi_magic != PI_MAGIC)) { pr_printlog(pp, pi, printf); panic("pool_get(%s): free list modified: magic=%x; page %p;" @@ -779,8 +855,7 @@ pool_get(struct pool *pp, int flags) */ TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist); TAILQ_INSERT_TAIL(&pp->pr_pagelist, ph, ph_pagelist); - for (ph = TAILQ_FIRST(&pp->pr_pagelist); ph != NULL; - ph = TAILQ_NEXT(ph, ph_pagelist)) + TAILQ_FOREACH(ph, &pp->pr_pagelist, ph_pagelist) if (TAILQ_FIRST(&ph->ph_itemlist) != NULL) break; @@ -817,7 +892,9 @@ pool_do_put(struct pool *pp, void *v) caddr_t page; int s; - page = (caddr_t)((u_long)v & pp->pr_pagemask); + LOCK_ASSERT(simple_lock_held(&pp->pr_slock)); + + page = (caddr_t)((u_long)v & pp->pr_alloc->pa_pagemask); #ifdef DIAGNOSTIC if (__predict_false(pp->pr_nout == 0)) { @@ -890,7 +967,7 @@ pool_do_put(struct pool *pp, void *v) if (ph->ph_nmissing == 0) { pp->pr_nidle++; if (pp->pr_npages > pp->pr_maxpages) { - pr_rmpage(pp, ph); + pr_rmpage(pp, ph, NULL); } else { TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist); TAILQ_INSERT_TAIL(&pp->pr_pagelist, ph, ph_pagelist); @@ -913,8 +990,7 @@ pool_do_put(struct pool *pp, void *v) * page with the fewest available items, to minimize * fragmentation? */ - for (ph = TAILQ_FIRST(&pp->pr_pagelist); ph != NULL; - ph = TAILQ_NEXT(ph, ph_pagelist)) + TAILQ_FOREACH(ph, &pp->pr_pagelist, ph_pagelist) if (TAILQ_FIRST(&ph->ph_itemlist) != NULL) break; @@ -937,7 +1013,7 @@ pool_do_put(struct pool *pp, void *v) /* * Return resource to the pool; must be called at appropriate spl level */ -#ifdef DIAGNOSTIC +#ifdef POOL_DIAGNOSTIC void _pool_put(struct pool *pp, void *v, const char *file, long line) { @@ -952,8 +1028,9 @@ _pool_put(struct pool *pp, void *v, cons pr_leave(pp); simple_unlock(&pp->pr_slock); } +#undef pool_put +#endif /* POOL_DIAGNOSTIC */ -#else void pool_put(struct pool *pp, void *v) { @@ -964,6 +1041,9 @@ pool_put(struct pool *pp, void *v) simple_unlock(&pp->pr_slock); } + +#ifdef POOL_DIAGNOSTIC +#define pool_put(h, v) _pool_put((h), (v), __FILE__, __LINE__) #endif /* @@ -982,7 +1062,7 @@ pool_prime(struct pool *pp, int n) while (newpages-- > 0) { simple_unlock(&pp->pr_slock); - cp = (*pp->pr_alloc)(pp->pr_pagesz, PR_NOWAIT, pp->pr_mtype); + cp = pool_allocator_alloc(pp, PR_NOWAIT); if (__predict_true(cp != NULL)) ph = pool_alloc_item_header(pp, cp, PR_NOWAIT); simple_lock(&pp->pr_slock); @@ -990,7 +1070,7 @@ pool_prime(struct pool *pp, int n) if (__predict_false(cp == NULL || ph == NULL)) { error = ENOMEM; if (cp != NULL) - (*pp->pr_free)(cp, pp->pr_pagesz, pp->pr_mtype); + pool_allocator_free(pp, cp); break; } @@ -1020,8 +1100,10 @@ pool_prime_page(struct pool *pp, caddr_t unsigned int ioff = pp->pr_itemoffset; int n; - if (((u_long)cp & (pp->pr_pagesz - 1)) != 0) +#ifdef DIAGNOSTIC + if (((u_long)cp & (pp->pr_alloc->pa_pagesz - 1)) != 0) panic("pool_prime_page: %s: unaligned page", pp->pr_wchan); +#endif if ((pp->pr_roflags & PR_PHINPAGE) == 0) LIST_INSERT_HEAD(&pp->pr_hashtab[PR_HASH_INDEX(pp, cp)], @@ -1116,13 +1198,13 @@ pool_catchup(struct pool *pp) * the pool descriptor? 
*/ simple_unlock(&pp->pr_slock); - cp = (*pp->pr_alloc)(pp->pr_pagesz, PR_NOWAIT, pp->pr_mtype); + cp = pool_allocator_alloc(pp, PR_NOWAIT); if (__predict_true(cp != NULL)) ph = pool_alloc_item_header(pp, cp, PR_NOWAIT); simple_lock(&pp->pr_slock); if (__predict_false(cp == NULL || ph == NULL)) { if (cp != NULL) - (*pp->pr_free)(cp, pp->pr_pagesz, pp->pr_mtype); + pool_allocator_free(pp, cp); error = ENOMEM; break; } @@ -1194,49 +1276,10 @@ pool_sethardlimit(struct pool *pp, int n } /* - * Default page allocator. - */ -static void * -pool_page_alloc(unsigned long sz, int flags, int mtype) -{ - boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE; - - return ((void *)uvm_km_alloc_poolpage(waitok)); -} - -static void -pool_page_free(void *v, unsigned long sz, int mtype) -{ - - uvm_km_free_poolpage((vaddr_t)v); -} - -/* - * Alternate pool page allocator for pools that know they will - * never be accessed in interrupt context. - */ -void * -pool_page_alloc_nointr(unsigned long sz, int flags, int mtype) -{ - boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE; - - return ((void *)uvm_km_alloc_poolpage1(kernel_map, uvm.kernel_object, - waitok)); -} - -void -pool_page_free_nointr(void *v, unsigned long sz, int mtype) -{ - - uvm_km_free_poolpage1(kernel_map, (vaddr_t)v); -} - - -/* * Release all complete pages that have not been used recently. */ -void -#ifdef DIAGNOSTIC +int +#ifdef POOL_DIAGNOSTIC _pool_reclaim(struct pool *pp, const char *file, long line) #else pool_reclaim(struct pool *pp) @@ -1245,20 +1288,29 @@ pool_reclaim(struct pool *pp) struct pool_item_header *ph, *phnext; struct pool_cache *pc; struct timeval curtime; + struct pool_pagelist pq; int s; if (pp->pr_roflags & PR_STATIC) - return; + return (0); + + if (pp->pr_drain_hook != NULL) { + /* + * The drain hook must be called with the pool unlocked. + */ + (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, PR_NOWAIT); + } if (simple_lock_try(&pp->pr_slock) == 0) - return; + return (0); pr_enter(pp, file, line); + TAILQ_INIT(&pq); + /* * Reclaim items from the pool's caches. */ - for (pc = TAILQ_FIRST(&pp->pr_cachelist); pc != NULL; - pc = TAILQ_NEXT(pc, pc_poollist)) + TAILQ_FOREACH(pc, &pp->pr_cachelist, pc_poollist) pool_cache_reclaim(pc); s = splclock(); @@ -1286,14 +1338,29 @@ pool_reclaim(struct pool *pp) pp->pr_minitems) break; - pr_rmpage(pp, ph); + pr_rmpage(pp, ph, &pq); } } pr_leave(pp); simple_unlock(&pp->pr_slock); -} + if (TAILQ_EMPTY(&pq)) + return (0); + + while ((ph = TAILQ_FIRST(&pq)) != NULL) { + TAILQ_REMOVE(&pq, ph, ph_pagelist); + pool_allocator_free(pp, ph->ph_page); + if (pp->pr_roflags & PR_PHINPAGE) { + continue; + } + LIST_REMOVE(ph, ph_hashlist); + s = splhigh(); + pool_put(&phpool, ph); + splx(s); + } + return (1); +} /* * Drain pools, one at a time. @@ -1306,23 +1373,21 @@ pool_drain(void *arg) struct pool *pp; int s; + pp = NULL; s = splvm(); simple_lock(&pool_head_slock); - - if (drainpp == NULL && (drainpp = TAILQ_FIRST(&pool_head)) == NULL) - goto out; - - pp = drainpp; - drainpp = TAILQ_NEXT(pp, pr_poollist); - - pool_reclaim(pp); - - out: + if (drainpp == NULL) { + drainpp = TAILQ_FIRST(&pool_head); + } + if (drainpp) { + pp = drainpp; + drainpp = TAILQ_NEXT(pp, pr_poollist); + } simple_unlock(&pool_head_slock); + pool_reclaim(pp); splx(s); } - /* * Diagnostic helpers. 
*/ @@ -1398,8 +1463,7 @@ pool_print1(struct pool *pp, const char (*pr)("POOL %s: size %u, align %u, ioff %u, roflags 0x%08x\n", pp->pr_wchan, pp->pr_size, pp->pr_align, pp->pr_itemoffset, pp->pr_roflags); - (*pr)("\tpagesz %u, mtype %d\n", pp->pr_pagesz, pp->pr_mtype); - (*pr)("\talloc %p, release %p\n", pp->pr_alloc, pp->pr_free); + (*pr)("\talloc %p\n", pp->pr_alloc); (*pr)("\tminitems %u, minpages %u, maxpages %u, npages %u\n", pp->pr_minitems, pp->pr_minpages, pp->pr_maxpages, pp->pr_npages); (*pr)("\titemsperpage %u, nitems %u, nout %u, hardlimit %u\n", @@ -1421,8 +1485,7 @@ pool_print1(struct pool *pp, const char (u_long)ph->ph_time.tv_sec, (u_long)ph->ph_time.tv_usec); #ifdef DIAGNOSTIC - for (pi = TAILQ_FIRST(&ph->ph_itemlist); pi != NULL; - pi = TAILQ_NEXT(pi, pi_list)) { + TAILQ_FOREACH(pi, &ph->ph_itemlist, pi_list) { if (pi->pi_magic != PI_MAGIC) { (*pr)("\t\t\titem %p, magic 0x%x\n", pi, pi->pi_magic); @@ -1451,14 +1514,12 @@ pool_print1(struct pool *pp, const char if (print_cache == 0) goto skip_cache; - for (pc = TAILQ_FIRST(&pp->pr_cachelist); pc != NULL; - pc = TAILQ_NEXT(pc, pc_poollist)) { + TAILQ_FOREACH(pc, &pp->pr_cachelist, pc_poollist) { (*pr)("\tcache %p: allocfrom %p freeto %p\n", pc, pc->pc_allocfrom, pc->pc_freeto); (*pr)("\t hits %lu misses %lu ngroups %lu nitems %lu\n", pc->pc_hits, pc->pc_misses, pc->pc_ngroups, pc->pc_nitems); - for (pcg = TAILQ_FIRST(&pc->pc_grouplist); pcg != NULL; - pcg = TAILQ_NEXT(pcg, pcg_list)) { + TAILQ_FOREACH(pcg, &pc->pc_grouplist, pcg_list) { (*pr)("\t\tgroup %p: avail %d\n", pcg, pcg->pcg_avail); for (i = 0; i < PCG_NOBJECTS; i++) (*pr)("\t\t\t%p\n", pcg->pcg_objects[i]); @@ -1478,14 +1539,12 @@ pool_chk(struct pool *pp, const char *la simple_lock(&pp->pr_slock); - for (ph = TAILQ_FIRST(&pp->pr_pagelist); ph != NULL; - ph = TAILQ_NEXT(ph, ph_pagelist)) { - + TAILQ_FOREACH(ph, &pp->pr_pagelist, ph_pagelist) { struct pool_item *pi; int n; caddr_t page; - page = (caddr_t)((u_long)ph & pp->pr_pagemask); + page = (caddr_t)((u_long)ph & pp->pr_alloc->pa_pagemask); if (page != ph->ph_page && (pp->pr_roflags & PR_PHINPAGE) != 0) { if (label != NULL) @@ -1514,7 +1573,8 @@ pool_chk(struct pool *pp, const char *la panic("pool"); } #endif - page = (caddr_t)((u_long)pi & pp->pr_pagemask); + page = + (caddr_t)((u_long)pi & pp->pr_alloc->pa_pagemask); if (page == ph->ph_page) continue; @@ -1630,11 +1690,15 @@ pool_cache_get(struct pool_cache *pc, in struct pool_cache_group *pcg; void *object; +#ifdef LOCKDEBUG + if (flags & PR_WAITOK) + simple_lock_only_held(NULL, "pool_cache_get(PR_WAITOK)"); +#endif + simple_lock(&pc->pc_slock); if ((pcg = pc->pc_allocfrom) == NULL) { - for (pcg = TAILQ_FIRST(&pc->pc_grouplist); pcg != NULL; - pcg = TAILQ_NEXT(pcg, pcg_list)) { + TAILQ_FOREACH(pcg, &pc->pc_grouplist, pcg_list) { if (pcg->pcg_avail != 0) { pc->pc_allocfrom = pcg; goto have_group; @@ -1681,12 +1745,12 @@ void pool_cache_put(struct pool_cache *pc, void *object) { struct pool_cache_group *pcg; + int s; simple_lock(&pc->pc_slock); if ((pcg = pc->pc_freeto) == NULL) { - for (pcg = TAILQ_FIRST(&pc->pc_grouplist); pcg != NULL; - pcg = TAILQ_NEXT(pcg, pcg_list)) { + TAILQ_FOREACH(pcg, &pc->pc_grouplist, pcg_list) { if (pcg->pcg_avail != PCG_NOBJECTS) { pc->pc_freeto = pcg; goto have_group; @@ -1698,7 +1762,9 @@ pool_cache_put(struct pool_cache *pc, vo * allocate one. 
*/ simple_unlock(&pc->pc_slock); + s = splvm(); pcg = pool_get(&pcgpool, PR_NOWAIT); + splx(s); if (pcg != NULL) { memset(pcg, 0, sizeof(*pcg)); simple_lock(&pc->pc_slock); @@ -1754,6 +1820,7 @@ pool_cache_do_invalidate(struct pool_cac { struct pool_cache_group *pcg, *npcg; void *object; + int s; for (pcg = TAILQ_FIRST(&pc->pc_grouplist); pcg != NULL; pcg = npcg) { @@ -1772,7 +1839,9 @@ pool_cache_do_invalidate(struct pool_cac TAILQ_REMOVE(&pc->pc_grouplist, pcg, pcg_list); if (pc->pc_freeto == pcg) pc->pc_freeto = NULL; + s = splvm(); pool_put(&pcgpool, pcg); + splx(s); } } } @@ -1805,3 +1874,218 @@ pool_cache_reclaim(struct pool_cache *pc pool_cache_do_invalidate(pc, 1, pool_do_put); simple_unlock(&pc->pc_slock); } + +/* + * Pool backend allocators. + * + * Each pool has a backend allocator that handles allocation, deallocation, + * and any additional draining that might be needed. + * + * We provide two standard allocators: + * + * pool_allocator_kmem - the default when no allocator is specified + * + * pool_allocator_nointr - used for pools that will not be accessed + * in interrupt context. + */ +void *pool_page_alloc(struct pool *, int); +void pool_page_free(struct pool *, void *); + +struct pool_allocator pool_allocator_kmem = { + pool_page_alloc, pool_page_free, 0, +}; + +void *pool_page_alloc_nointr(struct pool *, int); +void pool_page_free_nointr(struct pool *, void *); + +struct pool_allocator pool_allocator_nointr = { + pool_page_alloc_nointr, pool_page_free_nointr, 0, +}; + +#ifdef POOL_SUBPAGE +void *pool_subpage_alloc(struct pool *, int); +void pool_subpage_free(struct pool *, void *); + +struct pool_allocator pool_allocator_kmem_subpage = { + pool_subpage_alloc, pool_subpage_free, 0, +}; +#endif /* POOL_SUBPAGE */ + +/* + * We have at least three different resources for the same allocation and + * each resource can be depleted. First, we have the ready elements in the + * pool. Then we have the resource (typically a vm_map) for this allocator. + * Finally, we have physical memory. Waiting for any of these can be + * unnecessary when any other is freed, but the kernel doesn't support + * sleeping on multiple wait channels, so we have to employ another strategy. + * + * The caller sleeps on the pool (so that it can be awakened when an item + * is returned to the pool), but we set PA_WANT on the allocator. When a + * page is returned to the allocator and PA_WANT is set, pool_allocator_free + * will wake up all sleeping pools belonging to this allocator. + * + * XXX Thundering herd. + */ +void * +pool_allocator_alloc(struct pool *org, int flags) +{ + struct pool_allocator *pa = org->pr_alloc; + struct pool *pp, *start; + int s, freed; + void *res; + + do { + if ((res = (*pa->pa_alloc)(org, flags)) != NULL) + return (res); + if ((flags & PR_WAITOK) == 0) { + /* + * We only run the drain hookhere if PR_NOWAIT. + * In other cases, the hook will be run in + * pool_reclaim(). + */ + if (org->pr_drain_hook != NULL) { + (*org->pr_drain_hook)(org->pr_drain_hook_arg, + flags); + if ((res = (*pa->pa_alloc)(org, flags)) != NULL) + return (res); + } + break; + } + + /* + * Drain all pools, except "org", that use this + * allocator. We do this to reclaim VA space. + * pa_alloc is responsible for waiting for + * physical memory. + * + * XXX We risk looping forever if start if someone + * calls pool_destroy on "start". But there is no + * other way to have potentially sleeping pool_reclaim, + * non-sleeping locks on pool_allocator, and some + * stirring of drained pools in the allocator. 
+ * + * XXX Maybe we should use pool_head_slock for locking + * the allocators? + */ + freed = 0; + + s = splvm(); + simple_lock(&pa->pa_slock); + pp = start = TAILQ_FIRST(&pa->pa_list); + do { + TAILQ_REMOVE(&pa->pa_list, pp, pr_alloc_list); + TAILQ_INSERT_TAIL(&pa->pa_list, pp, pr_alloc_list); + if (pp == org) + continue; + simple_unlock(&pa->pa_list); + freed = pool_reclaim(pp); + simple_lock(&pa->pa_list); + } while ((pp = TAILQ_FIRST(&pa->pa_list)) != start && + freed == 0); + + if (freed == 0) { + /* + * We set PA_WANT here, the caller will most likely + * sleep waiting for pages (if not, this won't hurt + * that much), and there is no way to set this in + * the caller without violating locking order. + */ + pa->pa_flags |= PA_WANT; + } + simple_unlock(&pa->pa_slock); + splx(s); + } while (freed); + return (NULL); +} + +void +pool_allocator_free(struct pool *pp, void *v) +{ + struct pool_allocator *pa = pp->pr_alloc; + int s; + + (*pa->pa_free)(pp, v); + + s = splvm(); + simple_lock(&pa->pa_slock); + if ((pa->pa_flags & PA_WANT) == 0) { + simple_unlock(&pa->pa_slock); + splx(s); + return; + } + + TAILQ_FOREACH(pp, &pa->pa_list, pr_alloc_list) { + simple_lock(&pp->pr_slock); + if ((pp->pr_flags & PR_WANTED) != 0) { + pp->pr_flags &= ~PR_WANTED; + wakeup(pp); + } + simple_unlock(&pp->pr_slock); + } + pa->pa_flags &= ~PA_WANT; + simple_unlock(&pa->pa_slock); + splx(s); +} + +void * +pool_page_alloc(struct pool *pp, int flags) +{ + boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE; + + return ((void *) uvm_km_alloc_poolpage(waitok)); +} + +void +pool_page_free(struct pool *pp, void *v) +{ + + uvm_km_free_poolpage((vaddr_t) v); +} + +#ifdef POOL_SUBPAGE +/* Sub-page allocator, for machines with large hardware pages. */ +void * +pool_subpage_alloc(struct pool *pp, int flags) +{ + + return (pool_get(&psppool, flags)); +} + +void +pool_subpage_free(struct pool *pp, void *v) +{ + + pool_put(&psppool, v); +} + +/* We don't provide a real nointr allocator. Maybe later. */ +void * +pool_page_alloc_nointr(struct pool *pp, int flags) +{ + + return (pool_subpage_alloc(pp, flags)); +} + +void +pool_page_free_nointr(struct pool *pp, void *v) +{ + + pool_subpage_free(pp, v); +} +#else +void * +pool_page_alloc_nointr(struct pool *pp, int flags) +{ + boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE; + + return ((void *) uvm_km_alloc_poolpage1(kernel_map, + uvm.kernel_object, waitok)); +} + +void +pool_page_free_nointr(struct pool *pp, void *v) +{ + + uvm_km_free_poolpage1(kernel_map, (vaddr_t) v); +} +#endif /* POOL_SUBPAGE */
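
[Editor's illustrative note -- not part of the diff.]  Below is a minimal sketch of how a
client subsystem might use the reworked interface after this revision: pool_init() now
takes a "struct pool_allocator *" in place of the old (alloc, release, pagesz, mtype)
arguments, and a pool may register a drain hook that is invoked when the hard limit is
hit or the backing allocator cannot supply a page.  The pool name "examplepl", the item
type "struct example_item", and the drain function are hypothetical; only pool_init(),
pool_set_drain_hook(), pool_get(), pool_put(), pool_allocator_nointr/pool_allocator_kmem,
and the PR_* flags come from the code above, and <sys/pool.h> is assumed to be the usual
header for this API.

#include <sys/param.h>
#include <sys/pool.h>

struct example_item {
	int	ei_value;
};

static struct pool example_pool;

/*
 * Hypothetical drain hook: called with the pool unlocked when the pool hits
 * its hard limit or its allocator runs dry; "flags" carries the PR_* flags
 * (PR_NOWAIT or PR_WAITOK) of the failing request.
 */
static void
example_drain(void *arg, int flags)
{

	/* Release any privately cached example_items back to example_pool. */
}

void
example_init(void)
{

	/*
	 * Passing NULL selects pool_allocator_kmem (or the sub-page allocator
	 * when POOL_SUBPAGE is configured); a pool never touched from
	 * interrupt context could pass &pool_allocator_nointr instead.
	 */
	pool_init(&example_pool, sizeof(struct example_item), 0, 0, 0,
	    "examplepl", NULL);

	/* Must follow pool_init() immediately; the hook field is unlocked. */
	pool_set_drain_hook(&example_pool, example_drain, NULL);
}

struct example_item *
example_alloc(void)
{

	/* Process context: PR_WAITOK may sleep waiting for items or pages. */
	return (pool_get(&example_pool, PR_WAITOK));
}

void
example_free(struct example_item *ei)
{

	pool_put(&example_pool, ei);
}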