===================================================================
RCS file: /ftp/cvs/cvsroot/src/sys/kern/subr_pool.c,v
retrieving revision 1.45
retrieving revision 1.60.2.1
diff -u -p -r1.45 -r1.60.2.1
--- src/sys/kern/subr_pool.c	2000/12/07 20:16:56	1.45
+++ src/sys/kern/subr_pool.c	2002/01/10 20:00:01	1.60.2.1
@@ -1,4 +1,4 @@
-/*	$NetBSD: subr_pool.c,v 1.45 2000/12/07 20:16:56 thorpej Exp $	*/
+/*	$NetBSD: subr_pool.c,v 1.60.2.1 2002/01/10 20:00:01 thorpej Exp $	*/
 
 /*-
  * Copyright (c) 1997, 1999, 2000 The NetBSD Foundation, Inc.
@@ -37,6 +37,9 @@
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
+#include <sys/cdefs.h>
+__KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.60.2.1 2002/01/10 20:00:01 thorpej Exp $");
+
 #include "opt_pool.h"
 #include "opt_poollog.h"
 #include "opt_lockdebug.h"
@@ -70,6 +73,11 @@ TAILQ_HEAD(,pool) pool_head = TAILQ_HEAD
 /* Private pool for page header structures */
 static struct pool phpool;
 
+#ifdef POOL_SUBPAGE
+/* Pool of subpages for use by normal pools. */
+static struct pool psppool;
+#endif
+
 /* # of seconds to retain page after last use */
 int pool_inactive_time = 10;
@@ -90,6 +98,7 @@ struct pool_item_header {
 	caddr_t		ph_page;	/* this page's address */
 	struct timeval	ph_time;	/* last referenced */
 };
+TAILQ_HEAD(pool_pagelist,pool_item_header);
 
 struct pool_item {
 #ifdef DIAGNOSTIC
@@ -103,6 +112,9 @@ struct pool_item {
 #define	PR_HASH_INDEX(pp,addr) \
 	(((u_long)(addr) >> (pp)->pr_pageshift) & (PR_HASHTABSIZE - 1))
 
+#define	POOL_NEEDS_CATCHUP(pp)	\
+	((pp)->pr_nitems < (pp)->pr_minitems)
+
 /*
  * Pool cache management.
  *
@@ -145,15 +157,20 @@ struct pool_cache_group {
 static void	pool_cache_reclaim(struct pool_cache *);
 
 static int	pool_catchup(struct pool *);
-static void	pool_prime_page(struct pool *, caddr_t);
+static void	pool_prime_page(struct pool *, caddr_t,
+		    struct pool_item_header *);
 static void	*pool_page_alloc(unsigned long, int, int);
 static void	pool_page_free(void *, unsigned long, int);
+#ifdef POOL_SUBPAGE
+static void	*pool_subpage_alloc(unsigned long, int, int);
+static void	pool_subpage_free(void *, unsigned long, int);
+#endif
 
 static void pool_print1(struct pool *, const char *,
 	    void (*)(const char *, ...));
 
 /*
- * Pool log entry. An array of these is allocated in pool_create().
+ * Pool log entry. An array of these is allocated in pool_init().
  */
 struct pool_log {
 	const char	*pl_file;
@@ -171,7 +188,7 @@ struct pool_log {
 
 int pool_logsize = POOL_LOGSIZE;
 
-#ifdef DIAGNOSTIC
+#ifdef POOL_DIAGNOSTIC
 static __inline void
 pr_log(struct pool *pp, void *v, int action, const char *file, long line)
 {
@@ -268,7 +285,7 @@ pr_enter_check(struct pool *pp, void (*p
 #define	pr_enter(pp, file, line)
 #define	pr_leave(pp)
 #define	pr_enter_check(pp, pr)
-#endif /* DIAGNOSTIC */
+#endif /* POOL_DIAGNOSTIC */
 
 /*
  * Return the pool page header based on page address.
@@ -294,8 +311,10 @@ pr_find_pagehead(struct pool *pp, caddr_
  * Remove a page from the pool.
  */
 static __inline void
-pr_rmpage(struct pool *pp, struct pool_item_header *ph)
+pr_rmpage(struct pool *pp, struct pool_item_header *ph,
+     struct pool_pagelist *pq)
 {
+	int s;
 
 	/*
 	 * If the page was idle, decrement the idle page count.
@@ -313,29 +332,30 @@ pr_rmpage(struct pool *pp, struct pool_i
 	pp->pr_nitems -= pp->pr_itemsperpage;
 
 	/*
-	 * Unlink a page from the pool and release it.
+	 * Unlink a page from the pool and release it (or queue it for release).
 	 */
 	TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
-	(*pp->pr_free)(ph->ph_page, pp->pr_pagesz, pp->pr_mtype);
+	if (pq) {
+		TAILQ_INSERT_HEAD(pq, ph, ph_pagelist);
+	} else {
+		(*pp->pr_free)(ph->ph_page, pp->pr_pagesz, pp->pr_mtype);
+		if ((pp->pr_roflags & PR_PHINPAGE) == 0) {
+			LIST_REMOVE(ph, ph_hashlist);
+			s = splhigh();
+			pool_put(&phpool, ph);
+			splx(s);
+		}
+	}
 	pp->pr_npages--;
 	pp->pr_npagefree++;
 
-	if ((pp->pr_roflags & PR_PHINPAGE) == 0) {
-		int s;
-		LIST_REMOVE(ph, ph_hashlist);
-		s = splhigh();
-		pool_put(&phpool, ph);
-		splx(s);
-	}
-
 	if (pp->pr_curpage == ph) {
 		/*
 		 * Find a new non-empty page header, if any.
 		 * Start search from the page head, to increase the
 		 * chance for "high water" pages to be freed.
 		 */
-		for (ph = TAILQ_FIRST(&pp->pr_pagelist); ph != NULL;
-		     ph = TAILQ_NEXT(ph, ph_pagelist))
+		TAILQ_FOREACH(ph, &pp->pr_pagelist, ph_pagelist)
 			if (TAILQ_FIRST(&ph->ph_itemlist) != NULL)
 				break;
@@ -344,37 +364,6 @@ pr_rmpage(struct pool *pp, struct pool_i
 }
 
 /*
- * Allocate and initialize a pool.
- */
-struct pool *
-pool_create(size_t size, u_int align, u_int ioff, int nitems,
-    const char *wchan, size_t pagesz,
-    void *(*alloc)(unsigned long, int, int),
-    void (*release)(void *, unsigned long, int),
-    int mtype)
-{
-	struct pool *pp;
-	int flags;
-
-	pp = (struct pool *)malloc(sizeof(*pp), M_POOL, M_NOWAIT);
-	if (pp == NULL)
-		return (NULL);
-
-	flags = PR_FREEHEADER;
-	pool_init(pp, size, align, ioff, flags, wchan, pagesz,
-	    alloc, release, mtype);
-
-	if (nitems != 0) {
-		if (pool_prime(pp, nitems, NULL) != 0) {
-			pool_destroy(pp);
-			return (NULL);
-		}
-	}
-
-	return (pp);
-}
-
-/*
  * Initialize the given pool resource structure.
  *
  * We export this routine to allow other kernel parts to declare
@@ -404,13 +393,24 @@ pool_init(struct pool *pp, size_t size, 
 		panic("pool_init: page size invalid (%lx)\n", (u_long)pagesz);
 
 	if (alloc == NULL && release == NULL) {
+#ifdef POOL_SUBPAGE
+		alloc = pool_subpage_alloc;
+		release = pool_subpage_free;
+		pagesz = POOL_SUBPAGE;
+#else
 		alloc = pool_page_alloc;
 		release = pool_page_free;
 		pagesz = PAGE_SIZE;	/* Rounds to PAGE_SIZE anyhow. */
+#endif
 	} else if ((alloc != NULL && release != NULL) == 0) {
 		/* If you specifiy one, must specify both. */
 		panic("pool_init: must specify alloc and release together");
 	}
+#ifdef POOL_SUBPAGE
+	else if (alloc == pool_page_alloc_nointr &&
+	    release == pool_page_free_nointr)
+		pagesz = POOL_SUBPAGE;
+#endif
 
 	if (pagesz == 0)
 		pagesz = PAGE_SIZE;
@@ -504,6 +504,7 @@ pool_init(struct pool *pp, size_t size, 
 	pp->pr_hiwat = 0;
 	pp->pr_nidle = 0;
 
+#ifdef POOL_DIAGNOSTIC
 	if (flags & PR_LOGGING) {
 		if (kmem_map == NULL ||
 		    (pp->pr_log = malloc(pool_logsize * sizeof(struct pool_log),
@@ -512,6 +513,7 @@ pool_init(struct pool *pp, size_t size, 
 		pp->pr_curlogentry = 0;
 		pp->pr_logsize = pool_logsize;
 	}
+#endif
 
 	pp->pr_entered_file = NULL;
 	pp->pr_entered_line = 0;
@@ -524,8 +526,16 @@ pool_init(struct pool *pp, size_t size, 
 	 * XXX LOCKING.
 	 */
 	if (phpool.pr_size == 0) {
+#ifdef POOL_SUBPAGE
+		pool_init(&phpool, sizeof(struct pool_item_header), 0, 0, 0,
+		    "phpool", PAGE_SIZE, pool_page_alloc, pool_page_free, 0);
+		pool_init(&psppool, POOL_SUBPAGE, POOL_SUBPAGE, 0,
+		    PR_RECURSIVE, "psppool", PAGE_SIZE,
+		    pool_page_alloc, pool_page_free, 0);
+#else
 		pool_init(&phpool, sizeof(struct pool_item_header), 0, 0, 0,
 		    "phpool", 0, 0, 0, 0);
+#endif
 		pool_init(&pcgpool, sizeof(struct pool_cache_group), 0, 0, 0,
 		    "pcgpool", 0, 0, 0, 0);
 	}
@@ -559,33 +569,58 @@ pool_destroy(struct pool *pp)
 
 	/* Remove all pages */
 	if ((pp->pr_roflags & PR_STATIC) == 0)
-		while ((ph = pp->pr_pagelist.tqh_first) != NULL)
-			pr_rmpage(pp, ph);
+		while ((ph = TAILQ_FIRST(&pp->pr_pagelist)) != NULL)
+			pr_rmpage(pp, ph, NULL);
 
 	/* Remove from global pool list */
 	simple_lock(&pool_head_slock);
 	TAILQ_REMOVE(&pool_head, pp, pr_poollist);
-	/* XXX Only clear this if we were drainpp? */
-	drainpp = NULL;
+	if (drainpp == pp) {
+		drainpp = NULL;
+	}
 	simple_unlock(&pool_head_slock);
 
+#ifdef POOL_DIAGNOSTIC
 	if ((pp->pr_roflags & PR_LOGGING) != 0)
 		free(pp->pr_log, M_TEMP);
+#endif
 
 	if (pp->pr_roflags & PR_FREEHEADER)
 		free(pp, M_POOL);
 }
 
+static __inline struct pool_item_header *
+pool_alloc_item_header(struct pool *pp, caddr_t storage, int flags)
+{
+	struct pool_item_header *ph;
+	int s;
+
+	LOCK_ASSERT(simple_lock_held(&pp->pr_slock) == 0);
+
+	if ((pp->pr_roflags & PR_PHINPAGE) != 0)
+		ph = (struct pool_item_header *) (storage + pp->pr_phoffset);
+	else {
+		s = splhigh();
+		ph = pool_get(&phpool, flags);
+		splx(s);
+	}
+
+	return (ph);
+}
 
 /*
  * Grab an item from the pool; must be called at appropriate spl level
  */
 void *
+#ifdef POOL_DIAGNOSTIC
 _pool_get(struct pool *pp, int flags, const char *file, long line)
+#else
+pool_get(struct pool *pp, int flags)
+#endif
 {
-	void *v;
 	struct pool_item *pi;
 	struct pool_item_header *ph;
+	void *v;
 
 #ifdef DIAGNOSTIC
 	if (__predict_false((pp->pr_roflags & PR_STATIC) &&
@@ -593,12 +628,17 @@ _pool_get(struct pool *pp, int flags, co
 		pr_printlog(pp, NULL, printf);
 		panic("pool_get: static");
 	}
-#endif
 
 	if (__predict_false(curproc == NULL && doing_shutdown == 0 &&
 			    (flags & PR_WAITOK) != 0))
 		panic("pool_get: must have NOWAIT");
 
+#ifdef LOCKDEBUG
+	if (flags & PR_WAITOK)
+		simple_lock_only_held(NULL, "pool_get(PR_WAITOK)");
+#endif
+#endif /* DIAGNOSTIC */
+
 	simple_lock(&pp->pr_slock);
 	pr_enter(pp, file, line);
@@ -653,8 +693,6 @@ _pool_get(struct pool *pp, int flags, co
 	 * has no items in its bucket.
 	 */
 	if ((ph = pp->pr_curpage) == NULL) {
-		void *v;
-
 #ifdef DIAGNOSTIC
 		if (pp->pr_nitems != 0) {
 			simple_unlock(&pp->pr_slock);
@@ -672,15 +710,20 @@ _pool_get(struct pool *pp, int flags, co
 		pr_leave(pp);
 		simple_unlock(&pp->pr_slock);
 		v = (*pp->pr_alloc)(pp->pr_pagesz, flags, pp->pr_mtype);
+		if (__predict_true(v != NULL))
+			ph = pool_alloc_item_header(pp, v, flags);
 		simple_lock(&pp->pr_slock);
 		pr_enter(pp, file, line);
 
-		if (v == NULL) {
+		if (__predict_false(v == NULL || ph == NULL)) {
+			if (v != NULL)
+				(*pp->pr_free)(v, pp->pr_pagesz, pp->pr_mtype);
+
 			/*
-			 * We were unable to allocate a page, but
-			 * we released the lock during allocation,
-			 * so perhaps items were freed back to the
-			 * pool. Check for this case.
+			 * We were unable to allocate a page or item
+			 * header, but we released the lock during
+			 * allocation, so perhaps items were freed
+			 * back to the pool. Check for this case.
 			 */
 			if (pp->pr_curpage != NULL)
 				goto startover;
@@ -714,8 +757,8 @@ _pool_get(struct pool *pp, int flags, co
 		}
 
 		/* We have more memory; add it to the pool */
+		pool_prime_page(pp, v, ph);
 		pp->pr_npagealloc++;
-		pool_prime_page(pp, v);
 
 		/* Start the allocation process over. */
 		goto startover;
@@ -735,7 +778,10 @@ _pool_get(struct pool *pp, int flags, co
 		panic("pool_get: nitems inconsistent\n");
 	}
 #endif
+
+#ifdef POOL_DIAGNOSTIC
 	pr_log(pp, v, PRLOG_GET, file, line);
+#endif
 
 #ifdef DIAGNOSTIC
 	if (__predict_false(pi->pi_magic != PI_MAGIC)) {
@@ -782,8 +828,7 @@ _pool_get(struct pool *pp, int flags, co
 		 */
 		TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
 		TAILQ_INSERT_TAIL(&pp->pr_pagelist, ph, ph_pagelist);
-		for (ph = TAILQ_FIRST(&pp->pr_pagelist); ph != NULL;
-		     ph = TAILQ_NEXT(ph, ph_pagelist))
+		TAILQ_FOREACH(ph, &pp->pr_pagelist, ph_pagelist)
 			if (TAILQ_FIRST(&ph->ph_itemlist) != NULL)
 				break;
@@ -796,7 +841,7 @@ _pool_get(struct pool *pp, int flags, co
 	 * If we have a low water mark and we are now below that low
 	 * water mark, add more items to the pool.
 	 */
-	if (pp->pr_nitems < pp->pr_minitems && pool_catchup(pp) != 0) {
+	if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
 		/*
 		 * XXX: Should we log a warning? Should we set up a timeout
 		 * to try again in a second or so? The latter could break
@@ -813,13 +858,15 @@ _pool_get(struct pool *pp, int flags, co
  * Internal version of pool_put(). Pool is already locked/entered.
  */
 static void
-pool_do_put(struct pool *pp, void *v, const char *file, long line)
+pool_do_put(struct pool *pp, void *v)
 {
 	struct pool_item *pi = v;
 	struct pool_item_header *ph;
 	caddr_t page;
 	int s;
 
+	LOCK_ASSERT(simple_lock_held(&pp->pr_slock));
+
 	page = (caddr_t)((u_long)v & pp->pr_pagemask);
 
 #ifdef DIAGNOSTIC
@@ -830,8 +877,6 @@ pool_do_put(struct pool *pp, void *v, co
 	}
 #endif
 
-	pr_log(pp, v, PRLOG_PUT, file, line);
-
 	if (__predict_false((ph = pr_find_pagehead(pp, page)) == NULL)) {
 		pr_printlog(pp, NULL, printf);
 		panic("pool_put: %s: page header missing", pp->pr_wchan);
@@ -895,7 +940,7 @@ pool_do_put(struct pool *pp, void *v, co
 	if (ph->ph_nmissing == 0) {
 		pp->pr_nidle++;
 		if (pp->pr_npages > pp->pr_maxpages) {
-			pr_rmpage(pp, ph);
+			pr_rmpage(pp, ph, NULL);
 		} else {
 			TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
 			TAILQ_INSERT_TAIL(&pp->pr_pagelist, ph, ph_pagelist);
@@ -918,8 +963,7 @@ pool_do_put(struct pool *pp, void *v, co
 		 * page with the fewest available items, to minimize
 		 * fragmentation?
 		 */
-		for (ph = TAILQ_FIRST(&pp->pr_pagelist); ph != NULL;
-		     ph = TAILQ_NEXT(ph, ph_pagelist))
+		TAILQ_FOREACH(ph, &pp->pr_pagelist, ph_pagelist)
 			if (TAILQ_FIRST(&ph->ph_itemlist) != NULL)
 				break;
@@ -942,6 +986,7 @@ pool_do_put(struct pool *pp, void *v, co
 /*
  * Return resource to the pool; must be called at appropriate spl level
  */
+#ifdef POOL_DIAGNOSTIC
 void
 _pool_put(struct pool *pp, void *v, const char *file, long line)
 {
@@ -949,56 +994,64 @@ _pool_put(struct pool *pp, void *v, cons
 
 	simple_lock(&pp->pr_slock);
 	pr_enter(pp, file, line);
 
-	pool_do_put(pp, v, file, line);
+	pr_log(pp, v, PRLOG_PUT, file, line);
+
+	pool_do_put(pp, v);
 
 	pr_leave(pp);
 	simple_unlock(&pp->pr_slock);
 }
+#undef pool_put
+#endif /* POOL_DIAGNOSTIC */
+
+void
+pool_put(struct pool *pp, void *v)
+{
+
+	simple_lock(&pp->pr_slock);
+
+	pool_do_put(pp, v);
+
+	simple_unlock(&pp->pr_slock);
+}
+
+#ifdef POOL_DIAGNOSTIC
+#define pool_put(h, v)	_pool_put((h), (v), __FILE__, __LINE__)
+#endif
 
 /*
  * Add N items to the pool.
  */
 int
-pool_prime(struct pool *pp, int n, caddr_t storage)
+pool_prime(struct pool *pp, int n)
 {
+	struct pool_item_header *ph;
 	caddr_t cp;
-	int newnitems, newpages;
-
-#ifdef DIAGNOSTIC
-	if (__predict_false(storage && !(pp->pr_roflags & PR_STATIC)))
-		panic("pool_prime: static");
-	/* !storage && static caught below */
-#endif
+	int newpages, error = 0;
 
 	simple_lock(&pp->pr_slock);
 
-	newnitems = pp->pr_minitems + n;
-	newpages =
-	    roundup(newnitems, pp->pr_itemsperpage) / pp->pr_itemsperpage
-	    - pp->pr_minpages;
+	newpages = roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
 
 	while (newpages-- > 0) {
-		if (pp->pr_roflags & PR_STATIC) {
-			cp = storage;
-			storage += pp->pr_pagesz;
-		} else {
-			simple_unlock(&pp->pr_slock);
-			cp = (*pp->pr_alloc)(pp->pr_pagesz, 0, pp->pr_mtype);
-			simple_lock(&pp->pr_slock);
-		}
+		simple_unlock(&pp->pr_slock);
+		cp = (*pp->pr_alloc)(pp->pr_pagesz, PR_NOWAIT, pp->pr_mtype);
+		if (__predict_true(cp != NULL))
+			ph = pool_alloc_item_header(pp, cp, PR_NOWAIT);
+		simple_lock(&pp->pr_slock);
 
-		if (cp == NULL) {
-			simple_unlock(&pp->pr_slock);
-			return (ENOMEM);
+		if (__predict_false(cp == NULL || ph == NULL)) {
+			error = ENOMEM;
+			if (cp != NULL)
+				(*pp->pr_free)(cp, pp->pr_pagesz, pp->pr_mtype);
+			break;
 		}
 
+		pool_prime_page(pp, cp, ph);
 		pp->pr_npagealloc++;
-		pool_prime_page(pp, cp);
 		pp->pr_minpages++;
 	}
 
-	pp->pr_minitems = newnitems;
-
 	if (pp->pr_minpages >= pp->pr_maxpages)
 		pp->pr_maxpages = pp->pr_minpages + 1;	/* XXX */
@@ -1012,27 +1065,20 @@
  * Note, we must be called with the pool descriptor LOCKED.
  */
 static void
-pool_prime_page(struct pool *pp, caddr_t storage)
+pool_prime_page(struct pool *pp, caddr_t storage, struct pool_item_header *ph)
 {
 	struct pool_item *pi;
-	struct pool_item_header *ph;
 	caddr_t cp = storage;
 	unsigned int align = pp->pr_align;
 	unsigned int ioff = pp->pr_itemoffset;
-	int s, n;
+	int n;
 
 	if (((u_long)cp & (pp->pr_pagesz - 1)) != 0)
 		panic("pool_prime_page: %s: unaligned page", pp->pr_wchan);
 
-	if ((pp->pr_roflags & PR_PHINPAGE) != 0) {
-		ph = (struct pool_item_header *)(cp + pp->pr_phoffset);
-	} else {
-		s = splhigh();
-		ph = pool_get(&phpool, PR_URGENT);
-		splx(s);
+	if ((pp->pr_roflags & PR_PHINPAGE) == 0)
 		LIST_INSERT_HEAD(&pp->pr_hashtab[PR_HASH_INDEX(pp, cp)],
-		    ph, ph_hashlist);
-	}
+		    ph, ph_hashlist);
 
 	/*
 	 * Insert page header.
@@ -1086,9 +1132,8 @@ pool_prime_page(struct pool *pp, caddr_t
 }
 
 /*
- * Like pool_prime(), except this is used by pool_get() when nitems
- * drops below the low water mark. This is used to catch up nitmes
- * with the low water mark.
+ * Used by pool_get() when nitems drops below the low water mark. This
+ * is used to catch up nitmes with the low water mark.
  *
  * Note 1, we never wait for memory here, we let the caller decide what to do.
@@ -1100,6 +1145,7 @@ pool_prime_page(struct pool *pp, caddr_t
 static int
 pool_catchup(struct pool *pp)
 {
+	struct pool_item_header *ph;
 	caddr_t cp;
 	int error = 0;
@@ -1115,7 +1161,7 @@ pool_catchup(struct pool *pp)
 		return (0);
 	}
 
-	while (pp->pr_nitems < pp->pr_minitems) {
+	while (POOL_NEEDS_CATCHUP(pp)) {
 		/*
 		 * Call the page back-end allocator for more memory.
 		 *
 		 * XXX: We never wait, so should we bother unlocking
@@ -1123,14 +1169,18 @@
 		 * the pool descriptor?
 		 */
 		simple_unlock(&pp->pr_slock);
-		cp = (*pp->pr_alloc)(pp->pr_pagesz, 0, pp->pr_mtype);
+		cp = (*pp->pr_alloc)(pp->pr_pagesz, PR_NOWAIT, pp->pr_mtype);
+		if (__predict_true(cp != NULL))
+			ph = pool_alloc_item_header(pp, cp, PR_NOWAIT);
 		simple_lock(&pp->pr_slock);
-		if (__predict_false(cp == NULL)) {
+		if (__predict_false(cp == NULL || ph == NULL)) {
+			if (cp != NULL)
+				(*pp->pr_free)(cp, pp->pr_pagesz, pp->pr_mtype);
 			error = ENOMEM;
 			break;
 		}
+		pool_prime_page(pp, cp, ph);
 		pp->pr_npagealloc++;
-		pool_prime_page(pp, cp);
 	}
 
 	return (error);
@@ -1149,8 +1199,7 @@ pool_setlowat(struct pool *pp, int n)
 	    : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
 
 	/* Make sure we're caught up with the newly-set low water mark. */
-	if ((pp->pr_nitems < pp->pr_minitems) &&
-	    (error = pool_catchup(pp)) != 0) {
+	if (POOL_NEEDS_CATCHUP(pp) && (error = pool_catchup(pp) != 0)) {
 		/*
 		 * XXX: Should we log a warning? Should we set up a timeout
 		 * to try again in a second or so? The latter could break
@@ -1215,6 +1264,41 @@ pool_page_free(void *v, unsigned long sz
 	uvm_km_free_poolpage((vaddr_t)v);
 }
 
+#ifdef POOL_SUBPAGE
+/*
+ * Sub-page allocator, for machines with large hardware pages.
+ */
+static void *
+pool_subpage_alloc(unsigned long sz, int flags, int mtype)
+{
+
+	return pool_get(&psppool, flags);
+}
+
+static void
+pool_subpage_free(void *v, unsigned long sz, int mtype)
+{
+
+	pool_put(&psppool, v);
+}
+#endif
+
+#ifdef POOL_SUBPAGE
+/* We don't provide a real nointr allocator. Maybe later. */
+void *
+pool_page_alloc_nointr(unsigned long sz, int flags, int mtype)
+{
+
+	return pool_subpage_alloc(sz, flags, mtype);
+}
+
+void
+pool_page_free_nointr(void *v, unsigned long sz, int mtype)
+{
+
+	pool_subpage_free(v, sz, mtype);
+}
+#else
 /*
  * Alternate pool page allocator for pools that know they will
  * never be accessed in interrupt context.
@@ -1234,17 +1318,23 @@ pool_page_free_nointr(void *v, unsigned 
 	uvm_km_free_poolpage1(kernel_map, (vaddr_t)v);
 }
+#endif
 
 /*
  * Release all complete pages that have not been used recently.
  */
 void
+#ifdef POOL_DIAGNOSTIC
 _pool_reclaim(struct pool *pp, const char *file, long line)
+#else
+pool_reclaim(struct pool *pp)
+#endif
 {
 	struct pool_item_header *ph, *phnext;
 	struct pool_cache *pc;
 	struct timeval curtime;
+	struct pool_pagelist pq;
 	int s;
 
 	if (pp->pr_roflags & PR_STATIC)
@@ -1253,12 +1343,12 @@ _pool_reclaim(struct pool *pp, const cha
 	if (simple_lock_try(&pp->pr_slock) == 0)
 		return;
 	pr_enter(pp, file, line);
+	TAILQ_INIT(&pq);
 
 	/*
 	 * Reclaim items from the pool's caches.
 	 */
-	for (pc = TAILQ_FIRST(&pp->pr_cachelist); pc != NULL;
-	     pc = TAILQ_NEXT(pc, pc_poollist))
+	TAILQ_FOREACH(pc, &pp->pr_cachelist, pc_poollist)
 		pool_cache_reclaim(pc);
 
 	s = splclock();
@@ -1286,12 +1376,26 @@ _pool_reclaim(struct pool *pp, const cha
 			    pp->pr_minitems)
 				break;
 
-			pr_rmpage(pp, ph);
+			pr_rmpage(pp, ph, &pq);
 		}
 	}
 
 	pr_leave(pp);
 	simple_unlock(&pp->pr_slock);
+	if (TAILQ_EMPTY(&pq)) {
+		return;
+	}
+	while ((ph = TAILQ_FIRST(&pq)) != NULL) {
+		TAILQ_REMOVE(&pq, ph, ph_pagelist);
+		(*pp->pr_free)(ph->ph_page, pp->pr_pagesz, pp->pr_mtype);
+		if (pp->pr_roflags & PR_PHINPAGE) {
+			continue;
+		}
+		LIST_REMOVE(ph, ph_hashlist);
+		s = splhigh();
+		pool_put(&phpool, ph);
+		splx(s);
+	}
 }
@@ -1306,19 +1410,18 @@ pool_drain(void *arg)
 	struct pool *pp;
 	int s;
 
-	s = splimp();
+	pp = NULL;
+	s = splvm();
 	simple_lock(&pool_head_slock);
-
-	if (drainpp == NULL && (drainpp = TAILQ_FIRST(&pool_head)) == NULL)
-		goto out;
-
-	pp = drainpp;
-	drainpp = TAILQ_NEXT(pp, pr_poollist);
-
-	pool_reclaim(pp);
-
- out:
+	if (drainpp == NULL) {
+		drainpp = TAILQ_FIRST(&pool_head);
+	}
+	if (drainpp) {
+		pp = drainpp;
+		drainpp = TAILQ_NEXT(pp, pr_poollist);
+	}
 	simple_unlock(&pool_head_slock);
+	pool_reclaim(pp);
 	splx(s);
 }
@@ -1331,7 +1434,7 @@ pool_print(struct pool *pp, const char *
 {
 	int s;
 
-	s = splimp();
+	s = splvm();
 	if (simple_lock_try(&pp->pr_slock) == 0) {
 		printf("pool %s is locked; try again later\n",
 		    pp->pr_wchan);
@@ -1421,8 +1524,7 @@ pool_print1(struct pool *pp, const char 
 		    (u_long)ph->ph_time.tv_sec,
 		    (u_long)ph->ph_time.tv_usec);
 #ifdef DIAGNOSTIC
-		for (pi = TAILQ_FIRST(&ph->ph_itemlist); pi != NULL;
-		     pi = TAILQ_NEXT(pi, pi_list)) {
+		TAILQ_FOREACH(pi, &ph->ph_itemlist, pi_list) {
 			if (pi->pi_magic != PI_MAGIC) {
 				(*pr)("\t\t\titem %p, magic 0x%x\n",
 				    pi, pi->pi_magic);
@@ -1451,12 +1553,12 @@ pool_print1(struct pool *pp, const char 
 	if (print_cache == 0)
 		goto skip_cache;
 
-	for (pc = TAILQ_FIRST(&pp->pr_cachelist); pc != NULL;
-	     pc = TAILQ_NEXT(pc, pc_poollist)) {
+	TAILQ_FOREACH(pc, &pp->pr_cachelist, pc_poollist) {
 		(*pr)("\tcache %p: allocfrom %p freeto %p\n", pc,
 		    pc->pc_allocfrom, pc->pc_freeto);
-		for (pcg = TAILQ_FIRST(&pc->pc_grouplist); pcg != NULL;
-		     pcg = TAILQ_NEXT(pcg, pcg_list)) {
+		(*pr)("\t    hits %lu misses %lu ngroups %lu nitems %lu\n",
+		    pc->pc_hits, pc->pc_misses, pc->pc_ngroups, pc->pc_nitems);
+		TAILQ_FOREACH(pcg, &pc->pc_grouplist, pcg_list) {
 			(*pr)("\t\tgroup %p: avail %d\n", pcg,
 			    pcg->pcg_avail);
 			for (i = 0; i < PCG_NOBJECTS; i++)
 				(*pr)("\t\t\t%p\n", pcg->pcg_objects[i]);
@@ -1476,9 +1578,7 @@ pool_chk(struct pool *pp, const char *la
 
 	simple_lock(&pp->pr_slock);
 
-	for (ph = TAILQ_FIRST(&pp->pr_pagelist); ph != NULL;
-	     ph = TAILQ_NEXT(ph, ph_pagelist)) {
-
+	TAILQ_FOREACH(ph, &pp->pr_pagelist, ph_pagelist) {
 		struct pool_item *pi;
 		int n;
 		caddr_t page;
@@ -1557,6 +1657,13 @@ pool_cache_init(struct pool_cache *pc, s
 	pc->pc_dtor = dtor;
 	pc->pc_arg  = arg;
 
+	pc->pc_hits   = 0;
+	pc->pc_misses = 0;
+
+	pc->pc_ngroups = 0;
+
+	pc->pc_nitems = 0;
+
 	simple_lock(&pp->pr_slock);
 	TAILQ_INSERT_TAIL(&pp->pr_cachelist, pc, pc_poollist);
 	simple_unlock(&pp->pr_slock);
@@ -1621,11 +1728,15 @@ pool_cache_get(struct pool_cache *pc, in
 	struct pool_cache_group *pcg;
 	void *object;
 
+#ifdef LOCKDEBUG
+	if (flags & PR_WAITOK)
+		simple_lock_only_held(NULL, "pool_cache_get(PR_WAITOK)");
+#endif
+
 	simple_lock(&pc->pc_slock);
 
 	if ((pcg = pc->pc_allocfrom) == NULL) {
-		for (pcg = TAILQ_FIRST(&pc->pc_grouplist); pcg != NULL;
-		     pcg = TAILQ_NEXT(pcg, pcg_list)) {
+		TAILQ_FOREACH(pcg, &pc->pc_grouplist, pcg_list) {
 			if (pcg->pcg_avail != 0) {
 				pc->pc_allocfrom = pcg;
 				goto have_group;
@@ -1638,6 +1749,7 @@ pool_cache_get(struct pool_cache *pc, in
 		 * the caller. We will allocate a group, if necessary,
 		 * when the object is freed back to the cache.
 		 */
+		pc->pc_misses++;
 		simple_unlock(&pc->pc_slock);
 		object = pool_get(pc->pc_pool, flags);
 		if (object != NULL && pc->pc_ctor != NULL) {
@@ -1650,6 +1762,8 @@ pool_cache_get(struct pool_cache *pc, in
 	}
 
  have_group:
+	pc->pc_hits++;
+	pc->pc_nitems--;
 	object = pcg_get(pcg);
 
 	if (pcg->pcg_avail == 0)
@@ -1669,12 +1783,12 @@ void
 pool_cache_put(struct pool_cache *pc, void *object)
 {
 	struct pool_cache_group *pcg;
+	int s;
 
 	simple_lock(&pc->pc_slock);
 
 	if ((pcg = pc->pc_freeto) == NULL) {
-		for (pcg = TAILQ_FIRST(&pc->pc_grouplist); pcg != NULL;
-		     pcg = TAILQ_NEXT(pcg, pcg_list)) {
+		TAILQ_FOREACH(pcg, &pc->pc_grouplist, pcg_list) {
 			if (pcg->pcg_avail != PCG_NOBJECTS) {
 				pc->pc_freeto = pcg;
 				goto have_group;
 			}
@@ -1683,30 +1797,32 @@ pool_cache_put(struct pool_cache *pc, vo
 
 		/*
 		 * No empty groups to free the object to. Attempt to
-		 * allocate one. We don't unlock the cache here, since
-		 * we never block.
+		 * allocate one.
 		 */
+		simple_unlock(&pc->pc_slock);
+		s = splvm();
 		pcg = pool_get(&pcgpool, PR_NOWAIT);
+		splx(s);
 		if (pcg != NULL) {
 			memset(pcg, 0, sizeof(*pcg));
+			simple_lock(&pc->pc_slock);
+			pc->pc_ngroups++;
 			TAILQ_INSERT_TAIL(&pc->pc_grouplist, pcg, pcg_list);
-			pc->pc_freeto = pcg;
+			if (pc->pc_freeto == NULL)
+				pc->pc_freeto = pcg;
 			goto have_group;
 		}
 
-		simple_unlock(&pc->pc_slock);
-
 		/*
 		 * Unable to allocate a cache group; destruct the object
 		 * and free it back to the pool.
 		 */
-		if (pc->pc_dtor != NULL)
-			(*pc->pc_dtor)(pc->pc_arg, object);
-		pool_put(pc->pc_pool, object);
+		pool_cache_destruct_object(pc, object);
 		return;
 	}
 
 have_group:
+	pc->pc_nitems++;
 	pcg_put(pcg, object);
 
 	if (pcg->pcg_avail == PCG_NOBJECTS)
@@ -1716,6 +1832,21 @@ pool_cache_put(struct pool_cache *pc, vo
 }
 
 /*
+ * pool_cache_destruct_object:
+ *
+ *	Force destruction of an object and its release back into
+ *	the pool.
+ */
+void
+pool_cache_destruct_object(struct pool_cache *pc, void *object)
+{
+
+	if (pc->pc_dtor != NULL)
+		(*pc->pc_dtor)(pc->pc_arg, object);
+	pool_put(pc->pc_pool, object);
+}
+
+/*
  * pool_cache_do_invalidate:
  *
  *	This internal function implements pool_cache_invalidate() and
@@ -1723,25 +1854,32 @@ pool_cache_put(struct pool_cache *pc, vo
  */
 static void
 pool_cache_do_invalidate(struct pool_cache *pc, int free_groups,
-    void (*putit)(struct pool *, void *, const char *, long))
+    void (*putit)(struct pool *, void *))
 {
 	struct pool_cache_group *pcg, *npcg;
 	void *object;
+	int s;
 
 	for (pcg = TAILQ_FIRST(&pc->pc_grouplist); pcg != NULL;
 	     pcg = npcg) {
 		npcg = TAILQ_NEXT(pcg, pcg_list);
 		while (pcg->pcg_avail != 0) {
+			pc->pc_nitems--;
 			object = pcg_get(pcg);
 			if (pcg->pcg_avail == 0 &&
 			    pc->pc_allocfrom == pcg)
 				pc->pc_allocfrom = NULL;
 			if (pc->pc_dtor != NULL)
 				(*pc->pc_dtor)(pc->pc_arg, object);
-			(*putit)(pc->pc_pool, object, __FILE__, __LINE__);
+			(*putit)(pc->pc_pool, object);
 		}
 		if (free_groups) {
+			pc->pc_ngroups--;
 			TAILQ_REMOVE(&pc->pc_grouplist, pcg, pcg_list);
+			if (pc->pc_freeto == pcg)
+				pc->pc_freeto = NULL;
+			s = splvm();
 			pool_put(&pcgpool, pcg);
+			splx(s);
 		}
 	}
 }
@@ -1757,7 +1895,7 @@ pool_cache_invalidate(struct pool_cache 
 {
 
 	simple_lock(&pc->pc_slock);
-	pool_cache_do_invalidate(pc, 0, _pool_put);
+	pool_cache_do_invalidate(pc, 0, pool_put);
 	simple_unlock(&pc->pc_slock);
 }
@@ -1770,13 +1908,7 @@ static void
 pool_cache_reclaim(struct pool_cache *pc)
 {
 
-	/*
-	 * We're locking in the opposite order (pool already
-	 * locked in pool_reclaim()), so use a try-lock instead.
-	 */
-
-	if (simple_lock_try(&pc->pc_slock) == 0)
-		return;
+	simple_lock(&pc->pc_slock);
 	pool_cache_do_invalidate(pc, 1, pool_do_put);
 	simple_unlock(&pc->pc_slock);
 }
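
The change that motivates much of the churn above is the deferred page release in pr_rmpage() and pool_reclaim(): instead of calling the back-end free routine (and pool_put() on the header pool) while pr_slock is held, idle pages are unlinked onto a caller-supplied struct pool_pagelist and freed only after the lock is dropped, which in turn lets pool_cache_reclaim() take the cache lock unconditionally rather than with a try-lock. Below is a minimal userland sketch of that shape; pool_pages, reclaim_idle_pages(), lock()/unlock() and backend_free() are hypothetical stand-ins for the kernel's structures, not its API.

/* Deferred-release sketch: collect under the lock, free after it. */
#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>

struct page {
	TAILQ_ENTRY(page) p_list;	/* linkage, like ph_pagelist */
	int p_idle;			/* no items outstanding */
};
TAILQ_HEAD(pagelist, page);

static struct pagelist pool_pages = TAILQ_HEAD_INITIALIZER(pool_pages);

static void lock(void)   { /* stands in for simple_lock(&pp->pr_slock) */ }
static void unlock(void) { /* stands in for simple_unlock(&pp->pr_slock) */ }

/* Stands in for (*pp->pr_free)(), which must not be called locked. */
static void
backend_free(struct page *pg)
{
	free(pg);
}

static void
reclaim_idle_pages(void)
{
	struct pagelist pq;		/* private queue, like "pq" above */
	struct page *pg, *next;

	TAILQ_INIT(&pq);

	lock();
	for (pg = TAILQ_FIRST(&pool_pages); pg != NULL; pg = next) {
		next = TAILQ_NEXT(pg, p_list);
		if (pg->p_idle) {
			/* Unlink now; defer the actual free. */
			TAILQ_REMOVE(&pool_pages, pg, p_list);
			TAILQ_INSERT_HEAD(&pq, pg, p_list);
		}
	}
	unlock();

	/* The private queue is reachable only from this thread: no lock. */
	while ((pg = TAILQ_FIRST(&pq)) != NULL) {
		TAILQ_REMOVE(&pq, pg, p_list);
		backend_free(pg);
	}
}

int
main(void)
{
	int i;

	for (i = 0; i < 4; i++) {
		struct page *pg = calloc(1, sizeof(*pg));
		if (pg == NULL)
			return 1;
		pg->p_idle = (i % 2 == 0);
		TAILQ_INSERT_TAIL(&pool_pages, pg, p_list);
	}
	reclaim_idle_pages();
	printf("idle pages released\n");
	return 0;
}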
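The same revision also reworks how pool_get(), pool_prime() and pool_catchup() grow a pool: both the page and its item header are allocated with the pool lock dropped, and a partial failure returns the page to the back end before reporting ENOMEM. A sketch of that two-step pattern follows, with hypothetical alloc_page()/alloc_header() helpers standing in for (*pp->pr_alloc)() and pool_alloc_item_header().

/* Two-step growth sketch: allocate unlocked, unwind on partial failure. */
#include <errno.h>
#include <stdlib.h>

struct header {
	void *h_page;			/* like ph_page */
};

/* Hypothetical stand-ins for the page and header allocators. */
static void *
alloc_page(void)
{
	return malloc(4096);
}

static struct header *
alloc_header(void *page)
{
	struct header *ph = malloc(sizeof(*ph));

	if (ph != NULL)
		ph->h_page = page;
	return ph;
}

static void lock(void)   { /* simple_lock() stand-in */ }
static void unlock(void) { /* simple_unlock() stand-in */ }

/* Called with the (pretend) pool lock held; returns 0 or ENOMEM. */
static int
grow_pool(struct header **php)
{
	void *v;
	struct header *ph = NULL;

	unlock();			/* never call the allocator locked */
	v = alloc_page();
	if (v != NULL)
		ph = alloc_header(v);
	lock();				/* re-take before touching the pool */

	if (v == NULL || ph == NULL) {
		if (v != NULL)		/* partial failure: return the page */
			free(v);
		return ENOMEM;
	}
	*php = ph;			/* kernel would pool_prime_page() here */
	return 0;
}

int
main(void)
{
	struct header *ph;

	if (grow_pool(&ph) == 0) {
		free(ph->h_page);
		free(ph);
	}
	return 0;
}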
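Finally, the POOL_DIAGNOSTIC split uses a common C idiom: in the non-diagnostic build the public name is an ordinary function, while in the diagnostic build it is a macro that appends __FILE__/__LINE__ to a leading-underscore function (with #undef pool_put inside subr_pool.c itself so the real function can still be defined). A compressed sketch, with a hypothetical my_put() in place of pool_put():

/* Diagnostic-wrapper sketch: same call site, file/line only when enabled. */
#include <stdio.h>

#ifdef MY_DIAGNOSTIC
static void
_my_put(void *v, const char *file, long line)
{
	printf("put %p from %s:%ld\n", v, file, line);
}
#define my_put(v)	_my_put((v), __FILE__, (long)__LINE__)
#else
static void
my_put(void *v)
{
	(void)v;			/* release the object, no logging */
}
#endif

int
main(void)
{
	int x = 0;

	my_put((void *)&x);		/* expands to _my_put(...) if enabled */
	return x;
}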