Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

===================================================================
RCS file: /ftp/cvs/cvsroot/src/sys/kern/subr_pool.c,v
rcsdiff: /ftp/cvs/cvsroot/src/sys/kern/subr_pool.c,v: warning: Unknown phrases like `commitid ...;' are present.
retrieving revision 1.67
retrieving revision 1.86
diff -u -p -r1.67 -r1.86
--- src/sys/kern/subr_pool.c	2002/03/08 20:51:26	1.67
+++ src/sys/kern/subr_pool.c	2003/03/16 08:06:51	1.86
@@ -1,4 +1,4 @@
-/*	$NetBSD: subr_pool.c,v 1.67 2002/03/08 20:51:26 thorpej Exp $	*/
+/*	$NetBSD: subr_pool.c,v 1.86 2003/03/16 08:06:51 matt Exp $	*/
 
 /*-
  * Copyright (c) 1997, 1999, 2000 The NetBSD Foundation, Inc.
@@ -38,7 +38,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.67 2002/03/08 20:51:26 thorpej Exp $");
+__KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.86 2003/03/16 08:06:51 matt Exp $");
 
 #include "opt_pool.h"
 #include "opt_poollog.h"
@@ -94,7 +94,7 @@ struct pool_item_header {
 	TAILQ_HEAD(,pool_item)	ph_itemlist;	/* chunk list for this page */
 	LIST_ENTRY(pool_item_header)
 				ph_hashlist;	/* Off-page page headers */
-	int			ph_nmissing;	/* # of chunks in use */
+	unsigned int		ph_nmissing;	/* # of chunks in use */
 	caddr_t			ph_page;	/* this page's address */
 	struct timeval		ph_time;	/* last referenced */
 };
@@ -102,9 +102,9 @@ TAILQ_HEAD(pool_pagelist,pool_item_heade
 
 struct pool_item {
 #ifdef DIAGNOSTIC
-	int pi_magic;
+	u_int pi_magic;
 #endif
-#define	PI_MAGIC 0xdeadbeef
+#define	PI_MAGIC 0xdeadbeefU
 	/* Other entries use only this list entry */
 	TAILQ_ENTRY(pool_item)	pi_list;
 };
@@ -145,16 +145,6 @@ struct pool_item {
 /* The cache group pool. */
 static struct pool pcgpool;
 
-/* The pool cache group. */
-#define	PCG_NOBJECTS	16
-struct pool_cache_group {
-	TAILQ_ENTRY(pool_cache_group)
-		pcg_list;	/* link in the pool cache's group list */
-	u_int	pcg_avail;	/* # available objects */
-			/* pointers to the objects */
-	void	*pcg_objects[PCG_NOBJECTS];
-};
-
 static void	pool_cache_reclaim(struct pool_cache *);
 
 static int	pool_catchup(struct pool *);
@@ -179,6 +169,7 @@ struct pool_log {
 	void		*pl_addr;
 };
 
+#ifdef POOL_DIAGNOSTIC
 /* Number of entries in pool log buffers */
 #ifndef POOL_LOGSIZE
 #define	POOL_LOGSIZE	10
@@ -186,7 +177,6 @@ struct pool_log {
 
 int pool_logsize = POOL_LOGSIZE;
 
-#ifdef POOL_DIAGNOSTIC
 static __inline void
 pr_log(struct pool *pp, void *v, int action, const char *file, long line)
 {
@@ -339,7 +329,7 @@ pr_rmpage(struct pool *pp, struct pool_i
 	pool_allocator_free(pp, ph->ph_page);
 	if ((pp->pr_roflags & PR_PHINPAGE) == 0) {
 		LIST_REMOVE(ph, ph_hashlist);
-		s = splhigh();
+		s = splvm();
 		pool_put(&phpool, ph);
 		splx(s);
 	}
@@ -425,7 +415,7 @@ pool_init(struct pool *pp, size_t size, 
 	if (size < sizeof(struct pool_item))
 		size = sizeof(struct pool_item);
 
-	size = ALIGN(size);
+	size = roundup(size, align);
 #ifdef DIAGNOSTIC
 	if (size > palloc->pa_pagesz)
 		panic("pool_init: pool item size (%lu) too large",
@@ -456,6 +446,8 @@ pool_init(struct pool *pp, size_t size, 
 	pp->pr_hardlimit_ratecap.tv_usec = 0;
 	pp->pr_hardlimit_warning_last.tv_sec = 0;
 	pp->pr_hardlimit_warning_last.tv_usec = 0;
+	pp->pr_drain_hook = NULL;
+	pp->pr_drain_hook_arg = NULL;
 
 	/*
 	 * Decide whether to put the page header off page to avoid
@@ -572,15 +564,14 @@ pool_destroy(struct pool *pp)
 #ifdef DIAGNOSTIC
 	if (pp->pr_nout != 0) {
 		pr_printlog(pp, NULL, printf);
-		panic("pool_destroy: pool busy: still out: %u\n",
+		panic("pool_destroy: pool busy: still out: %u",
 		    pp->pr_nout);
 	}
 #endif
 
 	/* Remove all pages */
-	if ((pp->pr_roflags & PR_STATIC) == 0)
-		while ((ph = TAILQ_FIRST(&pp->pr_pagelist)) != NULL)
-			pr_rmpage(pp, ph, NULL);
+	while ((ph = TAILQ_FIRST(&pp->pr_pagelist)) != NULL)
+		pr_rmpage(pp, ph, NULL);
 
 	/* Remove from global pool list */
 	simple_lock(&pool_head_slock);
@@ -596,6 +587,19 @@ pool_destroy(struct pool *pp)
 #endif
 }
 
+void
+pool_set_drain_hook(struct pool *pp, void (*fn)(void *, int), void *arg)
+{
+
+	/* XXX no locking -- must be used just after pool_init() */
+#ifdef DIAGNOSTIC
+	if (pp->pr_drain_hook != NULL)
+		panic("pool_set_drain_hook(%s): already set", pp->pr_wchan);
+#endif
+	pp->pr_drain_hook = fn;
+	pp->pr_drain_hook_arg = arg;
+}
+
 static __inline struct pool_item_header *
 pool_alloc_item_header(struct pool *pp, caddr_t storage, int flags)
 {
@@ -607,7 +611,7 @@ pool_alloc_item_header(struct pool *pp, 
 	if ((pp->pr_roflags & PR_PHINPAGE) != 0)
 		ph = (struct pool_item_header *) (storage + pp->pr_phoffset);
 	else {
-		s = splhigh();
+		s = splvm();
 		ph = pool_get(&phpool, flags);
 		splx(s);
 	}
@@ -630,15 +634,9 @@ pool_get(struct pool *pp, int flags)
 	void *v;
 
 #ifdef DIAGNOSTIC
-	if (__predict_false((pp->pr_roflags & PR_STATIC) &&
-	    (flags & PR_MALLOCOK))) {
-		pr_printlog(pp, NULL, printf);
-		panic("pool_get: static");
-	}
-
-	if (__predict_false(curproc == NULL && doing_shutdown == 0 &&
+	if (__predict_false(curlwp == NULL && doing_shutdown == 0 &&
 	    (flags & PR_WAITOK) != 0))
-		panic("pool_get: must have NOWAIT");
+		panic("pool_get: %s: must have NOWAIT", pp->pr_wchan);
 
 #ifdef LOCKDEBUG
 	if (flags & PR_WAITOK)
@@ -663,6 +661,21 @@ pool_get(struct pool *pp, int flags)
 	}
 #endif
 	if (__predict_false(pp->pr_nout == pp->pr_hardlimit)) {
+		if (pp->pr_drain_hook != NULL) {
+			/*
+			 * Since the drain hook is going to free things
+			 * back to the pool, unlock, call the hook, re-lock,
+			 * and check the hardlimit condition again.
+			 */
+			pr_leave(pp);
+			simple_unlock(&pp->pr_slock);
+			(*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags);
+			simple_lock(&pp->pr_slock);
+			pr_enter(pp, file, line);
+			if (pp->pr_nout < pp->pr_hardlimit)
+				goto startover;
+		}
+
 		if ((flags & PR_WAITOK) && !(flags & PR_LIMITFAIL)) {
 			/*
 			 * XXX: A warning isn't logged in this case.  Should
@@ -702,7 +715,7 @@ pool_get(struct pool *pp, int flags)
 		simple_unlock(&pp->pr_slock);
 		printf("pool_get: %s: curpage NULL, nitems %u\n",
 		    pp->pr_wchan, pp->pr_nitems);
-		panic("pool_get: nitems inconsistent\n");
+		panic("pool_get: nitems inconsistent");
 	}
 #endif
 
@@ -772,7 +785,7 @@ pool_get(struct pool *pp, int flags)
 		simple_unlock(&pp->pr_slock);
 		printf("pool_get: %s: items on itemlist, nitems %u\n",
 		    pp->pr_wchan, pp->pr_nitems);
-		panic("pool_get: nitems inconsistent\n");
+		panic("pool_get: nitems inconsistent");
 	}
 #endif
 
@@ -903,6 +916,7 @@ pool_do_put(struct pool *pp, void *v)
 #endif
 
 	TAILQ_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
+	KDASSERT(ph->ph_nmissing != 0);
 	ph->ph_nmissing--;
 	pp->pr_nput++;
 	pp->pr_nitems++;
@@ -936,7 +950,8 @@ pool_do_put(struct pool *pp, void *v)
 	 */
 	if (ph->ph_nmissing == 0) {
 		pp->pr_nidle++;
-		if (pp->pr_npages > pp->pr_maxpages) {
+		if (pp->pr_npages > pp->pr_maxpages ||
+		    (pp->pr_alloc->pa_flags & PA_WANT) != 0) {
 			pr_rmpage(pp, ph, NULL);
 		} else {
 			TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
@@ -1022,9 +1037,9 @@ pool_put(struct pool *pp, void *v)
 
 int
 pool_prime(struct pool *pp, int n)
 {
-	struct pool_item_header *ph;
+	struct pool_item_header *ph = NULL;
 	caddr_t cp;
-	int newpages, error = 0;
+	int newpages;
 
 	simple_lock(&pp->pr_slock);
@@ -1038,7 +1053,6 @@ pool_prime(struct pool *pp, int n)
 		simple_lock(&pp->pr_slock);
 
 		if (__predict_false(cp == NULL || ph == NULL)) {
-			error = ENOMEM;
 			if (cp != NULL)
 				pool_allocator_free(pp, cp);
 			break;
@@ -1112,6 +1126,8 @@ pool_prime_page(struct pool *pp, caddr_t
 	while (n--) {
 		pi = (struct pool_item *)cp;
 
+		KASSERT(((((vaddr_t)pi) + ioff) & (align - 1)) == 0);
+
 		/* Insert on page list */
 		TAILQ_INSERT_TAIL(&ph->ph_itemlist, pi, pi_list);
 #ifdef DIAGNOSTIC
@@ -1136,30 +1152,16 @@ pool_prime_page(struct pool *pp, caddr_t
  *
  * Note 1, we never wait for memory here, we let the caller decide what to do.
  *
- * Note 2, this doesn't work with static pools.
- *
- * Note 3, we must be called with the pool already locked, and we return
+ * Note 2, we must be called with the pool already locked, and we return
  * with it locked.
  */
 static int
 pool_catchup(struct pool *pp)
 {
-	struct pool_item_header *ph;
+	struct pool_item_header *ph = NULL;
 	caddr_t cp;
 	int error = 0;
 
-	if (pp->pr_roflags & PR_STATIC) {
-		/*
-		 * We dropped below the low water mark, and this is not a
-		 * good thing.  Log a warning.
-		 *
-		 * XXX: rate-limit this?
-		 */
-		printf("WARNING: static pool `%s' dropped below low water "
-		    "mark\n", pp->pr_wchan);
-		return (0);
-	}
-
 	while (POOL_NEEDS_CATCHUP(pp)) {
 		/*
 		 * Call the page back-end allocator for more memory.
@@ -1188,7 +1190,6 @@ pool_catchup(struct pool *pp)
 void
 pool_setlowat(struct pool *pp, int n)
 {
-	int error;
 
 	simple_lock(&pp->pr_slock);
 
@@ -1198,7 +1199,7 @@ pool_setlowat(struct pool *pp, int n)
 		: roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
 
 	/* Make sure we're caught up with the newly-set low water mark. */
-	if (POOL_NEEDS_CATCHUP(pp) && (error = pool_catchup(pp) != 0)) {
+	if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
 		/*
 		 * XXX: Should we log a warning?  Should we set up a timeout
 		 * to try again in a second or so?  The latter could break
@@ -1261,12 +1262,17 @@ pool_reclaim(struct pool *pp)
 	struct pool_pagelist pq;
 	int s;
 
-	if (pp->pr_roflags & PR_STATIC)
-		return (0);
+	if (pp->pr_drain_hook != NULL) {
+		/*
+		 * The drain hook must be called with the pool unlocked.
+		 */
+		(*pp->pr_drain_hook)(pp->pr_drain_hook_arg, PR_NOWAIT);
+	}
 
 	if (simple_lock_try(&pp->pr_slock) == 0)
 		return (0);
 	pr_enter(pp, file, line);
+
 	TAILQ_INIT(&pq);
 
 	/*
@@ -1316,7 +1322,7 @@ pool_reclaim(struct pool *pp)
 			continue;
 		}
 		LIST_REMOVE(ph, ph_hashlist);
-		s = splhigh();
+		s = splvm();
 		pool_put(&phpool, ph);
 		splx(s);
 	}
@@ -1419,7 +1425,6 @@ pool_print1(struct pool *pp, const char 
 			print_pagelist = 1;
 		if (c == 'c')
 			print_cache = 1;
-		modif++;
 	}
 
 	(*pr)("POOL %s: size %u, align %u, ioff %u, roflags 0x%08x\n",
@@ -1899,8 +1904,20 @@ pool_allocator_alloc(struct pool *org, i
 	do {
 		if ((res = (*pa->pa_alloc)(org, flags)) != NULL)
 			return (res);
-		if ((flags & PR_WAITOK) == 0)
+		if ((flags & PR_WAITOK) == 0) {
+			/*
+			 * We only run the drain hook here if PR_NOWAIT.
+			 * In other cases, the hook will be run in
+			 * pool_reclaim().
+			 */
+			if (org->pr_drain_hook != NULL) {
+				(*org->pr_drain_hook)(org->pr_drain_hook_arg,
+				    flags);
+				if ((res = (*pa->pa_alloc)(org, flags)) != NULL)
+					return (res);
+			}
 			break;
+		}
 
 		/*
 		 * Drain all pools, except "org", that use this
@@ -1913,6 +1930,9 @@ pool_allocator_alloc(struct pool *org, i
 		 * other way to have potentially sleeping pool_reclaim,
 		 * non-sleeping locks on pool_allocator, and some
 		 * stirring of drained pools in the allocator.
+		 *
+		 * XXX Maybe we should use pool_head_slock for locking
+		 * the allocators?
 		 */
 
 		freed = 0;
@@ -1924,9 +1944,9 @@ pool_allocator_alloc(struct pool *org, i
 			TAILQ_INSERT_TAIL(&pa->pa_list, pp, pr_alloc_list);
 			if (pp == org)
 				continue;
-			simple_unlock(&pa->pa_list);
+			simple_unlock(&pa->pa_slock);
 			freed = pool_reclaim(pp);
-			simple_lock(&pa->pa_list);
+			simple_lock(&pa->pa_slock);
 		} while ((pp = TAILQ_FIRST(&pa->pa_list)) != start &&
 			 freed == 0);
 
@@ -1967,6 +1987,7 @@ pool_allocator_free(struct pool *pp, voi
 			pp->pr_flags &= ~PR_WANTED;
 			wakeup(pp);
 		}
+		simple_unlock(&pp->pr_slock);
 	}
 	pa->pa_flags &= ~PA_WANT;
 	simple_unlock(&pa->pa_slock);
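
For context, the main new facility in this revision is the drain hook: a pool
client that keeps its own cache of pool-backed objects can register a callback
that the pool code invokes, with the pool unlocked, when pool_get() hits the
hard limit or when pool_reclaim() and pool_allocator_alloc() need pages back.
The sketch below shows one plausible client; everything named foo_* is
hypothetical and not part of this diff, while pool_init(),
pool_set_drain_hook(), and pool_put() are used with the signatures visible in
the patch above.

#include <sys/param.h>
#include <sys/pool.h>

struct foo {
	struct foo *f_next;		/* private freelist linkage (hypothetical) */
	/* ... payload ... */
};

static struct pool foo_pool;		/* hypothetical pool */
static struct foo *foo_freelist;	/* hypothetical private cache */

/*
 * Drain hook: called by the pool code with the pool unlocked, and with
 * "flags" set to PR_NOWAIT or PR_WAITOK.  Give privately cached objects
 * back to the pool so that pool_reclaim() can release whole pages.
 */
static void
foo_drain(void *arg, int flags)
{
	struct foo *f;

	while ((f = foo_freelist) != NULL) {
		foo_freelist = f->f_next;
		pool_put(&foo_pool, f);
	}
}

void
foo_init(void)
{

	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0, "foopl", NULL);
	/*
	 * Per the XXX comment in pool_set_drain_hook(), this must be
	 * done right after pool_init(); there is no locking.
	 */
	pool_set_drain_hook(&foo_pool, foo_drain, NULL);
}

A real client would protect its private cache with a lock of its own, since
the hook can fire from several paths (pool_get() at the hard limit,
pool_reclaim(), and the allocator retry loop), and it must not sleep when
called with PR_NOWAIT.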
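A smaller but easy-to-miss fix in the same revision: pool_init() now rounds
the item size up to the pool's requested alignment with roundup(size, align)
instead of the machine-wide ALIGN() macro, and the new KASSERT in
pool_prime_page() checks the resulting invariant.  A worked example, assuming
an 8-byte ALIGN() granularity (the exact ALIGNBYTES value is
machine-dependent):

#define	ALIGNBYTES	7	/* assumed: 8-byte machine alignment */
#define	ALIGN(p)	(((unsigned long)(p) + ALIGNBYTES) & \
			    ~(unsigned long)ALIGNBYTES)

/*
 * A pool created with size = 24 and align = 16:
 *
 * Old: size = ALIGN(24) == 24, since 24 is already 8-byte aligned.
 *      Items then sit at page offsets 0, 24, 48, 72, ... and every
 *      second item (24, 72, ...) breaks the requested 16-byte alignment.
 *
 * New: size = roundup(24, 16) == 32, so items sit at offsets
 *      0, 32, 64, ... and the assertion added to pool_prime_page(),
 *
 *		KASSERT(((((vaddr_t)pi) + ioff) & (align - 1)) == 0);
 *
 *      holds for every item on the page.
 */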