Please note that diffs are not public domain; they are subject to the copyright notices on the relevant files. =================================================================== RCS file: /ftp/cvs/cvsroot/src/sys/kern/subr_pool.c,v rcsdiff: /ftp/cvs/cvsroot/src/sys/kern/subr_pool.c,v: warning: Unknown phrases like `commitid ...;' are present. retrieving revision 1.4 retrieving revision 1.80 diff -u -p -r1.4 -r1.80 --- src/sys/kern/subr_pool.c 1998/07/24 20:19:23 1.4 +++ src/sys/kern/subr_pool.c 2002/09/27 15:37:46 1.80 @@ -1,11 +1,12 @@ -/* $NetBSD: subr_pool.c,v 1.4 1998/07/24 20:19:23 thorpej Exp $ */ +/* $NetBSD: subr_pool.c,v 1.80 2002/09/27 15:37:46 provos Exp $ */ /*- - * Copyright (c) 1997 The NetBSD Foundation, Inc. + * Copyright (c) 1997, 1999, 2000 The NetBSD Foundation, Inc. * All rights reserved. * * This code is derived from software contributed to The NetBSD Foundation - * by Paul Kranenburg. + * by Paul Kranenburg; by Jason R. Thorpe of the Numerical Aerospace + * Simulation Facility, NASA Ames Research Center. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -17,8 +18,8 @@ * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: - * This product includes software developed by the NetBSD - * Foundation, Inc. and its contributors. + * This product includes software developed by the NetBSD + * Foundation, Inc. and its contributors. * 4. Neither the name of The NetBSD Foundation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. @@ -36,6 +37,13 @@ * POSSIBILITY OF SUCH DAMAGE. */ +#include +__KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.80 2002/09/27 15:37:46 provos Exp $"); + +#include "opt_pool.h" +#include "opt_poollog.h" +#include "opt_lockdebug.h" + #include #include #include @@ -44,13 +52,9 @@ #include #include #include +#include -#include -#include - -#if defined(UVM) #include -#endif /* * Pool resource management utility. @@ -61,20 +65,27 @@ * headed by `ph_itemlist' in each page header. The memory for building * the page list is either taken from the allocated pages themselves (for * small pool items) or taken from an internal pool of page headers (`phpool'). - * */ /* List of all pools */ -static TAILQ_HEAD(,pool) pool_head = TAILQ_HEAD_INITIALIZER(pool_head); +TAILQ_HEAD(,pool) pool_head = TAILQ_HEAD_INITIALIZER(pool_head); /* Private pool for page header structures */ static struct pool phpool; +#ifdef POOL_SUBPAGE +/* Pool of subpages for use by normal pools. */ +static struct pool psppool; +#endif + /* # of seconds to retain page after last use */ int pool_inactive_time = 10; /* Next candidate for drainage (see pool_drain()) */ -static struct pool *drainpp = NULL; +static struct pool *drainpp; + +/* This spin lock protects both pool_head and drainpp. 
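+ *
+ * A sketch of the intended protocol (hypothetical local `pp'; the
+ * real takers are pool_init(), pool_destroy() and pool_drain()):
+ *
+ *	simple_lock(&pool_head_slock);
+ *	TAILQ_FOREACH(pp, &pool_head, pr_poollist)
+ *		...		/* examine or pick pp */
+ *	simple_unlock(&pool_head_slock);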
*/ +struct simplelock pool_head_slock = SIMPLELOCK_INITIALIZER; struct pool_item_header { /* Page headers */ @@ -83,66 +94,96 @@ struct pool_item_header { TAILQ_HEAD(,pool_item) ph_itemlist; /* chunk list for this page */ LIST_ENTRY(pool_item_header) ph_hashlist; /* Off-page page headers */ - int ph_nmissing; /* # of chunks in use */ + unsigned int ph_nmissing; /* # of chunks in use */ caddr_t ph_page; /* this page's address */ struct timeval ph_time; /* last referenced */ }; +TAILQ_HEAD(pool_pagelist,pool_item_header); struct pool_item { #ifdef DIAGNOSTIC int pi_magic; -#define PI_MAGIC 0xdeadbeef #endif +#define PI_MAGIC 0xdeadbeef /* Other entries use only this list entry */ TAILQ_ENTRY(pool_item) pi_list; }; +#define PR_HASH_INDEX(pp,addr) \ + (((u_long)(addr) >> (pp)->pr_alloc->pa_pageshift) & \ + (PR_HASHTABSIZE - 1)) + +#define POOL_NEEDS_CATCHUP(pp) \ + ((pp)->pr_nitems < (pp)->pr_minitems) + +/* + * Pool cache management. + * + * Pool caches provide a way for constructed objects to be cached by the + * pool subsystem. This can lead to performance improvements by avoiding + * needless object construction/destruction; it is deferred until absolutely + * necessary. + * + * Caches are grouped into cache groups. Each cache group references + * up to 16 constructed objects. When a cache allocates an object + * from the pool, it calls the object's constructor and places it into + * a cache group. When a cache group frees an object back to the pool, + * it first calls the object's destructor. This allows the object to + * persist in constructed form while freed to the cache. + * + * Multiple caches may exist for each pool. This allows a single + * object type to have multiple constructed forms. The pool references + * each cache, so that when a pool is drained by the pagedaemon, it can + * drain each individual cache as well. Each time a cache is drained, + * the most idle cache group is freed to the pool in its entirety. + * + * Pool caches are layed on top of pools. By layering them, we can avoid + * the complexity of cache management for pools which would not benefit + * from it. + */ -#define PR_HASH_INDEX(pp,addr) \ - (((u_long)(addr) >> (pp)->pr_pageshift) & (PR_HASHTABSIZE - 1)) +/* The cache group pool. */ +static struct pool pcgpool; +static void pool_cache_reclaim(struct pool_cache *); +static int pool_catchup(struct pool *); +static void pool_prime_page(struct pool *, caddr_t, + struct pool_item_header *); -static struct pool_item_header - *pr_find_pagehead __P((struct pool *, caddr_t)); -static void pr_rmpage __P((struct pool *, struct pool_item_header *)); -static int pool_prime_page __P((struct pool *, caddr_t)); -static void *pool_page_alloc __P((unsigned long, int, int)); -static void pool_page_free __P((void *, unsigned long, int)); -int pool_chk __P((struct pool *, char *)); +void *pool_allocator_alloc(struct pool *, int); +void pool_allocator_free(struct pool *, void *); +static void pool_print1(struct pool *, const char *, + void (*)(const char *, ...)); -#ifdef POOL_DIAGNOSTIC /* - * Pool log entry. An array of these is allocated in pool_create(). + * Pool log entry. An array of these is allocated in pool_init(). 
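+ *
+ * The array is used as a ring: writers advance pr_curlogentry and
+ * wrap at pr_logsize, in the same style as pr_printlog() below:
+ *
+ *	if (++n >= pp->pr_logsize)
+ *		n = 0;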
*/ struct pool_log { const char *pl_file; long pl_line; int pl_action; -#define PRLOG_GET 1 -#define PRLOG_PUT 2 +#define PRLOG_GET 1 +#define PRLOG_PUT 2 void *pl_addr; }; /* Number of entries in pool log buffers */ -int pool_logsize = 10; +#ifndef POOL_LOGSIZE +#define POOL_LOGSIZE 10 +#endif -static void pr_log __P((struct pool *, void *, int, const char *, long)); -static void pr_printlog __P((struct pool *)); +int pool_logsize = POOL_LOGSIZE; -static __inline__ void -pr_log(pp, v, action, file, line) - struct pool *pp; - void *v; - int action; - const char *file; - long line; +#ifdef POOL_DIAGNOSTIC +static __inline void +pr_log(struct pool *pp, void *v, int action, const char *file, long line) { int n = pp->pr_curlogentry; struct pool_log *pl; - if ((pp->pr_flags & PR_LOGGING) == 0) + if ((pp->pr_roflags & PR_LOGGING) == 0) return; /* @@ -160,51 +201,89 @@ pr_log(pp, v, action, file, line) } static void -pr_printlog(pp) - struct pool *pp; +pr_printlog(struct pool *pp, struct pool_item *pi, + void (*pr)(const char *, ...)) { int i = pp->pr_logsize; int n = pp->pr_curlogentry; - if ((pp->pr_flags & PR_LOGGING) == 0) + if ((pp->pr_roflags & PR_LOGGING) == 0) return; - pool_print(pp, "printlog"); - /* * Print all entries in this pool's log. */ while (i-- > 0) { struct pool_log *pl = &pp->pr_log[n]; if (pl->pl_action != 0) { - printf("log entry %d:\n", i); - printf("\taction = %s, addr = %p\n", - pl->pl_action == PRLOG_GET ? "get" : "put", - pl->pl_addr); - printf("\tfile: %s at line %lu\n", - pl->pl_file, pl->pl_line); + if (pi == NULL || pi == pl->pl_addr) { + (*pr)("\tlog entry %d:\n", i); + (*pr)("\t\taction = %s, addr = %p\n", + pl->pl_action == PRLOG_GET ? "get" : "put", + pl->pl_addr); + (*pr)("\t\tfile: %s at line %lu\n", + pl->pl_file, pl->pl_line); + } } if (++n >= pp->pr_logsize) n = 0; } } -#else -#define pr_log(pp, v, action, file, line) -#define pr_printlog(pp) -#endif +static __inline void +pr_enter(struct pool *pp, const char *file, long line) +{ + + if (__predict_false(pp->pr_entered_file != NULL)) { + printf("pool %s: reentrancy at file %s line %ld\n", + pp->pr_wchan, file, line); + printf(" previous entry at file %s line %ld\n", + pp->pr_entered_file, pp->pr_entered_line); + panic("pr_enter"); + } + + pp->pr_entered_file = file; + pp->pr_entered_line = line; +} + +static __inline void +pr_leave(struct pool *pp) +{ + + if (__predict_false(pp->pr_entered_file == NULL)) { + printf("pool %s not entered?\n", pp->pr_wchan); + panic("pr_leave"); + } + + pp->pr_entered_file = NULL; + pp->pr_entered_line = 0; +} + +static __inline void +pr_enter_check(struct pool *pp, void (*pr)(const char *, ...)) +{ + + if (pp->pr_entered_file != NULL) + (*pr)("\n\tcurrently entered from file %s line %ld\n", + pp->pr_entered_file, pp->pr_entered_line); +} +#else +#define pr_log(pp, v, action, file, line) +#define pr_printlog(pp, pi, pr) +#define pr_enter(pp, file, line) +#define pr_leave(pp) +#define pr_enter_check(pp, pr) +#endif /* POOL_DIAGNOSTIC */ /* * Return the pool page header based on page address. 
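 *
 * Items do not record their page, so callers first mask an item
 * address down to its page, as pool_do_put() below does:
 *
 *	page = (caddr_t)((u_long)v & pp->pr_alloc->pa_pagemask);
 *
 * With PR_PHINPAGE the header then lives at page + pr_phoffset;
 * otherwise it is found on the PR_HASH_INDEX() hash chain.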
*/ -static __inline__ struct pool_item_header * -pr_find_pagehead(pp, page) - struct pool *pp; - caddr_t page; +static __inline struct pool_item_header * +pr_find_pagehead(struct pool *pp, caddr_t page) { struct pool_item_header *ph; - if ((pp->pr_flags & PR_PHINPAGE) != 0) + if ((pp->pr_roflags & PR_PHINPAGE) != 0) return ((struct pool_item_header *)(page + pp->pr_phoffset)); for (ph = LIST_FIRST(&pp->pr_hashtab[PR_HASH_INDEX(pp, page)]); @@ -219,33 +298,52 @@ pr_find_pagehead(pp, page) /* * Remove a page from the pool. */ -static __inline__ void -pr_rmpage(pp, ph) - struct pool *pp; - struct pool_item_header *ph; +static __inline void +pr_rmpage(struct pool *pp, struct pool_item_header *ph, + struct pool_pagelist *pq) { + int s; + + /* + * If the page was idle, decrement the idle page count. + */ + if (ph->ph_nmissing == 0) { +#ifdef DIAGNOSTIC + if (pp->pr_nidle == 0) + panic("pr_rmpage: nidle inconsistent"); + if (pp->pr_nitems < pp->pr_itemsperpage) + panic("pr_rmpage: nitems inconsistent"); +#endif + pp->pr_nidle--; + } + + pp->pr_nitems -= pp->pr_itemsperpage; /* - * Unlink a page from the pool and release it. + * Unlink a page from the pool and release it (or queue it for release). */ TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist); - (*pp->pr_free)(ph->ph_page, pp->pr_pagesz, pp->pr_mtype); + if (pq) { + TAILQ_INSERT_HEAD(pq, ph, ph_pagelist); + } else { + pool_allocator_free(pp, ph->ph_page); + if ((pp->pr_roflags & PR_PHINPAGE) == 0) { + LIST_REMOVE(ph, ph_hashlist); + s = splhigh(); + pool_put(&phpool, ph); + splx(s); + } + } pp->pr_npages--; pp->pr_npagefree++; - if ((pp->pr_flags & PR_PHINPAGE) == 0) { - LIST_REMOVE(ph, ph_hashlist); - pool_put(&phpool, ph); - } - if (pp->pr_curpage == ph) { /* * Find a new non-empty page header, if any. * Start search from the page head, to increase the * chance for "high water" pages to be freed. */ - for (ph = TAILQ_FIRST(&pp->pr_pagelist); ph != NULL; - ph = TAILQ_NEXT(ph, ph_pagelist)) + TAILQ_FOREACH(ph, &pp->pr_pagelist, ph_pagelist) if (TAILQ_FIRST(&ph->ph_itemlist) != NULL) break; @@ -254,108 +352,102 @@ pr_rmpage(pp, ph) } /* - * Allocate and initialize a pool. + * Initialize the given pool resource structure. + * + * We export this routine to allow other kernel parts to declare + * static pools that must be initialized before malloc() is available. */ -struct pool * -pool_create(size, align, ioff, nitems, wchan, pagesz, alloc, release, mtype) - size_t size; - u_int align; - u_int ioff; - int nitems; - char *wchan; - size_t pagesz; - void *(*alloc) __P((unsigned long, int, int)); - void (*release) __P((void *, unsigned long, int)); - int mtype; +void +pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags, + const char *wchan, struct pool_allocator *palloc) { - struct pool *pp; - int flags; - - pp = (struct pool *)malloc(sizeof(*pp), M_POOL, M_NOWAIT); - if (pp == NULL) - return (NULL); + int off, slack, i; - flags = PR_FREEHEADER; #ifdef POOL_DIAGNOSTIC + /* + * Always log if POOL_DIAGNOSTIC is defined. + */ if (pool_logsize != 0) flags |= PR_LOGGING; #endif - pool_init(pp, size, align, ioff, flags, wchan, pagesz, - alloc, release, mtype); - - if (nitems != 0) { - if (pool_prime(pp, nitems, NULL) != 0) { - pool_destroy(pp); - return (NULL); - } +#ifdef POOL_SUBPAGE + /* + * XXX We don't provide a real `nointr' back-end + * yet; all sub-pages come from a kmem back-end. + * maybe some day... 
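+	 *
+	 * Callers that are indifferent to the back-end simply pass
+	 * a NULL allocator; e.g., for a hypothetical pool:
+	 *
+	 *	pool_init(&foo_pool, sizeof(struct foo), 0, 0, 0,
+	 *	    "foopl", NULL);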
+ */ + if (palloc == NULL) { + extern struct pool_allocator pool_allocator_kmem_subpage; + palloc = &pool_allocator_kmem_subpage; } + /* + * We'll assume any user-specified back-end allocator + * will deal with sub-pages, or simply don't care. + */ +#else + if (palloc == NULL) + palloc = &pool_allocator_kmem; +#endif /* POOL_SUBPAGE */ + if ((palloc->pa_flags & PA_INITIALIZED) == 0) { + if (palloc->pa_pagesz == 0) { +#ifdef POOL_SUBPAGE + if (palloc == &pool_allocator_kmem) + palloc->pa_pagesz = PAGE_SIZE; + else + palloc->pa_pagesz = POOL_SUBPAGE; +#else + palloc->pa_pagesz = PAGE_SIZE; +#endif /* POOL_SUBPAGE */ + } - return (pp); -} + TAILQ_INIT(&palloc->pa_list); -/* - * Initialize the given pool resource structure. - * - * We export this routine to allow other kernel parts to declare - * static pools that must be initialized before malloc() is available. - */ -void -pool_init(pp, size, align, ioff, flags, wchan, pagesz, alloc, release, mtype) - struct pool *pp; - size_t size; - u_int align; - u_int ioff; - int flags; - char *wchan; - size_t pagesz; - void *(*alloc) __P((unsigned long, int, int)); - void (*release) __P((void *, unsigned long, int)); - int mtype; -{ - int off, slack; - - /* - * Check arguments and construct default values. - */ - if (!powerof2(pagesz) || pagesz > PAGE_SIZE) - panic("pool_init: page size invalid (%lx)\n", (u_long)pagesz); - - if (alloc == NULL && release == NULL) { - alloc = pool_page_alloc; - release = pool_page_free; - pagesz = PAGE_SIZE; /* Rounds to PAGE_SIZE anyhow. */ - } else if ((alloc != NULL && release != NULL) == 0) { - /* If you specifiy one, must specify both. */ - panic("pool_init: must specify alloc and release together"); - } - - if (pagesz == 0) - pagesz = PAGE_SIZE; + simple_lock_init(&palloc->pa_slock); + palloc->pa_pagemask = ~(palloc->pa_pagesz - 1); + palloc->pa_pageshift = ffs(palloc->pa_pagesz) - 1; + palloc->pa_flags |= PA_INITIALIZED; + } if (align == 0) align = ALIGN(1); + if (size < sizeof(struct pool_item)) + size = sizeof(struct pool_item); + + size = roundup(size, align); +#ifdef DIAGNOSTIC + if (size > palloc->pa_pagesz) + panic("pool_init: pool item size (%lu) too large", + (u_long)size); +#endif + /* * Initialize the pool structure. */ - TAILQ_INSERT_TAIL(&pool_head, pp, pr_poollist); TAILQ_INIT(&pp->pr_pagelist); + TAILQ_INIT(&pp->pr_cachelist); pp->pr_curpage = NULL; pp->pr_npages = 0; pp->pr_minitems = 0; pp->pr_minpages = 0; pp->pr_maxpages = UINT_MAX; - pp->pr_flags = flags; - pp->pr_size = ALIGN(size); + pp->pr_roflags = flags; + pp->pr_flags = 0; + pp->pr_size = size; pp->pr_align = align; pp->pr_wchan = wchan; - pp->pr_mtype = mtype; - pp->pr_alloc = alloc; - pp->pr_free = release; - pp->pr_pagesz = pagesz; - pp->pr_pagemask = ~(pagesz - 1); - pp->pr_pageshift = ffs(pagesz) - 1; + pp->pr_alloc = palloc; + pp->pr_nitems = 0; + pp->pr_nout = 0; + pp->pr_hardlimit = UINT_MAX; + pp->pr_hardlimit_warning = NULL; + pp->pr_hardlimit_ratecap.tv_sec = 0; + pp->pr_hardlimit_ratecap.tv_usec = 0; + pp->pr_hardlimit_warning_last.tv_sec = 0; + pp->pr_hardlimit_warning_last.tv_usec = 0; + pp->pr_drain_hook = NULL; + pp->pr_drain_hook_arg = NULL; /* * Decide whether to put the page header off page to avoid @@ -364,16 +456,18 @@ pool_init(pp, size, align, ioff, flags, * with its header based on the page address. 
* We use 1/16 of the page size as the threshold (XXX: tune) */ - if (pp->pr_size < pagesz/16) { + if (pp->pr_size < palloc->pa_pagesz/16) { /* Use the end of the page for the page header */ - pp->pr_flags |= PR_PHINPAGE; - pp->pr_phoffset = off = - pagesz - ALIGN(sizeof(struct pool_item_header)); + pp->pr_roflags |= PR_PHINPAGE; + pp->pr_phoffset = off = palloc->pa_pagesz - + ALIGN(sizeof(struct pool_item_header)); } else { /* The page header will be taken from our page header pool */ pp->pr_phoffset = 0; - off = pagesz; - bzero(pp->pr_hashtab, sizeof(pp->pr_hashtab)); + off = palloc->pa_pagesz; + for (i = 0; i < PR_HASHTABSIZE; i++) { + LIST_INIT(&pp->pr_hashtab[i]); + } } /* @@ -385,6 +479,7 @@ pool_init(pp, size, align, ioff, flags, */ pp->pr_itemoffset = ioff = ioff % align; pp->pr_itemsperpage = (off - ((align - ioff) % align)) / pp->pr_size; + KASSERT(pp->pr_itemsperpage != 0); /* * Use the slack between the chunks and the page header @@ -400,98 +495,213 @@ pool_init(pp, size, align, ioff, flags, pp->pr_npagealloc = 0; pp->pr_npagefree = 0; pp->pr_hiwat = 0; + pp->pr_nidle = 0; #ifdef POOL_DIAGNOSTIC - if ((flags & PR_LOGGING) != 0) { - pp->pr_log = malloc(pool_logsize * sizeof(struct pool_log), - M_TEMP, M_NOWAIT); - if (pp->pr_log == NULL) - pp->pr_flags &= ~PR_LOGGING; + if (flags & PR_LOGGING) { + if (kmem_map == NULL || + (pp->pr_log = malloc(pool_logsize * sizeof(struct pool_log), + M_TEMP, M_NOWAIT)) == NULL) + pp->pr_roflags &= ~PR_LOGGING; pp->pr_curlogentry = 0; pp->pr_logsize = pool_logsize; } #endif - simple_lock_init(&pp->pr_lock); + pp->pr_entered_file = NULL; + pp->pr_entered_line = 0; + + simple_lock_init(&pp->pr_slock); /* - * Initialize private page header pool if we haven't done so yet. + * Initialize private page header pool and cache magazine pool if we + * haven't done so yet. + * XXX LOCKING. */ if (phpool.pr_size == 0) { +#ifdef POOL_SUBPAGE + pool_init(&phpool, sizeof(struct pool_item_header), 0, 0, 0, + "phpool", &pool_allocator_kmem); + pool_init(&psppool, POOL_SUBPAGE, POOL_SUBPAGE, 0, + PR_RECURSIVE, "psppool", &pool_allocator_kmem); +#else pool_init(&phpool, sizeof(struct pool_item_header), 0, 0, - 0, "phpool", 0, 0, 0, 0); + 0, "phpool", NULL); +#endif + pool_init(&pcgpool, sizeof(struct pool_cache_group), 0, 0, + 0, "pcgpool", NULL); } - return; + /* Insert into the list of all pools. */ + simple_lock(&pool_head_slock); + TAILQ_INSERT_TAIL(&pool_head, pp, pr_poollist); + simple_unlock(&pool_head_slock); + + /* Insert this into the list of pools using this allocator. */ + simple_lock(&palloc->pa_slock); + TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list); + simple_unlock(&palloc->pa_slock); } /* * De-commision a pool resource. */ void -pool_destroy(pp) - struct pool *pp; +pool_destroy(struct pool *pp) { struct pool_item_header *ph; + struct pool_cache *pc; + + /* Locking order: pool_allocator -> pool */ + simple_lock(&pp->pr_alloc->pa_slock); + TAILQ_REMOVE(&pp->pr_alloc->pa_list, pp, pr_alloc_list); + simple_unlock(&pp->pr_alloc->pa_slock); + + /* Destroy all caches for this pool. 
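+	 *
+	 * pool_cache_destroy() invalidates each cache first, returning
+	 * its constructed objects to the pool, so cached-but-free
+	 * objects do not trip the pr_nout leak check below.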
*/ + while ((pc = TAILQ_FIRST(&pp->pr_cachelist)) != NULL) + pool_cache_destroy(pc); #ifdef DIAGNOSTIC - if (pp->pr_nget - pp->pr_nput != 0) { - pr_printlog(pp); - panic("pool_destroy: pool busy: still out: %lu\n", - pp->pr_nget - pp->pr_nput); + if (pp->pr_nout != 0) { + pr_printlog(pp, NULL, printf); + panic("pool_destroy: pool busy: still out: %u", + pp->pr_nout); } #endif /* Remove all pages */ - if ((pp->pr_flags & PR_STATIC) == 0) - while ((ph = pp->pr_pagelist.tqh_first) != NULL) - pr_rmpage(pp, ph); + while ((ph = TAILQ_FIRST(&pp->pr_pagelist)) != NULL) + pr_rmpage(pp, ph, NULL); /* Remove from global pool list */ + simple_lock(&pool_head_slock); TAILQ_REMOVE(&pool_head, pp, pr_poollist); - drainpp = NULL; + if (drainpp == pp) { + drainpp = NULL; + } + simple_unlock(&pool_head_slock); #ifdef POOL_DIAGNOSTIC - if ((pp->pr_flags & PR_LOGGING) != 0) + if ((pp->pr_roflags & PR_LOGGING) != 0) free(pp->pr_log, M_TEMP); #endif +} - if (pp->pr_flags & PR_FREEHEADER) - free(pp, M_POOL); +void +pool_set_drain_hook(struct pool *pp, void (*fn)(void *, int), void *arg) +{ + + /* XXX no locking -- must be used just after pool_init() */ +#ifdef DIAGNOSTIC + if (pp->pr_drain_hook != NULL) + panic("pool_set_drain_hook(%s): already set", pp->pr_wchan); +#endif + pp->pr_drain_hook = fn; + pp->pr_drain_hook_arg = arg; } +static __inline struct pool_item_header * +pool_alloc_item_header(struct pool *pp, caddr_t storage, int flags) +{ + struct pool_item_header *ph; + int s; + + LOCK_ASSERT(simple_lock_held(&pp->pr_slock) == 0); + + if ((pp->pr_roflags & PR_PHINPAGE) != 0) + ph = (struct pool_item_header *) (storage + pp->pr_phoffset); + else { + s = splhigh(); + ph = pool_get(&phpool, flags); + splx(s); + } + + return (ph); +} /* * Grab an item from the pool; must be called at appropriate spl level */ -#ifdef POOL_DIAGNOSTIC void * -_pool_get(pp, flags, file, line) - struct pool *pp; - int flags; - const char *file; - long line; +#ifdef POOL_DIAGNOSTIC +_pool_get(struct pool *pp, int flags, const char *file, long line) #else -void * -pool_get(pp, flags) - struct pool *pp; - int flags; +pool_get(struct pool *pp, int flags) #endif { - void *v; struct pool_item *pi; struct pool_item_header *ph; + void *v; #ifdef DIAGNOSTIC - if ((pp->pr_flags & PR_STATIC) && (flags & PR_MALLOCOK)) { - pr_printlog(pp); - panic("pool_get: static"); + if (__predict_false(curproc == NULL && doing_shutdown == 0 && + (flags & PR_WAITOK) != 0)) + panic("pool_get: %s: must have NOWAIT", pp->pr_wchan); + +#ifdef LOCKDEBUG + if (flags & PR_WAITOK) + simple_lock_only_held(NULL, "pool_get(PR_WAITOK)"); +#endif +#endif /* DIAGNOSTIC */ + + simple_lock(&pp->pr_slock); + pr_enter(pp, file, line); + + startover: + /* + * Check to see if we've reached the hard limit. If we have, + * and we can wait, then wait until an item has been returned to + * the pool. + */ +#ifdef DIAGNOSTIC + if (__predict_false(pp->pr_nout > pp->pr_hardlimit)) { + pr_leave(pp); + simple_unlock(&pp->pr_slock); + panic("pool_get: %s: crossed hard limit", pp->pr_wchan); } #endif + if (__predict_false(pp->pr_nout == pp->pr_hardlimit)) { + if (pp->pr_drain_hook != NULL) { + /* + * Since the drain hook is going to free things + * back to the pool, unlock, call the hook, re-lock, + * and check the hardlimit condition again. 
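+			 *
+			 * (A hook is registered once, right after
+			 * pool_init(); a hypothetical example:
+			 *
+			 *	pool_set_drain_hook(&foo_pool,
+			 *	    foo_drain, NULL);
+			 *
+			 * where foo_drain(arg, flags) releases items
+			 * back to the pool.)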
+ */ + pr_leave(pp); + simple_unlock(&pp->pr_slock); + (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags); + simple_lock(&pp->pr_slock); + pr_enter(pp, file, line); + if (pp->pr_nout < pp->pr_hardlimit) + goto startover; + } + + if ((flags & PR_WAITOK) && !(flags & PR_LIMITFAIL)) { + /* + * XXX: A warning isn't logged in this case. Should + * it be? + */ + pp->pr_flags |= PR_WANTED; + pr_leave(pp); + ltsleep(pp, PSWP, pp->pr_wchan, 0, &pp->pr_slock); + pr_enter(pp, file, line); + goto startover; + } + + /* + * Log a message that the hard limit has been hit. + */ + if (pp->pr_hardlimit_warning != NULL && + ratecheck(&pp->pr_hardlimit_warning_last, + &pp->pr_hardlimit_ratecap)) + log(LOG_ERR, "%s\n", pp->pr_hardlimit_warning); + + pp->pr_nfail++; - simple_lock(&pp->pr_lock); - if (curproc == NULL && (flags & PR_WAITOK) != 0) - panic("pool_get: must have NOWAIT"); + pr_leave(pp); + simple_unlock(&pp->pr_slock); + return (NULL); + } /* * The convention we use is that if `curpage' is not NULL, then @@ -499,38 +709,93 @@ pool_get(pp, flags) * never points at a page header which has PR_PHINPAGE set and * has no items in its bucket. */ -again: if ((ph = pp->pr_curpage) == NULL) { - void *v = (*pp->pr_alloc)(pp->pr_pagesz, flags, pp->pr_mtype); - if (v == NULL) { - if (flags & PR_URGENT) - panic("pool_get: urgent"); +#ifdef DIAGNOSTIC + if (pp->pr_nitems != 0) { + simple_unlock(&pp->pr_slock); + printf("pool_get: %s: curpage NULL, nitems %u\n", + pp->pr_wchan, pp->pr_nitems); + panic("pool_get: nitems inconsistent"); + } +#endif + + /* + * Call the back-end page allocator for more memory. + * Release the pool lock, as the back-end page allocator + * may block. + */ + pr_leave(pp); + simple_unlock(&pp->pr_slock); + v = pool_allocator_alloc(pp, flags); + if (__predict_true(v != NULL)) + ph = pool_alloc_item_header(pp, v, flags); + simple_lock(&pp->pr_slock); + pr_enter(pp, file, line); + + if (__predict_false(v == NULL || ph == NULL)) { + if (v != NULL) + pool_allocator_free(pp, v); + + /* + * We were unable to allocate a page or item + * header, but we released the lock during + * allocation, so perhaps items were freed + * back to the pool. Check for this case. + */ + if (pp->pr_curpage != NULL) + goto startover; + if ((flags & PR_WAITOK) == 0) { pp->pr_nfail++; - simple_unlock(&pp->pr_lock); + pr_leave(pp); + simple_unlock(&pp->pr_slock); return (NULL); } + /* + * Wait for items to be returned to this pool. + * + * XXX: maybe we should wake up once a second and + * try again? + */ pp->pr_flags |= PR_WANTED; - simple_unlock(&pp->pr_lock); - tsleep((caddr_t)pp, PSWP, pp->pr_wchan, 0); - simple_lock(&pp->pr_lock); - } else { - pp->pr_npagealloc++; - pool_prime_page(pp, v); + /* PA_WANTED is already set on the allocator. */ + pr_leave(pp); + ltsleep(pp, PSWP, pp->pr_wchan, 0, &pp->pr_slock); + pr_enter(pp, file, line); + goto startover; } - goto again; + /* We have more memory; add it to the pool */ + pool_prime_page(pp, v, ph); + pp->pr_npagealloc++; + + /* Start the allocation process over. 
*/ + goto startover; } - if ((v = pi = TAILQ_FIRST(&ph->ph_itemlist)) == NULL) + if (__predict_false((v = pi = TAILQ_FIRST(&ph->ph_itemlist)) == NULL)) { + pr_leave(pp); + simple_unlock(&pp->pr_slock); panic("pool_get: %s: page empty", pp->pr_wchan); + } +#ifdef DIAGNOSTIC + if (__predict_false(pp->pr_nitems == 0)) { + pr_leave(pp); + simple_unlock(&pp->pr_slock); + printf("pool_get: %s: items on itemlist, nitems %u\n", + pp->pr_wchan, pp->pr_nitems); + panic("pool_get: nitems inconsistent"); + } +#endif +#ifdef POOL_DIAGNOSTIC pr_log(pp, v, PRLOG_GET, file, line); +#endif #ifdef DIAGNOSTIC - if (pi->pi_magic != PI_MAGIC) { - pr_printlog(pp); + if (__predict_false(pi->pi_magic != PI_MAGIC)) { + pr_printlog(pp, pi, printf); panic("pool_get(%s): free list modified: magic=%x; page %p;" " item addr %p\n", pp->pr_wchan, pi->pi_magic, ph->ph_page, pi); @@ -541,19 +806,39 @@ again: * Remove from item list. */ TAILQ_REMOVE(&ph->ph_itemlist, pi, pi_list); + pp->pr_nitems--; + pp->pr_nout++; + if (ph->ph_nmissing == 0) { +#ifdef DIAGNOSTIC + if (__predict_false(pp->pr_nidle == 0)) + panic("pool_get: nidle inconsistent"); +#endif + pp->pr_nidle--; + } ph->ph_nmissing++; if (TAILQ_FIRST(&ph->ph_itemlist) == NULL) { +#ifdef DIAGNOSTIC + if (__predict_false(ph->ph_nmissing != pp->pr_itemsperpage)) { + pr_leave(pp); + simple_unlock(&pp->pr_slock); + panic("pool_get: %s: nmissing inconsistent", + pp->pr_wchan); + } +#endif /* * Find a new non-empty page header, if any. * Start search from the page head, to increase * the chance for "high water" pages to be freed. * - * First, move the now empty page to the head of - * the page list. + * Migrate empty pages to the end of the list. This + * will speed the update of curpage as pages become + * idle. Empty pages intermingled with idle pages + * is no big deal. As soon as a page becomes un-empty, + * it will move back to the head of the list. */ TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist); - TAILQ_INSERT_HEAD(&pp->pr_pagelist, ph, ph_pagelist); - while ((ph = TAILQ_NEXT(ph, ph_pagelist)) != NULL) + TAILQ_INSERT_TAIL(&pp->pr_pagelist, ph, ph_pagelist); + TAILQ_FOREACH(ph, &pp->pr_pagelist, ph_pagelist) if (TAILQ_FIRST(&ph->ph_itemlist) != NULL) break; @@ -561,51 +846,81 @@ again: } pp->pr_nget++; - simple_unlock(&pp->pr_lock); + + /* + * If we have a low water mark and we are now below that low + * water mark, add more items to the pool. + */ + if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) { + /* + * XXX: Should we log a warning? Should we set up a timeout + * to try again in a second or so? The latter could break + * a caller's assumptions about interrupt protection, etc. + */ + } + + pr_leave(pp); + simple_unlock(&pp->pr_slock); return (v); } /* - * Return resource to the pool; must be called at appropriate spl level + * Internal version of pool_put(). Pool is already locked/entered. 
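+ *
+ * All entry points (pool_put(), _pool_put(), and the cache reclaim
+ * path under pool_reclaim()) hold pr_slock around this call.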
*/ -#ifdef POOL_DIAGNOSTIC -void -_pool_put(pp, v, file, line) - struct pool *pp; - void *v; - const char *file; - long line; -#else -void -pool_put(pp, v) - struct pool *pp; - void *v; -#endif +static void +pool_do_put(struct pool *pp, void *v) { struct pool_item *pi = v; struct pool_item_header *ph; caddr_t page; + int s; - page = (caddr_t)((u_long)v & pp->pr_pagemask); + LOCK_ASSERT(simple_lock_held(&pp->pr_slock)); - simple_lock(&pp->pr_lock); + page = (caddr_t)((u_long)v & pp->pr_alloc->pa_pagemask); - pr_log(pp, v, PRLOG_PUT, file, line); +#ifdef DIAGNOSTIC + if (__predict_false(pp->pr_nout == 0)) { + printf("pool %s: putting with none out\n", + pp->pr_wchan); + panic("pool_put"); + } +#endif - if ((ph = pr_find_pagehead(pp, page)) == NULL) { - pr_printlog(pp); + if (__predict_false((ph = pr_find_pagehead(pp, page)) == NULL)) { + pr_printlog(pp, NULL, printf); panic("pool_put: %s: page header missing", pp->pr_wchan); } +#ifdef LOCKDEBUG + /* + * Check if we're freeing a locked simple lock. + */ + simple_lock_freecheck((caddr_t)pi, ((caddr_t)pi) + pp->pr_size); +#endif + /* * Return to item list. */ #ifdef DIAGNOSTIC pi->pi_magic = PI_MAGIC; #endif +#ifdef DEBUG + { + int i, *ip = v; + + for (i = 0; i < pp->pr_size / sizeof(int); i++) { + *ip++ = PI_MAGIC; + } + } +#endif + TAILQ_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list); + KDASSERT(ph->ph_nmissing != 0); ph->ph_nmissing--; pp->pr_nput++; + pp->pr_nitems++; + pp->pr_nout--; /* Cancel "pool empty" condition if it exists */ if (pp->pr_curpage == NULL) @@ -613,113 +928,170 @@ pool_put(pp, v) if (pp->pr_flags & PR_WANTED) { pp->pr_flags &= ~PR_WANTED; + if (ph->ph_nmissing == 0) + pp->pr_nidle++; wakeup((caddr_t)pp); - simple_unlock(&pp->pr_lock); return; } /* - * If this page is now complete, move it to the end of the pagelist. - * If this page has just become un-empty, move it the head. + * If this page is now complete, do one of two things: + * + * (1) If we have more pages than the page high water + * mark, free the page back to the system. + * + * (2) Move it to the end of the page list, so that + * we minimize our chances of fragmenting the + * pool. Idle pages migrate to the end (along with + * completely empty pages, so that we find un-empty + * pages more quickly when we update curpage) of the + * list so they can be more easily swept up by + * the pagedaemon when pages are scarce. */ if (ph->ph_nmissing == 0) { - if (pp->pr_npages > pp->pr_maxpages) { -#if 0 - timeout(pool_drain, 0, pool_inactive_time*hz); -#else - pr_rmpage(pp, ph); -#endif + pp->pr_nidle++; + if (pp->pr_npages > pp->pr_maxpages || + (pp->pr_alloc->pa_flags & PA_WANT) != 0) { + pr_rmpage(pp, ph, NULL); } else { TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist); TAILQ_INSERT_TAIL(&pp->pr_pagelist, ph, ph_pagelist); - ph->ph_time = time; - /* XXX - update curpage */ - for (ph = TAILQ_FIRST(&pp->pr_pagelist); ph != NULL; - ph = TAILQ_NEXT(ph, ph_pagelist)) + /* + * Update the timestamp on the page. A page must + * be idle for some period of time before it can + * be reclaimed by the pagedaemon. This minimizes + * ping-pong'ing for memory. + */ + s = splclock(); + ph->ph_time = mono_time; + splx(s); + + /* + * Update the current page pointer. Just look for + * the first page with any free items. + * + * XXX: Maybe we want an option to look for the + * page with the fewest available items, to minimize + * fragmentation? 
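+			 *
+			 * A sketch of that alternative (not implemented;
+			 * "fewest available" is the largest ph_nmissing
+			 * among pages that still have a free item):
+			 *
+			 *	best = NULL;
+			 *	TAILQ_FOREACH(ph, &pp->pr_pagelist,
+			 *	    ph_pagelist)
+			 *		if (TAILQ_FIRST(&ph->ph_itemlist) !=
+			 *		    NULL && (best == NULL ||
+			 *		    ph->ph_nmissing > best->ph_nmissing))
+			 *			best = ph;
+			 *	pp->pr_curpage = best;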
+ */ + TAILQ_FOREACH(ph, &pp->pr_pagelist, ph_pagelist) if (TAILQ_FIRST(&ph->ph_itemlist) != NULL) break; pp->pr_curpage = ph; } } + /* + * If the page has just become un-empty, move it to the head of + * the list, and make it the current page. The next allocation + * will get the item from this page, instead of further fragmenting + * the pool. + */ + else if (ph->ph_nmissing == (pp->pr_itemsperpage - 1)) { + TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist); + TAILQ_INSERT_HEAD(&pp->pr_pagelist, ph, ph_pagelist); + pp->pr_curpage = ph; + } +} + +/* + * Return resource to the pool; must be called at appropriate spl level + */ +#ifdef POOL_DIAGNOSTIC +void +_pool_put(struct pool *pp, void *v, const char *file, long line) +{ + + simple_lock(&pp->pr_slock); + pr_enter(pp, file, line); + + pr_log(pp, v, PRLOG_PUT, file, line); + + pool_do_put(pp, v); + + pr_leave(pp); + simple_unlock(&pp->pr_slock); +} +#undef pool_put +#endif /* POOL_DIAGNOSTIC */ + +void +pool_put(struct pool *pp, void *v) +{ + + simple_lock(&pp->pr_slock); + + pool_do_put(pp, v); - simple_unlock(&pp->pr_lock); + simple_unlock(&pp->pr_slock); } +#ifdef POOL_DIAGNOSTIC +#define pool_put(h, v) _pool_put((h), (v), __FILE__, __LINE__) +#endif + /* * Add N items to the pool. */ int -pool_prime(pp, n, storage) - struct pool *pp; - int n; - caddr_t storage; +pool_prime(struct pool *pp, int n) { + struct pool_item_header *ph; caddr_t cp; - int newnitems, newpages; + int newpages; -#ifdef DIAGNOSTIC - if (storage && !(pp->pr_flags & PR_STATIC)) - panic("pool_prime: static"); - /* !storage && static caught below */ -#endif + simple_lock(&pp->pr_slock); - newnitems = pp->pr_minitems + n; - newpages = - roundup(pp->pr_itemsperpage,newnitems) / pp->pr_itemsperpage - - pp->pr_minpages; + newpages = roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage; - simple_lock(&pp->pr_lock); while (newpages-- > 0) { - - if (pp->pr_flags & PR_STATIC) { - cp = storage; - storage += pp->pr_pagesz; - } else { - cp = (*pp->pr_alloc)(pp->pr_pagesz, 0, pp->pr_mtype); - } - - if (cp == NULL) { - simple_unlock(&pp->pr_lock); - return (ENOMEM); + simple_unlock(&pp->pr_slock); + cp = pool_allocator_alloc(pp, PR_NOWAIT); + if (__predict_true(cp != NULL)) + ph = pool_alloc_item_header(pp, cp, PR_NOWAIT); + simple_lock(&pp->pr_slock); + + if (__predict_false(cp == NULL || ph == NULL)) { + if (cp != NULL) + pool_allocator_free(pp, cp); + break; } - pool_prime_page(pp, cp); + pool_prime_page(pp, cp, ph); + pp->pr_npagealloc++; pp->pr_minpages++; } - pp->pr_minitems = newnitems; - if (pp->pr_minpages >= pp->pr_maxpages) pp->pr_maxpages = pp->pr_minpages + 1; /* XXX */ - simple_unlock(&pp->pr_lock); + simple_unlock(&pp->pr_slock); return (0); } /* * Add a page worth of items to the pool. + * + * Note, we must be called with the pool descriptor LOCKED. 
*/ -int -pool_prime_page(pp, storage) - struct pool *pp; - caddr_t storage; +static void +pool_prime_page(struct pool *pp, caddr_t storage, struct pool_item_header *ph) { struct pool_item *pi; - struct pool_item_header *ph; caddr_t cp = storage; unsigned int align = pp->pr_align; unsigned int ioff = pp->pr_itemoffset; int n; - if ((pp->pr_flags & PR_PHINPAGE) != 0) { - ph = (struct pool_item_header *)(cp + pp->pr_phoffset); - } else { - ph = pool_get(&phpool, PR_URGENT); +#ifdef DIAGNOSTIC + if (((u_long)cp & (pp->pr_alloc->pa_pagesz - 1)) != 0) + panic("pool_prime_page: %s: unaligned page", pp->pr_wchan); +#endif + + if ((pp->pr_roflags & PR_PHINPAGE) == 0) LIST_INSERT_HEAD(&pp->pr_hashtab[PR_HASH_INDEX(pp, cp)], - ph, ph_hashlist); - } + ph, ph_hashlist); /* * Insert page header. @@ -728,7 +1100,9 @@ pool_prime_page(pp, storage) TAILQ_INIT(&ph->ph_itemlist); ph->ph_page = storage; ph->ph_nmissing = 0; - ph->ph_time.tv_sec = ph->ph_time.tv_usec = 0; + memset(&ph->ph_time, 0, sizeof(ph->ph_time)); + + pp->pr_nidle++; /* * Color this page. @@ -747,10 +1121,13 @@ pool_prime_page(pp, storage) * Insert remaining chunks on the bucket list. */ n = pp->pr_itemsperpage; + pp->pr_nitems += n; while (n--) { pi = (struct pool_item *)cp; + KASSERT(((((vaddr_t)pi) + ioff) & (align - 1)) == 0); + /* Insert on page list */ TAILQ_INSERT_TAIL(&ph->ph_itemlist, pi, pi_list); #ifdef DIAGNOSTIC @@ -767,84 +1144,146 @@ pool_prime_page(pp, storage) if (++pp->pr_npages > pp->pr_hiwat) pp->pr_hiwat = pp->pr_npages; - - return (0); } -void -pool_setlowat(pp, n) - pool_handle_t pp; - int n; +/* + * Used by pool_get() when nitems drops below the low water mark. This + * is used to catch up nitmes with the low water mark. + * + * Note 1, we never wait for memory here, we let the caller decide what to do. + * + * Note 2, we must be called with the pool already locked, and we return + * with it locked. + */ +static int +pool_catchup(struct pool *pp) { - pp->pr_minitems = n; - if (n == 0) { - pp->pr_minpages = 0; - return; + struct pool_item_header *ph; + caddr_t cp; + int error = 0; + + while (POOL_NEEDS_CATCHUP(pp)) { + /* + * Call the page back-end allocator for more memory. + * + * XXX: We never wait, so should we bother unlocking + * the pool descriptor? + */ + simple_unlock(&pp->pr_slock); + cp = pool_allocator_alloc(pp, PR_NOWAIT); + if (__predict_true(cp != NULL)) + ph = pool_alloc_item_header(pp, cp, PR_NOWAIT); + simple_lock(&pp->pr_slock); + if (__predict_false(cp == NULL || ph == NULL)) { + if (cp != NULL) + pool_allocator_free(pp, cp); + error = ENOMEM; + break; + } + pool_prime_page(pp, cp, ph); + pp->pr_npagealloc++; } - pp->pr_minpages = - roundup(pp->pr_itemsperpage,n) / pp->pr_itemsperpage; + + return (error); } void -pool_sethiwat(pp, n) - pool_handle_t pp; - int n; +pool_setlowat(struct pool *pp, int n) { - if (n == 0) { - pp->pr_maxpages = 0; - return; + + simple_lock(&pp->pr_slock); + + pp->pr_minitems = n; + pp->pr_minpages = (n == 0) + ? 0 + : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage; + + /* Make sure we're caught up with the newly-set low water mark. */ + if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) { + /* + * XXX: Should we log a warning? Should we set up a timeout + * to try again in a second or so? The latter could break + * a caller's assumptions about interrupt protection, etc. + */ } - pp->pr_maxpages = - roundup(pp->pr_itemsperpage,n) / pp->pr_itemsperpage; -} + simple_unlock(&pp->pr_slock); +} -/* - * Default page allocator. 
- */ -static void * -pool_page_alloc(sz, flags, mtype) - unsigned long sz; - int flags; - int mtype; +void +pool_sethiwat(struct pool *pp, int n) { -#if defined(UVM) - return ((void *)uvm_km_alloc_poolpage()); -#else - return ((void *)kmem_alloc_poolpage()); -#endif + simple_lock(&pp->pr_slock); + + pp->pr_maxpages = (n == 0) + ? 0 + : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage; + + simple_unlock(&pp->pr_slock); } -static void -pool_page_free(v, sz, mtype) - void *v; - unsigned long sz; - int mtype; +void +pool_sethardlimit(struct pool *pp, int n, const char *warnmess, int ratecap) { -#if defined(UVM) - uvm_km_free_poolpage((vm_offset_t)v); -#else - kmem_free_poolpage((vm_offset_t)v); -#endif + simple_lock(&pp->pr_slock); + + pp->pr_hardlimit = n; + pp->pr_hardlimit_warning = warnmess; + pp->pr_hardlimit_ratecap.tv_sec = ratecap; + pp->pr_hardlimit_warning_last.tv_sec = 0; + pp->pr_hardlimit_warning_last.tv_usec = 0; + + /* + * In-line version of pool_sethiwat(), because we don't want to + * release the lock. + */ + pp->pr_maxpages = (n == 0) + ? 0 + : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage; + + simple_unlock(&pp->pr_slock); } /* * Release all complete pages that have not been used recently. */ -void -pool_reclaim (pp) - pool_handle_t pp; +int +#ifdef POOL_DIAGNOSTIC +_pool_reclaim(struct pool *pp, const char *file, long line) +#else +pool_reclaim(struct pool *pp) +#endif { struct pool_item_header *ph, *phnext; - struct timeval curtime = time; + struct pool_cache *pc; + struct timeval curtime; + struct pool_pagelist pq; + int s; - if (pp->pr_flags & PR_STATIC) - return; + if (pp->pr_drain_hook != NULL) { + /* + * The drain hook must be called with the pool unlocked. + */ + (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, PR_NOWAIT); + } - if (simple_lock_try(&pp->pr_lock) == 0) - return; + if (simple_lock_try(&pp->pr_slock) == 0) + return (0); + pr_enter(pp, file, line); + + TAILQ_INIT(&pq); + + /* + * Reclaim items from the pool's caches. + */ + TAILQ_FOREACH(pc, &pp->pr_cachelist, pc_poollist) + pool_cache_reclaim(pc); + + s = splclock(); + curtime = mono_time; + splx(s); for (ph = TAILQ_FIRST(&pp->pr_pagelist); ph != NULL; ph = phnext) { phnext = TAILQ_NEXT(ph, ph_pagelist); @@ -858,88 +1297,228 @@ pool_reclaim (pp) timersub(&curtime, &ph->ph_time, &diff); if (diff.tv_sec < pool_inactive_time) continue; - pr_rmpage(pp, ph); + + /* + * If freeing this page would put us below + * the low water mark, stop now. + */ + if ((pp->pr_nitems - pp->pr_itemsperpage) < + pp->pr_minitems) + break; + + pr_rmpage(pp, ph, &pq); } } - simple_unlock(&pp->pr_lock); -} + pr_leave(pp); + simple_unlock(&pp->pr_slock); + if (TAILQ_EMPTY(&pq)) + return (0); + + while ((ph = TAILQ_FIRST(&pq)) != NULL) { + TAILQ_REMOVE(&pq, ph, ph_pagelist); + pool_allocator_free(pp, ph->ph_page); + if (pp->pr_roflags & PR_PHINPAGE) { + continue; + } + LIST_REMOVE(ph, ph_hashlist); + s = splhigh(); + pool_put(&phpool, ph); + splx(s); + } + return (1); +} /* * Drain pools, one at a time. + * + * Note, we must never be called from an interrupt context. 
*/ void -pool_drain(arg) - void *arg; +pool_drain(void *arg) { struct pool *pp; - int s = splimp(); + int s; - /* XXX:lock pool head */ - if (drainpp == NULL && (drainpp = TAILQ_FIRST(&pool_head)) == NULL) { - splx(s); - return; + pp = NULL; + s = splvm(); + simple_lock(&pool_head_slock); + if (drainpp == NULL) { + drainpp = TAILQ_FIRST(&pool_head); + } + if (drainpp) { + pp = drainpp; + drainpp = TAILQ_NEXT(pp, pr_poollist); } - - pp = drainpp; - drainpp = TAILQ_NEXT(pp, pr_poollist); - /* XXX:unlock pool head */ - + simple_unlock(&pool_head_slock); pool_reclaim(pp); splx(s); } - -#ifdef DEBUG /* * Diagnostic helpers. */ void -pool_print(pp, label) - struct pool *pp; - char *label; +pool_print(struct pool *pp, const char *modif) +{ + int s; + + s = splvm(); + if (simple_lock_try(&pp->pr_slock) == 0) { + printf("pool %s is locked; try again later\n", + pp->pr_wchan); + splx(s); + return; + } + pool_print1(pp, modif, printf); + simple_unlock(&pp->pr_slock); + splx(s); +} + +void +pool_printit(struct pool *pp, const char *modif, void (*pr)(const char *, ...)) { + int didlock = 0; - if (label != NULL) - printf("%s: ", label); + if (pp == NULL) { + (*pr)("Must specify a pool to print.\n"); + return; + } + + /* + * Called from DDB; interrupts should be blocked, and all + * other processors should be paused. We can skip locking + * the pool in this case. + * + * We do a simple_lock_try() just to print the lock + * status, however. + */ + + if (simple_lock_try(&pp->pr_slock) == 0) + (*pr)("WARNING: pool %s is locked\n", pp->pr_wchan); + else + didlock = 1; + + pool_print1(pp, modif, pr); - printf("pool %s: nalloc %lu nfree %lu npagealloc %lu npagefree %lu\n" - " npages %u minitems %u itemsperpage %u itemoffset %u\n", - pp->pr_wchan, - pp->pr_nget, - pp->pr_nput, - pp->pr_npagealloc, - pp->pr_npagefree, - pp->pr_npages, - pp->pr_minitems, - pp->pr_itemsperpage, - pp->pr_itemoffset); + if (didlock) + simple_unlock(&pp->pr_slock); +} + +static void +pool_print1(struct pool *pp, const char *modif, void (*pr)(const char *, ...)) +{ + struct pool_item_header *ph; + struct pool_cache *pc; + struct pool_cache_group *pcg; +#ifdef DIAGNOSTIC + struct pool_item *pi; +#endif + int i, print_log = 0, print_pagelist = 0, print_cache = 0; + char c; + + while ((c = *modif++) != '\0') { + if (c == 'l') + print_log = 1; + if (c == 'p') + print_pagelist = 1; + if (c == 'c') + print_cache = 1; + modif++; + } + + (*pr)("POOL %s: size %u, align %u, ioff %u, roflags 0x%08x\n", + pp->pr_wchan, pp->pr_size, pp->pr_align, pp->pr_itemoffset, + pp->pr_roflags); + (*pr)("\talloc %p\n", pp->pr_alloc); + (*pr)("\tminitems %u, minpages %u, maxpages %u, npages %u\n", + pp->pr_minitems, pp->pr_minpages, pp->pr_maxpages, pp->pr_npages); + (*pr)("\titemsperpage %u, nitems %u, nout %u, hardlimit %u\n", + pp->pr_itemsperpage, pp->pr_nitems, pp->pr_nout, pp->pr_hardlimit); + + (*pr)("\n\tnget %lu, nfail %lu, nput %lu\n", + pp->pr_nget, pp->pr_nfail, pp->pr_nput); + (*pr)("\tnpagealloc %lu, npagefree %lu, hiwat %u, nidle %lu\n", + pp->pr_npagealloc, pp->pr_npagefree, pp->pr_hiwat, pp->pr_nidle); + + if (print_pagelist == 0) + goto skip_pagelist; + + if ((ph = TAILQ_FIRST(&pp->pr_pagelist)) != NULL) + (*pr)("\n\tpage list:\n"); + for (; ph != NULL; ph = TAILQ_NEXT(ph, ph_pagelist)) { + (*pr)("\t\tpage %p, nmissing %d, time %lu,%lu\n", + ph->ph_page, ph->ph_nmissing, + (u_long)ph->ph_time.tv_sec, + (u_long)ph->ph_time.tv_usec); +#ifdef DIAGNOSTIC + TAILQ_FOREACH(pi, &ph->ph_itemlist, pi_list) { + if (pi->pi_magic != PI_MAGIC) { + 
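+				/* pi_magic was overwritten while free */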
(*pr)("\t\t\titem %p, magic 0x%x\n", + pi, pi->pi_magic); + } + } +#endif + } + if (pp->pr_curpage == NULL) + (*pr)("\tno current page\n"); + else + (*pr)("\tcurpage %p\n", pp->pr_curpage->ph_page); + + skip_pagelist: + + if (print_log == 0) + goto skip_log; + + (*pr)("\n"); + if ((pp->pr_roflags & PR_LOGGING) == 0) + (*pr)("\tno log\n"); + else + pr_printlog(pp, NULL, pr); + + skip_log: + + if (print_cache == 0) + goto skip_cache; + + TAILQ_FOREACH(pc, &pp->pr_cachelist, pc_poollist) { + (*pr)("\tcache %p: allocfrom %p freeto %p\n", pc, + pc->pc_allocfrom, pc->pc_freeto); + (*pr)("\t hits %lu misses %lu ngroups %lu nitems %lu\n", + pc->pc_hits, pc->pc_misses, pc->pc_ngroups, pc->pc_nitems); + TAILQ_FOREACH(pcg, &pc->pc_grouplist, pcg_list) { + (*pr)("\t\tgroup %p: avail %d\n", pcg, pcg->pcg_avail); + for (i = 0; i < PCG_NOBJECTS; i++) + (*pr)("\t\t\t%p\n", pcg->pcg_objects[i]); + } + } + + skip_cache: + + pr_enter_check(pp, pr); } int -pool_chk(pp, label) - struct pool *pp; - char *label; +pool_chk(struct pool *pp, const char *label) { struct pool_item_header *ph; int r = 0; - simple_lock(&pp->pr_lock); - - for (ph = TAILQ_FIRST(&pp->pr_pagelist); ph != NULL; - ph = TAILQ_NEXT(ph, ph_pagelist)) { + simple_lock(&pp->pr_slock); + TAILQ_FOREACH(ph, &pp->pr_pagelist, ph_pagelist) { struct pool_item *pi; int n; caddr_t page; - page = (caddr_t)((u_long)ph & pp->pr_pagemask); - if (page != ph->ph_page) { + page = (caddr_t)((u_long)ph & pp->pr_alloc->pa_pagemask); + if (page != ph->ph_page && + (pp->pr_roflags & PR_PHINPAGE) != 0) { if (label != NULL) printf("%s: ", label); - printf("pool(%s): page inconsistency: page %p;" - " at page head addr %p (p %p)\n", + printf("pool(%p:%s): page inconsistency: page %p;" + " at page head addr %p (p %p)\n", pp, pp->pr_wchan, ph->ph_page, ph, page); r++; @@ -962,14 +1541,15 @@ pool_chk(pp, label) panic("pool"); } #endif - page = (caddr_t)((u_long)pi & pp->pr_pagemask); + page = + (caddr_t)((u_long)pi & pp->pr_alloc->pa_pagemask); if (page == ph->ph_page) continue; if (label != NULL) printf("%s: ", label); - printf("pool(%s): page inconsistency: page %p;" - " item ordinal %d; addr %p (p %p)\n", + printf("pool(%p:%s): page inconsistency: page %p;" + " item ordinal %d; addr %p (p %p)\n", pp, pp->pr_wchan, ph->ph_page, n, pi, page); r++; @@ -977,7 +1557,503 @@ pool_chk(pp, label) } } out: - simple_unlock(&pp->pr_lock); + simple_unlock(&pp->pr_slock); return (r); } + +/* + * pool_cache_init: + * + * Initialize a pool cache. + * + * NOTE: If the pool must be protected from interrupts, we expect + * to be called at the appropriate interrupt priority level. + */ +void +pool_cache_init(struct pool_cache *pc, struct pool *pp, + int (*ctor)(void *, void *, int), + void (*dtor)(void *, void *), + void *arg) +{ + + TAILQ_INIT(&pc->pc_grouplist); + simple_lock_init(&pc->pc_slock); + + pc->pc_allocfrom = NULL; + pc->pc_freeto = NULL; + pc->pc_pool = pp; + + pc->pc_ctor = ctor; + pc->pc_dtor = dtor; + pc->pc_arg = arg; + + pc->pc_hits = 0; + pc->pc_misses = 0; + + pc->pc_ngroups = 0; + + pc->pc_nitems = 0; + + simple_lock(&pp->pr_slock); + TAILQ_INSERT_TAIL(&pp->pr_cachelist, pc, pc_poollist); + simple_unlock(&pp->pr_slock); +} + +/* + * pool_cache_destroy: + * + * Destroy a pool cache. + */ +void +pool_cache_destroy(struct pool_cache *pc) +{ + struct pool *pp = pc->pc_pool; + + /* First, invalidate the entire cache. */ + pool_cache_invalidate(pc); + + /* ...and remove it from the pool's cache list. 
*/ + simple_lock(&pp->pr_slock); + TAILQ_REMOVE(&pp->pr_cachelist, pc, pc_poollist); + simple_unlock(&pp->pr_slock); +} + +static __inline void * +pcg_get(struct pool_cache_group *pcg) +{ + void *object; + u_int idx; + + KASSERT(pcg->pcg_avail <= PCG_NOBJECTS); + KASSERT(pcg->pcg_avail != 0); + idx = --pcg->pcg_avail; + + KASSERT(pcg->pcg_objects[idx] != NULL); + object = pcg->pcg_objects[idx]; + pcg->pcg_objects[idx] = NULL; + + return (object); +} + +static __inline void +pcg_put(struct pool_cache_group *pcg, void *object) +{ + u_int idx; + + KASSERT(pcg->pcg_avail < PCG_NOBJECTS); + idx = pcg->pcg_avail++; + + KASSERT(pcg->pcg_objects[idx] == NULL); + pcg->pcg_objects[idx] = object; +} + +/* + * pool_cache_get: + * + * Get an object from a pool cache. + */ +void * +pool_cache_get(struct pool_cache *pc, int flags) +{ + struct pool_cache_group *pcg; + void *object; + +#ifdef LOCKDEBUG + if (flags & PR_WAITOK) + simple_lock_only_held(NULL, "pool_cache_get(PR_WAITOK)"); #endif + + simple_lock(&pc->pc_slock); + + if ((pcg = pc->pc_allocfrom) == NULL) { + TAILQ_FOREACH(pcg, &pc->pc_grouplist, pcg_list) { + if (pcg->pcg_avail != 0) { + pc->pc_allocfrom = pcg; + goto have_group; + } + } + + /* + * No groups with any available objects. Allocate + * a new object, construct it, and return it to + * the caller. We will allocate a group, if necessary, + * when the object is freed back to the cache. + */ + pc->pc_misses++; + simple_unlock(&pc->pc_slock); + object = pool_get(pc->pc_pool, flags); + if (object != NULL && pc->pc_ctor != NULL) { + if ((*pc->pc_ctor)(pc->pc_arg, object, flags) != 0) { + pool_put(pc->pc_pool, object); + return (NULL); + } + } + return (object); + } + + have_group: + pc->pc_hits++; + pc->pc_nitems--; + object = pcg_get(pcg); + + if (pcg->pcg_avail == 0) + pc->pc_allocfrom = NULL; + + simple_unlock(&pc->pc_slock); + + return (object); +} + +/* + * pool_cache_put: + * + * Put an object back to the pool cache. + */ +void +pool_cache_put(struct pool_cache *pc, void *object) +{ + struct pool_cache_group *pcg; + int s; + + simple_lock(&pc->pc_slock); + + if ((pcg = pc->pc_freeto) == NULL) { + TAILQ_FOREACH(pcg, &pc->pc_grouplist, pcg_list) { + if (pcg->pcg_avail != PCG_NOBJECTS) { + pc->pc_freeto = pcg; + goto have_group; + } + } + + /* + * No empty groups to free the object to. Attempt to + * allocate one. + */ + simple_unlock(&pc->pc_slock); + s = splvm(); + pcg = pool_get(&pcgpool, PR_NOWAIT); + splx(s); + if (pcg != NULL) { + memset(pcg, 0, sizeof(*pcg)); + simple_lock(&pc->pc_slock); + pc->pc_ngroups++; + TAILQ_INSERT_TAIL(&pc->pc_grouplist, pcg, pcg_list); + if (pc->pc_freeto == NULL) + pc->pc_freeto = pcg; + goto have_group; + } + + /* + * Unable to allocate a cache group; destruct the object + * and free it back to the pool. + */ + pool_cache_destruct_object(pc, object); + return; + } + + have_group: + pc->pc_nitems++; + pcg_put(pcg, object); + + if (pcg->pcg_avail == PCG_NOBJECTS) + pc->pc_freeto = NULL; + + simple_unlock(&pc->pc_slock); +} + +/* + * pool_cache_destruct_object: + * + * Force destruction of an object and its release back into + * the pool. + */ +void +pool_cache_destruct_object(struct pool_cache *pc, void *object) +{ + + if (pc->pc_dtor != NULL) + (*pc->pc_dtor)(pc->pc_arg, object); + pool_put(pc->pc_pool, object); +} + +/* + * pool_cache_do_invalidate: + * + * This internal function implements pool_cache_invalidate() and + * pool_cache_reclaim(). 
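+ *
+ *	The two wrappers differ only in their arguments:
+ *	pool_cache_invalidate() keeps the (now empty) cache groups
+ *	and frees objects with pool_put(), while pool_cache_reclaim()
+ *	also frees the groups and uses pool_do_put(), because
+ *	pool_reclaim() already holds the pool's lock.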
+ */ +static void +pool_cache_do_invalidate(struct pool_cache *pc, int free_groups, + void (*putit)(struct pool *, void *)) +{ + struct pool_cache_group *pcg, *npcg; + void *object; + int s; + + for (pcg = TAILQ_FIRST(&pc->pc_grouplist); pcg != NULL; + pcg = npcg) { + npcg = TAILQ_NEXT(pcg, pcg_list); + while (pcg->pcg_avail != 0) { + pc->pc_nitems--; + object = pcg_get(pcg); + if (pcg->pcg_avail == 0 && pc->pc_allocfrom == pcg) + pc->pc_allocfrom = NULL; + if (pc->pc_dtor != NULL) + (*pc->pc_dtor)(pc->pc_arg, object); + (*putit)(pc->pc_pool, object); + } + if (free_groups) { + pc->pc_ngroups--; + TAILQ_REMOVE(&pc->pc_grouplist, pcg, pcg_list); + if (pc->pc_freeto == pcg) + pc->pc_freeto = NULL; + s = splvm(); + pool_put(&pcgpool, pcg); + splx(s); + } + } +} + +/* + * pool_cache_invalidate: + * + * Invalidate a pool cache (destruct and release all of the + * cached objects). + */ +void +pool_cache_invalidate(struct pool_cache *pc) +{ + + simple_lock(&pc->pc_slock); + pool_cache_do_invalidate(pc, 0, pool_put); + simple_unlock(&pc->pc_slock); +} + +/* + * pool_cache_reclaim: + * + * Reclaim a pool cache for pool_reclaim(). + */ +static void +pool_cache_reclaim(struct pool_cache *pc) +{ + + simple_lock(&pc->pc_slock); + pool_cache_do_invalidate(pc, 1, pool_do_put); + simple_unlock(&pc->pc_slock); +} + +/* + * Pool backend allocators. + * + * Each pool has a backend allocator that handles allocation, deallocation, + * and any additional draining that might be needed. + * + * We provide two standard allocators: + * + * pool_allocator_kmem - the default when no allocator is specified + * + * pool_allocator_nointr - used for pools that will not be accessed + * in interrupt context. + */ +void *pool_page_alloc(struct pool *, int); +void pool_page_free(struct pool *, void *); + +struct pool_allocator pool_allocator_kmem = { + pool_page_alloc, pool_page_free, 0, +}; + +void *pool_page_alloc_nointr(struct pool *, int); +void pool_page_free_nointr(struct pool *, void *); + +struct pool_allocator pool_allocator_nointr = { + pool_page_alloc_nointr, pool_page_free_nointr, 0, +}; + +#ifdef POOL_SUBPAGE +void *pool_subpage_alloc(struct pool *, int); +void pool_subpage_free(struct pool *, void *); + +struct pool_allocator pool_allocator_kmem_subpage = { + pool_subpage_alloc, pool_subpage_free, 0, +}; +#endif /* POOL_SUBPAGE */ + +/* + * We have at least three different resources for the same allocation and + * each resource can be depleted. First, we have the ready elements in the + * pool. Then we have the resource (typically a vm_map) for this allocator. + * Finally, we have physical memory. Waiting for any of these can be + * unnecessary when any other is freed, but the kernel doesn't support + * sleeping on multiple wait channels, so we have to employ another strategy. + * + * The caller sleeps on the pool (so that it can be awakened when an item + * is returned to the pool), but we set PA_WANT on the allocator. When a + * page is returned to the allocator and PA_WANT is set, pool_allocator_free + * will wake up all sleeping pools belonging to this allocator. + * + * XXX Thundering herd. + */ +void * +pool_allocator_alloc(struct pool *org, int flags) +{ + struct pool_allocator *pa = org->pr_alloc; + struct pool *pp, *start; + int s, freed; + void *res; + + do { + if ((res = (*pa->pa_alloc)(org, flags)) != NULL) + return (res); + if ((flags & PR_WAITOK) == 0) { + /* + * We only run the drain hookhere if PR_NOWAIT. + * In other cases, the hook will be run in + * pool_reclaim(). 
+ */ + if (org->pr_drain_hook != NULL) { + (*org->pr_drain_hook)(org->pr_drain_hook_arg, + flags); + if ((res = (*pa->pa_alloc)(org, flags)) != NULL) + return (res); + } + break; + } + + /* + * Drain all pools, except "org", that use this + * allocator. We do this to reclaim VA space. + * pa_alloc is responsible for waiting for + * physical memory. + * + * XXX We risk looping forever if start if someone + * calls pool_destroy on "start". But there is no + * other way to have potentially sleeping pool_reclaim, + * non-sleeping locks on pool_allocator, and some + * stirring of drained pools in the allocator. + * + * XXX Maybe we should use pool_head_slock for locking + * the allocators? + */ + freed = 0; + + s = splvm(); + simple_lock(&pa->pa_slock); + pp = start = TAILQ_FIRST(&pa->pa_list); + do { + TAILQ_REMOVE(&pa->pa_list, pp, pr_alloc_list); + TAILQ_INSERT_TAIL(&pa->pa_list, pp, pr_alloc_list); + if (pp == org) + continue; + simple_unlock(&pa->pa_slock); + freed = pool_reclaim(pp); + simple_lock(&pa->pa_slock); + } while ((pp = TAILQ_FIRST(&pa->pa_list)) != start && + freed == 0); + + if (freed == 0) { + /* + * We set PA_WANT here, the caller will most likely + * sleep waiting for pages (if not, this won't hurt + * that much), and there is no way to set this in + * the caller without violating locking order. + */ + pa->pa_flags |= PA_WANT; + } + simple_unlock(&pa->pa_slock); + splx(s); + } while (freed); + return (NULL); +} + +void +pool_allocator_free(struct pool *pp, void *v) +{ + struct pool_allocator *pa = pp->pr_alloc; + int s; + + (*pa->pa_free)(pp, v); + + s = splvm(); + simple_lock(&pa->pa_slock); + if ((pa->pa_flags & PA_WANT) == 0) { + simple_unlock(&pa->pa_slock); + splx(s); + return; + } + + TAILQ_FOREACH(pp, &pa->pa_list, pr_alloc_list) { + simple_lock(&pp->pr_slock); + if ((pp->pr_flags & PR_WANTED) != 0) { + pp->pr_flags &= ~PR_WANTED; + wakeup(pp); + } + simple_unlock(&pp->pr_slock); + } + pa->pa_flags &= ~PA_WANT; + simple_unlock(&pa->pa_slock); + splx(s); +} + +void * +pool_page_alloc(struct pool *pp, int flags) +{ + boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE; + + return ((void *) uvm_km_alloc_poolpage(waitok)); +} + +void +pool_page_free(struct pool *pp, void *v) +{ + + uvm_km_free_poolpage((vaddr_t) v); +} + +#ifdef POOL_SUBPAGE +/* Sub-page allocator, for machines with large hardware pages. */ +void * +pool_subpage_alloc(struct pool *pp, int flags) +{ + + return (pool_get(&psppool, flags)); +} + +void +pool_subpage_free(struct pool *pp, void *v) +{ + + pool_put(&psppool, v); +} + +/* We don't provide a real nointr allocator. Maybe later. */ +void * +pool_page_alloc_nointr(struct pool *pp, int flags) +{ + + return (pool_subpage_alloc(pp, flags)); +} + +void +pool_page_free_nointr(struct pool *pp, void *v) +{ + + pool_subpage_free(pp, v); +} +#else +void * +pool_page_alloc_nointr(struct pool *pp, int flags) +{ + boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE; + + return ((void *) uvm_km_alloc_poolpage1(kernel_map, + uvm.kernel_object, waitok)); +} + +void +pool_page_free_nointr(struct pool *pp, void *v) +{ + + uvm_km_free_poolpage1(kernel_map, (vaddr_t) v); +} +#endif /* POOL_SUBPAGE */