===================================================================
RCS file: /ftp/cvs/cvsroot/src/sys/kern/subr_pool.c,v
rcsdiff: /ftp/cvs/cvsroot/src/sys/kern/subr_pool.c,v: warning: Unknown phrases like `commitid ...;' are present.
retrieving revision 1.63
retrieving revision 1.90
diff -u -p -r1.63 -r1.90
--- src/sys/kern/subr_pool.c	2001/10/21 00:06:05	1.63
+++ src/sys/kern/subr_pool.c	2004/01/09 19:00:16	1.90
@@ -1,4 +1,4 @@
-/*	$NetBSD: subr_pool.c,v 1.63 2001/10/21 00:06:05 chs Exp $	*/
+/*	$NetBSD: subr_pool.c,v 1.90 2004/01/09 19:00:16 thorpej Exp $	*/
 
 /*-
  * Copyright (c) 1997, 1999, 2000 The NetBSD Foundation, Inc.
@@ -37,6 +37,9 @@
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
+#include <sys/cdefs.h>
+__KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.90 2004/01/09 19:00:16 thorpej Exp $");
+
 #include "opt_pool.h"
 #include "opt_poollog.h"
 #include "opt_lockdebug.h"
@@ -56,12 +59,14 @@
 /*
  * Pool resource management utility.
  *
- * Memory is allocated in pages which are split into pieces according
- * to the pool item size. Each page is kept on a list headed by `pr_pagelist'
- * in the pool structure and the individual pool items are on a linked list
- * headed by `ph_itemlist' in each page header. The memory for building
- * the page list is either taken from the allocated pages themselves (for
- * small pool items) or taken from an internal pool of page headers (`phpool').
+ * Memory is allocated in pages which are split into pieces according to
+ * the pool item size. Each page is kept on one of three lists in the
+ * pool structure: `pr_emptypages', `pr_fullpages' and `pr_partpages',
+ * for empty, full and partially-full pages respectively. The individual
+ * pool items are on a linked list headed by `ph_itemlist' in each page
+ * header. The memory for building the page list is either taken from
+ * the allocated pages themselves (for small pool items) or taken from
+ * an internal pool of page headers (`phpool').
  */
 
 /* List of all pools */
@@ -86,29 +91,25 @@ struct simplelock pool_head_slock = SIMP
 
 struct pool_item_header {
 	/* Page headers */
-	TAILQ_ENTRY(pool_item_header)
+	LIST_ENTRY(pool_item_header)
 				ph_pagelist;	/* pool page list */
 	TAILQ_HEAD(,pool_item)	ph_itemlist;	/* chunk list for this page */
-	LIST_ENTRY(pool_item_header)
-				ph_hashlist;	/* Off-page page headers */
-	int			ph_nmissing;	/* # of chunks in use */
+	SPLAY_ENTRY(pool_item_header)
+				ph_node;	/* Off-page page headers */
+	unsigned int		ph_nmissing;	/* # of chunks in use */
 	caddr_t			ph_page;	/* this page's address */
 	struct timeval		ph_time;	/* last referenced */
 };
-TAILQ_HEAD(pool_pagelist,pool_item_header);
 
 struct pool_item {
 #ifdef DIAGNOSTIC
-	int pi_magic;
+	u_int pi_magic;
 #endif
-#define	PI_MAGIC 0xdeadbeef
+#define	PI_MAGIC 0xdeadbeefU
 	/* Other entries use only this list entry */
 	TAILQ_ENTRY(pool_item)	pi_list;
 };
 
-#define	PR_HASH_INDEX(pp,addr) \
-	(((u_long)(addr) >> (pp)->pr_pageshift) & (PR_HASHTABSIZE - 1))
-
 #define	POOL_NEEDS_CATCHUP(pp)						\
 	((pp)->pr_nitems < (pp)->pr_minitems)
 
@@ -141,31 +142,24 @@ struct pool_item {
 /* The cache group pool. */
 static struct pool pcgpool;
 
-/* The pool cache group. */
-#define	PCG_NOBJECTS	16
-struct pool_cache_group {
-	TAILQ_ENTRY(pool_cache_group)
-		pcg_list;	/* link in the pool cache's group list */
-	u_int	pcg_avail;	/* # available objects */
-			/* pointers to the objects */
-	void	*pcg_objects[PCG_NOBJECTS];
-};
-
 static void	pool_cache_reclaim(struct pool_cache *);
 
 static int	pool_catchup(struct pool *);
 static void	pool_prime_page(struct pool *, caddr_t,
 		    struct pool_item_header *);
-static void	*pool_page_alloc(unsigned long, int, int);
-static void	pool_page_free(void *, unsigned long, int);
-#ifdef POOL_SUBPAGE
-static void	*pool_subpage_alloc(unsigned long, int, int);
-static void	pool_subpage_free(void *, unsigned long, int);
-#endif
+static void	pool_update_curpage(struct pool *);
+
+void		*pool_allocator_alloc(struct pool *, int);
+void		pool_allocator_free(struct pool *, void *);
 
+static void pool_print_pagelist(struct pool_pagelist *,
+	void (*)(const char *, ...));
 static void pool_print1(struct pool *, const char *,
 	void (*)(const char *, ...));
 
+static int pool_chk_page(struct pool *, const char *,
+	struct pool_item_header *);
+
 /*
  * Pool log entry. An array of these is allocated in pool_init().
  */
@@ -178,6 +172,7 @@ struct pool_log {
 	void	*pl_addr;
 };
 
+#ifdef POOL_DIAGNOSTIC
 /* Number of entries in pool log buffers */
 #ifndef POOL_LOGSIZE
 #define	POOL_LOGSIZE	10
@@ -185,7 +180,6 @@ struct pool_log {
 
 int pool_logsize = POOL_LOGSIZE;
 
-#ifdef POOL_DIAGNOSTIC
 static __inline void
 pr_log(struct pool *pp, void *v, int action, const char *file, long line)
 {
@@ -284,24 +278,34 @@ pr_enter_check(struct pool *pp, void (*p
 #define	pr_enter_check(pp, pr)
 #endif /* POOL_DIAGNOSTIC */
 
+static __inline int
+phtree_compare(struct pool_item_header *a, struct pool_item_header *b)
+{
+	if (a->ph_page < b->ph_page)
+		return (-1);
+	else if (a->ph_page > b->ph_page)
+		return (1);
+	else
+		return (0);
+}
+
+SPLAY_PROTOTYPE(phtree, pool_item_header, ph_node, phtree_compare);
+SPLAY_GENERATE(phtree, pool_item_header, ph_node, phtree_compare);
+
 /*
  * Return the pool page header based on page address.
  */
 static __inline struct pool_item_header *
 pr_find_pagehead(struct pool *pp, caddr_t page)
 {
-	struct pool_item_header *ph;
+	struct pool_item_header *ph, tmp;
 
 	if ((pp->pr_roflags & PR_PHINPAGE) != 0)
 		return ((struct pool_item_header *)(page + pp->pr_phoffset));
 
-	for (ph = LIST_FIRST(&pp->pr_hashtab[PR_HASH_INDEX(pp, page)]);
-	     ph != NULL;
-	     ph = LIST_NEXT(ph, ph_hashlist)) {
-		if (ph->ph_page == page)
-			return (ph);
-	}
-	return (NULL);
+	tmp.ph_page = page;
+	ph = SPLAY_FIND(phtree, &pp->pr_phtree, &tmp);
+	return ph;
 }
 
 /*
@@ -331,14 +335,14 @@ pr_rmpage(struct pool *pp, struct pool_i
 	/*
 	 * Unlink a page from the pool and release it (or queue it for release).
 	 */
-	TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
+	LIST_REMOVE(ph, ph_pagelist);
 	if (pq) {
-		TAILQ_INSERT_HEAD(pq, ph, ph_pagelist);
+		LIST_INSERT_HEAD(pq, ph, ph_pagelist);
 	} else {
-		(*pp->pr_free)(ph->ph_page, pp->pr_pagesz, pp->pr_mtype);
+		pool_allocator_free(pp, ph->ph_page);
 		if ((pp->pr_roflags & PR_PHINPAGE) == 0) {
-			LIST_REMOVE(ph, ph_hashlist);
-			s = splhigh();
+			SPLAY_REMOVE(phtree, &pp->pr_phtree, ph);
+			s = splvm();
 			pool_put(&phpool, ph);
 			splx(s);
 		}
@@ -346,18 +350,7 @@ pr_rmpage(struct pool *pp, struct pool_i
 	pp->pr_npages--;
 	pp->pr_npagefree++;
 
-	if (pp->pr_curpage == ph) {
-		/*
-		 * Find a new non-empty page header, if any.
-		 * Start search from the page head, to increase the
-		 * chance for "high water" pages to be freed.
-		 */
-		TAILQ_FOREACH(ph, &pp->pr_pagelist, ph_pagelist)
-			if (TAILQ_FIRST(&ph->ph_itemlist) != NULL)
-				break;
-
-		pp->pr_curpage = ph;
-	}
+	pool_update_curpage(pp);
 }
 
 /*
@@ -368,12 +361,9 @@ pr_rmpage(struct pool *pp, struct pool_i
  */
 void
 pool_init(struct pool *pp, size_t size, u_int align, u_int ioff, int flags,
-    const char *wchan, size_t pagesz,
-    void *(*alloc)(unsigned long, int, int),
-    void (*release)(void *, unsigned long, int),
-    int mtype)
+    const char *wchan, struct pool_allocator *palloc)
 {
-	int off, slack, i;
+	int off, slack;
 
 #ifdef POOL_DIAGNOSTIC
 	/*
@@ -383,34 +373,43 @@ pool_init(struct pool *pp, size_t size,
 	flags |= PR_LOGGING;
 #endif
 
+#ifdef POOL_SUBPAGE
 	/*
-	 * Check arguments and construct default values.
+	 * XXX We don't provide a real `nointr' back-end
+	 * yet; all sub-pages come from a kmem back-end.
+	 * maybe some day...
 	 */
-	if (!powerof2(pagesz))
-		panic("pool_init: page size invalid (%lx)\n", (u_long)pagesz);
-
-	if (alloc == NULL && release == NULL) {
+	if (palloc == NULL) {
+		extern struct pool_allocator pool_allocator_kmem_subpage;
+		palloc = &pool_allocator_kmem_subpage;
+	}
+	/*
+	 * We'll assume any user-specified back-end allocator
+	 * will deal with sub-pages, or simply don't care.
	 */
+#else
+	if (palloc == NULL)
+		palloc = &pool_allocator_kmem;
+#endif /* POOL_SUBPAGE */
+	if ((palloc->pa_flags & PA_INITIALIZED) == 0) {
+		if (palloc->pa_pagesz == 0) {
 #ifdef POOL_SUBPAGE
-		alloc = pool_subpage_alloc;
-		release = pool_subpage_free;
-		pagesz = POOL_SUBPAGE;
+			if (palloc == &pool_allocator_kmem)
+				palloc->pa_pagesz = PAGE_SIZE;
+			else
+				palloc->pa_pagesz = POOL_SUBPAGE;
 #else
-		alloc = pool_page_alloc;
-		release = pool_page_free;
-		pagesz = PAGE_SIZE;	/* Rounds to PAGE_SIZE anyhow. */
-#endif
-	} else if ((alloc != NULL && release != NULL) == 0) {
-		/* If you specifiy one, must specify both. */
-		panic("pool_init: must specify alloc and release together");
+			palloc->pa_pagesz = PAGE_SIZE;
+#endif /* POOL_SUBPAGE */
+		}
+
+		TAILQ_INIT(&palloc->pa_list);
+
+		simple_lock_init(&palloc->pa_slock);
+		palloc->pa_pagemask = ~(palloc->pa_pagesz - 1);
+		palloc->pa_pageshift = ffs(palloc->pa_pagesz) - 1;
+		palloc->pa_flags |= PA_INITIALIZED;
 	}
-#ifdef POOL_SUBPAGE
-	else if (alloc == pool_page_alloc_nointr &&
-	    release == pool_page_free_nointr)
-		pagesz = POOL_SUBPAGE;
-#endif
-
-	if (pagesz == 0)
-		pagesz = PAGE_SIZE;
 
 	if (align == 0)
 		align = ALIGN(1);
@@ -418,15 +417,19 @@ pool_init(struct pool *pp, size_t size,
 	if (size < sizeof(struct pool_item))
 		size = sizeof(struct pool_item);
 
-	size = ALIGN(size);
-	if (size > pagesz)
+	size = roundup(size, align);
+#ifdef DIAGNOSTIC
+	if (size > palloc->pa_pagesz)
 		panic("pool_init: pool item size (%lu) too large",
 		    (u_long)size);
+#endif
 
 	/*
	 * Initialize the pool structure.
	 */
-	TAILQ_INIT(&pp->pr_pagelist);
+	LIST_INIT(&pp->pr_emptypages);
+	LIST_INIT(&pp->pr_fullpages);
+	LIST_INIT(&pp->pr_partpages);
 	TAILQ_INIT(&pp->pr_cachelist);
 	pp->pr_curpage = NULL;
 	pp->pr_npages = 0;
@@ -438,12 +441,7 @@ pool_init(struct pool *pp, size_t size,
 	pp->pr_size = size;
 	pp->pr_align = align;
 	pp->pr_wchan = wchan;
-	pp->pr_mtype = mtype;
-	pp->pr_alloc = alloc;
-	pp->pr_free = release;
-	pp->pr_pagesz = pagesz;
-	pp->pr_pagemask = ~(pagesz - 1);
-	pp->pr_pageshift = ffs(pagesz) - 1;
+	pp->pr_alloc = palloc;
 	pp->pr_nitems = 0;
 	pp->pr_nout = 0;
 	pp->pr_hardlimit = UINT_MAX;
@@ -452,6 +450,8 @@ pool_init(struct pool *pp, size_t size,
 	pp->pr_hardlimit_ratecap.tv_usec = 0;
 	pp->pr_hardlimit_warning_last.tv_sec = 0;
 	pp->pr_hardlimit_warning_last.tv_usec = 0;
+	pp->pr_drain_hook = NULL;
+	pp->pr_drain_hook_arg = NULL;
 
 	/*
 	 * Decide whether to put the page header off page to avoid
@@ -460,18 +460,16 @@ pool_init(struct pool *pp, size_t size,
 	 * with its header based on the page address.
 	 * We use 1/16 of the page size as the threshold (XXX: tune)
 	 */
-	if (pp->pr_size < pagesz/16) {
+	if (pp->pr_size < palloc->pa_pagesz/16) {
 		/* Use the end of the page for the page header */
 		pp->pr_roflags |= PR_PHINPAGE;
-		pp->pr_phoffset = off =
-			pagesz - ALIGN(sizeof(struct pool_item_header));
+		pp->pr_phoffset = off = palloc->pa_pagesz -
+		    ALIGN(sizeof(struct pool_item_header));
 	} else {
 		/* The page header will be taken from our page header pool */
 		pp->pr_phoffset = 0;
-		off = pagesz;
-		for (i = 0; i < PR_HASHTABSIZE; i++) {
-			LIST_INIT(&pp->pr_hashtab[i]);
-		}
+		off = palloc->pa_pagesz;
+		SPLAY_INIT(&pp->pr_phtree);
 	}
 
 	/*
@@ -525,22 +523,26 @@ pool_init(struct pool *pp, size_t size,
 	if (phpool.pr_size == 0) {
 #ifdef POOL_SUBPAGE
 		pool_init(&phpool, sizeof(struct pool_item_header), 0, 0, 0,
-		    "phpool", PAGE_SIZE, pool_page_alloc, pool_page_free, 0);
+		    "phpool", &pool_allocator_kmem);
 		pool_init(&psppool, POOL_SUBPAGE, POOL_SUBPAGE, 0,
-		    PR_RECURSIVE, "psppool", PAGE_SIZE,
-		    pool_page_alloc, pool_page_free, 0);
+		    PR_RECURSIVE, "psppool", &pool_allocator_kmem);
 #else
 		pool_init(&phpool, sizeof(struct pool_item_header), 0, 0,
-		    0, "phpool", 0, 0, 0, 0);
+		    0, "phpool", NULL);
 #endif
 		pool_init(&pcgpool, sizeof(struct pool_cache_group), 0, 0,
-		    0, "pcgpool", 0, 0, 0, 0);
+		    0, "pcgpool", NULL);
 	}
 
 	/* Insert into the list of all pools. */
 	simple_lock(&pool_head_slock);
 	TAILQ_INSERT_TAIL(&pool_head, pp, pr_poollist);
 	simple_unlock(&pool_head_slock);
+
+	/* Insert this into the list of pools using this allocator. */
+	simple_lock(&palloc->pa_slock);
+	TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list);
+	simple_unlock(&palloc->pa_slock);
 }
 
 /*
@@ -552,6 +554,11 @@ pool_destroy(struct pool *pp)
 	struct pool_item_header *ph;
 	struct pool_cache *pc;
 
+	/* Locking order: pool_allocator -> pool */
+	simple_lock(&pp->pr_alloc->pa_slock);
+	TAILQ_REMOVE(&pp->pr_alloc->pa_list, pp, pr_alloc_list);
+	simple_unlock(&pp->pr_alloc->pa_slock);
+
 	/* Destroy all caches for this pool. */
 	while ((pc = TAILQ_FIRST(&pp->pr_cachelist)) != NULL)
 		pool_cache_destroy(pc);
@@ -559,15 +566,16 @@ pool_destroy(struct pool *pp)
 #ifdef DIAGNOSTIC
 	if (pp->pr_nout != 0) {
 		pr_printlog(pp, NULL, printf);
-		panic("pool_destroy: pool busy: still out: %u\n",
+		panic("pool_destroy: pool busy: still out: %u",
 		    pp->pr_nout);
 	}
 #endif
 
 	/* Remove all pages */
-	if ((pp->pr_roflags & PR_STATIC) == 0)
-		while ((ph = TAILQ_FIRST(&pp->pr_pagelist)) != NULL)
-			pr_rmpage(pp, ph, NULL);
+	while ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
+		pr_rmpage(pp, ph, NULL);
+	KASSERT(LIST_EMPTY(&pp->pr_fullpages));
+	KASSERT(LIST_EMPTY(&pp->pr_partpages));
 
 	/* Remove from global pool list */
 	simple_lock(&pool_head_slock);
@@ -581,12 +589,22 @@ pool_destroy(struct pool *pp)
 	if ((pp->pr_roflags & PR_LOGGING) != 0)
 		free(pp->pr_log, M_TEMP);
 #endif
+}
+
+void
+pool_set_drain_hook(struct pool *pp, void (*fn)(void *, int), void *arg)
+{
 
-	if (pp->pr_roflags & PR_FREEHEADER)
-		free(pp, M_POOL);
+	/* XXX no locking -- must be used just after pool_init() */
+#ifdef DIAGNOSTIC
+	if (pp->pr_drain_hook != NULL)
+		panic("pool_set_drain_hook(%s): already set", pp->pr_wchan);
+#endif
+	pp->pr_drain_hook = fn;
+	pp->pr_drain_hook_arg = arg;
 }
 
-static __inline struct pool_item_header *
+static struct pool_item_header *
 pool_alloc_item_header(struct pool *pp, caddr_t storage, int flags)
 {
 	struct pool_item_header *ph;
@@ -597,7 +615,7 @@ pool_alloc_item_header(struct pool *pp,
 	if ((pp->pr_roflags & PR_PHINPAGE) != 0)
 		ph = (struct pool_item_header *) (storage + pp->pr_phoffset);
 	else {
-		s = splhigh();
+		s = splvm();
 		ph = pool_get(&phpool, flags);
 		splx(s);
 	}
@@ -620,15 +638,9 @@ pool_get(struct pool *pp, int flags)
 	void *v;
 
 #ifdef DIAGNOSTIC
-	if (__predict_false((pp->pr_roflags & PR_STATIC) &&
-	    (flags & PR_MALLOCOK))) {
-		pr_printlog(pp, NULL, printf);
-		panic("pool_get: static");
-	}
-
-	if (__predict_false(curproc == NULL && doing_shutdown == 0 &&
+	if (__predict_false(curlwp == NULL && doing_shutdown == 0 &&
	    (flags & PR_WAITOK) != 0))
-		panic("pool_get: must have NOWAIT");
+		panic("pool_get: %s: must have NOWAIT", pp->pr_wchan);
 
 #ifdef LOCKDEBUG
 	if (flags & PR_WAITOK)
@@ -653,6 +665,21 @@ pool_get(struct pool *pp, int flags)
 	}
 #endif
 	if (__predict_false(pp->pr_nout == pp->pr_hardlimit)) {
+		if (pp->pr_drain_hook != NULL) {
+			/*
+			 * Since the drain hook is going to free things
+			 * back to the pool, unlock, call the hook, re-lock,
+			 * and check the hardlimit condition again.
+			 */
+			pr_leave(pp);
+			simple_unlock(&pp->pr_slock);
+			(*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags);
+			simple_lock(&pp->pr_slock);
+			pr_enter(pp, file, line);
+			if (pp->pr_nout < pp->pr_hardlimit)
+				goto startover;
+		}
+
 		if ((flags & PR_WAITOK) && !(flags & PR_LIMITFAIL)) {
 			/*
 			 * XXX: A warning isn't logged in this case. Should
@@ -673,9 +700,6 @@ pool_get(struct pool *pp, int flags)
 		    &pp->pr_hardlimit_ratecap))
 			log(LOG_ERR, "%s\n", pp->pr_hardlimit_warning);
 
-		if (flags & PR_URGENT)
-			panic("pool_get: urgent");
-
 		pp->pr_nfail++;
 
 		pr_leave(pp);
@@ -695,7 +719,7 @@ pool_get(struct pool *pp, int flags)
 			simple_unlock(&pp->pr_slock);
 			printf("pool_get: %s: curpage NULL, nitems %u\n",
 			    pp->pr_wchan, pp->pr_nitems);
-			panic("pool_get: nitems inconsistent\n");
+			panic("pool_get: nitems inconsistent");
 		}
 #endif
 
@@ -706,7 +730,7 @@ pool_get(struct pool *pp, int flags)
 		 */
 		pr_leave(pp);
 		simple_unlock(&pp->pr_slock);
-		v = (*pp->pr_alloc)(pp->pr_pagesz, flags, pp->pr_mtype);
+		v = pool_allocator_alloc(pp, flags);
 		if (__predict_true(v != NULL))
 			ph = pool_alloc_item_header(pp, v, flags);
 		simple_lock(&pp->pr_slock);
@@ -714,7 +738,7 @@ pool_get(struct pool *pp, int flags)
 
 		if (__predict_false(v == NULL || ph == NULL)) {
 			if (v != NULL)
-				(*pp->pr_free)(v, pp->pr_pagesz, pp->pr_mtype);
+				pool_allocator_free(pp, v);
 
 			/*
 			 * We were unable to allocate a page or item
@@ -725,9 +749,6 @@ pool_get(struct pool *pp, int flags)
 			if (pp->pr_curpage != NULL)
 				goto startover;
 
-			if (flags & PR_URGENT)
-				panic("pool_get: urgent");
-
 			if ((flags & PR_WAITOK) == 0) {
 				pp->pr_nfail++;
 				pr_leave(pp);
@@ -738,15 +759,11 @@ pool_get(struct pool *pp, int flags)
 			/*
 			 * Wait for items to be returned to this pool.
 			 *
-			 * XXX: we actually want to wait just until
-			 * the page allocator has memory again. Depending
-			 * on this pool's usage, we might get stuck here
-			 * for a long time.
-			 *
 			 * XXX: maybe we should wake up once a second and
 			 * try again?
 			 */
 			pp->pr_flags |= PR_WANTED;
+			/* PA_WANTED is already set on the allocator. */
 			pr_leave(pp);
 			ltsleep(pp, PSWP, pp->pr_wchan, 0, &pp->pr_slock);
 			pr_enter(pp, file, line);
@@ -760,7 +777,6 @@ pool_get(struct pool *pp, int flags)
 		/* Start the allocation process over. */
 		goto startover;
 	}
-
 	if (__predict_false((v = pi = TAILQ_FIRST(&ph->ph_itemlist)) == NULL)) {
 		pr_leave(pp);
 		simple_unlock(&pp->pr_slock);
@@ -772,11 +788,15 @@ pool_get(struct pool *pp, int flags)
 		simple_unlock(&pp->pr_slock);
 		printf("pool_get: %s: items on itemlist, nitems %u\n",
 		    pp->pr_wchan, pp->pr_nitems);
-		panic("pool_get: nitems inconsistent\n");
+		panic("pool_get: nitems inconsistent");
 	}
+#endif
 
+#ifdef POOL_DIAGNOSTIC
 	pr_log(pp, v, PRLOG_GET, file, line);
+#endif
 
+#ifdef DIAGNOSTIC
 	if (__predict_false(pi->pi_magic != PI_MAGIC)) {
 		pr_printlog(pp, pi, printf);
 		panic("pool_get(%s): free list modified: magic=%x; page %p;"
@@ -797,9 +817,16 @@ pool_get(struct pool *pp, int flags)
 			panic("pool_get: nidle inconsistent");
 #endif
 		pp->pr_nidle--;
+
+		/*
+		 * This page was previously empty. Move it to the list of
+		 * partially-full pages. This page is already curpage.
+		 */
+		LIST_REMOVE(ph, ph_pagelist);
+		LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
 	}
 	ph->ph_nmissing++;
-	if (TAILQ_FIRST(&ph->ph_itemlist) == NULL) {
+	if (TAILQ_EMPTY(&ph->ph_itemlist)) {
 #ifdef DIAGNOSTIC
 		if (__predict_false(ph->ph_nmissing != pp->pr_itemsperpage)) {
 			pr_leave(pp);
@@ -809,23 +836,12 @@ pool_get(struct pool *pp, int flags)
 		}
 #endif
 		/*
-		 * Find a new non-empty page header, if any.
-		 * Start search from the page head, to increase
-		 * the chance for "high water" pages to be freed.
-		 *
-		 * Migrate empty pages to the end of the list. This
-		 * will speed the update of curpage as pages become
-		 * idle. Empty pages intermingled with idle pages
-		 * is no big deal. As soon as a page becomes un-empty,
-		 * it will move back to the head of the list.
+		 * This page is now full. Move it to the full list
+		 * and select a new current page.
		 */
-		TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
-		TAILQ_INSERT_TAIL(&pp->pr_pagelist, ph, ph_pagelist);
-		TAILQ_FOREACH(ph, &pp->pr_pagelist, ph_pagelist)
-			if (TAILQ_FIRST(&ph->ph_itemlist) != NULL)
-				break;
-
-		pp->pr_curpage = ph;
+		LIST_REMOVE(ph, ph_pagelist);
+		LIST_INSERT_HEAD(&pp->pr_fullpages, ph, ph_pagelist);
+		pool_update_curpage(pp);
 	}
 
 	pp->pr_nget++;
@@ -860,7 +876,7 @@ pool_do_put(struct pool *pp, void *v)
 
 	LOCK_ASSERT(simple_lock_held(&pp->pr_slock));
 
-	page = (caddr_t)((u_long)v & pp->pr_pagemask);
+	page = (caddr_t)((u_long)v & pp->pr_alloc->pa_pagemask);
 
 #ifdef DIAGNOSTIC
 	if (__predict_false(pp->pr_nout == 0)) {
@@ -899,6 +915,7 @@ pool_do_put(struct pool *pp, void *v)
 #endif
 
 	TAILQ_INSERT_HEAD(&ph->ph_itemlist, pi, pi_list);
+	KDASSERT(ph->ph_nmissing != 0);
 	ph->ph_nmissing--;
 	pp->pr_nput++;
 	pp->pr_nitems++;
@@ -917,26 +934,29 @@ pool_do_put(struct pool *pp, void *v)
 	}
 
 	/*
-	 * If this page is now complete, do one of two things:
+	 * If this page is now empty, do one of two things:
+	 *
+	 * (1) If we have more pages than the page high water mark,
+	 *     or if we are flagged as immediately freeing back idle
+	 *     pages, free the page back to the system. ONLY CONSIDER
+	 *     FREEING BACK A PAGE IF WE HAVE MORE THAN OUR MINIMUM PAGE
	 *     CLAIM.
 	 *
-	 * (1) If we have more pages than the page high water
-	 *     mark, free the page back to the system.
+	 * (2) Otherwise, move the page to the empty page list.
 	 *
-	 * (2) Move it to the end of the page list, so that
-	 *     we minimize our chances of fragmenting the
-	 *     pool. Idle pages migrate to the end (along with
-	 *     completely empty pages, so that we find un-empty
-	 *     pages more quickly when we update curpage) of the
-	 *     list so they can be more easily swept up by
-	 *     the pagedaemon when pages are scarce.
+	 * Either way, select a new current page (so we use a partially-full
+	 * page if one is available).
 	 */
 	if (ph->ph_nmissing == 0) {
 		pp->pr_nidle++;
-		if (pp->pr_npages > pp->pr_maxpages) {
+		if (pp->pr_npages > pp->pr_minpages &&
+		    (pp->pr_npages > pp->pr_maxpages ||
+		     (pp->pr_roflags & PR_IMMEDRELEASE) != 0 ||
+		     (pp->pr_alloc->pa_flags & PA_WANT) != 0)) {
 			pr_rmpage(pp, ph, NULL);
 		} else {
-			TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
-			TAILQ_INSERT_TAIL(&pp->pr_pagelist, ph, ph_pagelist);
+			LIST_REMOVE(ph, ph_pagelist);
+			LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);
 
 			/*
 			 * Update the timestamp on the page. A page must
@@ -947,31 +967,19 @@ pool_do_put(struct pool *pp, void *v)
 			s = splclock();
 			ph->ph_time = mono_time;
 			splx(s);
-
-			/*
-			 * Update the current page pointer. Just look for
-			 * the first page with any free items.
-			 *
-			 * XXX: Maybe we want an option to look for the
-			 * page with the fewest available items, to minimize
-			 * fragmentation?
-			 */
-			TAILQ_FOREACH(ph, &pp->pr_pagelist, ph_pagelist)
-				if (TAILQ_FIRST(&ph->ph_itemlist) != NULL)
-					break;
-
-			pp->pr_curpage = ph;
 		}
+		pool_update_curpage(pp);
 	}
+
 	/*
-	 * If the page has just become un-empty, move it to the head of
-	 * the list, and make it the current page. The next allocation
-	 * will get the item from this page, instead of further fragmenting
-	 * the pool.
+	 * If the page was previously completely full, move it to the
+	 * partially-full list and make it the current page. The next
+	 * allocation will get the item from this page, instead of
+	 * further fragmenting the pool.
	 */
 	else if (ph->ph_nmissing == (pp->pr_itemsperpage - 1)) {
-		TAILQ_REMOVE(&pp->pr_pagelist, ph, ph_pagelist);
-		TAILQ_INSERT_HEAD(&pp->pr_pagelist, ph, ph_pagelist);
+		LIST_REMOVE(ph, ph_pagelist);
+		LIST_INSERT_HEAD(&pp->pr_partpages, ph, ph_pagelist);
 		pp->pr_curpage = ph;
 	}
 }
@@ -1018,9 +1026,9 @@ pool_put(struct pool *pp, void *v)
 int
 pool_prime(struct pool *pp, int n)
 {
-	struct pool_item_header *ph;
+	struct pool_item_header *ph = NULL;
 	caddr_t cp;
-	int newpages, error = 0;
+	int newpages;
 
 	simple_lock(&pp->pr_slock);
 
@@ -1028,15 +1036,14 @@ pool_prime(struct pool *pp, int n)
 
 	while (newpages-- > 0) {
 		simple_unlock(&pp->pr_slock);
-		cp = (*pp->pr_alloc)(pp->pr_pagesz, PR_NOWAIT, pp->pr_mtype);
+		cp = pool_allocator_alloc(pp, PR_NOWAIT);
 		if (__predict_true(cp != NULL))
 			ph = pool_alloc_item_header(pp, cp, PR_NOWAIT);
 		simple_lock(&pp->pr_slock);
 
 		if (__predict_false(cp == NULL || ph == NULL)) {
-			error = ENOMEM;
 			if (cp != NULL)
-				(*pp->pr_free)(cp, pp->pr_pagesz, pp->pr_mtype);
+				pool_allocator_free(pp, cp);
 			break;
 		}
 
@@ -1065,22 +1072,25 @@ pool_prime_page(struct pool *pp, caddr_t
 	unsigned int align = pp->pr_align;
 	unsigned int ioff = pp->pr_itemoffset;
 	int n;
+	int s;
 
-	if (((u_long)cp & (pp->pr_pagesz - 1)) != 0)
+#ifdef DIAGNOSTIC
+	if (((u_long)cp & (pp->pr_alloc->pa_pagesz - 1)) != 0)
 		panic("pool_prime_page: %s: unaligned page", pp->pr_wchan);
-
-	if ((pp->pr_roflags & PR_PHINPAGE) == 0)
-		LIST_INSERT_HEAD(&pp->pr_hashtab[PR_HASH_INDEX(pp, cp)],
-		    ph, ph_hashlist);
+#endif
 
 	/*
	 * Insert page header.
	 */
-	TAILQ_INSERT_HEAD(&pp->pr_pagelist, ph, ph_pagelist);
+	LIST_INSERT_HEAD(&pp->pr_emptypages, ph, ph_pagelist);
 	TAILQ_INIT(&ph->ph_itemlist);
 	ph->ph_page = storage;
 	ph->ph_nmissing = 0;
-	memset(&ph->ph_time, 0, sizeof(ph->ph_time));
+	s = splclock();
+	ph->ph_time = mono_time;
+	splx(s);
+
+	if ((pp->pr_roflags & PR_PHINPAGE) == 0)
+		SPLAY_INSERT(phtree, &pp->pr_phtree, ph);
 
 	pp->pr_nidle++;
 
@@ -1106,6 +1116,8 @@ pool_prime_page(struct pool *pp, caddr_t
 	while (n--) {
 		pi = (struct pool_item *)cp;
 
+		KASSERT(((((vaddr_t)pi) + ioff) & (align - 1)) == 0);
+
 		/* Insert on page list */
 		TAILQ_INSERT_TAIL(&ph->ph_itemlist, pi, pi_list);
 #ifdef DIAGNOSTIC
@@ -1126,34 +1138,20 @@ pool_prime_page(struct pool *pp, caddr_t
 
 /*
  * Used by pool_get() when nitems drops below the low water mark. This
- * is used to catch up nitmes with the low water mark.
+ * is used to catch up pr_nitems with the low water mark.
 *
 * Note 1, we never wait for memory here, we let the caller decide what to do.
 *
- * Note 2, this doesn't work with static pools.
- *
- * Note 3, we must be called with the pool already locked, and we return
+ * Note 2, we must be called with the pool already locked, and we return
 * with it locked.
 */
 static int
 pool_catchup(struct pool *pp)
 {
-	struct pool_item_header *ph;
+	struct pool_item_header *ph = NULL;
 	caddr_t cp;
 	int error = 0;
 
-	if (pp->pr_roflags & PR_STATIC) {
-		/*
-		 * We dropped below the low water mark, and this is not a
-		 * good thing. Log a warning.
-		 *
-		 * XXX: rate-limit this?
-		 */
-		printf("WARNING: static pool `%s' dropped below low water "
-		    "mark\n", pp->pr_wchan);
-		return (0);
-	}
-
 	while (POOL_NEEDS_CATCHUP(pp)) {
 		/*
 		 * Call the page back-end allocator for more memory.
@@ -1162,13 +1160,13 @@ pool_catchup(struct pool *pp)
		 * the pool descriptor?
		 */
 		simple_unlock(&pp->pr_slock);
-		cp = (*pp->pr_alloc)(pp->pr_pagesz, PR_NOWAIT, pp->pr_mtype);
+		cp = pool_allocator_alloc(pp, PR_NOWAIT);
 		if (__predict_true(cp != NULL))
 			ph = pool_alloc_item_header(pp, cp, PR_NOWAIT);
 		simple_lock(&pp->pr_slock);
 		if (__predict_false(cp == NULL || ph == NULL)) {
 			if (cp != NULL)
-				(*pp->pr_free)(cp, pp->pr_pagesz, pp->pr_mtype);
+				pool_allocator_free(pp, cp);
 			error = ENOMEM;
 			break;
 		}
@@ -1179,10 +1177,19 @@ pool_catchup(struct pool *pp)
 	return (error);
 }
 
+static void
+pool_update_curpage(struct pool *pp)
+{
+
+	pp->pr_curpage = LIST_FIRST(&pp->pr_partpages);
+	if (pp->pr_curpage == NULL) {
+		pp->pr_curpage = LIST_FIRST(&pp->pr_emptypages);
+	}
+}
+
 void
 pool_setlowat(struct pool *pp, int n)
 {
-	int error;
 
 	simple_lock(&pp->pr_slock);
 
@@ -1192,7 +1199,7 @@ pool_setlowat(struct pool *pp, int n)
 		: roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage;
 
 	/* Make sure we're caught up with the newly-set low water mark. */
-	if (POOL_NEEDS_CATCHUP(pp) && (error = pool_catchup(pp) != 0)) {
+	if (POOL_NEEDS_CATCHUP(pp) && pool_catchup(pp) != 0) {
 		/*
 		 * XXX: Should we log a warning? Should we set up a timeout
 		 * to try again in a second or so? The latter could break
@@ -1240,84 +1247,9 @@ pool_sethardlimit(struct pool *pp, int n
 }
 
 /*
- * Default page allocator.
- */
-static void *
-pool_page_alloc(unsigned long sz, int flags, int mtype)
-{
-	boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
-
-	return ((void *)uvm_km_alloc_poolpage(waitok));
-}
-
-static void
-pool_page_free(void *v, unsigned long sz, int mtype)
-{
-
-	uvm_km_free_poolpage((vaddr_t)v);
-}
-
-#ifdef POOL_SUBPAGE
-/*
- * Sub-page allocator, for machines with large hardware pages.
- */
-static void *
-pool_subpage_alloc(unsigned long sz, int flags, int mtype)
-{
-
-	return pool_get(&psppool, flags);
-}
-
-static void
-pool_subpage_free(void *v, unsigned long sz, int mtype)
-{
-
-	pool_put(&psppool, v);
-}
-#endif
-
-#ifdef POOL_SUBPAGE
-/* We don't provide a real nointr allocator. Maybe later. */
-void *
-pool_page_alloc_nointr(unsigned long sz, int flags, int mtype)
-{
-
-	return pool_subpage_alloc(sz, flags, mtype);
-}
-
-void
-pool_page_free_nointr(void *v, unsigned long sz, int mtype)
-{
-
-	pool_subpage_free(v, sz, mtype);
-}
-#else
-/*
- * Alternate pool page allocator for pools that know they will
- * never be accessed in interrupt context.
- */
-void *
-pool_page_alloc_nointr(unsigned long sz, int flags, int mtype)
-{
-	boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
-
-	return ((void *)uvm_km_alloc_poolpage1(kernel_map, uvm.kernel_object,
-	    waitok));
-}
-
-void
-pool_page_free_nointr(void *v, unsigned long sz, int mtype)
-{
-
-	uvm_km_free_poolpage1(kernel_map, (vaddr_t)v);
-}
-#endif
-
-
-/*
 * Release all complete pages that have not been used recently.
 */
-void
+int
 #ifdef POOL_DIAGNOSTIC
 _pool_reclaim(struct pool *pp, const char *file, long line)
 #else
@@ -1328,15 +1260,21 @@ pool_reclaim(struct pool *pp)
 	struct pool_cache *pc;
 	struct timeval curtime;
 	struct pool_pagelist pq;
+	struct timeval diff;
 	int s;
 
-	if (pp->pr_roflags & PR_STATIC)
-		return;
+	if (pp->pr_drain_hook != NULL) {
+		/*
+		 * The drain hook must be called with the pool unlocked.
		 */
+		(*pp->pr_drain_hook)(pp->pr_drain_hook_arg, PR_NOWAIT);
+	}
 
 	if (simple_lock_try(&pp->pr_slock) == 0)
-		return;
+		return (0);
 	pr_enter(pp, file, line);
-	TAILQ_INIT(&pq);
+
+	LIST_INIT(&pq);
 
 	/*
	 * Reclaim items from the pool's caches.
	 */
 	TAILQ_FOREACH(pc, &pp->pr_cachelist, pc_poollist)
 		pool_cache_reclaim(pc);
 
@@ -1348,49 +1286,48 @@ pool_reclaim(struct pool *pp)
 	s = splclock();
 	curtime = mono_time;
 	splx(s);
 
-	for (ph = TAILQ_FIRST(&pp->pr_pagelist); ph != NULL; ph = phnext) {
-		phnext = TAILQ_NEXT(ph, ph_pagelist);
+	for (ph = LIST_FIRST(&pp->pr_emptypages); ph != NULL; ph = phnext) {
+		phnext = LIST_NEXT(ph, ph_pagelist);
 
 		/* Check our minimum page claim */
 		if (pp->pr_npages <= pp->pr_minpages)
 			break;
 
-		if (ph->ph_nmissing == 0) {
-			struct timeval diff;
-			timersub(&curtime, &ph->ph_time, &diff);
-			if (diff.tv_sec < pool_inactive_time)
-				continue;
+		KASSERT(ph->ph_nmissing == 0);
+		timersub(&curtime, &ph->ph_time, &diff);
+		if (diff.tv_sec < pool_inactive_time)
+			continue;
 
-			/*
-			 * If freeing this page would put us below
-			 * the low water mark, stop now.
-			 */
-			if ((pp->pr_nitems - pp->pr_itemsperpage) <
-			    pp->pr_minitems)
-				break;
+		/*
+		 * If freeing this page would put us below
+		 * the low water mark, stop now.
		 */
+		if ((pp->pr_nitems - pp->pr_itemsperpage) <
+		    pp->pr_minitems)
+			break;
 
-			pr_rmpage(pp, ph, &pq);
-		}
+		pr_rmpage(pp, ph, &pq);
 	}
 
 	pr_leave(pp);
 	simple_unlock(&pp->pr_slock);
-	if (TAILQ_EMPTY(&pq)) {
-		return;
-	}
-	while ((ph = TAILQ_FIRST(&pq)) != NULL) {
-		TAILQ_REMOVE(&pq, ph, ph_pagelist);
-		(*pp->pr_free)(ph->ph_page, pp->pr_pagesz, pp->pr_mtype);
+	if (LIST_EMPTY(&pq))
+		return (0);
+
+	while ((ph = LIST_FIRST(&pq)) != NULL) {
+		LIST_REMOVE(ph, ph_pagelist);
+		pool_allocator_free(pp, ph->ph_page);
 		if (pp->pr_roflags & PR_PHINPAGE) {
 			continue;
 		}
-		LIST_REMOVE(ph, ph_hashlist);
-		s = splhigh();
+		SPLAY_REMOVE(phtree, &pp->pr_phtree, ph);
+		s = splvm();
 		pool_put(&phpool, ph);
 		splx(s);
 	}
-}
+
+	return (1);
+}
 
 /*
 * Drain pools, one at a time.
@@ -1418,7 +1355,6 @@ pool_drain(void *arg)
 	splx(s);
 }
 
-
 /*
 * Diagnostic helpers.
 */
@@ -1470,14 +1406,35 @@ pool_printit(struct pool *pp, const char
 }
 
 static void
-pool_print1(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
+pool_print_pagelist(struct pool_pagelist *pl, void (*pr)(const char *, ...))
 {
 	struct pool_item_header *ph;
-	struct pool_cache *pc;
-	struct pool_cache_group *pcg;
 #ifdef DIAGNOSTIC
 	struct pool_item *pi;
 #endif
+
+	LIST_FOREACH(ph, pl, ph_pagelist) {
+		(*pr)("\t\tpage %p, nmissing %d, time %lu,%lu\n",
+		    ph->ph_page, ph->ph_nmissing,
+		    (u_long)ph->ph_time.tv_sec,
+		    (u_long)ph->ph_time.tv_usec);
+#ifdef DIAGNOSTIC
+		TAILQ_FOREACH(pi, &ph->ph_itemlist, pi_list) {
+			if (pi->pi_magic != PI_MAGIC) {
+				(*pr)("\t\t\titem %p, magic 0x%x\n",
+				    pi, pi->pi_magic);
			}
		}
+#endif
+	}
+}
+
+static void
+pool_print1(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
+{
+	struct pool_item_header *ph;
+	struct pool_cache *pc;
+	struct pool_cache_group *pcg;
 	int i, print_log = 0, print_pagelist = 0, print_cache = 0;
 	char c;
 
@@ -1488,14 +1445,12 @@ pool_print1(struct pool *pp, const char
 			print_pagelist = 1;
 		if (c == 'c')
 			print_cache = 1;
-		modif++;
 	}
 
 	(*pr)("POOL %s: size %u, align %u, ioff %u, roflags 0x%08x\n",
 	    pp->pr_wchan, pp->pr_size, pp->pr_align, pp->pr_itemoffset,
 	    pp->pr_roflags);
-	(*pr)("\tpagesz %u, mtype %d\n", pp->pr_pagesz, pp->pr_mtype);
-	(*pr)("\talloc %p, release %p\n", pp->pr_alloc, pp->pr_free);
+	(*pr)("\talloc %p\n", pp->pr_alloc);
 	(*pr)("\tminitems %u, minpages %u, maxpages %u, npages %u\n",
 	    pp->pr_minitems, pp->pr_minpages, pp->pr_maxpages, pp->pr_npages);
 	(*pr)("\titemsperpage %u, nitems %u, nout %u, hardlimit %u\n",
@@ -1509,29 +1464,22 @@ pool_print1(struct pool *pp, const char
 	if (print_pagelist == 0)
 		goto skip_pagelist;
 
-	if ((ph = TAILQ_FIRST(&pp->pr_pagelist)) != NULL)
-		(*pr)("\n\tpage list:\n");
-	for (; ph != NULL; ph = TAILQ_NEXT(ph, ph_pagelist)) {
-		(*pr)("\t\tpage %p, nmissing %d, time %lu,%lu\n",
-		    ph->ph_page, ph->ph_nmissing,
-		    (u_long)ph->ph_time.tv_sec,
-		    (u_long)ph->ph_time.tv_usec);
-#ifdef DIAGNOSTIC
-		TAILQ_FOREACH(pi, &ph->ph_itemlist, pi_list) {
-			if (pi->pi_magic != PI_MAGIC) {
-				(*pr)("\t\t\titem %p, magic 0x%x\n",
-				    pi, pi->pi_magic);
-			}
-		}
-#endif
-	}
+	if ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL)
+		(*pr)("\n\tempty page list:\n");
+	pool_print_pagelist(&pp->pr_emptypages, pr);
+	if ((ph = LIST_FIRST(&pp->pr_fullpages)) != NULL)
+		(*pr)("\n\tfull page list:\n");
+	pool_print_pagelist(&pp->pr_fullpages, pr);
+	if ((ph = LIST_FIRST(&pp->pr_partpages)) != NULL)
+		(*pr)("\n\tpartial-page list:\n");
+	pool_print_pagelist(&pp->pr_partpages, pr);
+
 	if (pp->pr_curpage == NULL)
 		(*pr)("\tno current page\n");
 	else
 		(*pr)("\tcurpage %p\n", pp->pr_curpage->ph_page);
 
  skip_pagelist:
-
 	if (print_log == 0)
 		goto skip_log;
 
@@ -1542,7 +1490,6 @@ pool_print1(struct pool *pp, const char
 		pr_printlog(pp, NULL, pr);
 
  skip_log:
-
 	if (print_cache == 0)
 		goto skip_cache;
 
@@ -1553,72 +1500,103 @@ pool_print1(struct pool *pp, const char
 		    pc->pc_hits, pc->pc_misses, pc->pc_ngroups, pc->pc_nitems);
 		TAILQ_FOREACH(pcg, &pc->pc_grouplist, pcg_list) {
 			(*pr)("\t\tgroup %p: avail %d\n", pcg, pcg->pcg_avail);
-			for (i = 0; i < PCG_NOBJECTS; i++)
-				(*pr)("\t\t\t%p\n", pcg->pcg_objects[i]);
+			for (i = 0; i < PCG_NOBJECTS; i++) {
+				if (pcg->pcg_objects[i].pcgo_pa !=
+				    POOL_PADDR_INVALID) {
+					(*pr)("\t\t\t%p, 0x%llx\n",
+					    pcg->pcg_objects[i].pcgo_va,
+					    (unsigned long long)
+					    pcg->pcg_objects[i].pcgo_pa);
+				} else {
+					(*pr)("\t\t\t%p\n",
+					    pcg->pcg_objects[i].pcgo_va);
+				}
+			}
 		}
 	}
 
  skip_cache:
-
 	pr_enter_check(pp, pr);
 }
 
-int
-pool_chk(struct pool *pp, const char *label)
+static int
+pool_chk_page(struct pool *pp, const char *label, struct pool_item_header *ph)
 {
-	struct pool_item_header *ph;
-	int r = 0;
+	struct pool_item *pi;
+	caddr_t page;
+	int n;
 
-	simple_lock(&pp->pr_slock);
+	page = (caddr_t)((u_long)ph & pp->pr_alloc->pa_pagemask);
+	if (page != ph->ph_page &&
+	    (pp->pr_roflags & PR_PHINPAGE) != 0) {
+		if (label != NULL)
+			printf("%s: ", label);
+		printf("pool(%p:%s): page inconsistency: page %p;"
+		    " at page head addr %p (p %p)\n", pp,
+		    pp->pr_wchan, ph->ph_page,
+		    ph, page);
+		return 1;
+	}
+
+	for (pi = TAILQ_FIRST(&ph->ph_itemlist), n = 0;
	     pi != NULL;
	     pi = TAILQ_NEXT(pi,pi_list), n++) {
 
-	TAILQ_FOREACH(ph, &pp->pr_pagelist, ph_pagelist) {
-		struct pool_item *pi;
-		int n;
-		caddr_t page;
-
-		page = (caddr_t)((u_long)ph & pp->pr_pagemask);
-		if (page != ph->ph_page &&
-		    (pp->pr_roflags & PR_PHINPAGE) != 0) {
+#ifdef DIAGNOSTIC
+		if (pi->pi_magic != PI_MAGIC) {
 			if (label != NULL)
 				printf("%s: ", label);
-			printf("pool(%p:%s): page inconsistency: page %p;"
-			    " at page head addr %p (p %p)\n", pp,
-			    pp->pr_wchan, ph->ph_page,
-			    ph, page);
-			r++;
-			goto out;
+			printf("pool(%s): free list modified: magic=%x;"
			    " page %p; item ordinal %d;"
			    " addr %p (p %p)\n",
			    pp->pr_wchan, pi->pi_magic, ph->ph_page,
			    n, pi, page);
+			panic("pool");
 		}
+#endif
+		page =
+		    (caddr_t)((u_long)pi & pp->pr_alloc->pa_pagemask);
+		if (page == ph->ph_page)
+			continue;
 
-		for (pi = TAILQ_FIRST(&ph->ph_itemlist), n = 0;
-		     pi != NULL;
-		     pi = TAILQ_NEXT(pi,pi_list), n++) {
-
-#ifdef DIAGNOSTIC
-			if (pi->pi_magic != PI_MAGIC) {
-				if (label != NULL)
-					printf("%s: ", label);
-				printf("pool(%s): free list modified: magic=%x;"
				    " page %p; item ordinal %d;"
				    " addr %p (p %p)\n",
				    pp->pr_wchan, pi->pi_magic, ph->ph_page,
				    n, pi, page);
-				panic("pool");
-			}
-#endif
-			page = (caddr_t)((u_long)pi & pp->pr_pagemask);
-			if (page == ph->ph_page)
-				continue;
-
-			if (label != NULL)
-				printf("%s: ", label);
-			printf("pool(%p:%s): page inconsistency: page %p;"
-			    " item ordinal %d; addr %p (p %p)\n", pp,
-			    pp->pr_wchan, ph->ph_page,
-			    n, pi, page);
-			r++;
+		if (label != NULL)
+			printf("%s: ", label);
+		printf("pool(%p:%s): page inconsistency: page %p;"
+		    " item ordinal %d; addr %p (p %p)\n", pp,
+		    pp->pr_wchan, ph->ph_page,
+		    n, pi, page);
+		return 1;
+	}
+	return 0;
+}
+
+int
+pool_chk(struct pool *pp, const char *label)
+{
+	struct pool_item_header *ph;
+	int r = 0;
+
+	simple_lock(&pp->pr_slock);
+	LIST_FOREACH(ph, &pp->pr_emptypages, ph_pagelist) {
+		r = pool_chk_page(pp, label, ph);
+		if (r) {
+			goto out;
+		}
+	}
+	LIST_FOREACH(ph, &pp->pr_fullpages, ph_pagelist) {
+		r = pool_chk_page(pp, label, ph);
+		if (r) {
 			goto out;
 		}
 	}
+	LIST_FOREACH(ph, &pp->pr_partpages, ph_pagelist) {
+		r = pool_chk_page(pp, label, ph);
+		if (r) {
+			goto out;
		}
+	}
+
 out:
 	simple_unlock(&pp->pr_slock);
 	return (r);
@@ -1682,7 +1660,7 @@ pool_cache_destroy(struct pool_cache *pc
 }
 
 static __inline void *
-pcg_get(struct pool_cache_group *pcg)
+pcg_get(struct pool_cache_group *pcg, paddr_t *pap)
 {
 	void *object;
 	u_int idx;
@@ -1691,32 +1669,36 @@ pcg_get(struct pool_cache_group *pcg)
 	KASSERT(pcg->pcg_avail != 0);
 	idx = --pcg->pcg_avail;
 
-	KASSERT(pcg->pcg_objects[idx] != NULL);
-	object = pcg->pcg_objects[idx];
-	pcg->pcg_objects[idx] = NULL;
+	KASSERT(pcg->pcg_objects[idx].pcgo_va != NULL);
+	object = pcg->pcg_objects[idx].pcgo_va;
+	if (pap != NULL)
+		*pap = pcg->pcg_objects[idx].pcgo_pa;
+	pcg->pcg_objects[idx].pcgo_va = NULL;
 
 	return (object);
 }
 
 static __inline void
-pcg_put(struct pool_cache_group *pcg, void *object)
+pcg_put(struct pool_cache_group *pcg, void *object, paddr_t pa)
 {
 	u_int idx;
 
 	KASSERT(pcg->pcg_avail < PCG_NOBJECTS);
 	idx = pcg->pcg_avail++;
 
-	KASSERT(pcg->pcg_objects[idx] == NULL);
-	pcg->pcg_objects[idx] = object;
+	KASSERT(pcg->pcg_objects[idx].pcgo_va == NULL);
+	pcg->pcg_objects[idx].pcgo_va = object;
+	pcg->pcg_objects[idx].pcgo_pa = pa;
 }
 
 /*
- * pool_cache_get:
+ * pool_cache_get{,_paddr}:
 *
- *	Get an object from a pool cache.
+ *	Get an object from a pool cache (optionally returning
+ *	the physical address of the object).
 */
 void *
-pool_cache_get(struct pool_cache *pc, int flags)
+pool_cache_get_paddr(struct pool_cache *pc, int flags, paddr_t *pap)
 {
 	struct pool_cache_group *pcg;
 	void *object;
@@ -1751,13 +1733,20 @@ pool_cache_get(struct pool_cache *pc, in
 				return (NULL);
 			}
 		}
+		if (object != NULL && pap != NULL) {
+#ifdef POOL_VTOPHYS
+			*pap = POOL_VTOPHYS(object);
+#else
+			*pap = POOL_PADDR_INVALID;
+#endif
+		}
 		return (object);
 	}
 
  have_group:
 	pc->pc_hits++;
 	pc->pc_nitems--;
-	object = pcg_get(pcg);
+	object = pcg_get(pcg, pap);
 
 	if (pcg->pcg_avail == 0)
 		pc->pc_allocfrom = NULL;
@@ -1768,12 +1757,13 @@ pool_cache_get(struct pool_cache *pc, in
 }
 
 /*
- * pool_cache_put:
+ * pool_cache_put{,_paddr}:
 *
- *	Put an object back to the pool cache.
+ *	Put an object back to the pool cache (optionally caching the
+ *	physical address of the object).
 */
 void
-pool_cache_put(struct pool_cache *pc, void *object)
+pool_cache_put_paddr(struct pool_cache *pc, void *object, paddr_t pa)
 {
 	struct pool_cache_group *pcg;
 	int s;
@@ -1816,7 +1806,7 @@ pool_cache_put(struct pool_cache *pc, vo
 
  have_group:
 	pc->pc_nitems++;
-	pcg_put(pcg, object);
+	pcg_put(pcg, object, pa);
 
 	if (pcg->pcg_avail == PCG_NOBJECTS)
 		pc->pc_freeto = NULL;
@@ -1858,7 +1848,7 @@ pool_cache_do_invalidate(struct pool_cac
 		npcg = TAILQ_NEXT(pcg, pcg_list);
 		while (pcg->pcg_avail != 0) {
 			pc->pc_nitems--;
-			object = pcg_get(pcg);
+			object = pcg_get(pcg, NULL);
 			if (pcg->pcg_avail == 0 && pc->pc_allocfrom == pcg)
 				pc->pc_allocfrom = NULL;
 			if (pc->pc_dtor != NULL)
@@ -1905,3 +1895,218 @@ pool_cache_reclaim(struct pool_cache *pc
 	pool_cache_do_invalidate(pc, 1, pool_do_put);
 	simple_unlock(&pc->pc_slock);
 }
+
+/*
+ * Pool backend allocators.
+ *
+ * Each pool has a backend allocator that handles allocation, deallocation,
+ * and any additional draining that might be needed.
+ *
+ * We provide two standard allocators:
+ *
+ *	pool_allocator_kmem - the default when no allocator is specified
+ *
+ *	pool_allocator_nointr - used for pools that will not be accessed
+ *	in interrupt context.
+ */
+void	*pool_page_alloc(struct pool *, int);
+void	pool_page_free(struct pool *, void *);
+
+struct pool_allocator pool_allocator_kmem = {
+	pool_page_alloc, pool_page_free, 0,
+};
+
+void	*pool_page_alloc_nointr(struct pool *, int);
+void	pool_page_free_nointr(struct pool *, void *);
+
+struct pool_allocator pool_allocator_nointr = {
+	pool_page_alloc_nointr, pool_page_free_nointr, 0,
+};
+
+#ifdef POOL_SUBPAGE
+void	*pool_subpage_alloc(struct pool *, int);
+void	pool_subpage_free(struct pool *, void *);
+
+struct pool_allocator pool_allocator_kmem_subpage = {
+	pool_subpage_alloc, pool_subpage_free, 0,
+};
+#endif /* POOL_SUBPAGE */
+
+/*
+ * We have at least three different resources for the same allocation and
+ * each resource can be depleted. First, we have the ready elements in the
+ * pool. Then we have the resource (typically a vm_map) for this allocator.
+ * Finally, we have physical memory. Waiting for any of these can be
+ * unnecessary when any other is freed, but the kernel doesn't support
+ * sleeping on multiple wait channels, so we have to employ another strategy.
+ *
+ * The caller sleeps on the pool (so that it can be awakened when an item
+ * is returned to the pool), but we set PA_WANT on the allocator. When a
+ * page is returned to the allocator and PA_WANT is set, pool_allocator_free
+ * will wake up all sleeping pools belonging to this allocator.
+ *
+ * XXX Thundering herd.
+ */
+void *
+pool_allocator_alloc(struct pool *org, int flags)
+{
+	struct pool_allocator *pa = org->pr_alloc;
+	struct pool *pp, *start;
+	int s, freed;
+	void *res;
+
+	do {
+		if ((res = (*pa->pa_alloc)(org, flags)) != NULL)
+			return (res);
+		if ((flags & PR_WAITOK) == 0) {
+			/*
+			 * We only run the drain hook here if PR_NOWAIT.
+			 * In other cases, the hook will be run in
+			 * pool_reclaim().
+			 */
+			if (org->pr_drain_hook != NULL) {
+				(*org->pr_drain_hook)(org->pr_drain_hook_arg,
+				    flags);
+				if ((res = (*pa->pa_alloc)(org, flags)) != NULL)
+					return (res);
+			}
+			break;
+		}
+
+		/*
+		 * Drain all pools, except "org", that use this
+		 * allocator. We do this to reclaim VA space.
+		 * pa_alloc is responsible for waiting for
+		 * physical memory.
+		 *
+		 * XXX We risk looping forever if someone
+		 * calls pool_destroy on "start". But there is no
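Notes on the techniques this revision introduces follow; the sketches below are editorial illustrations, not code from the NetBSD tree.

The hash table removed above (PR_HASH_INDEX/ph_hashlist) is replaced by a splay tree: pr_find_pagehead() masks an address down to its page base, puts it into a stub header, and hands that to SPLAY_FIND(). A minimal userland sketch of the same lookup technique, assuming a BSD-style <sys/tree.h>; hdrtree, page_hdr, hdr_cmp and find_hdr are illustrative names:

#include <sys/tree.h>	/* BSD; provides the SPLAY_* macros used in the diff */
#include <stdio.h>

struct page_hdr {
	SPLAY_ENTRY(page_hdr) node;	/* cf. ph_node */
	char *page;			/* page base address: the sort key */
};

static int
hdr_cmp(struct page_hdr *a, struct page_hdr *b)
{

	if (a->page < b->page)
		return (-1);
	if (a->page > b->page)
		return (1);
	return (0);
}

SPLAY_HEAD(hdrtree, page_hdr);
SPLAY_PROTOTYPE(hdrtree, page_hdr, node, hdr_cmp)
SPLAY_GENERATE(hdrtree, page_hdr, node, hdr_cmp)

/* Find the header of the page containing addr, as pr_find_pagehead() does. */
static struct page_hdr *
find_hdr(struct hdrtree *tree, void *addr, unsigned long pagemask)
{
	struct page_hdr key;

	key.page = (char *)((unsigned long)addr & pagemask);	/* page base */
	return (SPLAY_FIND(hdrtree, tree, &key));
}

int
main(void)
{
	struct hdrtree tree = SPLAY_INITIALIZER(&tree);
	struct page_hdr h = { .page = (char *)0x1000 };

	SPLAY_INSERT(hdrtree, &tree, &h);
	/* 0x1234 lies in the 4 KB page based at 0x1000. */
	printf("found %p\n", (void *)find_hdr(&tree, (void *)0x1234, ~0xfffUL));
	return (0);
}

Splaying moves each found header toward the root, so the repeated lookups on the same page that pool_do_put() generates hit near the root in amortized O(log n) or better, without the fixed-size hash table.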
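pool_init() now derives the allocator's page mask and shift once, as pa_pagemask = ~(pa_pagesz - 1) and pa_pageshift = ffs(pa_pagesz) - 1, and pool_do_put() recovers an item's page base with a single AND. A small, self-contained demonstration of that arithmetic (it assumes the page size is a power of two, which the old code enforced with powerof2()):

#include <stdio.h>
#include <strings.h>	/* ffs() */

int
main(void)
{
	unsigned long pagesz = 4096;			/* pa_pagesz */
	unsigned long pagemask = ~(pagesz - 1);		/* pa_pagemask */
	int pageshift = ffs((int)pagesz) - 1;		/* pa_pageshift */
	unsigned long item = 0x12345;			/* an item address */

	/* 4096 => shift 12; the mask clears the low 12 bits. */
	printf("mask %#lx, shift %d\n", pagemask, pageshift);
	/* The page base of the item, as pool_do_put() computes it. */
	printf("page base %#lx\n", item & pagemask);
	return (0);
}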
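The single pr_pagelist becomes three lists (pr_emptypages, pr_fullpages, pr_partpages), so the list scans pool_get() and pool_do_put() used to perform are replaced by constant-time moves whenever ph_nmissing crosses a boundary, plus the trivial pool_update_curpage(). A toy userland model of that bookkeeping, assuming <sys/queue.h>; mini_pool, note_get and note_put are invented names:

#include <sys/queue.h>
#include <stdio.h>

struct page {
	LIST_ENTRY(page) link;
	unsigned nmissing;		/* items handed out from this page */
	unsigned nitems;		/* capacity (cf. pr_itemsperpage) */
};
LIST_HEAD(pagelist, page);

struct mini_pool {
	struct pagelist empty, full, part;
	struct page *curpage;
};

static void
update_curpage(struct mini_pool *p)
{
	/* Prefer a partially-full page, else fall back to an empty one. */
	p->curpage = LIST_FIRST(&p->part);
	if (p->curpage == NULL)
		p->curpage = LIST_FIRST(&p->empty);
}

/* Account for one item taken from page pg (cf. pool_get()). */
static void
note_get(struct mini_pool *p, struct page *pg)
{
	if (pg->nmissing++ == 0) {
		/* Page was empty; it is now partially full. */
		LIST_REMOVE(pg, link);
		LIST_INSERT_HEAD(&p->part, pg, link);
	}
	if (pg->nmissing == pg->nitems) {
		/* Page is now full; pick a new current page. */
		LIST_REMOVE(pg, link);
		LIST_INSERT_HEAD(&p->full, pg, link);
		update_curpage(p);
	}
}

/* Account for one item returned to page pg (cf. pool_do_put()). */
static void
note_put(struct mini_pool *p, struct page *pg)
{
	if (pg->nmissing-- == pg->nitems) {
		/* Page was full; it can serve allocations again. */
		LIST_REMOVE(pg, link);
		LIST_INSERT_HEAD(&p->part, pg, link);
		p->curpage = pg;
	}
	if (pg->nmissing == 0) {
		/* Page is now empty: a candidate for release. */
		LIST_REMOVE(pg, link);
		LIST_INSERT_HEAD(&p->empty, pg, link);
		update_curpage(p);
	}
}

int
main(void)
{
	struct mini_pool p;
	struct page pg = { .nmissing = 0, .nitems = 2 };

	LIST_INIT(&p.empty); LIST_INIT(&p.full); LIST_INIT(&p.part);
	LIST_INSERT_HEAD(&p.empty, &pg, link);
	update_curpage(&p);

	note_get(&p, &pg);	/* empty -> part */
	note_get(&p, &pg);	/* part -> full, curpage becomes NULL */
	note_put(&p, &pg);	/* full -> part, curpage = &pg again */
	printf("curpage %p, nmissing %u\n", (void *)p.curpage, pg.nmissing);
	return (0);
}

A side effect visible in pool_reclaim() above: because only pr_emptypages is scanned, the KASSERT(ph->ph_nmissing == 0) replaces the old per-page test.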
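pool_set_drain_hook() is new in this revision: pool_get() calls the hook (with the pool unlocked) before failing a hard-limit allocation, pool_reclaim() runs it before scanning, and pool_allocator_alloc() runs it for PR_NOWAIT requests. A hypothetical client, sketched as a kernel-style fragment against the signatures in this diff; my_pool, my_item, my_cache and my_cache_release are invented:

/* Kernel-style fragment (not compilable outside a kernel tree). */
static struct pool my_pool;

/*
 * Called when my_pool hits its hard limit or when the backing
 * allocator fails with PR_NOWAIT. It must push privately cached
 * items back into the pool, and must not sleep under PR_NOWAIT.
 */
static void
my_drain(void *arg, int flags)
{
	struct my_cache *mc = arg;		/* invented private cache */

	my_cache_release(mc, flags);		/* invented: returns items */
}

void
my_attach(struct my_cache *mc)
{

	pool_init(&my_pool, sizeof(struct my_item), 0, 0, 0,
	    "mypl", NULL);			/* NULL => pool_allocator_kmem */
	/* Per the XXX comment above, this must follow pool_init() directly. */
	pool_set_drain_hook(&my_pool, my_drain, mc);
}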
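The back-end allocator is likewise now a first-class object: pool_init() takes a struct pool_allocator and lazily fills in pa_pagemask, pa_pageshift and pa_list the first time the allocator is seen (pa_pagesz == 0 selects PAGE_SIZE). A hypothetical custom allocator in the same shape as the pool_allocator_kmem definition above; again a kernel-style fragment, with my_allocator, my_page_alloc and my_page_free invented:

/* Kernel-style fragment; the my_* names are invented. */
void	*my_page_alloc(struct pool *, int);
void	my_page_free(struct pool *, void *);

struct pool_allocator my_allocator = {
	my_page_alloc, my_page_free, 0,	/* pa_pagesz 0 => PAGE_SIZE */
};

/* Same shape as pool_page_alloc()/pool_page_free() above. */
void *
my_page_alloc(struct pool *pp, int flags)
{
	boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;

	return ((void *)uvm_km_alloc_poolpage(waitok));
}

void
my_page_free(struct pool *pp, void *v)
{

	uvm_km_free_poolpage((vaddr_t)v);
}

A pool would then be created with pool_init(&my_pool, size, 0, 0, 0, "mypl", &my_allocator). Every pool handed the same allocator is linked on its pa_list, which is what lets pool_allocator_alloc() drain sibling pools round-robin before giving up, and lets pool_allocator_free() wake them all once PA_WANT is set.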
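Finally, pi_magic becomes u_int and PI_MAGIC gains a U suffix so the 0xdeadbeef comparison is done unsigned, but the underlying DIAGNOSTIC technique is unchanged: poison an item with a magic word when it goes onto the free list and verify that word on the way out, so a write through a stale pointer is caught at the next allocation. A stripped-down userland illustration (item, item_free and item_alloc are invented):

#include <assert.h>
#include <stddef.h>

#define ITEM_MAGIC 0xdeadbeefU

struct item {
	unsigned magic;			/* valid only while on the free list */
	struct item *next;		/* free-list linkage reuses item memory */
};

static struct item *freelist;

static void
item_free(struct item *it)
{
	it->magic = ITEM_MAGIC;		/* poison: catches later writes */
	it->next = freelist;
	freelist = it;
}

static struct item *
item_alloc(void)
{
	struct item *it = freelist;

	if (it == NULL)
		return (NULL);
	/* A mismatch means someone scribbled on a freed item. */
	assert(it->magic == ITEM_MAGIC);
	freelist = it->next;
	return (it);
}

int
main(void)
{
	static struct item slot;

	item_free(&slot);
	return (item_alloc() == &slot ? 0 : 1);
}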