Please note that diffs are not public domain; they are subject to the copyright notices on the relevant files.

===================================================================
RCS file: /ftp/cvs/cvsroot/src/sys/kern/subr_pool.c,v
rcsdiff: /ftp/cvs/cvsroot/src/sys/kern/subr_pool.c,v: warning: Unknown phrases like `commitid ...;' are present.
retrieving revision 1.101.2.7
retrieving revision 1.128.2.4
diff -u -p -r1.101.2.7 -r1.128.2.4
--- src/sys/kern/subr_pool.c	2007/12/07 17:33:07	1.101.2.7
+++ src/sys/kern/subr_pool.c	2007/03/22 12:30:29	1.128.2.4
@@ -1,4 +1,4 @@
-/*	$NetBSD: subr_pool.c,v 1.101.2.7 2007/12/07 17:33:07 yamt Exp $	*/
+/*	$NetBSD: subr_pool.c,v 1.128.2.4 2007/03/22 12:30:29 ad Exp $	*/
 
 /*-
  * Copyright (c) 1997, 1999, 2000, 2002, 2007 The NetBSD Foundation, Inc.
@@ -6,7 +6,7 @@
  *
  * This code is derived from software contributed to The NetBSD Foundation
  * by Paul Kranenburg; by Jason R. Thorpe of the Numerical Aerospace
- * Simulation Facility, NASA Ames Research Center, and by Andrew Doran.
+ * Simulation Facility, NASA Ames Research Center.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -38,7 +38,7 @@
  */
 
 #include
-__KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.101.2.7 2007/12/07 17:33:07 yamt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.128.2.4 2007/03/22 12:30:29 ad Exp $");
 
 #include "opt_pool.h"
 #include "opt_poollog.h"
@@ -46,7 +46,6 @@ __KERNEL_RCSID(0, "$NetBSD: subr_pool.c,
 #include
 #include
-#include
 #include
 #include
 #include
@@ -55,9 +54,6 @@ __KERNEL_RCSID(0, "$NetBSD: subr_pool.c,
 #include
 #include
 #include
-#include
-#include
-#include
 #include
@@ -77,15 +73,10 @@ __KERNEL_RCSID(0, "$NetBSD: subr_pool.c,
 /* List of all pools */
 LIST_HEAD(,pool) pool_head = LIST_HEAD_INITIALIZER(pool_head);
 
-/* List of all caches. */
-LIST_HEAD(,pool_cache) pool_cache_head =
-    LIST_HEAD_INITIALIZER(pool_cache_head);
-
 /* Private pool for page header structures */
 #define	PHPOOL_MAX	8
 static struct pool phpool[PHPOOL_MAX];
-#define PHPOOL_FREELIST_NELEM(idx) \
-	(((idx) == 0) ? 0 : BITMAP_SIZE * (1 << (idx)))
+#define PHPOOL_FREELIST_NELEM(idx) (((idx) == 0) ? 0 : (1 << (idx)))
 
 #ifdef POOL_SUBPAGE
 /* Pool of subpages for use by normal pools. */
 static struct pool psppool;
 #endif
 
@@ -99,7 +90,7 @@ static void *pool_page_alloc_meta(struct
 static void	pool_page_free_meta(struct pool *, void *);
 
 /* allocator for pool metadata */
-struct pool_allocator pool_allocator_meta = {
+static struct pool_allocator pool_allocator_meta = {
 	pool_page_alloc_meta, pool_page_free_meta,
 	.pa_backingmapptr = &kmem_map,
 };
 
@@ -112,11 +103,8 @@ static struct pool *drainpp;
 
 /* This lock protects both pool_head and drainpp. */
 static kmutex_t pool_head_lock;
-static kcondvar_t pool_busy;
 
-typedef uint32_t pool_item_bitmap_t;
-#define	BITMAP_SIZE	(CHAR_BIT * sizeof(pool_item_bitmap_t))
-#define	BITMAP_MASK	(BITMAP_SIZE - 1)
+typedef uint8_t pool_item_freelist_t;
 
 struct pool_item_header {
 	/* Page headers */
@@ -126,7 +114,6 @@ struct pool_item_header {
 				ph_node;	/* Off-page page headers */
 	void *			ph_page;	/* this page's address */
 	struct timeval		ph_time;	/* last referenced */
-	uint16_t		ph_nmissing;	/* # of chunks in use */
 	union {
 		/* !PR_NOTOUCH */
 		struct {
@@ -135,20 +122,27 @@ struct pool_item_header {
 		} phu_normal;
 		/* PR_NOTOUCH */
 		struct {
-			uint16_t phu_off;	/* start offset in page */
-			pool_item_bitmap_t phu_bitmap[];
+			uint16_t
+				phu_off;	/* start offset in page */
+			pool_item_freelist_t
+				phu_firstfree;	/* first free item */
+			/*
+			 * XXX it might be better to use
+			 * a simple bitmap and ffs(3)
+			 */
 		} phu_notouch;
 	} ph_u;
+	uint16_t		ph_nmissing;	/* # of chunks in use */
 };
 #define	ph_itemlist	ph_u.phu_normal.phu_itemlist
 #define	ph_off		ph_u.phu_notouch.phu_off
-#define	ph_bitmap	ph_u.phu_notouch.phu_bitmap
+#define	ph_firstfree	ph_u.phu_notouch.phu_firstfree
 
 struct pool_item {
 #ifdef DIAGNOSTIC
 	u_int pi_magic;
 #endif
-#define	PI_MAGIC	0xdeaddeadU
+#define	PI_MAGIC	0xdeadbeefU
 	/* Other entries use only this list entry */
 	LIST_ENTRY(pool_item)	pi_list;
 };
@@ -164,34 +158,30 @@ struct pool_item {
  * needless object construction/destruction; it is deferred until absolutely
  * necessary.
  *
- * Caches are grouped into cache groups.  Each cache group references up
- * to PCG_NUMOBJECTS constructed objects.  When a cache allocates an
- * object from the pool, it calls the object's constructor and places it
- * into a cache group.  When a cache group frees an object back to the
- * pool, it first calls the object's destructor.  This allows the object
- * to persist in constructed form while freed to the cache.
- *
- * The pool references each cache, so that when a pool is drained by the
- * pagedaemon, it can drain each individual cache as well.  Each time a
- * cache is drained, the most idle cache group is freed to the pool in
- * its entirety.
+ * Caches are grouped into cache groups.  Each cache group references
+ * up to 16 constructed objects.  When a cache allocates an object
+ * from the pool, it calls the object's constructor and places it into
+ * a cache group.  When a cache group frees an object back to the pool,
+ * it first calls the object's destructor.  This allows the object to
+ * persist in constructed form while freed to the cache.
+ *
+ * Multiple caches may exist for each pool.  This allows a single
+ * object type to have multiple constructed forms.  The pool references
+ * each cache, so that when a pool is drained by the pagedaemon, it can
+ * drain each individual cache as well.  Each time a cache is drained,
+ * the most idle cache group is freed to the pool in its entirety.
 *
 * Pool caches are layered on top of pools.  By layering them, we can avoid
 * the complexity of cache management for pools which would not benefit
 * from it.
 */
 
+/* The cache group pool. */
 static struct pool pcgpool;
-static struct pool cache_pool;
-static struct pool cache_cpu_pool;
 
-static pool_cache_cpu_t *pool_cache_put_slow(pool_cache_cpu_t *, int *,
-    void *, paddr_t);
-static pool_cache_cpu_t *pool_cache_get_slow(pool_cache_cpu_t *, int *,
-    void **, paddr_t *, int);
-static void	pool_cache_cpu_init1(struct cpu_info *, pool_cache_t);
-static void	pool_cache_invalidate_groups(pool_cache_t, pcg_t *);
-static void	pool_cache_xcall(pool_cache_t);
+static void	pool_cache_reclaim(struct pool_cache *, struct pool_pagelist *,
+    struct pool_cache_grouplist *);
+static void	pcg_grouplist_free(struct pool_cache_grouplist *);
 
 static int	pool_catchup(struct pool *);
 static void	pool_prime_page(struct pool *, void *,
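The block comment above describes the caching idea that both sides of this diff share: a get from a cache group skips the constructor, and a put keeps the object in constructed form. As an aside, not part of the diff, here is a minimal stand-alone C sketch of one such group. All names (obj_cache, obj_ctor, and so on) are hypothetical, and malloc()/free() stand in for the backing pool_get()/pool_put():

#include <stdio.h>
#include <stdlib.h>

#define PCG_NOBJECTS 16			/* "up to 16 constructed objects" */

struct obj {
	int ready;			/* stands for expensive setup */
};

struct obj_cache {
	void *objects[PCG_NOBJECTS];	/* one "cache group" */
	int avail;			/* constructed objects on hand */
};

static int
obj_ctor(struct obj *o)
{
	o->ready = 1;			/* construction happens once */
	return 0;
}

static void
obj_dtor(struct obj *o)
{
	o->ready = 0;
}

static struct obj *
cache_get(struct obj_cache *c)
{
	if (c->avail > 0)		/* cached object: ctor is skipped */
		return c->objects[--c->avail];
	struct obj *o = malloc(sizeof(*o));	/* stands in for pool_get() */
	if (o != NULL && obj_ctor(o) != 0) {
		free(o);
		return NULL;
	}
	return o;
}

static void
cache_put(struct obj_cache *c, struct obj *o)
{
	if (c->avail < PCG_NOBJECTS) {	/* keep it constructed for reuse */
		c->objects[c->avail++] = o;
		return;
	}
	obj_dtor(o);			/* group full: destruct, ... */
	free(o);			/* ...and back to the pool */
}

int
main(void)
{
	struct obj_cache c = { .avail = 0 };
	struct obj *o = cache_get(&c);
	cache_put(&c, o);		/* o stays constructed... */
	o = cache_get(&c);		/* ...so this get is a cache hit */
	printf("ready=%d avail=%d\n", o->ready, c.avail);
	free(o);
	return 0;
}

The kernel versions differ mainly in where the groups live: per-CPU pairs on the 1.101.2.7 side, shared full/partial/empty lists under pc_lock on the 1.128.2.4 side.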
@@ -328,12 +318,12 @@ pr_enter_check(struct pool *pp, void (*p
 #define	pr_enter_check(pp, pr)
 #endif /* POOL_DIAGNOSTIC */
 
-static inline unsigned int
+static inline int
 pr_item_notouch_index(const struct pool *pp, const struct pool_item_header *ph,
     const void *v)
 {
 	const char *cp = v;
-	unsigned int idx;
+	int idx;
 
 	KASSERT(pp->pr_roflags & PR_NOTOUCH);
 	idx = (cp - (char *)ph->ph_page - ph->ph_off) / pp->pr_size;
@@ -341,55 +331,35 @@ pr_item_notouch_index(const struct pool
 	return idx;
 }
 
+#define	PR_FREELIST_ALIGN(p) \
+	roundup((uintptr_t)(p), sizeof(pool_item_freelist_t))
+#define	PR_FREELIST(ph)	((pool_item_freelist_t *)PR_FREELIST_ALIGN((ph) + 1))
+#define	PR_INDEX_USED	((pool_item_freelist_t)-1)
+#define	PR_INDEX_EOL	((pool_item_freelist_t)-2)
+
 static inline void
 pr_item_notouch_put(const struct pool *pp, struct pool_item_header *ph,
     void *obj)
 {
-	unsigned int idx = pr_item_notouch_index(pp, ph, obj);
-	pool_item_bitmap_t *bitmap = ph->ph_bitmap + (idx / BITMAP_SIZE);
-	pool_item_bitmap_t mask = 1 << (idx & BITMAP_MASK);
+	int idx = pr_item_notouch_index(pp, ph, obj);
+	pool_item_freelist_t *freelist = PR_FREELIST(ph);
 
-	KASSERT((*bitmap & mask) == 0);
-	*bitmap |= mask;
+	KASSERT(freelist[idx] == PR_INDEX_USED);
+	freelist[idx] = ph->ph_firstfree;
+	ph->ph_firstfree = idx;
 }
 
 static inline void *
 pr_item_notouch_get(const struct pool *pp, struct pool_item_header *ph)
 {
-	pool_item_bitmap_t *bitmap = ph->ph_bitmap;
-	unsigned int idx;
-	int i;
-
-	for (i = 0; ; i++) {
-		int bit;
-
-		KASSERT((i * BITMAP_SIZE) < pp->pr_itemsperpage);
-		bit = ffs32(bitmap[i]);
-		if (bit) {
-			pool_item_bitmap_t mask;
-
-			bit--;
-			idx = (i * BITMAP_SIZE) + bit;
-			mask = 1 << bit;
-			KASSERT((bitmap[i] & mask) != 0);
-			bitmap[i] &= ~mask;
-			break;
-		}
-	}
-	KASSERT(idx < pp->pr_itemsperpage);
-	return (char *)ph->ph_page + ph->ph_off + idx * pp->pr_size;
-}
+	int idx = ph->ph_firstfree;
+	pool_item_freelist_t *freelist = PR_FREELIST(ph);
 
-static inline void
-pr_item_notouch_init(const struct pool *pp, struct pool_item_header *ph)
-{
-	pool_item_bitmap_t *bitmap = ph->ph_bitmap;
-	const int n = howmany(pp->pr_itemsperpage, BITMAP_SIZE);
-	int i;
+	KASSERT(freelist[idx] != PR_INDEX_USED);
+	ph->ph_firstfree = freelist[idx];
+	freelist[idx] = PR_INDEX_USED;
 
-	for (i = 0; i < n; i++) {
-		bitmap[i] = (pool_item_bitmap_t)-1;
-	}
+	return (char *)ph->ph_page + ph->ph_off + idx * pp->pr_size;
 }
 
 static inline int
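The two revisions in the hunks above encode a PR_NOTOUCH page's free map differently: the 1.101.2.7 side keeps one bit per item and scans with ffs32(), while the 1.128.2.4 side threads a uint8_t chain of item indices through PR_FREELIST, reserving (pool_item_freelist_t)-1 and -2 as the USED and end-of-list sentinels, which caps a page at 254 items. A rough stand-alone comparison of the two schemes, not part of the diff, with the standard ffs() standing in for the kernel's ffs32():

#include <stdio.h>
#include <stdint.h>
#include <limits.h>
#include <strings.h>		/* ffs() */

#define NITEMS 12

/* 1.128.2.4 scheme: chain of uint8_t indices, as in pr_item_notouch_get() */
#define INDEX_USED ((uint8_t)-1)	/* like PR_INDEX_USED */
#define INDEX_EOL  ((uint8_t)-2)	/* like PR_INDEX_EOL */
static uint8_t freelist[NITEMS];
static uint8_t firstfree;

static void
chain_init(void)
{
	for (int i = 0; i < NITEMS - 1; i++)
		freelist[i] = i + 1;	/* same loop pool_prime_page() gains */
	freelist[NITEMS - 1] = INDEX_EOL;
	firstfree = 0;
}

static int
chain_get(void)
{
	int idx = firstfree;		/* O(1): pop the head of the chain */
	firstfree = freelist[idx];
	freelist[idx] = INDEX_USED;
	return idx;
}

static void
chain_put(int idx)
{
	freelist[idx] = firstfree;	/* O(1): push back onto the chain */
	firstfree = idx;
}

/* 1.101.2.7 scheme: one bit per free item, scanned with find-first-set */
#define BITMAP_SIZE (CHAR_BIT * sizeof(uint32_t))
static uint32_t bitmap[(NITEMS + BITMAP_SIZE - 1) / BITMAP_SIZE];

static void
bitmap_init(void)
{
	for (int i = 0; i < NITEMS; i++)
		bitmap[i / BITMAP_SIZE] |= 1U << (i % BITMAP_SIZE);
}

static int
bitmap_get(void)
{
	/* the kernel loop is bounded by pr_itemsperpage; a free bit is
	 * guaranteed here, so the scan always terminates */
	for (unsigned i = 0; ; i++) {
		int bit = ffs(bitmap[i]);
		if (bit != 0) {
			bitmap[i] &= ~(1U << (bit - 1));
			return i * BITMAP_SIZE + bit - 1;
		}
	}
}

static void
bitmap_put(int idx)
{
	bitmap[idx / BITMAP_SIZE] |= 1U << (idx % BITMAP_SIZE);
}

int
main(void)
{
	chain_init();
	bitmap_init();
	printf("chain: %d %d  bitmap: %d %d\n",
	    chain_get(), chain_get(), bitmap_get(), bitmap_get());
	chain_put(0);
	bitmap_put(0);
	printf("after put: chain %d, bitmap %d\n", chain_get(), bitmap_get());
	return 0;
}

The trade-off: the chain costs one byte per item but is strictly O(1) per get and put; the bitmap costs one bit per item at the price of a word scan per get, which is why PHPOOL_FREELIST_NELEM grows from (1 << idx) to BITMAP_SIZE * (1 << idx) on the bitmap side.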
@@ -584,7 +554,6 @@ pool_subsystem_init(void)
 	struct link_pool_init * const *pi;
 
 	mutex_init(&pool_head_lock, MUTEX_DEFAULT, IPL_NONE);
-	cv_init(&pool_busy, "poolbusy");
 
 	__link_set_foreach(pi, pools)
 		pool_init((*pi)->pp, (*pi)->size, (*pi)->align,
@@ -597,12 +566,6 @@ pool_subsystem_init(void)
 		SLIST_REMOVE_HEAD(&pa_deferinitq, pa_q);
 		pa_reclaim_register(pa);
 	}
-
-	pool_init(&cache_pool, sizeof(struct pool_cache), CACHE_LINE_SIZE,
-	    0, 0, "pcache", &pool_allocator_nointr, IPL_NONE);
-
-	pool_init(&cache_cpu_pool, sizeof(pool_cache_cpu_t), CACHE_LINE_SIZE,
-	    0, 0, "pcachecpu", &pool_allocator_nointr, IPL_NONE);
 }
 
 /*
@@ -621,6 +584,9 @@ pool_init(struct pool *pp, size_t size,
 	size_t trysize, phsize;
 	int off, slack;
 
+	KASSERT((1UL << (CHAR_BIT * sizeof(pool_item_freelist_t))) - 2 >=
+	    PHPOOL_FREELIST_NELEM(PHPOOL_MAX - 1));
+
 #ifdef DEBUG
 	/*
 	 * Check that the pool hasn't already been initialised and
@@ -657,7 +623,7 @@ pool_init(struct pool *pp, size_t size,
 
 	TAILQ_INIT(&palloc->pa_list);
 
-	mutex_init(&palloc->pa_lock, MUTEX_DEFAULT, IPL_VM);
+	mutex_init(&palloc->pa_lock, MUTEX_DRIVER, IPL_VM);
 	palloc->pa_pagemask = ~(palloc->pa_pagesz - 1);
 	palloc->pa_pageshift = ffs(palloc->pa_pagesz) - 1;
@@ -685,7 +651,7 @@ pool_init(struct pool *pp, size_t size,
 	LIST_INIT(&pp->pr_emptypages);
 	LIST_INIT(&pp->pr_fullpages);
 	LIST_INIT(&pp->pr_partpages);
-	pp->pr_cache = NULL;
+	LIST_INIT(&pp->pr_cachelist);
 	pp->pr_curpage = NULL;
 	pp->pr_npages = 0;
 	pp->pr_minitems = 0;
@@ -786,7 +752,6 @@ pool_init(struct pool *pp, size_t size,
 	pp->pr_npagefree = 0;
 	pp->pr_hiwat = 0;
 	pp->pr_nidle = 0;
-	pp->pr_refcnt = 0;
 
 #ifdef POOL_DIAGNOSTIC
 	if (flags & PR_LOGGING) {
@@ -802,14 +767,7 @@ pool_init(struct pool *pp, size_t size,
 	pp->pr_entered_file = NULL;
 	pp->pr_entered_line = 0;
 
-	/*
-	 * XXXAD hack to prevent IP input processing from blocking.
-	 */
-	if (ipl == IPL_SOFTNET) {
-		mutex_init(&pp->pr_lock, MUTEX_DEFAULT, IPL_VM);
-	} else {
-		mutex_init(&pp->pr_lock, MUTEX_DEFAULT, ipl);
-	}
+	mutex_init(&pp->pr_lock, MUTEX_DRIVER, ipl);
 	cv_init(&pp->pr_cv, wchan);
 	pp->pr_ipl = ipl;
@@ -830,8 +788,8 @@ pool_init(struct pool *pp, size_t size,
 			    "phpool-%d", nelem);
 			sz = sizeof(struct pool_item_header);
 			if (nelem) {
-				sz = offsetof(struct pool_item_header,
-				    ph_bitmap[howmany(nelem, BITMAP_SIZE)]);
+				sz = PR_FREELIST_ALIGN(sz) +
+				    nelem * sizeof(pool_item_freelist_t);
 			}
 			pool_init(&phpool[idx], sz, 0, 0, 0,
 			    phpool_names[idx], &pool_allocator_meta, IPL_VM);
@@ -840,8 +798,8 @@ pool_init(struct pool *pp, size_t size,
 		pool_init(&psppool, POOL_SUBPAGE, POOL_SUBPAGE,
 		    0, PR_RECURSIVE, "psppool", &pool_allocator_meta, IPL_VM);
 #endif
-		pool_init(&pcgpool, sizeof(pcg_t), CACHE_LINE_SIZE, 0, 0,
-		    "cachegrp", &pool_allocator_meta, IPL_VM);
+		pool_init(&pcgpool, sizeof(struct pool_cache_group), 0, 0,
+		    0, "pcgpool", &pool_allocator_meta, IPL_VM);
 	}
 
 	if (__predict_true(!cold)) {
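The phpool sizing hunk above computes the PR_NOTOUCH page-header size two ways: the 1.128.2.4 side appends one uint8_t per item after the fixed header (PR_FREELIST_ALIGN rounds to sizeof(pool_item_freelist_t), which is 1 and therefore a no-op), while the 1.101.2.7 side sizes a flexible bitmap at one bit per item. The following sketch, again an aside with a hypothetical trimmed header layout rather than the real struct pool_item_header, prints both sizes for each phpool index:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>
#include <limits.h>

/* trimmed stand-in for struct pool_item_header; the real layout differs */
struct hdr {
	char fixed[40];		/* the fields before the free map */
};

#define	BITMAP_SIZE	(CHAR_BIT * sizeof(uint32_t))
#define	HOWMANY(x, y)	(((x) + ((y) - 1)) / (y))

int
main(void)
{
	for (int idx = 1; idx < 8; idx++) {
		/* 1.128.2.4: nelem = 1 << idx, one byte per item */
		unsigned nelem8 = 1u << idx;
		size_t sz8 = sizeof(struct hdr) + nelem8 * sizeof(uint8_t);

		/* 1.101.2.7: nelem = BITMAP_SIZE << idx, one bit per item;
		 * the kernel writes this as offsetof(...,
		 * ph_bitmap[howmany(nelem, BITMAP_SIZE)]) */
		unsigned nbit = BITMAP_SIZE * (1u << idx);
		size_t szbit = sizeof(struct hdr) +
		    HOWMANY(nbit, BITMAP_SIZE) * sizeof(uint32_t);

		printf("idx %d: chain %4u items / %3zu bytes, "
		    "bitmap %6u items / %3zu bytes\n",
		    idx, nelem8, sz8, nbit, szbit);
	}
	return 0;
}

This also motivates the KASSERT added to pool_init() in the @@ -621 hunk: with a uint8_t freelist and two values reserved as sentinels, at most 254 indices are representable, and PHPOOL_FREELIST_NELEM(PHPOOL_MAX - 1) = 128 must stay below that bound.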
@@ -873,8 +831,6 @@ pool_destroy(struct pool *pp)
 
 	/* Remove from global pool list */
 	mutex_enter(&pool_head_lock);
-	while (pp->pr_refcnt != 0)
-		cv_wait(&pool_busy, &pool_head_lock);
 	LIST_REMOVE(pp, pr_poollist);
 	if (drainpp == pp)
 		drainpp = NULL;
@@ -888,7 +844,7 @@ pool_destroy(struct pool *pp)
 
 	mutex_enter(&pp->pr_lock);
 
-	KASSERT(pp->pr_cache == NULL);
+	KASSERT(LIST_EMPTY(&pp->pr_cachelist));
 
 #ifdef DIAGNOSTIC
 	if (pp->pr_nout != 0) {
@@ -946,7 +902,7 @@ pool_alloc_item_header(struct pool *pp,
 }
 
 /*
- * Grab an item from the pool.
+ * Grab an item from the pool; must be called at appropriate spl level
 */
 void *
 #ifdef POOL_DIAGNOSTIC
@@ -1192,7 +1148,6 @@ pool_do_put(struct pool *pp, void *v, st
 	KASSERT(mutex_owned(&pp->pr_lock));
 	FREECHECK_IN(&pp->pr_freecheck, v);
-	LOCKDEBUG_MEM_CHECK(v, pp->pr_size);
 
 #ifdef DIAGNOSTIC
 	if (__predict_false(pp->pr_nout == 0)) {
@@ -1294,7 +1249,7 @@ pool_do_put(struct pool *pp, void *v, st
 }
 
 /*
- * Return resource to the pool.
+ * Return resource to the pool; must be called at appropriate spl level
 */
 #ifdef POOL_DIAGNOSTIC
 void
@@ -1455,7 +1410,14 @@ pool_prime_page(struct pool *pp, void *s
 	pp->pr_nitems += n;
 
 	if (pp->pr_roflags & PR_NOTOUCH) {
-		pr_item_notouch_init(pp, ph);
+		pool_item_freelist_t *freelist = PR_FREELIST(ph);
+		int i;
+
+		ph->ph_off = (char *)cp - (char *)storage;
+		ph->ph_firstfree = 0;
+		for (i = 0; i < n - 1; i++)
+			freelist[i] = i + 1;
+		freelist[n - 1] = PR_INDEX_EOL;
 	} else {
 		while (n--) {
 			pi = (struct pool_item *)cp;
@@ -1586,10 +1548,10 @@ pool_reclaim(struct pool *pp)
 #endif
 {
 	struct pool_item_header *ph, *phnext;
+	struct pool_cache *pc;
 	struct pool_pagelist pq;
+	struct pool_cache_grouplist pcgl;
 	struct timeval curtime, diff;
-	bool klock;
-	int rv;
 
 	if (pp->pr_drain_hook != NULL) {
 		/*
@@ -1598,31 +1560,18 @@ pool_reclaim(struct pool *pp)
 		(*pp->pr_drain_hook)(pp->pr_drain_hook_arg, PR_NOWAIT);
 	}
 
-	/*
-	 * XXXSMP Because mutexes at IPL_SOFTXXX are still spinlocks,
-	 * and we are called from the pagedaemon without kernel_lock.
-	 * Does not apply to IPL_SOFTBIO.
-	 */
-	if (pp->pr_ipl == IPL_SOFTNET || pp->pr_ipl == IPL_SOFTCLOCK ||
-	    pp->pr_ipl == IPL_SOFTSERIAL) {
-		KERNEL_LOCK(1, NULL);
-		klock = true;
-	} else
-		klock = false;
-
-	/* Reclaim items from the pool's cache (if any). */
-	if (pp->pr_cache != NULL)
-		pool_cache_invalidate(pp->pr_cache);
-
-	if (mutex_tryenter(&pp->pr_lock) == 0) {
-		if (klock) {
-			KERNEL_UNLOCK_ONE(NULL);
-		}
+	if (mutex_tryenter(&pp->pr_lock) == 0)
 		return (0);
-	}
 	pr_enter(pp, file, line);
 
 	LIST_INIT(&pq);
+	LIST_INIT(&pcgl);
+
+	/*
+	 * Reclaim items from the pool's caches.
+	 */
+	LIST_FOREACH(pc, &pp->pr_cachelist, pc_poollist)
+		pool_cache_reclaim(pc, &pq, &pcgl);
 
 	getmicrotime(&curtime);
 
@@ -1652,86 +1601,39 @@ pool_reclaim(struct pool *pp)
 	pr_leave(pp);
 	mutex_exit(&pp->pr_lock);
+	if (LIST_EMPTY(&pq) && LIST_EMPTY(&pcgl))
+		return 0;
 
-	if (LIST_EMPTY(&pq))
-		rv = 0;
-	else {
-		pr_pagelist_free(pp, &pq);
-		rv = 1;
-	}
-
-	if (klock) {
-		KERNEL_UNLOCK_ONE(NULL);
-	}
-
-	return (rv);
+	pr_pagelist_free(pp, &pq);
+	pcg_grouplist_free(&pcgl);
+	return (1);
 }
 
 /*
- * Drain pools, one at a time.  This is a two stage process;
- * drain_start kicks off a cross call to drain CPU-level caches
- * if the pool has an associated pool_cache.  drain_end waits
- * for those cross calls to finish, and then drains the cache
- * (if any) and pool.
+ * Drain pools, one at a time.
 *
- * Note, must never be called from interrupt context.
+ * Note, we must never be called from an interrupt context.
 */
 void
-pool_drain_start(struct pool **ppp, uint64_t *wp)
+pool_drain(void *arg)
 {
 	struct pool *pp;
-
-	KASSERT(!LIST_EMPTY(&pool_head));
+	int s;
 
 	pp = NULL;
-
-	/* Find next pool to drain, and add a reference. */
+	s = splvm();		/* XXX why? */
 	mutex_enter(&pool_head_lock);
-	do {
-		if (drainpp == NULL) {
-			drainpp = LIST_FIRST(&pool_head);
-		}
-		if (drainpp != NULL) {
-			pp = drainpp;
-			drainpp = LIST_NEXT(pp, pr_poollist);
-		}
-		/*
-		 * Skip completely idle pools.  We depend on at least
-		 * one pool in the system being active.
-		 */
-	} while (pp == NULL || pp->pr_npages == 0);
-	pp->pr_refcnt++;
-	mutex_exit(&pool_head_lock);
-
-	/* If there is a pool_cache, drain CPU level caches. */
-	*ppp = pp;
-	if (pp->pr_cache != NULL) {
-		*wp = xc_broadcast(0, (xcfunc_t)pool_cache_xcall,
-		    pp->pr_cache, NULL);
+	if (drainpp == NULL) {
+		drainpp = LIST_FIRST(&pool_head);
+	}
+	if (drainpp) {
+		pp = drainpp;
+		drainpp = LIST_NEXT(pp, pr_poollist);
 	}
-}
-
-void
-pool_drain_end(struct pool *pp, uint64_t where)
-{
-
-	if (pp == NULL)
-		return;
-
-	KASSERT(pp->pr_refcnt > 0);
-
-	/* Wait for remote draining to complete. */
-	if (pp->pr_cache != NULL)
-		xc_wait(where);
-
-	/* Drain the cache (if any) and pool.. */
-	pool_reclaim(pp);
-
-	/* Finally, unlock the pool. */
-	mutex_enter(&pool_head_lock);
-	pp->pr_refcnt--;
-	cv_broadcast(&pool_busy);
 	mutex_exit(&pool_head_lock);
+	if (pp)
+		pool_reclaim(pp);
+	splx(s);
 }
 
 /*
@@ -1741,7 +1643,13 @@
 void
 pool_print(struct pool *pp, const char *modif)
 {
 
+	if (mutex_tryenter(&pp->pr_lock) == 0) {
+		printf("pool %s is locked; try again later\n",
+		    pp->pr_wchan);
+		return;
+	}
 	pool_print1(pp, modif, printf);
+	mutex_exit(&pp->pr_lock);
 }
 
 void
@@ -1749,6 +1657,12 @@ pool_printall(const char *modif, void (*
 {
 	struct pool *pp;
 
+	if (mutex_tryenter(&pool_head_lock) == 0) {
+		(*pr)("WARNING: pool_head_slock is locked\n");
+	} else {
+		mutex_exit(&pool_head_lock);
+	}
+
 	LIST_FOREACH(pp, &pool_head, pr_poollist) {
 		pool_printit(pp, modif, pr);
 	}
@@ -1763,6 +1677,20 @@ pool_printit(struct pool *pp, const char
 		return;
 	}
 
+	/*
+	 * Called from DDB; interrupts should be blocked, and all
+	 * other processors should be paused.  We can skip locking
+	 * the pool in this case.
+	 *
+	 * We do a mutex_tryenter() just to print the lock
+	 * status, however.
+	 */
+
+	if (mutex_tryenter(&pp->pr_lock) == 0)
+		(*pr)("WARNING: pool %s is locked\n", pp->pr_wchan);
+	else
+		mutex_exit(&pp->pr_lock);
+
 	pool_print1(pp, modif, pr);
 }
 
@@ -1797,10 +1725,8 @@ static void
 pool_print1(struct pool *pp, const char *modif, void (*pr)(const char *, ...))
 {
 	struct pool_item_header *ph;
-	pool_cache_t pc;
-	pcg_t *pcg;
-	pool_cache_cpu_t *cc;
-	uint64_t cpuhit, cpumiss;
+	struct pool_cache *pc;
+	struct pool_cache_group *pcg;
 	int i, print_log = 0, print_pagelist = 0, print_cache = 0;
 	char c;
 
@@ -1813,13 +1739,7 @@ pool_print1(struct pool *pp, const char
 		print_cache = 1;
 	}
 
-	if ((pc = pp->pr_cache) != NULL) {
-		(*pr)("POOL CACHE");
-	} else {
-		(*pr)("POOL");
-	}
-
-	(*pr)(" %s: size %u, align %u, ioff %u, roflags 0x%08x\n",
+	(*pr)("POOL %s: size %u, align %u, ioff %u, roflags 0x%08x\n",
 	    pp->pr_wchan, pp->pr_size, pp->pr_align, pp->pr_itemoffset,
 	    pp->pr_roflags);
 	(*pr)("\talloc %p\n", pp->pr_alloc);
@@ -1828,7 +1748,7 @@ pool_print1(struct pool *pp, const char
 	(*pr)("\titemsperpage %u, nitems %u, nout %u, hardlimit %u\n",
 	    pp->pr_itemsperpage, pp->pr_nitems, pp->pr_nout, pp->pr_hardlimit);
 
-	(*pr)("\tnget %lu, nfail %lu, nput %lu\n",
+	(*pr)("\n\tnget %lu, nfail %lu, nput %lu\n",
 	    pp->pr_nget, pp->pr_nfail, pp->pr_nput);
 	(*pr)("\tnpagealloc %lu, npagefree %lu, hiwat %u, nidle %lu\n",
 	    pp->pr_npagealloc, pp->pr_npagefree, pp->pr_hiwat, pp->pr_nidle);
@@ -1863,6 +1783,8 @@ pool_print1(struct pool *pp, const char
 	}
 
 skip_log:
+	if (print_cache == 0)
+		goto skip_cache;
 
 #define PR_GROUPLIST(pcg)						\
 	(*pr)("\t\tgroup %p: avail %d\n", pcg, pcg->pcg_avail);		\
@@ -1879,38 +1801,26 @@ pool_print1(struct pool *pp, const char
 	}								\
 	}
 
-	if (pc != NULL) {
-		cpuhit = 0;
-		cpumiss = 0;
-		for (i = 0; i < MAXCPUS; i++) {
-			if ((cc = pc->pc_cpus[i]) == NULL)
-				continue;
-			cpuhit += cc->cc_hits;
-			cpumiss += cc->cc_misses;
-		}
-		(*pr)("\tcpu layer hits %llu misses %llu\n", cpuhit, cpumiss);
-		(*pr)("\tcache layer hits %llu misses %llu\n",
-		    pc->pc_hits, pc->pc_misses);
-		(*pr)("\tcache layer entry uncontended %llu contended %llu\n",
-		    pc->pc_hits + pc->pc_misses - pc->pc_contended,
-		    pc->pc_contended);
-		(*pr)("\tcache layer empty groups %u full groups %u\n",
-		    pc->pc_nempty, pc->pc_nfull);
-		if (print_cache) {
-			(*pr)("\tfull cache groups:\n");
-			for (pcg = pc->pc_fullgroups; pcg != NULL;
-			    pcg = pcg->pcg_next) {
-				PR_GROUPLIST(pcg);
-			}
-			(*pr)("\tempty cache groups:\n");
-			for (pcg = pc->pc_emptygroups; pcg != NULL;
-			    pcg = pcg->pcg_next) {
-				PR_GROUPLIST(pcg);
-			}
+	LIST_FOREACH(pc, &pp->pr_cachelist, pc_poollist) {
+		(*pr)("\tcache %p\n", pc);
+		(*pr)("\t    hits %lu misses %lu ngroups %lu nitems %lu\n",
+		    pc->pc_hits, pc->pc_misses, pc->pc_ngroups, pc->pc_nitems);
+		(*pr)("\t    full groups:\n");
+		LIST_FOREACH(pcg, &pc->pc_fullgroups, pcg_list) {
+			PR_GROUPLIST(pcg);
+		}
+		(*pr)("\t    partial groups:\n");
+		LIST_FOREACH(pcg, &pc->pc_partgroups, pcg_list) {
+			PR_GROUPLIST(pcg);
+		}
+		(*pr)("\t    empty groups:\n");
+		LIST_FOREACH(pcg, &pc->pc_emptygroups, pcg_list) {
+			PR_GROUPLIST(pcg);
		}
	}
 #undef PR_GROUPLIST
 
+ skip_cache:
	pr_enter_check(pp, pr);
 }
 
@@ -2007,100 +1917,41 @@ out:
 * pool_cache_init:
 *
 *	Initialize a pool cache.
- */
-pool_cache_t
-pool_cache_init(size_t size, u_int align, u_int align_offset, u_int flags,
-    const char *wchan, struct pool_allocator *palloc, int ipl,
-    int (*ctor)(void *, void *, int), void (*dtor)(void *, void *), void *arg)
-{
-	pool_cache_t pc;
-
-	pc = pool_get(&cache_pool, PR_WAITOK);
-	if (pc == NULL)
-		return NULL;
-
-	pool_cache_bootstrap(pc, size, align, align_offset, flags, wchan,
-	    palloc, ipl, ctor, dtor, arg);
-
-	return pc;
-}
-
-/*
- * pool_cache_bootstrap:
 *
- *	Kernel-private version of pool_cache_init().  The caller
- *	provides initial storage.
+ *	NOTE: If the pool must be protected from interrupts, we expect
+ *	to be called at the appropriate interrupt priority level.
 */
 void
-pool_cache_bootstrap(pool_cache_t pc, size_t size, u_int align,
-    u_int align_offset, u_int flags, const char *wchan,
-    struct pool_allocator *palloc, int ipl,
-    int (*ctor)(void *, void *, int), void (*dtor)(void *, void *),
+pool_cache_init(struct pool_cache *pc, struct pool *pp,
+    int (*ctor)(void *, void *, int),
+    void (*dtor)(void *, void *),
    void *arg)
 {
-	CPU_INFO_ITERATOR cii;
-	struct cpu_info *ci;
-	struct pool *pp;
-
-	pp = &pc->pc_pool;
-	if (palloc == NULL && ipl == IPL_NONE)
-		palloc = &pool_allocator_nointr;
-	pool_init(pp, size, align, align_offset, flags, wchan, palloc, ipl);
 
-	/*
-	 * XXXAD hack to prevent IP input processing from blocking.
-	 */
-	if (ipl == IPL_SOFTNET) {
-		mutex_init(&pc->pc_lock, MUTEX_DEFAULT, IPL_VM);
-	} else {
-		mutex_init(&pc->pc_lock, MUTEX_DEFAULT, ipl);
-	}
+	LIST_INIT(&pc->pc_emptygroups);
+	LIST_INIT(&pc->pc_fullgroups);
+	LIST_INIT(&pc->pc_partgroups);
+	mutex_init(&pc->pc_lock, MUTEX_DRIVER, pp->pr_ipl);
 
-	if (ctor == NULL) {
-		ctor = (int (*)(void *, void *, int))nullop;
-	}
-	if (dtor == NULL) {
-		dtor = (void (*)(void *, void *))nullop;
-	}
+	pc->pc_pool = pp;
 
-	pc->pc_emptygroups = NULL;
-	pc->pc_fullgroups = NULL;
-	pc->pc_partgroups = NULL;
	pc->pc_ctor = ctor;
	pc->pc_dtor = dtor;
	pc->pc_arg  = arg;
-	pc->pc_hits = 0;
+
+	pc->pc_hits = 0;
	pc->pc_misses = 0;
-	pc->pc_nempty = 0;
-	pc->pc_npart = 0;
-	pc->pc_nfull = 0;
-	pc->pc_contended = 0;
-	pc->pc_refcnt = 0;
-	pc->pc_freecheck = NULL;
-
-	/* Allocate per-CPU caches. */
-	memset(pc->pc_cpus, 0, sizeof(pc->pc_cpus));
-	pc->pc_ncpu = 0;
-	if (ncpu == 0) {
-		/* XXX For sparc: boot CPU is not attached yet. */
-		pool_cache_cpu_init1(curcpu(), pc);
-	} else {
-		for (CPU_INFO_FOREACH(cii, ci)) {
-			pool_cache_cpu_init1(ci, pc);
-		}
-	}
-
+
+	pc->pc_ngroups = 0;
+
+	pc->pc_nitems = 0;
+
 	if (__predict_true(!cold)) {
 		mutex_enter(&pp->pr_lock);
-		pp->pr_cache = pc;
+		LIST_INSERT_HEAD(&pp->pr_cachelist, pc, pc_poollist);
 		mutex_exit(&pp->pr_lock);
-		mutex_enter(&pool_head_lock);
-		LIST_INSERT_HEAD(&pool_cache_head, pc, pc_cachelist);
-		mutex_exit(&pool_head_lock);
-	} else {
-		pp->pr_cache = pc;
-		LIST_INSERT_HEAD(&pool_cache_head, pc, pc_cachelist);
-	}
+	} else
+		LIST_INSERT_HEAD(&pp->pr_cachelist, pc, pc_poollist);
 }
 
 /*
@@ -2109,618 +1960,302 @@ pool_cache_bootstrap(pool_cache_t pc, si
 *	Destroy a pool cache.
 */
 void
-pool_cache_destroy(pool_cache_t pc)
+pool_cache_destroy(struct pool_cache *pc)
 {
-	struct pool *pp = &pc->pc_pool;
-	pool_cache_cpu_t *cc;
-	pcg_t *pcg;
-	int i;
-
-	/* Remove it from the global list. */
-	mutex_enter(&pool_head_lock);
-	while (pc->pc_refcnt != 0)
-		cv_wait(&pool_busy, &pool_head_lock);
-	LIST_REMOVE(pc, pc_cachelist);
-	mutex_exit(&pool_head_lock);
+	struct pool *pp = pc->pc_pool;
 
 	/* First, invalidate the entire cache. */
 	pool_cache_invalidate(pc);
 
-	/* Disassociate it from the pool. */
+	/* ...and remove it from the pool's cache list. */
 	mutex_enter(&pp->pr_lock);
-	pp->pr_cache = NULL;
+	LIST_REMOVE(pc, pc_poollist);
 	mutex_exit(&pp->pr_lock);
 
-	/* Destroy per-CPU data */
-	for (i = 0; i < MAXCPUS; i++) {
-		if ((cc = pc->pc_cpus[i]) == NULL)
-			continue;
-		if ((pcg = cc->cc_current) != NULL) {
-			pcg->pcg_next = NULL;
-			pool_cache_invalidate_groups(pc, pcg);
-		}
-		if ((pcg = cc->cc_previous) != NULL) {
-			pcg->pcg_next = NULL;
-			pool_cache_invalidate_groups(pc, pcg);
-		}
-		if (cc != &pc->pc_cpu0)
-			pool_put(&cache_cpu_pool, cc);
-	}
-
-	/* Finally, destroy it. */
 	mutex_destroy(&pc->pc_lock);
-	pool_destroy(pp);
-	pool_put(&cache_pool, pc);
 }
 
-/*
- * pool_cache_cpu_init1:
- *
- *	Called for each pool_cache whenever a new CPU is attached.
- */
-static void
-pool_cache_cpu_init1(struct cpu_info *ci, pool_cache_t pc)
+static inline void *
+pcg_get(struct pool_cache_group *pcg, paddr_t *pap)
 {
-	pool_cache_cpu_t *cc;
-	int index;
-
-	index = ci->ci_index;
-
-	KASSERT(index < MAXCPUS);
-	KASSERT(((uintptr_t)pc->pc_cpus & (CACHE_LINE_SIZE - 1)) == 0);
-
-	if ((cc = pc->pc_cpus[index]) != NULL) {
-		KASSERT(cc->cc_cpuindex == index);
-		return;
-	}
-
-	/*
-	 * The first CPU is 'free'.  This needs to be the case for
-	 * bootstrap - we may not be able to allocate yet.
-	 */
-	if (pc->pc_ncpu == 0) {
-		cc = &pc->pc_cpu0;
-		pc->pc_ncpu = 1;
-	} else {
-		mutex_enter(&pc->pc_lock);
-		pc->pc_ncpu++;
-		mutex_exit(&pc->pc_lock);
-		cc = pool_get(&cache_cpu_pool, PR_WAITOK);
-	}
+	void *object;
+	u_int idx;
 
-	cc->cc_ipl = pc->pc_pool.pr_ipl;
-	cc->cc_iplcookie = makeiplcookie(cc->cc_ipl);
-	cc->cc_cache = pc;
-	cc->cc_cpuindex = index;
-	cc->cc_hits = 0;
-	cc->cc_misses = 0;
-	cc->cc_current = NULL;
-	cc->cc_previous = NULL;
+	KASSERT(pcg->pcg_avail <= PCG_NOBJECTS);
+	KASSERT(pcg->pcg_avail != 0);
+	idx = --pcg->pcg_avail;
+
+	KASSERT(pcg->pcg_objects[idx].pcgo_va != NULL);
+	object = pcg->pcg_objects[idx].pcgo_va;
+	if (pap != NULL)
+		*pap = pcg->pcg_objects[idx].pcgo_pa;
+	pcg->pcg_objects[idx].pcgo_va = NULL;
 
-	pc->pc_cpus[index] = cc;
+	return (object);
 }
 
-/*
- * pool_cache_cpu_init:
- *
- *	Called whenever a new CPU is attached.
- */
-void
-pool_cache_cpu_init(struct cpu_info *ci)
+static inline void
+pcg_put(struct pool_cache_group *pcg, void *object, paddr_t pa)
 {
-	pool_cache_t pc;
+	u_int idx;
 
-	mutex_enter(&pool_head_lock);
-	LIST_FOREACH(pc, &pool_cache_head, pc_cachelist) {
-		pc->pc_refcnt++;
-		mutex_exit(&pool_head_lock);
-
-		pool_cache_cpu_init1(ci, pc);
+	KASSERT(pcg->pcg_avail < PCG_NOBJECTS);
+	idx = pcg->pcg_avail++;
 
-		mutex_enter(&pool_head_lock);
-		pc->pc_refcnt--;
-		cv_broadcast(&pool_busy);
-	}
-	mutex_exit(&pool_head_lock);
-}
-
-/*
- * pool_cache_reclaim:
- *
- *	Reclaim memory from a pool cache.
- */
-bool
-pool_cache_reclaim(pool_cache_t pc)
-{
-
-	return pool_reclaim(&pc->pc_pool);
+	KASSERT(pcg->pcg_objects[idx].pcgo_va == NULL);
+	pcg->pcg_objects[idx].pcgo_va = object;
+	pcg->pcg_objects[idx].pcgo_pa = pa;
 }
 
 static void
-pool_cache_destruct_object1(pool_cache_t pc, void *object)
+pcg_grouplist_free(struct pool_cache_grouplist *pcgl)
 {
+	struct pool_cache_group *pcg;
 
-	(*pc->pc_dtor)(pc->pc_arg, object);
-	pool_put(&pc->pc_pool, object);
+	while ((pcg = LIST_FIRST(pcgl)) != NULL) {
+		LIST_REMOVE(pcg, pcg_list);
+		pool_put(&pcgpool, pcg);
+	}
 }
 
 /*
- * pool_cache_destruct_object:
+ * pool_cache_get{,_paddr}:
 *
- *	Force destruction of an object and its release back into
- *	the pool.
+ *	Get an object from a pool cache (optionally returning
+ *	the physical address of the object).
 */
-void
-pool_cache_destruct_object(pool_cache_t pc, void *object)
+void *
+pool_cache_get_paddr(struct pool_cache *pc, int flags, paddr_t *pap)
 {
+	struct pool_cache_group *pcg;
+	void *object;
 
-	FREECHECK_IN(&pc->pc_freecheck, object);
-
-	pool_cache_destruct_object1(pc, object);
-}
+#ifdef LOCKDEBUG
+	if (flags & PR_WAITOK)
+		ASSERT_SLEEPABLE(NULL, "pool_cache_get(PR_WAITOK)");
+#endif
 
-/*
- * pool_cache_invalidate_groups:
- *
- *	Invalidate a chain of groups and destruct all objects.
- */
-static void
-pool_cache_invalidate_groups(pool_cache_t pc, pcg_t *pcg)
-{
-	void *object;
-	pcg_t *next;
-	int i;
+	mutex_enter(&pc->pc_lock);
 
-	for (; pcg != NULL; pcg = next) {
-		next = pcg->pcg_next;
+	pcg = LIST_FIRST(&pc->pc_partgroups);
+	if (pcg == NULL) {
+		pcg = LIST_FIRST(&pc->pc_fullgroups);
+		if (pcg != NULL) {
+			LIST_REMOVE(pcg, pcg_list);
+			LIST_INSERT_HEAD(&pc->pc_partgroups, pcg, pcg_list);
+		}
+	}
+	if (pcg == NULL) {
 
-		for (i = 0; i < pcg->pcg_avail; i++) {
-			object = pcg->pcg_objects[i].pcgo_va;
-			pool_cache_destruct_object1(pc, object);
+		/*
+		 * No groups with any available objects.  Allocate
+		 * a new object, construct it, and return it to
+		 * the caller.  We will allocate a group, if necessary,
+		 * when the object is freed back to the cache.
+		 */
+		pc->pc_misses++;
+		mutex_exit(&pc->pc_lock);
+		object = pool_get(pc->pc_pool, flags);
+		if (object != NULL && pc->pc_ctor != NULL) {
+			if ((*pc->pc_ctor)(pc->pc_arg, object, flags) != 0) {
+				pool_put(pc->pc_pool, object);
+				return (NULL);
+			}
+		}
+		KASSERT((((vaddr_t)object + pc->pc_pool->pr_itemoffset) &
+		    (pc->pc_pool->pr_align - 1)) == 0);
+		if (object != NULL && pap != NULL) {
+#ifdef POOL_VTOPHYS
+			*pap = POOL_VTOPHYS(object);
+#else
+			*pap = POOL_PADDR_INVALID;
+#endif
 		}
-		pool_put(&pcgpool, pcg);
+		FREECHECK_OUT(&pc->pc_freecheck, object);
+		return (object);
 	}
-}
 
 /*
- * pool_cache_invalidate:
- *
- *	Invalidate a pool cache (destruct and release all of the
- *	cached objects).  Does not reclaim objects from the pool.
- */
-void
-pool_cache_invalidate(pool_cache_t pc)
-{
-	pcg_t *full, *empty, *part;
+	pc->pc_hits++;
+	pc->pc_nitems--;
+	object = pcg_get(pcg, pap);
 
-	mutex_enter(&pc->pc_lock);
-	full = pc->pc_fullgroups;
-	empty = pc->pc_emptygroups;
-	part = pc->pc_partgroups;
-	pc->pc_fullgroups = NULL;
-	pc->pc_emptygroups = NULL;
-	pc->pc_partgroups = NULL;
-	pc->pc_nfull = 0;
-	pc->pc_nempty = 0;
-	pc->pc_npart = 0;
+	if (pcg->pcg_avail == 0) {
+		LIST_REMOVE(pcg, pcg_list);
+		LIST_INSERT_HEAD(&pc->pc_emptygroups, pcg, pcg_list);
+	}
 	mutex_exit(&pc->pc_lock);
 
-	pool_cache_invalidate_groups(pc, full);
-	pool_cache_invalidate_groups(pc, empty);
-	pool_cache_invalidate_groups(pc, part);
-}
-
-void
-pool_cache_set_drain_hook(pool_cache_t pc, void (*fn)(void *, int), void *arg)
-{
-
-	pool_set_drain_hook(&pc->pc_pool, fn, arg);
-}
-
-void
-pool_cache_setlowat(pool_cache_t pc, int n)
-{
-
-	pool_setlowat(&pc->pc_pool, n);
-}
-
-void
-pool_cache_sethiwat(pool_cache_t pc, int n)
-{
-
-	pool_sethiwat(&pc->pc_pool, n);
+	KASSERT((((vaddr_t)object + pc->pc_pool->pr_itemoffset) &
+	    (pc->pc_pool->pr_align - 1)) == 0);
+	FREECHECK_OUT(&pc->pc_freecheck, object);
+	return (object);
 }
 
+/*
+ * pool_cache_put{,_paddr}:
+ *
+ *	Put an object back to the pool cache (optionally caching the
+ *	physical address of the object).
+ */
 void
-pool_cache_sethardlimit(pool_cache_t pc, int n, const char *warnmess, int ratecap)
+pool_cache_put_paddr(struct pool_cache *pc, void *object, paddr_t pa)
 {
+	struct pool_cache_group *pcg;
 
-	pool_sethardlimit(&pc->pc_pool, n, warnmess, ratecap);
-}
-
-static inline pool_cache_cpu_t *
-pool_cache_cpu_enter(pool_cache_t pc, int *s)
-{
-	pool_cache_cpu_t *cc;
+	FREECHECK_IN(&pc->pc_freecheck, object);
 
-	/*
-	 * Prevent other users of the cache from accessing our
-	 * CPU-local data.  To avoid touching shared state, we
-	 * pull the neccessary information from CPU local data.
-	 */
-	crit_enter();
-	cc = pc->pc_cpus[curcpu()->ci_index];
-	KASSERT(cc->cc_cache == pc);
-	if (cc->cc_ipl != IPL_NONE) {
-		*s = splraiseipl(cc->cc_iplcookie);
+	if (__predict_false((pc->pc_pool->pr_flags & PR_WANTED) != 0)) {
+		goto destruct;
 	}
-	KASSERT(((uintptr_t)cc & (CACHE_LINE_SIZE - 1)) == 0);
-
-	return cc;
-}
-
-static inline void
-pool_cache_cpu_exit(pool_cache_cpu_t *cc, int *s)
-{
-
-	/* No longer need exclusive access to the per-CPU data. */
-	if (cc->cc_ipl != IPL_NONE) {
-		splx(*s);
-	}
-	crit_exit();
-}
-
-#if __GNUC_PREREQ__(3, 0)
-__attribute ((noinline))
-#endif
-pool_cache_cpu_t *
-pool_cache_get_slow(pool_cache_cpu_t *cc, int *s, void **objectp,
-    paddr_t *pap, int flags)
-{
-	pcg_t *pcg, *cur;
-	uint64_t ncsw;
-	pool_cache_t pc;
-	void *object;
+	mutex_enter(&pc->pc_lock);
 
-	pc = cc->cc_cache;
-	cc->cc_misses++;
-
-	/*
-	 * Nothing was available locally.  Try and grab a group
-	 * from the cache.
-	 */
-	if (!mutex_tryenter(&pc->pc_lock)) {
-		ncsw = curlwp->l_ncsw;
-		mutex_enter(&pc->pc_lock);
-		pc->pc_contended++;
+	pcg = LIST_FIRST(&pc->pc_partgroups);
+	if (pcg == NULL) {
+		pcg = LIST_FIRST(&pc->pc_emptygroups);
+		if (pcg != NULL) {
+			LIST_REMOVE(pcg, pcg_list);
+			LIST_INSERT_HEAD(&pc->pc_partgroups, pcg, pcg_list);
 		}
 	}
+	if (pcg == NULL) {
 
-		/*
-		 * If we context switched while locking, then
-		 * our view of the per-CPU data is invalid:
-		 * retry.
-		 */
-		if (curlwp->l_ncsw != ncsw) {
-			mutex_exit(&pc->pc_lock);
-			pool_cache_cpu_exit(cc, s);
-			return pool_cache_cpu_enter(pc, s);
		}
	}
 
-	if ((pcg = pc->pc_fullgroups) != NULL) {
		/*
-		 * If there's a full group, release our empty
-		 * group back to the cache.  Install the full
-		 * group as cc_current and return.
+		 * No empty groups to free the object to.  Attempt to
+		 * allocate one.
		 */
-		if ((cur = cc->cc_current) != NULL) {
-			KASSERT(cur->pcg_avail == 0);
-			cur->pcg_next = pc->pc_emptygroups;
-			pc->pc_emptygroups = cur;
-			pc->pc_nempty++;
-		}
-		KASSERT(pcg->pcg_avail == PCG_NOBJECTS);
-		cc->cc_current = pcg;
-		pc->pc_fullgroups = pcg->pcg_next;
-		pc->pc_hits++;
-		pc->pc_nfull--;
		mutex_exit(&pc->pc_lock);
-		return cc;
-	}
+		pcg = pool_get(&pcgpool, PR_NOWAIT);
+		if (pcg == NULL) {
+destruct:
 
-	/*
-	 * Nothing available locally or in cache.  Take the slow
-	 * path: fetch a new object from the pool and construct
-	 * it.
-	 */
-	pc->pc_misses++;
-	mutex_exit(&pc->pc_lock);
-	pool_cache_cpu_exit(cc, s);
-
-	object = pool_get(&pc->pc_pool, flags);
-	*objectp = object;
-	if (object == NULL)
-		return NULL;
-
-	if ((*pc->pc_ctor)(pc->pc_arg, object, flags) != 0) {
-		pool_put(&pc->pc_pool, object);
-		*objectp = NULL;
-		return NULL;
+			/*
+			 * Unable to allocate a cache group; destruct the object
+			 * and free it back to the pool.
+			 */
+			pool_cache_destruct_object(pc, object);
+			return;
+		}
+		memset(pcg, 0, sizeof(*pcg));
+		mutex_enter(&pc->pc_lock);
+		pc->pc_ngroups++;
+		LIST_INSERT_HEAD(&pc->pc_partgroups, pcg, pcg_list);
	}
 
-	KASSERT((((vaddr_t)object + pc->pc_pool.pr_itemoffset) &
-	    (pc->pc_pool.pr_align - 1)) == 0);
+	pc->pc_nitems++;
+	pcg_put(pcg, object, pa);
 
-	if (pap != NULL) {
-#ifdef POOL_VTOPHYS
-		*pap = POOL_VTOPHYS(object);
-#else
-		*pap = POOL_PADDR_INVALID;
-#endif
+	if (pcg->pcg_avail == PCG_NOBJECTS) {
+		LIST_REMOVE(pcg, pcg_list);
+		LIST_INSERT_HEAD(&pc->pc_fullgroups, pcg, pcg_list);
	}
-
-	FREECHECK_OUT(&pc->pc_freecheck, object);
-	return NULL;
+	mutex_exit(&pc->pc_lock);
 }
 
 /*
- * pool_cache_get{,_paddr}:
+ * pool_cache_destruct_object:
 *
- *	Get an object from a pool cache (optionally returning
- *	the physical address of the object).
+ *	Force destruction of an object and its release back into
+ *	the pool.
 */
-void *
-pool_cache_get_paddr(pool_cache_t pc, int flags, paddr_t *pap)
+void
+pool_cache_destruct_object(struct pool_cache *pc, void *object)
 {
-	pool_cache_cpu_t *cc;
-	pcg_t *pcg;
-	void *object;
-	int s;
-
-#ifdef LOCKDEBUG
-	if (flags & PR_WAITOK)
-		ASSERT_SLEEPABLE(NULL, "pool_cache_get(PR_WAITOK)");
-#endif
 
-	cc = pool_cache_cpu_enter(pc, &s);
-	do {
-		/* Try and allocate an object from the current group. */
-		pcg = cc->cc_current;
-		if (pcg != NULL && pcg->pcg_avail > 0) {
-			object = pcg->pcg_objects[--pcg->pcg_avail].pcgo_va;
-			if (pap != NULL)
-				*pap = pcg->pcg_objects[pcg->pcg_avail].pcgo_pa;
-			pcg->pcg_objects[pcg->pcg_avail].pcgo_va = NULL;
-			KASSERT(pcg->pcg_avail <= PCG_NOBJECTS);
-			KASSERT(object != NULL);
-			cc->cc_hits++;
-			pool_cache_cpu_exit(cc, &s);
-			FREECHECK_OUT(&pc->pc_freecheck, object);
-			return object;
-		}
-
-		/*
-		 * That failed.  If the previous group isn't empty, swap
-		 * it with the current group and allocate from there.
-		 */
-		pcg = cc->cc_previous;
-		if (pcg != NULL && pcg->pcg_avail > 0) {
-			cc->cc_previous = cc->cc_current;
-			cc->cc_current = pcg;
-			continue;
-		}
-
-		/*
-		 * Can't allocate from either group: try the slow path.
-		 * If get_slow() allocated an object for us, or if
-		 * no more objects are available, it will return NULL.
-		 * Otherwise, we need to retry.
-		 */
-		cc = pool_cache_get_slow(cc, &s, &object, pap, flags);
-	} while (cc != NULL);
-	return object;
+	if (pc->pc_dtor != NULL)
+		(*pc->pc_dtor)(pc->pc_arg, object);
+	pool_put(pc->pc_pool, object);
 }
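The removed pool_cache_get_paddr()/pool_cache_put_paddr() above read as magazine caching in the style of Bonwick's slab allocator: each CPU keeps a cc_current and a cc_previous group, the fast paths touch only those, and the shared lists are consulted only when both fail. As an aside, not part of the diff, here is a stripped, single-threaded C sketch of just the current/previous swap logic. The names are hypothetical and malloc()/free() stand in for the shared cache and pool layers:

#include <stdio.h>
#include <stdlib.h>

#define PCG_NOBJECTS 16

struct pcg {				/* one cache group ("magazine") */
	int avail;
	void *objects[PCG_NOBJECTS];
};

static struct pcg group_a, group_b;	/* the per-CPU pair */
static struct pcg *cur = &group_a;	/* cc_current */
static struct pcg *prev = &group_b;	/* cc_previous */

static void *
cpu_cache_get(void)
{
	if (cur->avail > 0)			/* fast path, CPU-local */
		return cur->objects[--cur->avail];
	if (prev->avail > 0) {			/* swap in the fuller group */
		struct pcg *t = cur; cur = prev; prev = t;
		return cur->objects[--cur->avail];
	}
	return malloc(64);	/* slow path: shared lists / pool_get() */
}

static void
cpu_cache_put(void *obj)
{
	if (cur->avail < PCG_NOBJECTS) {	/* fast path, CPU-local */
		cur->objects[cur->avail++] = obj;
		return;
	}
	if (prev->avail == 0) {			/* swap in the empty group */
		struct pcg *t = cur; cur = prev; prev = t;
		cur->objects[cur->avail++] = obj;
		return;
	}
	free(obj);		/* slow path: hand a full group back */
}

int
main(void)
{
	void *o[PCG_NOBJECTS + 1];
	for (int i = 0; i <= PCG_NOBJECTS; i++)	/* fill cur, then swap */
		cpu_cache_put(o[i] = malloc(64));
	for (int i = 0; i <= PCG_NOBJECTS; i++)
		o[i] = cpu_cache_get();
	printf("cur avail %d, prev avail %d\n", cur->avail, prev->avail);
	for (int i = 0; i <= PCG_NOBJECTS; i++)
		free(o[i]);
	return 0;
}

Keeping two groups per CPU bounds how often a CPU must exchange groups with the shared lists when gets and puts alternate near a group boundary, which is the usual argument for the two-magazine design.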
-#if __GNUC_PREREQ__(3, 0)
-__attribute ((noinline))
-#endif
-pool_cache_cpu_t *
-pool_cache_put_slow(pool_cache_cpu_t *cc, int *s, void *object, paddr_t pa)
+static void
+pool_do_cache_invalidate_grouplist(struct pool_cache_grouplist *pcgsl,
+    struct pool_cache *pc, struct pool_pagelist *pq,
+    struct pool_cache_grouplist *pcgdl)
 {
-	pcg_t *pcg, *cur;
-	uint64_t ncsw;
-	pool_cache_t pc;
-
-	pc = cc->cc_cache;
-	cc->cc_misses++;
-
-	/*
-	 * No free slots locally.  Try to grab an empty, unused
-	 * group from the cache.
-	 */
-	if (!mutex_tryenter(&pc->pc_lock)) {
-		ncsw = curlwp->l_ncsw;
-		mutex_enter(&pc->pc_lock);
-		pc->pc_contended++;
+	struct pool_cache_group *pcg, *npcg;
+	void *object;
 
-		/*
-		 * If we context switched while locking, then
-		 * our view of the per-CPU data is invalid:
-		 * retry.
-		 */
-		if (curlwp->l_ncsw != ncsw) {
-			mutex_exit(&pc->pc_lock);
-			pool_cache_cpu_exit(cc, s);
-			return pool_cache_cpu_enter(pc, s);
-		}
+	for (pcg = LIST_FIRST(pcgsl); pcg != NULL; pcg = npcg) {
+		npcg = LIST_NEXT(pcg, pcg_list);
+		while (pcg->pcg_avail != 0) {
+			pc->pc_nitems--;
+			object = pcg_get(pcg, NULL);
+			if (pc->pc_dtor != NULL)
+				(*pc->pc_dtor)(pc->pc_arg, object);
+			pool_do_put(pc->pc_pool, object, pq);
+		}
+		pc->pc_ngroups--;
+		LIST_REMOVE(pcg, pcg_list);
+		LIST_INSERT_HEAD(pcgdl, pcg, pcg_list);
	}
+}
 
-	if ((pcg = pc->pc_emptygroups) != NULL) {
-		/*
-		 * If there's a empty group, release our full
-		 * group back to the cache.  Install the empty
-		 * group as cc_current and return.
-		 */
-		if ((cur = cc->cc_current) != NULL) {
-			KASSERT(cur->pcg_avail == PCG_NOBJECTS);
-			cur->pcg_next = pc->pc_fullgroups;
-			pc->pc_fullgroups = cur;
-			pc->pc_nfull++;
-		}
-		KASSERT(pcg->pcg_avail == 0);
-		cc->cc_current = pcg;
-		pc->pc_emptygroups = pcg->pcg_next;
-		pc->pc_hits++;
-		pc->pc_nempty--;
-		mutex_exit(&pc->pc_lock);
-		return cc;
-	}
+static void
+pool_do_cache_invalidate(struct pool_cache *pc, struct pool_pagelist *pq,
+    struct pool_cache_grouplist *pcgl)
+{
 
-	/*
-	 * Nothing available locally or in cache.  Take the
-	 * slow path and try to allocate a new group that we
-	 * can release to.
-	 */
-	pc->pc_misses++;
-	mutex_exit(&pc->pc_lock);
-	pool_cache_cpu_exit(cc, s);
+	KASSERT(mutex_owned(&pc->pc_lock));
+	KASSERT(mutex_owned(&pc->pc_pool->pr_lock));
 
-	/*
-	 * If we can't allocate a new group, just throw the
-	 * object away.
-	 */
-	pcg = pool_get(&pcgpool, PR_NOWAIT);
-	if (pcg == NULL) {
-		pool_cache_destruct_object(pc, object);
-		return NULL;
-	}
-#ifdef DIAGNOSTIC
-	memset(pcg, 0, sizeof(*pcg));
-#else
-	pcg->pcg_avail = 0;
-#endif
-
-	/*
-	 * Add the empty group to the cache and try again.
-	 */
-	mutex_enter(&pc->pc_lock);
-	pcg->pcg_next = pc->pc_emptygroups;
-	pc->pc_emptygroups = pcg;
-	pc->pc_nempty++;
-	mutex_exit(&pc->pc_lock);
+	pool_do_cache_invalidate_grouplist(&pc->pc_fullgroups, pc, pq, pcgl);
+	pool_do_cache_invalidate_grouplist(&pc->pc_partgroups, pc, pq, pcgl);
 
-	return pool_cache_cpu_enter(pc, s);
-}
+	KASSERT(LIST_EMPTY(&pc->pc_partgroups));
+	KASSERT(LIST_EMPTY(&pc->pc_fullgroups));
+	KASSERT(pc->pc_nitems == 0);
+}
 
 /*
- * pool_cache_put{,_paddr}:
+ * pool_cache_invalidate:
 *
- *	Put an object back to the pool cache (optionally caching the
- *	physical address of the object).
+ *	Invalidate a pool cache (destruct and release all of the
+ *	cached objects).
 */
 void
-pool_cache_put_paddr(pool_cache_t pc, void *object, paddr_t pa)
+pool_cache_invalidate(struct pool_cache *pc)
 {
-	pool_cache_cpu_t *cc;
-	pcg_t *pcg;
-	int s;
+	struct pool_pagelist pq;
+	struct pool_cache_grouplist pcgl;
 
-	FREECHECK_IN(&pc->pc_freecheck, object);
+	LIST_INIT(&pq);
+	LIST_INIT(&pcgl);
 
-	cc = pool_cache_cpu_enter(pc, &s);
-	do {
-		/* If the current group isn't full, release it there. */
-		pcg = cc->cc_current;
-		if (pcg != NULL && pcg->pcg_avail < PCG_NOBJECTS) {
-			KASSERT(pcg->pcg_objects[pcg->pcg_avail].pcgo_va
-			    == NULL);
-			pcg->pcg_objects[pcg->pcg_avail].pcgo_va = object;
-			pcg->pcg_objects[pcg->pcg_avail].pcgo_pa = pa;
-			pcg->pcg_avail++;
-			cc->cc_hits++;
-			pool_cache_cpu_exit(cc, &s);
-			return;
-		}
+	mutex_enter(&pc->pc_lock);
+	mutex_enter(&pc->pc_pool->pr_lock);
 
-		/*
-		 * That failed.  If the previous group is empty, swap
-		 * it with the current group and try again.
-		 */
-		pcg = cc->cc_previous;
-		if (pcg != NULL && pcg->pcg_avail == 0) {
-			cc->cc_previous = cc->cc_current;
-			cc->cc_current = pcg;
-			continue;
-		}
+	pool_do_cache_invalidate(pc, &pq, &pcgl);
 
-		/*
-		 * Can't free to either group: try the slow path.
-		 * If put_slow() releases the object for us, it
-		 * will return NULL.  Otherwise we need to retry.
-		 */
-		cc = pool_cache_put_slow(cc, &s, object, pa);
-	} while (cc != NULL);
+	mutex_exit(&pc->pc_pool->pr_lock);
+	mutex_exit(&pc->pc_lock);
+
+	pr_pagelist_free(pc->pc_pool, &pq);
+	pcg_grouplist_free(&pcgl);
 }
 
 /*
- * pool_cache_xcall:
+ * pool_cache_reclaim:
 *
- *	Transfer objects from the per-CPU cache to the global cache.
- *	Run within a cross-call thread.
+ *	Reclaim a pool cache for pool_reclaim().
 */
 static void
-pool_cache_xcall(pool_cache_t pc)
+pool_cache_reclaim(struct pool_cache *pc, struct pool_pagelist *pq,
+    struct pool_cache_grouplist *pcgl)
 {
-	pool_cache_cpu_t *cc;
-	pcg_t *prev, *cur, **list;
-	int s = 0; /* XXXgcc */
-
-	cc = pool_cache_cpu_enter(pc, &s);
-	cur = cc->cc_current;
-	cc->cc_current = NULL;
-	prev = cc->cc_previous;
-	cc->cc_previous = NULL;
-	pool_cache_cpu_exit(cc, &s);
-
-	/*
-	 * XXXSMP Go to splvm to prevent kernel_lock from being taken,
-	 * because locks at IPL_SOFTXXX are still spinlocks.  Does not
-	 * apply to IPL_SOFTBIO.  Cross-call threads do not take the
-	 * kernel_lock.
+
+	/*
+	 * We're locking in the wrong order (normally pool_cache -> pool,
+	 * but the pool is already locked when we get here), so we have
+	 * to use trylock.  If we can't lock the pool_cache, it's not really
+	 * a big deal here.
	 */
-	s = splvm();
-	mutex_enter(&pc->pc_lock);
-	if (cur != NULL) {
-		if (cur->pcg_avail == PCG_NOBJECTS) {
-			list = &pc->pc_fullgroups;
-			pc->pc_nfull++;
-		} else if (cur->pcg_avail == 0) {
-			list = &pc->pc_emptygroups;
-			pc->pc_nempty++;
-		} else {
-			list = &pc->pc_partgroups;
-			pc->pc_npart++;
-		}
-		cur->pcg_next = *list;
-		*list = cur;
-	}
-	if (prev != NULL) {
-		if (prev->pcg_avail == PCG_NOBJECTS) {
-			list = &pc->pc_fullgroups;
-			pc->pc_nfull++;
-		} else if (prev->pcg_avail == 0) {
-			list = &pc->pc_emptygroups;
-			pc->pc_nempty++;
-		} else {
-			list = &pc->pc_partgroups;
-			pc->pc_npart++;
-		}
-		prev->pcg_next = *list;
-		*list = prev;
-	}
+	if (mutex_tryenter(&pc->pc_lock) == 0)
+		return;
+
+	pool_do_cache_invalidate(pc, pq, pcgl);
+	mutex_exit(&pc->pc_lock);
-
 }
 
 /*