Please note that diffs are not public domain; they are subject to the copyright notices on the relevant files. =================================================================== RCS file: /ftp/cvs/cvsroot/src/sys/kern/subr_pool.c,v rcsdiff: /ftp/cvs/cvsroot/src/sys/kern/subr_pool.c,v: warning: Unknown phrases like `commitid ...;' are present. retrieving revision 1.128.2.6 retrieving revision 1.137 diff -u -p -r1.128.2.6 -r1.137 --- src/sys/kern/subr_pool.c 2007/08/20 21:27:37 1.128.2.6 +++ src/sys/kern/subr_pool.c 2007/11/18 16:27:43 1.137 @@ -1,4 +1,4 @@ -/* $NetBSD: subr_pool.c,v 1.128.2.6 2007/08/20 21:27:37 ad Exp $ */ +/* $NetBSD: subr_pool.c,v 1.137 2007/11/18 16:27:43 ad Exp $ */ /*- * Copyright (c) 1997, 1999, 2000, 2002, 2007 The NetBSD Foundation, Inc. @@ -6,7 +6,7 @@ * * This code is derived from software contributed to The NetBSD Foundation * by Paul Kranenburg; by Jason R. Thorpe of the Numerical Aerospace - * Simulation Facility, NASA Ames Research Center. + * Simulation Facility, NASA Ames Research Center, and by Andrew Doran. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -38,7 +38,7 @@ */ #include -__KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.128.2.6 2007/08/20 21:27:37 ad Exp $"); +__KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.137 2007/11/18 16:27:43 ad Exp $"); #include "opt_pool.h" #include "opt_poollog.h" @@ -46,6 +46,7 @@ __KERNEL_RCSID(0, "$NetBSD: subr_pool.c, #include #include +#include #include #include #include @@ -55,6 +56,8 @@ __KERNEL_RCSID(0, "$NetBSD: subr_pool.c, #include #include #include +#include +#include #include @@ -74,10 +77,15 @@ __KERNEL_RCSID(0, "$NetBSD: subr_pool.c, /* List of all pools */ LIST_HEAD(,pool) pool_head = LIST_HEAD_INITIALIZER(pool_head); +/* List of all caches. */ +LIST_HEAD(,pool_cache) pool_cache_head = + LIST_HEAD_INITIALIZER(pool_cache_head); + /* Private pool for page header structures */ #define PHPOOL_MAX 8 static struct pool phpool[PHPOOL_MAX]; -#define PHPOOL_FREELIST_NELEM(idx) (((idx) == 0) ? 0 : (1 << (idx))) +#define PHPOOL_FREELIST_NELEM(idx) \ + (((idx) == 0) ? 0 : BITMAP_SIZE * (1 << (idx))) #ifdef POOL_SUBPAGE /* Pool of subpages for use by normal pools. */ @@ -91,7 +99,7 @@ static void *pool_page_alloc_meta(struct static void pool_page_free_meta(struct pool *, void *); /* allocator for pool metadata */ -static struct pool_allocator pool_allocator_meta = { +struct pool_allocator pool_allocator_meta = { pool_page_alloc_meta, pool_page_free_meta, .pa_backingmapptr = &kmem_map, }; @@ -104,8 +112,11 @@ static struct pool *drainpp; /* This lock protects both pool_head and drainpp. 
*/ static kmutex_t pool_head_lock; +static kcondvar_t pool_busy; -typedef uint8_t pool_item_freelist_t; +typedef uint32_t pool_item_bitmap_t; +#define BITMAP_SIZE (CHAR_BIT * sizeof(pool_item_bitmap_t)) +#define BITMAP_MASK (BITMAP_SIZE - 1) struct pool_item_header { /* Page headers */ @@ -115,6 +126,7 @@ struct pool_item_header { ph_node; /* Off-page page headers */ void * ph_page; /* this page's address */ struct timeval ph_time; /* last referenced */ + uint16_t ph_nmissing; /* # of chunks in use */ union { /* !PR_NOTOUCH */ struct { @@ -123,27 +135,20 @@ struct pool_item_header { } phu_normal; /* PR_NOTOUCH */ struct { - uint16_t - phu_off; /* start offset in page */ - pool_item_freelist_t - phu_firstfree; /* first free item */ - /* - * XXX it might be better to use - * a simple bitmap and ffs(3) - */ + uint16_t phu_off; /* start offset in page */ + pool_item_bitmap_t phu_bitmap[]; } phu_notouch; } ph_u; - uint16_t ph_nmissing; /* # of chunks in use */ }; #define ph_itemlist ph_u.phu_normal.phu_itemlist #define ph_off ph_u.phu_notouch.phu_off -#define ph_firstfree ph_u.phu_notouch.phu_firstfree +#define ph_bitmap ph_u.phu_notouch.phu_bitmap struct pool_item { #ifdef DIAGNOSTIC u_int pi_magic; #endif -#define PI_MAGIC 0xdeadbeefU +#define PI_MAGIC 0xdeaddeadU /* Other entries use only this list entry */ LIST_ENTRY(pool_item) pi_list; }; @@ -159,30 +164,34 @@ struct pool_item { * needless object construction/destruction; it is deferred until absolutely * necessary. * - * Caches are grouped into cache groups. Each cache group references - * up to 16 constructed objects. When a cache allocates an object - * from the pool, it calls the object's constructor and places it into - * a cache group. When a cache group frees an object back to the pool, - * it first calls the object's destructor. This allows the object to - * persist in constructed form while freed to the cache. - * - * Multiple caches may exist for each pool. This allows a single - * object type to have multiple constructed forms. The pool references - * each cache, so that when a pool is drained by the pagedaemon, it can - * drain each individual cache as well. Each time a cache is drained, - * the most idle cache group is freed to the pool in its entirety. + * Caches are grouped into cache groups. Each cache group references up + * to PCG_NUMOBJECTS constructed objects. When a cache allocates an + * object from the pool, it calls the object's constructor and places it + * into a cache group. When a cache group frees an object back to the + * pool, it first calls the object's destructor. This allows the object + * to persist in constructed form while freed to the cache. + * + * The pool references each cache, so that when a pool is drained by the + * pagedaemon, it can drain each individual cache as well. Each time a + * cache is drained, the most idle cache group is freed to the pool in + * its entirety. * * Pool caches are layed on top of pools. By layering them, we can avoid * the complexity of cache management for pools which would not benefit * from it. */ -/* The cache group pool. 
*/ static struct pool pcgpool; +static struct pool cache_pool; +static struct pool cache_cpu_pool; -static void pool_cache_reclaim(struct pool_cache *, struct pool_pagelist *, - struct pool_cache_grouplist *); -static void pcg_grouplist_free(struct pool_cache_grouplist *); +static pool_cache_cpu_t *pool_cache_put_slow(pool_cache_cpu_t *, int *, + void *, paddr_t); +static pool_cache_cpu_t *pool_cache_get_slow(pool_cache_cpu_t *, int *, + void **, paddr_t *, int); +static void pool_cache_cpu_init1(struct cpu_info *, pool_cache_t); +static void pool_cache_invalidate_groups(pool_cache_t, pcg_t *); +static void pool_cache_xcall(pool_cache_t); static int pool_catchup(struct pool *); static void pool_prime_page(struct pool *, void *, @@ -319,12 +328,12 @@ pr_enter_check(struct pool *pp, void (*p #define pr_enter_check(pp, pr) #endif /* POOL_DIAGNOSTIC */ -static inline int +static inline unsigned int pr_item_notouch_index(const struct pool *pp, const struct pool_item_header *ph, const void *v) { const char *cp = v; - int idx; + unsigned int idx; KASSERT(pp->pr_roflags & PR_NOTOUCH); idx = (cp - (char *)ph->ph_page - ph->ph_off) / pp->pr_size; @@ -332,35 +341,55 @@ pr_item_notouch_index(const struct pool return idx; } -#define PR_FREELIST_ALIGN(p) \ - roundup((uintptr_t)(p), sizeof(pool_item_freelist_t)) -#define PR_FREELIST(ph) ((pool_item_freelist_t *)PR_FREELIST_ALIGN((ph) + 1)) -#define PR_INDEX_USED ((pool_item_freelist_t)-1) -#define PR_INDEX_EOL ((pool_item_freelist_t)-2) - static inline void pr_item_notouch_put(const struct pool *pp, struct pool_item_header *ph, void *obj) { - int idx = pr_item_notouch_index(pp, ph, obj); - pool_item_freelist_t *freelist = PR_FREELIST(ph); + unsigned int idx = pr_item_notouch_index(pp, ph, obj); + pool_item_bitmap_t *bitmap = ph->ph_bitmap + (idx / BITMAP_SIZE); + pool_item_bitmap_t mask = 1 << (idx & BITMAP_MASK); - KASSERT(freelist[idx] == PR_INDEX_USED); - freelist[idx] = ph->ph_firstfree; - ph->ph_firstfree = idx; + KASSERT((*bitmap & mask) == 0); + *bitmap |= mask; } static inline void * pr_item_notouch_get(const struct pool *pp, struct pool_item_header *ph) { - int idx = ph->ph_firstfree; - pool_item_freelist_t *freelist = PR_FREELIST(ph); + pool_item_bitmap_t *bitmap = ph->ph_bitmap; + unsigned int idx; + int i; + + for (i = 0; ; i++) { + int bit; + + KASSERT((i * BITMAP_SIZE) < pp->pr_itemsperpage); + bit = ffs32(bitmap[i]); + if (bit) { + pool_item_bitmap_t mask; + + bit--; + idx = (i * BITMAP_SIZE) + bit; + mask = 1 << bit; + KASSERT((bitmap[i] & mask) != 0); + bitmap[i] &= ~mask; + break; + } + } + KASSERT(idx < pp->pr_itemsperpage); + return (char *)ph->ph_page + ph->ph_off + idx * pp->pr_size; +} - KASSERT(freelist[idx] != PR_INDEX_USED); - ph->ph_firstfree = freelist[idx]; - freelist[idx] = PR_INDEX_USED; +static inline void +pr_item_notouch_init(const struct pool *pp, struct pool_item_header *ph) +{ + pool_item_bitmap_t *bitmap = ph->ph_bitmap; + const int n = howmany(pp->pr_itemsperpage, BITMAP_SIZE); + int i; - return (char *)ph->ph_page + ph->ph_off + idx * pp->pr_size; + for (i = 0; i < n; i++) { + bitmap[i] = (pool_item_bitmap_t)-1; + } } static inline int @@ -555,6 +584,7 @@ pool_subsystem_init(void) struct link_pool_init * const *pi; mutex_init(&pool_head_lock, MUTEX_DEFAULT, IPL_NONE); + cv_init(&pool_busy, "poolbusy"); __link_set_foreach(pi, pools) pool_init((*pi)->pp, (*pi)->size, (*pi)->align, @@ -567,6 +597,12 @@ pool_subsystem_init(void) SLIST_REMOVE_HEAD(&pa_deferinitq, pa_q); pa_reclaim_register(pa); } + + 
pool_init(&cache_pool, sizeof(struct pool_cache), CACHE_LINE_SIZE, + 0, 0, "pcache", &pool_allocator_nointr, IPL_NONE); + + pool_init(&cache_cpu_pool, sizeof(pool_cache_cpu_t), CACHE_LINE_SIZE, + 0, 0, "pcachecpu", &pool_allocator_nointr, IPL_NONE); } /* @@ -585,9 +621,6 @@ pool_init(struct pool *pp, size_t size, size_t trysize, phsize; int off, slack; - KASSERT((1UL << (CHAR_BIT * sizeof(pool_item_freelist_t))) - 2 >= - PHPOOL_FREELIST_NELEM(PHPOOL_MAX - 1)); - #ifdef DEBUG /* * Check that the pool hasn't already been initialised and @@ -624,7 +657,7 @@ pool_init(struct pool *pp, size_t size, TAILQ_INIT(&palloc->pa_list); - mutex_init(&palloc->pa_lock, MUTEX_DRIVER, IPL_VM); + mutex_init(&palloc->pa_lock, MUTEX_DEFAULT, IPL_VM); palloc->pa_pagemask = ~(palloc->pa_pagesz - 1); palloc->pa_pageshift = ffs(palloc->pa_pagesz) - 1; @@ -652,7 +685,7 @@ pool_init(struct pool *pp, size_t size, LIST_INIT(&pp->pr_emptypages); LIST_INIT(&pp->pr_fullpages); LIST_INIT(&pp->pr_partpages); - LIST_INIT(&pp->pr_cachelist); + pp->pr_cache = NULL; pp->pr_curpage = NULL; pp->pr_npages = 0; pp->pr_minitems = 0; @@ -753,6 +786,7 @@ pool_init(struct pool *pp, size_t size, pp->pr_npagefree = 0; pp->pr_hiwat = 0; pp->pr_nidle = 0; + pp->pr_refcnt = 0; #ifdef POOL_DIAGNOSTIC if (flags & PR_LOGGING) { @@ -768,7 +802,7 @@ pool_init(struct pool *pp, size_t size, pp->pr_entered_file = NULL; pp->pr_entered_line = 0; - mutex_init(&pp->pr_lock, MUTEX_DRIVER, ipl); + mutex_init(&pp->pr_lock, MUTEX_DEFAULT, ipl); cv_init(&pp->pr_cv, wchan); pp->pr_ipl = ipl; @@ -789,8 +823,8 @@ pool_init(struct pool *pp, size_t size, "phpool-%d", nelem); sz = sizeof(struct pool_item_header); if (nelem) { - sz = PR_FREELIST_ALIGN(sz) - + nelem * sizeof(pool_item_freelist_t); + sz = offsetof(struct pool_item_header, + ph_bitmap[howmany(nelem, BITMAP_SIZE)]); } pool_init(&phpool[idx], sz, 0, 0, 0, phpool_names[idx], &pool_allocator_meta, IPL_VM); @@ -799,8 +833,8 @@ pool_init(struct pool *pp, size_t size, pool_init(&psppool, POOL_SUBPAGE, POOL_SUBPAGE, 0, PR_RECURSIVE, "psppool", &pool_allocator_meta, IPL_VM); #endif - pool_init(&pcgpool, sizeof(struct pool_cache_group), 0, 0, - 0, "pcgpool", &pool_allocator_meta, IPL_VM); + pool_init(&pcgpool, sizeof(pcg_t), CACHE_LINE_SIZE, 0, 0, + "cachegrp", &pool_allocator_meta, IPL_VM); } if (__predict_true(!cold)) { @@ -832,6 +866,8 @@ pool_destroy(struct pool *pp) /* Remove from global pool list */ mutex_enter(&pool_head_lock); + while (pp->pr_refcnt != 0) + cv_wait(&pool_busy, &pool_head_lock); LIST_REMOVE(pp, pr_poollist); if (drainpp == pp) drainpp = NULL; @@ -845,7 +881,7 @@ pool_destroy(struct pool *pp) mutex_enter(&pp->pr_lock); - KASSERT(LIST_EMPTY(&pp->pr_cachelist)); + KASSERT(pp->pr_cache == NULL); #ifdef DIAGNOSTIC if (pp->pr_nout != 0) { @@ -903,7 +939,7 @@ pool_alloc_item_header(struct pool *pp, } /* - * Grab an item from the pool; must be called at appropriate spl level + * Grab an item from the pool. */ void * #ifdef POOL_DIAGNOSTIC @@ -1251,7 +1287,7 @@ pool_do_put(struct pool *pp, void *v, st } /* - * Return resource to the pool; must be called at appropriate spl level + * Return resource to the pool. 
*/ #ifdef POOL_DIAGNOSTIC void @@ -1412,14 +1448,7 @@ pool_prime_page(struct pool *pp, void *s pp->pr_nitems += n; if (pp->pr_roflags & PR_NOTOUCH) { - pool_item_freelist_t *freelist = PR_FREELIST(ph); - int i; - - ph->ph_off = (char *)cp - (char *)storage; - ph->ph_firstfree = 0; - for (i = 0; i < n - 1; i++) - freelist[i] = i + 1; - freelist[n - 1] = PR_INDEX_EOL; + pr_item_notouch_init(pp, ph); } else { while (n--) { pi = (struct pool_item *)cp; @@ -1550,10 +1579,10 @@ pool_reclaim(struct pool *pp) #endif { struct pool_item_header *ph, *phnext; - struct pool_cache *pc; struct pool_pagelist pq; - struct pool_cache_grouplist pcgl; struct timeval curtime, diff; + bool klock; + int rv; if (pp->pr_drain_hook != NULL) { /* @@ -1562,18 +1591,31 @@ pool_reclaim(struct pool *pp) (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, PR_NOWAIT); } - if (mutex_tryenter(&pp->pr_lock) == 0) + /* + * XXXSMP Because mutexes at IPL_SOFTXXX are still spinlocks, + * and we are called from the pagedaemon without kernel_lock. + * Does not apply to IPL_SOFTBIO. + */ + if (pp->pr_ipl == IPL_SOFTNET || pp->pr_ipl == IPL_SOFTCLOCK || + pp->pr_ipl == IPL_SOFTSERIAL) { + KERNEL_LOCK(1, NULL); + klock = true; + } else + klock = false; + + /* Reclaim items from the pool's cache (if any). */ + if (pp->pr_cache != NULL) + pool_cache_invalidate(pp->pr_cache); + + if (mutex_tryenter(&pp->pr_lock) == 0) { + if (klock) { + KERNEL_UNLOCK_ONE(NULL); + } return (0); + } pr_enter(pp, file, line); LIST_INIT(&pq); - LIST_INIT(&pcgl); - - /* - * Reclaim items from the pool's caches. - */ - LIST_FOREACH(pc, &pp->pr_cachelist, pc_poollist) - pool_cache_reclaim(pc, &pq, &pcgl); getmicrotime(&curtime); @@ -1603,41 +1645,86 @@ pool_reclaim(struct pool *pp) pr_leave(pp); mutex_exit(&pp->pr_lock); - if (LIST_EMPTY(&pq) && LIST_EMPTY(&pcgl)) - return 0; - pr_pagelist_free(pp, &pq); - pcg_grouplist_free(&pcgl); - return (1); + if (LIST_EMPTY(&pq)) + rv = 0; + else { + pr_pagelist_free(pp, &pq); + rv = 1; + } + + if (klock) { + KERNEL_UNLOCK_ONE(NULL); + } + + return (rv); } /* - * Drain pools, one at a time. - * - * Note, we must never be called from an interrupt context. + * Drain pools, one at a time. This is a two stage process; + * drain_start kicks off a cross call to drain CPU-level caches + * if the pool has an associated pool_cache. drain_end waits + * for those cross calls to finish, and then drains the cache + * (if any) and pool. * - * XXX Pool can disappear while draining. + * Note, must never be called from interrupt context. */ void -pool_drain(void *arg) +pool_drain_start(struct pool **ppp, uint64_t *wp) { struct pool *pp; - int s; + + KASSERT(!LIST_EMPTY(&pool_head)); pp = NULL; - s = splvm(); /* XXX why? */ + + /* Find next pool to drain, and add a reference. */ mutex_enter(&pool_head_lock); - if (drainpp == NULL) { - drainpp = LIST_FIRST(&pool_head); - } - if (drainpp) { - pp = drainpp; - drainpp = LIST_NEXT(pp, pr_poollist); + do { + if (drainpp == NULL) { + drainpp = LIST_FIRST(&pool_head); + } + if (drainpp != NULL) { + pp = drainpp; + drainpp = LIST_NEXT(pp, pr_poollist); + } + /* + * Skip completely idle pools. We depend on at least + * one pool in the system being active. + */ + } while (pp == NULL || pp->pr_npages == 0); + pp->pr_refcnt++; + mutex_exit(&pool_head_lock); + + /* If there is a pool_cache, drain CPU level caches. 
*/ + *ppp = pp; + if (pp->pr_cache != NULL) { + *wp = xc_broadcast(0, (xcfunc_t)pool_cache_xcall, + pp->pr_cache, NULL); } +} + +void +pool_drain_end(struct pool *pp, uint64_t where) +{ + + if (pp == NULL) + return; + + KASSERT(pp->pr_refcnt > 0); + + /* Wait for remote draining to complete. */ + if (pp->pr_cache != NULL) + xc_wait(where); + + /* Drain the cache (if any) and pool.. */ + pool_reclaim(pp); + + /* Finally, unlock the pool. */ + mutex_enter(&pool_head_lock); + pp->pr_refcnt--; + cv_broadcast(&pool_busy); mutex_exit(&pool_head_lock); - if (pp) - pool_reclaim(pp); - splx(s); } /* @@ -1647,13 +1734,7 @@ void pool_print(struct pool *pp, const char *modif) { - if (mutex_tryenter(&pp->pr_lock) == 0) { - printf("pool %s is locked; try again later\n", - pp->pr_wchan); - return; - } pool_print1(pp, modif, printf); - mutex_exit(&pp->pr_lock); } void @@ -1661,12 +1742,6 @@ pool_printall(const char *modif, void (* { struct pool *pp; - if (mutex_tryenter(&pool_head_lock) == 0) { - (*pr)("WARNING: pool_head_slock is locked\n"); - } else { - mutex_exit(&pool_head_lock); - } - LIST_FOREACH(pp, &pool_head, pr_poollist) { pool_printit(pp, modif, pr); } @@ -1681,20 +1756,6 @@ pool_printit(struct pool *pp, const char return; } - /* - * Called from DDB; interrupts should be blocked, and all - * other processors should be paused. We can skip locking - * the pool in this case. - * - * We do a mutex_tryenter() just to print the lock - * status, however. - */ - - if (mutex_tryenter(&pp->pr_lock) == 0) - (*pr)("WARNING: pool %s is locked\n", pp->pr_wchan); - else - mutex_exit(&pp->pr_lock); - pool_print1(pp, modif, pr); } @@ -1729,8 +1790,10 @@ static void pool_print1(struct pool *pp, const char *modif, void (*pr)(const char *, ...)) { struct pool_item_header *ph; - struct pool_cache *pc; - struct pool_cache_group *pcg; + pool_cache_t pc; + pcg_t *pcg; + pool_cache_cpu_t *cc; + uint64_t cpuhit, cpumiss; int i, print_log = 0, print_pagelist = 0, print_cache = 0; char c; @@ -1743,7 +1806,13 @@ pool_print1(struct pool *pp, const char print_cache = 1; } - (*pr)("POOL %s: size %u, align %u, ioff %u, roflags 0x%08x\n", + if ((pc = pp->pr_cache) != NULL) { + (*pr)("POOL CACHE"); + } else { + (*pr)("POOL"); + } + + (*pr)(" %s: size %u, align %u, ioff %u, roflags 0x%08x\n", pp->pr_wchan, pp->pr_size, pp->pr_align, pp->pr_itemoffset, pp->pr_roflags); (*pr)("\talloc %p\n", pp->pr_alloc); @@ -1752,7 +1821,7 @@ pool_print1(struct pool *pp, const char (*pr)("\titemsperpage %u, nitems %u, nout %u, hardlimit %u\n", pp->pr_itemsperpage, pp->pr_nitems, pp->pr_nout, pp->pr_hardlimit); - (*pr)("\n\tnget %lu, nfail %lu, nput %lu\n", + (*pr)("\tnget %lu, nfail %lu, nput %lu\n", pp->pr_nget, pp->pr_nfail, pp->pr_nput); (*pr)("\tnpagealloc %lu, npagefree %lu, hiwat %u, nidle %lu\n", pp->pr_npagealloc, pp->pr_npagefree, pp->pr_hiwat, pp->pr_nidle); @@ -1787,8 +1856,6 @@ pool_print1(struct pool *pp, const char } skip_log: - if (print_cache == 0) - goto skip_cache; #define PR_GROUPLIST(pcg) \ (*pr)("\t\tgroup %p: avail %d\n", pcg, pcg->pcg_avail); \ @@ -1805,26 +1872,38 @@ pool_print1(struct pool *pp, const char } \ } - LIST_FOREACH(pc, &pp->pr_cachelist, pc_poollist) { - (*pr)("\tcache %p\n", pc); - (*pr)("\t hits %lu misses %lu ngroups %lu nitems %lu\n", - pc->pc_hits, pc->pc_misses, pc->pc_ngroups, pc->pc_nitems); - (*pr)("\t full groups:\n"); - LIST_FOREACH(pcg, &pc->pc_fullgroups, pcg_list) { - PR_GROUPLIST(pcg); - } - (*pr)("\t partial groups:\n"); - LIST_FOREACH(pcg, &pc->pc_partgroups, pcg_list) { - PR_GROUPLIST(pcg); - 
} - (*pr)("\t empty groups:\n"); - LIST_FOREACH(pcg, &pc->pc_emptygroups, pcg_list) { - PR_GROUPLIST(pcg); + if (pc != NULL) { + cpuhit = 0; + cpumiss = 0; + for (i = 0; i < MAXCPUS; i++) { + if ((cc = pc->pc_cpus[i]) == NULL) + continue; + cpuhit += cc->cc_hits; + cpumiss += cc->cc_misses; + } + (*pr)("\tcpu layer hits %llu misses %llu\n", cpuhit, cpumiss); + (*pr)("\tcache layer hits %llu misses %llu\n", + pc->pc_hits, pc->pc_misses); + (*pr)("\tcache layer entry uncontended %llu contended %llu\n", + pc->pc_hits + pc->pc_misses - pc->pc_contended, + pc->pc_contended); + (*pr)("\tcache layer empty groups %u full groups %u\n", + pc->pc_nempty, pc->pc_nfull); + if (print_cache) { + (*pr)("\tfull cache groups:\n"); + for (pcg = pc->pc_fullgroups; pcg != NULL; + pcg = pcg->pcg_next) { + PR_GROUPLIST(pcg); + } + (*pr)("\tempty cache groups:\n"); + for (pcg = pc->pc_emptygroups; pcg != NULL; + pcg = pcg->pcg_next) { + PR_GROUPLIST(pcg); + } } } #undef PR_GROUPLIST - skip_cache: pr_enter_check(pp, pr); } @@ -1921,41 +2000,93 @@ out: * pool_cache_init: * * Initialize a pool cache. + */ +pool_cache_t +pool_cache_init(size_t size, u_int align, u_int align_offset, u_int flags, + const char *wchan, struct pool_allocator *palloc, int ipl, + int (*ctor)(void *, void *, int), void (*dtor)(void *, void *), void *arg) +{ + pool_cache_t pc; + + pc = pool_get(&cache_pool, PR_WAITOK); + if (pc == NULL) + return NULL; + + pool_cache_bootstrap(pc, size, align, align_offset, flags, wchan, + palloc, ipl, ctor, dtor, arg); + + return pc; +} + +/* + * pool_cache_bootstrap: * - * NOTE: If the pool must be protected from interrupts, we expect - * to be called at the appropriate interrupt priority level. + * Kernel-private version of pool_cache_init(). The caller + * provides initial storage. */ void -pool_cache_init(struct pool_cache *pc, struct pool *pp, - int (*ctor)(void *, void *, int), - void (*dtor)(void *, void *), +pool_cache_bootstrap(pool_cache_t pc, size_t size, u_int align, + u_int align_offset, u_int flags, const char *wchan, + struct pool_allocator *palloc, int ipl, + int (*ctor)(void *, void *, int), void (*dtor)(void *, void *), void *arg) { + CPU_INFO_ITERATOR cii; + struct cpu_info *ci; + struct pool *pp; + + pp = &pc->pc_pool; + if (palloc == NULL && ipl == IPL_NONE) + palloc = &pool_allocator_nointr; + pool_init(pp, size, align, align_offset, flags, wchan, palloc, ipl); - LIST_INIT(&pc->pc_emptygroups); - LIST_INIT(&pc->pc_fullgroups); - LIST_INIT(&pc->pc_partgroups); - mutex_init(&pc->pc_lock, MUTEX_DRIVER, pp->pr_ipl); + mutex_init(&pc->pc_lock, MUTEX_DEFAULT, pp->pr_ipl); - pc->pc_pool = pp; + if (ctor == NULL) { + ctor = (int (*)(void *, void *, int))nullop; + } + if (dtor == NULL) { + dtor = (void (*)(void *, void *))nullop; + } + pc->pc_emptygroups = NULL; + pc->pc_fullgroups = NULL; + pc->pc_partgroups = NULL; pc->pc_ctor = ctor; pc->pc_dtor = dtor; pc->pc_arg = arg; - - pc->pc_hits = 0; + pc->pc_hits = 0; pc->pc_misses = 0; - - pc->pc_ngroups = 0; - - pc->pc_nitems = 0; - + pc->pc_nempty = 0; + pc->pc_npart = 0; + pc->pc_nfull = 0; + pc->pc_contended = 0; + pc->pc_refcnt = 0; + pc->pc_freecheck = NULL; + + /* Allocate per-CPU caches. */ + memset(pc->pc_cpus, 0, sizeof(pc->pc_cpus)); + pc->pc_ncpu = 0; + if (ncpu == 0) { + /* XXX For sparc: boot CPU is not attached yet. 
*/ + pool_cache_cpu_init1(curcpu(), pc); + } else { + for (CPU_INFO_FOREACH(cii, ci)) { + pool_cache_cpu_init1(ci, pc); + } + } + if (__predict_true(!cold)) { mutex_enter(&pp->pr_lock); - LIST_INSERT_HEAD(&pp->pr_cachelist, pc, pc_poollist); + pp->pr_cache = pc; mutex_exit(&pp->pr_lock); - } else - LIST_INSERT_HEAD(&pp->pr_cachelist, pc, pc_poollist); + mutex_enter(&pool_head_lock); + LIST_INSERT_HEAD(&pool_cache_head, pc, pc_cachelist); + mutex_exit(&pool_head_lock); + } else { + pp->pr_cache = pc; + LIST_INSERT_HEAD(&pool_cache_head, pc, pc_cachelist); + } } /* @@ -1964,319 +2095,618 @@ pool_cache_init(struct pool_cache *pc, s * Destroy a pool cache. */ void -pool_cache_destroy(struct pool_cache *pc) +pool_cache_destroy(pool_cache_t pc) { - struct pool *pp = pc->pc_pool; + struct pool *pp = &pc->pc_pool; + pool_cache_cpu_t *cc; + pcg_t *pcg; + int i; + + /* Remove it from the global list. */ + mutex_enter(&pool_head_lock); + while (pc->pc_refcnt != 0) + cv_wait(&pool_busy, &pool_head_lock); + LIST_REMOVE(pc, pc_cachelist); + mutex_exit(&pool_head_lock); /* First, invalidate the entire cache. */ pool_cache_invalidate(pc); - /* ...and remove it from the pool's cache list. */ + /* Disassociate it from the pool. */ mutex_enter(&pp->pr_lock); - LIST_REMOVE(pc, pc_poollist); + pp->pr_cache = NULL; mutex_exit(&pp->pr_lock); + /* Destroy per-CPU data */ + for (i = 0; i < MAXCPUS; i++) { + if ((cc = pc->pc_cpus[i]) == NULL) + continue; + if ((pcg = cc->cc_current) != NULL) { + pcg->pcg_next = NULL; + pool_cache_invalidate_groups(pc, pcg); + } + if ((pcg = cc->cc_previous) != NULL) { + pcg->pcg_next = NULL; + pool_cache_invalidate_groups(pc, pcg); + } + if (cc != &pc->pc_cpu0) + pool_put(&cache_cpu_pool, cc); + } + + /* Finally, destroy it. */ mutex_destroy(&pc->pc_lock); + pool_destroy(pp); + pool_put(&cache_pool, pc); } -static inline void * -pcg_get(struct pool_cache_group *pcg, paddr_t *pap) +/* + * pool_cache_cpu_init1: + * + * Called for each pool_cache whenever a new CPU is attached. + */ +static void +pool_cache_cpu_init1(struct cpu_info *ci, pool_cache_t pc) { - void *object; - u_int idx; + pool_cache_cpu_t *cc; + int index; + + index = ci->ci_index; + + KASSERT(index < MAXCPUS); + KASSERT(((uintptr_t)pc->pc_cpus & (CACHE_LINE_SIZE - 1)) == 0); + + if ((cc = pc->pc_cpus[index]) != NULL) { + KASSERT(cc->cc_cpuindex == index); + return; + } - KASSERT(pcg->pcg_avail <= PCG_NOBJECTS); - KASSERT(pcg->pcg_avail != 0); - idx = --pcg->pcg_avail; - - KASSERT(pcg->pcg_objects[idx].pcgo_va != NULL); - object = pcg->pcg_objects[idx].pcgo_va; - if (pap != NULL) - *pap = pcg->pcg_objects[idx].pcgo_pa; - pcg->pcg_objects[idx].pcgo_va = NULL; + /* + * The first CPU is 'free'. This needs to be the case for + * bootstrap - we may not be able to allocate yet. + */ + if (pc->pc_ncpu == 0) { + cc = &pc->pc_cpu0; + pc->pc_ncpu = 1; + } else { + mutex_enter(&pc->pc_lock); + pc->pc_ncpu++; + mutex_exit(&pc->pc_lock); + cc = pool_get(&cache_cpu_pool, PR_WAITOK); + } + + cc->cc_ipl = pc->pc_pool.pr_ipl; + cc->cc_iplcookie = makeiplcookie(cc->cc_ipl); + cc->cc_cache = pc; + cc->cc_cpuindex = index; + cc->cc_hits = 0; + cc->cc_misses = 0; + cc->cc_current = NULL; + cc->cc_previous = NULL; - return (object); + pc->pc_cpus[index] = cc; } -static inline void -pcg_put(struct pool_cache_group *pcg, void *object, paddr_t pa) +/* + * pool_cache_cpu_init: + * + * Called whenever a new CPU is attached. 
+ */ +void +pool_cache_cpu_init(struct cpu_info *ci) { - u_int idx; + pool_cache_t pc; + + mutex_enter(&pool_head_lock); + LIST_FOREACH(pc, &pool_cache_head, pc_cachelist) { + pc->pc_refcnt++; + mutex_exit(&pool_head_lock); - KASSERT(pcg->pcg_avail < PCG_NOBJECTS); - idx = pcg->pcg_avail++; + pool_cache_cpu_init1(ci, pc); - KASSERT(pcg->pcg_objects[idx].pcgo_va == NULL); - pcg->pcg_objects[idx].pcgo_va = object; - pcg->pcg_objects[idx].pcgo_pa = pa; + mutex_enter(&pool_head_lock); + pc->pc_refcnt--; + cv_broadcast(&pool_busy); + } + mutex_exit(&pool_head_lock); +} + +/* + * pool_cache_reclaim: + * + * Reclaim memory from a pool cache. + */ +bool +pool_cache_reclaim(pool_cache_t pc) +{ + + return pool_reclaim(&pc->pc_pool); } static void -pcg_grouplist_free(struct pool_cache_grouplist *pcgl) +pool_cache_destruct_object1(pool_cache_t pc, void *object) { - struct pool_cache_group *pcg; - while ((pcg = LIST_FIRST(pcgl)) != NULL) { - LIST_REMOVE(pcg, pcg_list); - pool_put(&pcgpool, pcg); - } + (*pc->pc_dtor)(pc->pc_arg, object); + pool_put(&pc->pc_pool, object); } /* - * pool_cache_get{,_paddr}: + * pool_cache_destruct_object: * - * Get an object from a pool cache (optionally returning - * the physical address of the object). + * Force destruction of an object and its release back into + * the pool. */ -void * -pool_cache_get_paddr(struct pool_cache *pc, int flags, paddr_t *pap) +void +pool_cache_destruct_object(pool_cache_t pc, void *object) { - struct pool_cache_group *pcg; - void *object; -#ifdef LOCKDEBUG - if (flags & PR_WAITOK) - ASSERT_SLEEPABLE(NULL, "pool_cache_get(PR_WAITOK)"); -#endif + FREECHECK_IN(&pc->pc_freecheck, object); - mutex_enter(&pc->pc_lock); + pool_cache_destruct_object1(pc, object); +} - pcg = LIST_FIRST(&pc->pc_partgroups); - if (pcg == NULL) { - pcg = LIST_FIRST(&pc->pc_fullgroups); - if (pcg != NULL) { - LIST_REMOVE(pcg, pcg_list); - LIST_INSERT_HEAD(&pc->pc_partgroups, pcg, pcg_list); - } - } - if (pcg == NULL) { +/* + * pool_cache_invalidate_groups: + * + * Invalidate a chain of groups and destruct all objects. + */ +static void +pool_cache_invalidate_groups(pool_cache_t pc, pcg_t *pcg) +{ + void *object; + pcg_t *next; + int i; - /* - * No groups with any available objects. Allocate - * a new object, construct it, and return it to - * the caller. We will allocate a group, if necessary, - * when the object is freed back to the cache. - */ - pc->pc_misses++; - mutex_exit(&pc->pc_lock); - object = pool_get(pc->pc_pool, flags); - if (object != NULL && pc->pc_ctor != NULL) { - if ((*pc->pc_ctor)(pc->pc_arg, object, flags) != 0) { - pool_put(pc->pc_pool, object); - return (NULL); - } - } - KASSERT((((vaddr_t)object + pc->pc_pool->pr_itemoffset) & - (pc->pc_pool->pr_align - 1)) == 0); - if (object != NULL && pap != NULL) { -#ifdef POOL_VTOPHYS - *pap = POOL_VTOPHYS(object); -#else - *pap = POOL_PADDR_INVALID; -#endif + for (; pcg != NULL; pcg = next) { + next = pcg->pcg_next; + + for (i = 0; i < pcg->pcg_avail; i++) { + object = pcg->pcg_objects[i].pcgo_va; + pool_cache_destruct_object1(pc, object); } - FREECHECK_OUT(&pc->pc_freecheck, object); - return (object); + pool_put(&pcgpool, pcg); } +} - pc->pc_hits++; - pc->pc_nitems--; - object = pcg_get(pcg, pap); +/* + * pool_cache_invalidate: + * + * Invalidate a pool cache (destruct and release all of the + * cached objects). Does not reclaim objects from the pool. 
+ */ +void +pool_cache_invalidate(pool_cache_t pc) +{ + pcg_t *full, *empty, *part; - if (pcg->pcg_avail == 0) { - LIST_REMOVE(pcg, pcg_list); - LIST_INSERT_HEAD(&pc->pc_emptygroups, pcg, pcg_list); - } + mutex_enter(&pc->pc_lock); + full = pc->pc_fullgroups; + empty = pc->pc_emptygroups; + part = pc->pc_partgroups; + pc->pc_fullgroups = NULL; + pc->pc_emptygroups = NULL; + pc->pc_partgroups = NULL; + pc->pc_nfull = 0; + pc->pc_nempty = 0; + pc->pc_npart = 0; mutex_exit(&pc->pc_lock); - KASSERT((((vaddr_t)object + pc->pc_pool->pr_itemoffset) & - (pc->pc_pool->pr_align - 1)) == 0); - FREECHECK_OUT(&pc->pc_freecheck, object); - return (object); + pool_cache_invalidate_groups(pc, full); + pool_cache_invalidate_groups(pc, empty); + pool_cache_invalidate_groups(pc, part); } -/* - * pool_cache_put{,_paddr}: - * - * Put an object back to the pool cache (optionally caching the - * physical address of the object). - */ void -pool_cache_put_paddr(struct pool_cache *pc, void *object, paddr_t pa) +pool_cache_set_drain_hook(pool_cache_t pc, void (*fn)(void *, int), void *arg) { - struct pool_cache_group *pcg; - FREECHECK_IN(&pc->pc_freecheck, object); + pool_set_drain_hook(&pc->pc_pool, fn, arg); +} + +void +pool_cache_setlowat(pool_cache_t pc, int n) +{ + + pool_setlowat(&pc->pc_pool, n); +} + +void +pool_cache_sethiwat(pool_cache_t pc, int n) +{ + + pool_sethiwat(&pc->pc_pool, n); +} + +void +pool_cache_sethardlimit(pool_cache_t pc, int n, const char *warnmess, int ratecap) +{ + + pool_sethardlimit(&pc->pc_pool, n, warnmess, ratecap); +} + +static inline pool_cache_cpu_t * +pool_cache_cpu_enter(pool_cache_t pc, int *s) +{ + pool_cache_cpu_t *cc; - if (__predict_false((pc->pc_pool->pr_flags & PR_WANTED) != 0)) { - goto destruct; + /* + * Prevent other users of the cache from accessing our + * CPU-local data. To avoid touching shared state, we + * pull the neccessary information from CPU local data. + */ + crit_enter(); + cc = pc->pc_cpus[curcpu()->ci_index]; + KASSERT(cc->cc_cache == pc); + if (cc->cc_ipl != IPL_NONE) { + *s = splraiseipl(cc->cc_iplcookie); } + KASSERT(((uintptr_t)cc & (CACHE_LINE_SIZE - 1)) == 0); - mutex_enter(&pc->pc_lock); + return cc; +} - pcg = LIST_FIRST(&pc->pc_partgroups); - if (pcg == NULL) { - pcg = LIST_FIRST(&pc->pc_emptygroups); - if (pcg != NULL) { - LIST_REMOVE(pcg, pcg_list); - LIST_INSERT_HEAD(&pc->pc_partgroups, pcg, pcg_list); +static inline void +pool_cache_cpu_exit(pool_cache_cpu_t *cc, int *s) +{ + + /* No longer need exclusive access to the per-CPU data. */ + if (cc->cc_ipl != IPL_NONE) { + splx(*s); + } + crit_exit(); +} + +#if __GNUC_PREREQ__(3, 0) +__attribute ((noinline)) +#endif +pool_cache_cpu_t * +pool_cache_get_slow(pool_cache_cpu_t *cc, int *s, void **objectp, + paddr_t *pap, int flags) +{ + pcg_t *pcg, *cur; + uint64_t ncsw; + pool_cache_t pc; + void *object; + + pc = cc->cc_cache; + cc->cc_misses++; + + /* + * Nothing was available locally. Try and grab a group + * from the cache. + */ + if (!mutex_tryenter(&pc->pc_lock)) { + ncsw = curlwp->l_ncsw; + mutex_enter(&pc->pc_lock); + pc->pc_contended++; + + /* + * If we context switched while locking, then + * our view of the per-CPU data is invalid: + * retry. + */ + if (curlwp->l_ncsw != ncsw) { + mutex_exit(&pc->pc_lock); + pool_cache_cpu_exit(cc, s); + return pool_cache_cpu_enter(pc, s); } } - if (pcg == NULL) { + if ((pcg = pc->pc_fullgroups) != NULL) { /* - * No empty groups to free the object to. Attempt to - * allocate one. 
+ * If there's a full group, release our empty + * group back to the cache. Install the full + * group as cc_current and return. */ + if ((cur = cc->cc_current) != NULL) { + KASSERT(cur->pcg_avail == 0); + cur->pcg_next = pc->pc_emptygroups; + pc->pc_emptygroups = cur; + pc->pc_nempty++; + } + KASSERT(pcg->pcg_avail == PCG_NOBJECTS); + cc->cc_current = pcg; + pc->pc_fullgroups = pcg->pcg_next; + pc->pc_hits++; + pc->pc_nfull--; mutex_exit(&pc->pc_lock); - pcg = pool_get(&pcgpool, PR_NOWAIT); - if (pcg == NULL) { -destruct: + return cc; + } - /* - * Unable to allocate a cache group; destruct the object - * and free it back to the pool. - */ - pool_cache_destruct_object(pc, object); - return; - } - memset(pcg, 0, sizeof(*pcg)); - mutex_enter(&pc->pc_lock); - pc->pc_ngroups++; - LIST_INSERT_HEAD(&pc->pc_partgroups, pcg, pcg_list); + /* + * Nothing available locally or in cache. Take the slow + * path: fetch a new object from the pool and construct + * it. + */ + pc->pc_misses++; + mutex_exit(&pc->pc_lock); + pool_cache_cpu_exit(cc, s); + + object = pool_get(&pc->pc_pool, flags); + *objectp = object; + if (object == NULL) + return NULL; + + if ((*pc->pc_ctor)(pc->pc_arg, object, flags) != 0) { + pool_put(&pc->pc_pool, object); + *objectp = NULL; + return NULL; } - pc->pc_nitems++; - pcg_put(pcg, object, pa); + KASSERT((((vaddr_t)object + pc->pc_pool.pr_itemoffset) & + (pc->pc_pool.pr_align - 1)) == 0); - if (pcg->pcg_avail == PCG_NOBJECTS) { - LIST_REMOVE(pcg, pcg_list); - LIST_INSERT_HEAD(&pc->pc_fullgroups, pcg, pcg_list); + if (pap != NULL) { +#ifdef POOL_VTOPHYS + *pap = POOL_VTOPHYS(object); +#else + *pap = POOL_PADDR_INVALID; +#endif } - mutex_exit(&pc->pc_lock); + + FREECHECK_OUT(&pc->pc_freecheck, object); + return NULL; } /* - * pool_cache_destruct_object: + * pool_cache_get{,_paddr}: * - * Force destruction of an object and its release back into - * the pool. + * Get an object from a pool cache (optionally returning + * the physical address of the object). */ -void -pool_cache_destruct_object(struct pool_cache *pc, void *object) +void * +pool_cache_get_paddr(pool_cache_t pc, int flags, paddr_t *pap) { + pool_cache_cpu_t *cc; + pcg_t *pcg; + void *object; + int s; + +#ifdef LOCKDEBUG + if (flags & PR_WAITOK) + ASSERT_SLEEPABLE(NULL, "pool_cache_get(PR_WAITOK)"); +#endif + + cc = pool_cache_cpu_enter(pc, &s); + do { + /* Try and allocate an object from the current group. */ + pcg = cc->cc_current; + if (pcg != NULL && pcg->pcg_avail > 0) { + object = pcg->pcg_objects[--pcg->pcg_avail].pcgo_va; + if (pap != NULL) + *pap = pcg->pcg_objects[pcg->pcg_avail].pcgo_pa; + pcg->pcg_objects[pcg->pcg_avail].pcgo_va = NULL; + KASSERT(pcg->pcg_avail <= PCG_NOBJECTS); + KASSERT(object != NULL); + cc->cc_hits++; + pool_cache_cpu_exit(cc, &s); + FREECHECK_OUT(&pc->pc_freecheck, object); + return object; + } - if (pc->pc_dtor != NULL) - (*pc->pc_dtor)(pc->pc_arg, object); - pool_put(pc->pc_pool, object); + /* + * That failed. If the previous group isn't empty, swap + * it with the current group and allocate from there. + */ + pcg = cc->cc_previous; + if (pcg != NULL && pcg->pcg_avail > 0) { + cc->cc_previous = cc->cc_current; + cc->cc_current = pcg; + continue; + } + + /* + * Can't allocate from either group: try the slow path. + * If get_slow() allocated an object for us, or if + * no more objects are available, it will return NULL. + * Otherwise, we need to retry. 
+ */ + cc = pool_cache_get_slow(cc, &s, &object, pap, flags); + } while (cc != NULL); + + return object; } -/* - * pool_do_cache_invalidate_grouplist: - * - * Invalidate a single grouplist and destruct all objects. - * XXX This is too expensive. We should swap the list then - * unlock. - */ -static void -pool_do_cache_invalidate_grouplist(struct pool_cache_grouplist *pcgsl, - struct pool_cache *pc, struct pool_pagelist *pq, - struct pool_cache_grouplist *pcgdl) +#if __GNUC_PREREQ__(3, 0) +__attribute ((noinline)) +#endif +pool_cache_cpu_t * +pool_cache_put_slow(pool_cache_cpu_t *cc, int *s, void *object, paddr_t pa) { - struct pool_cache_group *pcg; - void *object; + pcg_t *pcg, *cur; + uint64_t ncsw; + pool_cache_t pc; - KASSERT(mutex_owned(&pc->pc_lock)); - KASSERT(mutex_owned(&pc->pc_pool->pr_lock)); + pc = cc->cc_cache; + cc->cc_misses++; - while ((pcg = LIST_FIRST(pcgsl)) != NULL) { - pc->pc_ngroups--; - LIST_REMOVE(pcg, pcg_list); - LIST_INSERT_HEAD(pcgdl, pcg, pcg_list); - pc->pc_nitems -= pcg->pcg_avail; - mutex_exit(&pc->pc_pool->pr_lock); - mutex_exit(&pc->pc_lock); + /* + * No free slots locally. Try to grab an empty, unused + * group from the cache. + */ + if (!mutex_tryenter(&pc->pc_lock)) { + ncsw = curlwp->l_ncsw; + mutex_enter(&pc->pc_lock); + pc->pc_contended++; - while (pcg->pcg_avail != 0) { - object = pcg_get(pcg, NULL); - if (pc->pc_dtor != NULL) - (*pc->pc_dtor)(pc->pc_arg, object); - mutex_enter(&pc->pc_pool->pr_lock); - pool_do_put(pc->pc_pool, object, pq); - mutex_exit(&pc->pc_pool->pr_lock); + /* + * If we context switched while locking, then + * our view of the per-CPU data is invalid: + * retry. + */ + if (curlwp->l_ncsw != ncsw) { + mutex_exit(&pc->pc_lock); + pool_cache_cpu_exit(cc, s); + return pool_cache_cpu_enter(pc, s); } + } - mutex_enter(&pc->pc_lock); - mutex_enter(&pc->pc_pool->pr_lock); + if ((pcg = pc->pc_emptygroups) != NULL) { + /* + * If there's a empty group, release our full + * group back to the cache. Install the empty + * group as cc_current and return. + */ + if ((cur = cc->cc_current) != NULL) { + KASSERT(cur->pcg_avail == PCG_NOBJECTS); + cur->pcg_next = pc->pc_fullgroups; + pc->pc_fullgroups = cur; + pc->pc_nfull++; + } + KASSERT(pcg->pcg_avail == 0); + cc->cc_current = pcg; + pc->pc_emptygroups = pcg->pcg_next; + pc->pc_hits++; + pc->pc_nempty--; + mutex_exit(&pc->pc_lock); + return cc; } -} -static void -pool_do_cache_invalidate(struct pool_cache *pc, struct pool_pagelist *pq, - struct pool_cache_grouplist *pcgl) -{ + /* + * Nothing available locally or in cache. Take the + * slow path and try to allocate a new group that we + * can release to. + */ + pc->pc_misses++; + mutex_exit(&pc->pc_lock); + pool_cache_cpu_exit(cc, s); - KASSERT(mutex_owned(&pc->pc_lock)); - KASSERT(mutex_owned(&pc->pc_pool->pr_lock)); + /* + * If we can't allocate a new group, just throw the + * object away. + */ + pcg = pool_get(&pcgpool, PR_NOWAIT); + if (pcg == NULL) { + pool_cache_destruct_object(pc, object); + return NULL; + } +#ifdef DIAGNOSTIC + memset(pcg, 0, sizeof(*pcg)); +#else + pcg->pcg_avail = 0; +#endif - pool_do_cache_invalidate_grouplist(&pc->pc_fullgroups, pc, pq, pcgl); - pool_do_cache_invalidate_grouplist(&pc->pc_partgroups, pc, pq, pcgl); + /* + * Add the empty group to the cache and try again. 
+ */ + mutex_enter(&pc->pc_lock); + pcg->pcg_next = pc->pc_emptygroups; + pc->pc_emptygroups = pcg; + pc->pc_nempty++; + mutex_exit(&pc->pc_lock); - KASSERT(LIST_EMPTY(&pc->pc_partgroups)); - KASSERT(LIST_EMPTY(&pc->pc_fullgroups)); - KASSERT(pc->pc_nitems == 0); -} + return pool_cache_cpu_enter(pc, s); +} /* - * pool_cache_invalidate: + * pool_cache_put{,_paddr}: * - * Invalidate a pool cache (destruct and release all of the - * cached objects). + * Put an object back to the pool cache (optionally caching the + * physical address of the object). */ void -pool_cache_invalidate(struct pool_cache *pc) +pool_cache_put_paddr(pool_cache_t pc, void *object, paddr_t pa) { - struct pool_pagelist pq; - struct pool_cache_grouplist pcgl; - - LIST_INIT(&pq); - LIST_INIT(&pcgl); + pool_cache_cpu_t *cc; + pcg_t *pcg; + int s; - mutex_enter(&pc->pc_lock); - mutex_enter(&pc->pc_pool->pr_lock); + FREECHECK_IN(&pc->pc_freecheck, object); - pool_do_cache_invalidate(pc, &pq, &pcgl); + cc = pool_cache_cpu_enter(pc, &s); + do { + /* If the current group isn't full, release it there. */ + pcg = cc->cc_current; + if (pcg != NULL && pcg->pcg_avail < PCG_NOBJECTS) { + KASSERT(pcg->pcg_objects[pcg->pcg_avail].pcgo_va + == NULL); + pcg->pcg_objects[pcg->pcg_avail].pcgo_va = object; + pcg->pcg_objects[pcg->pcg_avail].pcgo_pa = pa; + pcg->pcg_avail++; + cc->cc_hits++; + pool_cache_cpu_exit(cc, &s); + return; + } - mutex_exit(&pc->pc_pool->pr_lock); - mutex_exit(&pc->pc_lock); + /* + * That failed. If the previous group is empty, swap + * it with the current group and try again. + */ + pcg = cc->cc_previous; + if (pcg != NULL && pcg->pcg_avail == 0) { + cc->cc_previous = cc->cc_current; + cc->cc_current = pcg; + continue; + } - pr_pagelist_free(pc->pc_pool, &pq); - pcg_grouplist_free(&pcgl); + /* + * Can't free to either group: try the slow path. + * If put_slow() releases the object for us, it + * will return NULL. Otherwise we need to retry. + */ + cc = pool_cache_put_slow(cc, &s, object, pa); + } while (cc != NULL); } /* - * pool_cache_reclaim: + * pool_cache_xcall: * - * Reclaim a pool cache for pool_reclaim(). + * Transfer objects from the per-CPU cache to the global cache. + * Run within a cross-call thread. */ static void -pool_cache_reclaim(struct pool_cache *pc, struct pool_pagelist *pq, - struct pool_cache_grouplist *pcgl) +pool_cache_xcall(pool_cache_t pc) { - - /* - * We're locking in the wrong order (normally pool_cache -> pool, - * but the pool is already locked when we get here), so we have - * to use trylock. If we can't lock the pool_cache, it's not really - * a big deal here. + pool_cache_cpu_t *cc; + pcg_t *prev, *cur, **list; + int s = 0; /* XXXgcc */ + + cc = pool_cache_cpu_enter(pc, &s); + cur = cc->cc_current; + cc->cc_current = NULL; + prev = cc->cc_previous; + cc->cc_previous = NULL; + pool_cache_cpu_exit(cc, &s); + + /* + * XXXSMP Go to splvm to prevent kernel_lock from being taken, + * because locks at IPL_SOFTXXX are still spinlocks. Does not + * apply to IPL_SOFTBIO. Cross-call threads do not take the + * kernel_lock. 
*/ - if (mutex_tryenter(&pc->pc_lock) == 0) - return; - - pool_do_cache_invalidate(pc, pq, pcgl); - + s = splvm(); + mutex_enter(&pc->pc_lock); + if (cur != NULL) { + if (cur->pcg_avail == PCG_NOBJECTS) { + list = &pc->pc_fullgroups; + pc->pc_nfull++; + } else if (cur->pcg_avail == 0) { + list = &pc->pc_emptygroups; + pc->pc_nempty++; + } else { + list = &pc->pc_partgroups; + pc->pc_npart++; + } + cur->pcg_next = *list; + *list = cur; + } + if (prev != NULL) { + if (prev->pcg_avail == PCG_NOBJECTS) { + list = &pc->pc_fullgroups; + pc->pc_nfull++; + } else if (prev->pcg_avail == 0) { + list = &pc->pc_emptygroups; + pc->pc_nempty++; + } else { + list = &pc->pc_partgroups; + pc->pc_npart++; + } + prev->pcg_next = *list; + *list = prev; + } mutex_exit(&pc->pc_lock); + splx(s); } /*