Please note that diffs are not public domain; they are subject to the copyright notices on the relevant files. =================================================================== RCS file: /ftp/cvs/cvsroot/src/sys/kern/subr_pool.c,v rcsdiff: /ftp/cvs/cvsroot/src/sys/kern/subr_pool.c,v: warning: Unknown phrases like `commitid ...;' are present. retrieving revision 1.128.2.13 retrieving revision 1.131.4.1 diff -u -p -r1.128.2.13 -r1.131.4.1 --- src/sys/kern/subr_pool.c 2007/11/01 21:10:14 1.128.2.13 +++ src/sys/kern/subr_pool.c 2007/10/14 11:48:46 1.131.4.1 @@ -1,12 +1,12 @@ -/* $NetBSD: subr_pool.c,v 1.128.2.13 2007/11/01 21:10:14 ad Exp $ */ +/* $NetBSD: subr_pool.c,v 1.131.4.1 2007/10/14 11:48:46 yamt Exp $ */ /*- - * Copyright (c) 1997, 1999, 2000, 2002, 2007 The NetBSD Foundation, Inc. + * Copyright (c) 1997, 1999, 2000, 2002 The NetBSD Foundation, Inc. * All rights reserved. * * This code is derived from software contributed to The NetBSD Foundation * by Paul Kranenburg; by Jason R. Thorpe of the Numerical Aerospace - * Simulation Facility, NASA Ames Research Center, and by Andrew Doran. + * Simulation Facility, NASA Ames Research Center. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -38,7 +38,7 @@ */ #include -__KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.128.2.13 2007/11/01 21:10:14 ad Exp $"); +__KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.131.4.1 2007/10/14 11:48:46 yamt Exp $"); #include "opt_pool.h" #include "opt_poollog.h" @@ -54,8 +54,6 @@ __KERNEL_RCSID(0, "$NetBSD: subr_pool.c, #include #include #include -#include -#include #include @@ -75,10 +73,6 @@ __KERNEL_RCSID(0, "$NetBSD: subr_pool.c, /* List of all pools */ LIST_HEAD(,pool) pool_head = LIST_HEAD_INITIALIZER(pool_head); -/* List of all caches. */ -LIST_HEAD(,pool_cache) pool_cache_head = - LIST_HEAD_INITIALIZER(pool_cache_head); - /* Private pool for page header structures */ #define PHPOOL_MAX 8 static struct pool phpool[PHPOOL_MAX]; @@ -96,7 +90,7 @@ static void *pool_page_alloc_meta(struct static void pool_page_free_meta(struct pool *, void *); /* allocator for pool metadata */ -struct pool_allocator pool_allocator_meta = { +static struct pool_allocator pool_allocator_meta = { pool_page_alloc_meta, pool_page_free_meta, .pa_backingmapptr = &kmem_map, }; @@ -107,9 +101,8 @@ int pool_inactive_time = 10; /* Next candidate for drainage (see pool_drain()) */ static struct pool *drainpp; -/* This lock protects both pool_head and drainpp. */ -static kmutex_t pool_head_lock; -static kcondvar_t pool_busy; +/* This spin lock protects both pool_head and drainpp. */ +struct simplelock pool_head_slock = SIMPLELOCK_INITIALIZER; typedef uint8_t pool_item_freelist_t; @@ -149,7 +142,7 @@ struct pool_item { #ifdef DIAGNOSTIC u_int pi_magic; #endif -#define PI_MAGIC 0xdeaddeadU +#define PI_MAGIC 0xdeadbeefU /* Other entries use only this list entry */ LIST_ENTRY(pool_item) pi_list; }; @@ -165,34 +158,30 @@ struct pool_item { * needless object construction/destruction; it is deferred until absolutely * necessary. * - * Caches are grouped into cache groups. Each cache group references up - * to PCG_NUMOBJECTS constructed objects. When a cache allocates an - * object from the pool, it calls the object's constructor and places it - * into a cache group. When a cache group frees an object back to the - * pool, it first calls the object's destructor. This allows the object - * to persist in constructed form while freed to the cache. 
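/*
 * A sketch of the cache group ("magazine") the comment above describes,
 * reconstructed from the accessors used elsewhere in this diff
 * (pcg_avail, pcg_objects[].pcgo_va/.pcgo_pa, PCG_NOBJECTS).  The
 * authoritative declaration lives in <sys/pool.h>; the linkage differs
 * between the two revisions (a pcg_next pointer on the 1.128.2.13 side,
 * a LIST_ENTRY on the 1.131.4.1 side), so only the common core is shown
 * and the typedef name here is illustrative.
 */
typedef struct pool_cache_group {
	u_int	pcg_avail;		/* # of valid entries below */
	struct {
		void	*pcgo_va;	/* object virtual address */
		paddr_t	 pcgo_pa;	/* object physical address */
	} pcg_objects[PCG_NOBJECTS];
} pcg_t;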
- * - * The pool references each cache, so that when a pool is drained by the - * pagedaemon, it can drain each individual cache as well. Each time a - * cache is drained, the most idle cache group is freed to the pool in - * its entirety. + * Caches are grouped into cache groups. Each cache group references + * up to 16 constructed objects. When a cache allocates an object + * from the pool, it calls the object's constructor and places it into + * a cache group. When a cache group frees an object back to the pool, + * it first calls the object's destructor. This allows the object to + * persist in constructed form while freed to the cache. + * + * Multiple caches may exist for each pool. This allows a single + * object type to have multiple constructed forms. The pool references + * each cache, so that when a pool is drained by the pagedaemon, it can + * drain each individual cache as well. Each time a cache is drained, + * the most idle cache group is freed to the pool in its entirety. * * Pool caches are layed on top of pools. By layering them, we can avoid * the complexity of cache management for pools which would not benefit * from it. */ +/* The cache group pool. */ static struct pool pcgpool; -static struct pool cache_pool; -static struct pool cache_cpu_pool; -static pool_cache_cpu_t *pool_cache_put_slow(pool_cache_cpu_t *, int *, - void *, paddr_t); -static pool_cache_cpu_t *pool_cache_get_slow(pool_cache_cpu_t *, int *, - void **, paddr_t *, int); -static void pool_cache_cpu_init1(struct cpu_info *, pool_cache_t); -static void pool_cache_invalidate_groups(pool_cache_t, pcg_t *); -static void pool_cache_xcall(pool_cache_t); +static void pool_cache_reclaim(struct pool_cache *, struct pool_pagelist *, + struct pool_cache_grouplist *); +static void pcg_grouplist_free(struct pool_cache_grouplist *); static int pool_catchup(struct pool *); static void pool_prime_page(struct pool *, void *, @@ -433,12 +422,16 @@ static void pr_pagelist_free(struct pool *pp, struct pool_pagelist *pq) { struct pool_item_header *ph; + int s; while ((ph = LIST_FIRST(pq)) != NULL) { LIST_REMOVE(ph, ph_pagelist); pool_allocator_free(pp, ph->ph_page); - if ((pp->pr_roflags & PR_PHINPAGE) == 0) + if ((pp->pr_roflags & PR_PHINPAGE) == 0) { + s = splvm(); pool_put(pp->pr_phpool, ph); + splx(s); + } } } @@ -450,7 +443,7 @@ pr_rmpage(struct pool *pp, struct pool_i struct pool_pagelist *pq) { - KASSERT(mutex_owned(&pp->pr_lock)); + LOCK_ASSERT(simple_lock_held(&pp->pr_slock)); /* * If the page was idle, decrement the idle page count. 
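/*
 * The two locking idioms on either side of this diff, in isolation
 * (minimal sketch; the function names are illustrative).  The
 * 1.131.4.1 side pairs an explicit spl with a simple_lock, because a
 * simple_lock by itself does not block interrupt-level users of the
 * pool; the 1.128.2.13 side uses one kmutex initialized with the
 * pool's IPL, so the mutex raises the priority itself.
 */
static void
pool_touch_simplelock(struct pool *pp)	/* 1.131.4.1 idiom */
{
	int s;

	s = splvm();			/* block interrupt users */
	simple_lock(&pp->pr_slock);
	/* ... modify pool state ... */
	simple_unlock(&pp->pr_slock);
	splx(s);
}

static void
pool_touch_kmutex(struct pool *pp)	/* 1.128.2.13 idiom */
{

	mutex_enter(&pp->pr_lock);	/* spl handled by the mutex */
	/* ... modify pool state ... */
	mutex_exit(&pp->pr_lock);
}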
@@ -564,9 +557,6 @@ pool_subsystem_init(void) __link_set_decl(pools, struct link_pool_init); struct link_pool_init * const *pi; - mutex_init(&pool_head_lock, MUTEX_DEFAULT, IPL_NONE); - cv_init(&pool_busy, "poolbusy"); - __link_set_foreach(pi, pools) pool_init((*pi)->pp, (*pi)->size, (*pi)->align, (*pi)->align_offset, (*pi)->flags, (*pi)->wchan, @@ -578,12 +568,6 @@ pool_subsystem_init(void) SLIST_REMOVE_HEAD(&pa_deferinitq, pa_q); pa_reclaim_register(pa); } - - pool_init(&cache_pool, sizeof(struct pool_cache), CACHE_LINE_SIZE, - 0, 0, "pcache", &pool_allocator_nointr, IPL_NONE); - - pool_init(&cache_cpu_pool, sizeof(pool_cache_cpu_t), CACHE_LINE_SIZE, - 0, 0, "pcachecpu", &pool_allocator_nointr, IPL_NONE); } /* @@ -600,7 +584,7 @@ pool_init(struct pool *pp, size_t size, struct pool *pp1; #endif size_t trysize, phsize; - int off, slack; + int off, slack, s; KASSERT((1UL << (CHAR_BIT * sizeof(pool_item_freelist_t))) - 2 >= PHPOOL_FREELIST_NELEM(PHPOOL_MAX - 1)); @@ -641,7 +625,7 @@ pool_init(struct pool *pp, size_t size, TAILQ_INIT(&palloc->pa_list); - mutex_init(&palloc->pa_lock, MUTEX_DEFAULT, IPL_VM); + simple_lock_init(&palloc->pa_slock); palloc->pa_pagemask = ~(palloc->pa_pagesz - 1); palloc->pa_pageshift = ffs(palloc->pa_pagesz) - 1; @@ -669,7 +653,7 @@ pool_init(struct pool *pp, size_t size, LIST_INIT(&pp->pr_emptypages); LIST_INIT(&pp->pr_fullpages); LIST_INIT(&pp->pr_partpages); - pp->pr_cache = NULL; + LIST_INIT(&pp->pr_cachelist); pp->pr_curpage = NULL; pp->pr_npages = 0; pp->pr_minitems = 0; @@ -770,7 +754,6 @@ pool_init(struct pool *pp, size_t size, pp->pr_npagefree = 0; pp->pr_hiwat = 0; pp->pr_nidle = 0; - pp->pr_refcnt = 0; #ifdef POOL_DIAGNOSTIC if (flags & PR_LOGGING) { @@ -786,9 +769,7 @@ pool_init(struct pool *pp, size_t size, pp->pr_entered_file = NULL; pp->pr_entered_line = 0; - mutex_init(&pp->pr_lock, MUTEX_DEFAULT, ipl); - cv_init(&pp->pr_cv, wchan); - pp->pr_ipl = ipl; + simple_lock_init(&pp->pr_slock); /* * Initialize private page header pool and cache magazine pool if we @@ -817,25 +798,21 @@ pool_init(struct pool *pp, size_t size, pool_init(&psppool, POOL_SUBPAGE, POOL_SUBPAGE, 0, PR_RECURSIVE, "psppool", &pool_allocator_meta, IPL_VM); #endif - pool_init(&pcgpool, sizeof(pcg_t), CACHE_LINE_SIZE, 0, 0, - "cachegrp", &pool_allocator_meta, IPL_VM); + pool_init(&pcgpool, sizeof(struct pool_cache_group), 0, 0, + 0, "pcgpool", &pool_allocator_meta, IPL_VM); } - if (__predict_true(!cold)) { - /* Insert into the list of all pools. */ - mutex_enter(&pool_head_lock); - LIST_INSERT_HEAD(&pool_head, pp, pr_poollist); - mutex_exit(&pool_head_lock); - - /* Insert this into the list of pools using this allocator. */ - mutex_enter(&palloc->pa_lock); - TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list); - mutex_exit(&palloc->pa_lock); - } else { - LIST_INSERT_HEAD(&pool_head, pp, pr_poollist); - TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list); - } + /* Insert into the list of all pools. */ + simple_lock(&pool_head_slock); + LIST_INSERT_HEAD(&pool_head, pp, pr_poollist); + simple_unlock(&pool_head_slock); + /* Insert this into the list of pools using this allocator. 
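/*
 * What a typical client of the initialization path above looks like
 * (hypothetical "struct foo" consumer; the call matches the
 * 8-argument pool_init() signature used throughout this file).
 */
struct foo {
	void	*f_buf;
	int	 f_refcnt;
};

static struct pool foo_pool;

void
foo_pool_attach(void)
{

	pool_init(&foo_pool, sizeof(struct foo), 0 /* align */,
	    0 /* align_offset */, 0 /* flags */, "foopl",
	    &pool_allocator_nointr, IPL_NONE);
}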
*/ + s = splvm(); + simple_lock(&palloc->pa_slock); + TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list); + simple_unlock(&palloc->pa_slock); + splx(s); pool_reclaim_register(pp); } @@ -847,25 +824,27 @@ pool_destroy(struct pool *pp) { struct pool_pagelist pq; struct pool_item_header *ph; + int s; /* Remove from global pool list */ - mutex_enter(&pool_head_lock); - while (pp->pr_refcnt != 0) - cv_wait(&pool_busy, &pool_head_lock); + simple_lock(&pool_head_slock); LIST_REMOVE(pp, pr_poollist); if (drainpp == pp) drainpp = NULL; - mutex_exit(&pool_head_lock); + simple_unlock(&pool_head_slock); /* Remove this pool from its allocator's list of pools. */ pool_reclaim_unregister(pp); - mutex_enter(&pp->pr_alloc->pa_lock); + s = splvm(); + simple_lock(&pp->pr_alloc->pa_slock); TAILQ_REMOVE(&pp->pr_alloc->pa_list, pp, pr_alloc_list); - mutex_exit(&pp->pr_alloc->pa_lock); + simple_unlock(&pp->pr_alloc->pa_slock); + splx(s); - mutex_enter(&pp->pr_lock); + s = splvm(); + simple_lock(&pp->pr_slock); - KASSERT(pp->pr_cache == NULL); + KASSERT(LIST_EMPTY(&pp->pr_cachelist)); #ifdef DIAGNOSTIC if (pp->pr_nout != 0) { @@ -883,7 +862,8 @@ pool_destroy(struct pool *pp) while ((ph = LIST_FIRST(&pp->pr_emptypages)) != NULL) pr_rmpage(pp, ph, &pq); - mutex_exit(&pp->pr_lock); + simple_unlock(&pp->pr_slock); + splx(s); pr_pagelist_free(pp, &pq); @@ -891,9 +871,6 @@ pool_destroy(struct pool *pp) if ((pp->pr_roflags & PR_LOGGING) != 0) free(pp->pr_log, M_TEMP); #endif - - cv_destroy(&pp->pr_cv); - mutex_destroy(&pp->pr_lock); } void @@ -913,17 +890,23 @@ static struct pool_item_header * pool_alloc_item_header(struct pool *pp, void *storage, int flags) { struct pool_item_header *ph; + int s; + + LOCK_ASSERT(simple_lock_held(&pp->pr_slock) == 0); if ((pp->pr_roflags & PR_PHINPAGE) != 0) ph = (struct pool_item_header *) ((char *)storage + pp->pr_phoffset); - else + else { + s = splvm(); ph = pool_get(pp->pr_phpool, flags); + splx(s); + } return (ph); } /* - * Grab an item from the pool. + * Grab an item from the pool; must be called at appropriate spl level */ void * #ifdef POOL_DIAGNOSTIC @@ -950,7 +933,7 @@ pool_get(struct pool *pp, int flags) ASSERT_SLEEPABLE(NULL, "pool_get(PR_WAITOK)"); #endif - mutex_enter(&pp->pr_lock); + simple_lock(&pp->pr_slock); pr_enter(pp, file, line); startover: @@ -962,7 +945,7 @@ pool_get(struct pool *pp, int flags) #ifdef DIAGNOSTIC if (__predict_false(pp->pr_nout > pp->pr_hardlimit)) { pr_leave(pp); - mutex_exit(&pp->pr_lock); + simple_unlock(&pp->pr_slock); panic("pool_get: %s: crossed hard limit", pp->pr_wchan); } #endif @@ -974,9 +957,9 @@ pool_get(struct pool *pp, int flags) * and check the hardlimit condition again. 
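/*
 * The hook invoked by the hard-limit path above is registered with
 * pool_set_drain_hook().  A hypothetical subsystem that can shed
 * cached objects on demand might wire it up like this (foo_drain is
 * illustrative; foo_pool is from the earlier sketch):
 */
static void
foo_drain(void *arg, int flags)
{

	/* Release whatever foo objects we can spare; "flags" is the
	   PR_WAITOK/PR_NOWAIT mode of the starved allocation. */
}

void
foo_pool_limits(void)
{

	pool_set_drain_hook(&foo_pool, foo_drain, NULL);
	pool_sethardlimit(&foo_pool, 1024, "foo_pool: hard limit", 60);
}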
*/ pr_leave(pp); - mutex_exit(&pp->pr_lock); + simple_unlock(&pp->pr_slock); (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, flags); - mutex_enter(&pp->pr_lock); + simple_lock(&pp->pr_slock); pr_enter(pp, file, line); if (pp->pr_nout < pp->pr_hardlimit) goto startover; @@ -989,7 +972,7 @@ pool_get(struct pool *pp, int flags) */ pp->pr_flags |= PR_WANTED; pr_leave(pp); - cv_wait(&pp->pr_cv, &pp->pr_lock); + ltsleep(pp, PSWP, pp->pr_wchan, 0, &pp->pr_slock); pr_enter(pp, file, line); goto startover; } @@ -1005,7 +988,7 @@ pool_get(struct pool *pp, int flags) pp->pr_nfail++; pr_leave(pp); - mutex_exit(&pp->pr_lock); + simple_unlock(&pp->pr_slock); return (NULL); } @@ -1020,7 +1003,7 @@ pool_get(struct pool *pp, int flags) #ifdef DIAGNOSTIC if (pp->pr_nitems != 0) { - mutex_exit(&pp->pr_lock); + simple_unlock(&pp->pr_slock); printf("pool_get: %s: curpage NULL, nitems %u\n", pp->pr_wchan, pp->pr_nitems); panic("pool_get: nitems inconsistent"); @@ -1047,7 +1030,7 @@ pool_get(struct pool *pp, int flags) pp->pr_nfail++; pr_leave(pp); - mutex_exit(&pp->pr_lock); + simple_unlock(&pp->pr_slock); return (NULL); } @@ -1058,7 +1041,7 @@ pool_get(struct pool *pp, int flags) #ifdef DIAGNOSTIC if (__predict_false(ph->ph_nmissing == pp->pr_itemsperpage)) { pr_leave(pp); - mutex_exit(&pp->pr_lock); + simple_unlock(&pp->pr_slock); panic("pool_get: %s: page empty", pp->pr_wchan); } #endif @@ -1070,13 +1053,13 @@ pool_get(struct pool *pp, int flags) v = pi = LIST_FIRST(&ph->ph_itemlist); if (__predict_false(v == NULL)) { pr_leave(pp); - mutex_exit(&pp->pr_lock); + simple_unlock(&pp->pr_slock); panic("pool_get: %s: page empty", pp->pr_wchan); } #ifdef DIAGNOSTIC if (__predict_false(pp->pr_nitems == 0)) { pr_leave(pp); - mutex_exit(&pp->pr_lock); + simple_unlock(&pp->pr_slock); printf("pool_get: %s: items on itemlist, nitems %u\n", pp->pr_wchan, pp->pr_nitems); panic("pool_get: nitems inconsistent"); @@ -1123,7 +1106,7 @@ pool_get(struct pool *pp, int flags) if (__predict_false((pp->pr_roflags & PR_NOTOUCH) == 0 && !LIST_EMPTY(&ph->ph_itemlist))) { pr_leave(pp); - mutex_exit(&pp->pr_lock); + simple_unlock(&pp->pr_slock); panic("pool_get: %s: nmissing inconsistent", pp->pr_wchan); } @@ -1152,7 +1135,7 @@ pool_get(struct pool *pp, int flags) */ } - mutex_exit(&pp->pr_lock); + simple_unlock(&pp->pr_slock); KASSERT((((vaddr_t)v + pp->pr_itemoffset) & (pp->pr_align - 1)) == 0); FREECHECK_OUT(&pp->pr_freecheck, v); return (v); @@ -1167,9 +1150,8 @@ pool_do_put(struct pool *pp, void *v, st struct pool_item *pi = v; struct pool_item_header *ph; - KASSERT(mutex_owned(&pp->pr_lock)); + LOCK_ASSERT(simple_lock_held(&pp->pr_slock)); FREECHECK_IN(&pp->pr_freecheck, v); - LOCKDEBUG_MEM_CHECK(v, pp->pr_size); #ifdef DIAGNOSTIC if (__predict_false(pp->pr_nout == 0)) { @@ -1219,7 +1201,7 @@ pool_do_put(struct pool *pp, void *v, st pp->pr_flags &= ~PR_WANTED; if (ph->ph_nmissing == 0) pp->pr_nidle++; - cv_broadcast(&pp->pr_cv); + wakeup((void *)pp); return; } @@ -1271,7 +1253,7 @@ pool_do_put(struct pool *pp, void *v, st } /* - * Return resource to the pool. 
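/*
 * The usual caller's view of the put path below, paired with
 * pool_get() (sketch; foo_pool as before).  With PR_WAITOK the get
 * would sleep rather than fail, so the NULL check matters mainly for
 * PR_NOWAIT and PR_LIMITFAIL callers.
 */
static int
foo_do_work(void)
{
	struct foo *fp;

	fp = pool_get(&foo_pool, PR_NOWAIT);
	if (fp == NULL)
		return ENOMEM;
	/* ... use the object ... */
	pool_put(&foo_pool, fp);
	return 0;
}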
+ * Return resource to the pool; must be called at appropriate spl level */ #ifdef POOL_DIAGNOSTIC void @@ -1281,7 +1263,7 @@ _pool_put(struct pool *pp, void *v, cons LIST_INIT(&pq); - mutex_enter(&pp->pr_lock); + simple_lock(&pp->pr_slock); pr_enter(pp, file, line); pr_log(pp, v, PRLOG_PUT, file, line); @@ -1289,7 +1271,7 @@ _pool_put(struct pool *pp, void *v, cons pool_do_put(pp, v, &pq); pr_leave(pp); - mutex_exit(&pp->pr_lock); + simple_unlock(&pp->pr_slock); pr_pagelist_free(pp, &pq); } @@ -1303,9 +1285,9 @@ pool_put(struct pool *pp, void *v) LIST_INIT(&pq); - mutex_enter(&pp->pr_lock); + simple_lock(&pp->pr_slock); pool_do_put(pp, v, &pq); - mutex_exit(&pp->pr_lock); + simple_unlock(&pp->pr_slock); pr_pagelist_free(pp, &pq); } @@ -1328,7 +1310,7 @@ pool_grow(struct pool *pp, int flags) struct pool_item_header *ph = NULL; char *cp; - mutex_exit(&pp->pr_lock); + simple_unlock(&pp->pr_slock); cp = pool_allocator_alloc(pp, flags); if (__predict_true(cp != NULL)) { ph = pool_alloc_item_header(pp, cp, flags); @@ -1337,11 +1319,11 @@ pool_grow(struct pool *pp, int flags) if (cp != NULL) { pool_allocator_free(pp, cp); } - mutex_enter(&pp->pr_lock); + simple_lock(&pp->pr_slock); return ENOMEM; } - mutex_enter(&pp->pr_lock); + simple_lock(&pp->pr_slock); pool_prime_page(pp, cp, ph); pp->pr_npagealloc++; return 0; @@ -1356,7 +1338,7 @@ pool_prime(struct pool *pp, int n) int newpages; int error = 0; - mutex_enter(&pp->pr_lock); + simple_lock(&pp->pr_slock); newpages = roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage; @@ -1371,7 +1353,7 @@ pool_prime(struct pool *pp, int n) if (pp->pr_minpages >= pp->pr_maxpages) pp->pr_maxpages = pp->pr_minpages + 1; /* XXX */ - mutex_exit(&pp->pr_lock); + simple_unlock(&pp->pr_slock); return error; } @@ -1389,7 +1371,7 @@ pool_prime_page(struct pool *pp, void *s const unsigned int ioff = pp->pr_itemoffset; int n; - KASSERT(mutex_owned(&pp->pr_lock)); + LOCK_ASSERT(simple_lock_held(&pp->pr_slock)); #ifdef DIAGNOSTIC if ((pp->pr_roflags & PR_NOALIGN) == 0 && @@ -1504,7 +1486,7 @@ void pool_setlowat(struct pool *pp, int n) { - mutex_enter(&pp->pr_lock); + simple_lock(&pp->pr_slock); pp->pr_minitems = n; pp->pr_minpages = (n == 0) @@ -1520,27 +1502,27 @@ pool_setlowat(struct pool *pp, int n) */ } - mutex_exit(&pp->pr_lock); + simple_unlock(&pp->pr_slock); } void pool_sethiwat(struct pool *pp, int n) { - mutex_enter(&pp->pr_lock); + simple_lock(&pp->pr_slock); pp->pr_maxpages = (n == 0) ? 0 : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage; - mutex_exit(&pp->pr_lock); + simple_unlock(&pp->pr_slock); } void pool_sethardlimit(struct pool *pp, int n, const char *warnmess, int ratecap) { - mutex_enter(&pp->pr_lock); + simple_lock(&pp->pr_slock); pp->pr_hardlimit = n; pp->pr_hardlimit_warning = warnmess; @@ -1556,7 +1538,7 @@ pool_sethardlimit(struct pool *pp, int n ? 0 : roundup(n, pp->pr_itemsperpage) / pp->pr_itemsperpage; - mutex_exit(&pp->pr_lock); + simple_unlock(&pp->pr_slock); } /* @@ -1570,10 +1552,10 @@ pool_reclaim(struct pool *pp) #endif { struct pool_item_header *ph, *phnext; + struct pool_cache *pc; struct pool_pagelist pq; + struct pool_cache_grouplist pcgl; struct timeval curtime, diff; - bool klock; - int rv; if (pp->pr_drain_hook != NULL) { /* @@ -1582,36 +1564,18 @@ pool_reclaim(struct pool *pp) (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, PR_NOWAIT); } - /* - * XXXSMP Because mutexes at IPL_SOFTXXX are still spinlocks, - * and we are called from the pagedaemon without kernel_lock. - * Does not apply to IPL_SOFTBIO. 
- */ - switch (pp->pr_ipl) { - case IPL_SOFTNET: - case IPL_SOFTCLOCK: - case IPL_SOFTSERIAL: - KERNEL_LOCK(1, NULL); - klock = true; - break; - default: - klock = false; - break; - } - - /* Reclaim items from the pool's cache (if any). */ - if (pp->pr_cache != NULL) - pool_cache_invalidate(pp->pr_cache); - - if (mutex_tryenter(&pp->pr_lock) == 0) { - if (klock) { - KERNEL_UNLOCK_ONE(NULL); - } + if (simple_lock_try(&pp->pr_slock) == 0) return (0); - } pr_enter(pp, file, line); LIST_INIT(&pq); + LIST_INIT(&pcgl); + + /* + * Reclaim items from the pool's caches. + */ + LIST_FOREACH(pc, &pp->pr_cachelist, pc_poollist) + pool_cache_reclaim(pc, &pq, &pcgl); getmicrotime(&curtime); @@ -1640,87 +1604,42 @@ pool_reclaim(struct pool *pp) } pr_leave(pp); - mutex_exit(&pp->pr_lock); - - if (LIST_EMPTY(&pq)) - rv = 0; - else { - pr_pagelist_free(pp, &pq); - rv = 1; - } - - if (klock) { - KERNEL_UNLOCK_ONE(NULL); - } + simple_unlock(&pp->pr_slock); + if (LIST_EMPTY(&pq) && LIST_EMPTY(&pcgl)) + return 0; - return (rv); + pr_pagelist_free(pp, &pq); + pcg_grouplist_free(&pcgl); + return (1); } /* - * Drain pools, one at a time. This is a two stage process; - * drain_start kicks off a cross call to drain CPU-level caches - * if the pool has an associated pool_cache. drain_end waits - * for those cross calls to finish, and then drains the cache - * (if any) and pool. + * Drain pools, one at a time. + * + * Note, we must never be called from an interrupt context. * - * Note, must never be called from interrupt context. + * XXX Pool can disappear while draining. */ void -pool_drain_start(struct pool **ppp, uint64_t *wp) +pool_drain(void *arg) { struct pool *pp; - - KASSERT(!LIST_EMPTY(&pool_head)); + int s; pp = NULL; - - /* Find next pool to drain, and add a reference. */ - mutex_enter(&pool_head_lock); - do { - if (drainpp == NULL) { - drainpp = LIST_FIRST(&pool_head); - } - if (drainpp != NULL) { - pp = drainpp; - drainpp = LIST_NEXT(pp, pr_poollist); - } - /* - * Skip completely idle pools. We depend on at least - * one pool in the system being active. - */ - } while (pp == NULL || pp->pr_npages == 0); - pp->pr_refcnt++; - mutex_exit(&pool_head_lock); - - /* If there is a pool_cache, drain CPU level caches. */ - *ppp = pp; - if (pp->pr_cache != NULL) { - *wp = xc_broadcast(0, (xcfunc_t)pool_cache_xcall, - pp->pr_cache, NULL); - } -} - -void -pool_drain_end(struct pool *pp, uint64_t where) -{ - - if (pp == NULL) - return; - - KASSERT(pp->pr_refcnt > 0); - - /* Wait for remote draining to complete. */ - if (pp->pr_cache != NULL) - xc_wait(where); - - /* Drain the cache (if any) and pool.. */ - pool_reclaim(pp); - - /* Finally, unlock the pool. 
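/*
 * The caller's view of the two-phase drain on the 1.128.2.13 side
 * (hypothetical wrapper; the real caller is the pagedaemon).  The
 * split lets the cross-call that empties per-CPU caches overlap with
 * other reclamation work.
 */
static void
reclaim_from_pools(void)
{
	struct pool *pp;
	uint64_t where;

	pool_drain_start(&pp, &where);	/* pick a pool, start xcall */
	/* ... reclaim elsewhere while CPUs drain their caches ... */
	pool_drain_end(pp, where);	/* wait for xcall, drain pool */
}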
*/ - mutex_enter(&pool_head_lock); - pp->pr_refcnt--; - cv_broadcast(&pool_busy); - mutex_exit(&pool_head_lock); + s = splvm(); + simple_lock(&pool_head_slock); + if (drainpp == NULL) { + drainpp = LIST_FIRST(&pool_head); + } + if (drainpp) { + pp = drainpp; + drainpp = LIST_NEXT(pp, pr_poollist); + } + simple_unlock(&pool_head_slock); + if (pp) + pool_reclaim(pp); + splx(s); } /* @@ -1729,8 +1648,18 @@ pool_drain_end(struct pool *pp, uint64_t void pool_print(struct pool *pp, const char *modif) { + int s; + s = splvm(); + if (simple_lock_try(&pp->pr_slock) == 0) { + printf("pool %s is locked; try again later\n", + pp->pr_wchan); + splx(s); + return; + } pool_print1(pp, modif, printf); + simple_unlock(&pp->pr_slock); + splx(s); } void @@ -1738,6 +1667,12 @@ pool_printall(const char *modif, void (* { struct pool *pp; + if (simple_lock_try(&pool_head_slock) == 0) { + (*pr)("WARNING: pool_head_slock is locked\n"); + } else { + simple_unlock(&pool_head_slock); + } + LIST_FOREACH(pp, &pool_head, pr_poollist) { pool_printit(pp, modif, pr); } @@ -1752,6 +1687,20 @@ pool_printit(struct pool *pp, const char return; } + /* + * Called from DDB; interrupts should be blocked, and all + * other processors should be paused. We can skip locking + * the pool in this case. + * + * We do a simple_lock_try() just to print the lock + * status, however. + */ + + if (simple_lock_try(&pp->pr_slock) == 0) + (*pr)("WARNING: pool %s is locked\n", pp->pr_wchan); + else + simple_unlock(&pp->pr_slock); + pool_print1(pp, modif, pr); } @@ -1786,10 +1735,8 @@ static void pool_print1(struct pool *pp, const char *modif, void (*pr)(const char *, ...)) { struct pool_item_header *ph; - pool_cache_t pc; - pcg_t *pcg; - pool_cache_cpu_t *cc; - uint64_t cpuhit, cpumiss; + struct pool_cache *pc; + struct pool_cache_group *pcg; int i, print_log = 0, print_pagelist = 0, print_cache = 0; char c; @@ -1802,13 +1749,7 @@ pool_print1(struct pool *pp, const char print_cache = 1; } - if ((pc = pp->pr_cache) != NULL) { - (*pr)("POOL CACHE"); - } else { - (*pr)("POOL"); - } - - (*pr)(" %s: size %u, align %u, ioff %u, roflags 0x%08x\n", + (*pr)("POOL %s: size %u, align %u, ioff %u, roflags 0x%08x\n", pp->pr_wchan, pp->pr_size, pp->pr_align, pp->pr_itemoffset, pp->pr_roflags); (*pr)("\talloc %p\n", pp->pr_alloc); @@ -1817,7 +1758,7 @@ pool_print1(struct pool *pp, const char (*pr)("\titemsperpage %u, nitems %u, nout %u, hardlimit %u\n", pp->pr_itemsperpage, pp->pr_nitems, pp->pr_nout, pp->pr_hardlimit); - (*pr)("\tnget %lu, nfail %lu, nput %lu\n", + (*pr)("\n\tnget %lu, nfail %lu, nput %lu\n", pp->pr_nget, pp->pr_nfail, pp->pr_nput); (*pr)("\tnpagealloc %lu, npagefree %lu, hiwat %u, nidle %lu\n", pp->pr_npagealloc, pp->pr_npagefree, pp->pr_hiwat, pp->pr_nidle); @@ -1852,6 +1793,8 @@ pool_print1(struct pool *pp, const char } skip_log: + if (print_cache == 0) + goto skip_cache; #define PR_GROUPLIST(pcg) \ (*pr)("\t\tgroup %p: avail %d\n", pcg, pcg->pcg_avail); \ @@ -1868,38 +1811,26 @@ pool_print1(struct pool *pp, const char } \ } - if (pc != NULL) { - cpuhit = 0; - cpumiss = 0; - for (i = 0; i < MAXCPUS; i++) { - if ((cc = pc->pc_cpus[i]) == NULL) - continue; - cpuhit += cc->cc_hits; - cpumiss += cc->cc_misses; - } - (*pr)("\tcpu layer hits %llu misses %llu\n", cpuhit, cpumiss); - (*pr)("\tcache layer hits %llu misses %llu\n", - pc->pc_hits, pc->pc_misses); - (*pr)("\tcache layer entry uncontended %llu contended %llu\n", - pc->pc_hits + pc->pc_misses - pc->pc_contended, - pc->pc_contended); - (*pr)("\tcache layer empty groups %u full groups 
%u\n", - pc->pc_nempty, pc->pc_nfull); - if (print_cache) { - (*pr)("\tfull cache groups:\n"); - for (pcg = pc->pc_fullgroups; pcg != NULL; - pcg = pcg->pcg_next) { - PR_GROUPLIST(pcg); - } - (*pr)("\tempty cache groups:\n"); - for (pcg = pc->pc_emptygroups; pcg != NULL; - pcg = pcg->pcg_next) { - PR_GROUPLIST(pcg); - } + LIST_FOREACH(pc, &pp->pr_cachelist, pc_poollist) { + (*pr)("\tcache %p\n", pc); + (*pr)("\t hits %lu misses %lu ngroups %lu nitems %lu\n", + pc->pc_hits, pc->pc_misses, pc->pc_ngroups, pc->pc_nitems); + (*pr)("\t full groups:\n"); + LIST_FOREACH(pcg, &pc->pc_fullgroups, pcg_list) { + PR_GROUPLIST(pcg); + } + (*pr)("\t partial groups:\n"); + LIST_FOREACH(pcg, &pc->pc_partgroups, pcg_list) { + PR_GROUPLIST(pcg); + } + (*pr)("\t empty groups:\n"); + LIST_FOREACH(pcg, &pc->pc_emptygroups, pcg_list) { + PR_GROUPLIST(pcg); } } #undef PR_GROUPLIST + skip_cache: pr_enter_check(pp, pr); } @@ -1967,7 +1898,7 @@ pool_chk(struct pool *pp, const char *la struct pool_item_header *ph; int r = 0; - mutex_enter(&pp->pr_lock); + simple_lock(&pp->pr_slock); LIST_FOREACH(ph, &pp->pr_emptypages, ph_pagelist) { r = pool_chk_page(pp, label, ph); if (r) { @@ -1988,7 +1919,7 @@ pool_chk(struct pool *pp, const char *la } out: - mutex_exit(&pp->pr_lock); + simple_unlock(&pp->pr_slock); return (r); } @@ -1996,87 +1927,38 @@ out: * pool_cache_init: * * Initialize a pool cache. - */ -pool_cache_t -pool_cache_init(size_t size, u_int align, u_int align_offset, u_int flags, - const char *wchan, struct pool_allocator *palloc, int ipl, - int (*ctor)(void *, void *, int), void (*dtor)(void *, void *), void *arg) -{ - pool_cache_t pc; - - pc = pool_get(&cache_pool, PR_WAITOK); - if (pc == NULL) - return NULL; - - pool_cache_bootstrap(pc, size, align, align_offset, flags, wchan, - palloc, ipl, ctor, dtor, arg); - - return pc; -} - -/* - * pool_cache_bootstrap: * - * Kernel-private version of pool_cache_init(). The caller - * provides initial storage. + * NOTE: If the pool must be protected from interrupts, we expect + * to be called at the appropriate interrupt priority level. */ void -pool_cache_bootstrap(pool_cache_t pc, size_t size, u_int align, - u_int align_offset, u_int flags, const char *wchan, - struct pool_allocator *palloc, int ipl, - int (*ctor)(void *, void *, int), void (*dtor)(void *, void *), +pool_cache_init(struct pool_cache *pc, struct pool *pp, + int (*ctor)(void *, void *, int), + void (*dtor)(void *, void *), void *arg) { - CPU_INFO_ITERATOR cii; - struct cpu_info *ci; - struct pool *pp; - - pp = &pc->pc_pool; - if (palloc == NULL && ipl == IPL_NONE) - palloc = &pool_allocator_nointr; - pool_init(pp, size, align, align_offset, flags, wchan, palloc, ipl); - mutex_init(&pc->pc_lock, MUTEX_DEFAULT, pp->pr_ipl); + LIST_INIT(&pc->pc_emptygroups); + LIST_INIT(&pc->pc_fullgroups); + LIST_INIT(&pc->pc_partgroups); + simple_lock_init(&pc->pc_slock); - if (ctor == NULL) { - ctor = (int (*)(void *, void *, int))nullop; - } - if (dtor == NULL) { - dtor = (void (*)(void *, void *))nullop; - } + pc->pc_pool = pp; - pc->pc_emptygroups = NULL; - pc->pc_fullgroups = NULL; - pc->pc_partgroups = NULL; pc->pc_ctor = ctor; pc->pc_dtor = dtor; pc->pc_arg = arg; - pc->pc_hits = 0; + + pc->pc_hits = 0; pc->pc_misses = 0; - pc->pc_nempty = 0; - pc->pc_npart = 0; - pc->pc_nfull = 0; - pc->pc_contended = 0; - pc->pc_refcnt = 0; - - /* Allocate per-CPU caches. 
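/*
 * A hypothetical client of the 1.128.2.13 pool_cache_init() interface
 * being set up here (struct foo as in the earlier sketch).  The cache
 * now owns its pool, so there is no separate pool_init() call or pool
 * handle; passing a NULL allocator with IPL_NONE selects
 * pool_allocator_nointr, as the code above shows.
 */
static pool_cache_t foo_cache;

static int
foo_ctor(void *arg, void *obj, int flags)
{
	struct foo *fp = obj;

	fp->f_refcnt = 0;	/* expensive, one-time construction */
	return 0;		/* nonzero fails the allocation */
}

static void
foo_dtor(void *arg, void *obj)
{

	/* undo foo_ctor() */
}

void
foo_cache_attach(void)
{

	foo_cache = pool_cache_init(sizeof(struct foo), 0, 0, 0,
	    "foocache", NULL, IPL_NONE, foo_ctor, foo_dtor, NULL);
}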
*/ - memset(pc->pc_cpus, 0, sizeof(pc->pc_cpus)); - pc->pc_ncpu = 0; - for (CPU_INFO_FOREACH(cii, ci)) { - pool_cache_cpu_init1(ci, pc); - } - - if (__predict_true(!cold)) { - mutex_enter(&pp->pr_lock); - pp->pr_cache = pc; - mutex_exit(&pp->pr_lock); - mutex_enter(&pool_head_lock); - LIST_INSERT_HEAD(&pool_cache_head, pc, pc_cachelist); - mutex_exit(&pool_head_lock); - } else { - pp->pr_cache = pc; - LIST_INSERT_HEAD(&pool_cache_head, pc, pc_cachelist); - } + + pc->pc_ngroups = 0; + + pc->pc_nitems = 0; + + simple_lock(&pp->pr_slock); + LIST_INSERT_HEAD(&pp->pr_cachelist, pc, pc_poollist); + simple_unlock(&pp->pr_slock); } /* @@ -2085,620 +1967,323 @@ pool_cache_bootstrap(pool_cache_t pc, si * Destroy a pool cache. */ void -pool_cache_destroy(pool_cache_t pc) +pool_cache_destroy(struct pool_cache *pc) { - struct pool *pp = &pc->pc_pool; - pool_cache_cpu_t *cc; - pcg_t *pcg; - int i; - - /* Remove it from the global list. */ - mutex_enter(&pool_head_lock); - while (pc->pc_refcnt != 0) - cv_wait(&pool_busy, &pool_head_lock); - LIST_REMOVE(pc, pc_cachelist); - mutex_exit(&pool_head_lock); + struct pool *pp = pc->pc_pool; /* First, invalidate the entire cache. */ pool_cache_invalidate(pc); - /* Disassociate it from the pool. */ - mutex_enter(&pp->pr_lock); - pp->pr_cache = NULL; - mutex_exit(&pp->pr_lock); - - /* Destroy per-CPU data */ - for (i = 0; i < MAXCPUS; i++) { - if ((cc = pc->pc_cpus[i]) == NULL) - continue; - if ((pcg = cc->cc_current) != NULL) { - pcg->pcg_next = NULL; - pool_cache_invalidate_groups(pc, pcg); - } - if ((pcg = cc->cc_previous) != NULL) { - pcg->pcg_next = NULL; - pool_cache_invalidate_groups(pc, pcg); - } - if (cc != &pc->pc_cpu0) - pool_put(&cache_cpu_pool, cc); - } - - /* Finally, destroy it. */ - mutex_destroy(&pc->pc_lock); - pool_destroy(pp); - pool_put(&cache_pool, pc); -} - -/* - * pool_cache_cpu_init1: - * - * Called for each pool_cache whenever a new CPU is attached. - */ -static void -pool_cache_cpu_init1(struct cpu_info *ci, pool_cache_t pc) -{ - pool_cache_cpu_t *cc; - - KASSERT(((uintptr_t)pc->pc_cpus & (CACHE_LINE_SIZE - 1)) == 0); - - if ((cc = pc->pc_cpus[ci->ci_index]) != NULL) { - KASSERT(cc->cc_cpu = ci); - return; - } - - /* - * The first CPU is 'free'. This needs to be the case for - * bootstrap - we may not be able to allocate yet. - */ - if (pc->pc_ncpu == 0) { - cc = &pc->pc_cpu0; - pc->pc_ncpu = 1; - } else { - mutex_enter(&pc->pc_lock); - pc->pc_ncpu++; - mutex_exit(&pc->pc_lock); - cc = pool_get(&cache_cpu_pool, PR_WAITOK); - } - - cc->cc_ipl = pc->pc_pool.pr_ipl; - cc->cc_iplcookie = makeiplcookie(cc->cc_ipl); - cc->cc_cache = pc; - cc->cc_cpu = ci; - cc->cc_hits = 0; - cc->cc_misses = 0; - cc->cc_current = NULL; - cc->cc_previous = NULL; - - pc->pc_cpus[ci->ci_index] = cc; + /* ...and remove it from the pool's cache list. */ + simple_lock(&pp->pr_slock); + LIST_REMOVE(pc, pc_poollist); + simple_unlock(&pp->pr_slock); } -/* - * pool_cache_cpu_init: - * - * Called whenever a new CPU is attached. 
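/*
 * The pin-while-unlocked walk the function below uses, in isolation
 * (sketch; for_each_cache is an illustrative wrapper).  Holding
 * pc_refcnt keeps the cache on the list across the unlock, so
 * LIST_FOREACH may safely continue afterwards; pool_cache_destroy()
 * waits on pool_busy until the count drops to zero.
 */
static void
for_each_cache(void (*fn)(pool_cache_t))
{
	pool_cache_t pc;

	mutex_enter(&pool_head_lock);
	LIST_FOREACH(pc, &pool_cache_head, pc_cachelist) {
		pc->pc_refcnt++;		/* pin */
		mutex_exit(&pool_head_lock);

		(*fn)(pc);			/* may sleep safely */

		mutex_enter(&pool_head_lock);
		pc->pc_refcnt--;		/* unpin */
		cv_broadcast(&pool_busy);
	}
	mutex_exit(&pool_head_lock);
}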
- */ -void -pool_cache_cpu_init(struct cpu_info *ci) +static inline void * +pcg_get(struct pool_cache_group *pcg, paddr_t *pap) { - pool_cache_t pc; - - mutex_enter(&pool_head_lock); - LIST_FOREACH(pc, &pool_cache_head, pc_cachelist) { - pc->pc_refcnt++; - mutex_exit(&pool_head_lock); + void *object; + u_int idx; - pool_cache_cpu_init1(ci, pc); + KASSERT(pcg->pcg_avail <= PCG_NOBJECTS); + KASSERT(pcg->pcg_avail != 0); + idx = --pcg->pcg_avail; + + KASSERT(pcg->pcg_objects[idx].pcgo_va != NULL); + object = pcg->pcg_objects[idx].pcgo_va; + if (pap != NULL) + *pap = pcg->pcg_objects[idx].pcgo_pa; + pcg->pcg_objects[idx].pcgo_va = NULL; - mutex_enter(&pool_head_lock); - pc->pc_refcnt--; - cv_broadcast(&pool_busy); - } - mutex_exit(&pool_head_lock); + return (object); } -/* - * pool_cache_reclaim: - * - * Reclaim memory from a pool cache. - */ -bool -pool_cache_reclaim(pool_cache_t pc) +static inline void +pcg_put(struct pool_cache_group *pcg, void *object, paddr_t pa) { + u_int idx; - return pool_reclaim(&pc->pc_pool); -} + KASSERT(pcg->pcg_avail < PCG_NOBJECTS); + idx = pcg->pcg_avail++; -/* - * pool_cache_destruct_object: - * - * Force destruction of an object and its release back into - * the pool. - */ -void -pool_cache_destruct_object(pool_cache_t pc, void *object) -{ - - (*pc->pc_dtor)(pc->pc_arg, object); - pool_put(&pc->pc_pool, object); + KASSERT(pcg->pcg_objects[idx].pcgo_va == NULL); + pcg->pcg_objects[idx].pcgo_va = object; + pcg->pcg_objects[idx].pcgo_pa = pa; } -/* - * pool_cache_invalidate_groups: - * - * Invalidate a chain of groups and destruct all objects. - */ static void -pool_cache_invalidate_groups(pool_cache_t pc, pcg_t *pcg) +pcg_grouplist_free(struct pool_cache_grouplist *pcgl) { - void *object; - pcg_t *next; - int i; - - for (; pcg != NULL; pcg = next) { - next = pcg->pcg_next; - - for (i = 0; i < pcg->pcg_avail; i++) { - object = pcg->pcg_objects[i].pcgo_va; - pool_cache_destruct_object(pc, object); - } + struct pool_cache_group *pcg; + int s; + s = splvm(); + while ((pcg = LIST_FIRST(pcgl)) != NULL) { + LIST_REMOVE(pcg, pcg_list); pool_put(&pcgpool, pcg); } + splx(s); } /* - * pool_cache_invalidate: + * pool_cache_get{,_paddr}: * - * Invalidate a pool cache (destruct and release all of the - * cached objects). Does not reclaim objects from the pool. + * Get an object from a pool cache (optionally returning + * the physical address of the object). 
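/*
 * The caller's view (sketch; foo_cache as before).  Plain
 * pool_cache_get() is the same call with a NULL paddr pointer; the
 * _paddr variant also hands back where the object sits physically,
 * which callers such as the mbuf cluster code want, and the matching
 * put caches pa alongside the object for the next consumer.
 */
static int
foo_get_with_paddr(void)
{
	struct foo *fp;
	paddr_t pa;

	fp = pool_cache_get_paddr(foo_cache, PR_NOWAIT, &pa);
	if (fp == NULL)
		return ENOMEM;
	/* ... hand pa to a (hypothetical) device, use fp ... */
	pool_cache_put_paddr(foo_cache, fp, pa);
	return 0;
}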
*/ -void -pool_cache_invalidate(pool_cache_t pc) -{ - pcg_t *full, *empty, *part; - - mutex_enter(&pc->pc_lock); - full = pc->pc_fullgroups; - empty = pc->pc_emptygroups; - part = pc->pc_partgroups; - pc->pc_fullgroups = NULL; - pc->pc_emptygroups = NULL; - pc->pc_partgroups = NULL; - pc->pc_nfull = 0; - pc->pc_nempty = 0; - pc->pc_npart = 0; - mutex_exit(&pc->pc_lock); - - pool_cache_invalidate_groups(pc, full); - pool_cache_invalidate_groups(pc, empty); - pool_cache_invalidate_groups(pc, part); -} - -void -pool_cache_set_drain_hook(pool_cache_t pc, void (*fn)(void *, int), void *arg) -{ - - pool_set_drain_hook(&pc->pc_pool, fn, arg); -} - -void -pool_cache_setlowat(pool_cache_t pc, int n) -{ - - pool_setlowat(&pc->pc_pool, n); -} - -void -pool_cache_sethiwat(pool_cache_t pc, int n) -{ - - pool_sethiwat(&pc->pc_pool, n); -} - -void -pool_cache_sethardlimit(pool_cache_t pc, int n, const char *warnmess, int ratecap) -{ - - pool_sethardlimit(&pc->pc_pool, n, warnmess, ratecap); -} - -static inline pool_cache_cpu_t * -pool_cache_cpu_enter(pool_cache_t pc, int *s) -{ - pool_cache_cpu_t *cc; - struct cpu_info *ci; - - /* - * Prevent other users of the cache from accessing our - * CPU-local data. To avoid touching shared state, we - * pull the neccessary information from CPU local data. - */ - ci = curcpu(); - KASSERT(ci->ci_data.cpu_index < MAXCPUS); - cc = pc->pc_cpus[ci->ci_data.cpu_index]; - KASSERT(cc->cc_cache == pc); - if (cc->cc_ipl == IPL_NONE) { - crit_enter(); - } else { - *s = splraiseipl(cc->cc_iplcookie); - } - - /* Moved to another CPU before disabling preemption? */ - if (__predict_false(ci != curcpu())) { - ci = curcpu(); - cc = pc->pc_cpus[ci->ci_data.cpu_index]; - } - -#ifdef DIAGNOSTIC - KASSERT(cc->cc_cpu == ci); - KASSERT(((uintptr_t)cc & (CACHE_LINE_SIZE - 1)) == 0); -#endif - - return cc; -} - -static inline void -pool_cache_cpu_exit(pool_cache_cpu_t *cc, int *s) -{ - - /* No longer need exclusive access to the per-CPU data. */ - if (cc->cc_ipl == IPL_NONE) { - crit_exit(); - } else { - splx(*s); - } -} - -#if __GNUC_PREREQ__(3, 0) -__attribute ((noinline)) -#endif -pool_cache_cpu_t * -pool_cache_get_slow(pool_cache_cpu_t *cc, int *s, void **objectp, - paddr_t *pap, int flags) +void * +pool_cache_get_paddr(struct pool_cache *pc, int flags, paddr_t *pap) { - pcg_t *pcg, *cur; - uint64_t ncsw; - pool_cache_t pc; + struct pool_cache_group *pcg; void *object; - pc = cc->cc_cache; - cc->cc_misses++; +#ifdef LOCKDEBUG + if (flags & PR_WAITOK) + ASSERT_SLEEPABLE(NULL, "pool_cache_get(PR_WAITOK)"); +#endif - /* - * Nothing was available locally. Try and grab a group - * from the cache. - */ - if (!mutex_tryenter(&pc->pc_lock)) { - ncsw = curlwp->l_ncsw; - mutex_enter(&pc->pc_lock); - pc->pc_contended++; + simple_lock(&pc->pc_slock); - /* - * If we context switched while locking, then - * our view of the per-CPU data is invalid: - * retry. - */ - if (curlwp->l_ncsw != ncsw) { - mutex_exit(&pc->pc_lock); - pool_cache_cpu_exit(cc, s); - return pool_cache_cpu_enter(pc, s); + pcg = LIST_FIRST(&pc->pc_partgroups); + if (pcg == NULL) { + pcg = LIST_FIRST(&pc->pc_fullgroups); + if (pcg != NULL) { + LIST_REMOVE(pcg, pcg_list); + LIST_INSERT_HEAD(&pc->pc_partgroups, pcg, pcg_list); } } + if (pcg == NULL) { - if ((pcg = pc->pc_fullgroups) != NULL) { /* - * If there's a full group, release our empty - * group back to the cache. Install the full - * group as cc_current and return. + * No groups with any available objects. 
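/*
 * The constructor contract this miss path relies on (sketch;
 * fragile_ctor and FOO_BUFSZ are illustrative): returning nonzero
 * makes the cache hand the raw object back to the pool and fail the
 * whole allocation, so the original caller sees NULL.
 */
#define	FOO_BUFSZ	1024		/* hypothetical */

static int
fragile_ctor(void *arg, void *obj, int flags)
{
	struct foo *fp = obj;

	fp->f_buf = malloc(FOO_BUFSZ, M_TEMP,
	    (flags & PR_WAITOK) ? M_WAITOK : M_NOWAIT);
	if (fp->f_buf == NULL)
		return ENOMEM;
	return 0;
}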
Allocate + * a new object, construct it, and return it to + * the caller. We will allocate a group, if necessary, + * when the object is freed back to the cache. */ - if ((cur = cc->cc_current) != NULL) { - KASSERT(cur->pcg_avail == 0); - cur->pcg_next = pc->pc_emptygroups; - pc->pc_emptygroups = cur; - pc->pc_nempty++; + pc->pc_misses++; + simple_unlock(&pc->pc_slock); + object = pool_get(pc->pc_pool, flags); + if (object != NULL && pc->pc_ctor != NULL) { + if ((*pc->pc_ctor)(pc->pc_arg, object, flags) != 0) { + pool_put(pc->pc_pool, object); + return (NULL); + } + } + KASSERT((((vaddr_t)object + pc->pc_pool->pr_itemoffset) & + (pc->pc_pool->pr_align - 1)) == 0); + if (object != NULL && pap != NULL) { +#ifdef POOL_VTOPHYS + *pap = POOL_VTOPHYS(object); +#else + *pap = POOL_PADDR_INVALID; +#endif } - KASSERT(pcg->pcg_avail == PCG_NOBJECTS); - cc->cc_current = pcg; - pc->pc_fullgroups = pcg->pcg_next; - pc->pc_hits++; - pc->pc_nfull--; - mutex_exit(&pc->pc_lock); - return cc; - } - - /* - * Nothing available locally or in cache. Take the slow - * path: fetch a new object from the pool and construct - * it. - */ - pc->pc_misses++; - mutex_exit(&pc->pc_lock); - pool_cache_cpu_exit(cc, s); - - object = pool_get(&pc->pc_pool, flags); - *objectp = object; - if (object == NULL) - return NULL; - if ((*pc->pc_ctor)(pc->pc_arg, object, flags) != 0) { - pool_put(&pc->pc_pool, object); - *objectp = NULL; - return NULL; + FREECHECK_OUT(&pc->pc_freecheck, object); + return (object); } - KASSERT((((vaddr_t)object + pc->pc_pool.pr_itemoffset) & - (pc->pc_pool.pr_align - 1)) == 0); + pc->pc_hits++; + pc->pc_nitems--; + object = pcg_get(pcg, pap); - if (pap != NULL) { -#ifdef POOL_VTOPHYS - *pap = POOL_VTOPHYS(object); -#else - *pap = POOL_PADDR_INVALID; -#endif + if (pcg->pcg_avail == 0) { + LIST_REMOVE(pcg, pcg_list); + LIST_INSERT_HEAD(&pc->pc_emptygroups, pcg, pcg_list); } + simple_unlock(&pc->pc_slock); + KASSERT((((vaddr_t)object + pc->pc_pool->pr_itemoffset) & + (pc->pc_pool->pr_align - 1)) == 0); FREECHECK_OUT(&pc->pc_freecheck, object); - return NULL; + return (object); } /* - * pool_cache_get{,_paddr}: + * pool_cache_put{,_paddr}: * - * Get an object from a pool cache (optionally returning - * the physical address of the object). + * Put an object back to the pool cache (optionally caching the + * physical address of the object). */ -void * -pool_cache_get_paddr(pool_cache_t pc, int flags, paddr_t *pap) +void +pool_cache_put_paddr(struct pool_cache *pc, void *object, paddr_t pa) { - pool_cache_cpu_t *cc; - pcg_t *pcg; - void *object; + struct pool_cache_group *pcg; int s; -#ifdef LOCKDEBUG - if (flags & PR_WAITOK) - ASSERT_SLEEPABLE(NULL, "pool_cache_get(PR_WAITOK)"); -#endif + FREECHECK_IN(&pc->pc_freecheck, object); - cc = pool_cache_cpu_enter(pc, &s); - do { - /* Try and allocate an object from the current group. 
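/*
 * Context rules implied by the ASSERT_SLEEPABLE check above (sketch;
 * foo_cache as before): PR_WAITOK may sleep, so interrupt handlers
 * must use PR_NOWAIT and cope with NULL.
 */
static void *
foo_alloc_intr(void)			/* interrupt context */
{

	return pool_cache_get(foo_cache, PR_NOWAIT);	/* may be NULL */
}

static void *
foo_alloc_thread(void)			/* thread context */
{

	/* Fails only if the object's constructor fails. */
	return pool_cache_get(foo_cache, PR_WAITOK);
}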
*/ - pcg = cc->cc_current; - if (pcg != NULL && pcg->pcg_avail > 0) { - object = pcg->pcg_objects[--pcg->pcg_avail].pcgo_va; - if (pap != NULL) - *pap = pcg->pcg_objects[pcg->pcg_avail].pcgo_pa; - pcg->pcg_objects[pcg->pcg_avail].pcgo_va = NULL; - KASSERT(pcg->pcg_avail <= PCG_NOBJECTS); - KASSERT(object != NULL); - cc->cc_hits++; - pool_cache_cpu_exit(cc, &s); - FREECHECK_OUT(&pc->pc_freecheck, object); - return object; + if (__predict_false((pc->pc_pool->pr_flags & PR_WANTED) != 0)) { + goto destruct; + } + + simple_lock(&pc->pc_slock); + + pcg = LIST_FIRST(&pc->pc_partgroups); + if (pcg == NULL) { + pcg = LIST_FIRST(&pc->pc_emptygroups); + if (pcg != NULL) { + LIST_REMOVE(pcg, pcg_list); + LIST_INSERT_HEAD(&pc->pc_partgroups, pcg, pcg_list); } + } + if (pcg == NULL) { /* - * That failed. If the previous group isn't empty, swap - * it with the current group and allocate from there. + * No empty groups to free the object to. Attempt to + * allocate one. */ - pcg = cc->cc_previous; - if (pcg != NULL && pcg->pcg_avail > 0) { - cc->cc_previous = cc->cc_current; - cc->cc_current = pcg; - continue; + simple_unlock(&pc->pc_slock); + s = splvm(); + pcg = pool_get(&pcgpool, PR_NOWAIT); + splx(s); + if (pcg == NULL) { +destruct: + + /* + * Unable to allocate a cache group; destruct the object + * and free it back to the pool. + */ + pool_cache_destruct_object(pc, object); + return; } + memset(pcg, 0, sizeof(*pcg)); + simple_lock(&pc->pc_slock); + pc->pc_ngroups++; + LIST_INSERT_HEAD(&pc->pc_partgroups, pcg, pcg_list); + } - /* - * Can't allocate from either group: try the slow path. - * If get_slow() allocated an object for us, or if - * no more objects are available, it will return NULL. - * Otherwise, we need to retry. - */ - cc = pool_cache_get_slow(cc, &s, &object, pap, flags); - } while (cc != NULL); + pc->pc_nitems++; + pcg_put(pcg, object, pa); - return object; + if (pcg->pcg_avail == PCG_NOBJECTS) { + LIST_REMOVE(pcg, pcg_list); + LIST_INSERT_HEAD(&pc->pc_fullgroups, pcg, pcg_list); + } + simple_unlock(&pc->pc_slock); } -#if __GNUC_PREREQ__(3, 0) -__attribute ((noinline)) -#endif -pool_cache_cpu_t * -pool_cache_put_slow(pool_cache_cpu_t *cc, int *s, void *object, paddr_t pa) +/* + * pool_cache_destruct_object: + * + * Force destruction of an object and its release back into + * the pool. + */ +void +pool_cache_destruct_object(struct pool_cache *pc, void *object) { - pcg_t *pcg, *cur; - uint64_t ncsw; - pool_cache_t pc; - pc = cc->cc_cache; - cc->cc_misses++; + if (pc->pc_dtor != NULL) + (*pc->pc_dtor)(pc->pc_arg, object); + pool_put(pc->pc_pool, object); +} - /* - * No free slots locally. Try to grab an empty, unused - * group from the cache. - */ - if (!mutex_tryenter(&pc->pc_lock)) { - ncsw = curlwp->l_ncsw; - mutex_enter(&pc->pc_lock); - pc->pc_contended++; +/* + * pool_do_cache_invalidate_grouplist: + * + * Invalidate a single grouplist and destruct all objects. + * XXX This is too expensive. We should swap the list then + * unlock. + */ +static void +pool_do_cache_invalidate_grouplist(struct pool_cache_grouplist *pcgsl, + struct pool_cache *pc, struct pool_pagelist *pq, + struct pool_cache_grouplist *pcgdl) +{ + struct pool_cache_group *pcg; + void *object; - /* - * If we context switched while locking, then - * our view of the per-CPU data is invalid: - * retry. 
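/*
 * The migration test used here, in isolation (sketch; the helper name
 * is illustrative).  l_ncsw counts the LWP's context switches, so
 * comparing it around a blocking lock acquisition detects both
 * preemption and migration to another CPU, either of which makes the
 * cached per-CPU pointer stale.
 */
static bool
lock_cache_without_migration(pool_cache_t pc)
{
	uint64_t ncsw;

	ncsw = curlwp->l_ncsw;
	mutex_enter(&pc->pc_lock);	/* may block */
	if (__predict_true(curlwp->l_ncsw == ncsw))
		return true;		/* lock held, still same CPU */
	mutex_exit(&pc->pc_lock);
	return false;			/* we slept: caller restarts */
}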
- */ - if (curlwp->l_ncsw != ncsw) { - mutex_exit(&pc->pc_lock); - pool_cache_cpu_exit(cc, s); - return pool_cache_cpu_enter(pc, s); - } - } + LOCK_ASSERT(simple_lock_held(&pc->pc_slock)); + LOCK_ASSERT(simple_lock_held(&pc->pc_pool->pr_slock)); - if ((pcg = pc->pc_emptygroups) != NULL) { - /* - * If there's a empty group, release our full - * group back to the cache. Install the empty - * group as cc_current and return. - */ - if ((cur = cc->cc_current) != NULL) { - KASSERT(cur->pcg_avail == PCG_NOBJECTS); - cur->pcg_next = pc->pc_fullgroups; - pc->pc_fullgroups = cur; - pc->pc_nfull++; + while ((pcg = LIST_FIRST(pcgsl)) != NULL) { + pc->pc_ngroups--; + LIST_REMOVE(pcg, pcg_list); + LIST_INSERT_HEAD(pcgdl, pcg, pcg_list); + pc->pc_nitems -= pcg->pcg_avail; + simple_unlock(&pc->pc_pool->pr_slock); + simple_unlock(&pc->pc_slock); + + while (pcg->pcg_avail != 0) { + object = pcg_get(pcg, NULL); + if (pc->pc_dtor != NULL) + (*pc->pc_dtor)(pc->pc_arg, object); + simple_lock(&pc->pc_pool->pr_slock); + pool_do_put(pc->pc_pool, object, pq); + simple_unlock(&pc->pc_pool->pr_slock); } - KASSERT(pcg->pcg_avail == 0); - cc->cc_current = pcg; - pc->pc_emptygroups = pcg->pcg_next; - pc->pc_hits++; - pc->pc_nempty--; - mutex_exit(&pc->pc_lock); - return cc; + + simple_lock(&pc->pc_slock); + simple_lock(&pc->pc_pool->pr_slock); } +} - /* - * Nothing available locally or in cache. Take the - * slow path and try to allocate a new group that we - * can release to. - */ - pc->pc_misses++; - mutex_exit(&pc->pc_lock); - pool_cache_cpu_exit(cc, s); +static void +pool_do_cache_invalidate(struct pool_cache *pc, struct pool_pagelist *pq, + struct pool_cache_grouplist *pcgl) +{ - /* - * If we can't allocate a new group, just throw the - * object away. - */ - pcg = pool_get(&pcgpool, PR_NOWAIT); - if (pcg == NULL) { - pool_cache_destruct_object(pc, object); - return NULL; - } -#ifdef DIAGNOSTIC - memset(pcg, 0, sizeof(*pcg)); -#else - pcg->pcg_avail = 0; -#endif + LOCK_ASSERT(simple_lock_held(&pc->pc_slock)); + LOCK_ASSERT(simple_lock_held(&pc->pc_pool->pr_slock)); - /* - * Add the empty group to the cache and try again. - */ - mutex_enter(&pc->pc_lock); - pcg->pcg_next = pc->pc_emptygroups; - pc->pc_emptygroups = pcg; - pc->pc_nempty++; - mutex_exit(&pc->pc_lock); + pool_do_cache_invalidate_grouplist(&pc->pc_fullgroups, pc, pq, pcgl); + pool_do_cache_invalidate_grouplist(&pc->pc_partgroups, pc, pq, pcgl); - return pool_cache_cpu_enter(pc, s); -} + KASSERT(LIST_EMPTY(&pc->pc_partgroups)); + KASSERT(LIST_EMPTY(&pc->pc_fullgroups)); + KASSERT(pc->pc_nitems == 0); +} /* - * pool_cache_put{,_paddr}: + * pool_cache_invalidate: * - * Put an object back to the pool cache (optionally caching the - * physical address of the object). + * Invalidate a pool cache (destruct and release all of the + * cached objects). */ void -pool_cache_put_paddr(pool_cache_t pc, void *object, paddr_t pa) +pool_cache_invalidate(struct pool_cache *pc) { - pool_cache_cpu_t *cc; - pcg_t *pcg; - int s; + struct pool_pagelist pq; + struct pool_cache_grouplist pcgl; - FREECHECK_IN(&pc->pc_freecheck, object); + LIST_INIT(&pq); + LIST_INIT(&pcgl); - cc = pool_cache_cpu_enter(pc, &s); - do { - /* If the current group isn't full, release it there. 
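/*
 * The payoff for a client (sketch; foo_cache as before): because a
 * put does not destruct, the constructor runs per miss rather than
 * per allocation, so this loop constructs at most one object however
 * many times it iterates.
 */
static void
foo_churn(void)
{
	struct foo *fp;
	int i;

	for (i = 0; i < 1000; i++) {
		fp = pool_cache_get(foo_cache, PR_WAITOK);
		/* fp arrives already constructed */
		pool_cache_put(foo_cache, fp);	/* dtor not called */
	}
}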
*/ - pcg = cc->cc_current; - if (pcg != NULL && pcg->pcg_avail < PCG_NOBJECTS) { - KASSERT(pcg->pcg_objects[pcg->pcg_avail].pcgo_va - == NULL); - pcg->pcg_objects[pcg->pcg_avail].pcgo_va = object; - pcg->pcg_objects[pcg->pcg_avail].pcgo_pa = pa; - pcg->pcg_avail++; - cc->cc_hits++; - pool_cache_cpu_exit(cc, &s); - return; - } + simple_lock(&pc->pc_slock); + simple_lock(&pc->pc_pool->pr_slock); - /* - * That failed. If the previous group is empty, swap - * it with the current group and try again. - */ - pcg = cc->cc_previous; - if (pcg != NULL && pcg->pcg_avail == 0) { - cc->cc_previous = cc->cc_current; - cc->cc_current = pcg; - continue; - } + pool_do_cache_invalidate(pc, &pq, &pcgl); - /* - * Can't free to either group: try the slow path. - * If put_slow() releases the object for us, it - * will return NULL. Otherwise we need to retry. - */ - cc = pool_cache_put_slow(cc, &s, object, pa); - } while (cc != NULL); + simple_unlock(&pc->pc_pool->pr_slock); + simple_unlock(&pc->pc_slock); + + pr_pagelist_free(pc->pc_pool, &pq); + pcg_grouplist_free(&pcgl); } /* - * pool_cache_xcall: + * pool_cache_reclaim: * - * Transfer objects from the per-CPU cache to the global cache. - * Run within a cross-call thread. + * Reclaim a pool cache for pool_reclaim(). */ static void -pool_cache_xcall(pool_cache_t pc) +pool_cache_reclaim(struct pool_cache *pc, struct pool_pagelist *pq, + struct pool_cache_grouplist *pcgl) { - pool_cache_cpu_t *cc; - pcg_t *prev, *cur, **list; - int s = 0; /* XXXgcc */ - - cc = pool_cache_cpu_enter(pc, &s); - cur = cc->cc_current; - cc->cc_current = NULL; - prev = cc->cc_previous; - cc->cc_previous = NULL; - pool_cache_cpu_exit(cc, &s); - - /* - * XXXSMP Go to splvm to prevent kernel_lock from being taken, - * because locks at IPL_SOFTXXX are still spinlocks. Does not - * apply to IPL_SOFTBIO. Cross-call threads do not take the - * kernel_lock. + + /* + * We're locking in the wrong order (normally pool_cache -> pool, + * but the pool is already locked when we get here), so we have + * to use trylock. If we can't lock the pool_cache, it's not really + * a big deal here. */ - s = splvm(); - mutex_enter(&pc->pc_lock); - if (cur != NULL) { - if (cur->pcg_avail == PCG_NOBJECTS) { - list = &pc->pc_fullgroups; - pc->pc_nfull++; - } else if (cur->pcg_avail == 0) { - list = &pc->pc_emptygroups; - pc->pc_nempty++; - } else { - list = &pc->pc_partgroups; - pc->pc_npart++; - } - cur->pcg_next = *list; - *list = cur; - } - if (prev != NULL) { - if (prev->pcg_avail == PCG_NOBJECTS) { - list = &pc->pc_fullgroups; - pc->pc_nfull++; - } else if (prev->pcg_avail == 0) { - list = &pc->pc_emptygroups; - pc->pc_nempty++; - } else { - list = &pc->pc_partgroups; - pc->pc_npart++; - } - prev->pcg_next = *list; - *list = prev; - } - mutex_exit(&pc->pc_lock); - splx(s); + if (simple_lock_try(&pc->pc_slock) == 0) + return; + + pool_do_cache_invalidate(pc, pq, pcgl); + + simple_unlock(&pc->pc_slock); } /* @@ -2826,13 +2411,21 @@ pool_page_free_meta(struct pool *pp, voi void * pool_subpage_alloc(struct pool *pp, int flags) { - return pool_get(&psppool, flags); + void *v; + int s; + s = splvm(); + v = pool_get(&psppool, flags); + splx(s); + return v; } void pool_subpage_free(struct pool *pp, void *v) { + int s; + s = splvm(); pool_put(&psppool, v); + splx(s); } /* We don't provide a real nointr allocator. Maybe later. */
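/*
 * Defining a custom backing allocator, modeled on pool_allocator_meta
 * above (sketch; the foo_* names are hypothetical, and the third
 * member, pa_pagesz, is assumed to follow the alloc/free hooks as it
 * does for the stock allocators; 0 selects the default page size).
 */
static void	*foo_page_alloc(struct pool *, int);
static void	 foo_page_free(struct pool *, void *);

struct pool_allocator foo_allocator = {
	foo_page_alloc,
	foo_page_free,
	0,		/* pa_pagesz: 0 = default */
};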