Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

===================================================================
RCS file: /ftp/cvs/cvsroot/src/sys/kern/subr_pool.c,v
rcsdiff: /ftp/cvs/cvsroot/src/sys/kern/subr_pool.c,v: warning: Unknown phrases like `commitid ...;' are present.
retrieving revision 1.151.6.3
retrieving revision 1.176
diff -u -p -r1.151.6.3 -r1.176
--- src/sys/kern/subr_pool.c	2008/09/28 10:40:53	1.151.6.3
+++ src/sys/kern/subr_pool.c	2009/10/15 20:50:12	1.176
@@ -1,4 +1,4 @@
-/*	$NetBSD: subr_pool.c,v 1.151.6.3 2008/09/28 10:40:53 mjf Exp $	*/
+/*	$NetBSD: subr_pool.c,v 1.176 2009/10/15 20:50:12 thorpej Exp $	*/
 
 /*-
  * Copyright (c) 1997, 1999, 2000, 2002, 2007, 2008 The NetBSD Foundation, Inc.
@@ -31,7 +31,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.151.6.3 2008/09/28 10:40:53 mjf Exp $");
+__KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.176 2009/10/15 20:50:12 thorpej Exp $");
 
 #include "opt_ddb.h"
 #include "opt_pool.h"
@@ -69,7 +69,7 @@ __KERNEL_RCSID(0, "$NetBSD: subr_pool.c,
  */
 
 /* List of all pools */
-TAILQ_HEAD(,pool) pool_head = TAILQ_HEAD_INITIALIZER(pool_head);
+static TAILQ_HEAD(, pool) pool_head = TAILQ_HEAD_INITIALIZER(pool_head);
 
 /* Private pool for page header structures */
 #define	PHPOOL_MAX	8
@@ -188,6 +188,7 @@ static bool pool_cache_get_slow(pool_cac
 		    void **, paddr_t *, int);
 static void	pool_cache_cpu_init1(struct cpu_info *, pool_cache_t);
 static void	pool_cache_invalidate_groups(pool_cache_t, pcg_t *);
+static void	pool_cache_invalidate_cpu(pool_cache_t, u_int);
 static void	pool_cache_xcall(pool_cache_t);
 
 static int	pool_catchup(struct pool *);
@@ -587,17 +588,10 @@ void
 pool_subsystem_init(void)
 {
 	struct pool_allocator *pa;
-	__link_set_decl(pools, struct link_pool_init);
-	struct link_pool_init * const *pi;
 
 	mutex_init(&pool_head_lock, MUTEX_DEFAULT, IPL_NONE);
 	cv_init(&pool_busy, "poolbusy");
 
-	__link_set_foreach(pi, pools)
-		pool_init((*pi)->pp, (*pi)->size, (*pi)->align,
-		    (*pi)->align_offset, (*pi)->flags, (*pi)->wchan,
-		    (*pi)->palloc, (*pi)->ipl);
-
 	while ((pa = SLIST_FIRST(&pa_deferinitq)) != NULL) {
 		KASSERT(pa->pa_backingmapptr != NULL);
 		KASSERT(*pa->pa_backingmapptr != NULL);
@@ -2129,9 +2123,7 @@ void
 pool_cache_destroy(pool_cache_t pc)
 {
 	struct pool *pp = &pc->pc_pool;
-	pool_cache_cpu_t *cc;
-	pcg_t *pcg;
-	int i;
+	u_int i;
 
 	/* Remove it from the global list. */
 	mutex_enter(&pool_head_lock);
@@ -2149,20 +2141,8 @@ pool_cache_destroy(pool_cache_t pc)
 	mutex_exit(&pp->pr_lock);
 
 	/* Destroy per-CPU data */
-	for (i = 0; i < MAXCPUS; i++) {
-		if ((cc = pc->pc_cpus[i]) == NULL)
-			continue;
-		if ((pcg = cc->cc_current) != &pcg_dummy) {
-			pcg->pcg_next = NULL;
-			pool_cache_invalidate_groups(pc, pcg);
-		}
-		if ((pcg = cc->cc_previous) != &pcg_dummy) {
-			pcg->pcg_next = NULL;
-			pool_cache_invalidate_groups(pc, pcg);
-		}
-		if (cc != &pc->pc_cpu0)
-			pool_put(&cache_cpu_pool, cc);
-	}
+	for (i = 0; i < MAXCPUS; i++)
+		pool_cache_invalidate_cpu(pc, i);
 
 	/* Finally, destroy it. */
 	mutex_destroy(&pc->pc_lock);
@@ -2309,11 +2289,33 @@ pool_cache_invalidate_groups(pool_cache_
  *
  *	Invalidate a pool cache (destruct and release all of the
  *	cached objects).  Does not reclaim objects from the pool.
+ *
+ *	Note: For pool caches that provide constructed objects, there
+ *	is an assumption that another level of synchronization is occurring
+ *	between the input to the constructor and the cache invalidation.
  */
 void
 pool_cache_invalidate(pool_cache_t pc)
 {
 	pcg_t *full, *empty, *part;
+	uint64_t where;
+
+	if (ncpu < 2) {
+		/*
+		 * We might be called early enough in the boot process
+		 * for the CPU data structures to not be fully initialized.
+		 * In this case, simply gather the local CPU's cache now
+		 * since it will be the only one running.
+		 */
+		pool_cache_xcall(pc);
+	} else {
+		/*
+		 * Gather all of the CPU-specific caches into the
+		 * global cache.
+		 */
+		where = xc_broadcast(0, (xcfunc_t)pool_cache_xcall, pc, NULL);
+		xc_wait(where);
+	}
 
 	mutex_enter(&pc->pc_lock);
 	full = pc->pc_fullgroups;
@@ -2332,6 +2334,40 @@ pool_cache_invalidate(pool_cache_t pc)
 		pool_cache_invalidate_groups(pc, part);
 }
 
+/*
+ * pool_cache_invalidate_cpu:
+ *
+ *	Invalidate all CPU-bound cached objects in pool cache, the CPU being
+ *	identified by its associated index.
+ *	It is caller's responsibility to ensure that no operation is
+ *	taking place on this pool cache while doing this invalidation.
+ *	WARNING: as no inter-CPU locking is enforced, trying to invalidate
+ *	pool cached objects from a CPU different from the one currently running
+ *	may result in an undefined behaviour.
+ */
+static void
+pool_cache_invalidate_cpu(pool_cache_t pc, u_int index)
+{
+
+	pool_cache_cpu_t *cc;
+	pcg_t *pcg;
+
+	if ((cc = pc->pc_cpus[index]) == NULL)
+		return;
+
+	if ((pcg = cc->cc_current) != &pcg_dummy) {
+		pcg->pcg_next = NULL;
+		pool_cache_invalidate_groups(pc, pcg);
+	}
+	if ((pcg = cc->cc_previous) != &pcg_dummy) {
+		pcg->pcg_next = NULL;
+		pool_cache_invalidate_groups(pc, pcg);
+	}
+	if (cc != &pc->pc_cpu0)
+		pool_put(&cache_cpu_pool, cc);
+
+}
+
 void
 pool_cache_set_drain_hook(pool_cache_t pc, void (*fn)(void *, int), void *arg)
 {
@@ -2528,8 +2564,23 @@ pool_cache_put_slow(pool_cache_cpu_t *cc
 	KASSERT(cc->cc_previous->pcg_avail == cc->cc_previous->pcg_size);
 
 	pc = cc->cc_cache;
+	pcg = NULL;
 	cc->cc_misses++;
 
+	/*
+	 * If there are no empty groups in the cache then allocate one
+	 * while still unlocked.
+	 */
+	if (__predict_false(pc->pc_emptygroups == NULL)) {
+		if (__predict_true(!pool_cache_disable)) {
+			pcg = pool_get(pc->pc_pcgpool, PR_NOWAIT);
+		}
+		if (__predict_true(pcg != NULL)) {
+			pcg->pcg_avail = 0;
+			pcg->pcg_size = pc->pc_pcgsize;
+		}
+	}
+
 	/* Lock the cache. */
 	if (__predict_false(!mutex_tryenter(&pc->pc_lock))) {
 		ncsw = curlwp->l_ncsw;
@@ -2542,20 +2593,16 @@
 		 */
 		if (__predict_false(curlwp->l_ncsw != ncsw)) {
 			mutex_exit(&pc->pc_lock);
+			if (pcg != NULL) {
+				pool_put(pc->pc_pcgpool, pcg);
+			}
 			return true;
 		}
 	}
 
 	/* If there are no empty groups in the cache then allocate one. */
-	if (__predict_false((pcg = pc->pc_emptygroups) == NULL)) {
-		if (__predict_true(!pool_cache_disable)) {
-			pcg = pool_get(pc->pc_pcgpool, PR_NOWAIT);
-		}
-		if (__predict_true(pcg != NULL)) {
-			pcg->pcg_avail = 0;
-			pcg->pcg_size = pc->pc_pcgsize;
-		}
-	} else {
+	if (pcg == NULL && pc->pc_emptygroups != NULL) {
+		pcg = pc->pc_emptygroups;
 		pc->pc_emptygroups = pcg->pcg_next;
 		pc->pc_nempty--;
 	}
@@ -2610,6 +2657,7 @@ pool_cache_put_paddr(pool_cache_t pc, vo
 	pcg_t *pcg;
 	int s;
 
+	KASSERT(object != NULL);
 	FREECHECK_IN(&pc->pc_freecheck, object);
 
 	/* Lock out interrupts and disable preemption. */
@@ -2982,7 +3030,7 @@ found:
 		if (pool_in_cg(pp, cc->cc_current, addr) ||
 		    pool_in_cg(pp, cc->cc_previous, addr)) {
 			struct cpu_info *ci =
-			    cpu_lookup_byindex(i);
+			    cpu_lookup(i);
 			incpucache = true;
 			snprintf(cpucachestr,
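
===================================================================

A note on the pool_cache_invalidate() change: the comment added in the -2309
hunk warns that, for caches with constructors, the caller must provide its own
synchronization between the constructor's input and the invalidation; the pool
code itself only destructs the cached objects. The sketch below shows that
intended usage pattern against the pool_cache(9) API as it stands in revision
1.176. The "frob" subsystem, its structure, and its key are hypothetical,
invented purely for illustration.

#include <sys/pool.h>

struct frob {
	int	f_key;
	char	f_buf[64];
};

static pool_cache_t frob_cache;
static int frob_key;	/* external state baked into constructed objects */

/* Constructor: captures frob_key at construction time. */
static int
frob_ctor(void *arg, void *obj, int flags)
{
	struct frob *f = obj;

	f->f_key = frob_key;
	return 0;
}

void
frob_init(void)
{
	frob_cache = pool_cache_init(sizeof(struct frob), 0, 0, 0,
	    "frobcache", NULL, IPL_NONE, frob_ctor, NULL, NULL);
}

/*
 * Re-key the subsystem.  Per the comment added to pool_cache_invalidate(),
 * the caller (not the pool code) must ensure that no pool_cache_get()/put()
 * races with this; here we assume the subsystem has been quiesced.
 * Invalidation destructs every cached object, so subsequent gets re-run
 * the constructor and pick up the new key.
 */
void
frob_rekey(int newkey)
{
	frob_key = newkey;
	pool_cache_invalidate(frob_cache);
}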
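A note on the pool_cache_put_slow() reordering: the empty group that may be
needed to hold the freed object is now allocated before mutex_tryenter(), so
the group allocator is never entered with the cache lock held, and the
speculative group is handed back with pool_put() if the LWP was preempted
while waiting for the lock (the curlwp->l_ncsw check). What follows is a
minimal userspace sketch of that pattern using pthreads; every name in it
(put_slow, struct group, cache_lock, empty_groups) is illustrative and is not
NetBSD kernel code.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct group {
	struct group	*next;
	int		avail;
	int		size;
};

static struct group *empty_groups;	/* global spare-group list */
static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;

static void
put_slow(void)
{
	struct group *g = NULL;

	/*
	 * Optimistic, unlocked peek: if the global list looks empty,
	 * allocate the replacement group now, before taking the lock,
	 * so the allocator is never entered with the lock held.
	 */
	if (empty_groups == NULL) {
		g = calloc(1, sizeof(*g));
		if (g != NULL) {
			g->avail = 0;
			g->size = 64;
		}
	}

	pthread_mutex_lock(&cache_lock);
	/*
	 * In the kernel, if taking the lock blocked and the LWP was
	 * preempted meanwhile, the operation restarts and the speculative
	 * group is returned with pool_put(); that is the new
	 * "if (pcg != NULL) pool_put(...)" branch in the hunk above.
	 */
	if (g == NULL && empty_groups != NULL) {
		/* Someone replenished the list while we were unlocked. */
		g = empty_groups;
		empty_groups = g->next;
	}
	pthread_mutex_unlock(&cache_lock);

	if (g == NULL)
		printf("no spare group; object would go back to the pool\n");
	else
		printf("installed a fresh group (size %d)\n", g->size);
	free(g);	/* the sketch discards it; the kernel installs it */
}

int
main(void)
{
	put_slow();
	return 0;
}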