Please note that diffs are not public domain; they are subject to the copyright notices on the relevant files.

===================================================================
RCS file: /ftp/cvs/cvsroot/src/sys/kern/subr_pool.c,v
rcsdiff: /ftp/cvs/cvsroot/src/sys/kern/subr_pool.c,v: warning: Unknown phrases like `commitid ...;' are present.
retrieving revision 1.151.6.3
retrieving revision 1.182
diff -u -p -r1.151.6.3 -r1.182
--- src/sys/kern/subr_pool.c	2008/09/28 10:40:53	1.151.6.3
+++ src/sys/kern/subr_pool.c	2010/01/20 23:40:42	1.182
@@ -1,4 +1,4 @@
-/* $NetBSD: subr_pool.c,v 1.151.6.3 2008/09/28 10:40:53 mjf Exp $ */
+/* $NetBSD: subr_pool.c,v 1.182 2010/01/20 23:40:42 rmind Exp $ */
 
 /*-
  * Copyright (c) 1997, 1999, 2000, 2002, 2007, 2008 The NetBSD Foundation, Inc.
@@ -31,7 +31,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.151.6.3 2008/09/28 10:40:53 mjf Exp $");
+__KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.182 2010/01/20 23:40:42 rmind Exp $");
 
 #include "opt_ddb.h"
 #include "opt_pool.h"
@@ -69,7 +69,7 @@ __KERNEL_RCSID(0, "$NetBSD: subr_pool.c,
  */
 
 /* List of all pools */
-TAILQ_HEAD(,pool) pool_head = TAILQ_HEAD_INITIALIZER(pool_head);
+static TAILQ_HEAD(, pool) pool_head = TAILQ_HEAD_INITIALIZER(pool_head);
 
 /* Private pool for page header structures */
 #define PHPOOL_MAX 8
@@ -104,6 +104,9 @@ static struct pool *drainpp;
 static kmutex_t pool_head_lock;
 static kcondvar_t pool_busy;
 
+/* This lock protects initialization of a potentially shared pool allocator */
+static kmutex_t pool_allocator_lock;
+
 typedef uint32_t pool_item_bitmap_t;
 #define BITMAP_SIZE (CHAR_BIT * sizeof(pool_item_bitmap_t))
 #define BITMAP_MASK (BITMAP_SIZE - 1)
@@ -188,6 +191,7 @@ static bool pool_cache_get_slow(pool_cac
             void **, paddr_t *, int);
 static void pool_cache_cpu_init1(struct cpu_info *, pool_cache_t);
 static void pool_cache_invalidate_groups(pool_cache_t, pcg_t *);
+static void pool_cache_invalidate_cpu(pool_cache_t, u_int);
 static void pool_cache_xcall(pool_cache_t);
 
 static int pool_catchup(struct pool *);
@@ -230,16 +234,28 @@ int pool_logsize = POOL_LOGSIZE;
 static inline void
 pr_log(struct pool *pp, void *v, int action, const char *file, long line)
 {
-        int n = pp->pr_curlogentry;
+        int n;
         struct pool_log *pl;
 
         if ((pp->pr_roflags & PR_LOGGING) == 0)
                 return;
 
+        if (pp->pr_log == NULL) {
+                if (kmem_map != NULL)
+                        pp->pr_log = malloc(
+                                pool_logsize * sizeof(struct pool_log),
+                                M_TEMP, M_NOWAIT | M_ZERO);
+                if (pp->pr_log == NULL)
+                        return;
+                pp->pr_curlogentry = 0;
+                pp->pr_logsize = pool_logsize;
+        }
+
         /*
          * Fill in the current entry. Wrap around and overwrite
          * the oldest entry if necessary.
          */
+        n = pp->pr_curlogentry;
         pl = &pp->pr_log[n];
         pl->pl_file = file;
         pl->pl_line = line;
@@ -257,7 +273,7 @@ pr_printlog(struct pool *pp, struct pool
         int i = pp->pr_logsize;
         int n = pp->pr_curlogentry;
 
-        if ((pp->pr_roflags & PR_LOGGING) == 0)
+        if (pp->pr_log == NULL)
                 return;
 
         /*
@@ -587,17 +603,11 @@ void
 pool_subsystem_init(void)
 {
         struct pool_allocator *pa;
-        __link_set_decl(pools, struct link_pool_init);
-        struct link_pool_init * const *pi;
 
         mutex_init(&pool_head_lock, MUTEX_DEFAULT, IPL_NONE);
+        mutex_init(&pool_allocator_lock, MUTEX_DEFAULT, IPL_NONE);
         cv_init(&pool_busy, "poolbusy");
 
-        __link_set_foreach(pi, pools)
-                pool_init((*pi)->pp, (*pi)->size, (*pi)->align,
-                    (*pi)->align_offset, (*pi)->flags, (*pi)->wchan,
-                    (*pi)->palloc, (*pi)->ipl);
-
         while ((pa = SLIST_FIRST(&pa_deferinitq)) != NULL) {
                 KASSERT(pa->pa_backingmapptr != NULL);
                 KASSERT(*pa->pa_backingmapptr != NULL);
@@ -656,7 +666,9 @@ pool_init(struct pool *pp, size_t size,
                 palloc = &pool_allocator_nointr_fullpage;
         }
 #endif /* POOL_SUBPAGE */
-        if ((palloc->pa_flags & PA_INITIALIZED) == 0) {
+        if (!cold)
+                mutex_enter(&pool_allocator_lock);
+        if (palloc->pa_refcnt++ == 0) {
                 if (palloc->pa_pagesz == 0)
                         palloc->pa_pagesz = PAGE_SIZE;
 
@@ -669,8 +681,9 @@ pool_init(struct pool *pp, size_t size,
                 if (palloc->pa_backingmapptr != NULL) {
                         pa_reclaim_register(palloc);
                 }
-                palloc->pa_flags |= PA_INITIALIZED;
         }
+        if (!cold)
+                mutex_exit(&pool_allocator_lock);
 
         if (align == 0)
                 align = ALIGN(1);
@@ -793,16 +806,7 @@ pool_init(struct pool *pp, size_t size,
         pp->pr_nidle = 0;
         pp->pr_refcnt = 0;
 
-#ifdef POOL_DIAGNOSTIC
-        if (flags & PR_LOGGING) {
-                if (kmem_map == NULL ||
-                    (pp->pr_log = malloc(pool_logsize * sizeof(struct pool_log),
-                     M_TEMP, M_NOWAIT)) == NULL)
-                        pp->pr_roflags &= ~PR_LOGGING;
-                pp->pr_curlogentry = 0;
-                pp->pr_logsize = pool_logsize;
-        }
-#endif
+        pp->pr_log = NULL;
 
         pp->pr_entered_file = NULL;
         pp->pr_entered_line = 0;
@@ -851,7 +855,7 @@ pool_init(struct pool *pp, size_t size,
         }
 
         /* Insert into the list of all pools. */
-        if (__predict_true(!cold))
+        if (!cold)
                 mutex_enter(&pool_head_lock);
         TAILQ_FOREACH(pp1, &pool_head, pr_poollist) {
                 if (strcmp(pp1->pr_wchan, pp->pr_wchan) > 0)
@@ -861,14 +865,14 @@ pool_init(struct pool *pp, size_t size,
                 TAILQ_INSERT_TAIL(&pool_head, pp, pr_poollist);
         else
                 TAILQ_INSERT_BEFORE(pp1, pp, pr_poollist);
-        if (__predict_true(!cold))
+        if (!cold)
                 mutex_exit(&pool_head_lock);
 
         /* Insert this into the list of pools using this allocator. */
-        if (__predict_true(!cold))
+        if (!cold)
                 mutex_enter(&palloc->pa_lock);
         TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list);
-        if (__predict_true(!cold))
+        if (!cold)
                 mutex_exit(&palloc->pa_lock);
 
         pool_reclaim_register(pp);
@@ -898,6 +902,11 @@ pool_destroy(struct pool *pp)
         TAILQ_REMOVE(&pp->pr_alloc->pa_list, pp, pr_alloc_list);
         mutex_exit(&pp->pr_alloc->pa_lock);
 
+        mutex_enter(&pool_allocator_lock);
+        if (--pp->pr_alloc->pa_refcnt == 0)
+                mutex_destroy(&pp->pr_alloc->pa_lock);
+        mutex_exit(&pool_allocator_lock);
+
         mutex_enter(&pp->pr_lock);
 
         KASSERT(pp->pr_cache == NULL);
@@ -923,8 +932,10 @@ pool_destroy(struct pool *pp)
         pr_pagelist_free(pp, &pq);
 
 #ifdef POOL_DIAGNOSTIC
-        if ((pp->pr_roflags & PR_LOGGING) != 0)
+        if (pp->pr_log != NULL) {
                 free(pp->pr_log, M_TEMP);
+                pp->pr_log = NULL;
+        }
 #endif
 
         cv_destroy(&pp->pr_cv);
@@ -2129,9 +2140,7 @@ void
 pool_cache_destroy(pool_cache_t pc)
 {
         struct pool *pp = &pc->pc_pool;
-        pool_cache_cpu_t *cc;
-        pcg_t *pcg;
-        int i;
+        u_int i;
 
         /* Remove it from the global list. */
         mutex_enter(&pool_head_lock);
@@ -2149,20 +2158,8 @@ pool_cache_destroy(pool_cache_t pc)
         mutex_exit(&pp->pr_lock);
 
         /* Destroy per-CPU data */
-        for (i = 0; i < MAXCPUS; i++) {
-                if ((cc = pc->pc_cpus[i]) == NULL)
-                        continue;
-                if ((pcg = cc->cc_current) != &pcg_dummy) {
-                        pcg->pcg_next = NULL;
-                        pool_cache_invalidate_groups(pc, pcg);
-                }
-                if ((pcg = cc->cc_previous) != &pcg_dummy) {
-                        pcg->pcg_next = NULL;
-                        pool_cache_invalidate_groups(pc, pcg);
-                }
-                if (cc != &pc->pc_cpu0)
-                        pool_put(&cache_cpu_pool, cc);
-        }
+        for (i = 0; i < MAXCPUS; i++)
+                pool_cache_invalidate_cpu(pc, i);
 
         /* Finally, destroy it. */
         mutex_destroy(&pc->pc_lock);
@@ -2309,12 +2306,35 @@ pool_cache_invalidate_groups(pool_cache_
  *
  *      Invalidate a pool cache (destruct and release all of the
  *      cached objects). Does not reclaim objects from the pool.
+ *
+ *      Note: For pool caches that provide constructed objects, there
+ *      is an assumption that another level of synchronization is occurring
+ *      between the input to the constructor and the cache invalidation.
  */
 void
 pool_cache_invalidate(pool_cache_t pc)
 {
         pcg_t *full, *empty, *part;
+#if 0
+        uint64_t where;
 
+        if (ncpu < 2 || !mp_online) {
+                /*
+                 * We might be called early enough in the boot process
+                 * for the CPU data structures to not be fully initialized.
+                 * In this case, simply gather the local CPU's cache now
+                 * since it will be the only one running.
+                 */
+                pool_cache_xcall(pc);
+        } else {
+                /*
+                 * Gather all of the CPU-specific caches into the
+                 * global cache.
+                 */
+                where = xc_broadcast(0, (xcfunc_t)pool_cache_xcall, pc, NULL);
+                xc_wait(where);
+        }
+#endif
         mutex_enter(&pc->pc_lock);
         full = pc->pc_fullgroups;
         empty = pc->pc_emptygroups;
@@ -2332,6 +2352,40 @@ pool_cache_invalidate(pool_cache_t pc)
         pool_cache_invalidate_groups(pc, part);
 }
 
+/*
+ * pool_cache_invalidate_cpu:
+ *
+ *      Invalidate all CPU-bound cached objects in pool cache, the CPU being
+ *      identified by its associated index.
+ *      It is caller's responsibility to ensure that no operation is
+ *      taking place on this pool cache while doing this invalidation.
+ *      WARNING: as no inter-CPU locking is enforced, trying to invalidate
+ *      pool cached objects from a CPU different from the one currently running
+ *      may result in an undefined behaviour.
+ */
+static void
+pool_cache_invalidate_cpu(pool_cache_t pc, u_int index)
+{
+
+        pool_cache_cpu_t *cc;
+        pcg_t *pcg;
+
+        if ((cc = pc->pc_cpus[index]) == NULL)
+                return;
+
+        if ((pcg = cc->cc_current) != &pcg_dummy) {
+                pcg->pcg_next = NULL;
+                pool_cache_invalidate_groups(pc, pcg);
+        }
+        if ((pcg = cc->cc_previous) != &pcg_dummy) {
+                pcg->pcg_next = NULL;
+                pool_cache_invalidate_groups(pc, pcg);
+        }
+        if (cc != &pc->pc_cpu0)
+                pool_put(&cache_cpu_pool, cc);
+
+}
+
 void
 pool_cache_set_drain_hook(pool_cache_t pc, void (*fn)(void *, int), void *arg)
 {
@@ -2528,8 +2582,23 @@ pool_cache_put_slow(pool_cache_cpu_t *cc
         KASSERT(cc->cc_previous->pcg_avail == cc->cc_previous->pcg_size);
 
         pc = cc->cc_cache;
+        pcg = NULL;
         cc->cc_misses++;
 
+        /*
+         * If there are no empty groups in the cache then allocate one
+         * while still unlocked.
+         */
+        if (__predict_false(pc->pc_emptygroups == NULL)) {
+                if (__predict_true(!pool_cache_disable)) {
+                        pcg = pool_get(pc->pc_pcgpool, PR_NOWAIT);
+                }
+                if (__predict_true(pcg != NULL)) {
+                        pcg->pcg_avail = 0;
+                        pcg->pcg_size = pc->pc_pcgsize;
+                }
+        }
+
         /* Lock the cache. */
         if (__predict_false(!mutex_tryenter(&pc->pc_lock))) {
                 ncsw = curlwp->l_ncsw;
@@ -2542,20 +2611,16 @@ pool_cache_put_slow(pool_cache_cpu_t *cc
                  */
                 if (__predict_false(curlwp->l_ncsw != ncsw)) {
                         mutex_exit(&pc->pc_lock);
+                        if (pcg != NULL) {
+                                pool_put(pc->pc_pcgpool, pcg);
+                        }
                         return true;
                 }
         }
 
         /* If there are no empty groups in the cache then allocate one. */
-        if (__predict_false((pcg = pc->pc_emptygroups) == NULL)) {
-                if (__predict_true(!pool_cache_disable)) {
-                        pcg = pool_get(pc->pc_pcgpool, PR_NOWAIT);
-                }
-                if (__predict_true(pcg != NULL)) {
-                        pcg->pcg_avail = 0;
-                        pcg->pcg_size = pc->pc_pcgsize;
-                }
-        } else {
+        if (pcg == NULL && pc->pc_emptygroups != NULL) {
+                pcg = pc->pc_emptygroups;
                 pc->pc_emptygroups = pcg->pcg_next;
                 pc->pc_nempty--;
         }
@@ -2610,6 +2675,7 @@ pool_cache_put_paddr(pool_cache_t pc, vo
         pcg_t *pcg;
         int s;
 
+        KASSERT(object != NULL);
         FREECHECK_IN(&pc->pc_freecheck, object);
 
         /* Lock out interrupts and disable preemption. */
@@ -2982,7 +3048,7 @@ found:
                         if (pool_in_cg(pp, cc->cc_current, addr) ||
                             pool_in_cg(pp, cc->cc_previous, addr)) {
                                 struct cpu_info *ci =
-                                    cpu_lookup_byindex(i);
+                                    cpu_lookup(i);
 
                                 incpucache = true;
                                 snprintf(cpucachestr,