Please note that diffs are not public domain; they are subject to the copyright notices on the relevant files.

===================================================================
RCS file: /ftp/cvs/cvsroot/src/sys/kern/subr_pool.c,v
rcsdiff: /ftp/cvs/cvsroot/src/sys/kern/subr_pool.c,v: warning: Unknown phrases like `commitid ...;' are present.
retrieving revision 1.158.2.4
retrieving revision 1.167
diff -u -p -r1.158.2.4 -r1.167
--- src/sys/kern/subr_pool.c	2010/03/11 15:04:18	1.158.2.4
+++ src/sys/kern/subr_pool.c	2008/08/08 16:58:01	1.167
@@ -1,4 +1,4 @@
-/*	$NetBSD: subr_pool.c,v 1.158.2.4 2010/03/11 15:04:18 yamt Exp $	*/
+/*	$NetBSD: subr_pool.c,v 1.167 2008/08/08 16:58:01 skrll Exp $	*/
 
 /*-
  * Copyright (c) 1997, 1999, 2000, 2002, 2007, 2008 The NetBSD Foundation, Inc.
@@ -31,7 +31,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.158.2.4 2010/03/11 15:04:18 yamt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.167 2008/08/08 16:58:01 skrll Exp $");
 
 #include "opt_ddb.h"
 #include "opt_pool.h"
@@ -69,7 +69,7 @@ __KERNEL_RCSID(0, "$NetBSD: subr_pool.c,
  */
 
 /* List of all pools */
-static TAILQ_HEAD(, pool) pool_head = TAILQ_HEAD_INITIALIZER(pool_head);
+TAILQ_HEAD(,pool) pool_head = TAILQ_HEAD_INITIALIZER(pool_head);
 
 /* Private pool for page header structures */
 #define PHPOOL_MAX	8
@@ -104,9 +104,6 @@ static struct pool *drainpp;
 static kmutex_t pool_head_lock;
 static kcondvar_t pool_busy;
 
-/* This lock protects initialization of a potentially shared pool allocator */
-static kmutex_t pool_allocator_lock;
-
 typedef uint32_t pool_item_bitmap_t;
 #define	BITMAP_SIZE	(CHAR_BIT * sizeof(pool_item_bitmap_t))
 #define	BITMAP_MASK	(BITMAP_SIZE - 1)
@@ -183,7 +180,7 @@ TAILQ_HEAD(,pool_cache) pool_cache_head
     TAILQ_HEAD_INITIALIZER(pool_cache_head);
 
 int pool_cache_disable;		/* global disable for caching */
-static const pcg_t pcg_dummy;	/* zero sized: always empty, yet always full */
+static pcg_t pcg_dummy;	/* zero sized: always empty, yet always full */
 
 static bool	pool_cache_put_slow(pool_cache_cpu_t *, int, void *);
@@ -191,7 +188,6 @@ static bool pool_cache_get_slow(pool_cac
 		    void **, paddr_t *, int);
 static void	pool_cache_cpu_init1(struct cpu_info *, pool_cache_t);
 static void	pool_cache_invalidate_groups(pool_cache_t, pcg_t *);
-static void	pool_cache_invalidate_cpu(pool_cache_t, u_int);
 static void	pool_cache_xcall(pool_cache_t);
 
 static int	pool_catchup(struct pool *);
@@ -234,28 +230,16 @@ int pool_logsize = POOL_LOGSIZE;
 static inline void
 pr_log(struct pool *pp, void *v, int action, const char *file, long line)
 {
-	int n;
+	int n = pp->pr_curlogentry;
 	struct pool_log *pl;
 
 	if ((pp->pr_roflags & PR_LOGGING) == 0)
 		return;
 
-	if (pp->pr_log == NULL) {
-		if (kmem_map != NULL)
-			pp->pr_log = malloc(
-				pool_logsize * sizeof(struct pool_log),
-				M_TEMP, M_NOWAIT | M_ZERO);
-		if (pp->pr_log == NULL)
-			return;
-		pp->pr_curlogentry = 0;
-		pp->pr_logsize = pool_logsize;
-	}
-
 	/*
 	 * Fill in the current entry. Wrap around and overwrite
 	 * the oldest entry if necessary.
 	 */
-	n = pp->pr_curlogentry;
 	pl = &pp->pr_log[n];
 	pl->pl_file = file;
 	pl->pl_line = line;
@@ -273,7 +257,7 @@ pr_printlog(struct pool *pp, struct pool
 	int i = pp->pr_logsize;
 	int n = pp->pr_curlogentry;
 
-	if (pp->pr_log == NULL)
+	if ((pp->pr_roflags & PR_LOGGING) == 0)
 		return;
 
 	/*
@@ -603,11 +587,17 @@ void
 pool_subsystem_init(void)
 {
 	struct pool_allocator *pa;
+	__link_set_decl(pools, struct link_pool_init);
+	struct link_pool_init * const *pi;
 
 	mutex_init(&pool_head_lock, MUTEX_DEFAULT, IPL_NONE);
-	mutex_init(&pool_allocator_lock, MUTEX_DEFAULT, IPL_NONE);
 	cv_init(&pool_busy, "poolbusy");
 
+	__link_set_foreach(pi, pools)
+		pool_init((*pi)->pp, (*pi)->size, (*pi)->align,
+		    (*pi)->align_offset, (*pi)->flags, (*pi)->wchan,
+		    (*pi)->palloc, (*pi)->ipl);
+
 	while ((pa = SLIST_FIRST(&pa_deferinitq)) != NULL) {
 		KASSERT(pa->pa_backingmapptr != NULL);
 		KASSERT(*pa->pa_backingmapptr != NULL);
@@ -666,9 +656,7 @@ pool_init(struct pool *pp, size_t size,
 		palloc = &pool_allocator_nointr_fullpage;
 	}
 #endif /* POOL_SUBPAGE */
-	if (!cold)
-		mutex_enter(&pool_allocator_lock);
-	if (palloc->pa_refcnt++ == 0) {
+	if ((palloc->pa_flags & PA_INITIALIZED) == 0) {
 		if (palloc->pa_pagesz == 0)
 			palloc->pa_pagesz = PAGE_SIZE;
 
@@ -681,9 +669,8 @@ pool_init(struct pool *pp, size_t size,
 		if (palloc->pa_backingmapptr != NULL) {
 			pa_reclaim_register(palloc);
 		}
+		palloc->pa_flags |= PA_INITIALIZED;
 	}
-	if (!cold)
-		mutex_exit(&pool_allocator_lock);
 
 	if (align == 0)
 		align = ALIGN(1);
@@ -806,7 +793,16 @@ pool_init(struct pool *pp, size_t size,
 	pp->pr_nidle = 0;
 	pp->pr_refcnt = 0;
 
-	pp->pr_log = NULL;
+#ifdef POOL_DIAGNOSTIC
+	if (flags & PR_LOGGING) {
+		if (kmem_map == NULL ||
+		    (pp->pr_log = malloc(pool_logsize * sizeof(struct pool_log),
+		     M_TEMP, M_NOWAIT)) == NULL)
+			pp->pr_roflags &= ~PR_LOGGING;
+		pp->pr_curlogentry = 0;
+		pp->pr_logsize = pool_logsize;
+	}
+#endif
 
 	pp->pr_entered_file = NULL;
 	pp->pr_entered_line = 0;
@@ -855,7 +851,7 @@ pool_init(struct pool *pp, size_t size,
 	}
 
 	/* Insert into the list of all pools. */
-	if (!cold)
+	if (__predict_true(!cold))
 		mutex_enter(&pool_head_lock);
 	TAILQ_FOREACH(pp1, &pool_head, pr_poollist) {
 		if (strcmp(pp1->pr_wchan, pp->pr_wchan) > 0)
@@ -865,14 +861,14 @@ pool_init(struct pool *pp, size_t size,
 		TAILQ_INSERT_TAIL(&pool_head, pp, pr_poollist);
 	else
 		TAILQ_INSERT_BEFORE(pp1, pp, pr_poollist);
-	if (!cold)
+	if (__predict_true(!cold))
 		mutex_exit(&pool_head_lock);
 
 	/* Insert this into the list of pools using this allocator. */
-	if (!cold)
+	if (__predict_true(!cold))
 		mutex_enter(&palloc->pa_lock);
 	TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list);
-	if (!cold)
+	if (__predict_true(!cold))
 		mutex_exit(&palloc->pa_lock);
 
 	pool_reclaim_register(pp);
@@ -902,11 +898,6 @@ pool_destroy(struct pool *pp)
 	TAILQ_REMOVE(&pp->pr_alloc->pa_list, pp, pr_alloc_list);
 	mutex_exit(&pp->pr_alloc->pa_lock);
 
-	mutex_enter(&pool_allocator_lock);
-	if (--pp->pr_alloc->pa_refcnt == 0)
-		mutex_destroy(&pp->pr_alloc->pa_lock);
-	mutex_exit(&pool_allocator_lock);
-
 	mutex_enter(&pp->pr_lock);
 
 	KASSERT(pp->pr_cache == NULL);
@@ -932,10 +923,8 @@ pool_destroy(struct pool *pp)
 	pr_pagelist_free(pp, &pq);
 
 #ifdef POOL_DIAGNOSTIC
-	if (pp->pr_log != NULL) {
+	if ((pp->pr_roflags & PR_LOGGING) != 0)
 		free(pp->pr_log, M_TEMP);
-		pp->pr_log = NULL;
-	}
 #endif
 
 	cv_destroy(&pp->pr_cv);
@@ -1538,8 +1527,6 @@ pool_update_curpage(struct pool *pp)
 	if (pp->pr_curpage == NULL) {
 		pp->pr_curpage = LIST_FIRST(&pp->pr_emptypages);
 	}
-	KASSERT((pp->pr_curpage == NULL && pp->pr_nitems == 0) ||
-	    (pp->pr_curpage != NULL && pp->pr_nitems > 0));
 }
 
 void
@@ -2140,7 +2127,9 @@ void
 pool_cache_destroy(pool_cache_t pc)
 {
 	struct pool *pp = &pc->pc_pool;
-	u_int i;
+	pool_cache_cpu_t *cc;
+	pcg_t *pcg;
+	int i;
 
 	/* Remove it from the global list. */
 	mutex_enter(&pool_head_lock);
@@ -2158,8 +2147,20 @@ pool_cache_destroy(pool_cache_t pc)
 	mutex_exit(&pp->pr_lock);
 
 	/* Destroy per-CPU data */
-	for (i = 0; i < MAXCPUS; i++)
-		pool_cache_invalidate_cpu(pc, i);
+	for (i = 0; i < MAXCPUS; i++) {
+		if ((cc = pc->pc_cpus[i]) == NULL)
+			continue;
+		if ((pcg = cc->cc_current) != &pcg_dummy) {
+			pcg->pcg_next = NULL;
+			pool_cache_invalidate_groups(pc, pcg);
+		}
+		if ((pcg = cc->cc_previous) != &pcg_dummy) {
+			pcg->pcg_next = NULL;
+			pool_cache_invalidate_groups(pc, pcg);
+		}
+		if (cc != &pc->pc_cpu0)
+			pool_put(&cache_cpu_pool, cc);
+	}
 
 	/* Finally, destroy it. */
 	mutex_destroy(&pc->pc_lock);
@@ -2207,8 +2208,8 @@ pool_cache_cpu_init1(struct cpu_info *ci
 	cc->cc_cpuindex = index;
 	cc->cc_hits = 0;
 	cc->cc_misses = 0;
-	cc->cc_current = __UNCONST(&pcg_dummy);
-	cc->cc_previous = __UNCONST(&pcg_dummy);
+	cc->cc_current = &pcg_dummy;
+	cc->cc_previous = &pcg_dummy;
 
 	pc->pc_cpus[index] = cc;
 }
@@ -2306,35 +2307,12 @@ pool_cache_invalidate_groups(pool_cache_
  *
  *	Invalidate a pool cache (destruct and release all of the
  *	cached objects).  Does not reclaim objects from the pool.
- *
- *	Note: For pool caches that provide constructed objects, there
- *	is an assumption that another level of synchronization is occurring
- *	between the input to the constructor and the cache invalidation.
  */
 void
 pool_cache_invalidate(pool_cache_t pc)
 {
 	pcg_t *full, *empty, *part;
-#if 0
-	uint64_t where;
 
-	if (ncpu < 2 || !mp_online) {
-		/*
-		 * We might be called early enough in the boot process
-		 * for the CPU data structures to not be fully initialized.
-		 * In this case, simply gather the local CPU's cache now
-		 * since it will be the only one running.
-		 */
-		pool_cache_xcall(pc);
-	} else {
-		/*
-		 * Gather all of the CPU-specific caches into the
-		 * global cache.
-		 */
-		where = xc_broadcast(0, (xcfunc_t)pool_cache_xcall, pc, NULL);
-		xc_wait(where);
-	}
-#endif
 	mutex_enter(&pc->pc_lock);
 	full = pc->pc_fullgroups;
 	empty = pc->pc_emptygroups;
@@ -2352,40 +2330,6 @@ pool_cache_invalidate(pool_cache_t pc)
 	pool_cache_invalidate_groups(pc, part);
 }
 
-/*
- * pool_cache_invalidate_cpu:
- *
- *	Invalidate all CPU-bound cached objects in pool cache, the CPU being
- *	identified by its associated index.
- *	It is caller's responsibility to ensure that no operation is
- *	taking place on this pool cache while doing this invalidation.
- *	WARNING: as no inter-CPU locking is enforced, trying to invalidate
- *	pool cached objects from a CPU different from the one currently running
- *	may result in an undefined behaviour.
- */
-static void
-pool_cache_invalidate_cpu(pool_cache_t pc, u_int index)
-{
-
-	pool_cache_cpu_t *cc;
-	pcg_t *pcg;
-
-	if ((cc = pc->pc_cpus[index]) == NULL)
-		return;
-
-	if ((pcg = cc->cc_current) != &pcg_dummy) {
-		pcg->pcg_next = NULL;
-		pool_cache_invalidate_groups(pc, pcg);
-	}
-	if ((pcg = cc->cc_previous) != &pcg_dummy) {
-		pcg->pcg_next = NULL;
-		pool_cache_invalidate_groups(pc, pcg);
-	}
-	if (cc != &pc->pc_cpu0)
-		pool_put(&cache_cpu_pool, cc);
-
-}
-
 void
 pool_cache_set_drain_hook(pool_cache_t pc, void (*fn)(void *, int), void *arg)
 {
@@ -2423,9 +2367,6 @@ pool_cache_get_slow(pool_cache_cpu_t *cc
 	pool_cache_t pc;
 	void *object;
 
-	KASSERT(cc->cc_current->pcg_avail == 0);
-	KASSERT(cc->cc_previous->pcg_avail == 0);
-
 	pc = cc->cc_cache;
 	cc->cc_misses++;
 
@@ -2578,27 +2519,9 @@ pool_cache_put_slow(pool_cache_cpu_t *cc
 	uint64_t ncsw;
 	pool_cache_t pc;
 
-	KASSERT(cc->cc_current->pcg_avail == cc->cc_current->pcg_size);
-	KASSERT(cc->cc_previous->pcg_avail == cc->cc_previous->pcg_size);
-
 	pc = cc->cc_cache;
-	pcg = NULL;
 	cc->cc_misses++;
 
-	/*
-	 * If there are no empty groups in the cache then allocate one
-	 * while still unlocked.
-	 */
-	if (__predict_false(pc->pc_emptygroups == NULL)) {
-		if (__predict_true(!pool_cache_disable)) {
-			pcg = pool_get(pc->pc_pcgpool, PR_NOWAIT);
-		}
-		if (__predict_true(pcg != NULL)) {
-			pcg->pcg_avail = 0;
-			pcg->pcg_size = pc->pc_pcgsize;
-		}
-	}
-
 	/* Lock the cache. */
 	if (__predict_false(!mutex_tryenter(&pc->pc_lock))) {
 		ncsw = curlwp->l_ncsw;
@@ -2611,16 +2534,20 @@ pool_cache_put_slow(pool_cache_cpu_t *cc
 		 */
 		if (__predict_false(curlwp->l_ncsw != ncsw)) {
 			mutex_exit(&pc->pc_lock);
-			if (pcg != NULL) {
-				pool_put(pc->pc_pcgpool, pcg);
-			}
 			return true;
 		}
 	}
 
 	/* If there are no empty groups in the cache then allocate one. */
-	if (pcg == NULL && pc->pc_emptygroups != NULL) {
-		pcg = pc->pc_emptygroups;
+	if (__predict_false((pcg = pc->pc_emptygroups) == NULL)) {
+		if (__predict_true(!pool_cache_disable)) {
+			pcg = pool_get(pc->pc_pcgpool, PR_NOWAIT);
+		}
+		if (__predict_true(pcg != NULL)) {
+			pcg->pcg_avail = 0;
+			pcg->pcg_size = pc->pc_pcgsize;
+		}
+	} else {
 		pc->pc_emptygroups = pcg->pcg_next;
 		pc->pc_nempty--;
 	}
@@ -2675,7 +2602,6 @@ pool_cache_put_paddr(pool_cache_t pc, vo
 	pcg_t *pcg;
 	int s;
 
-	KASSERT(object != NULL);
 	FREECHECK_IN(&pc->pc_freecheck, object);
 
 	/* Lock out interrupts and disable preemption. */
@@ -2732,9 +2658,9 @@ pool_cache_xcall(pool_cache_t pc)
 	mutex_enter(&pc->pc_lock);
 	cc = pc->pc_cpus[curcpu()->ci_index];
 	cur = cc->cc_current;
-	cc->cc_current = __UNCONST(&pcg_dummy);
+	cc->cc_current = &pcg_dummy;
 	prev = cc->cc_previous;
-	cc->cc_previous = __UNCONST(&pcg_dummy);
+	cc->cc_previous = &pcg_dummy;
 	if (cur != &pcg_dummy) {
 		if (cur->pcg_avail == cur->pcg_size) {
 			list = &pc->pc_fullgroups;
@@ -3048,7 +2974,7 @@ found:
 			if (pool_in_cg(pp, cc->cc_current, addr) ||
 			    pool_in_cg(pp, cc->cc_previous, addr)) {
 				struct cpu_info *ci =
-				    cpu_lookup(i);
+				    cpu_lookup_byindex(i);
 				incpucache = true;
 				snprintf(cpucachestr,
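
The hunks that touch pcg_dummy above (its "static const" declaration, the __UNCONST() assignments in pool_cache_cpu_init1() and pool_cache_xcall(), and the sentinel comparisons in pool_cache_destroy()) all rely on the same idiom: a zero-sized, read-only group that every per-CPU cache points at by default, so the get and put fast paths can fail their availability tests without a NULL check. The program below is a standalone sketch of that idiom only; the structure layouts are simplified stand-ins and UNCONST() is a local substitute for the kernel's __UNCONST() macro, not the actual subr_pool.c definitions.

/*
 * Standalone illustration only: simplified stand-ins for the kernel's
 * pcg_t and pool_cache_cpu_t, and a local UNCONST() in place of the
 * kernel's __UNCONST().
 */
#include <stdio.h>

struct pcg {				/* cache group (simplified) */
	struct pcg	*pcg_next;
	unsigned int	 pcg_avail;	/* objects currently in the group */
	unsigned int	 pcg_size;	/* capacity of the group */
};

struct cache_cpu {			/* per-CPU cache state (simplified) */
	struct pcg	*cc_current;
	struct pcg	*cc_previous;
};

/*
 * Zero-initialized, read-only sentinel: pcg_avail == 0 makes it look
 * empty to the allocation fast path, and pcg_avail == pcg_size makes it
 * look full to the free fast path, so both paths fall through to their
 * slow path without ever testing for NULL.
 */
static const struct pcg pcg_dummy;

/* Cast away const so the sentinel's address fits a non-const field. */
#define	UNCONST(a)	((void *)(const void *)(a))

int
main(void)
{
	struct cache_cpu cc;

	cc.cc_current = UNCONST(&pcg_dummy);
	cc.cc_previous = UNCONST(&pcg_dummy);

	/* Both tests fail for the sentinel, forcing the slow paths. */
	printf("get fast path usable: %d\n", cc.cc_current->pcg_avail > 0);
	printf("put fast path usable: %d\n",
	    cc.cc_current->pcg_avail < cc.cc_current->pcg_size);
	return 0;
}

Both printfs report 0: a get sees an empty group and a put sees a full one, so each path drops into pool_cache_get_slow()/pool_cache_put_slow(), which is where rev 1.158.2.4's KASSERTs (shown as removed lines in those hunks) check exactly that invariant.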