Please note that diffs are not public domain; they are subject to the copyright notices on the relevant files.

===================================================================
RCS file: /ftp/cvs/cvsroot/src/sys/kern/subr_pool.c,v
rcsdiff: /ftp/cvs/cvsroot/src/sys/kern/subr_pool.c,v: warning: Unknown phrases like `commitid ...;' are present.
retrieving revision 1.129.12.3
retrieving revision 1.137.2.2
diff -u -p -r1.129.12.3 -r1.137.2.2
--- src/sys/kern/subr_pool.c	2007/11/11 16:48:08	1.129.12.3
+++ src/sys/kern/subr_pool.c	2007/12/12 22:03:31	1.137.2.2
@@ -1,4 +1,4 @@
-/*	$NetBSD: subr_pool.c,v 1.129.12.3 2007/11/11 16:48:08 joerg Exp $	*/
+/*	$NetBSD: subr_pool.c,v 1.137.2.2 2007/12/12 22:03:31 ad Exp $	*/
 
 /*-
  * Copyright (c) 1997, 1999, 2000, 2002, 2007 The NetBSD Foundation, Inc.
@@ -38,7 +38,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.129.12.3 2007/11/11 16:48:08 joerg Exp $");
+__KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.137.2.2 2007/12/12 22:03:31 ad Exp $");
 
 #include "opt_pool.h"
 #include "opt_poollog.h"
@@ -77,10 +77,6 @@ __KERNEL_RCSID(0, "$NetBSD: subr_pool.c,
 /* List of all pools */
 LIST_HEAD(,pool) pool_head = LIST_HEAD_INITIALIZER(pool_head);
 
-/* List of all caches. */
-LIST_HEAD(,pool_cache) pool_cache_head =
-    LIST_HEAD_INITIALIZER(pool_cache_head);
-
 /* Private pool for page header structures */
 #define	PHPOOL_MAX	8
 static struct pool phpool[PHPOOL_MAX];
@@ -185,6 +181,13 @@ static struct pool pcgpool;
 static struct pool cache_pool;
 static struct pool cache_cpu_pool;
 
+/* List of all caches. */
+LIST_HEAD(,pool_cache) pool_cache_head =
+    LIST_HEAD_INITIALIZER(pool_cache_head);
+
+int pool_cache_disable;
+
+
 static pool_cache_cpu_t *pool_cache_put_slow(pool_cache_cpu_t *, int *,
 					     void *, paddr_t);
 static pool_cache_cpu_t *pool_cache_get_slow(pool_cache_cpu_t *, int *,
@@ -802,7 +805,14 @@ pool_init(struct pool *pp, size_t size,
 	pp->pr_entered_file = NULL;
 	pp->pr_entered_line = 0;
 
-	mutex_init(&pp->pr_lock, MUTEX_DEFAULT, ipl);
+	/*
+	 * XXXAD hack to prevent IP input processing from blocking.
+	 */
+	if (ipl == IPL_SOFTNET) {
+		mutex_init(&pp->pr_lock, MUTEX_DEFAULT, IPL_VM);
+	} else {
+		mutex_init(&pp->pr_lock, MUTEX_DEFAULT, ipl);
+	}
 	cv_init(&pp->pr_cv, wchan);
 	pp->pr_ipl = ipl;
 
@@ -2040,7 +2050,14 @@ pool_cache_bootstrap(pool_cache_t pc, si
 		palloc = &pool_allocator_nointr;
 
 	pool_init(pp, size, align, align_offset, flags, wchan, palloc, ipl);
-	mutex_init(&pc->pc_lock, MUTEX_DEFAULT, pp->pr_ipl);
+	/*
+	 * XXXAD hack to prevent IP input processing from blocking.
+	 */
+	if (ipl == IPL_SOFTNET) {
+		mutex_init(&pc->pc_lock, MUTEX_DEFAULT, IPL_VM);
+	} else {
+		mutex_init(&pc->pc_lock, MUTEX_DEFAULT, ipl);
+	}
 
 	if (ctor == NULL) {
 		ctor = (int (*)(void *, void *, int))nullop;
@@ -2062,12 +2079,18 @@ pool_cache_bootstrap(pool_cache_t pc, si
 	pc->pc_nfull = 0;
 	pc->pc_contended = 0;
 	pc->pc_refcnt = 0;
+	pc->pc_freecheck = NULL;
 
 	/* Allocate per-CPU caches. */
 	memset(pc->pc_cpus, 0, sizeof(pc->pc_cpus));
 	pc->pc_ncpu = 0;
-	for (CPU_INFO_FOREACH(cii, ci)) {
-		pool_cache_cpu_init1(ci, pc);
+	if (ncpu == 0) {
+		/* XXX For sparc: boot CPU is not attached yet. */
+		pool_cache_cpu_init1(curcpu(), pc);
+	} else {
+		for (CPU_INFO_FOREACH(cii, ci)) {
+			pool_cache_cpu_init1(ci, pc);
+		}
 	}
 
 	if (__predict_true(!cold)) {
@@ -2142,11 +2165,15 @@ static void
 pool_cache_cpu_init1(struct cpu_info *ci, pool_cache_t pc)
 {
 	pool_cache_cpu_t *cc;
+	int index;
+
+	index = ci->ci_index;
+	KASSERT(index < MAXCPUS);
 
 	KASSERT(((uintptr_t)pc->pc_cpus & (CACHE_LINE_SIZE - 1)) == 0);
 
-	if ((cc = pc->pc_cpus[ci->ci_index]) != NULL) {
-		KASSERT(cc->cc_cpu = ci);
+	if ((cc = pc->pc_cpus[index]) != NULL) {
+		KASSERT(cc->cc_cpuindex == index);
 		return;
 	}
 
@@ -2167,13 +2194,13 @@ pool_cache_cpu_init1(struct cpu_info *ci
 	cc->cc_ipl = pc->pc_pool.pr_ipl;
 	cc->cc_iplcookie = makeiplcookie(cc->cc_ipl);
 	cc->cc_cache = pc;
-	cc->cc_cpu = ci;
+	cc->cc_cpuindex = index;
 	cc->cc_hits = 0;
 	cc->cc_misses = 0;
 	cc->cc_current = NULL;
 	cc->cc_previous = NULL;
 
-	pc->pc_cpus[ci->ci_index] = cc;
+	pc->pc_cpus[index] = cc;
 }
 
 /*
@@ -2212,6 +2239,14 @@ pool_cache_reclaim(pool_cache_t pc)
 	return pool_reclaim(&pc->pc_pool);
 }
 
+static void
+pool_cache_destruct_object1(pool_cache_t pc, void *object)
+{
+
+	(*pc->pc_dtor)(pc->pc_arg, object);
+	pool_put(&pc->pc_pool, object);
+}
+
 /*
  * pool_cache_destruct_object:
  *
@@ -2222,8 +2257,9 @@ void
 pool_cache_destruct_object(pool_cache_t pc, void *object)
 {
 
-	(*pc->pc_dtor)(pc->pc_arg, object);
-	pool_put(&pc->pc_pool, object);
+	FREECHECK_IN(&pc->pc_freecheck, object);
+
+	pool_cache_destruct_object1(pc, object);
 }
 
 /*
@@ -2243,7 +2279,7 @@ pool_cache_invalidate_groups(pool_cache_
 
 		for (i = 0; i < pcg->pcg_avail; i++) {
 			object = pcg->pcg_objects[i].pcgo_va;
-			pool_cache_destruct_object(pc, object);
+			pool_cache_destruct_object1(pc, object);
 		}
 
 		pool_put(&pcgpool, pcg);
@@ -2310,33 +2346,19 @@ static inline pool_cache_cpu_t *
 pool_cache_cpu_enter(pool_cache_t pc, int *s)
 {
 	pool_cache_cpu_t *cc;
-	struct cpu_info *ci;
 
 	/*
 	 * Prevent other users of the cache from accessing our
 	 * CPU-local data.  To avoid touching shared state, we
 	 * pull the neccessary information from CPU local data.
 	 */
-	ci = curcpu();
-	KASSERT(ci->ci_data.cpu_index < MAXCPUS);
-	cc = pc->pc_cpus[ci->ci_data.cpu_index];
+	crit_enter();
+	cc = pc->pc_cpus[curcpu()->ci_index];
 	KASSERT(cc->cc_cache == pc);
-	if (cc->cc_ipl == IPL_NONE) {
-		crit_enter();
-	} else {
+	if (cc->cc_ipl != IPL_NONE) {
 		*s = splraiseipl(cc->cc_iplcookie);
 	}
-
-	/* Moved to another CPU before disabling preemption? */
-	if (__predict_false(ci != curcpu())) {
-		ci = curcpu();
-		cc = pc->pc_cpus[ci->ci_data.cpu_index];
-	}
-
-#ifdef DIAGNOSTIC
-	KASSERT(cc->cc_cpu == ci);
 	KASSERT(((uintptr_t)cc & (CACHE_LINE_SIZE - 1)) == 0);
-#endif
 	return cc;
 }
 
@@ -2346,11 +2368,10 @@ pool_cache_cpu_exit(pool_cache_cpu_t *cc
 {
 
 	/* No longer need exclusive access to the per-CPU data. */
-	if (cc->cc_ipl == IPL_NONE) {
-		crit_exit();
-	} else {
+	if (cc->cc_ipl != IPL_NONE) {
 		splx(*s);
 	}
+	crit_exit();
 }
 
 #if __GNUC_PREREQ__(3, 0)
@@ -2572,7 +2593,11 @@ pool_cache_put_slow(pool_cache_cpu_t *cc
 		 * If we can't allocate a new group, just throw the
 		 * object away.
 		 */
-		pcg = pool_get(&pcgpool, PR_NOWAIT);
+		if (pool_cache_disable) {
+			pcg = NULL;
+		} else {
+			pcg = pool_get(&pcgpool, PR_NOWAIT);
+		}
 		if (pcg == NULL) {
 			pool_cache_destruct_object(pc, object);
 			return NULL;