Please note that diffs are not public domain; they are subject to the copyright notices on the relevant files. =================================================================== RCS file: /ftp/cvs/cvsroot/src/sys/kern/subr_pool.c,v rcsdiff: /ftp/cvs/cvsroot/src/sys/kern/subr_pool.c,v: warning: Unknown phrases like `commitid ...;' are present. retrieving revision 1.151.6.4 retrieving revision 1.157 diff -u -p -r1.151.6.4 -r1.157 --- src/sys/kern/subr_pool.c 2009/01/17 13:29:19 1.151.6.4 +++ src/sys/kern/subr_pool.c 2008/04/24 11:38:36 1.157 @@ -1,7 +1,7 @@ -/* $NetBSD: subr_pool.c,v 1.151.6.4 2009/01/17 13:29:19 mjf Exp $ */ +/* $NetBSD: subr_pool.c,v 1.157 2008/04/24 11:38:36 ad Exp $ */ /*- - * Copyright (c) 1997, 1999, 2000, 2002, 2007, 2008 The NetBSD Foundation, Inc. + * Copyright (c) 1997, 1999, 2000, 2002, 2007 The NetBSD Foundation, Inc. * All rights reserved. * * This code is derived from software contributed to The NetBSD Foundation @@ -16,6 +16,13 @@ * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the NetBSD + * Foundation, Inc. and its contributors. + * 4. Neither the name of The NetBSD Foundation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED @@ -31,7 +38,7 @@ */ #include -__KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.151.6.4 2009/01/17 13:29:19 mjf Exp $"); +__KERNEL_RCSID(0, "$NetBSD: subr_pool.c,v 1.157 2008/04/24 11:38:36 ad Exp $"); #include "opt_ddb.h" #include "opt_pool.h" @@ -179,13 +186,13 @@ static struct pool cache_cpu_pool; TAILQ_HEAD(,pool_cache) pool_cache_head = TAILQ_HEAD_INITIALIZER(pool_cache_head); -int pool_cache_disable; /* global disable for caching */ -static const pcg_t pcg_dummy; /* zero sized: always empty, yet always full */ +int pool_cache_disable; -static bool pool_cache_put_slow(pool_cache_cpu_t *, int, - void *); -static bool pool_cache_get_slow(pool_cache_cpu_t *, int, - void **, paddr_t *, int); + +static pool_cache_cpu_t *pool_cache_put_slow(pool_cache_cpu_t *, int *, + void *, paddr_t); +static pool_cache_cpu_t *pool_cache_get_slow(pool_cache_cpu_t *, int *, + void **, paddr_t *, int); static void pool_cache_cpu_init1(struct cpu_info *, pool_cache_t); static void pool_cache_invalidate_groups(pool_cache_t, pcg_t *); static void pool_cache_xcall(pool_cache_t); @@ -864,7 +871,7 @@ pool_init(struct pool *pp, size_t size, if (__predict_true(!cold)) mutex_exit(&pool_head_lock); - /* Insert this into the list of pools using this allocator. */ + /* Insert this into the list of pools using this allocator. 
*/ if (__predict_true(!cold)) mutex_enter(&palloc->pa_lock); TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list); @@ -1253,7 +1260,10 @@ pool_do_put(struct pool *pp, void *v, st if (pp->pr_flags & PR_WANTED) { pp->pr_flags &= ~PR_WANTED; + if (ph->ph_nmissing == 0) + pp->pr_nidle++; cv_broadcast(&pp->pr_cv); + return; } /* @@ -1527,8 +1537,6 @@ pool_update_curpage(struct pool *pp) if (pp->pr_curpage == NULL) { pp->pr_curpage = LIST_FIRST(&pp->pr_emptypages); } - KASSERT((pp->pr_curpage == NULL && pp->pr_nitems == 0) || - (pp->pr_curpage != NULL && pp->pr_nitems > 0)); } void @@ -2084,10 +2092,8 @@ pool_cache_bootstrap(pool_cache_t pc, si if ((flags & PR_LARGECACHE) != 0) { pc->pc_pcgsize = PCG_NOBJECTS_LARGE; - pc->pc_pcgpool = &pcg_large_pool; } else { pc->pc_pcgsize = PCG_NOBJECTS_NORMAL; - pc->pc_pcgpool = &pcg_normal_pool; } /* Allocate per-CPU caches. */ @@ -2152,11 +2158,11 @@ pool_cache_destroy(pool_cache_t pc) for (i = 0; i < MAXCPUS; i++) { if ((cc = pc->pc_cpus[i]) == NULL) continue; - if ((pcg = cc->cc_current) != &pcg_dummy) { + if ((pcg = cc->cc_current) != NULL) { pcg->pcg_next = NULL; pool_cache_invalidate_groups(pc, pcg); } - if ((pcg = cc->cc_previous) != &pcg_dummy) { + if ((pcg = cc->cc_previous) != NULL) { pcg->pcg_next = NULL; pool_cache_invalidate_groups(pc, pcg); } @@ -2210,8 +2216,8 @@ pool_cache_cpu_init1(struct cpu_info *ci cc->cc_cpuindex = index; cc->cc_hits = 0; cc->cc_misses = 0; - cc->cc_current = __UNCONST(&pcg_dummy); - cc->cc_previous = __UNCONST(&pcg_dummy); + cc->cc_current = NULL; + cc->cc_previous = NULL; pc->pc_cpus[index] = cc; } @@ -2360,8 +2366,42 @@ pool_cache_sethardlimit(pool_cache_t pc, pool_sethardlimit(&pc->pc_pool, n, warnmess, ratecap); } -static bool __noinline -pool_cache_get_slow(pool_cache_cpu_t *cc, int s, void **objectp, +static inline pool_cache_cpu_t * +pool_cache_cpu_enter(pool_cache_t pc, int *s) +{ + pool_cache_cpu_t *cc; + + /* + * Prevent other users of the cache from accessing our + * CPU-local data. To avoid touching shared state, we + * pull the neccessary information from CPU local data. + */ + crit_enter(); + cc = pc->pc_cpus[curcpu()->ci_index]; + KASSERT(cc->cc_cache == pc); + if (cc->cc_ipl != IPL_NONE) { + *s = splraiseipl(cc->cc_iplcookie); + } + + return cc; +} + +static inline void +pool_cache_cpu_exit(pool_cache_cpu_t *cc, int *s) +{ + + /* No longer need exclusive access to the per-CPU data. */ + if (cc->cc_ipl != IPL_NONE) { + splx(*s); + } + crit_exit(); +} + +#if __GNUC_PREREQ__(3, 0) +__attribute ((noinline)) +#endif +pool_cache_cpu_t * +pool_cache_get_slow(pool_cache_cpu_t *cc, int *s, void **objectp, paddr_t *pap, int flags) { pcg_t *pcg, *cur; @@ -2369,9 +2409,6 @@ pool_cache_get_slow(pool_cache_cpu_t *cc pool_cache_t pc; void *object; - KASSERT(cc->cc_current->pcg_avail == 0); - KASSERT(cc->cc_previous->pcg_avail == 0); - pc = cc->cc_cache; cc->cc_misses++; @@ -2379,7 +2416,7 @@ pool_cache_get_slow(pool_cache_cpu_t *cc * Nothing was available locally. Try and grab a group * from the cache. 
*/ - if (__predict_false(!mutex_tryenter(&pc->pc_lock))) { + if (!mutex_tryenter(&pc->pc_lock)) { ncsw = curlwp->l_ncsw; mutex_enter(&pc->pc_lock); pc->pc_contended++; @@ -2391,17 +2428,18 @@ pool_cache_get_slow(pool_cache_cpu_t *cc */ if (curlwp->l_ncsw != ncsw) { mutex_exit(&pc->pc_lock); - return true; + pool_cache_cpu_exit(cc, s); + return pool_cache_cpu_enter(pc, s); } } - if (__predict_true((pcg = pc->pc_fullgroups) != NULL)) { + if ((pcg = pc->pc_fullgroups) != NULL) { /* * If there's a full group, release our empty * group back to the cache. Install the full * group as cc_current and return. */ - if (__predict_true((cur = cc->cc_current) != &pcg_dummy)) { + if ((cur = cc->cc_current) != NULL) { KASSERT(cur->pcg_avail == 0); cur->pcg_next = pc->pc_emptygroups; pc->pc_emptygroups = cur; @@ -2413,7 +2451,7 @@ pool_cache_get_slow(pool_cache_cpu_t *cc pc->pc_hits++; pc->pc_nfull--; mutex_exit(&pc->pc_lock); - return true; + return cc; } /* @@ -2423,17 +2461,17 @@ pool_cache_get_slow(pool_cache_cpu_t *cc */ pc->pc_misses++; mutex_exit(&pc->pc_lock); - splx(s); + pool_cache_cpu_exit(cc, s); object = pool_get(&pc->pc_pool, flags); *objectp = object; - if (__predict_false(object == NULL)) - return false; + if (object == NULL) + return NULL; - if (__predict_false((*pc->pc_ctor)(pc->pc_arg, object, flags) != 0)) { + if ((*pc->pc_ctor)(pc->pc_arg, object, flags) != 0) { pool_put(&pc->pc_pool, object); *objectp = NULL; - return false; + return NULL; } KASSERT((((vaddr_t)object + pc->pc_pool.pr_itemoffset) & @@ -2448,7 +2486,7 @@ pool_cache_get_slow(pool_cache_cpu_t *cc } FREECHECK_OUT(&pc->pc_freecheck, object); - return false; + return NULL; } /* @@ -2471,24 +2509,21 @@ pool_cache_get_paddr(pool_cache_t pc, in } #endif - /* Lock out interrupts and disable preemption. */ - s = splvm(); - while (/* CONSTCOND */ true) { + cc = pool_cache_cpu_enter(pc, &s); + do { /* Try and allocate an object from the current group. */ - cc = pc->pc_cpus[curcpu()->ci_index]; - KASSERT(cc->cc_cache == pc); pcg = cc->cc_current; - if (__predict_true(pcg->pcg_avail > 0)) { + if (pcg != NULL && pcg->pcg_avail > 0) { object = pcg->pcg_objects[--pcg->pcg_avail].pcgo_va; - if (__predict_false(pap != NULL)) + if (pap != NULL) *pap = pcg->pcg_objects[pcg->pcg_avail].pcgo_pa; #if defined(DIAGNOSTIC) pcg->pcg_objects[pcg->pcg_avail].pcgo_va = NULL; - KASSERT(pcg->pcg_avail < pcg->pcg_size); +#endif /* defined(DIAGNOSTIC) */ + KASSERT(pcg->pcg_avail <= pcg->pcg_size); KASSERT(object != NULL); -#endif cc->cc_hits++; - splx(s); + pool_cache_cpu_exit(cc, &s); FREECHECK_OUT(&pc->pc_freecheck, object); return object; } @@ -2498,7 +2533,7 @@ pool_cache_get_paddr(pool_cache_t pc, in * it with the current group and allocate from there. */ pcg = cc->cc_previous; - if (__predict_true(pcg->pcg_avail > 0)) { + if (pcg != NULL && pcg->pcg_avail > 0) { cc->cc_previous = cc->cc_current; cc->cc_current = pcg; continue; @@ -2507,83 +2542,63 @@ pool_cache_get_paddr(pool_cache_t pc, in /* * Can't allocate from either group: try the slow path. * If get_slow() allocated an object for us, or if - * no more objects are available, it will return false. + * no more objects are available, it will return NULL. * Otherwise, we need to retry. 
*/ - if (!pool_cache_get_slow(cc, s, &object, pap, flags)) - break; - } + cc = pool_cache_get_slow(cc, &s, &object, pap, flags); + } while (cc != NULL); return object; } -static bool __noinline -pool_cache_put_slow(pool_cache_cpu_t *cc, int s, void *object) +#if __GNUC_PREREQ__(3, 0) +__attribute ((noinline)) +#endif +pool_cache_cpu_t * +pool_cache_put_slow(pool_cache_cpu_t *cc, int *s, void *object, paddr_t pa) { pcg_t *pcg, *cur; uint64_t ncsw; pool_cache_t pc; - - KASSERT(cc->cc_current->pcg_avail == cc->cc_current->pcg_size); - KASSERT(cc->cc_previous->pcg_avail == cc->cc_previous->pcg_size); + u_int nobj; pc = cc->cc_cache; - pcg = NULL; cc->cc_misses++; /* - * If there are no empty groups in the cache then allocate one - * while still unlocked. + * No free slots locally. Try to grab an empty, unused + * group from the cache. */ - if (__predict_false(pc->pc_emptygroups == NULL)) { - if (__predict_true(!pool_cache_disable)) { - pcg = pool_get(pc->pc_pcgpool, PR_NOWAIT); - } - if (__predict_true(pcg != NULL)) { - pcg->pcg_avail = 0; - pcg->pcg_size = pc->pc_pcgsize; - } - } - - /* Lock the cache. */ - if (__predict_false(!mutex_tryenter(&pc->pc_lock))) { + if (!mutex_tryenter(&pc->pc_lock)) { ncsw = curlwp->l_ncsw; mutex_enter(&pc->pc_lock); pc->pc_contended++; /* - * If we context switched while locking, then our view of - * the per-CPU data is invalid: retry. + * If we context switched while locking, then + * our view of the per-CPU data is invalid: + * retry. */ - if (__predict_false(curlwp->l_ncsw != ncsw)) { + if (curlwp->l_ncsw != ncsw) { mutex_exit(&pc->pc_lock); - if (pcg != NULL) { - pool_put(pc->pc_pcgpool, pcg); - } - return true; + pool_cache_cpu_exit(cc, s); + return pool_cache_cpu_enter(pc, s); } } - /* If there are no empty groups in the cache then allocate one. */ - if (pcg == NULL && pc->pc_emptygroups != NULL) { - pcg = pc->pc_emptygroups; - pc->pc_emptygroups = pcg->pcg_next; - pc->pc_nempty--; - } - - /* - * If there's a empty group, release our full group back - * to the cache. Install the empty group to the local CPU - * and return. - */ - if (pcg != NULL) { + if ((pcg = pc->pc_emptygroups) != NULL) { + /* + * If there's a empty group, release our full + * group back to the cache. Install the empty + * group and return. + */ KASSERT(pcg->pcg_avail == 0); - if (__predict_false(cc->cc_previous == &pcg_dummy)) { + pc->pc_emptygroups = pcg->pcg_next; + if (cc->cc_previous == NULL) { cc->cc_previous = pcg; } else { - cur = cc->cc_current; - if (__predict_true(cur != &pcg_dummy)) { - KASSERT(cur->pcg_avail == cur->pcg_size); + if ((cur = cc->cc_current) != NULL) { + KASSERT(cur->pcg_avail == pcg->pcg_size); cur->pcg_next = pc->pc_fullgroups; pc->pc_fullgroups = cur; pc->pc_nfull++; @@ -2591,21 +2606,49 @@ pool_cache_put_slow(pool_cache_cpu_t *cc cc->cc_current = pcg; } pc->pc_hits++; + pc->pc_nempty--; mutex_exit(&pc->pc_lock); - return true; + return cc; } /* - * Nothing available locally or in cache, and we didn't - * allocate an empty group. Take the slow path and destroy - * the object here and now. + * Nothing available locally or in cache. Take the + * slow path and try to allocate a new group that we + * can release to. */ pc->pc_misses++; mutex_exit(&pc->pc_lock); - splx(s); - pool_cache_destruct_object(pc, object); + pool_cache_cpu_exit(cc, s); - return false; + /* + * If we can't allocate a new group, just throw the + * object away. 
+ */ + nobj = pc->pc_pcgsize; + if (pool_cache_disable) { + pcg = NULL; + } else if (nobj == PCG_NOBJECTS_LARGE) { + pcg = pool_get(&pcg_large_pool, PR_NOWAIT); + } else { + pcg = pool_get(&pcg_normal_pool, PR_NOWAIT); + } + if (pcg == NULL) { + pool_cache_destruct_object(pc, object); + return NULL; + } + pcg->pcg_avail = 0; + pcg->pcg_size = nobj; + + /* + * Add the empty group to the cache and try again. + */ + mutex_enter(&pc->pc_lock); + pcg->pcg_next = pc->pc_emptygroups; + pc->pc_emptygroups = pcg; + pc->pc_nempty++; + mutex_exit(&pc->pc_lock); + + return pool_cache_cpu_enter(pc, s); } /* @@ -2623,28 +2666,25 @@ pool_cache_put_paddr(pool_cache_t pc, vo FREECHECK_IN(&pc->pc_freecheck, object); - /* Lock out interrupts and disable preemption. */ - s = splvm(); - while (/* CONSTCOND */ true) { + cc = pool_cache_cpu_enter(pc, &s); + do { /* If the current group isn't full, release it there. */ - cc = pc->pc_cpus[curcpu()->ci_index]; - KASSERT(cc->cc_cache == pc); pcg = cc->cc_current; - if (__predict_true(pcg->pcg_avail < pcg->pcg_size)) { + if (pcg != NULL && pcg->pcg_avail < pcg->pcg_size) { pcg->pcg_objects[pcg->pcg_avail].pcgo_va = object; pcg->pcg_objects[pcg->pcg_avail].pcgo_pa = pa; pcg->pcg_avail++; cc->cc_hits++; - splx(s); + pool_cache_cpu_exit(cc, &s); return; } /* - * That failed. If the previous group isn't full, swap + * That failed. If the previous group is empty, swap * it with the current group and try again. */ pcg = cc->cc_previous; - if (__predict_true(pcg->pcg_avail < pcg->pcg_size)) { + if (pcg != NULL && pcg->pcg_avail == 0) { cc->cc_previous = cc->cc_current; cc->cc_current = pcg; continue; @@ -2653,11 +2693,10 @@ pool_cache_put_paddr(pool_cache_t pc, vo /* * Can't free to either group: try the slow path. * If put_slow() releases the object for us, it - * will return false. Otherwise we need to retry. + * will return NULL. Otherwise we need to retry. */ - if (!pool_cache_put_slow(cc, s, object)) - break; - } + cc = pool_cache_put_slow(cc, &s, object, pa); + } while (cc != NULL); } /* @@ -2671,16 +2710,24 @@ pool_cache_xcall(pool_cache_t pc) { pool_cache_cpu_t *cc; pcg_t *prev, *cur, **list; - int s; + int s = 0; /* XXXgcc */ - s = splvm(); - mutex_enter(&pc->pc_lock); - cc = pc->pc_cpus[curcpu()->ci_index]; + cc = pool_cache_cpu_enter(pc, &s); cur = cc->cc_current; - cc->cc_current = __UNCONST(&pcg_dummy); + cc->cc_current = NULL; prev = cc->cc_previous; - cc->cc_previous = __UNCONST(&pcg_dummy); - if (cur != &pcg_dummy) { + cc->cc_previous = NULL; + pool_cache_cpu_exit(cc, &s); + + /* + * XXXSMP Go to splvm to prevent kernel_lock from being taken, + * because locks at IPL_SOFTXXX are still spinlocks. Does not + * apply to IPL_SOFTBIO. Cross-call threads do not take the + * kernel_lock. + */ + s = splvm(); + mutex_enter(&pc->pc_lock); + if (cur != NULL) { if (cur->pcg_avail == cur->pcg_size) { list = &pc->pc_fullgroups; pc->pc_nfull++; @@ -2694,7 +2741,7 @@ pool_cache_xcall(pool_cache_t pc) cur->pcg_next = *list; *list = cur; } - if (prev != &pcg_dummy) { + if (prev != NULL) { if (prev->pcg_avail == prev->pcg_size) { list = &pc->pc_fullgroups; pc->pc_nfull++; @@ -2993,7 +3040,7 @@ found: if (pool_in_cg(pp, cc->cc_current, addr) || pool_in_cg(pp, cc->cc_previous, addr)) { struct cpu_info *ci = - cpu_lookup(i); + cpu_lookup_byindex(i); incpucache = true; snprintf(cpucachestr,
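
The largest behavioural difference in this diff is how an absent per-CPU cache group is represented. Revision 1.151.6.4 (the "-" side) initialises cc_current and cc_previous to a static const pcg_dummy, a zero-sized group that is "always empty, yet always full", so the fast paths in pool_cache_get_paddr() and pool_cache_put_paddr() only compare pcg_avail against 0 or pcg_size and never test for NULL; revision 1.157 (the "+" side) keeps NULL pointers, adds a "pcg != NULL &&" check to every fast-path access, and wraps the accesses in pool_cache_cpu_enter()/pool_cache_cpu_exit(). The following is a minimal userland sketch of the sentinel-group idea only: it is not the kernel code, the toy_* names are invented for illustration, and all of the preemption, spl and locking machinery is left out.

/*
 * Userland sketch (illustrative only) of the pcg_dummy sentinel used on
 * the 1.151.6.4 side: a zero-sized, const group whose avail (0) and
 * size (0) make both fast-path tests fail, so no NULL checks are needed.
 */
#include <stdio.h>
#include <stddef.h>

#define TOY_PCG_SIZE 4

struct toy_pcg {
	unsigned int	pcg_avail;		/* objects currently stored */
	unsigned int	pcg_size;		/* capacity of this group */
	void		*pcg_objects[TOY_PCG_SIZE];
};

/* Zero sized: always empty (avail == 0), yet always full (avail == size). */
static const struct toy_pcg pcg_dummy;

struct toy_cpu {
	struct toy_pcg	*cc_current;
	struct toy_pcg	*cc_previous;
};

/* get fast path: only an avail test, never a NULL test. */
static void *
toy_get(struct toy_cpu *cc)
{
	struct toy_pcg *pcg = cc->cc_current;

	if (pcg->pcg_avail > 0)
		return pcg->pcg_objects[--pcg->pcg_avail];
	return NULL;				/* would fall to the slow path */
}

/* put fast path: only a capacity test; the dummy is never written to. */
static int
toy_put(struct toy_cpu *cc, void *obj)
{
	struct toy_pcg *pcg = cc->cc_current;

	if (pcg->pcg_avail < pcg->pcg_size) {
		pcg->pcg_objects[pcg->pcg_avail++] = obj;
		return 1;
	}
	return 0;				/* would fall to the slow path */
}

int
main(void)
{
	int item = 42;
	struct toy_cpu cc = {
		/* Fresh CPU slot: both pointers name the dummy (cf. __UNCONST). */
		.cc_current = (struct toy_pcg *)&pcg_dummy,
		.cc_previous = (struct toy_pcg *)&pcg_dummy,
	};
	struct toy_pcg full = {
		.pcg_avail = 1, .pcg_size = TOY_PCG_SIZE,
		.pcg_objects = { &item },
	};

	printf("get on dummy group  -> %p (slow path)\n", toy_get(&cc));
	printf("put on dummy group  -> %d (slow path)\n", toy_put(&cc, &item));

	/* The slow path would install a real group; then the same code wins. */
	cc.cc_current = &full;
	printf("get on real group   -> %p\n", toy_get(&cc));
	return 0;
}

The cost of the trick, visible on the "-" side, is discipline elsewhere: the dummy is const and must never be stored into, so the slow paths always install a real group before writing, and __UNCONST() is needed when the dummy is placed into the per-CPU slots.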
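
Both revisions use the same contention pattern in pool_cache_get_slow() and pool_cache_put_slow(): try pc_lock with mutex_tryenter() first, and only on failure snapshot curlwp->l_ncsw, block in mutex_enter(), bump pc_contended, and compare the switch count afterwards. If the LWP context-switched while sleeping, the per-CPU pointer fetched earlier may no longer describe the CPU it is now running on, so the lock is dropped and the caller retries its fast path. Below is a hedged userland sketch of that pattern built on pthreads; sched_getcpu() is only an approximation of curlwp->l_ncsw (it notices CPU migration, not every context switch), and cache_lock/lock_cache_or_retry are illustrative names rather than NetBSD interfaces.

/*
 * Userland sketch (illustrative only) of the trylock + "did we context
 * switch?" retry pattern from pool_cache_{get,put}_slow().  Compile with
 * -pthread on Linux; sched_getcpu() stands in for curlwp->l_ncsw.
 */
#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long contended;		/* analogue of pc_contended */

/*
 * Take the shared cache lock.  Returns true if the caller's cached
 * per-CPU view is still usable, false if it must re-read its per-CPU
 * slot and retry (the analogue of the slow paths returning "retry").
 */
static bool
lock_cache_or_retry(void)
{
	int cpu_before;

	if (pthread_mutex_trylock(&cache_lock) == 0)
		return true;		/* uncontended: nothing to re-check */

	cpu_before = sched_getcpu();	/* kernel: ncsw = curlwp->l_ncsw */
	pthread_mutex_lock(&cache_lock);	/* may block and reschedule us */
	contended++;

	if (sched_getcpu() != cpu_before) {
		/* Moved while sleeping: cached per-CPU state is stale. */
		pthread_mutex_unlock(&cache_lock);
		return false;
	}
	return true;
}

int
main(void)
{
	if (lock_cache_or_retry()) {
		printf("lock held, per-CPU view still valid (contended=%lu)\n",
		    contended);
		pthread_mutex_unlock(&cache_lock);
	} else {
		printf("migrated while blocking: retry the fast path\n");
	}
	return 0;
}

The initial trylock keeps the staleness check off the uncontended path entirely; the counter comparison, like pc_contended, only runs when the lock was actually fought over.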