--- version 1.161.2.1, 2008/07/18 16:37:49
+++ version 1.162, 2008/07/04 13:28:08
(unified form; '-' lines appear only in 1.161.2.1, '+' lines only in 1.162)
@@ pool_do_put(), line 1253 (1.161.2.1) / line 1253 (1.162) @@
 	if (pp->pr_flags & PR_WANTED) {
 		pp->pr_flags &= ~PR_WANTED;
+		if (ph->ph_nmissing == 0)
+			pp->pr_nidle++;
 		cv_broadcast(&pp->pr_cv);
+		return;
 	}

 	/*
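
Note: this hunk is the sleeper-wakeup handshake between pool_get() and the
free path: an allocator that finds the pool empty raises PR_WANTED and sleeps
on pr_cv, and the freeing side clears the flag and broadcasts. The 1.162 side
additionally does the idle-page accounting (pr_nidle) and returns early in
that case. As a rough userland sketch of the same flag-and-condvar pattern
(pthreads; all names here are illustrative, not the kernel's):

	#include <pthread.h>
	#include <stdbool.h>

	static pthread_mutex_t pr_lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t  pr_cv   = PTHREAD_COND_INITIALIZER;
	static bool wanted = false;	/* analogue of PR_WANTED */
	static int  nitems = 0;

	/* Free path: return an item; wake sleepers only if someone waits. */
	static void
	item_put(void)
	{
		pthread_mutex_lock(&pr_lock);
		nitems++;
		if (wanted) {
			wanted = false;
			pthread_cond_broadcast(&pr_cv);
		}
		pthread_mutex_unlock(&pr_lock);
	}

	/* Allocation path: raise the flag and sleep until an item appears. */
	static void
	item_get(void)
	{
		pthread_mutex_lock(&pr_lock);
		while (nitems == 0) {
			wanted = true;
			pthread_cond_wait(&pr_cv, &pr_lock);
		}
		nitems--;
		pthread_mutex_unlock(&pr_lock);
	}

The flag makes the common case (no sleepers) a single test instead of an
unconditional broadcast.
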
@@ pool_cache_bootstrap(), line 2082 (1.161.2.1) / line 2085 (1.162) @@
 	if ((flags & PR_LARGECACHE) != 0) {
 		pc->pc_pcgsize = PCG_NOBJECTS_LARGE;
-		pc->pc_pcgpool = &pcg_large_pool;
 	} else {
 		pc->pc_pcgsize = PCG_NOBJECTS_NORMAL;
-		pc->pc_pcgpool = &pcg_normal_pool;
 	}

 	/* Allocate per-CPU caches. */
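
Note: both versions choose between the two per-CPU group sizes here; the
1.161.2.1 side also records which backing pool the group headers come from
(pc_pcgpool), so later code can simply pool_get(pc->pc_pcgpool, ...) instead
of re-deriving the choice from pc_pcgsize, as 1.162 still does in the
pool_cache_put_slow() hunk below. A minimal sketch of that "resolve once at
bootstrap" idea, with illustrative stand-in types and sizes:

	#include <stddef.h>

	struct group_pool { size_t objs_per_group; }; /* stand-in for struct pool */

	static struct group_pool pcg_normal = { 15 };	/* sizes illustrative */
	static struct group_pool pcg_large  = { 63 };

	struct cache {
		size_t pcgsize;			/* objects per group */
		struct group_pool *pcgpool;	/* source of group headers */
	};

	static void
	cache_bootstrap(struct cache *pc, int largecache)
	{
		if (largecache) {
			pc->pcgsize = pcg_large.objs_per_group;
			pc->pcgpool = &pcg_large;
		} else {
			pc->pcgsize = pcg_normal.objs_per_group;
			pc->pcgpool = &pcg_normal;
		}
	}
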
@@ pool_cache_get_paddr(), line 2468 (1.161.2.1) / line 2469 (1.162) @@
 	/* Lock out interrupts and disable preemption. */
 	s = splvm();
-	while (/* CONSTCOND */ true) {
+	do {
 		/* Try and allocate an object from the current group. */
 		cc = pc->pc_cpus[curcpu()->ci_index];
 		KASSERT(cc->cc_cache == pc);
@@ pool_cache_get_paddr(), line 2479 (1.161.2.1) / line 2480 (1.162) @@
 				*pap = pcg->pcg_objects[pcg->pcg_avail].pcgo_pa;
 #if defined(DIAGNOSTIC)
 			pcg->pcg_objects[pcg->pcg_avail].pcgo_va = NULL;
-			KASSERT(pcg->pcg_avail < pcg->pcg_size);
+#endif /* defined(DIAGNOSTIC) */
+			KASSERT(pcg->pcg_avail <= pcg->pcg_size);
 			KASSERT(object != NULL);
-#endif
 			cc->cc_hits++;
 			splx(s);
 			FREECHECK_OUT(&pc->pc_freecheck, object);
@@ pool_cache_get_paddr(), line 2505 (1.161.2.1) / line 2506 (1.162) @@
 		 * no more objects are available, it will return false.
 		 * Otherwise, we need to retry.
 		 */
-		if (!pool_cache_get_slow(cc, s, &object, pap, flags))
-			break;
-	}
+	} while (pool_cache_get_slow(cc, s, &object, pap, flags));

 	return object;
 }
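
Note: both versions implement the same retry contract around the slow path:
the fast path runs under splvm() with the per-CPU pointer freshly fetched, a
hit returns directly, and pool_cache_get_slow() returns true when it changed
state so the fast path should be retried, false when it finished (or
definitively failed). 1.162 writes the loop as do/while with the slow call in
the condition; 1.161.2.1 as while (true) with a break, which behaves the same
here since a hit leaves the function before the condition is reached. A
self-contained userland sketch of the contract (all names hypothetical):

	#include <stdbool.h>
	#include <stddef.h>

	static int avail   = 2;	/* objects in the per-CPU group */
	static int reserve = 4;	/* objects refillable from the shared cache */
	static int pool_storage[16];

	/* Slow path: true = "state changed, retry", false = "done". */
	static bool
	cache_get_slow(void **objp)
	{
		if (reserve > 0) {
			avail += reserve;	/* refill, then retry fast path */
			reserve = 0;
			return true;
		}
		*objp = NULL;		/* nothing anywhere: fail the get */
		return false;
	}

	static void *
	cache_get(void)
	{
		void *obj = NULL;

		do {
			if (avail > 0) {	/* fast path hit */
				obj = &pool_storage[--avail];
				break;
			}
		} while (cache_get_slow(&obj));

		return obj;
	}
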
@@ pool_cache_put_slow(), line 2515 (1.161.2.1) / line 2514 (1.162) @@
 static bool __noinline
 pool_cache_put_slow(pool_cache_cpu_t *cc, int s, void *object)
 {
-	pcg_t *pcg, *cur;
+	pcg_t *pcg, *cur, *empty;
 	uint64_t ncsw;
 	pool_cache_t pc;

 	pc = cc->cc_cache;
 	cc->cc_misses++;

-	/* Lock the cache. */
-	if (__predict_false(!mutex_tryenter(&pc->pc_lock))) {
-		ncsw = curlwp->l_ncsw;
-		mutex_enter(&pc->pc_lock);
-		pc->pc_contended++;
-
-		/*
-		 * If we context switched while locking, then our view of
-		 * the per-CPU data is invalid: retry.
-		 */
-		if (__predict_false(curlwp->l_ncsw != ncsw)) {
-			mutex_exit(&pc->pc_lock);
-			return true;
-		}
-	}
-
-	/* If there are no empty groups in the cache then allocate one. */
-	if (__predict_false((pcg = pc->pc_emptygroups) == NULL)) {
-		if (__predict_true(!pool_cache_disable)) {
-			pcg = pool_get(pc->pc_pcgpool, PR_NOWAIT);
-		}
-		if (__predict_true(pcg != NULL)) {
-			pcg->pcg_avail = 0;
-			pcg->pcg_size = pc->pc_pcgsize;
-		}
-	} else {
-		pc->pc_emptygroups = pcg->pcg_next;
-		pc->pc_nempty--;
-	}
+	/*
+	 * If there appear to be no empty groups in the cache then
+	 * allocate one in advance.
+	 */
+	empty = NULL;
+	if (__predict_false(pc->pc_emptygroups == NULL)) {
+		if (__predict_false(pool_cache_disable)) {
+			empty = NULL;
+		} else if (pc->pc_pcgsize == PCG_NOBJECTS_LARGE) {
+			empty = pool_get(&pcg_large_pool, PR_NOWAIT);
+		} else {
+			empty = pool_get(&pcg_normal_pool, PR_NOWAIT);
+		}
+	}
+
+	/* Lock the cache. */
+	ncsw = curlwp->l_ncsw;
+	if (__predict_false(!mutex_tryenter(&pc->pc_lock))) {
+		mutex_enter(&pc->pc_lock);
+		pc->pc_contended++;
+	}
+
+	/*
+	 * If we speculatively allocated an empty group, link it into
+	 * the cache's list.
+	 */
+	if (empty != NULL) {
+		empty->pcg_avail = 0;
+		empty->pcg_size = pc->pc_pcgsize;
+		empty->pcg_next = pc->pc_emptygroups;
+		pc->pc_emptygroups = empty;
+		pc->pc_nempty++;
+		pc->pc_misses++;
+	}
+
+	/*
+	 * If we context switched while locking, then our view of the
+	 * per-CPU data is invalid: retry.
+	 */
+	if (__predict_false(curlwp->l_ncsw != ncsw)) {
+		mutex_exit(&pc->pc_lock);
+		return true;
+	}

 	/*
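
Note: two ideas meet in this hunk. First, 1.162 allocates the spare empty
group before taking pc_lock, keeping a potentially slow pool_get() out of the
critical section; the unlocked peek at pc_emptygroups is racy, but a stale
read only costs one surplus spare. Second, both versions use the LWP's
context-switch counter (curlwp->l_ncsw) to detect whether the thread slept or
migrated while acquiring the lock, in which case cc may now belong to another
CPU and the caller must retry; 1.161.2.1 samples and checks the counter only
on the contended path, presumably because an uncontended mutex_tryenter()
never blocks. A userland sketch of the allocate-outside-the-lock pattern
(pthreads; names illustrative):

	#include <pthread.h>
	#include <stdlib.h>

	struct pcg { struct pcg *next; };	/* stand-in group header */

	static pthread_mutex_t pc_lock = PTHREAD_MUTEX_INITIALIZER;
	static struct pcg *emptygroups;
	static unsigned nempty;

	static void
	provision_empty_group(void)
	{
		struct pcg *empty = NULL;

		/* Allocate while unlocked; the allocator may be slow or
		 * take locks of its own. */
		if (emptygroups == NULL)
			empty = malloc(sizeof(*empty));

		pthread_mutex_lock(&pc_lock);
		if (empty != NULL) {
			empty->next = emptygroups;
			emptygroups = empty;
			nempty++;
		}
		pthread_mutex_unlock(&pc_lock);
	}
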
@@ pool_cache_put_slow(), line 2557 (1.161.2.1) / line 2570 (1.162) @@
 	 * to the cache. Install the empty group to the local CPU
 	 * and return.
 	 */
-	if (pcg != NULL) {
+	if (__predict_true((pcg = pc->pc_emptygroups) != NULL)) {
 		KASSERT(pcg->pcg_avail == 0);
+		pc->pc_emptygroups = pcg->pcg_next;
 		if (__predict_false(cc->cc_previous == &pcg_dummy)) {
 			cc->cc_previous = pcg;
 		} else {
 			cur = cc->cc_current;
 			if (__predict_true(cur != &pcg_dummy)) {
-				KASSERT(cur->pcg_avail == cur->pcg_size);
+				KASSERT(cur->pcg_avail == pcg->pcg_size);
 				cur->pcg_next = pc->pc_fullgroups;
 				pc->pc_fullgroups = cur;
 				pc->pc_nfull++;
 			}
 			cc->cc_current = pcg;
 		}
-		pc->pc_hits++;
+		pc->pc_hits += (empty == NULL);
+		pc->pc_nempty--;
 		mutex_exit(&pc->pc_lock);
 		return true;
 	}
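
Note: this is the group rotation that makes room on the local CPU: the empty
group is installed either as the spare (cc_previous, when that slot still
holds the read-only pcg_dummy sentinel) or as cc_current, in which case the
full outgoing group is pushed onto pc_fullgroups. The two KASSERTs differ
only in which group's size they compare against; for a given cache both
groups have size pc_pcgsize, so the assertions are equivalent in effect,
though cur->pcg_size reads more directly. 1.162 also counts a hit only when
no speculative allocation was needed (pc_hits += (empty == NULL)). A sketch
of the rotation with illustrative types, using NULL for the pcg_dummy
sentinel:

	#include <stddef.h>

	struct pcg {
		struct pcg *next;
		unsigned avail, size;
	};

	struct cache {
		struct pcg *fullgroups, *emptygroups;
		unsigned nfull, nempty;
	};

	struct cpu_cache {
		struct pcg *current, *previous;	/* NULL ~ pcg_dummy */
	};

	/* Called with the cache lock held; returns 1 if an empty group
	 * was installed on this CPU, 0 if none was available. */
	static int
	install_empty_group(struct cache *pc, struct cpu_cache *cc)
	{
		struct pcg *pcg, *cur;

		if ((pcg = pc->emptygroups) == NULL)
			return 0;
		pc->emptygroups = pcg->next;
		pc->nempty--;

		if (cc->previous == NULL) {
			cc->previous = pcg;	/* keep it as the spare */
		} else {
			/* Retire the full current group to the cache. */
			if ((cur = cc->current) != NULL) {
				cur->next = pc->fullgroups;
				pc->fullgroups = cur;
				pc->nfull++;
			}
			cc->current = pcg;
		}
		return 1;
	}
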
@@ pool_cache_put_paddr(), line 2606 (1.161.2.1) / line 2621 (1.162) @@
 	/* Lock out interrupts and disable preemption. */
 	s = splvm();
-	while (/* CONSTCOND */ true) {
+	do {
 		/* If the current group isn't full, release it there. */
 		cc = pc->pc_cpus[curcpu()->ci_index];
 		KASSERT(cc->cc_cache == pc);
@@ pool_cache_put_paddr(), line 2636 (1.161.2.1) / line 2651 (1.162) @@
 		 * If put_slow() releases the object for us, it
 		 * will return false. Otherwise we need to retry.
 		 */
-		if (!pool_cache_put_slow(cc, s, object))
-			break;
-	}
+	} while (pool_cache_put_slow(cc, s, object));
 }

 /*
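
Note: all of the functions touched above sit behind the public pool_cache(9)
interface; pool_cache_get() and pool_cache_put() are thin wrappers around the
_paddr variants patched here, passing NULL for the physical address. A rough
sketch of typical kernel-side usage (hedged from pool_cache(9); the foo names
are hypothetical):

	#include <sys/pool.h>

	struct foo { int f_val; };

	static pool_cache_t foo_cache;

	static void
	foo_init(void)
	{
		foo_cache = pool_cache_init(sizeof(struct foo), 0, 0, 0,
		    "foocache", NULL, IPL_NONE, NULL, NULL, NULL);
	}

	static struct foo *
	foo_alloc(void)
	{
		/* Ends up in pool_cache_get_paddr(foo_cache, PR_WAITOK, NULL). */
		return pool_cache_get(foo_cache, PR_WAITOK);
	}

	static void
	foo_free(struct foo *f)
	{
		pool_cache_put(foo_cache, f);
	}
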