--- version 1.158.2.3, 2009/09/16 13:38:01
+++ version 1.162, 2008/07/04 13:28:08
Line 69 __KERNEL_RCSID(0, "$NetBSD$");

 */
|
|
 /* List of all pools */
-static TAILQ_HEAD(, pool) pool_head = TAILQ_HEAD_INITIALIZER(pool_head);
+TAILQ_HEAD(,pool) pool_head = TAILQ_HEAD_INITIALIZER(pool_head);

 /* Private pool for page header structures */
 #define PHPOOL_MAX 8
Line 180 TAILQ_HEAD(,pool_cache) pool_cache_head
     TAILQ_HEAD_INITIALIZER(pool_cache_head);

 int pool_cache_disable; /* global disable for caching */
-static const pcg_t pcg_dummy; /* zero sized: always empty, yet always full */
+static pcg_t pcg_dummy; /* zero sized: always empty, yet always full */

 static bool pool_cache_put_slow(pool_cache_cpu_t *, int,
     void *);
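Note: both columns rely on the pcg_dummy trick named in the comment above: a zero-sized group has pcg_avail == 0, so the allocation fast path always misses on it, and pcg_avail == pcg_size, so the free fast path always sees it as full. Either way control falls through to the slow path with no NULL checks on cc_current/cc_previous. A minimal compilable sketch of the two fast-path tests; the reduced pcg struct and the harness are illustrative, not the kernel's definitions:

    #include <assert.h>
    #include <stdbool.h>

    typedef struct pcg {
        unsigned pcg_avail;     /* objects currently in the group */
        unsigned pcg_size;      /* capacity of the group */
    } pcg_t;

    static const pcg_t pcg_dummy;   /* zero-initialized: avail == size == 0 */

    static bool can_get(const pcg_t *pcg)       /* alloc fast-path test */
    {
        return pcg->pcg_avail > 0;              /* dummy: always false */
    }

    static bool can_put(const pcg_t *pcg)       /* free fast-path test */
    {
        return pcg->pcg_avail < pcg->pcg_size;  /* dummy: always false */
    }

    int main(void)
    {
        assert(!can_get(&pcg_dummy) && !can_put(&pcg_dummy));
        return 0;
    }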
|
|
 pool_subsystem_init(void)
 {
     struct pool_allocator *pa;
+    __link_set_decl(pools, struct link_pool_init);
+    struct link_pool_init * const *pi;

     mutex_init(&pool_head_lock, MUTEX_DEFAULT, IPL_NONE);
     cv_init(&pool_busy, "poolbusy");

+    __link_set_foreach(pi, pools)
+        pool_init((*pi)->pp, (*pi)->size, (*pi)->align,
+            (*pi)->align_offset, (*pi)->flags, (*pi)->wchan,
+            (*pi)->palloc, (*pi)->ipl);
|
|
     while ((pa = SLIST_FIRST(&pa_deferinitq)) != NULL) {
         KASSERT(pa->pa_backingmapptr != NULL);
         KASSERT(*pa->pa_backingmapptr != NULL);

Line 857 (1.158.2.3) / Line 864 (1.162): pool_init(struct pool *pp, size_t size,
|
     if (__predict_true(!cold))
         mutex_exit(&pool_head_lock);

     /* Insert this into the list of pools using this allocator. */
     if (__predict_true(!cold))
         mutex_enter(&palloc->pa_lock);
     TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list);

Line 1246 (1.158.2.3) / Line 1253 (1.162): pool_do_put(struct pool *pp, void *v, st
|
|
|
     if (pp->pr_flags & PR_WANTED) {
         pp->pr_flags &= ~PR_WANTED;
+        if (ph->ph_nmissing == 0)
+            pp->pr_nidle++;
         cv_broadcast(&pp->pr_cv);
+        return;
     }
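Note: the interesting part of this pool_do_put() hunk is the early return. The 1.162 column wakes waiters, patches up pr_nidle itself, and returns, skipping the common idle-page accounting that follows the hunk; the 1.158.2.3 column only clears PR_WANTED and broadcasts, then falls through so a single copy of that accounting runs on every path. A compilable sketch of the control-flow difference; all names are hypothetical and the real bookkeeping lives outside this hunk:

    #include <stdbool.h>
    #include <stdio.h>

    static bool wanted = true;  /* stands in for pp->pr_flags & PR_WANTED */
    static int nidle;           /* stands in for pp->pr_nidle */

    static void wake_waiters(void) { puts("broadcast"); }
    static void common_accounting(bool page_idle) { if (page_idle) nidle++; }

    /* 1.162 shape: early return, bookkeeping duplicated in the branch. */
    static void put_old(bool page_idle)
    {
        if (wanted) {
            wanted = false;
            if (page_idle)
                nidle++;
            wake_waiters();
            return;
        }
        common_accounting(page_idle);
    }

    /* 1.158.2.3 shape: wake and fall through to the shared path. */
    static void put_new(bool page_idle)
    {
        if (wanted) {
            wanted = false;
            wake_waiters();
        }
        common_accounting(page_idle);
    }

    int main(void) { put_old(true); wanted = true; put_new(true); return nidle - 2; }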
|
|
     /*
Line 1520 (1.158.2.3) / Line 1530 (1.162): pool_update_curpage(struct pool *pp)

     if (pp->pr_curpage == NULL) {
         pp->pr_curpage = LIST_FIRST(&pp->pr_emptypages);
     }
-    KASSERT((pp->pr_curpage == NULL && pp->pr_nitems == 0) ||
-        (pp->pr_curpage != NULL && pp->pr_nitems > 0));
 }
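Note: the KASSERT added on the 1.158.2.3 side documents pool_update_curpage()'s postcondition: the pool has a current page exactly when it has items available. Since pr_nitems is unsigned, the disjunction collapses to a single equivalence. A hypothetical helper making that explicit; the reduced struct is not the kernel's:

    #include <stdbool.h>
    #include <stddef.h>

    struct page;        /* opaque stand-in for struct pool_item_header */

    struct pool {       /* reduced to the two fields the assert reads */
        struct page *pr_curpage;
        unsigned     pr_nitems;
    };

    /* Hypothetical: same predicate as the KASSERT above. */
    static inline bool
    pool_curpage_consistent(const struct pool *pp)
    {
        return (pp->pr_curpage == NULL) == (pp->pr_nitems == 0);
    }

    int main(void)
    {
        struct pool empty = { NULL, 0 };
        return pool_curpage_consistent(&empty) ? 0 : 1;
    }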
|
|
 void
Line 2077 (1.158.2.3) / Line 2085 (1.162): pool_cache_bootstrap(pool_cache_t pc, si
|
|
|
     if ((flags & PR_LARGECACHE) != 0) {
         pc->pc_pcgsize = PCG_NOBJECTS_LARGE;
-        pc->pc_pcgpool = &pcg_large_pool;
     } else {
         pc->pc_pcgsize = PCG_NOBJECTS_NORMAL;
-        pc->pc_pcgpool = &pcg_normal_pool;
     }
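Note: on the 1.158.2.3 side the PR_LARGECACHE decision is captured once in pc_pcgpool, so every later group allocation is pool_get(pc->pc_pcgpool, ...) with no re-test of pc_pcgsize; 1.162 (see pool_cache_put_slow below) re-derives pcg_large_pool vs pcg_normal_pool at each call site. A compilable sketch of the two styles; the pool names mirror the source, but the stub pool_get and the harness are illustrative:

    #include <stddef.h>

    #define PCG_NOBJECTS_LARGE  63
    #define PR_NOWAIT           0x01

    struct pool { const char *name; };
    static struct pool pcg_normal_pool = { "pcgnormal" };
    static struct pool pcg_large_pool = { "pcglarge" };

    static void *pool_get(struct pool *pp, int flags)  /* stub */
    {
        (void)pp; (void)flags;
        return NULL;
    }

    struct cache {
        unsigned     pc_pcgsize;
        struct pool *pc_pcgpool;    /* 1.158.2.3: set once at bootstrap */
    };

    /* 1.162 style: branch on the size at every allocation site. */
    static void *group_alloc_old(struct cache *pc)
    {
        return pool_get(pc->pc_pcgsize == PCG_NOBJECTS_LARGE ?
            &pcg_large_pool : &pcg_normal_pool, PR_NOWAIT);
    }

    /* 1.158.2.3 style: the bootstrap-time choice is just dereferenced. */
    static void *group_alloc_new(struct cache *pc)
    {
        return pool_get(pc->pc_pcgpool, PR_NOWAIT);
    }

    int main(void)
    {
        struct cache pc = { PCG_NOBJECTS_LARGE, &pcg_large_pool };
        return (group_alloc_old(&pc) == group_alloc_new(&pc)) ? 0 : 1;
    }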
|
|
     /* Allocate per-CPU caches. */
Line 2203 (1.158.2.3) / Line 2209 (1.162): pool_cache_cpu_init1(struct cpu_info *ci
|
     cc->cc_cpuindex = index;
     cc->cc_hits = 0;
     cc->cc_misses = 0;
-    cc->cc_current = __UNCONST(&pcg_dummy);
-    cc->cc_previous = __UNCONST(&pcg_dummy);
+    cc->cc_current = &pcg_dummy;
+    cc->cc_previous = &pcg_dummy;

     pc->pc_cpus[index] = cc;
 }
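Note: the __UNCONST churn here and in pool_cache_xcall() below follows from the pcg_dummy declaration change at the top of the diff: once pcg_dummy is const (1.158.2.3 column), storing its address in the non-const cc_current/cc_previous fields needs NetBSD's __UNCONST macro. That is safe only because nothing ever writes through a group whose avail/size tests fail, which is always the case for the dummy. A sketch of the idiom; the macro is as defined in <sys/cdefs.h> at the time, the rest is illustrative:

    #include <stddef.h>

    /* NetBSD's __UNCONST: launder const away via intermediate casts. */
    #define __UNCONST(a) ((void *)(unsigned long)(const void *)(a))

    static const int dummy;     /* read-only sentinel, like pcg_dummy */

    int main(void)
    {
        int *p = __UNCONST(&dummy); /* stored, but never written through */
        return (p == &dummy) ? 0 : 1;
    }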
Line 2362 (1.158.2.3) / Line 2368 (1.162): pool_cache_get_slow(pool_cache_cpu_t *cc
|
     pool_cache_t pc;
     void *object;

-    KASSERT(cc->cc_current->pcg_avail == 0);
-    KASSERT(cc->cc_previous->pcg_avail == 0);

     pc = cc->cc_cache;
     cc->cc_misses++;
|
|
Line 2466 (1.158.2.3) / Line 2469 (1.162): pool_cache_get_paddr(pool_cache_t pc, in
|
|
|
     /* Lock out interrupts and disable preemption. */
     s = splvm();
-    while (/* CONSTCOND */ true) {
+    do {
         /* Try and allocate an object from the current group. */
         cc = pc->pc_cpus[curcpu()->ci_index];
         KASSERT(cc->cc_cache == pc);
Line 2477 (1.158.2.3) / Line 2480 (1.162): pool_cache_get_paddr(pool_cache_t pc, in
|
             *pap = pcg->pcg_objects[pcg->pcg_avail].pcgo_pa;
 #if defined(DIAGNOSTIC)
         pcg->pcg_objects[pcg->pcg_avail].pcgo_va = NULL;
-        KASSERT(pcg->pcg_avail < pcg->pcg_size);
+#endif /* defined(DIAGNOSTIC) */
+        KASSERT(pcg->pcg_avail <= pcg->pcg_size);
         KASSERT(object != NULL);
-#endif
         cc->cc_hits++;
         splx(s);
         FREECHECK_OUT(&pc->pc_freecheck, object);
Line 2503 (1.158.2.3) / Line 2506 (1.162): pool_cache_get_paddr(pool_cache_t pc, in
|
      * no more objects are available, it will return false.
      * Otherwise, we need to retry.
      */
-        if (!pool_cache_get_slow(cc, s, &object, pap, flags))
-            break;
-    }
+    } while (pool_cache_get_slow(cc, s, &object, pap, flags));

     return object;
 }
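Note: both columns implement the same protocol around this loop, just with different loop syntax. The fast path runs with interrupts blocked (splvm) and touches only the local CPU's cc_current/cc_previous groups; on a miss, pool_cache_get_slow() takes pc_lock, and its return value tells the caller whether to re-read the per-CPU pointer and retry (for instance after a context switch moved us to another CPU). A compilable sketch of that contract; all names are illustrative:

    #include <stdbool.h>
    #include <stddef.h>

    static int hits, misses;

    /* Fast-path stub: succeeds once a "refill" has happened. */
    static bool try_fast_get(void **out)
    {
        if (misses > 0) {
            *out = &hits;   /* any non-NULL object */
            hits++;
            return true;
        }
        return false;
    }

    /* Slow-path stub: true means "state changed under you, retry the
     * fast path"; false means "take *out (possibly NULL) and give up". */
    static bool slow_get(void **out)
    {
        (void)out;
        misses++;
        return true;    /* refilled the group: retry */
    }

    static void *cache_get(void)
    {
        void *obj = NULL;

        for (;;) {
            if (try_fast_get(&obj))
                break;
            if (!slow_get(&obj))
                break;
        }
        return obj;
    }

    int main(void)
    {
        return cache_get() != NULL ? 0 : 1;
    }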
Line 2513 (1.158.2.3) / Line 2514 (1.162): pool_cache_get_paddr(pool_cache_t pc, in
|
 static bool __noinline
 pool_cache_put_slow(pool_cache_cpu_t *cc, int s, void *object)
 {
-    pcg_t *pcg, *cur;
+    pcg_t *pcg, *cur, *empty;
     uint64_t ncsw;
     pool_cache_t pc;

-    KASSERT(cc->cc_current->pcg_avail == cc->cc_current->pcg_size);
-    KASSERT(cc->cc_previous->pcg_avail == cc->cc_previous->pcg_size);

     pc = cc->cc_cache;
-    pcg = NULL;
     cc->cc_misses++;
|
|
     /*
-     * If there are no empty groups in the cache then allocate one
-     * while still unlocked.
+     * If there appear to be no empty groups in the cache then
+     * allocate one in advance.
      */
+    empty = NULL;
     if (__predict_false(pc->pc_emptygroups == NULL)) {
-        if (__predict_true(!pool_cache_disable)) {
-            pcg = pool_get(pc->pc_pcgpool, PR_NOWAIT);
-        }
-        if (__predict_true(pcg != NULL)) {
-            pcg->pcg_avail = 0;
-            pcg->pcg_size = pc->pc_pcgsize;
-        }
+        if (__predict_false(pool_cache_disable)) {
+            empty = NULL;
+        } else if (pc->pc_pcgsize == PCG_NOBJECTS_LARGE) {
+            empty = pool_get(&pcg_large_pool, PR_NOWAIT);
+        } else {
+            empty = pool_get(&pcg_normal_pool, PR_NOWAIT);
+        }
     }
|
|
     /* Lock the cache. */
+    ncsw = curlwp->l_ncsw;
     if (__predict_false(!mutex_tryenter(&pc->pc_lock))) {
-        ncsw = curlwp->l_ncsw;
         mutex_enter(&pc->pc_lock);
         pc->pc_contended++;
+    }
|
|
     /*
-     * If we context switched while locking, then our view of
-     * the per-CPU data is invalid: retry.
+     * If we speculatively allocated an empty group, link it into
+     * the cache's list.
      */
-        if (__predict_false(curlwp->l_ncsw != ncsw)) {
-            mutex_exit(&pc->pc_lock);
-            if (pcg != NULL) {
-                pool_put(pc->pc_pcgpool, pcg);
-            }
-            return true;
-        }
+    if (empty != NULL) {
+        empty->pcg_avail = 0;
+        empty->pcg_size = pc->pc_pcgsize;
+        empty->pcg_next = pc->pc_emptygroups;
+        pc->pc_emptygroups = empty;
+        pc->pc_nempty++;
+        pc->pc_misses++;
     }

-    /* If there are no empty groups in the cache then allocate one. */
-    if (pcg == NULL && pc->pc_emptygroups != NULL) {
-        pcg = pc->pc_emptygroups;
-        pc->pc_emptygroups = pcg->pcg_next;
-        pc->pc_nempty--;
+    /*
+     * If we context switched while locking, then our view of the
+     * per-CPU data is invalid: retry.
+     */
+    if (__predict_false(curlwp->l_ncsw != ncsw)) {
+        mutex_exit(&pc->pc_lock);
+        return true;
     }
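Note: the divergence in this hunk is about where the preallocated empty group goes. Both columns refuse to call pool_get() with pc_lock held, so they allocate the group first; 1.162 then links it into pc_emptygroups unconditionally, counts a pc_misses for it, and only afterwards checks l_ncsw, while 1.158.2.3 keeps the group in a local and, on a detected context switch, frees it and retries. l_ncsw is the LWP's cumulative context-switch count: if it changed while we slept on the mutex, cc may now belong to some other CPU. A compilable sketch of the validate-by-counter pattern; the locking primitives and names are stand-ins, not the kernel API:

    #include <stdbool.h>
    #include <stdlib.h>

    static unsigned l_ncsw;         /* stand-in for curlwp->l_ncsw */
    static bool contended = true;   /* force the slow acquire once */

    static bool mutex_tryenter(void) { return !contended; }
    static void mutex_enter(void) { contended = false; l_ncsw++; /* we slept */ }
    static void mutex_exit(void) { }

    /* Returns true when the caller must retry with a fresh per-CPU view. */
    static bool put_slow(void)
    {
        void *pcg = malloc(64);     /* speculative group allocation */
        unsigned ncsw;

        if (!mutex_tryenter()) {
            ncsw = l_ncsw;
            mutex_enter();
            if (l_ncsw != ncsw) {   /* context switched while blocked */
                mutex_exit();
                free(pcg);          /* 1.158.2.3: undo the speculation */
                return true;        /* ...and retry */
            }
        }
        /* ...install pcg and release the object here... */
        mutex_exit();
        free(pcg);
        return false;
    }

    int main(void)
    {
        while (put_slow())
            continue;
        return 0;
    }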
|
|
     /*
Line 2569 (1.158.2.3) / Line 2570 (1.162): pool_cache_put_slow(pool_cache_cpu_t *cc

      * to the cache. Install the empty group to the local CPU
      * and return.
      */
-    if (pcg != NULL) {
+    if (__predict_true((pcg = pc->pc_emptygroups) != NULL)) {
         KASSERT(pcg->pcg_avail == 0);
+        pc->pc_emptygroups = pcg->pcg_next;
         if (__predict_false(cc->cc_previous == &pcg_dummy)) {
             cc->cc_previous = pcg;
         } else {
             cur = cc->cc_current;
             if (__predict_true(cur != &pcg_dummy)) {
-                KASSERT(cur->pcg_avail == cur->pcg_size);
+                KASSERT(cur->pcg_avail == pcg->pcg_size);
                 cur->pcg_next = pc->pc_fullgroups;
                 pc->pc_fullgroups = cur;
                 pc->pc_nfull++;
             }
             cc->cc_current = pcg;
         }
-        pc->pc_hits++;
+        pc->pc_hits += (empty == NULL);
+        pc->pc_nempty--;
         mutex_exit(&pc->pc_lock);
         return true;
     }
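Note: this rotation is shared by both columns: once an empty group is in hand, the full cc_current group is pushed onto the cache-global pc_fullgroups list and the empty group becomes cc_current, so the next put hits the fast path again. The 1.162 KASSERT compares cur->pcg_avail against pcg->pcg_size, which appears to check the same thing only because all groups in one cache share a size; the 1.158.2.3 form states "cur is full" directly. A compilable sketch of the rotation, with the singly-linked push as in the source and the types reduced:

    #include <assert.h>
    #include <stddef.h>

    typedef struct pcg {
        struct pcg *pcg_next;
        unsigned pcg_avail, pcg_size;
    } pcg_t;

    struct cache {
        pcg_t *cc_current;      /* per-CPU: full at this point */
        pcg_t *pc_fullgroups;   /* global list, pc_lock held */
        unsigned pc_nfull;
    };

    /* Push the full current group onto the global list, install 'empty'. */
    static void rotate_groups(struct cache *c, pcg_t *empty)
    {
        pcg_t *cur = c->cc_current;

        assert(cur->pcg_avail == cur->pcg_size);    /* "cur is full" */
        cur->pcg_next = c->pc_fullgroups;
        c->pc_fullgroups = cur;
        c->pc_nfull++;
        c->cc_current = empty;
    }

    int main(void)
    {
        pcg_t full = { NULL, 15, 15 }, empty = { NULL, 0, 15 };
        struct cache c = { &full, NULL, 0 };

        rotate_groups(&c, &empty);
        assert(c.pc_fullgroups == &full && c.cc_current == &empty);
        return 0;
    }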
Line 2614 (1.158.2.3) / Line 2617 (1.162): pool_cache_put_paddr(pool_cache_t pc, vo
|
     pcg_t *pcg;
     int s;

-    KASSERT(object != NULL);
     FREECHECK_IN(&pc->pc_freecheck, object);

     /* Lock out interrupts and disable preemption. */
     s = splvm();
-    while (/* CONSTCOND */ true) {
+    do {
         /* If the current group isn't full, release it there. */
         cc = pc->pc_cpus[curcpu()->ci_index];
         KASSERT(cc->cc_cache == pc);
Line 2649 (1.158.2.3) / Line 2651 (1.162): pool_cache_put_paddr(pool_cache_t pc, vo
|
      * If put_slow() releases the object for us, it
      * will return false. Otherwise we need to retry.
      */
-        if (!pool_cache_put_slow(cc, s, object))
-            break;
-    }
+    } while (pool_cache_put_slow(cc, s, object));
 }
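Note: same transformation as in pool_cache_get_paddr() above. The do/while form and the while(true)-plus-break form execute identically here, since the slow-path call is the last thing evaluated on each pass either way; the /* CONSTCOND */ form merely silences lint about the constant condition. A trivial, illustrative demonstration that both shapes run the body the same number of times:

    #include <assert.h>
    #include <stdbool.h>

    static bool step(int *n)    /* stands in for pool_cache_put_slow() */
    {
        return --*n > 0;        /* true: retry */
    }

    int main(void)
    {
        int a = 3, b = 3;

        do {
            /* fast-path attempt would go here */
        } while (step(&a));

        while (/* CONSTCOND */ true) {
            /* fast-path attempt would go here */
            if (!step(&b))
                break;
        }

        assert(a == b);
        return 0;
    }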
|
|
     /*
Line 2671 pool_cache_xcall(pool_cache_t pc)
|
     mutex_enter(&pc->pc_lock);
     cc = pc->pc_cpus[curcpu()->ci_index];
     cur = cc->cc_current;
-    cc->cc_current = __UNCONST(&pcg_dummy);
+    cc->cc_current = &pcg_dummy;
     prev = cc->cc_previous;
-    cc->cc_previous = __UNCONST(&pcg_dummy);
+    cc->cc_previous = &pcg_dummy;
     if (cur != &pcg_dummy) {
         if (cur->pcg_avail == cur->pcg_size) {
             list = &pc->pc_fullgroups;
|
|
     if (pool_in_cg(pp, cc->cc_current, addr) ||
         pool_in_cg(pp, cc->cc_previous, addr)) {
         struct cpu_info *ci =
-            cpu_lookup(i);
+            cpu_lookup_byindex(i);

         incpucache = true;
         snprintf(cpucachestr,