--- version 1.156.2.2, 2008/06/04 02:05:39
+++ version 1.183, 2010/04/25 11:49:04
 /*	$NetBSD$	*/
 
 /*-
- * Copyright (c) 1997, 1999, 2000, 2002, 2007, 2008 The NetBSD Foundation, Inc.
+ * Copyright (c) 1997, 1999, 2000, 2002, 2007, 2008, 2010
+ *     The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
@@ -69 +70 @@ __KERNEL_RCSID(0, "$NetBSD$");
  */
 
 /* List of all pools */
-TAILQ_HEAD(,pool) pool_head = TAILQ_HEAD_INITIALIZER(pool_head);
+static TAILQ_HEAD(, pool) pool_head = TAILQ_HEAD_INITIALIZER(pool_head);
 
 /* Private pool for page header structures */
 #define PHPOOL_MAX	8
@@ -104 +105 @@ static struct pool *drainpp;
 static kmutex_t pool_head_lock;
 static kcondvar_t pool_busy;
 
+/* This lock protects initialization of a potentially shared pool allocator */
+static kmutex_t pool_allocator_lock;
+
 typedef uint32_t pool_item_bitmap_t;
 #define BITMAP_SIZE	(CHAR_BIT * sizeof(pool_item_bitmap_t))
 #define BITMAP_MASK	(BITMAP_SIZE - 1)
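
With a 32-bit pool_item_bitmap_t, BITMAP_SIZE is 32 and BITMAP_MASK is 31, so an item index splits into a word index (idx / BITMAP_SIZE) and a bit index (idx & BITMAP_MASK). A minimal sketch of how a per-page free-item map built on these constants marks and tests an item; the helper names here are illustrative, not from this file:

    static void
    bitmap_mark_free(pool_item_bitmap_t *map, unsigned int idx)
    {
        /* Set bit (idx & BITMAP_MASK) in word (idx / BITMAP_SIZE). */
        map[idx / BITMAP_SIZE] |=
            (pool_item_bitmap_t)1 << (idx & BITMAP_MASK);
    }

    static int
    bitmap_is_free(const pool_item_bitmap_t *map, unsigned int idx)
    {
        return (map[idx / BITMAP_SIZE] >> (idx & BITMAP_MASK)) & 1;
    }
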
@@ -179 +183 @@ static struct pool cache_cpu_pool;
 TAILQ_HEAD(,pool_cache) pool_cache_head =
     TAILQ_HEAD_INITIALIZER(pool_cache_head);
 
-int pool_cache_disable;
+int pool_cache_disable;		/* global disable for caching */
+static const pcg_t pcg_dummy;	/* zero sized: always empty, yet always full */
 
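pcg_dummy is a sentinel group: being zero-sized, its pcg_avail of 0 makes it look empty to the get path, while pcg_avail == pcg_size (0 == 0) makes it look full to the put path. Installing it in cc_current/cc_previous instead of NULL lets the fast paths drop one test per access, as the changes further down show:

    /* Old fast paths: every access first tests for a missing group. */
    if (pcg != NULL && pcg->pcg_avail > 0) { /* get */ }
    if (pcg != NULL && pcg->pcg_avail < pcg->pcg_size) { /* put */ }

    /* New fast paths: cc_current/cc_previous always point at a group,
     * possibly pcg_dummy, which fails both tests by construction
     * (pcg_avail == 0 and pcg_size == 0). */
    if (__predict_true(pcg->pcg_avail > 0)) { /* get: dummy is empty */ }
    if (__predict_true(pcg->pcg_avail < pcg->pcg_size)) { /* put: dummy is full */ }
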
-static pool_cache_cpu_t *pool_cache_put_slow(pool_cache_cpu_t *, int *,
-					     void *, paddr_t);
-static pool_cache_cpu_t *pool_cache_get_slow(pool_cache_cpu_t *, int *,
-					     void **, paddr_t *, int);
+static bool	pool_cache_put_slow(pool_cache_cpu_t *, int,
+				    void *);
+static bool	pool_cache_get_slow(pool_cache_cpu_t *, int,
+				    void **, paddr_t *, int);
 static void	pool_cache_cpu_init1(struct cpu_info *, pool_cache_t);
 static void	pool_cache_invalidate_groups(pool_cache_t, pcg_t *);
+static void	pool_cache_invalidate_cpu(pool_cache_t, u_int);
 static void	pool_cache_xcall(pool_cache_t);
 
 static int	pool_catchup(struct pool *);
@@ -230 +235 @@ int pool_logsize = POOL_LOGSIZE;
 static inline void
 pr_log(struct pool *pp, void *v, int action, const char *file, long line)
 {
-	int n = pp->pr_curlogentry;
+	int n;
 	struct pool_log *pl;
 
 	if ((pp->pr_roflags & PR_LOGGING) == 0)
 		return;
 
+	if (pp->pr_log == NULL) {
+		if (kmem_map != NULL)
+			pp->pr_log = malloc(
+			    pool_logsize * sizeof(struct pool_log),
+			    M_TEMP, M_NOWAIT | M_ZERO);
+		if (pp->pr_log == NULL)
+			return;
+		pp->pr_curlogentry = 0;
+		pp->pr_logsize = pool_logsize;
+	}
+
 	/*
 	 * Fill in the current entry.  Wrap around and overwrite
 	 * the oldest entry if necessary.
 	 */
+	n = pp->pr_curlogentry;
 	pl = &pp->pr_log[n];
 	pl->pl_file = file;
 	pl->pl_line = line;
@@ -257 +274 @@ pr_printlog(struct pool *pp, struct pool
 	int i = pp->pr_logsize;
 	int n = pp->pr_curlogentry;
 
-	if ((pp->pr_roflags & PR_LOGGING) == 0)
+	if (pp->pr_log == NULL)
 		return;
 
 	/*
 
 pool_subsystem_init(void)
 {
 	struct pool_allocator *pa;
-	__link_set_decl(pools, struct link_pool_init);
-	struct link_pool_init * const *pi;
 
 	mutex_init(&pool_head_lock, MUTEX_DEFAULT, IPL_NONE);
+	mutex_init(&pool_allocator_lock, MUTEX_DEFAULT, IPL_NONE);
 	cv_init(&pool_busy, "poolbusy");
 
-	__link_set_foreach(pi, pools)
-		pool_init((*pi)->pp, (*pi)->size, (*pi)->align,
-		    (*pi)->align_offset, (*pi)->flags, (*pi)->wchan,
-		    (*pi)->palloc, (*pi)->ipl);
 
 	while ((pa = SLIST_FIRST(&pa_deferinitq)) != NULL) {
 		KASSERT(pa->pa_backingmapptr != NULL);
 		KASSERT(*pa->pa_backingmapptr != NULL);
@@ -656 +667 @@ pool_init(struct pool *pp, size_t size,
 		palloc = &pool_allocator_nointr_fullpage;
 	}
 #endif /* POOL_SUBPAGE */
-	if ((palloc->pa_flags & PA_INITIALIZED) == 0) {
+	if (!cold)
+		mutex_enter(&pool_allocator_lock);
+	if (palloc->pa_refcnt++ == 0) {
 		if (palloc->pa_pagesz == 0)
 			palloc->pa_pagesz = PAGE_SIZE;
 
@@ -669 +682 @@ pool_init(struct pool *pp, size_t size,
 	if (palloc->pa_backingmapptr != NULL) {
 		pa_reclaim_register(palloc);
 	}
-		palloc->pa_flags |= PA_INITIALIZED;
 	}
+	if (!cold)
+		mutex_exit(&pool_allocator_lock);
 
 	if (align == 0)
 		align = ALIGN(1);
@@ -793 +807 @@ pool_init(struct pool *pp, size_t size,
 	pp->pr_nidle = 0;
 	pp->pr_refcnt = 0;
 
-#ifdef POOL_DIAGNOSTIC
-	if (flags & PR_LOGGING) {
-		if (kmem_map == NULL ||
-		    (pp->pr_log = malloc(pool_logsize * sizeof(struct pool_log),
-		     M_TEMP, M_NOWAIT)) == NULL)
-			pp->pr_roflags &= ~PR_LOGGING;
-		pp->pr_curlogentry = 0;
-		pp->pr_logsize = pool_logsize;
-	}
-#endif
+	pp->pr_log = NULL;
 
 	pp->pr_entered_file = NULL;
 	pp->pr_entered_line = 0;
@@ -851 +856 @@ pool_init(struct pool *pp, size_t size,
 	}
 
 	/* Insert into the list of all pools. */
-	if (__predict_true(!cold))
+	if (!cold)
 		mutex_enter(&pool_head_lock);
 	TAILQ_FOREACH(pp1, &pool_head, pr_poollist) {
 		if (strcmp(pp1->pr_wchan, pp->pr_wchan) > 0)
@@ -861 +866 @@ pool_init(struct pool *pp, size_t size,
 		TAILQ_INSERT_TAIL(&pool_head, pp, pr_poollist);
 	else
 		TAILQ_INSERT_BEFORE(pp1, pp, pr_poollist);
-	if (__predict_true(!cold))
+	if (!cold)
 		mutex_exit(&pool_head_lock);
 
 	/* Insert this into the list of pools using this allocator. */
-	if (__predict_true(!cold))
+	if (!cold)
 		mutex_enter(&palloc->pa_lock);
 	TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list);
-	if (__predict_true(!cold))
+	if (!cold)
 		mutex_exit(&palloc->pa_lock);
 
 	pool_reclaim_register(pp);
@@ -898 +903 @@ pool_destroy(struct pool *pp)
 	TAILQ_REMOVE(&pp->pr_alloc->pa_list, pp, pr_alloc_list);
 	mutex_exit(&pp->pr_alloc->pa_lock);
 
+	mutex_enter(&pool_allocator_lock);
+	if (--pp->pr_alloc->pa_refcnt == 0)
+		mutex_destroy(&pp->pr_alloc->pa_lock);
+	mutex_exit(&pool_allocator_lock);
+
 	mutex_enter(&pp->pr_lock);
 
 	KASSERT(pp->pr_cache == NULL);
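The pa_refcnt decrement here pairs with the `palloc->pa_refcnt++` added to pool_init() above: the first pool using a shared allocator sets it up, the last pool_destroy() tears its lock down, and pool_allocator_lock serializes the transitions. A self-contained userland analogue of the scheme (names illustrative):

    #include <pthread.h>

    static pthread_mutex_t allocator_lock = PTHREAD_MUTEX_INITIALIZER;
    static unsigned int refcnt;

    static void
    shared_allocator_acquire(void)
    {
        pthread_mutex_lock(&allocator_lock);
        if (refcnt++ == 0) {
            /* first user: set up the shared allocator state */
        }
        pthread_mutex_unlock(&allocator_lock);
    }

    static void
    shared_allocator_release(void)
    {
        pthread_mutex_lock(&allocator_lock);
        if (--refcnt == 0) {
            /* last user: destroy the allocator's own locks */
        }
        pthread_mutex_unlock(&allocator_lock);
    }
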
@@ -923 +933 @@ pool_destroy(struct pool *pp)
 	pr_pagelist_free(pp, &pq);
 
 #ifdef POOL_DIAGNOSTIC
-	if ((pp->pr_roflags & PR_LOGGING) != 0)
+	if (pp->pr_log != NULL) {
 		free(pp->pr_log, M_TEMP);
+		pp->pr_log = NULL;
+	}
 #endif
 
 	cv_destroy(&pp->pr_cv);
@@ -1253 +1265 @@ pool_do_put(struct pool *pp, void *v, st
 
 	if (pp->pr_flags & PR_WANTED) {
 		pp->pr_flags &= ~PR_WANTED;
-		if (ph->ph_nmissing == 0)
-			pp->pr_nidle++;
 		cv_broadcast(&pp->pr_cv);
-		return;
 	}
 
 	/*
@@ -1530 +1539 @@ pool_update_curpage(pp)
 	if (pp->pr_curpage == NULL) {
 		pp->pr_curpage = LIST_FIRST(&pp->pr_emptypages);
 	}
+	KASSERT((pp->pr_curpage == NULL && pp->pr_nitems == 0) ||
+	    (pp->pr_curpage != NULL && pp->pr_nitems > 0));
 }
 
 void
@@ -1894 +1905 @@ pool_print1(struct pool *pp, const char
 	if (pc != NULL) {
 		cpuhit = 0;
 		cpumiss = 0;
-		for (i = 0; i < MAXCPUS; i++) {
+		for (i = 0; i < __arraycount(pc->pc_cpus); i++) {
 			if ((cc = pc->pc_cpus[i]) == NULL)
 				continue;
 			cpuhit += cc->cc_hits;
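__arraycount() is NetBSD's element-count macro from <sys/cdefs.h>; switching the loops to it bounds them by the actual size of the pc_cpus array rather than the MAXCPUS constant:

    /* As defined in <sys/cdefs.h>: */
    #define __arraycount(__x)	(sizeof(__x) / sizeof(__x[0]))
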
@@ -2085 +2096 @@ pool_cache_bootstrap(pool_cache_t pc, si
 
 	if ((flags & PR_LARGECACHE) != 0) {
 		pc->pc_pcgsize = PCG_NOBJECTS_LARGE;
+		pc->pc_pcgpool = &pcg_large_pool;
 	} else {
 		pc->pc_pcgsize = PCG_NOBJECTS_NORMAL;
+		pc->pc_pcgpool = &pcg_normal_pool;
 	}
 
 	/* Allocate per-CPU caches. */
 
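Recording the group pool in pc_pcgpool at bootstrap time lets pool_cache_put_slow() allocate a group with a single pool_get() instead of re-deriving the choice from the group size on every miss, as the old code removed further down did:

    /* Old put_slow(), removed below: pick the group pool per call. */
    if (nobj == PCG_NOBJECTS_LARGE)
        pcg = pool_get(&pcg_large_pool, PR_NOWAIT);
    else
        pcg = pool_get(&pcg_normal_pool, PR_NOWAIT);

    /* New code: the choice was made once in pool_cache_bootstrap(). */
    pcg = pool_get(pc->pc_pcgpool, PR_NOWAIT);
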
 pool_cache_destroy(pool_cache_t pc)
 {
 	struct pool *pp = &pc->pc_pool;
-	pool_cache_cpu_t *cc;
-	pcg_t *pcg;
-	int i;
+	u_int i;
 
 	/* Remove it from the global list. */
 	mutex_enter(&pool_head_lock);
@@ -2148 +2159 @@ pool_cache_destroy(pool_cache_t pc)
 	mutex_exit(&pp->pr_lock);
 
 	/* Destroy per-CPU data */
-	for (i = 0; i < MAXCPUS; i++) {
-		if ((cc = pc->pc_cpus[i]) == NULL)
-			continue;
-		if ((pcg = cc->cc_current) != NULL) {
-			pcg->pcg_next = NULL;
-			pool_cache_invalidate_groups(pc, pcg);
-		}
-		if ((pcg = cc->cc_previous) != NULL) {
-			pcg->pcg_next = NULL;
-			pool_cache_invalidate_groups(pc, pcg);
-		}
-		if (cc != &pc->pc_cpu0)
-			pool_put(&cache_cpu_pool, cc);
-	}
+	for (i = 0; i < __arraycount(pc->pc_cpus); i++)
+		pool_cache_invalidate_cpu(pc, i);
 
 	/* Finally, destroy it. */
 	mutex_destroy(&pc->pc_lock);
@@ -2182 +2181 @@ pool_cache_cpu_init1(struct cpu_info *ci
 
 	index = ci->ci_index;
 
-	KASSERT(index < MAXCPUS);
+	KASSERT(index < __arraycount(pc->pc_cpus));
 
 	if ((cc = pc->pc_cpus[index]) != NULL) {
 		KASSERT(cc->cc_cpuindex == index);
@@ -2209 +2208 @@ pool_cache_cpu_init1(struct cpu_info *ci
 	cc->cc_cpuindex = index;
 	cc->cc_hits = 0;
 	cc->cc_misses = 0;
-	cc->cc_current = NULL;
-	cc->cc_previous = NULL;
+	cc->cc_current = __UNCONST(&pcg_dummy);
+	cc->cc_previous = __UNCONST(&pcg_dummy);
 
 	pc->pc_cpus[index] = cc;
 }
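cc_current and cc_previous are writable pcg_t pointers, while pcg_dummy is deliberately const (it must never be modified); __UNCONST(), NetBSD's cast-away-const macro from <sys/cdefs.h>, bridges the two. It is defined essentially as:

    /* <sys/cdefs.h>: strip const without a compiler diagnostic. */
    #define __UNCONST(a)	((void *)(unsigned long)(const void *)(a))
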
@@ -2308 +2307 @@ pool_cache_invalidate_groups(pool_cache_
  *
  *	Invalidate a pool cache (destruct and release all of the
  *	cached objects).  Does not reclaim objects from the pool.
+ *
+ *	Note: For pool caches that provide constructed objects, there
+ *	is an assumption that another level of synchronization is occurring
+ *	between the input to the constructor and the cache invalidation.
  */
 void
 pool_cache_invalidate(pool_cache_t pc)
 {
 	pcg_t *full, *empty, *part;
+#if 0
+	uint64_t where;
+
+	if (ncpu < 2 || !mp_online) {
+		/*
+		 * We might be called early enough in the boot process
+		 * for the CPU data structures to not be fully initialized.
+		 * In this case, simply gather the local CPU's cache now
+		 * since it will be the only one running.
+		 */
+		pool_cache_xcall(pc);
+	} else {
+		/*
+		 * Gather all of the CPU-specific caches into the
+		 * global cache.
+		 */
+		where = xc_broadcast(0, (xcfunc_t)pool_cache_xcall, pc, NULL);
+		xc_wait(where);
+	}
+#endif
 	mutex_enter(&pc->pc_lock);
 	full = pc->pc_fullgroups;
 	empty = pc->pc_emptygroups;
@@ -2331 +2353 @@ pool_cache_invalidate(pool_cache_t pc)
 	pool_cache_invalidate_groups(pc, part);
 }
 
+/*
+ * pool_cache_invalidate_cpu:
+ *
+ *	Invalidate all CPU-bound cached objects in pool cache, the CPU being
+ *	identified by its associated index.
+ *	It is caller's responsibility to ensure that no operation is
+ *	taking place on this pool cache while doing this invalidation.
+ *	WARNING: as no inter-CPU locking is enforced, trying to invalidate
+ *	pool cached objects from a CPU different from the one currently running
+ *	may result in an undefined behaviour.
+ */
+static void
+pool_cache_invalidate_cpu(pool_cache_t pc, u_int index)
+{
+	pool_cache_cpu_t *cc;
+	pcg_t *pcg;
+
+	if ((cc = pc->pc_cpus[index]) == NULL)
+		return;
+
+	if ((pcg = cc->cc_current) != &pcg_dummy) {
+		pcg->pcg_next = NULL;
+		pool_cache_invalidate_groups(pc, pcg);
+	}
+	if ((pcg = cc->cc_previous) != &pcg_dummy) {
+		pcg->pcg_next = NULL;
+		pool_cache_invalidate_groups(pc, pcg);
+	}
+	if (cc != &pc->pc_cpu0)
+		pool_put(&cache_cpu_pool, cc);
+}
 
 void
 pool_cache_set_drain_hook(pool_cache_t pc, void (*fn)(void *, int), void *arg)
 {
@@ -2359 +2415 @@ pool_cache_sethardlimit(pool_cache_t pc,
 	pool_sethardlimit(&pc->pc_pool, n, warnmess, ratecap);
 }
 
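The rewrite below deletes the pool_cache_cpu_enter()/pool_cache_cpu_exit() pair: the fast paths now raise the IPL themselves with splvm(), pass the saved level to the slow path by value, and loop while the slow path reports "retry". The calling convention in outline, mirroring pool_cache_get_paddr() further down:

    s = splvm();		/* lock out interrupts, disable preemption */
    while (/* CONSTCOND */ true) {
        cc = pc->pc_cpus[curcpu()->ci_index];
        /* ... per-CPU fast path: on a hit, splx(s) and return ... */
        if (!pool_cache_get_slow(cc, s, &object, pap, flags))
            break;		/* object produced, or definitive failure */
        /* true: per-CPU state changed under us; retry the fast path */
    }
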
-static inline pool_cache_cpu_t *
-pool_cache_cpu_enter(pool_cache_t pc, int *s)
-{
-	pool_cache_cpu_t *cc;
-
-	/*
-	 * Prevent other users of the cache from accessing our
-	 * CPU-local data.  To avoid touching shared state, we
-	 * pull the neccessary information from CPU local data.
-	 */
-	KPREEMPT_DISABLE(curlwp);
-	cc = pc->pc_cpus[curcpu()->ci_index];
-	KASSERT(cc->cc_cache == pc);
-	if (cc->cc_ipl != IPL_NONE) {
-		*s = splraiseipl(cc->cc_iplcookie);
-	}
-	return cc;
-}
-
-static inline void
-pool_cache_cpu_exit(pool_cache_cpu_t *cc, int *s)
-{
-
-	/* No longer need exclusive access to the per-CPU data. */
-	if (cc->cc_ipl != IPL_NONE) {
-		splx(*s);
-	}
-	KPREEMPT_ENABLE(curlwp);
-}
-
-pool_cache_cpu_t * __noinline
-pool_cache_get_slow(pool_cache_cpu_t *cc, int *s, void **objectp,
+static bool __noinline
+pool_cache_get_slow(pool_cache_cpu_t *cc, int s, void **objectp,
 		    paddr_t *pap, int flags)
 {
 	pcg_t *pcg, *cur;
@@ -2399 +2424 @@ pool_cache_get_slow(pool_cache_cpu_t *cc
 	pool_cache_t pc;
 	void *object;
 
+	KASSERT(cc->cc_current->pcg_avail == 0);
+	KASSERT(cc->cc_previous->pcg_avail == 0);
+
 	pc = cc->cc_cache;
 	cc->cc_misses++;
 
@@ -2406 +2434 @@ pool_cache_get_slow(pool_cache_cpu_t *cc
 	 * Nothing was available locally.  Try and grab a group
 	 * from the cache.
 	 */
-	if (!mutex_tryenter(&pc->pc_lock)) {
+	if (__predict_false(!mutex_tryenter(&pc->pc_lock))) {
 		ncsw = curlwp->l_ncsw;
 		mutex_enter(&pc->pc_lock);
 		pc->pc_contended++;
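The __predict_true()/__predict_false() annotations sprinkled through these paths are NetBSD's branch-prediction hints from <sys/cdefs.h>, thin wrappers over GCC's __builtin_expect() that let the compiler lay the unlikely arm out of line:

    /* As defined in <sys/cdefs.h> (GCC-style compilers): */
    #define __predict_true(exp)	__builtin_expect((exp) != 0, 1)
    #define __predict_false(exp)	__builtin_expect((exp) != 0, 0)
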
@@ -2418 +2446 @@ pool_cache_get_slow(pool_cache_cpu_t *cc
 		 */
 		if (curlwp->l_ncsw != ncsw) {
 			mutex_exit(&pc->pc_lock);
-			pool_cache_cpu_exit(cc, s);
-			return pool_cache_cpu_enter(pc, s);
+			return true;
 		}
 	}
 
-	if ((pcg = pc->pc_fullgroups) != NULL) {
+	if (__predict_true((pcg = pc->pc_fullgroups) != NULL)) {
 		/*
 		 * If there's a full group, release our empty
 		 * group back to the cache.  Install the full
 		 * group as cc_current and return.
 		 */
-		if ((cur = cc->cc_current) != NULL) {
+		if (__predict_true((cur = cc->cc_current) != &pcg_dummy)) {
 			KASSERT(cur->pcg_avail == 0);
 			cur->pcg_next = pc->pc_emptygroups;
 			pc->pc_emptygroups = cur;
@@ -2441 +2468 @@ pool_cache_get_slow(pool_cache_cpu_t *cc
 		pc->pc_hits++;
 		pc->pc_nfull--;
 		mutex_exit(&pc->pc_lock);
-		return cc;
+		return true;
 	}
 
 	/*
@@ -2451 +2478 @@ pool_cache_get_slow(pool_cache_cpu_t *cc
 	 */
 	pc->pc_misses++;
 	mutex_exit(&pc->pc_lock);
-	pool_cache_cpu_exit(cc, s);
+	splx(s);
 
 	object = pool_get(&pc->pc_pool, flags);
 	*objectp = object;
-	if (object == NULL)
-		return NULL;
+	if (__predict_false(object == NULL))
+		return false;
 
-	if ((*pc->pc_ctor)(pc->pc_arg, object, flags) != 0) {
+	if (__predict_false((*pc->pc_ctor)(pc->pc_arg, object, flags) != 0)) {
 		pool_put(&pc->pc_pool, object);
 		*objectp = NULL;
-		return NULL;
+		return false;
 	}
 
 	KASSERT((((vaddr_t)object + pc->pc_pool.pr_itemoffset) &
@@ -2476 +2503 @@ pool_cache_get_slow(pool_cache_cpu_t *cc
 	}
 
 	FREECHECK_OUT(&pc->pc_freecheck, object);
-	return NULL;
+	return false;
 }
 
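Both slow paths guard against migration with the LWP's context-switch counter: curlwp->l_ncsw is sampled before a blocking mutex_enter(), and if it changed, the thread may have run on another CPU, so cc and anything derived from curcpu() is stale. The pattern in isolation:

    uint64_t ncsw;

    if (__predict_false(!mutex_tryenter(&pc->pc_lock))) {
        ncsw = curlwp->l_ncsw;		/* snapshot before sleeping */
        mutex_enter(&pc->pc_lock);	/* may block and migrate    */
        if (__predict_false(curlwp->l_ncsw != ncsw)) {
            /* We slept: the per-CPU pointers captured earlier may
             * belong to another CPU now.  Back out and let the
             * caller re-fetch them and retry. */
            mutex_exit(&pc->pc_lock);
            return true;
        }
    }
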
 /*
@@ -2499 +2526 @@ pool_cache_get_paddr(pool_cache_t pc, in
 	}
 #endif
 
-	cc = pool_cache_cpu_enter(pc, &s);
-	do {
+	/* Lock out interrupts and disable preemption. */
+	s = splvm();
+	while (/* CONSTCOND */ true) {
 		/* Try and allocate an object from the current group. */
+		cc = pc->pc_cpus[curcpu()->ci_index];
+		KASSERT(cc->cc_cache == pc);
 		pcg = cc->cc_current;
-		if (pcg != NULL && pcg->pcg_avail > 0) {
+		if (__predict_true(pcg->pcg_avail > 0)) {
 			object = pcg->pcg_objects[--pcg->pcg_avail].pcgo_va;
-			if (pap != NULL)
+			if (__predict_false(pap != NULL))
 				*pap = pcg->pcg_objects[pcg->pcg_avail].pcgo_pa;
 #if defined(DIAGNOSTIC)
 			pcg->pcg_objects[pcg->pcg_avail].pcgo_va = NULL;
-#endif /* defined(DIAGNOSTIC) */
-			KASSERT(pcg->pcg_avail <= pcg->pcg_size);
+			KASSERT(pcg->pcg_avail < pcg->pcg_size);
 			KASSERT(object != NULL);
+#endif
 			cc->cc_hits++;
-			pool_cache_cpu_exit(cc, &s);
+			splx(s);
 			FREECHECK_OUT(&pc->pc_freecheck, object);
 			return object;
 		}
@@ -2523 +2553 @@ pool_cache_get_paddr(pool_cache_t pc, in
 		 * it with the current group and allocate from there.
 		 */
 		pcg = cc->cc_previous;
-		if (pcg != NULL && pcg->pcg_avail > 0) {
+		if (__predict_true(pcg->pcg_avail > 0)) {
 			cc->cc_previous = cc->cc_current;
 			cc->cc_current = pcg;
 			continue;
@@ -2532 +2562 @@ pool_cache_get_paddr(pool_cache_t pc, in
 		/*
 		 * Can't allocate from either group: try the slow path.
 		 * If get_slow() allocated an object for us, or if
-		 * no more objects are available, it will return NULL.
+		 * no more objects are available, it will return false.
 		 * Otherwise, we need to retry.
 		 */
-		cc = pool_cache_get_slow(cc, &s, &object, pap, flags);
-	} while (cc != NULL);
+		if (!pool_cache_get_slow(cc, s, &object, pap, flags))
+			break;
+	}
 
 	return object;
 }
 
-pool_cache_cpu_t * __noinline
-pool_cache_put_slow(pool_cache_cpu_t *cc, int *s, void *object, paddr_t pa)
+static bool __noinline
+pool_cache_put_slow(pool_cache_cpu_t *cc, int s, void *object)
 {
 	pcg_t *pcg, *cur;
 	uint64_t ncsw;
 	pool_cache_t pc;
-	u_int nobj;
 
+	KASSERT(cc->cc_current->pcg_avail == cc->cc_current->pcg_size);
+	KASSERT(cc->cc_previous->pcg_avail == cc->cc_previous->pcg_size);
+
 	pc = cc->cc_cache;
+	pcg = NULL;
 	cc->cc_misses++;
 
 	/*
-	 * No free slots locally.  Try to grab an empty, unused
-	 * group from the cache.
+	 * If there are no empty groups in the cache then allocate one
+	 * while still unlocked.
 	 */
-	if (!mutex_tryenter(&pc->pc_lock)) {
+	if (__predict_false(pc->pc_emptygroups == NULL)) {
+		if (__predict_true(!pool_cache_disable)) {
+			pcg = pool_get(pc->pc_pcgpool, PR_NOWAIT);
+		}
+		if (__predict_true(pcg != NULL)) {
+			pcg->pcg_avail = 0;
+			pcg->pcg_size = pc->pc_pcgsize;
+		}
+	}
+
+	/* Lock the cache. */
+	if (__predict_false(!mutex_tryenter(&pc->pc_lock))) {
 		ncsw = curlwp->l_ncsw;
 		mutex_enter(&pc->pc_lock);
 		pc->pc_contended++;
 
 		/*
-		 * If we context switched while locking, then
-		 * our view of the per-CPU data is invalid:
-		 * retry.
+		 * If we context switched while locking, then our view of
+		 * the per-CPU data is invalid: retry.
 		 */
-		if (curlwp->l_ncsw != ncsw) {
+		if (__predict_false(curlwp->l_ncsw != ncsw)) {
 			mutex_exit(&pc->pc_lock);
-			pool_cache_cpu_exit(cc, s);
-			return pool_cache_cpu_enter(pc, s);
+			if (pcg != NULL) {
+				pool_put(pc->pc_pcgpool, pcg);
+			}
+			return true;
 		}
 	}
 
-	if ((pcg = pc->pc_emptygroups) != NULL) {
-		/*
-		 * If there's a empty group, release our full
-		 * group back to the cache.  Install the empty
-		 * group and return.
-		 */
-		KASSERT(pcg->pcg_avail == 0);
-		pc->pc_emptygroups = pcg->pcg_next;
-		if (cc->cc_previous == NULL) {
+	/* If there are no empty groups in the cache then allocate one. */
+	if (pcg == NULL && pc->pc_emptygroups != NULL) {
+		pcg = pc->pc_emptygroups;
+		pc->pc_emptygroups = pcg->pcg_next;
+		pc->pc_nempty--;
+	}
+
+	/*
+	 * If there's a empty group, release our full group back
+	 * to the cache.  Install the empty group to the local CPU
+	 * and return.
+	 */
+	if (pcg != NULL) {
+		KASSERT(pcg->pcg_avail == 0);
+		if (__predict_false(cc->cc_previous == &pcg_dummy)) {
 			cc->cc_previous = pcg;
 		} else {
-			if ((cur = cc->cc_current) != NULL) {
-				KASSERT(cur->pcg_avail == pcg->pcg_size);
+			cur = cc->cc_current;
+			if (__predict_true(cur != &pcg_dummy)) {
+				KASSERT(cur->pcg_avail == cur->pcg_size);
 				cur->pcg_next = pc->pc_fullgroups;
 				pc->pc_fullgroups = cur;
 				pc->pc_nfull++;
@@ -2593 +2646 @@ pool_cache_put_slow(pool_cache_cpu_t *cc
 			cc->cc_current = pcg;
 		}
 		pc->pc_hits++;
-		pc->pc_nempty--;
 		mutex_exit(&pc->pc_lock);
-		return cc;
+		return true;
 	}
 
 	/*
-	 * Nothing available locally or in cache.  Take the
-	 * slow path and try to allocate a new group that we
-	 * can release to.
+	 * Nothing available locally or in cache, and we didn't
+	 * allocate an empty group.  Take the slow path and destroy
+	 * the object here and now.
 	 */
 	pc->pc_misses++;
 	mutex_exit(&pc->pc_lock);
-	pool_cache_cpu_exit(cc, s);
-
-	/*
-	 * If we can't allocate a new group, just throw the
-	 * object away.
-	 */
-	nobj = pc->pc_pcgsize;
-	if (pool_cache_disable) {
-		pcg = NULL;
-	} else if (nobj == PCG_NOBJECTS_LARGE) {
-		pcg = pool_get(&pcg_large_pool, PR_NOWAIT);
-	} else {
-		pcg = pool_get(&pcg_normal_pool, PR_NOWAIT);
-	}
-	if (pcg == NULL) {
-		pool_cache_destruct_object(pc, object);
-		return NULL;
-	}
-	pcg->pcg_avail = 0;
-	pcg->pcg_size = nobj;
-
-	/*
-	 * Add the empty group to the cache and try again.
-	 */
-	mutex_enter(&pc->pc_lock);
-	pcg->pcg_next = pc->pc_emptygroups;
-	pc->pc_emptygroups = pcg;
-	pc->pc_nempty++;
-	mutex_exit(&pc->pc_lock);
-
-	return pool_cache_cpu_enter(pc, s);
+	splx(s);
+	pool_cache_destruct_object(pc, object);
+
+	return false;
 }
 
 /*
@@ -2651 +2676 @@ pool_cache_put_paddr(pool_cache_t pc, vo
 	pcg_t *pcg;
 	int s;
 
+	KASSERT(object != NULL);
 	FREECHECK_IN(&pc->pc_freecheck, object);
 
-	cc = pool_cache_cpu_enter(pc, &s);
-	do {
+	/* Lock out interrupts and disable preemption. */
+	s = splvm();
+	while (/* CONSTCOND */ true) {
 		/* If the current group isn't full, release it there. */
+		cc = pc->pc_cpus[curcpu()->ci_index];
+		KASSERT(cc->cc_cache == pc);
 		pcg = cc->cc_current;
-		if (pcg != NULL && pcg->pcg_avail < pcg->pcg_size) {
+		if (__predict_true(pcg->pcg_avail < pcg->pcg_size)) {
 			pcg->pcg_objects[pcg->pcg_avail].pcgo_va = object;
 			pcg->pcg_objects[pcg->pcg_avail].pcgo_pa = pa;
 			pcg->pcg_avail++;
 			cc->cc_hits++;
-			pool_cache_cpu_exit(cc, &s);
+			splx(s);
 			return;
 		}
 
 		/*
-		 * That failed.  If the previous group is empty, swap
+		 * That failed.  If the previous group isn't full, swap
 		 * it with the current group and try again.
 		 */
 		pcg = cc->cc_previous;
-		if (pcg != NULL && pcg->pcg_avail == 0) {
+		if (__predict_true(pcg->pcg_avail < pcg->pcg_size)) {
 			cc->cc_previous = cc->cc_current;
 			cc->cc_current = pcg;
 			continue;
@@ -2680 +2709 @@ pool_cache_put_paddr(pool_cache_t pc, vo
 		/*
 		 * Can't free to either group: try the slow path.
 		 * If put_slow() releases the object for us, it
-		 * will return NULL.  Otherwise we need to retry.
+		 * will return false.  Otherwise we need to retry.
 		 */
-		cc = pool_cache_put_slow(cc, &s, object, pa);
-	} while (cc != NULL);
+		if (!pool_cache_put_slow(cc, s, object))
+			break;
+	}
 }
 
 /*
@@ -2697 +2727 @@ pool_cache_xcall(pool_cache_t pc)
 {
 	pool_cache_cpu_t *cc;
 	pcg_t *prev, *cur, **list;
-	int s = 0; /* XXXgcc */
+	int s;
 
-	cc = pool_cache_cpu_enter(pc, &s);
-	cur = cc->cc_current;
-	cc->cc_current = NULL;
-	prev = cc->cc_previous;
-	cc->cc_previous = NULL;
-	pool_cache_cpu_exit(cc, &s);
-
-	/*
-	 * XXXSMP Go to splvm to prevent kernel_lock from being taken,
-	 * because locks at IPL_SOFTXXX are still spinlocks.  Does not
-	 * apply to IPL_SOFTBIO.  Cross-call threads do not take the
-	 * kernel_lock.
-	 */
 	s = splvm();
 	mutex_enter(&pc->pc_lock);
-	if (cur != NULL) {
+	cc = pc->pc_cpus[curcpu()->ci_index];
+	cur = cc->cc_current;
+	cc->cc_current = __UNCONST(&pcg_dummy);
+	prev = cc->cc_previous;
+	cc->cc_previous = __UNCONST(&pcg_dummy);
+	if (cur != &pcg_dummy) {
 		if (cur->pcg_avail == cur->pcg_size) {
 			list = &pc->pc_fullgroups;
 			pc->pc_nfull++;
@@ -2728 +2750 @@ pool_cache_xcall(pool_cache_t pc)
 		cur->pcg_next = *list;
 		*list = cur;
 	}
-	if (prev != NULL) {
+	if (prev != &pcg_dummy) {
 		if (prev->pcg_avail == prev->pcg_size) {
 			list = &pc->pc_fullgroups;
 			pc->pc_nfull++;
 
 			goto print;
 		}
 	}
-	for (i = 0; i < MAXCPUS; i++) {
+	for (i = 0; i < __arraycount(pc->pc_cpus); i++) {
 		pool_cache_cpu_t *cc;
 
 		if ((cc = pc->pc_cpus[i]) == NULL) {
 
 		if (pool_in_cg(pp, cc->cc_current, addr) ||
 		    pool_in_cg(pp, cc->cc_previous, addr)) {
 			struct cpu_info *ci =
-			    cpu_lookup_byindex(i);
+			    cpu_lookup(i);
 
 			incpucache = true;
 			snprintf(cpucachestr,