--- version 1.172, 2009/04/15 11:45:18
+++ version 1.182, 2010/01/20 23:40:42
Line 69 (1.172) / Line 69 (1.182): __KERNEL_RCSID(0, "$NetBSD$");

  */

 /* List of all pools */
-TAILQ_HEAD(,pool) pool_head = TAILQ_HEAD_INITIALIZER(pool_head);
+static TAILQ_HEAD(, pool) pool_head = TAILQ_HEAD_INITIALIZER(pool_head);

 /* Private pool for page header structures */
 #define PHPOOL_MAX 8
Line 104 (1.172) / Line 104 (1.182): static struct pool *drainpp;

 static kmutex_t pool_head_lock;
 static kcondvar_t pool_busy;

+/* This lock protects initialization of a potentially shared pool allocator */
+static kmutex_t pool_allocator_lock;

 typedef uint32_t pool_item_bitmap_t;
 #define BITMAP_SIZE (CHAR_BIT * sizeof(pool_item_bitmap_t))
 #define BITMAP_MASK (BITMAP_SIZE - 1)
Line 188 (1.172) / Line 191 (1.182): static bool pool_cache_get_slow(pool_cac

     void **, paddr_t *, int);
 static void pool_cache_cpu_init1(struct cpu_info *, pool_cache_t);
 static void pool_cache_invalidate_groups(pool_cache_t, pcg_t *);
+static void pool_cache_invalidate_cpu(pool_cache_t, u_int);
 static void pool_cache_xcall(pool_cache_t);

 static int pool_catchup(struct pool *);
Line 230 (1.172) / Line 234 (1.182): int pool_logsize = POOL_LOGSIZE;

 static inline void
 pr_log(struct pool *pp, void *v, int action, const char *file, long line)
 {
-    int n = pp->pr_curlogentry;
+    int n;
     struct pool_log *pl;

     if ((pp->pr_roflags & PR_LOGGING) == 0)
         return;

+    if (pp->pr_log == NULL) {
+        if (kmem_map != NULL)
+            pp->pr_log = malloc(
+                pool_logsize * sizeof(struct pool_log),
+                M_TEMP, M_NOWAIT | M_ZERO);
+        if (pp->pr_log == NULL)
+            return;
+        pp->pr_curlogentry = 0;
+        pp->pr_logsize = pool_logsize;
+    }

     /*
      * Fill in the current entry. Wrap around and overwrite
      * the oldest entry if necessary.
      */
+    n = pp->pr_curlogentry;
     pl = &pp->pr_log[n];
     pl->pl_file = file;
     pl->pl_line = line;
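The hunk above is the functional core of the logging change: rev 1.182 no longer trusts pool_init() to have allocated pr_log, but allocates it on the first pr_log() call, and only once kmem_map exists; if the allocation fails, the record is silently dropped. Below is a minimal user-space sketch of the same allocate-on-first-use ring buffer; the names (struct logbuf, log_record) are illustrative, not from the kernel source:

    #include <stdlib.h>

    struct log_entry {
        const char *file;
        long line;
    };

    struct logbuf {
        struct log_entry *entries;  /* NULL until the first record */
        int cur;                    /* next slot to fill */
        int size;                   /* capacity, in entries */
    };

    /* Record one entry, allocating the buffer on first use. */
    static void
    log_record(struct logbuf *lb, int capacity, const char *file, long line)
    {
        if (lb->entries == NULL) {
            /* First call: try to allocate; on failure, drop the
             * record silently, as pr_log() does above. */
            lb->entries = calloc(capacity, sizeof(*lb->entries));
            if (lb->entries == NULL)
                return;
            lb->cur = 0;
            lb->size = capacity;
        }
        /* Fill the current slot; wrap around and overwrite the
         * oldest entry once the buffer is full. */
        lb->entries[lb->cur].file = file;
        lb->entries[lb->cur].line = line;
        lb->cur = (lb->cur + 1) % lb->size;
    }

The kernel version uses M_NOWAIT because pr_log() can run in contexts where sleeping for memory is not permitted; M_ZERO plays the role of the calloc() zeroing here.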
Line 257 (1.172) / Line 273 (1.182): pr_printlog(struct pool *pp, struct pool

     int i = pp->pr_logsize;
     int n = pp->pr_curlogentry;

-    if ((pp->pr_roflags & PR_LOGGING) == 0)
+    if (pp->pr_log == NULL)
         return;

     /*
Line … (1.172) / Line … (1.182):

 pool_subsystem_init(void)
 {
     struct pool_allocator *pa;
-    __link_set_decl(pools, struct link_pool_init);
-    struct link_pool_init * const *pi;

     mutex_init(&pool_head_lock, MUTEX_DEFAULT, IPL_NONE);
+    mutex_init(&pool_allocator_lock, MUTEX_DEFAULT, IPL_NONE);
     cv_init(&pool_busy, "poolbusy");

-    __link_set_foreach(pi, pools)
-        pool_init((*pi)->pp, (*pi)->size, (*pi)->align,
-            (*pi)->align_offset, (*pi)->flags, (*pi)->wchan,
-            (*pi)->palloc, (*pi)->ipl);

     while ((pa = SLIST_FIRST(&pa_deferinitq)) != NULL) {
         KASSERT(pa->pa_backingmapptr != NULL);
         KASSERT(*pa->pa_backingmapptr != NULL);
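Two separate changes meet in pool_subsystem_init(): the new pool_allocator_lock is set up next to the other subsystem-wide primitives, and the __link_set_foreach() walk that ran pool_init() over statically declared pools is gone, which suggests the link-set (POOL_INIT-style) registration mechanism was retired in favor of explicit pool_init() calls by each subsystem.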
Line 656 (1.172) / Line 666 (1.182): pool_init(struct pool *pp, size_t size,

         palloc = &pool_allocator_nointr_fullpage;
     }
 #endif /* POOL_SUBPAGE */
-    if ((palloc->pa_flags & PA_INITIALIZED) == 0) {
+    if (!cold)
+        mutex_enter(&pool_allocator_lock);
+    if (palloc->pa_refcnt++ == 0) {
         if (palloc->pa_pagesz == 0)
             palloc->pa_pagesz = PAGE_SIZE;

Line 669 (1.172) / Line 681 (1.182): pool_init(struct pool *pp, size_t size,

         if (palloc->pa_backingmapptr != NULL) {
             pa_reclaim_register(palloc);
         }
-        palloc->pa_flags |= PA_INITIALIZED;
     }
+    if (!cold)
+        mutex_exit(&pool_allocator_lock);

     if (align == 0)
         align = ALIGN(1);
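The one-shot PA_INITIALIZED flag becomes a reference count: the first pool attached to a shared allocator initializes it, and the matching decrement in pool_destroy() further down lets the last pool tear its lock down. The `!cold` guards skip the mutex during early boot, when nothing else can run and the lock may not yet be usable. A user-space sketch of the same first-reference-initializes, last-reference-destroys pattern, assuming pthreads; registry_lock stands in for pool_allocator_lock and every name is illustrative:

    #include <pthread.h>

    static pthread_mutex_t registry_lock = PTHREAD_MUTEX_INITIALIZER;

    struct shared_alloc {
        unsigned refcnt;        /* users currently attached */
        pthread_mutex_t lock;   /* per-allocator lock, like pa_lock */
    };

    /* Attach: the first user initializes the shared state. */
    static void
    shared_alloc_ref(struct shared_alloc *sa)
    {
        pthread_mutex_lock(&registry_lock);
        if (sa->refcnt++ == 0)
            pthread_mutex_init(&sa->lock, NULL);
        pthread_mutex_unlock(&registry_lock);
    }

    /* Detach: the last user destroys it. */
    static void
    shared_alloc_unref(struct shared_alloc *sa)
    {
        pthread_mutex_lock(&registry_lock);
        if (--sa->refcnt == 0)
            pthread_mutex_destroy(&sa->lock);
        pthread_mutex_unlock(&registry_lock);
    }

Holding one global registry lock across both the counter and the init/destroy step is what makes the `refcnt++ == 0` and `--refcnt == 0` tests safe against concurrent attach and detach.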
Line 793 (1.172) / Line 806 (1.182): pool_init(struct pool *pp, size_t size,

     pp->pr_nidle = 0;
     pp->pr_refcnt = 0;

-#ifdef POOL_DIAGNOSTIC
-    if (flags & PR_LOGGING) {
-        if (kmem_map == NULL ||
-            (pp->pr_log = malloc(pool_logsize * sizeof(struct pool_log),
-            M_TEMP, M_NOWAIT)) == NULL)
-            pp->pr_roflags &= ~PR_LOGGING;
-        pp->pr_curlogentry = 0;
-        pp->pr_logsize = pool_logsize;
-    }
-#endif
+    pp->pr_log = NULL;

     pp->pr_entered_file = NULL;
     pp->pr_entered_line = 0;
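With the log allocated lazily (see the pr_log() hunk and sketch above), pool_init() only has to reset the pointer. Note that PR_LOGGING is no longer cleared on allocation failure; a failed allocation is now handled per call inside pr_log() instead.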
Line 851 (1.172) / Line 855 (1.182): pool_init(struct pool *pp, size_t size,

     }

     /* Insert into the list of all pools. */
-    if (__predict_true(!cold))
+    if (!cold)
         mutex_enter(&pool_head_lock);
     TAILQ_FOREACH(pp1, &pool_head, pr_poollist) {
         if (strcmp(pp1->pr_wchan, pp->pr_wchan) > 0)

Line 861 (1.172) / Line 865 (1.182): pool_init(struct pool *pp, size_t size,

         TAILQ_INSERT_TAIL(&pool_head, pp, pr_poollist);
     else
         TAILQ_INSERT_BEFORE(pp1, pp, pr_poollist);
-    if (__predict_true(!cold))
+    if (!cold)
         mutex_exit(&pool_head_lock);

     /* Insert this into the list of pools using this allocator. */
-    if (__predict_true(!cold))
+    if (!cold)
         mutex_enter(&palloc->pa_lock);
     TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list);
-    if (__predict_true(!cold))
+    if (!cold)
         mutex_exit(&palloc->pa_lock);

     pool_reclaim_register(pp);
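The remaining pool_init() hunks are cosmetic: the __predict_true() branch hints around the cold checks are dropped, presumably because a once-per-pool initialization path gains nothing from them, and the bare `!cold` form matches the new allocator-lock code above.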
Line 898 (1.172) / Line 902 (1.182): pool_destroy(struct pool *pp)

     TAILQ_REMOVE(&pp->pr_alloc->pa_list, pp, pr_alloc_list);
     mutex_exit(&pp->pr_alloc->pa_lock);

+    mutex_enter(&pool_allocator_lock);
+    if (--pp->pr_alloc->pa_refcnt == 0)
+        mutex_destroy(&pp->pr_alloc->pa_lock);
+    mutex_exit(&pool_allocator_lock);

     mutex_enter(&pp->pr_lock);

     KASSERT(pp->pr_cache == NULL);
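This is the destroy-side half of the allocator reference count introduced in pool_init(): under pool_allocator_lock, the last pool detached from an allocator destroys its pa_lock, mirroring shared_alloc_unref() in the sketch above.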
Line 923 (1.172) / Line 932 (1.182): pool_destroy(struct pool *pp)

     pr_pagelist_free(pp, &pq);

 #ifdef POOL_DIAGNOSTIC
-    if ((pp->pr_roflags & PR_LOGGING) != 0)
+    if (pp->pr_log != NULL) {
         free(pp->pr_log, M_TEMP);
+        pp->pr_log = NULL;
+    }
 #endif

     cv_destroy(&pp->pr_cv);
Line … (1.172) / Line … (1.182):

 pool_cache_destroy(pool_cache_t pc)
 {
     struct pool *pp = &pc->pc_pool;
-    pool_cache_cpu_t *cc;
-    pcg_t *pcg;
-    int i;
+    u_int i;

     /* Remove it from the global list. */
     mutex_enter(&pool_head_lock);

Line 2149 (1.172) / Line 2158 (1.182): pool_cache_destroy(pool_cache_t pc)

     mutex_exit(&pp->pr_lock);

     /* Destroy per-CPU data */
-    for (i = 0; i < MAXCPUS; i++) {
-        if ((cc = pc->pc_cpus[i]) == NULL)
-            continue;
-        if ((pcg = cc->cc_current) != &pcg_dummy) {
-            pcg->pcg_next = NULL;
-            pool_cache_invalidate_groups(pc, pcg);
-        }
-        if ((pcg = cc->cc_previous) != &pcg_dummy) {
-            pcg->pcg_next = NULL;
-            pool_cache_invalidate_groups(pc, pcg);
-        }
-        if (cc != &pc->pc_cpu0)
-            pool_put(&cache_cpu_pool, cc);
-    }
+    for (i = 0; i < MAXCPUS; i++)
+        pool_cache_invalidate_cpu(pc, i);

     /* Finally, destroy it. */
     mutex_destroy(&pc->pc_lock);
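The open-coded per-CPU cleanup collapses into a call to the new pool_cache_invalidate_cpu() helper, defined in the final hunk below. Iterating over every index is safe here because pool_cache_destroy() runs only once the cache has no remaining users.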
Line 2309 (1.172) / Line 2306 (1.182): pool_cache_invalidate_groups(pool_cache_

  *
  * Invalidate a pool cache (destruct and release all of the
  * cached objects). Does not reclaim objects from the pool.
+ *
+ * Note: For pool caches that provide constructed objects, there
+ * is an assumption that another level of synchronization is occurring
+ * between the input to the constructor and the cache invalidation.
  */
 void
 pool_cache_invalidate(pool_cache_t pc)
 {
     pcg_t *full, *empty, *part;
+#if 0
+    uint64_t where;
+
+    if (ncpu < 2 || !mp_online) {
+        /*
+         * We might be called early enough in the boot process
+         * for the CPU data structures to not be fully initialized.
+         * In this case, simply gather the local CPU's cache now
+         * since it will be the only one running.
+         */
+        pool_cache_xcall(pc);
+    } else {
+        /*
+         * Gather all of the CPU-specific caches into the
+         * global cache.
+         */
+        where = xc_broadcast(0, (xcfunc_t)pool_cache_xcall, pc, NULL);
+        xc_wait(where);
+    }
+#endif
     mutex_enter(&pc->pc_lock);
     full = pc->pc_fullgroups;
     empty = pc->pc_emptygroups;
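Besides the new Note: in the comment, 1.182 carries a disabled cross-call path. If enabled, pool_cache_invalidate() would first fold every CPU's local groups back into the global lists, calling pool_cache_xcall() directly during early boot (when only one CPU can run) and broadcasting it with xc_broadcast()/xc_wait() otherwise, so the invalidation below would see every cached object. As committed it sits under #if 0, so this function still leaves the per-CPU caches untouched.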
Line 2332 (1.172) / Line 2352 (1.182): pool_cache_invalidate(pool_cache_t pc)

     pool_cache_invalidate_groups(pc, part);
 }

+/*
+ * pool_cache_invalidate_cpu:
+ *
+ *    Invalidate all CPU-bound cached objects in pool cache, the CPU being
+ *    identified by its associated index.
+ *    It is caller's responsibility to ensure that no operation is
+ *    taking place on this pool cache while doing this invalidation.
+ *    WARNING: as no inter-CPU locking is enforced, trying to invalidate
+ *    pool cached objects from a CPU different from the one currently running
+ *    may result in an undefined behaviour.
+ */
+static void
+pool_cache_invalidate_cpu(pool_cache_t pc, u_int index)
+{
+    pool_cache_cpu_t *cc;
+    pcg_t *pcg;
+
+    if ((cc = pc->pc_cpus[index]) == NULL)
+        return;
+
+    if ((pcg = cc->cc_current) != &pcg_dummy) {
+        pcg->pcg_next = NULL;
+        pool_cache_invalidate_groups(pc, pcg);
+    }
+    if ((pcg = cc->cc_previous) != &pcg_dummy) {
+        pcg->pcg_next = NULL;
+        pool_cache_invalidate_groups(pc, pcg);
+    }
+    if (cc != &pc->pc_cpu0)
+        pool_put(&cache_cpu_pool, cc);
+}

 void
 pool_cache_set_drain_hook(pool_cache_t pc, void (*fn)(void *, int), void *arg)
 {
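pool_cache_invalidate_cpu() walks one slot of the per-CPU cache array: release the current and previous object groups, then free the per-CPU record itself unless it is the statically embedded pc_cpu0. A simplified user-space sketch of that shape, with illustrative names and malloc/free standing in for the kernel's group and pool primitives:

    #include <stddef.h>
    #include <stdlib.h>

    #define NCPU 4

    struct group {                  /* a batch of cached objects */
        struct group *next;
    };

    struct cpu_cache {
        struct group *current;      /* group being served from */
        struct group *previous;     /* spare group */
    };

    struct cache {
        struct cpu_cache *cpus[NCPU];  /* NULL until a CPU touches it */
        struct cpu_cache cpu0;         /* embedded slot for CPU 0 */
    };

    static void
    release_groups(struct group *g)
    {
        while (g != NULL) {
            struct group *next = g->next;
            free(g);            /* kernel: pool_cache_invalidate_groups() */
            g = next;
        }
    }

    /* Drop everything cached for one CPU, then the slot itself. */
    static void
    cache_invalidate_cpu(struct cache *c, unsigned index)
    {
        struct cpu_cache *cc = c->cpus[index];

        if (cc == NULL)             /* this CPU never used the cache */
            return;
        release_groups(cc->current);
        release_groups(cc->previous);
        if (cc != &c->cpu0)         /* embedded slot is not heap-allocated */
            free(cc);
    }

As the WARNING in the real code says, nothing here locks against the owning CPU; the caller must guarantee the cache is quiescent, which is exactly the situation in pool_cache_destroy().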