--- version 1.175, 2009/10/08 21:54:45
+++ version 1.181, 2010/01/03 09:42:22

@@ -104 +104 @@
 static struct pool *drainpp;

 static kmutex_t pool_head_lock;
 static kcondvar_t pool_busy;

+/* This lock protects initialization of a potentially shared pool allocator */
+static kmutex_t pool_allocator_lock;
+
 typedef uint32_t pool_item_bitmap_t;
 #define BITMAP_SIZE (CHAR_BIT * sizeof(pool_item_bitmap_t))
 #define BITMAP_MASK (BITMAP_SIZE - 1)
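(The new pool_allocator_lock is initialized in pool_subsystem_init() and taken around allocator attach in pool_init() and detach in pool_destroy(); see the hunks below.)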
@@ -231 +234 @@
 int pool_logsize = POOL_LOGSIZE;

 static inline void
 pr_log(struct pool *pp, void *v, int action, const char *file, long line)
 {
-        int n = pp->pr_curlogentry;
+        int n;
         struct pool_log *pl;

         if ((pp->pr_roflags & PR_LOGGING) == 0)
                 return;

+        if (pp->pr_log == NULL) {
+                if (kmem_map != NULL)
+                        pp->pr_log = malloc(
+                            pool_logsize * sizeof(struct pool_log),
+                            M_TEMP, M_NOWAIT | M_ZERO);
+                if (pp->pr_log == NULL)
+                        return;
+                pp->pr_curlogentry = 0;
+                pp->pr_logsize = pool_logsize;
+        }
+
         /*
          * Fill in the current entry. Wrap around and overwrite
          * the oldest entry if necessary.
          */
+        n = pp->pr_curlogentry;
         pl = &pp->pr_log[n];
         pl->pl_file = file;
         pl->pl_line = line;
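With this change the diagnostic log is no longer allocated up front: pr_log() allocates it on first use, and only once kmem_map is available, so events are simply dropped until the allocator can satisfy the request. A minimal userland sketch of the same allocate-on-first-use ring log follows; the lazy_log/log_event names are hypothetical, not part of the pool API.

    #include <stdlib.h>

    struct log_entry { const char *file; long line; };

    struct lazy_log {
            struct log_entry *entries;  /* NULL until the first event */
            int cur;                    /* next slot to fill */
            int size;                   /* capacity, fixed at first use */
    };

    static void
    log_event(struct lazy_log *lg, const char *file, long line)
    {
            /* Allocate lazily; if allocation fails, drop the event. */
            if (lg->entries == NULL) {
                    lg->entries = calloc(128, sizeof(*lg->entries));
                    if (lg->entries == NULL)
                            return;
                    lg->cur = 0;
                    lg->size = 128;
            }

            /* Fill the current entry; wrap around, overwriting the oldest. */
            lg->entries[lg->cur].file = file;
            lg->entries[lg->cur].line = line;
            lg->cur = (lg->cur + 1) % lg->size;
    }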
@@ -258 +273 @@ pr_printlog(struct pool *pp, struct pool
         int i = pp->pr_logsize;
         int n = pp->pr_curlogentry;

-        if ((pp->pr_roflags & PR_LOGGING) == 0)
+        if (pp->pr_log == NULL)
                 return;

         /*
@@ -590 +605 @@ pool_subsystem_init(void)
         struct pool_allocator *pa;

         mutex_init(&pool_head_lock, MUTEX_DEFAULT, IPL_NONE);
+        mutex_init(&pool_allocator_lock, MUTEX_DEFAULT, IPL_NONE);
         cv_init(&pool_busy, "poolbusy");

         while ((pa = SLIST_FIRST(&pa_deferinitq)) != NULL) {
@@ -650 +666 @@ pool_init(struct pool *pp, size_t size,
                 palloc = &pool_allocator_nointr_fullpage;
         }
 #endif /* POOL_SUBPAGE */
-        if ((palloc->pa_flags & PA_INITIALIZED) == 0) {
+        if (!cold)
+                mutex_enter(&pool_allocator_lock);
+        if (palloc->pa_refcnt++ == 0) {
                 if (palloc->pa_pagesz == 0)
                         palloc->pa_pagesz = PAGE_SIZE;

@@ -663 +681 @@ pool_init(struct pool *pp, size_t size,
                 if (palloc->pa_backingmapptr != NULL) {
                         pa_reclaim_register(palloc);
                 }
-                palloc->pa_flags |= PA_INITIALIZED;
         }
+        if (!cold)
+                mutex_exit(&pool_allocator_lock);

         if (align == 0)
                 align = ALIGN(1);
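Taken together, these two hunks replace the one-shot PA_INITIALIZED flag with a reference count guarded by the new pool_allocator_lock: the first pool to attach to a shared allocator performs the one-time setup, and, as the pool_destroy() hunk below shows, the last pool to detach tears it down. The !cold tests skip the locking during early boot, when only one context can be running. A minimal sketch of the same attach/detach pattern using POSIX threads, with hypothetical names:

    #include <stddef.h>
    #include <pthread.h>

    struct allocator {
            pthread_mutex_t lock;   /* per-allocator lock, set up on first attach */
            unsigned refcnt;        /* number of pools using this allocator */
            size_t pagesz;
    };

    /* Protects initialization of a potentially shared allocator. */
    static pthread_mutex_t allocator_registry_lock = PTHREAD_MUTEX_INITIALIZER;

    static void
    allocator_attach(struct allocator *a, size_t default_pagesz)
    {
            pthread_mutex_lock(&allocator_registry_lock);
            if (a->refcnt++ == 0) {
                    /* First user: perform the one-time setup. */
                    if (a->pagesz == 0)
                            a->pagesz = default_pagesz;
                    pthread_mutex_init(&a->lock, NULL);
            }
            pthread_mutex_unlock(&allocator_registry_lock);
    }

    static void
    allocator_detach(struct allocator *a)
    {
            pthread_mutex_lock(&allocator_registry_lock);
            if (--a->refcnt == 0)
                    pthread_mutex_destroy(&a->lock);  /* last user: tear down */
            pthread_mutex_unlock(&allocator_registry_lock);
    }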
@@ -787 +806 @@ pool_init(struct pool *pp, size_t size,
         pp->pr_nidle = 0;
         pp->pr_refcnt = 0;

-#ifdef POOL_DIAGNOSTIC
-        if (flags & PR_LOGGING) {
-                if (kmem_map == NULL ||
-                    (pp->pr_log = malloc(pool_logsize * sizeof(struct pool_log),
-                    M_TEMP, M_NOWAIT)) == NULL)
-                        pp->pr_roflags &= ~PR_LOGGING;
-                pp->pr_curlogentry = 0;
-                pp->pr_logsize = pool_logsize;
-        }
-#endif
+        pp->pr_log = NULL;

         pp->pr_entered_file = NULL;
         pp->pr_entered_line = 0;
@@ -845 +855 @@ pool_init(struct pool *pp, size_t size,
         }

         /* Insert into the list of all pools. */
-        if (__predict_true(!cold))
+        if (!cold)
                 mutex_enter(&pool_head_lock);
         TAILQ_FOREACH(pp1, &pool_head, pr_poollist) {
                 if (strcmp(pp1->pr_wchan, pp->pr_wchan) > 0)

@@ -855 +865 @@ pool_init(struct pool *pp, size_t size,
                 TAILQ_INSERT_TAIL(&pool_head, pp, pr_poollist);
         else
                 TAILQ_INSERT_BEFORE(pp1, pp, pr_poollist);
-        if (__predict_true(!cold))
+        if (!cold)
                 mutex_exit(&pool_head_lock);

         /* Insert this into the list of pools using this allocator. */
-        if (__predict_true(!cold))
+        if (!cold)
                 mutex_enter(&palloc->pa_lock);
         TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list);
-        if (__predict_true(!cold))
+        if (!cold)
                 mutex_exit(&palloc->pa_lock);

         pool_reclaim_register(pp);
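(__predict_true() is a branch-prediction hint built on __builtin_expect; the new code drops it from these branches, which run only at pool creation time and arguably never needed the hint.)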
@@ -892 +902 @@ pool_destroy(struct pool *pp)
         TAILQ_REMOVE(&pp->pr_alloc->pa_list, pp, pr_alloc_list);
         mutex_exit(&pp->pr_alloc->pa_lock);

+        mutex_enter(&pool_allocator_lock);
+        if (--pp->pr_alloc->pa_refcnt == 0)
+                mutex_destroy(&pp->pr_alloc->pa_lock);
+        mutex_exit(&pool_allocator_lock);
+
         mutex_enter(&pp->pr_lock);

         KASSERT(pp->pr_cache == NULL);
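This is the release half of the reference-counted allocator initialization introduced in the pool_init() hunks above (compare allocator_detach() in the sketch there). Note that pa_refcnt and the teardown decision are themselves serialized by pool_allocator_lock, since the per-allocator lock cannot be destroyed while another pool might still be attaching.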
@@ -917 +932 @@ pool_destroy(struct pool *pp)
         pr_pagelist_free(pp, &pq);

 #ifdef POOL_DIAGNOSTIC
-        if ((pp->pr_roflags & PR_LOGGING) != 0)
+        if (pp->pr_log != NULL) {
                 free(pp->pr_log, M_TEMP);
+                pp->pr_log = NULL;
+        }
 #endif

         cv_destroy(&pp->pr_cv);
@@ -2289 +2306 @@ pool_cache_invalidate_groups(pool_cache_
  *
  *      Invalidate a pool cache (destruct and release all of the
  *      cached objects). Does not reclaim objects from the pool.
+ *
+ *      Note: For pool caches that provide constructed objects, there
+ *      is an assumption that another level of synchronization is occurring
+ *      between the input to the constructor and the cache invalidation.
  */
 void
 pool_cache_invalidate(pool_cache_t pc)
 {
         pcg_t *full, *empty, *part;
+        uint64_t where;

+        if (ncpu < 2 || !mp_online) {
+                /*
+                 * We might be called early enough in the boot process
+                 * for the CPU data structures to not be fully initialized.
+                 * In this case, simply gather the local CPU's cache now
+                 * since it will be the only one running.
+                 */
+                pool_cache_xcall(pc);
+        } else {
+                /*
+                 * Gather all of the CPU-specific caches into the
+                 * global cache.
+                 */
+                where = xc_broadcast(0, (xcfunc_t)pool_cache_xcall, pc, NULL);
+                xc_wait(where);
+        }
+
         mutex_enter(&pc->pc_lock);
         full = pc->pc_fullgroups;
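The rewritten function relies on the xcall(9) facility shown in the hunk: xc_broadcast() schedules pool_cache_xcall() on every CPU, draining each per-CPU cache into the global cache, and xc_wait() blocks until all of them have run, so the sweep under pc_lock that follows sees every cached object. The new "Note" in the comment shifts one duty to the caller: inputs consumed by the object constructor must be serialized against invalidation at a higher level. A hedged sketch of what that caller-side synchronization might look like, with all frob_* names hypothetical:

    static kmutex_t frob_param_lock;    /* the caller-level synchronization */
    static int frob_param;              /* input consumed by the constructor */
    static pool_cache_t frob_cache;     /* caches objects built from frob_param */

    static void
    frob_set_param(int newval)
    {
            mutex_enter(&frob_param_lock);
            frob_param = newval;
            /*
             * Cached objects were constructed with the old value;
             * destruct and release them all so that future allocations
             * are constructed with the new one.
             */
            pool_cache_invalidate(frob_cache);
            mutex_exit(&frob_param_lock);
    }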
@@ -2313 +2352 @@ pool_cache_invalidate(pool_cache_t pc)
 }

 /*
- * pool_cache_invalidate_local:
- *
- *      Invalidate all local ('current CPU') cached objects in
- *      pool cache.
- *      It is caller's responsibility to ensure that no operation is
- *      taking place on this pool cache while doing the local invalidation.
- */
-void
-pool_cache_invalidate_local(pool_cache_t pc)
-{
-        pool_cache_invalidate_cpu(pc, curcpu()->ci_index);
-}
-
-/*
  * pool_cache_invalidate_cpu:
  *
  *      Invalidate all CPU-bound cached objects in pool cache, the CPU being