--- version 1.128.2.9, 2007/09/10 11:13:17
+++ version 1.134, 2007/11/07 00:23:23
Line 55:
 __KERNEL_RCSID(0, "$NetBSD$");

 #include <sys/syslog.h>
 #include <sys/debug.h>
 #include <sys/lockdebug.h>
+#include <sys/xcall.h>
+#include <sys/cpu.h>

 #include <uvm/uvm.h>
|
Line 148 / Line 150:
 struct pool_item {
 #ifdef DIAGNOSTIC
         u_int pi_magic;
 #endif
-#define PI_MAGIC 0xdeadbeefU
+#define PI_MAGIC 0xdeaddeadU
         /* Other entries use only this list entry */
         LIST_ENTRY(pool_item) pi_list;
 };
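
For context, pi_magic exists only under DIAGNOSTIC: the pool code stamps
PI_MAGIC into items as they are freed and verifies the stamp when they are
allocated again, so writes through stale pointers get caught on the next
pool_get().  A minimal sketch of that stamp/check pattern (the helper names
are hypothetical; the panic text is illustrative):

        /* On free: stamp the item before it goes on the free list. */
        static void
        item_stamp_free(struct pool_item *pi)           /* hypothetical */
        {
        #ifdef DIAGNOSTIC
                pi->pi_magic = PI_MAGIC;
        #endif
        }

        /* On allocate: a clobbered stamp means a write after free. */
        static void
        item_check_alloc(struct pool_item *pi, const char *wchan)   /* hypothetical */
        {
        #ifdef DIAGNOSTIC
                if (pi->pi_magic != PI_MAGIC)
                        panic("pool_get(%s): free list modified", wchan);
        #endif
        }
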
Line 191 / Line 193 (pool_cache_get_slow() declaration):
                     void **, paddr_t *, int);
 static void pool_cache_cpu_init1(struct cpu_info *, pool_cache_t);
 static void pool_cache_invalidate_groups(pool_cache_t, pcg_t *);
+static void pool_cache_xcall(pool_cache_t);

 static int pool_catchup(struct pool *);
 static void pool_prime_page(struct pool *, void *,
Line 921 / Line 924 (at pool_alloc_item_header()):
 }

 /*
- * Grab an item from the pool; must be called at appropriate spl level
+ * Grab an item from the pool.
  */
 void *
 #ifdef POOL_DIAGNOSTIC
Line 1269 / Line 1272 (at pool_do_put()):
 }

 /*
- * Return resource to the pool; must be called at appropriate spl level
+ * Return resource to the pool.
  */
 #ifdef POOL_DIAGNOSTIC
 void
Line 1570 / Line 1573 (at pool_reclaim()):
         struct pool_item_header *ph, *phnext;
         struct pool_pagelist pq;
         struct timeval curtime, diff;
+        bool klock;
+        int rv;

         if (pp->pr_drain_hook != NULL) {
                 /*
Line 1578 / Line 1583:
                 (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, PR_NOWAIT);
         }

+        /*
+         * XXXSMP Because mutexes at IPL_SOFTXXX are still spinlocks,
+         * and we are called from the pagedaemon without kernel_lock.
+         * Does not apply to IPL_SOFTBIO.
+         */
+        if (pp->pr_ipl == IPL_SOFTNET || pp->pr_ipl == IPL_SOFTCLOCK ||
+            pp->pr_ipl == IPL_SOFTSERIAL) {
+                KERNEL_LOCK(1, NULL);
+                klock = true;
+        } else
+                klock = false;
+
         /* Reclaim items from the pool's cache (if any). */
         if (pp->pr_cache != NULL)
                 pool_cache_invalidate(pp->pr_cache);

-        if (mutex_tryenter(&pp->pr_lock) == 0)
+        if (mutex_tryenter(&pp->pr_lock) == 0) {
+                if (klock) {
+                        KERNEL_UNLOCK_ONE(NULL);
+                }
                 return (0);
+        }
         pr_enter(pp, file, line);

         LIST_INIT(&pq);
Line 1616 / Line 1637:

         pr_leave(pp);
         mutex_exit(&pp->pr_lock);

         if (LIST_EMPTY(&pq))
-                return 0;
+                rv = 0;
+        else {
+                pr_pagelist_free(pp, &pq);
+                rv = 1;
+        }

-        pr_pagelist_free(pp, &pq);
+        if (klock) {
+                KERNEL_UNLOCK_ONE(NULL);
+        }

-        return (1);
+        return (rv);
 }
|
|
 /*
- * Drain pools, one at a time.
+ * Drain pools, one at a time.  This is a two stage process;
+ * drain_start kicks off a cross call to drain CPU-level caches
+ * if the pool has an associated pool_cache.  drain_end waits
+ * for those cross calls to finish, and then drains the cache
+ * (if any) and pool.
  *
- * Note, we must never be called from an interrupt context.
+ * Note, must never be called from interrupt context.
  */
 void
-pool_drain(void *arg)
+pool_drain_start(struct pool **ppp, uint64_t *wp)
 {
         struct pool *pp;

+        KASSERT(!LIST_EMPTY(&pool_head));
+
         pp = NULL;

         /* Find next pool to drain, and add a reference. */
         mutex_enter(&pool_head_lock);
-        if (drainpp == NULL) {
-                drainpp = LIST_FIRST(&pool_head);
-        }
-        if (drainpp != NULL) {
-                pp = drainpp;
-                drainpp = LIST_NEXT(pp, pr_poollist);
-        }
-        if (pp != NULL)
-                pp->pr_refcnt++;
+        do {
+                if (drainpp == NULL) {
+                        drainpp = LIST_FIRST(&pool_head);
+                }
+                if (drainpp != NULL) {
+                        pp = drainpp;
+                        drainpp = LIST_NEXT(pp, pr_poollist);
+                }
+                /*
+                 * Skip completely idle pools.  We depend on at least
+                 * one pool in the system being active.
+                 */
+        } while (pp == NULL || pp->pr_npages == 0);
+        pp->pr_refcnt++;
         mutex_exit(&pool_head_lock);

-        /* If we have a candidate, drain it and unlock. */
-        if (pp != NULL) {
-                pool_reclaim(pp);
-                mutex_enter(&pool_head_lock);
-                pp->pr_refcnt--;
-                cv_broadcast(&pool_busy);
-                mutex_exit(&pool_head_lock);
-        }
+        /* If there is a pool_cache, drain CPU level caches. */
+        *ppp = pp;
+        if (pp->pr_cache != NULL) {
+                *wp = xc_broadcast(0, (xcfunc_t)pool_cache_xcall,
+                    pp->pr_cache, NULL);
+        }
+}
+
+void
+pool_drain_end(struct pool *pp, uint64_t where)
+{
+
+        if (pp == NULL)
+                return;
+
+        KASSERT(pp->pr_refcnt > 0);
+
+        /* Wait for remote draining to complete. */
+        if (pp->pr_cache != NULL)
+                xc_wait(where);
+
+        /* Drain the cache (if any) and pool. */
+        pool_reclaim(pp);
+
+        /* Finally, unlock the pool. */
+        mutex_enter(&pool_head_lock);
+        pp->pr_refcnt--;
+        cv_broadcast(&pool_busy);
+        mutex_exit(&pool_head_lock);
 }
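
For context, a minimal sketch of how a caller (the pagedaemon, per the
comment above) might drive the two-stage drain.  The caller name and the
interleaved work are hypothetical; the pool_drain_start()/pool_drain_end()
signatures are as in the diff:

        static void
        reclaim_pass(void)                      /* hypothetical caller */
        {
                struct pool *pp = NULL;
                uint64_t where = 0;

                /*
                 * Stage 1: pick a pool and kick off cross-calls that
                 * drain its per-CPU caches.  Returns immediately.
                 */
                pool_drain_start(&pp, &where);

                /* ... reclaim memory from other sources while the
                 * cross-call threads run on the other CPUs ... */

                /*
                 * Stage 2: wait for the cross-calls to finish, then
                 * drain the global cache and the pool, and unlock it.
                 */
                pool_drain_end(pp, where);
        }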
|
|
 /*
  * Diagnostic helpers.
  */
Line 1974 / Line 2034 (in pool_cache_bootstrap()):

         mutex_init(&pc->pc_lock, MUTEX_DEFAULT, pp->pr_ipl);

+        if (ctor == NULL) {
+                ctor = (int (*)(void *, void *, int))nullop;
+        }
+        if (dtor == NULL) {
+                dtor = (void (*)(void *, void *))nullop;
+        }
+
         pc->pc_emptygroups = NULL;
         pc->pc_fullgroups = NULL;
+        pc->pc_partgroups = NULL;
         pc->pc_ctor = ctor;
         pc->pc_dtor = dtor;
         pc->pc_arg = arg;
         pc->pc_hits = 0;
         pc->pc_misses = 0;
         pc->pc_nempty = 0;
+        pc->pc_npart = 0;
         pc->pc_nfull = 0;
         pc->pc_contended = 0;
         pc->pc_refcnt = 0;
|
|
In pool_cache_destruct_object():
 pool_cache_destruct_object(pool_cache_t pc, void *object)
 {

-        if (pc->pc_dtor != NULL)
-                (*pc->pc_dtor)(pc->pc_arg, object);
+        (*pc->pc_dtor)(pc->pc_arg, object);
         pool_put(&pc->pc_pool, object);
 }
|
|
Line 2183 / Line 2251 (at pool_cache_invalidate_groups()):
 void
 pool_cache_invalidate(pool_cache_t pc)
 {
-        pcg_t *full, *empty;
+        pcg_t *full, *empty, *part;

         mutex_enter(&pc->pc_lock);
         full = pc->pc_fullgroups;
         empty = pc->pc_emptygroups;
+        part = pc->pc_partgroups;
         pc->pc_fullgroups = NULL;
         pc->pc_emptygroups = NULL;
+        pc->pc_partgroups = NULL;
         pc->pc_nfull = 0;
         pc->pc_nempty = 0;
+        pc->pc_npart = 0;
         mutex_exit(&pc->pc_lock);

         pool_cache_invalidate_groups(pc, full);
         pool_cache_invalidate_groups(pc, empty);
+        pool_cache_invalidate_groups(pc, part);
 }

 void
Line 2344 / Line 2416 (in pool_cache_get_slow()):
         if (object == NULL)
                 return NULL;

-        if (pc->pc_ctor != NULL) {
-                if ((*pc->pc_ctor)(pc->pc_arg, object, flags) != 0) {
-                        pool_put(&pc->pc_pool, object);
-                        *objectp = NULL;
-                        return NULL;
-                }
+        if ((*pc->pc_ctor)(pc->pc_arg, object, flags) != 0) {
+                pool_put(&pc->pc_pool, object);
+                *objectp = NULL;
+                return NULL;
         }

         KASSERT((((vaddr_t)object + pc->pc_pool.pr_itemoffset) &
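
The ctor can now be called unconditionally because pool_cache_bootstrap()
substitutes nullop for a NULL ctor/dtor (see the hunk above).  For
reference, a sketch of what such a constructor/destructor pair looks like;
struct foo and the function names are hypothetical:

        struct foo {
                kmutex_t        f_lock;
                int             f_state;
        };

        static int
        foo_ctor(void *arg, void *obj, int flags)       /* hypothetical */
        {
                struct foo *f = obj;

                mutex_init(&f->f_lock, MUTEX_DEFAULT, IPL_NONE);
                f->f_state = 0;
                return 0;       /* nonzero aborts the allocation, as above */
        }

        static void
        foo_dtor(void *arg, void *obj)                  /* hypothetical */
        {
                struct foo *f = obj;

                mutex_destroy(&f->f_lock);
        }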
Line 2568 / Line 2638 (at pool_cache_put_paddr()):
 }

+/*
+ * pool_cache_xcall:
+ *
+ *        Transfer objects from the per-CPU cache to the global cache.
+ *        Run within a cross-call thread.
+ */
+static void
+pool_cache_xcall(pool_cache_t pc)
+{
+        pool_cache_cpu_t *cc;
+        pcg_t *prev, *cur, **list;
+        int s = 0; /* XXXgcc */
+
+        cc = pool_cache_cpu_enter(pc, &s);
+        cur = cc->cc_current;
+        cc->cc_current = NULL;
+        prev = cc->cc_previous;
+        cc->cc_previous = NULL;
+        pool_cache_cpu_exit(cc, &s);
+
+        /*
+         * XXXSMP Go to splvm to prevent kernel_lock from being taken,
+         * because locks at IPL_SOFTXXX are still spinlocks.  Does not
+         * apply to IPL_SOFTBIO.  Cross-call threads do not take the
+         * kernel_lock.
+         */
+        s = splvm();
+        mutex_enter(&pc->pc_lock);
+        if (cur != NULL) {
+                if (cur->pcg_avail == PCG_NOBJECTS) {
+                        list = &pc->pc_fullgroups;
+                        pc->pc_nfull++;
+                } else if (cur->pcg_avail == 0) {
+                        list = &pc->pc_emptygroups;
+                        pc->pc_nempty++;
+                } else {
+                        list = &pc->pc_partgroups;
+                        pc->pc_npart++;
+                }
+                cur->pcg_next = *list;
+                *list = cur;
+        }
+        if (prev != NULL) {
+                if (prev->pcg_avail == PCG_NOBJECTS) {
+                        list = &pc->pc_fullgroups;
+                        pc->pc_nfull++;
+                } else if (prev->pcg_avail == 0) {
+                        list = &pc->pc_emptygroups;
+                        pc->pc_nempty++;
+                } else {
+                        list = &pc->pc_partgroups;
+                        pc->pc_npart++;
+                }
+                prev->pcg_next = *list;
+                *list = prev;
+        }
+        mutex_exit(&pc->pc_lock);
+        splx(s);
+}
+
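pool_cache_xcall() is invoked on every CPU via the xc_broadcast() call in
pool_drain_start() above.  A minimal sketch of that cross-call API in
isolation (the worker and driver here are hypothetical; xc_broadcast() and
xc_wait() are the <sys/xcall.h> interfaces used in the diff):

        #include <sys/xcall.h>

        static void
        count_on_cpu(void *arg1, void *arg2)    /* hypothetical worker */
        {
                /* Runs once on each CPU, in that CPU's cross-call thread. */
        }

        static void
        run_on_all_cpus(void)                   /* hypothetical driver */
        {
                uint64_t where;

                /* Schedule the worker everywhere; returns a ticket. */
                where = xc_broadcast(0, count_on_cpu, NULL, NULL);

                /* Block until every CPU has finished the worker. */
                xc_wait(where);
        }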
|
|
|
 /*
  * Pool backend allocators.
  *
  * Each pool has a backend allocator that handles allocation, deallocation,