--- version 1.128.2.10, 2007/09/25 01:36:19
+++ version 1.133.4.1, 2007/11/19 00:48:50

@@ -46 +46 @@
 __KERNEL_RCSID(0, "$NetBSD$");

 #include <sys/param.h>
 #include <sys/systm.h>
+#include <sys/bitops.h>
 #include <sys/proc.h>
 #include <sys/errno.h>
 #include <sys/kernel.h>
@@ -55 +56 @@
 #include <sys/syslog.h>
 #include <sys/debug.h>
 #include <sys/lockdebug.h>
+#include <sys/xcall.h>
+#include <sys/cpu.h>

 #include <uvm/uvm.h>

@@ -81 +84 @@ LIST_HEAD(,pool_cache) pool_cache_head =
 /* Private pool for page header structures */
 #define PHPOOL_MAX      8
 static struct pool phpool[PHPOOL_MAX];
-#define PHPOOL_FREELIST_NELEM(idx)      (((idx) == 0) ? 0 : (1 << (idx)))
+#define PHPOOL_FREELIST_NELEM(idx) \
+        (((idx) == 0) ? 0 : BITMAP_SIZE * (1 << (idx)))
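With pool_item_bitmap_t defined as uint32_t further down, BITMAP_SIZE is 32, so PHPOOL_FREELIST_NELEM(idx) now describes 32 times as many items per page header for a given index. A quick stand-alone check of the two macro forms (NELEM_OLD and NELEM_NEW are illustrative names, not from the source):

    #include <limits.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef uint32_t pool_item_bitmap_t;
    #define BITMAP_SIZE     (CHAR_BIT * sizeof(pool_item_bitmap_t)) /* 32 */
    #define NELEM_OLD(idx)  (((idx) == 0) ? 0 : (1 << (idx)))
    #define NELEM_NEW(idx)  (((idx) == 0) ? 0 : BITMAP_SIZE * (1 << (idx)))

    int
    main(void)
    {
            int idx;

            /* e.g. idx 1: old 2, new 64; idx 7: old 128, new 4096 */
            for (idx = 0; idx < 8; idx++)   /* PHPOOL_MAX == 8 */
                    printf("idx %d: old %d, new %zu\n",
                        idx, NELEM_OLD(idx), (size_t)NELEM_NEW(idx));
            return 0;
    }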
|
|
 #ifdef POOL_SUBPAGE
 /* Pool of subpages for use by normal pools. */
@@ -110 +114 @@ static struct pool *drainpp;
 static kmutex_t pool_head_lock;
 static kcondvar_t pool_busy;

-typedef uint8_t pool_item_freelist_t;
+typedef uint32_t pool_item_bitmap_t;
+#define BITMAP_SIZE     (CHAR_BIT * sizeof(pool_item_bitmap_t))
+#define BITMAP_MASK     (BITMAP_SIZE - 1)

 struct pool_item_header {
         /* Page headers */
@@ -120 +126 @@
                         ph_node;        /* Off-page page headers */
         void *          ph_page;        /* this page's address */
         struct timeval  ph_time;        /* last referenced */
+        uint16_t        ph_nmissing;    /* # of chunks in use */
         union {
                 /* !PR_NOTOUCH */
                 struct {
@@ -128 +135 @@
                 } phu_normal;
                 /* PR_NOTOUCH */
                 struct {
-                        uint16_t
-                                phu_off;        /* start offset in page */
-                        pool_item_freelist_t
-                                phu_firstfree;  /* first free item */
-                        /*
-                         * XXX it might be better to use
-                         * a simple bitmap and ffs(3)
-                         */
+                        uint16_t phu_off;       /* start offset in page */
+                        pool_item_bitmap_t phu_bitmap[];
                 } phu_notouch;
         } ph_u;
-        uint16_t        ph_nmissing;    /* # of chunks in use */
 };
 #define ph_itemlist     ph_u.phu_normal.phu_itemlist
 #define ph_off          ph_u.phu_notouch.phu_off
-#define ph_firstfree    ph_u.phu_notouch.phu_firstfree
+#define ph_bitmap       ph_u.phu_notouch.phu_bitmap

 struct pool_item {
 #ifdef DIAGNOSTIC
         u_int pi_magic;
 #endif
-#define PI_MAGIC        0xdeadbeefU
+#define PI_MAGIC        0xdeaddeadU
         /* Other entries use only this list entry */
         LIST_ENTRY(pool_item)   pi_list;
 };
@@ -191 +191 @@ static pool_cache_cpu_t *pool_cache_get_
                     void **, paddr_t *, int);
 static void     pool_cache_cpu_init1(struct cpu_info *, pool_cache_t);
 static void     pool_cache_invalidate_groups(pool_cache_t, pcg_t *);
+static void     pool_cache_xcall(pool_cache_t);

 static int      pool_catchup(struct pool *);
 static void     pool_prime_page(struct pool *, void *,
@@ -327 +328 @@ pr_enter_check(struct pool *pp, void (*p
 #define pr_enter_check(pp, pr)
 #endif /* POOL_DIAGNOSTIC */

-static inline int
+static inline unsigned int
 pr_item_notouch_index(const struct pool *pp, const struct pool_item_header *ph,
     const void *v)
 {
         const char *cp = v;
-        int idx;
+        unsigned int idx;

         KASSERT(pp->pr_roflags & PR_NOTOUCH);
         idx = (cp - (char *)ph->ph_page - ph->ph_off) / pp->pr_size;
@@ -340 +341 @@ pr_item_notouch_index(const struct pool
         return idx;
 }

-#define PR_FREELIST_ALIGN(p) \
-        roundup((uintptr_t)(p), sizeof(pool_item_freelist_t))
-#define PR_FREELIST(ph) ((pool_item_freelist_t *)PR_FREELIST_ALIGN((ph) + 1))
-#define PR_INDEX_USED   ((pool_item_freelist_t)-1)
-#define PR_INDEX_EOL    ((pool_item_freelist_t)-2)

 static inline void
 pr_item_notouch_put(const struct pool *pp, struct pool_item_header *ph,
     void *obj)
 {
-        int idx = pr_item_notouch_index(pp, ph, obj);
-        pool_item_freelist_t *freelist = PR_FREELIST(ph);
+        unsigned int idx = pr_item_notouch_index(pp, ph, obj);
+        pool_item_bitmap_t *bitmap = ph->ph_bitmap + (idx / BITMAP_SIZE);
+        pool_item_bitmap_t mask = 1 << (idx & BITMAP_MASK);

-        KASSERT(freelist[idx] == PR_INDEX_USED);
-        freelist[idx] = ph->ph_firstfree;
-        ph->ph_firstfree = idx;
+        KASSERT((*bitmap & mask) == 0);
+        *bitmap |= mask;
 }

 static inline void *
 pr_item_notouch_get(const struct pool *pp, struct pool_item_header *ph)
 {
-        int idx = ph->ph_firstfree;
-        pool_item_freelist_t *freelist = PR_FREELIST(ph);
+        pool_item_bitmap_t *bitmap = ph->ph_bitmap;
+        unsigned int idx;
+        int i;

-        KASSERT(freelist[idx] != PR_INDEX_USED);
-        ph->ph_firstfree = freelist[idx];
-        freelist[idx] = PR_INDEX_USED;
+        for (i = 0; ; i++) {
+                int bit;
+
+                KASSERT((i * BITMAP_SIZE) < pp->pr_itemsperpage);
+                bit = ffs32(bitmap[i]);
+                if (bit) {
+                        pool_item_bitmap_t mask;
+
+                        bit--;
+                        idx = (i * BITMAP_SIZE) + bit;
+                        mask = 1 << bit;
+                        KASSERT((bitmap[i] & mask) != 0);
+                        bitmap[i] &= ~mask;
+                        break;
+                }
+        }
+        KASSERT(idx < pp->pr_itemsperpage);
         return (char *)ph->ph_page + ph->ph_off + idx * pp->pr_size;
 }

+static inline void
+pr_item_notouch_init(const struct pool *pp, struct pool_item_header *ph)
+{
+        pool_item_bitmap_t *bitmap = ph->ph_bitmap;
+        const int n = howmany(pp->pr_itemsperpage, BITMAP_SIZE);
+        int i;
+
+        for (i = 0; i < n; i++) {
+                bitmap[i] = (pool_item_bitmap_t)-1;
+        }
+}
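The linked free list threaded through per-item indices is gone; a free item is now one set bit, and pr_item_notouch_get() scans for the first set bit with ffs32(). The same technique in stand-alone form, with libc ffs(3) standing in for the kernel's ffs32() and bitmap_get/bitmap_put as illustrative names:

    #include <assert.h>
    #include <limits.h>
    #include <stdint.h>
    #include <strings.h>        /* ffs(3) */

    #define BITMAP_SIZE (CHAR_BIT * sizeof(uint32_t))
    #define BITMAP_MASK (BITMAP_SIZE - 1)

    /* Free item idx: its bit must be clear, then set it. */
    static void
    bitmap_put(uint32_t *bitmap, unsigned int idx)
    {
            uint32_t mask = 1U << (idx & BITMAP_MASK);

            assert((bitmap[idx / BITMAP_SIZE] & mask) == 0);
            bitmap[idx / BITMAP_SIZE] |= mask;
    }

    /* Allocate: find the first set bit, clear it, return its index. */
    static int
    bitmap_get(uint32_t *bitmap, unsigned int nitems)
    {
            unsigned int i;
            int bit;

            for (i = 0; i * BITMAP_SIZE < nitems; i++) {
                    bit = ffs((int)bitmap[i]);
                    if (bit != 0) {
                            bitmap[i] &= ~(1U << (bit - 1));
                            return (int)(i * BITMAP_SIZE) + (bit - 1);
                    }
            }
            return -1;          /* no free items */
    }

Initialization is just an all-ones fill, as pr_item_notouch_init() does above, and a double free trips the assertion in bitmap_put() because the bit is already set.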
|
|
 static inline int
 phtree_compare(struct pool_item_header *a, struct pool_item_header *b)
 {
@@ -600 +621 @@ pool_init(struct pool *pp, size_t size,
         size_t trysize, phsize;
         int off, slack;

-        KASSERT((1UL << (CHAR_BIT * sizeof(pool_item_freelist_t))) - 2 >=
-            PHPOOL_FREELIST_NELEM(PHPOOL_MAX - 1));
-
 #ifdef DEBUG
         /*
          * Check that the pool hasn't already been initialised and
@@ -805 +823 @@ pool_init(struct pool *pp, size_t size,
                     "phpool-%d", nelem);
                 sz = sizeof(struct pool_item_header);
                 if (nelem) {
-                        sz = PR_FREELIST_ALIGN(sz)
-                            + nelem * sizeof(pool_item_freelist_t);
+                        sz = offsetof(struct pool_item_header,
+                            ph_bitmap[howmany(nelem, BITMAP_SIZE)]);
                 }
                 pool_init(&phpool[idx], sz, 0, 0, 0,
                     phpool_names[idx], &pool_allocator_meta, IPL_VM);
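The header size is now the offset of one-past-the-last bitmap word, computed through the flexible array member, instead of aligning past the struct and appending a freelist. A stand-alone sketch of the sizing idiom (struct header and header_alloc are illustrative, not the kernel's types):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdlib.h>

    #define BITMAP_SIZE 32
    #define howmany(x, y)       (((x) + ((y) - 1)) / (y))

    struct header {
            uint16_t off;
            uint32_t bitmap[];  /* flexible array member */
    };

    /* Allocate a header whose bitmap can describe nelem items. */
    static struct header *
    header_alloc(unsigned int nelem)
    {
            /*
             * offsetof() with a variable array index is a GCC-supported
             * idiom (the diff relies on it); the strictly portable
             * spelling is offsetof(struct header, bitmap) +
             * howmany(nelem, BITMAP_SIZE) * sizeof(uint32_t).
             */
            size_t sz = offsetof(struct header,
                bitmap[howmany(nelem, BITMAP_SIZE)]);

            return malloc(sz);
    }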
@@ -921 +939 @@ pool_alloc_item_header(struct pool *pp,
 }

 /*
- * Grab an item from the pool; must be called at appropriate spl level
+ * Grab an item from the pool.
  */
 void *
 #ifdef POOL_DIAGNOSTIC
@@ -1269 +1287 @@ pool_do_put(struct pool *pp, void *v, st
 }

 /*
- * Return resource to the pool; must be called at appropriate spl level
+ * Return resource to the pool.
  */
 #ifdef POOL_DIAGNOSTIC
 void
@@ -1430 +1448 @@ pool_prime_page(struct pool *pp, void *s
         pp->pr_nitems += n;

         if (pp->pr_roflags & PR_NOTOUCH) {
-                pool_item_freelist_t *freelist = PR_FREELIST(ph);
-                int i;
-
-                ph->ph_off = (char *)cp - (char *)storage;
-                ph->ph_firstfree = 0;
-                for (i = 0; i < n - 1; i++)
-                        freelist[i] = i + 1;
-                freelist[n - 1] = PR_INDEX_EOL;
+                pr_item_notouch_init(pp, ph);
         } else {
                 while (n--) {
                         pi = (struct pool_item *)cp;
@@ -1570 +1581 @@ pool_reclaim(struct pool *pp)
         struct pool_item_header *ph, *phnext;
         struct pool_pagelist pq;
         struct timeval curtime, diff;
+        bool klock;
+        int rv;

         if (pp->pr_drain_hook != NULL) {
                 /*
@@ -1578 +1591 @@ pool_reclaim(struct pool *pp)
                 (*pp->pr_drain_hook)(pp->pr_drain_hook_arg, PR_NOWAIT);
         }

+        /*
+         * XXXSMP Because mutexes at IPL_SOFTXXX are still spinlocks,
+         * and we are called from the pagedaemon without kernel_lock.
+         * Does not apply to IPL_SOFTBIO.
+         */
+        if (pp->pr_ipl == IPL_SOFTNET || pp->pr_ipl == IPL_SOFTCLOCK ||
+            pp->pr_ipl == IPL_SOFTSERIAL) {
+                KERNEL_LOCK(1, NULL);
+                klock = true;
+        } else
+                klock = false;
+
         /* Reclaim items from the pool's cache (if any). */
         if (pp->pr_cache != NULL)
                 pool_cache_invalidate(pp->pr_cache);

-        if (mutex_tryenter(&pp->pr_lock) == 0)
+        if (mutex_tryenter(&pp->pr_lock) == 0) {
+                if (klock) {
+                        KERNEL_UNLOCK_ONE(NULL);
+                }
                 return (0);
+        }
         pr_enter(pp, file, line);

         LIST_INIT(&pq);
@@ -1616 +1645 @@ pool_reclaim(struct pool *pp)

         pr_leave(pp);
         mutex_exit(&pp->pr_lock);

         if (LIST_EMPTY(&pq))
-                return 0;
+                rv = 0;
+        else {
+                pr_pagelist_free(pp, &pq);
+                rv = 1;
+        }

-        pr_pagelist_free(pp, &pq);
+        if (klock) {
+                KERNEL_UNLOCK_ONE(NULL);
+        }

-        return (1);
+        return (rv);
 }

 /*
- * Drain pools, one at a time.
+ * Drain pools, one at a time.  This is a two stage process;
+ * drain_start kicks off a cross call to drain CPU-level caches
+ * if the pool has an associated pool_cache.  drain_end waits
+ * for those cross calls to finish, and then drains the cache
+ * (if any) and pool.
  *
- * Note, we must never be called from an interrupt context.
+ * Note, must never be called from interrupt context.
  */
 void
-pool_drain(void *arg)
+pool_drain_start(struct pool **ppp, uint64_t *wp)
 {
         struct pool *pp;

+        KASSERT(!LIST_EMPTY(&pool_head));
+
         pp = NULL;

         /* Find next pool to drain, and add a reference. */
         mutex_enter(&pool_head_lock);
-        if (drainpp == NULL) {
-                drainpp = LIST_FIRST(&pool_head);
-        }
-        if (drainpp != NULL) {
-                pp = drainpp;
-                drainpp = LIST_NEXT(pp, pr_poollist);
-        }
-        if (pp != NULL)
-                pp->pr_refcnt++;
+        do {
+                if (drainpp == NULL) {
+                        drainpp = LIST_FIRST(&pool_head);
+                }
+                if (drainpp != NULL) {
+                        pp = drainpp;
+                        drainpp = LIST_NEXT(pp, pr_poollist);
+                }
+                /*
+                 * Skip completely idle pools.  We depend on at least
+                 * one pool in the system being active.
+                 */
+        } while (pp == NULL || pp->pr_npages == 0);
+        pp->pr_refcnt++;
         mutex_exit(&pool_head_lock);

-        /* If we have a candidate, drain it and unlock. */
-        if (pp != NULL) {
-                pool_reclaim(pp);
-                mutex_enter(&pool_head_lock);
-                pp->pr_refcnt--;
-                cv_broadcast(&pool_busy);
-                mutex_exit(&pool_head_lock);
+        /* If there is a pool_cache, drain CPU level caches. */
+        *ppp = pp;
+        if (pp->pr_cache != NULL) {
+                *wp = xc_broadcast(0, (xcfunc_t)pool_cache_xcall,
+                    pp->pr_cache, NULL);
         }
 }

+void
+pool_drain_end(struct pool *pp, uint64_t where)
+{
+
+        if (pp == NULL)
+                return;
+
+        KASSERT(pp->pr_refcnt > 0);
+
+        /* Wait for remote draining to complete. */
+        if (pp->pr_cache != NULL)
+                xc_wait(where);
+
+        /* Drain the cache (if any) and pool.. */
+        pool_reclaim(pp);
+
+        /* Finally, unlock the pool. */
+        mutex_enter(&pool_head_lock);
+        pp->pr_refcnt--;
+        cv_broadcast(&pool_busy);
+        mutex_exit(&pool_head_lock);
+}
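The two-stage split lets the caller overlap the cross call with other reclamation work. In outline, a pagedaemon-style caller might use it like this (pagedaemon_pass is a hypothetical wrapper, not part of this change):

    static void
    pagedaemon_pass(void)
    {
            struct pool *pp;
            uint64_t where;

            /* Kick off pool_cache_xcall() on all CPUs, without waiting. */
            pool_drain_start(&pp, &where);

            /* ... reclaim other resources while the cross call runs ... */

            /* xc_wait(), then pool_reclaim(), then drop the reference. */
            pool_drain_end(pp, where);
    }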
|
|
 /*
  * Diagnostic helpers.
  */
@@ -1983 +2051 @@ pool_cache_bootstrap(pool_cache_t pc, si

         pc->pc_emptygroups = NULL;
         pc->pc_fullgroups = NULL;
+        pc->pc_partgroups = NULL;
         pc->pc_ctor = ctor;
         pc->pc_dtor = dtor;
         pc->pc_arg = arg;
         pc->pc_hits = 0;
         pc->pc_misses = 0;
         pc->pc_nempty = 0;
+        pc->pc_npart = 0;
         pc->pc_nfull = 0;
         pc->pc_contended = 0;
         pc->pc_refcnt = 0;
+        pc->pc_freecheck = NULL;

         /* Allocate per-CPU caches. */
         memset(pc->pc_cpus, 0, sizeof(pc->pc_cpus));
         pc->pc_ncpu = 0;
-        for (CPU_INFO_FOREACH(cii, ci)) {
-                pool_cache_cpu_init1(ci, pc);
+        if (ncpu == 0) {
+                /* XXX For sparc: boot CPU is not attached yet. */
+                pool_cache_cpu_init1(curcpu(), pc);
+        } else {
+                for (CPU_INFO_FOREACH(cii, ci)) {
+                        pool_cache_cpu_init1(ci, pc);
+                }
         }

         if (__predict_true(!cold)) {

 pool_cache_cpu_init1(struct cpu_info *ci, pool_cache_t pc)
 {
         pool_cache_cpu_t *cc;
+        int index;

+        index = ci->ci_index;
+
+        KASSERT(index < MAXCPUS);
         KASSERT(((uintptr_t)pc->pc_cpus & (CACHE_LINE_SIZE - 1)) == 0);

-        if ((cc = pc->pc_cpus[ci->ci_index]) != NULL) {
-                KASSERT(cc->cc_cpu = ci);
+        if ((cc = pc->pc_cpus[index]) != NULL) {
+                KASSERT(cc->cc_cpuindex == index);
                 return;
         }

@@ -2097 +2177 @@ pool_cache_cpu_init1(struct cpu_info *ci
         cc->cc_ipl = pc->pc_pool.pr_ipl;
         cc->cc_iplcookie = makeiplcookie(cc->cc_ipl);
         cc->cc_cache = pc;
-        cc->cc_cpu = ci;
+        cc->cc_cpuindex = index;
         cc->cc_hits = 0;
         cc->cc_misses = 0;
         cc->cc_current = NULL;
         cc->cc_previous = NULL;

-        pc->pc_cpus[ci->ci_index] = cc;
+        pc->pc_cpus[index] = cc;
 }

 /*
@@ -2142 +2222 @@ pool_cache_reclaim(pool_cache_t pc)
         return pool_reclaim(&pc->pc_pool);
 }

+static void
+pool_cache_destruct_object1(pool_cache_t pc, void *object)
+{
+
+        (*pc->pc_dtor)(pc->pc_arg, object);
+        pool_put(&pc->pc_pool, object);
+}
+
 /*
  * pool_cache_destruct_object:
  *

 pool_cache_destruct_object(pool_cache_t pc, void *object)
 {

-        (*pc->pc_dtor)(pc->pc_arg, object);
-        pool_put(&pc->pc_pool, object);
+        FREECHECK_IN(&pc->pc_freecheck, object);
+        pool_cache_destruct_object1(pc, object);
 }
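Splitting the destructor means the public entry point can feed the object to the freecheck double-free detector, while the internal path, used below for objects that were sitting in cache groups and were never re-issued, skips that step. A toy model of the wrapper pattern (toy_check_in is a stand-in, not NetBSD's FREECHECK machinery):

    #include <assert.h>
    #include <stddef.h>

    #define NTRACK  128

    struct toy_checker {
            void *seen[NTRACK];     /* objects already returned */
            unsigned int n;
    };

    /* Record a returned object, asserting it was not returned twice. */
    static void
    toy_check_in(struct toy_checker *fc, void *obj)
    {
            unsigned int i;

            for (i = 0; i < fc->n; i++)
                    assert(fc->seen[i] != obj);     /* double free */
            if (fc->n < NTRACK)
                    fc->seen[fc->n++] = obj;
    }

    /* Internal path: destruct only (object already accounted for). */
    static void
    destruct_object1(void (*dtor)(void *), void *obj)
    {
            (*dtor)(obj);
    }

    /* Public path: check the object in first, then destruct it. */
    static void
    destruct_object(struct toy_checker *fc, void (*dtor)(void *), void *obj)
    {
            toy_check_in(fc, obj);
            destruct_object1(dtor, obj);
    }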
|
|
 /*
@@ -2173 +2262 @@ pool_cache_invalidate_groups(pool_cache_

         for (i = 0; i < pcg->pcg_avail; i++) {
                 object = pcg->pcg_objects[i].pcgo_va;
-                pool_cache_destruct_object(pc, object);
+                pool_cache_destruct_object1(pc, object);
         }

         pool_put(&pcgpool, pcg);
@@ -2189 +2278 @@ pool_cache_invalidate_groups(pool_cache_
 void
 pool_cache_invalidate(pool_cache_t pc)
 {
-        pcg_t *full, *empty;
+        pcg_t *full, *empty, *part;

         mutex_enter(&pc->pc_lock);
         full = pc->pc_fullgroups;
         empty = pc->pc_emptygroups;
+        part = pc->pc_partgroups;
         pc->pc_fullgroups = NULL;
         pc->pc_emptygroups = NULL;
+        pc->pc_partgroups = NULL;
         pc->pc_nfull = 0;
         pc->pc_nempty = 0;
+        pc->pc_npart = 0;
         mutex_exit(&pc->pc_lock);

         pool_cache_invalidate_groups(pc, full);
         pool_cache_invalidate_groups(pc, empty);
+        pool_cache_invalidate_groups(pc, part);
 }

 void
@@ -2236 +2329 @@ static inline pool_cache_cpu_t *
 pool_cache_cpu_enter(pool_cache_t pc, int *s)
 {
         pool_cache_cpu_t *cc;
-        struct cpu_info *ci;

         /*
          * Prevent other users of the cache from accessing our
          * CPU-local data.  To avoid touching shared state, we
          * pull the neccessary information from CPU local data.
          */
-        ci = curcpu();
-        KASSERT(ci->ci_data.cpu_index < MAXCPUS);
-        cc = pc->pc_cpus[ci->ci_data.cpu_index];
+        crit_enter();
+        cc = pc->pc_cpus[curcpu()->ci_index];
         KASSERT(cc->cc_cache == pc);
-        if (cc->cc_ipl == IPL_NONE) {
-                crit_enter();
-        } else {
+        if (cc->cc_ipl != IPL_NONE) {
                 *s = splraiseipl(cc->cc_iplcookie);
         }

-        /* Moved to another CPU before disabling preemption? */
-        if (__predict_false(ci != curcpu())) {
-                ci = curcpu();
-                cc = pc->pc_cpus[ci->ci_data.cpu_index];
-        }
-
-#ifdef DIAGNOSTIC
-        KASSERT(cc->cc_cpu == ci);
         KASSERT(((uintptr_t)cc & (CACHE_LINE_SIZE - 1)) == 0);
-#endif

         return cc;
 }
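The rewrite makes ordering carry the correctness argument: preemption is disabled before curcpu() is consulted, so the thread cannot migrate between looking up its per-CPU slot and using it, and the old DIAGNOSTIC re-check for migration disappears. A compile-only sketch of that rule (preempt_disable, preempt_enable and cpu_index are hypothetical stand-ins for crit_enter, crit_exit and curcpu()->ci_index):

    #define NSLOTS  64

    struct slot { int data; };

    static struct slot *slots[NSLOTS];

    /* Hypothetical primitives, declarations only for the sketch. */
    void preempt_disable(void);
    void preempt_enable(void);
    unsigned int cpu_index(void);   /* stable only while preemption is off */

    static struct slot *
    slot_enter(void)
    {
            preempt_disable();              /* pin the thread to its CPU... */
            return slots[cpu_index()];      /* ...then the lookup cannot race */
    }

    static void
    slot_exit(void)
    {
            preempt_enable();               /* lookup result is now stale */
    }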
@@ -2272 +2351 @@ pool_cache_cpu_exit(pool_cache_cpu_t *cc
 {

         /* No longer need exclusive access to the per-CPU data. */
-        if (cc->cc_ipl == IPL_NONE) {
-                crit_exit();
-        } else {
+        if (cc->cc_ipl != IPL_NONE) {
                 splx(*s);
         }
+        crit_exit();
 }

 #if __GNUC_PREREQ__(3, 0)
@@ -2572 +2650 @@ pool_cache_put_paddr(pool_cache_t pc, vo
 }

 /*
+ * pool_cache_xcall:
+ *
+ *      Transfer objects from the per-CPU cache to the global cache.
+ *      Run within a cross-call thread.
+ */
+static void
+pool_cache_xcall(pool_cache_t pc)
+{
+        pool_cache_cpu_t *cc;
+        pcg_t *prev, *cur, **list;
+        int s = 0; /* XXXgcc */
+
+        cc = pool_cache_cpu_enter(pc, &s);
+        cur = cc->cc_current;
+        cc->cc_current = NULL;
+        prev = cc->cc_previous;
+        cc->cc_previous = NULL;
+        pool_cache_cpu_exit(cc, &s);
+
+        /*
+         * XXXSMP Go to splvm to prevent kernel_lock from being taken,
+         * because locks at IPL_SOFTXXX are still spinlocks.  Does not
+         * apply to IPL_SOFTBIO.  Cross-call threads do not take the
+         * kernel_lock.
+         */
+        s = splvm();
+        mutex_enter(&pc->pc_lock);
+        if (cur != NULL) {
+                if (cur->pcg_avail == PCG_NOBJECTS) {
+                        list = &pc->pc_fullgroups;
+                        pc->pc_nfull++;
+                } else if (cur->pcg_avail == 0) {
+                        list = &pc->pc_emptygroups;
+                        pc->pc_nempty++;
+                } else {
+                        list = &pc->pc_partgroups;
+                        pc->pc_npart++;
+                }
+                cur->pcg_next = *list;
+                *list = cur;
+        }
+        if (prev != NULL) {
+                if (prev->pcg_avail == PCG_NOBJECTS) {
+                        list = &pc->pc_fullgroups;
+                        pc->pc_nfull++;
+                } else if (prev->pcg_avail == 0) {
+                        list = &pc->pc_emptygroups;
+                        pc->pc_nempty++;
+                } else {
+                        list = &pc->pc_partgroups;
+                        pc->pc_npart++;
+                }
+                prev->pcg_next = *list;
+                *list = prev;
+        }
+        mutex_exit(&pc->pc_lock);
+        splx(s);
+}
+
+/*
  * Pool backend allocators.
  *
  * Each pool has a backend allocator that handles allocation, deallocation,
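pool_cache_xcall() repeats one list-selection step for cur and prev: a group goes onto the full, empty or partial list according to pcg_avail. Factored into stand-alone form, the step looks like this (the struct names and cache_reinsert are illustrative):

    #include <stddef.h>

    struct group {
            struct group *next;
            unsigned int avail;         /* objects held in this group */
    };

    struct cache {
            struct group *full, *empty, *part;
            unsigned int nfull, nempty, npart;
            unsigned int groupsize;     /* PCG_NOBJECTS analogue */
    };

    /* Push group g onto the list matching its fill level. */
    static void
    cache_reinsert(struct cache *c, struct group *g)
    {
            struct group **list;

            if (g == NULL)
                    return;
            if (g->avail == c->groupsize) {
                    list = &c->full;
                    c->nfull++;
            } else if (g->avail == 0) {
                    list = &c->empty;
                    c->nempty++;
            } else {
                    list = &c->part;
                    c->npart++;
            }
            g->next = *list;            /* head insertion, as in the diff */
            *list = g;
    }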