--- version 1.129.12.4, 2007/11/14 19:04:44
+++ version 1.129.12.5, 2007/11/21 21:56:03
@@ -2067 +2067 @@ pool_cache_bootstrap(pool_cache_t pc, si
 	/* Allocate per-CPU caches. */
 	memset(pc->pc_cpus, 0, sizeof(pc->pc_cpus));
 	pc->pc_ncpu = 0;
-	for (CPU_INFO_FOREACH(cii, ci)) {
-		pool_cache_cpu_init1(ci, pc);
+	if (ncpu == 0) {
+		/* XXX For sparc: boot CPU is not attached yet. */
+		pool_cache_cpu_init1(curcpu(), pc);
+	} else {
+		for (CPU_INFO_FOREACH(cii, ci)) {
+			pool_cache_cpu_init1(ci, pc);
+		}
 	}

 	if (__predict_true(!cold)) {
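Note on the pool_cache_bootstrap hunk: when ncpu is still 0 (early boot on sparc, where the boot CPU has not been attached yet), CPU_INFO_FOREACH would visit no CPUs and the cache would come up with no per-CPU state, so the new code falls back to initializing just the boot CPU via curcpu(). Below is a minimal user-space sketch of the same fallback pattern; the names (cpu, cpu_list, init_one) are illustrative stand-ins, not from the kernel source.

	#include <stdio.h>

	struct cpu { int index; };

	static struct cpu boot_cpu = { 0 };
	static struct cpu *cpu_list[4];	/* attached CPUs; empty at early boot */
	static int ncpu = 0;		/* count of attached CPUs */

	/* Stand-in for pool_cache_cpu_init1(). */
	static void init_one(struct cpu *c) { printf("init cpu %d\n", c->index); }

	static void init_all(void)
	{
		if (ncpu == 0) {
			/* CPU list not populated yet: handle the boot CPU only. */
			init_one(&boot_cpu);
		} else {
			for (int i = 0; i < ncpu; i++)
				init_one(cpu_list[i]);
		}
	}

	int main(void)
	{
		init_all();		/* early boot: falls back to the boot CPU */
		cpu_list[0] = &boot_cpu;
		ncpu = 1;
		init_all();		/* after attach: walks the list */
		return 0;
	}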
|
|
@@ pool_cache_cpu_init1(struct cpu_info *ci, pool_cache_t pc) @@
 pool_cache_cpu_init1(struct cpu_info *ci, pool_cache_t pc)
 {
 	pool_cache_cpu_t *cc;
+	int index;
+
+	index = ci->ci_index;

+	KASSERT(index < MAXCPUS);
 	KASSERT(((uintptr_t)pc->pc_cpus & (CACHE_LINE_SIZE - 1)) == 0);

-	if ((cc = pc->pc_cpus[ci->ci_index]) != NULL) {
-		KASSERT(cc->cc_cpu = ci);
+	if ((cc = pc->pc_cpus[index]) != NULL) {
+		KASSERT(cc->cc_cpuindex == index);
 		return;
 	}
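One real fix hidden in the hunk above: the old assertion KASSERT(cc->cc_cpu = ci); used = where == was presumably intended, so it assigned ci into cc->cc_cpu and could only fire if ci were NULL. The replacement KASSERT(cc->cc_cpuindex == index); is a genuine comparison. A tiny self-contained sketch of the pitfall with a plain assert(); rec, owner, and check are illustrative names.

	#include <assert.h>

	struct rec { void *owner; };

	static void check(struct rec *r, void *me)
	{
		/* BUG: assignment, not comparison; passes for any non-NULL 'me'
		 * and silently overwrites r->owner as a side effect. */
		assert(r->owner = me);

		/* Intended: a side-effect-free comparison. */
		assert(r->owner == me);
	}

	int main(void)
	{
		struct rec r = { 0 };
		int x;
		check(&r, &x);	/* first assert "passes" by clobbering r.owner */
		return 0;
	}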
|
|
@@ -2168 +2177 @@ pool_cache_cpu_init1(struct cpu_info *ci
 	cc->cc_ipl = pc->pc_pool.pr_ipl;
 	cc->cc_iplcookie = makeiplcookie(cc->cc_ipl);
 	cc->cc_cache = pc;
-	cc->cc_cpu = ci;
+	cc->cc_cpuindex = index;
 	cc->cc_hits = 0;
 	cc->cc_misses = 0;
 	cc->cc_current = NULL;
 	cc->cc_previous = NULL;

-	pc->pc_cpus[ci->ci_index] = cc;
+	pc->pc_cpus[index] = cc;
 }

 /*
@@ -2320 +2329 @@ static inline pool_cache_cpu_t *
 pool_cache_cpu_enter(pool_cache_t pc, int *s)
 {
 	pool_cache_cpu_t *cc;
-	struct cpu_info *ci;

 	/*
 	 * Prevent other users of the cache from accessing our
 	 * CPU-local data. To avoid touching shared state, we
 	 * pull the neccessary information from CPU local data.
 	 */
-	ci = curcpu();
-	KASSERT(ci->ci_data.cpu_index < MAXCPUS);
-	cc = pc->pc_cpus[ci->ci_data.cpu_index];
+	crit_enter();
+	cc = pc->pc_cpus[curcpu()->ci_index];
 	KASSERT(cc->cc_cache == pc);
-	if (cc->cc_ipl == IPL_NONE) {
-		crit_enter();
-	} else {
+	if (cc->cc_ipl != IPL_NONE) {
 		*s = splraiseipl(cc->cc_iplcookie);
 	}
-
-	/* Moved to another CPU before disabling preemption? */
-	if (__predict_false(ci != curcpu())) {
-		ci = curcpu();
-		cc = pc->pc_cpus[ci->ci_data.cpu_index];
-	}
-
-#ifdef DIAGNOSTIC
-	KASSERT(cc->cc_cpu == ci);
 	KASSERT(((uintptr_t)cc & (CACHE_LINE_SIZE - 1)) == 0);
-#endif
-
 	return cc;
 }
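The restructuring above removes a window the old code had to patch over: curcpu() was read before preemption was disabled, so the thread could migrate to another CPU in between, hence the "Moved to another CPU before disabling preemption?" recheck. The new order, crit_enter() first and only then curcpu()->ci_index, makes the lookup stable and lets the recheck and the DIAGNOSTIC block go. A self-contained sketch of the two orderings; current_cpu_id(), disable_preemption(), and enable_preemption() are hypothetical stand-ins for curcpu()/crit_enter()/crit_exit().

	#include <stdio.h>

	/* Stubs standing in for kernel primitives (hypothetical names). */
	static int  current_cpu_id(void)     { return 0; }
	static void disable_preemption(void) { }
	static void enable_preemption(void)  { }

	static int percpu_slot[4];

	/* Racy ordering (what the old code compensated for with a recheck):
	 * the thread may migrate between reading the id and disabling
	 * preemption, so 'id' can refer to another CPU's slot. */
	static void racy(void)
	{
		int id = current_cpu_id();	/* may be stale by the next line */
		disable_preemption();
		percpu_slot[id]++;		/* possibly the wrong CPU's slot */
		enable_preemption();
	}

	/* Safe ordering (what the new code does): disable preemption first,
	 * then read the CPU id; it cannot change until re-enabled. */
	static void safe(void)
	{
		disable_preemption();
		percpu_slot[current_cpu_id()]++;
		enable_preemption();
	}

	int main(void)
	{
		racy();
		safe();
		printf("%d\n", percpu_slot[0]);
		return 0;
	}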
@@ -2356 +2351 @@ pool_cache_cpu_exit(pool_cache_cpu_t *cc
 {

 	/* No longer need exclusive access to the per-CPU data. */
-	if (cc->cc_ipl == IPL_NONE) {
-		crit_exit();
-	} else {
+	if (cc->cc_ipl != IPL_NONE) {
 		splx(*s);
 	}
+	crit_exit();
 }

 #if __GNUC_PREREQ__(3, 0)
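For orientation, the two helpers above bracket every per-CPU cache operation: enter pins the thread to its CPU (and raises the IPL for caches used from interrupt context), exit undoes both, now always paired with crit_exit() to match the unconditional crit_enter(). A schematic caller, loosely in the style of pool_cache_get(); the stub types and example_get are illustrative, not quoted from the file.

	#include <stddef.h>

	/* Minimal stand-ins so the sketch compiles on its own (hypothetical;
	 * the real types live in the kernel headers). */
	typedef struct pool_cache *pool_cache_t;
	typedef struct pool_cache_cpu { void *cc_current; } pool_cache_cpu_t;

	pool_cache_cpu_t *pool_cache_cpu_enter(pool_cache_t pc, int *s);
	void pool_cache_cpu_exit(pool_cache_cpu_t *cc, int *s);

	/* Schematic caller: every per-CPU operation sits between enter
	 * (pin to CPU, maybe raise IPL) and exit (restore IPL, leave the
	 * critical section). */
	void *
	example_get(pool_cache_t pc)
	{
		pool_cache_cpu_t *cc;
		void *object = NULL;
		int s;

		cc = pool_cache_cpu_enter(pc, &s);
		/* ... take an object from the per-CPU group, e.g. cc->cc_current ... */
		pool_cache_cpu_exit(cc, &s);
		return object;
	}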