--- version 1.11, 2007/03/10 16:01:13
+++ version 1.16, 2007/09/10 11:34:10

 /*
  * Spin mutex SPL save / restore.
  */
+#ifndef MUTEX_COUNT_BIAS
+#define	MUTEX_COUNT_BIAS	0
+#endif
 
 #define	MUTEX_SPIN_SPLRAISE(mtx)				\
 do {								\
 	struct cpu_info *x__ci = curcpu();			\
 	int x__cnt, s;						\
 	x__cnt = x__ci->ci_mtx_count--;				\
 	s = splraiseipl(mtx->mtx_ipl);				\
-	if (x__cnt == 0)					\
+	if (x__cnt == MUTEX_COUNT_BIAS)				\
 		x__ci->ci_mtx_oldspl = (s);			\
 } while (/* CONSTCOND */ 0)
 
 #define	MUTEX_SPIN_SPLRESTORE(mtx)				\
 do {								\
 	struct cpu_info *x__ci = curcpu();			\
 	int s = x__ci->ci_mtx_oldspl;				\
 	__insn_barrier();					\
-	if (++(x__ci->ci_mtx_count) == 0)			\
+	if (++(x__ci->ci_mtx_count) == MUTEX_COUNT_BIAS)	\
 		splx(s);					\
 } while (/* CONSTCOND */ 0)
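
With this change the SPL bookkeeping no longer compares ci_mtx_count against a literal zero but against MUTEX_COUNT_BIAS, which defaults to 0 and can apparently be overridden by a machine-dependent header so the per-CPU count runs from a different baseline. Either way, only the outermost spin-mutex acquisition saves the previous SPL, and only the matching outermost release restores it. A minimal, self-contained userland sketch of that save/restore-at-the-baseline pattern (all names below are invented for illustration; this is not kernel code):

	#include <stdio.h>

	#ifndef COUNT_BIAS
	#define COUNT_BIAS	0	/* plays the role of MUTEX_COUNT_BIAS */
	#endif

	static int mtx_count = COUNT_BIAS;	/* counts down as "spin mutexes" are taken */
	static int mtx_oldspl;			/* SPL saved by the outermost acquisition */
	static int curspl;			/* stand-in for the CPU's current interrupt level */

	static int splraise(int ipl) { int s = curspl; if (ipl > curspl) curspl = ipl; return s; }
	static void splx(int s) { curspl = s; }

	static void
	spin_enter(int ipl)
	{
		int cnt = mtx_count--;		/* pre-decrement value, like x__cnt above */
		int s = splraise(ipl);
		if (cnt == COUNT_BIAS)		/* outermost acquisition: remember the old SPL */
			mtx_oldspl = s;
	}

	static void
	spin_exit(void)
	{
		if (++mtx_count == COUNT_BIAS)	/* last release: drop back to the saved SPL */
			splx(mtx_oldspl);
	}

	int
	main(void)
	{
		spin_enter(6);			/* outermost: saves SPL 0 */
		spin_enter(7);			/* nested: nothing saved */
		spin_exit();			/* still nested: SPL stays raised */
		spin_exit();			/* outermost release: restores SPL 0 */
		printf("SPL restored to %d\n", curspl);
		return 0;
	}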
|
|
Line 309 (1.11) / Line 312 (1.16): mutex_init(kmutex_t *mtx, kmutex_type_t

 	memset(mtx, 0, sizeof(*mtx));
 
-	if (type == MUTEX_DRIVER)
+	switch (type) {
+	case MUTEX_ADAPTIVE:
+	case MUTEX_DEFAULT:
+		KASSERT(ipl == IPL_NONE);
+		break;
+	case MUTEX_DRIVER:
 		type = (ipl == IPL_NONE ? MUTEX_ADAPTIVE : MUTEX_SPIN);
+		break;
+	default:
+		break;
+	}
 
 	switch (type) {
 	case MUTEX_NODEBUG:
-		KASSERT(ipl == IPL_NONE);
 		id = LOCKDEBUG_ALLOC(mtx, NULL);
-		MUTEX_INITIALIZE_ADAPTIVE(mtx, id);
+		MUTEX_INITIALIZE_SPIN(mtx, id, ipl);
 		break;
 	case MUTEX_ADAPTIVE:
 	case MUTEX_DEFAULT:
-		KASSERT(ipl == IPL_NONE);
 		id = LOCKDEBUG_ALLOC(mtx, &mutex_adaptive_lockops);
 		MUTEX_INITIALIZE_ADAPTIVE(mtx, id);
 		break;
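
The reworked mutex_init() now canonicalises the requested type in a first switch (MUTEX_DRIVER picks adaptive or spin from the IPL, and the adaptive/default types must pass IPL_NONE) before a second switch does the actual initialisation; note that MUTEX_NODEBUG is now set up with MUTEX_INITIALIZE_SPIN rather than MUTEX_INITIALIZE_ADAPTIVE. A hedged illustration of how a few typical calls resolve under the first switch (the softc fields are invented, and the MUTEX_SPIN arm of the second switch sits below the part shown in this hunk):

	mutex_init(&sc->sc_lock, MUTEX_DRIVER, IPL_NONE);	/* -> adaptive mutex */
	mutex_init(&sc->sc_intr_lock, MUTEX_DRIVER, IPL_BIO);	/* -> spin mutex at IPL_BIO */
	mutex_init(&sc->sc_cfg_lock, MUTEX_DEFAULT, IPL_NONE);	/* -> adaptive; the KASSERT insists on IPL_NONE */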

Line 347 (1.11) / Line 357 (1.16): mutex_destroy(kmutex_t *mtx)

 		MUTEX_ASSERT(mtx, !MUTEX_OWNED(mtx->mtx_owner) &&
 		    !MUTEX_HAS_WAITERS(mtx));
 	} else {
-		MUTEX_ASSERT(mtx, mtx->mtx_lock != __SIMPLELOCK_LOCKED);
+		MUTEX_ASSERT(mtx, !__SIMPLELOCK_LOCKED_P(&mtx->mtx_lock));
 	}
 
 	LOCKDEBUG_FREE(mtx, MUTEX_GETID(mtx));

Line 359 (1.11) / Line 369 (1.16): mutex_destroy(kmutex_t *mtx)

  *
  *	Return true if an adaptive mutex owner is running on a CPU in the
  *	system.  If the target is waiting on the kernel big lock, then we
- *	return false immediately.  This is necessary to avoid deadlock
- *	against the big lock.
+ *	must release it.  This is necessary to avoid deadlock.
  *
  *	Note that we can't use the mutex owner field as an LWP pointer.  We
  *	don't have full control over the timing of our execution, and so the
  *	pointer could be completely invalid by the time we dereference it.
- *
- *	XXX This should be optimised further to reduce potential cache line
- *	ping-ponging and skewing of the spin time while busy waiting.
  */
 #ifdef MULTIPROCESSOR
 int

Line 381 (1.11) / Line 387 (1.16): mutex_onproc(uintptr_t owner, struct cpu

 		return 0;
 	l = (struct lwp *)MUTEX_OWNER(owner);
 
+	/* See if the target is running on a CPU somewhere. */
 	if ((ci = *cip) != NULL && ci->ci_curlwp == l)
-		return ci->ci_biglock_wanted != l;
-
-	for (CPU_INFO_FOREACH(cii, ci)) {
-		if (ci->ci_curlwp == l) {
-			*cip = ci;
-			return ci->ci_biglock_wanted != l;
-		}
-	}
+		goto run;
+	for (CPU_INFO_FOREACH(cii, ci))
+		if (ci->ci_curlwp == l)
+			goto run;
 
+	/* No: it may be safe to block now. */
 	*cip = NULL;
 	return 0;
+
+run:
+	/* Target is running; do we need to block? */
+	*cip = ci;
+	return ci->ci_biglock_wanted != l;
 }
-#endif
+#endif	/* MULTIPROCESSOR */
|
|
 /*
  * mutex_vector_enter:
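
For context: mutex_onproc() above is the helper that makes adaptive mutexes adaptive. Its caller, mutex_vector_enter(), keeps spinning while the owner is on a CPU and only sleeps on the turnstile once the owner is off-CPU (or stuck wanting the big lock). The following is a heavily abridged, hypothetical sketch of that decision loop; it is not code from this diff, and the actual acquire step is elided:

	for (;;) {
		owner = mtx->mtx_owner;
		if (!MUTEX_OWNED(owner)) {
			/* Lock looks free: try to CAS our LWP into mtx_owner; stop on success. */
			break;
		}
		if (mutex_onproc(owner, &ci)) {
			/* Owner is running somewhere: keep spinning (the adaptive case). */
			SPINLOCK_BACKOFF(count);
			continue;
		}
		/*
		 * Owner is off-CPU: set the waiters bit, re-check under mb_read()
		 * as in the hunk further below, and block on the turnstile.
		 */
	}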

Line 448 (1.11) / Line 457 (1.16): mutex_vector_enter(kmutex_t *mtx)

 	do {
 		if (panicstr != NULL)
 			break;
-		while (mtx->mtx_lock == __SIMPLELOCK_LOCKED) {
+		while (__SIMPLELOCK_LOCKED_P(&mtx->mtx_lock)) {
 			SPINLOCK_BACKOFF(count);
 #ifdef LOCKDEBUG
 			if (SPINLOCK_SPINOUT(spins))
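
The loop above is a test-and-test-and-set spin: it retries the lock only after __SIMPLELOCK_LOCKED_P() shows it free again, and widens the delay between probes with SPINLOCK_BACKOFF(). A self-contained userland sketch of the same pattern using C11 atomics (the names and backoff constants below are mine, not the kernel's):

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	#define BACKOFF_MIN	4
	#define BACKOFF_MAX	1024

	static _Atomic bool locked;

	static void
	spin_lock_backoff(void)
	{
		unsigned count = BACKOFF_MIN;

		/* Try to take the lock; on failure, spin reading until it looks free. */
		while (atomic_exchange_explicit(&locked, true, memory_order_acquire)) {
			while (atomic_load_explicit(&locked, memory_order_relaxed)) {
				for (volatile unsigned i = 0; i < count; i++)
					continue;	/* stand-in for SPINLOCK_BACKOFF() */
				if (count < BACKOFF_MAX)
					count += count;	/* exponential backoff, capped */
			}
		}
	}

	static void
	spin_unlock(void)
	{
		atomic_store_explicit(&locked, false, memory_order_release);
	}

	int
	main(void)
	{
		spin_lock_backoff();
		puts("locked");
		spin_unlock();
		return 0;
	}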

Line 600 (1.11) / Line 609 (1.16): mutex_vector_enter(kmutex_t *mtx)

 	 * completes before the modification of curlwp becomes
 	 * visible to this CPU.
 	 *
-	 * o cpu_switch() posts a store fence before setting curlwp
+	 * o mi_switch() posts a store fence before setting curlwp
 	 *   and before resuming execution of an LWP.
 	 *
 	 * o _kernel_lock() posts a store fence before setting

Line 643 (1.11) / Line 652 (1.16): mutex_vector_enter(kmutex_t *mtx)

 		 * If the waiters bit is not set it's unsafe to go asleep,
 		 * as we might never be awoken.
 		 */
-		mb_read();
-		if (mutex_onproc(owner, &ci) || !MUTEX_HAS_WAITERS(mtx)) {
+		if ((mb_read(), mutex_onproc(owner, &ci)) ||
+		    (mb_read(), !MUTEX_HAS_WAITERS(mtx))) {
 			turnstile_exit(mtx);
 			continue;
 		}

Line 656 (1.11) / Line 665 (1.16): mutex_vector_enter(kmutex_t *mtx)

 		LOCKSTAT_STOP_TIMER(lsflag, slptime);
 		LOCKSTAT_COUNT(slpcnt, 1);
-
-		turnstile_unblock();
 	}
 
 	LOCKSTAT_EVENT(lsflag, mtx, LB_ADAPTIVE_MUTEX | LB_SLEEP1,

Line 683 (1.11) / Line 690 (1.16): mutex_vector_exit(kmutex_t *mtx)

 	if (MUTEX_SPIN_P(mtx)) {
 #ifdef FULL
-		if (mtx->mtx_lock != __SIMPLELOCK_LOCKED)
+		if (!__SIMPLELOCK_LOCKED_P(&mtx->mtx_lock))
 			MUTEX_ABORT(mtx, "exiting unheld spin mutex");
 		MUTEX_UNLOCKED(mtx);
 		__cpu_simple_unlock(&mtx->mtx_lock);

Line 703 (1.11) / Line 710 (1.16): mutex_vector_exit(kmutex_t *mtx)

 	MUTEX_ASSERT(mtx, MUTEX_OWNER(mtx->mtx_owner) == curthread);
 	MUTEX_UNLOCKED(mtx);
 
+#ifdef LOCKDEBUG
+	/*
+	 * Avoid having to take the turnstile chain lock every time
+	 * around.  Raise the priority level to splhigh() in order
+	 * to disable preemption and so make the following atomic.
+	 */
+	{
+		int s = splhigh();
+		if (!MUTEX_HAS_WAITERS(mtx)) {
+			MUTEX_RELEASE(mtx);
+			splx(s);
+			return;
+		}
+		splx(s);
+	}
+#endif
+
 	/*
 	 * Get this lock's turnstile.  This gets the interlock on
 	 * the sleep queue.  Once we have that, we can clear the

Line 757 (1.11) / Line 781 (1.16): mutex_owned(kmutex_t *mtx)

 	if (MUTEX_ADAPTIVE_P(mtx))
 		return MUTEX_OWNER(mtx->mtx_owner) == (uintptr_t)curlwp;
 #ifdef FULL
-	return mtx->mtx_lock == __SIMPLELOCK_LOCKED;
+	return __SIMPLELOCK_LOCKED_P(&mtx->mtx_lock);
 #else
 	return 1;
 #endif

Line 851 (1.11) / Line 875 (1.16): mutex_spin_retry(kmutex_t *mtx)

 	do {
 		if (panicstr != NULL)
 			break;
-		while (mtx->mtx_lock == __SIMPLELOCK_LOCKED) {
+		while (__SIMPLELOCK_LOCKED_P(&mtx->mtx_lock)) {
 			SPINLOCK_BACKOFF(count);
 #ifdef LOCKDEBUG
 			if (SPINLOCK_SPINOUT(spins))

Line 870 (1.11) / Line 894 (1.16): mutex_spin_retry(kmutex_t *mtx)

 #endif	/* MULTIPROCESSOR */
 }
 #endif	/* defined(__HAVE_SPIN_MUTEX_STUBS) || defined(FULL) */
 
-/*
- * sched_lock_idle:
- *
- *	XXX Ugly hack for cpu_switch().
- */
-void
-sched_lock_idle(void)
-{
-#ifdef FULL
-	kmutex_t *mtx = &sched_mutex;
-
-	curcpu()->ci_mtx_count--;
-
-	if (!__cpu_simple_lock_try(&mtx->mtx_lock)) {
-		mutex_spin_retry(mtx);
-		return;
-	}
-
-	MUTEX_LOCKED(mtx);
-#else
-	curcpu()->ci_mtx_count--;
-#endif	/* FULL */
-}
-
-/*
- * sched_unlock_idle:
- *
- *	XXX Ugly hack for cpu_switch().
- */
-void
-sched_unlock_idle(void)
-{
-#ifdef FULL
-	kmutex_t *mtx = &sched_mutex;
-
-	if (mtx->mtx_lock != __SIMPLELOCK_LOCKED)
-		MUTEX_ABORT(mtx, "sched_mutex not locked");
-
-	MUTEX_UNLOCKED(mtx);
-	__cpu_simple_unlock(&mtx->mtx_lock);
-#endif	/* FULL */
-	curcpu()->ci_mtx_count++;
-}
|