--- version 1.12, 2007/03/12 02:19:14
+++ version 1.30, 2008/01/05 12:31:39

 /* $NetBSD$ */

 /*-
- * Copyright (c) 2002, 2006, 2007 The NetBSD Foundation, Inc.
+ * Copyright (c) 2002, 2006, 2007, 2008 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation

  * Richard McDougall.
  */

#include "opt_multiprocessor.h" |
|
|
|
#define __MUTEX_PRIVATE |
#define __MUTEX_PRIVATE |
|
|
#include <sys/cdefs.h> |
#include <sys/cdefs.h> |
__KERNEL_RCSID(0, "$NetBSD$"); |
__KERNEL_RCSID(0, "$NetBSD$"); |
|
|
|
#include "opt_multiprocessor.h" |
|
|
#include <sys/param.h> |
#include <sys/param.h> |
#include <sys/proc.h> |
#include <sys/proc.h> |
#include <sys/mutex.h> |
#include <sys/mutex.h> |
@@ -59 +59 @@ __KERNEL_RCSID(0, "$NetBSD$");
 #include <sys/systm.h>
 #include <sys/lockdebug.h>
 #include <sys/kernel.h>
+#include <sys/atomic.h>
+#include <sys/intr.h>
+#include <sys/lock.h>

 #include <dev/lockstat.h>

-#include <machine/intr.h>
+#include <machine/lock.h>

 /*
  * When not running a debug kernel, spin mutexes are not much
@@ -78 +81 @@ __KERNEL_RCSID(0, "$NetBSD$");
  */

 #define	MUTEX_WANTLOCK(mtx)					\
-    LOCKDEBUG_WANTLOCK(MUTEX_GETID(mtx),			\
+    LOCKDEBUG_WANTLOCK(MUTEX_DEBUG_P(mtx), (mtx),		\
         (uintptr_t)__builtin_return_address(0), 0)
 #define	MUTEX_LOCKED(mtx)					\
-    LOCKDEBUG_LOCKED(MUTEX_GETID(mtx),				\
+    LOCKDEBUG_LOCKED(MUTEX_DEBUG_P(mtx), (mtx),		\
         (uintptr_t)__builtin_return_address(0), 0)
 #define	MUTEX_UNLOCKED(mtx)					\
-    LOCKDEBUG_UNLOCKED(MUTEX_GETID(mtx),			\
+    LOCKDEBUG_UNLOCKED(MUTEX_DEBUG_P(mtx), (mtx),		\
         (uintptr_t)__builtin_return_address(0), 0)
 #define	MUTEX_ABORT(mtx, msg)					\
-    mutex_abort(mtx, __FUNCTION__, msg)
+    mutex_abort(mtx, __func__, msg)
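The hunk above also trades the GCC-specific __FUNCTION__ for C99's standard
__func__.  A minimal userland sketch of the same abort-wrapper pattern (all
names are invented, not the kernel's):

	#include <stdio.h>
	#include <stdlib.h>

	/* Report the calling function when a lock consistency check fails. */
	#define LOCK_ABORT(lk, msg)	lock_abort((lk), __func__, (msg))

	static void
	lock_abort(void *lk, const char *func, const char *msg)
	{
		fprintf(stderr, "lock %p: %s: %s\n", lk, func, msg);
		abort();
	}

	int
	main(void)
	{
		int dummy;

		LOCK_ABORT(&dummy, "example abort");	/* reports "main" */
	}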

 #if defined(LOCKDEBUG)

 #define	MUTEX_OWNER(owner)					\
 	(owner & MUTEX_THREAD)
-#define	MUTEX_OWNED(owner)					\
-	(owner != 0)
 #define	MUTEX_HAS_WAITERS(mtx)					\
 	(((int)(mtx)->mtx_owner & MUTEX_BIT_WAITERS) != 0)

-#define	MUTEX_INITIALIZE_ADAPTIVE(mtx, id)			\
+#define	MUTEX_INITIALIZE_ADAPTIVE(mtx, dodebug)			\
 do {								\
-	(mtx)->mtx_id = (id);					\
+	if (dodebug)						\
+		(mtx)->mtx_owner |= MUTEX_BIT_DEBUG;		\
 } while (/* CONSTCOND */ 0);

-#define	MUTEX_INITIALIZE_SPIN(mtx, id, ipl)			\
+#define	MUTEX_INITIALIZE_SPIN(mtx, dodebug, ipl)		\
 do {								\
 	(mtx)->mtx_owner = MUTEX_BIT_SPIN;			\
+	if (dodebug)						\
+		(mtx)->mtx_owner |= MUTEX_BIT_DEBUG;		\
 	(mtx)->mtx_ipl = makeiplcookie((ipl));			\
-	(mtx)->mtx_id = (id);					\
 	__cpu_simple_lock_init(&(mtx)->mtx_lock);		\
 } while (/* CONSTCOND */ 0)

 #define	MUTEX_DESTROY(mtx)					\
 do {								\
 	(mtx)->mtx_owner = MUTEX_THREAD;			\
-	(mtx)->mtx_id = -1;					\
 } while (/* CONSTCOND */ 0);

 #define	MUTEX_SPIN_P(mtx)					\
 	(((mtx)->mtx_owner & MUTEX_BIT_SPIN) != 0)
 #define	MUTEX_ADAPTIVE_P(mtx)					\
 	(((mtx)->mtx_owner & MUTEX_BIT_SPIN) == 0)

-#define	MUTEX_GETID(mtx)	((mtx)->mtx_id)
+#define	MUTEX_DEBUG_P(mtx)	(((mtx)->mtx_owner & MUTEX_BIT_DEBUG) != 0)
+
+#if defined(LOCKDEBUG)
+#define	MUTEX_OWNED(owner)		(((owner) & ~MUTEX_BIT_DEBUG) != 0)
+#define	MUTEX_INHERITDEBUG(new, old)	(new) |= (old) & MUTEX_BIT_DEBUG
+#else /* defined(LOCKDEBUG) */
+#define	MUTEX_OWNED(owner)		((owner) != 0)
+#define	MUTEX_INHERITDEBUG(new, old)	/* nothing */
+#endif /* defined(LOCKDEBUG) */
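Under LOCKDEBUG the per-mutex debug flag now lives in a spare low bit of
mtx_owner, so ownership tests must mask it out and every rewrite of the word
must carry it forward; that is all MUTEX_OWNED() and MUTEX_INHERITDEBUG() do.
A self-contained sketch of the tagged-word idea (bit value and names
invented):

	#include <assert.h>
	#include <stdint.h>

	#define BIT_DEBUG		0x02UL	/* illustrative flag bit */
	#define OWNED(w)		(((w) & ~BIT_DEBUG) != 0)
	#define INHERITDEBUG(n, o)	((n) |= (o) & BIT_DEBUG)

	int
	main(void)
	{
		uintptr_t owner = BIT_DEBUG;	/* unowned, debugging on */
		uintptr_t thread = 0x1000;	/* stand-in LWP address */
		uintptr_t new;

		assert(!OWNED(owner));	/* flag alone is not ownership */

		new = thread;
		INHERITDEBUG(new, owner);	/* acquire keeps the flag... */
		owner = new;
		assert(OWNED(owner) && (owner & BIT_DEBUG));

		new = 0;
		INHERITDEBUG(new, owner);	/* ...and so does release */
		owner = new;
		assert(!OWNED(owner) && (owner & BIT_DEBUG));
		return 0;
	}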
 static inline int
 MUTEX_ACQUIRE(kmutex_t *mtx, uintptr_t curthread)
 {
 	int rv;
-	rv = MUTEX_CAS(&mtx->mtx_owner, 0UL, curthread);
+	uintptr_t old = 0;
+	uintptr_t new = curthread;
+
+	MUTEX_INHERITDEBUG(old, mtx->mtx_owner);
+	MUTEX_INHERITDEBUG(new, old);
+	rv = MUTEX_CAS(&mtx->mtx_owner, old, new);
 	MUTEX_RECEIVE(mtx);
 	return rv;
 }
@@ -206 +220 @@ MUTEX_SET_WAITERS(kmutex_t *mtx, uintptr
 static inline void
 MUTEX_RELEASE(kmutex_t *mtx)
 {
+	uintptr_t new;
+
 	MUTEX_GIVE(mtx);
-	mtx->mtx_owner = 0;
+	new = 0;
+	MUTEX_INHERITDEBUG(new, mtx->mtx_owner);
+	mtx->mtx_owner = new;
 }
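Together, MUTEX_ACQUIRE() and MUTEX_RELEASE() are the adaptive fast path: one
compare-and-swap to take the lock, one ordinary store to drop it, with
MUTEX_RECEIVE()/MUTEX_GIVE() supplying barriers on machines that need them.
A self-contained userland sketch with C11 atomics; the memory orderings are
an assumed rough mapping, not the kernel's stub code:

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdint.h>

	/* Take the lock iff it is unowned (expected value 0). */
	static bool
	acquire(atomic_uintptr_t *owner, uintptr_t curthread)
	{
		uintptr_t old = 0;

		/* acq_rel CAS ~ MUTEX_CAS() + MUTEX_RECEIVE() */
		return atomic_compare_exchange_strong_explicit(owner, &old,
		    curthread, memory_order_acq_rel, memory_order_relaxed);
	}

	/* Drop the lock; release store ~ MUTEX_GIVE() + plain store. */
	static void
	release(atomic_uintptr_t *owner)
	{
		atomic_store_explicit(owner, 0, memory_order_release);
	}

	int
	main(void)
	{
		atomic_uintptr_t owner;

		atomic_init(&owner, 0);
		if (acquire(&owner, (uintptr_t)1))
			release(&owner);
		return 0;
	}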

 static inline void
@@ -239 +257 @@ __strong_alias(mutex_spin_exit,mutex_vec
 void	mutex_abort(kmutex_t *, const char *, const char *);
 void	mutex_dump(volatile void *);
 int	mutex_onproc(uintptr_t, struct cpu_info **);
-static struct lwp *mutex_owner(wchan_t);

 lockops_t mutex_spin_lockops = {
 	"Mutex",
@@ -258 +275 @@ syncobj_t mutex_syncobj = {
 	turnstile_unsleep,
 	turnstile_changepri,
 	sleepq_lendpri,
-	mutex_owner,
+	(void *)mutex_owner,
 };

 /*

 mutex_abort(kmutex_t *mtx, const char *func, const char *msg)
 {

-	LOCKDEBUG_ABORT(MUTEX_GETID(mtx), mtx, (MUTEX_SPIN_P(mtx) ?
+	LOCKDEBUG_ABORT(mtx, (MUTEX_SPIN_P(mtx) ?
 	    &mutex_spin_lockops : &mutex_adaptive_lockops), func, msg);
 	/* NOTREACHED */
 }
@@ -308 +325 @@ mutex_abort(kmutex_t *mtx, const char *f
 void
 mutex_init(kmutex_t *mtx, kmutex_type_t type, int ipl)
 {
-	u_int id;
+	bool dodebug;

 	memset(mtx, 0, sizeof(*mtx));

-	if (type == MUTEX_DRIVER)
-		type = (ipl == IPL_NONE ? MUTEX_ADAPTIVE : MUTEX_SPIN);
+	switch (type) {
+	case MUTEX_ADAPTIVE:
+		KASSERT(ipl == IPL_NONE);
+		break;
+	case MUTEX_DEFAULT:
+	case MUTEX_DRIVER:
+		if (ipl == IPL_NONE || ipl == IPL_SOFTCLOCK ||
+		    ipl == IPL_SOFTBIO || ipl == IPL_SOFTNET ||
+		    ipl == IPL_SOFTSERIAL) {
+			type = MUTEX_ADAPTIVE;
+		} else {
+			type = MUTEX_SPIN;
+		}
+		break;
+	default:
+		break;
+	}

 	switch (type) {
 	case MUTEX_NODEBUG:
-		KASSERT(ipl == IPL_NONE);
-		id = LOCKDEBUG_ALLOC(mtx, NULL);
-		MUTEX_INITIALIZE_ADAPTIVE(mtx, id);
+		dodebug = LOCKDEBUG_ALLOC(mtx, NULL,
+		    (uintptr_t)__builtin_return_address(0));
+		MUTEX_INITIALIZE_SPIN(mtx, dodebug, ipl);
 		break;
 	case MUTEX_ADAPTIVE:
-	case MUTEX_DEFAULT:
-		KASSERT(ipl == IPL_NONE);
-		id = LOCKDEBUG_ALLOC(mtx, &mutex_adaptive_lockops);
-		MUTEX_INITIALIZE_ADAPTIVE(mtx, id);
+		dodebug = LOCKDEBUG_ALLOC(mtx, &mutex_adaptive_lockops,
+		    (uintptr_t)__builtin_return_address(0));
+		MUTEX_INITIALIZE_ADAPTIVE(mtx, dodebug);
 		break;
 	case MUTEX_SPIN:
-		id = LOCKDEBUG_ALLOC(mtx, &mutex_spin_lockops);
-		MUTEX_INITIALIZE_SPIN(mtx, id, ipl);
+		dodebug = LOCKDEBUG_ALLOC(mtx, &mutex_spin_lockops,
+		    (uintptr_t)__builtin_return_address(0));
+		MUTEX_INITIALIZE_SPIN(mtx, dodebug, ipl);
 		break;
 	default:
 		panic("mutex_init: impossible type");
@@ -350 +382 @@ mutex_destroy(kmutex_t *mtx)
 		MUTEX_ASSERT(mtx, !MUTEX_OWNED(mtx->mtx_owner) &&
 		    !MUTEX_HAS_WAITERS(mtx));
 	} else {
-		MUTEX_ASSERT(mtx, mtx->mtx_lock != __SIMPLELOCK_LOCKED);
+		MUTEX_ASSERT(mtx, !__SIMPLELOCK_LOCKED_P(&mtx->mtx_lock));
 	}

-	LOCKDEBUG_FREE(mtx, MUTEX_GETID(mtx));
+	LOCKDEBUG_FREE(MUTEX_DEBUG_P(mtx), mtx);
 	MUTEX_DESTROY(mtx);
 }
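mutex_destroy() insists the lock is idle before teardown, and MUTEX_DESTROY()
then poisons mtx_owner with MUTEX_THREAD so a stale use fails recognizably.
An illustrative caller fragment (sc_lock invented):

	mutex_enter(&sc_lock);
	/* detach the state this lock protects */
	mutex_exit(&sc_lock);
	mutex_destroy(&sc_lock);	/* must be unheld and waiter-free */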

@@ -362 +394 @@ mutex_destroy(kmutex_t *mtx)
  *
  * Return true if an adaptive mutex owner is running on a CPU in the
  * system.  If the target is waiting on the kernel big lock, then we
- * return false immediately.  This is necessary to avoid deadlock
- * against the big lock.
+ * must release it.  This is necessary to avoid deadlock.
  *
  * Note that we can't use the mutex owner field as an LWP pointer.  We
  * don't have full control over the timing of our execution, and so the
  * pointer could be completely invalid by the time we dereference it.
- *
- * XXX This should be optimised further to reduce potential cache line
- * ping-ponging and skewing of the spin time while busy waiting.
  */
 #ifdef MULTIPROCESSOR
 int
@@ -384 +412 @@ mutex_onproc(uintptr_t owner, struct cpu
 		return 0;
 	l = (struct lwp *)MUTEX_OWNER(owner);

+	/* See if the target is running on a CPU somewhere. */
 	if ((ci = *cip) != NULL && ci->ci_curlwp == l)
-		return ci->ci_biglock_wanted != l;
-
-	for (CPU_INFO_FOREACH(cii, ci)) {
-		if (ci->ci_curlwp == l) {
-			*cip = ci;
-			return ci->ci_biglock_wanted != l;
-		}
-	}
+		goto run;
+	for (CPU_INFO_FOREACH(cii, ci))
+		if (ci->ci_curlwp == l)
+			goto run;

+	/* No: it may be safe to block now. */
 	*cip = NULL;
 	return 0;
+
+ run:
+ 	/* Target is running; do we need to block? */
+	*cip = ci;
+	return ci->ci_biglock_wanted != l;
 }
-#endif
+#endif	/* MULTIPROCESSOR */
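mutex_onproc() is what makes the mutex adaptive: mutex_vector_enter() keeps
spinning only while the owner is running on some CPU, and otherwise queues on
the turnstile.  A simplified sketch of that caller loop, paraphrasing the
shape of mutex_vector_enter() (not the verbatim code; ci and count are the
caller's locals):

	while (!MUTEX_ACQUIRE(mtx, curthread)) {
		uintptr_t owner = mtx->mtx_owner;

		if (!MUTEX_OWNED(owner))
			continue;		/* released: retry the CAS */

		if (mutex_onproc(owner, &ci)) {
			SPINLOCK_BACKOFF(count);	/* owner on-CPU: spin */
			continue;
		}

		/*
		 * Owner is off-CPU: set the waiters bit and sleep on
		 * the turnstile until mutex_vector_exit() wakes us.
		 */
	}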

 /*
  * mutex_vector_enter:
@@ -451 +482 @@ mutex_vector_enter(kmutex_t *mtx)
 	do {
 		if (panicstr != NULL)
 			break;
-		while (mtx->mtx_lock == __SIMPLELOCK_LOCKED) {
+		while (__SIMPLELOCK_LOCKED_P(&mtx->mtx_lock)) {
 			SPINLOCK_BACKOFF(count);
 #ifdef LOCKDEBUG
 			if (SPINLOCK_SPINOUT(spins))
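SPINLOCK_BACKOFF() is bounded exponential backoff between probes of the lock
word, which keeps contending CPUs from re-reading the same cache line in
lockstep.  A hedged sketch of what such a macro conventionally expands to;
the constants, names, and compiler-barrier spin body are illustrative, not
NetBSD's exact definition:

	#define BACKOFF_MIN	4
	#define BACKOFF_MAX	128

	#define BACKOFF(count)						\
	do {								\
		for (int __i = (count); __i != 0; __i--)		\
			__asm__ volatile("" ::: "memory");		\
		if ((count) < BACKOFF_MAX)				\
			(count) += (count);	/* double, to a cap */	\
	} while (/* CONSTCOND */ 0)

The caller seeds count with the minimum, and the delay doubles on every
failed probe until it reaches the cap.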
@@ -478 +509 @@ mutex_vector_enter(kmutex_t *mtx)
 	MUTEX_ASSERT(mtx, curthread != 0);
 	MUTEX_WANTLOCK(mtx);

-#ifdef LOCKDEBUG
 	if (panicstr == NULL) {
-		simple_lock_only_held(NULL, "mutex_enter");
-#ifdef MULTIPROCESSOR
 		LOCKDEBUG_BARRIER(&kernel_lock, 1);
-#else
-		LOCKDEBUG_BARRIER(NULL, 1);
-#endif
 	}
-#endif

 	LOCKSTAT_ENTER(lsflag);

@@ -586 +610 @@ mutex_vector_enter(kmutex_t *mtx)
 	 * or preempted).
 	 *
 	 * o At any given time, MUTEX_SET_WAITERS() can only ever
-	 *   be in progress on one CPU in the system - guarenteed
+	 *   be in progress on one CPU in the system - guaranteed
 	 *   by the turnstile chain lock.
 	 *
 	 * o No other operations other than MUTEX_SET_WAITERS()
@@ -603 +627 @@ mutex_vector_enter(kmutex_t *mtx)
 	 *   completes before the modification of curlwp becomes
 	 *   visible to this CPU.
 	 *
-	 * o cpu_switch() posts a store fence before setting curlwp
+	 * o mi_switch() posts a store fence before setting curlwp
 	 *   and before resuming execution of an LWP.
 	 *
 	 * o _kernel_lock() posts a store fence before setting
@@ -646 +670 @@ mutex_vector_enter(kmutex_t *mtx)
 		 * If the waiters bit is not set it's unsafe to go asleep,
 		 * as we might never be awoken.
 		 */
-		mb_read();
-		if (mutex_onproc(owner, &ci) || !MUTEX_HAS_WAITERS(mtx)) {
+		if ((membar_consumer(), mutex_onproc(owner, &ci)) ||
+		    (membar_consumer(), !MUTEX_HAS_WAITERS(mtx))) {
 			turnstile_exit(mtx);
 			continue;
 		}
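The old single mb_read() becomes a membar_consumer() before each dependent
load: the owner value must be re-sampled before the on-CPU check, and again
before the waiters-bit check, or a stale read could put the thread to sleep
with nobody left to wake it.  A standalone C11 approximation of the reader
side (all names, and the on_cpu() stand-in, are invented):

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdint.h>

	static atomic_uintptr_t owner_word;	/* stand-in for mtx_owner */
	static atomic_bool waiters_bit;		/* stand-in for waiters bit */

	static bool
	on_cpu(uintptr_t owner)
	{
		return owner != 0;	/* stand-in for the CPU scan */
	}

	/* Safe to sleep?  Each fence plays the role of membar_consumer(). */
	static bool
	may_sleep(void)
	{
		atomic_thread_fence(memory_order_acquire);
		if (on_cpu(atomic_load_explicit(&owner_word,
		    memory_order_relaxed)))
			return false;	/* owner running: keep spinning */
		atomic_thread_fence(memory_order_acquire);
		return atomic_load_explicit(&waiters_bit,
		    memory_order_relaxed);
	}

	int
	main(void)
	{
		return may_sleep() ? 0 : 1;
	}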
@@ -659 +683 @@ mutex_vector_enter(kmutex_t *mtx)

 		LOCKSTAT_STOP_TIMER(lsflag, slptime);
 		LOCKSTAT_COUNT(slpcnt, 1);
-
-		turnstile_unblock();
 	}

 	LOCKSTAT_EVENT(lsflag, mtx, LB_ADAPTIVE_MUTEX | LB_SLEEP1,
@@ -686 +708 @@ mutex_vector_exit(kmutex_t *mtx)

 	if (MUTEX_SPIN_P(mtx)) {
 #ifdef FULL
-		if (mtx->mtx_lock != __SIMPLELOCK_LOCKED)
+		if (!__SIMPLELOCK_LOCKED_P(&mtx->mtx_lock))
 			MUTEX_ABORT(mtx, "exiting unheld spin mutex");
 		MUTEX_UNLOCKED(mtx);
 		__cpu_simple_unlock(&mtx->mtx_lock);
@@ -706 +728 @@ mutex_vector_exit(kmutex_t *mtx)
 	MUTEX_ASSERT(mtx, MUTEX_OWNER(mtx->mtx_owner) == curthread);
 	MUTEX_UNLOCKED(mtx);

+#ifdef LOCKDEBUG
+	/*
+	 * Avoid having to take the turnstile chain lock every time
+	 * around.  Raise the priority level to splhigh() in order
+	 * to disable preemption and so make the following atomic.
+	 */
+	{
+		int s = splhigh();
+		if (!MUTEX_HAS_WAITERS(mtx)) {
+			MUTEX_RELEASE(mtx);
+			splx(s);
+			return;
+		}
+		splx(s);
+	}
+#endif

 	/*
 	 * Get this lock's turnstile.  This gets the interlock on
 	 * the sleep queue.  Once we have that, we can clear the
@@ -760 +799 @@ mutex_owned(kmutex_t *mtx)
 	if (MUTEX_ADAPTIVE_P(mtx))
 		return MUTEX_OWNER(mtx->mtx_owner) == (uintptr_t)curlwp;
 #ifdef FULL
-	return mtx->mtx_lock == __SIMPLELOCK_LOCKED;
+	return __SIMPLELOCK_LOCKED_P(&mtx->mtx_lock);
 #else
 	return 1;
 #endif
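mutex_owned() only answers whether the current LWP holds an adaptive mutex;
for spin mutexes it degrades to "locked by someone" (or constant 1 without
FULL), so it is meant for assertions rather than locking decisions.
Illustrative use (sc_lock invented):

	/* In a routine whose contract requires the caller to hold the lock: */
	KASSERT(mutex_owned(&sc_lock));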
@@ -772 +811 @@ mutex_owned(kmutex_t *mtx)
  * Return the current owner of an adaptive mutex.  Used for
  * priority inheritance.
  */
-static struct lwp *
-mutex_owner(wchan_t obj)
+lwp_t *
+mutex_owner(kmutex_t *mtx)
 {
-	kmutex_t *mtx = (void *)(uintptr_t)obj; /* discard qualifiers */

 	MUTEX_ASSERT(mtx, MUTEX_ADAPTIVE_P(mtx));
 	return (struct lwp *)MUTEX_OWNER(mtx->mtx_owner);
@@ -854 +892 @@ mutex_spin_retry(kmutex_t *mtx)
 	do {
 		if (panicstr != NULL)
 			break;
-		while (mtx->mtx_lock == __SIMPLELOCK_LOCKED) {
+		while (__SIMPLELOCK_LOCKED_P(&mtx->mtx_lock)) {
 			SPINLOCK_BACKOFF(count);
 #ifdef LOCKDEBUG
 			if (SPINLOCK_SPINOUT(spins))
@@ -873 +911 @@ mutex_spin_retry(kmutex_t *mtx)
 #endif	/* MULTIPROCESSOR */
 }
 #endif	/* defined(__HAVE_SPIN_MUTEX_STUBS) || defined(FULL) */

-/*
- * sched_lock_idle:
- *
- *	XXX Ugly hack for cpu_switch().
- */
-void
-sched_lock_idle(void)
-{
-#ifdef FULL
-	kmutex_t *mtx = &sched_mutex;
-
-	curcpu()->ci_mtx_count--;
-
-	if (!__cpu_simple_lock_try(&mtx->mtx_lock)) {
-		mutex_spin_retry(mtx);
-		return;
-	}
-
-	MUTEX_LOCKED(mtx);
-#else
-	curcpu()->ci_mtx_count--;
-#endif	/* FULL */
-}
-
-/*
- * sched_unlock_idle:
- *
- *	XXX Ugly hack for cpu_switch().
- */
-void
-sched_unlock_idle(void)
-{
-#ifdef FULL
-	kmutex_t *mtx = &sched_mutex;
-
-	if (mtx->mtx_lock != __SIMPLELOCK_LOCKED)
-		MUTEX_ABORT(mtx, "sched_mutex not locked");
-
-	MUTEX_UNLOCKED(mtx);
-	__cpu_simple_unlock(&mtx->mtx_lock);
-#endif	/* FULL */
-	curcpu()->ci_mtx_count++;
-}