 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD$");

#include "opt_multiprocessor.h"

#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/sched.h>
#include <sys/systm.h>
#include <sys/lockdebug.h>
#include <sys/kernel.h>
#include <sys/intr.h>
#include <sys/lock.h>
#include <sys/pool.h>
#include <sys/types.h>

#include <dev/lockstat.h>

#define	MUTEX_WANTLOCK(mtx)					\
    LOCKDEBUG_WANTLOCK(MUTEX_DEBUG_P(mtx), (mtx),		\
	(uintptr_t)__builtin_return_address(0), 0)
#define	MUTEX_TESTLOCK(mtx)					\
    LOCKDEBUG_WANTLOCK(MUTEX_DEBUG_P(mtx), (mtx),		\
	(uintptr_t)__builtin_return_address(0), -1)
#define	MUTEX_LOCKED(mtx)					\
    LOCKDEBUG_LOCKED(MUTEX_DEBUG_P(mtx), (mtx), NULL,		\
	(uintptr_t)__builtin_return_address(0), 0)
#define	MUTEX_UNLOCKED(mtx)					\
    LOCKDEBUG_UNLOCKED(MUTEX_DEBUG_P(mtx), (mtx),		\
	(uintptr_t)__builtin_return_address(0), 0)
#define	MUTEX_ABORT(mtx, msg)					\
    mutex_abort(__func__, __LINE__, mtx, msg)

#if defined(LOCKDEBUG)

#endif /* DIAGNOSTIC */

/*
 * Some architectures can't use __cpu_simple_lock as is so allow a way
 * for them to use an alternate definition.
 */
#ifndef MUTEX_SPINBIT_LOCK_INIT
#define	MUTEX_SPINBIT_LOCK_INIT(mtx)	__cpu_simple_lock_init(&(mtx)->mtx_lock)
#endif
#ifndef MUTEX_SPINBIT_LOCKED_P
#define	MUTEX_SPINBIT_LOCKED_P(mtx)	__SIMPLELOCK_LOCKED_P(&(mtx)->mtx_lock)
#endif
#ifndef MUTEX_SPINBIT_LOCK_TRY
#define	MUTEX_SPINBIT_LOCK_TRY(mtx)	__cpu_simple_lock_try(&(mtx)->mtx_lock)
#endif
#ifndef MUTEX_SPINBIT_LOCK_UNLOCK
#define	MUTEX_SPINBIT_LOCK_UNLOCK(mtx)	__cpu_simple_unlock(&(mtx)->mtx_lock)
#endif

#ifndef MUTEX_INITIALIZE_SPIN_IPL
#define	MUTEX_INITIALIZE_SPIN_IPL(mtx, ipl) \
	((mtx)->mtx_ipl = makeiplcookie((ipl)))
#endif
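
/*
 * Illustrative note (editor's sketch, not part of the original source):
 * the MUTEX_SPINBIT_* and MUTEX_INITIALIZE_SPIN_IPL hooks above exist so
 * that a port which cannot use __cpu_simple_lock directly can keep the
 * spin "lock bit" and the IPL cookie somewhere else, for example packed
 * into the owner word.  A hypothetical machine-dependent override might
 * look like:
 *
 *	#define MUTEX_SPINBIT_LOCKED_P(mtx)	\
 *	    (((mtx)->mtx_owner & MUTEX_BIT_SPIN_LOCKED) != 0)
 *
 * where MUTEX_BIT_SPIN_LOCKED is a made-up name used only for this
 * example.
 */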

/*
 * Spin mutex SPL save / restore.
 */

#define	MUTEX_SPIN_SPLRAISE(mtx)				\
do {								\
	struct cpu_info *x__ci;					\
	int x__cnt, s;						\
	s = splraiseipl(MUTEX_SPIN_IPL(mtx));			\
	x__ci = curcpu();					\
	x__cnt = x__ci->ci_mtx_count--;				\
	__insn_barrier();					\
	if (x__cnt == 0)					\
		x__ci->ci_mtx_oldspl = (s);			\
} while (/* CONSTCOND */ 0)

#define	MUTEX_SPIN_SPLRESTORE(mtx)				\
do {								\
	struct cpu_info *x__ci = curcpu();			\
	int s = x__ci->ci_mtx_oldspl;				\
	__insn_barrier();					\
	if (++(x__ci->ci_mtx_count) == 0)			\
		splx(s);					\
} while (/* CONSTCOND */ 0)
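
/*
 * Illustrative sketch (editor's note, not part of the original source):
 * the two macros above are intended to bracket spin mutex ownership,
 * roughly:
 *
 *	MUTEX_SPIN_SPLRAISE(mtx);	raise SPL, stash the old SPL once
 *	MUTEX_SPINBIT_LOCK_TRY(mtx);	then take the lock bit
 *	...critical section...
 *	MUTEX_SPINBIT_LOCK_UNLOCK(mtx);	release the lock bit
 *	MUTEX_SPIN_SPLRESTORE(mtx);	restore SPL when the count reaches 0
 *
 * ci_mtx_count is decremented on each acquire and incremented on each
 * release, so the saved SPL is only written by the outermost acquire and
 * only restored by the matching outermost release.
 */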

#define	MUTEX_HAS_WAITERS(mtx)					\
	(((int)(mtx)->mtx_owner & MUTEX_BIT_WAITERS) != 0)

#define	MUTEX_INITIALIZE_ADAPTIVE(mtx, dodebug)			\
do {								\
	if (!dodebug)						\
		(mtx)->mtx_owner |= MUTEX_BIT_NODEBUG;		\
} while (/* CONSTCOND */ 0);

#define	MUTEX_INITIALIZE_SPIN(mtx, dodebug, ipl)		\
do {								\
	(mtx)->mtx_owner = MUTEX_BIT_SPIN;			\
	if (!dodebug)						\
		(mtx)->mtx_owner |= MUTEX_BIT_NODEBUG;		\
	MUTEX_INITIALIZE_SPIN_IPL((mtx), (ipl));		\
	MUTEX_SPINBIT_LOCK_INIT((mtx));				\
} while (/* CONSTCOND */ 0)
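
/*
 * Illustrative sketch (editor's note, not part of the original source):
 * mutex_init(), which is not shown in this excerpt, is expected to pick
 * one of the two initializers above, roughly along the lines of:
 *
 *	if (spin mutex requested)
 *		MUTEX_INITIALIZE_SPIN(mtx, dodebug, ipl);
 *	else
 *		MUTEX_INITIALIZE_ADAPTIVE(mtx, dodebug);
 *
 * where "dodebug" records whether the lock was registered with LOCKDEBUG
 * (see MUTEX_DEBUG_P below).
 */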

#define	MUTEX_DESTROY(mtx)					\

#define	MUTEX_ADAPTIVE_P(mtx)					\
	(((mtx)->mtx_owner & MUTEX_BIT_SPIN) == 0)

#define	MUTEX_DEBUG_P(mtx)	(((mtx)->mtx_owner & MUTEX_BIT_NODEBUG) == 0)
#if defined(LOCKDEBUG)
#define	MUTEX_OWNED(owner)	(((owner) & ~MUTEX_BIT_NODEBUG) != 0)
#define	MUTEX_INHERITDEBUG(n, o)	(n) |= (o) & MUTEX_BIT_NODEBUG
#else /* defined(LOCKDEBUG) */
#define	MUTEX_OWNED(owner)	((owner) != 0)
#define	MUTEX_INHERITDEBUG(n, o)	/* nothing */
#endif /* defined(LOCKDEBUG) */

static inline int
MUTEX_ACQUIRE(kmutex_t *mtx, uintptr_t curthread)
{
	int rv;
	uintptr_t oldown = 0;
	uintptr_t newown = curthread;

	MUTEX_INHERITDEBUG(oldown, mtx->mtx_owner);
	MUTEX_INHERITDEBUG(newown, oldown);
	rv = MUTEX_CAS(&mtx->mtx_owner, oldown, newown);
	MUTEX_RECEIVE(mtx);
	return rv;
}

static inline void
MUTEX_RELEASE(kmutex_t *mtx)
{
	uintptr_t newown;

	MUTEX_GIVE(mtx);
	newown = 0;
	MUTEX_INHERITDEBUG(newown, mtx->mtx_owner);
	mtx->mtx_owner = newown;
}
#endif	/* __HAVE_SIMPLE_MUTEXES */
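
/*
 * Illustrative sketch (editor's note, not part of the original source):
 * on ports with __HAVE_SIMPLE_MUTEXES the uncontended adaptive path is a
 * single compare-and-swap on the owner word, roughly:
 *
 *	curthread = (uintptr_t)curlwp;
 *	if (MUTEX_ACQUIRE(mtx, curthread))
 *		return;				CAS 0 -> curlwp: now owned
 *	mutex_vector_enter(mtx);		contended: spin or sleep
 *
 * and MUTEX_RELEASE() is the matching store that clears the owner (and
 * any waiters bit); mutex_vector_exit() then wakes sleepers if needed.
 */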

__strong_alias(mutex_spin_exit,mutex_vector_exit);
#endif

static void	mutex_abort(const char *, size_t, const kmutex_t *,
		    const char *);
static void	mutex_dump(const volatile void *);

lockops_t mutex_spin_lockops = {
	"Mutex",
	LOCKOPS_SPIN,
	mutex_dump
};

lockops_t mutex_adaptive_lockops = {
	"Mutex",
	LOCKOPS_SLEEP,
	mutex_dump
};

	(void *)mutex_owner,
};

/* Mutex cache */
#define	MUTEX_OBJ_MAGIC	0x5aa3c85d
struct kmutexobj {
	kmutex_t	mo_lock;
	u_int		mo_magic;
	u_int		mo_refcnt;
};

static int	mutex_obj_ctor(void *, void *, int);

static pool_cache_t	mutex_obj_cache;
/* |
/* |
* mutex_dump: |
* mutex_dump: |
* |
* |
* Dump the contents of a mutex structure. |
* Dump the contents of a mutex structure. |
*/ |
*/ |
void |
void |
mutex_dump(volatile void *cookie) |
mutex_dump(const volatile void *cookie) |
{ |
{ |
volatile kmutex_t *mtx = cookie; |
const volatile kmutex_t *mtx = cookie; |
|
|
printf_nolog("owner field : %#018lx wait/spin: %16d/%d\n", |
printf_nolog("owner field : %#018lx wait/spin: %16d/%d\n", |
(long)MUTEX_OWNER(mtx->mtx_owner), MUTEX_HAS_WAITERS(mtx), |
(long)MUTEX_OWNER(mtx->mtx_owner), MUTEX_HAS_WAITERS(mtx), |
 * generates a lot of machine code in the DIAGNOSTIC case, so
 * we ask the compiler to not inline it.
 */
void __noinline
mutex_abort(const char *func, size_t line, const kmutex_t *mtx, const char *msg)
{

	LOCKDEBUG_ABORT(func, line, mtx, (MUTEX_SPIN_P(mtx) ?
	    &mutex_spin_lockops : &mutex_adaptive_lockops), msg);
}

		MUTEX_ASSERT(mtx, !MUTEX_OWNED(mtx->mtx_owner) &&
		    !MUTEX_HAS_WAITERS(mtx));
	} else {
		MUTEX_ASSERT(mtx, !MUTEX_SPINBIT_LOCKED_P(mtx));
	}

	LOCKDEBUG_FREE(MUTEX_DEBUG_P(mtx), mtx);
	MUTEX_DESTROY(mtx);
}

#ifdef MULTIPROCESSOR
/*
 * mutex_oncpu:
 *
 *	Return true if an adaptive mutex owner is running on a CPU in the
 *	system.  If the target is waiting on the kernel big lock, then we
 *	must release it.  This is necessary to avoid deadlock.
 */
static bool
mutex_oncpu(uintptr_t owner)
{
	struct cpu_info *ci;
	lwp_t *l;

	KASSERT(kpreempt_disabled());

	if (!MUTEX_OWNED(owner)) {
		return false;
	}

	/*
	 * See lwp_dtor() why dereference of the LWP pointer is safe.
	 * We must have kernel preemption disabled for that.
	 */
	l = (lwp_t *)MUTEX_OWNER(owner);
	ci = l->l_cpu;

	if (ci && ci->ci_curlwp == l) {
		/* Target is running; do we need to block? */
		return (ci->ci_biglock_wanted != l);
	}

	/* Not running.  It may be safe to block now. */
	return false;
}
#endif	/* MULTIPROCESSOR */
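
/*
 * Editor's note (not part of the original source): mutex_oncpu() relies
 * on the caller keeping kernel preemption disabled (see the KASSERT
 * above), which is what makes the unlocked dereference of the owner LWP
 * safe.  The adaptive slow path in mutex_vector_enter() below therefore
 * brackets its spin/sleep loop with KPREEMPT_DISABLE(curlwp) and
 * KPREEMPT_ENABLE(curlwp).
 */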

/*
 * mutex_vector_enter:
 *
 *	Support routine for mutex_enter() that must handle all cases.  In
 *	the LOCKDEBUG case, mutex_enter() is always aliased here, even if
 *	fast-path stubs are available.  If a mutex_spin_enter() stub is
 *	not available, then it is also aliased directly here.
 */
void
mutex_vector_enter(kmutex_t *mtx)
{
	uintptr_t owner, curthread;
	turnstile_t *ts;
#ifdef MULTIPROCESSOR
	u_int count;
#endif
	LOCKSTAT_COUNTER(spincnt);
		MUTEX_SPIN_SPLRAISE(mtx);
		MUTEX_WANTLOCK(mtx);
#ifdef FULL
		if (MUTEX_SPINBIT_LOCK_TRY(mtx)) {
			MUTEX_LOCKED(mtx);
			return;
		}
		do {
			if (panicstr != NULL)
				break;
			while (MUTEX_SPINBIT_LOCKED_P(mtx)) {
				SPINLOCK_BACKOFF(count);
#ifdef LOCKDEBUG
				if (SPINLOCK_SPINOUT(spins))
					MUTEX_ABORT(mtx, "spinout");
#endif	/* LOCKDEBUG */
			}
		} while (!MUTEX_SPINBIT_LOCK_TRY(mtx));

		if (count != SPINLOCK_BACKOFF_MIN) {
			LOCKSTAT_STOP_TIMER(lsflag, spintime);
	 * determine that the owner is not running on a processor,
	 * then we stop spinning, and sleep instead.
	 */
	KPREEMPT_DISABLE(curlwp);
	for (owner = mtx->mtx_owner;;) {
		if (!MUTEX_OWNED(owner)) {
			/*
			owner = mtx->mtx_owner;
			continue;
		}
		if (__predict_false(panicstr != NULL)) {
			KPREEMPT_ENABLE(curlwp);
			return;
		}
		if (__predict_false(MUTEX_OWNER(owner) == curthread)) {
			MUTEX_ABORT(mtx, "locking against myself");
		}
#ifdef MULTIPROCESSOR
		/*
		 * Check to see if the owner is running on a processor.
		 * If so, then we should just spin, as the owner will
		 * likely release the lock very soon.
		 */
		if (mutex_oncpu(owner)) {
			LOCKSTAT_START_TIMER(lsflag, spintime);
			count = SPINLOCK_BACKOFF_MIN;
			do {
				KPREEMPT_ENABLE(curlwp);
				SPINLOCK_BACKOFF(count);
				KPREEMPT_DISABLE(curlwp);
				owner = mtx->mtx_owner;
			} while (mutex_oncpu(owner));
			LOCKSTAT_STOP_TIMER(lsflag, spintime);
			LOCKSTAT_COUNT(spincnt, 1);
			if (!MUTEX_OWNED(owner))
		 *	.. clear lock word, waiters
		 *	return success
		 *
		 * There is another race that can occur: a third CPU could
		 * acquire the mutex as soon as it is released.  Since
		 * adaptive mutexes are primarily spin mutexes, this is not
		 * something that we need to worry about too much.  What we
		 * waiters field) and check the lock holder's status again.
		 * Some of the possible outcomes (not an exhaustive list):
		 *
		 * 1. The on-CPU check returns true: the holding LWP is
		 *    running again.  The lock may be released soon and
		 *    we should spin.  Importantly, we can't trust the
		 *    value of the waiters flag.
		 *
		 * 2. The on-CPU check returns false: the holding LWP is
		 *    not running.  We now have the opportunity to check
		 *    if mutex_exit() has blatted the modifications made
		 *    by MUTEX_SET_WAITERS().
		 *
		 * 3. The on-CPU check returns false: the holding LWP may
		 *    or may not be running.  It has context switched at
		 *    some point during our check.  Again, we have the
		 *    chance to see if the waiters bit is still set or
		 *    has been overwritten.
		 *
		 * 4. The on-CPU check returns false: the holding LWP is
		 *    running on a CPU, but wants the big lock.  It's OK
		 *    to check the waiters field in this case.
		 *
		 * If the waiters bit is not set it's unsafe to go asleep,
		 * as we might never be awoken.
		 */
		if ((membar_consumer(), mutex_oncpu(owner)) ||
		    (membar_consumer(), !MUTEX_HAS_WAITERS(mtx))) {
			turnstile_exit(mtx);
			owner = mtx->mtx_owner;

		owner = mtx->mtx_owner;
	}
	KPREEMPT_ENABLE(curlwp);

	LOCKSTAT_EVENT(lsflag, mtx, LB_ADAPTIVE_MUTEX | LB_SLEEP1,
	    slpcnt, slptime);

	if (MUTEX_SPIN_P(mtx)) {
#ifdef FULL
		if (__predict_false(!MUTEX_SPINBIT_LOCKED_P(mtx))) {
			if (panicstr != NULL)
				return;
			MUTEX_ABORT(mtx, "exiting unheld spin mutex");
		}
		MUTEX_UNLOCKED(mtx);
		MUTEX_SPINBIT_LOCK_UNLOCK(mtx);
#endif
		MUTEX_SPIN_SPLRESTORE(mtx);
		return;
	MUTEX_DASSERT(mtx, curthread != 0);
	MUTEX_ASSERT(mtx, MUTEX_OWNER(mtx->mtx_owner) == curthread);
	MUTEX_UNLOCKED(mtx);
#if !defined(LOCKDEBUG)
	__USE(curthread);
#endif

#ifdef LOCKDEBUG
	/*
 * holds the mutex.
 */
int
mutex_owned(const kmutex_t *mtx)
{

	if (mtx == NULL)
		return 0;
	if (MUTEX_ADAPTIVE_P(mtx))
		return MUTEX_OWNER(mtx->mtx_owner) == (uintptr_t)curlwp;
#ifdef FULL
	return MUTEX_SPINBIT_LOCKED_P(mtx);
#else
	return 1;
#endif
 *	priority inheritance.
 */
lwp_t *
mutex_owner(const kmutex_t *mtx)
{

	MUTEX_ASSERT(mtx, MUTEX_ADAPTIVE_P(mtx));
}

/*
 * mutex_ownable:
 *
 *	When compiled with DEBUG and LOCKDEBUG defined, ensure that
 *	the mutex is available.  We cannot use !mutex_owned() since
 *	that won't work correctly for spin mutexes.
 */
int
mutex_ownable(const kmutex_t *mtx)
{

#ifdef LOCKDEBUG
	MUTEX_TESTLOCK(mtx);
#endif
	return 1;
}

/*
 * mutex_tryenter:
 *
 *	Try to acquire the mutex; return non-zero if we did.
	if (MUTEX_SPIN_P(mtx)) {
		MUTEX_SPIN_SPLRAISE(mtx);
#ifdef FULL
		if (MUTEX_SPINBIT_LOCK_TRY(mtx)) {
			MUTEX_WANTLOCK(mtx);
			MUTEX_LOCKED(mtx);
			return 1;
	do {
		if (panicstr != NULL)
			break;
		while (MUTEX_SPINBIT_LOCKED_P(mtx)) {
			SPINLOCK_BACKOFF(count);
#ifdef LOCKDEBUG
			if (SPINLOCK_SPINOUT(spins))
				MUTEX_ABORT(mtx, "spinout");
#endif	/* LOCKDEBUG */
		}
	} while (!MUTEX_SPINBIT_LOCK_TRY(mtx));

	LOCKSTAT_STOP_TIMER(lsflag, spintime);
	LOCKSTAT_EVENT(lsflag, mtx, LB_SPIN_MUTEX | LB_SPIN, 1, spintime);
#endif	/* MULTIPROCESSOR */
}
#endif	/* defined(__HAVE_SPIN_MUTEX_STUBS) || defined(FULL) */

/*
 * mutex_obj_init:
 *
 *	Initialize the mutex object store.
 */
void
mutex_obj_init(void)
{

	mutex_obj_cache = pool_cache_init(sizeof(struct kmutexobj),
	    coherency_unit, 0, 0, "mutex", NULL, IPL_NONE, mutex_obj_ctor,
	    NULL, NULL);
}

/*
 * mutex_obj_ctor:
 *
 *	Initialize a new lock for the cache.
 */
static int
mutex_obj_ctor(void *arg, void *obj, int flags)
{
	struct kmutexobj *mo = obj;

	mo->mo_magic = MUTEX_OBJ_MAGIC;

	return 0;
}

/*
 * mutex_obj_alloc:
 *
 *	Allocate a single lock object.
 */
kmutex_t *
mutex_obj_alloc(kmutex_type_t type, int ipl)
{
	struct kmutexobj *mo;

	mo = pool_cache_get(mutex_obj_cache, PR_WAITOK);
	mutex_init(&mo->mo_lock, type, ipl);
	mo->mo_refcnt = 1;

	return (kmutex_t *)mo;
}

/*
 * mutex_obj_hold:
 *
 *	Add a single reference to a lock object.  A reference to the object
 *	must already be held, and must be held across this call.
 */
void
mutex_obj_hold(kmutex_t *lock)
{
	struct kmutexobj *mo = (struct kmutexobj *)lock;

	KASSERT(mo->mo_magic == MUTEX_OBJ_MAGIC);
	KASSERT(mo->mo_refcnt > 0);

	atomic_inc_uint(&mo->mo_refcnt);
}

/*
 * mutex_obj_free:
 *
 *	Drop a reference from a lock object.  If the last reference is being
 *	dropped, free the object and return true.  Otherwise, return false.
 */
bool
mutex_obj_free(kmutex_t *lock)
{
	struct kmutexobj *mo = (struct kmutexobj *)lock;

	KASSERT(mo->mo_magic == MUTEX_OBJ_MAGIC);
	KASSERT(mo->mo_refcnt > 0);

	if (atomic_dec_uint_nv(&mo->mo_refcnt) > 0) {
		return false;
	}
	mutex_destroy(&mo->mo_lock);
	pool_cache_put(mutex_obj_cache, mo);
	return true;
}
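
/*
 * Illustrative sketch (editor's note, not part of the original source):
 * typical use of the reference-counted lock objects above, assuming two
 * subsystems want to share a single adaptive mutex:
 *
 *	kmutex_t *lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);
 *	mutex_obj_hold(lock);		take a second reference
 *	...
 *	mutex_obj_free(lock);		drops one reference, returns false
 *	mutex_obj_free(lock);		last reference: destroys and frees
 */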
|