version 1.65, 2017/05/01 21:35:25

/* $NetBSD$ */

/*-
 * Copyright (c) 2002, 2006, 2007, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * [... unchanged lines elided ...]
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * [... unchanged lines elided ...]
 * Richard McDougall.
 */

#define	__MUTEX_PRIVATE

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD$");

#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/sched.h>

/* [... unchanged lines elided; resumes at line 51 ...] */

#include <sys/systm.h>
#include <sys/lockdebug.h>
#include <sys/kernel.h>
#include <sys/intr.h>
#include <sys/lock.h>
#include <sys/types.h>

#include <dev/lockstat.h>

#include <machine/lock.h>

/*
 * When not running a debug kernel, spin mutexes are not much
 * [... unchanged lines elided; resumes at line 73 ...]
 */

#define	MUTEX_WANTLOCK(mtx)					\
	LOCKDEBUG_WANTLOCK(MUTEX_DEBUG_P(mtx), (mtx),		\
	    (uintptr_t)__builtin_return_address(0), 0)
#define	MUTEX_TESTLOCK(mtx)					\
	LOCKDEBUG_WANTLOCK(MUTEX_DEBUG_P(mtx), (mtx),		\
	    (uintptr_t)__builtin_return_address(0), -1)
#define	MUTEX_LOCKED(mtx)					\
	LOCKDEBUG_LOCKED(MUTEX_DEBUG_P(mtx), (mtx), NULL,	\
	    (uintptr_t)__builtin_return_address(0), 0)
#define	MUTEX_UNLOCKED(mtx)					\
	LOCKDEBUG_UNLOCKED(MUTEX_DEBUG_P(mtx), (mtx),		\
	    (uintptr_t)__builtin_return_address(0), 0)
#define	MUTEX_ABORT(mtx, msg)					\
	mutex_abort(__func__, __LINE__, mtx, msg)

#if defined(LOCKDEBUG)

/* [... assertion macro definitions elided ...] */

#endif /* DIAGNOSTIC */

/*
 * Some architectures can't use __cpu_simple_lock as is so allow a way
 * for them to use an alternate definition.
 */
#ifndef MUTEX_SPINBIT_LOCK_INIT
#define	MUTEX_SPINBIT_LOCK_INIT(mtx)	__cpu_simple_lock_init(&(mtx)->mtx_lock)
#endif
#ifndef MUTEX_SPINBIT_LOCKED_P
#define	MUTEX_SPINBIT_LOCKED_P(mtx)	__SIMPLELOCK_LOCKED_P(&(mtx)->mtx_lock)
#endif
#ifndef MUTEX_SPINBIT_LOCK_TRY
#define	MUTEX_SPINBIT_LOCK_TRY(mtx)	__cpu_simple_lock_try(&(mtx)->mtx_lock)
#endif
#ifndef MUTEX_SPINBIT_LOCK_UNLOCK
#define	MUTEX_SPINBIT_LOCK_UNLOCK(mtx)	__cpu_simple_unlock(&(mtx)->mtx_lock)
#endif

#ifndef MUTEX_INITIALIZE_SPIN_IPL
#define	MUTEX_INITIALIZE_SPIN_IPL(mtx, ipl)			\
	((mtx)->mtx_ipl = makeiplcookie((ipl)))
#endif
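
/*
 * Illustrative sketch, not part of the original file: a port that cannot
 * use __cpu_simple_lock on mtx_lock directly can supply its own hooks
 * before this point (e.g. from <machine/mutex.h>).  The arch_mutex_*
 * names below are hypothetical stand-ins for such machine-level code.
 */
#if 0
#define	MUTEX_SPINBIT_LOCK_INIT(mtx)	arch_mutex_spinbit_init(mtx)
#define	MUTEX_SPINBIT_LOCKED_P(mtx)	arch_mutex_spinbit_locked_p(mtx)
#define	MUTEX_SPINBIT_LOCK_TRY(mtx)	arch_mutex_spinbit_try(mtx)
#define	MUTEX_SPINBIT_LOCK_UNLOCK(mtx)	arch_mutex_spinbit_unlock(mtx)
#endif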

/*
 * Spin mutex SPL save / restore.
 */

#define	MUTEX_SPIN_SPLRAISE(mtx)					\
do {									\
	struct cpu_info *x__ci;						\
	int x__cnt, s;							\
	s = splraiseipl(MUTEX_SPIN_IPL(mtx));				\
	x__ci = curcpu();						\
	x__cnt = x__ci->ci_mtx_count--;					\
	__insn_barrier();						\
	if (x__cnt == 0)						\
		x__ci->ci_mtx_oldspl = (s);				\
} while (/* CONSTCOND */ 0)

#define	MUTEX_SPIN_SPLRESTORE(mtx)					\
do {									\
	struct cpu_info *x__ci = curcpu();				\
	int s = x__ci->ci_mtx_oldspl;					\
	__insn_barrier();						\
	if (++(x__ci->ci_mtx_count) == 0)				\
		splx(s);						\
} while (/* CONSTCOND */ 0)
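
/*
 * Illustrative sketch, not part of the original file: ci_mtx_count makes
 * the SPL save/restore nest.  Assuming the count starts at zero, only the
 * outermost MUTEX_SPIN_SPLRAISE records the old SPL, and only the final,
 * matching MUTEX_SPIN_SPLRESTORE calls splx().  With two spin mutexes:
 */
#if 0
	mutex_spin_enter(&mtx_a);	/* count 0 -> -1: old SPL saved    */
	mutex_spin_enter(&mtx_b);	/* count -1 -> -2: SPL only raised */
	mutex_spin_exit(&mtx_b);	/* count -2 -> -1: no splx() yet   */
	mutex_spin_exit(&mtx_a);	/* count -1 -> 0: splx(saved SPL)  */
#endif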

#define	MUTEX_OWNER(owner)						\
	(owner & MUTEX_THREAD)
#define	MUTEX_HAS_WAITERS(mtx)						\
	(((int)(mtx)->mtx_owner & MUTEX_BIT_WAITERS) != 0)

#define	MUTEX_INITIALIZE_ADAPTIVE(mtx, dodebug)				\
do {									\
	if (!dodebug)							\
		(mtx)->mtx_owner |= MUTEX_BIT_NODEBUG;			\
} while (/* CONSTCOND */ 0);

#define	MUTEX_INITIALIZE_SPIN(mtx, dodebug, ipl)			\
do {									\
	(mtx)->mtx_owner = MUTEX_BIT_SPIN;				\
	if (!dodebug)							\
		(mtx)->mtx_owner |= MUTEX_BIT_NODEBUG;			\
	MUTEX_INITIALIZE_SPIN_IPL((mtx), (ipl));			\
	MUTEX_SPINBIT_LOCK_INIT((mtx));					\
} while (/* CONSTCOND */ 0)

#define	MUTEX_DESTROY(mtx)						\
do {									\
	(mtx)->mtx_owner = MUTEX_THREAD;				\
} while (/* CONSTCOND */ 0);

#define	MUTEX_SPIN_P(mtx)						\
	(((mtx)->mtx_owner & MUTEX_BIT_SPIN) != 0)
#define	MUTEX_ADAPTIVE_P(mtx)						\
	(((mtx)->mtx_owner & MUTEX_BIT_SPIN) == 0)

#define	MUTEX_DEBUG_P(mtx)	(((mtx)->mtx_owner & MUTEX_BIT_NODEBUG) == 0)
#if defined(LOCKDEBUG)
#define	MUTEX_OWNED(owner)		(((owner) & ~MUTEX_BIT_NODEBUG) != 0)
#define	MUTEX_INHERITDEBUG(n, o)	(n) |= (o) & MUTEX_BIT_NODEBUG
#else /* defined(LOCKDEBUG) */
#define	MUTEX_OWNED(owner)		((owner) != 0)
#define	MUTEX_INHERITDEBUG(n, o)	/* nothing */
#endif /* defined(LOCKDEBUG) */

static inline int
MUTEX_ACQUIRE(kmutex_t *mtx, uintptr_t curthread)
{
	int rv;
	uintptr_t oldown = 0;
	uintptr_t newown = curthread;

	MUTEX_INHERITDEBUG(oldown, mtx->mtx_owner);
	MUTEX_INHERITDEBUG(newown, oldown);
	rv = MUTEX_CAS(&mtx->mtx_owner, oldown, newown);
	MUTEX_RECEIVE(mtx);
	return rv;
}
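
/*
 * Illustrative sketch, not part of the original file: on platforms with
 * __HAVE_SIMPLE_MUTEXES, MUTEX_CAS is typically a compare-and-swap on the
 * owner word, along the lines of the assumed definition below.
 * MUTEX_INHERITDEBUG exists so that, under LOCKDEBUG, MUTEX_BIT_NODEBUG is
 * carried across the 0 <-> owner transitions rather than being mistaken
 * for part of the owner value.
 */
#if 0
#define	MUTEX_CAS(p, o, n)						\
	(atomic_cas_ulong((volatile unsigned long *)(p), (o), (n)) == (o))
#endif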

/* [... unchanged lines elided; resumes at line 236 in MUTEX_SET_WAITERS() ...] */

static inline void
MUTEX_RELEASE(kmutex_t *mtx)
{
	uintptr_t newown;

	MUTEX_GIVE(mtx);
	newown = 0;
	MUTEX_INHERITDEBUG(newown, mtx->mtx_owner);
	mtx->mtx_owner = newown;
}
#endif	/* __HAVE_SIMPLE_MUTEXES */

/* [... unchanged lines elided; resumes at line 264: __strong_alias(mutex_spin_enter,mutex_ve...) ...] */
__strong_alias(mutex_spin_exit,mutex_vector_exit);
#endif

static void	mutex_abort(const char *, size_t, kmutex_t *, const char *);
static void	mutex_dump(volatile void *);

lockops_t mutex_spin_lockops = {
	"Mutex",
	LOCKOPS_SPIN,
	mutex_dump
};

lockops_t mutex_adaptive_lockops = {
	"Mutex",
	LOCKOPS_SLEEP,
	mutex_dump
};

/* [... unchanged lines elided; resumes at line 284 ...] */
syncobj_t mutex_syncobj = {
	/* [... initial fields elided ...] */
	turnstile_unsleep,
	turnstile_changepri,
	sleepq_lendpri,
	(void *)mutex_owner,
};

/*
 * [... unchanged lines elided; resumes at line 309 in mutex_dump() ...]
 * generates a lot of machine code in the DIAGNOSTIC case, so
 * we ask the compiler to not inline it.
 */
void __noinline
mutex_abort(const char *func, size_t line, kmutex_t *mtx, const char *msg)
{

	LOCKDEBUG_ABORT(func, line, mtx, (MUTEX_SPIN_P(mtx) ?
	    &mutex_spin_lockops : &mutex_adaptive_lockops), msg);
}

/*
 * [... unchanged lines elided; resumes at line 329 ...]
 */
void
mutex_init(kmutex_t *mtx, kmutex_type_t type, int ipl)
{
	bool dodebug;

	memset(mtx, 0, sizeof(*mtx));

	switch (type) {
	case MUTEX_ADAPTIVE:
		KASSERT(ipl == IPL_NONE);
		break;
	case MUTEX_DEFAULT:
	case MUTEX_DRIVER:
		if (ipl == IPL_NONE || ipl == IPL_SOFTCLOCK ||
		    ipl == IPL_SOFTBIO || ipl == IPL_SOFTNET ||
		    ipl == IPL_SOFTSERIAL) {
			type = MUTEX_ADAPTIVE;
		} else {
			type = MUTEX_SPIN;
		}
		break;
	default:
		break;
	}

	switch (type) {
	case MUTEX_NODEBUG:
		dodebug = LOCKDEBUG_ALLOC(mtx, NULL,
		    (uintptr_t)__builtin_return_address(0));
		MUTEX_INITIALIZE_SPIN(mtx, dodebug, ipl);
		break;
	case MUTEX_ADAPTIVE:
		dodebug = LOCKDEBUG_ALLOC(mtx, &mutex_adaptive_lockops,
		    (uintptr_t)__builtin_return_address(0));
		MUTEX_INITIALIZE_ADAPTIVE(mtx, dodebug);
		break;
	case MUTEX_SPIN:
		dodebug = LOCKDEBUG_ALLOC(mtx, &mutex_spin_lockops,
		    (uintptr_t)__builtin_return_address(0));
		MUTEX_INITIALIZE_SPIN(mtx, dodebug, ipl);
		break;
	default:
		panic("mutex_init: impossible type");
	}
}

/* [... unchanged lines elided; resumes at line 386 in mutex_destroy() ...] */
void
mutex_destroy(kmutex_t *mtx)
{

	if (MUTEX_ADAPTIVE_P(mtx)) {
		MUTEX_ASSERT(mtx, !MUTEX_OWNED(mtx->mtx_owner) &&
		    !MUTEX_HAS_WAITERS(mtx));
	} else {
		MUTEX_ASSERT(mtx, !MUTEX_SPINBIT_LOCKED_P(mtx));
	}

	LOCKDEBUG_FREE(MUTEX_DEBUG_P(mtx), mtx);
	MUTEX_DESTROY(mtx);
}
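
/*
 * Illustrative usage sketch, not part of the original file (the softc and
 * its fields are hypothetical): passing IPL_NONE yields an adaptive mutex,
 * while a hardware IPL such as IPL_VM yields a spin mutex; both must be
 * destroyed unheld.
 */
#if 0
struct example_softc {
	kmutex_t sc_lock;	/* adaptive: taken from thread context */
	kmutex_t sc_intr_lock;	/* spin: shared with an interrupt handler */
};

static void
example_attach(struct example_softc *sc)
{
	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_NONE);
	mutex_init(&sc->sc_intr_lock, MUTEX_DEFAULT, IPL_VM);
}

static void
example_detach(struct example_softc *sc)
{
	mutex_destroy(&sc->sc_intr_lock);
	mutex_destroy(&sc->sc_lock);
}
#endif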

#ifdef MULTIPROCESSOR
/*
 * mutex_oncpu:
 *
 *	Return true if an adaptive mutex owner is running on a CPU in the
 *	system.  If the target is waiting on the kernel big lock, then we
 *	must release it.  This is necessary to avoid deadlock.
 */
static bool
mutex_oncpu(uintptr_t owner)
{
	struct cpu_info *ci;
	lwp_t *l;

	KASSERT(kpreempt_disabled());

	if (!MUTEX_OWNED(owner)) {
		return false;
	}

	/*
	 * See lwp_dtor() why dereference of the LWP pointer is safe.
	 * We must have kernel preemption disabled for that.
	 */
	l = (lwp_t *)MUTEX_OWNER(owner);
	ci = l->l_cpu;

	if (ci && ci->ci_curlwp == l) {
		/* Target is running; do we need to block? */
		return (ci->ci_biglock_wanted != l);
	}

	/* Not running.  It may be safe to block now. */
	return false;
}
#endif	/* MULTIPROCESSOR */
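
/*
 * Illustrative sketch, not part of the original file: callers must keep
 * kernel preemption disabled across mutex_oncpu() so that the owner LWP
 * cannot be freed while its l_cpu field is dereferenced, mirroring the
 * pattern used by mutex_vector_enter() below.
 */
#if 0
	KPREEMPT_DISABLE(curlwp);
	if (mutex_oncpu(owner)) {
		/* Owner is live on a CPU; spinning is worthwhile. */
	}
	KPREEMPT_ENABLE(curlwp);
#endif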

/*
 * mutex_vector_enter:
 *
 *	Support routine for mutex_enter() that must handle all cases.  In
 *	the LOCKDEBUG case, mutex_enter() is always aliased here, even if
 *	fast-path stubs are available.  If a mutex_spin_enter() stub is
 *	not available, then it is also aliased directly here.
 */
void
mutex_vector_enter(kmutex_t *mtx)
{
	uintptr_t owner, curthread;
	turnstile_t *ts;
#ifdef MULTIPROCESSOR
	u_int count;
#endif
	LOCKSTAT_COUNTER(spincnt);
/* [... unchanged lines elided; resumes at line 462 ...] */
		MUTEX_SPIN_SPLRAISE(mtx);
		MUTEX_WANTLOCK(mtx);
#ifdef FULL
		if (MUTEX_SPINBIT_LOCK_TRY(mtx)) {
			MUTEX_LOCKED(mtx);
			return;
		}
/* [... unchanged lines elided; resumes at line 481 ...] */
		do {
			if (panicstr != NULL)
				break;
			while (MUTEX_SPINBIT_LOCKED_P(mtx)) {
				SPINLOCK_BACKOFF(count);
#ifdef LOCKDEBUG
				if (SPINLOCK_SPINOUT(spins))
					MUTEX_ABORT(mtx, "spinout");
#endif	/* LOCKDEBUG */
			}
		} while (!MUTEX_SPINBIT_LOCK_TRY(mtx));

		if (count != SPINLOCK_BACKOFF_MIN) {
			LOCKSTAT_STOP_TIMER(lsflag, spintime);
/* [... unchanged lines elided; resumes at line 508 ...] */
	MUTEX_ASSERT(mtx, curthread != 0);
	MUTEX_WANTLOCK(mtx);

	if (panicstr == NULL) {
		LOCKDEBUG_BARRIER(&kernel_lock, 1);
	}

	LOCKSTAT_ENTER(lsflag);

	/*
	 * [... unchanged lines elided; resumes at line 519 ...]
	 * determine that the owner is not running on a processor,
	 * then we stop spinning, and sleep instead.
	 */
	KPREEMPT_DISABLE(curlwp);
	for (owner = mtx->mtx_owner;;) {
		if (!MUTEX_OWNED(owner)) {
			/*
			 * Mutex owner clear could mean two things:
			 * [... unchanged lines elided; resumes at line 533 ...]
			 */
			if (MUTEX_ACQUIRE(mtx, curthread))
				break;
			owner = mtx->mtx_owner;
			continue;
		}
		if (__predict_false(panicstr != NULL)) {
			KPREEMPT_ENABLE(curlwp);
			return;
		}
		if (__predict_false(MUTEX_OWNER(owner) == curthread)) {
			MUTEX_ABORT(mtx, "locking against myself");
		}
#ifdef MULTIPROCESSOR
		/*
		 * Check to see if the owner is running on a processor.
		 * If so, then we should just spin, as the owner will
		 * likely release the lock very soon.
		 */
		if (mutex_oncpu(owner)) {
			LOCKSTAT_START_TIMER(lsflag, spintime);
			count = SPINLOCK_BACKOFF_MIN;
			do {
				KPREEMPT_ENABLE(curlwp);
				SPINLOCK_BACKOFF(count);
				KPREEMPT_DISABLE(curlwp);
				owner = mtx->mtx_owner;
			} while (mutex_oncpu(owner));
			LOCKSTAT_STOP_TIMER(lsflag, spintime);
			LOCKSTAT_COUNT(spincnt, 1);
			if (!MUTEX_OWNED(owner))
		/*
		 * [... unchanged lines elided; resumes at line 574 ...]
		 */
		if (!MUTEX_SET_WAITERS(mtx, owner)) {
			turnstile_exit(mtx);
			owner = mtx->mtx_owner;
			continue;
		}

		/*
		 * [... unchanged lines elided; resumes at line 597 ...]
		 *	  .. clear lock word, waiters
		 *	  return success
		 *
		 * There is another race that can occur: a third CPU could
		 * acquire the mutex as soon as it is released.  Since
		 * adaptive mutexes are primarily spin mutexes, this is not
		 * something that we need to worry about too much.  What we
		 *
		 * [... unchanged lines elided; resumes at line 612 ...]
		 *    or preempted).
		 *
		 * o At any given time, MUTEX_SET_WAITERS() can only ever
		 *   be in progress on one CPU in the system - guaranteed
		 *   by the turnstile chain lock.
		 *
		 * o No other operations other than MUTEX_SET_WAITERS()
		 *
		 * [... unchanged lines elided; resumes at line 629 ...]
		 *   completes before the modification of curlwp becomes
		 *   visible to this CPU.
		 *
		 * o mi_switch() posts a store fence before setting curlwp
		 *   and before resuming execution of an LWP.
		 *
		 * o _kernel_lock() posts a store fence before setting
		 *
		 * [... unchanged lines elided; resumes at line 642 ...]
		 * waiters field) and check the lock holder's status again.
		 * Some of the possible outcomes (not an exhaustive list):
		 *
		 * 1. The on-CPU check returns true: the holding LWP is
		 *    running again.  The lock may be released soon and
		 *    we should spin.  Importantly, we can't trust the
		 *    value of the waiters flag.
		 *
		 * 2. The on-CPU check returns false: the holding LWP is
		 *    not running.  We now have the opportunity to check
		 *    if mutex_exit() has blatted the modifications made
		 *    by MUTEX_SET_WAITERS().
		 *
		 * 3. The on-CPU check returns false: the holding LWP may
		 *    or may not be running.  It has context switched at
		 *    some point during our check.  Again, we have the
		 *    chance to see if the waiters bit is still set or
		 *    has been overwritten.
		 *
		 * 4. The on-CPU check returns false: the holding LWP is
		 *    running on a CPU, but wants the big lock.  It's OK
		 *    to check the waiters field in this case.
		 *
		 * [... unchanged lines elided; resumes at line 672 ...]
		 * If the waiters bit is not set it's unsafe to go asleep,
		 * as we might never be awoken.
		 */
		if ((membar_consumer(), mutex_oncpu(owner)) ||
		    (membar_consumer(), !MUTEX_HAS_WAITERS(mtx))) {
			turnstile_exit(mtx);
			owner = mtx->mtx_owner;
			continue;
		}
#endif	/* MULTIPROCESSOR */
		/* [... unchanged lines elided; resumes at line 687 ...] */
		LOCKSTAT_STOP_TIMER(lsflag, slptime);
		LOCKSTAT_COUNT(slpcnt, 1);

		owner = mtx->mtx_owner;
	}
	KPREEMPT_ENABLE(curlwp);

	LOCKSTAT_EVENT(lsflag, mtx, LB_ADAPTIVE_MUTEX | LB_SLEEP1,
	    slpcnt, slptime);
/* [... unchanged lines elided; mutex_vector_enter() ends; resumes at line 714 ...] */
void
mutex_vector_exit(kmutex_t *mtx)
{
	uintptr_t curthread;

	if (MUTEX_SPIN_P(mtx)) {
#ifdef FULL
		if (__predict_false(!MUTEX_SPINBIT_LOCKED_P(mtx))) {
			if (panicstr != NULL)
				return;
			MUTEX_ABORT(mtx, "exiting unheld spin mutex");
		}
		MUTEX_UNLOCKED(mtx);
		MUTEX_SPINBIT_LOCK_UNLOCK(mtx);
#endif
		MUTEX_SPIN_SPLRESTORE(mtx);
		return;
	}

	if (__predict_false((uintptr_t)panicstr | cold)) {
		MUTEX_UNLOCKED(mtx);
		MUTEX_RELEASE(mtx);
		return;
	/* [... unchanged lines elided; resumes at line 736 ...] */
	MUTEX_DASSERT(mtx, curthread != 0);
	MUTEX_ASSERT(mtx, MUTEX_OWNER(mtx->mtx_owner) == curthread);
	MUTEX_UNLOCKED(mtx);
#if !defined(LOCKDEBUG)
	__USE(curthread);
#endif

#ifdef LOCKDEBUG
	/*
	 * Avoid having to take the turnstile chain lock every time
	 * around.  Raise the priority level to splhigh() in order
	 * to disable preemption and so make the following atomic.
	 */
	{
		int s = splhigh();
		if (!MUTEX_HAS_WAITERS(mtx)) {
			MUTEX_RELEASE(mtx);
			splx(s);
			return;
		}
		splx(s);
	}
#endif

	/*
	 * Get this lock's turnstile.  This gets the interlock on
	 * [... remainder of mutex_vector_exit() and the mutex_owned()
	 * comment elided ...]
	 */

int
mutex_owned(kmutex_t *mtx)
{

	if (mtx == NULL)
		return 0;
	if (MUTEX_ADAPTIVE_P(mtx))
		return MUTEX_OWNER(mtx->mtx_owner) == (uintptr_t)curlwp;
#ifdef FULL
	return MUTEX_SPINBIT_LOCKED_P(mtx);
#else
	return 1;
#endif
}

/*
 * [... unchanged lines elided; resumes at line 825 ...]
 *
 *	Return the current owner of an adaptive mutex.  Used for
 *	priority inheritance.
 */
lwp_t *
mutex_owner(kmutex_t *mtx)
{

	MUTEX_ASSERT(mtx, MUTEX_ADAPTIVE_P(mtx));
	return (struct lwp *)MUTEX_OWNER(mtx->mtx_owner);
}
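
/*
 * Illustrative sketch, not part of the original file: mutex_owned() is
 * meant for assertions rather than for making locking decisions, e.g.
 * (sc is a hypothetical softc):
 */
#if 0
	KASSERT(mutex_owned(&sc->sc_lock));
#endif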

/*
 * mutex_ownable:
 *
 *	When compiled with DEBUG and LOCKDEBUG defined, ensure that
 *	the mutex is available.  We cannot use !mutex_owned() since
 *	that won't work correctly for spin mutexes.
 */
int
mutex_ownable(kmutex_t *mtx)
{

#ifdef LOCKDEBUG
	MUTEX_TESTLOCK(mtx);
#endif
	return 1;
}

/*
 * mutex_tryenter:
 *
 *	Try to acquire the mutex; return non-zero if we did.
 * [... unchanged lines elided ...]
 */
int
mutex_tryenter(kmutex_t *mtx)
{
	/* [... declarations elided; resumes at line 866 ...] */
	if (MUTEX_SPIN_P(mtx)) {
		MUTEX_SPIN_SPLRAISE(mtx);
#ifdef FULL
		if (MUTEX_SPINBIT_LOCK_TRY(mtx)) {
			MUTEX_WANTLOCK(mtx);
			MUTEX_LOCKED(mtx);
			return 1;
/* [... unchanged lines elided; resumes at line 923 in mutex_spin_retry() ...] */
	do {
		if (panicstr != NULL)
			break;
		while (MUTEX_SPINBIT_LOCKED_P(mtx)) {
			SPINLOCK_BACKOFF(count);
#ifdef LOCKDEBUG
			if (SPINLOCK_SPINOUT(spins))
				MUTEX_ABORT(mtx, "spinout");
#endif	/* LOCKDEBUG */
		}
	} while (!MUTEX_SPINBIT_LOCK_TRY(mtx));

	LOCKSTAT_STOP_TIMER(lsflag, spintime);
	LOCKSTAT_EVENT(lsflag, mtx, LB_SPIN_MUTEX | LB_SPIN, 1, spintime);
/* [... unchanged lines elided; resumes at line 942 ...] */
#endif	/* MULTIPROCESSOR */
}
#endif	/* defined(__HAVE_SPIN_MUTEX_STUBS) || defined(FULL) */
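
/*
 * Illustrative sketch, not part of the original file (the device softc and
 * handler are hypothetical): a spin mutex initialized at a hardware IPL
 * serializes thread context against an interrupt handler; mutex_enter()
 * on a spin mutex raises the SPL via MUTEX_SPIN_SPLRAISE() before spinning.
 */
#if 0
static int
example_intr(void *arg)
{
	struct example_softc *sc = arg;

	mutex_enter(&sc->sc_intr_lock);
	/* ... touch state shared with thread context ... */
	mutex_exit(&sc->sc_intr_lock);
	return 1;
}
#endif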