--- version 1.34, 2008/04/11 15:28:34
+++ version 1.43, 2008/05/31 13:31:25

  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- *    must display the following acknowledgement:
- *        This product includes software developed by the NetBSD
- *        Foundation, Inc. and its contributors.
- * 4. Neither the name of The NetBSD Foundation nor the names of its
- *    contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
  *
  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED

@@ -83 +74 @@ __KERNEL_RCSID(0, "$NetBSD$");
 #include <sys/cdefs.h>
 __KERNEL_RCSID(0, "$NetBSD$");

-#include "opt_multiprocessor.h"
-
 #include <sys/param.h>
 #include <sys/proc.h>
 #include <sys/mutex.h>

 #define MUTEX_WANTLOCK(mtx)					\
 	LOCKDEBUG_WANTLOCK(MUTEX_DEBUG_P(mtx), (mtx),		\
-	    (uintptr_t)__builtin_return_address(0), 0)
+	    (uintptr_t)__builtin_return_address(0), false, false)
 #define MUTEX_LOCKED(mtx)					\
-	LOCKDEBUG_LOCKED(MUTEX_DEBUG_P(mtx), (mtx),		\
+	LOCKDEBUG_LOCKED(MUTEX_DEBUG_P(mtx), (mtx), NULL,	\
 	    (uintptr_t)__builtin_return_address(0), 0)
 #define MUTEX_UNLOCKED(mtx)					\
 	LOCKDEBUG_UNLOCKED(MUTEX_DEBUG_P(mtx), (mtx),		\
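
This hunk is mechanical: the lockdebug entry points grew parameters between 1.34 and 1.43, so the MUTEX_* wrappers now pass explicit defaults (false, false for MUTEX_WANTLOCK; NULL for MUTEX_LOCKED). The sketch below, using hypothetical names, shows why the wrapper-macro pattern keeps such interface growth cheap; the parameter names shared/trylock are assumptions read off the call sites, not the real subr_lockdebug.c prototypes.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical debug hook standing in for lockdebug_wantlock(). */
static void
debug_wantlock(bool dodebug, volatile void *lock, uintptr_t where,
    bool shared, bool trylock)
{
	if (dodebug)
		printf("want %p from %#jx shared=%d try=%d\n",
		    (void *)lock, (uintmax_t)where, (int)shared,
		    (int)trylock);
}

/*
 * Call sites use only the wrapper, so when the hook gains a
 * parameter, the default is supplied in exactly one place.
 */
#define DEBUG_WANTLOCK(lock) \
	debug_wantlock(true, (lock), \
	    (uintptr_t)__builtin_return_address(0), false, false)

int
main(void)
{
	int dummy = 0;
	DEBUG_WANTLOCK(&dummy);
	return 0;
}
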
 #define MUTEX_SPIN_SPLRAISE(mtx)				\
 do {								\
-	struct cpu_info *x__ci = curcpu();			\
+	struct cpu_info *x__ci;					\
 	int x__cnt, s;						\
-	x__cnt = x__ci->ci_mtx_count--;				\
 	s = splraiseipl(mtx->mtx_ipl);				\
+	x__ci = curcpu();					\
+	x__cnt = x__ci->ci_mtx_count--;				\
+	__insn_barrier();					\
 	if (x__cnt == MUTEX_COUNT_BIAS)				\
 		x__ci->ci_mtx_oldspl = (s);			\
 } while (/* CONSTCOND */ 0)
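
The MUTEX_SPIN_SPLRAISE reordering is the substantive change here. Reading curcpu() before raising the IPL leaves a window in which the running LWP can be preempted and migrated to another CPU (kernel preemption was being introduced in NetBSD around this time), after which x__ci would point at the wrong CPU's counters. An annotated restatement of the new sequence follows; the rationale comments are editorial, not from the source.

	s = splraiseipl(mtx->mtx_ipl);	/* first: block preemption here */
	x__ci = curcpu();		/* the CPU can no longer change
					   under us, so caching the
					   pointer is now safe */
	x__cnt = x__ci->ci_mtx_count--;	/* track spin mutex nesting */
	__insn_barrier();		/* compiler barrier: keep the
					   curcpu() read and decrement
					   from being reordered around
					   the SPL manipulation */
	if (x__cnt == MUTEX_COUNT_BIAS)	/* outermost spin mutex: */
		x__ci->ci_mtx_oldspl = (s);	/* remember the SPL to
						   restore at the final
						   mutex_spin_exit() */
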
@@ -261 +254 @@ int mutex_onproc(uintptr_t, struct cpu_i
 lockops_t mutex_spin_lockops = {
 	"Mutex",
-	0,
+	LOCKOPS_SPIN,
 	mutex_dump
 };

 lockops_t mutex_adaptive_lockops = {
 	"Mutex",
-	1,
+	LOCKOPS_SLEEP,
 	mutex_dump
 };
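
Note the values do not map one-to-one: the old field reads like a "sleepable?" boolean (spin = 0, adaptive = 1), while the new constants name the lock class outright, which is exactly why the bare integers were fragile. A sketch of the idea, with assumed names (the real definitions live in the NetBSD headers, and the old field name lo_sleeplock is an assumption):

enum lockops_type {
	LOCKOPS_SLEEP,	/* adaptive: the holder may be put to sleep */
	LOCKOPS_SPIN	/* spin: busy-wait at raised SPL, never sleep */
};

struct lockops_sketch {
	const char	*lo_name;
	enum lockops_type lo_type;	/* was: int lo_sleeplock */
	void		(*lo_dump)(volatile void *);
};
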
@@ -313 +306 @@ mutex_dump(volatile void *cookie)
  * generates a lot of machine code in the DIAGNOSTIC case, so
  * we ask the compiler to not inline it.
  */
-#if __GNUC_PREREQ__(3, 0)
-__attribute ((noinline)) __attribute ((noreturn))
-#endif
-void
+void __noinline
 mutex_abort(kmutex_t *mtx, const char *func, const char *msg)
 {

 	LOCKDEBUG_ABORT(mtx, (MUTEX_SPIN_P(mtx) ?
 	    &mutex_spin_lockops : &mutex_adaptive_lockops), func, msg);
-	/* NOTREACHED */
 }

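This hunk swaps an open-coded GCC version check for the __noinline annotation, which <sys/cdefs.h> expands to the attribute on capable compilers and to nothing elsewhere; the visible diff also drops the noreturn attribute along with the /* NOTREACHED */ comment. A minimal sketch of the centralization pattern (illustrative only; the real <sys/cdefs.h> logic is more thorough):

#if defined(__GNUC__) && __GNUC__ >= 3
#define	__noinline	__attribute__((__noinline__))
#else
#define	__noinline	/* nothing */
#endif

/* Consumers annotate definitions without repeating the test. */
void __noinline	demo_abort(void);
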
 /*

@@ -660 +648 @@ mutex_vector_enter(kmutex_t *mtx)
 	 * value of the waiters flag.
 	 *
 	 * 2. The onproc check returns false: the holding LWP is
-	 *    not running.  We now have the oppertunity to check
+	 *    not running.  We now have the opportunity to check
 	 *    if mutex_exit() has blatted the modifications made
 	 *    by MUTEX_SET_WAITERS().
 	 *

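The comment fragment above belongs to a two-case race analysis in mutex_vector_enter(): after a waiter sets the waiters bit, either the holder is still running on a CPU (case 1, so it is guaranteed to see the bit when it releases), or it is not, in which case a concurrent mutex_exit() may already have overwritten ("blatted") the lock word and lost the bit. A heavily simplified, self-contained sketch of the case-2 recovery follows; the lock-word layout and holder_running() stand-in are assumptions, not the real kern_mutex.c internals.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define OWNER_MASK	(~(uintptr_t)1)
#define WAITERS_BIT	((uintptr_t)1)

/* Hypothetical lock word: owner pointer | waiters bit. */
static _Atomic uintptr_t lockword;

/* Stand-in for mutex_onproc(): is the holder on a CPU right now? */
static bool holder_running(uintptr_t owner) { (void)owner; return false; }

static void
waiter_slow_path(void)
{
	uintptr_t old;

	for (;;) {
		old = atomic_load(&lockword);
		if ((old & OWNER_MASK) == 0)
			return;	/* released: go take the lock instead */
		/* Set the waiters bit (the MUTEX_SET_WAITERS() role). */
		if (!atomic_compare_exchange_weak(&lockword, &old,
		    old | WAITERS_BIT))
			continue;	/* lock word changed: re-check */
		if (holder_running(old & OWNER_MASK))
			break;	/* case 1: holder will see the bit */
		/*
		 * Case 2: the holder is off-CPU, so a racing release
		 * may have wiped the bit we just set.  Re-check it
		 * rather than sleeping with nobody left to wake us.
		 */
		if (atomic_load(&lockword) & WAITERS_BIT)
			break;	/* bit survived: safe to sleep */
	}
	/* the real code would sleep on the turnstile here */
}

int
main(void)
{
	waiter_slow_path();	/* lock word is 0: returns at once */
	return 0;
}
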
 mutex_owned(kmutex_t *mtx)
 {

+	if (mtx == NULL)
+		return 0;
 	if (MUTEX_ADAPTIVE_P(mtx))
 		return MUTEX_OWNER(mtx->mtx_owner) == (uintptr_t)curlwp;
 #ifdef FULL
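
The new NULL check makes mutex_owned() safe to call from diagnostic code even when the lock pointer itself may be uninitialized: presumably an assertion such as KASSERT(mutex_owned(mtx)) now fails cleanly rather than faulting inside the check. A minimal user-space sketch of the same defensive-predicate pattern (the kmutex/curlwp stand-ins are hypothetical):

#include <assert.h>
#include <stddef.h>

struct kmutex_sketch {
	const void *owner;	/* stand-in for MUTEX_OWNER(mtx_owner) */
};

static int
mutex_owned_sketch(const struct kmutex_sketch *mtx, const void *curlwp)
{
	if (mtx == NULL)
		return 0;	/* defensive: diagnose, don't fault */
	return mtx->owner == curlwp;
}

int
main(void)
{
	int lwp;
	struct kmutex_sketch m = { &lwp };

	assert(mutex_owned_sketch(&m, &lwp));
	assert(!mutex_owned_sketch(NULL, &lwp));
	return 0;
}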