--- version 1.72, 2018/02/06 07:46:24
+++ version 1.73, 2018/02/25 18:54:29
Line 60: __KERNEL_RCSID(0, "$NetBSD$");

 #include <machine/lock.h>

+#define MUTEX_PANIC_SKIP_SPIN 1
+#define MUTEX_PANIC_SKIP_ADAPTIVE 1

 /*
  * When not running a debug kernel, spin mutexes are not much
  * more than an splraiseipl() and splx() pair.
  */
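The new MUTEX_PANIC_SKIP_SPIN and MUTEX_PANIC_SKIP_ADAPTIVE knobs make the existing "skip locking after a panic" behaviour compile-time selectable; both default to 1, which preserves the previous behaviour. Below is a minimal standalone sketch of the pattern, not the NetBSD code itself: the toy_* names, the PANIC_SKIP knob, and the userland atomics are illustrative assumptions. With the knob set, lock acquisition degrades to a no-op once a panic is in progress, so the panic/dump path cannot wedge on a lock held by a CPU that was stopped mid-critical-section.

#include <stdatomic.h>
#include <stddef.h>

#define PANIC_SKIP 1                    /* toy analogue of MUTEX_PANIC_SKIP_SPIN */

static const char *panicstr;            /* non-NULL once a panic is in progress */

static void
toy_spin_enter(atomic_flag *lock)
{
#if PANIC_SKIP
        if (panicstr != NULL)
                return;                 /* after panic: pretend the lock was taken */
#endif
        while (atomic_flag_test_and_set_explicit(lock, memory_order_acquire))
                continue;               /* spin until the lock is acquired */
}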
Line 489 / Line 492: mutex_vector_enter(kmutex_t *mtx)

 	 * to reduce cache line ping-ponging between CPUs.
 	 */
 	do {
+#if MUTEX_PANIC_SKIP_SPIN
 		if (panicstr != NULL)
 			break;
+#endif
 		while (MUTEX_SPINBIT_LOCKED_P(mtx)) {
 			SPINLOCK_BACKOFF(count);
 #ifdef LOCKDEBUG
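The enter path polls the lock bit with SPINLOCK_BACKOFF() between reads rather than hammering the atomic operation. Here is a minimal sketch of that test-and-test-and-set-with-backoff technique, under the assumption that SPINLOCK_BACKOFF() implements a bounded exponential delay; the toy_* names and the sched_yield() stand-in are illustrative, not kernel code:

#include <stdatomic.h>
#include <sched.h>

#define TOY_BACKOFF_MAX 128

static void
toy_lock(atomic_int *lock)
{
        int count = 1;

        /* Retry the expensive atomic only once the lock looks free. */
        while (atomic_exchange_explicit(lock, 1, memory_order_acquire) != 0) {
                /* Poll with plain loads: the lock's cache line stays in the
                 * shared state instead of ping-ponging on every failed atomic. */
                while (atomic_load_explicit(lock, memory_order_relaxed) != 0) {
                        for (int i = 0; i < count; i++)
                                sched_yield();  /* stand-in for a CPU pause hint */
                        if (count < TOY_BACKOFF_MAX)
                                count += count; /* exponential backoff */
                }
        }
}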
Line 547 / Line 552: mutex_vector_enter(kmutex_t *mtx)

 			owner = mtx->mtx_owner;
 			continue;
 		}
+#if MUTEX_PANIC_SKIP_ADAPTIVE
 		if (__predict_false(panicstr != NULL)) {
 			KPREEMPT_ENABLE(curlwp);
 			return;
 		}
+#endif
 		if (__predict_false(MUTEX_OWNER(owner) == curthread)) {
 			MUTEX_ABORT(mtx, "locking against myself");
 		}
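The "locking against myself" abort catches self-deadlock: adaptive mutexes do not recurse, so finding the current thread in the owner field can only mean a double-acquire. A sketch of the idea, assuming the owner word holds the owning LWP's address as MUTEX_OWNER() suggests (in the real code the word also carries flag bits that MUTEX_OWNER() masks off; the toy_* names are illustrative):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct toy_mutex {
        uintptr_t owner;        /* owning thread's address, 0 when unheld */
};

static void
toy_check_recursion(struct toy_mutex *mtx, uintptr_t curthread)
{
        /* Turn a silent hang into a diagnosable crash. */
        if (mtx->owner == curthread) {
                fprintf(stderr, "toy_mutex %p: locking against myself\n",
                    (void *)mtx);
                abort();
        }
}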
Line 726 / Line 733: mutex_vector_exit(kmutex_t *mtx)

 	if (MUTEX_SPIN_P(mtx)) {
 #ifdef FULL
 		if (__predict_false(!MUTEX_SPINBIT_LOCKED_P(mtx))) {
+#if MUTEX_PANIC_SKIP_SPIN
 			if (panicstr != NULL)
 				return;
+#endif
 			MUTEX_ABORT(mtx, "exiting unheld spin mutex");
 		}
 		MUTEX_UNLOCKED(mtx);
Line 737 / Line 746: mutex_vector_exit(kmutex_t *mtx)

 		return;
 	}

+#if MUTEX_PANIC_SKIP_ADAPTIVE
 	if (__predict_false((uintptr_t)panicstr | cold)) {
 		MUTEX_UNLOCKED(mtx);
 		MUTEX_RELEASE(mtx);
 		return;
 	}
+#endif

 	curthread = (uintptr_t)curlwp;
 	MUTEX_DASSERT(mtx, curthread != 0);
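The exit path folds its two bail-out conditions into a single test: bitwise-ORing the panicstr pointer with the cold flag yields a nonzero value if either is set, costing one test and branch on the common path instead of two. A small sketch of the trick; the variable definitions here are assumptions (panicstr is non-NULL after panic(), cold is nonzero during early boot):

#include <stdbool.h>
#include <stdint.h>

static const char *panicstr;    /* set once panic() has run */
static int cold = 1;            /* cleared when the system is fully up */

static bool
skip_slow_path(void)
{
        /* Same result as (panicstr != NULL || cold != 0), one branch. */
        return ((uintptr_t)panicstr | (uintptr_t)cold) != 0;
}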
Line 932 / Line 943: mutex_spin_retry(kmutex_t *mtx)

 	 * to reduce cache line ping-ponging between CPUs.
 	 */
 	do {
+#if MUTEX_PANIC_SKIP_SPIN
 		if (panicstr != NULL)
 			break;
+#endif
 		while (MUTEX_SPINBIT_LOCKED_P(mtx)) {
 			SPINLOCK_BACKOFF(count);
 #ifdef LOCKDEBUG