Please note that diffs are not public domain; they are subject to the copyright notices on the relevant files. =================================================================== RCS file: /ftp/cvs/cvsroot/src/sys/kern/kern_mutex.c,v rcsdiff: /ftp/cvs/cvsroot/src/sys/kern/kern_mutex.c,v: warning: Unknown phrases like `commitid ...;' are present. retrieving revision 1.2 retrieving revision 1.15 diff -u -p -r1.2 -r1.15 --- src/sys/kern/kern_mutex.c 2007/02/09 21:55:30 1.2 +++ src/sys/kern/kern_mutex.c 2007/07/09 21:10:53 1.15 @@ -1,4 +1,4 @@ -/* $NetBSD: kern_mutex.c,v 1.2 2007/02/09 21:55:30 ad Exp $ */ +/* $NetBSD: kern_mutex.c,v 1.15 2007/07/09 21:10:53 ad Exp $ */ /*- * Copyright (c) 2002, 2006, 2007 The NetBSD Foundation, Inc. @@ -49,7 +49,7 @@ #define __MUTEX_PRIVATE #include -__KERNEL_RCSID(0, "$NetBSD: kern_mutex.c,v 1.2 2007/02/09 21:55:30 ad Exp $"); +__KERNEL_RCSID(0, "$NetBSD: kern_mutex.c,v 1.15 2007/07/09 21:10:53 ad Exp $"); #include #include @@ -120,6 +120,9 @@ do { \ /* * Spin mutex SPL save / restore. */ +#ifndef MUTEX_COUNT_BIAS +#define MUTEX_COUNT_BIAS 0 +#endif #define MUTEX_SPIN_SPLRAISE(mtx) \ do { \ @@ -127,7 +130,7 @@ do { \ int x__cnt, s; \ x__cnt = x__ci->ci_mtx_count--; \ s = splraiseipl(mtx->mtx_ipl); \ - if (x__cnt == 0) \ + if (x__cnt == MUTEX_COUNT_BIAS) \ x__ci->ci_mtx_oldspl = (s); \ } while (/* CONSTCOND */ 0) @@ -136,7 +139,7 @@ do { \ struct cpu_info *x__ci = curcpu(); \ int s = x__ci->ci_mtx_oldspl; \ __insn_barrier(); \ - if (++(x__ci->ci_mtx_count) == 0) \ + if (++(x__ci->ci_mtx_count) == MUTEX_COUNT_BIAS) \ splx(s); \ } while (/* CONSTCOND */ 0) @@ -187,7 +190,7 @@ MUTEX_ACQUIRE(kmutex_t *mtx, uintptr_t c { int rv; rv = MUTEX_CAS(&mtx->mtx_owner, 0UL, curthread); - MUTEX_RECEIVE(); + MUTEX_RECEIVE(mtx); return rv; } @@ -196,16 +199,22 @@ MUTEX_SET_WAITERS(kmutex_t *mtx, uintptr { int rv; rv = MUTEX_CAS(&mtx->mtx_owner, owner, owner | MUTEX_BIT_WAITERS); - MUTEX_RECEIVE(); + MUTEX_RECEIVE(mtx); return rv; } static inline void MUTEX_RELEASE(kmutex_t *mtx) { - MUTEX_GIVE(); + MUTEX_GIVE(mtx); mtx->mtx_owner = 0; } + +static inline void +MUTEX_CLEAR_WAITERS(kmutex_t *mtx) +{ + /* nothing */ +} #endif /* __HAVE_SIMPLE_MUTEXES */ /* @@ -218,18 +227,19 @@ MUTEX_RELEASE(kmutex_t *mtx) #endif #ifndef __HAVE_MUTEX_STUBS -__strong_alias(mutex_enter, mutex_vector_enter); -__strong_alias(mutex_exit, mutex_vector_exit); +__strong_alias(mutex_enter,mutex_vector_enter); +__strong_alias(mutex_exit,mutex_vector_exit); #endif #ifndef __HAVE_SPIN_MUTEX_STUBS -__strong_alias(mutex_spin_enter, mutex_vector_enter); -__strong_alias(mutex_spin_exit, mutex_vector_exit); +__strong_alias(mutex_spin_enter,mutex_vector_enter); +__strong_alias(mutex_spin_exit,mutex_vector_exit); #endif void mutex_abort(kmutex_t *, const char *, const char *); void mutex_dump(volatile void *); int mutex_onproc(uintptr_t, struct cpu_info **); +static struct lwp *mutex_owner(wchan_t); lockops_t mutex_spin_lockops = { "Mutex", @@ -243,6 +253,14 @@ lockops_t mutex_adaptive_lockops = { mutex_dump }; +syncobj_t mutex_syncobj = { + SOBJ_SLEEPQ_SORTED, + turnstile_unsleep, + turnstile_changepri, + sleepq_lendpri, + mutex_owner, +}; + /* * mutex_dump: * @@ -261,15 +279,20 @@ mutex_dump(volatile void *cookie) /* * mutex_abort: * - * Dump information about an error and panic the system. + * Dump information about an error and panic the system. This + * generates a lot of machine code in the DIAGNOSTIC case, so + * we ask the compiler to not inline it. 
*/ -__attribute ((noinline)) __attribute ((noreturn)) void + +#if __GNUC_PREREQ__(3, 0) +__attribute ((noinline)) __attribute ((noreturn)) +#endif +void mutex_abort(kmutex_t *mtx, const char *func, const char *msg) { LOCKDEBUG_ABORT(MUTEX_GETID(mtx), mtx, (MUTEX_SPIN_P(mtx) ? - &mutex_spin_lockops : &mutex_adaptive_lockops), - __FUNCTION__, msg); + &mutex_spin_lockops : &mutex_adaptive_lockops), func, msg); /* NOTREACHED */ } @@ -289,13 +312,25 @@ mutex_init(kmutex_t *mtx, kmutex_type_t memset(mtx, 0, sizeof(*mtx)); - if (type == MUTEX_DRIVER) + switch (type) { + case MUTEX_ADAPTIVE: + case MUTEX_DEFAULT: + KASSERT(ipl == IPL_NONE); + break; + case MUTEX_DRIVER: type = (ipl == IPL_NONE ? MUTEX_ADAPTIVE : MUTEX_SPIN); + break; + default: + break; + } switch (type) { + case MUTEX_NODEBUG: + id = LOCKDEBUG_ALLOC(mtx, NULL); + MUTEX_INITIALIZE_SPIN(mtx, id, ipl); + break; case MUTEX_ADAPTIVE: case MUTEX_DEFAULT: - KASSERT(ipl == IPL_NONE); id = LOCKDEBUG_ALLOC(mtx, &mutex_adaptive_lockops); MUTEX_INITIALIZE_ADAPTIVE(mtx, id); break; @@ -334,8 +369,7 @@ mutex_destroy(kmutex_t *mtx) * * Return true if an adaptive mutex owner is running on a CPU in the * system. If the target is waiting on the kernel big lock, then we - * return false immediately. This is necessary to avoid deadlock - * against the big lock. + * must release it. This is necessary to avoid deadlock. * * Note that we can't use the mutex owner field as an LWP pointer. We * don't have full control over the timing of our execution, and so the @@ -353,23 +387,23 @@ mutex_onproc(uintptr_t owner, struct cpu return 0; l = (struct lwp *)MUTEX_OWNER(owner); - if ((ci = *cip) != NULL && ci->ci_curlwp == l) { - mb_read(); /* XXXSMP Necessary? */ - return ci->ci_biglock_wanted != l; - } - - for (CPU_INFO_FOREACH(cii, ci)) { - if (ci->ci_curlwp == l) { - *cip = ci; - mb_read(); /* XXXSMP Necessary? */ - return ci->ci_biglock_wanted != l; - } - } + /* See if the target is running on a CPU somewhere. */ + if ((ci = *cip) != NULL && ci->ci_curlwp == l) + goto run; + for (CPU_INFO_FOREACH(cii, ci)) + if (ci->ci_curlwp == l) + goto run; + /* No: it may be safe to block now. */ *cip = NULL; return 0; + + run: + /* Target is running; do we need to block? */ + *cip = ci; + return ci->ci_biglock_wanted != l; } -#endif +#endif /* MULTIPROCESSOR */ /* * mutex_vector_enter: @@ -575,7 +609,7 @@ mutex_vector_enter(kmutex_t *mtx) * completes before the modification of curlwp becomes * visible to this CPU. * - * o cpu_switch() posts a store fence before setting curlwp + * o mi_switch() posts a store fence before setting curlwp * and before resuming execution of an LWP. * * o _kernel_lock() posts a store fence before setting @@ -618,8 +652,8 @@ mutex_vector_enter(kmutex_t *mtx) * If the waiters bit is not set it's unsafe to go asleep, * as we might never be awoken. 
*/ - mb_read(); - if (mutex_onproc(owner, &ci) || !MUTEX_HAS_WAITERS(mtx)) { + if ((mb_read(), mutex_onproc(owner, &ci)) || + (mb_read(), !MUTEX_HAS_WAITERS(mtx))) { turnstile_exit(mtx); continue; } @@ -627,12 +661,10 @@ mutex_vector_enter(kmutex_t *mtx) LOCKSTAT_START_TIMER(lsflag, slptime); - turnstile_block(ts, TS_WRITER_Q, mtx); + turnstile_block(ts, TS_WRITER_Q, mtx, &mutex_syncobj); LOCKSTAT_STOP_TIMER(lsflag, slptime); LOCKSTAT_COUNT(slpcnt, 1); - - turnstile_unblock(); } LOCKSTAT_EVENT(lsflag, mtx, LB_ADAPTIVE_MUTEX | LB_SLEEP1, @@ -667,7 +699,7 @@ mutex_vector_exit(kmutex_t *mtx) return; } - if (__predict_false(panicstr != NULL) || __predict_false(cold)) { + if (__predict_false((uintptr_t)panicstr | cold)) { MUTEX_UNLOCKED(mtx); MUTEX_RELEASE(mtx); return; @@ -678,6 +710,23 @@ mutex_vector_exit(kmutex_t *mtx) MUTEX_ASSERT(mtx, MUTEX_OWNER(mtx->mtx_owner) == curthread); MUTEX_UNLOCKED(mtx); +#ifdef LOCKDEBUG + /* + * Avoid having to take the turnstile chain lock every time + * around. Raise the priority level to splhigh() in order + * to disable preemption and so make the following atomic. + */ + { + int s = splhigh(); + if (!MUTEX_HAS_WAITERS(mtx)) { + MUTEX_RELEASE(mtx); + splx(s); + return; + } + splx(s); + } +#endif + /* * Get this lock's turnstile. This gets the interlock on * the sleep queue. Once we have that, we can clear the @@ -696,10 +745,34 @@ mutex_vector_exit(kmutex_t *mtx) } } +#ifndef __HAVE_SIMPLE_MUTEXES +/* + * mutex_wakeup: + * + * Support routine for mutex_exit() that wakes up all waiters. + * We assume that the mutex has been released, but it need not + * be. + */ +void +mutex_wakeup(kmutex_t *mtx) +{ + turnstile_t *ts; + + ts = turnstile_lookup(mtx); + if (ts == NULL) { + turnstile_exit(mtx); + return; + } + MUTEX_CLEAR_WAITERS(mtx); + turnstile_wakeup(ts, TS_WRITER_Q, TS_WAITERS(ts, TS_WRITER_Q), NULL); +} +#endif /* !__HAVE_SIMPLE_MUTEXES */ + /* * mutex_owned: * - * Return true if the current thread holds the mutex. + * Return true if the current LWP (adaptive) or CPU (spin) + * holds the mutex. */ int mutex_owned(kmutex_t *mtx) @@ -717,11 +790,13 @@ mutex_owned(kmutex_t *mtx) /* * mutex_owner: * - * Return the current owner of an adaptive mutex. + * Return the current owner of an adaptive mutex. Used for + * priority inheritance. */ -struct lwp * -mutex_owner(kmutex_t *mtx) +static struct lwp * +mutex_owner(wchan_t obj) { + kmutex_t *mtx = (void *)(uintptr_t)obj; /* discard qualifiers */ MUTEX_ASSERT(mtx, MUTEX_ADAPTIVE_P(mtx)); return (struct lwp *)MUTEX_OWNER(mtx->mtx_owner); @@ -737,8 +812,6 @@ mutex_tryenter(kmutex_t *mtx) { uintptr_t curthread; - MUTEX_WANTLOCK(mtx); - /* * Handle spin mutexes. */ @@ -746,11 +819,13 @@ mutex_tryenter(kmutex_t *mtx) MUTEX_SPIN_SPLRAISE(mtx); #ifdef FULL if (__cpu_simple_lock_try(&mtx->mtx_lock)) { + MUTEX_WANTLOCK(mtx); MUTEX_LOCKED(mtx); return 1; } MUTEX_SPIN_SPLRESTORE(mtx); #else + MUTEX_WANTLOCK(mtx); MUTEX_LOCKED(mtx); return 1; #endif @@ -758,6 +833,7 @@ mutex_tryenter(kmutex_t *mtx) curthread = (uintptr_t)curlwp; MUTEX_ASSERT(mtx, curthread != 0); if (MUTEX_ACQUIRE(mtx, curthread)) { + MUTEX_WANTLOCK(mtx); MUTEX_LOCKED(mtx); MUTEX_DASSERT(mtx, MUTEX_OWNER(mtx->mtx_owner) == curthread); @@ -818,47 +894,3 @@ mutex_spin_retry(kmutex_t *mtx) #endif /* MULTIPROCESSOR */ } #endif /* defined(__HAVE_SPIN_MUTEX_STUBS) || defined(FULL) */ - -/* - * sched_lock_idle: - * - * XXX Ugly hack for cpu_switch(). 
- */ -void -sched_lock_idle(void) -{ -#ifdef FULL - kmutex_t *mtx = &sched_mutex; - - curcpu()->ci_mtx_count--; - - if (!__cpu_simple_lock_try(&mtx->mtx_lock)) { - mutex_spin_retry(mtx); - return; - } - - MUTEX_LOCKED(mtx); -#else - curcpu()->ci_mtx_count--; -#endif /* FULL */ -} - -/* - * sched_unlock_idle: - * - * XXX Ugly hack for cpu_switch(). - */ -void -sched_unlock_idle(void) -{ -#ifdef FULL - kmutex_t *mtx = &sched_mutex; - - if (mtx->mtx_lock != __SIMPLELOCK_LOCKED) - MUTEX_ABORT(mtx, "sched_unlock_idle"); - - MUTEX_UNLOCKED(mtx); - __cpu_simple_unlock(&mtx->mtx_lock); -#endif /* FULL */ - curcpu()->ci_mtx_count++; -}
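
The MUTEX_COUNT_BIAS change near the top of this diff alters how the spin-mutex SPL save/restore macros decide that they are handling the outermost held spin mutex. The fragment below is a minimal user-space model of that bookkeeping, not NetBSD kernel code: all names here (cpu_state, fake_splraise, spin_enter, spin_exit) are made up for illustration. It shows the pre-mutex SPL being recorded only when the per-CPU count sits at the bias on acquire, and restored only when the count climbs back to the bias on release, so nested spin mutexes leave the SPL raised until the last one is dropped.

/* Illustrative sketch only; not the kernel's types or functions. */
#include <stdio.h>

#define MUTEX_COUNT_BIAS 0	/* some ports bias the count away from 0 */

struct cpu_state {
	int mtx_count;		/* held spin mutexes, counted down from the bias */
	int mtx_oldspl;		/* SPL to restore when the last one is released */
	int cur_spl;		/* current interrupt priority level */
};

static int
fake_splraise(struct cpu_state *ci, int ipl)
{
	int s = ci->cur_spl;
	if (ipl > ci->cur_spl)
		ci->cur_spl = ipl;
	return s;
}

static void
spin_enter(struct cpu_state *ci, int ipl)
{
	int cnt = ci->mtx_count--;		/* like MUTEX_SPIN_SPLRAISE */
	int s = fake_splraise(ci, ipl);
	if (cnt == MUTEX_COUNT_BIAS)		/* outermost spin mutex only */
		ci->mtx_oldspl = s;
}

static void
spin_exit(struct cpu_state *ci)
{
	if (++ci->mtx_count == MUTEX_COUNT_BIAS)	/* last one out */
		ci->cur_spl = ci->mtx_oldspl;		/* like splx(s) */
}

int
main(void)
{
	struct cpu_state ci = { MUTEX_COUNT_BIAS, 0, 0 };

	spin_enter(&ci, 6);	/* outer mutex: saves SPL 0 */
	spin_enter(&ci, 7);	/* nested mutex: no save */
	spin_exit(&ci);		/* inner release: SPL stays raised */
	printf("after inner exit: spl=%d\n", ci.cur_spl);	/* 7 */
	spin_exit(&ci);		/* outer release: restores SPL 0 */
	printf("after outer exit: spl=%d\n", ci.cur_spl);	/* 0 */
	return 0;
}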
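
The rework of mutex_onproc() in this diff feeds the decision mutex_vector_enter() makes between spinning and sleeping on a contended adaptive mutex. The sketch below is a user-space illustration of that decision under made-up types (fake_cpu, fake_lwp), not the kernel's API: keep spinning while the owner is seen running on some CPU, but fall back to blocking on the turnstile when the owner is off-CPU or is itself waiting for the kernel big lock, since spinning on such an owner risks deadlock.

/* Illustrative sketch only; names are not the kernel's. */
#include <stdbool.h>
#include <stdio.h>

#define NCPU 4

struct fake_lwp { int id; };

struct fake_cpu {
	struct fake_lwp *curlwp;		/* thread running on this CPU */
	struct fake_lwp *biglock_wanted;	/* non-NULL while it spins for the big lock */
};

/*
 * Return true if "owner" is running somewhere and it is worth spinning
 * for it; cache the CPU it was last seen on in *hint.
 */
static bool
owner_running(struct fake_lwp *owner, struct fake_cpu cpu[NCPU],
    struct fake_cpu **hint)
{
	struct fake_cpu *ci;
	int i;

	/* Check the cached CPU first: the owner rarely migrates. */
	if ((ci = *hint) != NULL && ci->curlwp == owner)
		goto run;
	for (i = 0; i < NCPU; i++) {
		ci = &cpu[i];
		if (ci->curlwp == owner)
			goto run;
	}
	*hint = NULL;
	return false;		/* off-CPU: sleep instead of spinning */
 run:
	*hint = ci;
	/* Running, but don't spin on an owner stuck on the big lock. */
	return ci->biglock_wanted != owner;
}

int
main(void)
{
	struct fake_lwp owner = { 1 };
	struct fake_cpu cpu[NCPU] = { { &owner, NULL } };
	struct fake_cpu *hint = NULL;

	printf("spin? %d\n", owner_running(&owner, cpu, &hint));	/* 1: spin */
	cpu[0].biglock_wanted = &owner;
	printf("spin? %d\n", owner_running(&owner, cpu, &hint));	/* 0: sleep */
	return 0;
}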