Please note that diffs are not public domain; they are subject to the copyright notices on the relevant files.

===================================================================
RCS file: /ftp/cvs/cvsroot/src/sys/kern/kern_mutex.c,v
rcsdiff: /ftp/cvs/cvsroot/src/sys/kern/kern_mutex.c,v: warning: Unknown phrases like `commitid ...;' are present.
retrieving revision 1.27
retrieving revision 1.41
diff -u -p -r1.27 -r1.41
--- src/sys/kern/kern_mutex.c	2007/12/24 14:57:56	1.27
+++ src/sys/kern/kern_mutex.c	2008/05/19 17:06:02	1.41
@@ -1,7 +1,7 @@
-/*	$NetBSD: kern_mutex.c,v 1.27 2007/12/24 14:57:56 ad Exp $	*/
+/*	$NetBSD: kern_mutex.c,v 1.41 2008/05/19 17:06:02 ad Exp $	*/
 
 /*-
- * Copyright (c) 2002, 2006, 2007 The NetBSD Foundation, Inc.
+ * Copyright (c) 2002, 2006, 2007, 2008 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
@@ -15,13 +15,6 @@
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- *    must display the following acknowledgement:
- *        This product includes software developed by the NetBSD
- *        Foundation, Inc. and its contributors.
- * 4. Neither the name of The NetBSD Foundation nor the names of its
- *    contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
  *
  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
@@ -47,9 +40,7 @@
 #define	__MUTEX_PRIVATE
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_mutex.c,v 1.27 2007/12/24 14:57:56 ad Exp $");
-
-#include "opt_multiprocessor.h"
+__KERNEL_RCSID(0, "$NetBSD: kern_mutex.c,v 1.41 2008/05/19 17:06:02 ad Exp $");
 
 #include <sys/param.h>
 #include <sys/proc.h>
@@ -61,9 +52,13 @@ __KERNEL_RCSID(0, "$NetBSD: kern_mutex.c
 #include <sys/kernel.h>
 #include <sys/intr.h>
 #include <sys/lock.h>
+#include <sys/atomic.h>
+#include <sys/pool.h>
 
 #include <dev/lockstat.h>
 
+#include <machine/lock.h>
+
 /*
  * When not running a debug kernel, spin mutexes are not much
  * more than an splraiseipl() and splx() pair.
@@ -79,7 +74,7 @@ __KERNEL_RCSID(0, "$NetBSD: kern_mutex.c
 
 #define	MUTEX_WANTLOCK(mtx)					\
     LOCKDEBUG_WANTLOCK(MUTEX_DEBUG_P(mtx), (mtx),		\
-        (uintptr_t)__builtin_return_address(0), 0)
+        (uintptr_t)__builtin_return_address(0), false, false)
 #define	MUTEX_LOCKED(mtx)					\
     LOCKDEBUG_LOCKED(MUTEX_DEBUG_P(mtx), (mtx),			\
         (uintptr_t)__builtin_return_address(0), 0)
@@ -126,10 +121,12 @@ do {						\
 
 #define	MUTEX_SPIN_SPLRAISE(mtx)				\
 do {									\
-	struct cpu_info *x__ci = curcpu();				\
+	struct cpu_info *x__ci;						\
 	int x__cnt, s;							\
-	x__cnt = x__ci->ci_mtx_count--;					\
 	s = splraiseipl(mtx->mtx_ipl);					\
+	x__ci = curcpu();						\
+	x__cnt = x__ci->ci_mtx_count--;					\
+	__insn_barrier();						\
 	if (x__cnt == MUTEX_COUNT_BIAS)					\
 		x__ci->ci_mtx_oldspl = (s);				\
 } while (/* CONSTCOND */ 0)
@@ -275,6 +272,18 @@ syncobj_t mutex_syncobj = {
 	(void *)mutex_owner,
 };
 
+/* Mutex cache */
+#define	MUTEX_OBJ_MAGIC	0x5aa3c85d
+struct kmutexobj {
+	kmutex_t	mo_lock;
+	u_int		mo_magic;
+	u_int		mo_refcnt;
+};
+
+static int	mutex_obj_ctor(void *, void *, int);
+
+static pool_cache_t	mutex_obj_cache;
+
 /*
  * mutex_dump:
  *
@@ -506,16 +515,9 @@ mutex_vector_enter(kmutex_t *mtx)
 	MUTEX_ASSERT(mtx, curthread != 0);
 	MUTEX_WANTLOCK(mtx);
 
-#ifdef LOCKDEBUG
 	if (panicstr == NULL) {
-		simple_lock_only_held(NULL, "mutex_enter");
-#ifdef MULTIPROCESSOR
 		LOCKDEBUG_BARRIER(&kernel_lock, 1);
-#else
-		LOCKDEBUG_BARRIER(NULL, 1);
-#endif
 	}
-#endif
 
 	LOCKSTAT_ENTER(lsflag);
 
@@ -524,8 +526,7 @@ mutex_vector_enter(kmutex_t *mtx)
 	 * determine that the owner is not running on a processor,
 	 * then we stop spinning, and sleep instead.
 	 */
-	for (;;) {
-		owner = mtx->mtx_owner;
+	for (owner = mtx->mtx_owner;;) {
 		if (!MUTEX_OWNED(owner)) {
 			/*
 			 * Mutex owner clear could mean two things:
@@ -538,6 +539,7 @@ mutex_vector_enter(kmutex_t *mtx)
 			 */
 			if (MUTEX_ACQUIRE(mtx, curthread))
 				break;
+			owner = mtx->mtx_owner;
 			continue;
 		}
 
@@ -556,10 +558,10 @@ mutex_vector_enter(kmutex_t *mtx)
 		LOCKSTAT_START_TIMER(lsflag, spintime);
 		count = SPINLOCK_BACKOFF_MIN;
 		for (;;) {
+			SPINLOCK_BACKOFF(count);
 			owner = mtx->mtx_owner;
 			if (!mutex_onproc(owner, &ci))
 				break;
-			SPINLOCK_BACKOFF(count);
 		}
 		LOCKSTAT_STOP_TIMER(lsflag, spintime);
 		LOCKSTAT_COUNT(spincnt, 1);
@@ -577,6 +579,7 @@ mutex_vector_enter(kmutex_t *mtx)
 		 */
 		if (!MUTEX_SET_WAITERS(mtx, owner)) {
 			turnstile_exit(mtx);
+			owner = mtx->mtx_owner;
 			continue;
 		}
 
@@ -650,7 +653,7 @@ mutex_vector_enter(kmutex_t *mtx)
 		 *    value of the waiters flag.
 		 *
 		 * 2. The onproc check returns false: the holding LWP is
-		 *    not running.  We now have the oppertunity to check
+		 *    not running.  We now have the opportunity to check
 		 *    if mutex_exit() has blatted the modifications made
 		 *    by MUTEX_SET_WAITERS().
		 *
@@ -677,6 +680,7 @@ mutex_vector_enter(kmutex_t *mtx)
 		if ((membar_consumer(), mutex_onproc(owner, &ci)) ||
 		    (membar_consumer(), !MUTEX_HAS_WAITERS(mtx))) {
 			turnstile_exit(mtx);
+			owner = mtx->mtx_owner;
 			continue;
 		}
 #endif	/* MULTIPROCESSOR */
@@ -687,6 +691,8 @@ mutex_vector_enter(kmutex_t *mtx)
 
 		LOCKSTAT_STOP_TIMER(lsflag, slptime);
 		LOCKSTAT_COUNT(slpcnt, 1);
+
+		owner = mtx->mtx_owner;
 	}
 
 	LOCKSTAT_EVENT(lsflag, mtx, LB_ADAPTIVE_MUTEX | LB_SLEEP1,
@@ -712,8 +718,11 @@ mutex_vector_exit(kmutex_t *mtx)
 
 	if (MUTEX_SPIN_P(mtx)) {
 #ifdef FULL
-		if (!__SIMPLELOCK_LOCKED_P(&mtx->mtx_lock))
+		if (__predict_false(!__SIMPLELOCK_LOCKED_P(&mtx->mtx_lock))) {
+			if (panicstr != NULL)
+				return;
 			MUTEX_ABORT(mtx, "exiting unheld spin mutex");
+		}
 		MUTEX_UNLOCKED(mtx);
 		__cpu_simple_unlock(&mtx->mtx_lock);
 #endif
@@ -800,6 +809,8 @@ int
 mutex_owned(kmutex_t *mtx)
 {
 
+	if (mtx == NULL)
+		return 0;
 	if (MUTEX_ADAPTIVE_P(mtx))
 		return MUTEX_OWNER(mtx->mtx_owner) == (uintptr_t)curlwp;
 #ifdef FULL
@@ -915,3 +926,88 @@ mutex_spin_retry(kmutex_t *mtx)
 #endif	/* MULTIPROCESSOR */
 }
 #endif	/* defined(__HAVE_SPIN_MUTEX_STUBS) || defined(FULL) */
+
+/*
+ * mutex_obj_init:
+ *
+ *	Initialize the mutex object store.
+ */
+void
+mutex_obj_init(void)
+{
+
+	mutex_obj_cache = pool_cache_init(sizeof(struct kmutexobj),
+	    coherency_unit, 0, 0, "mutex", NULL, IPL_NONE, mutex_obj_ctor,
+	    NULL, NULL);
+}
+
+/*
+ * mutex_obj_ctor:
+ *
+ *	Initialize a new lock for the cache.
+ */
+static int
+mutex_obj_ctor(void *arg, void *obj, int flags)
+{
+	struct kmutexobj * mo = obj;
+
+	mo->mo_magic = MUTEX_OBJ_MAGIC;
+
+	return 0;
+}
+
+/*
+ * mutex_obj_alloc:
+ *
+ *	Allocate a single lock object.
+ */
+kmutex_t *
+mutex_obj_alloc(kmutex_type_t type, int ipl)
+{
+	struct kmutexobj *mo;
+
+	mo = pool_cache_get(mutex_obj_cache, PR_WAITOK);
+	mutex_init(&mo->mo_lock, type, ipl);
+	mo->mo_refcnt = 1;
+
+	return (kmutex_t *)mo;
+}
+
+/*
+ * mutex_obj_hold:
+ *
+ *	Add a single reference to a lock object.  A reference to the object
+ *	must already be held, and must be held across this call.
+ */
+void
+mutex_obj_hold(kmutex_t *lock)
+{
+	struct kmutexobj *mo = (struct kmutexobj *)lock;
+
+	KASSERT(mo->mo_magic == MUTEX_OBJ_MAGIC);
+	KASSERT(mo->mo_refcnt > 0);
+
+	atomic_inc_uint(&mo->mo_refcnt);
+}
+
+/*
+ * mutex_obj_free:
+ *
+ *	Drop a reference from a lock object.  If the last reference is being
+ *	dropped, free the object and return true.  Otherwise, return false.
+ */
+bool
+mutex_obj_free(kmutex_t *lock)
+{
+	struct kmutexobj *mo = (struct kmutexobj *)lock;
+
+	KASSERT(mo->mo_magic == MUTEX_OBJ_MAGIC);
+	KASSERT(mo->mo_refcnt > 0);
+
+	if (atomic_dec_uint_nv(&mo->mo_refcnt) > 0) {
+		return false;
+	}
+	mutex_destroy(&mo->mo_lock);
+	pool_cache_put(mutex_obj_cache, mo);
+	return true;
+}
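
For context, the mutex_obj_* routines added at the end of this diff give the kernel dynamically allocated, reference-counted mutexes backed by a pool cache: mutex_obj_alloc() returns a lock with one reference, mutex_obj_hold() adds a reference (the caller must already hold one across the call, per the comment above), and mutex_obj_free() drops a reference, destroying and freeing the lock when the last one goes.  A minimal sketch of how a subsystem might consume that API follows.  The struct frob type, the frob_* function names, and the kmem_zalloc()-based allocation are illustrative assumptions, not part of this change, and mutex_obj_init() is assumed to have already run during kernel bootstrap.

/*
 * Hypothetical consumer of the mutex_obj_* API above: several objects
 * share one reference-counted lock.  Names here (struct frob, frob_*)
 * are invented for illustration only.
 */
#include <sys/kmem.h>
#include <sys/mutex.h>

struct frob {
	kmutex_t	*f_lock;	/* shared, reference counted */
};

struct frob *
frob_create(void)
{
	struct frob *f;

	f = kmem_zalloc(sizeof(*f), KM_SLEEP);
	/* New adaptive mutex, with the single reference held by 'f'. */
	f->f_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);
	return f;
}

struct frob *
frob_share(struct frob *of)
{
	struct frob *f;

	f = kmem_zalloc(sizeof(*f), KM_SLEEP);
	/*
	 * The caller's reference (via 'of') is held across this call,
	 * as mutex_obj_hold() requires.
	 */
	mutex_obj_hold(of->f_lock);
	f->f_lock = of->f_lock;
	return f;
}

void
frob_destroy(struct frob *f)
{

	/* Dropping the last reference destroys and frees the mutex. */
	mutex_obj_free(f->f_lock);
	kmem_free(f, sizeof(*f));
}

The reference count makes the lock's lifetime independent of any single consumer, which is the point of the mo_refcnt field and of the bool return from mutex_obj_free(): the caller learns whether it dropped the final reference.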