
Annotation of src/sys/kern/kern_mutex.c, Revision 1.21.2.2

1.21.2.2! mjf         1: /*     $NetBSD: kern_mutex.c,v 1.26 2007/12/06 01:18:46 ad Exp $       */
1.2       ad          2:
                      3: /*-
                      4:  * Copyright (c) 2002, 2006, 2007 The NetBSD Foundation, Inc.
                      5:  * All rights reserved.
                      6:  *
                      7:  * This code is derived from software contributed to The NetBSD Foundation
                      8:  * by Jason R. Thorpe and Andrew Doran.
                      9:  *
                     10:  * Redistribution and use in source and binary forms, with or without
                     11:  * modification, are permitted provided that the following conditions
                     12:  * are met:
                     13:  * 1. Redistributions of source code must retain the above copyright
                     14:  *    notice, this list of conditions and the following disclaimer.
                     15:  * 2. Redistributions in binary form must reproduce the above copyright
                     16:  *    notice, this list of conditions and the following disclaimer in the
                     17:  *    documentation and/or other materials provided with the distribution.
                     18:  * 3. All advertising materials mentioning features or use of this software
                     19:  *    must display the following acknowledgement:
                     20:  *     This product includes software developed by the NetBSD
                     21:  *     Foundation, Inc. and its contributors.
                     22:  * 4. Neither the name of The NetBSD Foundation nor the names of its
                     23:  *    contributors may be used to endorse or promote products derived
                     24:  *    from this software without specific prior written permission.
                     25:  *
                     26:  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
                     27:  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
                     28:  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
                     29:  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
                     30:  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
                     31:  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
                     32:  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
                     33:  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
                     34:  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
                     35:  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
                     36:  * POSSIBILITY OF SUCH DAMAGE.
                     37:  */
                     38:
                     39: /*
                     40:  * Kernel mutex implementation, modeled after those found in Solaris,
                     41:  * a description of which can be found in:
                     42:  *
                     43:  *     Solaris Internals: Core Kernel Architecture, Jim Mauro and
                     44:  *         Richard McDougall.
                     45:  */
                     46:
                     47: #define        __MUTEX_PRIVATE
                     48:
                     49: #include <sys/cdefs.h>
1.21.2.2! mjf        50: __KERNEL_RCSID(0, "$NetBSD: kern_mutex.c,v 1.26 2007/12/06 01:18:46 ad Exp $");
1.18      dsl        51:
                     52: #include "opt_multiprocessor.h"
1.2       ad         53:
                     54: #include <sys/param.h>
                     55: #include <sys/proc.h>
                     56: #include <sys/mutex.h>
                     57: #include <sys/sched.h>
                     58: #include <sys/sleepq.h>
                     59: #include <sys/systm.h>
                     60: #include <sys/lockdebug.h>
                     61: #include <sys/kernel.h>
1.21.2.2! mjf        62: #include <sys/atomic.h>
        !            63: #include <sys/intr.h>
1.2       ad         64:
                     65: #include <dev/lockstat.h>
                     66:
                     67: /*
                     68:  * When not running a debug kernel, spin mutexes are not much
                     69:  * more than an splraiseipl() and splx() pair.
                     70:  */
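
                          /*
                          * A minimal sketch of that claim (illustrative only; the real code
                          * is in the MD stubs and in the MUTEX_SPIN_SPLRAISE() /
                          * MUTEX_SPIN_SPLRESTORE() macros below):
                          *
                          *      mutex_spin_enter(mtx):  s = splraiseipl(mtx->mtx_ipl);
                          *                              (s remembered for the outermost lock)
                          *      mutex_spin_exit(mtx):   splx(s) once the last spin mutex
                          *                              held by this CPU is released
                          */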
                     71:
                     72: #if defined(DIAGNOSTIC) || defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
                     73: #define        FULL
                     74: #endif
                     75:
                     76: /*
                     77:  * Debugging support.
                     78:  */
                     79:
                     80: #define        MUTEX_WANTLOCK(mtx)                                     \
1.21.2.2! mjf        81:     LOCKDEBUG_WANTLOCK(MUTEX_DEBUG_P(mtx), (mtx),              \
1.2       ad         82:         (uintptr_t)__builtin_return_address(0), 0)
                     83: #define        MUTEX_LOCKED(mtx)                                       \
1.21.2.2! mjf        84:     LOCKDEBUG_LOCKED(MUTEX_DEBUG_P(mtx), (mtx),                        \
1.2       ad         85:         (uintptr_t)__builtin_return_address(0), 0)
                     86: #define        MUTEX_UNLOCKED(mtx)                                     \
1.21.2.2! mjf        87:     LOCKDEBUG_UNLOCKED(MUTEX_DEBUG_P(mtx), (mtx),              \
1.2       ad         88:         (uintptr_t)__builtin_return_address(0), 0)
                     89: #define        MUTEX_ABORT(mtx, msg)                                   \
1.17      ad         90:     mutex_abort(mtx, __func__, msg)
1.2       ad         91:
                     92: #if defined(LOCKDEBUG)
                     93:
                     94: #define        MUTEX_DASSERT(mtx, cond)                                \
                     95: do {                                                           \
                     96:        if (!(cond))                                            \
                     97:                MUTEX_ABORT(mtx, "assertion failed: " #cond);   \
                     98: } while (/* CONSTCOND */ 0);
                     99:
                    100: #else  /* LOCKDEBUG */
                    101:
                    102: #define        MUTEX_DASSERT(mtx, cond)        /* nothing */
                    103:
                    104: #endif /* LOCKDEBUG */
                    105:
                    106: #if defined(DIAGNOSTIC)
                    107:
                    108: #define        MUTEX_ASSERT(mtx, cond)                                 \
                    109: do {                                                           \
                    110:        if (!(cond))                                            \
                    111:                MUTEX_ABORT(mtx, "assertion failed: " #cond);   \
                    112: } while (/* CONSTCOND */ 0)
                    113:
                    114: #else  /* DIAGNOSTIC */
                    115:
                    116: #define        MUTEX_ASSERT(mtx, cond) /* nothing */
                    117:
                    118: #endif /* DIAGNOSTIC */
                    119:
                    120: /*
                    121:  * Spin mutex SPL save / restore.
                    122:  */
1.12      matt      123: #ifndef MUTEX_COUNT_BIAS
                    124: #define        MUTEX_COUNT_BIAS        0
                    125: #endif
1.2       ad        126:
                    127: #define        MUTEX_SPIN_SPLRAISE(mtx)                                        \
                    128: do {                                                                   \
                    129:        struct cpu_info *x__ci = curcpu();                              \
                    130:        int x__cnt, s;                                                  \
                    131:        x__cnt = x__ci->ci_mtx_count--;                                 \
                    132:        s = splraiseipl(mtx->mtx_ipl);                                  \
1.12      matt      133:        if (x__cnt == MUTEX_COUNT_BIAS)                                 \
1.2       ad        134:                x__ci->ci_mtx_oldspl = (s);                             \
                    135: } while (/* CONSTCOND */ 0)
                    136:
                    137: #define        MUTEX_SPIN_SPLRESTORE(mtx)                                      \
                    138: do {                                                                   \
                    139:        struct cpu_info *x__ci = curcpu();                              \
                    140:        int s = x__ci->ci_mtx_oldspl;                                   \
                    141:        __insn_barrier();                                               \
1.12      matt      142:        if (++(x__ci->ci_mtx_count) == MUTEX_COUNT_BIAS)                \
1.2       ad        143:                splx(s);                                                \
                    144: } while (/* CONSTCOND */ 0)
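
                          /*
                          * A worked example of the nesting behaviour the two macros above
                          * implement, assuming MUTEX_COUNT_BIAS == 0 and hypothetical spin
                          * mutexes 'a' and 'b':
                          *
                          *      mutex_spin_enter(a)     ci_mtx_count  0 -> -1, oldspl saved
                          *      mutex_spin_enter(b)     ci_mtx_count -1 -> -2
                          *      mutex_spin_exit(b)      ci_mtx_count -2 -> -1, no splx()
                          *      mutex_spin_exit(a)      ci_mtx_count -1 ->  0, splx(oldspl)
                          */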
                    145:
                    146: /*
                    147:  * For architectures that provide 'simple' mutexes: they provide a
                    148:  * CAS function that is either MP-safe, or does not need to be MP
                    149:  * safe.  Adaptive mutexes on these architectures do not require an
                    150:  * additional interlock.
                    151:  */
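
                          /*
                          * Sketch of the MUTEX_CAS() contract assumed by the code below
                          * (illustrative; MD implementations vary and may add barriers):
                          * atomically compare *addr against 'old' and, if they are equal,
                          * store 'new'; return non-zero iff the store was performed:
                          *
                          *      if (*addr == old) { *addr = new; return 1; } return 0;
                          */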
                    152:
                    153: #ifdef __HAVE_SIMPLE_MUTEXES
                    154:
                    155: #define        MUTEX_OWNER(owner)                                              \
                    156:        (owner & MUTEX_THREAD)
                    157: #define        MUTEX_HAS_WAITERS(mtx)                                          \
                    158:        (((int)(mtx)->mtx_owner & MUTEX_BIT_WAITERS) != 0)
                    159:
1.21.2.2! mjf       160: #define        MUTEX_INITIALIZE_ADAPTIVE(mtx, dodebug)                         \
1.2       ad        161: do {                                                                   \
1.21.2.2! mjf       162:        if (dodebug)                                                    \
        !           163:                (mtx)->mtx_owner |= MUTEX_BIT_DEBUG;                    \
1.2       ad        164: } while (/* CONSTCOND */ 0);
                    165:
1.21.2.2! mjf       166: #define        MUTEX_INITIALIZE_SPIN(mtx, dodebug, ipl)                        \
1.2       ad        167: do {                                                                   \
                    168:        (mtx)->mtx_owner = MUTEX_BIT_SPIN;                              \
1.21.2.2! mjf       169:        if (dodebug)                                                    \
        !           170:                (mtx)->mtx_owner |= MUTEX_BIT_DEBUG;                    \
1.2       ad        171:        (mtx)->mtx_ipl = makeiplcookie((ipl));                          \
                    172:        __cpu_simple_lock_init(&(mtx)->mtx_lock);                       \
                    173: } while (/* CONSTCOND */ 0)
                    174:
                    175: #define        MUTEX_DESTROY(mtx)                                              \
                    176: do {                                                                   \
                    177:        (mtx)->mtx_owner = MUTEX_THREAD;                                \
                    178: } while (/* CONSTCOND */ 0);
                    179:
                    180: #define        MUTEX_SPIN_P(mtx)               \
                    181:     (((mtx)->mtx_owner & MUTEX_BIT_SPIN) != 0)
                    182: #define        MUTEX_ADAPTIVE_P(mtx)           \
                    183:     (((mtx)->mtx_owner & MUTEX_BIT_SPIN) == 0)
                    184:
1.21.2.2! mjf       185: #define        MUTEX_DEBUG_P(mtx)      (((mtx)->mtx_owner & MUTEX_BIT_DEBUG) != 0)
        !           186: #if defined(LOCKDEBUG)
        !           187: #define        MUTEX_OWNED(owner)              (((owner) & ~MUTEX_BIT_DEBUG) != 0)
        !           188: #define        MUTEX_INHERITDEBUG(new, old)    (new) |= (old) & MUTEX_BIT_DEBUG
        !           189: #else /* defined(LOCKDEBUG) */
        !           190: #define        MUTEX_OWNED(owner)              ((owner) != 0)
        !           191: #define        MUTEX_INHERITDEBUG(new, old)    /* nothing */
        !           192: #endif /* defined(LOCKDEBUG) */
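
                          /*
                          * Rough summary of the owner word encoding the macros above assume
                          * (illustrative only; the actual bit values are defined elsewhere):
                          *
                          *      adaptive, held: mtx_owner = owning LWP (MUTEX_THREAD bits),
                          *                      plus MUTEX_BIT_WAITERS / MUTEX_BIT_DEBUG
                          *      spin:           MUTEX_BIT_SPIN set; the lock state itself
                          *                      lives in mtx_lock, not in the owner word
                          */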
1.2       ad        193:
                    194: static inline int
                    195: MUTEX_ACQUIRE(kmutex_t *mtx, uintptr_t curthread)
                    196: {
                    197:        int rv;
1.21.2.2! mjf       198:        uintptr_t old = 0;
        !           199:        uintptr_t new = curthread;
        !           200:
        !           201:        MUTEX_INHERITDEBUG(old, mtx->mtx_owner);
        !           202:        MUTEX_INHERITDEBUG(new, old);
        !           203:        rv = MUTEX_CAS(&mtx->mtx_owner, old, new);
1.7       itohy     204:        MUTEX_RECEIVE(mtx);
1.2       ad        205:        return rv;
                    206: }
                    207:
                    208: static inline int
                    209: MUTEX_SET_WAITERS(kmutex_t *mtx, uintptr_t owner)
                    210: {
                    211:        int rv;
                    212:        rv = MUTEX_CAS(&mtx->mtx_owner, owner, owner | MUTEX_BIT_WAITERS);
1.7       itohy     213:        MUTEX_RECEIVE(mtx);
1.2       ad        214:        return rv;
                    215: }
                    216:
                    217: static inline void
                    218: MUTEX_RELEASE(kmutex_t *mtx)
                    219: {
1.21.2.2! mjf       220:        uintptr_t new;
        !           221:
1.7       itohy     222:        MUTEX_GIVE(mtx);
1.21.2.2! mjf       223:        new = 0;
        !           224:        MUTEX_INHERITDEBUG(new, mtx->mtx_owner);
        !           225:        mtx->mtx_owner = new;
1.2       ad        226: }
1.4       ad        227:
                    228: static inline void
                    229: MUTEX_CLEAR_WAITERS(kmutex_t *mtx)
                    230: {
                    231:        /* nothing */
                    232: }
1.2       ad        233: #endif /* __HAVE_SIMPLE_MUTEXES */
                    234:
                    235: /*
                    236:  * Patch in stubs via strong alias where they are not available.
                    237:  */
                    238:
                    239: #if defined(LOCKDEBUG)
                    240: #undef __HAVE_MUTEX_STUBS
                    241: #undef __HAVE_SPIN_MUTEX_STUBS
                    242: #endif
                    243:
                    244: #ifndef __HAVE_MUTEX_STUBS
1.8       itohy     245: __strong_alias(mutex_enter,mutex_vector_enter);
                    246: __strong_alias(mutex_exit,mutex_vector_exit);
1.2       ad        247: #endif
                    248:
                    249: #ifndef __HAVE_SPIN_MUTEX_STUBS
1.8       itohy     250: __strong_alias(mutex_spin_enter,mutex_vector_enter);
                    251: __strong_alias(mutex_spin_exit,mutex_vector_exit);
1.2       ad        252: #endif
                    253:
                    254: void   mutex_abort(kmutex_t *, const char *, const char *);
                    255: void   mutex_dump(volatile void *);
                    256: int    mutex_onproc(uintptr_t, struct cpu_info **);
1.6       ad        257: static struct lwp *mutex_owner(wchan_t);
1.2       ad        258:
                    259: lockops_t mutex_spin_lockops = {
                    260:        "Mutex",
                    261:        0,
                    262:        mutex_dump
                    263: };
                    264:
                    265: lockops_t mutex_adaptive_lockops = {
                    266:        "Mutex",
                    267:        1,
                    268:        mutex_dump
                    269: };
                    270:
1.5       yamt      271: syncobj_t mutex_syncobj = {
                    272:        SOBJ_SLEEPQ_SORTED,
                    273:        turnstile_unsleep,
                    274:        turnstile_changepri,
                    275:        sleepq_lendpri,
1.6       ad        276:        mutex_owner,
1.5       yamt      277: };
                    278:
1.2       ad        279: /*
                    280:  * mutex_dump:
                    281:  *
                    282:  *     Dump the contents of a mutex structure.
                    283:  */
                    284: void
                    285: mutex_dump(volatile void *cookie)
                    286: {
                    287:        volatile kmutex_t *mtx = cookie;
                    288:
                    289:        printf_nolog("owner field  : %#018lx wait/spin: %16d/%d\n",
                    290:            (long)MUTEX_OWNER(mtx->mtx_owner), MUTEX_HAS_WAITERS(mtx),
                    291:            MUTEX_SPIN_P(mtx));
                    292: }
                    293:
                    294: /*
                    295:  * mutex_abort:
                    296:  *
1.3       ad        297:  *     Dump information about an error and panic the system.  This
                    298:  *     generates a lot of machine code in the DIAGNOSTIC case, so
                    299:  *     we ask the compiler to not inline it.
1.2       ad        300:  */
1.8       itohy     301:
                    302: #if __GNUC_PREREQ__(3, 0)
                    303: __attribute ((noinline)) __attribute ((noreturn))
                    304: #endif
                    305: void
1.2       ad        306: mutex_abort(kmutex_t *mtx, const char *func, const char *msg)
                    307: {
                    308:
1.21.2.2! mjf       309:        LOCKDEBUG_ABORT(mtx, (MUTEX_SPIN_P(mtx) ?
1.3       ad        310:            &mutex_spin_lockops : &mutex_adaptive_lockops), func, msg);
1.2       ad        311:        /* NOTREACHED */
                    312: }
                    313:
                    314: /*
                    315:  * mutex_init:
                    316:  *
                    317:  *     Initialize a mutex for use.  Note that adaptive mutexes are in
                    318:  *     essence spin mutexes that can sleep to avoid deadlock and wasting
                    319:  *     CPU time.  We can't easily provide a type of mutex that always
                    320:  *     sleeps - see comments in mutex_vector_enter() about releasing
                    321:  *     mutexes unlocked.
                    322:  */
                    323: void
                    324: mutex_init(kmutex_t *mtx, kmutex_type_t type, int ipl)
                    325: {
1.21.2.2! mjf       326:        bool dodebug;
1.2       ad        327:
                    328:        memset(mtx, 0, sizeof(*mtx));
                    329:
1.15      ad        330:        switch (type) {
                    331:        case MUTEX_ADAPTIVE:
                    332:                KASSERT(ipl == IPL_NONE);
                    333:                break;
1.21.2.1  mjf       334:        case MUTEX_DEFAULT:
1.15      ad        335:        case MUTEX_DRIVER:
1.21.2.2! mjf       336:                if (ipl == IPL_NONE || ipl == IPL_SOFTCLOCK ||
        !           337:                    ipl == IPL_SOFTBIO || ipl == IPL_SOFTNET ||
        !           338:                    ipl == IPL_SOFTSERIAL) {
1.21.2.1  mjf       339:                        type = MUTEX_ADAPTIVE;
1.21.2.2! mjf       340:                } else {
1.21.2.1  mjf       341:                        type = MUTEX_SPIN;
                    342:                }
1.15      ad        343:                break;
                    344:        default:
                    345:                break;
                    346:        }
1.2       ad        347:
                    348:        switch (type) {
1.11      ad        349:        case MUTEX_NODEBUG:
1.21.2.2! mjf       350:                dodebug = LOCKDEBUG_ALLOC(mtx, NULL,
1.19      ad        351:                    (uintptr_t)__builtin_return_address(0));
1.21.2.2! mjf       352:                MUTEX_INITIALIZE_SPIN(mtx, dodebug, ipl);
1.11      ad        353:                break;
1.2       ad        354:        case MUTEX_ADAPTIVE:
1.21.2.2! mjf       355:                dodebug = LOCKDEBUG_ALLOC(mtx, &mutex_adaptive_lockops,
1.19      ad        356:                    (uintptr_t)__builtin_return_address(0));
1.21.2.2! mjf       357:                MUTEX_INITIALIZE_ADAPTIVE(mtx, dodebug);
1.2       ad        358:                break;
                    359:        case MUTEX_SPIN:
1.21.2.2! mjf       360:                dodebug = LOCKDEBUG_ALLOC(mtx, &mutex_spin_lockops,
1.19      ad        361:                    (uintptr_t)__builtin_return_address(0));
1.21.2.2! mjf       362:                MUTEX_INITIALIZE_SPIN(mtx, dodebug, ipl);
1.2       ad        363:                break;
                    364:        default:
                    365:                panic("mutex_init: impossible type");
                    366:                break;
                    367:        }
                    368: }
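
                          /*
                          * Typical usage sketch (hypothetical softc/lock names, for
                          * illustration only):
                          *
                          *      mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_NONE);
                          *              adaptive mutex; may sleep when contended
                          *      mutex_init(&sc->sc_intr_lock, MUTEX_DEFAULT, IPL_VM);
                          *              spin mutex; raises the SPL while held
                          *      ...
                          *      mutex_destroy(&sc->sc_lock);
                          *      mutex_destroy(&sc->sc_intr_lock);
                          */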
                    369:
                    370: /*
                    371:  * mutex_destroy:
                    372:  *
                    373:  *     Tear down a mutex.
                    374:  */
                    375: void
                    376: mutex_destroy(kmutex_t *mtx)
                    377: {
                    378:
                    379:        if (MUTEX_ADAPTIVE_P(mtx)) {
                    380:                MUTEX_ASSERT(mtx, !MUTEX_OWNED(mtx->mtx_owner) &&
                    381:                    !MUTEX_HAS_WAITERS(mtx));
                    382:        } else {
1.16      skrll     383:                MUTEX_ASSERT(mtx, !__SIMPLELOCK_LOCKED_P(&mtx->mtx_lock));
1.2       ad        384:        }
                    385:
1.21.2.2! mjf       386:        LOCKDEBUG_FREE(MUTEX_DEBUG_P(mtx), mtx);
1.2       ad        387:        MUTEX_DESTROY(mtx);
                    388: }
                    389:
                    390: /*
                    391:  * mutex_onproc:
                    392:  *
                    393:  *     Return true if an adaptive mutex owner is running on a CPU in the
                    394:  *     system.  If the target is waiting on the kernel big lock, then we
1.15      ad        395:  *     must release it.  This is necessary to avoid deadlock.
1.2       ad        396:  *
                    397:  *     Note that we can't use the mutex owner field as an LWP pointer.  We
                    398:  *     don't have full control over the timing of our execution, and so the
                    399:  *     pointer could be completely invalid by the time we dereference it.
                    400:  */
                    401: #ifdef MULTIPROCESSOR
                    402: int
                    403: mutex_onproc(uintptr_t owner, struct cpu_info **cip)
                    404: {
                    405:        CPU_INFO_ITERATOR cii;
                    406:        struct cpu_info *ci;
                    407:        struct lwp *l;
                    408:
                    409:        if (!MUTEX_OWNED(owner))
                    410:                return 0;
                    411:        l = (struct lwp *)MUTEX_OWNER(owner);
                    412:
1.15      ad        413:        /* See if the target is running on a CPU somewhere. */
1.10      ad        414:        if ((ci = *cip) != NULL && ci->ci_curlwp == l)
1.15      ad        415:                goto run;
                    416:        for (CPU_INFO_FOREACH(cii, ci))
                    417:                if (ci->ci_curlwp == l)
                    418:                        goto run;
1.2       ad        419:
1.15      ad        420:        /* No: it may be safe to block now. */
1.2       ad        421:        *cip = NULL;
                    422:        return 0;
1.15      ad        423:
                    424:  run:
                    425:        /* Target is running; do we need to block? */
                    426:        *cip = ci;
                    427:        return ci->ci_biglock_wanted != l;
1.2       ad        428: }
1.15      ad        429: #endif /* MULTIPROCESSOR */
1.2       ad        430:
                    431: /*
                    432:  * mutex_vector_enter:
                    433:  *
                     434:  *     Support routine for mutex_enter() that must handle all cases.  In
                    435:  *     the LOCKDEBUG case, mutex_enter() is always aliased here, even if
                     436:  *     fast-path stubs are available.  If a mutex_spin_enter() stub is
                    437:  *     not available, then it is also aliased directly here.
                    438:  */
                    439: void
                    440: mutex_vector_enter(kmutex_t *mtx)
                    441: {
                    442:        uintptr_t owner, curthread;
                    443:        turnstile_t *ts;
                    444: #ifdef MULTIPROCESSOR
                    445:        struct cpu_info *ci = NULL;
                    446:        u_int count;
                    447: #endif
                    448:        LOCKSTAT_COUNTER(spincnt);
                    449:        LOCKSTAT_COUNTER(slpcnt);
                    450:        LOCKSTAT_TIMER(spintime);
                    451:        LOCKSTAT_TIMER(slptime);
                    452:        LOCKSTAT_FLAG(lsflag);
                    453:
                    454:        /*
                    455:         * Handle spin mutexes.
                    456:         */
                    457:        if (MUTEX_SPIN_P(mtx)) {
                    458: #if defined(LOCKDEBUG) && defined(MULTIPROCESSOR)
                    459:                u_int spins = 0;
                    460: #endif
                    461:                MUTEX_SPIN_SPLRAISE(mtx);
                    462:                MUTEX_WANTLOCK(mtx);
                    463: #ifdef FULL
                    464:                if (__cpu_simple_lock_try(&mtx->mtx_lock)) {
                    465:                        MUTEX_LOCKED(mtx);
                    466:                        return;
                    467:                }
                    468: #if !defined(MULTIPROCESSOR)
                    469:                MUTEX_ABORT(mtx, "locking against myself");
                    470: #else /* !MULTIPROCESSOR */
                    471:
                    472:                LOCKSTAT_ENTER(lsflag);
                    473:                LOCKSTAT_START_TIMER(lsflag, spintime);
                    474:                count = SPINLOCK_BACKOFF_MIN;
                    475:
                    476:                /*
                     477:                 * Spin, testing the lock word and doing exponential backoff
                    478:                 * to reduce cache line ping-ponging between CPUs.
                    479:                 */
                    480:                do {
                    481:                        if (panicstr != NULL)
                    482:                                break;
1.16      skrll     483:                        while (__SIMPLELOCK_LOCKED_P(&mtx->mtx_lock)) {
1.2       ad        484:                                SPINLOCK_BACKOFF(count);
                    485: #ifdef LOCKDEBUG
                    486:                                if (SPINLOCK_SPINOUT(spins))
                    487:                                        MUTEX_ABORT(mtx, "spinout");
                    488: #endif /* LOCKDEBUG */
                    489:                        }
                    490:                } while (!__cpu_simple_lock_try(&mtx->mtx_lock));
                    491:
                    492:                if (count != SPINLOCK_BACKOFF_MIN) {
                    493:                        LOCKSTAT_STOP_TIMER(lsflag, spintime);
                    494:                        LOCKSTAT_EVENT(lsflag, mtx,
                    495:                            LB_SPIN_MUTEX | LB_SPIN, 1, spintime);
                    496:                }
                    497:                LOCKSTAT_EXIT(lsflag);
                    498: #endif /* !MULTIPROCESSOR */
                    499: #endif /* FULL */
                    500:                MUTEX_LOCKED(mtx);
                    501:                return;
                    502:        }
                    503:
                    504:        curthread = (uintptr_t)curlwp;
                    505:
                    506:        MUTEX_DASSERT(mtx, MUTEX_ADAPTIVE_P(mtx));
                    507:        MUTEX_ASSERT(mtx, curthread != 0);
                    508:        MUTEX_WANTLOCK(mtx);
                    509:
                    510: #ifdef LOCKDEBUG
                    511:        if (panicstr == NULL) {
                    512:                simple_lock_only_held(NULL, "mutex_enter");
                    513: #ifdef MULTIPROCESSOR
                    514:                LOCKDEBUG_BARRIER(&kernel_lock, 1);
                    515: #else
                    516:                LOCKDEBUG_BARRIER(NULL, 1);
                    517: #endif
                    518:        }
                    519: #endif
                    520:
                    521:        LOCKSTAT_ENTER(lsflag);
                    522:
                    523:        /*
                    524:         * Adaptive mutex; spin trying to acquire the mutex.  If we
                    525:         * determine that the owner is not running on a processor,
                    526:         * then we stop spinning, and sleep instead.
                    527:         */
                    528:        for (;;) {
                    529:                owner = mtx->mtx_owner;
                    530:                if (!MUTEX_OWNED(owner)) {
                    531:                        /*
                    532:                         * Mutex owner clear could mean two things:
                    533:                         *
                    534:                         *      * The mutex has been released.
                    535:                         *      * The owner field hasn't been set yet.
                    536:                         *
                    537:                         * Try to acquire it again.  If that fails,
                    538:                         * we'll just loop again.
                    539:                         */
                    540:                        if (MUTEX_ACQUIRE(mtx, curthread))
                    541:                                break;
                    542:                        continue;
                    543:                }
                    544:
                    545:                if (panicstr != NULL)
                    546:                        return;
                    547:                if (MUTEX_OWNER(owner) == curthread)
                    548:                        MUTEX_ABORT(mtx, "locking against myself");
                    549:
                    550: #ifdef MULTIPROCESSOR
                    551:                /*
                    552:                 * Check to see if the owner is running on a processor.
                    553:                 * If so, then we should just spin, as the owner will
                    554:                 * likely release the lock very soon.
                    555:                 */
                    556:                if (mutex_onproc(owner, &ci)) {
                    557:                        LOCKSTAT_START_TIMER(lsflag, spintime);
                    558:                        count = SPINLOCK_BACKOFF_MIN;
                    559:                        for (;;) {
                    560:                                owner = mtx->mtx_owner;
                    561:                                if (!mutex_onproc(owner, &ci))
                    562:                                        break;
                    563:                                SPINLOCK_BACKOFF(count);
                    564:                        }
                    565:                        LOCKSTAT_STOP_TIMER(lsflag, spintime);
                    566:                        LOCKSTAT_COUNT(spincnt, 1);
                    567:                        if (!MUTEX_OWNED(owner))
                    568:                                continue;
                    569:                }
                    570: #endif
                    571:
                    572:                ts = turnstile_lookup(mtx);
                    573:
                    574:                /*
                    575:                 * Once we have the turnstile chain interlock, mark the
                     576:                 * mutex as having waiters.  If that fails, spin again:
                    577:                 * chances are that the mutex has been released.
                    578:                 */
                    579:                if (!MUTEX_SET_WAITERS(mtx, owner)) {
                    580:                        turnstile_exit(mtx);
                    581:                        continue;
                    582:                }
                    583:
                    584: #ifdef MULTIPROCESSOR
                    585:                /*
                    586:                 * mutex_exit() is permitted to release the mutex without
                    587:                 * any interlocking instructions, and the following can
                    588:                 * occur as a result:
                    589:                 *
                     590:                 *  CPU 1: MUTEX_SET_WAITERS()      CPU 2: mutex_exit()
                    591:                 * ---------------------------- ----------------------------
                    592:                 *              ..                  acquire cache line
                    593:                 *              ..                   test for waiters
                    594:                 *      acquire cache line    <-      lose cache line
                    595:                 *       lock cache line                   ..
                    596:                 *     verify mutex is held                ..
                    597:                 *          set waiters                    ..
                    598:                 *       unlock cache line                 ..
                    599:                 *        lose cache line     ->    acquire cache line
                    600:                 *              ..                clear lock word, waiters
                    601:                 *        return success
                    602:                 *
                     603:                 * There is another race that can occur: a third CPU could
                    604:                 * acquire the mutex as soon as it is released.  Since
                    605:                 * adaptive mutexes are primarily spin mutexes, this is not
                    606:                 * something that we need to worry about too much.  What we
                    607:                 * do need to ensure is that the waiters bit gets set.
                    608:                 *
                    609:                 * To allow the unlocked release, we need to make some
                    610:                 * assumptions here:
                    611:                 *
                    612:                 * o Release is the only non-atomic/unlocked operation
                    613:                 *   that can be performed on the mutex.  (It must still
                     614:                 *   be atomic on the local CPU, e.g. in case it is interrupted
                    615:                 *   or preempted).
                    616:                 *
                    617:                 * o At any given time, MUTEX_SET_WAITERS() can only ever
1.21      pooka     618:                 *   be in progress on one CPU in the system - guaranteed
1.2       ad        619:                 *   by the turnstile chain lock.
                    620:                 *
                    621:                 * o No other operations other than MUTEX_SET_WAITERS()
                    622:                 *   and release can modify a mutex with a non-zero
                    623:                 *   owner field.
                    624:                 *
                    625:                 * o The result of a successful MUTEX_SET_WAITERS() call
                    626:                 *   is an unbuffered write that is immediately visible
                    627:                 *   to all other processors in the system.
                    628:                 *
                    629:                 * o If the holding LWP switches away, it posts a store
                    630:                 *   fence before changing curlwp, ensuring that any
                    631:                 *   overwrite of the mutex waiters flag by mutex_exit()
                    632:                 *   completes before the modification of curlwp becomes
                    633:                 *   visible to this CPU.
                    634:                 *
1.14      yamt      635:                 * o mi_switch() posts a store fence before setting curlwp
1.2       ad        636:                 *   and before resuming execution of an LWP.
                    637:                 *
                    638:                 * o _kernel_lock() posts a store fence before setting
                    639:                 *   curcpu()->ci_biglock_wanted, and after clearing it.
                    640:                 *   This ensures that any overwrite of the mutex waiters
                    641:                 *   flag by mutex_exit() completes before the modification
                    642:                 *   of ci_biglock_wanted becomes visible.
                    643:                 *
                    644:                 * We now post a read memory barrier (after setting the
                    645:                 * waiters field) and check the lock holder's status again.
                    646:                 * Some of the possible outcomes (not an exhaustive list):
                    647:                 *
                    648:                 * 1. The onproc check returns true: the holding LWP is
                    649:                 *    running again.  The lock may be released soon and
                    650:                 *    we should spin.  Importantly, we can't trust the
                    651:                 *    value of the waiters flag.
                    652:                 *
                    653:                 * 2. The onproc check returns false: the holding LWP is
                     654:                 *    not running.  We now have the opportunity to check
                    655:                 *    if mutex_exit() has blatted the modifications made
                    656:                 *    by MUTEX_SET_WAITERS().
                    657:                 *
                    658:                 * 3. The onproc check returns false: the holding LWP may
                    659:                 *    or may not be running.  It has context switched at
                    660:                 *    some point during our check.  Again, we have the
                    661:                 *    chance to see if the waiters bit is still set or
                    662:                 *    has been overwritten.
                    663:                 *
                    664:                 * 4. The onproc check returns false: the holding LWP is
                    665:                 *    running on a CPU, but wants the big lock.  It's OK
                    666:                 *    to check the waiters field in this case.
                    667:                 *
                    668:                 * 5. The has-waiters check fails: the mutex has been
                    669:                 *    released, the waiters flag cleared and another LWP
                    670:                 *    now owns the mutex.
                    671:                 *
                    672:                 * 6. The has-waiters check fails: the mutex has been
                    673:                 *    released.
                    674:                 *
                     675:                 * If the waiters bit is not set, it's unsafe to go to sleep,
                    676:                 * as we might never be awoken.
                    677:                 */
1.21.2.2! mjf       678:                if ((membar_consumer(), mutex_onproc(owner, &ci)) ||
        !           679:                    (membar_consumer(), !MUTEX_HAS_WAITERS(mtx))) {
1.2       ad        680:                        turnstile_exit(mtx);
                    681:                        continue;
                    682:                }
                    683: #endif /* MULTIPROCESSOR */
                    684:
                    685:                LOCKSTAT_START_TIMER(lsflag, slptime);
                    686:
1.5       yamt      687:                turnstile_block(ts, TS_WRITER_Q, mtx, &mutex_syncobj);
1.2       ad        688:
                    689:                LOCKSTAT_STOP_TIMER(lsflag, slptime);
                    690:                LOCKSTAT_COUNT(slpcnt, 1);
                    691:        }
                    692:
                    693:        LOCKSTAT_EVENT(lsflag, mtx, LB_ADAPTIVE_MUTEX | LB_SLEEP1,
                    694:            slpcnt, slptime);
                    695:        LOCKSTAT_EVENT(lsflag, mtx, LB_ADAPTIVE_MUTEX | LB_SPIN,
                    696:            spincnt, spintime);
                    697:        LOCKSTAT_EXIT(lsflag);
                    698:
                    699:        MUTEX_DASSERT(mtx, MUTEX_OWNER(mtx->mtx_owner) == curthread);
                    700:        MUTEX_LOCKED(mtx);
                    701: }
                    702:
                    703: /*
                    704:  * mutex_vector_exit:
                    705:  *
                    706:  *     Support routine for mutex_exit() that handles all cases.
                    707:  */
                    708: void
                    709: mutex_vector_exit(kmutex_t *mtx)
                    710: {
                    711:        turnstile_t *ts;
                    712:        uintptr_t curthread;
                    713:
                    714:        if (MUTEX_SPIN_P(mtx)) {
                    715: #ifdef FULL
1.16      skrll     716:                if (!__SIMPLELOCK_LOCKED_P(&mtx->mtx_lock))
1.2       ad        717:                        MUTEX_ABORT(mtx, "exiting unheld spin mutex");
                    718:                MUTEX_UNLOCKED(mtx);
                    719:                __cpu_simple_unlock(&mtx->mtx_lock);
                    720: #endif
                    721:                MUTEX_SPIN_SPLRESTORE(mtx);
                    722:                return;
                    723:        }
                    724:
1.11      ad        725:        if (__predict_false((uintptr_t)panicstr | cold)) {
1.2       ad        726:                MUTEX_UNLOCKED(mtx);
                    727:                MUTEX_RELEASE(mtx);
                    728:                return;
                    729:        }
                    730:
                    731:        curthread = (uintptr_t)curlwp;
                    732:        MUTEX_DASSERT(mtx, curthread != 0);
                    733:        MUTEX_ASSERT(mtx, MUTEX_OWNER(mtx->mtx_owner) == curthread);
                    734:        MUTEX_UNLOCKED(mtx);
                    735:
1.15      ad        736: #ifdef LOCKDEBUG
                    737:        /*
                    738:         * Avoid having to take the turnstile chain lock every time
                    739:         * around.  Raise the priority level to splhigh() in order
                    740:         * to disable preemption and so make the following atomic.
                    741:         */
                    742:        {
                    743:                int s = splhigh();
                    744:                if (!MUTEX_HAS_WAITERS(mtx)) {
                    745:                        MUTEX_RELEASE(mtx);
                    746:                        splx(s);
                    747:                        return;
                    748:                }
                    749:                splx(s);
                    750:        }
                    751: #endif
                    752:
1.2       ad        753:        /*
                    754:         * Get this lock's turnstile.  This gets the interlock on
                    755:         * the sleep queue.  Once we have that, we can clear the
                    756:         * lock.  If there was no turnstile for the lock, there
                    757:         * were no waiters remaining.
                    758:         */
                    759:        ts = turnstile_lookup(mtx);
                    760:
                    761:        if (ts == NULL) {
                    762:                MUTEX_RELEASE(mtx);
                    763:                turnstile_exit(mtx);
                    764:        } else {
                    765:                MUTEX_RELEASE(mtx);
                    766:                turnstile_wakeup(ts, TS_WRITER_Q,
                    767:                    TS_WAITERS(ts, TS_WRITER_Q), NULL);
                    768:        }
                    769: }
                    770:
1.4       ad        771: #ifndef __HAVE_SIMPLE_MUTEXES
                    772: /*
                    773:  * mutex_wakeup:
                    774:  *
                    775:  *     Support routine for mutex_exit() that wakes up all waiters.
                    776:  *     We assume that the mutex has been released, but it need not
                    777:  *     be.
                    778:  */
                    779: void
                    780: mutex_wakeup(kmutex_t *mtx)
                    781: {
                    782:        turnstile_t *ts;
                    783:
                    784:        ts = turnstile_lookup(mtx);
                    785:        if (ts == NULL) {
                    786:                turnstile_exit(mtx);
                    787:                return;
                    788:        }
                    789:        MUTEX_CLEAR_WAITERS(mtx);
                    790:        turnstile_wakeup(ts, TS_WRITER_Q, TS_WAITERS(ts, TS_WRITER_Q), NULL);
                    791: }
                    792: #endif /* !__HAVE_SIMPLE_MUTEXES */
                    793:
1.2       ad        794: /*
                    795:  * mutex_owned:
                    796:  *
1.3       ad        797:  *     Return true if the current LWP (adaptive) or CPU (spin)
                    798:  *     holds the mutex.
1.2       ad        799:  */
                    800: int
                    801: mutex_owned(kmutex_t *mtx)
                    802: {
                    803:
                    804:        if (MUTEX_ADAPTIVE_P(mtx))
                    805:                return MUTEX_OWNER(mtx->mtx_owner) == (uintptr_t)curlwp;
                    806: #ifdef FULL
1.16      skrll     807:        return __SIMPLELOCK_LOCKED_P(&mtx->mtx_lock);
1.2       ad        808: #else
                    809:        return 1;
                    810: #endif
                    811: }
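
                          /*
                          * Illustrative use (hypothetical lock name): mutex_owned() is
                          * typically used in diagnostic assertions rather than for flow
                          * control, e.g.:
                          *
                          *      KASSERT(mutex_owned(&sc->sc_lock));
                          */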
                    812:
                    813: /*
                    814:  * mutex_owner:
                    815:  *
1.6       ad        816:  *     Return the current owner of an adaptive mutex.  Used for
                    817:  *     priority inheritance.
1.2       ad        818:  */
1.6       ad        819: static struct lwp *
                    820: mutex_owner(wchan_t obj)
1.2       ad        821: {
1.6       ad        822:        kmutex_t *mtx = (void *)(uintptr_t)obj; /* discard qualifiers */
1.2       ad        823:
                    824:        MUTEX_ASSERT(mtx, MUTEX_ADAPTIVE_P(mtx));
                    825:        return (struct lwp *)MUTEX_OWNER(mtx->mtx_owner);
                    826: }
                    827:
                    828: /*
                    829:  * mutex_tryenter:
                    830:  *
                    831:  *     Try to acquire the mutex; return non-zero if we did.
                    832:  */
                    833: int
                    834: mutex_tryenter(kmutex_t *mtx)
                    835: {
                    836:        uintptr_t curthread;
                    837:
                    838:        /*
                    839:         * Handle spin mutexes.
                    840:         */
                    841:        if (MUTEX_SPIN_P(mtx)) {
                    842:                MUTEX_SPIN_SPLRAISE(mtx);
                    843: #ifdef FULL
                    844:                if (__cpu_simple_lock_try(&mtx->mtx_lock)) {
1.4       ad        845:                        MUTEX_WANTLOCK(mtx);
1.2       ad        846:                        MUTEX_LOCKED(mtx);
                    847:                        return 1;
                    848:                }
                    849:                MUTEX_SPIN_SPLRESTORE(mtx);
                    850: #else
1.4       ad        851:                MUTEX_WANTLOCK(mtx);
1.2       ad        852:                MUTEX_LOCKED(mtx);
                    853:                return 1;
                    854: #endif
                    855:        } else {
                    856:                curthread = (uintptr_t)curlwp;
                    857:                MUTEX_ASSERT(mtx, curthread != 0);
                    858:                if (MUTEX_ACQUIRE(mtx, curthread)) {
1.4       ad        859:                        MUTEX_WANTLOCK(mtx);
1.2       ad        860:                        MUTEX_LOCKED(mtx);
                    861:                        MUTEX_DASSERT(mtx,
                    862:                            MUTEX_OWNER(mtx->mtx_owner) == curthread);
                    863:                        return 1;
                    864:                }
                    865:        }
                    866:
                    867:        return 0;
                    868: }
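
                          /*
                          * Illustrative usage sketch (hypothetical names):
                          *
                          *      if (mutex_tryenter(&sc->sc_lock)) {
                          *              ... work that must not wait for the lock ...
                          *              mutex_exit(&sc->sc_lock);
                          *      } else {
                          *              ... back off and retry later ...
                          *      }
                          */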
                    869:
                    870: #if defined(__HAVE_SPIN_MUTEX_STUBS) || defined(FULL)
                    871: /*
                    872:  * mutex_spin_retry:
                    873:  *
                    874:  *     Support routine for mutex_spin_enter().  Assumes that the caller
                    875:  *     has already raised the SPL, and adjusted counters.
                    876:  */
                    877: void
                    878: mutex_spin_retry(kmutex_t *mtx)
                    879: {
                    880: #ifdef MULTIPROCESSOR
                    881:        u_int count;
                    882:        LOCKSTAT_TIMER(spintime);
                    883:        LOCKSTAT_FLAG(lsflag);
                    884: #ifdef LOCKDEBUG
                    885:        u_int spins = 0;
                    886: #endif /* LOCKDEBUG */
                    887:
                    888:        MUTEX_WANTLOCK(mtx);
                    889:
                    890:        LOCKSTAT_ENTER(lsflag);
                    891:        LOCKSTAT_START_TIMER(lsflag, spintime);
                    892:        count = SPINLOCK_BACKOFF_MIN;
                    893:
                    894:        /*
                     895:         * Spin, testing the lock word and doing exponential backoff
                    896:         * to reduce cache line ping-ponging between CPUs.
                    897:         */
                    898:        do {
                    899:                if (panicstr != NULL)
                    900:                        break;
1.16      skrll     901:                while (__SIMPLELOCK_LOCKED_P(&mtx->mtx_lock)) {
1.2       ad        902:                        SPINLOCK_BACKOFF(count);
                    903: #ifdef LOCKDEBUG
                    904:                        if (SPINLOCK_SPINOUT(spins))
                    905:                                MUTEX_ABORT(mtx, "spinout");
                    906: #endif /* LOCKDEBUG */
                    907:                }
                    908:        } while (!__cpu_simple_lock_try(&mtx->mtx_lock));
                    909:
                    910:        LOCKSTAT_STOP_TIMER(lsflag, spintime);
                    911:        LOCKSTAT_EVENT(lsflag, mtx, LB_SPIN_MUTEX | LB_SPIN, 1, spintime);
                    912:        LOCKSTAT_EXIT(lsflag);
                    913:
                    914:        MUTEX_LOCKED(mtx);
                    915: #else  /* MULTIPROCESSOR */
                    916:        MUTEX_ABORT(mtx, "locking against myself");
                    917: #endif /* MULTIPROCESSOR */
                    918: }
                    919: #endif /* defined(__HAVE_SPIN_MUTEX_STUBS) || defined(FULL) */
