
src/sys/kern/kern_mutex.c, revision 1.58

/*	$NetBSD: kern_mutex.c,v 1.57 2013/09/22 14:59:07 joerg Exp $	*/

/*-
 * Copyright (c) 2002, 2006, 2007, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe and Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Kernel mutex implementation, modeled after those found in Solaris,
 * a description of which can be found in:
 *
 *	Solaris Internals: Core Kernel Architecture, Jim Mauro and
 *	    Richard McDougall.
 */

#define	__MUTEX_PRIVATE

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_mutex.c,v 1.57 2013/09/22 14:59:07 joerg Exp $");

#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/sched.h>
#include <sys/sleepq.h>
#include <sys/systm.h>
#include <sys/lockdebug.h>
#include <sys/kernel.h>
#include <sys/intr.h>
#include <sys/lock.h>
#include <sys/types.h>

#include <dev/lockstat.h>

#include <machine/lock.h>

/*
 * When not running a debug kernel, spin mutexes are not much
 * more than an splraiseipl() and splx() pair.
 */

#if defined(DIAGNOSTIC) || defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
#define	FULL
#endif
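/*
 * Illustrative sketch (editorial, not from the original file): with
 * FULL undefined, the spin mutex entry/exit paths reduce to roughly
 * the following, an IPL raise/restore pair around the critical
 * section.  The lock word is only tested on FULL kernels:
 *
 *	int s;
 *
 *	s = splraiseipl(mtx->mtx_ipl);		(mutex_spin_enter)
 *	... critical section ...
 *	splx(s);				(mutex_spin_exit)
 *
 * In practice the per-CPU nesting count maintained by
 * MUTEX_SPIN_SPLRAISE/MUTEX_SPIN_SPLRESTORE below is kept as well.
 */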

/*
 * Debugging support.
 */

#define	MUTEX_WANTLOCK(mtx)					\
    LOCKDEBUG_WANTLOCK(MUTEX_DEBUG_P(mtx), (mtx),		\
        (uintptr_t)__builtin_return_address(0), 0)
#define	MUTEX_LOCKED(mtx)					\
    LOCKDEBUG_LOCKED(MUTEX_DEBUG_P(mtx), (mtx), NULL,		\
        (uintptr_t)__builtin_return_address(0), 0)
#define	MUTEX_UNLOCKED(mtx)					\
    LOCKDEBUG_UNLOCKED(MUTEX_DEBUG_P(mtx), (mtx),		\
        (uintptr_t)__builtin_return_address(0), 0)
#define	MUTEX_ABORT(mtx, msg)					\
    mutex_abort(mtx, __func__, msg)

#if defined(LOCKDEBUG)

#define	MUTEX_DASSERT(mtx, cond)				\
do {								\
	if (!(cond))						\
		MUTEX_ABORT(mtx, "assertion failed: " #cond);	\
} while (/* CONSTCOND */ 0)

#else	/* LOCKDEBUG */

#define	MUTEX_DASSERT(mtx, cond)	/* nothing */

#endif /* LOCKDEBUG */

#if defined(DIAGNOSTIC)

#define	MUTEX_ASSERT(mtx, cond)					\
do {								\
	if (!(cond))						\
		MUTEX_ABORT(mtx, "assertion failed: " #cond);	\
} while (/* CONSTCOND */ 0)

#else	/* DIAGNOSTIC */

#define	MUTEX_ASSERT(mtx, cond)	/* nothing */

#endif	/* DIAGNOSTIC */

/*
 * Spin mutex SPL save / restore.
 */

#define	MUTEX_SPIN_SPLRAISE(mtx)					\
do {									\
	struct cpu_info *x__ci;						\
	int x__cnt, s;							\
	s = splraiseipl(mtx->mtx_ipl);					\
	x__ci = curcpu();						\
	x__cnt = x__ci->ci_mtx_count--;					\
	__insn_barrier();						\
	if (x__cnt == 0)						\
		x__ci->ci_mtx_oldspl = (s);				\
} while (/* CONSTCOND */ 0)

#define	MUTEX_SPIN_SPLRESTORE(mtx)					\
do {									\
	struct cpu_info *x__ci = curcpu();				\
	int s = x__ci->ci_mtx_oldspl;					\
	__insn_barrier();						\
	if (++(x__ci->ci_mtx_count) == 0)				\
		splx(s);						\
} while (/* CONSTCOND */ 0)
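
/*
 * Illustrative walk-through (values assumed for the example, not from
 * the original file): ci_mtx_count counts spin mutex nesting downwards
 * from zero, so only the outermost acquisition saves the old SPL and
 * only the outermost release restores it:
 *
 *	enter A: x__cnt == 0, count becomes -1, save ci_mtx_oldspl = s
 *	enter B: x__cnt == -1, count becomes -2, oldspl untouched
 *	exit B:  count becomes -1, not zero, no splx()
 *	exit A:  count becomes 0, splx(ci_mtx_oldspl)
 */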

/*
 * For architectures that provide 'simple' mutexes: they provide a
 * CAS function that is either MP-safe, or does not need to be MP
 * safe.  Adaptive mutexes on these architectures do not require an
 * additional interlock.
 */

#ifdef __HAVE_SIMPLE_MUTEXES

#define	MUTEX_OWNER(owner)						\
	(owner & MUTEX_THREAD)
#define	MUTEX_HAS_WAITERS(mtx)						\
	(((int)(mtx)->mtx_owner & MUTEX_BIT_WAITERS) != 0)

#define	MUTEX_INITIALIZE_ADAPTIVE(mtx, dodebug)				\
do {									\
	if (!(dodebug))							\
		(mtx)->mtx_owner |= MUTEX_BIT_NODEBUG;			\
} while (/* CONSTCOND */ 0)

#define	MUTEX_INITIALIZE_SPIN(mtx, dodebug, ipl)			\
do {									\
	(mtx)->mtx_owner = MUTEX_BIT_SPIN;				\
	if (!(dodebug))							\
		(mtx)->mtx_owner |= MUTEX_BIT_NODEBUG;			\
	(mtx)->mtx_ipl = makeiplcookie((ipl));				\
	__cpu_simple_lock_init(&(mtx)->mtx_lock);			\
} while (/* CONSTCOND */ 0)

#define	MUTEX_DESTROY(mtx)						\
do {									\
	(mtx)->mtx_owner = MUTEX_THREAD;				\
} while (/* CONSTCOND */ 0)

#define	MUTEX_SPIN_P(mtx)		\
    (((mtx)->mtx_owner & MUTEX_BIT_SPIN) != 0)
#define	MUTEX_ADAPTIVE_P(mtx)		\
    (((mtx)->mtx_owner & MUTEX_BIT_SPIN) == 0)

#define	MUTEX_DEBUG_P(mtx)	(((mtx)->mtx_owner & MUTEX_BIT_NODEBUG) == 0)
#if defined(LOCKDEBUG)
#define	MUTEX_OWNED(owner)		(((owner) & ~MUTEX_BIT_NODEBUG) != 0)
#define	MUTEX_INHERITDEBUG(new, old)	(new) |= (old) & MUTEX_BIT_NODEBUG
#else /* defined(LOCKDEBUG) */
#define	MUTEX_OWNED(owner)		((owner) != 0)
#define	MUTEX_INHERITDEBUG(new, old)	/* nothing */
#endif /* defined(LOCKDEBUG) */

static inline int
MUTEX_ACQUIRE(kmutex_t *mtx, uintptr_t curthread)
{
	int rv;
	uintptr_t old = 0;
	uintptr_t new = curthread;

	MUTEX_INHERITDEBUG(old, mtx->mtx_owner);
	MUTEX_INHERITDEBUG(new, old);
	rv = MUTEX_CAS(&mtx->mtx_owner, old, new);
	MUTEX_RECEIVE(mtx);
	return rv;
}

static inline int
MUTEX_SET_WAITERS(kmutex_t *mtx, uintptr_t owner)
{
	int rv;
	rv = MUTEX_CAS(&mtx->mtx_owner, owner, owner | MUTEX_BIT_WAITERS);
	MUTEX_RECEIVE(mtx);
	return rv;
}

static inline void
MUTEX_RELEASE(kmutex_t *mtx)
{
	uintptr_t new;

	MUTEX_GIVE(mtx);
	new = 0;
	MUTEX_INHERITDEBUG(new, mtx->mtx_owner);
	mtx->mtx_owner = new;
}
#endif	/* __HAVE_SIMPLE_MUTEXES */
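
/*
 * A sketch of what a port's MUTEX_CAS might look like (illustrative,
 * not taken from any particular machine/mutex.h): a compare-and-swap
 * on the owner word that reports success only if the expected value
 * was still in place, built on the MI atomics from <sys/atomic.h>:
 *
 *	#define MUTEX_CAS(p, o, n)				\
 *	    (atomic_cas_ulong((volatile unsigned long *)(p),	\
 *	    (unsigned long)(o), (unsigned long)(n)) ==		\
 *	    (unsigned long)(o))
 *
 * MUTEX_RECEIVE()/MUTEX_GIVE() are then the port's acquire/release
 * memory barriers, often membar_enter()/membar_exit(), or nothing on
 * machines whose CAS already implies them.
 */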

/*
 * Patch in stubs via strong alias where they are not available.
 */

#if defined(LOCKDEBUG)
#undef	__HAVE_MUTEX_STUBS
#undef	__HAVE_SPIN_MUTEX_STUBS
#endif

#ifndef __HAVE_MUTEX_STUBS
__strong_alias(mutex_enter,mutex_vector_enter);
__strong_alias(mutex_exit,mutex_vector_exit);
#endif

#ifndef __HAVE_SPIN_MUTEX_STUBS
__strong_alias(mutex_spin_enter,mutex_vector_enter);
__strong_alias(mutex_spin_exit,mutex_vector_exit);
#endif
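
/*
 * For reference, a paraphrase of <sys/cdefs.h> (not code from this
 * file): __strong_alias(alias,sym) emits an assembler-level alias so
 * that, for example, a call to mutex_enter() resolves directly to
 * mutex_vector_enter() when no fast-path stub exists.  The effect is
 * roughly:
 *
 *	#define __strong_alias(alias,sym)		\
 *	    __asm(".global alias\n"			\
 *	        "alias = sym")
 *
 * (The real macro stringifies and label-mangles its arguments.)
 */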

static void	mutex_abort(kmutex_t *, const char *, const char *);
static void	mutex_dump(volatile void *);

lockops_t mutex_spin_lockops = {
	"Mutex",
	LOCKOPS_SPIN,
	mutex_dump
};

lockops_t mutex_adaptive_lockops = {
	"Mutex",
	LOCKOPS_SLEEP,
	mutex_dump
};

syncobj_t mutex_syncobj = {
	SOBJ_SLEEPQ_SORTED,
	turnstile_unsleep,
	turnstile_changepri,
	sleepq_lendpri,
	(void *)mutex_owner,
};

/*
 * mutex_dump:
 *
 *	Dump the contents of a mutex structure.
 */
void
mutex_dump(volatile void *cookie)
{
	volatile kmutex_t *mtx = cookie;

	printf_nolog("owner field  : %#018lx wait/spin: %16d/%d\n",
	    (long)MUTEX_OWNER(mtx->mtx_owner), MUTEX_HAS_WAITERS(mtx),
	    MUTEX_SPIN_P(mtx));
}

/*
 * mutex_abort:
 *
 *	Dump information about an error and panic the system.  This
 *	generates a lot of machine code in the DIAGNOSTIC case, so
 *	we ask the compiler to not inline it.
 */
void __noinline
mutex_abort(kmutex_t *mtx, const char *func, const char *msg)
{

	LOCKDEBUG_ABORT(mtx, (MUTEX_SPIN_P(mtx) ?
	    &mutex_spin_lockops : &mutex_adaptive_lockops), func, msg);
}

/*
 * mutex_init:
 *
 *	Initialize a mutex for use.  Note that adaptive mutexes are in
 *	essence spin mutexes that can sleep to avoid deadlock and wasting
 *	CPU time.  We can't easily provide a type of mutex that always
 *	sleeps - see comments in mutex_vector_enter() about releasing
 *	mutexes unlocked.
 */
void
mutex_init(kmutex_t *mtx, kmutex_type_t type, int ipl)
{
	bool dodebug;

	memset(mtx, 0, sizeof(*mtx));

	switch (type) {
	case MUTEX_ADAPTIVE:
		KASSERT(ipl == IPL_NONE);
		break;
	case MUTEX_DEFAULT:
	case MUTEX_DRIVER:
		if (ipl == IPL_NONE || ipl == IPL_SOFTCLOCK ||
		    ipl == IPL_SOFTBIO || ipl == IPL_SOFTNET ||
		    ipl == IPL_SOFTSERIAL) {
			type = MUTEX_ADAPTIVE;
		} else {
			type = MUTEX_SPIN;
		}
		break;
	default:
		break;
	}

	switch (type) {
	case MUTEX_NODEBUG:
		dodebug = LOCKDEBUG_ALLOC(mtx, NULL,
		    (uintptr_t)__builtin_return_address(0));
		MUTEX_INITIALIZE_SPIN(mtx, dodebug, ipl);
		break;
	case MUTEX_ADAPTIVE:
		dodebug = LOCKDEBUG_ALLOC(mtx, &mutex_adaptive_lockops,
		    (uintptr_t)__builtin_return_address(0));
		MUTEX_INITIALIZE_ADAPTIVE(mtx, dodebug);
		break;
	case MUTEX_SPIN:
		dodebug = LOCKDEBUG_ALLOC(mtx, &mutex_spin_lockops,
		    (uintptr_t)__builtin_return_address(0));
		MUTEX_INITIALIZE_SPIN(mtx, dodebug, ipl);
		break;
	default:
		panic("mutex_init: impossible type");
		break;
	}
}
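
/*
 * Typical usage, as an illustrative sketch ("sc_lock" and the IPL
 * choice are example names, not taken from this file).  MUTEX_DEFAULT
 * with IPL_NONE or a soft interrupt IPL yields an adaptive mutex; a
 * hardware interrupt IPL such as IPL_VM yields a spin mutex:
 *
 *	kmutex_t sc_lock;
 *
 *	mutex_init(&sc_lock, MUTEX_DEFAULT, IPL_NONE);
 *	mutex_enter(&sc_lock);
 *	... manipulate the protected state ...
 *	mutex_exit(&sc_lock);
 *	mutex_destroy(&sc_lock);
 */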

/*
 * mutex_destroy:
 *
 *	Tear down a mutex.
 */
void
mutex_destroy(kmutex_t *mtx)
{

	if (MUTEX_ADAPTIVE_P(mtx)) {
		MUTEX_ASSERT(mtx, !MUTEX_OWNED(mtx->mtx_owner) &&
		    !MUTEX_HAS_WAITERS(mtx));
	} else {
		MUTEX_ASSERT(mtx, !__SIMPLELOCK_LOCKED_P(&mtx->mtx_lock));
	}

	LOCKDEBUG_FREE(MUTEX_DEBUG_P(mtx), mtx);
	MUTEX_DESTROY(mtx);
}

#ifdef MULTIPROCESSOR
/*
 * mutex_oncpu:
 *
 *	Return true if an adaptive mutex owner is running on a CPU in the
 *	system.  If the target is waiting on the kernel big lock, then we
 *	must release it.  This is necessary to avoid deadlock.
 */
static bool
mutex_oncpu(uintptr_t owner)
{
	struct cpu_info *ci;
	lwp_t *l;

	KASSERT(kpreempt_disabled());

	if (!MUTEX_OWNED(owner)) {
		return false;
	}

	/*
	 * See lwp_dtor() for why dereferencing the LWP pointer is safe.
	 * We must have kernel preemption disabled for that.
	 */
	l = (lwp_t *)MUTEX_OWNER(owner);
	ci = l->l_cpu;

	if (ci && ci->ci_curlwp == l) {
		/* Target is running; do we need to block? */
		return (ci->ci_biglock_wanted != l);
	}

	/* Not running.  It may be safe to block now. */
	return false;
}
#endif	/* MULTIPROCESSOR */

/*
 * mutex_vector_enter:
 *
 *	Support routine for mutex_enter() that must handle all cases.  In
 *	the LOCKDEBUG case, mutex_enter() is always aliased here, even if
 *	fast-path stubs are available.  If a mutex_spin_enter() stub is
 *	not available, then it is also aliased directly here.
 */
void
mutex_vector_enter(kmutex_t *mtx)
{
	uintptr_t owner, curthread;
	turnstile_t *ts;
#ifdef MULTIPROCESSOR
	u_int count;
#endif
	LOCKSTAT_COUNTER(spincnt);
	LOCKSTAT_COUNTER(slpcnt);
	LOCKSTAT_TIMER(spintime);
	LOCKSTAT_TIMER(slptime);
	LOCKSTAT_FLAG(lsflag);

	/*
	 * Handle spin mutexes.
	 */
	if (MUTEX_SPIN_P(mtx)) {
#if defined(LOCKDEBUG) && defined(MULTIPROCESSOR)
		u_int spins = 0;
#endif
		MUTEX_SPIN_SPLRAISE(mtx);
		MUTEX_WANTLOCK(mtx);
#ifdef FULL
		if (__cpu_simple_lock_try(&mtx->mtx_lock)) {
			MUTEX_LOCKED(mtx);
			return;
		}
#if !defined(MULTIPROCESSOR)
		MUTEX_ABORT(mtx, "locking against myself");
#else /* !MULTIPROCESSOR */

		LOCKSTAT_ENTER(lsflag);
		LOCKSTAT_START_TIMER(lsflag, spintime);
		count = SPINLOCK_BACKOFF_MIN;

		/*
		 * Spin, testing the lock word and doing exponential
		 * backoff to reduce cache line ping-ponging between CPUs.
		 */
		do {
			if (panicstr != NULL)
				break;
			while (__SIMPLELOCK_LOCKED_P(&mtx->mtx_lock)) {
				SPINLOCK_BACKOFF(count);
#ifdef LOCKDEBUG
				if (SPINLOCK_SPINOUT(spins))
					MUTEX_ABORT(mtx, "spinout");
#endif	/* LOCKDEBUG */
			}
		} while (!__cpu_simple_lock_try(&mtx->mtx_lock));

		if (count != SPINLOCK_BACKOFF_MIN) {
			LOCKSTAT_STOP_TIMER(lsflag, spintime);
			LOCKSTAT_EVENT(lsflag, mtx,
			    LB_SPIN_MUTEX | LB_SPIN, 1, spintime);
		}
		LOCKSTAT_EXIT(lsflag);
#endif	/* !MULTIPROCESSOR */
#endif	/* FULL */
		MUTEX_LOCKED(mtx);
		return;
	}

	curthread = (uintptr_t)curlwp;

	MUTEX_DASSERT(mtx, MUTEX_ADAPTIVE_P(mtx));
	MUTEX_ASSERT(mtx, curthread != 0);
	MUTEX_WANTLOCK(mtx);

	if (panicstr == NULL) {
		LOCKDEBUG_BARRIER(&kernel_lock, 1);
	}

	LOCKSTAT_ENTER(lsflag);

	/*
	 * Adaptive mutex; spin trying to acquire the mutex.  If we
	 * determine that the owner is not running on a processor,
	 * then we stop spinning, and sleep instead.
	 */
	KPREEMPT_DISABLE(curlwp);
	for (owner = mtx->mtx_owner;;) {
		if (!MUTEX_OWNED(owner)) {
			/*
			 * Mutex owner clear could mean two things:
			 *
			 *	* The mutex has been released.
			 *	* The owner field hasn't been set yet.
			 *
			 * Try to acquire it again.  If that fails,
			 * we'll just loop again.
			 */
			if (MUTEX_ACQUIRE(mtx, curthread))
				break;
			owner = mtx->mtx_owner;
			continue;
		}
		if (__predict_false(panicstr != NULL)) {
			kpreempt_enable();
			return;
		}
		if (__predict_false(MUTEX_OWNER(owner) == curthread)) {
			MUTEX_ABORT(mtx, "locking against myself");
		}
#ifdef MULTIPROCESSOR
		/*
		 * Check to see if the owner is running on a processor.
		 * If so, then we should just spin, as the owner will
		 * likely release the lock very soon.
		 */
		if (mutex_oncpu(owner)) {
			LOCKSTAT_START_TIMER(lsflag, spintime);
			count = SPINLOCK_BACKOFF_MIN;
			do {
				KPREEMPT_ENABLE(curlwp);
				SPINLOCK_BACKOFF(count);
				KPREEMPT_DISABLE(curlwp);
				owner = mtx->mtx_owner;
			} while (mutex_oncpu(owner));
			LOCKSTAT_STOP_TIMER(lsflag, spintime);
			LOCKSTAT_COUNT(spincnt, 1);
			if (!MUTEX_OWNED(owner))
				continue;
		}
#endif

		ts = turnstile_lookup(mtx);

		/*
		 * Once we have the turnstile chain interlock, mark the
		 * mutex as having waiters.  If that fails, spin again:
		 * chances are that the mutex has been released.
		 */
		if (!MUTEX_SET_WAITERS(mtx, owner)) {
			turnstile_exit(mtx);
			owner = mtx->mtx_owner;
			continue;
		}

#ifdef MULTIPROCESSOR
		/*
		 * mutex_exit() is permitted to release the mutex without
		 * any interlocking instructions, and the following can
		 * occur as a result:
		 *
		 *  CPU 1: MUTEX_SET_WAITERS()      CPU 2: mutex_exit()
		 * ---------------------------- ----------------------------
		 *		..		    acquire cache line
		 *		..		     test for waiters
		 *	acquire cache line    <-      lose cache line
		 *	 lock cache line		   ..
		 *     verify mutex is held		   ..
		 *	    set waiters			   ..
		 *	 unlock cache line		   ..
		 *	  lose cache line     ->    acquire cache line
		 *		..		  clear lock word, waiters
		 *	  return success
		 *
		 * There is another race that can occur: a third CPU could
		 * acquire the mutex as soon as it is released.  Since
		 * adaptive mutexes are primarily spin mutexes, this is not
		 * something that we need to worry about too much.  What we
		 * do need to ensure is that the waiters bit gets set.
		 *
		 * To allow the unlocked release, we need to make some
		 * assumptions here:
		 *
		 * o Release is the only non-atomic/unlocked operation
		 *   that can be performed on the mutex.  (It must still
		 *   be atomic on the local CPU, e.g. in case it is
		 *   interrupted or preempted).
		 *
		 * o At any given time, MUTEX_SET_WAITERS() can only ever
		 *   be in progress on one CPU in the system - guaranteed
		 *   by the turnstile chain lock.
		 *
		 * o No other operations other than MUTEX_SET_WAITERS()
		 *   and release can modify a mutex with a non-zero
		 *   owner field.
		 *
		 * o The result of a successful MUTEX_SET_WAITERS() call
		 *   is an unbuffered write that is immediately visible
		 *   to all other processors in the system.
		 *
		 * o If the holding LWP switches away, it posts a store
		 *   fence before changing curlwp, ensuring that any
		 *   overwrite of the mutex waiters flag by mutex_exit()
		 *   completes before the modification of curlwp becomes
		 *   visible to this CPU.
		 *
		 * o mi_switch() posts a store fence before setting curlwp
		 *   and before resuming execution of an LWP.
		 *
		 * o _kernel_lock() posts a store fence before setting
		 *   curcpu()->ci_biglock_wanted, and after clearing it.
		 *   This ensures that any overwrite of the mutex waiters
		 *   flag by mutex_exit() completes before the modification
		 *   of ci_biglock_wanted becomes visible.
		 *
		 * We now post a read memory barrier (after setting the
		 * waiters field) and check the lock holder's status again.
		 * Some of the possible outcomes (not an exhaustive list):
		 *
		 * 1. The on-CPU check returns true: the holding LWP is
		 *    running again.  The lock may be released soon and
		 *    we should spin.  Importantly, we can't trust the
		 *    value of the waiters flag.
		 *
		 * 2. The on-CPU check returns false: the holding LWP is
		 *    not running.  We now have the opportunity to check
		 *    if mutex_exit() has blatted the modifications made
		 *    by MUTEX_SET_WAITERS().
		 *
		 * 3. The on-CPU check returns false: the holding LWP may
		 *    or may not be running.  It has context switched at
		 *    some point during our check.  Again, we have the
		 *    chance to see if the waiters bit is still set or
		 *    has been overwritten.
		 *
		 * 4. The on-CPU check returns false: the holding LWP is
		 *    running on a CPU, but wants the big lock.  It's OK
		 *    to check the waiters field in this case.
		 *
		 * 5. The has-waiters check fails: the mutex has been
		 *    released, the waiters flag cleared and another LWP
		 *    now owns the mutex.
		 *
		 * 6. The has-waiters check fails: the mutex has been
		 *    released.
		 *
		 * If the waiters bit is not set it's unsafe to go to
		 * sleep, as we might never be awoken.
		 */
		if ((membar_consumer(), mutex_oncpu(owner)) ||
		    (membar_consumer(), !MUTEX_HAS_WAITERS(mtx))) {
			turnstile_exit(mtx);
			owner = mtx->mtx_owner;
			continue;
		}
#endif	/* MULTIPROCESSOR */

		LOCKSTAT_START_TIMER(lsflag, slptime);

		turnstile_block(ts, TS_WRITER_Q, mtx, &mutex_syncobj);

		LOCKSTAT_STOP_TIMER(lsflag, slptime);
		LOCKSTAT_COUNT(slpcnt, 1);

		owner = mtx->mtx_owner;
	}
	KPREEMPT_ENABLE(curlwp);

	LOCKSTAT_EVENT(lsflag, mtx, LB_ADAPTIVE_MUTEX | LB_SLEEP1,
	    slpcnt, slptime);
	LOCKSTAT_EVENT(lsflag, mtx, LB_ADAPTIVE_MUTEX | LB_SPIN,
	    spincnt, spintime);
	LOCKSTAT_EXIT(lsflag);

	MUTEX_DASSERT(mtx, MUTEX_OWNER(mtx->mtx_owner) == curthread);
	MUTEX_LOCKED(mtx);
}
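
/*
 * For callers, the upshot is that an adaptive mutex may sleep, so it
 * pairs naturally with condition variables.  An illustrative sketch
 * (the names are examples, not from this file):
 *
 *	mutex_enter(&sc->sc_lock);
 *	while (!sc->sc_ready)
 *		cv_wait(&sc->sc_cv, &sc->sc_lock);
 *	... consume the now-ready state ...
 *	mutex_exit(&sc->sc_lock);
 *
 * cv_wait() releases the mutex while sleeping and re-acquires it
 * before returning, which is only possible with a sleepable
 * (adaptive) mutex.
 */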

/*
 * mutex_vector_exit:
 *
 *	Support routine for mutex_exit() that handles all cases.
 */
void
mutex_vector_exit(kmutex_t *mtx)
{
	turnstile_t *ts;
	uintptr_t curthread;

	if (MUTEX_SPIN_P(mtx)) {
#ifdef FULL
		if (__predict_false(!__SIMPLELOCK_LOCKED_P(&mtx->mtx_lock))) {
			if (panicstr != NULL)
				return;
			MUTEX_ABORT(mtx, "exiting unheld spin mutex");
		}
		MUTEX_UNLOCKED(mtx);
		__cpu_simple_unlock(&mtx->mtx_lock);
#endif
		MUTEX_SPIN_SPLRESTORE(mtx);
		return;
	}

	if (__predict_false((uintptr_t)panicstr | cold)) {
		MUTEX_UNLOCKED(mtx);
		MUTEX_RELEASE(mtx);
		return;
	}

	curthread = (uintptr_t)curlwp;
	MUTEX_DASSERT(mtx, curthread != 0);
	MUTEX_ASSERT(mtx, MUTEX_OWNER(mtx->mtx_owner) == curthread);
	MUTEX_UNLOCKED(mtx);
#if !defined(LOCKDEBUG)
	__USE(curthread);
#endif

#ifdef LOCKDEBUG
	/*
	 * Avoid having to take the turnstile chain lock every time
	 * around.  Raise the priority level to splhigh() in order
	 * to disable preemption and so make the following atomic.
	 */
	{
		int s = splhigh();
		if (!MUTEX_HAS_WAITERS(mtx)) {
			MUTEX_RELEASE(mtx);
			splx(s);
			return;
		}
		splx(s);
	}
#endif

	/*
	 * Get this lock's turnstile.  This gets the interlock on
	 * the sleep queue.  Once we have that, we can clear the
	 * lock.  If there was no turnstile for the lock, there
	 * were no waiters remaining.
	 */
	ts = turnstile_lookup(mtx);

	if (ts == NULL) {
		MUTEX_RELEASE(mtx);
		turnstile_exit(mtx);
	} else {
		MUTEX_RELEASE(mtx);
		turnstile_wakeup(ts, TS_WRITER_Q,
		    TS_WAITERS(ts, TS_WRITER_Q), NULL);
	}
}

#ifndef __HAVE_SIMPLE_MUTEXES
/*
 * mutex_wakeup:
 *
 *	Support routine for mutex_exit() that wakes up all waiters.
 *	We assume that the mutex has been released, but it need not
 *	be.
 */
void
mutex_wakeup(kmutex_t *mtx)
{
	turnstile_t *ts;

	ts = turnstile_lookup(mtx);
	if (ts == NULL) {
		turnstile_exit(mtx);
		return;
	}
	MUTEX_CLEAR_WAITERS(mtx);
	turnstile_wakeup(ts, TS_WRITER_Q, TS_WAITERS(ts, TS_WRITER_Q), NULL);
}
#endif	/* !__HAVE_SIMPLE_MUTEXES */

/*
 * mutex_owned:
 *
 *	Return true if the current LWP (adaptive) or CPU (spin)
 *	holds the mutex.
 */
int
mutex_owned(kmutex_t *mtx)
{

	if (mtx == NULL)
		return 0;
	if (MUTEX_ADAPTIVE_P(mtx))
		return MUTEX_OWNER(mtx->mtx_owner) == (uintptr_t)curlwp;
#ifdef FULL
	return __SIMPLELOCK_LOCKED_P(&mtx->mtx_lock);
#else
	return 1;
#endif
}
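
/*
 * mutex_owned() is intended for diagnostic assertions rather than
 * locking decisions.  The usual idiom is a sketch like this ("sc" is
 * an example name):
 *
 *	KASSERT(mutex_owned(&sc->sc_lock));
 *
 * Note that for spin mutexes on non-FULL kernels it always returns 1,
 * so a !mutex_owned() test carries no information there.
 */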

/*
 * mutex_owner:
 *
 *	Return the current owner of an adaptive mutex.  Used for
 *	priority inheritance.
 */
lwp_t *
mutex_owner(kmutex_t *mtx)
{

	MUTEX_ASSERT(mtx, MUTEX_ADAPTIVE_P(mtx));
	return (struct lwp *)MUTEX_OWNER(mtx->mtx_owner);
}

/*
 * mutex_tryenter:
 *
 *	Try to acquire the mutex; return non-zero if we did.
 */
int
mutex_tryenter(kmutex_t *mtx)
{
	uintptr_t curthread;

	/*
	 * Handle spin mutexes.
	 */
	if (MUTEX_SPIN_P(mtx)) {
		MUTEX_SPIN_SPLRAISE(mtx);
#ifdef FULL
		if (__cpu_simple_lock_try(&mtx->mtx_lock)) {
			MUTEX_WANTLOCK(mtx);
			MUTEX_LOCKED(mtx);
			return 1;
		}
		MUTEX_SPIN_SPLRESTORE(mtx);
#else
		MUTEX_WANTLOCK(mtx);
		MUTEX_LOCKED(mtx);
		return 1;
#endif
	} else {
		curthread = (uintptr_t)curlwp;
		MUTEX_ASSERT(mtx, curthread != 0);
		if (MUTEX_ACQUIRE(mtx, curthread)) {
			MUTEX_WANTLOCK(mtx);
			MUTEX_LOCKED(mtx);
			MUTEX_DASSERT(mtx,
			    MUTEX_OWNER(mtx->mtx_owner) == curthread);
			return 1;
		}
	}

	return 0;
}
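
/*
 * A common use of mutex_tryenter() is avoiding a lock-ordering
 * reversal: take the out-of-order lock opportunistically and back off
 * if it is contended.  Illustrative sketch (the names are examples,
 * not from this file):
 *
 *	if (!mutex_tryenter(&obj->lock)) {
 *		mutex_exit(&parent->lock);
 *		mutex_enter(&obj->lock);
 *		mutex_enter(&parent->lock);
 *	}
 *
 * The back-off path drops the already-held lock and re-acquires both
 * in the documented order.
 */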

#if defined(__HAVE_SPIN_MUTEX_STUBS) || defined(FULL)
/*
 * mutex_spin_retry:
 *
 *	Support routine for mutex_spin_enter().  Assumes that the caller
 *	has already raised the SPL, and adjusted counters.
 */
void
mutex_spin_retry(kmutex_t *mtx)
{
#ifdef MULTIPROCESSOR
	u_int count;
	LOCKSTAT_TIMER(spintime);
	LOCKSTAT_FLAG(lsflag);
#ifdef LOCKDEBUG
	u_int spins = 0;
#endif	/* LOCKDEBUG */

	MUTEX_WANTLOCK(mtx);

	LOCKSTAT_ENTER(lsflag);
	LOCKSTAT_START_TIMER(lsflag, spintime);
	count = SPINLOCK_BACKOFF_MIN;

	/*
	 * Spin, testing the lock word and doing exponential backoff
	 * to reduce cache line ping-ponging between CPUs.
	 */
	do {
		if (panicstr != NULL)
			break;
		while (__SIMPLELOCK_LOCKED_P(&mtx->mtx_lock)) {
			SPINLOCK_BACKOFF(count);
#ifdef LOCKDEBUG
			if (SPINLOCK_SPINOUT(spins))
				MUTEX_ABORT(mtx, "spinout");
#endif	/* LOCKDEBUG */
		}
	} while (!__cpu_simple_lock_try(&mtx->mtx_lock));

	LOCKSTAT_STOP_TIMER(lsflag, spintime);
	LOCKSTAT_EVENT(lsflag, mtx, LB_SPIN_MUTEX | LB_SPIN, 1, spintime);
	LOCKSTAT_EXIT(lsflag);

	MUTEX_LOCKED(mtx);
#else	/* MULTIPROCESSOR */
	MUTEX_ABORT(mtx, "locking against myself");
#endif	/* MULTIPROCESSOR */
}
#endif	/* defined(__HAVE_SPIN_MUTEX_STUBS) || defined(FULL) */
