
src/sys/kern/subr_lockdebug.c, Revision 1.8.6.5

/*	$NetBSD: subr_lockdebug.c,v 1.8.6.4 2007/11/06 19:25:33 joerg Exp $	*/

/*-
 * Copyright (c) 2006, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Basic lock debugging code shared among lock primitives.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: subr_lockdebug.c,v 1.8.6.4 2007/11/06 19:25:33 joerg Exp $");

#include "opt_multiprocessor.h"
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/lock.h>
#include <sys/lockdebug.h>
#include <sys/sleepq.h>
#include <sys/cpu.h>

#ifdef LOCKDEBUG

#define	LD_BATCH_SHIFT	9
#define	LD_BATCH	(1 << LD_BATCH_SHIFT)
#define	LD_BATCH_MASK	(LD_BATCH - 1)
#define	LD_MAX_LOCKS	1048576
#define	LD_SLOP		16

#define	LD_LOCKED	0x01
#define	LD_SLEEPER	0x02
#define	LD_MLOCKS	8
#define	LD_MLISTS	8192

#define	LD_NOID		(LD_MAX_LOCKS + 1)
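
/*
 * A lock ID encodes a batch index and a slot within the batch: with
 * LD_BATCH_SHIFT == 9, id 517 lives in batch (517 >> 9) == 1 at slot
 * (517 & LD_BATCH_MASK) == 5.  A minimal sketch of the decoding; the
 * helper is hypothetical and only illustrates the arithmetic used by
 * lockdebug_lookup() below.
 */
#if 0
static void
ld_id_decode(u_int id, u_int *batch, u_int *slot)
{

	*batch = id >> LD_BATCH_SHIFT;
	*slot = id & LD_BATCH_MASK;
}
#endif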

typedef union lockdebuglk {
	struct {
		__cpu_simple_lock_t	lku_lock;
		int			lku_oldspl;
	} ul;
	uint8_t	lk_pad[64];
} volatile __aligned(64) lockdebuglk_t;

#define	lk_lock		ul.lku_lock
#define	lk_oldspl	ul.lku_oldspl

typedef struct lockdebug {
	_TAILQ_ENTRY(struct lockdebug, volatile) ld_chain;
	_TAILQ_ENTRY(struct lockdebug, volatile) ld_achain;
	_TAILQ_ENTRY(struct lockdebug, volatile) ld_mchain;
	volatile void	*ld_lock;
	lockops_t	*ld_lockops;
	struct lwp	*ld_lwp;
	uintptr_t	ld_locked;
	uintptr_t	ld_unlocked;
	uintptr_t	ld_initaddr;
	u_int		ld_id;
	uint16_t	ld_shares;
	uint16_t	ld_cpu;
	uint8_t		ld_flags;
	uint8_t		ld_shwant;	/* advisory */
	uint8_t		ld_exwant;	/* advisory */
	uint8_t		ld_unused;
} volatile lockdebug_t;

typedef _TAILQ_HEAD(lockdebuglist, struct lockdebug, volatile) lockdebuglist_t;

lockdebuglk_t		ld_sleeper_lk;
lockdebuglk_t		ld_spinner_lk;
lockdebuglk_t		ld_free_lk;
lockdebuglk_t		ld_mem_lk[LD_MLOCKS];

lockdebuglist_t		ld_mem_list[LD_MLISTS];
lockdebuglist_t		ld_sleepers = TAILQ_HEAD_INITIALIZER(ld_sleepers);
lockdebuglist_t		ld_spinners = TAILQ_HEAD_INITIALIZER(ld_spinners);
lockdebuglist_t		ld_free = TAILQ_HEAD_INITIALIZER(ld_free);
lockdebuglist_t		ld_all = TAILQ_HEAD_INITIALIZER(ld_all);
int			ld_nfree;
int			ld_freeptr;
int			ld_recurse;
bool			ld_nomore;
lockdebug_t		*ld_table[LD_MAX_LOCKS / LD_BATCH];

lockdebug_t		ld_prime[LD_BATCH];

static void	lockdebug_abort1(lockdebug_t *, lockdebuglk_t *,
				 const char *, const char *, bool);
static void	lockdebug_more(void);
static void	lockdebug_init(void);

/*
 * lockdebug_lock:
 *
 *	Acquire an internal lockdebug lock, blocking interrupts and
 *	recording the previous SPL inside the lock so that the matching
 *	unlock can restore it.
 */
static inline void
lockdebug_lock(lockdebuglk_t *lk)
{
	int s;

	s = splhigh();
	__cpu_simple_lock(&lk->lk_lock);
	lk->lk_oldspl = s;
}

/*
 * lockdebug_unlock:
 *
 *	Release an internal lockdebug lock and restore the SPL saved
 *	by the matching lockdebug_lock().
 */
static inline void
lockdebug_unlock(lockdebuglk_t *lk)
{
	int s;

	s = lk->lk_oldspl;
	__cpu_simple_unlock(&lk->lk_lock);
	splx(s);
}
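
/*
 * Critical sections over lockdebug state are bracketed with the pair
 * above; the saved SPL lives inside the lock itself.  Illustrative
 * sketch of the pattern (the helper is hypothetical):
 */
#if 0
static int
example_read_nfree(void)
{
	int n;

	lockdebug_lock(&ld_free_lk);
	n = ld_nfree;
	lockdebug_unlock(&ld_free_lk);

	return n;
}
#endif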

/*
 * lockdebug_mhash:
 *
 *	Hash a lock address to its memory-check lock and list, and
 *	return with the lock held.  The hash keys on the page number,
 *	so all locks within one page fall on the same list.
 */
static inline void
lockdebug_mhash(volatile void *addr, lockdebuglk_t **lk, lockdebuglist_t **head)
{
	u_int hash;

	hash = (uintptr_t)addr >> PGSHIFT;
	*lk = &ld_mem_lk[hash & (LD_MLOCKS - 1)];
	*head = &ld_mem_list[hash & (LD_MLISTS - 1)];
	lockdebug_lock(*lk);
}
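
/*
 * A minimal sketch of walking a hash chain under the returned lock;
 * the lookup helper is hypothetical and mirrors the disabled callers
 * further down in this file.
 */
#if 0
static lockdebug_t *
example_mfind(volatile void *addr)
{
	lockdebuglist_t *head;
	lockdebuglk_t *lk;
	lockdebug_t *ld;

	lockdebug_mhash(addr, &lk, &head);	/* returns with *lk held */
	TAILQ_FOREACH(ld, head, ld_mchain) {
		if (ld->ld_lock == addr)
			break;
	}
	lockdebug_unlock(lk);

	return ld;	/* NULL if not found */
}
#endif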

/*
 * lockdebug_lookup:
 *
 *	Find a lockdebug structure by ID and return it locked.
 */
static inline lockdebug_t *
lockdebug_lookup(u_int id, lockdebuglk_t **lk)
{
	lockdebug_t *base, *ld;

	if (id == LD_NOID)
		return NULL;

	if (id == 0 || id >= LD_MAX_LOCKS)
		panic("lockdebug_lookup: uninitialized lock (1, id=%d)", id);

	base = ld_table[id >> LD_BATCH_SHIFT];
	ld = base + (id & LD_BATCH_MASK);

	if (base == NULL || ld->ld_lock == NULL || ld->ld_id != id)
		panic("lockdebug_lookup: uninitialized lock (2, id=%d)", id);

	if ((ld->ld_flags & LD_SLEEPER) != 0)
		*lk = &ld_sleeper_lk;
	else
		*lk = &ld_spinner_lk;

	lockdebug_lock(*lk);
	return ld;
}
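
/*
 * Callers receive the structure with the covering lock held and must
 * drop it with lockdebug_unlock() when done.  Minimal sketch (the
 * helper is hypothetical):
 */
#if 0
static bool
example_is_locked(u_int id)
{
	lockdebuglk_t *lk;
	lockdebug_t *ld;
	bool locked;

	if ((ld = lockdebug_lookup(id, &lk)) == NULL)
		return false;
	locked = (ld->ld_flags & LD_LOCKED) != 0;
	lockdebug_unlock(lk);

	return locked;
}
#endif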

/*
 * lockdebug_init:
 *
 *	Initialize the lockdebug system.  Allocate an initial pool of
 *	lockdebug structures before the VM system is up and running.
 */
static void
lockdebug_init(void)
{
	lockdebug_t *ld;
	int i;

	__cpu_simple_lock_init(&ld_sleeper_lk.lk_lock);
	__cpu_simple_lock_init(&ld_spinner_lk.lk_lock);
	__cpu_simple_lock_init(&ld_free_lk.lk_lock);

	ld = ld_prime;
	ld_table[0] = ld;
	for (i = 1, ld++; i < LD_BATCH; i++, ld++) {
		ld->ld_id = i;
		TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
		TAILQ_INSERT_TAIL(&ld_all, ld, ld_achain);
	}
	ld_freeptr = 1;
	ld_nfree = LD_BATCH - 1;

	for (i = 0; i < LD_MLOCKS; i++)
		__cpu_simple_lock_init(&ld_mem_lk[i].lk_lock);
	for (i = 0; i < LD_MLISTS; i++)
		TAILQ_INIT(&ld_mem_list[i]);
}

/*
 * lockdebug_alloc:
 *
 *	A lock is being initialized, so allocate an associated debug
 *	structure.
 */
u_int
lockdebug_alloc(volatile void *lock, lockops_t *lo, uintptr_t initaddr)
{
#if 0
	lockdebuglist_t *head;
	lockdebuglk_t *lk;
#endif
	struct cpu_info *ci;
	lockdebug_t *ld;

	if (lo == NULL || panicstr != NULL)
		return LD_NOID;
	if (ld_freeptr == 0)
		lockdebug_init();

	ci = curcpu();

	/*
	 * Pinch a new debug structure.  We may recurse because we call
	 * kmem_alloc(), which may need to initialize new locks somewhere
	 * down the path.  If not recursing, we try to maintain at least
	 * LD_SLOP structures free, which should hopefully be enough to
	 * satisfy kmem_alloc().  If we can't provide a structure, not to
	 * worry: we'll just mark the lock as not having an ID.
	 */
	lockdebug_lock(&ld_free_lk);
	ci->ci_lkdebug_recurse++;

	if (TAILQ_EMPTY(&ld_free)) {
		if (ci->ci_lkdebug_recurse > 1 || ld_nomore) {
			ci->ci_lkdebug_recurse--;
			lockdebug_unlock(&ld_free_lk);
			return LD_NOID;
		}
		lockdebug_more();
	} else if (ci->ci_lkdebug_recurse == 1 && ld_nfree < LD_SLOP)
		lockdebug_more();

	if ((ld = TAILQ_FIRST(&ld_free)) == NULL) {
		lockdebug_unlock(&ld_free_lk);
		return LD_NOID;
	}

	TAILQ_REMOVE(&ld_free, ld, ld_chain);
	ld_nfree--;

	ci->ci_lkdebug_recurse--;
	lockdebug_unlock(&ld_free_lk);

	if (ld->ld_lock != NULL)
		panic("lockdebug_alloc: corrupt table");

	if (lo->lo_sleeplock)
		lockdebug_lock(&ld_sleeper_lk);
	else
		lockdebug_lock(&ld_spinner_lk);

	/* Initialise the structure. */
	ld->ld_lock = lock;
	ld->ld_lockops = lo;
	ld->ld_locked = 0;
	ld->ld_unlocked = 0;
	ld->ld_lwp = NULL;
	ld->ld_initaddr = initaddr;

	if (lo->lo_sleeplock) {
		ld->ld_flags = LD_SLEEPER;
		lockdebug_unlock(&ld_sleeper_lk);
	} else {
		ld->ld_flags = 0;
		lockdebug_unlock(&ld_spinner_lk);
	}

#if 0
	/* Insert into address hash. */
	lockdebug_mhash(lock, &lk, &head);
	TAILQ_INSERT_HEAD(head, ld, ld_mchain);
	lockdebug_unlock(lk);
#endif

	return ld->ld_id;
}
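
/*
 * A lock primitive typically calls lockdebug_alloc() from its init
 * routine, records the returned ID beside the lock, and quotes that
 * ID to the other lockdebug_* hooks.  Hypothetical sketch; the
 * "example" primitive and its lockops are illustrative only.
 */
#if 0
struct example_lock {
	__cpu_simple_lock_t	el_lock;
	u_int			el_debugid;
};

static lockops_t example_lockops = {
	.lo_name = "example",
	.lo_sleeplock = 0,	/* a spin lock */
	.lo_dump = NULL,
};

static void
example_lock_init(struct example_lock *el)
{

	__cpu_simple_lock_init(&el->el_lock);
	el->el_debugid = lockdebug_alloc(el, &example_lockops,
	    (uintptr_t)__builtin_return_address(0));
}
#endif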

/*
 * lockdebug_free:
 *
 *	A lock is being destroyed, so release debugging resources.
 */
void
lockdebug_free(volatile void *lock, u_int id)
{
#if 0
	lockdebuglist_t *head;
#endif
	lockdebug_t *ld;
	lockdebuglk_t *lk;

	if (panicstr != NULL)
		return;

	if ((ld = lockdebug_lookup(id, &lk)) == NULL)
		return;

	if (ld->ld_lock != lock)
		lockdebug_abort1(ld, lk, __func__,
		    "destroying uninitialized lock", true);
	if ((ld->ld_flags & LD_LOCKED) != 0 || ld->ld_shares != 0)
		lockdebug_abort1(ld, lk, __func__, "is locked", true);

	ld->ld_lock = NULL;

	lockdebug_unlock(lk);

	lockdebug_lock(&ld_free_lk);
	TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
	ld_nfree++;
	lockdebug_unlock(&ld_free_lk);

#if 0
	/* Remove from address hash. */
	lockdebug_mhash(lock, &lk, &head);
	TAILQ_REMOVE(head, ld, ld_mchain);
	lockdebug_unlock(lk);
#endif
}
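
/*
 * The matching destroy path returns the record to the free list.
 * Hypothetical sketch, continuing the "example" primitive above:
 */
#if 0
static void
example_lock_destroy(struct example_lock *el)
{

	lockdebug_free(el, el->el_debugid);
	el->el_debugid = LD_NOID;
}
#endif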

/*
 * lockdebug_more:
 *
 *	Allocate a batch of debug structures and add to the free list.
 *	Must be called with ld_free_lk held.
 */
static void
lockdebug_more(void)
{
	lockdebug_t *ld;
	void *block;
	int i, base, m;

	while (ld_nfree < LD_SLOP) {
		lockdebug_unlock(&ld_free_lk);
		block = kmem_zalloc(LD_BATCH * sizeof(lockdebug_t), KM_SLEEP);
		lockdebug_lock(&ld_free_lk);

		if (block == NULL)
			return;

		if (ld_nfree > LD_SLOP) {
			/* Somebody beat us to it. */
			lockdebug_unlock(&ld_free_lk);
			kmem_free(block, LD_BATCH * sizeof(lockdebug_t));
			lockdebug_lock(&ld_free_lk);
			continue;
		}

		base = ld_freeptr;
		ld_nfree += LD_BATCH;
		ld = block;
		base <<= LD_BATCH_SHIFT;
		m = min(LD_MAX_LOCKS, base + LD_BATCH);

		if (m == LD_MAX_LOCKS)
			ld_nomore = true;

		for (i = base; i < m; i++, ld++) {
			ld->ld_id = i;
			TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
			TAILQ_INSERT_TAIL(&ld_all, ld, ld_achain);
		}

		mb_write();
		ld_table[ld_freeptr++] = block;
	}
}

/*
 * lockdebug_wantlock:
 *
 *	Process the preamble to a lock acquire.
 */
void
lockdebug_wantlock(u_int id, uintptr_t where, int shared)
{
	struct lwp *l = curlwp;
	lockdebuglk_t *lk;
	lockdebug_t *ld;
	bool recurse;

	recurse = false;

	if (panicstr != NULL)
		return;

	if ((ld = lockdebug_lookup(id, &lk)) == NULL)
		return;

	if ((ld->ld_flags & LD_LOCKED) != 0) {
		if ((ld->ld_flags & LD_SLEEPER) != 0) {
			if (ld->ld_lwp == l)
				recurse = true;
		} else if (ld->ld_cpu == (uint16_t)cpu_number())
			recurse = true;
	}

#ifdef notyet
	if (cpu_intr_p()) {
		if ((ld->ld_flags & LD_SLEEPER) != 0)
			lockdebug_abort1(ld, lk, __func__,
			    "acquiring sleep lock from interrupt context",
			    true);
	}
#endif

	if (shared)
		ld->ld_shwant++;
	else
		ld->ld_exwant++;

	if (recurse)
		lockdebug_abort1(ld, lk, __func__, "locking against myself",
		    true);

	lockdebug_unlock(lk);
}

/*
 * lockdebug_locked:
 *
 *	Process a lock acquire operation.
 */
void
lockdebug_locked(u_int id, uintptr_t where, int shared)
{
	struct lwp *l = curlwp;
	lockdebuglk_t *lk;
	lockdebug_t *ld;

	if (panicstr != NULL)
		return;

	if ((ld = lockdebug_lookup(id, &lk)) == NULL)
		return;

	if (shared) {
		l->l_shlocks++;
		ld->ld_shares++;
		ld->ld_shwant--;
	} else {
		if ((ld->ld_flags & LD_LOCKED) != 0)
			lockdebug_abort1(ld, lk, __func__,
			    "already locked", true);

		ld->ld_flags |= LD_LOCKED;
		ld->ld_locked = where;
		ld->ld_cpu = (uint16_t)cpu_number();
		ld->ld_lwp = l;
		ld->ld_exwant--;

		if ((ld->ld_flags & LD_SLEEPER) != 0) {
			l->l_exlocks++;
			TAILQ_INSERT_TAIL(&ld_sleepers, ld, ld_chain);
		} else {
			curcpu()->ci_spin_locks2++;
			TAILQ_INSERT_TAIL(&ld_spinners, ld, ld_chain);
		}
	}

	lockdebug_unlock(lk);
}

/*
 * lockdebug_unlocked:
 *
 *	Process a lock release operation.
 */
void
lockdebug_unlocked(u_int id, uintptr_t where, int shared)
{
	struct lwp *l = curlwp;
	lockdebuglk_t *lk;
	lockdebug_t *ld;

	if (panicstr != NULL)
		return;

	if ((ld = lockdebug_lookup(id, &lk)) == NULL)
		return;

	if (shared) {
		if (l->l_shlocks == 0)
			lockdebug_abort1(ld, lk, __func__,
			    "no shared locks held by LWP", true);
		if (ld->ld_shares == 0)
			lockdebug_abort1(ld, lk, __func__,
			    "no shared holds on this lock", true);
		l->l_shlocks--;
		ld->ld_shares--;
	} else {
		if ((ld->ld_flags & LD_LOCKED) == 0)
			lockdebug_abort1(ld, lk, __func__, "not locked",
			    true);

		if ((ld->ld_flags & LD_SLEEPER) != 0) {
			if (ld->ld_lwp != curlwp)
				lockdebug_abort1(ld, lk, __func__,
				    "not held by current LWP", true);
			ld->ld_flags &= ~LD_LOCKED;
			ld->ld_unlocked = where;
			ld->ld_lwp = NULL;
			curlwp->l_exlocks--;
			TAILQ_REMOVE(&ld_sleepers, ld, ld_chain);
		} else {
			if (ld->ld_cpu != (uint16_t)cpu_number())
				lockdebug_abort1(ld, lk, __func__,
				    "not held by current CPU", true);
			ld->ld_flags &= ~LD_LOCKED;
			ld->ld_unlocked = where;
			ld->ld_lwp = NULL;
			curcpu()->ci_spin_locks2--;
			TAILQ_REMOVE(&ld_spinners, ld, ld_chain);
		}
	}

	lockdebug_unlock(lk);
}
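
/*
 * The hooks above are called in a fixed order around a primitive's own
 * acquire/release logic: wantlock before spinning or sleeping, locked
 * once the lock is owned, unlocked just before release.  Hypothetical
 * sketch for an exclusive acquire and release of the "example"
 * primitive introduced earlier:
 */
#if 0
static void
example_lock_acquire(struct example_lock *el)
{
	uintptr_t where = (uintptr_t)__builtin_return_address(0);

	lockdebug_wantlock(el->el_debugid, where, 0);
	__cpu_simple_lock(&el->el_lock);
	lockdebug_locked(el->el_debugid, where, 0);
}

static void
example_lock_release(struct example_lock *el)
{

	lockdebug_unlocked(el->el_debugid,
	    (uintptr_t)__builtin_return_address(0), 0);
	__cpu_simple_unlock(&el->el_lock);
}
#endif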

/*
 * lockdebug_barrier:
 *
 *	Panic if we hold a spin lock other than the one specified, and
 *	optionally, if we hold any sleep locks.
 */
void
lockdebug_barrier(volatile void *spinlock, int slplocks)
{
	struct lwp *l = curlwp;
	lockdebug_t *ld;
	uint16_t cpuno;

	if (panicstr != NULL)
		return;

	if (curcpu()->ci_spin_locks2 != 0) {
		cpuno = (uint16_t)cpu_number();

		lockdebug_lock(&ld_spinner_lk);
		TAILQ_FOREACH(ld, &ld_spinners, ld_chain) {
			if (ld->ld_lock == spinlock) {
				if (ld->ld_cpu != cpuno)
					lockdebug_abort1(ld, &ld_spinner_lk,
					    __func__,
					    "not held by current CPU", true);
				continue;
			}
			if (ld->ld_cpu == cpuno && (l->l_pflag & LP_INTR) == 0)
				lockdebug_abort1(ld, &ld_spinner_lk,
				    __func__, "spin lock held", true);
		}
		lockdebug_unlock(&ld_spinner_lk);
	}

	if (!slplocks) {
		if (l->l_exlocks != 0) {
			lockdebug_lock(&ld_sleeper_lk);
			TAILQ_FOREACH(ld, &ld_sleepers, ld_chain) {
				if (ld->ld_lwp == l)
					lockdebug_abort1(ld, &ld_sleeper_lk,
					    __func__, "sleep lock held", true);
			}
			lockdebug_unlock(&ld_sleeper_lk);
		}
		if (l->l_shlocks != 0)
			panic("lockdebug_barrier: holding %d shared locks",
			    l->l_shlocks);
	}
}
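
/*
 * lockdebug_barrier() is the kind of assertion made at a point where
 * the caller must not hold spin locks, for example before voluntarily
 * sleeping; passing NULL checks against no specific spin lock.
 * Hypothetical sketch:
 */
#if 0
static void
example_pre_sleep_check(void)
{

	/* No spin locks may be held; sleep locks are permitted. */
	lockdebug_barrier(NULL, 1);
}
#endif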

/*
 * lockdebug_mem_check:
 *
 *	Check for in-use locks within a memory region that is
 *	being freed.  We only check for active locks within the
 *	first page of the allocation.
 */
void
lockdebug_mem_check(const char *func, void *base, size_t sz)
{
#if 0
	lockdebuglist_t *head;
	lockdebuglk_t *lk;
	lockdebug_t *ld;
	uintptr_t sa, ea, la;

	sa = (uintptr_t)base;
	ea = sa + sz;

	lockdebug_mhash(base, &lk, &head);
	TAILQ_FOREACH(ld, head, ld_mchain) {
		la = (uintptr_t)ld->ld_lock;
		if (la >= sa && la < ea) {
			lockdebug_abort1(ld, lk, func,
			    "allocation contains active lock", !cold);
			return;
		}
	}
	lockdebug_unlock(lk);
#endif
}

/*
 * lockdebug_dump:
 *
 *	Dump information about a lock on panic, or for DDB.
 */
static void
lockdebug_dump(lockdebug_t *ld, void (*pr)(const char *, ...))
{
	int sleeper = (ld->ld_flags & LD_SLEEPER);

	(*pr)(
	    "lock address : %#018lx type     : %18s\n"
	    "shared holds : %18u exclusive: %18u\n"
	    "shares wanted: %18u exclusive: %18u\n"
	    "current cpu  : %18u last held: %18u\n"
	    "current lwp  : %#018lx last held: %#018lx\n"
	    "last locked  : %#018lx unlocked : %#018lx\n"
	    "initialized  : %#018lx\n",
	    (long)ld->ld_lock, (sleeper ? "sleep/adaptive" : "spin"),
	    (unsigned)ld->ld_shares, ((ld->ld_flags & LD_LOCKED) != 0),
	    (unsigned)ld->ld_shwant, (unsigned)ld->ld_exwant,
	    (unsigned)cpu_number(), (unsigned)ld->ld_cpu,
	    (long)curlwp, (long)ld->ld_lwp,
	    (long)ld->ld_locked, (long)ld->ld_unlocked,
	    (long)ld->ld_initaddr);

	if (ld->ld_lockops->lo_dump != NULL)
		(*ld->ld_lockops->lo_dump)(ld->ld_lock);

	if (sleeper) {
		(*pr)("\n");
		turnstile_print(ld->ld_lock, pr);
	}
}

/*
 * lockdebug_abort1:
 *
 *	An error has been trapped - dump lock info and optionally panic.
 */
static void
lockdebug_abort1(lockdebug_t *ld, lockdebuglk_t *lk, const char *func,
		 const char *msg, bool dopanic)
{

	printf_nolog("%s error: %s: %s\n\n", ld->ld_lockops->lo_name,
	    func, msg);
	lockdebug_dump(ld, printf_nolog);
	lockdebug_unlock(lk);
	printf_nolog("\n");
	if (dopanic)
		panic("LOCKDEBUG");
}

#endif /* LOCKDEBUG */

/*
 * lockdebug_lock_print:
 *
 *	Handle the DDB 'show lock' command.
 */
#ifdef DDB
void
lockdebug_lock_print(void *addr, void (*pr)(const char *, ...))
{
#ifdef LOCKDEBUG
	lockdebug_t *ld;

	TAILQ_FOREACH(ld, &ld_all, ld_achain) {
		if (ld->ld_lock == addr) {
			lockdebug_dump(ld, pr);
			return;
		}
	}
	(*pr)("Sorry, no record of a lock with address %p found.\n", addr);
#else
	(*pr)("Sorry, kernel not built with the LOCKDEBUG option.\n");
#endif /* LOCKDEBUG */
}
#endif /* DDB */
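
/*
 * From DDB this is reached with "show lock <address>", e.g.
 *
 *	db> show lock 0xc0a8f000
 *
 * (the address shown is hypothetical).
 */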

/*
 * lockdebug_abort:
 *
 *	An error has been trapped - dump lock info and call panic().
 */
void
lockdebug_abort(u_int id, volatile void *lock, lockops_t *ops,
		const char *func, const char *msg)
{
#ifdef LOCKDEBUG
	lockdebug_t *ld;
	lockdebuglk_t *lk;

	if ((ld = lockdebug_lookup(id, &lk)) != NULL) {
		lockdebug_abort1(ld, lk, func, msg, true);
		/* NOTREACHED */
	}
#endif /* LOCKDEBUG */

	printf_nolog("%s error: %s: %s\n\n"
	    "lock address : %#018lx\n"
	    "current cpu  : %18d\n"
	    "current lwp  : %#018lx\n",
	    ops->lo_name, func, msg, (long)lock, (int)cpu_number(),
	    (long)curlwp);

	if (ops->lo_dump != NULL)
		(*ops->lo_dump)(lock);

	printf_nolog("\n");
	panic("lock error");
}
