Annotation of src/sys/kern/subr_lockdebug.c, Revision 1.53
1.53 ! riastrad 1: /* $NetBSD: subr_lockdebug.c,v 1.52 2014/11/24 02:36:31 christos Exp $ */
1.2 ad 2:
3: /*-
1.28 ad 4: * Copyright (c) 2006, 2007, 2008 The NetBSD Foundation, Inc.
1.2 ad 5: * All rights reserved.
6: *
7: * This code is derived from software contributed to The NetBSD Foundation
8: * by Andrew Doran.
9: *
10: * Redistribution and use in source and binary forms, with or without
11: * modification, are permitted provided that the following conditions
12: * are met:
13: * 1. Redistributions of source code must retain the above copyright
14: * notice, this list of conditions and the following disclaimer.
15: * 2. Redistributions in binary form must reproduce the above copyright
16: * notice, this list of conditions and the following disclaimer in the
17: * documentation and/or other materials provided with the distribution.
18: *
19: * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20: * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21: * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22: * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23: * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24: * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25: * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26: * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27: * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28: * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29: * POSSIBILITY OF SUCH DAMAGE.
30: */
31:
32: /*
1.11 ad 33: * Basic lock debugging code shared among lock primitives.
1.2 ad 34: */
35:
1.9 dsl 36: #include <sys/cdefs.h>
1.53 ! riastrad 37: __KERNEL_RCSID(0, "$NetBSD: subr_lockdebug.c,v 1.52 2014/11/24 02:36:31 christos Exp $");
1.9 dsl 38:
1.2 ad 39: #include "opt_ddb.h"
40:
41: #include <sys/param.h>
42: #include <sys/proc.h>
43: #include <sys/systm.h>
1.10 ad 44: #include <sys/kernel.h>
1.2 ad 45: #include <sys/kmem.h>
46: #include <sys/lockdebug.h>
47: #include <sys/sleepq.h>
1.10 ad 48: #include <sys/cpu.h>
1.22 ad 49: #include <sys/atomic.h>
1.26 ad 50: #include <sys/lock.h>
1.43 matt 51: #include <sys/rbtree.h>
1.16 yamt 52:
1.25 ad 53: #include <machine/lock.h>
54:
1.28 ad 55: unsigned int ld_panic;
56:
1.2 ad 57: #ifdef LOCKDEBUG
58:
59: #define LD_BATCH_SHIFT 9
60: #define LD_BATCH (1 << LD_BATCH_SHIFT)
61: #define LD_BATCH_MASK (LD_BATCH - 1)
62: #define LD_MAX_LOCKS 1048576
63: #define LD_SLOP 16
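
/*
 * Editorial example (not part of the original source): the constants
 * above are related powers of two -- each allocation batch holds
 * LD_BATCH == 1 << 9 == 512 structures, and LD_MAX_LOCKS (1 << 20) is
 * an exact multiple of the batch size.  A compile-time sketch of those
 * invariants, assuming __CTASSERT() from NetBSD's <sys/cdefs.h>:
 */
__CTASSERT(LD_BATCH == (1 << LD_BATCH_SHIFT));
__CTASSERT(LD_BATCH_MASK == LD_BATCH - 1);
__CTASSERT((LD_MAX_LOCKS & LD_BATCH_MASK) == 0);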
64:
65: #define LD_LOCKED 0x01
66: #define LD_SLEEPER 0x02
67:
1.23 ad 68: #define LD_WRITE_LOCK 0x80000000
69:
1.2 ad 70: typedef struct lockdebug {
1.42 rmind 71: struct rb_node ld_rb_node;
1.34 ad 72: __cpu_simple_lock_t ld_spinlock;
1.2 ad 73: _TAILQ_ENTRY(struct lockdebug, volatile) ld_chain;
74: _TAILQ_ENTRY(struct lockdebug, volatile) ld_achain;
75: volatile void *ld_lock;
76: lockops_t *ld_lockops;
77: struct lwp *ld_lwp;
78: uintptr_t ld_locked;
79: uintptr_t ld_unlocked;
1.10 ad 80: uintptr_t ld_initaddr;
1.2 ad 81: uint16_t ld_shares;
82: uint16_t ld_cpu;
83: uint8_t ld_flags;
84: uint8_t ld_shwant; /* advisory */
85: uint8_t ld_exwant; /* advisory */
86: uint8_t ld_unused;
87: } volatile lockdebug_t;
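
/*
 * Editorial note: one lockdebug_t shadows each tracked lock.  It is
 * found through a red-black tree keyed on the lock address (ld_rb_tree
 * below); ld_locked and ld_unlocked record the return addresses of the
 * most recent acquire and release, and ld_initaddr the address that
 * initialized the lock, all for display by lockdebug_dump().
 */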
88:
89: typedef _TAILQ_HEAD(lockdebuglist, struct lockdebug, volatile) lockdebuglist_t;
90:
1.34 ad 91: __cpu_simple_lock_t ld_mod_lk;
1.13 matt 92: lockdebuglist_t ld_free = TAILQ_HEAD_INITIALIZER(ld_free);
93: lockdebuglist_t ld_all = TAILQ_HEAD_INITIALIZER(ld_all);
1.2 ad 94: int ld_nfree;
95: int ld_freeptr;
96: int ld_recurse;
1.5 ad 97: bool ld_nomore;
1.2 ad 98: lockdebug_t ld_prime[LD_BATCH];
99:
1.34 ad 100: static void lockdebug_abort1(lockdebug_t *, int, const char *,
101: const char *, bool);
102: static int lockdebug_more(int);
1.5 ad 103: static void lockdebug_init(void);
1.52 christos 104: static void lockdebug_dump(lockdebug_t *, void (*)(const char *, ...)
105: __printflike(1, 2));
1.2 ad 106:
1.16 yamt 107: static signed int
1.42 rmind 108: ld_rbto_compare_nodes(void *ctx, const void *n1, const void *n2)
1.16 yamt 109: {
1.42 rmind 110: const lockdebug_t *ld1 = n1;
111: const lockdebug_t *ld2 = n2;
1.20 yamt 112: const uintptr_t a = (uintptr_t)ld1->ld_lock;
113: const uintptr_t b = (uintptr_t)ld2->ld_lock;
114:
115: if (a < b)
1.42 rmind 116: return -1;
117: if (a > b)
1.20 yamt 118: return 1;
1.16 yamt 119: return 0;
120: }
121:
122: static signed int
1.42 rmind 123: ld_rbto_compare_key(void *ctx, const void *n, const void *key)
1.16 yamt 124: {
1.42 rmind 125: const lockdebug_t *ld = n;
1.20 yamt 126: const uintptr_t a = (uintptr_t)ld->ld_lock;
127: const uintptr_t b = (uintptr_t)key;
128:
129: if (a < b)
1.42 rmind 130: return -1;
131: if (a > b)
1.20 yamt 132: return 1;
1.16 yamt 133: return 0;
134: }
135:
1.42 rmind 136: static rb_tree_t ld_rb_tree;
1.16 yamt 137:
1.42 rmind 138: static const rb_tree_ops_t ld_rb_tree_ops = {
1.37 matt 139: .rbto_compare_nodes = ld_rbto_compare_nodes,
140: .rbto_compare_key = ld_rbto_compare_key,
1.42 rmind 141: .rbto_node_offset = offsetof(lockdebug_t, ld_rb_node),
142: .rbto_context = NULL
1.16 yamt 143: };
144:
1.34 ad 145: static inline lockdebug_t *
146: lockdebug_lookup1(volatile void *lock)
1.23 ad 147: {
1.34 ad 148: lockdebug_t *ld;
149: struct cpu_info *ci;
1.23 ad 150:
1.34 ad 151: ci = curcpu();
152: __cpu_simple_lock(&ci->ci_data.cpu_ld_lock);
153: ld = (lockdebug_t *)rb_tree_find_node(&ld_rb_tree, __UNVOLATILE(lock));
154: __cpu_simple_unlock(&ci->ci_data.cpu_ld_lock);
155: if (ld == NULL) {
156: return NULL;
157: }
158: __cpu_simple_lock(&ld->ld_spinlock);
1.23 ad 159:
1.34 ad 160: return ld;
1.2 ad 161: }
162:
1.23 ad 163: static void
1.34 ad 164: lockdebug_lock_cpus(void)
1.2 ad 165: {
1.34 ad 166: CPU_INFO_ITERATOR cii;
167: struct cpu_info *ci;
1.2 ad 168:
1.34 ad 169: for (CPU_INFO_FOREACH(cii, ci)) {
170: __cpu_simple_lock(&ci->ci_data.cpu_ld_lock);
171: }
1.23 ad 172: }
173:
174: static void
1.34 ad 175: lockdebug_unlock_cpus(void)
1.23 ad 176: {
1.34 ad 177: CPU_INFO_ITERATOR cii;
178: struct cpu_info *ci;
1.23 ad 179:
1.34 ad 180: for (CPU_INFO_FOREACH(cii, ci)) {
181: __cpu_simple_unlock(&ci->ci_data.cpu_ld_lock);
182: }
1.2 ad 183: }
184:
185: /*
1.19 yamt 186: * lockdebug_lookup:
187: *
188: * Find a lockdebug structure by a pointer to a lock and return it locked.
189: */
190: static inline lockdebug_t *
1.38 rafal 191: lockdebug_lookup(volatile void *lock, uintptr_t where)
1.19 yamt 192: {
193: lockdebug_t *ld;
194:
1.34 ad 195: ld = lockdebug_lookup1(lock);
1.42 rmind 196: if (ld == NULL) {
197: panic("lockdebug_lookup: uninitialized lock "
198: "(lock=%p, from=%08"PRIxPTR")", lock, where);
199: }
1.19 yamt 200: return ld;
201: }
202:
203: /*
1.2 ad 204: * lockdebug_init:
205: *
206: * Initialize the lockdebug system. Allocate an initial pool of
207: * lockdebug structures before the VM system is up and running.
208: */
1.5 ad 209: static void
1.2 ad 210: lockdebug_init(void)
211: {
212: lockdebug_t *ld;
213: int i;
214:
1.34 ad 215: TAILQ_INIT(&curcpu()->ci_data.cpu_ld_locks);
216: TAILQ_INIT(&curlwp->l_ld_locks);
217: __cpu_simple_lock_init(&curcpu()->ci_data.cpu_ld_lock);
218: __cpu_simple_lock_init(&ld_mod_lk);
1.15 matt 219:
1.16 yamt 220: rb_tree_init(&ld_rb_tree, &ld_rb_tree_ops);
221:
1.2 ad 222: ld = ld_prime;
223: for (i = 1, ld++; i < LD_BATCH; i++, ld++) {
1.34 ad 224: __cpu_simple_lock_init(&ld->ld_spinlock);
1.2 ad 225: TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
226: TAILQ_INSERT_TAIL(&ld_all, ld, ld_achain);
227: }
228: ld_freeptr = 1;
229: ld_nfree = LD_BATCH - 1;
230: }
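
/*
 * Editorial note: the loop above starts at ld_prime[1], so slot 0 of
 * the static batch is never put on the free list; hence ld_freeptr == 1
 * and ld_nfree == LD_BATCH - 1.  A nonzero ld_freeptr also serves as
 * the "already initialized" flag tested by lockdebug_alloc().
 */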
231:
232: /*
233: * lockdebug_alloc:
234: *
235: * A lock is being initialized, so allocate an associated debug
236: * structure.
237: */
1.16 yamt 238: bool
1.10 ad 239: lockdebug_alloc(volatile void *lock, lockops_t *lo, uintptr_t initaddr)
1.2 ad 240: {
241: struct cpu_info *ci;
242: lockdebug_t *ld;
1.34 ad 243: int s;
1.2 ad 244:
1.27 ad 245: if (lo == NULL || panicstr != NULL || ld_panic)
1.16 yamt 246: return false;
1.5 ad 247: if (ld_freeptr == 0)
248: lockdebug_init();
1.2 ad 249:
1.34 ad 250: s = splhigh();
251: __cpu_simple_lock(&ld_mod_lk);
252: if ((ld = lockdebug_lookup1(lock)) != NULL) {
253: __cpu_simple_unlock(&ld_mod_lk);
254: lockdebug_abort1(ld, s, __func__, "already initialized", true);
1.27 ad 255: return false;
1.19 yamt 256: }
257:
1.2 ad 258: /*
259: * Pinch a new debug structure. We may recurse because we call
260: * kmem_alloc(), which may need to initialize new locks somewhere
1.7 skrll 261: * down the path. If not recursing, we try to maintain at least
1.2 ad 262: * LD_SLOP structures free, which should hopefully be enough to
263: * satisfy kmem_alloc(). If we can't provide a structure, not to
 264: 	 * worry: we'll just run the lock without a debug structure.
265: */
1.23 ad 266: ci = curcpu();
1.2 ad 267: ci->ci_lkdebug_recurse++;
268: if (TAILQ_EMPTY(&ld_free)) {
1.5 ad 269: if (ci->ci_lkdebug_recurse > 1 || ld_nomore) {
1.2 ad 270: ci->ci_lkdebug_recurse--;
1.34 ad 271: __cpu_simple_unlock(&ld_mod_lk);
272: splx(s);
1.16 yamt 273: return false;
1.2 ad 274: }
1.34 ad 275: s = lockdebug_more(s);
276: } else if (ci->ci_lkdebug_recurse == 1 && ld_nfree < LD_SLOP) {
277: s = lockdebug_more(s);
278: }
1.2 ad 279: if ((ld = TAILQ_FIRST(&ld_free)) == NULL) {
1.34 ad 280: __cpu_simple_unlock(&ld_mod_lk);
281: splx(s);
1.16 yamt 282: return false;
1.2 ad 283: }
284: TAILQ_REMOVE(&ld_free, ld, ld_chain);
285: ld_nfree--;
286: ci->ci_lkdebug_recurse--;
287:
1.34 ad 288: if (ld->ld_lock != NULL) {
1.51 matt 289: panic("lockdebug_alloc: corrupt table ld %p", ld);
1.34 ad 290: }
1.2 ad 291:
292: /* Initialise the structure. */
293: ld->ld_lock = lock;
294: ld->ld_lockops = lo;
295: ld->ld_locked = 0;
296: ld->ld_unlocked = 0;
297: ld->ld_lwp = NULL;
1.10 ad 298: ld->ld_initaddr = initaddr;
1.35 ad 299: ld->ld_flags = (lo->lo_type == LOCKOPS_SLEEP ? LD_SLEEPER : 0);
1.34 ad 300: lockdebug_lock_cpus();
1.42 rmind 301: (void)rb_tree_insert_node(&ld_rb_tree, __UNVOLATILE(ld));
1.34 ad 302: lockdebug_unlock_cpus();
303: __cpu_simple_unlock(&ld_mod_lk);
1.2 ad 304:
1.34 ad 305: splx(s);
1.16 yamt 306: return true;
1.2 ad 307: }
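
/*
 * Editorial example (hypothetical names): a lock primitive registers
 * with LOCKDEBUG at init time roughly as sketched below.  Only the
 * lockops_t fields this file consumes (lo_name, lo_type, lo_dump) are
 * filled in; "example_lockops" and the dump hook are illustrative, and
 * LOCKOPS_SPIN is assumed from <sys/lockdebug.h>.
 */
static void
example_dump(volatile void *lock)
{

	printf_nolog("example spin lock %p\n", lock);
}

static lockops_t example_lockops = {
	.lo_name = "example",
	.lo_type = LOCKOPS_SPIN,
	.lo_dump = example_dump,
};

static bool
example_lock_init(volatile void *lock)
{

	/* A false return just means "run without a debug structure". */
	return lockdebug_alloc(lock, &example_lockops,
	    (uintptr_t)__builtin_return_address(0));
}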
308:
309: /*
310: * lockdebug_free:
311: *
312: * A lock is being destroyed, so release debugging resources.
313: */
314: void
1.16 yamt 315: lockdebug_free(volatile void *lock)
1.2 ad 316: {
317: lockdebug_t *ld;
1.34 ad 318: int s;
1.2 ad 319:
1.27 ad 320: if (panicstr != NULL || ld_panic)
1.2 ad 321: return;
322:
1.34 ad 323: s = splhigh();
324: __cpu_simple_lock(&ld_mod_lk);
1.38 rafal 325: ld = lockdebug_lookup(lock, (uintptr_t) __builtin_return_address(0));
1.16 yamt 326: if (ld == NULL) {
1.34 ad 327: __cpu_simple_unlock(&ld_mod_lk);
1.35 ad 328: 		panic("lockdebug_free: destroying uninitialized object %p",
1.16 yamt 329: 		    lock);
1.27 ad 330: return;
1.2 ad 331: }
1.27 ad 332: if ((ld->ld_flags & LD_LOCKED) != 0 || ld->ld_shares != 0) {
1.34 ad 333: __cpu_simple_unlock(&ld_mod_lk);
1.35 ad 334: lockdebug_abort1(ld, s, __func__, "is locked or in use", true);
1.27 ad 335: return;
336: }
1.34 ad 337: lockdebug_lock_cpus();
1.42 rmind 338: rb_tree_remove_node(&ld_rb_tree, __UNVOLATILE(ld));
1.34 ad 339: lockdebug_unlock_cpus();
1.2 ad 340: ld->ld_lock = NULL;
341: TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
342: ld_nfree++;
1.34 ad 343: __cpu_simple_unlock(&ld->ld_spinlock);
344: __cpu_simple_unlock(&ld_mod_lk);
345: splx(s);
1.2 ad 346: }
347:
348: /*
349: * lockdebug_more:
350: *
351: * Allocate a batch of debug structures and add to the free list.
1.34 ad 352: * Must be called with ld_mod_lk held.
1.2 ad 353: */
1.34 ad 354: static int
355: lockdebug_more(int s)
1.2 ad 356: {
357: lockdebug_t *ld;
358: void *block;
1.5 ad 359: int i, base, m;
1.2 ad 360:
1.35 ad 361: /*
362: * Can't call kmem_alloc() if in interrupt context. XXX We could
363: * deadlock, because we don't know which locks the caller holds.
364: */
365: if (cpu_intr_p() || (curlwp->l_pflag & LP_INTR) != 0) {
366: return s;
367: }
368:
1.2 ad 369: while (ld_nfree < LD_SLOP) {
1.34 ad 370: __cpu_simple_unlock(&ld_mod_lk);
371: splx(s);
1.2 ad 372: block = kmem_zalloc(LD_BATCH * sizeof(lockdebug_t), KM_SLEEP);
1.34 ad 373: s = splhigh();
374: __cpu_simple_lock(&ld_mod_lk);
1.2 ad 375:
376: if (block == NULL)
1.34 ad 377: return s;
1.2 ad 378:
379: if (ld_nfree > LD_SLOP) {
380: /* Somebody beat us to it. */
1.34 ad 381: __cpu_simple_unlock(&ld_mod_lk);
382: splx(s);
1.2 ad 383: kmem_free(block, LD_BATCH * sizeof(lockdebug_t));
1.34 ad 384: s = splhigh();
385: __cpu_simple_lock(&ld_mod_lk);
1.2 ad 386: continue;
387: }
388:
389: base = ld_freeptr;
390: ld_nfree += LD_BATCH;
391: ld = block;
392: base <<= LD_BATCH_SHIFT;
1.5 ad 393: m = min(LD_MAX_LOCKS, base + LD_BATCH);
394:
395: if (m == LD_MAX_LOCKS)
396: ld_nomore = true;
1.2 ad 397:
1.5 ad 398: for (i = base; i < m; i++, ld++) {
1.34 ad 399: __cpu_simple_lock_init(&ld->ld_spinlock);
1.2 ad 400: TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
401: TAILQ_INSERT_TAIL(&ld_all, ld, ld_achain);
402: }
403:
1.22 ad 404: membar_producer();
1.2 ad 405: }
1.34 ad 406:
407: return s;
1.2 ad 408: }
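
/*
 * Editorial worked example: with ld_freeptr == 1 as set by
 * lockdebug_init(), base == 1 << LD_BATCH_SHIFT == 512 and
 * m == min(1048576, 512 + 512) == 1024, so the loop above threads one
 * full batch of 512 fresh structures onto ld_free and ld_all.  Only
 * when base + LD_BATCH reaches LD_MAX_LOCKS does ld_nomore shut off
 * further growth.
 */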
409:
410: /*
411: * lockdebug_wantlock:
412: *
413: * Process the preamble to a lock acquire.
414: */
415: void
1.49 mlelstv 416: lockdebug_wantlock(volatile void *lock, uintptr_t where, int shared)
1.2 ad 417: {
418: struct lwp *l = curlwp;
419: lockdebug_t *ld;
1.3 thorpej 420: bool recurse;
1.34 ad 421: int s;
1.2 ad 422:
423: (void)shared;
1.4 thorpej 424: recurse = false;
1.2 ad 425:
1.27 ad 426: if (panicstr != NULL || ld_panic)
1.2 ad 427: return;
428:
1.34 ad 429: s = splhigh();
1.38 rafal 430: if ((ld = lockdebug_lookup(lock, where)) == NULL) {
1.34 ad 431: splx(s);
1.2 ad 432: return;
1.34 ad 433: }
1.32 yamt 434: if ((ld->ld_flags & LD_LOCKED) != 0 || ld->ld_shares != 0) {
1.2 ad 435: if ((ld->ld_flags & LD_SLEEPER) != 0) {
1.49 mlelstv 436: if (ld->ld_lwp == l)
1.4 thorpej 437: recurse = true;
1.40 rmind 438: } else if (ld->ld_cpu == (uint16_t)cpu_index(curcpu()))
1.4 thorpej 439: recurse = true;
1.2 ad 440: }
1.10 ad 441: if (cpu_intr_p()) {
1.27 ad 442: if ((ld->ld_flags & LD_SLEEPER) != 0) {
1.34 ad 443: lockdebug_abort1(ld, s, __func__,
1.10 ad 444: "acquiring sleep lock from interrupt context",
445: true);
1.27 ad 446: return;
447: }
1.10 ad 448: }
1.2 ad 449: if (shared)
450: ld->ld_shwant++;
451: else
452: ld->ld_exwant++;
1.27 ad 453: if (recurse) {
1.34 ad 454: lockdebug_abort1(ld, s, __func__, "locking against myself",
1.10 ad 455: true);
1.27 ad 456: return;
457: }
1.34 ad 458: __cpu_simple_unlock(&ld->ld_spinlock);
459: splx(s);
1.2 ad 460: }
461:
462: /*
463: * lockdebug_locked:
464: *
465: * Process a lock acquire operation.
466: */
467: void
1.35 ad 468: lockdebug_locked(volatile void *lock, void *cvlock, uintptr_t where,
469: int shared)
1.2 ad 470: {
471: struct lwp *l = curlwp;
472: lockdebug_t *ld;
1.34 ad 473: int s;
1.2 ad 474:
1.27 ad 475: if (panicstr != NULL || ld_panic)
1.2 ad 476: return;
477:
1.34 ad 478: s = splhigh();
1.38 rafal 479: if ((ld = lockdebug_lookup(lock, where)) == NULL) {
1.34 ad 480: splx(s);
1.2 ad 481: return;
1.34 ad 482: }
1.35 ad 483: if (cvlock) {
484: KASSERT(ld->ld_lockops->lo_type == LOCKOPS_CV);
485: if (lock == (void *)&lbolt) {
486: /* nothing */
487: } else if (ld->ld_shares++ == 0) {
488: ld->ld_locked = (uintptr_t)cvlock;
489: } else if (cvlock != (void *)ld->ld_locked) {
490: lockdebug_abort1(ld, s, __func__, "multiple locks used"
491: " with condition variable", true);
492: return;
493: }
494: } else if (shared) {
1.2 ad 495: l->l_shlocks++;
1.45 yamt 496: ld->ld_locked = where;
1.2 ad 497: ld->ld_shares++;
498: ld->ld_shwant--;
499: } else {
1.27 ad 500: if ((ld->ld_flags & LD_LOCKED) != 0) {
1.34 ad 501: lockdebug_abort1(ld, s, __func__, "already locked",
502: true);
1.27 ad 503: return;
504: }
1.2 ad 505: ld->ld_flags |= LD_LOCKED;
506: ld->ld_locked = where;
507: ld->ld_exwant--;
508: if ((ld->ld_flags & LD_SLEEPER) != 0) {
1.34 ad 509: TAILQ_INSERT_TAIL(&l->l_ld_locks, ld, ld_chain);
1.2 ad 510: } else {
1.34 ad 511: TAILQ_INSERT_TAIL(&curcpu()->ci_data.cpu_ld_locks,
512: ld, ld_chain);
1.2 ad 513: }
514: }
1.40 rmind 515: ld->ld_cpu = (uint16_t)cpu_index(curcpu());
1.32 yamt 516: ld->ld_lwp = l;
1.34 ad 517: __cpu_simple_unlock(&ld->ld_spinlock);
518: splx(s);
1.2 ad 519: }
520:
521: /*
522: * lockdebug_unlocked:
523: *
524: * Process a lock release operation.
525: */
526: void
1.16 yamt 527: lockdebug_unlocked(volatile void *lock, uintptr_t where, int shared)
1.2 ad 528: {
529: struct lwp *l = curlwp;
530: lockdebug_t *ld;
1.34 ad 531: int s;
1.2 ad 532:
1.27 ad 533: if (panicstr != NULL || ld_panic)
1.2 ad 534: return;
535:
1.34 ad 536: s = splhigh();
1.38 rafal 537: if ((ld = lockdebug_lookup(lock, where)) == NULL) {
1.34 ad 538: splx(s);
1.2 ad 539: return;
1.34 ad 540: }
1.35 ad 541: if (ld->ld_lockops->lo_type == LOCKOPS_CV) {
542: if (lock == (void *)&lbolt) {
543: /* nothing */
544: } else {
545: ld->ld_shares--;
546: }
547: } else if (shared) {
1.27 ad 548: if (l->l_shlocks == 0) {
1.34 ad 549: lockdebug_abort1(ld, s, __func__,
1.10 ad 550: "no shared locks held by LWP", true);
1.27 ad 551: return;
552: }
553: if (ld->ld_shares == 0) {
1.34 ad 554: lockdebug_abort1(ld, s, __func__,
1.10 ad 555: "no shared holds on this lock", true);
1.27 ad 556: return;
557: }
1.2 ad 558: l->l_shlocks--;
559: ld->ld_shares--;
1.45 yamt 560: if (ld->ld_lwp == l) {
561: ld->ld_unlocked = where;
1.32 yamt 562: ld->ld_lwp = NULL;
1.45 yamt 563: }
1.40 rmind 564: if (ld->ld_cpu == (uint16_t)cpu_index(curcpu()))
1.32 yamt 565: ld->ld_cpu = (uint16_t)-1;
1.2 ad 566: } else {
1.27 ad 567: if ((ld->ld_flags & LD_LOCKED) == 0) {
1.34 ad 568: lockdebug_abort1(ld, s, __func__, "not locked", true);
1.27 ad 569: return;
570: }
1.2 ad 571:
572: if ((ld->ld_flags & LD_SLEEPER) != 0) {
1.27 ad 573: if (ld->ld_lwp != curlwp) {
1.34 ad 574: lockdebug_abort1(ld, s, __func__,
1.10 ad 575: "not held by current LWP", true);
1.27 ad 576: return;
577: }
1.34 ad 578: TAILQ_REMOVE(&l->l_ld_locks, ld, ld_chain);
1.2 ad 579: } else {
1.40 rmind 580: if (ld->ld_cpu != (uint16_t)cpu_index(curcpu())) {
1.34 ad 581: lockdebug_abort1(ld, s, __func__,
1.10 ad 582: "not held by current CPU", true);
1.27 ad 583: return;
584: }
1.34 ad 585: TAILQ_REMOVE(&curcpu()->ci_data.cpu_ld_locks, ld,
586: ld_chain);
1.2 ad 587: }
1.44 matt 588: ld->ld_flags &= ~LD_LOCKED;
589: ld->ld_unlocked = where;
590: ld->ld_lwp = NULL;
1.2 ad 591: }
1.34 ad 592: __cpu_simple_unlock(&ld->ld_spinlock);
593: splx(s);
1.2 ad 594: }
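
/*
 * Editorial example (hypothetical primitive): the expected call
 * sequence from a lock implementation is wantlock -> locked around the
 * acquire and unlocked just before the release, always passing the
 * caller's program counter so that the history lockdebug_dump() prints
 * points at real code.  Assumes __cpu_simple_lock_init() and
 * lockdebug_alloc() already ran for example_lock, as in the earlier
 * sketch.
 */
static __cpu_simple_lock_t example_lock;

static void
example_acquire(uintptr_t pc)
{

	lockdebug_wantlock(&example_lock, pc, 0);	/* 0: exclusive */
	__cpu_simple_lock(&example_lock);
	lockdebug_locked(&example_lock, NULL, pc, 0);	/* no CV interlock */
}

static void
example_release(uintptr_t pc)
{

	lockdebug_unlocked(&example_lock, pc, 0);
	__cpu_simple_unlock(&example_lock);
}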
595:
596: /*
1.35 ad 597: * lockdebug_wakeup:
598: *
599: * Process a wakeup on a condition variable.
600: */
601: void
602: lockdebug_wakeup(volatile void *lock, uintptr_t where)
603: {
604: lockdebug_t *ld;
605: int s;
606:
607: if (panicstr != NULL || ld_panic || lock == (void *)&lbolt)
608: return;
609:
610: s = splhigh();
611: /* Find the CV... */
1.38 rafal 612: if ((ld = lockdebug_lookup(lock, where)) == NULL) {
1.35 ad 613: splx(s);
614: return;
615: }
616: /*
617: * If it has any waiters, ensure that they are using the
618: * same interlock.
619: */
620: if (ld->ld_shares != 0 && !mutex_owned((kmutex_t *)ld->ld_locked)) {
621: lockdebug_abort1(ld, s, __func__, "interlocking mutex not "
622: "held during wakeup", true);
623: return;
624: }
625: __cpu_simple_unlock(&ld->ld_spinlock);
626: splx(s);
627: }
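
/*
 * Editorial note: the check above mirrors the usual condvar protocol.
 * lockdebug_locked() stored the mutex passed as "cvlock" by the first
 * waiter in ld_locked, and a waker is expected to hold that same
 * interlock:
 *
 *	mutex_enter(&lk);
 *	cv_broadcast(&cv);	(reaches lockdebug_wakeup())
 *	mutex_exit(&lk);
 */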
628:
629: /*
1.2 ad 630: * lockdebug_barrier:
631: *
 632: 	 * Panic if we hold any spin lock other than the one specified,
 633: 	 * and optionally, if we hold any sleep locks.
634: */
635: void
636: lockdebug_barrier(volatile void *spinlock, int slplocks)
637: {
638: struct lwp *l = curlwp;
639: lockdebug_t *ld;
1.34 ad 640: int s;
1.2 ad 641:
1.27 ad 642: if (panicstr != NULL || ld_panic)
1.2 ad 643: return;
644:
1.34 ad 645: s = splhigh();
646: if ((l->l_pflag & LP_INTR) == 0) {
647: TAILQ_FOREACH(ld, &curcpu()->ci_data.cpu_ld_locks, ld_chain) {
1.2 ad 648: if (ld->ld_lock == spinlock) {
649: continue;
650: }
1.34 ad 651: __cpu_simple_lock(&ld->ld_spinlock);
652: lockdebug_abort1(ld, s, __func__,
653: "spin lock held", true);
654: return;
1.2 ad 655: }
656: }
1.34 ad 657: if (slplocks) {
658: splx(s);
659: return;
660: }
661: if ((ld = TAILQ_FIRST(&l->l_ld_locks)) != NULL) {
662: __cpu_simple_lock(&ld->ld_spinlock);
663: lockdebug_abort1(ld, s, __func__, "sleep lock held", true);
664: return;
665: }
666: splx(s);
667: if (l->l_shlocks != 0) {
1.52 christos 668: TAILQ_FOREACH(ld, &ld_all, ld_achain) {
669: if (ld->ld_lockops->lo_type == LOCKOPS_CV)
670: continue;
671: if (ld->ld_lwp == l)
672: lockdebug_dump(ld, printf);
673: }
674: panic("%s: holding %d shared locks", __func__, l->l_shlocks);
1.2 ad 675: }
676: }
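
/*
 * Editorial example: a typical caller asserts, before sleeping, that
 * it holds at most the kernel_lock.  In the tree this is spelled with
 * the LOCKDEBUG_BARRIER() wrapper so that the call compiles away in
 * kernels built without LOCKDEBUG:
 *
 *	LOCKDEBUG_BARRIER(&kernel_lock, 1);
 */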
677:
678: /*
1.10 ad 679: * lockdebug_mem_check:
680: *
681: * Check for in-use locks within a memory region that is
1.16 yamt 682: * being freed.
1.10 ad 683: */
684: void
685: lockdebug_mem_check(const char *func, void *base, size_t sz)
686: {
1.16 yamt 687: lockdebug_t *ld;
1.34 ad 688: struct cpu_info *ci;
1.23 ad 689: int s;
1.10 ad 690:
1.27 ad 691: if (panicstr != NULL || ld_panic)
1.24 ad 692: return;
693:
1.34 ad 694: s = splhigh();
695: ci = curcpu();
696: __cpu_simple_lock(&ci->ci_data.cpu_ld_lock);
1.16 yamt 697: ld = (lockdebug_t *)rb_tree_find_node_geq(&ld_rb_tree, base);
1.23 ad 698: if (ld != NULL) {
699: const uintptr_t lock = (uintptr_t)ld->ld_lock;
700:
701: if ((uintptr_t)base > lock)
702: panic("%s: corrupt tree ld=%p, base=%p, sz=%zu",
703: __func__, ld, base, sz);
704: if (lock >= (uintptr_t)base + sz)
705: ld = NULL;
706: }
1.34 ad 707: __cpu_simple_unlock(&ci->ci_data.cpu_ld_lock);
708: if (ld != NULL) {
709: __cpu_simple_lock(&ld->ld_spinlock);
710: lockdebug_abort1(ld, s, func,
711: "allocation contains active lock", !cold);
1.16 yamt 712: return;
1.34 ad 713: }
714: splx(s);
1.10 ad 715: }
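
/*
 * Editorial worked example: suppose a 256 byte region at 0x1000 is
 * being freed while a tracked lock lives at 0x1040.
 * rb_tree_find_node_geq() returns the lowest-addressed lock at or
 * above 0x1000, here 0x1040; since 0x1040 < 0x1000 + 0x100 the region
 * still contains an active lock and we abort.  Allocators normally
 * reach this check through the LOCKDEBUG_MEM_CHECK() wrapper.
 */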
716:
717: /*
1.2 ad 718: * lockdebug_dump:
719: *
720: * Dump information about a lock on panic, or for DDB.
721: */
722: static void
1.47 christos 723: lockdebug_dump(lockdebug_t *ld, void (*pr)(const char *, ...)
724: __printflike(1, 2))
1.2 ad 725: {
726: int sleeper = (ld->ld_flags & LD_SLEEPER);
727:
728: (*pr)(
 729: 	    "lock address : %#018lx type     : %18s\n"
1.35 ad 730: 	    "initialized  : %#018lx",
1.2 ad 731: (long)ld->ld_lock, (sleeper ? "sleep/adaptive" : "spin"),
1.10 ad 732: (long)ld->ld_initaddr);
1.2 ad 733:
1.35 ad 734: if (ld->ld_lockops->lo_type == LOCKOPS_CV) {
1.48 njoly 735: (*pr)(" interlock: %#018lx\n", (long)ld->ld_locked);
1.35 ad 736: } else {
737: (*pr)("\n"
738: "shared holds : %18u exclusive: %18u\n"
739: "shares wanted: %18u exclusive: %18u\n"
 740: 	    "current cpu  : %18u last held: %18u\n"
 741: 	    "current lwp  : %#018lx last held: %#018lx\n"
1.44 matt 742: "last locked%c : %#018lx unlocked%c: %#018lx\n",
1.35 ad 743: (unsigned)ld->ld_shares, ((ld->ld_flags & LD_LOCKED) != 0),
744: (unsigned)ld->ld_shwant, (unsigned)ld->ld_exwant,
1.40 rmind 745: (unsigned)cpu_index(curcpu()), (unsigned)ld->ld_cpu,
1.35 ad 746: (long)curlwp, (long)ld->ld_lwp,
1.44 matt 747: ((ld->ld_flags & LD_LOCKED) ? '*' : ' '),
748: (long)ld->ld_locked,
749: ((ld->ld_flags & LD_LOCKED) ? ' ' : '*'),
750: (long)ld->ld_unlocked);
1.35 ad 751: }
752:
1.2 ad 753: if (ld->ld_lockops->lo_dump != NULL)
754: (*ld->ld_lockops->lo_dump)(ld->ld_lock);
755:
756: if (sleeper) {
757: (*pr)("\n");
758: turnstile_print(ld->ld_lock, pr);
759: }
760: }
761:
762: /*
1.27 ad 763: * lockdebug_abort1:
1.2 ad 764: *
1.27 ad 765: * An error has been trapped - dump lock info and panic.
1.2 ad 766: */
1.5 ad 767: static void
1.34 ad 768: lockdebug_abort1(lockdebug_t *ld, int s, const char *func,
1.10 ad 769: const char *msg, bool dopanic)
1.2 ad 770: {
771:
1.27 ad 772: /*
1.46 christos 773: * Don't make the situation worse if the system is already going
1.27 ad 774: * down in flames. Once a panic is triggered, lockdebug state
775: * becomes stale and cannot be trusted.
776: */
777: if (atomic_inc_uint_nv(&ld_panic) != 1) {
1.34 ad 778: __cpu_simple_unlock(&ld->ld_spinlock);
779: splx(s);
1.27 ad 780: return;
781: }
782:
1.2 ad 783: printf_nolog("%s error: %s: %s\n\n", ld->ld_lockops->lo_name,
784: func, msg);
785: lockdebug_dump(ld, printf_nolog);
1.34 ad 786: __cpu_simple_unlock(&ld->ld_spinlock);
787: splx(s);
1.2 ad 788: printf_nolog("\n");
1.10 ad 789: if (dopanic)
1.50 christos 790: panic("LOCKDEBUG: %s error: %s: %s", ld->ld_lockops->lo_name,
791: func, msg);
1.2 ad 792: }
793:
794: #endif /* LOCKDEBUG */
795:
796: /*
797: * lockdebug_lock_print:
798: *
799: * Handle the DDB 'show lock' command.
800: */
801: #ifdef DDB
802: void
803: lockdebug_lock_print(void *addr, void (*pr)(const char *, ...))
804: {
805: #ifdef LOCKDEBUG
806: lockdebug_t *ld;
807:
808: TAILQ_FOREACH(ld, &ld_all, ld_achain) {
1.41 dyoung 809: if (ld->ld_lock == NULL)
810: continue;
811: if (addr == NULL || ld->ld_lock == addr) {
1.2 ad 812: lockdebug_dump(ld, pr);
1.41 dyoung 813: if (addr != NULL)
814: return;
1.2 ad 815: }
816: }
1.41 dyoung 817: if (addr != NULL) {
818: (*pr)("Sorry, no record of a lock with address %p found.\n",
819: addr);
820: }
1.2 ad 821: #else
822: (*pr)("Sorry, kernel not built with the LOCKDEBUG option.\n");
823: #endif /* LOCKDEBUG */
824: }
825: #endif /* DDB */
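
/*
 * Editorial example: from DDB the function above services, e.g.,
 *
 *	db{0}> show lock 0xffffffff80d4e2c0
 *
 * printing the lockdebug_dump() record for that lock (the address here
 * is illustrative only).
 */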
826:
827: /*
828: * lockdebug_abort:
829: *
830: * An error has been trapped - dump lock info and call panic().
831: */
832: void
1.16 yamt 833: lockdebug_abort(volatile void *lock, lockops_t *ops, const char *func,
834: const char *msg)
1.2 ad 835: {
836: #ifdef LOCKDEBUG
837: lockdebug_t *ld;
1.34 ad 838: int s;
1.2 ad 839:
1.34 ad 840: s = splhigh();
1.38 rafal 841: if ((ld = lockdebug_lookup(lock,
842: (uintptr_t) __builtin_return_address(0))) != NULL) {
1.34 ad 843: lockdebug_abort1(ld, s, func, msg, true);
844: return;
1.2 ad 845: }
1.34 ad 846: splx(s);
1.2 ad 847: #endif /* LOCKDEBUG */
848:
1.27 ad 849: /*
 850: 	 * Complain on the first occurrence only. Otherwise proceed to
851: * panic where we will `rendezvous' with other CPUs if the machine
852: * is going down in flames.
853: */
854: if (atomic_inc_uint_nv(&ld_panic) == 1) {
855: printf_nolog("%s error: %s: %s\n\n"
856: "lock address : %#018lx\n"
 857: 		    "current cpu  : %18d\n"
 858: 		    "current lwp  : %#018lx\n",
1.40 rmind 859: ops->lo_name, func, msg, (long)lock,
860: (int)cpu_index(curcpu()), (long)curlwp);
1.27 ad 861: (*ops->lo_dump)(lock);
862: printf_nolog("\n");
863: }
1.2 ad 864:
1.53 ! riastrad 865: panic("lock error: %s: %s: %s: lock %p cpu %d lwp %p",
! 866: ops->lo_name, func, msg, lock, cpu_index(curcpu()), curlwp);
1.2 ad 867: }