Annotation of src/sys/kern/subr_lockdebug.c, Revision 1.66
1.66 ! riastrad 1: /* $NetBSD$ */
1.2 ad 2:
3: /*-
1.28 ad 4: * Copyright (c) 2006, 2007, 2008 The NetBSD Foundation, Inc.
1.2 ad 5: * All rights reserved.
6: *
7: * This code is derived from software contributed to The NetBSD Foundation
8: * by Andrew Doran.
9: *
10: * Redistribution and use in source and binary forms, with or without
11: * modification, are permitted provided that the following conditions
12: * are met:
13: * 1. Redistributions of source code must retain the above copyright
14: * notice, this list of conditions and the following disclaimer.
15: * 2. Redistributions in binary form must reproduce the above copyright
16: * notice, this list of conditions and the following disclaimer in the
17: * documentation and/or other materials provided with the distribution.
18: *
19: * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20: * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21: * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22: * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23: * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24: * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25: * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26: * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27: * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28: * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29: * POSSIBILITY OF SUCH DAMAGE.
30: */
31:
32: /*
1.11 ad 33: * Basic lock debugging code shared among lock primitives.
1.2 ad 34: */
35:
1.9 dsl 36: #include <sys/cdefs.h>
1.66 ! riastrad 37: __KERNEL_RCSID(0, "$NetBSD$");
1.9 dsl 38:
1.54 ozaki-r 39: #ifdef _KERNEL_OPT
1.2 ad 40: #include "opt_ddb.h"
1.54 ozaki-r 41: #endif
1.2 ad 42:
43: #include <sys/param.h>
44: #include <sys/proc.h>
45: #include <sys/systm.h>
1.10 ad 46: #include <sys/kernel.h>
1.2 ad 47: #include <sys/kmem.h>
48: #include <sys/lockdebug.h>
49: #include <sys/sleepq.h>
1.10 ad 50: #include <sys/cpu.h>
1.22 ad 51: #include <sys/atomic.h>
1.26 ad 52: #include <sys/lock.h>
1.43 matt 53: #include <sys/rbtree.h>
1.62 ozaki-r 54: #include <sys/ksyms.h>
1.16 yamt 55:
1.25 ad 56: #include <machine/lock.h>
57:
1.28 ad 58: unsigned int ld_panic;
59:
1.2 ad 60: #ifdef LOCKDEBUG
61:
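/*
 * Debug structures are handed out in batches of LD_BATCH (512) and the
 * total is capped at LD_MAX_LOCKS.  LD_SLOP is the free-list low-water
 * mark: when not recursing we try to keep at least that many structures
 * free, so that allocations that recurse through kmem_alloc() can still
 * be satisfied.
 */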
62: #define LD_BATCH_SHIFT 9
63: #define LD_BATCH (1 << LD_BATCH_SHIFT)
64: #define LD_BATCH_MASK (LD_BATCH - 1)
65: #define LD_MAX_LOCKS 1048576
66: #define LD_SLOP 16
67:
68: #define LD_LOCKED 0x01
69: #define LD_SLEEPER 0x02
70:
1.23 ad 71: #define LD_WRITE_LOCK 0x80000000
72:
1.2 ad 73: typedef struct lockdebug {
1.42 rmind 74: struct rb_node ld_rb_node;
1.34 ad 75: __cpu_simple_lock_t ld_spinlock;
1.2 ad 76: _TAILQ_ENTRY(struct lockdebug, volatile) ld_chain;
77: _TAILQ_ENTRY(struct lockdebug, volatile) ld_achain;
78: volatile void *ld_lock;
79: lockops_t *ld_lockops;
80: struct lwp *ld_lwp;
81: uintptr_t ld_locked;
82: uintptr_t ld_unlocked;
1.10 ad 83: uintptr_t ld_initaddr;
1.2 ad 84: uint16_t ld_shares;
85: uint16_t ld_cpu;
86: uint8_t ld_flags;
87: uint8_t ld_shwant; /* advisory */
88: uint8_t ld_exwant; /* advisory */
89: uint8_t ld_unused;
90: } volatile lockdebug_t;
91:
92: typedef _TAILQ_HEAD(lockdebuglist, struct lockdebug, volatile) lockdebuglist_t;
93:
1.34 ad 94: __cpu_simple_lock_t ld_mod_lk;
1.13 matt 95: lockdebuglist_t ld_free = TAILQ_HEAD_INITIALIZER(ld_free);
96: lockdebuglist_t ld_all = TAILQ_HEAD_INITIALIZER(ld_all);
1.2 ad 97: int ld_nfree;
98: int ld_freeptr;
99: int ld_recurse;
1.5 ad 100: bool ld_nomore;
1.2 ad 101: lockdebug_t ld_prime[LD_BATCH];
102:
1.55 christos 103: static void lockdebug_abort1(const char *, size_t, lockdebug_t *, int,
104: const char *, bool);
1.34 ad 105: static int lockdebug_more(int);
1.5 ad 106: static void lockdebug_init(void);
1.52 christos 107: static void lockdebug_dump(lockdebug_t *, void (*)(const char *, ...)
108: __printflike(1, 2));
1.2 ad 109:
1.16 yamt 110: static signed int
1.42 rmind 111: ld_rbto_compare_nodes(void *ctx, const void *n1, const void *n2)
1.16 yamt 112: {
1.42 rmind 113: const lockdebug_t *ld1 = n1;
114: const lockdebug_t *ld2 = n2;
1.20 yamt 115: const uintptr_t a = (uintptr_t)ld1->ld_lock;
116: const uintptr_t b = (uintptr_t)ld2->ld_lock;
117:
118: if (a < b)
1.42 rmind 119: return -1;
120: if (a > b)
1.20 yamt 121: return 1;
1.16 yamt 122: return 0;
123: }
124:
125: static signed int
1.42 rmind 126: ld_rbto_compare_key(void *ctx, const void *n, const void *key)
1.16 yamt 127: {
1.42 rmind 128: const lockdebug_t *ld = n;
1.20 yamt 129: const uintptr_t a = (uintptr_t)ld->ld_lock;
130: const uintptr_t b = (uintptr_t)key;
131:
132: if (a < b)
1.42 rmind 133: return -1;
134: if (a > b)
1.20 yamt 135: return 1;
1.16 yamt 136: return 0;
137: }
138:
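/*
 * Every active lockdebug structure also sits in ld_rb_tree, keyed by
 * the address of the lock it describes.  Keying by address supports
 * both the exact-match lookup in lockdebug_lookup() and the range
 * query in lockdebug_mem_check(), which scans a region being freed.
 */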
1.42 rmind 139: static rb_tree_t ld_rb_tree;
1.16 yamt 140:
1.42 rmind 141: static const rb_tree_ops_t ld_rb_tree_ops = {
1.37 matt 142: .rbto_compare_nodes = ld_rbto_compare_nodes,
143: .rbto_compare_key = ld_rbto_compare_key,
1.42 rmind 144: .rbto_node_offset = offsetof(lockdebug_t, ld_rb_node),
145: .rbto_context = NULL
1.16 yamt 146: };
147:
1.34 ad 148: static inline lockdebug_t *
1.58 christos 149: lockdebug_lookup1(const volatile void *lock)
1.23 ad 150: {
1.34 ad 151: lockdebug_t *ld;
152: struct cpu_info *ci;
1.23 ad 153:
1.34 ad 154: ci = curcpu();
155: __cpu_simple_lock(&ci->ci_data.cpu_ld_lock);
1.58 christos 156: ld = rb_tree_find_node(&ld_rb_tree, (void *)(intptr_t)lock);
1.34 ad 157: __cpu_simple_unlock(&ci->ci_data.cpu_ld_lock);
158: if (ld == NULL) {
159: return NULL;
160: }
161: __cpu_simple_lock(&ld->ld_spinlock);
1.23 ad 162:
1.34 ad 163: return ld;
1.2 ad 164: }
165:
1.23 ad 166: static void
1.34 ad 167: lockdebug_lock_cpus(void)
1.2 ad 168: {
1.34 ad 169: CPU_INFO_ITERATOR cii;
170: struct cpu_info *ci;
1.2 ad 171:
1.34 ad 172: for (CPU_INFO_FOREACH(cii, ci)) {
173: __cpu_simple_lock(&ci->ci_data.cpu_ld_lock);
174: }
1.23 ad 175: }
176:
177: static void
1.34 ad 178: lockdebug_unlock_cpus(void)
1.23 ad 179: {
1.34 ad 180: CPU_INFO_ITERATOR cii;
181: struct cpu_info *ci;
1.23 ad 182:
1.34 ad 183: for (CPU_INFO_FOREACH(cii, ci)) {
184: __cpu_simple_unlock(&ci->ci_data.cpu_ld_lock);
185: }
1.2 ad 186: }
187:
188: /*
1.19 yamt 189: * lockdebug_lookup:
190: *
191: * Find a lockdebug structure by a pointer to a lock and return it locked.
192: */
193: static inline lockdebug_t *
1.58 christos 194: lockdebug_lookup(const char *func, size_t line, const volatile void *lock,
1.55 christos 195: uintptr_t where)
1.19 yamt 196: {
197: lockdebug_t *ld;
198:
1.34 ad 199: ld = lockdebug_lookup1(lock);
1.60 ozaki-r 200: if (__predict_false(ld == NULL)) {
1.55 christos 201: panic("%s,%zu: uninitialized lock (lock=%p, from=%08"
202: PRIxPTR ")", func, line, lock, where);
1.42 rmind 203: }
1.19 yamt 204: return ld;
205: }
206:
207: /*
1.2 ad 208: * lockdebug_init:
209: *
210: * Initialize the lockdebug system. Allocate an initial pool of
211: * lockdebug structures before the VM system is up and running.
212: */
1.5 ad 213: static void
1.2 ad 214: lockdebug_init(void)
215: {
216: lockdebug_t *ld;
217: int i;
218:
1.34 ad 219: TAILQ_INIT(&curcpu()->ci_data.cpu_ld_locks);
220: TAILQ_INIT(&curlwp->l_ld_locks);
221: __cpu_simple_lock_init(&curcpu()->ci_data.cpu_ld_lock);
222: __cpu_simple_lock_init(&ld_mod_lk);
1.15 matt 223:
1.16 yamt 224: rb_tree_init(&ld_rb_tree, &ld_rb_tree_ops);
225:
1.2 ad 226: ld = ld_prime;
227: for (i = 1, ld++; i < LD_BATCH; i++, ld++) {
1.34 ad 228: __cpu_simple_lock_init(&ld->ld_spinlock);
1.2 ad 229: TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
230: TAILQ_INSERT_TAIL(&ld_all, ld, ld_achain);
231: }
232: ld_freeptr = 1;
233: ld_nfree = LD_BATCH - 1;
234: }
235:
236: /*
237: * lockdebug_alloc:
238: *
239: * A lock is being initialized, so allocate an associated debug
240: * structure.
241: */
1.16 yamt 242: bool
1.55 christos 243: lockdebug_alloc(const char *func, size_t line, volatile void *lock,
244: lockops_t *lo, uintptr_t initaddr)
1.2 ad 245: {
246: struct cpu_info *ci;
247: lockdebug_t *ld;
1.34 ad 248: int s;
1.2 ad 249:
1.60 ozaki-r 250: if (__predict_false(lo == NULL || panicstr != NULL || ld_panic))
1.16 yamt 251: return false;
1.60 ozaki-r 252: if (__predict_false(ld_freeptr == 0))
1.5 ad 253: lockdebug_init();
1.2 ad 254:
1.34 ad 255: s = splhigh();
256: __cpu_simple_lock(&ld_mod_lk);
1.60 ozaki-r 257: if (__predict_false((ld = lockdebug_lookup1(lock)) != NULL)) {
1.34 ad 258: __cpu_simple_unlock(&ld_mod_lk);
1.55 christos 259: lockdebug_abort1(func, line, ld, s, "already initialized",
260: true);
1.27 ad 261: return false;
1.19 yamt 262: }
263:
1.2 ad 264: /*
265: * Pinch a new debug structure. We may recurse because we call
266: * kmem_alloc(), which may need to initialize new locks somewhere
1.7 skrll 267: * down the path. If not recursing, we try to maintain at least
1.2 ad 268: * LD_SLOP structures free, which should hopefully be enough to
               269: 	 * satisfy kmem_alloc().  If we can't provide a structure, not to
               270: 	 * worry: we'll simply run without debugging for that lock.
271: */
1.23 ad 272: ci = curcpu();
1.2 ad 273: ci->ci_lkdebug_recurse++;
274: if (TAILQ_EMPTY(&ld_free)) {
1.5 ad 275: if (ci->ci_lkdebug_recurse > 1 || ld_nomore) {
1.2 ad 276: ci->ci_lkdebug_recurse--;
1.34 ad 277: __cpu_simple_unlock(&ld_mod_lk);
278: splx(s);
1.16 yamt 279: return false;
1.2 ad 280: }
1.34 ad 281: s = lockdebug_more(s);
282: } else if (ci->ci_lkdebug_recurse == 1 && ld_nfree < LD_SLOP) {
283: s = lockdebug_more(s);
284: }
1.60 ozaki-r 285: if (__predict_false((ld = TAILQ_FIRST(&ld_free)) == NULL)) {
1.34 ad 286: __cpu_simple_unlock(&ld_mod_lk);
287: splx(s);
1.16 yamt 288: return false;
1.2 ad 289: }
290: TAILQ_REMOVE(&ld_free, ld, ld_chain);
291: ld_nfree--;
292: ci->ci_lkdebug_recurse--;
293:
1.60 ozaki-r 294: if (__predict_false(ld->ld_lock != NULL)) {
1.55 christos 295: panic("%s,%zu: corrupt table ld %p", func, line, ld);
1.34 ad 296: }
1.2 ad 297:
298: /* Initialise the structure. */
299: ld->ld_lock = lock;
300: ld->ld_lockops = lo;
301: ld->ld_locked = 0;
302: ld->ld_unlocked = 0;
303: ld->ld_lwp = NULL;
1.10 ad 304: ld->ld_initaddr = initaddr;
1.35 ad 305: ld->ld_flags = (lo->lo_type == LOCKOPS_SLEEP ? LD_SLEEPER : 0);
1.34 ad 306: lockdebug_lock_cpus();
1.42 rmind 307: (void)rb_tree_insert_node(&ld_rb_tree, __UNVOLATILE(ld));
1.34 ad 308: lockdebug_unlock_cpus();
309: __cpu_simple_unlock(&ld_mod_lk);
1.2 ad 310:
1.34 ad 311: splx(s);
1.16 yamt 312: return true;
1.2 ad 313: }
314:
315: /*
316: * lockdebug_free:
317: *
318: * A lock is being destroyed, so release debugging resources.
319: */
320: void
1.55 christos 321: lockdebug_free(const char *func, size_t line, volatile void *lock)
1.2 ad 322: {
323: lockdebug_t *ld;
1.34 ad 324: int s;
1.2 ad 325:
1.60 ozaki-r 326: if (__predict_false(panicstr != NULL || ld_panic))
1.2 ad 327: return;
328:
1.34 ad 329: s = splhigh();
330: __cpu_simple_lock(&ld_mod_lk);
1.55 christos 331: ld = lockdebug_lookup(func, line, lock,
332: (uintptr_t) __builtin_return_address(0));
1.60 ozaki-r 333: if (__predict_false(ld == NULL)) {
1.34 ad 334: __cpu_simple_unlock(&ld_mod_lk);
1.55 christos   335: 		panic("%s,%zu: destroying uninitialized object %p",
                336: 		    func, line, lock);
1.27 ad 337: return;
1.2 ad 338: }
1.60 ozaki-r 339: if (__predict_false((ld->ld_flags & LD_LOCKED) != 0 ||
340: ld->ld_shares != 0)) {
1.34 ad 341: __cpu_simple_unlock(&ld_mod_lk);
1.55 christos 342: lockdebug_abort1(func, line, ld, s, "is locked or in use",
343: true);
1.27 ad 344: return;
345: }
1.34 ad 346: lockdebug_lock_cpus();
1.42 rmind 347: rb_tree_remove_node(&ld_rb_tree, __UNVOLATILE(ld));
1.34 ad 348: lockdebug_unlock_cpus();
1.2 ad 349: ld->ld_lock = NULL;
350: TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
351: ld_nfree++;
1.34 ad 352: __cpu_simple_unlock(&ld->ld_spinlock);
353: __cpu_simple_unlock(&ld_mod_lk);
354: splx(s);
1.2 ad 355: }
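
/*
 * Illustrative sketch (not compiled): how a lock primitive might pair
 * lockdebug_alloc() with lockdebug_free().  The "example_lock" type and
 * its lockops are invented for illustration; real primitives (mutex,
 * rwlock) go through the LOCKDEBUG_* macros in <sys/lockdebug.h>.
 */
#if 0
struct example_lock {
	__cpu_simple_lock_t	el_lock;
	bool			el_dodebug;	/* tracked by lockdebug? */
};

static lockops_t example_lockops = {
	.lo_name = "examplelock",
	.lo_type = LOCKOPS_SPIN,
	.lo_dump = NULL,
};

static void
example_lock_init(struct example_lock *el)
{

	__cpu_simple_lock_init(&el->el_lock);
	/* May return false if no debug structure could be allocated. */
	el->el_dodebug = lockdebug_alloc(__func__, __LINE__, el,
	    &example_lockops, (uintptr_t)__builtin_return_address(0));
}

static void
example_lock_destroy(struct example_lock *el)
{

	if (el->el_dodebug)
		lockdebug_free(__func__, __LINE__, el);
}
#endif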
356:
357: /*
358: * lockdebug_more:
359: *
360: * Allocate a batch of debug structures and add to the free list.
1.34 ad 361: * Must be called with ld_mod_lk held.
1.2 ad 362: */
1.34 ad 363: static int
364: lockdebug_more(int s)
1.2 ad 365: {
366: lockdebug_t *ld;
367: void *block;
1.5 ad 368: int i, base, m;
1.2 ad 369:
1.35 ad 370: /*
371: * Can't call kmem_alloc() if in interrupt context. XXX We could
372: * deadlock, because we don't know which locks the caller holds.
373: */
1.59 ozaki-r 374: if (cpu_intr_p() || cpu_softintr_p()) {
1.35 ad 375: return s;
376: }
377:
1.2 ad 378: while (ld_nfree < LD_SLOP) {
1.34 ad 379: __cpu_simple_unlock(&ld_mod_lk);
380: splx(s);
1.2 ad 381: block = kmem_zalloc(LD_BATCH * sizeof(lockdebug_t), KM_SLEEP);
1.34 ad 382: s = splhigh();
383: __cpu_simple_lock(&ld_mod_lk);
1.2 ad 384:
385: if (ld_nfree > LD_SLOP) {
386: /* Somebody beat us to it. */
1.34 ad 387: __cpu_simple_unlock(&ld_mod_lk);
388: splx(s);
1.2 ad 389: kmem_free(block, LD_BATCH * sizeof(lockdebug_t));
1.34 ad 390: s = splhigh();
391: __cpu_simple_lock(&ld_mod_lk);
1.2 ad 392: continue;
393: }
394:
395: base = ld_freeptr;
396: ld_nfree += LD_BATCH;
397: ld = block;
398: base <<= LD_BATCH_SHIFT;
1.66 ! riastrad 399: m = uimin(LD_MAX_LOCKS, base + LD_BATCH);
1.5 ad 400:
401: if (m == LD_MAX_LOCKS)
402: ld_nomore = true;
1.2 ad 403:
1.5 ad 404: for (i = base; i < m; i++, ld++) {
1.34 ad 405: __cpu_simple_lock_init(&ld->ld_spinlock);
1.2 ad 406: TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
407: TAILQ_INSERT_TAIL(&ld_all, ld, ld_achain);
408: }
409:
1.22 ad 410: membar_producer();
1.2 ad 411: }
1.34 ad 412:
413: return s;
1.2 ad 414: }
415:
416: /*
417: * lockdebug_wantlock:
418: *
1.56 pgoyette 419: * Process the preamble to a lock acquire. The "shared"
420: * parameter controls which ld_{ex,sh}want counter is
421: * updated; a negative value of shared updates neither.
1.2 ad 422: */
423: void
1.55 christos 424: lockdebug_wantlock(const char *func, size_t line,
1.58 christos 425: const volatile void *lock, uintptr_t where, int shared)
1.2 ad 426: {
427: struct lwp *l = curlwp;
428: lockdebug_t *ld;
1.3 thorpej 429: bool recurse;
1.34 ad 430: int s;
1.2 ad 431:
1.4 thorpej 433: recurse = false;
1.2 ad 434:
1.60 ozaki-r 435: if (__predict_false(panicstr != NULL || ld_panic))
1.2 ad 436: return;
437:
1.34 ad 438: s = splhigh();
1.55 christos 439: if ((ld = lockdebug_lookup(func, line, lock, where)) == NULL) {
1.34 ad 440: splx(s);
1.2 ad 441: return;
1.34 ad 442: }
1.32 yamt 443: if ((ld->ld_flags & LD_LOCKED) != 0 || ld->ld_shares != 0) {
1.2 ad 444: if ((ld->ld_flags & LD_SLEEPER) != 0) {
1.49 mlelstv 445: if (ld->ld_lwp == l)
1.4 thorpej 446: recurse = true;
1.40 rmind 447: } else if (ld->ld_cpu == (uint16_t)cpu_index(curcpu()))
1.4 thorpej 448: recurse = true;
1.2 ad 449: }
1.10 ad 450: if (cpu_intr_p()) {
1.60 ozaki-r 451: if (__predict_false((ld->ld_flags & LD_SLEEPER) != 0)) {
1.55 christos 452: lockdebug_abort1(func, line, ld, s,
1.10 ad 453: "acquiring sleep lock from interrupt context",
454: true);
1.27 ad 455: return;
456: }
1.10 ad 457: }
1.56 pgoyette 458: if (shared > 0)
1.2 ad 459: ld->ld_shwant++;
1.56 pgoyette 460: else if (shared == 0)
1.2 ad 461: ld->ld_exwant++;
1.60 ozaki-r 462: if (__predict_false(recurse)) {
1.55 christos 463: lockdebug_abort1(func, line, ld, s, "locking against myself",
1.10 ad 464: true);
1.27 ad 465: return;
466: }
1.34 ad 467: __cpu_simple_unlock(&ld->ld_spinlock);
468: splx(s);
1.2 ad 469: }
470:
471: /*
472: * lockdebug_locked:
473: *
474: * Process a lock acquire operation.
475: */
476: void
1.55 christos 477: lockdebug_locked(const char *func, size_t line,
478: volatile void *lock, void *cvlock, uintptr_t where, int shared)
1.2 ad 479: {
480: struct lwp *l = curlwp;
481: lockdebug_t *ld;
1.34 ad 482: int s;
1.2 ad 483:
1.60 ozaki-r 484: if (__predict_false(panicstr != NULL || ld_panic))
1.2 ad 485: return;
486:
1.34 ad 487: s = splhigh();
1.55 christos 488: if ((ld = lockdebug_lookup(func, line, lock, where)) == NULL) {
1.34 ad 489: splx(s);
1.2 ad 490: return;
1.34 ad 491: }
1.35 ad 492: if (cvlock) {
493: KASSERT(ld->ld_lockops->lo_type == LOCKOPS_CV);
494: if (lock == (void *)&lbolt) {
495: /* nothing */
496: } else if (ld->ld_shares++ == 0) {
497: ld->ld_locked = (uintptr_t)cvlock;
1.60 ozaki-r 498: } else if (__predict_false(cvlock != (void *)ld->ld_locked)) {
1.55 christos 499: lockdebug_abort1(func, line, ld, s,
500: "multiple locks used with condition variable",
501: true);
1.35 ad 502: return;
503: }
504: } else if (shared) {
1.2 ad 505: l->l_shlocks++;
1.45 yamt 506: ld->ld_locked = where;
1.2 ad 507: ld->ld_shares++;
508: ld->ld_shwant--;
509: } else {
1.60 ozaki-r 510: if (__predict_false((ld->ld_flags & LD_LOCKED) != 0)) {
1.55 christos 511: lockdebug_abort1(func, line, ld, s, "already locked",
1.34 ad 512: true);
1.27 ad 513: return;
514: }
1.2 ad 515: ld->ld_flags |= LD_LOCKED;
516: ld->ld_locked = where;
517: ld->ld_exwant--;
518: if ((ld->ld_flags & LD_SLEEPER) != 0) {
1.34 ad 519: TAILQ_INSERT_TAIL(&l->l_ld_locks, ld, ld_chain);
1.2 ad 520: } else {
1.34 ad 521: TAILQ_INSERT_TAIL(&curcpu()->ci_data.cpu_ld_locks,
522: ld, ld_chain);
1.2 ad 523: }
524: }
1.40 rmind 525: ld->ld_cpu = (uint16_t)cpu_index(curcpu());
1.32 yamt 526: ld->ld_lwp = l;
1.34 ad 527: __cpu_simple_unlock(&ld->ld_spinlock);
528: splx(s);
1.2 ad 529: }
530:
531: /*
532: * lockdebug_unlocked:
533: *
534: * Process a lock release operation.
535: */
536: void
1.55 christos 537: lockdebug_unlocked(const char *func, size_t line,
538: volatile void *lock, uintptr_t where, int shared)
1.2 ad 539: {
540: struct lwp *l = curlwp;
541: lockdebug_t *ld;
1.34 ad 542: int s;
1.2 ad 543:
1.60 ozaki-r 544: if (__predict_false(panicstr != NULL || ld_panic))
1.2 ad 545: return;
546:
1.34 ad 547: s = splhigh();
1.55 christos 548: if ((ld = lockdebug_lookup(func, line, lock, where)) == NULL) {
1.34 ad 549: splx(s);
1.2 ad 550: return;
1.34 ad 551: }
1.35 ad 552: if (ld->ld_lockops->lo_type == LOCKOPS_CV) {
553: if (lock == (void *)&lbolt) {
554: /* nothing */
555: } else {
556: ld->ld_shares--;
557: }
558: } else if (shared) {
1.60 ozaki-r 559: if (__predict_false(l->l_shlocks == 0)) {
1.55 christos 560: lockdebug_abort1(func, line, ld, s,
1.10 ad 561: "no shared locks held by LWP", true);
1.27 ad 562: return;
563: }
1.60 ozaki-r 564: if (__predict_false(ld->ld_shares == 0)) {
1.55 christos 565: lockdebug_abort1(func, line, ld, s,
1.10 ad 566: "no shared holds on this lock", true);
1.27 ad 567: return;
568: }
1.2 ad 569: l->l_shlocks--;
570: ld->ld_shares--;
1.45 yamt 571: if (ld->ld_lwp == l) {
572: ld->ld_unlocked = where;
1.32 yamt 573: ld->ld_lwp = NULL;
1.45 yamt 574: }
1.40 rmind 575: if (ld->ld_cpu == (uint16_t)cpu_index(curcpu()))
1.32 yamt 576: ld->ld_cpu = (uint16_t)-1;
1.2 ad 577: } else {
1.60 ozaki-r 578: if (__predict_false((ld->ld_flags & LD_LOCKED) == 0)) {
1.55 christos 579: lockdebug_abort1(func, line, ld, s, "not locked", true);
1.27 ad 580: return;
581: }
1.2 ad 582:
583: if ((ld->ld_flags & LD_SLEEPER) != 0) {
1.60 ozaki-r 584: if (__predict_false(ld->ld_lwp != curlwp)) {
1.55 christos 585: lockdebug_abort1(func, line, ld, s,
1.10 ad 586: "not held by current LWP", true);
1.27 ad 587: return;
588: }
1.34 ad 589: TAILQ_REMOVE(&l->l_ld_locks, ld, ld_chain);
1.2 ad 590: } else {
1.60 ozaki-r 591: uint16_t idx = (uint16_t)cpu_index(curcpu());
592: if (__predict_false(ld->ld_cpu != idx)) {
1.55 christos 593: lockdebug_abort1(func, line, ld, s,
1.10 ad 594: "not held by current CPU", true);
1.27 ad 595: return;
596: }
1.34 ad 597: TAILQ_REMOVE(&curcpu()->ci_data.cpu_ld_locks, ld,
598: ld_chain);
1.2 ad 599: }
1.44 matt 600: ld->ld_flags &= ~LD_LOCKED;
601: ld->ld_unlocked = where;
602: ld->ld_lwp = NULL;
1.2 ad 603: }
1.34 ad 604: __cpu_simple_unlock(&ld->ld_spinlock);
605: splx(s);
1.2 ad 606: }
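
/*
 * Illustrative sketch (not compiled): the order in which a primitive
 * calls the wantlock/locked/unlocked hooks around an exclusive acquire
 * and release.  "example_lock" is the invented type from the sketch
 * above; real primitives use the LOCKDEBUG_WANTLOCK(),
 * LOCKDEBUG_LOCKED() and LOCKDEBUG_UNLOCKED() macros.
 */
#if 0
static void
example_lock_enter(struct example_lock *el)
{
	uintptr_t where = (uintptr_t)__builtin_return_address(0);

	if (el->el_dodebug)
		lockdebug_wantlock(__func__, __LINE__, el, where, 0);
	__cpu_simple_lock(&el->el_lock);
	if (el->el_dodebug)
		lockdebug_locked(__func__, __LINE__, el, NULL, where, 0);
}

static void
example_lock_exit(struct example_lock *el)
{
	uintptr_t where = (uintptr_t)__builtin_return_address(0);

	if (el->el_dodebug)
		lockdebug_unlocked(__func__, __LINE__, el, where, 0);
	__cpu_simple_unlock(&el->el_lock);
}
#endif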
607:
608: /*
1.35 ad 609: * lockdebug_wakeup:
610: *
611: * Process a wakeup on a condition variable.
612: */
613: void
1.55 christos 614: lockdebug_wakeup(const char *func, size_t line, volatile void *lock,
615: uintptr_t where)
1.35 ad 616: {
617: lockdebug_t *ld;
618: int s;
619:
1.60 ozaki-r    620: 	if (__predict_false(panicstr != NULL || ld_panic ||
                     	    lock == (void *)&lbolt))
1.35 ad 621: return;
622:
623: s = splhigh();
624: /* Find the CV... */
1.55 christos 625: if ((ld = lockdebug_lookup(func, line, lock, where)) == NULL) {
1.35 ad 626: splx(s);
627: return;
628: }
629: /*
630: * If it has any waiters, ensure that they are using the
631: * same interlock.
632: */
1.60 ozaki-r 633: if (__predict_false(ld->ld_shares != 0 &&
634: !mutex_owned((kmutex_t *)ld->ld_locked))) {
1.55 christos 635: lockdebug_abort1(func, line, ld, s, "interlocking mutex not "
1.35 ad 636: "held during wakeup", true);
637: return;
638: }
639: __cpu_simple_unlock(&ld->ld_spinlock);
640: splx(s);
641: }
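
/*
 * Note: for condition variables, ld_shares counts sleeping waiters and
 * ld_locked records the interlock passed to lockdebug_locked(), which
 * is what allows the check above to verify that the wakeup is issued
 * with the same interlock held.
 */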
642:
643: /*
1.2 ad 644: * lockdebug_barrier:
645: *
646: * Panic if we hold more than one specified spin lock, and optionally,
647: * if we hold sleep locks.
648: */
649: void
1.55 christos 650: lockdebug_barrier(const char *func, size_t line, volatile void *spinlock,
651: int slplocks)
1.2 ad 652: {
653: struct lwp *l = curlwp;
654: lockdebug_t *ld;
1.34 ad 655: int s;
1.2 ad 656:
1.60 ozaki-r 657: if (__predict_false(panicstr != NULL || ld_panic))
1.2 ad 658: return;
659:
1.34 ad 660: s = splhigh();
661: if ((l->l_pflag & LP_INTR) == 0) {
662: TAILQ_FOREACH(ld, &curcpu()->ci_data.cpu_ld_locks, ld_chain) {
1.2 ad 663: if (ld->ld_lock == spinlock) {
664: continue;
665: }
1.34 ad 666: __cpu_simple_lock(&ld->ld_spinlock);
1.55 christos 667: lockdebug_abort1(func, line, ld, s,
1.34 ad 668: "spin lock held", true);
669: return;
1.2 ad 670: }
671: }
1.34 ad 672: if (slplocks) {
673: splx(s);
674: return;
675: }
1.60 ozaki-r 676: ld = TAILQ_FIRST(&l->l_ld_locks);
677: if (__predict_false(ld != NULL)) {
1.34 ad 678: __cpu_simple_lock(&ld->ld_spinlock);
1.55 christos 679: lockdebug_abort1(func, line, ld, s, "sleep lock held", true);
1.34 ad 680: return;
681: }
682: splx(s);
683: if (l->l_shlocks != 0) {
1.52 christos 684: TAILQ_FOREACH(ld, &ld_all, ld_achain) {
685: if (ld->ld_lockops->lo_type == LOCKOPS_CV)
686: continue;
687: if (ld->ld_lwp == l)
688: lockdebug_dump(ld, printf);
689: }
1.55 christos 690: panic("%s,%zu: holding %d shared locks", func, line,
691: l->l_shlocks);
1.2 ad 692: }
693: }
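
/*
 * Sketch (not compiled): a typical caller, e.g. code about to sleep,
 * would assert that no spin locks are held and, by passing zero for
 * 'slplocks', that no sleep locks are held either.  Callers normally
 * go through the LOCKDEBUG_BARRIER() macro from <sys/lockdebug.h>;
 * passing NULL means that no spin lock at all may be held.
 */
#if 0
	LOCKDEBUG_BARRIER(NULL, 0);
#endif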
694:
695: /*
1.10 ad 696: * lockdebug_mem_check:
697: *
698: * Check for in-use locks within a memory region that is
1.16 yamt 699: * being freed.
1.10 ad 700: */
701: void
1.55 christos 702: lockdebug_mem_check(const char *func, size_t line, void *base, size_t sz)
1.10 ad 703: {
1.16 yamt 704: lockdebug_t *ld;
1.34 ad 705: struct cpu_info *ci;
1.23 ad 706: int s;
1.10 ad 707:
1.60 ozaki-r 708: if (__predict_false(panicstr != NULL || ld_panic))
1.24 ad 709: return;
710:
1.34 ad 711: s = splhigh();
712: ci = curcpu();
713: __cpu_simple_lock(&ci->ci_data.cpu_ld_lock);
1.16 yamt 714: ld = (lockdebug_t *)rb_tree_find_node_geq(&ld_rb_tree, base);
1.23 ad 715: if (ld != NULL) {
716: const uintptr_t lock = (uintptr_t)ld->ld_lock;
717:
1.60 ozaki-r 718: if (__predict_false((uintptr_t)base > lock))
1.55 christos 719: panic("%s,%zu: corrupt tree ld=%p, base=%p, sz=%zu",
720: func, line, ld, base, sz);
1.23 ad 721: if (lock >= (uintptr_t)base + sz)
722: ld = NULL;
723: }
1.34 ad 724: __cpu_simple_unlock(&ci->ci_data.cpu_ld_lock);
1.60 ozaki-r 725: if (__predict_false(ld != NULL)) {
1.34 ad 726: __cpu_simple_lock(&ld->ld_spinlock);
1.55 christos 727: lockdebug_abort1(func, line, ld, s,
1.34 ad 728: "allocation contains active lock", !cold);
1.16 yamt 729: return;
1.34 ad 730: }
731: splx(s);
1.10 ad 732: }
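
/*
 * Sketch (not compiled): allocators invoke this check through the
 * LOCKDEBUG_MEM_CHECK() macro before returning a region to the free
 * pool, so that freeing memory which still contains an initialized
 * lock is caught immediately.  'buf' and 'size' are hypothetical.
 */
#if 0
	LOCKDEBUG_MEM_CHECK(buf, size);
#endif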
733:
734: /*
1.2 ad 735: * lockdebug_dump:
736: *
737: * Dump information about a lock on panic, or for DDB.
738: */
739: static void
1.47 christos 740: lockdebug_dump(lockdebug_t *ld, void (*pr)(const char *, ...)
741: __printflike(1, 2))
1.2 ad 742: {
743: int sleeper = (ld->ld_flags & LD_SLEEPER);
744:
745: (*pr)(
746: "lock address : %#018lx type : %18s\n"
1.35 ad 747: "initialized : %#018lx",
1.2 ad 748: (long)ld->ld_lock, (sleeper ? "sleep/adaptive" : "spin"),
1.10 ad 749: (long)ld->ld_initaddr);
1.2 ad 750:
1.35 ad 751: if (ld->ld_lockops->lo_type == LOCKOPS_CV) {
1.48 njoly 752: (*pr)(" interlock: %#018lx\n", (long)ld->ld_locked);
1.35 ad 753: } else {
754: (*pr)("\n"
755: "shared holds : %18u exclusive: %18u\n"
756: "shares wanted: %18u exclusive: %18u\n"
757: "current cpu : %18u last held: %18u\n"
758: "current lwp : %#018lx last held: %#018lx\n"
1.44 matt 759: "last locked%c : %#018lx unlocked%c: %#018lx\n",
1.35 ad 760: (unsigned)ld->ld_shares, ((ld->ld_flags & LD_LOCKED) != 0),
761: (unsigned)ld->ld_shwant, (unsigned)ld->ld_exwant,
1.40 rmind 762: (unsigned)cpu_index(curcpu()), (unsigned)ld->ld_cpu,
1.35 ad 763: (long)curlwp, (long)ld->ld_lwp,
1.44 matt 764: ((ld->ld_flags & LD_LOCKED) ? '*' : ' '),
765: (long)ld->ld_locked,
766: ((ld->ld_flags & LD_LOCKED) ? ' ' : '*'),
767: (long)ld->ld_unlocked);
1.35 ad 768: }
769:
1.2 ad 770: if (ld->ld_lockops->lo_dump != NULL)
771: (*ld->ld_lockops->lo_dump)(ld->ld_lock);
772:
773: if (sleeper) {
774: (*pr)("\n");
775: turnstile_print(ld->ld_lock, pr);
776: }
777: }
778:
779: /*
1.27 ad 780: * lockdebug_abort1:
1.2 ad 781: *
1.27 ad 782: * An error has been trapped - dump lock info and panic.
1.2 ad 783: */
1.5 ad 784: static void
1.55 christos 785: lockdebug_abort1(const char *func, size_t line, lockdebug_t *ld, int s,
1.10 ad 786: const char *msg, bool dopanic)
1.2 ad 787: {
788:
1.27 ad 789: /*
1.46 christos 790: * Don't make the situation worse if the system is already going
1.27 ad 791: * down in flames. Once a panic is triggered, lockdebug state
792: * becomes stale and cannot be trusted.
793: */
794: if (atomic_inc_uint_nv(&ld_panic) != 1) {
1.34 ad 795: __cpu_simple_unlock(&ld->ld_spinlock);
796: splx(s);
1.27 ad 797: return;
798: }
799:
1.55 christos 800: printf_nolog("%s error: %s,%zu: %s\n\n", ld->ld_lockops->lo_name,
801: func, line, msg);
1.2 ad 802: lockdebug_dump(ld, printf_nolog);
1.34 ad 803: __cpu_simple_unlock(&ld->ld_spinlock);
804: splx(s);
1.2 ad 805: printf_nolog("\n");
1.10 ad 806: if (dopanic)
1.55 christos 807: panic("LOCKDEBUG: %s error: %s,%zu: %s",
808: ld->ld_lockops->lo_name, func, line, msg);
1.2 ad 809: }
810:
811: #endif /* LOCKDEBUG */
812:
813: /*
814: * lockdebug_lock_print:
815: *
816: * Handle the DDB 'show lock' command.
817: */
818: #ifdef DDB
1.62 ozaki-r 819: #include <machine/db_machdep.h>
820: #include <ddb/db_interface.h>
821:
1.2 ad 822: void
823: lockdebug_lock_print(void *addr, void (*pr)(const char *, ...))
824: {
825: #ifdef LOCKDEBUG
826: lockdebug_t *ld;
827:
828: TAILQ_FOREACH(ld, &ld_all, ld_achain) {
1.41 dyoung 829: if (ld->ld_lock == NULL)
830: continue;
831: if (addr == NULL || ld->ld_lock == addr) {
1.2 ad 832: lockdebug_dump(ld, pr);
1.41 dyoung 833: if (addr != NULL)
834: return;
1.2 ad 835: }
836: }
1.41 dyoung 837: if (addr != NULL) {
838: (*pr)("Sorry, no record of a lock with address %p found.\n",
839: addr);
840: }
1.2 ad 841: #else
842: (*pr)("Sorry, kernel not built with the LOCKDEBUG option.\n");
843: #endif /* LOCKDEBUG */
844: }
1.61 ozaki-r 845:
1.62 ozaki-r 846: #ifdef LOCKDEBUG
847: static void
848: lockdebug_show_all_locks_lwp(void (*pr)(const char *, ...), bool show_trace)
849: {
850: struct proc *p;
851:
852: LIST_FOREACH(p, &allproc, p_list) {
853: struct lwp *l;
854: LIST_FOREACH(l, &p->p_lwps, l_sibling) {
855: lockdebug_t *ld;
856: const char *sym;
857: int i = 0;
858: if (TAILQ_EMPTY(&l->l_ld_locks))
859: continue;
860: (*pr)("Locks held by an LWP (%s):\n",
861: l->l_name ? l->l_name : p->p_comm);
862: TAILQ_FOREACH(ld, &l->l_ld_locks, ld_chain) {
863: ksyms_getname(NULL, &sym,
864: (vaddr_t)ld->ld_initaddr,
865: KSYMS_CLOSEST|KSYMS_PROC|KSYMS_ANY);
866: (*pr)("Lock %d (initialized at %s)\n", i++, sym);
867: lockdebug_dump(ld, pr);
868: }
869: if (show_trace) {
870: db_stack_trace_print((db_expr_t)(intptr_t)l,
871: true,
872: 32 /* Limit just in case */,
873: "a", pr);
874: }
875: (*pr)("\n");
876: }
877: }
878: }
879:
880: static void
881: lockdebug_show_all_locks_cpu(void (*pr)(const char *, ...), bool show_trace)
882: {
883: lockdebug_t *ld;
884: CPU_INFO_ITERATOR cii;
885: struct cpu_info *ci;
886: const char *sym;
887:
888: for (CPU_INFO_FOREACH(cii, ci)) {
889: int i = 0;
890: if (TAILQ_EMPTY(&ci->ci_data.cpu_ld_locks))
891: continue;
892: (*pr)("Locks held on CPU %u:\n", ci->ci_index);
893: TAILQ_FOREACH(ld, &ci->ci_data.cpu_ld_locks, ld_chain) {
894: ksyms_getname(NULL, &sym,
895: (vaddr_t)ld->ld_initaddr,
896: KSYMS_CLOSEST|KSYMS_PROC|KSYMS_ANY);
897: (*pr)("Lock %d (initialized at %s)\n", i++, sym);
898: lockdebug_dump(ld, pr);
899: if (show_trace) {
900: db_stack_trace_print(
901: (db_expr_t)(intptr_t)ci->ci_curlwp,
902: true,
903: 32 /* Limit just in case */,
904: "a", pr);
905: }
906: (*pr)("\n");
907: }
908: }
909: }
910: #endif /* LOCKDEBUG */
911:
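/*
 * lockdebug_show_all_locks:
 *
 *	Handle the DDB 'show all locks' command; the 't' modifier
 *	additionally prints a stack trace for each lock holder.
 */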
912: void
913: lockdebug_show_all_locks(void (*pr)(const char *, ...), const char *modif)
914: {
915: #ifdef LOCKDEBUG
916: bool show_trace = false;
917: if (modif[0] == 't')
918: show_trace = true;
919:
920: (*pr)("[Locks tracked through LWPs]\n");
921: lockdebug_show_all_locks_lwp(pr, show_trace);
922: (*pr)("\n");
923:
924: (*pr)("[Locks tracked through CPUs]\n");
925: lockdebug_show_all_locks_cpu(pr, show_trace);
926: (*pr)("\n");
927: #else
928: (*pr)("Sorry, kernel not built with the LOCKDEBUG option.\n");
929: #endif /* LOCKDEBUG */
930: }
931:
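/*
 * lockdebug_show_lockstats:
 *
 *	Handle the DDB 'show lockstats' command: summarize the
 *	tracked locks by type.
 */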
1.61 ozaki-r 932: void
1.64 ozaki-r 933: lockdebug_show_lockstats(void (*pr)(const char *, ...))
1.61 ozaki-r 934: {
935: #ifdef LOCKDEBUG
936: lockdebug_t *ld;
937: void *_ld;
938: uint32_t n_null = 0;
939: uint32_t n_spin_mutex = 0;
940: uint32_t n_adaptive_mutex = 0;
941: uint32_t n_rwlock = 0;
942: uint32_t n_cv = 0;
943: uint32_t n_others = 0;
944:
945: RB_TREE_FOREACH(_ld, &ld_rb_tree) {
946: ld = _ld;
947: if (ld->ld_lock == NULL) {
948: n_null++;
949: continue;
950: }
951: if (ld->ld_lockops->lo_type == LOCKOPS_CV) {
952: n_cv++;
953: continue;
954: }
955: if (ld->ld_lockops->lo_name[0] == 'M') {
956: if (ld->ld_lockops->lo_type == LOCKOPS_SLEEP)
957: n_adaptive_mutex++;
958: else
959: n_spin_mutex++;
960: continue;
961: }
962: if (ld->ld_lockops->lo_name[0] == 'R') {
963: n_rwlock++;
964: continue;
965: }
966: n_others++;
967: }
968: (*pr)(
969: "condvar: %u\n"
970: "spin mutex: %u\n"
971: "adaptive mutex: %u\n"
972: "rwlock: %u\n"
973: "null locks: %u\n"
974: "others: %u\n",
975: n_cv, n_spin_mutex, n_adaptive_mutex, n_rwlock,
976: n_null, n_others);
977: #else
978: (*pr)("Sorry, kernel not built with the LOCKDEBUG option.\n");
979: #endif /* LOCKDEBUG */
980: }
1.2 ad 981: #endif /* DDB */
982:
983: /*
1.65 mrg 984: * lockdebug_dismiss:
985: *
               986:  * The system is rebooting, potentially from an unsafe place,
               987:  * so disable any future lockdebug aborts.
988: */
989: void
990: lockdebug_dismiss(void)
991: {
992:
993: atomic_inc_uint_nv(&ld_panic);
994: }
995:
996: /*
1.2 ad 997: * lockdebug_abort:
998: *
999: * An error has been trapped - dump lock info and call panic().
1000: */
1001: void
1.58 christos 1002: lockdebug_abort(const char *func, size_t line, const volatile void *lock,
1.55 christos 1003: lockops_t *ops, const char *msg)
1.2 ad 1004: {
1005: #ifdef LOCKDEBUG
1006: lockdebug_t *ld;
1.34 ad 1007: int s;
1.2 ad 1008:
1.34 ad 1009: s = splhigh();
1.55 christos 1010: if ((ld = lockdebug_lookup(func, line, lock,
1.38 rafal 1011: (uintptr_t) __builtin_return_address(0))) != NULL) {
1.55 christos 1012: lockdebug_abort1(func, line, ld, s, msg, true);
1.34 ad 1013: return;
1.2 ad 1014: }
1.34 ad 1015: splx(s);
1.2 ad 1016: #endif /* LOCKDEBUG */
1017:
1.27 ad 1018: /*
               1019:  * Complain on the first occurrence only.  Otherwise proceed to
1020: * panic where we will `rendezvous' with other CPUs if the machine
1021: * is going down in flames.
1022: */
1023: if (atomic_inc_uint_nv(&ld_panic) == 1) {
1.55 christos 1024: printf_nolog("%s error: %s,%zu: %s\n\n"
1.27 ad 1025: "lock address : %#018lx\n"
1026: "current cpu : %18d\n"
1027: "current lwp : %#018lx\n",
1.55 christos 1028: ops->lo_name, func, line, msg, (long)lock,
1.40 rmind 1029: (int)cpu_index(curcpu()), (long)curlwp);
1.27 ad        1030: 		if (ops->lo_dump != NULL)
                     			(*ops->lo_dump)(lock);
1031: printf_nolog("\n");
1032: }
1.2 ad 1033:
1.55 christos 1034: panic("lock error: %s: %s,%zu: %s: lock %p cpu %d lwp %p",
1035: ops->lo_name, func, line, msg, lock, cpu_index(curcpu()), curlwp);
1.2 ad 1036: }