Annotation of src/sys/kern/subr_lockdebug.c, Revision 1.76
1.76 ! ad 1: /* $NetBSD: subr_lockdebug.c,v 1.75 2020/03/09 01:47:50 christos Exp $ */
1.2 ad 2:
3: /*-
1.73 ad 4: * Copyright (c) 2006, 2007, 2008, 2020 The NetBSD Foundation, Inc.
1.2 ad 5: * All rights reserved.
6: *
7: * This code is derived from software contributed to The NetBSD Foundation
8: * by Andrew Doran.
9: *
10: * Redistribution and use in source and binary forms, with or without
11: * modification, are permitted provided that the following conditions
12: * are met:
13: * 1. Redistributions of source code must retain the above copyright
14: * notice, this list of conditions and the following disclaimer.
15: * 2. Redistributions in binary form must reproduce the above copyright
16: * notice, this list of conditions and the following disclaimer in the
17: * documentation and/or other materials provided with the distribution.
18: *
19: * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20: * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21: * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22: * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23: * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24: * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25: * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26: * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27: * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28: * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29: * POSSIBILITY OF SUCH DAMAGE.
30: */
31:
32: /*
1.11 ad 33: * Basic lock debugging code shared among lock primitives.
1.2 ad 34: */
35:
1.9 dsl 36: #include <sys/cdefs.h>
1.76 ! ad 37: __KERNEL_RCSID(0, "$NetBSD: subr_lockdebug.c,v 1.75 2020/03/09 01:47:50 christos Exp $");
1.9 dsl 38:
1.54 ozaki-r 39: #ifdef _KERNEL_OPT
1.2 ad 40: #include "opt_ddb.h"
1.54 ozaki-r 41: #endif
1.2 ad 42:
43: #include <sys/param.h>
44: #include <sys/proc.h>
45: #include <sys/systm.h>
1.10 ad 46: #include <sys/kernel.h>
1.2 ad 47: #include <sys/kmem.h>
48: #include <sys/lockdebug.h>
49: #include <sys/sleepq.h>
1.10 ad 50: #include <sys/cpu.h>
1.22 ad 51: #include <sys/atomic.h>
1.26 ad 52: #include <sys/lock.h>
1.43 matt 53: #include <sys/rbtree.h>
1.62 ozaki-r 54: #include <sys/ksyms.h>
1.16 yamt 55:
1.25 ad 56: #include <machine/lock.h>
57:
1.28 ad 58: unsigned int ld_panic;
59:
1.2 ad 60: #ifdef LOCKDEBUG
61:
1.71 scole 62: #ifdef __ia64__
63: #define LD_BATCH_SHIFT 16
64: #else
1.2 ad 65: #define LD_BATCH_SHIFT 9
1.71 scole 66: #endif
1.2 ad 67: #define LD_BATCH (1 << LD_BATCH_SHIFT)
68: #define LD_BATCH_MASK (LD_BATCH - 1)
69: #define LD_MAX_LOCKS 1048576
70: #define LD_SLOP 16
71:
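/*
 * With the values above, each batch holds 1 << LD_BATCH_SHIFT
 * structures (512, or 65536 on ia64), and the table is capped at
 * LD_MAX_LOCKS entries; once the cap is hit, ld_nomore is set and
 * further locks simply go untracked.
 */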
72: #define LD_LOCKED 0x01
73: #define LD_SLEEPER 0x02
74:
1.23 ad 75: #define LD_WRITE_LOCK 0x80000000
76:
1.2 ad 77: typedef struct lockdebug {
1.42 rmind 78: struct rb_node ld_rb_node;
1.34 ad 79: __cpu_simple_lock_t ld_spinlock;
1.2 ad 80: _TAILQ_ENTRY(struct lockdebug, volatile) ld_chain;
81: _TAILQ_ENTRY(struct lockdebug, volatile) ld_achain;
82: volatile void *ld_lock;
83: lockops_t *ld_lockops;
84: struct lwp *ld_lwp;
85: uintptr_t ld_locked;
86: uintptr_t ld_unlocked;
1.10 ad 87: uintptr_t ld_initaddr;
1.2 ad 88: uint16_t ld_shares;
89: uint16_t ld_cpu;
90: uint8_t ld_flags;
91: uint8_t ld_shwant; /* advisory */
92: uint8_t ld_exwant; /* advisory */
93: uint8_t ld_unused;
94: } volatile lockdebug_t;
95:
96: typedef _TAILQ_HEAD(lockdebuglist, struct lockdebug, volatile) lockdebuglist_t;
97:
1.34 ad 98: __cpu_simple_lock_t ld_mod_lk;
1.13 matt 99: lockdebuglist_t ld_free = TAILQ_HEAD_INITIALIZER(ld_free);
1.75 christos 100: #ifdef _KERNEL
1.13 matt 101: lockdebuglist_t ld_all = TAILQ_HEAD_INITIALIZER(ld_all);
1.75 christos 102: #else
103: extern lockdebuglist_t ld_all;
104: #define cpu_name(a) "?"
105: #define cpu_index(a) -1
106: #define curlwp NULL
107: #endif /* _KERNEL */
1.2 ad 108: int ld_nfree;
109: int ld_freeptr;
110: int ld_recurse;
1.5 ad 111: bool ld_nomore;
1.2 ad 112: lockdebug_t ld_prime[LD_BATCH];
113:
1.75 christos 114: #ifdef _KERNEL
1.55 christos 115: static void lockdebug_abort1(const char *, size_t, lockdebug_t *, int,
116: const char *, bool);
1.34 ad 117: static int lockdebug_more(int);
1.5 ad 118: static void lockdebug_init(void);
1.74 ad 119: static void lockdebug_dump(lwp_t *, lockdebug_t *,
120: void (*)(const char *, ...)
1.52 christos 121: __printflike(1, 2));
1.2 ad 122:
1.16 yamt 123: static signed int
1.42 rmind 124: ld_rbto_compare_nodes(void *ctx, const void *n1, const void *n2)
1.16 yamt 125: {
1.42 rmind 126: const lockdebug_t *ld1 = n1;
127: const lockdebug_t *ld2 = n2;
1.20 yamt 128: const uintptr_t a = (uintptr_t)ld1->ld_lock;
129: const uintptr_t b = (uintptr_t)ld2->ld_lock;
130:
131: if (a < b)
1.42 rmind 132: return -1;
133: if (a > b)
1.20 yamt 134: return 1;
1.16 yamt 135: return 0;
136: }
137:
138: static signed int
1.42 rmind 139: ld_rbto_compare_key(void *ctx, const void *n, const void *key)
1.16 yamt 140: {
1.42 rmind 141: const lockdebug_t *ld = n;
1.20 yamt 142: const uintptr_t a = (uintptr_t)ld->ld_lock;
143: const uintptr_t b = (uintptr_t)key;
144:
145: if (a < b)
1.42 rmind 146: return -1;
147: if (a > b)
1.20 yamt 148: return 1;
1.16 yamt 149: return 0;
150: }
151:
1.42 rmind 152: static rb_tree_t ld_rb_tree;
1.16 yamt 153:
1.42 rmind 154: static const rb_tree_ops_t ld_rb_tree_ops = {
1.37 matt 155: .rbto_compare_nodes = ld_rbto_compare_nodes,
156: .rbto_compare_key = ld_rbto_compare_key,
1.42 rmind 157: .rbto_node_offset = offsetof(lockdebug_t, ld_rb_node),
158: .rbto_context = NULL
1.16 yamt 159: };
160:
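/*
 * The tree above maps lock addresses to lockdebug_t entries.  As a
 * minimal sketch (the calls below all appear, with the appropriate
 * locking, later in this file), the rbtree(9) usage boils down to:
 */
#if 0
	rb_tree_init(&ld_rb_tree, &ld_rb_tree_ops);
	(void)rb_tree_insert_node(&ld_rb_tree, __UNVOLATILE(ld));
	ld = rb_tree_find_node(&ld_rb_tree, (void *)(intptr_t)lock);
	rb_tree_remove_node(&ld_rb_tree, __UNVOLATILE(ld));
#endif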
1.34 ad 161: static inline lockdebug_t *
1.58 christos 162: lockdebug_lookup1(const volatile void *lock)
1.23 ad 163: {
1.34 ad 164: lockdebug_t *ld;
165: struct cpu_info *ci;
1.23 ad 166:
1.34 ad 167: ci = curcpu();
168: __cpu_simple_lock(&ci->ci_data.cpu_ld_lock);
1.58 christos 169: ld = rb_tree_find_node(&ld_rb_tree, (void *)(intptr_t)lock);
1.34 ad 170: __cpu_simple_unlock(&ci->ci_data.cpu_ld_lock);
171: if (ld == NULL) {
172: return NULL;
173: }
174: __cpu_simple_lock(&ld->ld_spinlock);
1.23 ad 175:
1.34 ad 176: return ld;
1.2 ad 177: }
178:
1.23 ad 179: static void
1.34 ad 180: lockdebug_lock_cpus(void)
1.2 ad 181: {
1.34 ad 182: CPU_INFO_ITERATOR cii;
183: struct cpu_info *ci;
1.2 ad 184:
1.34 ad 185: for (CPU_INFO_FOREACH(cii, ci)) {
186: __cpu_simple_lock(&ci->ci_data.cpu_ld_lock);
187: }
1.23 ad 188: }
189:
190: static void
1.34 ad 191: lockdebug_unlock_cpus(void)
1.23 ad 192: {
1.34 ad 193: CPU_INFO_ITERATOR cii;
194: struct cpu_info *ci;
1.23 ad 195:
1.34 ad 196: for (CPU_INFO_FOREACH(cii, ci)) {
197: __cpu_simple_unlock(&ci->ci_data.cpu_ld_lock);
198: }
1.2 ad 199: }
200:
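/*
 * Locking scheme, as implemented below: ld_mod_lk serializes table
 * modification (entry allocation and free).  Each CPU's
 * ci_data.cpu_ld_lock acts as a read lock on the tree: a lookup takes
 * only the current CPU's lock, while insert and remove take every
 * CPU's lock via lockdebug_lock_cpus().  The per-entry ld_spinlock
 * protects the entry itself.  All of this runs at splhigh().
 */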
201: /*
1.19 yamt 202: * lockdebug_lookup:
203: *
204: * Find a lockdebug structure by a pointer to a lock and return it locked.
205: */
206: static inline lockdebug_t *
1.58 christos 207: lockdebug_lookup(const char *func, size_t line, const volatile void *lock,
1.55 christos 208: uintptr_t where)
1.19 yamt 209: {
210: lockdebug_t *ld;
211:
1.34 ad 212: ld = lockdebug_lookup1(lock);
1.60 ozaki-r 213: if (__predict_false(ld == NULL)) {
1.55 christos 214: panic("%s,%zu: uninitialized lock (lock=%p, from=%08"
215: PRIxPTR ")", func, line, lock, where);
1.42 rmind 216: }
1.19 yamt 217: return ld;
218: }
219:
220: /*
1.2 ad 221: * lockdebug_init:
222: *
223: * Initialize the lockdebug system. Allocate an initial pool of
224: * lockdebug structures before the VM system is up and running.
225: */
1.5 ad 226: static void
1.2 ad 227: lockdebug_init(void)
228: {
229: lockdebug_t *ld;
230: int i;
231:
1.34 ad 232: TAILQ_INIT(&curcpu()->ci_data.cpu_ld_locks);
233: TAILQ_INIT(&curlwp->l_ld_locks);
234: __cpu_simple_lock_init(&curcpu()->ci_data.cpu_ld_lock);
235: __cpu_simple_lock_init(&ld_mod_lk);
1.15 matt 236:
1.16 yamt 237: rb_tree_init(&ld_rb_tree, &ld_rb_tree_ops);
238:
1.2 ad 239: ld = ld_prime;
240: for (i = 1, ld++; i < LD_BATCH; i++, ld++) {
1.34 ad 241: __cpu_simple_lock_init(&ld->ld_spinlock);
1.2 ad 242: TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
243: TAILQ_INSERT_TAIL(&ld_all, ld, ld_achain);
244: }
245: ld_freeptr = 1;
246: ld_nfree = LD_BATCH - 1;
247: }
248:
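/*
 * Note that the loop above starts at 1: slot 0 of ld_prime stays
 * unused and ld_freeptr starts at 1, so priming yields LD_BATCH - 1
 * free structures for locks initialized before kmem_alloc() is
 * usable.
 */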
249: /*
250: * lockdebug_alloc:
251: *
252: * A lock is being initialized, so allocate an associated debug
253: * structure.
254: */
1.16 yamt 255: bool
1.55 christos 256: lockdebug_alloc(const char *func, size_t line, volatile void *lock,
257: lockops_t *lo, uintptr_t initaddr)
1.2 ad 258: {
259: struct cpu_info *ci;
260: lockdebug_t *ld;
1.34 ad 261: int s;
1.2 ad 262:
1.60 ozaki-r 263: if (__predict_false(lo == NULL || panicstr != NULL || ld_panic))
1.16 yamt 264: return false;
1.60 ozaki-r 265: if (__predict_false(ld_freeptr == 0))
1.5 ad 266: lockdebug_init();
1.2 ad 267:
1.34 ad 268: s = splhigh();
269: __cpu_simple_lock(&ld_mod_lk);
1.60 ozaki-r 270: if (__predict_false((ld = lockdebug_lookup1(lock)) != NULL)) {
1.34 ad 271: __cpu_simple_unlock(&ld_mod_lk);
1.55 christos 272: lockdebug_abort1(func, line, ld, s, "already initialized",
273: true);
1.27 ad 274: return false;
1.19 yamt 275: }
276:
1.2 ad 277: /*
278: * Pinch a new debug structure. We may recurse because we call
279: * kmem_alloc(), which may need to initialize new locks somewhere
1.7 skrll 280: * down the path. If not recursing, we try to maintain at least
1.2 ad 281: * LD_SLOP structures free, which should hopefully be enough to
282: * satisfy kmem_alloc(). If we can't provide a structure, not to
283: * worry: we'll just mark the lock as not having an ID.
284: */
1.23 ad 285: ci = curcpu();
1.2 ad 286: ci->ci_lkdebug_recurse++;
287: if (TAILQ_EMPTY(&ld_free)) {
1.5 ad 288: if (ci->ci_lkdebug_recurse > 1 || ld_nomore) {
1.2 ad 289: ci->ci_lkdebug_recurse--;
1.34 ad 290: __cpu_simple_unlock(&ld_mod_lk);
291: splx(s);
1.16 yamt 292: return false;
1.2 ad 293: }
1.34 ad 294: s = lockdebug_more(s);
295: } else if (ci->ci_lkdebug_recurse == 1 && ld_nfree < LD_SLOP) {
296: s = lockdebug_more(s);
297: }
1.60 ozaki-r 298: if (__predict_false((ld = TAILQ_FIRST(&ld_free)) == NULL)) {
1.34 ad 299: __cpu_simple_unlock(&ld_mod_lk);
300: splx(s);
1.16 yamt 301: return false;
1.2 ad 302: }
303: TAILQ_REMOVE(&ld_free, ld, ld_chain);
304: ld_nfree--;
305: ci->ci_lkdebug_recurse--;
306:
1.60 ozaki-r 307: if (__predict_false(ld->ld_lock != NULL)) {
1.55 christos 308: panic("%s,%zu: corrupt table ld %p", func, line, ld);
1.34 ad 309: }
1.2 ad 310:
311: /* Initialise the structure. */
312: ld->ld_lock = lock;
313: ld->ld_lockops = lo;
314: ld->ld_locked = 0;
315: ld->ld_unlocked = 0;
316: ld->ld_lwp = NULL;
1.10 ad 317: ld->ld_initaddr = initaddr;
1.35 ad 318: ld->ld_flags = (lo->lo_type == LOCKOPS_SLEEP ? LD_SLEEPER : 0);
1.34 ad 319: lockdebug_lock_cpus();
1.42 rmind 320: (void)rb_tree_insert_node(&ld_rb_tree, __UNVOLATILE(ld));
1.34 ad 321: lockdebug_unlock_cpus();
322: __cpu_simple_unlock(&ld_mod_lk);
1.2 ad 323:
1.34 ad 324: splx(s);
1.16 yamt 325: return true;
1.2 ad 326: }
327:
328: /*
329: * lockdebug_free:
330: *
331: * A lock is being destroyed, so release debugging resources.
332: */
333: void
1.55 christos 334: lockdebug_free(const char *func, size_t line, volatile void *lock)
1.2 ad 335: {
336: lockdebug_t *ld;
1.34 ad 337: int s;
1.2 ad 338:
1.60 ozaki-r 339: if (__predict_false(panicstr != NULL || ld_panic))
1.2 ad 340: return;
341:
1.34 ad 342: s = splhigh();
343: __cpu_simple_lock(&ld_mod_lk);
1.55 christos 344: ld = lockdebug_lookup(func, line, lock,
345: (uintptr_t) __builtin_return_address(0));
1.60 ozaki-r 346: if (__predict_false(ld == NULL)) {
1.34 ad 347: __cpu_simple_unlock(&ld_mod_lk);
1.55 christos 348: 		panic("%s,%zu: destroying uninitialized object %p",
 349: 		    func, line, lock);
1.27 ad 350: return;
1.2 ad 351: }
1.60 ozaki-r 352: if (__predict_false((ld->ld_flags & LD_LOCKED) != 0 ||
353: ld->ld_shares != 0)) {
1.34 ad 354: __cpu_simple_unlock(&ld_mod_lk);
1.55 christos 355: lockdebug_abort1(func, line, ld, s, "is locked or in use",
356: true);
1.27 ad 357: return;
358: }
1.34 ad 359: lockdebug_lock_cpus();
1.42 rmind 360: rb_tree_remove_node(&ld_rb_tree, __UNVOLATILE(ld));
1.34 ad 361: lockdebug_unlock_cpus();
1.2 ad 362: ld->ld_lock = NULL;
363: TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
364: ld_nfree++;
1.34 ad 365: __cpu_simple_unlock(&ld->ld_spinlock);
366: __cpu_simple_unlock(&ld_mod_lk);
367: splx(s);
1.2 ad 368: }
369:
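/*
 * A sketch of how a lock primitive drives lockdebug_alloc() and
 * lockdebug_free(), assuming the LOCKDEBUG_ALLOC()/LOCKDEBUG_FREE()
 * wrappers from <sys/lockdebug.h> (which supply __func__ and
 * __LINE__).  The "foolock" primitive and its members are
 * hypothetical:
 */
#if 0
static void	foolock_dump(volatile void *, void (*)(const char *, ...));

static lockops_t foolock_lockops = {
	.lo_name = "foolock",
	.lo_type = LOCKOPS_SLEEP,	/* LOCKOPS_SPIN for a spin lock */
	.lo_dump = foolock_dump,
};

void
foolock_init(struct foolock *fl)
{

	/* false means untracked: no debug structure was available. */
	fl->fl_dodebug = LOCKDEBUG_ALLOC(fl, &foolock_lockops,
	    (uintptr_t)__builtin_return_address(0));
}

void
foolock_destroy(struct foolock *fl)
{

	LOCKDEBUG_FREE(fl->fl_dodebug, fl);
}
#endif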
370: /*
371: * lockdebug_more:
372: *
373: * Allocate a batch of debug structures and add to the free list.
1.34 ad 374: * Must be called with ld_mod_lk held.
1.2 ad 375: */
1.34 ad 376: static int
377: lockdebug_more(int s)
1.2 ad 378: {
379: lockdebug_t *ld;
380: void *block;
1.5 ad 381: int i, base, m;
1.2 ad 382:
1.35 ad 383: /*
384: * Can't call kmem_alloc() if in interrupt context. XXX We could
385: * deadlock, because we don't know which locks the caller holds.
386: */
1.59 ozaki-r 387: if (cpu_intr_p() || cpu_softintr_p()) {
1.35 ad 388: return s;
389: }
390:
1.2 ad 391: while (ld_nfree < LD_SLOP) {
1.34 ad 392: __cpu_simple_unlock(&ld_mod_lk);
393: splx(s);
1.2 ad 394: block = kmem_zalloc(LD_BATCH * sizeof(lockdebug_t), KM_SLEEP);
1.34 ad 395: s = splhigh();
396: __cpu_simple_lock(&ld_mod_lk);
1.2 ad 397:
398: if (ld_nfree > LD_SLOP) {
399: /* Somebody beat us to it. */
1.34 ad 400: __cpu_simple_unlock(&ld_mod_lk);
401: splx(s);
1.2 ad 402: kmem_free(block, LD_BATCH * sizeof(lockdebug_t));
1.34 ad 403: s = splhigh();
404: __cpu_simple_lock(&ld_mod_lk);
1.2 ad 405: continue;
406: }
407:
408: base = ld_freeptr;
409: ld_nfree += LD_BATCH;
410: ld = block;
411: base <<= LD_BATCH_SHIFT;
1.66 riastrad 412: m = uimin(LD_MAX_LOCKS, base + LD_BATCH);
1.5 ad 413:
414: if (m == LD_MAX_LOCKS)
415: ld_nomore = true;
1.2 ad 416:
1.5 ad 417: for (i = base; i < m; i++, ld++) {
1.34 ad 418: __cpu_simple_lock_init(&ld->ld_spinlock);
1.2 ad 419: TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
420: TAILQ_INSERT_TAIL(&ld_all, ld, ld_achain);
421: }
422:
1.22 ad 423: membar_producer();
1.2 ad 424: }
1.34 ad 425:
426: return s;
1.2 ad 427: }
428:
429: /*
430: * lockdebug_wantlock:
431: *
1.56 pgoyette 432: * Process the preamble to a lock acquire. The "shared"
433: * parameter controls which ld_{ex,sh}want counter is
434: * updated; a negative value of shared updates neither.
1.2 ad 435: */
436: void
1.55 christos 437: lockdebug_wantlock(const char *func, size_t line,
1.58 christos 438: const volatile void *lock, uintptr_t where, int shared)
1.2 ad 439: {
440: struct lwp *l = curlwp;
441: lockdebug_t *ld;
1.3 thorpej 442: bool recurse;
1.34 ad 443: int s;
1.2 ad 444:
445: (void)shared;
1.4 thorpej 446: recurse = false;
1.2 ad 447:
1.60 ozaki-r 448: if (__predict_false(panicstr != NULL || ld_panic))
1.2 ad 449: return;
450:
1.34 ad 451: s = splhigh();
1.55 christos 452: if ((ld = lockdebug_lookup(func, line, lock, where)) == NULL) {
1.34 ad 453: splx(s);
1.2 ad 454: return;
1.34 ad 455: }
1.32 yamt 456: if ((ld->ld_flags & LD_LOCKED) != 0 || ld->ld_shares != 0) {
1.2 ad 457: if ((ld->ld_flags & LD_SLEEPER) != 0) {
1.49 mlelstv 458: if (ld->ld_lwp == l)
1.4 thorpej 459: recurse = true;
1.40 rmind 460: } else if (ld->ld_cpu == (uint16_t)cpu_index(curcpu()))
1.4 thorpej 461: recurse = true;
1.2 ad 462: }
1.10 ad 463: if (cpu_intr_p()) {
1.60 ozaki-r 464: if (__predict_false((ld->ld_flags & LD_SLEEPER) != 0)) {
1.55 christos 465: lockdebug_abort1(func, line, ld, s,
1.10 ad 466: "acquiring sleep lock from interrupt context",
467: true);
1.27 ad 468: return;
469: }
1.10 ad 470: }
1.56 pgoyette 471: if (shared > 0)
1.2 ad 472: ld->ld_shwant++;
1.56 pgoyette 473: else if (shared == 0)
1.2 ad 474: ld->ld_exwant++;
1.60 ozaki-r 475: if (__predict_false(recurse)) {
1.55 christos 476: lockdebug_abort1(func, line, ld, s, "locking against myself",
1.10 ad 477: true);
1.27 ad 478: return;
479: }
1.74 ad 480: if (l->l_ld_wanted == NULL) {
481: l->l_ld_wanted = ld;
482: }
1.34 ad 483: __cpu_simple_unlock(&ld->ld_spinlock);
484: splx(s);
1.2 ad 485: }
486:
487: /*
488: * lockdebug_locked:
489: *
490: * Process a lock acquire operation.
491: */
492: void
1.55 christos 493: lockdebug_locked(const char *func, size_t line,
494: volatile void *lock, void *cvlock, uintptr_t where, int shared)
1.2 ad 495: {
496: struct lwp *l = curlwp;
497: lockdebug_t *ld;
1.34 ad 498: int s;
1.2 ad 499:
1.60 ozaki-r 500: if (__predict_false(panicstr != NULL || ld_panic))
1.2 ad 501: return;
502:
1.34 ad 503: s = splhigh();
1.55 christos 504: if ((ld = lockdebug_lookup(func, line, lock, where)) == NULL) {
1.34 ad 505: splx(s);
1.2 ad 506: return;
1.34 ad 507: }
1.76 ! ad 508: if (shared) {
1.2 ad 509: l->l_shlocks++;
1.45 yamt 510: ld->ld_locked = where;
1.2 ad 511: ld->ld_shares++;
512: ld->ld_shwant--;
513: } else {
1.60 ozaki-r 514: if (__predict_false((ld->ld_flags & LD_LOCKED) != 0)) {
1.55 christos 515: lockdebug_abort1(func, line, ld, s, "already locked",
1.34 ad 516: true);
1.27 ad 517: return;
518: }
1.2 ad 519: ld->ld_flags |= LD_LOCKED;
520: ld->ld_locked = where;
521: ld->ld_exwant--;
522: if ((ld->ld_flags & LD_SLEEPER) != 0) {
1.34 ad 523: TAILQ_INSERT_TAIL(&l->l_ld_locks, ld, ld_chain);
1.2 ad 524: } else {
1.34 ad 525: TAILQ_INSERT_TAIL(&curcpu()->ci_data.cpu_ld_locks,
526: ld, ld_chain);
1.2 ad 527: }
528: }
1.40 rmind 529: ld->ld_cpu = (uint16_t)cpu_index(curcpu());
1.32 yamt 530: ld->ld_lwp = l;
1.34 ad 531: __cpu_simple_unlock(&ld->ld_spinlock);
1.74 ad 532: if (l->l_ld_wanted == ld) {
533: l->l_ld_wanted = NULL;
534: }
1.34 ad 535: splx(s);
1.2 ad 536: }
537:
538: /*
539: * lockdebug_unlocked:
540: *
541: * Process a lock release operation.
542: */
543: void
1.55 christos 544: lockdebug_unlocked(const char *func, size_t line,
545: volatile void *lock, uintptr_t where, int shared)
1.2 ad 546: {
547: struct lwp *l = curlwp;
548: lockdebug_t *ld;
1.34 ad 549: int s;
1.2 ad 550:
1.60 ozaki-r 551: if (__predict_false(panicstr != NULL || ld_panic))
1.2 ad 552: return;
553:
1.34 ad 554: s = splhigh();
1.55 christos 555: if ((ld = lockdebug_lookup(func, line, lock, where)) == NULL) {
1.34 ad 556: splx(s);
1.2 ad 557: return;
1.34 ad 558: }
1.76 ! ad 559: if (shared) {
1.60 ozaki-r 560: if (__predict_false(l->l_shlocks == 0)) {
1.55 christos 561: lockdebug_abort1(func, line, ld, s,
1.10 ad 562: "no shared locks held by LWP", true);
1.27 ad 563: return;
564: }
1.60 ozaki-r 565: if (__predict_false(ld->ld_shares == 0)) {
1.55 christos 566: lockdebug_abort1(func, line, ld, s,
1.10 ad 567: "no shared holds on this lock", true);
1.27 ad 568: return;
569: }
1.2 ad 570: l->l_shlocks--;
571: ld->ld_shares--;
1.45 yamt 572: if (ld->ld_lwp == l) {
573: ld->ld_unlocked = where;
1.32 yamt 574: ld->ld_lwp = NULL;
1.45 yamt 575: }
1.40 rmind 576: if (ld->ld_cpu == (uint16_t)cpu_index(curcpu()))
1.32 yamt 577: ld->ld_cpu = (uint16_t)-1;
1.2 ad 578: } else {
1.60 ozaki-r 579: if (__predict_false((ld->ld_flags & LD_LOCKED) == 0)) {
1.55 christos 580: lockdebug_abort1(func, line, ld, s, "not locked", true);
1.27 ad 581: return;
582: }
1.2 ad 583:
584: if ((ld->ld_flags & LD_SLEEPER) != 0) {
1.60 ozaki-r 585: if (__predict_false(ld->ld_lwp != curlwp)) {
1.55 christos 586: lockdebug_abort1(func, line, ld, s,
1.10 ad 587: "not held by current LWP", true);
1.27 ad 588: return;
589: }
1.34 ad 590: TAILQ_REMOVE(&l->l_ld_locks, ld, ld_chain);
1.2 ad 591: } else {
1.60 ozaki-r 592: uint16_t idx = (uint16_t)cpu_index(curcpu());
593: if (__predict_false(ld->ld_cpu != idx)) {
1.55 christos 594: lockdebug_abort1(func, line, ld, s,
1.10 ad 595: "not held by current CPU", true);
1.27 ad 596: return;
597: }
1.34 ad 598: TAILQ_REMOVE(&curcpu()->ci_data.cpu_ld_locks, ld,
599: ld_chain);
1.2 ad 600: }
1.44 matt 601: ld->ld_flags &= ~LD_LOCKED;
602: ld->ld_unlocked = where;
603: ld->ld_lwp = NULL;
1.2 ad 604: }
1.34 ad 605: __cpu_simple_unlock(&ld->ld_spinlock);
606: splx(s);
1.2 ad 607: }
608:
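/*
 * A sketch of the acquire/release lifecycle as a primitive's enter
 * and exit paths would drive it, assuming the LOCKDEBUG_WANTLOCK(),
 * LOCKDEBUG_LOCKED() and LOCKDEBUG_UNLOCKED() wrappers from
 * <sys/lockdebug.h>.  The "foolock" routines are hypothetical:
 */
#if 0
void
foolock_enter(struct foolock *fl)
{
	uintptr_t ra = (uintptr_t)__builtin_return_address(0);

	/* Preamble: records the want and catches recursion. */
	LOCKDEBUG_WANTLOCK(fl->fl_dodebug, fl, ra, 0 /* exclusive */);
	foolock_acquire_hw(fl);
	/* Postamble: marks the lock as held by this LWP/CPU. */
	LOCKDEBUG_LOCKED(fl->fl_dodebug, fl, NULL, ra, 0);
}

void
foolock_exit(struct foolock *fl)
{

	LOCKDEBUG_UNLOCKED(fl->fl_dodebug, fl,
	    (uintptr_t)__builtin_return_address(0), 0 /* exclusive */);
	foolock_release_hw(fl);
}
#endif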
609: /*
610: * lockdebug_barrier:
611: *
1.73 ad 612: * Panic if we hold any lock other than the one specified and,
 613: * unless slplocks is set, if we hold any sleep locks.
1.2 ad 614: */
615: void
1.73 ad 616: lockdebug_barrier(const char *func, size_t line, volatile void *onelock,
1.55 christos 617: int slplocks)
1.2 ad 618: {
619: struct lwp *l = curlwp;
620: lockdebug_t *ld;
1.34 ad 621: int s;
1.2 ad 622:
1.60 ozaki-r 623: if (__predict_false(panicstr != NULL || ld_panic))
1.2 ad 624: return;
625:
1.34 ad 626: s = splhigh();
627: if ((l->l_pflag & LP_INTR) == 0) {
628: TAILQ_FOREACH(ld, &curcpu()->ci_data.cpu_ld_locks, ld_chain) {
1.73 ad 629: if (ld->ld_lock == onelock) {
1.2 ad 630: continue;
631: }
1.34 ad 632: __cpu_simple_lock(&ld->ld_spinlock);
1.55 christos 633: lockdebug_abort1(func, line, ld, s,
1.34 ad 634: "spin lock held", true);
635: return;
1.2 ad 636: }
637: }
1.34 ad 638: if (slplocks) {
639: splx(s);
640: return;
641: }
1.60 ozaki-r 642: ld = TAILQ_FIRST(&l->l_ld_locks);
1.73 ad 643: if (__predict_false(ld != NULL && ld->ld_lock != onelock)) {
1.34 ad 644: __cpu_simple_lock(&ld->ld_spinlock);
1.55 christos 645: lockdebug_abort1(func, line, ld, s, "sleep lock held", true);
1.34 ad 646: return;
647: }
648: splx(s);
649: if (l->l_shlocks != 0) {
1.52 christos 650: TAILQ_FOREACH(ld, &ld_all, ld_achain) {
1.73 ad 651: if (ld->ld_lock == onelock) {
652: continue;
653: }
1.52 christos 654: if (ld->ld_lwp == l)
1.74 ad 655: lockdebug_dump(l, ld, printf);
1.52 christos 656: }
1.55 christos 657: panic("%s,%zu: holding %d shared locks", func, line,
658: l->l_shlocks);
1.2 ad 659: }
660: }
661:
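/*
 * A sketch of typical callers, assuming the LOCKDEBUG_BARRIER()
 * wrapper from <sys/lockdebug.h>:
 */
#if 0
	/* Panic if anything other than kernel_lock is held. */
	LOCKDEBUG_BARRIER(&kernel_lock, 1);

	/* Panic if any lock at all, sleep locks included, is held. */
	LOCKDEBUG_BARRIER(NULL, 0);
#endif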
662: /*
1.10 ad 663: * lockdebug_mem_check:
664: *
665: * Check for in-use locks within a memory region that is
1.16 yamt 666: * being freed.
1.10 ad 667: */
668: void
1.55 christos 669: lockdebug_mem_check(const char *func, size_t line, void *base, size_t sz)
1.10 ad 670: {
1.16 yamt 671: lockdebug_t *ld;
1.34 ad 672: struct cpu_info *ci;
1.23 ad 673: int s;
1.10 ad 674:
1.60 ozaki-r 675: if (__predict_false(panicstr != NULL || ld_panic))
1.24 ad 676: return;
677:
1.34 ad 678: s = splhigh();
679: ci = curcpu();
680: __cpu_simple_lock(&ci->ci_data.cpu_ld_lock);
1.16 yamt 681: ld = (lockdebug_t *)rb_tree_find_node_geq(&ld_rb_tree, base);
1.23 ad 682: if (ld != NULL) {
683: const uintptr_t lock = (uintptr_t)ld->ld_lock;
684:
1.60 ozaki-r 685: if (__predict_false((uintptr_t)base > lock))
1.55 christos 686: panic("%s,%zu: corrupt tree ld=%p, base=%p, sz=%zu",
687: func, line, ld, base, sz);
1.23 ad 688: if (lock >= (uintptr_t)base + sz)
689: ld = NULL;
690: }
1.34 ad 691: __cpu_simple_unlock(&ci->ci_data.cpu_ld_lock);
1.60 ozaki-r 692: if (__predict_false(ld != NULL)) {
1.34 ad 693: __cpu_simple_lock(&ld->ld_spinlock);
1.55 christos 694: lockdebug_abort1(func, line, ld, s,
1.34 ad 695: "allocation contains active lock", !cold);
1.16 yamt 696: return;
1.34 ad 697: }
698: splx(s);
1.10 ad 699: }
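/*
 * A sketch of the caller's side, assuming the LOCKDEBUG_MEM_CHECK()
 * wrapper from <sys/lockdebug.h>: an allocator's free path asserts
 * that the region being handed back contains no initialized locks.
 * The "foo_free" function is hypothetical:
 */
#if 0
void
foo_free(void *p, size_t size)
{

	LOCKDEBUG_MEM_CHECK(p, size);
	/* ... return the memory to the allocator ... */
}
#endif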
1.75 christos 700: #endif /* _KERNEL */
701:
702: #ifdef DDB
703: #include <machine/db_machdep.h>
704: #include <ddb/db_interface.h>
705: #include <ddb/db_access.h>
706: #endif
1.10 ad 707:
708: /*
1.2 ad 709: * lockdebug_dump:
710: *
711: * Dump information about a lock on panic, or for DDB.
712: */
713: static void
1.74 ad 714: lockdebug_dump(lwp_t *l, lockdebug_t *ld, void (*pr)(const char *, ...)
1.47 christos 715: __printflike(1, 2))
1.2 ad 716: {
717: int sleeper = (ld->ld_flags & LD_SLEEPER);
1.75 christos 718: lockops_t *lo = ld->ld_lockops;
1.2 ad 719:
720: (*pr)(
721: "lock address : %#018lx type : %18s\n"
1.35 ad 722: "initialized : %#018lx",
1.2 ad 723: (long)ld->ld_lock, (sleeper ? "sleep/adaptive" : "spin"),
1.10 ad 724: (long)ld->ld_initaddr);
1.2 ad 725:
1.75 christos 726: #ifndef _KERNEL
727: lockops_t los;
728: lo = &los;
729: db_read_bytes((db_addr_t)ld->ld_lockops, sizeof(los), (char *)lo);
730: #endif
1.76 ! ad 731: (*pr)("\n"
! 732: "shared holds : %18u exclusive: %18u\n"
! 733: "shares wanted: %18u exclusive: %18u\n"
! 734: "relevant cpu : %18u last held: %18u\n"
! 735: "relevant lwp : %#018lx last held: %#018lx\n"
! 736: "last locked%c : %#018lx unlocked%c: %#018lx\n",
! 737: (unsigned)ld->ld_shares, ((ld->ld_flags & LD_LOCKED) != 0),
! 738: (unsigned)ld->ld_shwant, (unsigned)ld->ld_exwant,
! 739: (unsigned)cpu_index(l->l_cpu), (unsigned)ld->ld_cpu,
! 740: (long)l, (long)ld->ld_lwp,
! 741: ((ld->ld_flags & LD_LOCKED) ? '*' : ' '),
! 742: (long)ld->ld_locked,
! 743: ((ld->ld_flags & LD_LOCKED) ? ' ' : '*'),
! 744: (long)ld->ld_unlocked);
1.35 ad 745:
1.75 christos 746: #ifdef _KERNEL
747: if (lo->lo_dump != NULL)
748: (*lo->lo_dump)(ld->ld_lock, pr);
1.2 ad 749:
750: if (sleeper) {
751: turnstile_print(ld->ld_lock, pr);
752: }
1.75 christos 753: #endif
1.2 ad 754: }
755:
1.75 christos 756: #ifdef _KERNEL
1.2 ad 757: /*
1.27 ad 758: * lockdebug_abort1:
1.2 ad 759: *
1.27 ad 760: * An error has been trapped - dump lock info and panic.
1.2 ad 761: */
1.5 ad 762: static void
1.55 christos 763: lockdebug_abort1(const char *func, size_t line, lockdebug_t *ld, int s,
1.10 ad 764: const char *msg, bool dopanic)
1.2 ad 765: {
766:
1.27 ad 767: /*
1.46 christos 768: * Don't make the situation worse if the system is already going
1.27 ad 769: * down in flames. Once a panic is triggered, lockdebug state
770: * becomes stale and cannot be trusted.
771: */
772: if (atomic_inc_uint_nv(&ld_panic) != 1) {
1.34 ad 773: __cpu_simple_unlock(&ld->ld_spinlock);
774: splx(s);
1.27 ad 775: return;
776: }
777:
1.55 christos 778: printf_nolog("%s error: %s,%zu: %s\n\n", ld->ld_lockops->lo_name,
779: func, line, msg);
1.74 ad 780: lockdebug_dump(curlwp, ld, printf_nolog);
1.34 ad 781: __cpu_simple_unlock(&ld->ld_spinlock);
782: splx(s);
1.2 ad 783: printf_nolog("\n");
1.10 ad 784: if (dopanic)
1.55 christos 785: panic("LOCKDEBUG: %s error: %s,%zu: %s",
786: ld->ld_lockops->lo_name, func, line, msg);
1.2 ad 787: }
788:
1.75 christos 789: #endif /* _KERNEL */
1.2 ad 790: #endif /* LOCKDEBUG */
791:
792: /*
793: * lockdebug_lock_print:
794: *
795: * Handle the DDB 'show lock' command.
796: */
797: #ifdef DDB
798: void
1.69 christos 799: lockdebug_lock_print(void *addr,
800: void (*pr)(const char *, ...) __printflike(1, 2))
1.2 ad 801: {
802: #ifdef LOCKDEBUG
1.75 christos 803: lockdebug_t *ld, lds;
1.2 ad 804:
805: TAILQ_FOREACH(ld, &ld_all, ld_achain) {
1.75 christos 806: db_read_bytes((db_addr_t)ld, sizeof(lds), __UNVOLATILE(&lds));
807: ld = &lds;
1.41 dyoung 808: if (ld->ld_lock == NULL)
809: continue;
810: if (addr == NULL || ld->ld_lock == addr) {
1.74 ad 811: lockdebug_dump(curlwp, ld, pr);
1.41 dyoung 812: if (addr != NULL)
813: return;
1.2 ad 814: }
815: }
1.41 dyoung 816: if (addr != NULL) {
817: (*pr)("Sorry, no record of a lock with address %p found.\n",
818: addr);
819: }
1.2 ad 820: #else
821: (*pr)("Sorry, kernel not built with the LOCKDEBUG option.\n");
822: #endif /* LOCKDEBUG */
823: }
1.61 ozaki-r 824:
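/*
 * The DDB glue below backs "show lock <address>" and, further down,
 * "show all locks" (add the /t modifier, tested as modif[0] == 't'
 * below, to include a stack trace per LWP/CPU) and "show lockstats".
 */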
1.75 christos 825: #ifdef _KERNEL
1.62 ozaki-r 826: #ifdef LOCKDEBUG
827: static void
1.74 ad 828: lockdebug_show_one(lwp_t *l, lockdebug_t *ld, int i,
1.69 christos 829: void (*pr)(const char *, ...) __printflike(1, 2))
830: {
831: const char *sym;
 832: 	const char *sym = "(unknown)";	/* fallback if ksyms lookup fails */
1.75 christos 833: #ifdef _KERNEL
1.69 christos 834: ksyms_getname(NULL, &sym, (vaddr_t)ld->ld_initaddr,
835: KSYMS_CLOSEST|KSYMS_PROC|KSYMS_ANY);
1.75 christos 836: #endif
1.74 ad 837: (*pr)("* Lock %d (initialized at %s)\n", i++, sym);
838: lockdebug_dump(l, ld, pr);
1.69 christos 839: }
840:
841: static void
842: lockdebug_show_trace(const void *ptr,
843: void (*pr)(const char *, ...) __printflike(1, 2))
844: {
845: db_stack_trace_print((db_expr_t)(intptr_t)ptr, true, 32, "a", pr);
846: }
847:
848: static void
849: lockdebug_show_all_locks_lwp(void (*pr)(const char *, ...) __printflike(1, 2),
850: bool show_trace)
1.62 ozaki-r 851: {
852: struct proc *p;
853:
854: LIST_FOREACH(p, &allproc, p_list) {
855: struct lwp *l;
856: LIST_FOREACH(l, &p->p_lwps, l_sibling) {
857: lockdebug_t *ld;
858: int i = 0;
1.74 ad 859: if (TAILQ_EMPTY(&l->l_ld_locks) &&
860: l->l_ld_wanted == NULL) {
861: continue;
862: }
863: (*pr)("\n****** LWP %d.%d (%s) @ %p, l_stat=%d\n",
864: p->p_pid, l->l_lid,
865: l->l_name ? l->l_name : p->p_comm, l, l->l_stat);
866: if (!TAILQ_EMPTY(&l->l_ld_locks)) {
867: (*pr)("\n*** Locks held: \n");
868: TAILQ_FOREACH(ld, &l->l_ld_locks, ld_chain) {
869: (*pr)("\n");
870: lockdebug_show_one(l, ld, i++, pr);
871: }
872: } else {
873: (*pr)("\n*** Locks held: none\n");
874: }
875:
876: if (l->l_ld_wanted != NULL) {
877: (*pr)("\n*** Locks wanted: \n\n");
878: lockdebug_show_one(l, l->l_ld_wanted, 0, pr);
879: } else {
880: (*pr)("\n*** Locks wanted: none\n");
1.62 ozaki-r 881: }
1.74 ad 882: if (show_trace) {
883: (*pr)("\n*** Traceback: \n\n");
1.69 christos 884: lockdebug_show_trace(l, pr);
1.74 ad 885: (*pr)("\n");
886: }
1.62 ozaki-r 887: }
888: }
889: }
890:
891: static void
1.69 christos 892: lockdebug_show_all_locks_cpu(void (*pr)(const char *, ...) __printflike(1, 2),
893: bool show_trace)
1.62 ozaki-r 894: {
895: lockdebug_t *ld;
896: CPU_INFO_ITERATOR cii;
897: struct cpu_info *ci;
898:
899: for (CPU_INFO_FOREACH(cii, ci)) {
900: int i = 0;
901: if (TAILQ_EMPTY(&ci->ci_data.cpu_ld_locks))
902: continue;
1.74 ad 903: (*pr)("\n******* Locks held on %s:\n", cpu_name(ci));
1.62 ozaki-r 904: TAILQ_FOREACH(ld, &ci->ci_data.cpu_ld_locks, ld_chain) {
1.74 ad 905: (*pr)("\n");
906: #ifdef MULTIPROCESSOR
907: lockdebug_show_one(ci->ci_curlwp, ld, i++, pr);
1.69 christos 908: if (show_trace)
909: lockdebug_show_trace(ci->ci_curlwp, pr);
1.68 mrg 910: #else
1.74 ad 911: lockdebug_show_one(curlwp, ld, i++, pr);
912: if (show_trace)
1.72 ryo 913: lockdebug_show_trace(curlwp, pr);
1.68 mrg 914: #endif
1.62 ozaki-r 915: }
916: }
917: }
1.75 christos 918: #endif /* LOCKDEBUG */
1.62 ozaki-r 919: #endif /* _KERNEL */
920:
1.75 christos 921: #ifdef _KERNEL
1.62 ozaki-r 922: void
1.69 christos 923: lockdebug_show_all_locks(void (*pr)(const char *, ...) __printflike(1, 2),
924: const char *modif)
1.62 ozaki-r 925: {
926: #ifdef LOCKDEBUG
927: bool show_trace = false;
928: if (modif[0] == 't')
929: show_trace = true;
930:
931: (*pr)("[Locks tracked through LWPs]\n");
932: lockdebug_show_all_locks_lwp(pr, show_trace);
933: (*pr)("\n");
934:
935: (*pr)("[Locks tracked through CPUs]\n");
936: lockdebug_show_all_locks_cpu(pr, show_trace);
937: (*pr)("\n");
938: #else
939: (*pr)("Sorry, kernel not built with the LOCKDEBUG option.\n");
940: #endif /* LOCKDEBUG */
941: }
942:
1.61 ozaki-r 943: void
1.69 christos 944: lockdebug_show_lockstats(void (*pr)(const char *, ...) __printflike(1, 2))
1.61 ozaki-r 945: {
946: #ifdef LOCKDEBUG
947: lockdebug_t *ld;
948: void *_ld;
949: uint32_t n_null = 0;
950: uint32_t n_spin_mutex = 0;
951: uint32_t n_adaptive_mutex = 0;
952: uint32_t n_rwlock = 0;
953: uint32_t n_others = 0;
954:
955: RB_TREE_FOREACH(_ld, &ld_rb_tree) {
956: ld = _ld;
957: if (ld->ld_lock == NULL) {
958: n_null++;
959: continue;
960: }
961: if (ld->ld_lockops->lo_name[0] == 'M') {
962: if (ld->ld_lockops->lo_type == LOCKOPS_SLEEP)
963: n_adaptive_mutex++;
964: else
965: n_spin_mutex++;
966: continue;
967: }
968: if (ld->ld_lockops->lo_name[0] == 'R') {
969: n_rwlock++;
970: continue;
971: }
972: n_others++;
973: }
974: (*pr)(
975: "spin mutex: %u\n"
976: "adaptive mutex: %u\n"
977: "rwlock: %u\n"
978: "null locks: %u\n"
979: "others: %u\n",
1.76 ! ad 980: n_spin_mutex, n_adaptive_mutex, n_rwlock,
1.61 ozaki-r 981: n_null, n_others);
982: #else
983: (*pr)("Sorry, kernel not built with the LOCKDEBUG option.\n");
984: #endif /* LOCKDEBUG */
985: }
1.75 christos 986: #endif /* _KERNEL */
1.2 ad 987: #endif /* DDB */
988:
1.75 christos 989: #ifdef _KERNEL
1.2 ad 990: /*
1.65 mrg 991: * lockdebug_dismiss:
992: *
 993: * The system is rebooting, potentially from an unsafe place,
 994: * so avoid any future aborts.
995: */
996: void
997: lockdebug_dismiss(void)
998: {
999:
1000: atomic_inc_uint_nv(&ld_panic);
1001: }
1002:
1003: /*
1.2 ad 1004: * lockdebug_abort:
1005: *
1006: * An error has been trapped - dump lock info and call panic().
1007: */
1008: void
1.58 christos 1009: lockdebug_abort(const char *func, size_t line, const volatile void *lock,
1.55 christos 1010: lockops_t *ops, const char *msg)
1.2 ad 1011: {
1012: #ifdef LOCKDEBUG
1013: lockdebug_t *ld;
1.34 ad 1014: int s;
1.2 ad 1015:
1.34 ad 1016: s = splhigh();
1.55 christos 1017: if ((ld = lockdebug_lookup(func, line, lock,
1.38 rafal 1018: (uintptr_t) __builtin_return_address(0))) != NULL) {
1.55 christos 1019: lockdebug_abort1(func, line, ld, s, msg, true);
1.34 ad 1020: return;
1.2 ad 1021: }
1.34 ad 1022: splx(s);
1.2 ad 1023: #endif /* LOCKDEBUG */
1024:
1.27 ad 1025: /*
1.67 mrg 1026: * Don't make the situation worse if the system is already going
1027: * down in flames. Once a panic is triggered, lockdebug state
1028: * becomes stale and cannot be trusted.
1.27 ad 1029: */
1.67 mrg 1030: if (atomic_inc_uint_nv(&ld_panic) > 1)
1031: return;
1032:
1033: printf_nolog("%s error: %s,%zu: %s\n\n"
1034: "lock address : %#018lx\n"
1035: "current cpu : %18d\n"
1036: "current lwp : %#018lx\n",
1037: ops->lo_name, func, line, msg, (long)lock,
1038: (int)cpu_index(curcpu()), (long)curlwp);
1.70 ozaki-r 1039: 	if (ops->lo_dump != NULL)
		(*ops->lo_dump)(lock, printf_nolog);
1.67 mrg 1040: printf_nolog("\n");
1.2 ad 1041:
1.55 christos 1042: panic("lock error: %s: %s,%zu: %s: lock %p cpu %d lwp %p",
1043: ops->lo_name, func, line, msg, lock, cpu_index(curcpu()), curlwp);
1.2 ad 1044: }
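/*
 * A sketch of how a primitive reports fatal misuse, assuming the
 * LOCKDEBUG_ABORT() wrapper from <sys/lockdebug.h>; "foolock" is
 * hypothetical:
 */
#if 0
	if (__predict_false(!foolock_held_p(fl)))
		LOCKDEBUG_ABORT(__func__, __LINE__, fl, &foolock_lockops,
		    "releasing unheld lock");
#endif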
1.75 christos 1045: #endif /* _KERNEL */