Annotation of src/sys/kern/subr_lockdebug.c, Revision 1.77.2.2
1.77.2.2! thorpej 1: /* $NetBSD: subr_lockdebug.c,v 1.77.2.1 2021/01/03 16:35:04 thorpej Exp $ */
1.2 ad 2:
3: /*-
1.73 ad 4: * Copyright (c) 2006, 2007, 2008, 2020 The NetBSD Foundation, Inc.
1.2 ad 5: * All rights reserved.
6: *
7: * This code is derived from software contributed to The NetBSD Foundation
8: * by Andrew Doran.
9: *
10: * Redistribution and use in source and binary forms, with or without
11: * modification, are permitted provided that the following conditions
12: * are met:
13: * 1. Redistributions of source code must retain the above copyright
14: * notice, this list of conditions and the following disclaimer.
15: * 2. Redistributions in binary form must reproduce the above copyright
16: * notice, this list of conditions and the following disclaimer in the
17: * documentation and/or other materials provided with the distribution.
18: *
19: * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20: * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21: * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22: * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23: * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24: * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25: * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26: * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27: * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28: * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29: * POSSIBILITY OF SUCH DAMAGE.
30: */
31:
32: /*
1.11 ad 33: * Basic lock debugging code shared among lock primitives.
1.2 ad 34: */
35:
1.9 dsl 36: #include <sys/cdefs.h>
1.77.2.2! thorpej 37: __KERNEL_RCSID(0, "$NetBSD: subr_lockdebug.c,v 1.77.2.1 2021/01/03 16:35:04 thorpej Exp $");
1.9 dsl 38:
1.54 ozaki-r 39: #ifdef _KERNEL_OPT
1.2 ad 40: #include "opt_ddb.h"
1.54 ozaki-r 41: #endif
1.2 ad 42:
43: #include <sys/param.h>
44: #include <sys/proc.h>
45: #include <sys/systm.h>
1.10 ad 46: #include <sys/kernel.h>
1.2 ad 47: #include <sys/kmem.h>
48: #include <sys/lockdebug.h>
49: #include <sys/sleepq.h>
1.10 ad 50: #include <sys/cpu.h>
1.22 ad 51: #include <sys/atomic.h>
1.26 ad 52: #include <sys/lock.h>
1.43 matt 53: #include <sys/rbtree.h>
1.62 ozaki-r 54: #include <sys/ksyms.h>
1.77 maxv 55: #include <sys/kcov.h>
1.16 yamt 56:
1.25 ad 57: #include <machine/lock.h>
58:
1.28 ad 59: unsigned int ld_panic;
60:
1.2 ad 61: #ifdef LOCKDEBUG
62:
1.71 scole 63: #ifdef __ia64__
64: #define LD_BATCH_SHIFT 16
65: #else
1.2 ad 66: #define LD_BATCH_SHIFT 9
1.71 scole 67: #endif
1.2 ad 68: #define LD_BATCH (1 << LD_BATCH_SHIFT)
69: #define LD_BATCH_MASK (LD_BATCH - 1)
70: #define LD_MAX_LOCKS 1048576
71: #define LD_SLOP 16
72:
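/*
 * Editor's note (illustrative arithmetic, not from the original source):
 * with the default LD_BATCH_SHIFT of 9, a batch holds 1 << 9 == 512
 * lockdebug_t structures, so the LD_MAX_LOCKS cap of 1048576 corresponds
 * to 1048576 / 512 == 2048 batches; the ia64 shift of 16 gives
 * 65536-entry batches instead.
 */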
73: #define LD_LOCKED 0x01
74: #define LD_SLEEPER 0x02
75:
1.23 ad 76: #define LD_WRITE_LOCK 0x80000000
77:
1.2 ad 78: typedef struct lockdebug {
1.42 rmind 79: struct rb_node ld_rb_node;
1.34 ad 80: __cpu_simple_lock_t ld_spinlock;
1.2 ad 81: _TAILQ_ENTRY(struct lockdebug, volatile) ld_chain;
82: _TAILQ_ENTRY(struct lockdebug, volatile) ld_achain;
83: volatile void *ld_lock;
84: lockops_t *ld_lockops;
85: struct lwp *ld_lwp;
86: uintptr_t ld_locked;
87: uintptr_t ld_unlocked;
1.10 ad 88: uintptr_t ld_initaddr;
1.2 ad 89: uint16_t ld_shares;
90: uint16_t ld_cpu;
91: uint8_t ld_flags;
92: uint8_t ld_shwant; /* advisory */
93: uint8_t ld_exwant; /* advisory */
94: uint8_t ld_unused;
95: } volatile lockdebug_t;
96:
97: typedef _TAILQ_HEAD(lockdebuglist, struct lockdebug, volatile) lockdebuglist_t;
98:
1.34 ad 99: __cpu_simple_lock_t ld_mod_lk;
1.13 matt 100: lockdebuglist_t ld_free = TAILQ_HEAD_INITIALIZER(ld_free);
1.75 christos 101: #ifdef _KERNEL
1.13 matt 102: lockdebuglist_t ld_all = TAILQ_HEAD_INITIALIZER(ld_all);
1.75 christos 103: #else
104: extern lockdebuglist_t ld_all;
105: #define cpu_name(a) "?"
106: #define cpu_index(a) -1
107: #define curlwp NULL
108: #endif /* _KERNEL */
1.2 ad 109: int ld_nfree;
110: int ld_freeptr;
111: int ld_recurse;
1.5 ad 112: bool ld_nomore;
1.2 ad 113: lockdebug_t ld_prime[LD_BATCH];
114:
1.75 christos 115: #ifdef _KERNEL
1.55 christos 116: static void lockdebug_abort1(const char *, size_t, lockdebug_t *, int,
117: const char *, bool);
1.34 ad 118: static int lockdebug_more(int);
1.5 ad 119: static void lockdebug_init(void);
1.74 ad 120: static void lockdebug_dump(lwp_t *, lockdebug_t *,
121: void (*)(const char *, ...)
1.52 christos 122: __printflike(1, 2));
1.2 ad 123:
1.16 yamt 124: static signed int
1.42 rmind 125: ld_rbto_compare_nodes(void *ctx, const void *n1, const void *n2)
1.16 yamt 126: {
1.42 rmind 127: const lockdebug_t *ld1 = n1;
128: const lockdebug_t *ld2 = n2;
1.20 yamt 129: const uintptr_t a = (uintptr_t)ld1->ld_lock;
130: const uintptr_t b = (uintptr_t)ld2->ld_lock;
131:
132: if (a < b)
1.42 rmind 133: return -1;
134: if (a > b)
1.20 yamt 135: return 1;
1.16 yamt 136: return 0;
137: }
138:
139: static signed int
1.42 rmind 140: ld_rbto_compare_key(void *ctx, const void *n, const void *key)
1.16 yamt 141: {
1.42 rmind 142: const lockdebug_t *ld = n;
1.20 yamt 143: const uintptr_t a = (uintptr_t)ld->ld_lock;
144: const uintptr_t b = (uintptr_t)key;
145:
146: if (a < b)
1.42 rmind 147: return -1;
148: if (a > b)
1.20 yamt 149: return 1;
1.16 yamt 150: return 0;
151: }
152:
1.42 rmind 153: static rb_tree_t ld_rb_tree;
1.16 yamt 154:
1.42 rmind 155: static const rb_tree_ops_t ld_rb_tree_ops = {
1.37 matt 156: .rbto_compare_nodes = ld_rbto_compare_nodes,
157: .rbto_compare_key = ld_rbto_compare_key,
1.42 rmind 158: .rbto_node_offset = offsetof(lockdebug_t, ld_rb_node),
159: .rbto_context = NULL
1.16 yamt 160: };
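/*
 * Usage sketch (editor's illustration): these ops key the tree on the
 * address of the lock itself, so a lookup reduces to
 *
 *	lockdebug_t *ld = rb_tree_find_node(&ld_rb_tree,
 *	    (void *)(intptr_t)lock);
 *
 * exactly as lockdebug_lookup1() does below.
 */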
161:
1.34 ad 162: static inline lockdebug_t *
1.58 christos 163: lockdebug_lookup1(const volatile void *lock)
1.23 ad 164: {
1.34 ad 165: lockdebug_t *ld;
166: struct cpu_info *ci;
1.23 ad 167:
1.34 ad 168: ci = curcpu();
169: __cpu_simple_lock(&ci->ci_data.cpu_ld_lock);
1.58 christos 170: ld = rb_tree_find_node(&ld_rb_tree, (void *)(intptr_t)lock);
1.34 ad 171: __cpu_simple_unlock(&ci->ci_data.cpu_ld_lock);
172: if (ld == NULL) {
173: return NULL;
174: }
175: __cpu_simple_lock(&ld->ld_spinlock);
1.23 ad 176:
1.34 ad 177: return ld;
1.2 ad 178: }
179:
1.23 ad 180: static void
1.34 ad 181: lockdebug_lock_cpus(void)
1.2 ad 182: {
1.34 ad 183: CPU_INFO_ITERATOR cii;
184: struct cpu_info *ci;
1.2 ad 185:
1.34 ad 186: for (CPU_INFO_FOREACH(cii, ci)) {
187: __cpu_simple_lock(&ci->ci_data.cpu_ld_lock);
188: }
1.23 ad 189: }
190:
191: static void
1.34 ad 192: lockdebug_unlock_cpus(void)
1.23 ad 193: {
1.34 ad 194: CPU_INFO_ITERATOR cii;
195: struct cpu_info *ci;
1.23 ad 196:
1.34 ad 197: for (CPU_INFO_FOREACH(cii, ci)) {
198: __cpu_simple_unlock(&ci->ci_data.cpu_ld_lock);
199: }
1.2 ad 200: }
201:
202: /*
1.19 yamt 203: * lockdebug_lookup:
204: *
205: * Find a lockdebug structure by a pointer to a lock and return it locked.
206: */
207: static inline lockdebug_t *
1.58 christos 208: lockdebug_lookup(const char *func, size_t line, const volatile void *lock,
1.55 christos 209: uintptr_t where)
1.19 yamt 210: {
211: lockdebug_t *ld;
212:
1.77 maxv 213: kcov_silence_enter();
1.34 ad 214: ld = lockdebug_lookup1(lock);
1.77 maxv 215: kcov_silence_leave();
216:
1.60 ozaki-r 217: if (__predict_false(ld == NULL)) {
1.55 christos 218: panic("%s,%zu: uninitialized lock (lock=%p, from=%08"
219: PRIxPTR ")", func, line, lock, where);
1.42 rmind 220: }
1.19 yamt 221: return ld;
222: }
223:
224: /*
1.2 ad 225: * lockdebug_init:
226: *
227: * Initialize the lockdebug system. Allocate an initial pool of
228: * lockdebug structures before the VM system is up and running.
229: */
1.5 ad 230: static void
1.2 ad 231: lockdebug_init(void)
232: {
233: lockdebug_t *ld;
234: int i;
235:
1.34 ad 236: TAILQ_INIT(&curcpu()->ci_data.cpu_ld_locks);
237: TAILQ_INIT(&curlwp->l_ld_locks);
238: __cpu_simple_lock_init(&curcpu()->ci_data.cpu_ld_lock);
239: __cpu_simple_lock_init(&ld_mod_lk);
1.15 matt 240:
1.16 yamt 241: rb_tree_init(&ld_rb_tree, &ld_rb_tree_ops);
242:
1.2 ad 243: ld = ld_prime;
244: for (i = 1, ld++; i < LD_BATCH; i++, ld++) {
1.34 ad 245: __cpu_simple_lock_init(&ld->ld_spinlock);
1.2 ad 246: TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
247: TAILQ_INSERT_TAIL(&ld_all, ld, ld_achain);
248: }
249: ld_freeptr = 1;
250: ld_nfree = LD_BATCH - 1;
251: }
252:
253: /*
254: * lockdebug_alloc:
255: *
256: * A lock is being initialized, so allocate an associated debug
257: * structure.
258: */
1.16 yamt 259: bool
1.55 christos 260: lockdebug_alloc(const char *func, size_t line, volatile void *lock,
261: lockops_t *lo, uintptr_t initaddr)
1.2 ad 262: {
263: struct cpu_info *ci;
264: lockdebug_t *ld;
1.34 ad 265: int s;
1.2 ad 266:
1.60 ozaki-r 267: if (__predict_false(lo == NULL || panicstr != NULL || ld_panic))
1.16 yamt 268: return false;
1.60 ozaki-r 269: if (__predict_false(ld_freeptr == 0))
1.5 ad 270: lockdebug_init();
1.2 ad 271:
1.34 ad 272: s = splhigh();
273: __cpu_simple_lock(&ld_mod_lk);
1.60 ozaki-r 274: if (__predict_false((ld = lockdebug_lookup1(lock)) != NULL)) {
1.34 ad 275: __cpu_simple_unlock(&ld_mod_lk);
1.55 christos 276: lockdebug_abort1(func, line, ld, s, "already initialized",
277: true);
1.27 ad 278: return false;
1.19 yamt 279: }
280:
1.2 ad 281: /*
282: * Pinch a new debug structure. We may recurse because we call
283: * kmem_alloc(), which may need to initialize new locks somewhere
1.7 skrll 284: * down the path. If not recursing, we try to maintain at least
1.2 ad 285: * LD_SLOP structures free, which should hopefully be enough to
286: * satisfy kmem_alloc(). If we can't provide a structure, not to
287: * worry: we'll just mark the lock as not having an ID.
288: */
1.23 ad 289: ci = curcpu();
1.2 ad 290: ci->ci_lkdebug_recurse++;
291: if (TAILQ_EMPTY(&ld_free)) {
1.5 ad 292: if (ci->ci_lkdebug_recurse > 1 || ld_nomore) {
1.2 ad 293: ci->ci_lkdebug_recurse--;
1.34 ad 294: __cpu_simple_unlock(&ld_mod_lk);
295: splx(s);
1.16 yamt 296: return false;
1.2 ad 297: }
1.34 ad 298: s = lockdebug_more(s);
299: } else if (ci->ci_lkdebug_recurse == 1 && ld_nfree < LD_SLOP) {
300: s = lockdebug_more(s);
301: }
1.60 ozaki-r 302: if (__predict_false((ld = TAILQ_FIRST(&ld_free)) == NULL)) {
1.34 ad 303: __cpu_simple_unlock(&ld_mod_lk);
304: splx(s);
1.16 yamt 305: return false;
1.2 ad 306: }
307: TAILQ_REMOVE(&ld_free, ld, ld_chain);
308: ld_nfree--;
309: ci->ci_lkdebug_recurse--;
310:
1.60 ozaki-r 311: if (__predict_false(ld->ld_lock != NULL)) {
1.55 christos 312: panic("%s,%zu: corrupt table ld %p", func, line, ld);
1.34 ad 313: }
1.2 ad 314:
315: /* Initialise the structure. */
316: ld->ld_lock = lock;
317: ld->ld_lockops = lo;
318: ld->ld_locked = 0;
319: ld->ld_unlocked = 0;
320: ld->ld_lwp = NULL;
1.10 ad 321: ld->ld_initaddr = initaddr;
1.35 ad 322: ld->ld_flags = (lo->lo_type == LOCKOPS_SLEEP ? LD_SLEEPER : 0);
1.34 ad 323: lockdebug_lock_cpus();
1.42 rmind 324: (void)rb_tree_insert_node(&ld_rb_tree, __UNVOLATILE(ld));
1.34 ad 325: lockdebug_unlock_cpus();
326: __cpu_simple_unlock(&ld_mod_lk);
1.2 ad 327:
1.34 ad 328: splx(s);
1.16 yamt 329: return true;
1.2 ad 330: }
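/*
 * Usage sketch (editor's illustration, not from this file): a lock
 * primitive registers each lock at initialization time and remembers
 * whether a debug structure could be allocated.  The "foolock" names
 * are hypothetical; the lockops_t fields follow their use in this file.
 *
 *	static lockops_t foolock_lockops = {
 *		.lo_name = "foolock",
 *		.lo_type = LOCKOPS_SLEEP,
 *		.lo_dump = foolock_dump,
 *	};
 *
 *	bool dodebug = lockdebug_alloc(__func__, __LINE__, lk,
 *	    &foolock_lockops, (uintptr_t)__builtin_return_address(0));
 */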
331:
332: /*
333: * lockdebug_free:
334: *
335: * A lock is being destroyed, so release debugging resources.
336: */
337: void
1.55 christos 338: lockdebug_free(const char *func, size_t line, volatile void *lock)
1.2 ad 339: {
340: lockdebug_t *ld;
1.34 ad 341: int s;
1.2 ad 342:
1.60 ozaki-r 343: if (__predict_false(panicstr != NULL || ld_panic))
1.2 ad 344: return;
345:
1.34 ad 346: s = splhigh();
347: __cpu_simple_lock(&ld_mod_lk);
1.55 christos 348: ld = lockdebug_lookup(func, line, lock,
349: (uintptr_t) __builtin_return_address(0));
1.60 ozaki-r 350: if (__predict_false(ld == NULL)) {
1.34 ad 351: __cpu_simple_unlock(&ld_mod_lk);
1.55 christos 352: panic("%s,%zu: destroying uninitialized object %p",
353: func, line, lock);
1.27 ad 354: return;
1.2 ad 355: }
1.60 ozaki-r 356: if (__predict_false((ld->ld_flags & LD_LOCKED) != 0 ||
357: ld->ld_shares != 0)) {
1.34 ad 358: __cpu_simple_unlock(&ld_mod_lk);
1.55 christos 359: lockdebug_abort1(func, line, ld, s, "is locked or in use",
360: true);
1.27 ad 361: return;
362: }
1.34 ad 363: lockdebug_lock_cpus();
1.42 rmind 364: rb_tree_remove_node(&ld_rb_tree, __UNVOLATILE(ld));
1.34 ad 365: lockdebug_unlock_cpus();
1.2 ad 366: ld->ld_lock = NULL;
367: TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
368: ld_nfree++;
1.34 ad 369: __cpu_simple_unlock(&ld->ld_spinlock);
370: __cpu_simple_unlock(&ld_mod_lk);
371: splx(s);
1.2 ad 372: }
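/*
 * Usage sketch (editor's illustration): the matching teardown, guarded
 * by the flag returned from lockdebug_alloc() in the sketch above:
 *
 *	if (dodebug)
 *		lockdebug_free(__func__, __LINE__, lk);
 */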
373:
374: /*
375: * lockdebug_more:
376: *
377: * Allocate a batch of debug structures and add to the free list.
1.34 ad 378: * Must be called with ld_mod_lk held.
1.2 ad 379: */
1.34 ad 380: static int
381: lockdebug_more(int s)
1.2 ad 382: {
383: lockdebug_t *ld;
384: void *block;
1.5 ad 385: int i, base, m;
1.2 ad 386:
1.35 ad 387: /*
388: * Can't call kmem_alloc() if in interrupt context. XXX We could
389: * deadlock, because we don't know which locks the caller holds.
390: */
1.59 ozaki-r 391: if (cpu_intr_p() || cpu_softintr_p()) {
1.35 ad 392: return s;
393: }
394:
1.2 ad 395: while (ld_nfree < LD_SLOP) {
1.34 ad 396: __cpu_simple_unlock(&ld_mod_lk);
397: splx(s);
1.2 ad 398: block = kmem_zalloc(LD_BATCH * sizeof(lockdebug_t), KM_SLEEP);
1.34 ad 399: s = splhigh();
400: __cpu_simple_lock(&ld_mod_lk);
1.2 ad 401:
402: if (ld_nfree > LD_SLOP) {
403: /* Somebody beat us to it. */
1.34 ad 404: __cpu_simple_unlock(&ld_mod_lk);
405: splx(s);
1.2 ad 406: kmem_free(block, LD_BATCH * sizeof(lockdebug_t));
1.34 ad 407: s = splhigh();
408: __cpu_simple_lock(&ld_mod_lk);
1.2 ad 409: continue;
410: }
411:
412: base = ld_freeptr;
413: ld_nfree += LD_BATCH;
414: ld = block;
415: base <<= LD_BATCH_SHIFT;
1.66 riastrad 416: m = uimin(LD_MAX_LOCKS, base + LD_BATCH);
1.5 ad 417:
418: if (m == LD_MAX_LOCKS)
419: ld_nomore = true;
1.2 ad 420:
1.5 ad 421: for (i = base; i < m; i++, ld++) {
1.34 ad 422: __cpu_simple_lock_init(&ld->ld_spinlock);
1.2 ad 423: TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
424: TAILQ_INSERT_TAIL(&ld_all, ld, ld_achain);
425: }
426:
1.22 ad 427: membar_producer();
1.2 ad 428: }
1.34 ad 429:
430: return s;
1.2 ad 431: }
432:
433: /*
434: * lockdebug_wantlock:
435: *
1.56 pgoyette 436: * Process the preamble to a lock acquire. The "shared"
437: * parameter controls which ld_{ex,sh}want counter is
438: * updated; a negative value of shared updates neither.
1.2 ad 439: */
440: void
1.55 christos 441: lockdebug_wantlock(const char *func, size_t line,
1.58 christos 442: const volatile void *lock, uintptr_t where, int shared)
1.2 ad 443: {
444: struct lwp *l = curlwp;
445: lockdebug_t *ld;
1.3 thorpej 446: bool recurse;
1.34 ad 447: int s;
1.2 ad 448:
449: (void)shared;
1.4 thorpej 450: recurse = false;
1.2 ad 451:
1.60 ozaki-r 452: if (__predict_false(panicstr != NULL || ld_panic))
1.2 ad 453: return;
454:
1.34 ad 455: s = splhigh();
1.55 christos 456: if ((ld = lockdebug_lookup(func, line, lock, where)) == NULL) {
1.34 ad 457: splx(s);
1.2 ad 458: return;
1.34 ad 459: }
1.32 yamt 460: if ((ld->ld_flags & LD_LOCKED) != 0 || ld->ld_shares != 0) {
1.2 ad 461: if ((ld->ld_flags & LD_SLEEPER) != 0) {
1.49 mlelstv 462: if (ld->ld_lwp == l)
1.4 thorpej 463: recurse = true;
1.40 rmind 464: } else if (ld->ld_cpu == (uint16_t)cpu_index(curcpu()))
1.4 thorpej 465: recurse = true;
1.2 ad 466: }
1.10 ad 467: if (cpu_intr_p()) {
1.60 ozaki-r 468: if (__predict_false((ld->ld_flags & LD_SLEEPER) != 0)) {
1.55 christos 469: lockdebug_abort1(func, line, ld, s,
1.10 ad 470: "acquiring sleep lock from interrupt context",
471: true);
1.27 ad 472: return;
473: }
1.10 ad 474: }
1.56 pgoyette 475: if (shared > 0)
1.2 ad 476: ld->ld_shwant++;
1.56 pgoyette 477: else if (shared == 0)
1.2 ad 478: ld->ld_exwant++;
1.60 ozaki-r 479: if (__predict_false(recurse)) {
1.55 christos 480: lockdebug_abort1(func, line, ld, s, "locking against myself",
1.10 ad 481: true);
1.27 ad 482: return;
483: }
1.74 ad 484: if (l->l_ld_wanted == NULL) {
485: l->l_ld_wanted = ld;
486: }
1.34 ad 487: __cpu_simple_unlock(&ld->ld_spinlock);
488: splx(s);
1.2 ad 489: }
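/*
 * Usage sketch (editor's illustration): per the comment above, the sign
 * of "shared" selects which advisory counter is bumped:
 *
 *	lockdebug_wantlock(__func__, __LINE__, lk, where, 1);	(shared)
 *	lockdebug_wantlock(__func__, __LINE__, lk, where, 0);	(exclusive)
 *	lockdebug_wantlock(__func__, __LINE__, lk, where, -1);	(neither)
 */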
490:
491: /*
492: * lockdebug_locked:
493: *
494: * Process a lock acquire operation.
495: */
496: void
1.55 christos 497: lockdebug_locked(const char *func, size_t line,
498: volatile void *lock, void *cvlock, uintptr_t where, int shared)
1.2 ad 499: {
500: struct lwp *l = curlwp;
501: lockdebug_t *ld;
1.34 ad 502: int s;
1.2 ad 503:
1.60 ozaki-r 504: if (__predict_false(panicstr != NULL || ld_panic))
1.2 ad 505: return;
506:
1.34 ad 507: s = splhigh();
1.55 christos 508: if ((ld = lockdebug_lookup(func, line, lock, where)) == NULL) {
1.34 ad 509: splx(s);
1.2 ad 510: return;
1.34 ad 511: }
1.76 ad 512: if (shared) {
1.2 ad 513: l->l_shlocks++;
1.45 yamt 514: ld->ld_locked = where;
1.2 ad 515: ld->ld_shares++;
516: ld->ld_shwant--;
517: } else {
1.60 ozaki-r 518: if (__predict_false((ld->ld_flags & LD_LOCKED) != 0)) {
1.55 christos 519: lockdebug_abort1(func, line, ld, s, "already locked",
1.34 ad 520: true);
1.27 ad 521: return;
522: }
1.2 ad 523: ld->ld_flags |= LD_LOCKED;
524: ld->ld_locked = where;
525: ld->ld_exwant--;
526: if ((ld->ld_flags & LD_SLEEPER) != 0) {
1.34 ad 527: TAILQ_INSERT_TAIL(&l->l_ld_locks, ld, ld_chain);
1.2 ad 528: } else {
1.34 ad 529: TAILQ_INSERT_TAIL(&curcpu()->ci_data.cpu_ld_locks,
530: ld, ld_chain);
1.2 ad 531: }
532: }
1.40 rmind 533: ld->ld_cpu = (uint16_t)cpu_index(curcpu());
1.32 yamt 534: ld->ld_lwp = l;
1.34 ad 535: __cpu_simple_unlock(&ld->ld_spinlock);
1.74 ad 536: if (l->l_ld_wanted == ld) {
537: l->l_ld_wanted = NULL;
538: }
1.34 ad 539: splx(s);
1.2 ad 540: }
541:
542: /*
543: * lockdebug_unlocked:
544: *
545: * Process a lock release operation.
546: */
547: void
1.55 christos 548: lockdebug_unlocked(const char *func, size_t line,
549: volatile void *lock, uintptr_t where, int shared)
1.2 ad 550: {
551: struct lwp *l = curlwp;
552: lockdebug_t *ld;
1.34 ad 553: int s;
1.2 ad 554:
1.60 ozaki-r 555: if (__predict_false(panicstr != NULL || ld_panic))
1.2 ad 556: return;
557:
1.34 ad 558: s = splhigh();
1.55 christos 559: if ((ld = lockdebug_lookup(func, line, lock, where)) == NULL) {
1.34 ad 560: splx(s);
1.2 ad 561: return;
1.34 ad 562: }
1.76 ad 563: if (shared) {
1.60 ozaki-r 564: if (__predict_false(l->l_shlocks == 0)) {
1.55 christos 565: lockdebug_abort1(func, line, ld, s,
1.10 ad 566: "no shared locks held by LWP", true);
1.27 ad 567: return;
568: }
1.60 ozaki-r 569: if (__predict_false(ld->ld_shares == 0)) {
1.55 christos 570: lockdebug_abort1(func, line, ld, s,
1.10 ad 571: "no shared holds on this lock", true);
1.27 ad 572: return;
573: }
1.2 ad 574: l->l_shlocks--;
575: ld->ld_shares--;
1.45 yamt 576: if (ld->ld_lwp == l) {
577: ld->ld_unlocked = where;
1.32 yamt 578: ld->ld_lwp = NULL;
1.45 yamt 579: }
1.40 rmind 580: if (ld->ld_cpu == (uint16_t)cpu_index(curcpu()))
1.32 yamt 581: ld->ld_cpu = (uint16_t)-1;
1.2 ad 582: } else {
1.60 ozaki-r 583: if (__predict_false((ld->ld_flags & LD_LOCKED) == 0)) {
1.55 christos 584: lockdebug_abort1(func, line, ld, s, "not locked", true);
1.27 ad 585: return;
586: }
1.2 ad 587:
588: if ((ld->ld_flags & LD_SLEEPER) != 0) {
1.60 ozaki-r 589: if (__predict_false(ld->ld_lwp != curlwp)) {
1.55 christos 590: lockdebug_abort1(func, line, ld, s,
1.10 ad 591: "not held by current LWP", true);
1.27 ad 592: return;
593: }
1.34 ad 594: TAILQ_REMOVE(&l->l_ld_locks, ld, ld_chain);
1.2 ad 595: } else {
1.60 ozaki-r 596: uint16_t idx = (uint16_t)cpu_index(curcpu());
597: if (__predict_false(ld->ld_cpu != idx)) {
1.55 christos 598: lockdebug_abort1(func, line, ld, s,
1.10 ad 599: "not held by current CPU", true);
1.27 ad 600: return;
601: }
1.34 ad 602: TAILQ_REMOVE(&curcpu()->ci_data.cpu_ld_locks, ld,
603: ld_chain);
1.2 ad 604: }
1.44 matt 605: ld->ld_flags &= ~LD_LOCKED;
1.77.2.1 thorpej 606: ld->ld_unlocked = where;
1.44 matt 607: ld->ld_lwp = NULL;
1.2 ad 608: }
1.34 ad 609: __cpu_simple_unlock(&ld->ld_spinlock);
610: splx(s);
1.2 ad 611: }
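/*
 * Usage sketch (editor's illustration): an exclusive acquire/release
 * pair as a primitive would report it.  "where" is conventionally the
 * caller's return address; cvlock is not consulted in this revision and
 * is passed as NULL here.
 *
 *	uintptr_t where = (uintptr_t)__builtin_return_address(0);
 *
 *	lockdebug_locked(__func__, __LINE__, lk, NULL, where, 0);
 *	... critical section ...
 *	lockdebug_unlocked(__func__, __LINE__, lk, where, 0);
 */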
612:
613: /*
614: * lockdebug_barrier:
1.77.2.1 thorpej 615: *
1.73 ad 616: * Panic if we hold any lock other than the one specified and,
617: * unless slplocks is nonzero, if we hold any sleep locks.
1.2 ad 618: */
619: void
1.73 ad 620: lockdebug_barrier(const char *func, size_t line, volatile void *onelock,
1.55 christos 621: int slplocks)
1.2 ad 622: {
623: struct lwp *l = curlwp;
624: lockdebug_t *ld;
1.34 ad 625: int s;
1.2 ad 626:
1.60 ozaki-r 627: if (__predict_false(panicstr != NULL || ld_panic))
1.2 ad 628: return;
629:
1.34 ad 630: s = splhigh();
631: if ((l->l_pflag & LP_INTR) == 0) {
632: TAILQ_FOREACH(ld, &curcpu()->ci_data.cpu_ld_locks, ld_chain) {
1.73 ad 633: if (ld->ld_lock == onelock) {
1.2 ad 634: continue;
635: }
1.34 ad 636: __cpu_simple_lock(&ld->ld_spinlock);
1.55 christos 637: lockdebug_abort1(func, line, ld, s,
1.34 ad 638: "spin lock held", true);
639: return;
1.2 ad 640: }
641: }
1.34 ad 642: if (slplocks) {
643: splx(s);
644: return;
645: }
1.60 ozaki-r 646: ld = TAILQ_FIRST(&l->l_ld_locks);
1.73 ad 647: if (__predict_false(ld != NULL && ld->ld_lock != onelock)) {
1.34 ad 648: __cpu_simple_lock(&ld->ld_spinlock);
1.55 christos 649: lockdebug_abort1(func, line, ld, s, "sleep lock held", true);
1.34 ad 650: return;
651: }
652: splx(s);
653: if (l->l_shlocks != 0) {
1.52 christos 654: TAILQ_FOREACH(ld, &ld_all, ld_achain) {
1.73 ad 655: if (ld->ld_lock == onelock) {
656: continue;
657: }
1.52 christos 658: if (ld->ld_lwp == l)
1.74 ad 659: lockdebug_dump(l, ld, printf);
1.52 christos 660: }
1.55 christos 661: panic("%s,%zu: holding %d shared locks", func, line,
662: l->l_shlocks);
1.2 ad 663: }
664: }
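/*
 * Usage sketch (editor's illustration): code about to block can assert
 * that it holds no spin locks and, with slplocks == 0, no sleep locks
 * either; onelock exempts a single lock from the check:
 *
 *	lockdebug_barrier(__func__, __LINE__, NULL, 0);
 *
 * Callers normally reach this through the LOCKDEBUG_BARRIER() wrapper
 * in <sys/lockdebug.h> (an assumption; the wrapper is not defined here).
 */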
665:
666: /*
1.10 ad 667: * lockdebug_mem_check:
668: *
669: * Check for in-use locks within a memory region that is
1.16 yamt 670: * being freed.
1.10 ad 671: */
672: void
1.55 christos 673: lockdebug_mem_check(const char *func, size_t line, void *base, size_t sz)
1.10 ad 674: {
1.16 yamt 675: lockdebug_t *ld;
1.34 ad 676: struct cpu_info *ci;
1.23 ad 677: int s;
1.10 ad 678:
1.60 ozaki-r 679: if (__predict_false(panicstr != NULL || ld_panic))
1.24 ad 680: return;
681:
1.77 maxv 682: kcov_silence_enter();
683:
1.34 ad 684: s = splhigh();
685: ci = curcpu();
686: __cpu_simple_lock(&ci->ci_data.cpu_ld_lock);
1.16 yamt 687: ld = (lockdebug_t *)rb_tree_find_node_geq(&ld_rb_tree, base);
1.23 ad 688: if (ld != NULL) {
689: const uintptr_t lock = (uintptr_t)ld->ld_lock;
690:
1.60 ozaki-r 691: if (__predict_false((uintptr_t)base > lock))
1.55 christos 692: panic("%s,%zu: corrupt tree ld=%p, base=%p, sz=%zu",
693: func, line, ld, base, sz);
1.23 ad 694: if (lock >= (uintptr_t)base + sz)
695: ld = NULL;
696: }
1.34 ad 697: __cpu_simple_unlock(&ci->ci_data.cpu_ld_lock);
1.60 ozaki-r 698: if (__predict_false(ld != NULL)) {
1.34 ad 699: __cpu_simple_lock(&ld->ld_spinlock);
1.55 christos 700: lockdebug_abort1(func, line, ld, s,
1.34 ad 701: "allocation contains active lock", !cold);
1.77 maxv 702: kcov_silence_leave();
1.16 yamt 703: return;
1.34 ad 704: }
705: splx(s);
1.77 maxv 706:
707: kcov_silence_leave();
1.10 ad 708: }
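/*
 * Usage sketch (editor's illustration): an allocator would verify a
 * region before recycling it,
 *
 *	lockdebug_mem_check(__func__, __LINE__, base, size);
 *
 * which aborts if an initialized lock still lies within
 * [base, base + size).
 */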
1.75 christos 709: #endif /* _KERNEL */
710:
711: #ifdef DDB
712: #include <machine/db_machdep.h>
713: #include <ddb/db_interface.h>
714: #include <ddb/db_access.h>
715: #endif
1.10 ad 716:
717: /*
1.2 ad 718: * lockdebug_dump:
719: *
720: * Dump information about a lock on panic, or for DDB.
721: */
722: static void
1.74 ad 723: lockdebug_dump(lwp_t *l, lockdebug_t *ld, void (*pr)(const char *, ...)
1.47 christos 724: __printflike(1, 2))
1.2 ad 725: {
726: int sleeper = (ld->ld_flags & LD_SLEEPER);
1.75 christos 727: lockops_t *lo = ld->ld_lockops;
1.2 ad 728:
729: (*pr)(
730: "lock address : %#018lx type : %18s\n"
1.35 ad 731: "initialized : %#018lx",
1.2 ad 732: (long)ld->ld_lock, (sleeper ? "sleep/adaptive" : "spin"),
1.10 ad 733: (long)ld->ld_initaddr);
1.2 ad 734:
1.75 christos 735: #ifndef _KERNEL
736: lockops_t los;
737: lo = &los;
738: db_read_bytes((db_addr_t)ld->ld_lockops, sizeof(los), (char *)lo);
739: #endif
1.76 ad 740: (*pr)("\n"
741: "shared holds : %18u exclusive: %18u\n"
742: "shares wanted: %18u exclusive: %18u\n"
743: "relevant cpu : %18u last held: %18u\n"
744: "relevant lwp : %#018lx last held: %#018lx\n"
745: "last locked%c : %#018lx unlocked%c: %#018lx\n",
746: (unsigned)ld->ld_shares, ((ld->ld_flags & LD_LOCKED) != 0),
747: (unsigned)ld->ld_shwant, (unsigned)ld->ld_exwant,
748: (unsigned)cpu_index(l->l_cpu), (unsigned)ld->ld_cpu,
749: (long)l, (long)ld->ld_lwp,
750: ((ld->ld_flags & LD_LOCKED) ? '*' : ' '),
751: (long)ld->ld_locked,
752: ((ld->ld_flags & LD_LOCKED) ? ' ' : '*'),
753: (long)ld->ld_unlocked);
1.35 ad 754:
1.75 christos 755: #ifdef _KERNEL
756: if (lo->lo_dump != NULL)
757: (*lo->lo_dump)(ld->ld_lock, pr);
1.2 ad 758:
759: if (sleeper) {
760: turnstile_print(ld->ld_lock, pr);
761: }
1.75 christos 762: #endif
1.2 ad 763: }
764:
1.75 christos 765: #ifdef _KERNEL
1.2 ad 766: /*
1.27 ad 767: * lockdebug_abort1:
1.2 ad 768: *
1.27 ad 769: * An error has been trapped - dump lock info and panic.
1.2 ad 770: */
1.5 ad 771: static void
1.55 christos 772: lockdebug_abort1(const char *func, size_t line, lockdebug_t *ld, int s,
1.10 ad 773: const char *msg, bool dopanic)
1.2 ad 774: {
775:
1.27 ad 776: /*
1.46 christos 777: * Don't make the situation worse if the system is already going
1.27 ad 778: * down in flames. Once a panic is triggered, lockdebug state
779: * becomes stale and cannot be trusted.
780: */
781: if (atomic_inc_uint_nv(&ld_panic) != 1) {
1.34 ad 782: __cpu_simple_unlock(&ld->ld_spinlock);
783: splx(s);
1.27 ad 784: return;
785: }
786:
1.77.2.1 thorpej 787: printf("%s error: %s,%zu: %s\n\n", ld->ld_lockops->lo_name,
1.55 christos 788: func, line, msg);
1.77.2.1 thorpej 789: lockdebug_dump(curlwp, ld, printf);
1.34 ad 790: __cpu_simple_unlock(&ld->ld_spinlock);
791: splx(s);
1.77.2.1 thorpej 792: printf("\n");
1.10 ad 793: if (dopanic)
1.55 christos 794: panic("LOCKDEBUG: %s error: %s,%zu: %s",
795: ld->ld_lockops->lo_name, func, line, msg);
1.2 ad 796: }
797:
1.75 christos 798: #endif /* _KERNEL */
1.2 ad 799: #endif /* LOCKDEBUG */
800:
801: /*
802: * lockdebug_lock_print:
803: *
804: * Handle the DDB 'show lock' command.
805: */
806: #ifdef DDB
807: void
1.69 christos 808: lockdebug_lock_print(void *addr,
809: void (*pr)(const char *, ...) __printflike(1, 2))
1.2 ad 810: {
811: #ifdef LOCKDEBUG
1.75 christos 812: lockdebug_t *ld, lds;
1.2 ad 813:
814: TAILQ_FOREACH(ld, &ld_all, ld_achain) {
1.75 christos 815: db_read_bytes((db_addr_t)ld, sizeof(lds), __UNVOLATILE(&lds));
816: ld = &lds;
1.41 dyoung 817: if (ld->ld_lock == NULL)
818: continue;
819: if (addr == NULL || ld->ld_lock == addr) {
1.74 ad 820: lockdebug_dump(curlwp, ld, pr);
1.41 dyoung 821: if (addr != NULL)
822: return;
1.2 ad 823: }
824: }
1.41 dyoung 825: if (addr != NULL) {
826: (*pr)("Sorry, no record of a lock with address %p found.\n",
827: addr);
828: }
1.2 ad 829: #else
830: (*pr)("Sorry, kernel not built with the LOCKDEBUG option.\n");
831: #endif /* LOCKDEBUG */
832: }
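/*
 * Usage sketch (editor's illustration): from the DDB prompt,
 *
 *	db{0}> show lock 0xffffd5817aa0a200
 *
 * prints the record for that lock, while "show lock" with no address
 * walks every record (the address above is hypothetical).
 */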
1.61 ozaki-r 833:
1.75 christos 834: #ifdef _KERNEL
1.62 ozaki-r 835: #ifdef LOCKDEBUG
836: static void
1.74 ad 837: lockdebug_show_one(lwp_t *l, lockdebug_t *ld, int i,
1.69 christos 838: void (*pr)(const char *, ...) __printflike(1, 2))
839: {
840: const char *sym;
841:
1.75 christos 842: #ifdef _KERNEL
1.69 christos 843: ksyms_getname(NULL, &sym, (vaddr_t)ld->ld_initaddr,
844: KSYMS_CLOSEST|KSYMS_PROC|KSYMS_ANY);
1.75 christos 845: #endif
1.74 ad 846: (*pr)("* Lock %d (initialized at %s)\n", i++, sym);
847: lockdebug_dump(l, ld, pr);
1.69 christos 848: }
849:
850: static void
851: lockdebug_show_trace(const void *ptr,
852: void (*pr)(const char *, ...) __printflike(1, 2))
853: {
1.77.2.2! thorpej 854:
! 855: db_stack_trace_print((db_expr_t)(intptr_t)ptr, true, 32, "a", pr);
1.69 christos 856: }
857:
858: static void
859: lockdebug_show_all_locks_lwp(void (*pr)(const char *, ...) __printflike(1, 2),
860: bool show_trace)
1.62 ozaki-r 861: {
862: struct proc *p;
863:
864: LIST_FOREACH(p, &allproc, p_list) {
865: struct lwp *l;
866: LIST_FOREACH(l, &p->p_lwps, l_sibling) {
867: lockdebug_t *ld;
868: int i = 0;
1.74 ad 869: if (TAILQ_EMPTY(&l->l_ld_locks) &&
870: l->l_ld_wanted == NULL) {
871: continue;
872: }
873: (*pr)("\n****** LWP %d.%d (%s) @ %p, l_stat=%d\n",
874: p->p_pid, l->l_lid,
875: l->l_name ? l->l_name : p->p_comm, l, l->l_stat);
876: if (!TAILQ_EMPTY(&l->l_ld_locks)) {
877: (*pr)("\n*** Locks held: \n");
878: TAILQ_FOREACH(ld, &l->l_ld_locks, ld_chain) {
879: (*pr)("\n");
880: lockdebug_show_one(l, ld, i++, pr);
881: }
882: } else {
883: (*pr)("\n*** Locks held: none\n");
884: }
885:
886: if (l->l_ld_wanted != NULL) {
887: (*pr)("\n*** Locks wanted: \n\n");
888: lockdebug_show_one(l, l->l_ld_wanted, 0, pr);
889: } else {
890: (*pr)("\n*** Locks wanted: none\n");
1.62 ozaki-r 891: }
1.74 ad 892: if (show_trace) {
893: (*pr)("\n*** Traceback: \n\n");
1.69 christos 894: lockdebug_show_trace(l, pr);
1.74 ad 895: (*pr)("\n");
896: }
1.62 ozaki-r 897: }
898: }
899: }
900:
901: static void
1.69 christos 902: lockdebug_show_all_locks_cpu(void (*pr)(const char *, ...) __printflike(1, 2),
903: bool show_trace)
1.62 ozaki-r 904: {
905: lockdebug_t *ld;
906: CPU_INFO_ITERATOR cii;
907: struct cpu_info *ci;
908:
909: for (CPU_INFO_FOREACH(cii, ci)) {
910: int i = 0;
911: if (TAILQ_EMPTY(&ci->ci_data.cpu_ld_locks))
912: continue;
1.74 ad 913: (*pr)("\n******* Locks held on %s:\n", cpu_name(ci));
1.62 ozaki-r 914: TAILQ_FOREACH(ld, &ci->ci_data.cpu_ld_locks, ld_chain) {
1.74 ad 915: (*pr)("\n");
916: #ifdef MULTIPROCESSOR
917: lockdebug_show_one(ci->ci_curlwp, ld, i++, pr);
1.69 christos 918: if (show_trace)
919: lockdebug_show_trace(ci->ci_curlwp, pr);
1.68 mrg 920: #else
1.74 ad 921: lockdebug_show_one(curlwp, ld, i++, pr);
922: if (show_trace)
1.72 ryo 923: lockdebug_show_trace(curlwp, pr);
1.68 mrg 924: #endif
1.62 ozaki-r 925: }
926: }
927: }
1.75 christos 928: #endif /* LOCKDEBUG */
1.62 ozaki-r 929: #endif /* _KERNEL */
930:
1.75 christos 931: #ifdef _KERNEL
1.62 ozaki-r 932: void
1.69 christos 933: lockdebug_show_all_locks(void (*pr)(const char *, ...) __printflike(1, 2),
934: const char *modif)
1.62 ozaki-r 935: {
936: #ifdef LOCKDEBUG
937: bool show_trace = false;
938: if (modif[0] == 't')
939: show_trace = true;
940:
941: (*pr)("[Locks tracked through LWPs]\n");
942: lockdebug_show_all_locks_lwp(pr, show_trace);
943: (*pr)("\n");
944:
945: (*pr)("[Locks tracked through CPUs]\n");
946: lockdebug_show_all_locks_cpu(pr, show_trace);
947: (*pr)("\n");
948: #else
949: (*pr)("Sorry, kernel not built with the LOCKDEBUG option.\n");
950: #endif /* LOCKDEBUG */
951: }
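/*
 * Usage sketch (editor's illustration): "show all locks" walks both the
 * per-LWP and per-CPU views; the "t" modifier adds a stack trace:
 *
 *	db{0}> show all locks/t
 */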
952:
1.61 ozaki-r 953: void
1.69 christos 954: lockdebug_show_lockstats(void (*pr)(const char *, ...) __printflike(1, 2))
1.61 ozaki-r 955: {
956: #ifdef LOCKDEBUG
957: lockdebug_t *ld;
958: void *_ld;
959: uint32_t n_null = 0;
960: uint32_t n_spin_mutex = 0;
961: uint32_t n_adaptive_mutex = 0;
962: uint32_t n_rwlock = 0;
963: uint32_t n_others = 0;
964:
965: RB_TREE_FOREACH(_ld, &ld_rb_tree) {
966: ld = _ld;
967: if (ld->ld_lock == NULL) {
968: n_null++;
969: continue;
970: }
971: if (ld->ld_lockops->lo_name[0] == 'M') {
972: if (ld->ld_lockops->lo_type == LOCKOPS_SLEEP)
973: n_adaptive_mutex++;
974: else
975: n_spin_mutex++;
976: continue;
977: }
978: if (ld->ld_lockops->lo_name[0] == 'R') {
979: n_rwlock++;
980: continue;
981: }
982: n_others++;
983: }
984: (*pr)(
985: "spin mutex: %u\n"
986: "adaptive mutex: %u\n"
987: "rwlock: %u\n"
988: "null locks: %u\n"
989: "others: %u\n",
1.76 ad 990: n_spin_mutex, n_adaptive_mutex, n_rwlock,
1.61 ozaki-r 991: n_null, n_others);
992: #else
993: (*pr)("Sorry, kernel not built with the LOCKDEBUG option.\n");
994: #endif /* LOCKDEBUG */
995: }
1.75 christos 996: #endif /* _KERNEL */
1.2 ad 997: #endif /* DDB */
998:
1.75 christos 999: #ifdef _KERNEL
1.2 ad 1000: /*
1.65 mrg 1001: * lockdebug_dismiss:
1002: *
1003: * The system is rebooting, and potentially from an unsafe
1004: * place so avoid any future aborts.
1005: */
1006: void
1007: lockdebug_dismiss(void)
1008: {
1009:
1010: atomic_inc_uint_nv(&ld_panic);
1011: }
1012:
1013: /*
1.2 ad 1014: * lockdebug_abort:
1015: *
1016: * An error has been trapped - dump lock info and call panic().
1017: */
1018: void
1.58 christos 1019: lockdebug_abort(const char *func, size_t line, const volatile void *lock,
1.55 christos 1020: lockops_t *ops, const char *msg)
1.2 ad 1021: {
1022: #ifdef LOCKDEBUG
1023: lockdebug_t *ld;
1.34 ad 1024: int s;
1.2 ad 1025:
1.34 ad 1026: s = splhigh();
1.77.2.1 thorpej 1027: if ((ld = lockdebug_lookup(func, line, lock,
1.38 rafal 1028: (uintptr_t) __builtin_return_address(0))) != NULL) {
1.55 christos 1029: lockdebug_abort1(func, line, ld, s, msg, true);
1.34 ad 1030: return;
1.2 ad 1031: }
1.34 ad 1032: splx(s);
1.2 ad 1033: #endif /* LOCKDEBUG */
1034:
1.27 ad 1035: /*
1.67 mrg 1036: * Don't make the situation worse if the system is already going
1037: * down in flames. Once a panic is triggered, lockdebug state
1038: * becomes stale and cannot be trusted.
1.27 ad 1039: */
1.67 mrg 1040: if (atomic_inc_uint_nv(&ld_panic) > 1)
1041: return;
1042:
1.77.2.1 thorpej 1043: printf("%s error: %s,%zu: %s\n\n"
1.67 mrg 1044: "lock address : %#018lx\n"
1045: "current cpu : %18d\n"
1046: "current lwp : %#018lx\n",
1047: ops->lo_name, func, line, msg, (long)lock,
1048: (int)cpu_index(curcpu()), (long)curlwp);
1.77.2.1 thorpej 1049: (*ops->lo_dump)(lock, printf);
1050: printf("\n");
1.2 ad 1051:
1.55 christos 1052: panic("lock error: %s: %s,%zu: %s: lock %p cpu %d lwp %p",
1053: ops->lo_name, func, line, msg, lock, cpu_index(curcpu()), curlwp);
1.2 ad 1054: }
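/*
 * Usage sketch (editor's illustration): a primitive reports a fatal
 * inconsistency through this path, e.g.
 *
 *	lockdebug_abort(__func__, __LINE__, lk, &foolock_lockops,
 *	    "lock not held");
 *
 * with foolock_lockops as in the earlier hypothetical sketch.
 */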
1.75 christos 1055: #endif /* _KERNEL */