Annotation of src/sys/kern/kern_lock.c, Revision 1.116
1.116 ! ad 1: /* $NetBSD: kern_lock.c,v 1.115 2007/06/15 20:59:38 ad Exp $ */
1.19 thorpej 2:
3: /*-
1.114 ad 4: * Copyright (c) 1999, 2000, 2006, 2007 The NetBSD Foundation, Inc.
1.19 thorpej 5: * All rights reserved.
6: *
7: * This code is derived from software contributed to The NetBSD Foundation
8: * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
1.105 ad 9: * NASA Ames Research Center, and by Andrew Doran.
1.19 thorpej 10: *
11: * This code is derived from software contributed to The NetBSD Foundation
12: * by Ross Harvey.
13: *
14: * Redistribution and use in source and binary forms, with or without
15: * modification, are permitted provided that the following conditions
16: * are met:
17: * 1. Redistributions of source code must retain the above copyright
18: * notice, this list of conditions and the following disclaimer.
19: * 2. Redistributions in binary form must reproduce the above copyright
20: * notice, this list of conditions and the following disclaimer in the
21: * documentation and/or other materials provided with the distribution.
22: * 3. All advertising materials mentioning features or use of this software
23: * must display the following acknowledgement:
24: * This product includes software developed by the NetBSD
25: * Foundation, Inc. and its contributors.
26: * 4. Neither the name of The NetBSD Foundation nor the names of its
27: * contributors may be used to endorse or promote products derived
28: * from this software without specific prior written permission.
29: *
30: * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
31: * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
32: * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
33: * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
34: * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
35: * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
36: * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
37: * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
38: * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
39: * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
40: * POSSIBILITY OF SUCH DAMAGE.
41: */
1.2 fvdl 42:
1.86 perry 43: /*
1.1 fvdl 44: * Copyright (c) 1995
45: * The Regents of the University of California. All rights reserved.
46: *
47: * This code contains ideas from software contributed to Berkeley by
48: * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating
49: * System project at Carnegie-Mellon University.
50: *
51: * Redistribution and use in source and binary forms, with or without
52: * modification, are permitted provided that the following conditions
53: * are met:
54: * 1. Redistributions of source code must retain the above copyright
55: * notice, this list of conditions and the following disclaimer.
56: * 2. Redistributions in binary form must reproduce the above copyright
57: * notice, this list of conditions and the following disclaimer in the
58: * documentation and/or other materials provided with the distribution.
1.72 agc 59: * 3. Neither the name of the University nor the names of its contributors
1.1 fvdl 60: * may be used to endorse or promote products derived from this software
61: * without specific prior written permission.
62: *
63: * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
64: * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
65: * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
66: * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
67: * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
68: * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
69: * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
70: * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
71: * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
72: * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
73: * SUCH DAMAGE.
74: *
75: * @(#)kern_lock.c 8.18 (Berkeley) 5/21/95
76: */
1.60 lukem 77:
78: #include <sys/cdefs.h>
1.116 ! ad 79: __KERNEL_RCSID(0, "$NetBSD: kern_lock.c,v 1.115 2007/06/15 20:59:38 ad Exp $");
1.7 thorpej 80:
1.21 thorpej 81: #include "opt_multiprocessor.h"
1.18 chs 82: #include "opt_ddb.h"
1.1 fvdl 83:
1.105 ad 84: #define __MUTEX_PRIVATE
85:
1.1 fvdl 86: #include <sys/param.h>
87: #include <sys/proc.h>
88: #include <sys/lock.h>
1.2 fvdl 89: #include <sys/systm.h>
1.105 ad 90: #include <sys/lockdebug.h>
91:
1.1 fvdl 92: #include <machine/cpu.h>
1.110 christos 93: #include <machine/stdarg.h>
1.1 fvdl 94:
1.98 ad 95: #include <dev/lockstat.h>
96:
1.25 thorpej 97: #if defined(LOCKDEBUG)
98: #include <sys/syslog.h>
99: /*
100: * note that stdarg.h and the ansi style va_start macro is used for both
101: * ansi and traditional c compiles.
102: * XXX: this requires that stdarg.h define: va_alist and va_dcl
103: */
104: #include <machine/stdarg.h>
105:
1.36 thorpej 106: void lock_printf(const char *fmt, ...)
1.37 eeh 107: __attribute__((__format__(__printf__,1,2)));
1.25 thorpej 108:
1.105 ad 109: static int acquire(volatile struct lock **, int *, int, int, int, uintptr_t);
1.73 yamt 110:
1.57 sommerfe 111: int lock_debug_syslog = 0; /* defaults to printf, but can be patched */
1.55 thorpej 112:
113: #ifdef DDB
114: #include <ddb/ddbvar.h>
115: #include <machine/db_machdep.h>
116: #include <ddb/db_command.h>
117: #include <ddb/db_interface.h>
118: #endif
1.85 yamt 119: #endif /* defined(LOCKDEBUG) */
120:
1.1 fvdl 121: /*
122: * Locking primitives implementation.
1.56 wiz 123: * Locks provide shared/exclusive synchronization.
1.1 fvdl 124: */
125:
1.21 thorpej 126: #if defined(LOCKDEBUG) || defined(DIAGNOSTIC) /* { */
127: #if defined(MULTIPROCESSOR) /* { */
128: #define COUNT_CPU(cpu_id, x) \
1.47 sommerfe 129: curcpu()->ci_spin_locks += (x)
1.21 thorpej 130: #else
131: u_long spin_locks;
132: #define COUNT_CPU(cpu_id, x) spin_locks += (x)
133: #endif /* MULTIPROCESSOR */ /* } */
134:
1.69 thorpej 135: #define COUNT(lkp, l, cpu_id, x) \
1.21 thorpej 136: do { \
137: if ((lkp)->lk_flags & LK_SPIN) \
138: COUNT_CPU((cpu_id), (x)); \
139: else \
1.69 thorpej 140: (l)->l_locks += (x); \
1.30 thorpej 141: } while (/*CONSTCOND*/0)
1.1 fvdl 142: #else
1.22 mellon 143: #define COUNT(lkp, p, cpu_id, x)
1.48 sommerfe 144: #define COUNT_CPU(cpu_id, x)
1.21 thorpej 145: #endif /* LOCKDEBUG || DIAGNOSTIC */ /* } */
1.1 fvdl 146:
1.43 thorpej 147: #define INTERLOCK_ACQUIRE(lkp, flags, s) \
1.40 thorpej 148: do { \
1.43 thorpej 149: if ((flags) & LK_SPIN) \
1.114 ad 150: s = splhigh(); \
1.40 thorpej 151: simple_lock(&(lkp)->lk_interlock); \
1.66 perry 152: } while (/*CONSTCOND*/ 0)
1.40 thorpej 153:
1.43 thorpej 154: #define INTERLOCK_RELEASE(lkp, flags, s) \
1.40 thorpej 155: do { \
156: simple_unlock(&(lkp)->lk_interlock); \
1.52 thorpej 157: if ((flags) & LK_SPIN) \
1.40 thorpej 158: splx(s); \
1.66 perry 159: } while (/*CONSTCOND*/ 0)
1.40 thorpej 160:
1.63 chs 161: #ifdef DDB /* { */
1.89 chs 162: #if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
1.63 chs 163: int simple_lock_debugger = 1; /* more serious on MP */
164: #else
165: int simple_lock_debugger = 0;
166: #endif
1.93 erh 167: #define SLOCK_DEBUGGER() if (simple_lock_debugger && db_onpanic) Debugger()
1.63 chs 168: #define SLOCK_TRACE() \
169: db_stack_trace_print((db_expr_t)__builtin_frame_address(0), \
1.108 thorpej 170: true, 65535, "", lock_printf);
1.63 chs 171: #else
172: #define SLOCK_DEBUGGER() /* nothing */
173: #define SLOCK_TRACE() /* nothing */
174: #endif /* } */
175:
1.50 thorpej 176: #if defined(LOCKDEBUG)
177: #if defined(DDB)
1.93 erh 178: #define SPINLOCK_SPINCHECK_DEBUGGER if (db_onpanic) Debugger()
1.50 thorpej 179: #else
180: #define SPINLOCK_SPINCHECK_DEBUGGER /* nothing */
181: #endif
182:
183: #define SPINLOCK_SPINCHECK_DECL \
184: /* 32-bits of count -- wrap constitutes a "spinout" */ \
185: uint32_t __spinc = 0
186:
187: #define SPINLOCK_SPINCHECK \
188: do { \
189: if (++__spinc == 0) { \
1.71 pk 190: lock_printf("LK_SPIN spinout, excl %d, share %d\n", \
1.50 thorpej 191: lkp->lk_exclusivecount, lkp->lk_sharecount); \
192: if (lkp->lk_exclusivecount) \
1.71 pk 193: lock_printf("held by CPU %lu\n", \
1.50 thorpej 194: (u_long) lkp->lk_cpu); \
195: if (lkp->lk_lock_file) \
1.71 pk 196: lock_printf("last locked at %s:%d\n", \
1.50 thorpej 197: lkp->lk_lock_file, lkp->lk_lock_line); \
198: if (lkp->lk_unlock_file) \
1.71 pk 199: lock_printf("last unlocked at %s:%d\n", \
1.50 thorpej 200: lkp->lk_unlock_file, lkp->lk_unlock_line); \
1.63 chs 201: SLOCK_TRACE(); \
1.50 thorpej 202: SPINLOCK_SPINCHECK_DEBUGGER; \
203: } \
1.66 perry 204: } while (/*CONSTCOND*/ 0)
1.50 thorpej 205: #else
206: #define SPINLOCK_SPINCHECK_DECL /* nothing */
207: #define SPINLOCK_SPINCHECK /* nothing */
208: #endif /* LOCKDEBUG && DDB */
209:
1.98 ad 210: #define RETURN_ADDRESS ((uintptr_t)__builtin_return_address(0))
211:
1.1 fvdl 212: /*
 213: * Acquire a resource.
 *
 * Wait until none of the flag bits in "wanted" are set in lkp->lk_flags.
 * Called with the lock's interlock held; returns with it held.
 *
 * lkpp:     in/out.  The lock; may be updated to a new lock if lk_newlock
 *           is set while we slept (see transferlockers()).
 * s:        saved SPL, used when releasing/reacquiring a spin interlock.
 * extflags: caller's external LK_* flags (LK_SPIN, LK_SLEEPFAIL, ...).
 * drain:    non-zero for LK_DRAIN; waits on &lk_flags instead of lkp and
 *           does not join the lk_waitcount accounting.
 * wanted:   mask of LK_* flag bits that must all be clear before return.
 * ra:       caller's return address, passed through for lockstat events.
 *
 * Returns 0 on success, an ltsleep() error, or ENOLCK for LK_SLEEPFAIL.
 214: */
1.73 yamt 215: static int
1.91 perry 216: acquire(volatile struct lock **lkpp, int *s, int extflags,
1.102 yamt 217: int drain, int wanted, uintptr_t ra)
1.73 yamt 218: {
 219: int error;
1.91 perry 220: volatile struct lock *lkp = *lkpp;
1.98 ad 221: LOCKSTAT_TIMER(slptime);
1.105 ad 222: LOCKSTAT_FLAG(lsflag);
1.73 yamt 223:
 224: KASSERT(drain || (wanted & LK_WAIT_NONZERO) == 0);
 225:
 226: if (extflags & LK_SPIN) {
 /* Spin-lock path: busy-wait instead of sleeping. */
 227: int interlocked;
 228:
 229: SPINLOCK_SPINCHECK_DECL;
 230:
 231: if (!drain) {
 232: lkp->lk_waitcount++;
 233: lkp->lk_flags |= LK_WAIT_NONZERO;
 234: }
 /*
 * Spin with the interlock dropped while the lock is contended,
 * and reacquire it before re-testing, so we never hold the
 * interlock across a long busy-wait.  We exit the loop with
 * the interlock held and all "wanted" bits clear.
 */
 235: for (interlocked = 1;;) {
 236: SPINLOCK_SPINCHECK;
 237: if ((lkp->lk_flags & wanted) != 0) {
 238: if (interlocked) {
1.74 hannken 239: INTERLOCK_RELEASE(lkp, LK_SPIN, *s);
1.73 yamt 240: interlocked = 0;
 241: }
 242: SPINLOCK_SPIN_HOOK;
 243: } else if (interlocked) {
 244: break;
 245: } else {
1.74 hannken 246: INTERLOCK_ACQUIRE(lkp, LK_SPIN, *s);
1.73 yamt 247: interlocked = 1;
 248: }
 249: }
 250: if (!drain) {
 251: lkp->lk_waitcount--;
 252: if (lkp->lk_waitcount == 0)
 253: lkp->lk_flags &= ~LK_WAIT_NONZERO;
 254: }
 255: KASSERT((lkp->lk_flags & wanted) == 0);
 256: error = 0; /* sanity */
 257: } else {
 /* Sleep-lock path: block in ltsleep() until the bits clear. */
1.105 ad 258: LOCKSTAT_ENTER(lsflag);
 259:
1.73 yamt 260: for (error = 0; (lkp->lk_flags & wanted) != 0; ) {
 261: if (drain)
 262: lkp->lk_flags |= LK_WAITDRAIN;
 263: else {
 264: lkp->lk_waitcount++;
 265: lkp->lk_flags |= LK_WAIT_NONZERO;
 266: }
 /*
 * ltsleep() atomically releases the interlock while
 * asleep and reacquires it before returning.  Drain
 * waiters sleep on &lk_flags, normal waiters on lkp.
 */
 267: /* XXX Cast away volatile. */
1.105 ad 268: LOCKSTAT_START_TIMER(lsflag, slptime);
1.73 yamt 269: error = ltsleep(drain ?
1.87 christos 270: (volatile const void *)&lkp->lk_flags :
 271: (volatile const void *)lkp, lkp->lk_prio,
1.73 yamt 272: lkp->lk_wmesg, lkp->lk_timo, &lkp->lk_interlock);
1.105 ad 273: LOCKSTAT_STOP_TIMER(lsflag, slptime);
 274: LOCKSTAT_EVENT_RA(lsflag, (void *)(uintptr_t)lkp,
1.104 ad 275: LB_LOCKMGR | LB_SLEEP1, 1, slptime, ra);
1.73 yamt 276: if (!drain) {
 277: lkp->lk_waitcount--;
 278: if (lkp->lk_waitcount == 0)
 279: lkp->lk_flags &= ~LK_WAIT_NONZERO;
 280: }
 281: if (error)
 282: break;
 283: if (extflags & LK_SLEEPFAIL) {
 /* Caller asked to fail rather than retry after sleeping. */
 284: error = ENOLCK;
 285: break;
 286: }
 /*
 * transferlockers() may have pointed us at a replacement
 * lock while we slept: migrate to it (taking its interlock
 * first), and wake the transferring thread once the last
 * waiter has moved over.
 */
1.78 hannken 287: if (lkp->lk_newlock != NULL) {
 288: simple_lock(&lkp->lk_newlock->lk_interlock);
 289: simple_unlock(&lkp->lk_interlock);
 290: if (lkp->lk_waitcount == 0)
1.87 christos 291: wakeup(&lkp->lk_newlock);
1.78 hannken 292: *lkpp = lkp = lkp->lk_newlock;
 293: }
1.73 yamt 294: }
1.105 ad 295:
 296: LOCKSTAT_EXIT(lsflag);
1.1 fvdl 297: }
 298:
1.73 yamt 299: return error;
 300: }
301:
1.69 thorpej 302: #define SETHOLDER(lkp, pid, lid, cpu_id) \
1.19 thorpej 303: do { \
304: if ((lkp)->lk_flags & LK_SPIN) \
305: (lkp)->lk_cpu = cpu_id; \
1.69 thorpej 306: else { \
1.19 thorpej 307: (lkp)->lk_lockholder = pid; \
1.69 thorpej 308: (lkp)->lk_locklwp = lid; \
309: } \
1.30 thorpej 310: } while (/*CONSTCOND*/0)
1.19 thorpej 311:
1.69 thorpej 312: #define WEHOLDIT(lkp, pid, lid, cpu_id) \
1.19 thorpej 313: (((lkp)->lk_flags & LK_SPIN) != 0 ? \
1.69 thorpej 314: ((lkp)->lk_cpu == (cpu_id)) : \
315: ((lkp)->lk_lockholder == (pid) && (lkp)->lk_locklwp == (lid)))
1.19 thorpej 316:
1.23 thorpej 317: #define WAKEUP_WAITER(lkp) \
318: do { \
1.73 yamt 319: if (((lkp)->lk_flags & (LK_SPIN | LK_WAIT_NONZERO)) == \
320: LK_WAIT_NONZERO) { \
1.87 christos 321: wakeup((lkp)); \
1.23 thorpej 322: } \
1.30 thorpej 323: } while (/*CONSTCOND*/0)
1.23 thorpej 324:
1.21 thorpej 325: #if defined(LOCKDEBUG) /* { */
326: #if defined(MULTIPROCESSOR) /* { */
327: struct simplelock spinlock_list_slock = SIMPLELOCK_INITIALIZER;
328:
1.27 thorpej 329: #define SPINLOCK_LIST_LOCK() \
1.29 sommerfe 330: __cpu_simple_lock(&spinlock_list_slock.lock_data)
1.21 thorpej 331:
1.27 thorpej 332: #define SPINLOCK_LIST_UNLOCK() \
1.29 sommerfe 333: __cpu_simple_unlock(&spinlock_list_slock.lock_data)
1.21 thorpej 334: #else
335: #define SPINLOCK_LIST_LOCK() /* nothing */
336:
337: #define SPINLOCK_LIST_UNLOCK() /* nothing */
338: #endif /* MULTIPROCESSOR */ /* } */
339:
1.91 perry 340: _TAILQ_HEAD(, struct lock, volatile) spinlock_list =
1.21 thorpej 341: TAILQ_HEAD_INITIALIZER(spinlock_list);
342:
343: #define HAVEIT(lkp) \
344: do { \
345: if ((lkp)->lk_flags & LK_SPIN) { \
1.114 ad 346: int sp = splhigh(); \
1.21 thorpej 347: SPINLOCK_LIST_LOCK(); \
1.87 christos 348: TAILQ_INSERT_TAIL(&spinlock_list, (lkp), lk_list); \
1.21 thorpej 349: SPINLOCK_LIST_UNLOCK(); \
1.87 christos 350: splx(sp); \
1.21 thorpej 351: } \
1.30 thorpej 352: } while (/*CONSTCOND*/0)
1.21 thorpej 353:
354: #define DONTHAVEIT(lkp) \
355: do { \
356: if ((lkp)->lk_flags & LK_SPIN) { \
1.114 ad 357: int sp = splhigh(); \
1.21 thorpej 358: SPINLOCK_LIST_LOCK(); \
1.87 christos 359: TAILQ_REMOVE(&spinlock_list, (lkp), lk_list); \
1.21 thorpej 360: SPINLOCK_LIST_UNLOCK(); \
1.87 christos 361: splx(sp); \
1.21 thorpej 362: } \
1.30 thorpej 363: } while (/*CONSTCOND*/0)
1.21 thorpej 364: #else
365: #define HAVEIT(lkp) /* nothing */
366:
367: #define DONTHAVEIT(lkp) /* nothing */
368: #endif /* LOCKDEBUG */ /* } */
369:
1.25 thorpej 370: #if defined(LOCKDEBUG)
 371: /*
 372: * Lock debug printing routine; can be configured to print to console
 373: * or log to syslog.
 *
 * Destination is chosen by the patchable global lock_debug_syslog:
 * non-zero logs via vlog(LOG_DEBUG, ...); zero formats into a local
 * buffer (truncated at 150 bytes) and uses printf_nolog() so the
 * output does not recurse back into the message log.
 374: */
 375: void
 376: lock_printf(const char *fmt, ...)
 377: {
1.68 pk 378: char b[150];
1.25 thorpej 379: va_list ap;
 380:
 381: va_start(ap, fmt);
 382: if (lock_debug_syslog)
 383: vlog(LOG_DEBUG, fmt, ap);
1.68 pk 384: else {
 385: vsnprintf(b, sizeof(b), fmt, ap);
 386: printf_nolog("%s", b);
 387: }
1.25 thorpej 388: va_end(ap);
 389: }
390: #endif /* LOCKDEBUG */
391:
/*
 * Panic with a formatted message plus a dump of the lock's state
 * (flags, share/exclusive counts, recursion level, waiter count and
 * wmesg; under LOCKDEBUG also the lock type name and the last
 * lock/unlock file:line).  The panic format string is assembled from
 * adjacent literals spliced by #ifdef LOCKDEBUG, so the argument list
 * below must stay in step with the conditional pieces.
 */
1.110 christos 392: static void
 393: lockpanic(volatile struct lock *lkp, const char *fmt, ...)
 394: {
 395: char s[150], b[150];
 396: #ifdef LOCKDEBUG
 /* Indexed by (lk_flags & LK_TYPE_MASK); "*n*" marks unused codes. */
 397: static const char *locktype[] = {
 398: "*0*", "shared", "exclusive", "upgrade", "exclupgrade",
 399: "downgrade", "release", "drain", "exclother", "*9*",
 400: "*10*", "*11*", "*12*", "*13*", "*14*", "*15*"
 401: };
 402: #endif
 403:
 404: va_list ap;
 405: va_start(ap, fmt);
 406: vsnprintf(s, sizeof(s), fmt, ap);
 407: va_end(ap);
 /* Decode lk_flags into symbolic bit names for the panic message. */
 408: bitmask_snprintf(lkp->lk_flags, __LK_FLAG_BITS, b, sizeof(b));
 409: panic("%s ("
 410: #ifdef LOCKDEBUG
 411: "type %s "
 412: #endif
 413: "flags %s, sharecount %d, exclusivecount %d, "
 414: "recurselevel %d, waitcount %d, wmesg %s"
 415: #ifdef LOCKDEBUG
 416: ", lock_file %s, unlock_file %s, lock_line %d, unlock_line %d"
 417: #endif
 418: ")\n",
 419: s,
 420: #ifdef LOCKDEBUG
 421: locktype[lkp->lk_flags & LK_TYPE_MASK],
 422: #endif
 423: b, lkp->lk_sharecount, lkp->lk_exclusivecount,
 424: lkp->lk_recurselevel, lkp->lk_waitcount, lkp->lk_wmesg
 425: #ifdef LOCKDEBUG
 426: , lkp->lk_lock_file, lkp->lk_unlock_file, lkp->lk_lock_line,
 427: lkp->lk_unlock_line
 428: #endif
 429: );
 430: }
431:
1.1 fvdl 432: /*
1.78 hannken 433: * Transfer any waiting processes from one lock to another.
 *
 * Points "from"->lk_newlock at "to", wakes everyone sleeping on "from",
 * then sleeps on &from->lk_newlock until the last migrating waiter in
 * acquire() wakes us (acquire() does the wakeup when from's waitcount
 * reaches zero).  Must not be called on a draining lock (asserted), and
 * "from" must not equal "to" (asserted).
434: */
 435: void
 436: transferlockers(struct lock *from, struct lock *to)
 437: {
 438:
 439: KASSERT(from != to);
 440: KASSERT((from->lk_flags & LK_WAITDRAIN) == 0);
 /* Nothing to migrate. */
 441: if (from->lk_waitcount == 0)
 442: return;
 443: from->lk_newlock = to;
 444: wakeup((void *)from);
 /* Wait for acquire() to move every waiter to the new lock. */
 445: tsleep((void *)&from->lk_newlock, from->lk_prio, "lkxfer", 0);
 446: from->lk_newlock = NULL;
 447: from->lk_flags &= ~(LK_WANT_EXCL | LK_WANT_UPGRADE);
 448: KASSERT(from->lk_waitcount == 0);
 449: }
450:
451:
 452: /*
1.1 fvdl 453: * Initialize a lock; required before use.
 *
 * Zeroes the structure, initializes the interlock, and records the
 * external flags.  Spin locks get an owner CPU of LK_NOCPU; sleep locks
 * get LK_NOPROC plus the sleep priority and timeout.  "wmesg" is the
 * sleep message (just a name for spin locks).
 454: */
 455: void
1.109 yamt 456: lockinit(struct lock *lkp, pri_t prio, const char *wmesg, int timo, int flags)
1.1 fvdl 457: {
 458:
1.8 perry 459: memset(lkp, 0, sizeof(struct lock));
1.1 fvdl 460: simple_lock_init(&lkp->lk_interlock);
 461: lkp->lk_flags = flags & LK_EXTFLG_MASK;
1.19 thorpej 462: if (flags & LK_SPIN)
 463: lkp->lk_cpu = LK_NOCPU;
 464: else {
 465: lkp->lk_lockholder = LK_NOPROC;
1.78 hannken 466: lkp->lk_newlock = NULL;
1.19 thorpej 467: lkp->lk_prio = prio;
 468: lkp->lk_timo = timo;
 469: }
 470: lkp->lk_wmesg = wmesg; /* just a name for spin locks */
1.50 thorpej 471: #if defined(LOCKDEBUG)
 472: lkp->lk_lock_file = NULL;
 473: lkp->lk_unlock_file = NULL;
 474: #endif
1.1 fvdl 475: }
476:
 477: /*
 478: * Determine the status of a lock.
 *
 * Returns LK_EXCLUSIVE if we hold it exclusively, LK_EXCLOTHER if
 * someone else does (or if an exclusive request/upgrade is pending),
 * LK_SHARED if it is share-locked, and 0 if unlocked.  Identity is by
 * CPU for spin locks and by pid/lwpid for sleep locks; with no curlwp
 * the caller is treated as LK_KERNPROC.  The snapshot is taken under
 * the interlock but may be stale by the time the caller acts on it.
 479: */
 480: int
1.33 thorpej 481: lockstatus(struct lock *lkp)
1.1 fvdl 482: {
1.76 yamt 483: int s = 0; /* XXX: gcc */
 484: int lock_type = 0;
 485: struct lwp *l = curlwp; /* XXX */
 486: pid_t pid;
 487: lwpid_t lid;
1.88 blymn 488: cpuid_t cpu_num;
1.76 yamt 489:
 /* Pick the ownership identity matching WEHOLDIT()'s comparison. */
 490: if ((lkp->lk_flags & LK_SPIN) || l == NULL) {
1.88 blymn 491: cpu_num = cpu_number();
1.76 yamt 492: pid = LK_KERNPROC;
 493: lid = 0;
 494: } else {
1.88 blymn 495: cpu_num = LK_NOCPU;
1.76 yamt 496: pid = l->l_proc->p_pid;
 497: lid = l->l_lid;
 498: }
1.1 fvdl 499:
1.43 thorpej 500: INTERLOCK_ACQUIRE(lkp, lkp->lk_flags, s);
1.76 yamt 501: if (lkp->lk_exclusivecount != 0) {
1.88 blymn 502: if (WEHOLDIT(lkp, pid, lid, cpu_num))
1.76 yamt 503: lock_type = LK_EXCLUSIVE;
 504: else
 505: lock_type = LK_EXCLOTHER;
 506: } else if (lkp->lk_sharecount != 0)
1.1 fvdl 507: lock_type = LK_SHARED;
1.103 chs 508: else if (lkp->lk_flags & (LK_WANT_EXCL | LK_WANT_UPGRADE))
 509: lock_type = LK_EXCLOTHER;
1.43 thorpej 510: INTERLOCK_RELEASE(lkp, lkp->lk_flags, s);
1.1 fvdl 511: return (lock_type);
 512: }
1.35 thorpej 513:
1.92 chs 514: #if defined(LOCKDEBUG)
1.35 thorpej 515: /*
 516: * Make sure no spin locks are held by a CPU that is about
 517: * to context switch.
 *
 * Reads the per-CPU (MULTIPROCESSOR) or global spin-lock count at
 * splhigh() and panics if it is non-zero — switching away while
 * holding a spin lock would let another CPU spin forever on it.
 * Only compiled under LOCKDEBUG.
 518: */
 519: void
 520: spinlock_switchcheck(void)
 521: {
 522: u_long cnt;
 523: int s;
 524:
 /* Block all interrupts while sampling the counter. */
1.114 ad 525: s = splhigh();
1.35 thorpej 526: #if defined(MULTIPROCESSOR)
 527: cnt = curcpu()->ci_spin_locks;
 528: #else
 529: cnt = spin_locks;
 530: #endif
 531: splx(s);
 532:
 533: if (cnt != 0)
 534: panic("spinlock_switchcheck: CPU %lu has %lu spin locks",
 535: (u_long) cpu_number(), cnt);
 536: }
1.92 chs 537: #endif /* LOCKDEBUG */
1.1 fvdl 538:
539: /*
1.44 thorpej 540: * Locks and IPLs (interrupt priority levels):
541: *
542: * Locks which may be taken from interrupt context must be handled
543: * very carefully; you must spl to the highest IPL where the lock
544: * is needed before acquiring the lock.
545: *
546: * It is also important to avoid deadlock, since certain (very high
547: * priority) interrupts are often needed to keep the system as a whole
548: * from deadlocking, and must not be blocked while you are spinning
549: * waiting for a lower-priority lock.
550: *
551: * In addition, the lock-debugging hooks themselves need to use locks!
552: *
553: * A raw __cpu_simple_lock may be used from interrupts are long as it
554: * is acquired and held at a single IPL.
555: */
556:
557: /*
1.32 sommerfe 558: * XXX XXX kludge around another kludge..
559: *
560: * vfs_shutdown() may be called from interrupt context, either as a result
561: * of a panic, or from the debugger. It proceeds to call
562: * sys_sync(&proc0, ...), pretending its running on behalf of proc0
563: *
564: * We would like to make an attempt to sync the filesystems in this case, so
565: * if this happens, we treat attempts to acquire locks specially.
566: * All locks are acquired on behalf of proc0.
567: *
568: * If we've already paniced, we don't block waiting for locks, but
569: * just barge right ahead since we're already going down in flames.
570: */
571:
572: /*
1.1 fvdl 573: * Set, change, or release a lock.
574: *
575: * Shared requests increment the shared count. Exclusive requests set the
576: * LK_WANT_EXCL flag (preventing further shared locks), and wait for already
577: * accepted shared locks and shared-to-exclusive upgrades to go away.
578: */
579: int
1.50 thorpej 580: #if defined(LOCKDEBUG)
1.91 perry 581: _lockmgr(volatile struct lock *lkp, u_int flags,
1.50 thorpej 582: struct simplelock *interlkp, const char *file, int line)
583: #else
1.91 perry 584: lockmgr(volatile struct lock *lkp, u_int flags,
1.33 thorpej 585: struct simplelock *interlkp)
1.50 thorpej 586: #endif
1.1 fvdl 587: {
588: int error;
589: pid_t pid;
1.69 thorpej 590: lwpid_t lid;
1.1 fvdl 591: int extflags;
1.88 blymn 592: cpuid_t cpu_num;
1.69 thorpej 593: struct lwp *l = curlwp;
1.32 sommerfe 594: int lock_shutdown_noblock = 0;
1.67 scw 595: int s = 0;
1.1 fvdl 596:
597: error = 0;
1.19 thorpej 598:
1.80 yamt 599: /* LK_RETRY is for vn_lock, not for lockmgr. */
1.79 yamt 600: KASSERT((flags & LK_RETRY) == 0);
601:
1.43 thorpej 602: INTERLOCK_ACQUIRE(lkp, lkp->lk_flags, s);
1.1 fvdl 603: if (flags & LK_INTERLOCK)
604: simple_unlock(interlkp);
605: extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK;
1.19 thorpej 606:
1.21 thorpej 607: #ifdef DIAGNOSTIC /* { */
1.19 thorpej 608: /*
609: * Don't allow spins on sleep locks and don't allow sleeps
610: * on spin locks.
611: */
612: if ((flags ^ lkp->lk_flags) & LK_SPIN)
1.110 christos 613: lockpanic(lkp, "lockmgr: sleep/spin mismatch");
1.21 thorpej 614: #endif /* } */
1.19 thorpej 615:
1.69 thorpej 616: if (extflags & LK_SPIN) {
1.19 thorpej 617: pid = LK_KERNPROC;
1.69 thorpej 618: lid = 0;
619: } else {
620: if (l == NULL) {
1.32 sommerfe 621: if (!doing_shutdown) {
622: panic("lockmgr: no context");
623: } else {
1.69 thorpej 624: l = &lwp0;
1.32 sommerfe 625: if (panicstr && (!(flags & LK_NOWAIT))) {
626: flags |= LK_NOWAIT;
627: lock_shutdown_noblock = 1;
628: }
629: }
630: }
1.69 thorpej 631: lid = l->l_lid;
632: pid = l->l_proc->p_pid;
1.19 thorpej 633: }
1.88 blymn 634: cpu_num = cpu_number();
1.19 thorpej 635:
1.1 fvdl 636: /*
637: * Once a lock has drained, the LK_DRAINING flag is set and an
638: * exclusive lock is returned. The only valid operation thereafter
639: * is a single release of that exclusive lock. This final release
640: * clears the LK_DRAINING flag and sets the LK_DRAINED flag. Any
641: * further requests of any sort will result in a panic. The bits
642: * selected for these two flags are chosen so that they will be set
643: * in memory that is freed (freed memory is filled with 0xdeadbeef).
644: * The final release is permitted to give a new lease on life to
645: * the lock by specifying LK_REENABLE.
646: */
647: if (lkp->lk_flags & (LK_DRAINING|LK_DRAINED)) {
1.28 thorpej 648: #ifdef DIAGNOSTIC /* { */
1.1 fvdl 649: if (lkp->lk_flags & LK_DRAINED)
1.110 christos 650: lockpanic(lkp, "lockmgr: using decommissioned lock");
1.1 fvdl 651: if ((flags & LK_TYPE_MASK) != LK_RELEASE ||
1.88 blymn 652: WEHOLDIT(lkp, pid, lid, cpu_num) == 0)
1.110 christos 653: lockpanic(lkp, "lockmgr: non-release on draining lock: %d",
1.1 fvdl 654: flags & LK_TYPE_MASK);
1.28 thorpej 655: #endif /* DIAGNOSTIC */ /* } */
1.1 fvdl 656: lkp->lk_flags &= ~LK_DRAINING;
657: if ((flags & LK_REENABLE) == 0)
658: lkp->lk_flags |= LK_DRAINED;
659: }
660:
661: switch (flags & LK_TYPE_MASK) {
662:
663: case LK_SHARED:
1.88 blymn 664: if (WEHOLDIT(lkp, pid, lid, cpu_num) == 0) {
1.1 fvdl 665: /*
666: * If just polling, check to see if we will block.
667: */
668: if ((extflags & LK_NOWAIT) && (lkp->lk_flags &
669: (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE))) {
670: error = EBUSY;
671: break;
672: }
673: /*
674: * Wait for exclusive locks and upgrades to clear.
675: */
1.78 hannken 676: error = acquire(&lkp, &s, extflags, 0,
1.98 ad 677: LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE,
678: RETURN_ADDRESS);
1.1 fvdl 679: if (error)
680: break;
681: lkp->lk_sharecount++;
1.73 yamt 682: lkp->lk_flags |= LK_SHARE_NONZERO;
1.88 blymn 683: COUNT(lkp, l, cpu_num, 1);
1.1 fvdl 684: break;
685: }
686: /*
687: * We hold an exclusive lock, so downgrade it to shared.
688: * An alternative would be to fail with EDEADLK.
689: */
690: lkp->lk_sharecount++;
1.73 yamt 691: lkp->lk_flags |= LK_SHARE_NONZERO;
1.88 blymn 692: COUNT(lkp, l, cpu_num, 1);
1.1 fvdl 693: /* fall into downgrade */
694:
695: case LK_DOWNGRADE:
1.88 blymn 696: if (WEHOLDIT(lkp, pid, lid, cpu_num) == 0 ||
1.19 thorpej 697: lkp->lk_exclusivecount == 0)
1.110 christos 698: lockpanic(lkp, "lockmgr: not holding exclusive lock");
1.1 fvdl 699: lkp->lk_sharecount += lkp->lk_exclusivecount;
1.73 yamt 700: lkp->lk_flags |= LK_SHARE_NONZERO;
1.1 fvdl 701: lkp->lk_exclusivecount = 0;
1.15 fvdl 702: lkp->lk_recurselevel = 0;
1.1 fvdl 703: lkp->lk_flags &= ~LK_HAVE_EXCL;
1.69 thorpej 704: SETHOLDER(lkp, LK_NOPROC, 0, LK_NOCPU);
1.50 thorpej 705: #if defined(LOCKDEBUG)
706: lkp->lk_unlock_file = file;
707: lkp->lk_unlock_line = line;
708: #endif
1.21 thorpej 709: DONTHAVEIT(lkp);
1.23 thorpej 710: WAKEUP_WAITER(lkp);
1.1 fvdl 711: break;
712:
713: case LK_EXCLUPGRADE:
714: /*
715: * If another process is ahead of us to get an upgrade,
716: * then we want to fail rather than have an intervening
717: * exclusive access.
718: */
719: if (lkp->lk_flags & LK_WANT_UPGRADE) {
720: lkp->lk_sharecount--;
1.73 yamt 721: if (lkp->lk_sharecount == 0)
722: lkp->lk_flags &= ~LK_SHARE_NONZERO;
1.88 blymn 723: COUNT(lkp, l, cpu_num, -1);
1.1 fvdl 724: error = EBUSY;
725: break;
726: }
727: /* fall into normal upgrade */
728:
729: case LK_UPGRADE:
730: /*
731: * Upgrade a shared lock to an exclusive one. If another
732: * shared lock has already requested an upgrade to an
733: * exclusive lock, our shared lock is released and an
734: * exclusive lock is requested (which will be granted
735: * after the upgrade). If we return an error, the file
736: * will always be unlocked.
737: */
1.88 blymn 738: if (WEHOLDIT(lkp, pid, lid, cpu_num) || lkp->lk_sharecount <= 0)
1.110 christos 739: lockpanic(lkp, "lockmgr: upgrade exclusive lock");
1.1 fvdl 740: lkp->lk_sharecount--;
1.73 yamt 741: if (lkp->lk_sharecount == 0)
742: lkp->lk_flags &= ~LK_SHARE_NONZERO;
1.88 blymn 743: COUNT(lkp, l, cpu_num, -1);
1.1 fvdl 744: /*
745: * If we are just polling, check to see if we will block.
746: */
747: if ((extflags & LK_NOWAIT) &&
748: ((lkp->lk_flags & LK_WANT_UPGRADE) ||
749: lkp->lk_sharecount > 1)) {
750: error = EBUSY;
751: break;
752: }
753: if ((lkp->lk_flags & LK_WANT_UPGRADE) == 0) {
754: /*
755: * We are first shared lock to request an upgrade, so
756: * request upgrade and wait for the shared count to
757: * drop to zero, then take exclusive lock.
758: */
759: lkp->lk_flags |= LK_WANT_UPGRADE;
1.98 ad 760: error = acquire(&lkp, &s, extflags, 0, LK_SHARE_NONZERO,
761: RETURN_ADDRESS);
1.1 fvdl 762: lkp->lk_flags &= ~LK_WANT_UPGRADE;
1.83 yamt 763: if (error) {
764: WAKEUP_WAITER(lkp);
1.1 fvdl 765: break;
1.83 yamt 766: }
1.1 fvdl 767: lkp->lk_flags |= LK_HAVE_EXCL;
1.88 blymn 768: SETHOLDER(lkp, pid, lid, cpu_num);
1.50 thorpej 769: #if defined(LOCKDEBUG)
770: lkp->lk_lock_file = file;
771: lkp->lk_lock_line = line;
772: #endif
1.21 thorpej 773: HAVEIT(lkp);
1.1 fvdl 774: if (lkp->lk_exclusivecount != 0)
1.110 christos 775: lockpanic(lkp, "lockmgr: non-zero exclusive count");
1.1 fvdl 776: lkp->lk_exclusivecount = 1;
1.15 fvdl 777: if (extflags & LK_SETRECURSE)
778: lkp->lk_recurselevel = 1;
1.88 blymn 779: COUNT(lkp, l, cpu_num, 1);
1.1 fvdl 780: break;
781: }
782: /*
783: * Someone else has requested upgrade. Release our shared
784: * lock, awaken upgrade requestor if we are the last shared
785: * lock, then request an exclusive lock.
786: */
1.23 thorpej 787: if (lkp->lk_sharecount == 0)
788: WAKEUP_WAITER(lkp);
1.1 fvdl 789: /* fall into exclusive request */
790:
791: case LK_EXCLUSIVE:
1.88 blymn 792: if (WEHOLDIT(lkp, pid, lid, cpu_num)) {
1.1 fvdl 793: /*
1.19 thorpej 794: * Recursive lock.
1.1 fvdl 795: */
1.15 fvdl 796: if ((extflags & LK_CANRECURSE) == 0 &&
1.16 sommerfe 797: lkp->lk_recurselevel == 0) {
798: if (extflags & LK_RECURSEFAIL) {
799: error = EDEADLK;
800: break;
801: } else
1.110 christos 802: lockpanic(lkp, "lockmgr: locking against myself");
1.16 sommerfe 803: }
1.1 fvdl 804: lkp->lk_exclusivecount++;
1.15 fvdl 805: if (extflags & LK_SETRECURSE &&
806: lkp->lk_recurselevel == 0)
807: lkp->lk_recurselevel = lkp->lk_exclusivecount;
1.88 blymn 808: COUNT(lkp, l, cpu_num, 1);
1.1 fvdl 809: break;
810: }
811: /*
812: * If we are just polling, check to see if we will sleep.
813: */
1.73 yamt 814: if ((extflags & LK_NOWAIT) && (lkp->lk_flags &
815: (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE |
816: LK_SHARE_NONZERO))) {
1.1 fvdl 817: error = EBUSY;
818: break;
819: }
820: /*
821: * Try to acquire the want_exclusive flag.
822: */
1.82 yamt 823: error = acquire(&lkp, &s, extflags, 0,
1.98 ad 824: LK_HAVE_EXCL | LK_WANT_EXCL, RETURN_ADDRESS);
1.1 fvdl 825: if (error)
826: break;
827: lkp->lk_flags |= LK_WANT_EXCL;
828: /*
829: * Wait for shared locks and upgrades to finish.
830: */
1.78 hannken 831: error = acquire(&lkp, &s, extflags, 0,
1.98 ad 832: LK_HAVE_EXCL | LK_WANT_UPGRADE | LK_SHARE_NONZERO,
833: RETURN_ADDRESS);
1.1 fvdl 834: lkp->lk_flags &= ~LK_WANT_EXCL;
1.83 yamt 835: if (error) {
836: WAKEUP_WAITER(lkp);
1.1 fvdl 837: break;
1.83 yamt 838: }
1.1 fvdl 839: lkp->lk_flags |= LK_HAVE_EXCL;
1.88 blymn 840: SETHOLDER(lkp, pid, lid, cpu_num);
1.50 thorpej 841: #if defined(LOCKDEBUG)
842: lkp->lk_lock_file = file;
843: lkp->lk_lock_line = line;
844: #endif
1.21 thorpej 845: HAVEIT(lkp);
1.1 fvdl 846: if (lkp->lk_exclusivecount != 0)
1.110 christos 847: lockpanic(lkp, "lockmgr: non-zero exclusive count");
1.1 fvdl 848: lkp->lk_exclusivecount = 1;
1.15 fvdl 849: if (extflags & LK_SETRECURSE)
850: lkp->lk_recurselevel = 1;
1.88 blymn 851: COUNT(lkp, l, cpu_num, 1);
1.1 fvdl 852: break;
853:
854: case LK_RELEASE:
855: if (lkp->lk_exclusivecount != 0) {
1.88 blymn 856: if (WEHOLDIT(lkp, pid, lid, cpu_num) == 0) {
1.19 thorpej 857: if (lkp->lk_flags & LK_SPIN) {
1.110 christos 858: lockpanic(lkp,
859: "lockmgr: processor %lu, not "
1.19 thorpej 860: "exclusive lock holder %lu "
1.88 blymn 861: "unlocking", cpu_num, lkp->lk_cpu);
1.19 thorpej 862: } else {
1.112 perseant 863: lockpanic(lkp, "lockmgr: pid %d.%d, not "
864: "exclusive lock holder %d.%d "
865: "unlocking", pid, lid,
866: lkp->lk_lockholder,
867: lkp->lk_locklwp);
1.19 thorpej 868: }
869: }
1.15 fvdl 870: if (lkp->lk_exclusivecount == lkp->lk_recurselevel)
871: lkp->lk_recurselevel = 0;
1.1 fvdl 872: lkp->lk_exclusivecount--;
1.88 blymn 873: COUNT(lkp, l, cpu_num, -1);
1.1 fvdl 874: if (lkp->lk_exclusivecount == 0) {
875: lkp->lk_flags &= ~LK_HAVE_EXCL;
1.69 thorpej 876: SETHOLDER(lkp, LK_NOPROC, 0, LK_NOCPU);
1.50 thorpej 877: #if defined(LOCKDEBUG)
878: lkp->lk_unlock_file = file;
879: lkp->lk_unlock_line = line;
880: #endif
1.21 thorpej 881: DONTHAVEIT(lkp);
1.1 fvdl 882: }
883: } else if (lkp->lk_sharecount != 0) {
884: lkp->lk_sharecount--;
1.73 yamt 885: if (lkp->lk_sharecount == 0)
886: lkp->lk_flags &= ~LK_SHARE_NONZERO;
1.88 blymn 887: COUNT(lkp, l, cpu_num, -1);
1.1 fvdl 888: }
1.39 thorpej 889: #ifdef DIAGNOSTIC
890: else
1.110 christos 891: lockpanic(lkp, "lockmgr: release of unlocked lock!");
1.39 thorpej 892: #endif
1.23 thorpej 893: WAKEUP_WAITER(lkp);
1.1 fvdl 894: break;
895:
896: case LK_DRAIN:
897: /*
1.86 perry 898: * Check that we do not already hold the lock, as it can
1.1 fvdl 899: * never drain if we do. Unfortunately, we have no way to
900: * check for holding a shared lock, but at least we can
901: * check for an exclusive one.
902: */
1.88 blymn 903: if (WEHOLDIT(lkp, pid, lid, cpu_num))
1.110 christos 904: lockpanic(lkp, "lockmgr: draining against myself");
1.1 fvdl 905: /*
906: * If we are just polling, check to see if we will sleep.
907: */
1.73 yamt 908: if ((extflags & LK_NOWAIT) && (lkp->lk_flags &
909: (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE |
910: LK_SHARE_NONZERO | LK_WAIT_NONZERO))) {
1.1 fvdl 911: error = EBUSY;
912: break;
913: }
1.78 hannken 914: error = acquire(&lkp, &s, extflags, 1,
1.73 yamt 915: LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE |
1.98 ad 916: LK_SHARE_NONZERO | LK_WAIT_NONZERO,
917: RETURN_ADDRESS);
1.23 thorpej 918: if (error)
919: break;
1.1 fvdl 920: lkp->lk_flags |= LK_DRAINING | LK_HAVE_EXCL;
1.88 blymn 921: SETHOLDER(lkp, pid, lid, cpu_num);
1.50 thorpej 922: #if defined(LOCKDEBUG)
923: lkp->lk_lock_file = file;
924: lkp->lk_lock_line = line;
925: #endif
1.21 thorpej 926: HAVEIT(lkp);
1.1 fvdl 927: lkp->lk_exclusivecount = 1;
1.15 fvdl 928: /* XXX unlikely that we'd want this */
929: if (extflags & LK_SETRECURSE)
930: lkp->lk_recurselevel = 1;
1.88 blymn 931: COUNT(lkp, l, cpu_num, 1);
1.1 fvdl 932: break;
933:
934: default:
1.43 thorpej 935: INTERLOCK_RELEASE(lkp, lkp->lk_flags, s);
1.110 christos 936: lockpanic(lkp, "lockmgr: unknown locktype request %d",
1.1 fvdl 937: flags & LK_TYPE_MASK);
938: /* NOTREACHED */
939: }
1.23 thorpej 940: if ((lkp->lk_flags & (LK_WAITDRAIN|LK_SPIN)) == LK_WAITDRAIN &&
941: ((lkp->lk_flags &
1.73 yamt 942: (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE |
943: LK_SHARE_NONZERO | LK_WAIT_NONZERO)) == 0)) {
1.1 fvdl 944: lkp->lk_flags &= ~LK_WAITDRAIN;
1.87 christos 945: wakeup(&lkp->lk_flags);
1.1 fvdl 946: }
1.32 sommerfe 947: /*
948: * Note that this panic will be a recursive panic, since
949: * we only set lock_shutdown_noblock above if panicstr != NULL.
950: */
951: if (error && lock_shutdown_noblock)
1.110 christos 952: lockpanic(lkp, "lockmgr: deadlock (see previous panic)");
1.86 perry 953:
1.43 thorpej 954: INTERLOCK_RELEASE(lkp, lkp->lk_flags, s);
1.1 fvdl 955: return (error);
956: }
957:
 958: /*
1.47 sommerfe 959: * For a recursive spinlock held one or more times by the current CPU,
 960: * release all N locks, and return N.
 961: * Intended for use in mi_switch() shortly before context switching.
 962: */
 963:
 964: int
1.50 thorpej 965: #if defined(LOCKDEBUG)
1.91 perry 966: _spinlock_release_all(volatile struct lock *lkp, const char *file, int line)
1.50 thorpej 967: #else
1.91 perry 968: spinlock_release_all(volatile struct lock *lkp)
1.50 thorpej 969: #endif
1.47 sommerfe 970: {
 971: int s, count;
1.88 blymn 972: cpuid_t cpu_num;
1.86 perry 973:
	/* Only valid on spin locks; sleep locks are released via lockmgr(). */
1.47 sommerfe 974: KASSERT(lkp->lk_flags & LK_SPIN);
1.86 perry 975:
1.47 sommerfe 976: INTERLOCK_ACQUIRE(lkp, LK_SPIN, s);
 977:
1.88 blymn 978: cpu_num = cpu_number();
1.47 sommerfe 979: count = lkp->lk_exclusivecount;
1.86 perry 980:
1.47 sommerfe 981: if (count != 0) {
1.86 perry 982: #ifdef DIAGNOSTIC
	/* Panic if some other CPU is the exclusive holder. */
1.88 blymn 983: if (WEHOLDIT(lkp, 0, 0, cpu_num) == 0) {
1.110 christos 984: lockpanic(lkp, "spinlock_release_all: processor %lu, not "
1.47 sommerfe 985: "exclusive lock holder %lu "
1.88 blymn 986: "unlocking", (long)cpu_num, lkp->lk_cpu);
1.47 sommerfe 987: }
 988: #endif
	/* Drop every recursive hold in one step and clear ownership. */
 989: lkp->lk_recurselevel = 0;
 990: lkp->lk_exclusivecount = 0;
1.88 blymn 991: COUNT_CPU(cpu_num, -count);
1.47 sommerfe 992: lkp->lk_flags &= ~LK_HAVE_EXCL;
1.69 thorpej 993: SETHOLDER(lkp, LK_NOPROC, 0, LK_NOCPU);
1.50 thorpej 994: #if defined(LOCKDEBUG)
 995: lkp->lk_unlock_file = file;
 996: lkp->lk_unlock_line = line;
 997: #endif
1.47 sommerfe 998: DONTHAVEIT(lkp);
 999: }
 1000: #ifdef DIAGNOSTIC
 1001: else if (lkp->lk_sharecount != 0)
1.110 christos 1002: lockpanic(lkp, "spinlock_release_all: release of shared lock!");
1.47 sommerfe 1003: else
1.110 christos 1004: lockpanic(lkp, "spinlock_release_all: release of unlocked lock!");
1.47 sommerfe 1005: #endif
1.86 perry 1006: INTERLOCK_RELEASE(lkp, LK_SPIN, s);
1.47 sommerfe 1007:
	/* Caller passes this count back to spinlock_acquire_count() later. */
 1008: return (count);
 1009: }
1010:
 1011: /*
 1012: * For a recursive spinlock previously released with
 1013: * spinlock_release_all(), re-acquire it exclusively and restore all
 1014: * N (= count) recursive holds.  Returns nothing.
 1015: * Intended for use in mi_switch() right after resuming execution.
 1016: */
 1017:
 1018: void
1.50 thorpej 1019: #if defined(LOCKDEBUG)
1.91 perry 1020: _spinlock_acquire_count(volatile struct lock *lkp, int count,
1.50 thorpej 1021: const char *file, int line)
 1022: #else
1.91 perry 1023: spinlock_acquire_count(volatile struct lock *lkp, int count)
1.50 thorpej 1024: #endif
1.47 sommerfe 1025: {
 1026: int s, error;
1.88 blymn 1027: cpuid_t cpu_num;
1.86 perry 1028:
1.47 sommerfe 1029: KASSERT(lkp->lk_flags & LK_SPIN);
1.86 perry 1030:
1.47 sommerfe 1031: INTERLOCK_ACQUIRE(lkp, LK_SPIN, s);
 1032:
1.88 blymn 1033: cpu_num = cpu_number();
1.47 sommerfe 1034:
 1035: #ifdef DIAGNOSTIC
1.88 blymn 1036: if (WEHOLDIT(lkp, LK_NOPROC, 0, cpu_num))
1.110 christos 1037: lockpanic(lkp, "spinlock_acquire_count: processor %lu already holds lock", (long)cpu_num);
1.47 sommerfe 1038: #endif
 1039: /*
 1040: * Try to acquire the want_exclusive flag.
	 * NOTE: error is ignored here — acquire() on a spin lock spins
	 * until it succeeds, so no failure path is expected.
 1041: */
1.98 ad 1042: error = acquire(&lkp, &s, LK_SPIN, 0, LK_HAVE_EXCL | LK_WANT_EXCL,
 1043: RETURN_ADDRESS);
1.47 sommerfe 1044: lkp->lk_flags |= LK_WANT_EXCL;
 1045: /*
 1046: * Wait for shared locks and upgrades to finish.
 1047: */
1.78 hannken 1048: error = acquire(&lkp, &s, LK_SPIN, 0,
1.98 ad 1049: LK_HAVE_EXCL | LK_SHARE_NONZERO | LK_WANT_UPGRADE,
 1050: RETURN_ADDRESS);
1.47 sommerfe 1051: lkp->lk_flags &= ~LK_WANT_EXCL;
 1052: lkp->lk_flags |= LK_HAVE_EXCL;
	/* Spin locks are owned by a CPU, not a process/LWP. */
1.88 blymn 1053: SETHOLDER(lkp, LK_NOPROC, 0, cpu_num);
1.50 thorpej 1054: #if defined(LOCKDEBUG)
 1055: lkp->lk_lock_file = file;
 1056: lkp->lk_lock_line = line;
 1057: #endif
1.47 sommerfe 1058: HAVEIT(lkp);
 1059: if (lkp->lk_exclusivecount != 0)
1.110 christos 1060: lockpanic(lkp, "lockmgr: non-zero exclusive count");
	/* Restore the recursion depth saved by spinlock_release_all(). */
1.47 sommerfe 1061: lkp->lk_exclusivecount = count;
 1062: lkp->lk_recurselevel = 1;
1.88 blymn 1063: COUNT_CPU(cpu_num, count);
1.47 sommerfe 1064:
1.86 perry 1065: INTERLOCK_RELEASE(lkp, lkp->lk_flags, s);
1.47 sommerfe 1066: }
1066:
1067:
1068:
 1069: /*
1.1 fvdl 1070: * Print out information about state of a lock. Used by VOP_PRINT
 1071: * routines to display status about contained locks.
 1072: */
1.2 fvdl 1073: void
1.91 perry 1074: lockmgr_printinfo(volatile struct lock *lkp)
1.1 fvdl 1075: {
 1076:
	/* Shared, exclusive, or unlocked — report whichever state holds. */
 1077: if (lkp->lk_sharecount)
 1078: printf(" lock type %s: SHARED (count %d)", lkp->lk_wmesg,
 1079: lkp->lk_sharecount);
1.19 thorpej 1080: else if (lkp->lk_flags & LK_HAVE_EXCL) {
 1081: printf(" lock type %s: EXCL (count %d) by ",
 1082: lkp->lk_wmesg, lkp->lk_exclusivecount);
	/* Spin locks are held by a CPU; sleep locks by a pid/lwp pair. */
 1083: if (lkp->lk_flags & LK_SPIN)
 1084: printf("processor %lu", lkp->lk_cpu);
 1085: else
1.69 thorpej 1086: printf("pid %d.%d", lkp->lk_lockholder,
 1087: lkp->lk_locklwp);
1.19 thorpej 1088: } else
 1089: printf(" not locked");
 1090: if ((lkp->lk_flags & LK_SPIN) == 0 && lkp->lk_waitcount > 0)
1.1 fvdl 1091: printf(" with %d pending", lkp->lk_waitcount);
 1092: }
1093:
1.21 thorpej 1094: #if defined(LOCKDEBUG) /* { */
1.91 perry 1095: _TAILQ_HEAD(, struct simplelock, volatile) simplelock_list =
1.21 thorpej 1096: TAILQ_HEAD_INITIALIZER(simplelock_list);
1097:
1098: #if defined(MULTIPROCESSOR) /* { */
1099: struct simplelock simplelock_list_slock = SIMPLELOCK_INITIALIZER;
1100:
1101: #define SLOCK_LIST_LOCK() \
1.29 sommerfe 1102: __cpu_simple_lock(&simplelock_list_slock.lock_data)
1.21 thorpej 1103:
1104: #define SLOCK_LIST_UNLOCK() \
1.29 sommerfe 1105: __cpu_simple_unlock(&simplelock_list_slock.lock_data)
1.21 thorpej 1106:
1107: #define SLOCK_COUNT(x) \
1.47 sommerfe 1108: curcpu()->ci_simple_locks += (x)
1.21 thorpej 1109: #else
1110: u_long simple_locks;
1111:
1112: #define SLOCK_LIST_LOCK() /* nothing */
1113:
1114: #define SLOCK_LIST_UNLOCK() /* nothing */
1115:
1116: #define SLOCK_COUNT(x) simple_locks += (x)
1117: #endif /* MULTIPROCESSOR */ /* } */
1118:
1.26 sommerfe 1119: #ifdef MULTIPROCESSOR
1.75 wiz 1120: #define SLOCK_MP() lock_printf("on CPU %ld\n", \
1.46 thorpej 1121: (u_long) cpu_number())
1.26 sommerfe 1122: #else
1123: #define SLOCK_MP() /* nothing */
1124: #endif
1125:
1.21 thorpej 1126: #define SLOCK_WHERE(str, alp, id, l) \
1127: do { \
1.58 chs 1128: lock_printf("\n"); \
1.25 thorpej 1129: lock_printf(str); \
1.33 thorpej 1130: lock_printf("lock: %p, currently at: %s:%d\n", (alp), (id), (l)); \
1.26 sommerfe 1131: SLOCK_MP(); \
1.21 thorpej 1132: if ((alp)->lock_file != NULL) \
1.25 thorpej 1133: lock_printf("last locked: %s:%d\n", (alp)->lock_file, \
1.21 thorpej 1134: (alp)->lock_line); \
1135: if ((alp)->unlock_file != NULL) \
1.25 thorpej 1136: lock_printf("last unlocked: %s:%d\n", (alp)->unlock_file, \
1.21 thorpej 1137: (alp)->unlock_line); \
1.58 chs 1138: SLOCK_TRACE() \
1.21 thorpej 1139: SLOCK_DEBUGGER(); \
1.30 thorpej 1140: } while (/*CONSTCOND*/0)
1.12 chs 1141:
1.1 fvdl 1142: /*
1143: * Simple lock functions so that the debugger can see from whence
1144: * they are being called.
1145: */
 1146: void
1.91 perry 1147: simple_lock_init(volatile struct simplelock *alp)
1.1 fvdl 1148: {
1.21 thorpej 1149:
	/* Initialize the underlying lock word for the platform. */
 1150: #if defined(MULTIPROCESSOR) /* { */
1.27 thorpej 1151: __cpu_simple_lock_init(&alp->lock_data);
1.21 thorpej 1152: #else
1.27 thorpej 1153: alp->lock_data = __SIMPLELOCK_UNLOCKED;
1.21 thorpej 1154: #endif /* } */
	/* Clear the LOCKDEBUG bookkeeping: no holder, no lock/unlock site. */
1.5 chs 1155: alp->lock_file = NULL;
 1156: alp->lock_line = 0;
 1157: alp->unlock_file = NULL;
 1158: alp->unlock_line = 0;
1.41 thorpej 1159: alp->lock_holder = LK_NOCPU;
1.1 fvdl 1160: }
1161:
 1162: void
1.91 perry 1163: _simple_lock(volatile struct simplelock *alp, const char *id, int l)
1.1 fvdl 1164: {
1.88 blymn 1165: cpuid_t cpu_num = cpu_number();
1.12 chs 1166: int s;
 1167:
	/* Block interrupts while we examine/modify debug state. */
1.114 ad 1168: s = splhigh();
1.21 thorpej 1169:
 1170: /*
 1171: * MULTIPROCESSOR case: This is `safe' since if it's not us, we
 1172: * don't take any action, and just fall into the normal spin case.
 1173: */
1.27 thorpej 1174: if (alp->lock_data == __SIMPLELOCK_LOCKED) {
1.21 thorpej 1175: #if defined(MULTIPROCESSOR) /* { */
	/* Recursive acquisition by this CPU is a bug — report it. */
1.88 blymn 1176: if (alp->lock_holder == cpu_num) {
1.21 thorpej 1177: SLOCK_WHERE("simple_lock: locking against myself\n",
 1178: alp, id, l);
 1179: goto out;
1.1 fvdl 1180: }
1.21 thorpej 1181: #else
 1182: SLOCK_WHERE("simple_lock: lock held\n", alp, id, l);
 1183: goto out;
 1184: #endif /* MULTIPROCESSOR */ /* } */
1.1 fvdl 1185: }
1.21 thorpej 1186:
 1187: #if defined(MULTIPROCESSOR) /* { */
 1188: /* Acquire the lock before modifying any fields. */
	/* Spin with interrupts enabled, then re-raise once we own it. */
1.70 pk 1189: splx(s);
1.27 thorpej 1190: __cpu_simple_lock(&alp->lock_data);
1.114 ad 1191: s = splhigh();
1.21 thorpej 1192: #else
1.27 thorpej 1193: alp->lock_data = __SIMPLELOCK_LOCKED;
1.21 thorpej 1194: #endif /* } */
 1195:
	/* A free lock must show LK_NOCPU; anything else means no init. */
1.45 sommerfe 1196: if (alp->lock_holder != LK_NOCPU) {
 1197: SLOCK_WHERE("simple_lock: uninitialized lock\n",
 1198: alp, id, l);
 1199: }
1.5 chs 1200: alp->lock_file = id;
 1201: alp->lock_line = l;
1.88 blymn 1202: alp->lock_holder = cpu_num;
1.21 thorpej 1203:
	/* Record the held lock on the global LOCKDEBUG list. */
 1204: SLOCK_LIST_LOCK();
1.87 christos 1205: TAILQ_INSERT_TAIL(&simplelock_list, alp, list);
1.21 thorpej 1206: SLOCK_LIST_UNLOCK();
 1207:
 1208: SLOCK_COUNT(1);
 1209:
 1210: out:
1.18 chs 1211: splx(s);
1.38 thorpej 1212: }
1213:
 1214: int
1.91 perry 1215: _simple_lock_held(volatile struct simplelock *alp)
1.38 thorpej 1216: {
1.54 enami 1217: #if defined(MULTIPROCESSOR) || defined(DIAGNOSTIC)
1.88 blymn 1218: cpuid_t cpu_num = cpu_number();
1.54 enami 1219: #endif
1.38 thorpej 1220: int s, locked = 0;
 1221:
1.114 ad 1222: s = splhigh();
1.42 thorpej 1223:
 1224: #if defined(MULTIPROCESSOR)
	/*
	 * If the try-lock fails the lock is held; it counts as "held by
	 * us" only when the recorded holder is this CPU.  If the try-lock
	 * succeeds the lock was free, so undo the acquisition.
	 */
1.38 thorpej 1225: if (__cpu_simple_lock_try(&alp->lock_data) == 0)
1.88 blymn 1226: locked = (alp->lock_holder == cpu_num);
1.38 thorpej 1227: else
 1228: __cpu_simple_unlock(&alp->lock_data);
 1229: #else
1.42 thorpej 1230: if (alp->lock_data == __SIMPLELOCK_LOCKED) {
 1231: locked = 1;
1.88 blymn 1232: KASSERT(alp->lock_holder == cpu_num);
1.42 thorpej 1233: }
 1234: #endif
1.38 thorpej 1235:
 1236: splx(s);
1.42 thorpej 1237:
1.38 thorpej 1238: return (locked);
1.1 fvdl 1239: }
1240:
 1241: int
1.91 perry 1242: _simple_lock_try(volatile struct simplelock *alp, const char *id, int l)
1.1 fvdl 1243: {
1.88 blymn 1244: cpuid_t cpu_num = cpu_number();
1.21 thorpej 1245: int s, rv = 0;
1.1 fvdl 1246:
1.114 ad 1247: s = splhigh();
1.21 thorpej 1248:
 1249: /*
 1250: * MULTIPROCESSOR case: This is `safe' since if it's not us, we
 1251: * don't take any action.
 1252: */
 1253: #if defined(MULTIPROCESSOR) /* { */
	/* Failure is fine unless we already hold it — that is a bug. */
1.27 thorpej 1254: if ((rv = __cpu_simple_lock_try(&alp->lock_data)) == 0) {
1.88 blymn 1255: if (alp->lock_holder == cpu_num)
1.21 thorpej 1256: SLOCK_WHERE("simple_lock_try: locking against myself\n",
1.26 sommerfe 1257: alp, id, l);
1.21 thorpej 1258: goto out;
 1259: }
 1260: #else
1.27 thorpej 1261: if (alp->lock_data == __SIMPLELOCK_LOCKED) {
1.21 thorpej 1262: SLOCK_WHERE("simple_lock_try: lock held\n", alp, id, l);
 1263: goto out;
1.18 chs 1264: }
1.27 thorpej 1265: alp->lock_data = __SIMPLELOCK_LOCKED;
1.21 thorpej 1266: #endif /* MULTIPROCESSOR */ /* } */
 1267:
 1268: /*
 1269: * At this point, we have acquired the lock.
 1270: */
 1271:
 1272: rv = 1;
1.18 chs 1273:
	/* Record holder and acquisition site for LOCKDEBUG. */
1.5 chs 1274: alp->lock_file = id;
 1275: alp->lock_line = l;
1.88 blymn 1276: alp->lock_holder = cpu_num;
1.21 thorpej 1277:
 1278: SLOCK_LIST_LOCK();
1.87 christos 1279: TAILQ_INSERT_TAIL(&simplelock_list, alp, list);
1.21 thorpej 1280: SLOCK_LIST_UNLOCK();
 1281:
 1282: SLOCK_COUNT(1);
 1283:
 1284: out:
1.12 chs 1285: splx(s);
	/* Returns non-zero iff the lock was obtained. */
1.21 thorpej 1286: return (rv);
1.1 fvdl 1287: }
1288:
 1289: void
1.91 perry 1290: _simple_unlock(volatile struct simplelock *alp, const char *id, int l)
1.1 fvdl 1291: {
1.12 chs 1292: int s;
1.1 fvdl 1293:
1.114 ad 1294: s = splhigh();
1.21 thorpej 1295:
 1296: /*
 1297: * MULTIPROCESSOR case: This is `safe' because we think we hold
 1298: * the lock, and if we don't, we don't take any action.
 1299: */
1.27 thorpej 1300: if (alp->lock_data == __SIMPLELOCK_UNLOCKED) {
1.21 thorpej 1301: SLOCK_WHERE("simple_unlock: lock not held\n",
 1302: alp, id, l);
 1303: goto out;
 1304: }
 1305:
	/* Remove from the LOCKDEBUG held-locks list before dropping. */
 1306: SLOCK_LIST_LOCK();
 1307: TAILQ_REMOVE(&simplelock_list, alp, list);
 1308: SLOCK_LIST_UNLOCK();
 1309:
 1310: SLOCK_COUNT(-1);
 1311:
 1312: alp->list.tqe_next = NULL; /* sanity */
 1313: alp->list.tqe_prev = NULL; /* sanity */
 1314:
1.5 chs 1315: alp->unlock_file = id;
 1316: alp->unlock_line = l;
1.21 thorpej 1317:
 1318: #if defined(MULTIPROCESSOR) /* { */
1.26 sommerfe 1319: alp->lock_holder = LK_NOCPU;
1.21 thorpej 1320: /* Now that we've modified all fields, release the lock. */
1.27 thorpej 1321: __cpu_simple_unlock(&alp->lock_data);
1.21 thorpej 1322: #else
1.27 thorpej 1323: alp->lock_data = __SIMPLELOCK_UNLOCKED;
1.41 thorpej 1324: KASSERT(alp->lock_holder == cpu_number());
 1325: alp->lock_holder = LK_NOCPU;
1.21 thorpej 1326: #endif /* } */
 1327:
 1328: out:
1.18 chs 1329: splx(s);
1.12 chs 1330: }
1331:
 1332: void
	/* Dump every currently-held simple lock (LOCKDEBUG diagnostic). */
1.33 thorpej 1333: simple_lock_dump(void)
1.12 chs 1334: {
1.91 perry 1335: volatile struct simplelock *alp;
1.12 chs 1336: int s;
 1337:
1.114 ad 1338: s = splhigh();
1.21 thorpej 1339: SLOCK_LIST_LOCK();
1.25 thorpej 1340: lock_printf("all simple locks:\n");
1.58 chs 1341: TAILQ_FOREACH(alp, &simplelock_list, list) {
1.25 thorpej 1342: lock_printf("%p CPU %lu %s:%d\n", alp, alp->lock_holder,
1.21 thorpej 1343: alp->lock_file, alp->lock_line);
1.12 chs 1344: }
1.21 thorpej 1345: SLOCK_LIST_UNLOCK();
1.12 chs 1346: splx(s);
 1347: }
1348:
 1349: void
	/*
	 * Check that no held simple lock lives inside [start, end) — used
	 * before freeing memory to catch locks being freed while held.
	 */
1.33 thorpej 1350: simple_lock_freecheck(void *start, void *end)
1.12 chs 1351: {
1.91 perry 1352: volatile struct simplelock *alp;
1.12 chs 1353: int s;
 1354:
1.114 ad 1355: s = splhigh();
1.21 thorpej 1356: SLOCK_LIST_LOCK();
1.58 chs 1357: TAILQ_FOREACH(alp, &simplelock_list, list) {
1.91 perry 1358: if ((volatile void *)alp >= start &&
 1359: (volatile void *)alp < end) {
1.25 thorpej 1360: lock_printf("freeing simple_lock %p CPU %lu %s:%d\n",
1.34 thorpej 1361: alp, alp->lock_holder, alp->lock_file,
 1362: alp->lock_line);
1.58 chs 1363: SLOCK_DEBUGGER();
 1364: }
 1365: }
 1366: SLOCK_LIST_UNLOCK();
 1367: splx(s);
 1368: }
1369:
1.55 thorpej 1370: /*
1.113 yamt 1371: * We must be holding exactly one lock: the spc_lock.
1.55 thorpej 1372: */
 1373:
1.34 thorpej 1374: void
 1375: simple_lock_switchcheck(void)
 1376: {
1.55 thorpej 1377:
	/*
	 * NOTE(review): the NULL argument asserts that *no* simple locks
	 * are held; the spc_lock mentioned above is presumably not a
	 * simplelock tracked on simplelock_list — confirm against callers.
	 */
1.105 ad 1378: simple_lock_only_held(NULL, "switching");
1.55 thorpej 1379: }
1380:
1.93 erh 1381: /*
 1382: * Drop into the debugger if lp isn't the only lock held.
 1383: * lp may be NULL.
 1384: */
1.55 thorpej 1385: void
 1386: simple_lock_only_held(volatile struct simplelock *lp, const char *where)
 1387: {
1.91 perry 1388: volatile struct simplelock *alp;
1.88 blymn 1389: cpuid_t cpu_num = cpu_number();
1.34 thorpej 1390: int s;
 1391:
	/* If a lock was named, it must at least be held by us. */
1.55 thorpej 1392: if (lp) {
 1393: LOCK_ASSERT(simple_lock_held(lp));
 1394: }
1.114 ad 1395: s = splhigh();
1.34 thorpej 1396: SLOCK_LIST_LOCK();
	/* Scan for any lock held by this CPU other than lp. */
1.58 chs 1397: TAILQ_FOREACH(alp, &simplelock_list, list) {
1.55 thorpej 1398: if (alp == lp)
1.42 thorpej 1399: continue;
1.88 blymn 1400: if (alp->lock_holder == cpu_num)
1.55 thorpej 1401: break;
1.12 chs 1402: }
1.21 thorpej 1403: SLOCK_LIST_UNLOCK();
1.12 chs 1404: splx(s);
1.55 thorpej 1405:
	/* Non-NULL alp means the loop broke on an extra held lock. */
 1406: if (alp != NULL) {
1.58 chs 1407: lock_printf("\n%s with held simple_lock %p "
1.55 thorpej 1408: "CPU %lu %s:%d\n",
 1409: where, alp, alp->lock_holder, alp->lock_file,
 1410: alp->lock_line);
1.58 chs 1411: SLOCK_TRACE();
1.55 thorpej 1412: SLOCK_DEBUGGER();
 1413: }
1.1 fvdl 1414: }
1.94 erh 1415:
1416: /*
1417: * Set to 1 by simple_lock_assert_*().
1418: * Can be cleared from ddb to avoid a panic.
1419: */
1420: int slock_assert_will_panic;
1421:
1422: /*
1423: * If the lock isn't held, print a traceback, optionally drop into the
1424: * debugger, then panic.
 1425: * The panic can be avoided by clearing slock_assert_will_panic from the
1426: * debugger.
1427: */
 1428: void
 1429: _simple_lock_assert_locked(volatile struct simplelock *alp,
 1430: const char *lockname, const char *id, int l)
 1431: {
	/* Report, optionally debug, then panic unless the flag is cleared. */
 1432: if (simple_lock_held(alp) == 0) {
 1433: slock_assert_will_panic = 1;
 1434: lock_printf("%s lock not held\n", lockname);
 1435: SLOCK_WHERE("lock not held", alp, id, l);
	/* SLOCK_WHERE may enter ddb, where the flag can be cleared. */
 1436: if (slock_assert_will_panic)
 1437: panic("%s: not locked", lockname);
 1438: }
 1439: }
1440:
 1441: void
 1442: _simple_lock_assert_unlocked(volatile struct simplelock *alp,
 1443: const char *lockname, const char *id, int l)
 1444: {
	/* Mirror of _simple_lock_assert_locked() for the unlocked case. */
 1445: if (simple_lock_held(alp)) {
 1446: slock_assert_will_panic = 1;
 1447: lock_printf("%s lock held\n", lockname);
 1448: SLOCK_WHERE("lock held", alp, id, l);
 1449: if (slock_assert_will_panic)
 1450: panic("%s: locked", lockname);
 1451: }
 1452: }
1453:
1.96 yamt 1454: void
	/*
	 * Assert that it is safe to sleep here: not on the idle LWP, and
	 * no simple locks held other than the optional interlock.
	 */
 1455: assert_sleepable(struct simplelock *interlock, const char *msg)
 1456: {
 1457:
1.113 yamt 1458: if (CURCPU_IDLE_P()) {
 1459: panic("assert_sleepable: idle");
1.97 yamt 1460: }
1.96 yamt 1461: simple_lock_only_held(interlock, msg);
 1462: }
1463:
1.21 thorpej 1464: #endif /* LOCKDEBUG */ /* } */
1.62 thorpej 1465:
1.116 ! ad 1466: int kernel_lock_id;
! 1467: __cpu_simple_lock_t kernel_lock;
! 1468:
1.62 thorpej 1469: #if defined(MULTIPROCESSOR)
1.105 ad 1470:
1.62 thorpej 1471: /*
1472: * Functions for manipulating the kernel_lock. We put them here
1473: * so that they show up in profiles.
1474: */
1475:
1.105 ad 1476: #define _KERNEL_LOCK_ABORT(msg) \
1477: LOCKDEBUG_ABORT(kernel_lock_id, &kernel_lock, &_kernel_lock_ops, \
1478: __FUNCTION__, msg)
1479:
1480: #ifdef LOCKDEBUG
1481: #define _KERNEL_LOCK_ASSERT(cond) \
1482: do { \
1483: if (!(cond)) \
1484: _KERNEL_LOCK_ABORT("assertion failed: " #cond); \
1485: } while (/* CONSTCOND */ 0)
1486: #else
1487: #define _KERNEL_LOCK_ASSERT(cond) /* nothing */
1488: #endif
1489:
1490: void _kernel_lock_dump(volatile void *);
1491:
1492: lockops_t _kernel_lock_ops = {
1493: "Kernel lock",
1494: 0,
1495: _kernel_lock_dump
1496: };
1497:
1.85 yamt 1498: /*
1.105 ad 1499: * Initialize the kernel lock.
1.85 yamt 1500: */
1.62 thorpej 1501: void
 1502: _kernel_lock_init(void)
 1503: {
 1504:
	/* Reset the lock word and register it with LOCKDEBUG. */
1.105 ad 1505: __cpu_simple_lock_init(&kernel_lock);
 1506: kernel_lock_id = LOCKDEBUG_ALLOC(&kernel_lock, &_kernel_lock_ops);
1.62 thorpej 1507: }
1508:
1509: /*
1.105 ad 1510: * Print debugging information about the kernel lock.
1.62 thorpej 1511: */
 1512: void
1.105 ad 1513: _kernel_lock_dump(volatile void *junk)
1.62 thorpej 1514: {
1.85 yamt 1515: struct cpu_info *ci = curcpu();
1.62 thorpej 1516:
	/* 'junk' exists only to satisfy the lockops_t dump signature. */
1.105 ad 1517: (void)junk;
1.85 yamt 1518:
1.105 ad 1519: printf_nolog("curcpu holds : %18d wanted by: %#018lx\n",
 1520: ci->ci_biglock_count, (long)ci->ci_biglock_wanted);
1.62 thorpej 1521: }
1522:
1.105 ad 1523: /*
1524: * Acquire 'nlocks' holds on the kernel lock. If 'l' is non-null, the
1525: * acquisition is from process context.
1526: */
1.62 thorpej 1527: void
1.105 ad 1528: _kernel_lock(int nlocks, struct lwp *l)
1.62 thorpej 1529: {
1.85 yamt 1530: struct cpu_info *ci = curcpu();
1.105 ad 1531: LOCKSTAT_TIMER(spintime);
 1532: LOCKSTAT_FLAG(lsflag);
 1533: struct lwp *owant;
 1534: #ifdef LOCKDEBUG
 1535: u_int spins;
 1536: #endif
1.85 yamt 1537: int s;
 1538:
	/* 'l' is unused here; kept for the documented interface. */
1.105 ad 1539: (void)l;
 1540:
 1541: if (nlocks == 0)
 1542: return;
 1543: _KERNEL_LOCK_ASSERT(nlocks > 0);
1.62 thorpej 1544:
1.115 ad 1545: s = splsched(); /* XXX splvm() */
1.105 ad 1546:
	/* Recursive acquisition: just bump this CPU's hold count. */
 1547: if (ci->ci_biglock_count != 0) {
 1548: _KERNEL_LOCK_ASSERT(kernel_lock == __SIMPLELOCK_LOCKED);
 1549: ci->ci_biglock_count += nlocks;
 1550: splx(s);
 1551: return;
 1552: }
 1553:
1.107 ad 1554: LOCKDEBUG_WANTLOCK(kernel_lock_id,
 1555: (uintptr_t)__builtin_return_address(0), 0);
 1556:
	/* Fast path: uncontended, take it and go. */
1.105 ad 1557: if (__cpu_simple_lock_try(&kernel_lock)) {
 1558: ci->ci_biglock_count = nlocks;
 1559: LOCKDEBUG_LOCKED(kernel_lock_id,
 1560: (uintptr_t)__builtin_return_address(0), 0);
 1561: splx(s);
 1562: return;
 1563: }
 1564:
 1565: LOCKSTAT_ENTER(lsflag);
 1566: LOCKSTAT_START_TIMER(lsflag, spintime);
 1567:
 1568: /*
 1569: * Before setting ci_biglock_wanted we must post a store
 1570: * fence (see kern_mutex.c). This is accomplished by the
 1571: * __cpu_simple_lock_try() above.
 1572: */
 1573: owant = ci->ci_biglock_wanted;
 1574: ci->ci_biglock_wanted = curlwp; /* XXXAD */
 1575:
 1576: #ifdef LOCKDEBUG
 1577: spins = 0;
 1578: #endif
 1579:
	/*
	 * Slow path: spin with interrupts enabled (splx/splsched dance)
	 * until the lock word goes free, then race to take it.
	 */
 1580: do {
 1581: while (kernel_lock == __SIMPLELOCK_LOCKED) {
 1582: #ifdef LOCKDEBUG
 1583: if (SPINLOCK_SPINOUT(spins))
 1584: _KERNEL_LOCK_ABORT("spinout");
 1585: #endif
 1586: splx(s);
 1587: SPINLOCK_SPIN_HOOK;
1.115 ad 1588: (void)splsched(); /* XXX splvm() */
1.105 ad 1589: }
 1590: } while (!__cpu_simple_lock_try(&kernel_lock));
 1591:
 1592: ci->ci_biglock_wanted = owant;
 1593: ci->ci_biglock_count += nlocks;
1.107 ad 1594: LOCKSTAT_STOP_TIMER(lsflag, spintime);
 1595: LOCKDEBUG_LOCKED(kernel_lock_id,
 1596: (uintptr_t)__builtin_return_address(0), 0);
1.85 yamt 1597: splx(s);
1.105 ad 1598:
 1599: /*
 1600: * Again, another store fence is required (see kern_mutex.c).
 1601: */
 1602: mb_write();
	/* Only record a LOCKSTAT event for the outermost contended wait. */
1.107 ad 1603: if (owant == NULL) {
 1604: LOCKSTAT_EVENT(lsflag, &kernel_lock, LB_KERNEL_LOCK | LB_SPIN,
 1605: 1, spintime);
 1606: }
1.105 ad 1607: LOCKSTAT_EXIT(lsflag);
1.62 thorpej 1608: }
1609:
1610: /*
1.105 ad 1611: * Release 'nlocks' holds on the kernel lock. If 'nlocks' is zero, release
1612: * all holds. If 'l' is non-null, the release is from process context.
1.62 thorpej 1613: */
 1614: void
1.105 ad 1615: _kernel_unlock(int nlocks, struct lwp *l, int *countp)
1.62 thorpej 1616: {
1.105 ad 1617: struct cpu_info *ci = curcpu();
 1618: u_int olocks;
 1619: int s;
1.62 thorpej 1620:
	/* 'l' is unused here; kept for the documented interface. */
1.105 ad 1621: (void)l;
1.62 thorpej 1622:
1.105 ad 1623: _KERNEL_LOCK_ASSERT(nlocks < 2);
1.62 thorpej 1624:
1.105 ad 1625: olocks = ci->ci_biglock_count;
1.77 yamt 1626:
	/* Not held at all: report zero holds via *countp and return. */
1.105 ad 1627: if (olocks == 0) {
 1628: _KERNEL_LOCK_ASSERT(nlocks <= 0);
 1629: if (countp != NULL)
 1630: *countp = 0;
 1631: return;
 1632: }
1.77 yamt 1633:
1.105 ad 1634: _KERNEL_LOCK_ASSERT(kernel_lock == __SIMPLELOCK_LOCKED);
1.85 yamt 1635:
	/* nlocks == 0 means release everything; -1 means exactly one. */
1.105 ad 1636: if (nlocks == 0)
 1637: nlocks = olocks;
 1638: else if (nlocks == -1) {
 1639: nlocks = 1;
 1640: _KERNEL_LOCK_ASSERT(olocks == 1);
 1641: }
1.85 yamt 1642:
1.115 ad 1643: s = splsched(); /* XXX splvm() */
1.105 ad 1644: if ((ci->ci_biglock_count -= nlocks) == 0) {
 1645: LOCKDEBUG_UNLOCKED(kernel_lock_id,
 1646: (uintptr_t)__builtin_return_address(0), 0);
 1647: __cpu_simple_unlock(&kernel_lock);
1.85 yamt 1648: }
1.105 ad 1649: splx(s);
1.77 yamt 1650:
	/* Hand back the pre-release hold count for _kernel_lock() later. */
1.105 ad 1651: if (countp != NULL)
 1652: *countp = olocks;
1.77 yamt 1653: }
1654:
1.84 yamt 1655: #if defined(DEBUG)
1.105 ad 1656: /*
1657: * Assert that the kernel lock is held.
1658: */
1.84 yamt 1659: void
1.105 ad 1660: _kernel_lock_assert_locked(void)
1.84 yamt 1661: {
1.100 yamt 1662:
	/* Held means: lock word locked AND this CPU has a non-zero count. */
1.105 ad 1663: if (kernel_lock != __SIMPLELOCK_LOCKED ||
 1664: curcpu()->ci_biglock_count == 0)
 1665: _KERNEL_LOCK_ABORT("not locked");
1.84 yamt 1666: }
1.100 yamt 1667:
1668: void
1669: _kernel_lock_assert_unlocked()
1670: {
1671:
1.105 ad 1672: if (curcpu()->ci_biglock_count != 0)
1673: _KERNEL_LOCK_ABORT("locked");
1.100 yamt 1674: }
1.84 yamt 1675: #endif
1.94 erh 1676:
1.105 ad 1677: #endif /* MULTIPROCESSOR || LOCKDEBUG */
CVSweb <webmaster@jp.NetBSD.org>