/*	$NetBSD: kern_mutex.c,v 1.4.2.3 2007/02/27 16:54:22 yamt Exp $	*/

/*-
 * Copyright (c) 2002, 2006, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe and Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Kernel mutex implementation, modeled after those found in Solaris,
 * a description of which can be found in:
 *
 *	Solaris Internals: Core Kernel Architecture, Jim Mauro and
 *	Richard McDougall.
 */
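
/*
 * Illustrative usage sketch (not part of this file; 'slock' is a
 * made-up name): an adaptive mutex protects data touched only from
 * thread context, while a spin mutex is required for data shared
 * with interrupt handlers.
 *
 *	kmutex_t slock;
 *
 *	mutex_init(&slock, MUTEX_DEFAULT, IPL_NONE);
 *	mutex_enter(&slock);
 *	... critical section ...
 *	mutex_exit(&slock);
 *	mutex_destroy(&slock);
 */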

#include "opt_multiprocessor.h"

#define	__MUTEX_PRIVATE

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_mutex.c,v 1.4.2.3 2007/02/27 16:54:22 yamt Exp $");

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/sched.h>
#include <sys/sleepq.h>
#include <sys/systm.h>
#include <sys/lockdebug.h>
#include <sys/kernel.h>

#include <dev/lockstat.h>

#include <machine/intr.h>

/*
 * When not running a debug kernel, spin mutexes are not much
 * more than an splraiseipl() and splx() pair.
 */

#if defined(DIAGNOSTIC) || defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
#define	FULL
#endif

/*
 * Debugging support.
 */

#define	MUTEX_WANTLOCK(mtx)					\
	LOCKDEBUG_WANTLOCK(MUTEX_GETID(mtx),			\
	    (uintptr_t)__builtin_return_address(0), 0)
#define	MUTEX_LOCKED(mtx)					\
	LOCKDEBUG_LOCKED(MUTEX_GETID(mtx),			\
	    (uintptr_t)__builtin_return_address(0), 0)
#define	MUTEX_UNLOCKED(mtx)					\
	LOCKDEBUG_UNLOCKED(MUTEX_GETID(mtx),			\
	    (uintptr_t)__builtin_return_address(0), 0)
#define	MUTEX_ABORT(mtx, msg)					\
	mutex_abort(mtx, __FUNCTION__, msg)

#if defined(LOCKDEBUG)

#define	MUTEX_DASSERT(mtx, cond)				\
do {								\
	if (!(cond))						\
		MUTEX_ABORT(mtx, "assertion failed: " #cond);	\
} while (/* CONSTCOND */ 0)

#else	/* LOCKDEBUG */

#define	MUTEX_DASSERT(mtx, cond)	/* nothing */

#endif	/* LOCKDEBUG */

#if defined(DIAGNOSTIC)

#define	MUTEX_ASSERT(mtx, cond)					\
do {								\
	if (!(cond))						\
		MUTEX_ABORT(mtx, "assertion failed: " #cond);	\
} while (/* CONSTCOND */ 0)

#else	/* DIAGNOSTIC */

#define	MUTEX_ASSERT(mtx, cond)	/* nothing */

#endif	/* DIAGNOSTIC */

/*
 * Spin mutex SPL save / restore.
 */
#ifndef MUTEX_COUNT_BIAS
#define	MUTEX_COUNT_BIAS	0
#endif

#define	MUTEX_SPIN_SPLRAISE(mtx)				\
do {								\
	struct cpu_info *x__ci = curcpu();			\
	int x__cnt, s;						\
	x__cnt = x__ci->ci_mtx_count--;				\
	s = splraiseipl(mtx->mtx_ipl);				\
	if (x__cnt == MUTEX_COUNT_BIAS)				\
		x__ci->ci_mtx_oldspl = (s);			\
} while (/* CONSTCOND */ 0)

#define	MUTEX_SPIN_SPLRESTORE(mtx)				\
do {								\
	struct cpu_info *x__ci = curcpu();			\
	int s = x__ci->ci_mtx_oldspl;				\
	__insn_barrier();					\
	if (++(x__ci->ci_mtx_count) == MUTEX_COUNT_BIAS)	\
		splx(s);					\
} while (/* CONSTCOND */ 0)
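
/*
 * Illustrative sketch of the nesting scheme above (assuming
 * MUTEX_COUNT_BIAS is 0): ci_mtx_count is decremented once per spin
 * mutex held, and only the outermost acquisition records the SPL to
 * restore.
 *
 *	mutex_spin_enter(&a);	ci_mtx_count:  0 -> -1, oldspl saved
 *	mutex_spin_enter(&b);	ci_mtx_count: -1 -> -2
 *	mutex_spin_exit(&b);	ci_mtx_count: -2 -> -1
 *	mutex_spin_exit(&a);	ci_mtx_count: -1 ->  0, splx(oldspl)
 */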

/*
 * For architectures that provide 'simple' mutexes: they provide a
 * CAS function that is either MP-safe, or does not need to be MP
 * safe.  Adaptive mutexes on these architectures do not require an
 * additional interlock.
 */
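
/*
 * Illustrative sketch (the real MUTEX_CAS() is machine-dependent):
 * it is expected to behave like an atomic compare-and-swap on the
 * owner word, returning non-zero on success:
 *
 *	if (*ptr == old) {
 *		*ptr = new;
 *		return 1;
 *	}
 *	return 0;
 */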

#ifdef __HAVE_SIMPLE_MUTEXES

#define	MUTEX_OWNER(owner)					\
	(owner & MUTEX_THREAD)
#define	MUTEX_OWNED(owner)					\
	(owner != 0)
#define	MUTEX_HAS_WAITERS(mtx)					\
	(((int)(mtx)->mtx_owner & MUTEX_BIT_WAITERS) != 0)

#define	MUTEX_INITIALIZE_ADAPTIVE(mtx, id)			\
do {								\
	(mtx)->mtx_id = (id);					\
} while (/* CONSTCOND */ 0)

#define	MUTEX_INITIALIZE_SPIN(mtx, id, ipl)			\
do {								\
	(mtx)->mtx_owner = MUTEX_BIT_SPIN;			\
	(mtx)->mtx_ipl = makeiplcookie((ipl));			\
	(mtx)->mtx_id = (id);					\
	__cpu_simple_lock_init(&(mtx)->mtx_lock);		\
} while (/* CONSTCOND */ 0)

#define	MUTEX_DESTROY(mtx)					\
do {								\
	(mtx)->mtx_owner = MUTEX_THREAD;			\
	(mtx)->mtx_id = -1;					\
} while (/* CONSTCOND */ 0)

#define	MUTEX_SPIN_P(mtx)					\
    (((mtx)->mtx_owner & MUTEX_BIT_SPIN) != 0)
#define	MUTEX_ADAPTIVE_P(mtx)					\
    (((mtx)->mtx_owner & MUTEX_BIT_SPIN) == 0)

#define	MUTEX_GETID(mtx)	((mtx)->mtx_id)

static inline int
MUTEX_ACQUIRE(kmutex_t *mtx, uintptr_t curthread)
{
	int rv;
	rv = MUTEX_CAS(&mtx->mtx_owner, 0UL, curthread);
	MUTEX_RECEIVE(mtx);
	return rv;
}

static inline int
MUTEX_SET_WAITERS(kmutex_t *mtx, uintptr_t owner)
{
	int rv;
	rv = MUTEX_CAS(&mtx->mtx_owner, owner, owner | MUTEX_BIT_WAITERS);
	MUTEX_RECEIVE(mtx);
	return rv;
}

static inline void
MUTEX_RELEASE(kmutex_t *mtx)
{
	MUTEX_GIVE(mtx);
	mtx->mtx_owner = 0;
}

static inline void
MUTEX_CLEAR_WAITERS(kmutex_t *mtx)
{
	/* nothing */
}
#endif	/* __HAVE_SIMPLE_MUTEXES */

/*
 * Patch in stubs via strong alias where they are not available.
 */

#if defined(LOCKDEBUG)
#undef	__HAVE_MUTEX_STUBS
#undef	__HAVE_SPIN_MUTEX_STUBS
#endif

#ifndef __HAVE_MUTEX_STUBS
__strong_alias(mutex_enter,mutex_vector_enter);
__strong_alias(mutex_exit,mutex_vector_exit);
#endif

#ifndef __HAVE_SPIN_MUTEX_STUBS
__strong_alias(mutex_spin_enter,mutex_vector_enter);
__strong_alias(mutex_spin_exit,mutex_vector_exit);
#endif
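
/*
 * Illustrative sketch (not an MD implementation; real stubs are
 * typically assembly): a fast-path mutex_enter() stub tries the
 * uncontended case and falls back to the vector routine on any
 * contention, roughly:
 *
 *	void
 *	mutex_enter(kmutex_t *mtx)
 *	{
 *		if (!MUTEX_ACQUIRE(mtx, (uintptr_t)curlwp))
 *			mutex_vector_enter(mtx);
 *	}
 */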

void	mutex_abort(kmutex_t *, const char *, const char *);
void	mutex_dump(volatile void *);
int	mutex_onproc(uintptr_t, struct cpu_info **);
static struct lwp *mutex_owner(wchan_t);

lockops_t mutex_spin_lockops = {
	"Mutex",
	0,
	mutex_dump
};

lockops_t mutex_adaptive_lockops = {
	"Mutex",
	1,
	mutex_dump
};

syncobj_t mutex_syncobj = {
	SOBJ_SLEEPQ_SORTED,
	turnstile_unsleep,
	turnstile_changepri,
	sleepq_lendpri,
	mutex_owner,
};

/*
 * mutex_dump:
 *
 *	Dump the contents of a mutex structure.
 */
void
mutex_dump(volatile void *cookie)
{
	volatile kmutex_t *mtx = cookie;

	printf_nolog("owner field : %#018lx wait/spin: %16d/%d\n",
	    (long)MUTEX_OWNER(mtx->mtx_owner), MUTEX_HAS_WAITERS(mtx),
	    MUTEX_SPIN_P(mtx));
}

/*
 * mutex_abort:
 *
 *	Dump information about an error and panic the system.  This
 *	generates a lot of machine code in the DIAGNOSTIC case, so
 *	we ask the compiler to not inline it.
 */

#if __GNUC_PREREQ__(3, 0)
__attribute ((noinline)) __attribute ((noreturn))
#endif
void
mutex_abort(kmutex_t *mtx, const char *func, const char *msg)
{

	LOCKDEBUG_ABORT(MUTEX_GETID(mtx), mtx, (MUTEX_SPIN_P(mtx) ?
	    &mutex_spin_lockops : &mutex_adaptive_lockops), func, msg);
	/* NOTREACHED */
}

/*
 * mutex_init:
 *
 *	Initialize a mutex for use.  Note that adaptive mutexes are in
 *	essence spin mutexes that can sleep to avoid deadlock and wasting
 *	CPU time.  We can't easily provide a type of mutex that always
 *	sleeps - see comments in mutex_vector_enter() about releasing
 *	mutexes unlocked.
 */
void
mutex_init(kmutex_t *mtx, kmutex_type_t type, int ipl)
{
	u_int id;

	memset(mtx, 0, sizeof(*mtx));

	if (type == MUTEX_DRIVER)
		type = (ipl == IPL_NONE ? MUTEX_ADAPTIVE : MUTEX_SPIN);

	switch (type) {
	case MUTEX_NODEBUG:
		KASSERT(ipl == IPL_NONE);
		id = LOCKDEBUG_ALLOC(mtx, NULL);
		MUTEX_INITIALIZE_ADAPTIVE(mtx, id);
		break;
	case MUTEX_ADAPTIVE:
	case MUTEX_DEFAULT:
		KASSERT(ipl == IPL_NONE);
		id = LOCKDEBUG_ALLOC(mtx, &mutex_adaptive_lockops);
		MUTEX_INITIALIZE_ADAPTIVE(mtx, id);
		break;
	case MUTEX_SPIN:
		id = LOCKDEBUG_ALLOC(mtx, &mutex_spin_lockops);
		MUTEX_INITIALIZE_SPIN(mtx, id, ipl);
		break;
	default:
		panic("mutex_init: impossible type");
		break;
	}
}
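
/*
 * Illustrative examples (lock names are hypothetical, not from this
 * file):
 *
 *	mutex_init(&slock, MUTEX_DEFAULT, IPL_NONE);	adaptive mutex
 *	mutex_init(&ilock, MUTEX_SPIN, IPL_HIGH);	spin mutex
 *
 * With MUTEX_DRIVER, the ipl argument alone selects the type, as in
 * the code above: IPL_NONE yields an adaptive mutex, anything else
 * a spin mutex.
 */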

/*
 * mutex_destroy:
 *
 *	Tear down a mutex.
 */
void
mutex_destroy(kmutex_t *mtx)
{

	if (MUTEX_ADAPTIVE_P(mtx)) {
		MUTEX_ASSERT(mtx, !MUTEX_OWNED(mtx->mtx_owner) &&
		    !MUTEX_HAS_WAITERS(mtx));
	} else {
		MUTEX_ASSERT(mtx, mtx->mtx_lock != __SIMPLELOCK_LOCKED);
	}

	LOCKDEBUG_FREE(mtx, MUTEX_GETID(mtx));
	MUTEX_DESTROY(mtx);
}

/*
 * mutex_onproc:
 *
 *	Return true if an adaptive mutex owner is running on a CPU in the
 *	system.  If the target is waiting on the kernel big lock, then we
 *	return false immediately.  This is necessary to avoid deadlock
 *	against the big lock.
 *
 *	Note that we can't use the mutex owner field as an LWP pointer.  We
 *	don't have full control over the timing of our execution, and so the
 *	pointer could be completely invalid by the time we dereference it.
 *
 *	XXX This should be optimised further to reduce potential cache line
 *	ping-ponging and skewing of the spin time while busy waiting.
 */
#ifdef MULTIPROCESSOR
int
mutex_onproc(uintptr_t owner, struct cpu_info **cip)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	struct lwp *l;

	if (!MUTEX_OWNED(owner))
		return 0;
	l = (struct lwp *)MUTEX_OWNER(owner);

	if ((ci = *cip) != NULL && ci->ci_curlwp == l)
		return ci->ci_biglock_wanted != l;

	for (CPU_INFO_FOREACH(cii, ci)) {
		if (ci->ci_curlwp == l) {
			*cip = ci;
			return ci->ci_biglock_wanted != l;
		}
	}

	*cip = NULL;
	return 0;
}
#endif

/*
 * mutex_vector_enter:
 *
 *	Support routine for mutex_enter() that must handle all cases.  In
 *	the LOCKDEBUG case, mutex_enter() is always aliased here, even if
 *	fast-path stubs are available.  If a mutex_spin_enter() stub is
 *	not available, then it is also aliased directly here.
 */
void
mutex_vector_enter(kmutex_t *mtx)
{
	uintptr_t owner, curthread;
	turnstile_t *ts;
#ifdef MULTIPROCESSOR
	struct cpu_info *ci = NULL;
	u_int count;
#endif
	LOCKSTAT_COUNTER(spincnt);
	LOCKSTAT_COUNTER(slpcnt);
	LOCKSTAT_TIMER(spintime);
	LOCKSTAT_TIMER(slptime);
	LOCKSTAT_FLAG(lsflag);

	/*
	 * Handle spin mutexes.
	 */
	if (MUTEX_SPIN_P(mtx)) {
#if defined(LOCKDEBUG) && defined(MULTIPROCESSOR)
		u_int spins = 0;
#endif
		MUTEX_SPIN_SPLRAISE(mtx);
		MUTEX_WANTLOCK(mtx);
#ifdef FULL
		if (__cpu_simple_lock_try(&mtx->mtx_lock)) {
			MUTEX_LOCKED(mtx);
			return;
		}
#if !defined(MULTIPROCESSOR)
		MUTEX_ABORT(mtx, "locking against myself");
#else /* !MULTIPROCESSOR */

		LOCKSTAT_ENTER(lsflag);
		LOCKSTAT_START_TIMER(lsflag, spintime);
		count = SPINLOCK_BACKOFF_MIN;

		/*
		 * Spin, testing the lock word and doing exponential
		 * backoff to reduce cache line ping-ponging between
		 * CPUs.
		 */
		do {
			if (panicstr != NULL)
				break;
			while (mtx->mtx_lock == __SIMPLELOCK_LOCKED) {
				SPINLOCK_BACKOFF(count);
#ifdef LOCKDEBUG
				if (SPINLOCK_SPINOUT(spins))
					MUTEX_ABORT(mtx, "spinout");
#endif	/* LOCKDEBUG */
			}
		} while (!__cpu_simple_lock_try(&mtx->mtx_lock));

		if (count != SPINLOCK_BACKOFF_MIN) {
			LOCKSTAT_STOP_TIMER(lsflag, spintime);
			LOCKSTAT_EVENT(lsflag, mtx,
			    LB_SPIN_MUTEX | LB_SPIN, 1, spintime);
		}
		LOCKSTAT_EXIT(lsflag);
#endif	/* !MULTIPROCESSOR */
#endif	/* FULL */
		MUTEX_LOCKED(mtx);
		return;
	}

	curthread = (uintptr_t)curlwp;

	MUTEX_DASSERT(mtx, MUTEX_ADAPTIVE_P(mtx));
	MUTEX_ASSERT(mtx, curthread != 0);
	MUTEX_WANTLOCK(mtx);

#ifdef LOCKDEBUG
	if (panicstr == NULL) {
		simple_lock_only_held(NULL, "mutex_enter");
#ifdef MULTIPROCESSOR
		LOCKDEBUG_BARRIER(&kernel_lock, 1);
#else
		LOCKDEBUG_BARRIER(NULL, 1);
#endif
	}
#endif

	LOCKSTAT_ENTER(lsflag);

	/*
	 * Adaptive mutex; spin trying to acquire the mutex.  If we
	 * determine that the owner is not running on a processor,
	 * then we stop spinning, and sleep instead.
	 */
	for (;;) {
		owner = mtx->mtx_owner;
		if (!MUTEX_OWNED(owner)) {
			/*
			 * A clear mutex owner field could mean two things:
			 *
			 *	* The mutex has been released.
			 *	* The owner field hasn't been set yet.
			 *
			 * Try to acquire it again.  If that fails,
			 * we'll just loop again.
			 */
			if (MUTEX_ACQUIRE(mtx, curthread))
				break;
			continue;
		}

		if (panicstr != NULL)
			return;
		if (MUTEX_OWNER(owner) == curthread)
			MUTEX_ABORT(mtx, "locking against myself");

#ifdef MULTIPROCESSOR
		/*
		 * Check to see if the owner is running on a processor.
		 * If so, then we should just spin, as the owner will
		 * likely release the lock very soon.
		 */
		if (mutex_onproc(owner, &ci)) {
			LOCKSTAT_START_TIMER(lsflag, spintime);
			count = SPINLOCK_BACKOFF_MIN;
			for (;;) {
				owner = mtx->mtx_owner;
				if (!mutex_onproc(owner, &ci))
					break;
				SPINLOCK_BACKOFF(count);
			}
			LOCKSTAT_STOP_TIMER(lsflag, spintime);
			LOCKSTAT_COUNT(spincnt, 1);
			if (!MUTEX_OWNED(owner))
				continue;
		}
#endif

		ts = turnstile_lookup(mtx);

		/*
		 * Once we have the turnstile chain interlock, mark the
		 * mutex as having waiters.  If that fails, spin again:
		 * chances are that the mutex has been released.
		 */
		if (!MUTEX_SET_WAITERS(mtx, owner)) {
			turnstile_exit(mtx);
			continue;
		}

#ifdef MULTIPROCESSOR
		/*
		 * mutex_exit() is permitted to release the mutex without
		 * any interlocking instructions, and the following can
		 * occur as a result:
		 *
		 * CPU 1: MUTEX_SET_WAITERS()	CPU 2: mutex_exit()
		 * ----------------------------	----------------------------
		 *	..			acquire cache line
		 *	..			test for waiters
		 *	acquire cache line <-	lose cache line
		 *	lock cache line		..
		 *	verify mutex is held	..
		 *	set waiters		..
		 *	unlock cache line	..
		 *	lose cache line	->	acquire cache line
		 *	..			clear lock word, waiters
		 *	return success
		 *
		 * There is another race that can occur: a third CPU could
		 * acquire the mutex as soon as it is released.  Since
		 * adaptive mutexes are primarily spin mutexes, this is not
		 * something that we need to worry about too much.  What we
		 * do need to ensure is that the waiters bit gets set.
		 *
		 * To allow the unlocked release, we need to make some
		 * assumptions here:
		 *
		 * o Release is the only non-atomic/unlocked operation
		 *   that can be performed on the mutex.  (It must still
		 *   be atomic on the local CPU, e.g. in case interrupted
		 *   or preempted).
		 *
		 * o At any given time, MUTEX_SET_WAITERS() can only ever
		 *   be in progress on one CPU in the system - guaranteed
		 *   by the turnstile chain lock.
		 *
		 * o No other operations other than MUTEX_SET_WAITERS()
		 *   and release can modify a mutex with a non-zero
		 *   owner field.
		 *
		 * o The result of a successful MUTEX_SET_WAITERS() call
		 *   is an unbuffered write that is immediately visible
		 *   to all other processors in the system.
		 *
		 * o If the holding LWP switches away, it posts a store
		 *   fence before changing curlwp, ensuring that any
		 *   overwrite of the mutex waiters flag by mutex_exit()
		 *   completes before the modification of curlwp becomes
		 *   visible to this CPU.
		 *
		 * o mi_switch() posts a store fence before setting curlwp
		 *   and before resuming execution of an LWP.
		 *
		 * o _kernel_lock() posts a store fence before setting
		 *   curcpu()->ci_biglock_wanted, and after clearing it.
		 *   This ensures that any overwrite of the mutex waiters
		 *   flag by mutex_exit() completes before the modification
		 *   of ci_biglock_wanted becomes visible.
		 *
		 * We now post a read memory barrier (after setting the
		 * waiters field) and check the lock holder's status again.
		 * Some of the possible outcomes (not an exhaustive list):
		 *
		 * 1. The onproc check returns true: the holding LWP is
		 *    running again.  The lock may be released soon and
		 *    we should spin.  Importantly, we can't trust the
		 *    value of the waiters flag.
		 *
		 * 2. The onproc check returns false: the holding LWP is
		 *    not running.  We now have the opportunity to check
		 *    if mutex_exit() has blatted the modifications made
		 *    by MUTEX_SET_WAITERS().
		 *
		 * 3. The onproc check returns false: the holding LWP may
		 *    or may not be running.  It has context switched at
		 *    some point during our check.  Again, we have the
		 *    chance to see if the waiters bit is still set or
		 *    has been overwritten.
		 *
		 * 4. The onproc check returns false: the holding LWP is
		 *    running on a CPU, but wants the big lock.  It's OK
		 *    to check the waiters field in this case.
		 *
		 * 5. The has-waiters check fails: the mutex has been
		 *    released, the waiters flag cleared and another LWP
		 *    now owns the mutex.
		 *
		 * 6. The has-waiters check fails: the mutex has been
		 *    released.
		 *
		 * If the waiters bit is not set it's unsafe to go to
		 * sleep, as we might never be awoken.
		 */
		mb_read();
		if (mutex_onproc(owner, &ci) || !MUTEX_HAS_WAITERS(mtx)) {
			turnstile_exit(mtx);
			continue;
		}
#endif	/* MULTIPROCESSOR */

		LOCKSTAT_START_TIMER(lsflag, slptime);

		turnstile_block(ts, TS_WRITER_Q, mtx, &mutex_syncobj);

		LOCKSTAT_STOP_TIMER(lsflag, slptime);
		LOCKSTAT_COUNT(slpcnt, 1);

		turnstile_unblock();
	}

	LOCKSTAT_EVENT(lsflag, mtx, LB_ADAPTIVE_MUTEX | LB_SLEEP1,
	    slpcnt, slptime);
	LOCKSTAT_EVENT(lsflag, mtx, LB_ADAPTIVE_MUTEX | LB_SPIN,
	    spincnt, spintime);
	LOCKSTAT_EXIT(lsflag);

	MUTEX_DASSERT(mtx, MUTEX_OWNER(mtx->mtx_owner) == curthread);
	MUTEX_LOCKED(mtx);
}

/*
 * mutex_vector_exit:
 *
 *	Support routine for mutex_exit() that handles all cases.
 */
void
mutex_vector_exit(kmutex_t *mtx)
{
	turnstile_t *ts;
	uintptr_t curthread;

	if (MUTEX_SPIN_P(mtx)) {
#ifdef FULL
		if (mtx->mtx_lock != __SIMPLELOCK_LOCKED)
			MUTEX_ABORT(mtx, "exiting unheld spin mutex");
		MUTEX_UNLOCKED(mtx);
		__cpu_simple_unlock(&mtx->mtx_lock);
#endif
		MUTEX_SPIN_SPLRESTORE(mtx);
		return;
	}

	if (__predict_false((uintptr_t)panicstr | cold)) {
		MUTEX_UNLOCKED(mtx);
		MUTEX_RELEASE(mtx);
		return;
	}

	curthread = (uintptr_t)curlwp;
	MUTEX_DASSERT(mtx, curthread != 0);
	MUTEX_ASSERT(mtx, MUTEX_OWNER(mtx->mtx_owner) == curthread);
	MUTEX_UNLOCKED(mtx);

	/*
	 * Get this lock's turnstile.  This gets the interlock on
	 * the sleep queue.  Once we have that, we can clear the
	 * lock.  If there was no turnstile for the lock, there
	 * were no waiters remaining.
	 */
	ts = turnstile_lookup(mtx);

	if (ts == NULL) {
		MUTEX_RELEASE(mtx);
		turnstile_exit(mtx);
	} else {
		MUTEX_RELEASE(mtx);
		turnstile_wakeup(ts, TS_WRITER_Q,
		    TS_WAITERS(ts, TS_WRITER_Q), NULL);
	}
}

#ifndef __HAVE_SIMPLE_MUTEXES
/*
 * mutex_wakeup:
 *
 *	Support routine for mutex_exit() that wakes up all waiters.
 *	We assume that the mutex has been released, but it need not
 *	be.
 */
void
mutex_wakeup(kmutex_t *mtx)
{
	turnstile_t *ts;

	ts = turnstile_lookup(mtx);
	if (ts == NULL) {
		turnstile_exit(mtx);
		return;
	}
	MUTEX_CLEAR_WAITERS(mtx);
	turnstile_wakeup(ts, TS_WRITER_Q, TS_WAITERS(ts, TS_WRITER_Q), NULL);
}
#endif	/* !__HAVE_SIMPLE_MUTEXES */

/*
 * mutex_owned:
 *
 *	Return true if the current LWP (adaptive) or CPU (spin)
 *	holds the mutex.
 */
int
mutex_owned(kmutex_t *mtx)
{

	if (MUTEX_ADAPTIVE_P(mtx))
		return MUTEX_OWNER(mtx->mtx_owner) == (uintptr_t)curlwp;
#ifdef FULL
	return mtx->mtx_lock == __SIMPLELOCK_LOCKED;
#else
	return 1;
#endif
}
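
/*
 * Typical use is in assertions, e.g. (illustrative, hypothetical
 * lock):
 *
 *	KASSERT(mutex_owned(&slock));
 *
 * Note that without FULL this always returns true for spin mutexes,
 * so mutex_owned() is only dependable as a positive assertion.
 */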

/*
 * mutex_owner:
 *
 *	Return the current owner of an adaptive mutex.  Used for
 *	priority inheritance.
 */
static struct lwp *
mutex_owner(wchan_t obj)
{
	kmutex_t *mtx = (void *)(uintptr_t)obj; /* discard qualifiers */

	MUTEX_ASSERT(mtx, MUTEX_ADAPTIVE_P(mtx));
	return (struct lwp *)MUTEX_OWNER(mtx->mtx_owner);
}

/*
 * mutex_tryenter:
 *
 *	Try to acquire the mutex; return non-zero if we did.
 */
int
mutex_tryenter(kmutex_t *mtx)
{
	uintptr_t curthread;

	/*
	 * Handle spin mutexes.
	 */
	if (MUTEX_SPIN_P(mtx)) {
		MUTEX_SPIN_SPLRAISE(mtx);
#ifdef FULL
		if (__cpu_simple_lock_try(&mtx->mtx_lock)) {
			MUTEX_WANTLOCK(mtx);
			MUTEX_LOCKED(mtx);
			return 1;
		}
		MUTEX_SPIN_SPLRESTORE(mtx);
#else
		MUTEX_WANTLOCK(mtx);
		MUTEX_LOCKED(mtx);
		return 1;
#endif
	} else {
		curthread = (uintptr_t)curlwp;
		MUTEX_ASSERT(mtx, curthread != 0);
		if (MUTEX_ACQUIRE(mtx, curthread)) {
			MUTEX_WANTLOCK(mtx);
			MUTEX_LOCKED(mtx);
			MUTEX_DASSERT(mtx,
			    MUTEX_OWNER(mtx->mtx_owner) == curthread);
			return 1;
		}
	}

	return 0;
}
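
/*
 * Illustrative usage (hypothetical lock; not from this file): callers
 * must be prepared for failure rather than spinning on the result.
 *
 *	if (mutex_tryenter(&slock)) {
 *		... critical section ...
 *		mutex_exit(&slock);
 *	} else {
 *		... defer the work or take a slow path ...
 *	}
 */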

#if defined(__HAVE_SPIN_MUTEX_STUBS) || defined(FULL)
/*
 * mutex_spin_retry:
 *
 *	Support routine for mutex_spin_enter().  Assumes that the caller
 *	has already raised the SPL, and adjusted counters.
 */
void
mutex_spin_retry(kmutex_t *mtx)
{
#ifdef MULTIPROCESSOR
	u_int count;
	LOCKSTAT_TIMER(spintime);
	LOCKSTAT_FLAG(lsflag);
#ifdef LOCKDEBUG
	u_int spins = 0;
#endif	/* LOCKDEBUG */

	MUTEX_WANTLOCK(mtx);

	LOCKSTAT_ENTER(lsflag);
	LOCKSTAT_START_TIMER(lsflag, spintime);
	count = SPINLOCK_BACKOFF_MIN;

	/*
	 * Spin, testing the lock word and doing exponential backoff
	 * to reduce cache line ping-ponging between CPUs.
	 */
	do {
		if (panicstr != NULL)
			break;
		while (mtx->mtx_lock == __SIMPLELOCK_LOCKED) {
			SPINLOCK_BACKOFF(count);
#ifdef LOCKDEBUG
			if (SPINLOCK_SPINOUT(spins))
				MUTEX_ABORT(mtx, "spinout");
#endif	/* LOCKDEBUG */
		}
	} while (!__cpu_simple_lock_try(&mtx->mtx_lock));

	LOCKSTAT_STOP_TIMER(lsflag, spintime);
	LOCKSTAT_EVENT(lsflag, mtx, LB_SPIN_MUTEX | LB_SPIN, 1, spintime);
	LOCKSTAT_EXIT(lsflag);

	MUTEX_LOCKED(mtx);
#else	/* MULTIPROCESSOR */
	MUTEX_ABORT(mtx, "locking against myself");
#endif	/* MULTIPROCESSOR */
}
#endif	/* defined(__HAVE_SPIN_MUTEX_STUBS) || defined(FULL) */