Annotation of src/sys/kern/kern_mutex.c, Revision 1.45
1.45 ! rmind 1: /* $NetBSD: kern_mutex.c,v 1.44 2008/10/15 06:51:20 wrstuden Exp $ */
1.2 ad 2:
3: /*-
1.30 ad 4: * Copyright (c) 2002, 2006, 2007, 2008 The NetBSD Foundation, Inc.
1.2 ad 5: * All rights reserved.
6: *
7: * This code is derived from software contributed to The NetBSD Foundation
8: * by Jason R. Thorpe and Andrew Doran.
9: *
10: * Redistribution and use in source and binary forms, with or without
11: * modification, are permitted provided that the following conditions
12: * are met:
13: * 1. Redistributions of source code must retain the above copyright
14: * notice, this list of conditions and the following disclaimer.
15: * 2. Redistributions in binary form must reproduce the above copyright
16: * notice, this list of conditions and the following disclaimer in the
17: * documentation and/or other materials provided with the distribution.
18: *
19: * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20: * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21: * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22: * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23: * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24: * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25: * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26: * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27: * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28: * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29: * POSSIBILITY OF SUCH DAMAGE.
30: */
31:
32: /*
33: * Kernel mutex implementation, modeled after those found in Solaris,
34: * a description of which can be found in:
35: *
36: * Solaris Internals: Core Kernel Architecture, Jim Mauro and
37: * Richard McDougall.
38: */
39:
40: #define __MUTEX_PRIVATE
41:
42: #include <sys/cdefs.h>
1.45 ! rmind 43: __KERNEL_RCSID(0, "$NetBSD: kern_mutex.c,v 1.44 2008/10/15 06:51:20 wrstuden Exp $");
1.2 ad 44:
45: #include <sys/param.h>
46: #include <sys/proc.h>
47: #include <sys/mutex.h>
48: #include <sys/sched.h>
49: #include <sys/sleepq.h>
50: #include <sys/systm.h>
51: #include <sys/lockdebug.h>
52: #include <sys/kernel.h>
1.24 ad 53: #include <sys/atomic.h>
54: #include <sys/intr.h>
1.29 xtraeme 55: #include <sys/lock.h>
1.31 ad 56: #include <sys/pool.h>
1.2 ad 57:
58: #include <dev/lockstat.h>
59:
1.28 ad 60: #include <machine/lock.h>
61:
1.44 wrstuden 62: #include "opt_sa.h"
63:
1.2 ad 64: /*
65: * When not running a debug kernel, spin mutexes are not much
66: * more than an splraiseipl() and splx() pair.
67: */
68:
69: #if defined(DIAGNOSTIC) || defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
70: #define FULL
71: #endif
72:
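/*
 * Illustrative sketch only (not part of this file's API): with FULL
 * left undefined, taking and releasing a spin mutex reduces to little
 * more than saving and restoring the SPL.  The mutex "m" below is
 * hypothetical.
 *
 *	kmutex_t m;
 *
 *	mutex_init(&m, MUTEX_SPIN, IPL_VM);
 *	mutex_spin_enter(&m);		roughly splraiseipl(m.mtx_ipl)
 *	... critical section ...
 *	mutex_spin_exit(&m);		roughly splx() of the saved SPL
 */
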
73: /*
74: * Debugging support.
75: */
76:
77: #define MUTEX_WANTLOCK(mtx) \
1.23 yamt 78: LOCKDEBUG_WANTLOCK(MUTEX_DEBUG_P(mtx), (mtx), \
1.40 ad 79: (uintptr_t)__builtin_return_address(0), false, false)
1.2 ad 80: #define MUTEX_LOCKED(mtx) \
1.42 ad 81: LOCKDEBUG_LOCKED(MUTEX_DEBUG_P(mtx), (mtx), NULL, \
1.2 ad 82: (uintptr_t)__builtin_return_address(0), 0)
83: #define MUTEX_UNLOCKED(mtx) \
1.23 yamt 84: LOCKDEBUG_UNLOCKED(MUTEX_DEBUG_P(mtx), (mtx), \
1.2 ad 85: (uintptr_t)__builtin_return_address(0), 0)
86: #define MUTEX_ABORT(mtx, msg) \
1.17 ad 87: mutex_abort(mtx, __func__, msg)
1.2 ad 88:
89: #if defined(LOCKDEBUG)
90:
91: #define MUTEX_DASSERT(mtx, cond) \
92: do { \
93: if (!(cond)) \
94: MUTEX_ABORT(mtx, "assertion failed: " #cond); \
95: } while (/* CONSTCOND */ 0)
96:
97: #else /* LOCKDEBUG */
98:
99: #define MUTEX_DASSERT(mtx, cond) /* nothing */
100:
101: #endif /* LOCKDEBUG */
102:
103: #if defined(DIAGNOSTIC)
104:
105: #define MUTEX_ASSERT(mtx, cond) \
106: do { \
107: if (!(cond)) \
108: MUTEX_ABORT(mtx, "assertion failed: " #cond); \
109: } while (/* CONSTCOND */ 0)
110:
111: #else /* DIAGNOSTIC */
112:
113: #define MUTEX_ASSERT(mtx, cond) /* nothing */
114:
115: #endif /* DIAGNOSTIC */
116:
117: /*
118: * Spin mutex SPL save / restore.
119: */
1.12 matt 120: #ifndef MUTEX_COUNT_BIAS
121: #define MUTEX_COUNT_BIAS 0
122: #endif
1.2 ad 123:
124: #define MUTEX_SPIN_SPLRAISE(mtx) \
125: do { \
1.36 ad 126: struct cpu_info *x__ci; \
1.2 ad 127: int x__cnt, s; \
1.36 ad 128: s = splraiseipl(mtx->mtx_ipl); \
129: x__ci = curcpu(); \
1.2 ad 130: x__cnt = x__ci->ci_mtx_count--; \
1.37 ad 131: __insn_barrier(); \
1.12 matt 132: if (x__cnt == MUTEX_COUNT_BIAS) \
1.2 ad 133: x__ci->ci_mtx_oldspl = (s); \
134: } while (/* CONSTCOND */ 0)
135:
136: #define MUTEX_SPIN_SPLRESTORE(mtx) \
137: do { \
138: struct cpu_info *x__ci = curcpu(); \
139: int s = x__ci->ci_mtx_oldspl; \
140: __insn_barrier(); \
1.12 matt 141: if (++(x__ci->ci_mtx_count) == MUTEX_COUNT_BIAS) \
1.2 ad 142: splx(s); \
143: } while (/* CONSTCOND */ 0)
144:
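/*
 * Nesting sketch (illustrative, with hypothetical mutexes "a" and "b"):
 * ci_mtx_count is decremented for every spin mutex acquired, so only
 * the outermost acquisition records the old SPL and only the final
 * release restores it.
 *
 *	mutex_spin_enter(&a);	count: bias -> bias - 1, old SPL saved
 *	mutex_spin_enter(&b);	count: bias - 1 -> bias - 2, SPL stays raised
 *	mutex_spin_exit(&b);	count back to bias - 1, SPL still raised
 *	mutex_spin_exit(&a);	count back to bias, splx() of the saved SPL
 */
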
145: /*
146: * For architectures that provide 'simple' mutexes: they provide a
 147: * CAS function that is either MP-safe, or that does not need to be
 148: * MP-safe.  Adaptive mutexes on these architectures do not require an
149: * additional interlock.
150: */
151:
152: #ifdef __HAVE_SIMPLE_MUTEXES
153:
154: #define MUTEX_OWNER(owner) \
155: (owner & MUTEX_THREAD)
156: #define MUTEX_HAS_WAITERS(mtx) \
157: (((int)(mtx)->mtx_owner & MUTEX_BIT_WAITERS) != 0)
158:
1.23 yamt 159: #define MUTEX_INITIALIZE_ADAPTIVE(mtx, dodebug) \
1.2 ad 160: do { \
1.23 yamt 161: if (dodebug) \
162: (mtx)->mtx_owner |= MUTEX_BIT_DEBUG; \
1.2 ad 163: } while (/* CONSTCOND */ 0)
164:
1.23 yamt 165: #define MUTEX_INITIALIZE_SPIN(mtx, dodebug, ipl) \
1.2 ad 166: do { \
167: (mtx)->mtx_owner = MUTEX_BIT_SPIN; \
1.23 yamt 168: if (dodebug) \
169: (mtx)->mtx_owner |= MUTEX_BIT_DEBUG; \
1.2 ad 170: (mtx)->mtx_ipl = makeiplcookie((ipl)); \
171: __cpu_simple_lock_init(&(mtx)->mtx_lock); \
172: } while (/* CONSTCOND */ 0)
173:
174: #define MUTEX_DESTROY(mtx) \
175: do { \
176: (mtx)->mtx_owner = MUTEX_THREAD; \
 177: } while (/* CONSTCOND */ 0)
178:
179: #define MUTEX_SPIN_P(mtx) \
180: (((mtx)->mtx_owner & MUTEX_BIT_SPIN) != 0)
181: #define MUTEX_ADAPTIVE_P(mtx) \
182: (((mtx)->mtx_owner & MUTEX_BIT_SPIN) == 0)
183:
1.23 yamt 184: #define MUTEX_DEBUG_P(mtx) (((mtx)->mtx_owner & MUTEX_BIT_DEBUG) != 0)
185: #if defined(LOCKDEBUG)
186: #define MUTEX_OWNED(owner) (((owner) & ~MUTEX_BIT_DEBUG) != 0)
187: #define MUTEX_INHERITDEBUG(new, old) (new) |= (old) & MUTEX_BIT_DEBUG
188: #else /* defined(LOCKDEBUG) */
189: #define MUTEX_OWNED(owner) ((owner) != 0)
190: #define MUTEX_INHERITDEBUG(new, old) /* nothing */
191: #endif /* defined(LOCKDEBUG) */
1.2 ad 192:
193: static inline int
194: MUTEX_ACQUIRE(kmutex_t *mtx, uintptr_t curthread)
195: {
196: int rv;
1.23 yamt 197: uintptr_t old = 0;
198: uintptr_t new = curthread;
199:
200: MUTEX_INHERITDEBUG(old, mtx->mtx_owner);
201: MUTEX_INHERITDEBUG(new, old);
202: rv = MUTEX_CAS(&mtx->mtx_owner, old, new);
1.7 itohy 203: MUTEX_RECEIVE(mtx);
1.2 ad 204: return rv;
205: }
206:
207: static inline int
208: MUTEX_SET_WAITERS(kmutex_t *mtx, uintptr_t owner)
209: {
210: int rv;
211: rv = MUTEX_CAS(&mtx->mtx_owner, owner, owner | MUTEX_BIT_WAITERS);
1.7 itohy 212: MUTEX_RECEIVE(mtx);
1.2 ad 213: return rv;
214: }
215:
216: static inline void
217: MUTEX_RELEASE(kmutex_t *mtx)
218: {
1.23 yamt 219: uintptr_t new;
220:
1.7 itohy 221: MUTEX_GIVE(mtx);
1.23 yamt 222: new = 0;
223: MUTEX_INHERITDEBUG(new, mtx->mtx_owner);
224: mtx->mtx_owner = new;
1.2 ad 225: }
1.4 ad 226:
227: static inline void
228: MUTEX_CLEAR_WAITERS(kmutex_t *mtx)
229: {
230: /* nothing */
231: }
1.2 ad 232: #endif /* __HAVE_SIMPLE_MUTEXES */
233:
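/*
 * For reference, a port that defines __HAVE_SIMPLE_MUTEXES supplies a
 * MUTEX_CAS() hook in its machine headers.  A minimal sketch of such a
 * hook, assuming it is built from atomic_cas_ulong() (an assumption,
 * not a quote of any particular port):
 *
 *	#define	MUTEX_CAS(p, o, n)					\
 *		(atomic_cas_ulong((volatile unsigned long *)(p),	\
 *		    (unsigned long)(o), (unsigned long)(n)) == (o))
 *
 * That is, a compare-and-swap on the owner word that reports whether
 * the expected value was seen, which is all that MUTEX_ACQUIRE() and
 * MUTEX_SET_WAITERS() above require.
 */
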
234: /*
235: * Patch in stubs via strong alias where they are not available.
236: */
237:
238: #if defined(LOCKDEBUG)
239: #undef __HAVE_MUTEX_STUBS
240: #undef __HAVE_SPIN_MUTEX_STUBS
241: #endif
242:
243: #ifndef __HAVE_MUTEX_STUBS
1.8 itohy 244: __strong_alias(mutex_enter,mutex_vector_enter);
245: __strong_alias(mutex_exit,mutex_vector_exit);
1.2 ad 246: #endif
247:
248: #ifndef __HAVE_SPIN_MUTEX_STUBS
1.8 itohy 249: __strong_alias(mutex_spin_enter,mutex_vector_enter);
250: __strong_alias(mutex_spin_exit,mutex_vector_exit);
1.2 ad 251: #endif
252:
253: void mutex_abort(kmutex_t *, const char *, const char *);
254: void mutex_dump(volatile void *);
255: int mutex_onproc(uintptr_t, struct cpu_info **);
256:
257: lockops_t mutex_spin_lockops = {
258: "Mutex",
1.42 ad 259: LOCKOPS_SPIN,
1.2 ad 260: mutex_dump
261: };
262:
263: lockops_t mutex_adaptive_lockops = {
264: "Mutex",
1.42 ad 265: LOCKOPS_SLEEP,
1.2 ad 266: mutex_dump
267: };
268:
1.5 yamt 269: syncobj_t mutex_syncobj = {
270: SOBJ_SLEEPQ_SORTED,
271: turnstile_unsleep,
272: turnstile_changepri,
273: sleepq_lendpri,
1.27 ad 274: (void *)mutex_owner,
1.5 yamt 275: };
276:
1.31 ad 277: /* Mutex cache */
278: #define MUTEX_OBJ_MAGIC 0x5aa3c85d
279: struct kmutexobj {
280: kmutex_t mo_lock;
281: u_int mo_magic;
282: u_int mo_refcnt;
283: };
284:
285: static int mutex_obj_ctor(void *, void *, int);
286:
287: static pool_cache_t mutex_obj_cache;
288:
1.2 ad 289: /*
290: * mutex_dump:
291: *
292: * Dump the contents of a mutex structure.
293: */
294: void
295: mutex_dump(volatile void *cookie)
296: {
297: volatile kmutex_t *mtx = cookie;
298:
299: printf_nolog("owner field : %#018lx wait/spin: %16d/%d\n",
300: (long)MUTEX_OWNER(mtx->mtx_owner), MUTEX_HAS_WAITERS(mtx),
301: MUTEX_SPIN_P(mtx));
302: }
303:
304: /*
305: * mutex_abort:
306: *
1.3 ad 307: * Dump information about an error and panic the system. This
308: * generates a lot of machine code in the DIAGNOSTIC case, so
309: * we ask the compiler to not inline it.
1.2 ad 310: */
1.43 ad 311: void __noinline
1.2 ad 312: mutex_abort(kmutex_t *mtx, const char *func, const char *msg)
313: {
314:
1.23 yamt 315: LOCKDEBUG_ABORT(mtx, (MUTEX_SPIN_P(mtx) ?
1.3 ad 316: &mutex_spin_lockops : &mutex_adaptive_lockops), func, msg);
1.2 ad 317: }
318:
319: /*
320: * mutex_init:
321: *
322: * Initialize a mutex for use. Note that adaptive mutexes are in
323: * essence spin mutexes that can sleep to avoid deadlock and wasting
324: * CPU time. We can't easily provide a type of mutex that always
325: * sleeps - see comments in mutex_vector_enter() about releasing
326: * mutexes unlocked.
327: */
328: void
329: mutex_init(kmutex_t *mtx, kmutex_type_t type, int ipl)
330: {
1.23 yamt 331: bool dodebug;
1.2 ad 332:
333: memset(mtx, 0, sizeof(*mtx));
334:
1.15 ad 335: switch (type) {
336: case MUTEX_ADAPTIVE:
337: KASSERT(ipl == IPL_NONE);
338: break;
1.22 ad 339: case MUTEX_DEFAULT:
1.15 ad 340: case MUTEX_DRIVER:
1.26 ad 341: if (ipl == IPL_NONE || ipl == IPL_SOFTCLOCK ||
342: ipl == IPL_SOFTBIO || ipl == IPL_SOFTNET ||
343: ipl == IPL_SOFTSERIAL) {
1.22 ad 344: type = MUTEX_ADAPTIVE;
1.26 ad 345: } else {
1.22 ad 346: type = MUTEX_SPIN;
347: }
1.15 ad 348: break;
349: default:
350: break;
351: }
1.2 ad 352:
353: switch (type) {
1.11 ad 354: case MUTEX_NODEBUG:
1.23 yamt 355: dodebug = LOCKDEBUG_ALLOC(mtx, NULL,
1.19 ad 356: (uintptr_t)__builtin_return_address(0));
1.23 yamt 357: MUTEX_INITIALIZE_SPIN(mtx, dodebug, ipl);
1.11 ad 358: break;
1.2 ad 359: case MUTEX_ADAPTIVE:
1.23 yamt 360: dodebug = LOCKDEBUG_ALLOC(mtx, &mutex_adaptive_lockops,
1.19 ad 361: (uintptr_t)__builtin_return_address(0));
1.23 yamt 362: MUTEX_INITIALIZE_ADAPTIVE(mtx, dodebug);
1.2 ad 363: break;
364: case MUTEX_SPIN:
1.23 yamt 365: dodebug = LOCKDEBUG_ALLOC(mtx, &mutex_spin_lockops,
1.19 ad 366: (uintptr_t)__builtin_return_address(0));
1.23 yamt 367: MUTEX_INITIALIZE_SPIN(mtx, dodebug, ipl);
1.2 ad 368: break;
369: default:
370: panic("mutex_init: impossible type");
371: break;
372: }
373: }
374:
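/*
 * Usage sketch (hypothetical locks and IPLs): most callers pass
 * MUTEX_DEFAULT and let the IPL pick the underlying type, as the
 * switch above shows.
 *
 *	kmutex_t sc_lock, sc_intr_lock;
 *
 *	mutex_init(&sc_lock, MUTEX_DEFAULT, IPL_NONE);	   becomes adaptive
 *	mutex_init(&sc_intr_lock, MUTEX_DEFAULT, IPL_VM);  becomes a spin mutex
 *	...
 *	mutex_destroy(&sc_intr_lock);
 *	mutex_destroy(&sc_lock);
 */
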
375: /*
376: * mutex_destroy:
377: *
378: * Tear down a mutex.
379: */
380: void
381: mutex_destroy(kmutex_t *mtx)
382: {
383:
384: if (MUTEX_ADAPTIVE_P(mtx)) {
385: MUTEX_ASSERT(mtx, !MUTEX_OWNED(mtx->mtx_owner) &&
386: !MUTEX_HAS_WAITERS(mtx));
387: } else {
1.16 skrll 388: MUTEX_ASSERT(mtx, !__SIMPLELOCK_LOCKED_P(&mtx->mtx_lock));
1.2 ad 389: }
390:
1.23 yamt 391: LOCKDEBUG_FREE(MUTEX_DEBUG_P(mtx), mtx);
1.2 ad 392: MUTEX_DESTROY(mtx);
393: }
394:
395: /*
396: * mutex_onproc:
397: *
398: * Return true if an adaptive mutex owner is running on a CPU in the
399: * system. If the target is waiting on the kernel big lock, then we
1.15 ad 400: * must release it. This is necessary to avoid deadlock.
1.2 ad 401: *
402: * Note that we can't use the mutex owner field as an LWP pointer. We
403: * don't have full control over the timing of our execution, and so the
404: * pointer could be completely invalid by the time we dereference it.
405: */
406: #ifdef MULTIPROCESSOR
407: int
408: mutex_onproc(uintptr_t owner, struct cpu_info **cip)
409: {
410: CPU_INFO_ITERATOR cii;
411: struct cpu_info *ci;
412: struct lwp *l;
413:
414: if (!MUTEX_OWNED(owner))
415: return 0;
416: l = (struct lwp *)MUTEX_OWNER(owner);
417:
1.15 ad 418: /* See if the target is running on a CPU somewhere. */
1.10 ad 419: if ((ci = *cip) != NULL && ci->ci_curlwp == l)
1.15 ad 420: goto run;
421: for (CPU_INFO_FOREACH(cii, ci))
422: if (ci->ci_curlwp == l)
423: goto run;
1.2 ad 424:
1.15 ad 425: /* No: it may be safe to block now. */
1.2 ad 426: *cip = NULL;
427: return 0;
1.15 ad 428:
429: run:
430: /* Target is running; do we need to block? */
431: *cip = ci;
432: return ci->ci_biglock_wanted != l;
1.2 ad 433: }
1.15 ad 434: #endif /* MULTIPROCESSOR */
1.2 ad 435:
436: /*
437: * mutex_vector_enter:
438: *
1.45 ! rmind 439: * Support routine for mutex_enter() that must handle all cases. In
1.2 ad 440: * the LOCKDEBUG case, mutex_enter() is always aliased here, even if
 441: * fast-path stubs are available. If a mutex_spin_enter() stub is
442: * not available, then it is also aliased directly here.
443: */
444: void
445: mutex_vector_enter(kmutex_t *mtx)
446: {
447: uintptr_t owner, curthread;
448: turnstile_t *ts;
449: #ifdef MULTIPROCESSOR
450: struct cpu_info *ci = NULL;
451: u_int count;
452: #endif
1.44 wrstuden 453: #ifdef KERN_SA
454: int f;
455: #endif
1.2 ad 456: LOCKSTAT_COUNTER(spincnt);
457: LOCKSTAT_COUNTER(slpcnt);
458: LOCKSTAT_TIMER(spintime);
459: LOCKSTAT_TIMER(slptime);
460: LOCKSTAT_FLAG(lsflag);
461:
462: /*
463: * Handle spin mutexes.
464: */
465: if (MUTEX_SPIN_P(mtx)) {
466: #if defined(LOCKDEBUG) && defined(MULTIPROCESSOR)
467: u_int spins = 0;
468: #endif
469: MUTEX_SPIN_SPLRAISE(mtx);
470: MUTEX_WANTLOCK(mtx);
471: #ifdef FULL
472: if (__cpu_simple_lock_try(&mtx->mtx_lock)) {
473: MUTEX_LOCKED(mtx);
474: return;
475: }
476: #if !defined(MULTIPROCESSOR)
477: MUTEX_ABORT(mtx, "locking against myself");
478: #else /* !MULTIPROCESSOR */
479:
480: LOCKSTAT_ENTER(lsflag);
481: LOCKSTAT_START_TIMER(lsflag, spintime);
482: count = SPINLOCK_BACKOFF_MIN;
483:
484: /*
 485: * Spin, testing the lock word and doing exponential backoff
486: * to reduce cache line ping-ponging between CPUs.
487: */
488: do {
489: if (panicstr != NULL)
490: break;
1.16 skrll 491: while (__SIMPLELOCK_LOCKED_P(&mtx->mtx_lock)) {
1.2 ad 492: SPINLOCK_BACKOFF(count);
493: #ifdef LOCKDEBUG
494: if (SPINLOCK_SPINOUT(spins))
495: MUTEX_ABORT(mtx, "spinout");
496: #endif /* LOCKDEBUG */
497: }
498: } while (!__cpu_simple_lock_try(&mtx->mtx_lock));
499:
500: if (count != SPINLOCK_BACKOFF_MIN) {
501: LOCKSTAT_STOP_TIMER(lsflag, spintime);
502: LOCKSTAT_EVENT(lsflag, mtx,
503: LB_SPIN_MUTEX | LB_SPIN, 1, spintime);
504: }
505: LOCKSTAT_EXIT(lsflag);
506: #endif /* !MULTIPROCESSOR */
507: #endif /* FULL */
508: MUTEX_LOCKED(mtx);
509: return;
510: }
511:
512: curthread = (uintptr_t)curlwp;
513:
514: MUTEX_DASSERT(mtx, MUTEX_ADAPTIVE_P(mtx));
515: MUTEX_ASSERT(mtx, curthread != 0);
516: MUTEX_WANTLOCK(mtx);
517:
518: if (panicstr == NULL) {
519: LOCKDEBUG_BARRIER(&kernel_lock, 1);
520: }
521:
522: LOCKSTAT_ENTER(lsflag);
523:
524: /*
525: * Adaptive mutex; spin trying to acquire the mutex. If we
526: * determine that the owner is not running on a processor,
527: * then we stop spinning, and sleep instead.
528: */
1.34 ad 529: for (owner = mtx->mtx_owner;;) {
1.2 ad 530: if (!MUTEX_OWNED(owner)) {
531: /*
532: * Mutex owner clear could mean two things:
533: *
534: * * The mutex has been released.
535: * * The owner field hasn't been set yet.
536: *
537: * Try to acquire it again. If that fails,
538: * we'll just loop again.
539: */
540: if (MUTEX_ACQUIRE(mtx, curthread))
541: break;
1.34 ad 542: owner = mtx->mtx_owner;
1.2 ad 543: continue;
544: }
545:
1.45 ! rmind 546: if (__predict_false(panicstr != NULL))
1.2 ad 547: return;
1.45 ! rmind 548: if (__predict_false(MUTEX_OWNER(owner) == curthread))
1.2 ad 549: MUTEX_ABORT(mtx, "locking against myself");
550:
551: #ifdef MULTIPROCESSOR
552: /*
553: * Check to see if the owner is running on a processor.
554: * If so, then we should just spin, as the owner will
555: * likely release the lock very soon.
556: */
557: if (mutex_onproc(owner, &ci)) {
558: LOCKSTAT_START_TIMER(lsflag, spintime);
559: count = SPINLOCK_BACKOFF_MIN;
560: for (;;) {
1.34 ad 561: SPINLOCK_BACKOFF(count);
1.2 ad 562: owner = mtx->mtx_owner;
563: if (!mutex_onproc(owner, &ci))
564: break;
565: }
566: LOCKSTAT_STOP_TIMER(lsflag, spintime);
567: LOCKSTAT_COUNT(spincnt, 1);
568: if (!MUTEX_OWNED(owner))
569: continue;
570: }
571: #endif
572:
573: ts = turnstile_lookup(mtx);
574:
575: /*
576: * Once we have the turnstile chain interlock, mark the
 577: * mutex as having waiters. If that fails, spin again:
578: * chances are that the mutex has been released.
579: */
580: if (!MUTEX_SET_WAITERS(mtx, owner)) {
581: turnstile_exit(mtx);
1.34 ad 582: owner = mtx->mtx_owner;
1.2 ad 583: continue;
584: }
585:
586: #ifdef MULTIPROCESSOR
587: /*
588: * mutex_exit() is permitted to release the mutex without
589: * any interlocking instructions, and the following can
590: * occur as a result:
591: *
592: * CPU 1: MUTEX_SET_WAITERS() CPU2: mutex_exit()
593: * ---------------------------- ----------------------------
594: * .. acquire cache line
595: * .. test for waiters
596: * acquire cache line <- lose cache line
597: * lock cache line ..
598: * verify mutex is held ..
599: * set waiters ..
600: * unlock cache line ..
601: * lose cache line -> acquire cache line
602: * .. clear lock word, waiters
603: * return success
604: *
 605: * There is another race that can occur: a third CPU could
606: * acquire the mutex as soon as it is released. Since
607: * adaptive mutexes are primarily spin mutexes, this is not
608: * something that we need to worry about too much. What we
609: * do need to ensure is that the waiters bit gets set.
610: *
611: * To allow the unlocked release, we need to make some
612: * assumptions here:
613: *
614: * o Release is the only non-atomic/unlocked operation
615: * that can be performed on the mutex. (It must still
 616: * be atomic on the local CPU, e.g. in case it is interrupted
617: * or preempted).
618: *
619: * o At any given time, MUTEX_SET_WAITERS() can only ever
1.21 pooka 620: * be in progress on one CPU in the system - guaranteed
1.2 ad 621: * by the turnstile chain lock.
622: *
623: * o No other operations other than MUTEX_SET_WAITERS()
624: * and release can modify a mutex with a non-zero
625: * owner field.
626: *
627: * o The result of a successful MUTEX_SET_WAITERS() call
628: * is an unbuffered write that is immediately visible
629: * to all other processors in the system.
630: *
631: * o If the holding LWP switches away, it posts a store
632: * fence before changing curlwp, ensuring that any
633: * overwrite of the mutex waiters flag by mutex_exit()
634: * completes before the modification of curlwp becomes
635: * visible to this CPU.
636: *
1.14 yamt 637: * o mi_switch() posts a store fence before setting curlwp
1.2 ad 638: * and before resuming execution of an LWP.
639: *
640: * o _kernel_lock() posts a store fence before setting
641: * curcpu()->ci_biglock_wanted, and after clearing it.
642: * This ensures that any overwrite of the mutex waiters
643: * flag by mutex_exit() completes before the modification
644: * of ci_biglock_wanted becomes visible.
645: *
646: * We now post a read memory barrier (after setting the
647: * waiters field) and check the lock holder's status again.
648: * Some of the possible outcomes (not an exhaustive list):
649: *
650: * 1. The onproc check returns true: the holding LWP is
651: * running again. The lock may be released soon and
652: * we should spin. Importantly, we can't trust the
653: * value of the waiters flag.
654: *
655: * 2. The onproc check returns false: the holding LWP is
1.39 yamt 656: * not running. We now have the opportunity to check
1.2 ad 657: * if mutex_exit() has blatted the modifications made
658: * by MUTEX_SET_WAITERS().
659: *
660: * 3. The onproc check returns false: the holding LWP may
661: * or may not be running. It has context switched at
662: * some point during our check. Again, we have the
663: * chance to see if the waiters bit is still set or
664: * has been overwritten.
665: *
666: * 4. The onproc check returns false: the holding LWP is
667: * running on a CPU, but wants the big lock. It's OK
668: * to check the waiters field in this case.
669: *
670: * 5. The has-waiters check fails: the mutex has been
671: * released, the waiters flag cleared and another LWP
672: * now owns the mutex.
673: *
674: * 6. The has-waiters check fails: the mutex has been
675: * released.
676: *
 677: * If the waiters bit is not set it's unsafe to go to sleep,
678: * as we might never be awoken.
679: */
1.24 ad 680: if ((membar_consumer(), mutex_onproc(owner, &ci)) ||
681: (membar_consumer(), !MUTEX_HAS_WAITERS(mtx))) {
1.2 ad 682: turnstile_exit(mtx);
1.34 ad 683: owner = mtx->mtx_owner;
1.2 ad 684: continue;
685: }
686: #endif /* MULTIPROCESSOR */
687:
1.44 wrstuden 688: #ifdef KERN_SA
689: /*
690: * Sleeping for a mutex should not generate an upcall.
691: * So set LP_SA_NOBLOCK to indicate this.
692: * f indicates if we should clear LP_SA_NOBLOCK when done.
693: */
694: f = ~curlwp->l_pflag & LP_SA_NOBLOCK;
695: curlwp->l_pflag |= LP_SA_NOBLOCK;
696: #endif /* KERN_SA */
697:
1.2 ad 698: LOCKSTAT_START_TIMER(lsflag, slptime);
699:
1.5 yamt 700: turnstile_block(ts, TS_WRITER_Q, mtx, &mutex_syncobj);
1.2 ad 701:
702: LOCKSTAT_STOP_TIMER(lsflag, slptime);
703: LOCKSTAT_COUNT(slpcnt, 1);
1.34 ad 704:
1.44 wrstuden 705: #ifdef KERN_SA
706: curlwp->l_pflag ^= f;
707: #endif /* KERN_SA */
708:
1.34 ad 709: owner = mtx->mtx_owner;
1.2 ad 710: }
711:
712: LOCKSTAT_EVENT(lsflag, mtx, LB_ADAPTIVE_MUTEX | LB_SLEEP1,
713: slpcnt, slptime);
714: LOCKSTAT_EVENT(lsflag, mtx, LB_ADAPTIVE_MUTEX | LB_SPIN,
715: spincnt, spintime);
716: LOCKSTAT_EXIT(lsflag);
717:
718: MUTEX_DASSERT(mtx, MUTEX_OWNER(mtx->mtx_owner) == curthread);
719: MUTEX_LOCKED(mtx);
720: }
721:
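/*
 * For reference, the per-port mutex_enter() fast-path stub that this
 * routine backs up is logically close to the following sketch
 * (illustrative only; the real stubs are written in assembly).  A spin
 * mutex always fails the CAS, because MUTEX_BIT_SPIN is set in its
 * owner field, and so it too falls through to mutex_vector_enter().
 *
 *	void
 *	mutex_enter(kmutex_t *mtx)
 *	{
 *
 *		if (!MUTEX_ACQUIRE(mtx, (uintptr_t)curlwp))
 *			mutex_vector_enter(mtx);
 *	}
 */
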
722: /*
723: * mutex_vector_exit:
724: *
725: * Support routine for mutex_exit() that handles all cases.
726: */
727: void
728: mutex_vector_exit(kmutex_t *mtx)
729: {
730: turnstile_t *ts;
731: uintptr_t curthread;
732:
733: if (MUTEX_SPIN_P(mtx)) {
734: #ifdef FULL
1.33 ad 735: if (__predict_false(!__SIMPLELOCK_LOCKED_P(&mtx->mtx_lock))) {
736: if (panicstr != NULL)
737: return;
1.2 ad 738: MUTEX_ABORT(mtx, "exiting unheld spin mutex");
1.33 ad 739: }
1.2 ad 740: MUTEX_UNLOCKED(mtx);
741: __cpu_simple_unlock(&mtx->mtx_lock);
742: #endif
743: MUTEX_SPIN_SPLRESTORE(mtx);
744: return;
745: }
746:
1.11 ad 747: if (__predict_false((uintptr_t)panicstr | cold)) {
1.2 ad 748: MUTEX_UNLOCKED(mtx);
749: MUTEX_RELEASE(mtx);
750: return;
751: }
752:
753: curthread = (uintptr_t)curlwp;
754: MUTEX_DASSERT(mtx, curthread != 0);
755: MUTEX_ASSERT(mtx, MUTEX_OWNER(mtx->mtx_owner) == curthread);
756: MUTEX_UNLOCKED(mtx);
757:
1.15 ad 758: #ifdef LOCKDEBUG
759: /*
760: * Avoid having to take the turnstile chain lock every time
761: * around. Raise the priority level to splhigh() in order
762: * to disable preemption and so make the following atomic.
763: */
764: {
765: int s = splhigh();
766: if (!MUTEX_HAS_WAITERS(mtx)) {
767: MUTEX_RELEASE(mtx);
768: splx(s);
769: return;
770: }
771: splx(s);
772: }
773: #endif
774:
1.2 ad 775: /*
776: * Get this lock's turnstile. This gets the interlock on
777: * the sleep queue. Once we have that, we can clear the
778: * lock. If there was no turnstile for the lock, there
779: * were no waiters remaining.
780: */
781: ts = turnstile_lookup(mtx);
782:
783: if (ts == NULL) {
784: MUTEX_RELEASE(mtx);
785: turnstile_exit(mtx);
786: } else {
787: MUTEX_RELEASE(mtx);
788: turnstile_wakeup(ts, TS_WRITER_Q,
789: TS_WAITERS(ts, TS_WRITER_Q), NULL);
790: }
791: }
792:
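/*
 * Likewise, the optional mutex_exit() stub performs the unlocked
 * release discussed at length in mutex_vector_enter().  A rough sketch
 * (illustrative only; the real stub must be restartable/atomic with
 * respect to interrupts on the local CPU):
 *
 *	void
 *	mutex_exit(kmutex_t *mtx)
 *	{
 *
 *		if (mtx->mtx_owner == (uintptr_t)curlwp)
 *			MUTEX_RELEASE(mtx);	plain store of zero
 *		else
 *			mutex_vector_exit(mtx);	waiters present, or error
 *	}
 */
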
1.4 ad 793: #ifndef __HAVE_SIMPLE_MUTEXES
794: /*
795: * mutex_wakeup:
796: *
797: * Support routine for mutex_exit() that wakes up all waiters.
798: * We assume that the mutex has been released, but it need not
799: * be.
800: */
801: void
802: mutex_wakeup(kmutex_t *mtx)
803: {
804: turnstile_t *ts;
805:
806: ts = turnstile_lookup(mtx);
807: if (ts == NULL) {
808: turnstile_exit(mtx);
809: return;
810: }
811: MUTEX_CLEAR_WAITERS(mtx);
812: turnstile_wakeup(ts, TS_WRITER_Q, TS_WAITERS(ts, TS_WRITER_Q), NULL);
813: }
814: #endif /* !__HAVE_SIMPLE_MUTEXES */
815:
1.2 ad 816: /*
817: * mutex_owned:
818: *
1.3 ad 819: * Return true if the current LWP (adaptive) or CPU (spin)
820: * holds the mutex.
1.2 ad 821: */
822: int
823: mutex_owned(kmutex_t *mtx)
824: {
825:
1.35 ad 826: if (mtx == NULL)
827: return 0;
1.2 ad 828: if (MUTEX_ADAPTIVE_P(mtx))
829: return MUTEX_OWNER(mtx->mtx_owner) == (uintptr_t)curlwp;
830: #ifdef FULL
1.16 skrll 831: return __SIMPLELOCK_LOCKED_P(&mtx->mtx_lock);
1.2 ad 832: #else
833: return 1;
834: #endif
835: }
836:
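/*
 * mutex_owned() is meant for assertions rather than for making locking
 * decisions; a typical (hypothetical) caller:
 *
 *	KASSERT(mutex_owned(&sc->sc_lock));
 */
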
837: /*
838: * mutex_owner:
839: *
1.6 ad 840: * Return the current owner of an adaptive mutex. Used for
841: * priority inheritance.
1.2 ad 842: */
1.27 ad 843: lwp_t *
844: mutex_owner(kmutex_t *mtx)
1.2 ad 845: {
846:
847: MUTEX_ASSERT(mtx, MUTEX_ADAPTIVE_P(mtx));
848: return (struct lwp *)MUTEX_OWNER(mtx->mtx_owner);
849: }
850:
851: /*
852: * mutex_tryenter:
853: *
854: * Try to acquire the mutex; return non-zero if we did.
855: */
856: int
857: mutex_tryenter(kmutex_t *mtx)
858: {
859: uintptr_t curthread;
860:
861: /*
862: * Handle spin mutexes.
863: */
864: if (MUTEX_SPIN_P(mtx)) {
865: MUTEX_SPIN_SPLRAISE(mtx);
866: #ifdef FULL
867: if (__cpu_simple_lock_try(&mtx->mtx_lock)) {
1.4 ad 868: MUTEX_WANTLOCK(mtx);
1.2 ad 869: MUTEX_LOCKED(mtx);
870: return 1;
871: }
872: MUTEX_SPIN_SPLRESTORE(mtx);
873: #else
1.4 ad 874: MUTEX_WANTLOCK(mtx);
1.2 ad 875: MUTEX_LOCKED(mtx);
876: return 1;
877: #endif
878: } else {
879: curthread = (uintptr_t)curlwp;
880: MUTEX_ASSERT(mtx, curthread != 0);
881: if (MUTEX_ACQUIRE(mtx, curthread)) {
1.4 ad 882: MUTEX_WANTLOCK(mtx);
1.2 ad 883: MUTEX_LOCKED(mtx);
884: MUTEX_DASSERT(mtx,
885: MUTEX_OWNER(mtx->mtx_owner) == curthread);
886: return 1;
887: }
888: }
889:
890: return 0;
891: }
892:
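/*
 * Usage sketch (hypothetical locks): mutex_tryenter() is typically
 * used to take a lock against the documented lock order without
 * risking deadlock, backing off if the lock is already held.
 *
 *	mutex_enter(&b->lock);
 *	if (!mutex_tryenter(&a->lock)) {
 *		mutex_exit(&b->lock);
 *		mutex_enter(&a->lock);		take both in the proper order
 *		mutex_enter(&b->lock);
 *	}
 */
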
893: #if defined(__HAVE_SPIN_MUTEX_STUBS) || defined(FULL)
894: /*
895: * mutex_spin_retry:
896: *
897: * Support routine for mutex_spin_enter(). Assumes that the caller
898: * has already raised the SPL, and adjusted counters.
899: */
900: void
901: mutex_spin_retry(kmutex_t *mtx)
902: {
903: #ifdef MULTIPROCESSOR
904: u_int count;
905: LOCKSTAT_TIMER(spintime);
906: LOCKSTAT_FLAG(lsflag);
907: #ifdef LOCKDEBUG
908: u_int spins = 0;
909: #endif /* LOCKDEBUG */
910:
911: MUTEX_WANTLOCK(mtx);
912:
913: LOCKSTAT_ENTER(lsflag);
914: LOCKSTAT_START_TIMER(lsflag, spintime);
915: count = SPINLOCK_BACKOFF_MIN;
916:
917: /*
 918: * Spin, testing the lock word and doing exponential backoff
919: * to reduce cache line ping-ponging between CPUs.
920: */
921: do {
922: if (panicstr != NULL)
923: break;
1.16 skrll 924: while (__SIMPLELOCK_LOCKED_P(&mtx->mtx_lock)) {
1.2 ad 925: SPINLOCK_BACKOFF(count);
926: #ifdef LOCKDEBUG
927: if (SPINLOCK_SPINOUT(spins))
928: MUTEX_ABORT(mtx, "spinout");
929: #endif /* LOCKDEBUG */
930: }
931: } while (!__cpu_simple_lock_try(&mtx->mtx_lock));
932:
933: LOCKSTAT_STOP_TIMER(lsflag, spintime);
934: LOCKSTAT_EVENT(lsflag, mtx, LB_SPIN_MUTEX | LB_SPIN, 1, spintime);
935: LOCKSTAT_EXIT(lsflag);
936:
937: MUTEX_LOCKED(mtx);
938: #else /* MULTIPROCESSOR */
939: MUTEX_ABORT(mtx, "locking against myself");
940: #endif /* MULTIPROCESSOR */
941: }
942: #endif /* defined(__HAVE_SPIN_MUTEX_STUBS) || defined(FULL) */
1.31 ad 943:
944: /*
945: * mutex_obj_init:
946: *
947: * Initialize the mutex object store.
948: */
949: void
950: mutex_obj_init(void)
951: {
952:
953: mutex_obj_cache = pool_cache_init(sizeof(struct kmutexobj),
954: coherency_unit, 0, 0, "mutex", NULL, IPL_NONE, mutex_obj_ctor,
955: NULL, NULL);
956: }
957:
958: /*
959: * mutex_obj_ctor:
960: *
961: * Initialize a new lock for the cache.
962: */
963: static int
964: mutex_obj_ctor(void *arg, void *obj, int flags)
965: {
 966: struct kmutexobj *mo = obj;
967:
968: mo->mo_magic = MUTEX_OBJ_MAGIC;
969:
970: return 0;
971: }
972:
973: /*
974: * mutex_obj_alloc:
975: *
976: * Allocate a single lock object.
977: */
978: kmutex_t *
979: mutex_obj_alloc(kmutex_type_t type, int ipl)
980: {
981: struct kmutexobj *mo;
982:
983: mo = pool_cache_get(mutex_obj_cache, PR_WAITOK);
984: mutex_init(&mo->mo_lock, type, ipl);
985: mo->mo_refcnt = 1;
986:
987: return (kmutex_t *)mo;
988: }
989:
990: /*
991: * mutex_obj_hold:
992: *
993: * Add a single reference to a lock object. A reference to the object
994: * must already be held, and must be held across this call.
995: */
996: void
997: mutex_obj_hold(kmutex_t *lock)
998: {
999: struct kmutexobj *mo = (struct kmutexobj *)lock;
1000:
1001: KASSERT(mo->mo_magic == MUTEX_OBJ_MAGIC);
1002: KASSERT(mo->mo_refcnt > 0);
1003:
1004: atomic_inc_uint(&mo->mo_refcnt);
1005: }
1006:
1007: /*
1008: * mutex_obj_free:
1009: *
1010: * Drop a reference from a lock object. If the last reference is being
1011: * dropped, free the object and return true. Otherwise, return false.
1012: */
1013: bool
1014: mutex_obj_free(kmutex_t *lock)
1015: {
1016: struct kmutexobj *mo = (struct kmutexobj *)lock;
1017:
1018: KASSERT(mo->mo_magic == MUTEX_OBJ_MAGIC);
1019: KASSERT(mo->mo_refcnt > 0);
1020:
1021: if (atomic_dec_uint_nv(&mo->mo_refcnt) > 0) {
1022: return false;
1023: }
1024: mutex_destroy(&mo->mo_lock);
1025: pool_cache_put(mutex_obj_cache, mo);
1026: return true;
1027: }
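
/*
 * Usage sketch for the reference counted lock objects above
 * (hypothetical structures "foo" and "bar"): two objects may share a
 * single lock and drop their references independently.
 *
 *	kmutex_t *lock;
 *
 *	lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);
 *	foo->f_lock = lock;
 *	mutex_obj_hold(lock);			second reference, for bar
 *	bar->b_lock = lock;
 *	...
 *	mutex_obj_free(bar->b_lock);		drops one reference
 *	mutex_obj_free(foo->f_lock);		last reference: lock destroyed
 */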