Annotation of src/sys/arch/x86/include/lock.h, Revision 1.14.12.1
1.14.12.1! thorpej 1: /* $NetBSD: lock.h,v 1.14 2007/02/10 16:19:39 ad Exp $ */
1.1 fvdl 2:
3: /*-
1.12 ad 4: * Copyright (c) 2000, 2006 The NetBSD Foundation, Inc.
1.1 fvdl 5: * All rights reserved.
6: *
7: * This code is derived from software contributed to The NetBSD Foundation
1.12 ad 8: * by Jason R. Thorpe and Andrew Doran.
1.1 fvdl 9: *
10: * Redistribution and use in source and binary forms, with or without
11: * modification, are permitted provided that the following conditions
12: * are met:
13: * 1. Redistributions of source code must retain the above copyright
14: * notice, this list of conditions and the following disclaimer.
15: * 2. Redistributions in binary form must reproduce the above copyright
16: * notice, this list of conditions and the following disclaimer in the
17: * documentation and/or other materials provided with the distribution.
18: * 3. All advertising materials mentioning features or use of this software
19: * must display the following acknowledgement:
20: * This product includes software developed by the NetBSD
21: * Foundation, Inc. and its contributors.
22: * 4. Neither the name of The NetBSD Foundation nor the names of its
23: * contributors may be used to endorse or promote products derived
24: * from this software without specific prior written permission.
25: *
26: * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27: * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28: * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29: * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30: * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31: * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32: * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33: * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34: * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35: * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36: * POSSIBILITY OF SUCH DAMAGE.
37: */
38:
39: /*
40: * Machine-dependent spin lock operations.
41: */
42:
1.9 yamt 43: #ifndef _X86_LOCK_H_
44: #define _X86_LOCK_H_
1.1 fvdl 45:
46: #if defined(_KERNEL_OPT)
47: #include "opt_lockdebug.h"
48: #endif
49:
1.2 fvdl 50: #include <machine/cpufunc.h>
1.1 fvdl 51:
52: #ifdef LOCKDEBUG
53:
1.5 junyoung 54: extern void __cpu_simple_lock_init(__cpu_simple_lock_t *);
55: extern void __cpu_simple_lock(__cpu_simple_lock_t *);
56: extern int __cpu_simple_lock_try(__cpu_simple_lock_t *);
57: extern void __cpu_simple_unlock(__cpu_simple_lock_t *);
1.1 fvdl 58:
59: #else
60:
1.14.12.1! thorpej 61: #include <sys/atomic.h>
1.1 fvdl 62:
1.11 perry 63: static __inline void __cpu_simple_lock_init(__cpu_simple_lock_t *)
1.1 fvdl 64: __attribute__((__unused__));
1.11 perry 65: static __inline void __cpu_simple_lock(__cpu_simple_lock_t *)
1.1 fvdl 66: __attribute__((__unused__));
1.11 perry 67: static __inline int __cpu_simple_lock_try(__cpu_simple_lock_t *)
1.1 fvdl 68: __attribute__((__unused__));
1.11 perry 69: static __inline void __cpu_simple_unlock(__cpu_simple_lock_t *)
1.1 fvdl 70: __attribute__((__unused__));
71:
static __inline void
__cpu_simple_lock_init(__cpu_simple_lock_t *lockp)
{

	/*
	 * Initialize the lock word to the unlocked state.  No atomic
	 * operation is needed here: the lock must not be visible to
	 * other CPUs until after initialization.  The compiler barrier
	 * keeps the store from being reordered (by the compiler) with
	 * subsequent accesses to the lock word.
	 */
	*lockp = __SIMPLELOCK_UNLOCKED;
	__insn_barrier();
}
79:
static __inline void
__cpu_simple_lock(__cpu_simple_lock_t *lockp)
{

	/*
	 * Test-and-set acquire: atomically swap in the locked value;
	 * if the previous value was unlocked, we now own the lock.
	 * Otherwise spin with plain (non-atomic) reads until the lock
	 * looks free, then retry the swap.  Spinning on plain reads
	 * avoids hammering the bus with locked cycles, and x86_pause()
	 * hints the spin to the CPU (helps SMT siblings and power).
	 *
	 * NOTE(review): atomic_swap_8 is presumably an XCHG, which is
	 * implicitly locked and serializing on x86 — confirm against
	 * the sys/atomic.h implementation.  The trailing compiler
	 * barrier keeps critical-section accesses from being hoisted
	 * above the acquire by the compiler.
	 */
	while (atomic_swap_8(lockp, __SIMPLELOCK_LOCKED)
	    != __SIMPLELOCK_UNLOCKED) {
		do {
			x86_pause();
		} while (*lockp == __SIMPLELOCK_LOCKED);
	}
	__insn_barrier();
}
92:
1.11 perry 93: static __inline int
1.1 fvdl 94: __cpu_simple_lock_try(__cpu_simple_lock_t *lockp)
95: {
1.14.12.1! thorpej 96: int r = (atomic_swap_8(lockp, __SIMPLELOCK_LOCKED)
1.1 fvdl 97: == __SIMPLELOCK_UNLOCKED);
98:
1.7 yamt 99: __insn_barrier();
1.1 fvdl 100:
101: return (r);
102: }
103:
1.12 ad 104: /*
105: * Note on x86 memory ordering
106: *
107: * When releasing a lock we must ensure that no stores or loads from within
108: * the critical section are re-ordered by the CPU to occur outside of it:
109: * they must have completed and be visible to other processors once the lock
110: * has been released.
111: *
112: * NetBSD usually runs with the kernel mapped (via MTRR) in a WB (write
113: * back) memory region. In that case, memory ordering on x86 platforms
114: * looks like this:
115: *
116: * i386 All loads/stores occur in instruction sequence.
117: *
118: * i486 All loads/stores occur in instruction sequence. In
119: * Pentium exceptional circumstances, loads can be re-ordered around
120: * stores, but for the purposes of releasing a lock it does
121: * not matter. Stores may not be immediately visible to other
122: * processors as they can be buffered. However, since the
123: * stores are buffered in order the lock release will always be
124: * the last operation in the critical section that becomes
125: * visible to other CPUs.
126: *
127: * Pentium Pro The "Intel 64 and IA-32 Architectures Software Developer's
 128: * onwards Manual" volume 3A (order number 253668) says that (1) "Reads
129: * can be carried out speculatively and in any order" and (2)
130: * "Reads can pass buffered stores, but the processor is
131: * self-consistent.". This would be a problem for the below,
132: * and would mandate a locked instruction cycle or load fence
133: * before releasing the simple lock.
134: *
135: * The "Intel Pentium 4 Processor Optimization" guide (order
 136: * number 248966-022US) says: "Loads can be moved before stores
137: * that occurred earlier in the program if they are not
138: * predicted to load from the same linear address.". This is
139: * not a problem since the only loads that can be re-ordered
140: * take place once the lock has been released via a store.
141: *
142: * The above two documents seem to contradict each other,
143: * however with the exception of early steppings of the Pentium
144: * Pro, the second document is closer to the truth: a store
145: * will always act as a load fence for all loads that precede
146: * the store in instruction order.
147: *
148: * Again, note that stores can be buffered and will not always
149: * become immediately visible to other CPUs: they are however
150: * buffered in order.
151: *
152: * AMD64 Stores occur in order and are buffered. Loads can be
153: * reordered, however stores act as load fences, meaning that
154: * loads can not be reordered around stores.
155: */
static __inline void
__cpu_simple_unlock(__cpu_simple_lock_t *lockp)
{

	/*
	 * Release: a plain store suffices on x86 because stores are
	 * buffered in order and act as a fence for preceding loads
	 * (see the memory-ordering discussion above), so everything in
	 * the critical section becomes visible no later than the
	 * releasing store.  The compiler barrier keeps the compiler
	 * from sinking critical-section accesses below the store.
	 */
	__insn_barrier();
	*lockp = __SIMPLELOCK_UNLOCKED;
}
163:
164: #endif /* !LOCKDEBUG */
1.4 yamt 165:
1.13 ad 166: #define SPINLOCK_SPIN_HOOK /* nothing */
167: #define SPINLOCK_BACKOFF_HOOK x86_pause()
168:
1.4 yamt 169: #ifdef _KERNEL
1.13 ad 170: void mb_read(void);
171: void mb_write(void);
172: void mb_memory(void);
173: #else /* _KERNEL */
static __inline void
mb_read(void)
{
	/*
	 * Userland read barrier: LFENCE orders this CPU's loads.
	 * (In-kernel builds use the out-of-line mb_read() declared
	 * above instead.)
	 */
	x86_lfence();
}
179:
static __inline void
mb_write(void)
{
	/*
	 * Userland write barrier: only a compiler barrier is needed,
	 * since x86 stores become visible in program order (see the
	 * memory-ordering discussion earlier in this file).
	 */
	__insn_barrier();
}
185:
static __inline void
mb_memory(void)
{
	/*
	 * Userland full barrier: MFENCE orders all prior loads and
	 * stores against all subsequent ones.
	 */
	x86_mfence();
}
191: #endif /* _KERNEL */
1.1 fvdl 192:
1.9 yamt 193: #endif /* _X86_LOCK_H_ */
CVSweb <webmaster@jp.NetBSD.org>