/*	$NetBSD$	*/

/*-
 * Copyright (c) 2000, 2006 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe and Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Machine-dependent spin lock operations.
 */

#ifndef _X86_LOCK_H_
#define	_X86_LOCK_H_
1.1 fvdl 45:
1.15 skrll 46: static __inline int
47: __SIMPLELOCK_LOCKED_P(__cpu_simple_lock_t *__ptr)
48: {
49: return *__ptr == __SIMPLELOCK_LOCKED;
50: }
51:
52: static __inline int
53: __SIMPLELOCK_UNLOCKED_P(__cpu_simple_lock_t *__ptr)
54: {
55: return *__ptr == __SIMPLELOCK_UNLOCKED;
56: }
57:
1.16 skrll 58: static __inline void
59: __cpu_simple_lock_set(__cpu_simple_lock_t *__ptr)
60: {
61:
62: *__ptr = __SIMPLELOCK_LOCKED;
63: }
64:
65: static __inline void
66: __cpu_simple_lock_clear(__cpu_simple_lock_t *__ptr)
67: {
68:
69: *__ptr = __SIMPLELOCK_UNLOCKED;
70: }
71:
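/*
 * Note: __cpu_simple_lock_set() and __cpu_simple_lock_clear() above store
 * the lock word with plain (non-atomic) assignments; they are intended for
 * contexts where the caller already has exclusive access to the lock word.
 */
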
#ifdef _KERNEL

#include <machine/cpufunc.h>

void	__cpu_simple_lock_init(__cpu_simple_lock_t *);
void	__cpu_simple_lock(__cpu_simple_lock_t *);
int	__cpu_simple_lock_try(__cpu_simple_lock_t *);
void	__cpu_simple_unlock(__cpu_simple_lock_t *);

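/*
 * Usage sketch (illustrative only; the example lock below is hypothetical,
 * not part of this header):
 *
 *	static __cpu_simple_lock_t alock;
 *
 *	__cpu_simple_lock_init(&alock);
 *	...
 *	__cpu_simple_lock(&alock);
 *	... critical section runs with the lock held ...
 *	__cpu_simple_unlock(&alock);
 *
 * __cpu_simple_lock_try() attempts the acquire without spinning and
 * returns nonzero on success.
 */
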
#define	SPINLOCK_SPIN_HOOK	/* nothing */

#ifdef SPINLOCK_BACKOFF_HOOK
#undef SPINLOCK_BACKOFF_HOOK
#endif
#define	SPINLOCK_BACKOFF_HOOK	x86_pause()
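
/*
 * x86_pause() emits the PAUSE instruction (encoded as "rep; nop", so it
 * degrades to a plain NOP on CPUs that predate it).  PAUSE hints to the
 * processor that this is a spin-wait loop, which avoids the memory-order
 * mis-speculation penalty on loop exit and reduces power consumed while
 * spinning.
 */
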
#else

static __inline void __cpu_simple_lock_init(__cpu_simple_lock_t *)
    __unused;
static __inline void __cpu_simple_lock(__cpu_simple_lock_t *)
    __unused;
static __inline int __cpu_simple_lock_try(__cpu_simple_lock_t *)
    __unused;
static __inline void __cpu_simple_unlock(__cpu_simple_lock_t *)
    __unused;

static __inline void
__cpu_simple_lock_init(__cpu_simple_lock_t *lockp)
{

	*lockp = __SIMPLELOCK_UNLOCKED;
	__insn_barrier();
}

static __inline int
__cpu_simple_lock_try(__cpu_simple_lock_t *lockp)
{
	uint8_t val;

	val = __SIMPLELOCK_LOCKED;
	__asm volatile ("xchgb %0,(%2)" :
	    "=r" (val)
	    : "0" (val), "r" (lockp));
	__insn_barrier();
	return val == __SIMPLELOCK_UNLOCKED;
}
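
/*
 * Note: an XCHG that references memory always locks the bus/cache line
 * (an implicit LOCK prefix), so the exchange above is atomic with respect
 * to other CPUs; the previous lock value is returned in "val".
 */
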
static __inline void
__cpu_simple_lock(__cpu_simple_lock_t *lockp)
{

	while (!__cpu_simple_lock_try(lockp))
		/* nothing */;
	__insn_barrier();
}
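
/*
 * The loop above busy-waits with back-to-back atomic exchanges.  A
 * spin-wait hint such as x86_pause() (cf. SPINLOCK_BACKOFF_HOOK in the
 * kernel section above) could be placed in the loop body; this
 * non-kernel variant keeps the loop bare.
 */
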
/*
 * Note on x86 memory ordering
 *
 * When releasing a lock we must ensure that no stores or loads from within
 * the critical section are re-ordered by the CPU to occur outside of it:
 * they must have completed and be visible to other processors once the lock
 * has been released.
 *
 * NetBSD usually runs with the kernel mapped (via MTRR) in a WB (write
 * back) memory region.  In that case, memory ordering on x86 platforms
 * looks like this:
 *
 * i386		All loads/stores occur in instruction sequence.
 *
 * i486		All loads/stores occur in instruction sequence.  In
 * Pentium	exceptional circumstances, loads can be re-ordered around
 *		stores, but for the purposes of releasing a lock it does
 *		not matter.  Stores may not be immediately visible to other
 *		processors as they can be buffered.  However, since the
 *		stores are buffered in order the lock release will always be
 *		the last operation in the critical section that becomes
 *		visible to other CPUs.
 *
 * Pentium Pro	The "Intel 64 and IA-32 Architectures Software Developer's
 * onwards	Manual" volume 3A (order number 248966) says that (1) "Reads
 *		can be carried out speculatively and in any order" and (2)
 *		"Reads can pass buffered stores, but the processor is
 *		self-consistent.".  This would be a problem for the below,
 *		and would mandate a locked instruction cycle or load fence
 *		before releasing the simple lock.
 *
 *		The "Intel Pentium 4 Processor Optimization" guide (order
 *		number 253668-022US) says: "Loads can be moved before stores
 *		that occurred earlier in the program if they are not
 *		predicted to load from the same linear address.".  This is
 *		not a problem since the only loads that can be re-ordered
 *		take place once the lock has been released via a store.
 *
 *		The above two documents seem to contradict each other,
 *		however with the exception of early steppings of the Pentium
 *		Pro, the second document is closer to the truth: a store
 *		will always act as a load fence for all loads that precede
 *		the store in instruction order.
 *
 *		Again, note that stores can be buffered and will not always
 *		become immediately visible to other CPUs: they are however
 *		buffered in order.
 *
 * AMD64	Stores occur in order and are buffered.  Loads can be
 *		reordered, however stores act as load fences, meaning that
 *		loads can not be reordered around stores.
 */
static __inline void
__cpu_simple_unlock(__cpu_simple_lock_t *lockp)
{

	__insn_barrier();
	*lockp = __SIMPLELOCK_UNLOCKED;
}

#endif	/* _KERNEL */

#endif	/* _X86_LOCK_H_ */