/*	$NetBSD: lock.h,v 1.26 2007/09/10 11:34:10 skrll Exp $	*/

/*
 * Copyright (c) 2000 Ludd, University of Lule}, Sweden.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed at Ludd, University of Lule}.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _VAX_LOCK_H_
#define _VAX_LOCK_H_

#ifdef _KERNEL
#ifdef _KERNEL_OPT
#include "opt_multiprocessor.h"
#include <machine/intr.h>
#endif
#include <machine/cpu.h>
#endif
1.3 ragge 43:
1.25.10.1! garbled 44: static __inline int
! 45: __SIMPLELOCK_LOCKED_P(__cpu_simple_lock_t *__ptr)
! 46: {
! 47: return *__ptr == __SIMPLELOCK_LOCKED;
! 48: }
! 49:
! 50: static __inline int
! 51: __SIMPLELOCK_UNLOCKED_P(__cpu_simple_lock_t *__ptr)
! 52: {
! 53: return *__ptr == __SIMPLELOCK_UNLOCKED;
! 54: }
! 55:
/*
 * Force the lock into the unlocked state with a plain (non-interlocked)
 * store.  Unlike __cpu_simple_unlock() below, no interlocked VAX
 * instruction or assembler stub is involved.
 */
static __inline void
__cpu_simple_lock_clear(__cpu_simple_lock_t *__ptr)
{
	*__ptr = __SIMPLELOCK_UNLOCKED;
}
! 61:
/*
 * Force the lock into the locked state with a plain (non-interlocked)
 * store.  This does not acquire the lock atomically — use
 * __cpu_simple_lock()/__cpu_simple_lock_try() for that.
 */
static __inline void
__cpu_simple_lock_set(__cpu_simple_lock_t *__ptr)
{
	*__ptr = __SIMPLELOCK_LOCKED;
}
! 67:
static __inline void __cpu_simple_lock_init(__cpu_simple_lock_t *);
/*
 * Initialize a simple lock to the unlocked state.
 */
static __inline void
__cpu_simple_lock_init(__cpu_simple_lock_t *__alp)
{
#ifdef _KERNEL
	/*
	 * In-kernel: initializing is the same operation as unlocking,
	 * so reuse the Sunlock assembler stub (lock address is passed
	 * in r1; same sequence as __cpu_simple_unlock() below).
	 */
	__asm volatile ("movl %0,%%r1;jsb Sunlock"
		: /* No output */
		: "g"(__alp)
		: "r1","cc","memory");
#else
	/*
	 * Userland: clear bit 0 with the interlocked BBCCI instruction;
	 * the branch target "1:" is the next instruction either way, so
	 * this is purely an atomic bit-clear.
	 */
	__asm volatile ("bbcci $0,%0,1f;1:"
		: /* No output */
		: "m"(*__alp)
		: "cc");
#endif
}
84:
static __inline int __cpu_simple_lock_try(__cpu_simple_lock_t *);
/*
 * Try once to acquire the lock.  Returns non-zero on success, 0 if the
 * lock was already held.  Never blocks.
 */
static __inline int
__cpu_simple_lock_try(__cpu_simple_lock_t *__alp)
{
	int ret;

#ifdef _KERNEL
	/*
	 * In-kernel: the Slocktry assembler stub takes the lock address
	 * in r1 and leaves its success/failure result in r0.
	 */
	__asm volatile ("movl %1,%%r1;jsb Slocktry;movl %%r0,%0"
		: "=&r"(ret)
		: "g"(__alp)
		: "r0","r1","cc","memory");
#else
	/*
	 * Userland: BBSSI atomically tests and sets bit 0, branching if
	 * the bit was already set.  ret is cleared first; it is
	 * incremented to 1 only when the bit was previously clear,
	 * i.e. when we acquired the lock.
	 */
	__asm volatile ("clrl %0;bbssi $0,%1,1f;incl %0;1:"
		: "=&r"(ret)
		: "m"(*__alp)
		: "cc");
#endif

	return ret;
}
105:
#ifdef _KERNEL
#if defined(MULTIPROCESSOR)
/*
 * IPIs that must still be serviced while spinning for a lock (console
 * character output and DDB entry).
 */
#define VAX_LOCK_CHECKS ((1 << IPI_SEND_CNCHAR) | (1 << IPI_DDB))
/*
 * MP kernel: spin until the lock is acquired.  While spinning, handle
 * the IPIs above so that a CPU holding the lock and waiting for this
 * CPU to respond to an IPI cannot deadlock against us (see the
 * SPINLOCK_SPIN_HOOK comment further down for the same rationale).
 */
#define __cpu_simple_lock(__alp)					\
do {									\
	struct cpu_info *__ci = curcpu();				\
									\
	while (__cpu_simple_lock_try(__alp) == 0) {			\
		int __s;						\
									\
		if (__ci->ci_ipimsgs & VAX_LOCK_CHECKS) {		\
			__s = splipi();					\
			cpu_handle_ipi();				\
			splx(__s);					\
		}							\
	}								\
} while (/*CONSTCOND*/0)
#else /* MULTIPROCESSOR */
/* Uniprocessor kernel: no IPIs to service, just spin. */
#define __cpu_simple_lock(__alp)					\
do {									\
	while (__cpu_simple_lock_try(__alp) == 0) {			\
		;							\
	}								\
} while (/*CONSTCOND*/0)
#endif
#else
static __inline void __cpu_simple_lock(__cpu_simple_lock_t *);
/*
 * Userland: spin with the interlocked BBSSI instruction, looping back
 * to the test-and-set until bit 0 was previously clear (lock acquired).
 */
static __inline void
__cpu_simple_lock(__cpu_simple_lock_t *__alp)
{
	__asm volatile ("1:bbssi $0,%0,1b"
		: /* No outputs */
		: "m"(*__alp)
		: "cc");
}
#endif /* _KERNEL */
1.7 ragge 142:
/*
 * An older MULTIPROCESSOR implementation of __cpu_simple_lock() used to
 * live here inside "#if 0" (itself containing two more disabled "#if 0"
 * asm variants).  It was never compiled and is fully superseded by the
 * macro/inline definitions above, so the dead code has been removed.
 */
1.1 ragge 173:
static __inline void __cpu_simple_unlock(__cpu_simple_lock_t *);
/*
 * Release the lock.
 */
static __inline void
__cpu_simple_unlock(__cpu_simple_lock_t *__alp)
{
#ifdef _KERNEL
	/*
	 * In-kernel: the Sunlock assembler stub takes the lock address
	 * in r1 (same sequence used by __cpu_simple_lock_init()).
	 */
	__asm volatile ("movl %0,%%r1;jsb Sunlock"
		: /* No output */
		: "g"(__alp)
		: "r1","cc","memory");
#else
	/*
	 * Userland: atomically clear bit 0 with BBCCI; the "1:" branch
	 * target is the next instruction either way.
	 */
	__asm volatile ("bbcci $0,%0,1f;1:"
		: /* No output */
		: "m"(*__alp)
		: "cc");
#endif
}
190:
#if defined(MULTIPROCESSOR)
/*
 * On the Vax, interprocessor interrupts can come in at device priority
 * level or lower. This can cause some problems while waiting for r/w
 * spinlocks from a high'ish priority level: IPIs that come in will not
 * be processed. This can lead to deadlock.
 *
 * This hook allows IPIs to be processed while a spinlock's interlock
 * is released.
 */
#define SPINLOCK_SPIN_HOOK						\
do {									\
	struct cpu_info *__ci = curcpu();				\
	int __s;							\
									\
	if (__ci->ci_ipimsgs != 0) {					\
		/* printf("CPU %lu has IPIs pending\n",			\
			__ci->ci_cpuid); */				\
		__s = splipi();						\
		cpu_handle_ipi();					\
		splx(__s);						\
	}								\
} while (/*CONSTCOND*/0)
#endif /* MULTIPROCESSOR */
1.22 matt 215:
static __inline void mb_read(void);
/*
 * Read memory barrier: intentionally a no-op here — presumably the VAX
 * needs no explicit barrier instruction for this.
 * NOTE(review): empty body taken from original; confirm against the
 * architecture's memory-ordering rules.
 */
static __inline void
mb_read(void)
{
}
221:
static __inline void mb_write(void);
/*
 * Write memory barrier: intentionally a no-op here — presumably the VAX
 * needs no explicit barrier instruction for this.
 * NOTE(review): empty body taken from original; confirm against the
 * architecture's memory-ordering rules.
 */
static __inline void
mb_write(void)
{
}
1.1 ragge 227: #endif /* _VAX_LOCK_H_ */
CVSweb <webmaster@jp.NetBSD.org>