[BACK]Return to lock.h CVS log [TXT][DIR] Up to [cvs.NetBSD.org] / src / sys / arch / sparc / include

Annotation of src/sys/arch/sparc/include/lock.h, Revision 1.30.20.1

1.30.20.1! yamt        1: /*     $NetBSD: lock.h,v 1.30 2007/10/17 19:57:13 garbled Exp $ */
1.1       pk          2:
                      3: /*-
1.24      ad          4:  * Copyright (c) 1998, 1999, 2006 The NetBSD Foundation, Inc.
1.1       pk          5:  * All rights reserved.
                      6:  *
                      7:  * This code is derived from software contributed to The NetBSD Foundation
1.24      ad          8:  * by Paul Kranenburg and Andrew Doran.
1.1       pk          9:  *
                     10:  * Redistribution and use in source and binary forms, with or without
                     11:  * modification, are permitted provided that the following conditions
                     12:  * are met:
                     13:  * 1. Redistributions of source code must retain the above copyright
                     14:  *    notice, this list of conditions and the following disclaimer.
                     15:  * 2. Redistributions in binary form must reproduce the above copyright
                     16:  *    notice, this list of conditions and the following disclaimer in the
                     17:  *    documentation and/or other materials provided with the distribution.
                     18:  *
                     19:  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
                     20:  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
                     21:  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
                     22:  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
                     23:  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
                     24:  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
                     25:  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
                     26:  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
                     27:  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
                     28:  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
                     29:  * POSSIBILITY OF SUCH DAMAGE.
                     30:  */
                     31:
                     32: #ifndef _MACHINE_LOCK_H
                     33: #define _MACHINE_LOCK_H
                     34:
                     35: /*
                     36:  * Machine dependent spin lock operations.
                     37:  */
1.5       thorpej    38:
1.15      pk         39: #if __SIMPLELOCK_UNLOCKED != 0
                     40: #error __SIMPLELOCK_UNLOCKED must be 0 for this implementation
                     41: #endif
                     42:
1.7       thorpej    43: /* XXX So we can expose this to userland. */
1.10      hannken    44: #ifdef __lint__
                     45: #define __ldstub(__addr)       (__addr)
                     46: #else /* !__lint__ */
1.22      perry      47: static __inline int __ldstub(__cpu_simple_lock_t *addr);
                     48: static __inline int __ldstub(__cpu_simple_lock_t *addr)
1.14      mrg        49: {
                     50:        int v;
                     51:
1.20      perry      52:        __asm volatile("ldstub [%1],%0"
1.18      chs        53:            : "=&r" (v)
1.14      mrg        54:            : "r" (addr)
                     55:            : "memory");
                     56:
                     57:        return v;
                     58: }
1.10      hannken    59: #endif /* __lint__ */
1.7       thorpej    60:
/*
 * Forward declarations for the spin lock primitives.  All are static
 * inlines except __cpu_simple_lock, which may instead be provided as
 * an external function when __CPU_SIMPLE_LOCK_NOINLINE is defined.
 */
static __inline void __cpu_simple_lock_init(__cpu_simple_lock_t *)
	__attribute__((__unused__));
static __inline int __cpu_simple_lock_try(__cpu_simple_lock_t *)
	__attribute__((__unused__));
static __inline void __cpu_simple_unlock(__cpu_simple_lock_t *)
	__attribute__((__unused__));
#ifndef __CPU_SIMPLE_LOCK_NOINLINE
static __inline void __cpu_simple_lock(__cpu_simple_lock_t *)
	__attribute__((__unused__));
#else
extern void __cpu_simple_lock(__cpu_simple_lock_t *);
#endif
                     73:
1.29      skrll      74: static __inline int
                     75: __SIMPLELOCK_LOCKED_P(__cpu_simple_lock_t *__ptr)
                     76: {
                     77:        return *__ptr == __SIMPLELOCK_LOCKED;
                     78: }
                     79:
                     80: static __inline int
                     81: __SIMPLELOCK_UNLOCKED_P(__cpu_simple_lock_t *__ptr)
                     82: {
                     83:        return *__ptr == __SIMPLELOCK_UNLOCKED;
                     84: }
                     85:
                     86: static __inline void
                     87: __cpu_simple_lock_clear(__cpu_simple_lock_t *__ptr)
                     88: {
                     89:        *__ptr = __SIMPLELOCK_UNLOCKED;
                     90: }
                     91:
                     92: static __inline void
                     93: __cpu_simple_lock_set(__cpu_simple_lock_t *__ptr)
                     94: {
                     95:        *__ptr = __SIMPLELOCK_LOCKED;
                     96: }
                     97:
1.22      perry      98: static __inline void
1.9       thorpej    99: __cpu_simple_lock_init(__cpu_simple_lock_t *alp)
1.1       pk        100: {
1.2       pk        101:
1.7       thorpej   102:        *alp = __SIMPLELOCK_UNLOCKED;
1.1       pk        103: }
                    104:
1.17      pk        105: #ifndef __CPU_SIMPLE_LOCK_NOINLINE
1.22      perry     106: static __inline void
1.9       thorpej   107: __cpu_simple_lock(__cpu_simple_lock_t *alp)
1.1       pk        108: {
1.2       pk        109:
                    110:        /*
1.7       thorpej   111:         * If someone else holds the lock use simple reads until it
                    112:         * is released, then retry the atomic operation. This reduces
                    113:         * memory bus contention because the cache-coherency logic
                    114:         * does not have to broadcast invalidates on the lock while
                    115:         * we spin on it.
1.2       pk        116:         */
1.7       thorpej   117:        while (__ldstub(alp) != __SIMPLELOCK_UNLOCKED) {
                    118:                while (*alp != __SIMPLELOCK_UNLOCKED)
                    119:                        /* spin */ ;
1.2       pk        120:        }
1.1       pk        121: }
1.17      pk        122: #endif /* __CPU_SIMPLE_LOCK_NOINLINE */
1.1       pk        123:
1.22      perry     124: static __inline int
1.9       thorpej   125: __cpu_simple_lock_try(__cpu_simple_lock_t *alp)
1.1       pk        126: {
1.2       pk        127:
1.7       thorpej   128:        return (__ldstub(alp) == __SIMPLELOCK_UNLOCKED);
1.1       pk        129: }
                    130:
1.22      perry     131: static __inline void
1.9       thorpej   132: __cpu_simple_unlock(__cpu_simple_lock_t *alp)
1.1       pk        133: {
1.2       pk        134:
1.12      pk        135:        /*
1.13      pk        136:         * Insert compiler barrier to prevent instruction re-ordering
                    137:         * around the lock release.
1.12      pk        138:         */
1.13      pk        139:        __insn_barrier();
                    140:        *alp = __SIMPLELOCK_UNLOCKED;
1.1       pk        141: }
                    142:
/*
 * Memory barrier primitives, in two flavors: SPARC V9 (real membar
 * instructions) and pre-V9 (worked around with ordinary instructions).
 */
#if defined(__sparc_v9__)
/* Order loads before the barrier against loads after it. */
static __inline void
mb_read(void)
{
	__asm __volatile("membar #LoadLoad" : : : "memory");
}

/*
 * Store/store barrier.  Only a compiler barrier is emitted here —
 * presumably the V9 memory model in use already keeps stores ordered,
 * so no membar instruction is needed.  NOTE(review): confirm this
 * matches the kernel's assumed V9 memory model (e.g. TSO).
 */
static __inline void
mb_write(void)
{
	__asm __volatile("" : : : "memory");
}

/* Full barrier: force all prior memory operations to complete. */
static __inline void
mb_memory(void)
{
	__asm __volatile("membar #MemIssue" : : : "memory");
}
#else	/* __sparc_v9__ */
/*
 * Pre-V9 read barrier: perform a dummy store to a private location.
 * NOTE(review): presumably this forces outstanding memory operations
 * to order before subsequent loads on V8-class hardware — confirm
 * against the SPARC V8 memory-model documentation.
 */
static __inline void
mb_read(void)
{
	static volatile int junk;
	__asm volatile("st %%g0,[%0]"
	    :
	    : "r" (&junk)
	    : "memory");
}

/* Pre-V9 write barrier: compiler barrier only. */
static __inline void
mb_write(void)
{
	__insn_barrier();
}

/* Pre-V9 full barrier: reuses the dummy-store trick in mb_read(). */
static __inline void
mb_memory(void)
{
	mb_read();
}
#endif /* __sparc_v9__ */
                    184:
1.1       pk        185: #endif /* _MACHINE_LOCK_H */

CVSweb <webmaster@jp.NetBSD.org>