version 1.22.2.2, 2007/03/12 05:51:13 |
version 1.23, 2007/02/24 02:25:02 |
|
|
#include <machine/cpu.h> |
#include <machine/cpu.h> |
#endif |
#endif |
|
|
/*
 * Initialize (i.e. release) a simple lock.
 *
 * In the kernel this jumps through the Sunlock assembler stub, which
 * takes the lock address in r1 (hence the movl before the jsb); in
 * userland an interlocked branch-on-bit-clear-and-clear (BBCCI) on
 * bit 0 of the lock word is issued directly.
 */
static __inline void __cpu_simple_lock_init(__cpu_simple_lock_t *);
static __inline void
__cpu_simple_lock_init(__cpu_simple_lock_t *__alp)
{
#ifdef _KERNEL
	__asm volatile ("movl %0,%%r1;jsb Sunlock"
		: /* No output */
		: "g"(__alp)
		: "r1","cc","memory");
#else
	__asm volatile ("bbcci $0,%0,1f;1:"
		: /* No output */
		: "m"(*__alp)
		: "cc");
#endif
}
|
|
/*
 * Try to acquire a simple lock without spinning.
 *
 * Returns nonzero if the lock was acquired, zero if it was already
 * held.  The kernel path calls the Slocktry stub (lock address in r1,
 * result in r0); the userland path uses an interlocked
 * branch-on-bit-set-and-set (BBSSI) on bit 0 of the lock word.
 *
 * NOTE(review): the trailing "return ret;" was destroyed by print
 * garbage in the source and has been reconstructed -- the function
 * returns int and "ret" is the only result computed.
 */
static __inline int __cpu_simple_lock_try(__cpu_simple_lock_t *);
static __inline int
__cpu_simple_lock_try(__cpu_simple_lock_t *__alp)
{
	int ret;

#ifdef _KERNEL
	__asm volatile ("movl %1,%%r1;jsb Slocktry;movl %%r0,%0"
		: "=&r"(ret)
		: "g"(__alp)
		: "r0","r1","cc","memory");
#else
	__asm volatile ("clrl %0;bbssi $0,%1,1f;incl %0;1:"
		: "=&r"(ret)
		: "m"(*__alp)
		: "cc");
#endif

	return ret;
}
|
|
#ifdef _KERNEL |
#ifdef _KERNEL |
#if defined(MULTIPROCESSOR) |
|
#define VAX_LOCK_CHECKS ((1 << IPI_SEND_CNCHAR) | (1 << IPI_DDB)) |
#define VAX_LOCK_CHECKS ((1 << IPI_SEND_CNCHAR) | (1 << IPI_DDB)) |
#define __cpu_simple_lock(__alp) \ |
#define __cpu_simple_lock(alp) \ |
do { \ |
do { \ |
struct cpu_info *__ci = curcpu(); \ |
struct cpu_info *__ci = curcpu(); \ |
\ |
\ |
while (__cpu_simple_lock_try(__alp) == 0) { \ |
while (__cpu_simple_lock_try(alp) == 0) { \ |
int __s; \ |
int ___s; \ |
\ |
\ |
if (__ci->ci_ipimsgs & VAX_LOCK_CHECKS) { \ |
if (__ci->ci_ipimsgs & VAX_LOCK_CHECKS) { \ |
__s = splipi(); \ |
___s = splipi(); \ |
cpu_handle_ipi(); \ |
cpu_handle_ipi(); \ |
splx(__s); \ |
splx(___s); \ |
} \ |
} \ |
} \ |
} \ |
} while (/*CONSTCOND*/0) |
} while (0) |
#else /* MULTIPROCESSOR */ |
|
#define __cpu_simple_lock(__alp) \ |
|
do { \ |
|
while (__cpu_simple_lock_try(__alp) == 0) { \ |
|
; \ |
|
} \ |
|
} while (/*CONSTCOND*/0) |
|
#endif |
|
#else |
#else |
static __inline void __cpu_simple_lock(__cpu_simple_lock_t *); |
static inline void |
static __inline void |
__cpu_simple_lock(__cpu_simple_lock_t *alp) |
__cpu_simple_lock(__cpu_simple_lock_t *__alp) |
|
{ |
{ |
__asm volatile ("1:bbssi $0,%0,1b" |
__asm volatile ("1:bbssi $0,%0,1b" |
: /* No outputs */ |
: /* No outputs */ |
: "m"(*__alp) |
: "m"(*alp) |
: "cc"); |
: "cc"); |
} |
} |
#endif /* _KERNEL */ |
#endif /* _KERNEL */ |
|
|
#if 0
/*
 * Dead (#if 0) experimental __cpu_simple_lock variants, kept for
 * reference only -- not compiled.
 *
 * NOTE(review): the interior of the if-body below was destroyed by
 * print-header garbage in the source; the splipi/cpu_handle_ipi/splx
 * sequence is reconstructed to mirror the live MULTIPROCESSOR macro
 * earlier in this file -- TODO confirm against repository history.
 */
static __inline void __cpu_simple_lock(__cpu_simple_lock_t *);
static __inline void
__cpu_simple_lock(__cpu_simple_lock_t *__alp)
{
	struct cpu_info *ci = curcpu();

	while (__cpu_simple_lock_try(__alp) == 0) {
		int s;

		if (ci->ci_ipimsgs & IPI_SEND_CNCHAR) {
			s = splipi();
			cpu_handle_ipi();
			splx(s);
		}
	}

#if 0
	__asm volatile ("movl %0,%%r1;jsb Slock"
		: /* No output */
		: "g"(__alp)
		: "r0","r1","cc","memory");
#endif
#if 0
	__asm volatile ("1:;bbssi $0, %0, 1b"
		: /* No output */
		: "m"(*__alp));
#endif
}
#endif
|
|
/*
 * Release a simple lock.
 *
 * Kernel path: jump through the Sunlock stub (lock address in r1).
 * Userland path: interlocked BBCCI clearing bit 0 of the lock word.
 */
static __inline void __cpu_simple_unlock(__cpu_simple_lock_t *);
static __inline void
__cpu_simple_unlock(__cpu_simple_lock_t *__alp)
{
#ifdef _KERNEL
	__asm volatile ("movl %0,%%r1;jsb Sunlock"
		: /* No output */
		: "g"(__alp)
		: "r1","cc","memory");
#else
	__asm volatile ("bbcci $0,%0,1f;1:"
		: /* No output */
		: "m"(*__alp)
		: "cc");
#endif
}
#if defined(MULTIPROCESSOR)
/*
 * Hook run while spinning on a lock: service any pending IPIs so this
 * CPU does not ignore them for the duration of the spin.
 *
 * NOTE(review): the opening "#if defined(MULTIPROCESSOR)" was lost in
 * a garbled span of the source; it is restored here to balance the
 * closing "#endif / * MULTIPROCESSOR * /" below.
 */
#define	SPINLOCK_SPIN_HOOK						\
do {									\
	struct cpu_info *__ci = curcpu();				\
	int __s;							\
									\
	if (__ci->ci_ipimsgs != 0) {					\
		/* printf("CPU %lu has IPIs pending\n",			\
		    __ci->ci_cpuid); */					\
		__s = splipi();						\
		cpu_handle_ipi();					\
		splx(__s);						\
	}								\
} while (/*CONSTCOND*/0)
#endif /* MULTIPROCESSOR */
|
|
/*
 * Read memory barrier: intentionally empty on this port --
 * NOTE(review): presumably because VAX memory ordering makes it
 * unnecessary; confirm against the port's memory-model documentation.
 */
static __inline void mb_read(void);
static __inline void
mb_read(void)
{
}
|
|
/*
 * Write memory barrier: intentionally empty on this port --
 * NOTE(review): presumably because VAX memory ordering makes it
 * unnecessary; confirm against the port's memory-model documentation.
 */
static __inline void mb_write(void);
static __inline void
mb_write(void)
{
}