/*
 * Reconstructed from a corrupted side-by-side merge of revisions
 * 1.10 (2008/02/13 18:42:36) and 1.11 (2008/02/22 03:16:01);
 * the newer (1.11) implementation is the one retained below.
 */
/*
 * Low-level lock stubs for VAX: adaptive- and spin-mutex fast paths,
 * plus a compare-and-swap helper (_do_cas) built from raised IPL and,
 * on MP, interlocked bit operations.
 */
#include <machine/asm.h>
#include "assym.h"

#ifdef MULTIPROCESSOR
	.section .bss
	.p2align 2
	/*
	 * One bit per lock-address hash bucket; _do_cas takes a bucket
	 * with BBSSI to serialize CAS sequences between CPUs.
	 */
	.lcomm	cashash,256		/* 2048 bits */
#endif
|
|
#ifndef LOCKDEBUG
/*
 * void mutex_enter(kmutex_t *);
 *
 * Adaptive-mutex acquire fast path: compare-and-swap the owner word
 * from 0 to curlwp.  If the CAS finds a non-zero owner, take the C
 * slow path (mutex_vector_enter).
 */
NENTRY(mutex_enter, 0)
	mfpr	$PR_SSP, %r4		/* get cpu_info */
	movl	4(%ap), %r1		/* get mutex (ptr) */
	clrl	%r2			/* set old value (zero) */
	movl	CI_CURLWP(%r4), %r3	/* set new value (curlwp) */
	bsbw	_do_cas+2		/* do the compare-and-swap */
					/* (+2 skips the entry mask word) */
	tstl	%r0			/* is the old value what we wanted? */
	beql	2f			/* yep, just branch to the return */
1:	callg	(%ap), _C_LABEL(mutex_vector_enter)
					/* nope, there's an owner so go slow */
2:	ret
|
|
/*
 * void mutex_exit(kmutex_t *);
 *
 * Adaptive-mutex release fast path: compare-and-swap the owner word
 * from curlwp back to 0.  If the owner word is not exactly curlwp
 * (waiters or flag bits present), take the C slow path.
 */
NENTRY(mutex_exit, 0)
	movl	4(%ap), %r1		/* get mutex (ptr) */
	mfpr	$PR_SSP, %r4		/* get cpu_info */
	movl	CI_CURLWP(%r4),%r2	/* get curlwp (old) */
	clrl	%r3			/* get zero (new) */
	bsbw	_do_cas+2		/* do the compare-and-swap */
	cmpl	%r0,%r2			/* return == old? */
	beql	2f			/* yes, branch to return */
1:	callg	(%ap), _C_LABEL(mutex_vector_exit)	/* no, slow path */
2:	ret
|
|
/*
 * void mutex_spin_enter(kmutex_t *);
 *
 * Spin-mutex acquire: raise IPL to the mutex's ceiling, remember the
 * previous IPL in cpu_info on the outermost acquisition, account the
 * nesting depth, then take the lock byte with interlocked BBSSI (on
 * DIAGNOSTIC/MP kernels) or a plain store (UP, for ddb visibility).
 */
NENTRY(mutex_spin_enter, 0)
	movl	4(%ap), %r0		/* get spin mutex */
#ifdef DIAGNOSTIC
	blbc	(%r0), 3f		/* NOTE(review): low bit clear =>
					 * not a spin mutex; this skips the
					 * IPL bookkeeping and falls into the
					 * lock attempt -- confirm intended */
#endif
	mfpr	$PR_IPL, %r2		/* get current IPL */
	movzbl	MTX_IPL(%r0), %r3	/* IPL ceiling of this mutex */
	cmpl	%r3, %r2		/* does mutex have > IPL? */
	bleq	1f			/* no, leave IPL alone */
	mtpr	%r3, $PR_IPL		/* yes, raise IPL */
1:	mfpr	$PR_SSP, %r4		/* get cpu_info */
	tstl	CI_MTX_COUNT(%r4)	/* any spin mutexes active? */
	bneq	3f			/* yep, don't save IPL */
	movl	%r2, CI_MTX_OLDSPL(%r4)	/* nope, save old IPL */
3:	decl	CI_MTX_COUNT(%r4)	/* decr mutex count */
#if defined(DIAGNOSTIC) || defined(MULTIPROCESSOR)
	bbssi	$0, MTX_LOCK(%r0), 4f	/* take out mutex */
	ret
4:	callg	(%ap), _C_LABEL(mutex_spin_retry)	/* slow path */
#else
	movb	$1, MTX_LOCK(%r0)	/* for ddb use only */
#endif
	ret
|
/*
 * void mutex_spin_exit(kmutex_t *);
 *
 * Spin-mutex release: clear the lock byte, and when this drops the
 * last spin mutex held by this CPU (nesting count returns to zero),
 * restore the IPL that was saved at the first acquisition.
 */
NENTRY(mutex_spin_exit, 0)
	movl	4(%ap), %r0		/* get spin mutex */
#if defined(DIAGNOSTIC)
	blbc	(%r0), 2f		/* assert this is a spinlock */
#endif
#if defined(DIAGNOSTIC) || defined(MULTIPROCESSOR)
	bbcci	$0, MTX_LOCK(%r0), 2f	/* clear mutex; bit already clear
					 * means it wasn't held -> slow path */
#else
	clrb	MTX_LOCK(%r0)		/* for ddb use only */
#endif
	mfpr	$PR_SSP, %r4		/* get curcpu */
	movl	CI_MTX_OLDSPL(%r4), %r2	/* fetch oldspl */
	incl	CI_MTX_COUNT(%r4)	/* incr mtx count */
	bneq	1f			/* still held? */
	mtpr	%r2, $PR_IPL		/* no, restore saved ipl */
1:	ret

#if defined(DIAGNOSTIC) || defined(MULTIPROCESSOR)
2:	callg	(%ap), _C_LABEL(mutex_vector_exit)	/* slow path */
	ret
#endif

#endif /* LOCKDEBUG */
|
|
/*
 * uint32_t _atomic_cas_32(volatile uint32_t *ptr, uint32_t old,
 *			   uint32_t new);
 *
 * Returns the value previously at *ptr; the store of `new' happens
 * only if that value equalled `old'.  All the work is in _do_cas.
 * (The duplicate unguarded .lcomm for cashash that preceded this
 * entry is dropped: the symbol is defined once, MP-only, at the top
 * of the file.)
 */
NENTRY(_atomic_cas_32, 0)
	movq	4(%ap), %r1		/* cache ptr, old (8 bytes -> r1/r2) */
	movl	12(%ap), %r3		/* cache new */
	bsbw	_do_cas+2		/* do it */
	ret
|
|
|
/*
 * _do_cas: compare-and-swap helper.
 *
 * In:   %r1 = ptr, %r2 = expected old value, %r3 = new value.
 * Out:  %r0 = value found at *ptr (== %r2 on success).
 * Clobbers %r4, %r5.
 *
 * Callers enter with BSBW at _do_cas+2 (skipping the NENTRY entry
 * mask word), so the routine returns with RSB, not RET.
 *
 * VAX has no native CAS; atomicity comes from running at IPL_HIGH
 * and, on MP, additionally holding a per-address interlocked bit in
 * the cashash table so other CPUs' CAS sequences on nearby addresses
 * are excluded.
 */
NENTRY(_do_cas, 0)
	movl	(%r1), %r0		/* get value */
	cmpl	%r0, %r2		/* does it equal old? */
	bneq	4f			/* nope, return (IPL untouched) */
	mfpr	$PR_IPL, %r5		/* save IPL */
	mtpr	$IPL_HIGH, $PR_IPL	/* block everything */
#ifdef MULTIPROCESSOR
	extzv	$2,$11,%r1,%r4		/* gets bits 2-12 of the address */
1:	bbssi	%r4,cashash,1b		/* spin until our hash bit is ours */
#endif
	movl	(%r1), %r0		/* get value again */
	cmpl	%r0, %r2		/* does it still equal old? */
	bneq	2f			/* nope, return */
	movl	%r3,(%r1)		/* update *ptr with new */
2:
#ifdef MULTIPROCESSOR
	bbcci	%r4,cashash,3f		/* clear this pos in the hash table */
3:
#endif
	mtpr	%r5, $PR_IPL		/* restore IPL */
4:
	rsb				/* return */

STRONG_ALIAS(atomic_cas_ptr,_atomic_cas_32)
STRONG_ALIAS(_atomic_cas_ptr,_atomic_cas_32)
STRONG_ALIAS(atomic_cas_uint,_atomic_cas_32)