Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

===================================================================
RCS file: /ftp/cvs/cvsroot/src/sys/arch/i386/i386/lock_stubs.S,v
rcsdiff: /ftp/cvs/cvsroot/src/sys/arch/i386/i386/lock_stubs.S,v: warning: Unknown phrases like `commitid ...;' are present.
retrieving revision 1.2
retrieving revision 1.2.8.5
diff -u -p -r1.2 -r1.2.8.5
--- src/sys/arch/i386/i386/lock_stubs.S	2007/02/09 21:55:04	1.2
+++ src/sys/arch/i386/i386/lock_stubs.S	2007/10/09 13:37:55	1.2.8.5
@@ -1,4 +1,4 @@
-/*	$NetBSD: lock_stubs.S,v 1.2 2007/02/09 21:55:04 ad Exp $	*/
+/*	$NetBSD: lock_stubs.S,v 1.2.8.5 2007/10/09 13:37:55 ad Exp $	*/
 
 /*-
  * Copyright (c) 2006, 2007 The NetBSD Foundation, Inc.
@@ -58,13 +58,9 @@
 
 #include "assym.h"
 
-#if defined(DIAGNOSTIC) || defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
-#define	FULL
-#endif
-
 #if defined(I386_CPU)
 #define	STUB(name, alternate)					\
-NENTRY(name) ;							\
+ENTRY(name) ;							\
 	cmpl	$CPUCLASS_386, _C_LABEL(cpu_class) ;		\
 	movl	4(%esp), %edx ;					\
 	je	_C_LABEL(alternate)
@@ -72,7 +68,7 @@
 #define	ALIGN32		.align	16	/* don't bother */
 #else
 #define	STUB(name, alternate)					\
-NENTRY(name) ;							\
+ENTRY(name) ;							\
 	movl	4(%esp), %edx
 #define	ALIGN64		.align	64
 #define	ALIGN32		.align	32
@@ -112,8 +108,6 @@ STUB(mutex_enter, mutex_vector_enter)	/
  * on multiprocessor systems, and comments in arch/x86/include/lock.h about
  * memory ordering on Intel x86 systems.
  */
-	ALIGN32
-
 STUB(mutex_exit, mutex_vector_exit)		/* 0x0020, 19 bytes */
 	movl	CPUVAR(CURLWP), %eax
 	xorl	%ecx, %ecx
@@ -126,8 +120,6 @@ STUB(mutex_exit, mutex_vector_exit)	/*
  *
  * Acquire one hold on a RW lock.
  */
-	ALIGN64
-
 STUB(rw_enter, rw_vector_enter)		/* 0x0040, 60 bytes */
 	cmpl	$RW_READER, 8(%esp)
 	jne	2f
@@ -160,8 +152,6 @@ STUB(rw_enter, rw_vector_enter)	/* 0x0
  *
  * Release one hold on a RW lock.
  */
-	ALIGN64
-
 STUB(rw_exit, rw_vector_exit)		/* 0x0080, 61 bytes */
 	movl	RW_OWNER(%edx), %eax
 	testb	$RW_WRITE_LOCKED, %al
@@ -203,25 +193,21 @@ STUB(rw_exit, rw_vector_exit)	/* 0x008
  *
  * Acquire a spin mutex and post a load fence.
  */
-	ALIGN64
-
-STUB(mutex_spin_enter, mutex_vector_enter)	/* 0x00c0, 51 bytes */
-	movl	CPUVAR(SELF150), %eax
-	movl	(CPU_INFO_ILEVEL-0x150)(%eax), %ecx
-	subl	$1, (CPU_INFO_MTX_COUNT-0x150)(%eax)/* decl does not set CF */
+STUB(mutex_spin_enter, mutex_vector_enter)
+	movl	CPUVAR(SELF), %eax
+	movl	CPU_INFO_ILEVEL(%eax), %ecx
+	subl	$1, CPU_INFO_MTX_COUNT(%eax)	/* decl does not set CF */
 	jnc	1f
-	movl	%ecx, (CPU_INFO_MTX_OLDSPL-0x150)(%eax)
+	movl	%ecx, CPU_INFO_MTX_OLDSPL(%eax)
 1:	movb	MTX_IPL(%edx), %ch
 	cmpb	%ch, %cl
 	jg,pn	2f
-	movb	%ch, (CPU_INFO_ILEVEL-0x150)(%eax)/* splraiseipl() */
+	movb	%ch, CPU_INFO_ILEVEL(%eax)	/* splraiseipl() */
 2:
-#if defined(FULL)
 	mov	$0x0100, %eax			/* new + expected value */
 	LOCK
 	cmpxchgb %ah, MTX_LOCK(%edx)		/* lock it */
 	jnz,pn	_C_LABEL(mutex_spin_retry)
-#endif
 	ret
 
 	ALIGN64
@@ -232,66 +218,56 @@ LABEL(mutex_spin_enter_end)
  *
 * Release a spin mutex and post a store fence.
 */
-	ALIGN64
-
-STUB(mutex_spin_exit, mutex_vector_exit)	/* 0x0100, 50 bytes */
-#if defined(DIAGNOSTIC)
+STUB(mutex_spin_exit, mutex_vector_exit)
 	movl	$0x0001, %eax			/* new + expected value */
 	cmpxchgb %ah, MTX_LOCK(%edx)
 	jnz,pn	_C_LABEL(mutex_vector_exit)
-#elif defined(MULTIPROCESSOR)
-	movb	$0x00,MTX_LOCK(%edx)
-#endif
-	movl	CPUVAR(SELF150), %eax
-	movl	(CPU_INFO_MTX_OLDSPL-0x150)(%eax), %ecx
-	incl	(CPU_INFO_MTX_COUNT-0x150)(%eax)
+	movl	CPUVAR(SELF), %eax
+	movl	CPU_INFO_MTX_OLDSPL(%eax), %ecx
+	incl	CPU_INFO_MTX_COUNT(%eax)
 	jnz	1f
-	cmpl	(CPU_INFO_ILEVEL-0x150)(%eax), %ecx
+	cmpl	CPU_INFO_ILEVEL(%eax), %ecx
 	movl	%ecx, 4(%esp)
 	jae	1f
-	movl	(CPU_INFO_IUNMASK-0x150)(%eax,%ecx,4), %edx
+	movl	CPU_INFO_IUNMASK(%eax,%ecx,4), %edx
 	cli
-	testl	(CPU_INFO_IPENDING-0x150)(%eax), %edx
+	testl	CPU_INFO_IPENDING(%eax), %edx
 	jnz	_C_LABEL(Xspllower)		/* does sti */
-	movl	%ecx, (CPU_INFO_ILEVEL-0x150)(%eax)
+	movl	%ecx, CPU_INFO_ILEVEL(%eax)
 	sti
 1:	ret
-
-	ALIGN64
+	nop			/* XXX round up */
+	.align	32
 LABEL(mutex_spin_exit_end)
 
-#if !defined(I386_CPU) && defined(I686_CPU) && !defined(DIAGNOSTIC)
-
 /*
  * Patch for i686 CPUs where cli/sti is prohibitavely expensive.
  * Must be the same size as mutex_spin_exit().
  */
-	ALIGN64
-
-ENTRY(i686_mutex_spin_exit)			/* 64 bytes */
+ENTRY(i686_mutex_spin_exit)
 	mov	4(%esp),%edx
-	xorl	%eax,%eax
 	pushl	%edi
 	fs
-	movl	(CPU_INFO_SELF150)(%eax), %edi		/* now splx() */
+	movl	CPUVAR(SELF), %edi		/* now splx() */
 	pushl	%ebx
-	movl	(CPU_INFO_MTX_OLDSPL-0x150)(%edi), %ecx
-	incl	(CPU_INFO_MTX_COUNT-0x150)(%edi)
-	movb	%al, MTX_LOCK(%edx)		/* zero */
-	movl	(CPU_INFO_ILEVEL-0x150)(%edi), %edx
+	movl	CPU_INFO_MTX_OLDSPL(%edi), %ecx
+	incl	CPU_INFO_MTX_COUNT(%edi)
+	movb	$0, MTX_LOCK(%edx)		/* zero */
+	movl	CPU_INFO_ILEVEL(%edi), %edx
 	jnz	1f
 	cmpl	%edx, %ecx			/* new level is lower? */
-	movl	(CPU_INFO_IPENDING-0x150)(%edi), %eax
 	jae,pn	1f
-	testl	%eax,(CPU_INFO_IUNMASK-0x150)(%edi,%ecx,4)
+0:
+	movl	CPU_INFO_IPENDING(%edi), %eax
+	testl	%eax,CPU_INFO_IUNMASK(%edi,%ecx,4)
 	movl	%eax, %ebx
 	/*
 	 * On a P4 this jump is cheaper than patching in junk using
 	 * cmovnz.  Is cmpxchg expensive if it fails?
 	 */
 	jnz,pn	2f
-	cmpxchg8b (CPU_INFO_ISTATE-0x150)(%edi)	/* swap in new ilevel */
-	jnz,pn	2f
+	cmpxchg8b CPU_INFO_ISTATE(%edi)		/* swap in new ilevel */
+	jnz,pn	0b
 1:
 	popl	%ebx
 	popl	%edi
@@ -302,18 +278,16 @@ ENTRY(i686_mutex_spin_exit)	/* 64 byte
 	movl	%ecx,4(%esp)
 LABEL(i686_mutex_spin_exit_patch)
 	jmp	_C_LABEL(Xspllower)
-	ALIGN64
+	.align	32
 LABEL(i686_mutex_spin_exit_end)
 
-#endif	/* !defined(I386_CPU) && defined(I686_CPU) && !defined(DIAGNOSTIC) */
-
 #else	/* !__XEN__ */
 
 /* For now; strong alias not working for some reason. */
-NENTRY(mutex_spin_enter)
+ENTRY(mutex_spin_enter)
 	jmp	_C_LABEL(mutex_vector_enter)
 
-NENTRY(mutex_spin_exit)
+ENTRY(mutex_spin_exit)
 	jmp	_C_LABEL(mutex_vector_exit)
 
 #endif	/* !__XEN__ */
@@ -325,9 +299,7 @@ NENTRY(mutex_spin_exit)
  *
  * Perform an atomic compare-and-set operation.
  */
-	ALIGN64
-
-STUB(_lock_cas, _80386_lock_cas)		/* 32 bytes */
+STUB(_lock_cas, _80386_lock_cas)
 	movl	8(%esp), %eax
 	movl	12(%esp), %ecx
 	LOCK
@@ -364,40 +336,38 @@ _80386_lock_cas:
  */
 	.align	8
 
-NENTRY(mb_read)
+ENTRY(mb_read)
 	lock
 	addl	$0, 0(%esp)
 	ret
 END(mb_read_end, 8)
 
-NENTRY(mb_write)
+ENTRY(mb_write)
 	/* Nothing at the moment.
 	 */
 	ret
 END(mb_write_end, 8)
 
-NENTRY(mb_memory)
+ENTRY(mb_memory)
 	lock
 	addl	$0, 0(%esp)
 	ret
 END(mb_memory_end, 8)
 
-#ifdef I686_CPU
-NENTRY(sse2_mb_read)
+ENTRY(sse2_mb_read)
 	lfence
 	ret
 END(sse2_mb_read_end, 8)
 
-NENTRY(sse2_mb_memory)
+ENTRY(sse2_mb_memory)
 	mfence
 	ret
 END(sse2_mb_memory_end, 8)
-#endif	/* I686_CPU */
 
 /*
  * Make sure code after the ret is properly encoded with nopness
  * by gas, or could stall newer processors.
  */
-NENTRY(x86_mb_nop)
+ENTRY(x86_mb_nop)
 	ret
 END(x86_mb_nop_end, 8)