Please note that diffs are not public domain; they are subject to the copyright notices on the relevant files.

===================================================================
RCS file: /ftp/cvs/cvsroot/src/sys/arch/i386/i386/locore.S,v
rcsdiff: /ftp/cvs/cvsroot/src/sys/arch/i386/i386/locore.S,v: warning: Unknown phrases like `commitid ...;' are present.
retrieving revision 1.78
retrieving revision 1.84
diff -u -p -r1.78 -r1.84
--- src/sys/arch/i386/i386/locore.S	2008/10/19 14:12:28	1.78
+++ src/sys/arch/i386/i386/locore.S	2009/03/08 16:03:31	1.84
@@ -1,4 +1,4 @@
-/*	$NetBSD: locore.S,v 1.78 2008/10/19 14:12:28 joerg Exp $	*/
+/*	$NetBSD: locore.S,v 1.84 2009/03/08 16:03:31 ad Exp $	*/
 
 /*
  * Copyright-o-rama!
@@ -71,11 +71,11 @@
 
 /*-
- * Copyright (c) 1998, 2000, 2004, 2006, 2007 The NetBSD Foundation, Inc.
+ * Copyright (c) 1998, 2000, 2004, 2006, 2007, 2009 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
  * This code is derived from software contributed to The NetBSD Foundation
- * by Charles M. Hannum.
+ * by Charles M. Hannum, and by Andrew Doran.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -134,10 +134,11 @@
  */
 
 #include <machine/asm.h>
-__KERNEL_RCSID(0, "$NetBSD: locore.S,v 1.78 2008/10/19 14:12:28 joerg Exp $");
+__KERNEL_RCSID(0, "$NetBSD: locore.S,v 1.84 2009/03/08 16:03:31 ad Exp $");
 
 #include "opt_compat_oldboot.h"
 #include "opt_ddb.h"
+#include "opt_modular.h"
 #include "opt_multiboot.h"
 #include "opt_realmem.h"
 #include "opt_vm86.h"
@@ -193,7 +194,7 @@ __KERNEL_RCSID(0, "$NetBSD: locore.S,v 1
 	.ascii	"GUEST_OS=netbsd,GUEST_VER=2.0,XEN_VER=2.0"
 #endif /* XEN3 */
 	.ascii	",LOADER=generic"
-#if (NKSYMS || defined(DDB) || defined(LKM)) && !defined(SYMTAB_SPACE)
+#if (NKSYMS || defined(DDB) || defined(MODULAR)) && !defined(SYMTAB_SPACE)
 	.ascii	",BSD_SYMTAB=yes"
 #endif
 	.byte	0
@@ -517,7 +518,7 @@ try586:	/* Use the `cpuid' instruction.
 
 	/* Find end of kernel image. */
 	movl	$RELOC(end),%edi
-#if (NKSYMS || defined(DDB) || defined(LKM)) && !defined(SYMTAB_SPACE)
+#if (NKSYMS || defined(DDB) || defined(MODULAR)) && !defined(SYMTAB_SPACE)
 	/* Save the symbols (if loaded). */
 	movl	RELOC(esym),%eax
 	testl	%eax,%eax
@@ -910,7 +911,7 @@ ENTRY(dumpsys)
 END(dumpsys)
 
 /*
- * struct lwp *cpu_switchto(struct lwp *oldlwp, struct newlwp,
+ * struct lwp *cpu_switchto(struct lwp *oldlwp, struct *newlwp,
  *		bool returning)
  *
  * 1. if (oldlwp != NULL), save its context.
@@ -1008,15 +1009,14 @@ ENTRY(cpu_switchto)
 2:
 #ifndef XEN
 	movl	$IPL_IPI,CPUVAR(ILEVEL)
-	movl	PCB_CR0(%ebx),%ecx
+	movl	PCB_CR0(%ebx),%ecx	/* has CR0_TS clear */
 	movl	%cr0,%edx
 
 	/*
 	 * If our floating point registers are on a different CPU,
 	 * set CR0_TS so we'll trap rather than reuse bogus state.
 	 */
-	movl	PCB_FPCPU(%ebx),%eax
-	cmpl	CPUVAR(SELF),%eax
+	cmpl	CPUVAR(FPCURLWP),%edi
 	je	3f
 
 	orl	$CR0_TS,%ecx
@@ -1100,7 +1100,7 @@ syscall1:
 	testl	%ebx,%ebx
 	jz	1f
 	pushl	$5f
-	call	_C_LABEL(printf)
+	call	_C_LABEL(panic)
 	addl	$4,%esp
 #ifdef DDB
 	int	$3
@@ -1151,14 +1151,14 @@ syscall1:
 	INTRFASTEXIT
 3:	STI(%eax)
 	pushl	$4f
-	call	_C_LABEL(printf)
+	call	_C_LABEL(panic)
 	addl	$4,%esp
 	pushl	$IPL_NONE
 	call	_C_LABEL(spllower)
 	addl	$4,%esp
 	jmp	.Lsyscall_checkast
-4:	.asciz	"WARNING: SPL NOT LOWERED ON SYSCALL EXIT\n"
-5:	.asciz	"WARNING: SPL NOT ZERO ON SYSCALL ENTRY\n"
+4:	.asciz	"SPL NOT LOWERED ON SYSCALL EXIT\n"
+5:	.asciz	"SPL NOT ZERO ON SYSCALL ENTRY\n"
 #endif /* DIAGNOSTIC */
 
 9:	cmpl	$0, CPUVAR(WANT_PMAPLOAD)
@@ -1177,6 +1177,38 @@ syscall1:
 	jmp	.Lsyscall_checkast	/* re-check ASTs */
 IDTVEC_END(syscall)
 
+IDTVEC(svr4_fasttrap)
+	pushl	$2		# size of instruction for restart
+	pushl	$T_ASTFLT	# trap # for doing ASTs
+	INTRENTRY
+	pushl	$RW_READER
+	pushl	$_C_LABEL(svr4_fasttrap_lock)
+	call	_C_LABEL(rw_enter)
+	addl	$8,%esp
+	call	*_C_LABEL(svr4_fasttrap_vec)
+	pushl	$_C_LABEL(svr4_fasttrap_lock)
+	call	_C_LABEL(rw_exit)
+	addl	$4,%esp
+2:	/* Check for ASTs on exit to user mode. */
+	cli
+	CHECK_ASTPENDING(%eax)
+	je	1f
+	/* Always returning to user mode here. */
+	CLEAR_ASTPENDING(%eax)
+	sti
+	/* Pushed T_ASTFLT into tf_trapno on entry. */
+	pushl	%esp
+	call	_C_LABEL(trap)
+	addl	$4,%esp
+	jmp	2b
+1:	CHECK_DEFERRED_SWITCH
+	jnz	9f
+	INTRFASTEXIT
+9:	sti
+	call	_C_LABEL(pmap_load)
+	cli
+	jmp	2b
+
 #if NNPX > 0
 /*
  * Special interrupt handlers.  Someday intr0-intr15 will be used to count
@@ -1241,7 +1273,7 @@ ENTRY(sse2_idlezero_page)
 	xorl	%eax, %eax
 	.align	16
 1:
-	cmpl	$0, CPUVAR(RESCHED)
+	testl	$RESCHED_KPREEMPT, CPUVAR(RESCHED)
 	jnz	2f
 	movnti	%eax, 0(%edx)
 	movnti	%eax, 4(%edx)
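
A note on the cpu_switchto hunk above: the new code compares the incoming lwp (held in %edi) against the per-CPU FPCURLWP field and, if they differ, sets CR0_TS in the CR0 image taken from the new lwp's PCB, so that the first floating-point instruction traps rather than reusing another lwp's FPU registers. The C fragment below is only an illustrative sketch of that decision under stated assumptions, not code from the tree; the field names (ci_fpcurlwp, pcb_cr0) mirror the assembly symbols in the diff, while the struct layouts and the helper switch_cr0() are hypothetical.

#include <stdint.h>

#define CR0_TS	0x00000008	/* x86 CR0 "task switched" flag */

struct lwp;					/* opaque here */
struct cpu_info { struct lwp *ci_fpcurlwp; };	/* lwp whose FPU state this CPU holds */
struct pcb	{ uint32_t pcb_cr0; };		/* saved CR0 image, CR0_TS clear */

/*
 * Hypothetical helper: compute the CR0 value to load when switching to
 * `newl` on CPU `ci`.  If this CPU's FPU does not hold newl's state,
 * set CR0_TS so the next FP instruction faults (DNA trap) and the FPU
 * state can be loaded lazily, instead of silently using bogus registers.
 */
static inline uint32_t
switch_cr0(const struct cpu_info *ci, const struct lwp *newl,
    const struct pcb *pcb)
{
	uint32_t cr0 = pcb->pcb_cr0;	/* has CR0_TS clear */

	if (ci->ci_fpcurlwp != newl)
		cr0 |= CR0_TS;
	return cr0;
}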