Please note that diffs are not public domain; they are subject to the copyright notices on the relevant files. =================================================================== RCS file: /ftp/cvs/cvsroot/src/sys/arch/i386/i386/locore.S,v rcsdiff: /ftp/cvs/cvsroot/src/sys/arch/i386/i386/locore.S,v: warning: Unknown phrases like `commitid ...;' are present. retrieving revision 1.34.2.10 retrieving revision 1.47.4.6 diff -u -p -r1.34.2.10 -r1.47.4.6 --- src/sys/arch/i386/i386/locore.S 2008/03/17 09:14:20 1.34.2.10 +++ src/sys/arch/i386/i386/locore.S 2007/10/09 13:37:55 1.47.4.6 @@ -1,74 +1,4 @@ -/* $NetBSD: locore.S,v 1.34.2.10 2008/03/17 09:14:20 yamt Exp $ */ - -/* - * Copyright-o-rama! - */ - -/* - * Copyright (c) 2006 Manuel Bouyer. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. All advertising materials mentioning features or use of this software - * must display the following acknowledgement: - * This product includes software developed by Manuel Bouyer. - * 4. The name of the author may not be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -/* - * Copyright (c) 2001 Wasabi Systems, Inc. - * All rights reserved. - * - * Written by Frank van der Linden for Wasabi Systems, Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. All advertising materials mentioning features or use of this software - * must display the following acknowledgement: - * This product includes software developed for the NetBSD Project by - * Wasabi Systems, Inc. - * 4. The name of Wasabi Systems, Inc. may not be used to endorse - * or promote products derived from this software without specific prior - * written permission. - * - * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. 
``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED - * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC - * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - +/* $NetBSD: locore.S,v 1.47.4.6 2007/10/09 13:37:55 ad Exp $ */ /*- * Copyright (c) 1998, 2000, 2004, 2006, 2007 The NetBSD Foundation, Inc. @@ -140,14 +70,11 @@ * @(#)locore.s 7.3 (Berkeley) 5/13/91 */ -#include -__KERNEL_RCSID(0, "$NetBSD: locore.S,v 1.34.2.10 2008/03/17 09:14:20 yamt Exp $"); - #include "opt_compat_oldboot.h" +#include "opt_cputype.h" #include "opt_ddb.h" #include "opt_realmem.h" #include "opt_vm86.h" -#include "opt_xen.h" #include "npx.h" #include "assym.h" @@ -163,48 +90,15 @@ __KERNEL_RCSID(0, "$NetBSD: locore.S,v 1 #include #include #include +#include +#include #include #include -#ifndef XEN -#include -#endif /* XXX temporary kluge; these should not be here */ /* Get definitions for IOM_BEGIN, IOM_END, and IOM_SIZE */ #include -#ifdef XEN -/* - * Xen guest identifier and loader selection - */ -.section __xen_guest -#ifdef XEN3 - .ascii "GUEST_OS=netbsd,GUEST_VER=3.0,XEN_VER=xen-3.0" -#if defined(DOM0OPS) || !defined(XEN_COMPAT_030001) - .ascii ",VIRT_BASE=0xc0000000" /* KERNBASE */ - .ascii ",ELF_PADDR_OFFSET=0xc0000000" /* KERNBASE */ -#else - .ascii ",VIRT_BASE=0xc0100000" /* KERNTEXTOFF */ - .ascii ",ELF_PADDR_OFFSET=0xc0100000" /* KERNTEXTOFF */ -#endif - .ascii ",VIRT_ENTRY=0xc0100000" /* KERNTEXTOFF */ -#if !defined(XEN_COMPAT_030001) - .ascii ",HYPERCALL_PAGE=0x00000101" - /* (???+HYPERCALL_PAGE_OFFSET)/PAGE_SIZE) */ -#endif -#ifdef PAE - .ascii ",PAE=yes[extended-cr3]" -#endif -#else /* !XEN3 */ - .ascii "GUEST_OS=netbsd,GUEST_VER=2.0,XEN_VER=2.0" -#endif /* XEN3 */ - .ascii ",LOADER=generic" -#if (NKSYMS || defined(DDB) || defined(LKM)) && !defined(SYMTAB_SPACE) - .ascii ",BSD_SYMTAB=yes" -#endif - .byte 0 -#endif - /* * Initialization */ @@ -245,16 +139,12 @@ _C_LABEL(cpu): .long 0 # are we 80486, _C_LABEL(atdevbase): .long 0 # location of start of iomem in virtual _C_LABEL(proc0uarea): .long 0 _C_LABEL(PDPpaddr): .long 0 # paddr of PDP, for libkvm -_C_LABEL(tablesize): .long 0 .space 512 tmpstk: -#ifndef XEN + #define _RELOC(x) ((x) - KERNBASE) -#else -#define _RELOC(x) ((x)) -#endif /* XEN */ #define RELOC(x) _RELOC(_C_LABEL(x)) .text @@ -262,7 +152,6 @@ tmpstk: .set _C_LABEL(kernel_text),KERNTEXTOFF .globl start -#ifndef XEN start: movw $0x1234,0x472 # warm boot #if defined(MULTIBOOT) @@ -495,27 +384,13 @@ try586: /* Use the `cpuid' instruction. 
/* * Virtual address space of kernel: * - * text | data | bss | [syms] | page dir | proc0 kstack | L1 ptp + * text | data | bss | [syms] | page dir | proc0 kstack * 0 1 2 3 */ - -#define PROC0_PDIR_OFF 0 -#define PROC0_STK_OFF (PROC0_PDIR_OFF + PAGE_SIZE) -#define PROC0_PTP1_OFF (PROC0_STK_OFF + UPAGES * PAGE_SIZE) - -/* - * fillkpt - * eax = pte (page frame | control | status) - * ebx = page table address - * ecx = number of pages to map - */ - -#define fillkpt \ -1: movl %eax,(%ebx) ; /* store phys addr */ \ - addl $4,%ebx ; /* next pte/pde */ \ - addl $PAGE_SIZE,%eax ; /* next phys page */ \ - loop 1b ; \ - +#define PROC0PDIR ((0) * PAGE_SIZE) +#define PROC0STACK ((1) * PAGE_SIZE) +#define SYSMAP ((1+UPAGES) * PAGE_SIZE) +#define TABLESIZE ((1+UPAGES) * PAGE_SIZE) /* + nkpde * PAGE_SIZE */ /* Find end of kernel image. */ movl $RELOC(end),%edi @@ -529,60 +404,67 @@ try586: /* Use the `cpuid' instruction. 1: #endif - /* Compute sizes */ + /* Calculate where to start the bootstrap tables. */ movl %edi,%esi # edi = esym ? esym : end addl $PGOFSET,%esi # page align up andl $~PGOFSET,%esi - /* nkptp[1] = (esi + ~L2_FRAME) >> L2_SHIFT + 1; */ - movl %esi,%eax - addl $~L2_FRAME,%eax - shrl $L2_SHIFT,%eax - incl %eax /* one more ptp for VAs stolen by bootstrap */ -1: movl %eax,RELOC(nkptp)+1*4 - - /* tablesize = (1 + UPAGES + nkptp) << PGSHIFT; */ - addl $(1+UPAGES),%eax - shll $PGSHIFT,%eax - movl %eax,RELOC(tablesize) - - /* ensure that nkptp covers bootstrap tables */ - addl %esi,%eax - addl $~L2_FRAME,%eax - shrl $L2_SHIFT,%eax - incl %eax - cmpl %eax,RELOC(nkptp)+1*4 - jnz 1b - - /* Clear tables */ - movl %esi,%edi + /* + * Calculate the size of the kernel page table directory, and + * how many entries it will have. Adjust nkpde to the actual + * kernel size automatically. Account for the bootstrap tables, + * round up, and add an extra 4MB. + */ + leal TABLESIZE+NBPD+PDOFSET(%edi),%eax + shrl $PDSHIFT,%eax + movl RELOC(nkpde),%ecx # get nkpde + cmpl %ecx,%eax + jb 1f + movl %eax,%ecx +1: cmpl $NKPTP_MIN,%ecx # larger than min? + jge 1f + movl $NKPTP_MIN,%ecx # set at min + jmp 2f +1: cmpl $NKPTP_MAX,%ecx # larger than max? + jle 2f + movl $NKPTP_MAX,%ecx +2: movl %ecx,RELOC(nkpde) + + /* Clear memory for bootstrap tables. */ + shll $PGSHIFT,%ecx + addl $TABLESIZE,%ecx + addl %esi,%ecx # end of tables + subl %edi,%ecx # size of tables + shrl $2,%ecx xorl %eax,%eax cld - movl RELOC(tablesize),%ecx - shrl $2,%ecx rep stosl - leal (PROC0_PTP1_OFF)(%esi), %ebx +/* + * fillkpt + * eax = pte (page frame | control | status) + * ebx = page table address + * ecx = number of pages to map + */ +#define fillkpt \ +1: movl %eax,(%ebx) ; \ + addl $PAGE_SIZE,%eax ; /* increment physical address */ \ + addl $4,%ebx ; /* next pte */ \ + loop 1b ; /* * Build initial page tables. */ - /* - * Compute &__data_start - KERNBASE. This can't be > 4G, - * or we can't deal with it anyway, since we can't load it in - * 32 bit mode. So use the bottom 32 bits. - */ - movl $RELOC(__data_start),%edx + /* Calculate end of text segment, rounded to a page. */ + leal (RELOC(etext)+PGOFSET),%edx andl $~PGOFSET,%edx - /* - * Skip the first MB. - */ + /* Skip over the first 1MB. */ movl $_RELOC(KERNTEXTOFF),%eax movl %eax,%ecx - shrl $(PGSHIFT-2),%ecx /* ((n >> PGSHIFT) << 2) for # pdes */ - addl %ecx,%ebx + shrl $PGSHIFT,%ecx + leal (SYSMAP)(%esi,%ecx,4),%ebx /* Map the kernel text read-only. */ movl %edx,%ecx @@ -593,13 +475,15 @@ try586: /* Use the `cpuid' instruction. /* Map the data, BSS, and bootstrap tables read-write. 
*/ leal (PG_V|PG_KW)(%edx),%eax - movl RELOC(tablesize),%ecx + movl RELOC(nkpde),%ecx + shll $PGSHIFT,%ecx + addl $TABLESIZE,%ecx addl %esi,%ecx # end of tables subl %edx,%ecx # subtract end of text shrl $PGSHIFT,%ecx fillkpt - /* Map ISA I/O mem (later atdevbase) */ + /* Map ISA I/O memory. */ movl $(IOM_BEGIN|PG_V|PG_KW/*|PG_N*/),%eax # having these bits set movl $(IOM_SIZE>>PGSHIFT),%ecx # for this many pte s, fillkpt @@ -607,40 +491,28 @@ try586: /* Use the `cpuid' instruction. /* * Construct a page table directory. */ - /* Set up top level entries for identity mapping */ - leal (PROC0_PDIR_OFF)(%esi),%ebx - leal (PROC0_PTP1_OFF)(%esi),%eax - orl $(PG_V|PG_KW), %eax - movl RELOC(nkptp)+1*4,%ecx + /* Install PDEs for temporary double map of kernel. */ + movl RELOC(nkpde),%ecx # for this many pde s, + leal (PROC0PDIR+0*4)(%esi),%ebx # which is where temp maps! + leal (SYSMAP+PG_V|PG_KW)(%esi),%eax # pte for KPT in proc 0, fillkpt - /* Set up top level entries for actual kernel mapping */ - leal (PROC0_PDIR_OFF + L2_SLOT_KERNBASE*4)(%esi),%ebx - leal (PROC0_PTP1_OFF)(%esi),%eax - orl $(PG_V|PG_KW), %eax - movl RELOC(nkptp)+1*4,%ecx + /* Map kernel PDEs. */ + movl RELOC(nkpde),%ecx # for this many pde s, + leal (PROC0PDIR+PDSLOT_KERN*4)(%esi),%ebx # kernel pde offset + leal (SYSMAP+PG_V|PG_KW)(%esi),%eax # pte for KPT in proc 0, fillkpt /* Install a PDE recursively mapping page directory as a page table! */ - leal (PROC0_PDIR_OFF + PDIR_SLOT_PTE*4)(%esi),%ebx - leal (PROC0_PDIR_OFF)(%esi),%eax - orl $(PG_V|PG_KW),%eax - movl %eax,(%ebx) - + leal (PROC0PDIR+PG_V|PG_KW)(%esi),%eax # pte for ptd + movl %eax,(PROC0PDIR+PDSLOT_PTE*4)(%esi) # recursive PD slot /* Save phys. addr of PDP, for libkvm. */ movl %esi,RELOC(PDPpaddr) - /* - * Startup checklist: - * 1. Load %cr3 with pointer to PDIR. - */ + /* Load base of page directory and enable mapping. */ movl %esi,%eax # phys address of ptd in proc 0 movl %eax,%cr3 # load ptd addr into mmu - - /* - * 2. Enable paging and the rest of it. - */ movl %cr0,%eax # get control word # enable paging & NPX emulation orl $(CR0_PE|CR0_PG|CR0_NE|CR0_TS|CR0_EM|CR0_MP),%eax @@ -650,26 +522,23 @@ try586: /* Use the `cpuid' instruction. ret begin: - /* - * We have arrived. - * There's no need anymore for the identity mapping in low - * memory, remove it. - */ - movl _C_LABEL(nkptp)+1*4,%ecx - leal (PROC0_PDIR_OFF)(%esi),%ebx # old, phys address of PDIR - addl $(KERNBASE), %ebx # new, virtual address of PDIR + /* Now running relocated at KERNBASE. Remove double mapping. */ + movl _C_LABEL(nkpde),%ecx # for this many pde s, + leal (PROC0PDIR+0*4)(%esi),%ebx # which is where temp maps! + addl $(KERNBASE), %ebx # now use relocated address 1: movl $0,(%ebx) - addl $4,%ebx + addl $4,%ebx # next pde loop 1b /* Relocate atdevbase. */ - movl $KERNBASE,%edx - addl _C_LABEL(tablesize),%edx + movl _C_LABEL(nkpde),%edx + shll $PGSHIFT,%edx + addl $(TABLESIZE+KERNBASE),%edx addl %esi,%edx movl %edx,_C_LABEL(atdevbase) /* Set up bootstrap stack. 
*/ - leal (PROC0_STK_OFF+KERNBASE)(%esi),%eax + leal (PROC0STACK+KERNBASE)(%esi),%eax movl %eax,_C_LABEL(proc0uarea) leal (KSTACK_SIZE-FRAMESIZE)(%eax),%esp movl %esi,(KSTACK_SIZE+PCB_CR3)(%eax) # pcb->pcb_cr3 @@ -688,7 +557,9 @@ begin: call _C_LABEL(initgdt) addl $4,%esp - movl _C_LABEL(tablesize),%eax + movl _C_LABEL(nkpde),%eax + shll $PGSHIFT,%eax + addl $TABLESIZE,%eax addl %esi,%eax # skip past stack and page tables pushl %eax @@ -714,97 +585,6 @@ begin: #endif /* SAFARI_FIFO_HACK */ call _C_LABEL(main) -#else /* XEN */ -start: - /* First, reset the PSL. */ - pushl $PSL_MBO - popfl - - cld -#ifdef XEN3 - movl %esp, %ebx # save start of available space -#else - movl %esi,%ebx # save start_info pointer -#endif - movl $_RELOC(tmpstk),%esp # bootstrap stack end location - - /* Clear BSS first so that there are no surprises... */ - xorl %eax,%eax - movl $RELOC(__bss_start),%edi - movl $RELOC(_end),%ecx - subl %edi,%ecx - rep stosb - - /* Copy the necessary stuff from start_info structure. */ - /* We need to copy shared_info early, so that sti/cli work */ - movl $RELOC(start_info_union),%edi - movl $128,%ecx - rep movsl - - /* Clear segment registers; always null in proc0. */ - xorl %eax,%eax - movw %ax,%fs - movw %ax,%gs - decl %eax - movl %eax,RELOC(cpu_info_primary)+CPU_INFO_LEVEL - - xorl %eax,%eax - cpuid - movl %eax,RELOC(cpu_info_primary)+CPU_INFO_LEVEL - - call xen_pmap_bootstrap - /* - * First avail returned by xen_pmap_bootstrap in %eax - */ - movl %eax, %esi; - movl %esi, _C_LABEL(proc0uarea) - -#define PROC0PDIR ((0) * PAGE_SIZE) -#define PROC0STACK ((1) * PAGE_SIZE) - - /* Set up bootstrap stack. */ - leal (KSTACK_SIZE-FRAMESIZE)(%eax),%esp - xorl %ebp,%ebp # mark end of frames - - addl $USPACE, %esi - subl $KERNBASE, %esi #init386 want a physical address - pushl %esi - call _C_LABEL(init386) # wire 386 chip for unix operation - addl $4,%esp - call _C_LABEL(main) - -#if defined(XEN3) && !defined(XEN_COMPAT_030001) -/* space for the hypercall call page */ -#define HYPERCALL_PAGE_OFFSET 0x1000 -.org HYPERCALL_PAGE_OFFSET -ENTRY(hypercall_page) -.skip 0x1000 -#endif /* defined(XEN3) && !defined(XEN_COMPAT_030001) */ - -/* - * void lgdt_finish(void); - * Finish load a new GDT pointer (do any necessary cleanup). - * XXX It's somewhat questionable whether reloading all the segment registers - * is necessary, since the actual descriptor data is not changed except by - * process creation and exit, both of which clean up via task switches. OTOH, - * this only happens at run time when the GDT is resized. - */ -/* LINTSTUB: Func: void lgdt_finish(void) */ -NENTRY(lgdt_finish) - movl $GSEL(GDATA_SEL, SEL_KPL),%eax - movw %ax,%ds - movw %ax,%es - movw %ax,%gs - movw %ax,%ss - movl $GSEL(GCPU_SEL, SEL_KPL),%eax - movw %ax,%fs - /* Reload code selector by doing intersegment return. */ - popl %eax - pushl $GSEL(GCODE_SEL, SEL_KPL) - pushl %eax - lret - -#endif /* XEN */ /* * void lwp_trampoline(void); @@ -824,7 +604,7 @@ NENTRY(lwp_trampoline) pushl %ebx call *%esi addl $4,%esp - DO_DEFERRED_SWITCH + DO_DEFERRED_SWITCH(%eax) INTRFASTEXIT /* NOTREACHED */ @@ -884,21 +664,6 @@ ENTRY(longjmp) ret /* - * void dumpsys(void) - * - * Mimic cpu_switchto() for postmortem debugging. - */ -ENTRY(dumpsys) - pushl %ebx # set up fake switchframe - pushl %esi # and save context - pushl %edi - movl %esp,_C_LABEL(dumppcb)+PCB_ESP - movl %ebp,_C_LABEL(dumppcb)+PCB_EBP - call _C_LABEL(dodumpsys) # dump! 
- addl $(3*4), %esp # unwind switchframe - ret - -/* * struct lwp *cpu_switchto(struct lwp *oldlwp, struct newlwp, * bool returning) * @@ -930,49 +695,25 @@ ENTRY(cpu_switchto) movl PCB_EBP(%ebx),%ebp movl PCB_ESP(%ebx),%esp - /* - * Set curlwp. This must be globally visible in order to permit - * non-interlocked mutex release. - */ - movl %edi,%ecx - xchgl %ecx,CPUVAR(CURLWP) + /* Set curlwp. */ + movl %edi,CPUVAR(CURLWP) /* Skip the rest if returning to a pinned LWP. */ testl %edx,%edx jnz 4f -#ifdef XEN - pushl %edi - call _C_LABEL(i386_switch_context) - addl $4,%esp -#else /* XEN */ - /* Switch ring0 esp */ - movl PCB_ESP0(%ebx),%eax - movl %eax,CPUVAR(ESP0) + /* Switch TSS. Reset "task busy" flag before loading. */ + movl %cr3,%eax + movl %eax,PCB_CR3(%ebx) # for TSS gates + movl CPUVAR(GDT),%eax + movl L_MD_TSS_SEL(%edi),%edx + andl $~0x0200,4(%eax,%edx, 1) + ltr %dx /* Don't bother with the rest if switching to a system process. */ testl $LW_SYSTEM,L_FLAG(%edi) jnz 4f - /* Restore thread-private %fs/%gs descriptors. */ - movl CPUVAR(GDT),%ecx - movl PCB_FSD(%ebx), %eax - movl PCB_FSD+4(%ebx), %edx - movl %eax, (GUFS_SEL*8)(%ecx) - movl %edx, (GUFS_SEL*8+4)(%ecx) - movl PCB_GSD(%ebx), %eax - movl PCB_GSD+4(%ebx), %edx - movl %eax, (GUGS_SEL*8)(%ecx) - movl %edx, (GUGS_SEL*8+4)(%ecx) -#endif /* XEN */ - - /* Switch I/O bitmap */ - movl PCB_IOMAP(%ebx),%eax - orl %eax,%eax - jnz,pn .Lcopy_iobitmap - movl $(IOMAP_INVALOFF << 16),CPUVAR(IOBASE) -.Liobitmap_done: - /* Is this process using RAS (restartable atomic sequences)? */ movl L_PROC(%edi),%eax cmpl $0,P_RASLIST(%eax) @@ -983,9 +724,7 @@ ENTRY(cpu_switchto) * FPU IPIs can alter the LWP's saved cr0. Dropping the priority * is deferred until mi_switch(), when cpu_switchto() returns. */ -2: -#ifndef XEN - movl $IPL_IPI,CPUVAR(ILEVEL) +2: movl $IPL_IPI,CPUVAR(ILEVEL) movl PCB_CR0(%ebx),%ecx movl %cr0,%edx @@ -1002,7 +741,6 @@ ENTRY(cpu_switchto) 3: cmpl %edx,%ecx je 4f movl %ecx,%cr0 -#endif /* XEN */ /* Return to the new LWP, returning 'oldlwp' in %eax. */ 4: movl %esi,%eax @@ -1023,21 +761,6 @@ ENTRY(cpu_switchto) movl %eax,TF_EIP(%ecx) jmp 2b -.Lcopy_iobitmap: - /* Copy I/O bitmap. */ - movl $(IOMAPSIZE/4),%ecx - pushl %esi - pushl %edi - movl %eax,%esi /* pcb_iomap */ - movl CPUVAR(SELF),%edi - leal CPU_INFO_IOMAP(%edi),%edi - rep - movsl - popl %edi - popl %esi - movl $((CPU_INFO_IOMAP - CPU_INFO_TSS) << 16),CPUVAR(IOBASE) - jmp .Liobitmap_done - /* * void savectx(struct pcb *pcb); * @@ -1090,7 +813,6 @@ syscall1: 1: #endif /* DIAGNOSTIC */ movl CPUVAR(CURLWP),%edx - incl CPUVAR(NSYSCALL) # count it atomically movl %esp,L_MD_REGS(%edx) # save pointer to frame movl L_PROC(%edx),%edx pushl %esp @@ -1098,61 +820,39 @@ syscall1: addl $4,%esp .Lsyscall_checkast: /* Check for ASTs on exit to user mode. */ - CLI(%eax) + cli CHECK_ASTPENDING(%eax) je 1f /* Always returning to user mode here. */ CLEAR_ASTPENDING(%eax) - STI(%eax) + sti /* Pushed T_ASTFLT into tf_trapno on entry. */ pushl %esp call _C_LABEL(trap) addl $4,%esp jmp .Lsyscall_checkast /* re-check ASTs */ -1: CHECK_DEFERRED_SWITCH +1: CHECK_DEFERRED_SWITCH(%eax) jnz 9f -#ifdef XEN - STIC(%eax) - jz 14f - call _C_LABEL(stipending) - testl %eax,%eax - jz 14f - /* process pending interrupts */ - CLI(%eax) - movl CPUVAR(ILEVEL), %ebx - movl $.Lsyscall_resume, %esi # address to resume loop at -.Lsyscall_resume: - movl %ebx,%eax # get cpl - movl CPUVAR(IUNMASK)(,%eax,4),%eax - andl CPUVAR(IPENDING),%eax # any non-masked bits left? 
- jz 17f - bsrl %eax,%eax - btrl %eax,CPUVAR(IPENDING) - movl CPUVAR(ISOURCES)(,%eax,4),%eax - jmp *IS_RESUME(%eax) -17: movl %ebx, CPUVAR(ILEVEL) #restore cpl - jmp .Lsyscall_checkast -14: -#endif /* XEN */ #ifndef DIAGNOSTIC INTRFASTEXIT #else /* DIAGNOSTIC */ cmpl $IPL_NONE,CPUVAR(ILEVEL) jne 3f INTRFASTEXIT -3: STI(%eax) +3: sti pushl $4f call _C_LABEL(printf) addl $4,%esp - pushl $IPL_NONE - call _C_LABEL(spllower) - addl $4,%esp - jmp .Lsyscall_checkast +#ifdef DDB + int $3 +#endif /* DDB */ + movl $IPL_NONE,CPUVAR(ILEVEL) + jmp 2b 4: .asciz "WARNING: SPL NOT LOWERED ON SYSCALL EXIT\n" 5: .asciz "WARNING: SPL NOT ZERO ON SYSCALL ENTRY\n" 6: .asciz "WARNING: WANT PMAPLOAD ON SYSCALL ENTRY\n" #endif /* DIAGNOSTIC */ -9: STI(%eax) +9: sti call _C_LABEL(pmap_load) jmp .Lsyscall_checkast /* re-check ASTs */ @@ -1264,3 +964,65 @@ ENTRY(sse2_copy_page) popl %ebx popl %ebp ret + +/* + * void sse2_zero_page(void *pg) + * + * Zero a page without polluting the cache. + */ +ENTRY(sse2_zero_page) + pushl %ebp + movl %esp,%ebp + movl 8(%esp), %edx + movl $PAGE_SIZE, %ecx + xorl %eax, %eax + .align 16 +1: + movnti %eax, 0(%edx) + movnti %eax, 4(%edx) + movnti %eax, 8(%edx) + movnti %eax, 12(%edx) + movnti %eax, 16(%edx) + movnti %eax, 20(%edx) + movnti %eax, 24(%edx) + movnti %eax, 28(%edx) + subl $32, %ecx + leal 32(%edx), %edx + jnz 1b + sfence + pop %ebp + ret + +/* + * void sse2_copy_page(void *src, void *dst) + * + * Copy a page without polluting the cache. + */ +ENTRY(sse2_copy_page) + pushl %ebp + pushl %ebx + pushl %esi + pushl %edi + movl 20(%esp), %esi + movl 24(%esp), %edi + movl $PAGE_SIZE, %ebp + .align 16 +1: + movl 0(%esi), %eax + movl 4(%esi), %ebx + movl 8(%esi), %ecx + movl 12(%esi), %edx + movnti %eax, 0(%edi) + movnti %ebx, 4(%edi) + movnti %ecx, 8(%edi) + movnti %edx, 12(%edi) + subl $16, %ebp + leal 16(%esi), %esi + leal 16(%edi), %edi + jnz 1b + sfence + popl %edi + popl %esi + popl %ebx + popl %ebp + ret
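
For readers following the bootstrap hunks above: both sides of the diff populate the initial page tables with a fillkpt macro, differing only in where the two increments sit in the loop body. The C sketch below models that loop under stated assumptions; it is illustrative only, and fillkpt_sketch, pte_t, and SKETCH_PAGE_SIZE are names invented for the sketch rather than anything defined in locore.S.

	#include <stdint.h>

	#define SKETCH_PAGE_SIZE 4096u		/* assumed 4 KiB i386 pages */

	typedef uint32_t pte_t;			/* assumed 32-bit (non-PAE) PTEs */

	/*
	 * Model of the fillkpt loop:
	 *   eax = pte (page frame | control | status)
	 *   ebx = page table address
	 *   ecx = number of pages to map
	 */
	static void
	fillkpt_sketch(pte_t *ptep, uint32_t pte, uint32_t npages)
	{
		while (npages-- > 0) {
			*ptep++ = pte;			/* store phys addr | bits */
			pte += SKETCH_PAGE_SIZE;	/* next physical page */
		}
	}

The kernel text, data/BSS/bootstrap-table, and ISA I/O mappings in the hunks above are all invocations of this one loop; they differ only in the control bits ORed into the starting PTE and in the page count loaded into %ecx before fillkpt runs.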
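
The sse2_zero_page and sse2_copy_page routines at the end of the diff use movnti non-temporal stores followed by sfence so that zeroing or copying a whole page does not displace useful cache lines. A rough C approximation of the zeroing side, using SSE2 intrinsics, is sketched below; zero_page_sketch is a hypothetical name, and the intrinsic loop only approximates the hand-unrolled assembly.

	#include <emmintrin.h>			/* SSE2 intrinsics */
	#include <stddef.h>

	/* Approximates sse2_zero_page: 32-bit non-temporal stores, then a fence. */
	static void
	zero_page_sketch(void *pg)
	{
		char *p = pg;			/* assumed page-aligned, 4096 bytes */

		for (size_t off = 0; off < 4096; off += 4)
			_mm_stream_si32((int *)(p + off), 0);	/* movnti */
		_mm_sfence();			/* order the streaming stores */
	}

The assembly version unrolls eight movnti stores per iteration and counts the page down 32 bytes at a time; sse2_copy_page follows the same pattern with four ordinary loads feeding four movnti stores per 16-byte step, again closing with sfence.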