/*	$NetBSD: locore.S,v 1.3 2002/12/17 19:47:15 gmcgarry Exp $	*/

/*-
 * Copyright (c) 1998, 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)locore.s	7.3 (Berkeley) 5/13/91
 */

#include "opt_cputype.h"
#include "opt_ddb.h"
#include "opt_ipkdb.h"
#include "opt_vm86.h"
#include "opt_user_ldt.h"
#include "opt_dummy_nops.h"
#include "opt_compat_oldboot.h"
#include "opt_multiprocessor.h"
#include "opt_lockdebug.h"
#include "opt_realmem.h"

#include "npx.h"
#include "assym.h"
#include "apm.h"
#include "lapic.h"
#include "ioapic.h"

#include <sys/errno.h>
#include <sys/syscall.h>

#include <machine/cputypes.h>
#include <machine/param.h>
#include <machine/pte.h>
#include <machine/segments.h>
#include <machine/specialreg.h>
#include <machine/trap.h>
#include <machine/bootinfo.h>

#if NLAPIC > 0
#include <machine/i82489reg.h>
#endif

/* LINTSTUB: include <sys/types.h> */
/* LINTSTUB: include <sys/systm.h> */
/* LINTSTUB: include <machine/cpu.h> */

#include <machine/asm.h>

#if defined(MULTIPROCESSOR)

#define SET_CURPROC(proc,cpu) \
	movl	CPUVAR(SELF),cpu	; \
	movl	proc,CPUVAR(CURPROC)	; \
	movl	cpu,P_CPU(proc)

#else

#define SET_CURPROC(proc,tcpu)		movl	proc,CPUVAR(CURPROC)
#define GET_CURPROC(reg)		movl	CPUVAR(CURPROC),reg

#endif

#define GET_CURPCB(reg)			movl	CPUVAR(CURPCB),reg
#define SET_CURPCB(reg)			movl	reg,CPUVAR(CURPCB)

#define CLEAR_RESCHED(reg)		movl	reg,CPUVAR(RESCHED)

/* XXX temporary kluge; these should not be here */
/* Get definitions for IOM_BEGIN, IOM_END, and IOM_SIZE */
#include <dev/isa/isareg.h>

/* Disallow old names for REALBASEMEM */
#ifdef BIOSBASEMEM
#error BIOSBASEMEM option deprecated; use REALBASEMEM only if memory size reported by latest boot block is incorrect
#endif

/* Disallow old names for REALEXTMEM */
#ifdef EXTMEM_SIZE
#error EXTMEM_SIZE option deprecated; use REALEXTMEM only if memory size reported by latest boot block is incorrect
#endif
#ifdef BIOSEXTMEM
#error BIOSEXTMEM option deprecated; use REALEXTMEM only if memory size reported by latest boot block is incorrect
#endif

#include <machine/frameasm.h>

#ifdef MULTIPROCESSOR
#include <machine/i82489reg.h>
#endif

/*
 * PTmap is recursive pagemap at top of virtual address space.
 * Within PTmap, the page directory can be found (third indirection).
 *
 * XXX 4 == sizeof pde
 */
	.set	_C_LABEL(PTmap),(PDSLOT_PTE << PDSHIFT)
	.set	_C_LABEL(PTD),(_C_LABEL(PTmap) + PDSLOT_PTE * NBPG)
	.set	_C_LABEL(PTDpde),(_C_LABEL(PTD) + PDSLOT_PTE * 4)

/*
 * APTmap, APTD is the alternate recursive pagemap.
 * It's used when modifying another process's page tables.
 *
 * XXX 4 == sizeof pde
 */
	.set	_C_LABEL(APTmap),(PDSLOT_APTE << PDSHIFT)
	.set	_C_LABEL(APTD),(_C_LABEL(APTmap) + PDSLOT_APTE * NBPG)
	.set	_C_LABEL(APTDpde),(_C_LABEL(PTD) + PDSLOT_APTE * 4)
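/*
 * Illustrative sketch (hypothetical helper, for exposition only): because the
 * page directory is installed as one of its own page tables, the PTE for any
 * virtual address can be found by simple arithmetic inside the PTmap window.
 * Roughly, in C:
 *
 *	pt_entry_t *
 *	kvtopte(vaddr_t va)			-- hypothetical helper
 *	{
 *		return ((pt_entry_t *)PTmap) + (va >> PGSHIFT);
 *	}
 *
 * PTD and PTDpde fall out of the same trick, one level up.
 */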
/*
 * Initialization
 */
	.data

	.globl	_C_LABEL(cpu)
	.globl	_C_LABEL(cpu_feature)
	.globl	_C_LABEL(esym),_C_LABEL(boothowto)
	.globl	_C_LABEL(bootinfo),_C_LABEL(atdevbase)
#ifdef COMPAT_OLDBOOT
	.globl	_C_LABEL(bootdev)
#endif
	.globl	_C_LABEL(proc0paddr),_C_LABEL(PTDpaddr)
	.globl	_C_LABEL(biosbasemem),_C_LABEL(biosextmem)
	.globl	_C_LABEL(gdt)
#ifdef I586_CPU
	.globl	_C_LABEL(idt)
#endif
	.globl	_C_LABEL(lapic_tpr)

#if NLAPIC > 0
#ifdef __ELF__
	.align	NBPG
#else
	.align	12
#endif
	.globl	_C_LABEL(local_apic), _C_LABEL(lapic_id)
_C_LABEL(local_apic):
	.space	LAPIC_ID
_C_LABEL(lapic_id):
	.long	0x00000000
	.space	LAPIC_TPRI-(LAPIC_ID+4)
_C_LABEL(lapic_tpr):
	.space	LAPIC_PPRI-LAPIC_TPRI
_C_LABEL(lapic_ppr):
	.space	LAPIC_ISR-LAPIC_PPRI
_C_LABEL(lapic_isr):
	.space	NBPG-LAPIC_ISR
#else
_C_LABEL(lapic_tpr):
	.long	0
#endif

_C_LABEL(cpu):		.long	0	# are we 386, 386sx, or 486,
					#   or Pentium, or..
_C_LABEL(cpu_feature):	.long	0	# feature flags from 'cpuid'
					#   instruction
_C_LABEL(esym):		.long	0	# ptr to end of syms
_C_LABEL(atdevbase):	.long	0	# location of start of iomem in virtual
_C_LABEL(proc0paddr):	.long	0
_C_LABEL(PTDpaddr):	.long	0	# paddr of PTD, for libkvm
#ifndef REALBASEMEM
_C_LABEL(biosbasemem):	.long	0	# base memory reported by BIOS
#else
_C_LABEL(biosbasemem):	.long	REALBASEMEM
#endif
#ifndef REALEXTMEM
_C_LABEL(biosextmem):	.long	0	# extended memory reported by BIOS
#else
_C_LABEL(biosextmem):	.long	REALEXTMEM
#endif

	.space	512
tmpstk:

#define	_RELOC(x)	((x) - KERNBASE)
#define	RELOC(x)	_RELOC(_C_LABEL(x))

	.text
	.globl	_C_LABEL(kernel_text)
	.set	_C_LABEL(kernel_text),KERNTEXTOFF

	.globl	start
start:	movw	$0x1234,0x472			# warm boot

	/*
	 * Load parameters from stack
	 * (howto, [bootdev], bootinfo, esym, basemem, extmem).
	 */
	movl	4(%esp),%eax
	movl	%eax,RELOC(boothowto)
#ifdef COMPAT_OLDBOOT
	movl	8(%esp),%eax
	movl	%eax,RELOC(bootdev)
#endif
	movl	12(%esp),%eax

	testl	%eax, %eax
	jz	1f
	movl	(%eax), %ebx		/* number of entries */
	movl	$RELOC(bootinfo), %edi
	movl	%ebx, (%edi)
	addl	$4, %edi
2:
	testl	%ebx, %ebx
	jz	1f
	addl	$4, %eax
	movl	(%eax), %ecx		/* address of entry */
	pushl	%eax
	pushl	(%ecx)			/* len */
	pushl	%ecx
	pushl	%edi
	addl	(%ecx), %edi		/* update dest pointer */
	cmpl	$_RELOC(_C_LABEL(bootinfo) + BOOTINFO_MAXSIZE), %edi
	jg	2f
	call	_C_LABEL(memcpy)
	addl	$12, %esp
	popl	%eax
	subl	$1, %ebx
	jmp	2b
2:	/* cleanup for overflow case */
	addl	$16, %esp
	movl	$RELOC(bootinfo), %edi
	subl	%ebx, (%edi)		/* correct number of entries */
1:

	movl	16(%esp),%eax
	testl	%eax,%eax
	jz	1f
	addl	$KERNBASE,%eax
1:	movl	%eax,RELOC(esym)

	movl	RELOC(biosextmem),%eax
	testl	%eax,%eax
	jnz	1f
	movl	20(%esp),%eax
	movl	%eax,RELOC(biosextmem)
1:
	movl	RELOC(biosbasemem),%eax
	testl	%eax,%eax
	jnz	1f
	movl	24(%esp),%eax
	movl	%eax,RELOC(biosbasemem)
1:

	/* First, reset the PSL. */
	pushl	$PSL_MBO
	popfl

	/* Clear segment registers; always null in proc0. */
	xorl	%eax,%eax
	movw	%ax,%fs
	movw	%ax,%gs
	decl	%eax
	movl	%eax,RELOC(cpu_info_primary)+CPU_INFO_LEVEL
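/*
 * Illustrative note: the CPU probes below all rely on the same EFLAGS trick,
 * namely trying to flip a feature bit and seeing whether it sticks.
 * Roughly, in C:
 *
 *	int
 *	eflags_bit_toggles(u_long bit)		-- hypothetical helper
 *	{
 *		u_long old = read_eflags();
 *		write_eflags(old ^ bit);
 *		return ((read_eflags() ^ old) & bit) != 0;
 *	}
 *
 * PSL_AC toggles only on a 486 or better; PSL_ID toggles only where the
 * `cpuid' instruction is implemented.
 */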
	/* Find out our CPU type. */

try386:	/* Try to toggle alignment check flag; does not exist on 386. */
	pushfl
	popl	%eax
	movl	%eax,%ecx
	orl	$PSL_AC,%eax
	pushl	%eax
	popfl
	pushfl
	popl	%eax
	xorl	%ecx,%eax
	andl	$PSL_AC,%eax
	pushl	%ecx
	popfl
	testl	%eax,%eax
	jnz	try486

	/*
	 * Try the test of a NexGen CPU -- ZF will not change on a DIV
	 * instruction on a NexGen, it will on an i386.  Documented in
	 * Nx586 Processor Recognition Application Note, NexGen, Inc.
	 */
	movl	$0x5555,%eax
	xorl	%edx,%edx
	movl	$2,%ecx
	divl	%ecx
	jnz	is386

isnx586:
	/*
	 * Don't try cpuid, as Nx586s reportedly don't support the
	 * PSL_ID bit.
	 */
	movl	$CPU_NX586,RELOC(cpu)
	jmp	2f

is386:	movl	$CPU_386,RELOC(cpu)
	jmp	2f

try486:	/* Try to toggle identification flag; does not exist on early 486s. */
	pushfl
	popl	%eax
	movl	%eax,%ecx
	xorl	$PSL_ID,%eax
	pushl	%eax
	popfl
	pushfl
	popl	%eax
	xorl	%ecx,%eax
	andl	$PSL_ID,%eax
	pushl	%ecx
	popfl
	testl	%eax,%eax
	jnz	try586
is486:	movl	$CPU_486,RELOC(cpu)
	/*
	 * Check Cyrix CPU
	 * Cyrix CPUs do not change the undefined flags following
	 * execution of the divide instruction which divides 5 by 2.
	 *
	 * Note: CPUID is enabled on M2, so it passes another way.
	 */
	pushfl
	movl	$0x5555, %eax
	xorl	%edx, %edx
	movl	$2, %ecx
	clc
	divl	%ecx
	jnc	trycyrix486
	popfl
	jmp	2f
trycyrix486:
	movl	$CPU_6x86,RELOC(cpu)	# set CPU type
	/*
	 * Check for Cyrix 486 CPU by seeing if the flags change during a
	 * divide.  This is documented in the Cx486SLC/e SMM Programmer's
	 * Guide.
	 */
	xorl	%edx,%edx
	cmpl	%edx,%edx		# set flags to known state
	pushfl
	popl	%ecx			# store flags in ecx
	movl	$-1,%eax
	movl	$4,%ebx
	divl	%ebx			# do a long division
	pushfl
	popl	%eax
	xorl	%ecx,%eax		# are the flags different?
	testl	$0x8d5,%eax		# only check C|PF|AF|Z|N|V
	jne	2f			# yes; must be Cyrix 6x86 CPU
	movl	$CPU_486DLC,RELOC(cpu)	# set CPU type

#ifndef CYRIX_CACHE_WORKS
	/* Disable caching of the ISA hole only. */
	invd
	movb	$CCR0,%al		# Configuration Register index (CCR0)
	outb	%al,$0x22
	inb	$0x23,%al
	orb	$(CCR0_NC1|CCR0_BARB),%al
	movb	%al,%ah
	movb	$CCR0,%al
	outb	%al,$0x22
	movb	%ah,%al
	outb	%al,$0x23
	invd
#else /* CYRIX_CACHE_WORKS */
	/* Set cache parameters */
	invd				# Start with guaranteed clean cache
	movb	$CCR0,%al		# Configuration Register index (CCR0)
	outb	%al,$0x22
	inb	$0x23,%al
	andb	$~CCR0_NC0,%al
#ifndef CYRIX_CACHE_REALLY_WORKS
	orb	$(CCR0_NC1|CCR0_BARB),%al
#else
	orb	$CCR0_NC1,%al
#endif
	movb	%al,%ah
	movb	$CCR0,%al
	outb	%al,$0x22
	movb	%ah,%al
	outb	%al,$0x23

	/* clear non-cacheable region 1 */
	movb	$(NCR1+2),%al
	outb	%al,$0x22
	movb	$NCR_SIZE_0K,%al
	outb	%al,$0x23

	/* clear non-cacheable region 2 */
	movb	$(NCR2+2),%al
	outb	%al,$0x22
	movb	$NCR_SIZE_0K,%al
	outb	%al,$0x23

	/* clear non-cacheable region 3 */
	movb	$(NCR3+2),%al
	outb	%al,$0x22
	movb	$NCR_SIZE_0K,%al
	outb	%al,$0x23

	/* clear non-cacheable region 4 */
	movb	$(NCR4+2),%al
	outb	%al,$0x22
	movb	$NCR_SIZE_0K,%al
	outb	%al,$0x23

	/* enable caching in CR0 */
	movl	%cr0,%eax
	andl	$~(CR0_CD|CR0_NW),%eax
	movl	%eax,%cr0
	invd
#endif /* CYRIX_CACHE_WORKS */

	jmp	2f

try586:	/* Use the `cpuid' instruction. */
	xorl	%eax,%eax
	cpuid
	movl	%eax,RELOC(cpu_info_primary)+CPU_INFO_LEVEL

2:
	/*
	 * Finished with old stack; load new %esp now instead of later so we
	 * can trace this code without having to worry about the trace trap
	 * clobbering the memory test or the zeroing of the bss+bootstrap page
	 * tables.
	 *
	 * The boot program should check:
	 * text+data <= &stack_variable - more_space_for_stack
	 * text+data+bss+pad+space_for_page_tables <= end_of_memory
	 * Oops, the gdt is in the carcass of the boot program so clearing
	 * the rest of memory is still not possible.
	 */
	movl	$_RELOC(tmpstk),%esp	# bootstrap stack end location

/*
 * Virtual address space of kernel:
 *
 * text | data | bss | [syms] | page dir | proc0 kstack
 *				0          1       2      3
 */
#define	PROC0PDIR	((0)		* NBPG)
#define	PROC0STACK	((1)		* NBPG)
#define	SYSMAP		((1+UPAGES)	* NBPG)
#define	TABLESIZE	((1+UPAGES)	* NBPG)	/* + nkpde * NBPG */
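/*
 * Sketch of the arithmetic that follows (informational): the bootstrap
 * tables occupy TABLESIZE + nkpde * NBPG, i.e. (1 + UPAGES + nkpde) * NBPG
 * bytes -- one page for the page directory, UPAGES pages for proc0's kernel
 * stack, and nkpde pages of kernel page table -- placed at the first page
 * boundary past the kernel image (or past the symbol table, if one was
 * loaded).
 */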
	/* Find end of kernel image. */
	movl	$RELOC(end),%edi
#if defined(DDB) && !defined(SYMTAB_SPACE)
	/* Save the symbols (if loaded). */
	movl	RELOC(esym),%eax
	testl	%eax,%eax
	jz	1f
	subl	$KERNBASE,%eax
	movl	%eax,%edi
1:
#endif

	/* Calculate where to start the bootstrap tables. */
	movl	%edi,%esi		# edi = esym ? esym : end
	addl	$PGOFSET,%esi		# page align up
	andl	$~PGOFSET,%esi

	/*
	 * Calculate the size of the kernel page table directory, and
	 * how many entries it will have.
	 */
	movl	RELOC(nkpde),%ecx	# get nkpde
	cmpl	$NKPTP_MIN,%ecx		# larger than min?
	jge	1f
	movl	$NKPTP_MIN,%ecx		# set at min
	jmp	2f
1:	cmpl	$NKPTP_MAX,%ecx		# larger than max?
	jle	2f
	movl	$NKPTP_MAX,%ecx
2:

	/* Clear memory for bootstrap tables. */
	shll	$PGSHIFT,%ecx
	addl	$TABLESIZE,%ecx
	addl	%esi,%ecx		# end of tables
	subl	%edi,%ecx		# size of tables
	shrl	$2,%ecx
	xorl	%eax,%eax
	cld
	rep
	stosl

/*
 * fillkpt
 *	eax = pte (page frame | control | status)
 *	ebx = page table address
 *	ecx = number of pages to map
 */
#define	fillkpt		\
1:	movl	%eax,(%ebx)	; 	\
	addl	$NBPG,%eax	; /* increment physical address */ \
	addl	$4,%ebx		; /* next pte */ \
	loop	1b		;

/*
 * Build initial page tables.
 */
	/* Calculate end of text segment, rounded to a page. */
	leal	(RELOC(etext)+PGOFSET),%edx
	andl	$~PGOFSET,%edx

	/* Skip over the first 1MB. */
	movl	$_RELOC(KERNTEXTOFF),%eax
	movl	%eax,%ecx
	shrl	$PGSHIFT,%ecx
	leal	(SYSMAP)(%esi,%ecx,4),%ebx

	/* Map the kernel text read-only. */
	movl	%edx,%ecx
	subl	%eax,%ecx
	shrl	$PGSHIFT,%ecx
	orl	$(PG_V|PG_KR),%eax
	fillkpt

	/* Map the data, BSS, and bootstrap tables read-write. */
	leal	(PG_V|PG_KW)(%edx),%eax
	movl	RELOC(nkpde),%ecx
	shll	$PGSHIFT,%ecx
	addl	$TABLESIZE,%ecx
	addl	%esi,%ecx		# end of tables
	subl	%edx,%ecx		# subtract end of text
	shrl	$PGSHIFT,%ecx
	fillkpt

	/* Map ISA I/O memory. */
	movl	$(IOM_BEGIN|PG_V|PG_KW/*|PG_N*/),%eax	# having these bits set
	movl	$(IOM_SIZE>>PGSHIFT),%ecx		# for this many pte s,
	fillkpt

/*
 * Construct a page table directory.
 */
	/* Install PDEs for temporary double map of kernel. */
	movl	RELOC(nkpde),%ecx			# for this many pde s,
	leal	(PROC0PDIR+0*4)(%esi),%ebx		# which is where temp maps!
	leal	(SYSMAP+PG_V|PG_KW)(%esi),%eax		# pte for KPT in proc 0,
	fillkpt

	/* Map kernel PDEs. */
	movl	RELOC(nkpde),%ecx			# for this many pde s,
	leal	(PROC0PDIR+PDSLOT_KERN*4)(%esi),%ebx	# kernel pde offset
	leal	(SYSMAP+PG_V|PG_KW)(%esi),%eax		# pte for KPT in proc 0,
	fillkpt

	/* Install a PDE recursively mapping page directory as a page table! */
	leal	(PROC0PDIR+PG_V|PG_KW)(%esi),%eax	# pte for ptd
	movl	%eax,(PROC0PDIR+PDSLOT_PTE*4)(%esi)	# recursive PD slot

	/* Save phys. addr of PTD, for libkvm. */
	movl	%esi,RELOC(PTDpaddr)

	/* Load base of page directory and enable mapping. */
	movl	%esi,%eax		# phys address of ptd in proc 0
	movl	%eax,%cr3		# load ptd addr into mmu
	movl	%cr0,%eax		# get control word
					# enable paging & NPX emulation
	orl	$(CR0_PE|CR0_PG|CR0_NE|CR0_TS|CR0_EM|CR0_MP),%eax
	movl	%eax,%cr0		# and let's page NOW!

	pushl	$begin			# jump to high mem
	ret

begin:
	/* Now running relocated at KERNBASE.  Remove double mapping. */
	movl	_C_LABEL(nkpde),%ecx		# for this many pde s,
	leal	(PROC0PDIR+0*4)(%esi),%ebx	# which is where temp maps!
	addl	$(KERNBASE), %ebx		# now use relocated address
1:	movl	$0,(%ebx)
	addl	$4,%ebx				# next pde
	loop	1b

	/* Relocate atdevbase. */
	movl	_C_LABEL(nkpde),%edx
	shll	$PGSHIFT,%edx
	addl	$(TABLESIZE+KERNBASE),%edx
	addl	%esi,%edx
	movl	%edx,_C_LABEL(atdevbase)
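/*
 * For reference, the fillkpt macro used above is roughly this C loop
 * (illustrative only; names are local to this sketch):
 *
 *	while (npages-- > 0) {
 *		*ptep++ = pa | bits;	-- PG_V plus PG_KR or PG_KW
 *		pa += NBPG;
 *	}
 *
 * %eax carries pa|bits, %ebx the pte pointer, and %ecx the page count.
 */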
	/* Set up bootstrap stack. */
	leal	(PROC0STACK+KERNBASE)(%esi),%eax
	movl	%eax,_C_LABEL(proc0paddr)
	leal	(USPACE-FRAMESIZE)(%eax),%esp
	movl	%esi,PCB_CR3(%eax)	# pcb->pcb_cr3
	xorl	%ebp,%ebp		# mark end of frames

	subl	$NGDT*8, %esp		# space for temporary gdt
	pushl	%esp
	call	_C_LABEL(initgdt)
	addl	$4,%esp

	movl	_C_LABEL(nkpde),%eax
	shll	$PGSHIFT,%eax
	addl	$TABLESIZE,%eax
	addl	%esi,%eax		# skip past stack and page tables

	pushl	%eax
	call	_C_LABEL(init386)	# wire 386 chip for unix operation
	addl	$4+NGDT*8,%esp		# pop temporary gdt

#ifdef SAFARI_FIFO_HACK
	movb	$5,%al
	movw	$0x37b,%dx
	outb	%al,%dx
	movw	$0x37f,%dx
	inb	%dx,%al
	movb	%al,%cl

	orb	$1,%cl

	movb	$5,%al
	movw	$0x37b,%dx
	outb	%al,%dx
	movw	$0x37f,%dx
	movb	%cl,%al
	outb	%al,%dx
#endif /* SAFARI_FIFO_HACK */

	call	_C_LABEL(main)

/*
 * void proc_trampoline(void);
 * This is a trampoline function pushed onto the stack of a newly created
 * process in order to do some additional setup.  The trampoline is entered by
 * cpu_switch()ing to the process, so we abuse the callee-saved registers used
 * by cpu_switch() to store the information about the stub to call.
 * NOTE: This function does not have a normal calling sequence!
 */
/* LINTSTUB: Func: void proc_trampoline(void) */
NENTRY(proc_trampoline)
#ifdef MULTIPROCESSOR
	call	_C_LABEL(proc_trampoline_mp)
#endif
	movl	$IPL_NONE,CPUVAR(ILEVEL)
	pushl	%ebx
	call	*%esi
	addl	$4,%esp
	INTRFASTEXIT
	/* NOTREACHED */

/*****************************************************************************/

/*
 * Signal trampoline; copied to top of user stack.
 */
/* LINTSTUB: Var: char sigcode[1], esigcode[1]; */
NENTRY(sigcode)
	/*
	 * Handler has returned here as if we called it.  The sigcontext
	 * is on the stack after the 3 args "we" pushed.
	 */
	leal	12(%esp),%eax		# get pointer to sigcontext
	movl	%eax,4(%esp)		# put it in the argument slot
					# fake return address already there
	movl	$SYS___sigreturn14,%eax
	int	$0x80			# enter kernel with args on stack
	movl	$SYS_exit,%eax
	int	$0x80			# exit if sigreturn fails
	.globl	_C_LABEL(esigcode)
_C_LABEL(esigcode):

/*****************************************************************************/

/*
 * The following primitives are used to fill and copy regions of memory.
 */

/*
 * XXX No section 9 man page for fillw.
 * fillw seems to be very sparsely used (only in pccons it seems.)
 * One wonders if it couldn't be done without.
 * -- Perry Metzger, May 7, 2001
 */

/*
 * void fillw(short pattern, void *addr, size_t len);
 * Write len copies of pattern at addr.
 */
/* LINTSTUB: Func: void fillw(short pattern, void *addr, size_t len) */
ENTRY(fillw)
	pushl	%edi
	movl	8(%esp),%eax
	movl	12(%esp),%edi
	movw	%ax,%cx
	rorl	$16,%eax
	movw	%cx,%ax
	cld
	movl	16(%esp),%ecx
	shrl	%ecx			# do longwords
	rep
	stosl
	movl	16(%esp),%ecx
	andl	$1,%ecx			# do remainder
	rep
	stosw
	popl	%edi
	ret
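/*
 * Note on kcopy() below: its `overlapping?' test computes (dst - src) as an
 * unsigned value and compares it against the length, the usual memmove-style
 * check.  Roughly, in C:
 *
 *	if ((uintptr_t)dst - (uintptr_t)src < len)
 *		copy backwards;		-- regions overlap, dst above src
 *	else
 *		copy forwards;
 */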
/*
 * int kcopy(const void *from, void *to, size_t len);
 * Copy len bytes, abort on fault.
 */
/* LINTSTUB: Func: int kcopy(const void *from, void *to, size_t len) */
ENTRY(kcopy)
	pushl	%esi
	pushl	%edi
	GET_CURPCB(%eax)		# load curpcb into eax and set on-fault
	pushl	PCB_ONFAULT(%eax)
	movl	$_C_LABEL(copy_fault), PCB_ONFAULT(%eax)

	movl	16(%esp),%esi
	movl	20(%esp),%edi
	movl	24(%esp),%ecx
	movl	%edi,%eax
	subl	%esi,%eax
	cmpl	%ecx,%eax		# overlapping?
	jb	1f
	cld				# nope, copy forward
	shrl	$2,%ecx			# copy by 32-bit words
	rep
	movsl
	movl	24(%esp),%ecx
	andl	$3,%ecx			# any bytes left?
	rep
	movsb

	GET_CURPCB(%edx)		# XXX save curpcb?
	popl	PCB_ONFAULT(%edx)
	popl	%edi
	popl	%esi
	xorl	%eax,%eax
	ret

	ALIGN_TEXT
1:	addl	%ecx,%edi		# copy backward
	addl	%ecx,%esi
	std
	andl	$3,%ecx			# any fractional bytes?
	decl	%edi
	decl	%esi
	rep
	movsb
	movl	24(%esp),%ecx		# copy remainder by 32-bit words
	shrl	$2,%ecx
	subl	$3,%esi
	subl	$3,%edi
	rep
	movsl
	cld

	GET_CURPCB(%edx)
	popl	PCB_ONFAULT(%edx)
	popl	%edi
	popl	%esi
	xorl	%eax,%eax
	ret

/*****************************************************************************/

/*
 * The following primitives are used to copy data in and out of the user's
 * address space.
 */

/*
 * Default to the lowest-common-denominator.  We will improve it
 * later.
 */
#if defined(I386_CPU)
#define	DEFAULT_COPYOUT		_C_LABEL(i386_copyout)
#define	DEFAULT_COPYIN		_C_LABEL(i386_copyin)
#elif defined(I486_CPU)
#define	DEFAULT_COPYOUT		_C_LABEL(i486_copyout)
#define	DEFAULT_COPYIN		_C_LABEL(i386_copyin)
#elif defined(I586_CPU)
#define	DEFAULT_COPYOUT		_C_LABEL(i486_copyout)	/* XXX */
#define	DEFAULT_COPYIN		_C_LABEL(i386_copyin)	/* XXX */
#elif defined(I686_CPU)
#define	DEFAULT_COPYOUT		_C_LABEL(i486_copyout)	/* XXX */
#define	DEFAULT_COPYIN		_C_LABEL(i386_copyin)	/* XXX */
#endif

	.data
	.globl	_C_LABEL(copyout_func)
_C_LABEL(copyout_func):
	.long	DEFAULT_COPYOUT

	.globl	_C_LABEL(copyin_func)
_C_LABEL(copyin_func):
	.long	DEFAULT_COPYIN

	.text

/*
 * int copyout(const void *from, void *to, size_t len);
 * Copy len bytes into the user's address space.
 * see copyout(9)
 */
/* LINTSTUB: Func: int copyout(const void *kaddr, void *uaddr, size_t len) */
ENTRY(copyout)
	jmp	*_C_LABEL(copyout_func)

#if defined(I386_CPU)
/* LINTSTUB: Func: int i386_copyout(const void *kaddr, void *uaddr, size_t len) */
ENTRY(i386_copyout)
	pushl	%esi
	pushl	%edi
	pushl	$0

	movl	16(%esp),%esi
	movl	20(%esp),%edi
	movl	24(%esp),%eax

	/*
	 * We check that the end of the destination buffer is not past the end
	 * of the user's address space.  If it's not, then we only need to
	 * check that each page is writable.  The 486 will do this for us; the
	 * 386 will not.  (We assume that pages in user space that are not
	 * writable by the user are not writable by the kernel either.)
	 */
	movl	%edi,%edx
	addl	%eax,%edx
	jc	_C_LABEL(copy_efault)
	cmpl	$VM_MAXUSER_ADDRESS,%edx
	ja	_C_LABEL(copy_efault)

	testl	%eax,%eax		# anything to do?
	jz	3f

	/*
	 * We have to check each PTE for (write) permission, since the CPU
	 * doesn't do it for us.
	 */

	/* Compute number of pages. */
	movl	%edi,%ecx
	andl	$PGOFSET,%ecx
	addl	%eax,%ecx
	decl	%ecx
	shrl	$PGSHIFT,%ecx

	/* Compute PTE offset for start address. */
	shrl	$PGSHIFT,%edi

	GET_CURPCB(%edx)
	movl	$2f,PCB_ONFAULT(%edx)

1:	/* Check PTE for each page. */
	testb	$PG_RW,_C_LABEL(PTmap)(,%edi,4)
	jz	2f

4:	incl	%edi
	decl	%ecx
	jns	1b

	movl	20(%esp),%edi
	movl	24(%esp),%eax
	jmp	3f

2:	/* Simulate a trap. */
	pushl	%ecx
	movl	%edi,%eax
	shll	$PGSHIFT,%eax
	pushl	%eax
	call	_C_LABEL(trapwrite)	# trapwrite(addr)
	addl	$4,%esp			# pop argument
	popl	%ecx
	testl	%eax,%eax		# if not ok, return EFAULT
	jz	4b
	jmp	_C_LABEL(copy_efault)

3:	GET_CURPCB(%edx)
	movl	$_C_LABEL(copy_fault),PCB_ONFAULT(%edx)

	/* bcopy(%esi, %edi, %eax); */
	cld
	movl	%eax,%ecx
	shrl	$2,%ecx
	rep
	movsl
	movl	%eax,%ecx
	andl	$3,%ecx
	rep
	movsb

	popl	PCB_ONFAULT(%edx)
	popl	%edi
	popl	%esi
	xorl	%eax,%eax
	ret
#endif /* I386_CPU */
#if defined(I486_CPU) || defined(I586_CPU) || defined(I686_CPU)
/* LINTSTUB: Func: int i486_copyout(const void *kaddr, void *uaddr, size_t len) */
ENTRY(i486_copyout)
	pushl	%esi
	pushl	%edi
	pushl	$0

	movl	16(%esp),%esi
	movl	20(%esp),%edi
	movl	24(%esp),%eax

	/*
	 * We check that the end of the destination buffer is not past the end
	 * of the user's address space.
	 */
	movl	%edi,%edx
	addl	%eax,%edx
	jc	_C_LABEL(copy_efault)
	cmpl	$VM_MAXUSER_ADDRESS,%edx
	ja	_C_LABEL(copy_efault)

	GET_CURPCB(%edx)
	movl	$_C_LABEL(copy_fault),PCB_ONFAULT(%edx)

	/* bcopy(%esi, %edi, %eax); */
	cld
	movl	%eax,%ecx
	shrl	$2,%ecx
	rep
	movsl
	movl	%eax,%ecx
	andl	$3,%ecx
	rep
	movsb

	popl	PCB_ONFAULT(%edx)
	popl	%edi
	popl	%esi
	xorl	%eax,%eax
	ret
#endif /* I486_CPU || I586_CPU || I686_CPU */

/*
 * int copyin(const void *from, void *to, size_t len);
 * Copy len bytes from the user's address space.
 * see copyin(9)
 */
/* LINTSTUB: Func: int copyin(const void *uaddr, void *kaddr, size_t len) */
ENTRY(copyin)
	jmp	*_C_LABEL(copyin_func)

#if defined(I386_CPU) || defined(I486_CPU) || defined(I586_CPU) || \
    defined(I686_CPU)
/* LINTSTUB: Func: int i386_copyin(const void *uaddr, void *kaddr, size_t len) */
ENTRY(i386_copyin)
	pushl	%esi
	pushl	%edi
	GET_CURPCB(%eax)
	pushl	$0
	movl	$_C_LABEL(copy_fault),PCB_ONFAULT(%eax)

	movl	16(%esp),%esi
	movl	20(%esp),%edi
	movl	24(%esp),%eax

	/*
	 * We check that the end of the destination buffer is not past the end
	 * of the user's address space.  If it's not, then we only need to
	 * check that each page is readable, and the CPU will do that for us.
	 */
	movl	%esi,%edx
	addl	%eax,%edx
	jc	_C_LABEL(copy_efault)
	cmpl	$VM_MAXUSER_ADDRESS,%edx
	ja	_C_LABEL(copy_efault)

	/* bcopy(%esi, %edi, %eax); */
	cld
	movl	%eax,%ecx
	shrl	$2,%ecx
	rep
	movsl
	movl	%eax,%ecx
	andl	$3,%ecx
	rep
	movsb

	GET_CURPCB(%edx)
	popl	PCB_ONFAULT(%edx)
	popl	%edi
	popl	%esi
	xorl	%eax,%eax
	ret
#endif /* I386_CPU || I486_CPU || I586_CPU || I686_CPU */

/* LINTSTUB: Ignore */
NENTRY(copy_efault)
	movl	$EFAULT,%eax

/* LINTSTUB: Ignore */
NENTRY(copy_fault)
	GET_CURPCB(%edx)
	popl	PCB_ONFAULT(%edx)
	popl	%edi
	popl	%esi
	ret
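/*
 * A note on the on-fault convention used above (sketch only): each copy
 * routine stores the address of a recovery label in curpcb->pcb_onfault
 * before touching user memory.  If the access faults, trap() notices the
 * non-NULL pcb_onfault and resumes execution there instead of panicking;
 * the recovery code (copy_efault/copy_fault) then unwinds and returns
 * EFAULT.  In C terms the pattern is roughly:
 *
 *	curpcb->pcb_onfault = &&fail;	-- hypothetical label-address syntax
 *	... copy ...
 *	curpcb->pcb_onfault = NULL;
 *	return 0;
 *  fail:
 *	curpcb->pcb_onfault = NULL;
 *	return EFAULT;
 */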
/*
 * int copyoutstr(const void *from, void *to, size_t maxlen, size_t *lencopied);
 * Copy a NUL-terminated string, at most maxlen characters long, into the
 * user's address space.  Return the number of characters copied (including the
 * NUL) in *lencopied.  If the string is too long, return ENAMETOOLONG; else
 * return 0 or EFAULT.
 * see copyoutstr(9)
 */
/* LINTSTUB: Func: int copyoutstr(const void *kaddr, void *uaddr, size_t len, size_t *done) */
ENTRY(copyoutstr)
	pushl	%esi
	pushl	%edi

	movl	12(%esp),%esi		# esi = from
	movl	16(%esp),%edi		# edi = to
	movl	20(%esp),%edx		# edx = maxlen

#if defined(I386_CPU)
#if defined(I486_CPU) || defined(I586_CPU) || defined(I686_CPU)
	cmpl	$CPUCLASS_386,_C_LABEL(cpu_class)
	jne	5f
#endif /* I486_CPU || I586_CPU || I686_CPU */

	/* Compute number of bytes in first page. */
	movl	%edi,%eax
	andl	$PGOFSET,%eax
	movl	$NBPG,%ecx
	subl	%eax,%ecx		# ecx = NBPG - (src % NBPG)

	GET_CURPCB(%eax)
	movl	$6f,PCB_ONFAULT(%eax)

1:	/*
	 * Once per page, check that we are still within the bounds of user
	 * space, and check for a write fault.
	 */
	cmpl	$VM_MAXUSER_ADDRESS,%edi
	jae	_C_LABEL(copystr_efault)

	/* Compute PTE offset. */
	movl	%edi,%eax
	shrl	$PGSHIFT,%eax		# calculate pte address
	testb	$PG_RW,_C_LABEL(PTmap)(,%eax,4)
	jnz	2f

6:	/* Simulate a trap. */
	pushl	%edx
	pushl	%edi
	call	_C_LABEL(trapwrite)	# trapwrite(addr)
	addl	$4,%esp			# clear argument from stack
	popl	%edx
	testl	%eax,%eax
	jnz	_C_LABEL(copystr_efault)

2:	/* Copy up to end of this page. */
	subl	%ecx,%edx		# predecrement total count
	jnc	3f
	addl	%edx,%ecx		# ecx += (edx - ecx) = edx
	xorl	%edx,%edx

3:	decl	%ecx
	js	4f
	lodsb
	stosb
	testb	%al,%al
	jnz	3b

	/* Success -- 0 byte reached. */
	addl	%ecx,%edx		# add back residual for this page
	xorl	%eax,%eax
	jmp	copystr_return

4:	/* Go to next page, if any. */
	movl	$NBPG,%ecx
	testl	%edx,%edx
	jnz	1b

	/* edx is zero -- return ENAMETOOLONG. */
	movl	$ENAMETOOLONG,%eax
	jmp	copystr_return
#endif /* I386_CPU */

#if defined(I486_CPU) || defined(I586_CPU) || defined(I686_CPU)
5:	GET_CURPCB(%eax)
	movl	$_C_LABEL(copystr_fault),PCB_ONFAULT(%eax)
	/*
	 * Get min(%edx, VM_MAXUSER_ADDRESS-%edi).
	 */
	movl	$VM_MAXUSER_ADDRESS,%eax
	subl	%edi,%eax
	cmpl	%edx,%eax
	jae	1f
	movl	%eax,%edx
	movl	%eax,20(%esp)

1:	incl	%edx
	cld

1:	decl	%edx
	jz	2f
	lodsb
	stosb
	testb	%al,%al
	jnz	1b

	/* Success -- 0 byte reached. */
	decl	%edx
	xorl	%eax,%eax
	jmp	copystr_return

2:	/* edx is zero -- return EFAULT or ENAMETOOLONG. */
	cmpl	$VM_MAXUSER_ADDRESS,%edi
	jae	_C_LABEL(copystr_efault)
	movl	$ENAMETOOLONG,%eax
	jmp	copystr_return
#endif /* I486_CPU || I586_CPU || I686_CPU */

/*
 * int copyinstr(const void *from, void *to, size_t maxlen, size_t *lencopied);
 * Copy a NUL-terminated string, at most maxlen characters long, from the
 * user's address space.  Return the number of characters copied (including the
 * NUL) in *lencopied.  If the string is too long, return ENAMETOOLONG; else
 * return 0 or EFAULT.
 * see copyinstr(9)
 */
/* LINTSTUB: Func: int copyinstr(const void *uaddr, void *kaddr, size_t len, size_t *done) */
ENTRY(copyinstr)
	pushl	%esi
	pushl	%edi
	GET_CURPCB(%ecx)
	movl	$_C_LABEL(copystr_fault),PCB_ONFAULT(%ecx)

	movl	12(%esp),%esi		# %esi = from
	movl	16(%esp),%edi		# %edi = to
	movl	20(%esp),%edx		# %edx = maxlen

	/*
	 * Get min(%edx, VM_MAXUSER_ADDRESS-%esi).
	 */
	movl	$VM_MAXUSER_ADDRESS,%eax
	subl	%esi,%eax
	cmpl	%edx,%eax
	jae	1f
	movl	%eax,%edx
	movl	%eax,20(%esp)

1:	incl	%edx
	cld

1:	decl	%edx
	jz	2f
	lodsb
	stosb
	testb	%al,%al
	jnz	1b

	/* Success -- 0 byte reached. */
	decl	%edx
	xorl	%eax,%eax
	jmp	copystr_return

2:	/* edx is zero -- return EFAULT or ENAMETOOLONG. */
	cmpl	$VM_MAXUSER_ADDRESS,%esi
	jae	_C_LABEL(copystr_efault)
	movl	$ENAMETOOLONG,%eax
	jmp	copystr_return

/* LINTSTUB: Ignore */
NENTRY(copystr_efault)
	movl	$EFAULT,%eax

/* LINTSTUB: Ignore */
NENTRY(copystr_fault)
copystr_return:
	/* Set *lencopied and return %eax. */
	GET_CURPCB(%ecx)
	movl	$0,PCB_ONFAULT(%ecx)
	movl	20(%esp),%ecx
	subl	%edx,%ecx
	movl	24(%esp),%edx
	testl	%edx,%edx
	jz	8f
	movl	%ecx,(%edx)

8:	popl	%edi
	popl	%esi
	ret

/*
 * int copystr(const void *from, void *to, size_t maxlen, size_t *lencopied);
 * Copy a NUL-terminated string, at most maxlen characters long.  Return the
 * number of characters copied (including the NUL) in *lencopied.  If the
 * string is too long, return ENAMETOOLONG; else return 0.
 * see copystr(9)
 */
/* LINTSTUB: Func: int copystr(const void *kfaddr, void *kdaddr, size_t len, size_t *done) */
ENTRY(copystr)
	pushl	%esi
	pushl	%edi

	movl	12(%esp),%esi		# esi = from
	movl	16(%esp),%edi		# edi = to
	movl	20(%esp),%edx		# edx = maxlen
	incl	%edx
	cld

1:	decl	%edx
	jz	4f
	lodsb
	stosb
	testb	%al,%al
	jnz	1b

	/* Success -- 0 byte reached. */
	decl	%edx
	xorl	%eax,%eax
	jmp	6f

4:	/* edx is zero -- return ENAMETOOLONG. */
	movl	$ENAMETOOLONG,%eax

6:	/* Set *lencopied and return %eax. */
	movl	20(%esp),%ecx
	subl	%edx,%ecx
	movl	24(%esp),%edx
	testl	%edx,%edx
	jz	7f
	movl	%ecx,(%edx)

7:	popl	%edi
	popl	%esi
	ret

/*
 * long fuword(const void *uaddr);
 * Fetch an int from the user's address space.
 * see fuword(9)
 */
/* LINTSTUB: Func: long fuword(const void *base) */
ENTRY(fuword)
	movl	4(%esp),%edx
	cmpl	$VM_MAXUSER_ADDRESS-4,%edx
	ja	_C_LABEL(fusuaddrfault)
	GET_CURPCB(%ecx)
	movl	$_C_LABEL(fusufault),PCB_ONFAULT(%ecx)
	movl	(%edx),%eax
	movl	$0,PCB_ONFAULT(%ecx)
	ret
/*
 * int fusword(const void *uaddr);
 * Fetch a short from the user's address space.
 * see fusword(9)
 */
/* LINTSTUB: Func: int fusword(const void *base) */
ENTRY(fusword)
	movl	4(%esp),%edx
	cmpl	$VM_MAXUSER_ADDRESS-2,%edx
	ja	_C_LABEL(fusuaddrfault)
	GET_CURPCB(%ecx)
	movl	$_C_LABEL(fusufault),PCB_ONFAULT(%ecx)
	movzwl	(%edx),%eax
	movl	$0,PCB_ONFAULT(%ecx)
	ret

/*
 * int fuswintr(const void *uaddr);
 * Fetch a short from the user's address space.  Can be called during an
 * interrupt.
 * see fuswintr(9)
 */
/* LINTSTUB: Func: int fuswintr(const void *base) */
ENTRY(fuswintr)
	movl	4(%esp),%edx
	cmpl	$VM_MAXUSER_ADDRESS-2,%edx
	ja	_C_LABEL(fusuaddrfault)
	movl	CPUVAR(CURPROC),%ecx
	movl	P_ADDR(%ecx),%ecx
	movl	$_C_LABEL(fusubail),PCB_ONFAULT(%ecx)
	movzwl	(%edx),%eax
	movl	$0,PCB_ONFAULT(%ecx)
	ret

/*
 * int fubyte(const void *uaddr);
 * Fetch a byte from the user's address space.
 * see fubyte(9)
 */
/* LINTSTUB: Func: int fubyte(const void *base) */
ENTRY(fubyte)
	movl	4(%esp),%edx
	cmpl	$VM_MAXUSER_ADDRESS-1,%edx
	ja	_C_LABEL(fusuaddrfault)
	GET_CURPCB(%ecx)
	movl	$_C_LABEL(fusufault),PCB_ONFAULT(%ecx)
	movzbl	(%edx),%eax
	movl	$0,PCB_ONFAULT(%ecx)
	ret

/*
 * Handle faults from [fs]u*().  Clean up and return -1.
 */
/* LINTSTUB: Ignore */
NENTRY(fusufault)
	movl	$0,PCB_ONFAULT(%ecx)
	movl	$-1,%eax
	ret

/*
 * Handle faults from [fs]u*().  Clean up and return -1.  This differs from
 * fusufault() in that trap() will recognize it and return immediately rather
 * than trying to page fault.
 */
/* LINTSTUB: Ignore */
NENTRY(fusubail)
	movl	$0,PCB_ONFAULT(%ecx)
	movl	$-1,%eax
	ret

/*
 * Handle earlier faults from [fs]u*(), due to out-of-range addresses.
 */
/* LINTSTUB: Ignore */
NENTRY(fusuaddrfault)
	movl	$-1,%eax
	ret

/*
 * int suword(void *uaddr, long x);
 * Store an int in the user's address space.
 * see suword(9)
 */
/* LINTSTUB: Func: int suword(void *base, long c) */
ENTRY(suword)
	movl	4(%esp),%edx
	cmpl	$VM_MAXUSER_ADDRESS-4,%edx
	ja	_C_LABEL(fusuaddrfault)

#if defined(I386_CPU)
#if defined(I486_CPU) || defined(I586_CPU) || defined(I686_CPU)
	cmpl	$CPUCLASS_386,_C_LABEL(cpu_class)
	jne	2f
#endif /* I486_CPU || I586_CPU || I686_CPU */

	GET_CURPCB(%eax)
	movl	$3f,PCB_ONFAULT(%eax)

	movl	%edx,%eax
	shrl	$PGSHIFT,%eax		# calculate pte address
	testb	$PG_RW,_C_LABEL(PTmap)(,%eax,4)
	jnz	1f

3:	/* Simulate a trap. */
	pushl	%edx
	pushl	%edx
	call	_C_LABEL(trapwrite)	# trapwrite(addr)
	addl	$4,%esp			# clear parameter from the stack
	popl	%edx
	GET_CURPCB(%ecx)
	testl	%eax,%eax
	jnz	_C_LABEL(fusufault)

1:	/* XXX also need to check the following 3 bytes for validity! */
#endif

2:	GET_CURPCB(%ecx)
	movl	$_C_LABEL(fusufault),PCB_ONFAULT(%ecx)

	movl	8(%esp),%eax
	movl	%eax,(%edx)
	xorl	%eax,%eax
	movl	%eax,PCB_ONFAULT(%ecx)
	ret
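/*
 * Usage note (informational): the fusu routines above return -1 on any
 * failure, so callers are expected to check for -1, e.g.
 *
 *	if (subyte(uaddr, v) == -1)
 *		return (EFAULT);
 *
 * For fuword() a fetched value of 0xffffffff is indistinguishable from an
 * error; callers that care must avoid that value or validate the address
 * some other way.
 */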
/*
 * int susword(void *uaddr, short x);
 * Store a short in the user's address space.
 * see susword(9)
 */
/* LINTSTUB: Func: int susword(void *base, short c) */
ENTRY(susword)
	movl	4(%esp),%edx
	cmpl	$VM_MAXUSER_ADDRESS-2,%edx
	ja	_C_LABEL(fusuaddrfault)

#if defined(I386_CPU)
#if defined(I486_CPU) || defined(I586_CPU) || defined(I686_CPU)
	cmpl	$CPUCLASS_386,_C_LABEL(cpu_class)
	jne	2f
#endif /* I486_CPU || I586_CPU || I686_CPU */

	GET_CURPCB(%eax)
	movl	$3f,PCB_ONFAULT(%eax)

	movl	%edx,%eax
	shrl	$PGSHIFT,%eax		# calculate pte address
	testb	$PG_RW,_C_LABEL(PTmap)(,%eax,4)
	jnz	1f

3:	/* Simulate a trap. */
	pushl	%edx
	pushl	%edx
	call	_C_LABEL(trapwrite)	# trapwrite(addr)
	addl	$4,%esp			# clear parameter from the stack
	popl	%edx
	GET_CURPCB(%ecx)
	testl	%eax,%eax
	jnz	_C_LABEL(fusufault)

1:	/* XXX also need to check the following byte for validity! */
#endif

2:	GET_CURPCB(%ecx)
	movl	$_C_LABEL(fusufault),PCB_ONFAULT(%ecx)

	movl	8(%esp),%eax
	movw	%ax,(%edx)
	xorl	%eax,%eax
	movl	%eax,PCB_ONFAULT(%ecx)
	ret

/*
 * int suswintr(void *uaddr, short x);
 * Store a short in the user's address space.  Can be called during an
 * interrupt.
 * see suswintr(9)
 */
/* LINTSTUB: Func: int suswintr(void *base, short c) */
ENTRY(suswintr)
	movl	4(%esp),%edx
	cmpl	$VM_MAXUSER_ADDRESS-2,%edx
	ja	_C_LABEL(fusuaddrfault)
	movl	CPUVAR(CURPROC),%ecx
	movl	P_ADDR(%ecx),%ecx
	movl	$_C_LABEL(fusubail),PCB_ONFAULT(%ecx)

#if defined(I386_CPU)
#if defined(I486_CPU) || defined(I586_CPU) || defined(I686_CPU)
	cmpl	$CPUCLASS_386,_C_LABEL(cpu_class)
	jne	2f
#endif /* I486_CPU || I586_CPU || I686_CPU */

	movl	%edx,%eax
	shrl	$PGSHIFT,%eax		# calculate pte address
	testb	$PG_RW,_C_LABEL(PTmap)(,%eax,4)
	jnz	1f

	/* Simulate a trap. */
	jmp	_C_LABEL(fusubail)

1:	/* XXX also need to check the following byte for validity! */
#endif

2:	movl	8(%esp),%eax
	movw	%ax,(%edx)
	xorl	%eax,%eax
	movl	%eax,PCB_ONFAULT(%ecx)
	ret

/*
 * int subyte(void *uaddr, char x);
 * Store a byte in the user's address space.
 * see subyte(9)
 */
/* LINTSTUB: Func: int subyte(void *base, int c) */
ENTRY(subyte)
	movl	4(%esp),%edx
	cmpl	$VM_MAXUSER_ADDRESS-1,%edx
	ja	_C_LABEL(fusuaddrfault)

#if defined(I386_CPU)
#if defined(I486_CPU) || defined(I586_CPU) || defined(I686_CPU)
	cmpl	$CPUCLASS_386,_C_LABEL(cpu_class)
	jne	2f
#endif /* I486_CPU || I586_CPU || I686_CPU */

	GET_CURPCB(%eax)
	movl	$3f,PCB_ONFAULT(%eax)

	movl	%edx,%eax
	shrl	$PGSHIFT,%eax		# calculate pte address
	testb	$PG_RW,_C_LABEL(PTmap)(,%eax,4)
	jnz	1f

3:	/* Simulate a trap. */
	pushl	%edx
	pushl	%edx
	call	_C_LABEL(trapwrite)	# trapwrite(addr)
	addl	$4,%esp			# clear parameter from the stack
	popl	%edx
	GET_CURPCB(%ecx)
	testl	%eax,%eax
	jnz	_C_LABEL(fusufault)

1:
#endif

2:	GET_CURPCB(%ecx)
	movl	$_C_LABEL(fusufault),PCB_ONFAULT(%ecx)

	movb	8(%esp),%al
	movb	%al,(%edx)
	xorl	%eax,%eax
	movl	%eax,PCB_ONFAULT(%ecx)
	ret

/*****************************************************************************/

/*
 * The following is i386-specific nonsense.
 */

/*
 * void lgdt(struct region_descriptor *rdp);
 * Load a new GDT pointer (and do any necessary cleanup).
 * XXX It's somewhat questionable whether reloading all the segment registers
 * is necessary, since the actual descriptor data is not changed except by
 * process creation and exit, both of which clean up via task switches.  OTOH,
 * this only happens at run time when the GDT is resized.
 */
/* LINTSTUB: Func: void lgdt(struct region_descriptor *rdp) */
NENTRY(lgdt)
	/* Reload the descriptor table. */
	movl	4(%esp),%eax
	lgdt	(%eax)
	/* Flush the prefetch queue. */
	jmp	1f
	nop
1:	/* Reload "stale" selectors. */
	movl	$GSEL(GDATA_SEL, SEL_KPL),%eax
	movw	%ax,%ds
	movw	%ax,%es
	movw	%ax,%gs
	movw	%ax,%ss
	movl	$GSEL(GCPU_SEL, SEL_KPL),%eax
	movw	%ax,%fs
	/* Reload code selector by doing intersegment return. */
	popl	%eax
	pushl	$GSEL(GCODE_SEL, SEL_KPL)
	pushl	%eax
	lret
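/*
 * Note on the tail of lgdt() above: %cs cannot be written with a mov, so the
 * code selector is refreshed by pushing the new %cs and the saved return
 * address and executing an intersegment return (lret), which reloads %cs and
 * %eip together.
 */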
/*****************************************************************************/

/*
 * These functions are primarily used by DDB.
 */

/* LINTSTUB: Func: int setjmp (label_t *l) */
ENTRY(setjmp)
	movl	4(%esp),%eax
	movl	%ebx,(%eax)		# save ebx
	movl	%esp,4(%eax)		# save esp
	movl	%ebp,8(%eax)		# save ebp
	movl	%esi,12(%eax)		# save esi
	movl	%edi,16(%eax)		# save edi
	movl	(%esp),%edx		# get rta
	movl	%edx,20(%eax)		# save eip
	xorl	%eax,%eax		# return (0);
	ret

/* LINTSTUB: Func: void longjmp (label_t *l) */
ENTRY(longjmp)
	movl	4(%esp),%eax
	movl	(%eax),%ebx		# restore ebx
	movl	4(%eax),%esp		# restore esp
	movl	8(%eax),%ebp		# restore ebp
	movl	12(%eax),%esi		# restore esi
	movl	16(%eax),%edi		# restore edi
	movl	20(%eax),%edx		# get rta
	movl	%edx,(%esp)		# put in return frame
	xorl	%eax,%eax		# return (1);
	incl	%eax
	ret
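/*
 * Illustrative use of the pair above (a sketch, not a quote from the DDB
 * sources): the debugger saves a label_t before touching possibly-bad
 * memory, and the fault path longjmp()s back if the access traps:
 *
 *	static label_t jb;
 *
 *	if (setjmp(&jb) == 0)
 *		v = *(volatile int *)addr;	-- may fault
 *	else
 *		db_printf("bad address\n");	-- resumed via longjmp(&jb)
 */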
/*****************************************************************************/

	.globl	_C_LABEL(sched_whichqs),_C_LABEL(sched_qs)
	.globl	_C_LABEL(uvmexp),_C_LABEL(panic)

#ifdef DIAGNOSTIC
NENTRY(switch_error)
	pushl	$1f
	call	_C_LABEL(panic)
	/* NOTREACHED */
1:	.asciz	"cpu_switch"
#endif /* DIAGNOSTIC */

/*
 * void cpu_switch(struct proc *)
 * Find a runnable process and switch to it.  Wait if necessary.  If the new
 * process is the same as the old one, we short-circuit the context save and
 * restore.
 *
 * Note that the stack frame layout is known to "struct switchframe"
 * in <machine/frame.h> and to the code in cpu_fork() which initializes
 * it for a new process.
 */
ENTRY(cpu_switch)
	pushl	%ebx
	pushl	%esi
	pushl	%edi

#ifdef DEBUG
	cmpl	$IPL_SCHED,CPUVAR(ILEVEL)
	jae	1f
	pushl	$2f
	call	_C_LABEL(panic)
	/* NOTREACHED */
2:	.asciz	"not splhigh() in cpu_switch!"
1:
#endif /* DEBUG */

	movl	CPUVAR(CURPROC),%esi

	/*
	 * Clear curproc so that we don't accumulate system time while idle.
	 * This also insures that schedcpu() will move the old process to
	 * the correct queue if it happens to get called from the spllower()
	 * below and changes the priority.  (See corresponding comment in
	 * userret()).
	 */
	movl	$0,CPUVAR(CURPROC)

	/*
	 * First phase: find new process.
	 *
	 * Registers:
	 *   %eax - queue head, scratch, then zero
	 *   %ebx - queue number
	 *   %ecx - cached value of whichqs
	 *   %edx - next process in queue
	 *   %esi - old process
	 *   %edi - new process
	 */

	/* Look for new process. */
	cli				# splhigh doesn't do a cli
	movl	_C_LABEL(sched_whichqs),%ecx
	bsfl	%ecx,%ebx		# find a full q
	jnz	switch_dequeue

	/*
	 * idling:  save old context.
	 *
	 * Registers:
	 *   %eax, %ecx - scratch
	 *   %esi - old process, then old pcb
	 *   %edi - idle pcb
	 */

	pushl	%esi
	call	_C_LABEL(pmap_deactivate)	# pmap_deactivate(oldproc)
	addl	$4,%esp

	movl	P_ADDR(%esi),%esi

	/* Save stack pointers. */
	movl	%esp,PCB_ESP(%esi)
	movl	%ebp,PCB_EBP(%esi)

	/* Find idle PCB for this CPU */
#ifndef MULTIPROCESSOR
	movl	$_C_LABEL(proc0),%ebx
	movl	P_ADDR(%ebx),%edi
	movl	P_MD_TSS_SEL(%ebx),%edx
#else
	movl	CPUVAR(IDLE_PCB),%edi
	movl	CPUVAR(IDLE_TSS_SEL),%edx
#endif
	movl	$0,CPUVAR(CURPROC)		/* In case we fault... */

	/* Restore the idle context (avoid interrupts) */
	cli

	/* Restore stack pointers. */
	movl	PCB_ESP(%edi),%esp
	movl	PCB_EBP(%edi),%ebp

	/* Switch address space. */
	movl	PCB_CR3(%edi),%ecx
	movl	%ecx,%cr3

	/* Switch TSS. Reset "task busy" flag before loading. */
#ifdef MULTIPROCESSOR
	movl	CPUVAR(GDT),%eax
#else
	movl	_C_LABEL(gdt),%eax
#endif
	andl	$~0x0200,4-SEL_KPL(%eax,%edx,1)
	ltr	%dx

	/* We're always in the kernel, so we don't need the LDT. */

	/* Restore cr0 (including FPU state). */
	movl	PCB_CR0(%edi),%ecx
	movl	%ecx,%cr0

	/* Record new pcb. */
	SET_CURPCB(%edi)

	xorl	%esi,%esi
	sti
idle_unlock:
#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
	call	_C_LABEL(sched_unlock_idle)
#endif
	/* Interrupts are okay again. */
	pushl	$IPL_NONE		# spl0()
	call	_C_LABEL(Xspllower)	# process pending interrupts
	addl	$4,%esp
	jmp	idle_start
idle_zero:
	sti
	call	_C_LABEL(uvm_pageidlezero)
	cli
	cmpl	$0,_C_LABEL(sched_whichqs)
	jnz	idle_exit
idle_loop:
	/* Try to zero some pages. */
	movl	_C_LABEL(uvm)+UVM_PAGE_IDLE_ZERO,%ecx
	testl	%ecx,%ecx
	jnz	idle_zero
	sti
	hlt
NENTRY(mpidle)
idle_start:
	cli
	cmpl	$0,_C_LABEL(sched_whichqs)
	jz	idle_loop
idle_exit:
	movl	$IPL_HIGH,CPUVAR(ILEVEL)	# splhigh
#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
	call	_C_LABEL(sched_lock_idle)
#endif
	movl	_C_LABEL(sched_whichqs),%ecx
	bsfl	%ecx,%ebx
	jz	idle_unlock

switch_dequeue:
	/*
	 * we're running at splhigh(), but it's otherwise okay to take
	 * interrupts here.
	 */
	sti
	leal	_C_LABEL(sched_qs)(,%ebx,8),%eax	# select q

	movl	P_FORW(%eax),%edi	# unlink from front of process q
#ifdef	DIAGNOSTIC
	cmpl	%edi,%eax		# linked to self (i.e. nothing queued)?
	je	_C_LABEL(switch_error)	# not possible
#endif /* DIAGNOSTIC */
	movl	P_FORW(%edi),%edx
	movl	%edx,P_FORW(%eax)
	movl	%eax,P_BACK(%edx)

	cmpl	%edx,%eax		# q empty?
	jne	3f

	btrl	%ebx,%ecx		# yes, clear to indicate empty
	movl	%ecx,_C_LABEL(sched_whichqs)	# update q status

3:	/* We just did it. */
	xorl	%eax,%eax
	CLEAR_RESCHED(%eax)

#ifdef	DIAGNOSTIC
	cmpl	%eax,P_WCHAN(%edi)	# Waiting for something?
	jne	_C_LABEL(switch_error)	# Yes; shouldn't be queued.
	cmpb	$SRUN,P_STAT(%edi)	# In run state?
	jne	_C_LABEL(switch_error)	# No; shouldn't be queued.
#endif /* DIAGNOSTIC */

	/* Isolate process.  XXX Is this necessary? */
	movl	%eax,P_BACK(%edi)

	/* Record new process. */
	movb	$SONPROC,P_STAT(%edi)	# p->p_stat = SONPROC
	SET_CURPROC(%edi,%ecx)

	/* Skip context switch if same process. */
	cmpl	%edi,%esi
	je	switch_return

	/* If old process exited, don't bother. */
	testl	%esi,%esi
	jz	switch_exited

	/*
	 * Second phase: save old context.
	 *
	 * Registers:
	 *   %eax, %ecx - scratch
	 *   %esi - old process, then old pcb
	 *   %edi - new process
	 */

	pushl	%esi
	call	_C_LABEL(pmap_deactivate)	# pmap_deactivate(oldproc)
	addl	$4,%esp

	movl	P_ADDR(%esi),%esi

	/* Save stack pointers. */
	movl	%esp,PCB_ESP(%esi)
	movl	%ebp,PCB_EBP(%esi)

switch_exited:
	/*
	 * Third phase: restore saved context.
	 *
	 * Registers:
	 *   %eax, %ebx, %ecx, %edx - scratch
	 *   %esi - new pcb
	 *   %edi - new process
	 */

	/* No interrupts while loading new state. */
	cli
	movl	P_ADDR(%edi),%esi

	/* Restore stack pointers. */
	movl	PCB_ESP(%esi),%esp
	movl	PCB_EBP(%esi),%ebp

#if 0
	/* Don't bother with the rest if switching to a system process. */
	testl	$P_SYSTEM,P_FLAG(%edi)
	jnz	switch_restored
#endif

#ifdef MULTIPROCESSOR
	movl	CPUVAR(GDT),%eax
#else
	/* Load TSS info. */
	movl	_C_LABEL(gdt),%eax
#endif
	movl	P_MD_TSS_SEL(%edi),%edx

	/* Switch TSS. Reset "task busy" flag before loading. */
	andl	$~0x0200,4(%eax,%edx, 1)
	ltr	%dx

	pushl	%edi
	call	_C_LABEL(pmap_activate)		# pmap_activate(p)
	addl	$4,%esp

#if 0
switch_restored:
#endif
	/* Restore cr0 (including FPU state). */
	movl	PCB_CR0(%esi),%ecx
#ifdef MULTIPROCESSOR
	/*
	 * If our floating point registers are on a different cpu,
	 * clear CR0_TS so we'll trap rather than reuse bogus state.
	 */
	movl	PCB_FPCPU(%esi),%ebx
	cmpl	CPUVAR(SELF),%ebx
	jz	1f
	orl	$CR0_TS,%ecx
1:
#endif
	movl	%ecx,%cr0

	/* Record new pcb. */
	SET_CURPCB(%esi)

	/* Interrupts are okay again. */
	sti
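/*
 * Background on the RAS check below (explanatory sketch): if the process
 * being switched to was preempted inside a registered restartable atomic
 * sequence, its saved %eip is rewound to the start of that sequence so the
 * whole sequence re-executes after the switch.  In rough C:
 *
 *	new_eip = ras_lookup(p, tf->tf_eip);
 *	if (new_eip != (caddr_t)-1)
 *		tf->tf_eip = new_eip;
 */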
	/*
	 * Check for restartable atomic sequences (RAS)
	 * XXX %edi reloads are not necessary here as %edi is callee-saved!
	 */
	movl	CPUVAR(CURPROC),%edi
	cmpl	$0,P_NRAS(%edi)
	je	1f
	movl	P_MD_REGS(%edi),%edx
	movl	TF_EIP(%edx),%eax
	pushl	%eax
	pushl	%edi
	call	_C_LABEL(ras_lookup)
	addl	$8,%esp
	cmpl	$-1,%eax
	je	1f
	movl	CPUVAR(CURPROC),%edi
	movl	P_MD_REGS(%edi),%edx
	movl	%eax,TF_EIP(%edx)
1:

switch_return:
#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
	call	_C_LABEL(sched_unlock_idle)
#endif
	pushl	$IPL_NONE		# spl0()
	call	_C_LABEL(Xspllower)	# process pending interrupts
	addl	$4,%esp
	movl	$IPL_HIGH,CPUVAR(ILEVEL)	# splhigh()

	movl	%edi,%eax		# return (p);
	popl	%edi
	popl	%esi
	popl	%ebx
	ret

/*
 * void switch_exit(struct proc *p);
 * Switch to the appropriate idle context (proc0's if uniprocessor; the cpu's
 * if multiprocessor) and deallocate the address space and kernel stack for p.
 * Then jump into cpu_switch(), as if we were in the idle proc all along.
 */
#ifndef MULTIPROCESSOR
	.globl	_C_LABEL(proc0)
#endif
	.globl	_C_LABEL(uvmspace_free),_C_LABEL(kernel_map)
	.globl	_C_LABEL(uvm_km_free),_C_LABEL(tss_free)
/* LINTSTUB: Func: void switch_exit(struct proc *p) */
ENTRY(switch_exit)
	movl	4(%esp),%edi		# old process
#ifndef MULTIPROCESSOR
	movl	$_C_LABEL(proc0),%ebx
	movl	P_ADDR(%ebx),%esi
	movl	P_MD_TSS_SEL(%ebx),%edx
#else
	movl	CPUVAR(IDLE_PCB),%esi
	movl	CPUVAR(IDLE_TSS_SEL),%edx
#endif
	/* In case we fault... */
	movl	$0,CPUVAR(CURPROC)

	/* Restore the idle context. */
	cli

	/* Restore stack pointers. */
	movl	PCB_ESP(%esi),%esp
	movl	PCB_EBP(%esi),%ebp

	/* Load TSS info. */
#ifdef MULTIPROCESSOR
	movl	CPUVAR(GDT),%eax
#else
	/* Load TSS info. */
	movl	_C_LABEL(gdt),%eax
#endif

	/* Switch address space. */
	movl	PCB_CR3(%esi),%ecx
	movl	%ecx,%cr3

	/* Switch TSS. */
	andl	$~0x0200,4-SEL_KPL(%eax,%edx,1)
	ltr	%dx

	/* We're always in the kernel, so we don't need the LDT. */

	/* Restore cr0 (including FPU state). */
	movl	PCB_CR0(%esi),%ecx
	movl	%ecx,%cr0

	/* Record new pcb. */
	SET_CURPCB(%esi)

	/* Interrupts are okay again. */
	sti

	/*
	 * Schedule the dead process's vmspace and stack to be freed.
	 */
	pushl	%edi			/* exit2(p) */
	call	_C_LABEL(exit2)
	addl	$4,%esp

	/* Jump into cpu_switch() with the right state. */
	xorl	%esi,%esi
	movl	%esi,CPUVAR(CURPROC)
	jmp	idle_start

/*
 * void savectx(struct pcb *pcb);
 * Update pcb, saving current processor state.
 */
/* LINTSTUB: Func: void savectx(struct pcb *pcb) */
ENTRY(savectx)
	movl	4(%esp),%edx		# edx = p->p_addr

	/* Save stack pointers. */
	movl	%esp,PCB_ESP(%edx)
	movl	%ebp,PCB_EBP(%edx)

	ret

/*
 * Old call gate entry for syscall
 */
/* LINTSTUB: Var: char Xosyscall[1]; */
IDTVEC(osyscall)
	/* Set eflags in trap frame. */
	pushfl
	popl	8(%esp)
	pushl	$7		# size of instruction for restart
	jmp	syscall1

/*
 * Trap gate entry for syscall
 */
/* LINTSTUB: Var: char Xsyscall[1]; */
IDTVEC(syscall)
	pushl	$2		# size of instruction for restart
syscall1:
	pushl	$T_ASTFLT	# trap # for doing ASTs
	INTRENTRY

#ifdef DIAGNOSTIC
	movl	CPUVAR(ILEVEL),%ebx
	testl	%ebx,%ebx
	jz	1f
	pushl	$5f
	call	_C_LABEL(printf)
	addl	$4,%esp
#ifdef DDB
	int	$3
#endif
1:
#endif /* DIAGNOSTIC */
	movl	CPUVAR(CURPROC),%edx
	movl	%esp,P_MD_REGS(%edx)	# save pointer to frame
	call	*P_MD_SYSCALL(%edx)	# get pointer to syscall() function
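/*
 * Sketch of the AST handling that follows (informational): on the way back
 * to user mode, with interrupts disabled, the "AST pending" flag is checked;
 * if it is set, it is cleared and trap() is invoked with the T_ASTFLT code
 * that was pushed on entry, and the check is repeated until no AST remains.
 */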
2:	/* Check for ASTs on exit to user mode. */
	cli
	CHECK_ASTPENDING()
	je	1f
	/* Always returning to user mode here. */
	CLEAR_ASTPENDING()
	sti
	/* Pushed T_ASTFLT into tf_trapno on entry. */
	call	_C_LABEL(trap)
	jmp	2b
#ifndef DIAGNOSTIC
1:	INTRFASTEXIT
#else /* DIAGNOSTIC */
1:	cmpl	$IPL_NONE,CPUVAR(ILEVEL)
	jne	3f
	INTRFASTEXIT
3:	sti
	pushl	$4f
	call	_C_LABEL(printf)
	addl	$4,%esp
#ifdef DDB
	int	$3
#endif /* DDB */
	movl	$IPL_NONE,CPUVAR(ILEVEL)
	jmp	2b
4:	.asciz	"WARNING: SPL NOT LOWERED ON SYSCALL EXIT\n"
5:	.asciz	"WARNING: SPL NOT ZERO ON SYSCALL ENTRY\n"
#endif /* DIAGNOSTIC */

#if NNPX > 0
/*
 * Special interrupt handlers.  Someday intr0-intr15 will be used to count
 * interrupts.  We'll still need a special exception 16 handler.  The busy
 * latch stuff in probintr() can be moved to npxprobe().
 */

/* LINTSTUB: Func: void probeintr(void) */
NENTRY(probeintr)
	ss
	incl	_C_LABEL(npx_intrs_while_probing)
	pushl	%eax
	movb	$0x20,%al	# EOI (asm in strings loses cpp features)
	outb	%al,$0xa0	# IO_ICU2
	outb	%al,$0x20	# IO_ICU1
	movb	$0,%al
	outb	%al,$0xf0	# clear BUSY# latch
	popl	%eax
	iret

/* LINTSTUB: Func: void probetrap(void) */
NENTRY(probetrap)
	ss
	incl	_C_LABEL(npx_traps_while_probing)
	fnclex
	iret

/* LINTSTUB: Func: int npx586bug1(int a, int b) */
NENTRY(npx586bug1)
	fildl	4(%esp)		# x
	fildl	8(%esp)		# y
	fld	%st(1)
	fdiv	%st(1),%st	# x/y
	fmulp	%st,%st(1)	# (x/y)*y
	fsubrp	%st,%st(1)	# x-(x/y)*y
	pushl	$0
	fistpl	(%esp)
	popl	%eax
	ret
#endif /* NNPX > 0 */
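/*
 * Informational note on npx586bug1() above: it computes x - (x/y)*y with the
 * FPU, which should be exactly 0 for integer operands unless FDIV is broken.
 * The npx driver is expected to call it with a divisor pair that triggers
 * the original Pentium FDIV flaw, roughly:
 *
 *	if (npx586bug1(4195835, 3145727) != 0)
 *		-- flawed FDIV; warn and/or refuse to use the FPU
 *
 * (the exact caller and constants live in the npx driver, not here).
 */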