Please note that diffs are not public domain; they are subject to the copyright notices on the relevant files. =================================================================== RCS file: /ftp/cvs/cvsroot/src/sys/arch/i386/i386/locore.S,v rcsdiff: /ftp/cvs/cvsroot/src/sys/arch/i386/i386/locore.S,v: warning: Unknown phrases like `commitid ...;' are present. retrieving revision 1.95.8.2 retrieving revision 1.116 diff -u -p -r1.95.8.2 -r1.116 --- src/sys/arch/i386/i386/locore.S 2012/03/06 09:56:07 1.95.8.2 +++ src/sys/arch/i386/i386/locore.S 2016/05/12 06:45:16 1.116 @@ -1,4 +1,4 @@ -/* $NetBSD: locore.S,v 1.95.8.2 2012/03/06 09:56:07 mrg Exp $ */ +/* $NetBSD: locore.S,v 1.116 2016/05/12 06:45:16 maxv Exp $ */ /* * Copyright-o-rama! @@ -64,7 +64,6 @@ * POSSIBILITY OF SUCH DAMAGE. */ - /*- * Copyright (c) 1998, 2000, 2004, 2006, 2007, 2009 The NetBSD Foundation, Inc. * All rights reserved. @@ -129,9 +128,10 @@ */ #include -__KERNEL_RCSID(0, "$NetBSD: locore.S,v 1.95.8.2 2012/03/06 09:56:07 mrg Exp $"); +__KERNEL_RCSID(0, "$NetBSD: locore.S,v 1.116 2016/05/12 06:45:16 maxv Exp $"); #include "opt_compat_oldboot.h" +#include "opt_copy_symtab.h" #include "opt_ddb.h" #include "opt_modular.h" #include "opt_multiboot.h" @@ -139,7 +139,6 @@ __KERNEL_RCSID(0, "$NetBSD: locore.S,v 1 #include "opt_vm86.h" #include "opt_xen.h" -#include "npx.h" #include "assym.h" #include "lapic.h" #include "ioapic.h" @@ -159,49 +158,78 @@ __KERNEL_RCSID(0, "$NetBSD: locore.S,v 1 #include #endif -/* XXX temporary kluge; these should not be here */ /* Get definitions for IOM_BEGIN, IOM_END, and IOM_SIZE */ #include +#ifndef XEN +#define _RELOC(x) ((x) - KERNBASE) +#else +#define _RELOC(x) ((x)) +#endif /* XEN */ +#define RELOC(x) _RELOC(_C_LABEL(x)) + +#ifndef PAE +#define PROC0_PDIR_OFF 0 +#else +#define PROC0_L3_OFF 0 +#define PROC0_PDIR_OFF 1 * PAGE_SIZE +#endif + +#define PROC0_STK_OFF (PROC0_PDIR_OFF + PDP_SIZE * PAGE_SIZE) +#define PROC0_PTP1_OFF (PROC0_STK_OFF + UPAGES * PAGE_SIZE) + +/* + * fillkpt - Fill in a kernel page table + * eax = pte (page frame | control | status) + * ebx = page table address + * ecx = number of pages to map + * + * For PAE, each entry is 8 bytes long: we must set the 4 upper bytes to 0. + * This is done by the first instruction of fillkpt. In the non-PAE case, this + * instruction just clears the page table entry. 
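 *
 * As a hedged illustration (not the kernel's code), each loop iteration of
 * the macro below is equivalent to the following C, where fillkpt_sketch and
 * its parameters are made-up names and PDE_SIZE/PAGE_SIZE are the constants
 * from the headers included above:
 *
 *	void
 *	fillkpt_sketch(uint32_t *pt, uint32_t pte, uint32_t npages)
 *	{
 *		while (npages-- > 0) {
 *			pt[PDE_SIZE / 4 - 1] = 0;	// upper word (PAE), or the entry itself
 *			pt[0] = pte;			// page frame | control | status
 *			pt += PDE_SIZE / 4;		// next PTE/PDE
 *			pte += PAGE_SIZE;		// next physical page
 *		}
 *	}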
+ */ + +#define fillkpt \ +1: movl $0,(PDE_SIZE-4)(%ebx) ; /* upper 32 bits: 0 */ \ + movl %eax,(%ebx) ; /* store phys addr */ \ + addl $PDE_SIZE,%ebx ; /* next PTE/PDE */ \ + addl $PAGE_SIZE,%eax ; /* next phys page */ \ + loop 1b ; + + #ifdef XEN /* * Xen guest identifier and loader selection */ .section __xen_guest .ascii "GUEST_OS=netbsd,GUEST_VER=3.0,XEN_VER=xen-3.0" -#if defined(DOM0OPS) || !defined(XEN_COMPAT_030001) - .ascii ",VIRT_BASE=0xc0000000" /* KERNBASE */ - .ascii ",ELF_PADDR_OFFSET=0xc0000000" /* KERNBASE */ -#else - .ascii ",VIRT_BASE=0xc0100000" /* KERNTEXTOFF */ - .ascii ",ELF_PADDR_OFFSET=0xc0100000" /* KERNTEXTOFF */ -#endif - .ascii ",VIRT_ENTRY=0xc0100000" /* KERNTEXTOFF */ -#if !defined(XEN_COMPAT_030001) - .ascii ",HYPERCALL_PAGE=0x00000101" + .ascii ",VIRT_BASE=0xc0000000" /* KERNBASE */ + .ascii ",ELF_PADDR_OFFSET=0xc0000000" /* KERNBASE */ + .ascii ",VIRT_ENTRY=0xc0100000" /* KERNTEXTOFF */ + .ascii ",HYPERCALL_PAGE=0x00000101" /* (???+HYPERCALL_PAGE_OFFSET)/PAGE_SIZE) */ -#endif #ifdef PAE - .ascii ",PAE=yes[extended-cr3]" + .ascii ",PAE=yes[extended-cr3]" #endif .ascii ",LOADER=generic" -#if (NKSYMS || defined(DDB) || defined(MODULAR)) && !defined(SYMTAB_SPACE) +#if (NKSYMS || defined(DDB) || defined(MODULAR)) && !defined(makeoptions_COPY_SYMTAB) .ascii ",BSD_SYMTAB=yes" #endif .byte 0 -#endif +#endif /* XEN */ /* * Initialization */ .data - .globl _C_LABEL(cpu) + .globl _C_LABEL(cputype) .globl _C_LABEL(cpuid_level) .globl _C_LABEL(esym) .globl _C_LABEL(eblob) .globl _C_LABEL(atdevbase) - .globl _C_LABEL(lwp0uarea),_C_LABEL(PDPpaddr) + .globl _C_LABEL(lwp0uarea) + .globl _C_LABEL(PDPpaddr) .globl _C_LABEL(gdt) .globl _C_LABEL(idt) .globl _C_LABEL(lapic_tpr) @@ -213,51 +241,72 @@ __KERNEL_RCSID(0, "$NetBSD: locore.S,v 1 .align 12 #endif .globl _C_LABEL(local_apic), _C_LABEL(lapic_id) -_C_LABEL(local_apic): + .type _C_LABEL(local_apic), @object +LABEL(local_apic) .space LAPIC_ID -_C_LABEL(lapic_id): +END(local_apic) + .type _C_LABEL(lapic_id), @object +LABEL(lapic_id) .long 0x00000000 - .space LAPIC_TPRI-(LAPIC_ID+4) -_C_LABEL(lapic_tpr): - .space LAPIC_PPRI-LAPIC_TPRI + .space LAPIC_TPRI-(LAPIC_ID+4) +END(lapic_id) + .type _C_LABEL(lapic_tpr), @object +LABEL(lapic_tpr) + .space LAPIC_PPRI-LAPIC_TPRI +END(lapic_tpr) + .type _C_LABEL(lapic_ppr), @object _C_LABEL(lapic_ppr): .space LAPIC_ISR-LAPIC_PPRI +END(lapic_ppr) + .type _C_LABEL(lapic_isr), @object _C_LABEL(lapic_isr): .space PAGE_SIZE-LAPIC_ISR +END(lapic_isr) #else -_C_LABEL(lapic_tpr): + .type _C_LABEL(lapic_tpr), @object +LABEL(lapic_tpr) .long 0 +END(lapic_tpr) #endif - -_C_LABEL(cpu): .long 0 # are we 80486, Pentium, or.. -_C_LABEL(cpuid_level): .long 0 -_C_LABEL(atdevbase): .long 0 # location of start of iomem in virtual -_C_LABEL(lwp0uarea): .long 0 -_C_LABEL(PDPpaddr): .long 0 # paddr of PDP, for libkvm + .type _C_LABEL(cputype), @object +LABEL(cputype) .long 0 /* are we 80486, Pentium, or.. */ +END(cputype) + .type _C_LABEL(cpuid_level), @object +LABEL(cpuid_level) .long 0 +END(cpuid_level) + .type _C_LABEL(atdevbase), @object +LABEL(atdevbase) .long 0 /* location of start of iomem in virt */ +END(atdevbase) + .type _C_LABEL(lwp0uarea), @object +LABEL(lwp0uarea) .long 0 +END(lwp0uarea) + .type _C_LABEL(PDPpaddr), @object +LABEL(PDPpaddr) .long 0 /* paddr of PDP, for libkvm */ +END(PDPpaddr) + .type _C_LABEL(tablesize), @object _C_LABEL(tablesize): .long 0 +END(tablesize) - .space 512 + /* Space for the temporary stack */ + .size tmpstk, tmpstk - . 
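/*
 * tmpstk labels the high end of the 512-byte scratch area reserved below,
 * because the stack grows down. In hedged C terms (variable names here are
 * illustrative only), the later `movl $_RELOC(tmpstk),%esp` amounts to:
 *
 *	char tmpstk_space[512];					// the .space directive below
 *	char *sp = tmpstk_space + sizeof(tmpstk_space);		// == tmpstk, the stack top
 */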
+ .space 512 tmpstk: #ifdef XEN - .align PAGE_SIZE, 0x0 # Align on page boundary -_C_LABEL(tmpgdt): - .space PAGE_SIZE # Xen expects a page + .align PAGE_SIZE, 0x0 /* Align on page boundary */ +LABEL(tmpgdt) + .space PAGE_SIZE /* Xen expects a page */ +END(tmpgdt) #endif /* XEN */ - .globl tmpgdt -#ifndef XEN -#define _RELOC(x) ((x) - KERNBASE) -#else -#define _RELOC(x) ((x)) -#endif /* XEN */ -#define RELOC(x) _RELOC(_C_LABEL(x)) .text .globl _C_LABEL(kernel_text) .set _C_LABEL(kernel_text),KERNTEXTOFF - .globl start +ENTRY(start) #ifndef XEN -start: movw $0x1234,0x472 # warm boot + + /* Warm boot */ + movw $0x1234,0x472 #if defined(MULTIBOOT) jmp 1f @@ -282,7 +331,7 @@ _C_LABEL(Multiboot_Header): * space to process it later -- after we are relocated. It will * be safer to run complex C code than doing it at this point. */ - pushl %ebx # Address of Multiboot information + pushl %ebx /* Address of Multiboot information */ call _C_LABEL(multiboot_pre_reloc) addl $4,%esp jmp 2f @@ -295,7 +344,7 @@ _C_LABEL(Multiboot_Header): * (boothowto, [bootdev], bootinfo, esym, biosextmem, biosbasemem), * 4 bytes each. */ - addl $4,%esp # Discard return address to boot loader + addl $4,%esp /* Discard return address to boot loader */ call _C_LABEL(native_loader) addl $24,%esp @@ -346,11 +395,11 @@ isnx586: * Don't try cpuid, as Nx586s reportedly don't support the * PSL_ID bit. */ - movl $CPU_NX586,RELOC(cpu) + movl $CPU_NX586,RELOC(cputype) jmp 2f is386: - movl $CPU_386,RELOC(cpu) + movl $CPU_386,RELOC(cputype) jmp 2f try486: /* Try to toggle identification flag; does not exist on early 486s. */ @@ -369,7 +418,7 @@ try486: /* Try to toggle identification testl %eax,%eax jnz try586 -is486: movl $CPU_486,RELOC(cpu) +is486: movl $CPU_486,RELOC(cputype) /* * Check Cyrix CPU * Cyrix CPUs do not change the undefined flags following @@ -387,30 +436,30 @@ is486: movl $CPU_486,RELOC(cpu) popfl jmp 2f trycyrix486: - movl $CPU_6x86,RELOC(cpu) # set CPU type + movl $CPU_6x86,RELOC(cputype) /* set CPU type */ /* * Check for Cyrix 486 CPU by seeing if the flags change during a * divide. This is documented in the Cx486SLC/e SMM Programmer's * Guide. */ xorl %edx,%edx - cmpl %edx,%edx # set flags to known state + cmpl %edx,%edx /* set flags to known state */ pushfl - popl %ecx # store flags in ecx + popl %ecx /* store flags in ecx */ movl $-1,%eax movl $4,%ebx - divl %ebx # do a long division + divl %ebx /* do a long division */ pushfl popl %eax - xorl %ecx,%eax # are the flags different? - testl $0x8d5,%eax # only check C|PF|AF|Z|N|V - jne 2f # yes; must be Cyrix 6x86 CPU - movl $CPU_486DLC,RELOC(cpu) # set CPU type + xorl %ecx,%eax /* are the flags different? */ + testl $0x8d5,%eax /* only check C|PF|AF|Z|N|V */ + jne 2f /* yes; must be Cyrix 6x86 CPU */ + movl $CPU_486DLC,RELOC(cputype) /* set CPU type */ #ifndef CYRIX_CACHE_WORKS /* Disable caching of the ISA hole only. */ invd - movb $CCR0,%al # Configuration Register index (CCR0) + movb $CCR0,%al /* Configuration Register index (CCR0) */ outb %al,$0x22 inb $0x23,%al orb $(CCR0_NC1|CCR0_BARB),%al @@ -422,8 +471,8 @@ trycyrix486: invd #else /* CYRIX_CACHE_WORKS */ /* Set cache parameters */ - invd # Start with guaranteed clean cache - movb $CCR0,%al # Configuration Register index (CCR0) + invd /* Start with guaranteed clean cache */ + movb $CCR0,%al /* Configuration Register index (CCR0) */ outb %al,$0x22 inb $0x23,%al andb $~CCR0_NC0,%al @@ -481,10 +530,11 @@ try586: /* Use the `cpuid' instruction. 
* The boot program should check: * text+data <= &stack_variable - more_space_for_stack * text+data+bss+pad+space_for_page_tables <= end_of_memory - * Oops, the gdt is in the carcass of the boot program so clearing + * + * XXX: the gdt is in the carcass of the boot program so clearing * the rest of memory is still not possible. */ - movl $_RELOC(tmpstk),%esp # bootstrap stack end location + movl $_RELOC(tmpstk),%esp /* * Virtual address space of kernel, without PAE. The page dir is 1 page long. @@ -497,38 +547,11 @@ try586: /* Use the `cpuid' instruction. * text | data | bss | [syms] | [blobs] | L3 | page dir | proc0 kstack | L1 ptp * 0 1 5 6 7 */ -#ifndef PAE -#define PROC0_PDIR_OFF 0 -#else -#define PROC0_L3_OFF 0 -#define PROC0_PDIR_OFF 1 * PAGE_SIZE -#endif - -#define PROC0_STK_OFF (PROC0_PDIR_OFF + PDP_SIZE * PAGE_SIZE) -#define PROC0_PTP1_OFF (PROC0_STK_OFF + UPAGES * PAGE_SIZE) - -/* - * fillkpt - Fill in a kernel page table - * eax = pte (page frame | control | status) - * ebx = page table address - * ecx = number of pages to map - * - * For PAE, each entry is 8 bytes long: we must set the 4 upper bytes to 0. - * This is done by the first instruction of fillkpt. In the non-PAE case, this - * instruction just clears the page table entry. - */ - -#define fillkpt \ -1: movl $0,(PDE_SIZE-4)(%ebx) ; /* clear bits */ \ - movl %eax,(%ebx) ; /* store phys addr */ \ - addl $PDE_SIZE,%ebx ; /* next pte/pde */ \ - addl $PAGE_SIZE,%eax ; /* next phys page */ \ - loop 1b ; /* Find end of kernel image. */ movl $RELOC(end),%edi -#if (NKSYMS || defined(DDB) || defined(MODULAR)) && !defined(SYMTAB_SPACE) +#if (NKSYMS || defined(DDB) || defined(MODULAR)) && !defined(makeoptions_COPY_SYMTAB) /* Save the symbols (if loaded). */ movl RELOC(esym),%eax testl %eax,%eax @@ -547,7 +570,7 @@ try586: /* Use the `cpuid' instruction. 1: /* Compute sizes */ movl %edi,%esi - addl $PGOFSET,%esi # page align up + addl $PGOFSET,%esi andl $~PGOFSET,%esi /* nkptp[1] = (esi + ~L2_FRAME) >> L2_SHIFT + 1; */ @@ -575,38 +598,38 @@ try586: /* Use the `cpuid' instruction. cmpl %eax,RELOC(nkptp)+1*4 jnz 1b - /* Clear tables */ - movl %esi,%edi + /* Clear tables */ + movl %esi,%edi xorl %eax,%eax cld - movl RELOC(tablesize),%ecx - shrl $2,%ecx + movl RELOC(tablesize),%ecx + shrl $2,%ecx rep stosl - leal (PROC0_PTP1_OFF)(%esi), %ebx + leal (PROC0_PTP1_OFF)(%esi), %ebx /* * Build initial page tables. */ - /* - * Compute &__data_start - KERNBASE. This can't be > 4G, + /* + * Compute &__rodata_start - KERNBASE. This can't be > 4G, * or we can't deal with it anyway, since we can't load it in * 32 bit mode. So use the bottom 32 bits. - */ - movl $RELOC(__data_start),%edx + */ + movl $RELOC(__rodata_start),%edx andl $~PGOFSET,%edx - /* - * Skip the first MB. - */ + /* + * Skip the first MB. + */ movl $_RELOC(KERNTEXTOFF),%eax movl %eax,%ecx shrl $(PGSHIFT-2),%ecx /* ((n >> PGSHIFT) << 2) for # pdes */ #ifdef PAE - shll $1,%ecx /* pdes are twice larger with PAE */ + shll $1,%ecx /* pdes are twice larger with PAE */ #endif - addl %ecx,%ebx + addl %ecx,%ebx /* Map the kernel text read-only. */ movl %edx,%ecx @@ -618,37 +641,37 @@ try586: /* Use the `cpuid' instruction. /* Map the data, BSS, and bootstrap tables read-write. 
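 *
 * The page count used for this mapping can be read as the following hedged
 * C sketch, where the variable names are illustrative stand-ins for the
 * registers noted in the comments:
 *
 *	uint32_t start  = rodata_start_pa & ~PGOFSET;	// %edx, end of the read-only text mapping
 *	uint32_t end    = tables_start_pa + tablesize;	// %esi + tablesize, end of the bootstrap tables
 *	uint32_t npages = (end - start) >> PGSHIFT;	// handed to fillkpt in %ecx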
*/ leal (PG_V|PG_KW)(%edx),%eax movl RELOC(tablesize),%ecx - addl %esi,%ecx # end of tables - subl %edx,%ecx # subtract end of text + addl %esi,%ecx /* end of tables */ + subl %edx,%ecx /* subtract end of text */ shrl $PGSHIFT,%ecx fillkpt /* Map ISA I/O mem (later atdevbase) */ - movl $(IOM_BEGIN|PG_V|PG_KW/*|PG_N*/),%eax # having these bits set - movl $(IOM_SIZE>>PGSHIFT),%ecx # for this many pte s, + movl $(IOM_BEGIN|PG_V|PG_KW/*|PG_N*/),%eax + movl $(IOM_SIZE>>PGSHIFT),%ecx fillkpt /* * Construct a page table directory. */ - /* Set up top level entries for identity mapping */ - leal (PROC0_PDIR_OFF)(%esi),%ebx - leal (PROC0_PTP1_OFF)(%esi),%eax - orl $(PG_V|PG_KW), %eax - movl RELOC(nkptp)+1*4,%ecx + /* Set up top level entries for identity mapping */ + leal (PROC0_PDIR_OFF)(%esi),%ebx + leal (PROC0_PTP1_OFF)(%esi),%eax + orl $(PG_V|PG_KW), %eax + movl RELOC(nkptp)+1*4,%ecx fillkpt - /* Set up top level entries for actual kernel mapping */ - leal (PROC0_PDIR_OFF + L2_SLOT_KERNBASE*PDE_SIZE)(%esi),%ebx - leal (PROC0_PTP1_OFF)(%esi),%eax - orl $(PG_V|PG_KW), %eax - movl RELOC(nkptp)+1*4,%ecx + /* Set up top level entries for actual kernel mapping */ + leal (PROC0_PDIR_OFF + L2_SLOT_KERNBASE*PDE_SIZE)(%esi),%ebx + leal (PROC0_PTP1_OFF)(%esi),%eax + orl $(PG_V|PG_KW), %eax + movl RELOC(nkptp)+1*4,%ecx fillkpt /* Install a PDE recursively mapping page directory as a page table! */ - leal (PROC0_PDIR_OFF + PDIR_SLOT_PTE*PDE_SIZE)(%esi),%ebx - leal (PROC0_PDIR_OFF)(%esi),%eax - orl $(PG_V|PG_KW),%eax + leal (PROC0_PDIR_OFF + PDIR_SLOT_PTE*PDE_SIZE)(%esi),%ebx + leal (PROC0_PDIR_OFF)(%esi),%eax + orl $(PG_V|PG_KW),%eax movl $PDP_SIZE,%ecx fillkpt @@ -674,32 +697,31 @@ try586: /* Use the `cpuid' instruction. * Startup checklist: * 1. Load %cr3 with pointer to PDIR (or L3 PD page for PAE). */ - movl %esi,%eax # phys address of ptd in proc 0 - movl %eax,%cr3 # load ptd addr into mmu + movl %esi,%eax /* phys address of PTD in proc0 */ + movl %eax,%cr3 /* load PTD addr into MMU */ - /* - * 2. Enable paging and the rest of it. - */ - movl %cr0,%eax # get control word - # enable paging & NPX emulation - orl $(CR0_PE|CR0_PG|CR0_NE|CR0_TS|CR0_EM|CR0_MP),%eax - movl %eax,%cr0 # and page NOW! + /* + * 2. Enable paging and the rest of it. + */ + movl %cr0,%eax + orl $(CR0_PE|CR0_PG|CR0_NE|CR0_TS|CR0_MP|CR0_WP|CR0_AM),%eax + movl %eax,%cr0 - pushl $begin # jump to high mem + pushl $begin /* jump to high mem */ ret begin: - /* - * We have arrived. - * There's no need anymore for the identity mapping in low - * memory, remove it. - */ - movl _C_LABEL(nkptp)+1*4,%ecx - leal (PROC0_PDIR_OFF)(%esi),%ebx # old, phys address of PDIR - addl $(KERNBASE), %ebx # new, virtual address of PDIR -1: movl $0,(PDE_SIZE-4)(%ebx) # Upper bits (for PAE) + /* + * We have arrived. + * There's no need anymore for the identity mapping in low + * memory, remove it. + */ + movl _C_LABEL(nkptp)+1*4,%ecx + leal (PROC0_PDIR_OFF)(%esi),%ebx /* old, phys address of PDIR */ + addl $(KERNBASE), %ebx /* new, virt address of PDIR */ +1: movl $0,(PDE_SIZE-4)(%ebx) /* upper bits (for PAE) */ movl $0,(%ebx) - addl $PDE_SIZE,%ebx + addl $PDE_SIZE,%ebx loop 1b /* Relocate atdevbase. */ @@ -709,11 +731,11 @@ begin: movl %edx,_C_LABEL(atdevbase) /* Set up bootstrap stack. 
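 *
 * In hedged C terms (tables_pa stands for the physical address held in %esi,
 * uarea for the value computed into %eax; both names are illustrative), the
 * next few instructions amount to:
 *
 *	char *uarea = (char *)(tables_pa + PROC0_STK_OFF + KERNBASE);
 *	lwp0uarea = (vaddr_t)uarea;			// exported for the rest of the kernel
 *	char *sp = uarea + USPACE - FRAMESIZE;		// new %esp: stack grows down from the u-area top
 *	((struct pcb *)uarea)->pcb_cr3 = tables_pa;	// physical address loaded into %cr3 earlier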
*/ - leal (PROC0_STK_OFF+KERNBASE)(%esi),%eax + leal (PROC0_STK_OFF+KERNBASE)(%esi),%eax movl %eax,_C_LABEL(lwp0uarea) - leal (KSTACK_SIZE-FRAMESIZE)(%eax),%esp - movl %esi,(KSTACK_SIZE+PCB_CR3)(%eax) # pcb->pcb_cr3 - xorl %ebp,%ebp # mark end of frames + leal (USPACE-FRAMESIZE)(%eax),%esp + movl %esi,PCB_CR3(%eax) /* pcb->pcb_cr3 */ + xorl %ebp,%ebp /* mark end of frames */ #if defined(MULTIBOOT) /* It is now safe to parse the Multiboot information structure @@ -723,21 +745,21 @@ begin: call _C_LABEL(multiboot_post_reloc) #endif - subl $NGDT*8, %esp # space for temporary gdt + subl $NGDT*8, %esp /* space for temporary gdt */ pushl %esp call _C_LABEL(initgdt) addl $4,%esp - movl _C_LABEL(tablesize),%eax - addl %esi,%eax # skip past stack and page tables + movl _C_LABEL(tablesize),%eax + addl %esi,%eax /* skip past stack and page tables */ #ifdef PAE - pushl $0 # init386() expects a 64 bits paddr_t with PAE + pushl $0 /* init386() expects a 64 bits paddr_t with PAE */ #endif pushl %eax - call _C_LABEL(init386) # wire 386 chip for unix operation - addl $PDE_SIZE,%esp # pop paddr_t - addl $NGDT*8,%esp # pop temporary gdt + call _C_LABEL(init386) /* wire 386 chip for unix operation */ + addl $PDE_SIZE,%esp /* pop paddr_t */ + addl $NGDT*8,%esp /* pop temporary gdt */ #ifdef SAFARI_FIFO_HACK movb $5,%al @@ -759,16 +781,15 @@ begin: call _C_LABEL(main) #else /* XEN */ -start: /* First, reset the PSL. */ pushl $PSL_MBO popfl cld - movl %esp, %ebx # save start of available space - movl $_RELOC(tmpstk),%esp # bootstrap stack end location + movl %esp, %ebx /* save start of available space */ + movl $_RELOC(tmpstk),%esp /* bootstrap stack end location */ - /* Clear BSS first so that there are no surprises... */ + /* Clear BSS. */ xorl %eax,%eax movl $RELOC(__bss_start),%edi movl $RELOC(_end),%ecx @@ -776,7 +797,7 @@ start: rep stosb /* Copy the necessary stuff from start_info structure. */ - /* We need to copy shared_info early, so that sti/cli work */ + /* We need to copy shared_info early, so that sti/cli work */ movl $RELOC(start_info_union),%edi movl $128,%ecx rep movsl @@ -797,7 +818,7 @@ start: * done using it. */ movl $RELOC(tmpgdt), %eax - pushl %eax # start of temporary gdt + pushl %eax /* start of temporary gdt */ call _C_LABEL(initgdt) addl $4,%esp @@ -810,27 +831,29 @@ start: movl %esi, _C_LABEL(lwp0uarea) /* Set up bootstrap stack. */ - leal (KSTACK_SIZE-FRAMESIZE)(%eax),%esp - xorl %ebp,%ebp # mark end of frames + leal (USPACE-FRAMESIZE)(%eax),%esp + xorl %ebp,%ebp /* mark end of frames */ addl $USPACE, %esi - subl $KERNBASE, %esi #init386 want a physical address + subl $KERNBASE, %esi /* init386 wants a physical address */ #ifdef PAE - pushl $0 # init386() expects a 64 bits paddr_t with PAE + pushl $0 /* init386() expects a 64 bits paddr_t with PAE */ #endif pushl %esi - call _C_LABEL(init386) # wire 386 chip for unix operation - addl $PDE_SIZE,%esp # pop paddr_t + call _C_LABEL(init386) /* wire 386 chip for unix operation */ + addl $PDE_SIZE,%esp /* pop paddr_t */ call _C_LABEL(main) +#endif /* XEN */ +END(start) -#if defined(XEN) && !defined(XEN_COMPAT_030001) +#if defined(XEN) /* space for the hypercall call page */ #define HYPERCALL_PAGE_OFFSET 0x1000 .org HYPERCALL_PAGE_OFFSET ENTRY(hypercall_page) .skip 0x1000 -#endif /* defined(XEN) && !defined(XEN_COMPAT_030001) */ +END(hypercall_page) /* * void lgdt_finish(void); @@ -863,8 +886,9 @@ END(lgdt_finish) * * This is a trampoline function pushed onto the stack of a newly created * process in order to do some additional setup. 
The trampoline is entered by - * cpu_switch()ing to the process, so we abuse the callee-saved registers used - * by cpu_switch() to store the information about the stub to call. + * cpu_switchto()ing to the process, so we abuse the callee-saved + * registers used by cpu_switchto() to store the information about the + * stub to call. * NOTE: This function does not have a normal calling sequence! */ NENTRY(lwp_trampoline) @@ -892,13 +916,13 @@ NENTRY(sigcode) * Handler has returned here as if we called it. The sigcontext * is on the stack after the 3 args "we" pushed. */ - leal 12(%esp),%eax # get pointer to sigcontext - movl %eax,4(%esp) # put it in the argument slot - # fake return address already there + leal 12(%esp),%eax /* get pointer to sigcontext */ + movl %eax,4(%esp) /* put it in the argument slot */ + /* fake return address already there */ movl $SYS_compat_16___sigreturn14,%eax - int $0x80 # enter kernel with args on stack + int $0x80 /* enter kernel with args on stack */ movl $SYS_exit,%eax - int $0x80 # exit if sigreturn fails + int $0x80 /* exit if sigreturn fails */ .globl _C_LABEL(esigcode) _C_LABEL(esigcode): END(sigcode) @@ -910,14 +934,14 @@ END(sigcode) */ ENTRY(setjmp) movl 4(%esp),%eax - movl %ebx,(%eax) # save ebx - movl %esp,4(%eax) # save esp - movl %ebp,8(%eax) # save ebp - movl %esi,12(%eax) # save esi - movl %edi,16(%eax) # save edi - movl (%esp),%edx # get rta - movl %edx,20(%eax) # save eip - xorl %eax,%eax # return 0 + movl %ebx,(%eax) /* save ebx */ + movl %esp,4(%eax) /* save esp */ + movl %ebp,8(%eax) /* save ebp */ + movl %esi,12(%eax) /* save esi */ + movl %edi,16(%eax) /* save edi */ + movl (%esp),%edx /* get rta */ + movl %edx,20(%eax) /* save eip */ + xorl %eax,%eax /* return 0 */ ret END(setjmp) @@ -928,14 +952,14 @@ END(setjmp) */ ENTRY(longjmp) movl 4(%esp),%eax - movl (%eax),%ebx # restore ebx - movl 4(%eax),%esp # restore esp - movl 8(%eax),%ebp # restore ebp - movl 12(%eax),%esi # restore esi - movl 16(%eax),%edi # restore edi - movl 20(%eax),%edx # get rta - movl %edx,(%esp) # put in return frame - movl $1,%eax # return 1 + movl (%eax),%ebx /* restore ebx */ + movl 4(%eax),%esp /* restore esp */ + movl 8(%eax),%ebp /* restore ebp */ + movl 12(%eax),%esi /* restore esi */ + movl 16(%eax),%edi /* restore edi */ + movl 20(%eax),%edx /* get rta */ + movl %edx,(%esp) /* put in return frame */ + movl $1,%eax /* return 1 */ ret END(longjmp) @@ -945,18 +969,18 @@ END(longjmp) * Mimic cpu_switchto() for postmortem debugging. */ ENTRY(dumpsys) - pushl %ebx # set up fake switchframe - pushl %esi # and save context - pushl %edi + pushl %ebx /* set up fake switchframe */ + pushl %esi /* and save context */ + pushl %edi movl %esp,_C_LABEL(dumppcb)+PCB_ESP movl %ebp,_C_LABEL(dumppcb)+PCB_EBP - call _C_LABEL(dodumpsys) # dump! - addl $(3*4), %esp # unwind switchframe + call _C_LABEL(dodumpsys) /* dump! */ + addl $(3*4), %esp /* unwind switchframe */ ret END(dumpsys) /* - * struct lwp *cpu_switchto(struct lwp *oldlwp, struct *newlwp, + * struct lwp *cpu_switchto(struct lwp *oldlwp, struct lwp *newlwp, * bool returning) * * 1. if (oldlwp != NULL), save its context. 
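 *
 * For orientation, the contract implemented below can be sketched in C;
 * cpu_switchto_sketch, save_context and restore_context are illustrative
 * placeholders, not the kernel's names:
 *
 *	struct lwp *
 *	cpu_switchto_sketch(struct lwp *oldlwp, struct lwp *newlwp, bool returning)
 *	{
 *		if (oldlwp != NULL)
 *			save_context(oldlwp);	// callee-saved registers, %esp, %ebp into its PCB
 *		restore_context(newlwp);	// ring-0 %esp, %fs/%gs descriptors, I/O bitmap, %cr0
 *		return oldlwp;			// handed back to the caller in %eax
 *	}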
@@ -982,9 +1006,9 @@ ENTRY(cpu_switchto) 0: #endif - movl 16(%esp),%esi # oldlwp - movl 20(%esp),%edi # newlwp - movl 24(%esp),%edx # returning + movl 16(%esp),%esi /* oldlwp */ + movl 20(%esp),%edi /* newlwp */ + movl 24(%esp),%edx /* returning */ testl %esi,%esi jz 1f @@ -1013,15 +1037,17 @@ ENTRY(cpu_switchto) pushl %edi call _C_LABEL(i386_switch_context) addl $4,%esp -#else /* XEN */ +#else /* !XEN */ /* Switch ring0 esp */ movl PCB_ESP0(%ebx),%eax movl %eax,CPUVAR(ESP0) +#endif /* !XEN */ /* Don't bother with the rest if switching to a system process. */ testl $LW_SYSTEM,L_FLAG(%edi) jnz 4f +#ifndef XEN /* Restore thread-private %fs/%gs descriptors. */ movl CPUVAR(GDT),%ecx movl PCB_FSD(%ebx), %eax @@ -1032,7 +1058,7 @@ ENTRY(cpu_switchto) movl PCB_GSD+4(%ebx), %edx movl %eax, (GUGS_SEL*8)(%ecx) movl %edx, (GUGS_SEL*8+4)(%ecx) -#endif /* XEN */ +#endif /* !XEN */ /* Switch I/O bitmap */ movl PCB_IOMAP(%ebx),%eax @@ -1052,7 +1078,11 @@ ENTRY(cpu_switchto) * is deferred until mi_switch(), when cpu_switchto() returns. */ 2: -#ifndef XEN +#ifdef XEN + pushl %edi + call _C_LABEL(i386_tls_switch) + addl $4,%esp +#else /* !XEN */ movl $IPL_HIGH,CPUVAR(ILEVEL) movl PCB_CR0(%ebx),%ecx /* has CR0_TS clear */ movl %cr0,%edx @@ -1069,7 +1099,7 @@ ENTRY(cpu_switchto) 3: cmpl %edx,%ecx je 4f movl %ecx,%cr0 -#endif /* XEN */ +#endif /* !XEN */ /* Return to the new LWP, returning 'oldlwp' in %eax. */ 4: movl %esi,%eax @@ -1113,7 +1143,7 @@ END(cpu_switchto) * Update pcb, saving current processor state. */ ENTRY(savectx) - movl 4(%esp),%edx # edx = pcb + movl 4(%esp),%edx /* edx = pcb */ movl %esp,PCB_ESP(%edx) movl %ebp,PCB_EBP(%edx) ret @@ -1127,12 +1157,12 @@ END(savectx) IDTVEC(osyscall) #ifndef XEN /* XXX we are in trouble! interrupts be off here. */ - cli # must be first instruction + cli /* must be first instruction */ #endif - pushfl # set eflags in trap frame + pushfl /* set eflags in trap frame */ popl 8(%esp) - orl $PSL_I,(%esp) # re-enable ints on return to user - pushl $7 # size of instruction for restart + orl $PSL_I,8(%esp) /* re-enable ints on return to user */ + pushl $7 /* size of instruction for restart */ jmp syscall1 IDTVEC_END(osyscall) @@ -1142,9 +1172,9 @@ IDTVEC_END(osyscall) * Trap gate entry for syscall */ IDTVEC(syscall) - pushl $2 # size of instruction for restart + pushl $2 /* size of instruction for restart */ syscall1: - pushl $T_ASTFLT # trap # for doing ASTs + pushl $T_ASTFLT /* trap # for doing ASTs */ INTRENTRY STI(%eax) #ifdef DIAGNOSTIC @@ -1159,13 +1189,13 @@ syscall1: #endif 1: #endif /* DIAGNOSTIC */ - addl $1,CPUVAR(NSYSCALL) # count it atomically - adcl $0,CPUVAR(NSYSCALL)+4 # count it atomically + addl $1,CPUVAR(NSYSCALL) /* count it atomically */ + adcl $0,CPUVAR(NSYSCALL)+4 /* count it atomically */ movl CPUVAR(CURLWP),%edi movl L_PROC(%edi),%edx - movl %esp,L_MD_REGS(%edi) # save pointer to frame + movl %esp,L_MD_REGS(%edi) /* save pointer to frame */ pushl %esp - call *P_MD_SYSCALL(%edx) # get pointer to syscall() function + call *P_MD_SYSCALL(%edx) /* get pointer to syscall() function */ addl $4,%esp .Lsyscall_checkast: /* Check for ASTs on exit to user mode. */ @@ -1182,17 +1212,17 @@ syscall1: /* process pending interrupts */ CLI(%eax) movl CPUVAR(ILEVEL), %ebx - movl $.Lsyscall_resume, %esi # address to resume loop at + movl $.Lsyscall_resume, %esi /* address to resume loop at */ .Lsyscall_resume: - movl %ebx,%eax # get cpl + movl %ebx,%eax /* get cpl */ movl CPUVAR(IUNMASK)(,%eax,4),%eax - andl CPUVAR(IPENDING),%eax # any non-masked bits left? 
+ andl CPUVAR(IPENDING),%eax /* any non-masked bits left? */ jz 17f bsrl %eax,%eax btrl %eax,CPUVAR(IPENDING) movl CPUVAR(ISOURCES)(,%eax,4),%eax jmp *IS_RESUME(%eax) -17: movl %ebx, CPUVAR(ILEVEL) #restore cpl +17: movl %ebx, CPUVAR(ILEVEL) /* restore cpl */ jmp .Lsyscall_checkast 14: #endif /* XEN */ @@ -1231,8 +1261,8 @@ syscall1: IDTVEC_END(syscall) IDTVEC(svr4_fasttrap) - pushl $2 # size of instruction for restart - pushl $T_ASTFLT # trap # for doing ASTs + pushl $2 /* size of instruction for restart */ + pushl $T_ASTFLT /* trap # for doing ASTs */ INTRENTRY STI(%eax) pushl $RW_READER @@ -1245,7 +1275,7 @@ IDTVEC(svr4_fasttrap) addl $4,%esp 2: /* Check for ASTs on exit to user mode. */ cli - CHECK_ASTPENDING(%eax) + CHECK_ASTPENDING(%eax) je 1f /* Always returning to user mode here. */ CLEAR_ASTPENDING(%eax) @@ -1263,55 +1293,23 @@ IDTVEC(svr4_fasttrap) cli jmp 2b -#if NNPX > 0 -/* - * Special interrupt handlers. Someday intr0-intr15 will be used to count - * interrupts. We'll still need a special exception 16 handler. The busy - * latch stuff in probintr() can be moved to npxprobe(). - */ - -/* - * void probeintr(void) - */ -NENTRY(probeintr) - ss - incl _C_LABEL(npx_intrs_while_probing) - pushl %eax - movb $0x20,%al # EOI (asm in strings loses cpp features) - outb %al,$0xa0 # IO_ICU2 - outb %al,$0x20 # IO_ICU1 - movb $0,%al - outb %al,$0xf0 # clear BUSY# latch - popl %eax - iret -END(probeintr) - -/* - * void probetrap(void) - */ -NENTRY(probetrap) - ss - incl _C_LABEL(npx_traps_while_probing) - fnclex - iret -END(probetrap) - /* * int npx586bug1(int a, int b) + * Used when checking for the FDIV bug on first generations pentiums. + * Anything 120MHz or above is fine. */ NENTRY(npx586bug1) - fildl 4(%esp) # x - fildl 8(%esp) # y + fildl 4(%esp) /* x */ + fildl 8(%esp) /* y */ fld %st(1) - fdiv %st(1),%st # x/y - fmulp %st,%st(1) # (x/y)*y - fsubrp %st,%st(1) # x-(x/y)*y + fdiv %st(1),%st /* x/y */ + fmulp %st,%st(1) /* (x/y)*y */ + fsubrp %st,%st(1) /* x-(x/y)*y */ pushl $0 fistpl (%esp) popl %eax ret END(npx586bug1) -#endif /* NNPX > 0 */ /* * void sse2_idlezero_page(void *pg)
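/*
 * For reference, the FDIV check performed by npx586bug1 above can be
 * sketched in C. has_fdiv_bug is an illustrative name and the operand pair
 * 4195835/3145727 is the classic published trigger; this is a hedged sketch,
 * not the kernel's code.
 *
 *	static int
 *	has_fdiv_bug(void)
 *	{
 *		volatile double x = 4195835.0, y = 3145727.0;
 *
 *		return (x - (x / y) * y) > 1.0;	// about 256 on a flawed FDIV unit, ~0 otherwise
 *	}
 */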