
Please note that diffs are not public domain; they are subject to the copyright notices on the relevant files.

Diff for /src/sys/arch/i386/i386/locore.S between version 1.58 and 1.58.6.8

version 1.58, 2007/12/03 19:06:36  ->  version 1.58.6.8, 2008/01/23 19:27:17
Line 5 (v.1.58) / Line 5 (v.1.58.6.8)
  */

 /*
    * Copyright (c) 2006 Manuel Bouyer.
    *
    * Redistribution and use in source and binary forms, with or without
    * modification, are permitted provided that the following conditions
    * are met:
    * 1. Redistributions of source code must retain the above copyright
    *    notice, this list of conditions and the following disclaimer.
    * 2. Redistributions in binary form must reproduce the above copyright
    *    notice, this list of conditions and the following disclaimer in the
    *    documentation and/or other materials provided with the distribution.
    * 3. All advertising materials mentioning features or use of this software
    *    must display the following acknowledgement:
    *      This product includes software developed by Manuel Bouyer.
    * 4. The name of the author may not be used to endorse or promote products
    *    derived from this software without specific prior written permission.
    *
    * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
    * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
    * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
    * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
    * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
    * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
    * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
    * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
    * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
    * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
    *
    */
   
   /*
  * Copyright (c) 2001 Wasabi Systems, Inc.
  * All rights reserved.
  *
Line 110 (v.1.58) / Line 140 (v.1.58.6.8)
  *      @(#)locore.s    7.3 (Berkeley) 5/13/91
  */
   
   #include <machine/asm.h>
   __KERNEL_RCSID(0, "$NetBSD$");
   
 #include "opt_compat_oldboot.h"  #include "opt_compat_oldboot.h"
 #include "opt_ddb.h"  #include "opt_ddb.h"
 #include "opt_realmem.h"  #include "opt_realmem.h"
 #include "opt_vm86.h"  #include "opt_vm86.h"
   #include "opt_xen.h"
   
 #include "npx.h"  #include "npx.h"
 #include "assym.h"  #include "assym.h"
Line 129 
Line 163 
 #include <machine/specialreg.h>  #include <machine/specialreg.h>
 #include <machine/trap.h>  #include <machine/trap.h>
 #include <machine/i82489reg.h>  #include <machine/i82489reg.h>
 #include <machine/multiboot.h>  
 #include <machine/asm.h>  
 #include <machine/frameasm.h>  #include <machine/frameasm.h>
 #include <machine/i82489reg.h>  #include <machine/i82489reg.h>
   #ifndef XEN
   #include <machine/multiboot.h>
   #endif
   
 /* XXX temporary kluge; these should not be here */
 /* Get definitions for IOM_BEGIN, IOM_END, and IOM_SIZE */
 #include <dev/isa/isareg.h>
   
   #ifdef XEN
   /*
    * Xen guest identifier and loader selection
    */
   .section __xen_guest
   #ifdef XEN3
           .ascii  "GUEST_OS=netbsd,GUEST_VER=3.0,XEN_VER=xen-3.0"
   #if defined(DOM0OPS) || !defined(XEN_COMPAT_030001)
           .ascii  ",VIRT_BASE=0xc0000000" /* KERNBASE */
           .ascii  ",ELF_PADDR_OFFSET=0xc0000000" /* KERNBASE */
   #else
           .ascii  ",VIRT_BASE=0xc0100000" /* KERNTEXTOFF */
           .ascii  ",ELF_PADDR_OFFSET=0xc0100000" /* KERNTEXTOFF */
   #endif
           .ascii  ",VIRT_ENTRY=0xc0100000" /* KERNTEXTOFF */
   #if !defined(XEN_COMPAT_030001)
           .ascii  ",HYPERCALL_PAGE=0x00000101"
                   /* (???+HYPERCALL_PAGE_OFFSET)/PAGE_SIZE) */
   #endif
   #ifdef PAE
           .ascii  ",PAE=yes[extended-cr3]"
   #endif
   #else /* !XEN3 */
           .ascii  "GUEST_OS=netbsd,GUEST_VER=2.0,XEN_VER=2.0"
   #endif /* XEN3 */
           .ascii  ",LOADER=generic"
   #if (NKSYMS || defined(DDB) || defined(LKM)) && !defined(SYMTAB_SPACE)
           .ascii  ",BSD_SYMTAB=yes"
   #endif
           .byte   0
   #endif
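    /*
     * Note: the key=value pairs in the __xen_guest section above are read
     * by Xen's domain builder when it loads the kernel image.  In broad
     * terms: VIRT_BASE/VIRT_ENTRY give the virtual address the kernel
     * expects to be mapped at and its entry point, ELF_PADDR_OFFSET tells
     * the builder how the ELF physical addresses relate to those virtual
     * addresses, HYPERCALL_PAGE names the page the hypervisor should fill
     * with hypercall stubs, PAE selects the page-table format, and
     * BSD_SYMTAB asks for the symbol table to be loaded after the image.
     */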
   
 /*
  * Initialization
  */
Line 183 (v.1.58) / Line 250 (v.1.58.6.8)   _C_LABEL(tablesize): .long 0
         .space 512
 tmpstk:
   
   #ifndef XEN
 #define _RELOC(x)       ((x) - KERNBASE)
   #else
   #define _RELOC(x)       ((x))
   #endif /* XEN */
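    /*
     * Note: natively the kernel is entered with paging disabled, so early
     * code must refer to symbols by their physical location, i.e. the
     * linked (virtual) address minus KERNBASE.  Under Xen the domain
     * builder maps and enters the kernel at its linked virtual addresses
     * with paging already enabled, so no adjustment is needed.
     */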
 #define RELOC(x)        _RELOC(_C_LABEL(x))
   
         .text
Line 192 (v.1.58) / Line 262 (v.1.58.6.8)   tmpstk:
         .set    _C_LABEL(kernel_text),KERNTEXTOFF
   
         .globl  start
   #ifndef XEN
 start:  movw    $0x1234,0x472                   # warm boot

 #if defined(MULTIBOOT)
Line 643 (v.1.58) / Line 714 (v.1.58.6.8)   begin:
 #endif /* SAFARI_FIFO_HACK */

         call    _C_LABEL(main)
   #else /* XEN */
   start:
           /* First, reset the PSL. */
           pushl   $PSL_MBO
           popfl
   
           cld
   #ifdef XEN3
           movl    %esp, %ebx              # save start of available space
   #else
           movl    %esi,%ebx               # save start_info pointer
   #endif
           movl    $_RELOC(tmpstk),%esp    # bootstrap stack end location
   
           /* Clear BSS first so that there are no surprises... */
           xorl    %eax,%eax
           movl    $RELOC(__bss_start),%edi
           movl    $RELOC(_end),%ecx
           subl    %edi,%ecx
           rep stosb
   
           /* Copy the necessary stuff from start_info structure. */
           /* We need to copy shared_info early, so that sti/cli work */
           movl    $RELOC(start_info_union),%edi
           movl    $128,%ecx
           rep movsl
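            /*
             * Note: 128 longwords = 512 bytes; %esi still holds the
             * start_info pointer the hypervisor passed in at entry.
             */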
   
           /* Clear segment registers; always null in proc0. */
           xorl    %eax,%eax
           movw    %ax,%fs
           movw    %ax,%gs
           decl    %eax
           movl    %eax,RELOC(cpu_info_primary)+CPU_INFO_LEVEL
   
           xorl    %eax,%eax
           cpuid
           movl    %eax,RELOC(cpu_info_primary)+CPU_INFO_LEVEL
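            /*
             * Note: CPUID leaf 0 returns the highest supported basic leaf
             * in %eax; that becomes the primary CPU's cpuid level.  The -1
             * stored just above mirrors the native startup path, which must
             * allow for processors without CPUID.
             */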
   
           call    xen_pmap_bootstrap
           /*
            * First avail returned by xen_pmap_bootstrap in %eax
            */
           movl    %eax, %esi;
           movl    %esi, _C_LABEL(proc0uarea)
   
   #define PROC0PDIR       ((0)              * PAGE_SIZE)
   #define PROC0STACK      ((1)              * PAGE_SIZE)
   
           /* Set up bootstrap stack. */
           leal    (KSTACK_SIZE-FRAMESIZE)(%eax),%esp
           xorl    %ebp,%ebp               # mark end of frames
   
           addl    $USPACE, %esi
           subl    $KERNBASE, %esi         #init386 want a physical address
           pushl   %esi
           call    _C_LABEL(init386)       # wire 386 chip for unix operation
           addl    $4,%esp
           call    _C_LABEL(main)
   
   #if defined(XEN3) && !defined(XEN_COMPAT_030001)
   /* space for the hypercall call page */
   #define HYPERCALL_PAGE_OFFSET 0x1000
   .org HYPERCALL_PAGE_OFFSET
   ENTRY(hypercall_page)
   .skip 0x1000
   #endif /* defined(XEN3) && !defined(XEN_COMPAT_030001) */
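    /*
     * Note: the hypervisor fills this reserved page with one small stub per
     * hypercall.  Its location is advertised by the HYPERCALL_PAGE=0x00000101
     * entry in __xen_guest above:
     *   (KERNTEXTOFF - KERNBASE + HYPERCALL_PAGE_OFFSET) / PAGE_SIZE
     *     = (0x00100000 + 0x1000) / 0x1000 = 0x101
     * so the "???" in that entry's comment presumably stands for
     * KERNTEXTOFF - KERNBASE.
     */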
   
   /*
    * void lgdt_finish(void);
    * Finish load a new GDT pointer (do any necessary cleanup).
    * XXX It's somewhat questionable whether reloading all the segment registers
    * is necessary, since the actual descriptor data is not changed except by
    * process creation and exit, both of which clean up via task switches.  OTOH,
    * this only happens at run time when the GDT is resized.
    */
   /* LINTSTUB: Func: void lgdt_finish(void) */
   NENTRY(lgdt_finish)
           movl    $GSEL(GDATA_SEL, SEL_KPL),%eax
           movw    %ax,%ds
           movw    %ax,%es
           movw    %ax,%gs
           movw    %ax,%ss
           movl    $GSEL(GCPU_SEL, SEL_KPL),%eax
           movw    %ax,%fs
           /* Reload code selector by doing intersegment return. */
           popl    %eax
           pushl   $GSEL(GCODE_SEL, SEL_KPL)
           pushl   %eax
           lret
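    /*
     * Note: the popl/pushl/pushl/lret sequence above reloads %cs: lret pops
     * the saved return address into %eip and the pushed selector into %cs
     * in one step, so execution resumes in the caller on the new selector.
     * A caller would use it, roughly, like this (sketch; assumes the GDT
     * itself has already been installed, natively with lgdt or under Xen
     * via the set_gdt hypercall):
     *
     *          call    _C_LABEL(lgdt_finish)   # refresh segment registers
     */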
   
   #endif /* XEN */
   
 /*
  * void lwp_trampoline(void);
Line 722 (v.1.58) / Line 884 (v.1.58.6.8)   ENTRY(longjmp)
         ret

 /*
    * void dumpsys(void)
    *
    * Mimic cpu_switchto() for postmortem debugging.
    */
   ENTRY(dumpsys)
           pushl   %ebx                    # set up fake switchframe
           pushl   %esi                    #   and save context
           pushl   %edi
           movl    %esp,_C_LABEL(dumppcb)+PCB_ESP
           movl    %ebp,_C_LABEL(dumppcb)+PCB_EBP
           call    _C_LABEL(dodumpsys)     # dump!
           addl    $(3*4), %esp            # unwind switchframe
           ret
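    /*
     * Note: the three pushes build the same ebx/esi/edi frame that
     * cpu_switchto() saves, and dumppcb records %esp/%ebp, so the dumping
     * LWP looks to crash-dump analysis like any other switched-out LWP;
     * dodumpsys() then performs the actual dump.
     */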
   
   /*
  * struct lwp *cpu_switchto(struct lwp *oldlwp, struct newlwp,
  *                          bool returning)
  *
Line 753 (v.1.58) / Line 930 (v.1.58.6.8)   ENTRY(cpu_switchto)
         movl    PCB_EBP(%ebx),%ebp
         movl    PCB_ESP(%ebx),%esp
   
  [only in v.1.58:]
         /* Set curlwp. */
         movl    %edi,CPUVAR(CURLWP)
  [v.1.58.6.8:]
          /*
           * Set curlwp.  This must be globally visible in order to permit
           * non-interlocked mutex release.
           */
          movl    %edi,%ecx
          xchgl   %ecx,CPUVAR(CURLWP)
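          /*
           * Note: xchg with a memory operand is implicitly locked, so the
           * update of curlwp doubles as a full memory barrier; that is what
           * makes the new value globally visible before the switched-to LWP
           * runs, as the comment above requires.
           */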
   
          /* Skip the rest if returning to a pinned LWP. */
          testl   %edx,%edx
          jnz     4f

  [only in v.1.58:]
          /* Switch TSS.  Reset "task busy" flag before loading. */
          movl    %cr3,%eax
          movl    %eax,PCB_CR3(%ebx)      # for TSS gates
          movl    CPUVAR(GDT),%ecx
          movl    L_MD_TSS_SEL(%edi),%edx
          andl    $~0x0200,4(%ecx,%edx, 1)
          ltr     %dx
  [v.1.58.6.8:]
  #ifdef XEN
          pushl   %edi
          call    _C_LABEL(i386_switch_context)
          addl    $4,%esp
  #else /* XEN */
          /* Switch ring0 esp */
          movl    PCB_ESP0(%ebx),%eax
          movl    %eax,CPUVAR(ESP0)
   
          /* Don't bother with the rest if switching to a system process. */
          testl   $LW_SYSTEM,L_FLAG(%edi)
          jnz     4f

          /* Restore thread-private %fs/%gs descriptors. */
            movl    CPUVAR(GDT),%ecx
          movl    PCB_FSD(%ebx), %eax
          movl    PCB_FSD+4(%ebx), %edx
          movl    %eax, (GUFS_SEL*8)(%ecx)
Line 781 (v.1.58) / Line 964 (v.1.58.6.8)   ENTRY(cpu_switchto)
          movl    PCB_GSD+4(%ebx), %edx
          movl    %eax, (GUGS_SEL*8)(%ecx)
          movl    %edx, (GUGS_SEL*8+4)(%ecx)
   #endif /* XEN */
   
           /* Switch I/O bitmap */
           movl    PCB_IOMAP(%ebx),%eax
           orl     %eax,%eax
           jnz,pn  .Lcopy_iobitmap
           movl    $(IOMAP_INVALOFF << 16),CPUVAR(IOBASE)
   .Liobitmap_done:
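            /*
             * Note on the I/O bitmap switch above: CPUVAR(IOBASE) presumably
             * aliases the I/O bitmap base field of the per-CPU TSS (hence the
             * << 16: the 16-bit base sits in the high half of that word).
             * IOMAP_INVALOFF points past the TSS limit, so with no pcb_iomap
             * every user in/out instruction faults; otherwise .Lcopy_iobitmap
             * below installs the LWP's bitmap and a valid base.
             */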
   
          /* Is this process using RAS (restartable atomic sequences)? */
          movl    L_PROC(%edi),%eax
Line 792 (v.1.58) / Line 983 (v.1.58.6.8)   ENTRY(cpu_switchto)
           * FPU IPIs can alter the LWP's saved cr0.  Dropping the priority
           * is deferred until mi_switch(), when cpu_switchto() returns.
           */
  [only in v.1.58:]
  2:      movl    $IPL_IPI,CPUVAR(ILEVEL)
  [v.1.58.6.8:]
  2:
  #ifndef XEN
          movl    $IPL_IPI,CPUVAR(ILEVEL)
          movl    PCB_CR0(%ebx),%ecx
          movl    %cr0,%edx
   
Line 809  ENTRY(cpu_switchto)
Line 1002  ENTRY(cpu_switchto)
 3:      cmpl    %edx,%ecx
         je      4f
         movl    %ecx,%cr0
   #endif /* XEN */
   
         /* Return to the new LWP, returning 'oldlwp' in %eax. */
 4:      movl    %esi,%eax
Line 829 (v.1.58) / Line 1023 (v.1.58.6.8)   ENTRY(cpu_switchto)
         movl    %eax,TF_EIP(%ecx)
         jmp     2b
   
   .Lcopy_iobitmap:
           /* Copy I/O bitmap. */
           movl    $(IOMAPSIZE/4),%ecx
           pushl   %esi
           pushl   %edi
           movl    %eax,%esi               /* pcb_iomap */
           movl    CPUVAR(SELF),%edi
           leal    CPU_INFO_IOMAP(%edi),%edi
           rep
           movsl
           popl    %edi
           popl    %esi
           movl    $((CPU_INFO_IOMAP - CPU_INFO_TSS) << 16),CPUVAR(IOBASE)
           jmp     .Liobitmap_done
   
 /*
  * void savectx(struct pcb *pcb);
  *
Line 888 (v.1.58) / Line 1097 (v.1.58.6.8)   syscall1:
         addl    $4,%esp
 .Lsyscall_checkast:
         /* Check for ASTs on exit to user mode. */
  [only in v.1.58:]
         cli
  [v.1.58.6.8:]
          CLI(%eax)
         CHECK_ASTPENDING(%eax)
         je      1f
         /* Always returning to user mode here. */
         CLEAR_ASTPENDING(%eax)
  [only in v.1.58:]
         sti
  [v.1.58.6.8:]
          STI(%eax)
         /* Pushed T_ASTFLT into tf_trapno on entry. */
         pushl   %esp
         call    _C_LABEL(trap)
Line 901 (v.1.58) / Line 1110 (v.1.58.6.8)   syscall1:
         jmp     .Lsyscall_checkast      /* re-check ASTs */
 1:      CHECK_DEFERRED_SWITCH
         jnz     9f
   #ifdef XEN
           STIC(%eax)
           jz      14f
           call    _C_LABEL(stipending)
           testl   %eax,%eax
           jz      14f
           /* process pending interrupts */
           CLI(%eax)
           movl    CPUVAR(ILEVEL), %ebx
           movl    $.Lsyscall_resume, %esi # address to resume loop at
   .Lsyscall_resume:
           movl    %ebx,%eax       # get cpl
           movl    CPUVAR(IUNMASK)(,%eax,4),%eax
           andl    CPUVAR(IPENDING),%eax   # any non-masked bits left?
           jz      17f
           bsrl    %eax,%eax
           btrl    %eax,CPUVAR(IPENDING)
           movl    CPUVAR(ISOURCES)(,%eax,4),%eax
           jmp     *IS_RESUME(%eax)
   17:     movl    %ebx, CPUVAR(ILEVEL)    #restore cpl
           jmp     .Lsyscall_checkast
   14:
   #endif /* XEN */
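    /*
     * Note: CLI/STI/STIC here come from <machine/frameasm.h>: natively they
     * reduce to plain cli/sti, while under Xen they manipulate the event
     * delivery mask in the shared info page instead.  STIC unmasks events
     * and tests whether any became pending; if so, stipending() folds the
     * pending Xen events into the per-CPU ipending word, and the loop above
     * dispatches every source not masked at the current IPL through its
     * IS_RESUME entry before the final return to user mode.
     */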
 #ifndef DIAGNOSTIC
         INTRFASTEXIT
 #else /* DIAGNOSTIC */
         cmpl    $IPL_NONE,CPUVAR(ILEVEL)
         jne     3f
         INTRFASTEXIT
  [only in v.1.58:]
 3:      sti
  [v.1.58.6.8:]
  3:      STI(%eax)
         pushl   $4f
         call    _C_LABEL(printf)
         addl    $4,%esp
Line 919 (v.1.58) / Line 1151 (v.1.58.6.8)   syscall1:
 5:      .asciz  "WARNING: SPL NOT ZERO ON SYSCALL ENTRY\n"
 6:      .asciz  "WARNING: WANT PMAPLOAD ON SYSCALL ENTRY\n"
 #endif /* DIAGNOSTIC */
  [only in v.1.58:]
 9:      sti
  [v.1.58.6.8:]
  9:      STI(%eax)
         call    _C_LABEL(pmap_load)
         jmp     .Lsyscall_checkast      /* re-check ASTs */
   

Legend: lines tagged [only in v.1.58], or grouped under an [only in v.1.58:]
marker, were removed from version 1.58; the lines under a following
[v.1.58.6.8:] marker replace them.  All other new lines are additions in
version 1.58.6.8; the remaining lines are common to both versions.
