[BACK] Return to locore.S CVS log — [TXT] [DIR] — Up to [cvs.NetBSD.org] / src / sys / arch / i386 / i386

Please note that diffs are not public domain; they are subject to the copyright notices on the relevant files.

Diff for /src/sys/arch/i386/i386/locore.S between version 1.15 and 1.23

Left column: version 1.15 (2003/08/20 21:48:37). Right column: version 1.23 (2004/02/16 17:11:27).
Line 70 
Line 70 
  *      @(#)locore.s    7.3 (Berkeley) 5/13/91   *      @(#)locore.s    7.3 (Berkeley) 5/13/91
  */   */
   
   #include "opt_compat_netbsd.h"
   #include "opt_compat_oldboot.h"
 #include "opt_cputype.h"  #include "opt_cputype.h"
 #include "opt_ddb.h"  #include "opt_ddb.h"
 #include "opt_ipkdb.h"  #include "opt_ipkdb.h"
 #include "opt_vm86.h"  
 #include "opt_user_ldt.h"  
 #include "opt_dummy_nops.h"  
 #include "opt_compat_oldboot.h"  
 #include "opt_multiprocessor.h"  
 #include "opt_lockdebug.h"  #include "opt_lockdebug.h"
   #include "opt_multiprocessor.h"
 #include "opt_realmem.h"  #include "opt_realmem.h"
   #include "opt_user_ldt.h"
   #include "opt_vm86.h"
   
 #include "npx.h"  #include "npx.h"
 #include "assym.h"  #include "assym.h"
Line 705  NENTRY(proc_trampoline)
Line 705  NENTRY(proc_trampoline)
         /* NOTREACHED */          /* NOTREACHED */
   
 /*****************************************************************************/  /*****************************************************************************/
   #ifdef COMPAT_16
 /*  /*
  * Signal trampoline; copied to top of user stack.   * Signal trampoline; copied to top of user stack.
  */   */
Line 718  NENTRY(sigcode)
Line 718  NENTRY(sigcode)
         leal    12(%esp),%eax           # get pointer to sigcontext          leal    12(%esp),%eax           # get pointer to sigcontext
         movl    %eax,4(%esp)            # put it in the argument slot          movl    %eax,4(%esp)            # put it in the argument slot
                                         # fake return address already there                                          # fake return address already there
   #if defined(SYS_compat_16___sigreturn14)
           movl    $SYS_compat_16___sigreturn14,%eax
   #elif defined(SYS___sigreturn14)
         movl    $SYS___sigreturn14,%eax          movl    $SYS___sigreturn14,%eax
   #else
           #error "no sigreturn14 syscall"
   #endif
         int     $0x80                   # enter kernel with args on stack          int     $0x80                   # enter kernel with args on stack
         movl    $SYS_exit,%eax          movl    $SYS_exit,%eax
         int     $0x80                   # exit if sigreturn fails          int     $0x80                   # exit if sigreturn fails
         .globl  _C_LABEL(esigcode)          .globl  _C_LABEL(esigcode)
 _C_LABEL(esigcode):  _C_LABEL(esigcode):
   #endif
   
 /*****************************************************************************/  /*****************************************************************************/
   
Line 1921  switch_restored:
Line 1928  switch_restored:
         movl    PCB_CR0(%esi),%ecx          movl    PCB_CR0(%esi),%ecx
 #ifdef MULTIPROCESSOR  #ifdef MULTIPROCESSOR
         /*          /*
          * If our floating point registers are on a different cpu,           * If our floating point registers are on a different CPU,
          * clear CR0_TS so we'll trap rather than reuse bogus state.           * clear CR0_TS so we'll trap rather than reuse bogus state.
          */           */
         movl    PCB_FPCPU(%esi),%ebx          movl    PCB_FPCPU(%esi),%ebx
Line 1943  switch_restored:
Line 1950  switch_restored:
  */   */
         movl    CPUVAR(CURLWP),%edi          movl    CPUVAR(CURLWP),%edi
         movl    L_PROC(%edi),%esi          movl    L_PROC(%edi),%esi
         cmpl    $0,P_NRAS(%esi)          cmpl    $0,P_RASLIST(%esi)
         je      1f          jne     2f
         movl    L_MD_REGS(%edi),%ebx  
         movl    TF_EIP(%ebx),%eax  
         pushl   %eax  
         pushl   %esi  
         call    _C_LABEL(ras_lookup)  
         addl    $8,%esp  
         cmpl    $-1,%eax  
         je      1f  
         movl    %eax,TF_EIP(%ebx)  
 1:  1:
         movl    $1,%ebx          movl    $1,%ebx
   
Line 1973  switch_return:
Line 1971  switch_return:
         popl    %ebx          popl    %ebx
         ret          ret
   
   2:                                      # check RAS list
           movl    L_MD_REGS(%edi),%ebx
           movl    TF_EIP(%ebx),%eax
           pushl   %eax
           pushl   %esi
           call    _C_LABEL(ras_lookup)
           addl    $8,%esp
           cmpl    $-1,%eax
           je      1b
           movl    %eax,TF_EIP(%ebx)
           jmp     1b
   
 /*  /*
  * void cpu_switchto(struct lwp *current, struct lwp *next)   * void cpu_switchto(struct lwp *current, struct lwp *next)
  * Switch to the specified next LWP.   * Switch to the specified next LWP.
Line 2017  ENTRY(cpu_switchto)
Line 2027  ENTRY(cpu_switchto)
         jmp     switch_resume          jmp     switch_resume
   
 /*  /*
  * void switch_exit(struct lwp *l, void (*exit)(struct lwp *));   * void cpu_exit(struct lwp *l)
  * Switch to the appropriate idle context (lwp0's if uniprocessor; the cpu's   * Switch to the appropriate idle context (lwp0's if uniprocessor; the CPU's
  * if multiprocessor) and deallocate the address space and kernel stack for p.   * if multiprocessor) and deallocate the address space and kernel stack for p.
  * Then jump into cpu_switch(), as if we were in the idle proc all along.   * Then jump into cpu_switch(), as if we were in the idle proc all along.
  */   */
Line 2027  ENTRY(cpu_switchto)
Line 2037  ENTRY(cpu_switchto)
 #endif  #endif
         .globl  _C_LABEL(uvmspace_free),_C_LABEL(kernel_map)          .globl  _C_LABEL(uvmspace_free),_C_LABEL(kernel_map)
         .globl  _C_LABEL(uvm_km_free),_C_LABEL(tss_free)          .globl  _C_LABEL(uvm_km_free),_C_LABEL(tss_free)
 /* LINTSTUB: Func: void switch_exit(struct lwp *l, void (*exit)(struct lwp *)) */  /* LINTSTUB: Func: void cpu_exit(struct lwp *l) */
 ENTRY(switch_exit)  ENTRY(cpu_exit)
         movl    4(%esp),%edi            # old process          movl    4(%esp),%edi            # old process
         movl    8(%esp),%eax            # exit func  
 #ifndef MULTIPROCESSOR  #ifndef MULTIPROCESSOR
         movl    $_C_LABEL(lwp0),%ebx          movl    $_C_LABEL(lwp0),%ebx
         movl    L_ADDR(%ebx),%esi          movl    L_ADDR(%ebx),%esi
Line 2049  ENTRY(switch_exit)
Line 2058  ENTRY(switch_exit)
         movl    PCB_ESP(%esi),%esp          movl    PCB_ESP(%esi),%esp
         movl    PCB_EBP(%esi),%ebp          movl    PCB_EBP(%esi),%ebp
   
         /* Save exit func. */  
         pushl   %eax  
   
         /* Load TSS info. */          /* Load TSS info. */
 #ifdef MULTIPROCESSOR  #ifdef MULTIPROCESSOR
         movl    CPUVAR(GDT),%eax          movl    CPUVAR(GDT),%eax
Line 2081  ENTRY(switch_exit)
Line 2087  ENTRY(switch_exit)
         sti          sti
   
         /*          /*
          * Schedule the dead process's vmspace and stack to be freed.           * Schedule the dead LWP's stack to be freed.
          */           */
         movl    0(%esp),%eax            /* %eax = exit func */          pushl   %edi
         movl    %edi,0(%esp)            /* {lwp_}exit2(l) */          call    _C_LABEL(lwp_exit2)
         call    *%eax  
         addl    $4,%esp          addl    $4,%esp
   
         /* Jump into cpu_switch() with the right state. */          /* Jump into cpu_switch() with the right state. */

Legend:
  Lines shown only in the left column were removed from v.1.15.
  Lines shown in both columns with differing text are changed lines.
  Lines shown only in the right column were added in v.1.23.

CVSweb <webmaster@jp.NetBSD.org>