
Please note that diffs are not public domain; they are subject to the copyright notices on the relevant files.

Diff for /src/sys/arch/i386/i386/locore.S between version 1.1.2.8 and 1.2

version 1.1.2.8, 2003/01/03 23:02:59 (left column)        version 1.2, 2002/12/11 12:02:07 (right column)
Line 114 (v1.1.2.8) / Line 114 (v1.2)
   
 #if defined(MULTIPROCESSOR)  #if defined(MULTIPROCESSOR)
   
 #define SET_CURLWP(lwp,cpu)                             \  #define SET_CURPROC(proc,cpu)                           \
         movl    CPUVAR(SELF),cpu                ;       \          movl    CPUVAR(SELF),cpu                ;       \
         movl    lwp,CPUVAR(CURLWP)      ;       \          movl    proc,CPUVAR(CURPROC)    ;       \
         movl    cpu,L_CPU(lwp)          movl    cpu,P_CPU(proc)
   
 #else  #else
   
 #define SET_CURLWP(lwp,tcpu)            movl    lwp,CPUVAR(CURLWP)  #define SET_CURPROC(proc,tcpu)          movl    proc,CPUVAR(CURPROC)
 #define GET_CURLWP(reg)                 movl    CPUVAR(CURLWP),reg  #define GET_CURPROC(reg)                movl    CPUVAR(CURPROC),reg
   
 #endif  #endif
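
In C terms, the MULTIPROCESSOR variant of this macro pair does no more than the following sketch; curcpu(), ci_curproc and p_cpu are the struct cpu_info and struct proc names of this era (v1.2 side), and the function name itself is invented here:

        /* Rough C equivalent of SET_CURPROC(proc, cpu); a sketch, not kernel source. */
        static inline void
        set_curproc_sketch(struct proc *p)
        {
                struct cpu_info *ci = curcpu();         /* CPUVAR(SELF) */

                ci->ci_curproc = p;                     /* movl proc,CPUVAR(CURPROC) */
                p->p_cpu = ci;                          /* movl cpu,P_CPU(proc) */
        }

On the v1.1.2.8 side the same pattern is applied to the lwp: ci->ci_curlwp = l and l->l_cpu = ci.
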
   
Line 1346 (v1.1.2.8) / Line 1346 (v1.2)  ENTRY(fuswintr)
         movl    4(%esp),%edx          movl    4(%esp),%edx
         cmpl    $VM_MAXUSER_ADDRESS-2,%edx          cmpl    $VM_MAXUSER_ADDRESS-2,%edx
         ja      _C_LABEL(fusuaddrfault)          ja      _C_LABEL(fusuaddrfault)
         movl    CPUVAR(CURLWP),%ecx          movl    CPUVAR(CURPROC),%ecx
         movl    L_ADDR(%ecx),%ecx          movl    P_ADDR(%ecx),%ecx
         movl    $_C_LABEL(fusubail),PCB_ONFAULT(%ecx)          movl    $_C_LABEL(fusubail),PCB_ONFAULT(%ecx)
         movzwl  (%edx),%eax          movzwl  (%edx),%eax
         movl    $0,PCB_ONFAULT(%ecx)          movl    $0,PCB_ONFAULT(%ecx)
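
fuswintr here (and suswintr in the next hunk) relies on the pcb_onfault protocol: pcb_onfault is pointed at fusubail before the user address is touched, so a page fault taken at interrupt level bails out with -1 instead of sleeping. A hedged C sketch of that bookkeeping, written against the v1.2 proc names (the branch side goes through curlwp and L_ADDR instead); the fault unwind itself happens in trap(), so plain C cannot show it, and fuswintr_sketch plus the cast of the fusubail label are illustration only:

        /* Sketch of the fuswintr bookkeeping; the real routine is the assembly above. */
        int
        fuswintr_sketch(const unsigned short *uaddr)
        {
                struct pcb *pcb = &curproc->p_addr->u_pcb;
                int val;

                if ((vaddr_t)uaddr > VM_MAXUSER_ADDRESS - 2)
                        return (-1);                    /* fusuaddrfault */
                pcb->pcb_onfault = (caddr_t)fusubail;   /* a fault now returns -1 */
                val = *uaddr;                           /* movzwl (%edx),%eax, may fault */
                pcb->pcb_onfault = NULL;
                return (val);
        }
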
Line 1502 (v1.1.2.8) / Line 1502 (v1.2)  ENTRY(suswintr)
         movl    4(%esp),%edx          movl    4(%esp),%edx
         cmpl    $VM_MAXUSER_ADDRESS-2,%edx          cmpl    $VM_MAXUSER_ADDRESS-2,%edx
         ja      _C_LABEL(fusuaddrfault)          ja      _C_LABEL(fusuaddrfault)
         movl    CPUVAR(CURLWP),%ecx          movl    CPUVAR(CURPROC),%ecx
         movl    L_ADDR(%ecx),%ecx          movl    P_ADDR(%ecx),%ecx
         movl    $_C_LABEL(fusubail),PCB_ONFAULT(%ecx)          movl    $_C_LABEL(fusubail),PCB_ONFAULT(%ecx)
   
 #if defined(I386_CPU)  #if defined(I386_CPU)
Line 1647 (v1.1.2.8) / Line 1647 (v1.2)  ENTRY(longjmp)
   
 /*****************************************************************************/  /*****************************************************************************/
   
   /*
    * The following primitives manipulate the run queues.
    * _whichqs tells which of the 32 queues _qs
    * have processes in them.  Setrq puts processes into queues, Remrq
    * removes them from queues.  The running process is on no queue,
    * other processes are on a queue related to p->p_pri, divided by 4
    * actually to shrink the 0-127 range of priorities into the 32 available
    * queues.
    */
         .globl  _C_LABEL(sched_whichqs),_C_LABEL(sched_qs)          .globl  _C_LABEL(sched_whichqs),_C_LABEL(sched_qs)
         .globl  _C_LABEL(uvmexp),_C_LABEL(panic)          .globl  _C_LABEL(uvmexp),_C_LABEL(panic)
   
   /*
    * void setrunqueue(struct proc *p);
    * Insert a process on the appropriate queue.  Should be called at splclock().
    * See setrunqueue(9) for more details.
    */
   /* LINTSTUB: Func: void setrunqueue(struct proc *p) */
   NENTRY(setrunqueue)
           movl    4(%esp),%eax
   #ifdef DIAGNOSTIC
           cmpl    $0,P_BACK(%eax) # should not be on q already
           jne     1f
           cmpl    $0,P_WCHAN(%eax)
           jne     1f
           cmpb    $SRUN,P_STAT(%eax)
           jne     1f
   #endif /* DIAGNOSTIC */
           movzbl  P_PRIORITY(%eax),%edx
           shrl    $2,%edx
           btsl    %edx,_C_LABEL(sched_whichqs)    # set q full bit
           leal    _C_LABEL(sched_qs)(,%edx,8),%edx # locate q hdr
           movl    P_BACK(%edx),%ecx
           movl    %edx,P_FORW(%eax)       # link process on tail of q
           movl    %eax,P_BACK(%edx)
           movl    %eax,P_FORW(%ecx)
           movl    %ecx,P_BACK(%eax)
           ret
   #ifdef DIAGNOSTIC
   1:      pushl   $2f
           call    _C_LABEL(panic)
           /* NOTREACHED */
   2:      .asciz  "setrunqueue"
   #endif /* DIAGNOSTIC */
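
The v1.2 setrunqueue above corresponds to roughly this C, using the struct prochd queue heads and the p_forw/p_back links from <sys/proc.h>; it is a reading aid for the assembly, not the code the kernel actually builds:

        /* C sketch of setrunqueue: append p to the tail of its priority queue. */
        void
        setrunqueue_sketch(struct proc *p)
        {
                int which = p->p_priority >> 2;         /* 0-127 priority -> 32 queues */
                struct prochd *q = &sched_qs[which];
                struct proc *tail = q->ph_rlink;

                sched_whichqs |= 1 << which;            /* btsl: mark queue non-empty */
                p->p_forw = (struct proc *)q;           /* p becomes the new tail */
                q->ph_rlink = p;
                tail->p_forw = p;
                p->p_back = tail;
        }
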
   
   /*
    * void remrunqueue(struct proc *p);
    * Remove a process from its queue.  Should be called at splclock().
    * See remrunqueue(9) for more details.
    */
   /* LINTSTUB: Func: void remrunqueue(struct proc *p) */
   NENTRY(remrunqueue)
           movl    4(%esp),%ecx
           movzbl  P_PRIORITY(%ecx),%eax
   #ifdef DIAGNOSTIC
           shrl    $2,%eax
           btl     %eax,_C_LABEL(sched_whichqs)
           jnc     1f
   #endif /* DIAGNOSTIC */
           movl    P_BACK(%ecx),%edx       # unlink process
           movl    $0,P_BACK(%ecx)         # zap reverse link to indicate off list
           movl    P_FORW(%ecx),%ecx
           movl    %ecx,P_FORW(%edx)
           movl    %edx,P_BACK(%ecx)
           cmpl    %ecx,%edx               # q still has something?
           jne     2f
   #ifndef DIAGNOSTIC
           shrl    $2,%eax
   #endif
           btrl    %eax,_C_LABEL(sched_whichqs)    # no; clear bit
   2:      ret
   #ifdef DIAGNOSTIC
   1:      pushl   $3f
           call    _C_LABEL(panic)
           /* NOTREACHED */
   3:      .asciz  "remrunqueue"
   #endif /* DIAGNOSTIC */
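
remrunqueue is the inverse operation; in C it amounts to the following sketch, with the same caveats as the setrunqueue sketch above:

        /* C sketch of remrunqueue: unlink p, clear the queue bit if the queue emptied. */
        void
        remrunqueue_sketch(struct proc *p)
        {
                int which = p->p_priority >> 2;
                struct proc *prev = p->p_back;
                struct proc *next = p->p_forw;

                p->p_back = NULL;                       /* "not on a run queue" marker */
                prev->p_forw = next;
                next->p_back = prev;
                if (next == prev)                       /* only the queue head is left */
                        sched_whichqs &= ~(1 << which);
        }
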
   
 #ifdef DIAGNOSTIC  #ifdef DIAGNOSTIC
 NENTRY(switch_error)  NENTRY(switch_error)
         pushl   $1f          pushl   $1f
Line 1659 (v1.1.2.8) / Line 1734 (v1.2)  NENTRY(switch_error)
 #endif /* DIAGNOSTIC */  #endif /* DIAGNOSTIC */
   
 /*  /*
  * void cpu_switch(struct lwp *)   * void cpu_switch(struct proc *)
  * Find a runnable process and switch to it.  Wait if necessary.  If the new   * Find a runnable process and switch to it.  Wait if necessary.  If the new
  * process is the same as the old one, we short-circuit the context save and   * process is the same as the old one, we short-circuit the context save and
  * restore.   * restore.
  *   *
  * Note that the stack frame layout is known to "struct switchframe"   * Note that the stack frame layout is known to "struct switchframe"
  * in <machine/frame.h> and to the code in cpu_fork() which initializes   * in <machine/frame.h> and to the code in cpu_fork() which initializes
  * it for a new lwp.   * it for a new process.
  */   */
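
Before reading the assembly it may help to see the overall shape of the routine. The outline below is not real kernel code; the helper names (find_highest_runqueue, dequeue_head, save_context, load_context, idle) are invented stand-ins for the labelled blocks that follow:

        /* Reading outline of cpu_switch (v1.2 flavour); all helpers are hypothetical. */
        struct proc *
        cpu_switch_outline(struct proc *old)
        {
                struct proc *new;
                int q;

                curcpu()->ci_curproc = NULL;            /* don't accumulate system time while idle */
                while ((q = find_highest_runqueue(sched_whichqs)) < 0)
                        idle();                         /* switch to the idle PCB and wait */
                new = dequeue_head(&sched_qs[q]);       /* switch_dequeue */
                new->p_stat = SONPROC;
                curcpu()->ci_curproc = new;             /* SET_CURPROC */
                if (new != old) {
                        if (old != NULL)
                                save_context(old);      /* pmap_deactivate, save %esp/%ebp */
                        load_context(new);              /* restore %esp/%ebp, TSS, LDT, %cr0 */
                }
                return (new);                           /* switch_return: %eax = p */
        }
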
 ENTRY(cpu_switch)  ENTRY(cpu_switch)
         pushl   %ebx          pushl   %ebx
Line 1676 (v1.1.2.8) / Line 1751 (v1.2)  ENTRY(cpu_switch)
 #ifdef DEBUG  #ifdef DEBUG
         cmpl    $IPL_SCHED,CPUVAR(ILEVEL)          cmpl    $IPL_SCHED,CPUVAR(ILEVEL)
         jae     1f          jae     1f
         pushl   $2f          pushl   2f
         call    _C_LABEL(panic)          call    _C_LABEL(panic)
         /* NOTREACHED */          /* NOTREACHED */
 2:      .asciz  "not splsched() in cpu_switch!"  2:      .asciz  "not splhigh() in cpu_switch!"
   
 1:  1:
 #endif /* DEBUG */  #endif /* DEBUG */
   
         movl    16(%esp),%esi           # current          movl    CPUVAR(CURPROC),%esi
   
         /*          /*
          * Clear curlwp so that we don't accumulate system time while idle.           * Clear curproc so that we don't accumulate system time while idle.
          * This also insures that schedcpu() will move the old lwp to           * This also insures that schedcpu() will move the old process to
          * the correct queue if it happens to get called from the spllower()           * the correct queue if it happens to get called from the spllower()
          * below and changes the priority.  (See corresponding comment in           * below and changes the priority.  (See corresponding comment in
          * userret()).           * userret()).
          */           */
         movl    $0,CPUVAR(CURLWP)          movl    $0,CPUVAR(CURPROC)
         /*          /*
          * First phase: find new lwp.           * First phase: find new process.
          *           *
          * Registers:           * Registers:
          *   %eax - queue head, scratch, then zero           *   %eax - queue head, scratch, then zero
          *   %ebx - queue number           *   %ebx - queue number
          *   %ecx - cached value of whichqs           *   %ecx - cached value of whichqs
          *   %edx - next lwp in queue           *   %edx - next process in queue
          *   %esi - old lwp           *   %esi - old process
          *   %edi - new lwp           *   %edi - new process
          */           */
   
         /* Look for new lwp. */          /* Look for new process. */
         cli                             # splhigh doesn't do a cli          cli                             # splhigh doesn't do a cli
         movl    _C_LABEL(sched_whichqs),%ecx          movl    _C_LABEL(sched_whichqs),%ecx
         bsfl    %ecx,%ebx               # find a full q          bsfl    %ecx,%ebx               # find a full q
Line 1716 (v1.1.2.8) / Line 1792 (v1.2)  ENTRY(cpu_switch)
          *           *
          * Registers:           * Registers:
          *   %eax, %ecx - scratch           *   %eax, %ecx - scratch
          *   %esi - old lwp, then old pcb           *   %esi - old process, then old pcb
          *   %edi - idle pcb           *   %edi - idle pcb
          */           */
   
Line 1724 (v1.1.2.8) / Line 1800 (v1.2)  ENTRY(cpu_switch)
         call    _C_LABEL(pmap_deactivate)       # pmap_deactivate(oldproc)          call    _C_LABEL(pmap_deactivate)       # pmap_deactivate(oldproc)
         addl    $4,%esp          addl    $4,%esp
   
         movl    L_ADDR(%esi),%esi          movl    P_ADDR(%esi),%esi
   
         /* Save stack pointers. */          /* Save stack pointers. */
         movl    %esp,PCB_ESP(%esi)          movl    %esp,PCB_ESP(%esi)
Line 1732 (v1.1.2.8) / Line 1808 (v1.2)  ENTRY(cpu_switch)
   
         /* Find idle PCB for this CPU */          /* Find idle PCB for this CPU */
 #ifndef MULTIPROCESSOR  #ifndef MULTIPROCESSOR
         movl    $_C_LABEL(lwp0),%ebx          movl    $_C_LABEL(proc0),%ebx
         movl    L_ADDR(%ebx),%edi          movl    P_ADDR(%ebx),%edi
         movl    L_MD_TSS_SEL(%ebx),%edx          movl    P_MD_TSS_SEL(%ebx),%edx
 #else  #else
         movl    CPUVAR(IDLE_PCB),%edi          movl    CPUVAR(IDLE_PCB),%edi
         movl    CPUVAR(IDLE_TSS_SEL),%edx          movl    CPUVAR(IDLE_TSS_SEL),%edx
 #endif  #endif
         movl    $0,CPUVAR(CURLWP)               /* In case we fault... */          movl    $0,CPUVAR(CURPROC)              /* In case we fault... */
   
         /* Restore the idle context (avoid interrupts) */          /* Restore the idle context (avoid interrupts) */
         cli          cli
Line 1817 (v1.1.2.8) / Line 1893 (v1.2)  switch_dequeue:
         sti          sti
         leal    _C_LABEL(sched_qs)(,%ebx,8),%eax # select q          leal    _C_LABEL(sched_qs)(,%ebx,8),%eax # select q
   
         movl    L_FORW(%eax),%edi       # unlink from front of process q          movl    P_FORW(%eax),%edi       # unlink from front of process q
 #ifdef  DIAGNOSTIC  #ifdef  DIAGNOSTIC
         cmpl    %edi,%eax               # linked to self (i.e. nothing queued)?          cmpl    %edi,%eax               # linked to self (i.e. nothing queued)?
         je      _C_LABEL(switch_error)  # not possible          je      _C_LABEL(switch_error)  # not possible
 #endif /* DIAGNOSTIC */  #endif /* DIAGNOSTIC */
         movl    L_FORW(%edi),%edx          movl    P_FORW(%edi),%edx
         movl    %edx,L_FORW(%eax)          movl    %edx,P_FORW(%eax)
         movl    %eax,L_BACK(%edx)          movl    %eax,P_BACK(%edx)
   
         cmpl    %edx,%eax               # q empty?          cmpl    %edx,%eax               # q empty?
         jne     3f          jne     3f
Line 1836 (v1.1.2.8) / Line 1912 (v1.2)  switch_dequeue:
         xorl    %eax,%eax          xorl    %eax,%eax
         CLEAR_RESCHED(%eax)          CLEAR_RESCHED(%eax)
   
 switch_resume:  
 #ifdef  DIAGNOSTIC  #ifdef  DIAGNOSTIC
         cmpl    %eax,L_WCHAN(%edi)      # Waiting for something?          cmpl    %eax,P_WCHAN(%edi)      # Waiting for something?
         jne     _C_LABEL(switch_error)  # Yes; shouldn't be queued.          jne     _C_LABEL(switch_error)  # Yes; shouldn't be queued.
         cmpb    $LSRUN,L_STAT(%edi)     # In run state?          cmpb    $SRUN,P_STAT(%edi)      # In run state?
         jne     _C_LABEL(switch_error)  # No; shouldn't be queued.          jne     _C_LABEL(switch_error)  # No; shouldn't be queued.
 #endif /* DIAGNOSTIC */  #endif /* DIAGNOSTIC */
   
         /* Isolate lwp.  XXX Is this necessary? */          /* Isolate process.  XXX Is this necessary? */
         movl    %eax,L_BACK(%edi)          movl    %eax,P_BACK(%edi)
   
         /* Record new lwp. */          /* Record new process. */
         movb    $LSONPROC,L_STAT(%edi)  # l->l_stat = LSONPROC          movb    $SONPROC,P_STAT(%edi)   # p->p_stat = SONPROC
         SET_CURLWP(%edi,%ecx)          SET_CURPROC(%edi,%ecx)
   
         /* Skip context switch if same lwp. */          /* Skip context switch if same process. */
         movl    $1, %eax  
         cmpl    %edi,%esi          cmpl    %edi,%esi
         je      switch_return          je      switch_return
   
         /* If old lwp exited, don't bother. */          /* If old process exited, don't bother. */
         testl   %esi,%esi          testl   %esi,%esi
         jz      switch_exited          jz      switch_exited
   
Line 1865 (v1.1.2.8) / Line 1939 (v1.2)  switch_resume:
          *           *
          * Registers:           * Registers:
          *   %eax, %ecx - scratch           *   %eax, %ecx - scratch
          *   %esi - old lwp, then old pcb           *   %esi - old process, then old pcb
          *   %edi - new lwp           *   %edi - new process
          */           */
   
         pushl   %esi          pushl   %esi
         call    _C_LABEL(pmap_deactivate)       # pmap_deactivate(oldproc)          call    _C_LABEL(pmap_deactivate)       # pmap_deactivate(oldproc)
         addl    $4,%esp          addl    $4,%esp
   
         movl    L_ADDR(%esi),%esi          movl    P_ADDR(%esi),%esi
   
         /* Save stack pointers. */          /* Save stack pointers. */
         movl    %esp,PCB_ESP(%esi)          movl    %esp,PCB_ESP(%esi)
Line 1886 (v1.1.2.8) / Line 1960 (v1.2)  switch_exited:
          * Registers:           * Registers:
          *   %eax, %ebx, %ecx, %edx - scratch           *   %eax, %ebx, %ecx, %edx - scratch
          *   %esi - new pcb           *   %esi - new pcb
          *   %edi - new lwp           *   %edi - new process
          */           */
   
         /* No interrupts while loading new state. */          /* No interrupts while loading new state. */
         cli          cli
         movl    L_ADDR(%edi),%esi          movl    P_ADDR(%edi),%esi
   
         /* Restore stack pointers. */          /* Restore stack pointers. */
         movl    PCB_ESP(%esi),%esp          movl    PCB_ESP(%esi),%esp
Line 1899 (v1.1.2.8) / Line 1973 (v1.2)  switch_exited:
   
 #if 0  #if 0
         /* Don't bother with the rest if switching to a system process. */          /* Don't bother with the rest if switching to a system process. */
         testl   $P_SYSTEM,L_FLAG(%edi); XXX NJWLWP lwp's don't have P_SYSTEM!          testl   $P_SYSTEM,P_FLAG(%edi)
         jnz     switch_restored          jnz     switch_restored
 #endif  #endif
   
Line 1909 (v1.1.2.8) / Line 1983 (v1.2)  switch_exited:
         /* Load TSS info. */          /* Load TSS info. */
         movl    _C_LABEL(gdt),%eax          movl    _C_LABEL(gdt),%eax
 #endif  #endif
         movl    L_MD_TSS_SEL(%edi),%edx          movl    P_MD_TSS_SEL(%edi),%edx
   
         /* Switch TSS. Reset "task busy" flag before loading. */          /* Switch TSS. Reset "task busy" flag before loading. */
         andl    $~0x0200,4(%eax,%edx, 1)          andl    $~0x0200,4(%eax,%edx, 1)
Line 1945 (v1.1.2.8) / Line 2019 (v1.2)  switch_restored:
   
 /*  /*
  *  Check for restartable atomic sequences (RAS)   *  Check for restartable atomic sequences (RAS)
    *  XXX %edi reloads are not necessary here as %edi is callee-saved!
  */   */
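
What the next few instructions do, expressed in C against the v1.2 column; ras_lookup() is the ordinary kernel routine from kern_ras.c and returns (caddr_t)-1 when the PC is not inside a registered sequence:

        /* C sketch of the RAS check: restart an interrupted restartable atomic sequence. */
        static void
        check_ras_sketch(struct proc *p)
        {
                struct trapframe *tf = p->p_md.md_regs;
                caddr_t nip;

                if (p->p_nras == 0)
                        return;
                nip = ras_lookup(p, (caddr_t)tf->tf_eip);
                if (nip != (caddr_t)-1)
                        tf->tf_eip = (int)nip;          /* resume at the start of the sequence */
        }
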
         movl    CPUVAR(CURLWP),%edi          movl    CPUVAR(CURPROC),%edi
         movl    L_PROC(%edi),%esi          cmpl    $0,P_NRAS(%edi)
         cmpl    $0,P_NRAS(%esi)  
         je      1f          je      1f
         movl    L_MD_REGS(%edi),%ebx          movl    P_MD_REGS(%edi),%edx
         movl    TF_EIP(%ebx),%eax          movl    TF_EIP(%edx),%eax
         pushl   %eax          pushl   %eax
         pushl   %esi          pushl   %edi
         call    _C_LABEL(ras_lookup)          call    _C_LABEL(ras_lookup)
         addl    $8,%esp          addl    $8,%esp
         cmpl    $-1,%eax          cmpl    $-1,%eax
         je      1f          je      1f
         movl    %eax,TF_EIP(%ebx)          movl    CPUVAR(CURPROC),%edi
           movl    P_MD_REGS(%edi),%edx
           movl    %eax,TF_EIP(%edx)
 1:  1:
         xor     %eax,%eax  
   
 switch_return:  switch_return:
 #if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)  #if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
Line 1971 (v1.1.2.8) / Line 2046 (v1.2)  switch_return:
         addl    $4,%esp          addl    $4,%esp
         movl    $IPL_HIGH,CPUVAR(ILEVEL)        # splhigh()          movl    $IPL_HIGH,CPUVAR(ILEVEL)        # splhigh()
   
           movl    %edi,%eax               # return (p);
         popl    %edi          popl    %edi
         popl    %esi          popl    %esi
         popl    %ebx          popl    %ebx
         ret          ret
   
 /*  /*
  * void cpu_switchto(struct lwp *current, struct lwp *next)   * void switch_exit(struct proc *p);
  * Switch to the specified next LWP.   * switch_exit(struct proc *p);
  */   * Switch to the appropriate idle context (proc0's if uniprocessor; the cpu's
 ENTRY(cpu_switchto)  
         pushl   %ebx  
         pushl   %esi  
         pushl   %edi  
   
 #ifdef DEBUG  
         cmpl    $IPL_SCHED,CPUVAR(ILEVEL)  
         jae     1f  
         pushl   $2f  
         call    _C_LABEL(panic)  
         /* NOTREACHED */  
 2:      .asciz  "not splsched() in cpu_switchto!"  
 1:  
 #endif /* DEBUG */  
   
         movl    16(%esp),%esi           # current  
         movl    20(%esp),%edi           # next  
   
         /*  
          * Clear curlwp so that we don't accumulate system time while idle.  
          * This also insures that schedcpu() will move the old process to  
          * the correct queue if it happens to get called from the spllower()  
          * below and changes the priority.  (See corresponding comment in  
          * usrret()).  
          *  
          * XXX Is this necessary?  We know we won't go idle.  
          */  
         movl    $0,CPUVAR(CURLWP)  
   
         /*  
          * We're running at splhigh(), but it's otherwise okay to take  
          * interrupts here.  
          */  
         sti  
   
         /* Jump into the middle of cpu_switch */  
         xorl    %eax,%eax  
         jmp     switch_resume  
   
 /*  
  * void switch_exit(struct lwp *l, void (*exit)(struct lwp *));  
  * Switch to the appropriate idle context (lwp0's if uniprocessor; the cpu's  
  * if multiprocessor) and deallocate the address space and kernel stack for p.   * if multiprocessor) and deallocate the address space and kernel stack for p.
  * Then jump into cpu_switch(), as if we were in the idle proc all along.   * Then jump into cpu_switch(), as if we were in the idle proc all along.
  */   */
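
In outline, the entry point below hops onto an idle PCB so that the dying process's kernel stack is no longer in use, lets exit2() schedule the vmspace and U-area for freeing, and then falls into cpu_switch's idle/dequeue path. A C-flavoured outline only; load_idle_context is a made-up name for the PCB/TSS reload the assembly performs inline:

        /* Outline of switch_exit (v1.2 flavour); not compilable as written. */
        void
        switch_exit_outline(struct proc *p)
        {
                curcpu()->ci_curproc = NULL;    /* in case we fault */
                load_idle_context();            /* proc0's PCB on UP, this CPU's idle PCB on MP */
                exit2(p);                       /* schedule vmspace + kernel stack for freeing */
                cpu_switch(NULL);               /* continue in the idle loop; never returns here */
        }
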
 #ifndef MULTIPROCESSOR  #ifndef MULTIPROCESSOR
         .globl  _C_LABEL(lwp0)          .globl  _C_LABEL(proc0)
 #endif  #endif
         .globl  _C_LABEL(uvmspace_free),_C_LABEL(kernel_map)          .globl  _C_LABEL(uvmspace_free),_C_LABEL(kernel_map)
         .globl  _C_LABEL(uvm_km_free),_C_LABEL(tss_free)          .globl  _C_LABEL(uvm_km_free),_C_LABEL(tss_free)
 /* LINTSTUB: Func: void switch_exit(struct lwp *l) */  /* LINTSTUB: Func: void switch_exit(struct proc *p) */
 ENTRY(switch_exit)  ENTRY(switch_exit)
         movl    4(%esp),%edi            # old process          movl    4(%esp),%edi            # old process
         movl    8(%esp),%eax            # exit func  
 #ifndef MULTIPROCESSOR  #ifndef MULTIPROCESSOR
         movl    $_C_LABEL(lwp0),%ebx          movl    $_C_LABEL(proc0),%ebx
         movl    L_ADDR(%ebx),%esi          movl    P_ADDR(%ebx),%esi
         movl    L_MD_TSS_SEL(%ebx),%edx          movl    P_MD_TSS_SEL(%ebx),%edx
 #else  #else
         movl    CPUVAR(IDLE_PCB),%esi          movl    CPUVAR(IDLE_PCB),%esi
         movl    CPUVAR(IDLE_TSS_SEL),%edx          movl    CPUVAR(IDLE_TSS_SEL),%edx
 #endif  #endif
         /* In case we fault... */          /* In case we fault... */
         movl    $0,CPUVAR(CURLWP)          movl    $0,CPUVAR(CURPROC)
   
         /* Restore the idle context. */          /* Restore the idle context. */
         cli          cli
Line 2052 (v1.1.2.8) / Line 2085 (v1.2)  ENTRY(switch_exit)
         movl    PCB_ESP(%esi),%esp          movl    PCB_ESP(%esi),%esp
         movl    PCB_EBP(%esi),%ebp          movl    PCB_EBP(%esi),%ebp
   
         /* Save exit func. */  
         pushl   %eax  
   
         /* Load TSS info. */          /* Load TSS info. */
 #ifdef MULTIPROCESSOR  #ifdef MULTIPROCESSOR
         movl    CPUVAR(GDT),%eax          movl    CPUVAR(GDT),%eax
Line 2086 (v1.1.2.8) / Line 2116 (v1.2)  ENTRY(switch_exit)
         /*          /*
          * Schedule the dead process's vmspace and stack to be freed.           * Schedule the dead process's vmspace and stack to be freed.
          */           */
         movl    0(%esp),%eax            /* %eax = exit func */          pushl   %edi                    /* exit2(p) */
         movl    %edi,0(%esp)            /* {lwp_}exit2(l) */          call    _C_LABEL(exit2)
         call    *%eax  
         addl    $4,%esp          addl    $4,%esp
   
         /* Jump into cpu_switch() with the right state. */          /* Jump into cpu_switch() with the right state. */
         xorl    %esi,%esi          xorl    %esi,%esi
         movl    %esi,CPUVAR(CURLWP)          movl    %esi,CPUVAR(CURPROC)
         jmp     idle_start          jmp     idle_start
   
 /*  /*
Line 2143 (v1.1.2.8) / Line 2172 (v1.2)  syscall1:
 #endif  #endif
 1:  1:
 #endif /* DIAGNOSTIC */  #endif /* DIAGNOSTIC */
         movl    CPUVAR(CURLWP),%edx          movl    CPUVAR(CURPROC),%edx
         movl    %esp,L_MD_REGS(%edx)    # save pointer to frame          movl    %esp,P_MD_REGS(%edx)    # save pointer to frame
         movl    L_PROC(%edx),%edx  
         call    *P_MD_SYSCALL(%edx)     # get pointer to syscall() function          call    *P_MD_SYSCALL(%edx)     # get pointer to syscall() function
 2:      /* Check for ASTs on exit to user mode. */  2:      /* Check for ASTs on exit to user mode. */
         cli          cli
         CHECK_ASTPENDING(%eax)          CHECK_ASTPENDING()
         je      1f          je      1f
         /* Always returning to user mode here. */          /* Always returning to user mode here. */
         CLEAR_ASTPENDING(%eax)          CLEAR_ASTPENDING()
         sti          sti
         /* Pushed T_ASTFLT into tf_trapno on entry. */          /* Pushed T_ASTFLT into tf_trapno on entry. */
         call    _C_LABEL(trap)          call    _C_LABEL(trap)
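
The AST handling at syscall exit follows the usual return-to-user pattern: with interrupts disabled, test the astpending flag; if it is set, clear it, re-enable interrupts, call trap() on the frame whose tf_trapno was preset to T_ASTFLT, and test again. Roughly, and only as a sketch (astpending stands for whichever flag CHECK_ASTPENDING tests in each version, and the loop-back edge lies below the end of this hunk):

        /* Sketch of the syscall-exit AST loop; not the literal macro expansion. */
        static void
        ast_loop_sketch(struct trapframe frame)
        {
                for (;;) {
                        disable_intr();         /* cli */
                        if (!astpending)
                                break;          /* nothing pending: back to user mode */
                        astpending = 0;         /* CLEAR_ASTPENDING */
                        enable_intr();          /* sti */
                        trap(frame);            /* tf_trapno == T_ASTFLT, set on entry */
                }
        }
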

Legend: the left column shows lines removed from v.1.1.2.8, the right column shows lines added in v.1.2; changed lines appear in both columns.
