
Please note that diffs are not public domain; they are subject to the copyright notices on the relevant files.

Diff for /src/sys/arch/amd64/amd64/locore.S between version 1.18.12.1 and 1.19

version 1.18.12.1, 2008/09/16 18:49:33
version 1.19, 2007/02/09 21:55:01

Line 675  NENTRY(lgdt)
         pushq   %rax
         lretq
 
+NENTRY(x86_flush)
+        popq    %rax
+        pushq   $GSEL(GCODE_SEL, SEL_KPL)
+        pushq   %rax
+        lretq
 
 ENTRY(setjmp)
         /*
          * Only save registers that must be preserved across function

Line 711 (v.1.18.12.1) / Line 717 (v.1.19)  ENTRY(longjmp)
 
 /*****************************************************************************/
 
-ENTRY(dumpsys)
-        # mimic cpu_switch() for postmortem debugging.
-
-        # build a fake switch frame.
-        pushq   %rbx
-        pushq   %rbp
-        pushq   %r12
-        pushq   %r13
-        pushq   %r14
-        pushq   %r15
-
-        # save a context.
-        movq    $dumppcb, %rax
-        movq    %rsp, PCB_RSP(%rax)
-        movq    %rbp, PCB_RBP(%rax)
-
-        call    _C_LABEL(dodumpsys)
-
-        addq    $(6*8), %rsp    # sizeof(switchframe) - sizeof(%rip)
-        ret
 
 /*
  * The following primitives manipulate the run queues.
  * _whichqs tells which of the 32 queues _qs

Line 770 (v.1.18.12.1) / Line 755 (v.1.19)  NENTRY(switch_error3)
  * int cpu_switch(struct lwp *)
  * Find a runnable lwp and switch to it.  Wait if necessary.  If the new
  * lwp is the same as the old one, we short-circuit the context save and
- * restore.
+ * restore.  On MP, must be entered with the sched_mutex held.
  */
 ENTRY(cpu_switch)
         pushq   %rbx

Line 870 (v.1.18.12.1) / Line 855 (v.1.19)  ENTRY(cpu_switch)
         xorq    %r13,%r13
         sti
 idle_unlock:
-#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
         call    _C_LABEL(sched_unlock_idle)
-#endif
         /* Interrupts are okay again. */
         movl    $IPL_NONE,%edi
-        call    _C_LABEL(Xspllower)
+        call    _C_LABEL(spllower)
         jmp     idle_start
 idle_zero:
         sti

Line 898 (v.1.18.12.1) / Line 881 (v.1.19)  idle_start:
 idle_exit:
         movl    $IPL_HIGH,CPUVAR(ILEVEL)
         sti
-#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
         call    _C_LABEL(sched_lock_idle)
-#endif
         movl    _C_LABEL(sched_whichqs)(%rip),%ecx
         bsfl    %ecx,%r8d
         jz      idle_unlock

Line 941 (v.1.18.12.1) / Line 922 (v.1.19)  switch_resume:
         jne     _C_LABEL(switch_error3)
 #endif
 
-        /* Isolate lwp.  XXX Is this necessary? */
+        /* Isolate lwp. */
         movq    %rax,L_BACK(%r12)
 
         /* Record new lwp. */

Line 1050 (v.1.18.12.1) / Line 1031 (v.1.19)  switch_restored:
         movl    $1,%ebx
 
 switch_return:
-#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
         call    _C_LABEL(sched_unlock_idle)
-#endif
 
         movl    $IPL_NONE,%edi
-        call    _C_LABEL(Xspllower)
+        call    _C_LABEL(spllower)
         movl    $IPL_HIGH,CPUVAR(ILEVEL)
 
         movl    %ebx,%eax

Line 1101 (v.1.18.12.1) / Line 1080 (v.1.19)  ENTRY(switch_exit)
         movl    L_MD_TSS_SEL(%r9),%edx
 #endif
 
-        /* In case we fault... */
-        movq    $0,CPUVAR(CURLWP)
-
         cli
 
         /* Restore stack pointers. */

Line 1134 (v.1.18.12.1) / Line 1110 (v.1.19)  ENTRY(switch_exit)
         /* Record new pcb. */
         SET_CURPCB(%r8)
 
+        /* Now off the CPU. */
+        movq    $0,CPUVAR(CURLWP)
+
         /* Interrupts are okay again. */
         sti
 

Line 1147 (v.1.18.12.1) / Line 1126 (v.1.19)  ENTRY(switch_exit)
 
         /* Jump into cpu_switch() with the right state. */
         xorq    %r13,%r13
-        movq    %r13, CPUVAR(CURLWP)
         jmp     idle_start
 
 /*

Line 1207 (v.1.18.12.1) / Line 1185 (v.1.19)  IDTVEC(syscall)
         call    *P_MD_SYSCALL(%r15)
 1:      /* Check for ASTs on exit to user mode. */
         cli
-        CHECK_ASTPENDING(%r11)
+        CHECK_ASTPENDING(%r14)
         je      2f
         /* Always returning to user mode here. */
-        CLEAR_ASTPENDING(%r11)
+        CLEAR_ASTPENDING(%r14)
         sti
         /* Pushed T_ASTFLT into tf_trapno on entry. */
         movq    %rsp,%rdi

Line 1307 (v.1.18.12.1) / Line 1285 (v.1.19)  osyscall1:
         pushq   $T_ASTFLT       # trap # for doing ASTs
         INTRENTRY
         sti
-        movq    CPUVAR(CURLWP),%rdx
-        movq    %rsp,L_MD_REGS(%rdx)    # save pointer to frame
-        movq    L_PROC(%rdx),%rdx
+        movq    CPUVAR(CURLWP),%r14
+        movq    %rsp,L_MD_REGS(%r14)    # save pointer to frame
+        movq    L_PROC(%r14),%rdx
         movq    %rsp,%rdi
         call    *P_MD_SYSCALL(%rdx)
 _C_LABEL(osyscall_return):
 2:      /* Check for ASTs on exit to user mode. */
         cli
-        CHECK_ASTPENDING(%r11)
+        CHECK_ASTPENDING(%r14)
         je      1f
         /* Always returning to user mode here. */
-        CLEAR_ASTPENDING(%r11)
+        CLEAR_ASTPENDING(%r14)
         sti
         /* Pushed T_ASTFLT into tf_trapno on entry. */
         movq    %rsp,%rdi

Legend:
"-"  removed from v.1.18.12.1
"+"  added in v.1.19
Changed lines appear as a "-" line followed by the corresponding "+" line.
