/*	version 1.6.6.7, 2007/12/03 19:02:37	*/

#define ALIGN_TEXT	.align 16,0x90

#include <machine/asm.h>
#include <machine/psl.h>
#include <machine/trap.h>
#include <machine/segments.h>
#include <machine/frameasm.h>

#include "assym.h"

	.text
#ifndef XEN
/*
 * softintr()
 *
 * Switch to the LWP assigned to handle interrupts from the given
 * source.  We borrow the VM context from the interrupted LWP.
 *
 * On entry:
 *
 *	%rax		intrsource
 *	%r13		address to return to
 */
IDTVEC(softintr)
	pushq	$_C_LABEL(softintr_ret)	/* set up struct switchframe */
	pushq	%rbx
	pushq	%r12
	pushq	%r13
	pushq	%r14
	pushq	%r15
	movl	$IPL_HIGH,CPUVAR(ILEVEL)
	movq	CPUVAR(CURLWP),%r15
	movq	IS_LWP(%rax),%rdi	/* switch to handler LWP */
	movq	L_ADDR(%rdi),%rdx
	movq	L_ADDR(%r15),%rcx
	movq	%rdi,CPUVAR(CURLWP)
	movq	%rsp,PCB_RSP(%rcx)
	movq	%rbp,PCB_RBP(%rcx)
	movq	PCB_RSP0(%rdx),%rsp	/* onto new stack */
	sti
	movq	%r15,%rdi		/* interrupted LWP */
	movl	IS_MAXLEVEL(%rax),%esi	/* ipl to run at */
	call	_C_LABEL(softint_dispatch)	/* run handlers */
	movq	L_ADDR(%r15),%rcx
	movq	PCB_RSP(%rcx),%rsp
	movq	%r15,CPUVAR(CURLWP)
	popq	%r15			/* unwind switchframe */
	addq	$(5 * 8),%rsp
	cli
	jmp	*%r13			/* back to splx/doreti */
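
/*
 * In rough C, the context borrow above looks like the sketch below.
 * This is illustrative only: "ci" (the current cpu_info), l_addr and
 * the save/switch/return helpers stand in for the CPUVAR()/L_ADDR()/
 * PCB_* offsets generated into assym.h; softint_dispatch() is the
 * real C handler-runner called above.
 *
 *	void
 *	Xsoftintr(struct intrsource *is, void *retaddr)
 *	{
 *		struct lwp *old = ci->ci_curlwp;
 *		struct lwp *new = is->is_lwp;
 *
 *		ci->ci_ilevel = IPL_HIGH;
 *		ci->ci_curlwp = new;
 *		save_stack(old->l_addr);		// PCB_RSP/PCB_RBP
 *		switch_stack(new->l_addr);		// PCB_RSP0: new stack
 *		enable_intr();				// sti
 *		softint_dispatch(old, is->is_maxlevel);	// run handlers
 *		switch_stack(old->l_addr);		// back onto old stack
 *		ci->ci_curlwp = old;
 *		disable_intr();				// cli
 *		return_to(retaddr);			// back to splx/doreti
 *	}
 */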

/*
 * softintr_ret()
 *
 * Trampoline function that gets returned to by cpu_switchto() when
 * an interrupt handler blocks.  On entry:
 *
 *	%rax		prevlwp from cpu_switchto()
 */
NENTRY(softintr_ret)
	movl	$0,L_CTXSWTCH(%rax)	/* %rax from cpu_switchto */
	cli
	jmp	*%r13			/* back to splx/doreti */

/*
 * void softint_trigger(uintptr_t machdep);
 *
 * Software interrupt registration.
 */
NENTRY(softint_trigger)
	orl	%edi,CPUVAR(IPENDING)	/* atomic on local cpu */
	ret
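
/*
 * Equivalent C, as a sketch: the machdep cookie for a softint is a
 * one-bit mask in ci_ipending (the field behind CPUVAR(IPENDING)),
 * so triggering is a single OR on the local CPU:
 *
 *	void
 *	softint_trigger(uintptr_t machdep)
 *	{
 *		ci->ci_ipending |= machdep;	// dispatched by Xspllower/Xdoreti
 *	}
 */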

/*
 * int splraise(int s);
 */
ENTRY(splraise)
	movl	CPUVAR(ILEVEL),%eax
	cmpl	%edi,%eax
	cmoval	%eax,%edi		/* keep the higher of the two levels */
	movl	%edi,CPUVAR(ILEVEL)
	ret
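
/*
 * Equivalent C, as a sketch ("ci" is the current CPU's cpu_info;
 * ci_ilevel backs CPUVAR(ILEVEL)):
 *
 *	int
 *	splraise(int s)
 *	{
 *		int olevel = ci->ci_ilevel;
 *
 *		if (s > olevel)
 *			ci->ci_ilevel = s;	// raise, never lower
 *		return olevel;			// restored later by splx()
 *	}
 */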

/*
 * void spllower(int s);
 *
 * Must be the same size as cx8_spllower().  This must use
 * pushf/cli/popf as it is used early in boot where interrupts
 * are disabled via eflags/IE.
 */
	.align	64
ENTRY(spllower)
	cmpl	CPUVAR(ILEVEL), %edi
	jae	1f
	movl	CPUVAR(IUNMASK)(,%rdi,4), %edx
	pushf
	cli
	testl	CPUVAR(IPENDING), %edx	/* deferred interrupts? */
	jnz	2f
	movl	%edi, CPUVAR(ILEVEL)
	popf
1:
	ret
2:
	popf
	jmp	_C_LABEL(Xspllower)

	.align	16
LABEL(spllower_end)
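
/*
 * Fast path in rough C, as a sketch (ci_ipending/ci_iunmask mirror
 * CPUVAR(IPENDING)/CPUVAR(IUNMASK); the pushf/popf pair is shown as
 * an explicit save/restore of the interrupt flag):
 *
 *	void
 *	spllower(int s)
 *	{
 *		if (s >= ci->ci_ilevel)
 *			return;
 *		u_long flags = read_flags();	// pushf
 *		disable_intr();			// cli
 *		if (ci->ci_ipending & ci->ci_iunmask[s]) {
 *			write_flags(flags);	// popf
 *			Xspllower(s);		// run deferred handlers
 *			return;
 *		}
 *		ci->ci_ilevel = s;
 *		write_flags(flags);		// popf
 *	}
 */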

#endif /* !XEN */

/*
 * void cx8_spllower(int s);
 *
 * For cmpxchg8b, edx/ecx are the high words and eax/ebx the low.
 *
 * edx : eax = old level / old ipending
 * ecx : ebx = new level / old ipending
 */
	.align	64
ENTRY(cx8_spllower)
	movl	CPUVAR(ILEVEL),%edx
	movq	%rbx,%r8
	cmpl	%edx,%edi		/* new level is lower? */
	jae,pn	1f
0:
	movl	CPUVAR(IPENDING),%eax
	movl	%edi,%ecx
	testl	%eax,CPUVAR(IUNMASK)(,%rcx,4)	/* deferred interrupts? */
	movl	%eax,%ebx		/* new ipending = old ipending */
	/*
	 * On a P4 this jump is cheaper than patching in junk using
	 * cmov.  Is cmpxchg expensive if it fails?
	 */
	jnz,pn	2f
	cmpxchg8b CPUVAR(ISTATE)	/* swap in new ilevel */
	jnz	0b
1:
	movq	%r8,%rbx
	ret
2:
	movq	%r8,%rbx
LABEL(cx8_spllower_patch)
	jmp	_C_LABEL(Xspllower)

	.align	16
LABEL(cx8_spllower_end)
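
/*
 * The loop above in rough C, as a sketch.  CPUVAR(ISTATE) is the
 * 64-bit word holding ipending (low half, %eax/%ebx) and ilevel
 * (high half, %edx/%ecx); cas64() below stands in for cmpxchg8b:
 *
 *	void
 *	cx8_spllower(int s)
 *	{
 *		uint32_t olevel = ci->ci_ilevel;
 *
 *		if (s >= olevel)
 *			return;
 *		for (;;) {
 *			uint32_t opending = ci->ci_ipending;
 *			if (opending & ci->ci_iunmask[s]) {
 *				Xspllower(s);	// deferred work pending
 *				return;
 *			}
 *			// publish (level = s, ipending unchanged) atomically
 *			if (cas64(&ci->ci_istate,
 *			    ((uint64_t)olevel << 32) | opending,
 *			    ((uint64_t)s << 32) | opending))
 *				return;
 *		}
 *	}
 */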

/*
 * void Xspllower(int s);
 *
 * Process pending interrupts.
 *
 * Important registers:
 *   ebx - cpl
 *   r13 - address to resume loop at
 *
 * It is important that the bit scan instruction is bsr, it will get
 * the highest 2 bits (currently the IPI and clock handlers) first,
 * to avoid deadlocks where one CPU sends an IPI, another one is at
 * splhigh() and defers it, lands in here via splx(), and handles
 * a lower-prio one first, which needs to take the kernel lock -->
 * the sending CPU will never see that CPU accept the IPI
 * (see pmap_tlb_shootnow).
 */
	nop
	.align	4	/* Avoid confusion with cx8_spllower_end */

IDTVEC(spllower)
	pushq	%rbx
	pushq	%r13
	pushq	%r12
	movl	%edi,%ebx
	leaq	1f(%rip),%r13		# address to resume loop at
1:	movl	%ebx,%eax		# get cpl
	movl	CPUVAR(IUNMASK)(,%rax,4),%eax
	CLI(si,di)
	andl	CPUVAR(IPENDING),%eax	# any non-masked bits left?
	jz	2f
	bsrl	%eax,%eax		# highest pending bit
	btrl	%eax,CPUVAR(IPENDING)	# clear it
	movq	CPUVAR(ISOURCES)(,%rax,8),%rax
	jmp	*IS_RECURSE(%rax)
2:
	movl	%ebx,CPUVAR(ILEVEL)
	STI(si,di)
	popq	%r12
	popq	%r13
	popq	%rbx
	ret
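
/*
 * The dispatch loop in rough C, as a sketch (is_recurse() stands in
 * for the indirect jump through IS_RECURSE, which re-enters the loop
 * at the resume address kept in %r13):
 *
 *	void
 *	Xspllower(int s)
 *	{
 *		for (;;) {
 *			disable_intr();
 *			uint32_t pending =
 *			    ci->ci_ipending & ci->ci_iunmask[s];
 *			if (pending == 0)
 *				break;
 *			int bit = fls32(pending) - 1;	// bsr: highest first
 *			ci->ci_ipending &= ~(1U << bit);
 *			ci->ci_isources[bit]->is_recurse();
 *		}
 *		ci->ci_ilevel = s;
 *		enable_intr();
 *	}
 */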

/*
 * void Xdoreti(void);
 *
 * Handle return from interrupt after device handler finishes.
 *
 * Important registers:
 *   ebx - cpl to restore
 *   r13 - address to resume loop at
 */
IDTVEC(doreti)
	popq	%rbx			# get previous priority
	decl	CPUVAR(IDEPTH)
	leaq	1f(%rip),%r13
1:	movl	%ebx,%eax
	movl	CPUVAR(IUNMASK)(,%rax,4),%eax
	CLI(si,di)
	andl	CPUVAR(IPENDING),%eax
	jz	2f
	bsrl	%eax,%eax		# slow, but not worth optimizing
	btrl	%eax,CPUVAR(IPENDING)
	movq	CPUVAR(ISOURCES)(,%rax,8),%rax
	jmp	*IS_RESUME(%rax)
2:	/* Check for ASTs on exit to user mode. */
	movl	%ebx,CPUVAR(ILEVEL)
5:
	testb	$SEL_RPL,TF_CS(%rsp)	/* returning to user mode? */
	jz	6f
	.globl doreti_checkast
doreti_checkast:
	movq	CPUVAR(CURLWP),%r14
	CHECK_ASTPENDING(%r14)
	je	3f
	CLEAR_ASTPENDING(%r14)
	STI(si,di)
	movl	$T_ASTFLT,TF_TRAPNO(%rsp)	/* XXX undo later.. */
	/* Pushed T_ASTFLT into tf_trapno on entry. */
	movq	%rsp,%rdi
	call	_C_LABEL(trap)
	CLI(si,di)
	jmp	doreti_checkast
3:
	CHECK_DEFERRED_SWITCH
	jnz	9f
6:
	INTRFASTEXIT
9:
	STI(si,di)
	call	_C_LABEL(do_pmap_load)
	CLI(si,di)
	jmp	doreti_checkast		/* recheck ASTs */
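
/*
 * Exit protocol in rough C, as a sketch.  ast_pending()/
 * deferred_pmap_switch() stand in for the CHECK_ASTPENDING and
 * CHECK_DEFERRED_SWITCH macros from frameasm.h; trap() and
 * do_pmap_load() are the real C entry points called above:
 *
 *	if (returning_to_user(frame)) {
 *		for (;;) {
 *			struct lwp *l = ci->ci_curlwp;
 *			if (ast_pending(l)) {
 *				clear_ast_pending(l);
 *				enable_intr();
 *				frame->tf_trapno = T_ASTFLT;
 *				trap(frame);
 *				disable_intr();
 *				continue;		// recheck ASTs
 *			}
 *			if (deferred_pmap_switch()) {
 *				enable_intr();
 *				do_pmap_load();
 *				disable_intr();
 *				continue;		// recheck ASTs
 *			}
 *			break;
 *		}
 *	}
 *	intr_fast_exit(frame);				// INTRFASTEXIT
 */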

#ifdef XEN
/* Run the Xen event channel handler with the nesting depth bumped. */
NENTRY(call_evtchn_do_event)
	incl	CPUVAR(IDEPTH)
	call	_C_LABEL(evtchn_do_event)
	decl	CPUVAR(IDEPTH)
	ret
#endif