--- version 1.11, 2019/01/06 18:32:54
+++ version 1.12, 2019/01/07 14:08:02

Line 314: struct vmcb_ctrl {

        uint64_t intr;
#define VMCB_CTRL_INTR_SHADOW           __BIT(0)
+#define VMCB_CTRL_GUEST_INTR_MASK      __BIT(1)

        uint64_t exitcode;
        uint64_t exitinfo1;

Line 538 (1.11) / Line 537 (1.12): struct svm_cpudata {

        struct xsave_header gfpu __aligned(16);
};

+static void
+svm_vmcb_cache_default(struct vmcb *vmcb)
+{
+        vmcb->ctrl.vmcb_clean =
+            VMCB_CTRL_VMCB_CLEAN_I |
+            VMCB_CTRL_VMCB_CLEAN_IOPM |
+            VMCB_CTRL_VMCB_CLEAN_ASID |
+            VMCB_CTRL_VMCB_CLEAN_TPR |
+            VMCB_CTRL_VMCB_CLEAN_NP |
+            VMCB_CTRL_VMCB_CLEAN_CR |
+            VMCB_CTRL_VMCB_CLEAN_DR |
+            VMCB_CTRL_VMCB_CLEAN_DT |
+            VMCB_CTRL_VMCB_CLEAN_SEG |
+            VMCB_CTRL_VMCB_CLEAN_CR2 |
+            VMCB_CTRL_VMCB_CLEAN_LBR |
+            VMCB_CTRL_VMCB_CLEAN_AVIC;
+}
+
+static void
+svm_vmcb_cache_update(struct vmcb *vmcb, uint64_t flags)
+{
+        if (flags & NVMM_X64_STATE_SEGS) {
+                vmcb->ctrl.vmcb_clean &=
+                    ~(VMCB_CTRL_VMCB_CLEAN_SEG | VMCB_CTRL_VMCB_CLEAN_DT);
+        }
+        if (flags & NVMM_X64_STATE_CRS) {
+                vmcb->ctrl.vmcb_clean &=
+                    ~(VMCB_CTRL_VMCB_CLEAN_CR | VMCB_CTRL_VMCB_CLEAN_CR2);
+        }
+        if (flags & NVMM_X64_STATE_DRS) {
+                vmcb->ctrl.vmcb_clean &= ~VMCB_CTRL_VMCB_CLEAN_DR;
+        }
+        if (flags & NVMM_X64_STATE_MSRS) {
+                /* CR for EFER, NP for PAT. */
+                vmcb->ctrl.vmcb_clean &=
+                    ~(VMCB_CTRL_VMCB_CLEAN_CR | VMCB_CTRL_VMCB_CLEAN_NP);
+        }
+        if (flags & NVMM_X64_STATE_MISC) {
+                /* SEG for CPL. */
+                vmcb->ctrl.vmcb_clean &= ~VMCB_CTRL_VMCB_CLEAN_SEG;
+        }
+}
+
+static inline void
+svm_vmcb_cache_flush(struct vmcb *vmcb, uint64_t flags)
+{
+        vmcb->ctrl.vmcb_clean &= ~flags;
+}
+
+static inline void
+svm_vmcb_cache_flush_all(struct vmcb *vmcb)
+{
+        vmcb->ctrl.vmcb_clean = 0;
+}
+
#define SVM_EVENT_TYPE_HW_INT   0
#define SVM_EVENT_TYPE_NMI      2
#define SVM_EVENT_TYPE_EXC      3
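
The block added above introduces dirty tracking over the VMCB "clean bits": a set bit tells the processor it may keep using the copy of that guest-state group it cached at the last VMRUN, while a cleared bit forces a reload from the VMCB in memory. svm_vmcb_cache_default() marks everything clean, svm_vmcb_cache_update() clears the bits for whichever NVMM_X64_STATE_* groups the host just rewrote, svm_vmcb_cache_flush() clears an explicit set of bits, and svm_vmcb_cache_flush_all() invalidates the whole cache. Below is a minimal standalone sketch of the same pattern; the structure, constants and function names are simplified stand-ins for illustration, not the real VMCB layout or AMD's actual bit encoding.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the VMCB clean bits. */
#define CLEAN_I         (1ULL << 0)     /* intercept vectors */
#define CLEAN_TPR       (1ULL << 3)     /* virtual-interrupt / TPR state */
#define CLEAN_CR        (1ULL << 5)     /* control registers */
#define CLEAN_SEG       (1ULL << 8)     /* segment registers */
#define CLEAN_ALL       (CLEAN_I | CLEAN_TPR | CLEAN_CR | CLEAN_SEG)

struct vmcb_model {
        uint64_t vmcb_clean;    /* set bit => CPU may reuse its cached copy */
};

/* Everything clean: nothing was touched since the last VMRUN. */
static void
cache_default(struct vmcb_model *v)
{
        v->vmcb_clean = CLEAN_ALL;
}

/* The host rewrote some guest state: clear the matching bits. */
static void
cache_flush(struct vmcb_model *v, uint64_t bits)
{
        v->vmcb_clean &= ~bits;
}

/* The VMCB will run on a different physical CPU: nothing can be reused. */
static void
cache_flush_all(struct vmcb_model *v)
{
        v->vmcb_clean = 0;
}

int
main(void)
{
        struct vmcb_model v;

        cache_default(&v);              /* after loading the full guest state */
        cache_flush(&v, CLEAN_CR);      /* e.g. the host changed CR0-CR4 */
        printf("clean = 0x%llx\n", (unsigned long long)v.vmcb_clean);
        cache_flush_all(&v);            /* e.g. vCPU moved to another host CPU */
        printf("clean = 0x%llx\n", (unsigned long long)v.vmcb_clean);
        return 0;
}
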
Line 555 (1.11) / Line 609 (1.12): svm_event_waitexit_enable(struct nvmm_cpu ...)

        } else {
                vmcb->ctrl.intercept_misc1 |= VMCB_CTRL_INTERCEPT_VINTR;
                vmcb->ctrl.v |= (VMCB_CTRL_V_IRQ | VMCB_CTRL_V_IGN_TPR);
+               svm_vmcb_cache_flush(vmcb, VMCB_CTRL_VMCB_CLEAN_TPR);
                cpudata->int_window_exit = true;
        }
+
+       svm_vmcb_cache_flush(vmcb, VMCB_CTRL_VMCB_CLEAN_I);
}
|
Line 571 (1.11) / Line 628 (1.12): svm_event_waitexit_disable(struct nvmm_cpu ...)

        } else {
                vmcb->ctrl.intercept_misc1 &= ~VMCB_CTRL_INTERCEPT_VINTR;
                vmcb->ctrl.v &= ~(VMCB_CTRL_V_IRQ | VMCB_CTRL_V_IGN_TPR);
+               svm_vmcb_cache_flush(vmcb, VMCB_CTRL_VMCB_CLEAN_TPR);
                cpudata->int_window_exit = false;
        }
+
+       svm_vmcb_cache_flush(vmcb, VMCB_CTRL_VMCB_CLEAN_I);
}

static inline int
[...]
}

static void
-svm_vmcb_cache_default(struct vmcb *vmcb)
-{
-       vmcb->ctrl.vmcb_clean =
-           VMCB_CTRL_VMCB_CLEAN_I |
-           VMCB_CTRL_VMCB_CLEAN_IOPM |
-           VMCB_CTRL_VMCB_CLEAN_ASID |
-           VMCB_CTRL_VMCB_CLEAN_LBR |
-           VMCB_CTRL_VMCB_CLEAN_AVIC;
-}
-
-static void
-svm_vmcb_cache_flush(struct vmcb *vmcb)
-{
-       vmcb->ctrl.vmcb_clean = 0;
-}
-
-static void
svm_vcpu_guest_fpu_enter(struct nvmm_cpu *vcpu)
{
        struct svm_cpudata *cpudata = vcpu->cpudata;

Line 1164 (1.11) / Line 1207 (1.12): svm_vcpu_run(struct nvmm_machine *mach, ...)

        if (vcpu->hcpu_last != hcpu) {
                vmcb->ctrl.tsc_offset = cpudata->tsc_offset +
                    curcpu()->ci_data.cpu_cc_skew;
-               svm_vmcb_cache_flush(vmcb);
+               svm_vmcb_cache_flush_all(vmcb);
        }

        svm_vcpu_guest_dbregs_enter(vcpu);
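
The clean bits only describe what the physical CPU that last ran this VMCB still has cached, so they are meaningless once the vCPU is rescheduled onto a different host CPU; in that case (vcpu->hcpu_last != hcpu above) the whole field is cleared, which is what svm_vmcb_cache_flush_all() now does in place of the old no-argument svm_vmcb_cache_flush().
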
Line 1821 (1.11) / Line 1864 (1.12): svm_vcpu_setstate(struct nvmm_cpu *vcpu, ...)

                fpustate->fx_mxcsr_mask &= x86_fpu_mxcsr_mask;
                fpustate->fx_mxcsr &= fpustate->fx_mxcsr_mask;
        }

+       svm_vmcb_cache_update(vmcb, flags);
}

static void