--- version 1.111, 2014/05/12 11:56:02
+++ version 1.111.4.4, 2015/12/27 12:09:45
Line 138 struct cpu_softc {

 };

 #ifdef MULTIPROCESSOR
 int mp_cpu_start(struct cpu_info *, paddr_t);
 void mp_cpu_start_cleanup(struct cpu_info *);
 const struct cpu_functions mp_cpu_funcs = { mp_cpu_start, NULL,
 				      mp_cpu_start_cleanup };

Line 177 static void tss_init(struct i386tss *, v

 static void	cpu_init_idle_lwp(struct cpu_info *);
-uint32_t cpu_feature[5]; /* X86 CPUID feature bits
-			  * [0] basic features %edx
-			  * [1] basic features %ecx
-			  * [2] extended features %edx
-			  * [3] extended features %ecx
-			  * [4] VIA padlock features
-			  */
+uint32_t cpu_feature[7]; /* X86 CPUID feature bits */
+	/* [0] basic features cpuid.1:%edx
+	 * [1] basic features cpuid.1:%ecx (CPUID2_xxx bits)
+	 * [2] extended features cpuid:80000001:%edx
+	 * [3] extended features cpuid:80000001:%ecx
+	 * [4] VIA padlock features
+	 * [5] structured extended features cpuid.7:%ebx
+	 * [6] structured extended features cpuid.7:%ecx
+	 */

 extern char x86_64_doubleflt_stack[];
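The two new words come from CPUID leaf 7, the "structured extended features" leaf: %ebx fills cpu_feature[5] and %ecx fills cpu_feature[6]. The probe code itself is outside this diff; what follows is a minimal, hypothetical sketch of reading that leaf with plain inline assembly, not the kernel's own cpuid helpers:

#include <stdint.h>

/*
 * Hypothetical standalone probe, not the kernel's code: execute
 * CPUID with %eax = 7 and subleaf %ecx = 0.  %ebx carries the
 * CPUID_SEF_xxx bits (SMEP, for example, is bit 7 of %ebx).
 */
static void
read_structured_extended_features(uint32_t *ebxp, uint32_t *ecxp)
{
	uint32_t eax = 7, ebx, ecx = 0, edx;

	__asm__ volatile("cpuid"
	    : "+a" (eax), "=b" (ebx), "+c" (ecx), "=d" (edx));
	*ebxp = ebx;	/* would land in cpu_feature[5] */
	*ecxp = ecx;	/* would land in cpu_feature[6] */
}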
|
|
Line 357/359 cpu_attach(device_t parent, device_t sel

 	ci->ci_acpiid = caa->cpu_id;
 	ci->ci_cpuid = caa->cpu_number;
 	ci->ci_func = caa->cpu_func;
+	aprint_normal("\n");

 	/* Must be before mi_cpu_attach(). */
 	cpu_vm_init(ci);
Line 366/369 cpu_attach(device_t parent, device_t sel

 	error = mi_cpu_attach(ci);
 	if (error != 0) {
-		aprint_normal("\n");
 		aprint_error_dev(self,
 		    "mi_cpu_attach failed with %d\n", error);
 		return;
Line 446/448 cpu_attach(device_t parent, device_t sel

 #endif

 	default:
-		aprint_normal("\n");
 		panic("unknown processor type??\n");
 	}
Line 552/553 cpu_childdetached(device_t self, device_

 void
 cpu_init(struct cpu_info *ci)
 {
-	uint32_t cr4;
+	uint32_t cr4 = 0;

 	lcr0(rcr0() | CR0_WP);

-	cr4 = rcr4();
 	/*
 	 * On a P6 or above, enable global TLB caching if the
 	 * hardware supports it.

Line 581 cpu_init(struct cpu_info *ci)
 	if (cpu_feature[1] & CPUID2_XSAVE)
 		cr4 |= CR4_OSXSAVE;

-	lcr4(cr4);
+	/* If SMEP is supported, enable it */
+	if (cpu_feature[5] & CPUID_SEF_SMEP)
+		cr4 |= CR4_SMEP;
+
+	if (cr4) {
+		cr4 |= rcr4();
+		lcr4(cr4);
+	}

 	/* If xsave is enabled, enable all fpu features */
 	if (cr4 & CR4_OSXSAVE)
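The hunk cuts off right after the CR4_OSXSAVE test. The step that typically follows once OSXSAVE is enabled is programming XCR0, which selects the FPU/SIMD state components XSAVE will manage. The kernel's actual code for this is not shown in the diff; below is only a sketch of the underlying instruction:

#include <stdint.h>

/*
 * Sketch only: xsetbv writes %edx:%eax to the extended control
 * register selected by %ecx (XCR0 is index 0).  Executing it
 * before CR4_OSXSAVE is set raises #UD.
 */
static inline void
xsetbv(uint32_t xcr, uint64_t val)
{
	uint32_t lo = (uint32_t)val;
	uint32_t hi = (uint32_t)(val >> 32);

	__asm__ volatile("xsetbv" : : "c" (xcr), "a" (lo), "d" (hi));
}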
Line 782/789 cpu_boot_secondary(struct cpu_info *ci)

 }

 /*
- * The CPU ends up here when its ready to run
+ * The CPU ends up here when it's ready to run.
  * This is called from code in mptramp.s; at this point, we are running
  * in the idle pcb/idle stack of the new CPU.  When this function returns,
  * this processor will enter the idle loop and start looking for work.

Line 798/805 cpu_hatch(void *v)
 	cpu_probe(ci);

 	ci->ci_data.cpu_cc_freq = cpu_info_primary.ci_data.cpu_cc_freq;
 	/* cpu_get_tsc_freq(ci); */

 	KDASSERT((ci->ci_flags & CPUF_PRESENT) == 0);

Line 814/821 cpu_hatch(void *v)

 	/*
 	 * Wait to be brought online.  Use 'monitor/mwait' if available,
 	 * in order to make the TSC drift as much as possible, so that
 	 * we can detect it later.  If not available, try 'pause'.
 	 * We'd like to use 'hlt', but we have interrupts off.
 	 */
 	while ((ci->ci_flags & CPUF_GO) == 0) {
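The body of this wait loop is truncated here. A hedged sketch of the shape the comment describes, with raw inline assembly standing in for the kernel's wrappers; the use_mwait flag, a stand-in for testing CPUID2_MONITOR in cpu_feature[1], is hypothetical:

	while ((ci->ci_flags & CPUF_GO) == 0) {
		if (use_mwait) {
			/* Arm the monitor on the flags word ... */
			__asm__ volatile("monitor"
			    : : "a" (&ci->ci_flags), "c" (0), "d" (0));
			/* ... re-check, then sleep until it is written. */
			if ((ci->ci_flags & CPUF_GO) == 0)
				__asm__ volatile("mwait"
				    : : "a" (0), "c" (0));
		} else {
			__asm__ volatile("pause");
		}
	}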
Line 921/928 cpu_copy_trampoline(void)

  */
 	extern u_char cpu_spinup_trampoline[];
 	extern u_char cpu_spinup_trampoline_end[];

 	vaddr_t mp_trampoline_vaddr;

 	mp_trampoline_vaddr = uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
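The uvm_km_alloc() call is truncated, but the routine's job is to allocate a page of kernel VA, map the low-memory trampoline page at it, and copy the spinup code over. A sketch of the copy step only; the mapping details are not shown in this diff:

	/* Illustrative: the two symbols bracket the real-mode spinup
	 * code in the kernel image; copy it into the fresh page. */
	memcpy((void *)mp_trampoline_vaddr, cpu_spinup_trampoline,
	    cpu_spinup_trampoline_end - cpu_spinup_trampoline);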
Line 1276/1283 cpu_load_pmap(struct pmap *pmap, struct

 {
 #ifdef PAE
 	struct cpu_info *ci = curcpu();
+	bool interrupts_enabled;
 	pd_entry_t *l3_pd = ci->ci_pae_l3_pdir;
 	int i;
Line 1284/1292 cpu_load_pmap(struct pmap *pmap, struct

 	 * while this doesn't block NMIs, it's probably ok as NMIs are
 	 * unlikely to reload %cr3.
 	 */
-	x86_disable_intr();
+	interrupts_enabled = (x86_read_flags() & PSL_I) != 0;
+	if (interrupts_enabled)
+		x86_disable_intr();
+
 	for (i = 0 ; i < PDP_SIZE; i++) {
 		l3_pd[i] = pmap->pm_pdirpa[i] | PG_V;
 	}
-	x86_enable_intr();
+
+	if (interrupts_enabled)
+		x86_enable_intr();
 	tlbflush();
 #else /* PAE */
 	lcr3(pmap_pdirpa(pmap, 0));
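The point of this change: the old code re-enabled interrupts unconditionally on the way out, which is wrong for a caller that entered with interrupts already disabled. The new code snapshots the IF bit first and only restores what it changed. A self-contained sketch of that test follows; PSL_I is bit 9 of EFLAGS (0x200), and the pushf sequence shown is essentially what x86_read_flags() does:

#include <stdbool.h>

/* Sketch: read EFLAGS and test the interrupt-enable (IF) bit. */
static inline bool
interrupts_are_enabled(void)
{
	unsigned long flags;

	__asm__ volatile("pushf; pop %0" : "=r" (flags));
	return (flags & (1UL << 9)) != 0;	/* PSL_I == 0x200 */
}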