version 1.1.4.3, 2011/05/31 03:04:09 |
version 1.2, 2011/02/20 07:45:47 |
|
|
/* $NetBSD$ */ |
|
|
|
/*- |
/*- |
* Copyright (c) 2010 The NetBSD Foundation, Inc. |
* Copyright (c) 2010 The NetBSD Foundation, Inc. |
* All rights reserved. |
* All rights reserved. |
|
|
*/ |
*/ |
|
|
#include <sys/cdefs.h> |
#include <sys/cdefs.h> |
__KERNEL_RCSID(0, "$NetBSD$"); |
|
|
|
#include "opt_ddb.h" |
#include "opt_ddb.h" |
#include "opt_multiprocessor.h" |
#include "opt_multiprocessor.h" |
#include "opt_sa.h" |
#include "opt_sa.h" |
|
|
|
__KERNEL_RCSID(0, "$NetBSD$"); |
|
|
#include <sys/param.h> |
#include <sys/param.h> |
#include <sys/cpu.h> |
#include <sys/cpu.h> |
#include <sys/intr.h> |
#include <sys/intr.h> |
Line 101 volatile __cpuset_t cpus_halted = 0; |
|
Line 100 volatile __cpuset_t cpus_halted = 0; |
|
static int cpu_ipi_wait(volatile __cpuset_t *, u_long); |
static int cpu_ipi_wait(volatile __cpuset_t *, u_long); |
static void cpu_ipi_error(const char *, __cpuset_t, __cpuset_t); |
static void cpu_ipi_error(const char *, __cpuset_t, __cpuset_t); |
|
|
|
|
static struct cpu_info *cpu_info_last = &cpu_info_store; |
static struct cpu_info *cpu_info_last = &cpu_info_store; |
|
|
struct cpu_info * |
struct cpu_info * |
cpu_info_alloc(struct pmap_tlb_info *ti, cpuid_t cpu_id, cpuid_t cpu_package_id, |
cpu_info_alloc(struct pmap_tlb_info *ti, cpuid_t cpu_id, cpuid_t cpu_node_id, |
cpuid_t cpu_core_id, cpuid_t cpu_smt_id) |
cpuid_t cpu_core_id, cpuid_t cpu_smt_id) |
{ |
{ |
vaddr_t cpu_info_offset = (vaddr_t)&cpu_info_store & PAGE_MASK; |
vaddr_t cpu_info_offset = (vaddr_t)&cpu_info_store & PAGE_MASK; |
Line 142 cpu_info_alloc(struct pmap_tlb_info *ti, |
|
Line 142 cpu_info_alloc(struct pmap_tlb_info *ti, |
|
} |
} |
|
|
ci->ci_cpuid = cpu_id; |
ci->ci_cpuid = cpu_id; |
ci->ci_data.cpu_package_id = cpu_package_id; |
ci->ci_data.cpu_package_id = cpu_node_id; |
ci->ci_data.cpu_core_id = cpu_core_id; |
ci->ci_data.cpu_core_id = cpu_core_id; |
ci->ci_data.cpu_smt_id = cpu_smt_id; |
ci->ci_data.cpu_smt_id = cpu_smt_id; |
ci->ci_cpu_freq = cpu_info_store.ci_cpu_freq; |
ci->ci_cpu_freq = cpu_info_store.ci_cpu_freq; |
Line 150 cpu_info_alloc(struct pmap_tlb_info *ti, |
|
Line 150 cpu_info_alloc(struct pmap_tlb_info *ti, |
|
ci->ci_cycles_per_hz = cpu_info_store.ci_cycles_per_hz; |
ci->ci_cycles_per_hz = cpu_info_store.ci_cycles_per_hz; |
ci->ci_divisor_delay = cpu_info_store.ci_divisor_delay; |
ci->ci_divisor_delay = cpu_info_store.ci_divisor_delay; |
ci->ci_divisor_recip = cpu_info_store.ci_divisor_recip; |
ci->ci_divisor_recip = cpu_info_store.ci_divisor_recip; |
ci->ci_cpuwatch_count = cpu_info_store.ci_cpuwatch_count; |
|
|
|
/* |
/* |
* Attach its TLB info (which must be direct-mapped) |
* Attach its TLB info (which must be direct-mapped) |
Line 185 cpu_info_alloc(struct pmap_tlb_info *ti, |
|
Line 184 cpu_info_alloc(struct pmap_tlb_info *ti, |
|
} |
} |
#endif /* MULTIPROCESSOR */ |
#endif /* MULTIPROCESSOR */ |
|
|
/*
 * cpu_hwrena_setup:
 *
 *	On MIPS32R2/MIPS64R2 CPUs, enable user-mode RDHWR access to a set
 *	of hardware registers via the COP0 HWREna register, and seed the
 *	UserLocal register (TLS base) from the current lwp where supported.
 *	Compiled to a no-op on pre-R2 configurations.
 */
static void
cpu_hwrena_setup(void)
{
#if (MIPS32R2 + MIPS64R2) > 0
	const int cp0flags = mips_options.mips_cpu->cpu_cp0flags;

	/* cpu_cp0flags is only meaningful when MIPS_CP0FL_USE is set. */
	if ((cp0flags & MIPS_CP0FL_USE) == 0)
		return;

	if (cp0flags & MIPS_CP0FL_HWRENA) {
		/*
		 * Expose UserLocal, the cycle-counter resolution, the cycle
		 * counter, the SYNCI step size, and the CPU number to
		 * user-mode RDHWR.
		 */
		mipsNN_cp0_hwrena_write(
		    MIPS_HWRENA_UL
		    |MIPS_HWRENA_CCRES
		    |MIPS_HWRENA_CC
		    |MIPS_HWRENA_SYNCI_STEP
		    |MIPS_HWRENA_CPUNUM);
		if (cp0flags & MIPS_CP0FL_USERLOCAL) {
			/* Make curlwp's TLS pointer visible via UserLocal. */
			mipsNN_cp0_userlocal_write(curlwp->l_private);
		}
	}
#endif
}
|
|
|
void |
void |
cpu_attach_common(device_t self, struct cpu_info *ci) |
cpu_attach_common(device_t self, struct cpu_info *ci) |
{ |
{ |
const char * const xname = device_xname(self); |
|
|
|
/* |
/* |
* Cross link cpu_info and its device together |
* Cross link cpu_info and its device together |
*/ |
*/ |
Line 220 cpu_attach_common(device_t self, struct |
|
Line 195 cpu_attach_common(device_t self, struct |
|
KASSERT(ci->ci_idepth == 0); |
KASSERT(ci->ci_idepth == 0); |
|
|
evcnt_attach_dynamic(&ci->ci_ev_count_compare, |
evcnt_attach_dynamic(&ci->ci_ev_count_compare, |
EVCNT_TYPE_INTR, NULL, xname, |
EVCNT_TYPE_INTR, NULL, device_xname(self), |
"int 5 (clock)"); |
"int 5 (clock)"); |
evcnt_attach_dynamic(&ci->ci_ev_count_compare_missed, |
evcnt_attach_dynamic(&ci->ci_ev_count_compare_missed, |
EVCNT_TYPE_INTR, NULL, xname, |
EVCNT_TYPE_INTR, NULL, device_xname(self), |
"int 5 (clock) missed"); |
"int 5 (clock) missed"); |
evcnt_attach_dynamic(&ci->ci_ev_fpu_loads, |
evcnt_attach_dynamic(&ci->ci_ev_fpu_loads, |
EVCNT_TYPE_MISC, NULL, xname, |
EVCNT_TYPE_MISC, NULL, device_xname(self), |
"fpu loads"); |
"fpu loads"); |
evcnt_attach_dynamic(&ci->ci_ev_fpu_saves, |
evcnt_attach_dynamic(&ci->ci_ev_fpu_saves, |
EVCNT_TYPE_MISC, NULL, xname, |
EVCNT_TYPE_MISC, NULL, device_xname(self), |
"fpu saves"); |
"fpu saves"); |
evcnt_attach_dynamic(&ci->ci_ev_tlbmisses, |
evcnt_attach_dynamic(&ci->ci_ev_tlbmisses, |
EVCNT_TYPE_TRAP, NULL, xname, |
EVCNT_TYPE_TRAP, NULL, device_xname(self), |
"tlb misses"); |
"tlb misses"); |
|
|
if (ci == &cpu_info_store) |
if (ci == &cpu_info_store) |
Line 249 cpu_attach_common(device_t self, struct |
|
Line 224 cpu_attach_common(device_t self, struct |
|
cpu_info_last = ci; |
cpu_info_last = ci; |
} |
} |
evcnt_attach_dynamic(&ci->ci_evcnt_synci_activate_rqst, |
evcnt_attach_dynamic(&ci->ci_evcnt_synci_activate_rqst, |
EVCNT_TYPE_MISC, NULL, xname, |
EVCNT_TYPE_MISC, NULL, device_xname(self), |
"syncicache activate request"); |
"syncicache activate request"); |
evcnt_attach_dynamic(&ci->ci_evcnt_synci_deferred_rqst, |
evcnt_attach_dynamic(&ci->ci_evcnt_synci_deferred_rqst, |
EVCNT_TYPE_MISC, NULL, xname, |
EVCNT_TYPE_MISC, NULL, device_xname(self), |
"syncicache deferred request"); |
"syncicache deferred request"); |
evcnt_attach_dynamic(&ci->ci_evcnt_synci_ipi_rqst, |
evcnt_attach_dynamic(&ci->ci_evcnt_synci_ipi_rqst, |
EVCNT_TYPE_MISC, NULL, xname, |
EVCNT_TYPE_MISC, NULL, device_xname(self), |
"syncicache ipi request"); |
"syncicache ipi request"); |
evcnt_attach_dynamic(&ci->ci_evcnt_synci_onproc_rqst, |
evcnt_attach_dynamic(&ci->ci_evcnt_synci_onproc_rqst, |
EVCNT_TYPE_MISC, NULL, xname, |
EVCNT_TYPE_MISC, NULL, device_xname(self), |
"syncicache onproc request"); |
"syncicache onproc request"); |
|
|
/* |
/* |
Line 276 cpu_startup_common(void) |
|
Line 251 cpu_startup_common(void) |
|
|
|
pmap_tlb_info_evcnt_attach(&pmap_tlb0_info); |
pmap_tlb_info_evcnt_attach(&pmap_tlb0_info); |
|
|
cpu_hwrena_setup(); |
|
|
|
/* |
/* |
* Good {morning,afternoon,evening,night}. |
* Good {morning,afternoon,evening,night}. |
*/ |
*/ |
Line 386 cpu_getmcontext(struct lwp *l, mcontext_ |
|
Line 359 cpu_getmcontext(struct lwp *l, mcontext_ |
|
gr[_REG_CAUSE] = tf->tf_regs[_R_CAUSE]; |
gr[_REG_CAUSE] = tf->tf_regs[_R_CAUSE]; |
gr[_REG_EPC] = tf->tf_regs[_R_PC]; |
gr[_REG_EPC] = tf->tf_regs[_R_PC]; |
gr[_REG_SR] = tf->tf_regs[_R_SR]; |
gr[_REG_SR] = tf->tf_regs[_R_SR]; |
mcp->_mc_tlsbase = (intptr_t)l->l_private; |
|
|
|
if ((ras_pc = (intptr_t)ras_lookup(l->l_proc, |
if ((ras_pc = (intptr_t)ras_lookup(l->l_proc, |
(void *) (intptr_t)gr[_REG_EPC])) != -1) |
(void *) (intptr_t)gr[_REG_EPC])) != -1) |
gr[_REG_EPC] = ras_pc; |
gr[_REG_EPC] = ras_pc; |
|
|
*flags |= _UC_CPU | _UC_TLSBASE; |
*flags |= _UC_CPU; |
|
|
/* Save floating point register context, if any. */ |
/* Save floating point register context, if any. */ |
KASSERT(l == curlwp); |
if (fpu_used_p(l)) { |
if (fpu_used_p()) { |
|
size_t fplen; |
size_t fplen; |
/* |
/* |
* If this process is the current FP owner, dump its |
* If this process is the current FP owner, dump its |
Line 454 cpu_setmcontext(struct lwp *l, const mco |
|
Line 425 cpu_setmcontext(struct lwp *l, const mco |
|
/* Do not restore SR. */ |
/* Do not restore SR. */ |
} |
} |
|
|
/* Restore the private thread context */ |
|
if (flags & _UC_TLSBASE) { |
|
lwp_setprivate(l, (void *)(intptr_t)mcp->_mc_tlsbase); |
|
} |
|
|
|
/* Restore floating point register context, if any. */ |
/* Restore floating point register context, if any. */ |
if (flags & _UC_FPU) { |
if (flags & _UC_FPU) { |
size_t fplen; |
size_t fplen; |
Line 701 cpu_ipi_error(const char *s, __cpuset_t |
|
Line 667 cpu_ipi_error(const char *s, __cpuset_t |
|
int index = CPUSET_NEXT(expected); |
int index = CPUSET_NEXT(expected); |
CPUSET_DEL(expected, index); |
CPUSET_DEL(expected, index); |
printf(" cpu%d", index); |
printf(" cpu%d", index); |
} while (!CPUSET_EMPTY_P(expected)); |
} while(!CPUSET_EMPTY_P(expected)); |
printf("\n"); |
printf("\n"); |
} |
} |
} |
} |
Line 775 cpu_pause(struct reg *regsp) |
|
Line 741 cpu_pause(struct reg *regsp) |
|
CPUSET_ADD(cpus_paused, index); |
CPUSET_ADD(cpus_paused, index); |
do { |
do { |
; |
; |
} while (CPUSET_HAS_P(cpus_paused, index)); |
} while(CPUSET_HAS_P(cpus_paused, index)); |
CPUSET_ADD(cpus_resumed, index); |
CPUSET_ADD(cpus_resumed, index); |
|
|
#if defined(DDB) |
#if defined(DDB) |
if (ddb_running_on_this_cpu_p()) |
if (ddb_running_on_this_cpu()) |
cpu_Debugger(); |
cpu_Debugger(); |
if (ddb_running_on_any_cpu_p()) |
if (ddb_running_on_any_cpu()) |
continue; |
continue; |
#endif |
#endif |
break; |
break; |
} |
} |
|
#if defined(DDB) && defined(MIPS_DDB_WATCH) |
|
db_mach_watch_set_all(); |
|
#endif |
|
|
splx(s); |
splx(s); |
} |
} |
Line 846 cpu_is_paused(int index) |
|
Line 815 cpu_is_paused(int index) |
|
return CPUSET_HAS_P(cpus_paused, index); |
return CPUSET_HAS_P(cpus_paused, index); |
} |
} |
|
|
#ifdef DDB |
|
void |
void |
cpu_debug_dump(void) |
cpu_debug_dump(void) |
{ |
{ |
Line 870 cpu_debug_dump(void) |
|
Line 838 cpu_debug_dump(void) |
|
ci->ci_active_ipis, ci->ci_request_ipis); |
ci->ci_active_ipis, ci->ci_request_ipis); |
} |
} |
} |
} |
#endif |
|
|
|
void |
void |
cpu_hatch(struct cpu_info *ci) |
cpu_hatch(struct cpu_info *ci) |
Line 886 cpu_hatch(struct cpu_info *ci) |
|
Line 853 cpu_hatch(struct cpu_info *ci) |
|
mips3_cp0_wired_write(ti->ti_wired); |
mips3_cp0_wired_write(ti->ti_wired); |
|
|
/* |
/* |
* Setup HWRENA and USERLOCAL COP0 registers (MIPSxxR2). |
|
*/ |
|
cpu_hwrena_setup(); |
|
|
|
/* |
|
* If we are using register zero relative addressing to access cpu_info |
* If we are using register zero relative addressing to access cpu_info |
* in the exception vectors, enter that mapping into TLB now. |
* in the exception vectors, enter that mapping into TLB now. |
*/ |
*/ |
Line 934 cpu_hatch(struct cpu_info *ci) |
|
Line 896 cpu_hatch(struct cpu_info *ci) |
|
ci->ci_data.cpu_cc_skew = 0; |
ci->ci_data.cpu_cc_skew = 0; |
|
|
/* |
/* |
* Let this CPU do its own post-running initialization |
|
* (for things that have to be done on the local CPU). |
|
*/ |
|
(*mips_locoresw.lsw_cpu_run)(ci); |
|
|
|
/* |
|
* Now turn on interrupts. |
* Now turn on interrupts. |
*/ |
*/ |
spl0(); |
spl0(); |
|
|
xc_send_ipi(struct cpu_info *ci) |
xc_send_ipi(struct cpu_info *ci) |
{ |
{ |
|
|
(*mips_locoresw.lsw_send_ipi)(ci, IPI_XCALL); |
(*mips_locoresw.lsw_send_ipi)(ci, IPI_NOP); |
} |
} |
#endif /* MULTIPROCESSOR */ |
#endif /* MULTIPROCESSOR */ |
|
|
Line 1002 cpu_vmspace_exec(lwp_t *l, vaddr_t start |
|
Line 958 cpu_vmspace_exec(lwp_t *l, vaddr_t start |
|
} |
} |
} |
} |
#endif |
#endif |
|
|
int |
|
cpu_lwp_setprivate(lwp_t *l, void *v) |
|
{ |
|
#if (MIPS32R2 + MIPS64R2) > 0 |
|
if (l == curlwp |
|
&& (mips_options.mips_cpu->cpu_cp0flags & MIPS_CP0FL_USERLOCAL)) { |
|
mipsNN_cp0_userlocal_write(v); |
|
} |
|
#endif |
|
return 0; |
|
} |
|
|
|
|
|
#if (MIPS32 + MIPS32R2 + MIPS64 + MIPS64R2) > 0 |
|
|
|
#if (CPUWATCH_MAX != 8) |
|
# error CPUWATCH_MAX |
|
#endif |
|
|
|
/* |
|
* cpuwatch_discover - determine how many COP0 watchpoints this CPU supports |
|
*/ |
|
u_int |
|
cpuwatch_discover(void) |
|
{ |
|
int i; |
|
|
|
for (i=0; i < CPUWATCH_MAX; i++) { |
|
uint32_t watchhi = mipsNN_cp0_watchhi_read(i); |
|
if ((watchhi & __BIT(31)) == 0) /* test 'M' bit */ |
|
break; |
|
} |
|
return i + 1; |
|
} |
|
|
|
/*
 * cpuwatch_free - release a watchpoint slot obtained from cpuwatch_alloc()
 *
 *	Zeroing cw_mode marks the slot free again (cpuwatch_alloc() treats
 *	(cw_mode & CPUWATCH_RWX) == 0 as "available"), and cpuwatch_clr()
 *	disables the corresponding COP0 watch register pair on this CPU.
 *	No locking: the table is CPU-private (see cpuwatch_alloc).
 */
void
cpuwatch_free(cpu_watchpoint_t *cwp)
{
#ifdef DIAGNOSTIC
	struct cpu_info * const ci = curcpu();

	/* cwp must point into this CPU's own watchpoint table. */
	KASSERT(cwp >= &ci->ci_cpuwatch_tab[0] &&
		cwp <= &ci->ci_cpuwatch_tab[ci->ci_cpuwatch_count-1]);
#endif
	cwp->cw_mode = 0;
	cwp->cw_asid = 0;
	cwp->cw_addr = 0;
	cpuwatch_clr(cwp);
}
|
|
|
/* |
|
* cpuwatch_alloc |
|
* find an empty slot |
|
* no locking for the table since it is CPU private |
|
*/ |
|
cpu_watchpoint_t * |
|
cpuwatch_alloc(void) |
|
{ |
|
struct cpu_info * const ci = curcpu(); |
|
cpu_watchpoint_t *cwp; |
|
|
|
for (int i=0; i < ci->ci_cpuwatch_count; i++) { |
|
cwp = &ci->ci_cpuwatch_tab[i]; |
|
if ((cwp->cw_mode & CPUWATCH_RWX) == 0) |
|
return cwp; |
|
} |
|
return NULL; |
|
} |
|
|
|
|
|
void |
|
cpuwatch_set_all(void) |
|
{ |
|
struct cpu_info * const ci = curcpu(); |
|
cpu_watchpoint_t *cwp; |
|
int i; |
|
|
|
for (i=0; i < ci->ci_cpuwatch_count; i++) { |
|
cwp = &ci->ci_cpuwatch_tab[i]; |
|
if ((cwp->cw_mode & CPUWATCH_RWX) != 0) |
|
cpuwatch_set(cwp); |
|
} |
|
} |
|
|
|
void |
|
cpuwatch_clr_all(void) |
|
{ |
|
struct cpu_info * const ci = curcpu(); |
|
cpu_watchpoint_t *cwp; |
|
int i; |
|
|
|
for (i=0; i < ci->ci_cpuwatch_count; i++) { |
|
cwp = &ci->ci_cpuwatch_tab[i]; |
|
if ((cwp->cw_mode & CPUWATCH_RWX) != 0) |
|
cpuwatch_clr(cwp); |
|
} |
|
} |
|
|
|
/*
 * cpuwatch_set - establish a MIPS COP0 watchpoint
 *
 *	Encode the software watchpoint descriptor *cwp into the WatchLo and
 *	WatchHi register pair whose index is cwp's position in this CPU's
 *	ci_cpuwatch_tab[].  The low bits of WatchLo select which accesses
 *	trap (store/load/ifetch); WatchHi holds the ASID qualifier (or the
 *	Global bit) and the optional "don't care" address mask.
 */
void
cpuwatch_set(cpu_watchpoint_t *cwp)
{
	struct cpu_info * const ci = curcpu();
	uint32_t watchhi;
	register_t watchlo;
	/* Register pair number == offset of cwp within the table. */
	int cwnum = cwp - &ci->ci_cpuwatch_tab[0];

	/* cwp must point into this CPU's own watchpoint table. */
	KASSERT(cwp >= &ci->ci_cpuwatch_tab[0] &&
		cwp <= &ci->ci_cpuwatch_tab[ci->ci_cpuwatch_count-1]);

	/* WatchLo: watched address plus the W/R/I access-enable bits. */
	watchlo = cwp->cw_addr;
	if (cwp->cw_mode & CPUWATCH_WRITE)
		watchlo |= __BIT(0);
	if (cwp->cw_mode & CPUWATCH_READ)
		watchlo |= __BIT(1);
	if (cwp->cw_mode & CPUWATCH_EXEC)
		watchlo |= __BIT(2);

	if (cwp->cw_mode & CPUWATCH_ASID)
		watchhi = cwp->cw_asid << 16;	/* addr qualified by asid */
	else
		watchhi = __BIT(30);	/* addr not qual. by asid (Global) */
	if (cwp->cw_mode & CPUWATCH_MASK)
		watchhi |= cwp->cw_mask; /* set "dont care" addr match bits */

	/*
	 * Write WatchHi before WatchLo: the watchpoint arms when the
	 * access-enable bits land in WatchLo, so the qualifier must
	 * already be in place.
	 */
	mipsNN_cp0_watchhi_write(cwnum, watchhi);
	mipsNN_cp0_watchlo_write(cwnum, watchlo);
}
|
|
|
/*
 * cpuwatch_clr - disestablish a MIPS COP0 watchpoint
 *
 *	Zeroing the WatchLo access-enable bits (along with the rest of the
 *	register pair) disables the watchpoint whose index is cwp's
 *	position in this CPU's ci_cpuwatch_tab[].
 */
void
cpuwatch_clr(cpu_watchpoint_t *cwp)
{
	struct cpu_info * const ci = curcpu();
	/* Register pair number == offset of cwp within the table. */
	int cwnum = cwp - &ci->ci_cpuwatch_tab[0];

	/* cwp must point into this CPU's own watchpoint table. */
	KASSERT(cwp >= &ci->ci_cpuwatch_tab[0] &&
		cwp <= &ci->ci_cpuwatch_tab[ci->ci_cpuwatch_count-1]);

	mipsNN_cp0_watchhi_write(cwnum, 0);
	mipsNN_cp0_watchlo_write(cwnum, 0);
}
|
|
|
#endif /* (MIPS32 + MIPS32R2 + MIPS64 + MIPS64R2) > 0 */ |
|