Annotation of src/sys/arch/mips/mips/cpu_subr.c, Revision 1.58
1.58 ! skrll 1: /* $NetBSD: cpu_subr.c,v 1.57 2020/08/09 06:26:49 skrll Exp $ */
1.3 rmind 2:
1.2 matt 3: /*-
1.36 ad 4: * Copyright (c) 2010, 2019 The NetBSD Foundation, Inc.
1.2 matt 5: * All rights reserved.
6: *
7: * This code is derived from software contributed to The NetBSD Foundation
8: * by Matt Thomas of 3am Software Foundry.
9: *
10: * Redistribution and use in source and binary forms, with or without
11: * modification, are permitted provided that the following conditions
12: * are met:
13: * 1. Redistributions of source code must retain the above copyright
14: * notice, this list of conditions and the following disclaimer.
15: * 2. Redistributions in binary form must reproduce the above copyright
16: * notice, this list of conditions and the following disclaimer in the
17: * documentation and/or other materials provided with the distribution.
18: *
19: * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20: * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21: * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22: * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23: * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24: * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25: * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26: * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27: * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28: * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29: * POSSIBILITY OF SUCH DAMAGE.
30: */
31:
32: #include <sys/cdefs.h>
1.58 ! skrll 33: __KERNEL_RCSID(0, "$NetBSD: cpu_subr.c,v 1.57 2020/08/09 06:26:49 skrll Exp $");
1.2 matt 34:
1.32 skrll 35: #include "opt_cputype.h"
1.27 matt 36: #include "opt_ddb.h"
37: #include "opt_modular.h"
1.2 matt 38: #include "opt_multiprocessor.h"
39:
40: #include <sys/param.h>
1.56 simonb 41: #include <sys/atomic.h>
42: #include <sys/bitops.h>
1.2 matt 43: #include <sys/cpu.h>
1.56 simonb 44: #include <sys/device.h>
45: #include <sys/idle.h>
1.2 matt 46: #include <sys/intr.h>
1.56 simonb 47: #include <sys/ipi.h>
48: #include <sys/kernel.h>
1.2 matt 49: #include <sys/lwp.h>
1.56 simonb 50: #include <sys/module.h>
1.2 matt 51: #include <sys/proc.h>
52: #include <sys/ras.h>
1.56 simonb 53: #include <sys/reboot.h>
1.2 matt 54: #include <sys/xcall.h>
55:
56: #include <uvm/uvm.h>
57:
58: #include <mips/locore.h>
59: #include <mips/regnum.h>
60: #include <mips/pcb.h>
61: #include <mips/cache.h>
62: #include <mips/frame.h>
63: #include <mips/userret.h>
64: #include <mips/pte.h>
65:
66: #if defined(DDB) || defined(KGDB)
1.29 skrll 67: #ifdef DDB
1.2 matt 68: #include <mips/db_machdep.h>
69: #include <ddb/db_command.h>
70: #include <ddb/db_output.h>
71: #endif
72: #endif
73:
1.20 matt 74: #ifdef MIPS64_OCTEON
1.49 jmcneill 75: #include <mips/cavium/octeonvar.h>
76: extern struct cpu_softc octeon_cpu_softc[];
1.20 matt 77: #endif
78:
1.2 matt 79: struct cpu_info cpu_info_store
1.20 matt 80: #if defined(MULTIPROCESSOR) && !defined(MIPS64_OCTEON)
1.2 matt 81: __section(".data1")
82: __aligned(1LU << ilog2((2*sizeof(struct cpu_info)-1)))
83: #endif
84: = {
85: .ci_curlwp = &lwp0,
86: .ci_tlb_info = &pmap_tlb0_info,
1.27 matt 87: .ci_pmap_kern_segtab = &pmap_kern_segtab,
88: .ci_pmap_user_segtab = NULL,
1.2 matt 89: #ifdef _LP64
1.27 matt 90: .ci_pmap_user_seg0tab = NULL,
1.2 matt 91: #endif
92: .ci_cpl = IPL_HIGH,
93: .ci_tlb_slot = -1,
94: #ifdef MULTIPROCESSOR
95: .ci_flags = CPUF_PRIMARY|CPUF_PRESENT|CPUF_RUNNING,
96: #endif
1.20 matt 97: #ifdef MIPS64_OCTEON
1.49 jmcneill 98: .ci_softc = &octeon_cpu_softc[0],
1.20 matt 99: #endif
1.2 matt 100: };
101:
1.14 matt 102: const pcu_ops_t * const pcu_ops_md_defs[PCU_UNIT_COUNT] = {
103: [PCU_FPU] = &mips_fpu_ops,
104: #if (MIPS32R2 + MIPS64R2) > 0
105: [PCU_DSP] = &mips_dsp_ops,
106: #endif
107: };
108:
1.2 matt 109: #ifdef MULTIPROCESSOR
1.20 matt 110: struct cpu_info * cpuid_infos[MAXCPUS] = {
111: [0] = &cpu_info_store,
112: };
1.2 matt 113:
1.24 matt 114: kcpuset_t *cpus_halted;
115: kcpuset_t *cpus_hatched;
116: kcpuset_t *cpus_paused;
117: kcpuset_t *cpus_resumed;
118: kcpuset_t *cpus_running;
1.2 matt 119:
1.24 matt 120: static void cpu_ipi_wait(const char *, const kcpuset_t *, const kcpuset_t *);
1.2 matt 121:
122: struct cpu_info *
1.6 matt 123: cpu_info_alloc(struct pmap_tlb_info *ti, cpuid_t cpu_id, cpuid_t cpu_package_id,
1.2 matt 124: cpuid_t cpu_core_id, cpuid_t cpu_smt_id)
125: {
1.50 simonb 126:
1.20 matt 127: KASSERT(cpu_id < MAXCPUS);
128:
1.21 matt 129: #ifdef MIPS64_OCTEON
1.52 skrll 130: const int exc_step = 1 << MIPS_EBASE_EXC_BASE_SHIFT;
1.51 simonb 131: vaddr_t exc_page = MIPS_UTLB_MISS_EXC_VEC + exc_step * cpu_id;
132: __CTASSERT(sizeof(struct cpu_info) + sizeof(struct pmap_tlb_info)
133: <= exc_step - 0x280);
1.29 skrll 134:
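	/*
	 * Layout sketch (assuming the EBase spacing above): each CPU's
	 * exception vector area is exc_step bytes; the first 0x280 bytes
	 * are reserved for the vectors themselves, and the CTASSERT
	 * above guarantees that the tlb_info and cpu_info both fit in
	 * the unused tail of the area, with cpu_info placed last.
	 */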
1.51 simonb 135: struct cpu_info * const ci = ((struct cpu_info *)(exc_page + exc_step)) - 1;
1.52 skrll 136: memset((void *)exc_page, 0, exc_step);
1.20 matt 137:
138: if (ti == NULL) {
139: ti = ((struct pmap_tlb_info *)ci) - 1;
140: pmap_tlb_info_init(ti);
141: }
142: #else
1.29 skrll 143: const vaddr_t cpu_info_offset = (vaddr_t)&cpu_info_store & PAGE_MASK;
1.21 matt 144: struct pglist pglist;
145: int error;
1.20 matt 146:
1.2 matt 147: /*
  148:	 * Grab a page from the first 512MB (mappable by KSEG0) in which to store
149: * exception vectors and cpu_info for this cpu.
150: */
151: error = uvm_pglistalloc(PAGE_SIZE,
152: 0, MIPS_KSEG1_START - MIPS_KSEG0_START,
153: PAGE_SIZE, PAGE_SIZE, &pglist, 1, false);
154: if (error)
155: return NULL;
156:
157: const paddr_t pa = VM_PAGE_TO_PHYS(TAILQ_FIRST(&pglist));
158: const vaddr_t va = MIPS_PHYS_TO_KSEG0(pa);
159: struct cpu_info * const ci = (void *) (va + cpu_info_offset);
160: memset((void *)va, 0, PAGE_SIZE);
161:
162: /*
  163:	 * If we weren't passed a pmap_tlb_info to use, the caller wants us
  164:	 * to allocate one ourselves.  Since we have room left over in the
  165:	 * page we just allocated, use a piece of that for it.
166: */
167: if (ti == NULL) {
168: if (cpu_info_offset >= sizeof(*ti)) {
169: ti = (void *) va;
170: } else {
  171:			KASSERT(PAGE_SIZE - cpu_info_offset - sizeof(*ci) >= sizeof(*ti));
172: ti = (struct pmap_tlb_info *)(va + PAGE_SIZE) - 1;
173: }
174: pmap_tlb_info_init(ti);
175: }
176:
1.20 matt 177: /*
178: * Attach its TLB info (which must be direct-mapped)
179: */
180: #ifdef _LP64
181: KASSERT(MIPS_KSEG0_P(ti) || MIPS_XKPHYS_P(ti));
182: #else
183: KASSERT(MIPS_KSEG0_P(ti));
184: #endif
185: #endif /* MIPS64_OCTEON */
186:
1.21 matt 187: KASSERT(cpu_id != 0);
1.2 matt 188: ci->ci_cpuid = cpu_id;
1.28  skrll   189:	ci->ci_pmap_kern_segtab = &pmap_kern_segtab;
1.2 matt 190: ci->ci_cpu_freq = cpu_info_store.ci_cpu_freq;
191: ci->ci_cctr_freq = cpu_info_store.ci_cctr_freq;
1.29 skrll 192: ci->ci_cycles_per_hz = cpu_info_store.ci_cycles_per_hz;
193: ci->ci_divisor_delay = cpu_info_store.ci_divisor_delay;
194: ci->ci_divisor_recip = cpu_info_store.ci_divisor_recip;
1.11 matt 195: ci->ci_cpuwatch_count = cpu_info_store.ci_cpuwatch_count;
1.2 matt 196:
1.46 skrll 197: cpu_topology_set(ci, cpu_package_id, cpu_core_id, cpu_smt_id, 0);
1.39 ad 198:
1.29 skrll 199: pmap_md_alloc_ephemeral_address_space(ci);
1.2 matt 200:
201: mi_cpu_attach(ci);
202:
203: pmap_tlb_info_attach(ti, ci);
204:
205: return ci;
206: }
207: #endif /* MULTIPROCESSOR */
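
/*
 * Usage sketch for cpu_info_alloc() (illustrative, not from this file):
 * a port's MP boot code might allocate per-CPU state roughly like so,
 * assuming a hypothetical part with one core per package and no SMT:
 *
 *	struct cpu_info *ci = cpu_info_alloc(NULL, cpu_id, cpu_id, 0, 0);
 *	if (ci == NULL)
 *		panic("cpu%lu: unable to allocate cpu_info", cpu_id);
 */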
208:
1.5 matt 209: static void
210: cpu_hwrena_setup(void)
211: {
212: #if (MIPS32R2 + MIPS64R2) > 0
213: const int cp0flags = mips_options.mips_cpu->cpu_cp0flags;
1.50 simonb 214:
1.5 matt 215: if ((cp0flags & MIPS_CP0FL_USE) == 0)
216: return;
217:
1.48 simonb 218: if (CPUISMIPSNNR2) {
1.5 matt 219: mipsNN_cp0_hwrena_write(
1.48 simonb 220: (MIPS_HAS_USERLOCAL ? MIPS_HWRENA_UL : 0)
221: | MIPS_HWRENA_CCRES
222: | MIPS_HWRENA_CC
223: | MIPS_HWRENA_SYNCI_STEP
224: | MIPS_HWRENA_CPUNUM);
225: if (MIPS_HAS_USERLOCAL) {
1.5 matt 226: mipsNN_cp0_userlocal_write(curlwp->l_private);
227: }
228: }
229: #endif
230: }
231:
1.2 matt 232: void
233: cpu_attach_common(device_t self, struct cpu_info *ci)
234: {
1.6 matt 235: const char * const xname = device_xname(self);
236:
1.2 matt 237: /*
238: * Cross link cpu_info and its device together
239: */
240: ci->ci_dev = self;
241: self->dv_private = ci;
242: KASSERT(ci->ci_idepth == 0);
243:
244: evcnt_attach_dynamic(&ci->ci_ev_count_compare,
1.6 matt 245: EVCNT_TYPE_INTR, NULL, xname,
1.2 matt 246: "int 5 (clock)");
247: evcnt_attach_dynamic(&ci->ci_ev_count_compare_missed,
1.6 matt 248: EVCNT_TYPE_INTR, NULL, xname,
1.2 matt 249: "int 5 (clock) missed");
250: evcnt_attach_dynamic(&ci->ci_ev_fpu_loads,
1.6 matt 251: EVCNT_TYPE_MISC, NULL, xname,
1.2 matt 252: "fpu loads");
253: evcnt_attach_dynamic(&ci->ci_ev_fpu_saves,
1.6 matt 254: EVCNT_TYPE_MISC, NULL, xname,
1.2 matt 255: "fpu saves");
1.14 matt 256: evcnt_attach_dynamic(&ci->ci_ev_dsp_loads,
257: EVCNT_TYPE_MISC, NULL, xname,
258: "dsp loads");
259: evcnt_attach_dynamic(&ci->ci_ev_dsp_saves,
260: EVCNT_TYPE_MISC, NULL, xname,
261: "dsp saves");
1.2 matt 262: evcnt_attach_dynamic(&ci->ci_ev_tlbmisses,
1.6 matt 263: EVCNT_TYPE_TRAP, NULL, xname,
1.2 matt 264: "tlb misses");
265:
266: #ifdef MULTIPROCESSOR
267: if (ci != &cpu_info_store) {
268: /*
269: * Tail insert this onto the list of cpu_info's.
270: */
1.20 matt 271: KASSERT(cpuid_infos[ci->ci_cpuid] == NULL);
272: cpuid_infos[ci->ci_cpuid] = ci;
273: membar_producer();
1.2 matt 274: }
1.21 matt 275: KASSERT(cpuid_infos[ci->ci_cpuid] != NULL);
1.2 matt 276: evcnt_attach_dynamic(&ci->ci_evcnt_synci_activate_rqst,
1.6 matt 277: EVCNT_TYPE_MISC, NULL, xname,
1.2 matt 278: "syncicache activate request");
279: evcnt_attach_dynamic(&ci->ci_evcnt_synci_deferred_rqst,
1.6 matt 280: EVCNT_TYPE_MISC, NULL, xname,
1.2 matt 281: "syncicache deferred request");
282: evcnt_attach_dynamic(&ci->ci_evcnt_synci_ipi_rqst,
1.6 matt 283: EVCNT_TYPE_MISC, NULL, xname,
1.2 matt 284: "syncicache ipi request");
285: evcnt_attach_dynamic(&ci->ci_evcnt_synci_onproc_rqst,
1.6 matt 286: EVCNT_TYPE_MISC, NULL, xname,
1.2 matt 287: "syncicache onproc request");
288:
289: /*
290: * Initialize IPI framework for this cpu instance
291: */
292: ipi_init(ci);
1.30 skrll 293:
1.57 skrll 294: kcpuset_create(&ci->ci_shootdowncpus, true);
1.30 skrll 295: kcpuset_create(&ci->ci_multicastcpus, true);
296: kcpuset_create(&ci->ci_watchcpus, true);
297: kcpuset_create(&ci->ci_ddbcpus, true);
1.2 matt 298: #endif
299: }
300:
301: void
302: cpu_startup_common(void)
303: {
304: vaddr_t minaddr, maxaddr;
305: char pbuf[9]; /* "99999 MB" */
306:
307: pmap_tlb_info_evcnt_attach(&pmap_tlb0_info);
308:
1.24 matt 309: #ifdef MULTIPROCESSOR
310: kcpuset_create(&cpus_halted, true);
311: KASSERT(cpus_halted != NULL);
312: kcpuset_create(&cpus_hatched, true);
313: KASSERT(cpus_hatched != NULL);
314: kcpuset_create(&cpus_paused, true);
315: KASSERT(cpus_paused != NULL);
316: kcpuset_create(&cpus_resumed, true);
317: KASSERT(cpus_resumed != NULL);
318: kcpuset_create(&cpus_running, true);
319: KASSERT(cpus_running != NULL);
320: kcpuset_set(cpus_hatched, cpu_number());
321: kcpuset_set(cpus_running, cpu_number());
322: #endif
323:
1.5 matt 324: cpu_hwrena_setup();
325:
1.2 matt 326: /*
327: * Good {morning,afternoon,evening,night}.
328: */
329: printf("%s%s", copyright, version);
1.18 matt 330: printf("%s\n", cpu_getmodel());
1.2 matt 331: format_bytes(pbuf, sizeof(pbuf), ctob(physmem));
332: printf("total memory = %s\n", pbuf);
333:
334: minaddr = 0;
335: /*
336: * Allocate a submap for physio.
337: */
338: phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
339: VM_PHYS_SIZE, 0, FALSE, NULL);
340:
341: /*
342: * (No need to allocate an mbuf cluster submap. Mbuf clusters
343: * are allocated via the pool allocator, and we use KSEG/XKPHYS to
344: * map those pages.)
345: */
346:
1.47 ad 347: format_bytes(pbuf, sizeof(pbuf), ptoa(uvm_availmem(false)));
1.2 matt 348: printf("avail memory = %s\n", pbuf);
1.27 matt 349:
350: #if defined(__mips_n32)
351: module_machine = "mips-n32";
352: #endif
1.2 matt 353: }
354:
355: void
356: cpu_getmcontext(struct lwp *l, mcontext_t *mcp, unsigned int *flags)
357: {
358: const struct trapframe *tf = l->l_md.md_utf;
359: __greg_t *gr = mcp->__gregs;
360: __greg_t ras_pc;
361:
  362:	/* Save register context. Don't copy R0 - it is always 0 */
363: memcpy(&gr[_REG_AT], &tf->tf_regs[_R_AST], sizeof(mips_reg_t) * 31);
364:
365: gr[_REG_MDLO] = tf->tf_regs[_R_MULLO];
366: gr[_REG_MDHI] = tf->tf_regs[_R_MULHI];
367: gr[_REG_CAUSE] = tf->tf_regs[_R_CAUSE];
368: gr[_REG_EPC] = tf->tf_regs[_R_PC];
369: gr[_REG_SR] = tf->tf_regs[_R_SR];
1.6 matt 370: mcp->_mc_tlsbase = (intptr_t)l->l_private;
1.2 matt 371:
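	/*
	 * If the PC is inside a restartable atomic sequence, report the
	 * sequence's restart address instead of the interrupted one.
	 */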
372: if ((ras_pc = (intptr_t)ras_lookup(l->l_proc,
373: (void *) (intptr_t)gr[_REG_EPC])) != -1)
374: gr[_REG_EPC] = ras_pc;
375:
1.6 matt 376: *flags |= _UC_CPU | _UC_TLSBASE;
1.4 joerg 377:
1.2 matt 378: /* Save floating point register context, if any. */
1.13 rmind 379: KASSERT(l == curlwp);
1.31 chs 380: if (fpu_used_p(l)) {
1.2 matt 381: size_t fplen;
382: /*
383: * If this process is the current FP owner, dump its
384: * context to the PCB first.
385: */
1.31 chs 386: fpu_save(l);
1.2 matt 387:
388: /*
389: * The PCB FP regs struct includes the FP CSR, so use the
390: * size of __fpregs.__fp_r when copying.
391: */
392: #if !defined(__mips_o32)
393: if (_MIPS_SIM_NEWABI_P(l->l_proc->p_md.md_abi)) {
394: #endif
395: fplen = sizeof(struct fpreg);
396: #if !defined(__mips_o32)
397: } else {
398: fplen = sizeof(struct fpreg_oabi);
399: }
400: #endif
401: struct pcb * const pcb = lwp_getpcb(l);
402: memcpy(&mcp->__fpregs, &pcb->pcb_fpregs, fplen);
403: *flags |= _UC_FPU;
404: }
405: }
406:
407: int
1.16 martin 408: cpu_mcontext_validate(struct lwp *l, const mcontext_t *mcp)
409: {
1.50 simonb 410:
1.16 martin 411: /* XXX: Do we validate the addresses?? */
412: return 0;
413: }
414:
415: int
1.2 matt 416: cpu_setmcontext(struct lwp *l, const mcontext_t *mcp, unsigned int flags)
417: {
418: struct trapframe *tf = l->l_md.md_utf;
419: struct proc *p = l->l_proc;
420: const __greg_t *gr = mcp->__gregs;
1.16 martin 421: int error;
1.2 matt 422:
423: /* Restore register context, if any. */
424: if (flags & _UC_CPU) {
1.16 martin 425: error = cpu_mcontext_validate(l, mcp);
426: if (error)
427: return error;
428:
1.2   matt    429:		/* Restore register context. */
1.16 martin 430:
1.2 matt 431: #ifdef __mips_n32
432: CTASSERT(_R_AST == _REG_AT);
433: if (__predict_false(p->p_md.md_abi == _MIPS_BSD_API_O32)) {
434: const mcontext_o32_t *mcp32 = (const mcontext_o32_t *)mcp;
435: const __greg32_t *gr32 = mcp32->__gregs;
436: for (size_t i = _R_AST; i < 32; i++) {
437: tf->tf_regs[i] = gr32[i];
438: }
439: } else
440: #endif
441: memcpy(&tf->tf_regs[_R_AST], &gr[_REG_AT],
442: sizeof(mips_reg_t) * 31);
443:
444: tf->tf_regs[_R_MULLO] = gr[_REG_MDLO];
445: tf->tf_regs[_R_MULHI] = gr[_REG_MDHI];
446: tf->tf_regs[_R_CAUSE] = gr[_REG_CAUSE];
447: tf->tf_regs[_R_PC] = gr[_REG_EPC];
448: /* Do not restore SR. */
449: }
450:
1.6 matt 451: /* Restore the private thread context */
452: if (flags & _UC_TLSBASE) {
453: lwp_setprivate(l, (void *)(intptr_t)mcp->_mc_tlsbase);
454: }
455:
1.2 matt 456: /* Restore floating point register context, if any. */
457: if (flags & _UC_FPU) {
458: size_t fplen;
459:
  460:		/* Discard the FPU contents. */
1.31 chs 461: fpu_discard(l);
1.2 matt 462:
463: #if !defined(__mips_o32)
464: if (_MIPS_SIM_NEWABI_P(l->l_proc->p_md.md_abi)) {
465: #endif
466: fplen = sizeof(struct fpreg);
467: #if !defined(__mips_o32)
468: } else {
469: fplen = sizeof(struct fpreg_oabi);
470: }
471: #endif
472: /*
473: * The PCB FP regs struct includes the FP CSR, so use the
474: * proper size of fpreg when copying.
475: */
476: struct pcb * const pcb = lwp_getpcb(l);
477: memcpy(&pcb->pcb_fpregs, &mcp->__fpregs, fplen);
478: }
479:
480: mutex_enter(p->p_lock);
481: if (flags & _UC_SETSTACK)
482: l->l_sigstk.ss_flags |= SS_ONSTACK;
483: if (flags & _UC_CLRSTACK)
484: l->l_sigstk.ss_flags &= ~SS_ONSTACK;
485: mutex_exit(p->p_lock);
486:
487: return (0);
488: }
489:
490: void
1.36 ad 491: cpu_need_resched(struct cpu_info *ci, struct lwp *l, int flags)
1.2 matt 492: {
1.50 simonb 493:
1.2 matt 494: KASSERT(kpreempt_disabled());
495:
1.36 ad 496: if ((flags & RESCHED_KPREEMPT) != 0) {
1.2 matt 497: #ifdef __HAVE_PREEMPTION
1.36 ad 498: if ((flags & RESCHED_REMOTE) != 0) {
499: cpu_send_ipi(ci, IPI_KPREEMPT);
500: } else {
1.2 matt 501: softint_trigger(SOFTINT_KPREEMPT);
1.29 skrll 502: }
1.2 matt 503: #endif
504: return;
505: }
1.37 ad 506: if ((flags & RESCHED_REMOTE) != 0) {
1.2 matt 507: #ifdef MULTIPROCESSOR
508: cpu_send_ipi(ci, IPI_AST);
1.36 ad 509: #endif
510: } else {
511: l->l_md.md_astpending = 1; /* force call to ast() */
1.29 skrll 512: }
1.2 matt 513: }
514:
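/*
 * Return the status-register mask used to test whether a clockframe
 * was taken in user mode: the KSU field on MIPS3-style CPUs, the
 * "KU previous" bit on MIPS1-style CPUs.
 */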
1.27 matt 515: uint32_t
516: cpu_clkf_usermode_mask(void)
517: {
1.50 simonb 518:
1.27 matt 519: return CPUISMIPS3 ? MIPS_SR_KSU_USER : MIPS_SR_KU_PREV;
520: }
521:
1.2 matt 522: void
523: cpu_signotify(struct lwp *l)
524: {
1.50 simonb 525:
1.2 matt 526: KASSERT(kpreempt_disabled());
527: #ifdef __HAVE_FAST_SOFTINTS
528: KASSERT(lwp_locked(l, NULL));
529: #endif
530:
1.36 ad 531: if (l->l_cpu != curcpu()) {
532: #ifdef MULTIPROCESSOR
533: cpu_send_ipi(l->l_cpu, IPI_AST);
534: #endif
535: } else {
536: l->l_md.md_astpending = 1; /* force call to ast() */
537: }
1.2 matt 538: }
539:
540: void
541: cpu_need_proftick(struct lwp *l)
542: {
1.50 simonb 543:
1.2 matt 544: KASSERT(kpreempt_disabled());
545: KASSERT(l->l_cpu == curcpu());
546:
547: l->l_pflag |= LP_OWEUPC;
548: l->l_md.md_astpending = 1; /* force call to ast() */
549: }
550:
551: #ifdef __HAVE_PREEMPTION
552: bool
553: cpu_kpreempt_enter(uintptr_t where, int s)
554: {
1.50 simonb 555:
1.29 skrll 556: KASSERT(kpreempt_disabled());
1.2 matt 557:
558: #if 0
559: if (where == (intptr_t)-2) {
560: KASSERT(curcpu()->ci_mtx_count == 0);
561: /*
562: * We must be called via kern_intr (which already checks for
563: * IPL_NONE so of course we call be preempted).
564: */
565: return true;
566: }
567: /*
568: * We are called from KPREEMPT_ENABLE(). If we are at IPL_NONE,
569: * of course we can be preempted. If we aren't, ask for a
570: * softint so that kern_intr can call kpreempt.
571: */
572: if (s == IPL_NONE) {
573: KASSERT(curcpu()->ci_mtx_count == 0);
574: return true;
575: }
576: softint_trigger(SOFTINT_KPREEMPT);
577: #endif
578: return false;
579: }
580:
581: void
582: cpu_kpreempt_exit(uintptr_t where)
583: {
584:
585: /* do nothing */
586: }
587:
588: /*
589: * Return true if preemption is disabled for MD reasons. Must be called
590: * with preemption disabled, and thus is only for diagnostic checks.
591: */
592: bool
593: cpu_kpreempt_disabled(void)
594: {
1.50 simonb 595:
1.2 matt 596: /*
597: * Any elevated IPL disables preemption.
598: */
599: return curcpu()->ci_cpl > IPL_NONE;
600: }
601: #endif /* __HAVE_PREEMPTION */
602:
603: void
604: cpu_idle(void)
605: {
606: void (*const mach_idle)(void) = mips_locoresw.lsw_cpu_idle;
607: struct cpu_info * const ci = curcpu();
608:
609: while (!ci->ci_want_resched) {
610: #ifdef __HAVE_FAST_SOFTINTS
611: KASSERT(ci->ci_data.cpu_softints == 0);
612: #endif
613: (*mach_idle)();
614: }
615: }
616:
617: bool
618: cpu_intr_p(void)
619: {
1.38 ad 620: uint64_t ncsw;
621: int idepth;
622: lwp_t *l;
623:
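	/*
	 * Lock-free snapshot: if the LWP migrates between the two reads,
	 * l_ncsw changes and we retry, so ci_idepth is always sampled on
	 * the CPU the LWP was running on.
	 */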
624: l = curlwp;
625: do {
626: ncsw = l->l_ncsw;
1.40 riastrad 627: __insn_barrier();
1.38 ad 628: idepth = l->l_cpu->ci_idepth;
1.40 riastrad 629: __insn_barrier();
1.38 ad 630: } while (__predict_false(ncsw != l->l_ncsw));
631:
632: return idepth != 0;
1.2 matt 633: }
634:
635: #ifdef MULTIPROCESSOR
636:
637: void
638: cpu_broadcast_ipi(int tag)
639: {
1.50 simonb 640:
1.24 matt 641: // No reason to remove ourselves since multicast_ipi will do that for us
642: cpu_multicast_ipi(cpus_running, tag);
1.2 matt 643: }
644:
645: void
1.24 matt 646: cpu_multicast_ipi(const kcpuset_t *kcp, int tag)
1.2 matt 647: {
1.24 matt 648: struct cpu_info * const ci = curcpu();
1.30 skrll 649: kcpuset_t *kcp2 = ci->ci_multicastcpus;
1.2 matt 650:
1.24 matt 651: if (kcpuset_match(cpus_running, ci->ci_data.cpu_kcpuset))
1.2 matt 652: return;
653:
1.30 skrll 654: kcpuset_copy(kcp2, kcp);
1.24 matt 655: kcpuset_remove(kcp2, ci->ci_data.cpu_kcpuset);
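	/*
	 * kcpuset_ffs() returns a 1-based index (0 when the set is
	 * empty), hence the pre-decrement before clearing and sending.
	 */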
656: for (cpuid_t cii; (cii = kcpuset_ffs(kcp2)) != 0; ) {
657: kcpuset_clear(kcp2, --cii);
658: (void)cpu_send_ipi(cpu_lookup(cii), tag);
1.2 matt 659: }
660: }
661:
662: int
663: cpu_send_ipi(struct cpu_info *ci, int tag)
664: {
665:
666: return (*mips_locoresw.lsw_send_ipi)(ci, tag);
667: }
668:
669: static void
1.24 matt 670: cpu_ipi_wait(const char *s, const kcpuset_t *watchset, const kcpuset_t *wanted)
1.2 matt 671: {
1.24 matt 672: bool done = false;
1.30 skrll 673: struct cpu_info * const ci = curcpu();
674: kcpuset_t *kcp = ci->ci_watchcpus;
1.29 skrll 675:
1.24  matt    676:	/* spin for some finite amount of time (~0.1s worth of iterations) */
677:
678: for (u_long limit = curcpu()->ci_cpu_freq/10; !done && limit--; ) {
679: kcpuset_copy(kcp, watchset);
680: kcpuset_intersect(kcp, wanted);
681: done = kcpuset_match(kcp, wanted);
682: }
683:
684: if (!done) {
685: cpuid_t cii;
686: kcpuset_copy(kcp, wanted);
687: kcpuset_remove(kcp, watchset);
688: if ((cii = kcpuset_ffs(kcp)) != 0) {
689: printf("Failed to %s:", s);
690: do {
691: kcpuset_clear(kcp, --cii);
692: printf(" cpu%lu", cii);
693: } while ((cii = kcpuset_ffs(kcp)) != 0);
694: printf("\n");
695: }
1.2 matt 696: }
697: }
698:
699: /*
700: * Halt this cpu
701: */
702: void
703: cpu_halt(void)
704: {
1.24 matt 705: cpuid_t cii = cpu_index(curcpu());
1.2 matt 706:
1.24 matt 707: printf("cpu%lu: shutting down\n", cii);
708: kcpuset_atomic_set(cpus_halted, cii);
1.2 matt 709: spl0(); /* allow interrupts e.g. further ipi ? */
710: for (;;) ; /* spin */
711:
712: /* NOTREACHED */
713: }
714:
715: /*
716: * Halt all running cpus, excluding current cpu.
717: */
718: void
719: cpu_halt_others(void)
720: {
1.24 matt 721: kcpuset_t *kcp;
1.2 matt 722:
1.24 matt 723: // If we are the only CPU running, there's nothing to do.
724: if (kcpuset_match(cpus_running, curcpu()->ci_data.cpu_kcpuset))
1.2 matt 725: return;
726:
1.24 matt 727: // Get all running CPUs
728: kcpuset_clone(&kcp, cpus_running);
729: // Remove ourself
730: kcpuset_remove(kcp, curcpu()->ci_data.cpu_kcpuset);
731: // Remove any halted CPUs
732: kcpuset_remove(kcp, cpus_halted);
733: // If there are CPUs left, send the IPIs
734: if (!kcpuset_iszero(kcp)) {
735: cpu_multicast_ipi(kcp, IPI_HALT);
736: cpu_ipi_wait("halt", cpus_halted, kcp);
737: }
738: kcpuset_destroy(kcp);
1.2 matt 739:
740: /*
741: * TBD
742: * Depending on available firmware methods, other cpus will
1.24 matt 743: * either shut down themselves, or spin and wait for us to
1.2 matt 744: * stop them.
745: */
746: }
747:
748: /*
749: * Pause this cpu
750: */
751: void
752: cpu_pause(struct reg *regsp)
753: {
754: int s = splhigh();
1.24 matt 755: cpuid_t cii = cpu_index(curcpu());
1.2 matt 756:
1.33 maxv 757: if (__predict_false(cold)) {
758: splx(s);
1.24 matt 759: return;
1.33 maxv 760: }
1.24 matt 761:
762: do {
763: kcpuset_atomic_set(cpus_paused, cii);
1.2 matt 764: do {
765: ;
1.24 matt 766: } while (kcpuset_isset(cpus_paused, cii));
767: kcpuset_atomic_set(cpus_resumed, cii);
1.2 matt 768: #if defined(DDB)
1.6 matt 769: if (ddb_running_on_this_cpu_p())
1.2 matt 770: cpu_Debugger();
1.6 matt 771: if (ddb_running_on_any_cpu_p())
1.2 matt 772: continue;
773: #endif
1.24 matt 774: } while (false);
1.2 matt 775:
776: splx(s);
777: }
778:
779: /*
780: * Pause all running cpus, excluding current cpu.
781: */
782: void
783: cpu_pause_others(void)
784: {
1.24 matt 785: struct cpu_info * const ci = curcpu();
1.50 simonb 786:
1.24 matt 787: if (cold || kcpuset_match(cpus_running, ci->ci_data.cpu_kcpuset))
788: return;
789:
1.30 skrll 790: kcpuset_t *kcp = ci->ci_ddbcpus;
791:
792: kcpuset_copy(kcp, cpus_running);
1.24 matt 793: kcpuset_remove(kcp, ci->ci_data.cpu_kcpuset);
794: kcpuset_remove(kcp, cpus_paused);
1.2 matt 795:
1.24 matt 796: cpu_broadcast_ipi(IPI_SUSPEND);
797: cpu_ipi_wait("pause", cpus_paused, kcp);
1.2 matt 798: }
799:
800: /*
801: * Resume a single cpu
802: */
803: void
1.24 matt 804: cpu_resume(cpuid_t cii)
1.2 matt 805: {
1.50 simonb 806:
1.24 matt 807: if (__predict_false(cold))
808: return;
809:
1.30 skrll 810: struct cpu_info * const ci = curcpu();
811: kcpuset_t *kcp = ci->ci_ddbcpus;
812:
1.24 matt 813: kcpuset_set(kcp, cii);
814: kcpuset_atomicly_remove(cpus_resumed, cpus_resumed);
815: kcpuset_atomic_clear(cpus_paused, cii);
816:
817: cpu_ipi_wait("resume", cpus_resumed, kcp);
1.2 matt 818: }
819:
820: /*
821: * Resume all paused cpus.
822: */
823: void
824: cpu_resume_others(void)
825: {
1.50 simonb 826:
1.24 matt 827: if (__predict_false(cold))
828: return;
1.2 matt 829:
1.30 skrll 830: struct cpu_info * const ci = curcpu();
831: kcpuset_t *kcp = ci->ci_ddbcpus;
832:
1.24 matt 833: kcpuset_atomicly_remove(cpus_resumed, cpus_resumed);
1.30 skrll 834: kcpuset_copy(kcp, cpus_paused);
1.24 matt 835: kcpuset_atomicly_remove(cpus_paused, cpus_paused);
1.2 matt 836:
  837:	/* CPUs wake up when their bit in cpus_paused is cleared */
1.24 matt 838: cpu_ipi_wait("resume", cpus_resumed, kcp);
1.2 matt 839: }
840:
1.24 matt 841: bool
842: cpu_is_paused(cpuid_t cii)
1.2 matt 843: {
844:
1.24 matt 845: return !cold && kcpuset_isset(cpus_paused, cii);
1.2 matt 846: }
847:
1.7 cliff 848: #ifdef DDB
1.2 matt 849: void
850: cpu_debug_dump(void)
851: {
852: CPU_INFO_ITERATOR cii;
853: struct cpu_info *ci;
854: char running, hatched, paused, resumed, halted;
1.58 ! skrll 855: db_printf("CPU CPUID STATE CPUINFO CPL INT MTX IPIS(A/R)\n");
1.2 matt 856: for (CPU_INFO_FOREACH(cii, ci)) {
1.24 matt 857: hatched = (kcpuset_isset(cpus_hatched, cpu_index(ci)) ? 'H' : '-');
858: running = (kcpuset_isset(cpus_running, cpu_index(ci)) ? 'R' : '-');
859: paused = (kcpuset_isset(cpus_paused, cpu_index(ci)) ? 'P' : '-');
860: resumed = (kcpuset_isset(cpus_resumed, cpu_index(ci)) ? 'r' : '-');
861: halted = (kcpuset_isset(cpus_halted, cpu_index(ci)) ? 'h' : '-');
1.2 matt 862: db_printf("%3d 0x%03lx %c%c%c%c%c %p "
863: "%3d %3d %3d "
864: "0x%02" PRIx64 "/0x%02" PRIx64 "\n",
865: cpu_index(ci), ci->ci_cpuid,
866: running, hatched, paused, resumed, halted,
867: ci, ci->ci_cpl, ci->ci_idepth, ci->ci_mtx_count,
868: ci->ci_active_ipis, ci->ci_request_ipis);
869: }
870: }
1.7 cliff 871: #endif
1.2 matt 872:
873: void
874: cpu_hatch(struct cpu_info *ci)
875: {
876: struct pmap_tlb_info * const ti = ci->ci_tlb_info;
877:
878: /*
  879:	 * Invalidate all the TLB entries (even wired ones) and then reserve
880: * space for the wired TLB entries.
881: */
882: mips3_cp0_wired_write(0);
883: tlb_invalidate_all();
884: mips3_cp0_wired_write(ti->ti_wired);
885:
886: /*
1.5   matt    887:	 * Set up the HWRENA and USERLOCAL COP0 registers (MIPSxxR2).
888: */
889: cpu_hwrena_setup();
890:
891: /*
1.2 matt 892: * If we are using register zero relative addressing to access cpu_info
  893:	 * in the exception vectors, enter that mapping into the TLB now.
894: */
895: if (ci->ci_tlb_slot >= 0) {
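		/*
		 * The wired entry maps the last page of the address
		 * space (tlb_hi = -PAGE_SIZE), which the vectors can
		 * reach with negative offsets from register $zero,
		 * e.g. something like "lw $k1, -N($zero)".
		 */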
896: const uint32_t tlb_lo = MIPS3_PG_G|MIPS3_PG_V
897: | mips3_paddr_to_tlbpfn((vaddr_t)ci);
1.27 matt 898: const struct tlbmask tlbmask = {
899: .tlb_hi = -PAGE_SIZE | KERNEL_PID,
900: #if (PGSHIFT & 1)
901: .tlb_lo0 = tlb_lo,
902: .tlb_lo1 = tlb_lo + MIPS3_PG_NEXT,
903: #else
904: .tlb_lo0 = 0,
905: .tlb_lo1 = tlb_lo,
906: #endif
907: .tlb_mask = -1,
908: };
1.2 matt 909:
1.27 matt 910: tlb_invalidate_addr(tlbmask.tlb_hi, KERNEL_PID);
911: tlb_write_entry(ci->ci_tlb_slot, &tlbmask);
1.2 matt 912: }
913:
914: /*
  915:	 * Flush the icache just to be sure.
916: */
917: mips_icache_sync_all();
918:
919: /*
920: * Let this CPU do its own initialization (for things that have to be
921: * done on the local CPU).
922: */
923: (*mips_locoresw.lsw_cpu_init)(ci);
924:
1.21 matt 925: // Show this CPU as present.
926: atomic_or_ulong(&ci->ci_flags, CPUF_PRESENT);
927:
1.2 matt 928: /*
929: * Announce we are hatched
930: */
1.24 matt 931: kcpuset_atomic_set(cpus_hatched, cpu_index(ci));
1.2 matt 932:
933: /*
934: * Now wait to be set free!
935: */
1.24 matt 936: while (! kcpuset_isset(cpus_running, cpu_index(ci))) {
1.2 matt 937: /* spin, spin, spin */
938: }
939:
940: /*
941: * initialize the MIPS count/compare clock
942: */
943: mips3_cp0_count_write(ci->ci_data.cpu_cc_skew);
944: KASSERT(ci->ci_cycles_per_hz != 0);
945: ci->ci_next_cp0_clk_intr = ci->ci_data.cpu_cc_skew + ci->ci_cycles_per_hz;
946: mips3_cp0_compare_write(ci->ci_next_cp0_clk_intr);
947: ci->ci_data.cpu_cc_skew = 0;
948:
949: /*
1.7 cliff 950: * Let this CPU do its own post-running initialization
951: * (for things that have to be done on the local CPU).
952: */
1.8 cliff 953: (*mips_locoresw.lsw_cpu_run)(ci);
1.7 cliff 954:
955: /*
1.23 matt 956: * Now turn on interrupts (and verify they are on).
1.2 matt 957: */
958: spl0();
1.23 matt 959: KASSERTMSG(ci->ci_cpl == IPL_NONE, "cpl %d", ci->ci_cpl);
960: KASSERT(mips_cp0_status_read() & MIPS_SR_INT_IE);
1.2 matt 961:
1.24 matt 962: kcpuset_atomic_set(pmap_kernel()->pm_onproc, cpu_index(ci));
963: kcpuset_atomic_set(pmap_kernel()->pm_active, cpu_index(ci));
964:
1.2 matt 965: /*
966: * And do a tail call to idle_loop
967: */
968: idle_loop(NULL);
969: }
970:
971: void
972: cpu_boot_secondary_processors(void)
973: {
1.20 matt 974: CPU_INFO_ITERATOR cii;
975: struct cpu_info *ci;
1.50 simonb 976:
1.56 simonb 977: if ((boothowto & RB_MD1) != 0)
978: return;
979:
1.20 matt 980: for (CPU_INFO_FOREACH(cii, ci)) {
981: if (CPU_IS_PRIMARY(ci))
982: continue;
1.2 matt 983: KASSERT(ci->ci_data.cpu_idlelwp);
984:
985: /*
1.43 msaitoh 986: * Skip this CPU if it didn't successfully hatch.
1.2 matt 987: */
1.24 matt 988: if (!kcpuset_isset(cpus_hatched, cpu_index(ci)))
1.2 matt 989: continue;
990:
991: ci->ci_data.cpu_cc_skew = mips3_cp0_count_read();
992: atomic_or_ulong(&ci->ci_flags, CPUF_RUNNING);
1.24 matt 993: kcpuset_set(cpus_running, cpu_index(ci));
1.23  matt    994:		// Spin (for at most ~10 seconds) until the cpu calls idle_loop
1.53 jmcneill 995: for (u_int i = 0; i < 10000; i++) {
996: if (kcpuset_isset(kcpuset_running, cpu_index(ci)))
1.23 matt 997: break;
998: delay(1000);
999: }
1.2 matt 1000: }
1001: }
1002:
1003: void
1004: xc_send_ipi(struct cpu_info *ci)
1005: {
1006:
1.12 matt 1007: (*mips_locoresw.lsw_send_ipi)(ci, IPI_XCALL);
1.2 matt 1008: }
1.17 rmind 1009:
1010: void
1011: cpu_ipi(struct cpu_info *ci)
1012: {
1.50 simonb 1013:
1.17 rmind 1014: (*mips_locoresw.lsw_send_ipi)(ci, IPI_GENERIC);
1015: }
1016:
1.2 matt 1017: #endif /* MULTIPROCESSOR */
1018:
1019: void
1020: cpu_offline_md(void)
1021: {
1022:
1023: (*mips_locoresw.lsw_cpu_offline_md)();
1024: }
1025:
1026: #ifdef _LP64
1027: void
1028: cpu_vmspace_exec(lwp_t *l, vaddr_t start, vaddr_t end)
1029: {
1030: /*
  1031:	 * We need to turn UX on/off so that copyout/copyin will work;
  1032:	 * this must happen well before setreg gets called.
1033: */
1034: uint32_t sr = mips_cp0_status_read();
1.50 simonb 1035:
1.2 matt 1036: if (end != (uint32_t) end) {
1037: mips_cp0_status_write(sr | MIPS3_SR_UX);
1038: } else {
1039: mips_cp0_status_write(sr & ~MIPS3_SR_UX);
1040: }
1041: }
1042: #endif
1.5 matt 1043:
1044: int
1045: cpu_lwp_setprivate(lwp_t *l, void *v)
1046: {
1.50 simonb 1047:
1.5 matt 1048: #if (MIPS32R2 + MIPS64R2) > 0
1.48 simonb 1049: if (l == curlwp && MIPS_HAS_USERLOCAL) {
1.5 matt 1050: mipsNN_cp0_userlocal_write(v);
1051: }
1052: #endif
1053: return 0;
1054: }
1.7 cliff 1055:
1056:
1.9 cliff 1057: #if (MIPS32 + MIPS32R2 + MIPS64 + MIPS64R2) > 0
1058:
1.7 cliff 1059: #if (CPUWATCH_MAX != 8)
1060: # error CPUWATCH_MAX
1061: #endif
1062:
1063: /*
1064: * cpuwatch_discover - determine how many COP0 watchpoints this CPU supports
1065: */
1066: u_int
1067: cpuwatch_discover(void)
1068: {
1069: int i;
1070:
1071: for (i=0; i < CPUWATCH_MAX; i++) {
1072: uint32_t watchhi = mipsNN_cp0_watchhi_read(i);
1073: if ((watchhi & __BIT(31)) == 0) /* test 'M' bit */
1074: break;
1075: }
1076: return i + 1;
1077: }
1078:
1079: void
1080: cpuwatch_free(cpu_watchpoint_t *cwp)
1081: {
1082: #ifdef DIAGNOSTIC
1083: struct cpu_info * const ci = curcpu();
1.50 simonb 1084:
1.7 cliff 1085: KASSERT(cwp >= &ci->ci_cpuwatch_tab[0] &&
1086: cwp <= &ci->ci_cpuwatch_tab[ci->ci_cpuwatch_count-1]);
1087: #endif
1088: cwp->cw_mode = 0;
1089: cwp->cw_asid = 0;
1090: cwp->cw_addr = 0;
1091: cpuwatch_clr(cwp);
1092: }
1093:
1094: /*
1095: * cpuwatch_alloc
1096: * find an empty slot
1097: * no locking for the table since it is CPU private
1098: */
1099: cpu_watchpoint_t *
1100: cpuwatch_alloc(void)
1101: {
1102: struct cpu_info * const ci = curcpu();
1103: cpu_watchpoint_t *cwp;
1104:
1105: for (int i=0; i < ci->ci_cpuwatch_count; i++) {
1106: cwp = &ci->ci_cpuwatch_tab[i];
1107: if ((cwp->cw_mode & CPUWATCH_RWX) == 0)
1108: return cwp;
1109: }
1110: return NULL;
1111: }
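
/*
 * Usage sketch (illustrative only): a debugger back-end could establish
 * a write watchpoint on a user address va in address space asid roughly
 * like this, using the fields consumed by cpuwatch_set() below:
 *
 *	cpu_watchpoint_t *cwp = cpuwatch_alloc();
 *	if (cwp != NULL) {
 *		cwp->cw_addr = va;
 *		cwp->cw_asid = asid;
 *		cwp->cw_mode = CPUWATCH_WRITE | CPUWATCH_ASID;
 *		cpuwatch_set(cwp);
 *	}
 */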
1112:
1113:
1114: void
1115: cpuwatch_set_all(void)
1116: {
1117: struct cpu_info * const ci = curcpu();
1118: cpu_watchpoint_t *cwp;
1119: int i;
1120:
1121: for (i=0; i < ci->ci_cpuwatch_count; i++) {
1122: cwp = &ci->ci_cpuwatch_tab[i];
1123: if ((cwp->cw_mode & CPUWATCH_RWX) != 0)
1124: cpuwatch_set(cwp);
1125: }
1126: }
1127:
1128: void
1129: cpuwatch_clr_all(void)
1130: {
1131: struct cpu_info * const ci = curcpu();
1132: cpu_watchpoint_t *cwp;
1133: int i;
1134:
1135: for (i=0; i < ci->ci_cpuwatch_count; i++) {
1136: cwp = &ci->ci_cpuwatch_tab[i];
1137: if ((cwp->cw_mode & CPUWATCH_RWX) != 0)
1138: cpuwatch_clr(cwp);
1139: }
1140: }
1141:
1142: /*
1143: * cpuwatch_set - establish a MIPS COP0 watchpoint
1144: */
1145: void
1146: cpuwatch_set(cpu_watchpoint_t *cwp)
1147: {
1148: struct cpu_info * const ci = curcpu();
1149: uint32_t watchhi;
1150: register_t watchlo;
1151: int cwnum = cwp - &ci->ci_cpuwatch_tab[0];
1152:
1153: KASSERT(cwp >= &ci->ci_cpuwatch_tab[0] &&
1154: cwp <= &ci->ci_cpuwatch_tab[ci->ci_cpuwatch_count-1]);
1155:
1156: watchlo = cwp->cw_addr;
1157: if (cwp->cw_mode & CPUWATCH_WRITE)
1158: watchlo |= __BIT(0);
1159: if (cwp->cw_mode & CPUWATCH_READ)
1160: watchlo |= __BIT(1);
1161: if (cwp->cw_mode & CPUWATCH_EXEC)
1162: watchlo |= __BIT(2);
1163:
1164: if (cwp->cw_mode & CPUWATCH_ASID)
1165: watchhi = cwp->cw_asid << 16; /* addr qualified by asid */
1166: else
1167: watchhi = __BIT(30); /* addr not qual. by asid (Global) */
1168: if (cwp->cw_mode & CPUWATCH_MASK)
  1169:		watchhi |= cwp->cw_mask;	/* set "don't care" addr match bits */
1170:
1171: mipsNN_cp0_watchhi_write(cwnum, watchhi);
1172: mipsNN_cp0_watchlo_write(cwnum, watchlo);
1173: }
1174:
1175: /*
1176: * cpuwatch_clr - disestablish a MIPS COP0 watchpoint
1177: */
1178: void
1179: cpuwatch_clr(cpu_watchpoint_t *cwp)
1180: {
1181: struct cpu_info * const ci = curcpu();
1182: int cwnum = cwp - &ci->ci_cpuwatch_tab[0];
1183:
1184: KASSERT(cwp >= &ci->ci_cpuwatch_tab[0] &&
1185: cwp <= &ci->ci_cpuwatch_tab[ci->ci_cpuwatch_count-1]);
1186:
1187: mipsNN_cp0_watchhi_write(cwnum, 0);
1188: mipsNN_cp0_watchlo_write(cwnum, 0);
1189: }
1190:
1.9 cliff 1191: #endif /* (MIPS32 + MIPS32R2 + MIPS64 + MIPS64R2) > 0 */