Annotation of src/sys/arch/sparc64/sparc64/pmap.c, Revision 1.296
1.296 ! martin 1: /* $NetBSD: pmap.c,v 1.295 2015/06/14 19:05:27 martin Exp $ */
1.1 eeh 2: /*
1.156 pk 3: *
1.41 eeh 4: * Copyright (C) 1996-1999 Eduardo Horvath.
1.1 eeh 5: * All rights reserved.
6: *
1.41 eeh 7: *
1.1 eeh 8: * Redistribution and use in source and binary forms, with or without
9: * modification, are permitted provided that the following conditions
10: * are met:
11: * 1. Redistributions of source code must retain the above copyright
12: * notice, this list of conditions and the following disclaimer.
1.156 pk 13: *
1.41 eeh 14: * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
15: * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16: * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17: * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE
18: * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19: * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20: * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21: * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22: * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23: * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24: * SUCH DAMAGE.
25: *
1.1 eeh 26: */
1.142 lukem 27:
28: #include <sys/cdefs.h>
1.296 ! martin 29: __KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.295 2015/06/14 19:05:27 martin Exp $");
1.142 lukem 30:
31: #undef NO_VCACHE /* Don't forget the locked TLB in dostart */
32: #define HWREF
1.27 mrg 33:
1.8 eeh 34: #include "opt_ddb.h"
1.172 rjs 35: #include "opt_multiprocessor.h"
1.276 martin 36: #include "opt_modular.h"
1.1 eeh 37:
38: #include <sys/param.h>
39: #include <sys/malloc.h>
40: #include <sys/queue.h>
41: #include <sys/systm.h>
42: #include <sys/msgbuf.h>
1.54 eeh 43: #include <sys/pool.h>
1.2 eeh 44: #include <sys/exec.h>
45: #include <sys/core.h>
46: #include <sys/kcore.h>
1.113 chs 47: #include <sys/proc.h>
1.203 ad 48: #include <sys/atomic.h>
49: #include <sys/cpu.h>
1.1 eeh 50:
1.238 matt 51: #include <sys/exec_aout.h> /* for MID_* */
52:
1.1 eeh 53: #include <uvm/uvm.h>
54:
55: #include <machine/pcb.h>
56: #include <machine/sparc64.h>
57: #include <machine/ctlreg.h>
1.156 pk 58: #include <machine/promlib.h>
1.2 eeh 59: #include <machine/kcore.h>
1.168 cdi 60: #include <machine/bootinfo.h>
1.1 eeh 61:
1.224 nakayama 62: #include <sparc64/sparc64/cache.h>
1.282 palle 63: #ifdef SUN4V
64: #include <sparc64/hypervisor.h>
65: #endif
1.1 eeh 66:
1.8 eeh 67: #ifdef DDB
68: #include <machine/db_machdep.h>
69: #include <ddb/db_command.h>
70: #include <ddb/db_sym.h>
71: #include <ddb/db_variables.h>
72: #include <ddb/db_extern.h>
73: #include <ddb/db_access.h>
74: #include <ddb/db_output.h>
75: #else
1.7 mrg 76: #define Debugger()
1.25 eeh 77: #define db_printf printf
1.7 mrg 78: #endif
1.1 eeh 79:
1.98 eeh 80: #define MEG (1<<20) /* 1MB */
81: #define KB (1<<10) /* 1KB */
82:
1.152 petrov 83: paddr_t cpu0paddr; /* contiguous phys memory preallocated for cpus */
1.54 eeh 84:
1.1 eeh 85: /* These routines are in assembly to allow access thru physical mappings */
1.259 mrg 86: extern int64_t pseg_get_real(struct pmap *, vaddr_t);
87: extern int pseg_set_real(struct pmap *, vaddr_t, int64_t, paddr_t);
1.41 eeh 88:
89: /*
90: * Diatribe on ref/mod counting:
91: *
92: * First of all, ref/mod info must be non-volatile. Hence we need to keep it
1.156 pk 93: * in the pv_entry structure for each page. (We could bypass this for the
1.97 chs 94: * vm_page, but that's a long story....)
1.156 pk 95: *
1.41 eeh 96: * This architecture has nice, fast traps with lots of space for software bits
1.104 wiz 97: * in the TTE. To accelerate ref/mod counts we make use of these features.
1.41 eeh 98: *
1.156 pk 99: * When we map a page initially, we place a TTE in the page table. It's
1.41 eeh 100: * inserted with the TLB_W and TLB_ACCESS bits cleared. If a page is really
1.131 wiz 101: * writable we set the TLB_REAL_W bit for the trap handler.
1.41 eeh 102: *
103: * Whenever we take a TLB miss trap, the trap handler will set the TLB_ACCESS
104: * bit in the approprate TTE in the page table. Whenever we take a protection
105: * fault, if the TLB_REAL_W bit is set then we flip both the TLB_W and TLB_MOD
106: * bits to enable writing and mark the page as modified.
107: *
108: * This means that we may have ref/mod information all over the place. The
109: * pmap routines must traverse the page tables of all pmaps with a given page
110: * and collect/clear all the ref/mod information and copy it into the pv_entry.
111: */
112:
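/*
 * Illustrative sketch only (not part of this file): the protection-fault
 * promotion described above, spelled out in C.  The helper name is
 * hypothetical; pseg_get()/pseg_set() and the TLB_* bits are the ones
 * discussed in the comment above.
 */
#if 0
static int
pmap_write_fault_sketch(struct pmap *pm, vaddr_t va)
{
	int64_t data = pseg_get(pm, va);

	if ((data & TLB_V) == 0 || (data & TLB_REAL_W) == 0)
		return 0;	/* unmapped, or genuinely read-only */
	/* Really writable: enable writes and record the modification. */
	data |= TLB_W | TLB_MOD;
	(void)pseg_set(pm, va, data, 0);	/* entry exists: no spare page needed */
	return 1;	/* retry the faulting access */
}
#endif
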
1.56 eeh 113: #ifdef NO_VCACHE
114: #define FORCE_ALIAS 1
115: #else
116: #define FORCE_ALIAS 0
117: #endif
118:
1.1 eeh 119: #define PV_ALIAS 0x1LL
120: #define PV_REF 0x2LL
121: #define PV_MOD 0x4LL
1.2 eeh 122: #define PV_NVC 0x8LL
1.86 eeh 123: #define PV_NC 0x10LL
1.127 chs 124: #define PV_WE 0x20LL /* Debug -- this page was writable sometime */
1.86 eeh 125: #define PV_MASK (0x03fLL)
1.138 thorpej 126: #define PV_VAMASK (~(PAGE_SIZE - 1))
1.127 chs 127: #define PV_MATCH(pv,va) (!(((pv)->pv_va ^ (va)) & PV_VAMASK))
128: #define PV_SETVA(pv,va) ((pv)->pv_va = (((va) & PV_VAMASK) | \
129: (((pv)->pv_va) & PV_MASK)))
130:
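/*
 * Illustrative note: pv_va packs the page-aligned VA and the PV_* bits
 * into a single word.  For va 0x2000 with PV_REF|PV_MOD set, pv_va is
 * 0x2006; PV_MATCH(pv, 0x2000) holds, and ((pv)->pv_va & PV_MASK)
 * recovers the flags.
 */
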
1.203 ad 131: struct pool_cache pmap_cache;
132: struct pool_cache pmap_pv_cache;
1.127 chs 133:
1.170 cdi 134: pv_entry_t pmap_remove_pv(struct pmap *, vaddr_t, struct vm_page *);
135: void pmap_enter_pv(struct pmap *, vaddr_t, paddr_t, struct vm_page *,
136: pv_entry_t);
137: void pmap_page_cache(struct pmap *, paddr_t, int);
1.1 eeh 138:
139: /*
1.127 chs 140: * First and last managed physical addresses.
141: * XXX only used for dumping the system.
1.1 eeh 142: */
1.4 eeh 143: paddr_t vm_first_phys, vm_num_phys;
1.1 eeh 144:
145: /*
146: * Here's the CPU TSB stuff. It's allocated in pmap_bootstrap.
147: */
148: int tsbsize; /* tsbents = 512 * 2^tsbsize */
149: #define TSBENTS (512<<tsbsize)
150: #define TSBSIZE (TSBENTS * 16)
151:
1.230 martin 152: static struct pmap kernel_pmap_;
1.231 pooka 153: struct pmap *const kernel_pmap_ptr = &kernel_pmap_;
1.1 eeh 154:
1.212 nakayama 155: static int ctx_alloc(struct pmap *);
1.226 spz 156: static bool pmap_is_referenced_locked(struct vm_page *);
157:
1.212 nakayama 158: static void ctx_free(struct pmap *, struct cpu_info *);
1.216 martin 159:
160: /*
161: * Check if any MMU has a non-zero context
162: */
163: static inline bool
164: pmap_has_ctx(struct pmap *p)
165: {
166: int i;
167:
168: /* any context on any cpu? */
169: for (i = 0; i < sparc_ncpus; i++)
170: if (p->pm_ctx[i] > 0)
171: return true;
172:
173: return false;
174: }
175:
1.257 mrg 176: #ifdef MULTIPROCESSOR
177: #define pmap_ctx(PM) ((PM)->pm_ctx[cpu_number()])
178: #else
179: #define pmap_ctx(PM) ((PM)->pm_ctx[0])
180: #endif
181:
1.216 martin 182: /*
183: * Check if this pmap has a live mapping on some MMU.
184: */
185: static inline bool
186: pmap_is_on_mmu(struct pmap *p)
187: {
188: /* The kernel pmap is always on all MMUs */
189: if (p == pmap_kernel())
190: return true;
191:
192: return pmap_has_ctx(p);
193: }
1.210 martin 194:
1.56 eeh 195: /*
196: * Virtual and physical addresses of the start and end of kernel text
197: * and data segments.
198: */
199: vaddr_t ktext;
200: paddr_t ktextp;
201: vaddr_t ektext;
202: paddr_t ektextp;
203: vaddr_t kdata;
204: paddr_t kdatap;
205: vaddr_t ekdata;
206: paddr_t ekdatap;
207:
1.168 cdi 208: /*
209: * Kernel 4MB pages.
210: */
211: extern struct tlb_entry *kernel_tlbs;
1.295 martin 212: extern int kernel_dtlb_slots, kernel_itlb_slots;
1.168 cdi 213:
1.1 eeh 214: static int npgs;
215:
1.56 eeh 216: vaddr_t vmmap; /* one reserved MI vpage for /dev/mem */
1.2 eeh 217:
1.168 cdi 218: int phys_installed_size; /* Installed physical memory */
219: struct mem_region *phys_installed;
1.1 eeh 220:
1.123 eeh 221: paddr_t avail_start, avail_end; /* These are used by ps & family */
1.1 eeh 222:
1.170 cdi 223: static int ptelookup_va(vaddr_t va);
1.143 chs 224:
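/* Store a 64-bit zero ("clear extended word") at addr. */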
1.166 perry 225: static inline void
1.143 chs 226: clrx(void *addr)
227: {
1.166 perry 228: __asm volatile("clrx [%0]" : : "r" (addr) : "memory");
1.143 chs 229: }
230:
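/*
 * Drop the TSB entry for va in pm from both the data and instruction
 * TSBs, on every active cpu (MULTIPROCESSOR) or just this one.  User
 * pmaps are only touched on cpus where they currently hold a context.
 */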
1.212 nakayama 231: static void
232: tsb_invalidate(vaddr_t va, pmap_t pm)
233: {
234: struct cpu_info *ci;
235: int ctx;
236: bool kpm = (pm == pmap_kernel());
237: int i;
238: int64_t tag;
239:
240: i = ptelookup_va(va);
1.257 mrg 241: #ifdef MULTIPROCESSOR
1.212 nakayama 242: for (ci = cpus; ci != NULL; ci = ci->ci_next) {
243: if (!CPUSET_HAS(cpus_active, ci->ci_index))
244: continue;
1.257 mrg 245: #else
246: ci = curcpu();
247: #endif
1.212 nakayama 248: ctx = pm->pm_ctx[ci->ci_index];
249: if (kpm || ctx > 0) {
250: tag = TSB_TAG(0, ctx, va);
251: if (ci->ci_tsb_dmmu[i].tag == tag) {
252: clrx(&ci->ci_tsb_dmmu[i].data);
253: }
254: if (ci->ci_tsb_immu[i].tag == tag) {
255: clrx(&ci->ci_tsb_immu[i].data);
256: }
257: }
1.257 mrg 258: #ifdef MULTIPROCESSOR
1.212 nakayama 259: }
1.257 mrg 260: #endif
1.212 nakayama 261: }
1.1 eeh 262:
263: struct prom_map *prom_map;
264: int prom_map_size;
265:
1.278 mrg 266: #define PDB_CREATE 0x000001
267: #define PDB_DESTROY 0x000002
268: #define PDB_REMOVE 0x000004
269: #define PDB_CHANGEPROT 0x000008
270: #define PDB_ENTER 0x000010
271: #define PDB_DEMAP 0x000020 /* used in locore */
272: #define PDB_REF 0x000040
273: #define PDB_COPY 0x000080
274: #define PDB_MMU_ALLOC 0x000100
275: #define PDB_MMU_STEAL 0x000200
276: #define PDB_CTX_ALLOC 0x000400
277: #define PDB_CTX_STEAL 0x000800
278: #define PDB_MMUREG_ALLOC 0x001000
279: #define PDB_MMUREG_STEAL 0x002000
280: #define PDB_CACHESTUFF 0x004000
281: #define PDB_ALIAS 0x008000
282: #define PDB_EXTRACT 0x010000
283: #define PDB_BOOT 0x020000
284: #define PDB_BOOT1 0x040000
285: #define PDB_GROW 0x080000
286: #define PDB_CTX_FLUSHALL 0x100000
287: #define PDB_ACTIVATE 0x200000
288:
289: #if defined(DEBUG) && !defined(PMAP_DEBUG)
290: #define PMAP_DEBUG
291: #endif
292:
293: #ifdef PMAP_DEBUG
1.1 eeh 294: struct {
295: int kernel; /* entering kernel mapping */
296: int user; /* entering user mapping */
297: int ptpneeded; /* needed to allocate a PT page */
298: int pwchange; /* no mapping change, just wiring or protection */
299: int wchange; /* no mapping change, just wiring */
300: int mchange; /* was mapped but mapping to different page */
301: int managed; /* a managed page */
302: int firstpv; /* first mapping for this PA */
303: int secondpv; /* second mapping for this PA */
304: int ci; /* cache inhibited */
305: int unmanaged; /* not a managed page */
306: int flushes; /* cache flushes */
307: int cachehit; /* new entry forced valid entry out */
308: } enter_stats;
309: struct {
310: int calls;
311: int removes;
312: int flushes;
1.4 eeh 313: int tflushes; /* TLB flushes */
1.1 eeh 314: int pidflushes; /* HW pid stolen */
315: int pvfirst;
316: int pvsearch;
317: } remove_stats;
1.256 mrg 318: #define ENTER_STAT(x) do { enter_stats.x ++; } while (0)
319: #define REMOVE_STAT(x) do { remove_stats.x ++; } while (0)
1.127 chs 320:
1.75 eeh 321: int pmapdebug = 0;
1.278 mrg 322: //int pmapdebug = 0 | PDB_CTX_ALLOC | PDB_ACTIVATE;
1.1 eeh 323: /* Number of H/W pages stolen for page tables */
324: int pmap_pages_stolen = 0;
1.66 eeh 325:
326: #define BDPRINTF(n, f) if (pmapdebug & (n)) prom_printf f
1.156 pk 327: #define DPRINTF(n, f) if (pmapdebug & (n)) printf f
1.69 mrg 328: #else
1.256 mrg 329: #define ENTER_STAT(x) do { /* nothing */ } while (0)
330: #define REMOVE_STAT(x) do { /* nothing */ } while (0)
1.69 mrg 331: #define BDPRINTF(n, f)
1.77 eeh 332: #define DPRINTF(n, f)
1.278 mrg 333: #define pmapdebug 0
1.1 eeh 334: #endif
335:
336: #define pv_check()
337:
1.262 mrg 338: static int pmap_get_page(paddr_t *);
339: static void pmap_free_page(paddr_t, sparc64_cpuset_t);
340: static void pmap_free_page_noflush(paddr_t);
1.56 eeh 341:
1.203 ad 342: /*
1.262 mrg 343: * Global pmap locks.
1.203 ad 344: */
345: static kmutex_t pmap_lock;
1.250 martin 346: static bool lock_available = false;
1.56 eeh 347:
348: /*
1.98 eeh 349: * Support for big page sizes. This maps the page size to the
350: * page bits. That is: these are the bits between 8K pages and
351: * larger page sizes that cause aliasing.
352: */
1.180 martin 353: #define PSMAP_ENTRY(MASK, CODE) { .mask = MASK, .code = CODE }
1.98 eeh 354: struct page_size_map page_size_map[] = {
355: #ifdef DEBUG
1.180 martin 356: PSMAP_ENTRY(0, PGSZ_8K & 0), /* Disable large pages */
1.98 eeh 357: #endif
1.180 martin 358: PSMAP_ENTRY((4 * 1024 * 1024 - 1) & ~(8 * 1024 - 1), PGSZ_4M),
359: PSMAP_ENTRY((512 * 1024 - 1) & ~(8 * 1024 - 1), PGSZ_512K),
360: PSMAP_ENTRY((64 * 1024 - 1) & ~(8 * 1024 - 1), PGSZ_64K),
361: PSMAP_ENTRY((8 * 1024 - 1) & ~(8 * 1024 - 1), PGSZ_8K),
362: PSMAP_ENTRY(0, 0),
1.98 eeh 363: };
364:
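/*
 * Illustrative sketch (hypothetical helper, not in this file): picking
 * the largest page size whose alias mask fits a mapping, the same way
 * the loop in pmap_bootstrap() walks this table.
 */
#if 0
static const struct page_size_map *
page_size_pick_sketch(vaddr_t va, paddr_t pa, vsize_t size)
{
	const struct page_size_map *psm;

	for (psm = page_size_map; psm->mask; psm++)
		if (((va | pa) & psm->mask) == 0 && psm->mask < size)
			break;
	return psm;	/* the sentinel entry means plain 8K pages */
}
#endif
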
365: /*
1.259 mrg 366: * This probably shouldn't be necessary, but it stops USIII machines from
367: * breaking in general, and not just for MULTIPROCESSOR.
368: */
369: #define USE_LOCKSAFE_PSEG_GETSET
370: #if defined(USE_LOCKSAFE_PSEG_GETSET)
371:
372: static kmutex_t pseg_lock;
373:
374: static __inline__ int64_t
375: pseg_get_locksafe(struct pmap *pm, vaddr_t va)
376: {
377: int64_t rv;
378: bool took_lock = lock_available /*&& pm == pmap_kernel()*/;
379:
380: if (__predict_true(took_lock))
381: mutex_enter(&pseg_lock);
382: rv = pseg_get_real(pm, va);
383: if (__predict_true(took_lock))
384: mutex_exit(&pseg_lock);
385: return rv;
386: }
387:
388: static __inline__ int
389: pseg_set_locksafe(struct pmap *pm, vaddr_t va, int64_t data, paddr_t ptp)
390: {
391: int rv;
392: bool took_lock = lock_available /*&& pm == pmap_kernel()*/;
393:
394: if (__predict_true(took_lock))
395: mutex_enter(&pseg_lock);
396: rv = pseg_set_real(pm, va, data, ptp);
397: if (__predict_true(took_lock))
398: mutex_exit(&pseg_lock);
399: return rv;
400: }
401:
402: #define pseg_get(pm, va) pseg_get_locksafe(pm, va)
403: #define pseg_set(pm, va, data, ptp) pseg_set_locksafe(pm, va, data, ptp)
404:
405: #else /* USE_LOCKSAFE_PSEG_GETSET */
406:
407: #define pseg_get(pm, va) pseg_get_real(pm, va)
408: #define pseg_set(pm, va, data, ptp) pseg_set_real(pm, va, data, ptp)
409:
410: #endif /* USE_LOCKSAFE_PSEG_GETSET */
411:
412: /*
1.56 eeh 413: * Enter a TTE into the kernel pmap only. Don't do anything else.
1.156 pk 414: *
415: * Use only during bootstrapping since it does no locking and
1.73 eeh 416: * can lose ref/mod info!!!!
417: *
1.56 eeh 418: */
1.156 pk 419: static void pmap_enter_kpage(vaddr_t va, int64_t data)
1.56 eeh 420: {
421: paddr_t newp;
422:
1.146 petrov 423: newp = 0UL;
1.127 chs 424: while (pseg_set(pmap_kernel(), va, data, newp) & 1) {
1.151 petrov 425: if (!pmap_get_page(&newp)) {
1.73 eeh 426: prom_printf("pmap_enter_kpage: out of pages\n");
427: panic("pmap_enter_kpage");
428: }
1.127 chs 429:
430: ENTER_STAT(ptpneeded);
1.156 pk 431: BDPRINTF(PDB_BOOT1,
432: ("pseg_set: pm=%p va=%p data=%lx newp %lx\n",
1.66 eeh 433: pmap_kernel(), va, (long)data, (long)newp));
434: if (pmapdebug & PDB_BOOT1)
1.56 eeh 435: {int i; for (i=0; i<140000000; i++) ;}
436: }
437: }
438:
1.1 eeh 439: /*
1.111 eeh 440: * Check the bootargs to see if we need to enable bootdebug.
1.75 eeh 441: */
442: #ifdef DEBUG
1.156 pk 443: static void pmap_bootdebug(void)
1.75 eeh 444: {
1.162 christos 445: const char *cp = prom_getbootargs();
1.75 eeh 446:
1.156 pk 447: for (;;)
1.75 eeh 448: switch (*++cp) {
449: case '\0':
450: return;
451: case 'V':
1.77 eeh 452: pmapdebug |= PDB_BOOT|PDB_BOOT1;
1.75 eeh 453: break;
454: case 'D':
455: pmapdebug |= PDB_BOOT1;
456: break;
457: }
458: }
1.278 mrg 459: #else
460: #define pmap_bootdebug() /* nothing */
1.75 eeh 461: #endif
462:
1.103 eeh 463:
464: /*
465: * Calculate the correct number of page colors to use. This should be
1.138 thorpej 466: * E$ size / PAGE_SIZE. However, different CPUs can have different sized
1.103 eeh 467: * E$, so we take the maximum page-color count across all CPUs.
468: */
1.156 pk 469: static int pmap_calculate_colors(void)
470: {
471: int node;
1.103 eeh 472: int size, assoc, color, maxcolor = 1;
473:
1.156 pk 474: for (node = prom_firstchild(prom_findroot()); node != 0;
475: node = prom_nextsibling(node)) {
476: char *name = prom_getpropstring(node, "device_type");
477: if (strcmp("cpu", name) != 0)
478: continue;
479:
480: /* Found a CPU, get the E$ info. */
481: size = prom_getpropint(node, "ecache-size", -1);
482: if (size == -1) {
1.288 palle 483: /* XXX sun4v support missing */
1.156 pk 484: prom_printf("pmap_calculate_colors: node %x has "
485: "no ecache-size\n", node);
486: /* If we can't get the E$ size, skip the node */
487: continue;
1.103 eeh 488: }
1.156 pk 489:
490: assoc = prom_getpropint(node, "ecache-associativity", 1);
491: color = size/assoc/PAGE_SIZE;
492: if (color > maxcolor)
493: maxcolor = color;
1.103 eeh 494: }
495: return (maxcolor);
496: }
497:
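/*
 * Worked example (illustrative): a direct-mapped 4MB E$ with 8KB pages
 * yields 4MB / 1 / 8KB = 512 colors, while a 4-way 1MB E$ yields
 * 1MB / 4 / 8KB = 32; the scan above keeps the largest value found on
 * any CPU.
 */
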
1.156 pk 498: static void pmap_alloc_bootargs(void)
1.152 petrov 499: {
500: char *v;
501:
502: v = OF_claim(NULL, 2*PAGE_SIZE, PAGE_SIZE);
503: if ((v == NULL) || (v == (void*)-1))
1.263 mrg 504: panic("Can't claim two pages of memory.");
1.152 petrov 505:
506: memset(v, 0, 2*PAGE_SIZE);
507:
508: cpu_args = (struct cpu_bootargs*)v;
509: }
510:
1.168 cdi 511: #if defined(MULTIPROCESSOR)
512: static void pmap_mp_init(void);
513:
514: static void
515: pmap_mp_init(void)
516: {
517: pte_t *tp;
518: char *v;
519: int i;
520:
521: extern void cpu_mp_startup(void);
522:
1.173 mrg 523: if ((v = OF_claim(NULL, PAGE_SIZE, PAGE_SIZE)) == NULL) {
1.168 cdi 524: panic("pmap_mp_init: Cannot claim a page.");
525: }
526:
1.241 cegger 527: memcpy(v, mp_tramp_code, mp_tramp_code_len);
1.296 ! martin 528: *(u_long *)(v + mp_tramp_dtlb_slots) = kernel_dtlb_slots;
! 529: *(u_long *)(v + mp_tramp_itlb_slots) = kernel_itlb_slots;
1.168 cdi 530: *(u_long *)(v + mp_tramp_func) = (u_long)cpu_mp_startup;
531: *(u_long *)(v + mp_tramp_ci) = (u_long)cpu_args;
532: tp = (pte_t *)(v + mp_tramp_code_len);
1.295 martin 533: for (i = 0; i < kernel_dtlb_slots; i++) {
1.168 cdi 534: tp[i].tag = kernel_tlbs[i].te_va;
535: tp[i].data = TSB_DATA(0, /* g */
536: PGSZ_4M, /* sz */
537: kernel_tlbs[i].te_pa, /* pa */
538: 1, /* priv */
1.293 palle 539: 0, /* write */
1.168 cdi 540: 1, /* cache */
541: 1, /* aliased */
542: 1, /* valid */
543: 0 /* ie */);
544: tp[i].data |= TLB_L | TLB_CV;
1.293 palle 545:
1.295 martin 546: if (i >= kernel_itlb_slots) {
1.293 palle 547: tp[i].data |= TLB_W;
1.295 martin 548: } else {
1.293 palle 549: if (CPU_ISSUN4V)
550: tp[i].data |= SUN4V_TLB_X;
1.295 martin 551: }
1.291 palle 552:
1.220 martin 553: DPRINTF(PDB_BOOT1, ("xtlb[%d]: Tag: %" PRIx64 " Data: %"
554: PRIx64 "\n", i, tp[i].tag, tp[i].data));
1.168 cdi 555: }
556:
557: for (i = 0; i < PAGE_SIZE; i += sizeof(long))
558: flush(v + i);
559:
560: cpu_spinup_trampoline = (vaddr_t)v;
561: }
562: #else
563: #define pmap_mp_init() ((void)0)
564: #endif
565:
566: paddr_t pmap_kextract(vaddr_t va);
567:
568: paddr_t
569: pmap_kextract(vaddr_t va)
570: {
571: int i;
572: paddr_t paddr = (paddr_t)-1;
573:
1.295 martin 574: for (i = 0; i < kernel_dtlb_slots; i++) {
1.168 cdi 575: if ((va & ~PAGE_MASK_4M) == kernel_tlbs[i].te_va) {
576: paddr = kernel_tlbs[i].te_pa +
577: (paddr_t)(va & PAGE_MASK_4M);
578: break;
579: }
580: }
581:
1.295 martin 582: if (i == kernel_dtlb_slots) {
1.168 cdi 583: panic("pmap_kextract: Address %p is not from kernel space.\n"
584: "Data segment is too small?\n", (void*)va);
585: }
586:
587: return (paddr);
588: }
589:
590: /*
591: * Bootstrap kernel allocator: allocates from unused space in the 4MB kernel
592: * data segment, meaning that
593: *
594: * - Access to allocated memory will never generate a trap
595: * - Allocated chunks are never reclaimed or freed
596: * - Allocation calls do not change PROM memlists
597: */
598: static struct mem_region kdata_mem_pool;
599:
600: static void
601: kdata_alloc_init(vaddr_t va_start, vaddr_t va_end)
602: {
603: vsize_t va_size = va_end - va_start;
604:
605: kdata_mem_pool.start = va_start;
606: kdata_mem_pool.size = va_size;
607:
608: BDPRINTF(PDB_BOOT, ("kdata_alloc_init(): %d bytes @%p.\n", va_size,
609: va_start));
610: }
611:
612: static vaddr_t
613: kdata_alloc(vsize_t size, vsize_t align)
614: {
615: vaddr_t va;
616: vsize_t asize;
617:
618: asize = roundup(kdata_mem_pool.start, align) - kdata_mem_pool.start;
619:
620: kdata_mem_pool.start += asize;
621: kdata_mem_pool.size -= asize;
622:
623: if (kdata_mem_pool.size < size) {
624: panic("kdata_alloc(): Data segment is too small.\n");
625: }
626:
627: va = kdata_mem_pool.start;
628: kdata_mem_pool.start += size;
629: kdata_mem_pool.size -= size;
630:
631: BDPRINTF(PDB_BOOT, ("kdata_alloc(): Allocated %d@%p, %d free.\n",
632: size, (void*)va, kdata_mem_pool.size));
633:
634: return (va);
635: }
636:
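/*
 * Usage sketch (values taken from pmap_bootstrap() below): the pool is
 * initialized once and then carved into 64-bit-aligned chunks, e.g.
 *
 *	kdata_alloc_init(kernelend + 1024 * 1024, ekdata);
 *	avail = (struct mem_region *)kdata_alloc(sz, sizeof(uint64_t));
 *
 * Allocations are never freed, so the pool only ever shrinks.
 */
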
637: /*
638: * Unified routine for reading PROM properties.
639: */
640: static void
641: pmap_read_memlist(const char *device, const char *property, void **ml,
642: int *ml_size, vaddr_t (* ml_alloc)(vsize_t, vsize_t))
643: {
644: void *va;
645: int size, handle;
646:
647: if ( (handle = prom_finddevice(device)) == 0) {
648: prom_printf("pmap_read_memlist(): No %s device found.\n",
649: device);
650: prom_halt();
651: }
652: if ( (size = OF_getproplen(handle, property)) < 0) {
653: prom_printf("pmap_read_memlist(): %s/%s has no length.\n",
654: device, property);
655: prom_halt();
656: }
1.170 cdi 657: if ( (va = (void*)(* ml_alloc)(size, sizeof(uint64_t))) == NULL) {
1.168 cdi 658: prom_printf("pmap_read_memlist(): Cannot allocate memlist.\n");
659: prom_halt();
660: }
661: if (OF_getprop(handle, property, va, size) <= 0) {
662: prom_printf("pmap_read_memlist(): Cannot read %s/%s.\n",
663: device, property);
664: prom_halt();
665: }
666:
667: *ml = va;
668: *ml_size = size;
669: }
670:
1.75 eeh 671: /*
1.41 eeh 672: * This is called during bootstrap, before the system is really initialized.
1.1 eeh 673: *
1.56 eeh 674: * It's called with the start and end virtual addresses of the kernel. We
675: * bootstrap the pmap allocator now. We will allocate the basic structures we
676: * need to bootstrap the VM system here: the page frame tables, the TSB, and
677: * the free memory lists.
678: *
679: * Now all this is becoming a bit obsolete. maxctx is still important, but now
680: * that the kernel text and data segments are separated we really would need to
681: * provide the start and end of each segment. But we can't. The rodata
682: * segment is attached to the end of the kernel segment and has nothing to
683: * delimit its end. We could still pass in the beginning of the kernel and
684: * the beginning and end of the data segment but we could also just as easily
685: * calculate that all in here.
686: *
687: * To handle the kernel text, we need to do a reverse mapping of the start of
688: * the kernel, then traverse the free memory lists to find out how big it is.
1.1 eeh 689: */
1.56 eeh 690:
1.1 eeh 691: void
1.171 cdi 692: pmap_bootstrap(u_long kernelstart, u_long kernelend)
1.1 eeh 693: {
1.276 martin 694: #ifdef MODULAR
695: extern vaddr_t module_start, module_end;
696: #endif
1.200 martin 697: extern char etext[], data_start[]; /* start of data segment */
1.1 eeh 698: extern int msgbufmapped;
1.168 cdi 699: struct mem_region *mp, *mp1, *avail, *orig;
700: int i, j, pcnt, msgbufsiz;
1.9 eeh 701: size_t s, sz;
1.56 eeh 702: int64_t data;
1.168 cdi 703: vaddr_t va, intstk;
1.170 cdi 704: uint64_t phys_msgbuf;
1.181 mrg 705: paddr_t newp = 0;
1.168 cdi 706:
707: void *prom_memlist;
708: int prom_memlist_size;
709:
710: BDPRINTF(PDB_BOOT, ("Entered pmap_bootstrap.\n"));
711:
1.272 mrg 712: cache_setup_funcs();
713:
1.168 cdi 714: /*
715: * Calculate kernel size.
716: */
717: ktext = kernelstart;
718: ktextp = pmap_kextract(ktext);
1.200 martin 719: ektext = roundup((vaddr_t)etext, PAGE_SIZE_4M);
720: ektextp = roundup(pmap_kextract((vaddr_t)etext), PAGE_SIZE_4M);
1.168 cdi 721:
722: kdata = (vaddr_t)data_start;
723: kdatap = pmap_kextract(kdata);
724: ekdata = roundup(kernelend, PAGE_SIZE_4M);
725: ekdatap = roundup(pmap_kextract(kernelend), PAGE_SIZE_4M);
726:
727: BDPRINTF(PDB_BOOT, ("Virtual layout: text %lx-%lx, data %lx-%lx.\n",
728: ktext, ektext, kdata, ekdata));
729: BDPRINTF(PDB_BOOT, ("Physical layout: text %lx-%lx, data %lx-%lx.\n",
730: ktextp, ektextp, kdatap, ekdatap));
731:
732: /* Initialize bootstrap allocator. */
733: kdata_alloc_init(kernelend + 1 * 1024 * 1024, ekdata);
1.75 eeh 734:
735: pmap_bootdebug();
1.152 petrov 736: pmap_alloc_bootargs();
1.168 cdi 737: pmap_mp_init();
1.152 petrov 738:
1.1 eeh 739: /*
740: * set machine page size
741: */
742: uvmexp.pagesize = NBPG;
1.103 eeh 743: uvmexp.ncolors = pmap_calculate_colors();
1.1 eeh 744: uvm_setpagesize();
1.103 eeh 745:
1.1 eeh 746: /*
747: * Get hold of the message buffer.
748: */
1.71 eeh 749: msgbufp = (struct kern_msgbuf *)(vaddr_t)MSGBUF_VA;
1.289 jdc 750: msgbufsiz = MSGBUFSIZE;
1.156 pk 751: BDPRINTF(PDB_BOOT, ("Trying to allocate msgbuf at %lx, size %lx\n",
1.66 eeh 752: (long)msgbufp, (long)msgbufsiz));
1.22 mrg 753: if ((long)msgbufp !=
754: (long)(phys_msgbuf = prom_claim_virt((vaddr_t)msgbufp, msgbufsiz)))
755: prom_printf(
1.156 pk 756: "cannot get msgbuf VA, msgbufp=%p, phys_msgbuf=%lx\n",
1.22 mrg 757: (void *)msgbufp, (long)phys_msgbuf);
1.4 eeh 758: phys_msgbuf = prom_get_msgbuf(msgbufsiz, MMU_PAGE_ALIGN);
1.156 pk 759: BDPRINTF(PDB_BOOT,
760: ("We should have the memory at %lx, let's map it in\n",
1.98 eeh 761: phys_msgbuf));
1.156 pk 762: if (prom_map_phys(phys_msgbuf, msgbufsiz, (vaddr_t)msgbufp,
1.177 martin 763: -1/* sunos does this */) == -1) {
1.156 pk 764: prom_printf("Failed to map msgbuf\n");
1.177 martin 765: } else {
1.156 pk 766: BDPRINTF(PDB_BOOT, ("msgbuf mapped at %p\n",
1.98 eeh 767: (void *)msgbufp));
1.177 martin 768: }
1.1 eeh 769: msgbufmapped = 1; /* enable message buffer */
1.186 christos 770: initmsgbuf((void *)msgbufp, msgbufsiz);
1.1 eeh 771:
1.156 pk 772: /*
1.1 eeh 773: * Find out how much RAM we have installed.
774: */
1.156 pk 775: BDPRINTF(PDB_BOOT, ("pmap_bootstrap: getting phys installed\n"));
1.168 cdi 776: pmap_read_memlist("/memory", "reg", &prom_memlist, &prom_memlist_size,
777: kdata_alloc);
778: phys_installed = prom_memlist;
779: phys_installed_size = prom_memlist_size / sizeof(*phys_installed);
1.1 eeh 780:
1.66 eeh 781: if (pmapdebug & PDB_BOOT1) {
782: /* print out mem list */
1.156 pk 783: prom_printf("Installed physical memory:\n");
1.168 cdi 784: for (i = 0; i < phys_installed_size; i++) {
1.156 pk 785: prom_printf("memlist start %lx size %lx\n",
1.168 cdi 786: (u_long)phys_installed[i].start,
787: (u_long)phys_installed[i].size);
1.66 eeh 788: }
1.1 eeh 789: }
1.168 cdi 790:
1.66 eeh 791: BDPRINTF(PDB_BOOT1, ("Calculating physmem:"));
1.168 cdi 792: for (i = 0; i < phys_installed_size; i++)
793: physmem += btoc(phys_installed[i].size);
1.156 pk 794: BDPRINTF(PDB_BOOT1, (" result %x or %d pages\n",
1.66 eeh 795: (int)physmem, (int)physmem));
1.168 cdi 796:
1.156 pk 797: /*
1.1 eeh 798: * Calculate approx TSB size. This probably needs tweaking.
799: */
1.127 chs 800: if (physmem < btoc(64 * 1024 * 1024))
1.22 mrg 801: tsbsize = 0;
1.127 chs 802: else if (physmem < btoc(512 * 1024 * 1024))
1.22 mrg 803: tsbsize = 1;
804: else
805: tsbsize = 2;
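	/*
	 * Illustrative: with TSBENTS = 512 << tsbsize and 16-byte entries,
	 * this selects an 8KB, 16KB or 32KB TSB for machines with less
	 * than 64MB, less than 512MB, or more RAM, respectively.
	 */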
1.1 eeh 806:
807: /*
808: * Save the prom translations
809: */
1.168 cdi 810: pmap_read_memlist("/virtual-memory", "translations", &prom_memlist,
811: &prom_memlist_size, kdata_alloc);
812: prom_map = prom_memlist;
813: prom_map_size = prom_memlist_size / sizeof(struct prom_map);
814:
1.66 eeh 815: if (pmapdebug & PDB_BOOT) {
816: /* print out mem list */
1.156 pk 817: prom_printf("Prom xlations:\n");
1.66 eeh 818: for (i = 0; i < prom_map_size; i++) {
1.156 pk 819: prom_printf("start %016lx size %016lx tte %016lx\n",
820: (u_long)prom_map[i].vstart,
1.66 eeh 821: (u_long)prom_map[i].vsize,
822: (u_long)prom_map[i].tte);
823: }
1.156 pk 824: prom_printf("End of prom xlations\n");
1.1 eeh 825: }
1.56 eeh 826:
827: /*
1.22 mrg 828: * Here's a quick in-lined reverse bubble sort; it orders the PROM
1.56 eeh 829: * translations by descending vstart.
1.1 eeh 830: */
1.175 mrg 831: for (i = 0; i < prom_map_size; i++) {
832: for (j = i; j < prom_map_size; j++) {
1.22 mrg 833: if (prom_map[j].vstart > prom_map[i].vstart) {
1.1 eeh 834: struct prom_map tmp;
1.175 mrg 835:
1.1 eeh 836: tmp = prom_map[i];
837: prom_map[i] = prom_map[j];
838: prom_map[j] = tmp;
839: }
840: }
841: }
1.66 eeh 842: if (pmapdebug & PDB_BOOT) {
843: /* print out mem list */
1.156 pk 844: prom_printf("Prom xlations:\n");
1.66 eeh 845: for (i = 0; i < prom_map_size; i++) {
1.156 pk 846: prom_printf("start %016lx size %016lx tte %016lx\n",
847: (u_long)prom_map[i].vstart,
1.66 eeh 848: (u_long)prom_map[i].vsize,
849: (u_long)prom_map[i].tte);
850: }
1.156 pk 851: prom_printf("End of prom xlations\n");
1.1 eeh 852: }
853:
854: /*
1.195 martin 855: * Allocate 64KB per cpu for the cpu_info & stack structures now.
1.56 eeh 856: */
1.195 martin 857: cpu0paddr = prom_alloc_phys(8 * PAGE_SIZE * sparc_ncpus, 8 * PAGE_SIZE);
1.175 mrg 858: if (cpu0paddr == 0) {
859: prom_printf("Cannot allocate cpu_infos\n");
1.156 pk 860: prom_halt();
1.56 eeh 861: }
862:
863: /*
864: * Now the kernel text segment is in its final location we can try to
1.156 pk 865: * find out how much memory really is free.
1.56 eeh 866: */
1.168 cdi 867: pmap_read_memlist("/memory", "available", &prom_memlist,
868: &prom_memlist_size, kdata_alloc);
869: orig = prom_memlist;
870: sz = prom_memlist_size;
871: pcnt = prom_memlist_size / sizeof(*orig);
872:
873: BDPRINTF(PDB_BOOT1, ("Available physical memory:\n"));
1.170 cdi 874: avail = (struct mem_region*)kdata_alloc(sz, sizeof(uint64_t));
1.168 cdi 875: for (i = 0; i < pcnt; i++) {
876: avail[i] = orig[i];
877: BDPRINTF(PDB_BOOT1, ("memlist start %lx size %lx\n",
878: (u_long)orig[i].start,
879: (u_long)orig[i].size));
1.56 eeh 880: }
1.168 cdi 881: BDPRINTF(PDB_BOOT1, ("End of available physical memory\n"));
1.56 eeh 882:
1.168 cdi 883: BDPRINTF(PDB_BOOT, ("ktext %08lx[%08lx] - %08lx[%08lx] : "
884: "kdata %08lx[%08lx] - %08lx[%08lx]\n",
885: (u_long)ktext, (u_long)ktextp,
886: (u_long)ektext, (u_long)ektextp,
887: (u_long)kdata, (u_long)kdatap,
888: (u_long)ekdata, (u_long)ekdatap));
1.66 eeh 889: if (pmapdebug & PDB_BOOT1) {
890: /* print out mem list */
1.156 pk 891: prom_printf("Available %lx physical memory before cleanup:\n",
1.66 eeh 892: (u_long)avail);
1.168 cdi 893: for (i = 0; i < pcnt; i++) {
1.156 pk 894: prom_printf("memlist start %lx size %lx\n",
1.168 cdi 895: (u_long)avail[i].start,
896: (u_long)avail[i].size);
1.66 eeh 897: }
1.156 pk 898: prom_printf("End of available physical memory before cleanup\n");
899: prom_printf("kernel physical text size %08lx - %08lx\n",
1.66 eeh 900: (u_long)ktextp, (u_long)ektextp);
1.156 pk 901: prom_printf("kernel physical data size %08lx - %08lx\n",
1.66 eeh 902: (u_long)kdatap, (u_long)ekdatap);
903: }
1.278 mrg 904:
1.1 eeh 905: /*
906: * Here's another quick in-lined bubble sort.
907: */
1.22 mrg 908: for (i = 0; i < pcnt; i++) {
909: for (j = i; j < pcnt; j++) {
910: if (avail[j].start < avail[i].start) {
1.1 eeh 911: struct mem_region tmp;
912: tmp = avail[i];
913: avail[i] = avail[j];
914: avail[j] = tmp;
915: }
916: }
917: }
918:
1.56 eeh 919: /* Throw away page zero if we have it. */
920: if (avail->start == 0) {
1.138 thorpej 921: avail->start += PAGE_SIZE;
922: avail->size -= PAGE_SIZE;
1.56 eeh 923: }
1.155 chs 924:
1.56 eeh 925: /*
926: * Now we need to remove the area we valloc'ed from the available
927: * memory lists. (NB: we may have already alloc'ed the entire space).
928: */
1.1 eeh 929: npgs = 0;
1.168 cdi 930: for (mp = avail, i = 0; i < pcnt; i++, mp = &avail[i]) {
1.1 eeh 931: /*
932: * Now page align the start of the region.
933: */
1.138 thorpej 934: s = mp->start % PAGE_SIZE;
1.1 eeh 935: if (mp->size >= s) {
936: mp->size -= s;
937: mp->start += s;
938: }
939: /*
940: * And now align the size of the region.
941: */
1.138 thorpej 942: mp->size -= mp->size % PAGE_SIZE;
1.1 eeh 943: /*
944: * Check whether some memory is left here.
945: */
946: if (mp->size == 0) {
1.149 martin 947: memcpy(mp, mp + 1,
1.1 eeh 948: (pcnt - (mp - avail)) * sizeof *mp);
949: pcnt--;
950: mp--;
951: continue;
952: }
953: s = mp->start;
954: sz = mp->size;
955: npgs += btoc(sz);
956: for (mp1 = avail; mp1 < mp; mp1++)
957: if (s < mp1->start)
958: break;
959: if (mp1 < mp) {
1.149 martin 960: memcpy(mp1 + 1, mp1, (char *)mp - (char *)mp1);
1.1 eeh 961: mp1->start = s;
962: mp1->size = sz;
963: }
1.73 eeh 964: #ifdef DEBUG
965: /* Clear all memory we give to the VM system. I want to make sure
966: * the PROM isn't using it for something, so this should break the PROM.
967: */
1.116 pk 968:
969: /* Calling pmap_zero_page() at this point also hangs some machines
970: * so don't do it at all. -- pk 26/02/2002
971: */
972: #if 0
1.73 eeh 973: {
974: paddr_t p;
1.138 thorpej 975: for (p = mp->start; p < mp->start+mp->size;
976: p += PAGE_SIZE)
1.73 eeh 977: pmap_zero_page(p);
978: }
979: #endif
1.116 pk 980: #endif /* DEBUG */
1.156 pk 981: /*
1.1 eeh 982: * In future we should be able to specify both allocated
983: * and free.
984: */
1.168 cdi 985: BDPRINTF(PDB_BOOT1, ("uvm_page_physload(%lx, %lx)\n",
986: (long)mp->start,
987: (long)(mp->start + mp->size)));
1.1 eeh 988: uvm_page_physload(
989: atop(mp->start),
1.10 eeh 990: atop(mp->start+mp->size),
1.1 eeh 991: atop(mp->start),
1.10 eeh 992: atop(mp->start+mp->size),
1.3 thorpej 993: VM_FREELIST_DEFAULT);
1.1 eeh 994: }
995:
1.66 eeh 996: if (pmapdebug & PDB_BOOT) {
997: /* print out mem list */
1.156 pk 998: prom_printf("Available physical memory after cleanup:\n");
1.168 cdi 999: for (i = 0; i < pcnt; i++) {
1.156 pk 1000: prom_printf("avail start %lx size %lx\n",
1.168 cdi 1001: (long)avail[i].start, (long)avail[i].size);
1.66 eeh 1002: }
1.156 pk 1003: prom_printf("End of available physical memory after cleanup\n");
1.1 eeh 1004: }
1.278 mrg 1005:
1.1 eeh 1006: /*
1.8 eeh 1007: * Allocate and clear out pmap_kernel()->pm_segs[]
1.1 eeh 1008: */
1.127 chs 1009: pmap_kernel()->pm_refs = 1;
1.210 martin 1010: memset(&pmap_kernel()->pm_ctx, 0, sizeof(pmap_kernel()->pm_ctx));
1.127 chs 1011:
1012: /* Throw away page zero */
1013: do {
1014: pmap_get_page(&newp);
1015: } while (!newp);
1016: pmap_kernel()->pm_segs=(paddr_t *)(u_long)newp;
1017: pmap_kernel()->pm_physaddr = newp;
1.1 eeh 1018:
1019: /*
1020: * finish filling out kernel pmap.
1021: */
1022:
1.156 pk 1023: BDPRINTF(PDB_BOOT, ("pmap_kernel()->pm_physaddr = %lx\n",
1.66 eeh 1024: (long)pmap_kernel()->pm_physaddr));
1.1 eeh 1025: /*
1026: * Tell pmap about our msgbuf -- hope this works already
1027: */
1.156 pk 1028: BDPRINTF(PDB_BOOT1, ("Calling consinit()\n"));
1.127 chs 1029: if (pmapdebug & PDB_BOOT1)
1030: consinit();
1.156 pk 1031: BDPRINTF(PDB_BOOT1, ("Inserting msgbuf into pmap_kernel()\n"));
1.1 eeh 1032: /* it's not safe to call pmap_enter so we need to do this ourselves */
1.56 eeh 1033: va = (vaddr_t)msgbufp;
1034: while (msgbufsiz) {
1.156 pk 1035: data = TSB_DATA(0 /* global */,
1.160 martin 1036: PGSZ_8K,
1.56 eeh 1037: phys_msgbuf,
1038: 1 /* priv */,
1039: 1 /* Write */,
1040: 1 /* Cacheable */,
1041: FORCE_ALIAS /* ALIAS -- Disable D$ */,
1042: 1 /* valid */,
1043: 0 /* IE */);
1.160 martin 1044: pmap_enter_kpage(va, data);
1045: va += PAGE_SIZE;
1046: msgbufsiz -= PAGE_SIZE;
1047: phys_msgbuf += PAGE_SIZE;
1.56 eeh 1048: }
1.156 pk 1049: BDPRINTF(PDB_BOOT1, ("Done inserting msgbuf into pmap_kernel()\n"));
1050:
1051: BDPRINTF(PDB_BOOT1, ("Inserting PROM mappings into pmap_kernel()\n"));
1.22 mrg 1052: for (i = 0; i < prom_map_size; i++)
1.143 chs 1053: if (prom_map[i].vstart && ((prom_map[i].vstart >> 32) == 0))
1.138 thorpej 1054: for (j = 0; j < prom_map[i].vsize; j += PAGE_SIZE) {
1.1 eeh 1055: int k;
1.156 pk 1056:
1.22 mrg 1057: for (k = 0; page_size_map[k].mask; k++) {
1058: if (((prom_map[i].vstart |
1059: prom_map[i].tte) &
1060: page_size_map[k].mask) == 0 &&
1061: page_size_map[k].mask <
1062: prom_map[i].vsize)
1.1 eeh 1063: break;
1064: }
1065: page_size_map[k].use++;
1.37 eeh 1066: /* Enter PROM map into pmap_kernel() */
1.56 eeh 1067: pmap_enter_kpage(prom_map[i].vstart + j,
1.143 chs 1068: (prom_map[i].tte + j) | TLB_EXEC |
1.56 eeh 1069: page_size_map[k].code);
1.1 eeh 1070: }
1.156 pk 1071: BDPRINTF(PDB_BOOT1, ("Done inserting PROM mappings into pmap_kernel()\n"));
1.1 eeh 1072:
1.54 eeh 1073: /*
1074: * Fix up start of kernel heap.
1075: */
1.98 eeh 1076: vmmap = (vaddr_t)roundup(ekdata, 4*MEG);
1.54 eeh 1077: /* Let's keep 1 page of redzone after the kernel */
1.138 thorpej 1078: vmmap += PAGE_SIZE;
1.156 pk 1079: {
1.170 cdi 1080: extern void main(void);
1.178 mrg 1081: vaddr_t u0va;
1.54 eeh 1082: paddr_t pa;
1083:
1.178 mrg 1084: u0va = vmmap;
1.54 eeh 1085:
1.156 pk 1086: BDPRINTF(PDB_BOOT1,
1.243 matt 1087: ("Inserting lwp0 USPACE into pmap_kernel() at %p\n",
1.98 eeh 1088: vmmap));
1.56 eeh 1089:
1.178 mrg 1090: while (vmmap < u0va + 2*USPACE) {
1.162 christos 1091: int64_t data1;
1.1 eeh 1092:
1.151 petrov 1093: if (!pmap_get_page(&pa))
1094: panic("pmap_bootstrap: no pages");
1.138 thorpej 1095: prom_map_phys(pa, PAGE_SIZE, vmmap, -1);
1.162 christos 1096: data1 = TSB_DATA(0 /* global */,
1.98 eeh 1097: PGSZ_8K,
1.55 mrg 1098: pa,
1.54 eeh 1099: 1 /* priv */,
1100: 1 /* Write */,
1101: 1 /* Cacheable */,
1.56 eeh 1102: FORCE_ALIAS /* ALIAS -- Disable D$ */,
1.54 eeh 1103: 1 /* valid */,
1104: 0 /* IE */);
1.162 christos 1105: pmap_enter_kpage(vmmap, data1);
1.138 thorpej 1106: vmmap += PAGE_SIZE;
1.56 eeh 1107: }
1.156 pk 1108: BDPRINTF(PDB_BOOT1,
1109: ("Done inserting stack 0 into pmap_kernel()\n"));
1.56 eeh 1110:
1111: /* Now map in and initialize our cpu_info structure */
1112: #ifdef DIAGNOSTIC
1.138 thorpej 1113: vmmap += PAGE_SIZE; /* redzone -- XXXX do we need one? */
1.56 eeh 1114: #endif
1.155 chs 1115: if ((vmmap ^ INTSTACK) & VA_ALIAS_MASK)
1.138 thorpej 1116: vmmap += PAGE_SIZE; /* Matchup virtual color for D$ */
1.57 eeh 1117: intstk = vmmap;
1.155 chs 1118: cpus = (struct cpu_info *)(intstk + CPUINFO_VA - INTSTACK);
1.56 eeh 1119:
1.98 eeh 1120: BDPRINTF(PDB_BOOT1,
1.156 pk 1121: ("Inserting cpu_info into pmap_kernel() at %p\n",
1.98 eeh 1122: cpus));
1.195 martin 1123: /* Now map in all 8 pages of interrupt stack/cpu_info */
1.56 eeh 1124: pa = cpu0paddr;
1.195 martin 1125: prom_map_phys(pa, 64*KB, vmmap, -1);
1.155 chs 1126:
1.156 pk 1127: /*
1.106 eeh 1128: * Also map it in as the interrupt stack.
1129: * This lets the PROM see this if needed.
1130: *
1131: * XXXX locore.s does not flush these mappings
1132: * before installing the locked TTE.
1133: */
1.178 mrg 1134: prom_map_phys(pa, 64*KB, INTSTACK, -1);
1.195 martin 1135: for (i = 0; i < 8; i++) {
1.162 christos 1136: int64_t data1;
1.56 eeh 1137:
1.162 christos 1138: data1 = TSB_DATA(0 /* global */,
1.98 eeh 1139: PGSZ_8K,
1.55 mrg 1140: pa,
1.54 eeh 1141: 1 /* priv */,
1142: 1 /* Write */,
1143: 1 /* Cacheable */,
1.56 eeh 1144: FORCE_ALIAS /* ALIAS -- Disable D$ */,
1.54 eeh 1145: 1 /* valid */,
1146: 0 /* IE */);
1.162 christos 1147: pmap_enter_kpage(vmmap, data1);
1.138 thorpej 1148: vmmap += PAGE_SIZE;
1149: pa += PAGE_SIZE;
1.56 eeh 1150: }
1.156 pk 1151: BDPRINTF(PDB_BOOT1, ("Initializing cpu_info\n"));
1.56 eeh 1152:
1153: /* Initialize our cpu_info structure */
1.195 martin 1154: memset((void *)intstk, 0, 64 * KB);
1.155 chs 1155: cpus->ci_self = cpus;
1.152 petrov 1156: cpus->ci_next = NULL;
1.133 thorpej 1157: cpus->ci_curlwp = &lwp0;
1.152 petrov 1158: cpus->ci_flags = CPUF_PRIMARY;
1.282 palle 1159: cpus->ci_cpuid = cpu_myid();
1.133 thorpej 1160: cpus->ci_fplwp = NULL;
1.274 nakayama 1161: cpus->ci_eintstack = NULL;
1.56 eeh 1162: cpus->ci_spinup = main; /* Call main when we're running. */
1163: cpus->ci_paddr = cpu0paddr;
1.290 palle 1164: if (CPU_ISSUN4V) {
1.285 palle 1165: cpus->ci_mmfsa = cpu0paddr;
1.290 palle 1166: cpus->ci_tsb_desc = NULL;
1167: }
1.178 mrg 1168: cpus->ci_cpcb = (struct pcb *)u0va;
1.202 martin 1169: cpus->ci_idepth = -1;
1.211 nakayama 1170: memset(cpus->ci_intrpending, -1, sizeof(cpus->ci_intrpending));
1.152 petrov 1171:
1.244 rmind 1172: uvm_lwp_setuarea(&lwp0, u0va);
1.192 rjs 1173: lwp0.l_md.md_tf = (struct trapframe64*)(u0va + USPACE
1174: - sizeof(struct trapframe64));
1.191 martin 1175:
1.195 martin 1176: cpu0paddr += 64 * KB;
1.155 chs 1177:
1178: CPUSET_CLEAR(cpus_active);
1179: CPUSET_ADD(cpus_active, 0);
1.150 cdi 1180:
1.210 martin 1181: cpu_pmap_prepare(cpus, true);
1182: cpu_pmap_init(cpus);
1183:
1.56 eeh 1184: /* The rest will be done at CPU attach time. */
1.156 pk 1185: BDPRINTF(PDB_BOOT1,
1186: ("Done inserting cpu_info into pmap_kernel()\n"));
1.54 eeh 1187: }
1.63 eeh 1188:
1.186 christos 1189: vmmap = (vaddr_t)reserve_dumppages((void *)(u_long)vmmap);
1.155 chs 1190:
1.276 martin 1191: #ifdef MODULAR
1192: /*
1.279 martin 1193: * For 32bit kernels:
1194: * Reserve 16 MB of VA for module loading. Right now our full
1195: * GENERIC kernel is about 13 MB, so this looks good enough.
1196: * For 64bit kernels:
1197: * We can use all the space left before the special addresses,
1198: * but leave 2 pages at vmmap alone (see pmap_virtual_space)
1199: * and another red zone page.
1200: */
1201: #ifdef __arch64__
1202: module_start = vmmap + 3*PAGE_SIZE;
1203: module_end = 0x08000000; /* keep all modules within 2GB */
1204: KASSERT(module_end < KERNEND); /* of kernel text */
1205: #else
1.276 martin 1206: module_start = vmmap;
1207: vmmap += 16 * 1024*1024;
1208: module_end = vmmap;
1209: #endif
1.279 martin 1210: #endif
1.276 martin 1211:
1.1 eeh 1212: /*
1213: * Set up bounds of allocatable memory for vmstat et al.
1214: */
1.245 mrg 1215: avail_start = avail->start;
1.1 eeh 1216: for (mp = avail; mp->size; mp++)
1217: avail_end = mp->start+mp->size;
1.205 martin 1218:
1.156 pk 1219: BDPRINTF(PDB_BOOT1, ("Finished pmap_bootstrap()\n"));
1.139 thorpej 1220:
1.168 cdi 1221: BDPRINTF(PDB_BOOT, ("left kdata: %" PRId64 " @%" PRIx64 ".\n",
1222: kdata_mem_pool.size, kdata_mem_pool.start));
1.1 eeh 1223: }
1224:
1225: /*
1.210 martin 1226: * Allocate TSBs for both mmus from the locked kernel data segment page.
1227: * This is run before the cpu itself is activated (or by the first cpu
1228: * itself)
1229: */
1230: void
1231: cpu_pmap_prepare(struct cpu_info *ci, bool initial)
1232: {
1233: /* allocate our TSBs */
1234: ci->ci_tsb_dmmu = (pte_t *)kdata_alloc(TSBSIZE, TSBSIZE);
1235: ci->ci_tsb_immu = (pte_t *)kdata_alloc(TSBSIZE, TSBSIZE);
1236: memset(ci->ci_tsb_dmmu, 0, TSBSIZE);
1237: memset(ci->ci_tsb_immu, 0, TSBSIZE);
1238: if (!initial) {
1239: KASSERT(ci != curcpu());
1240: /*
1241: * Initially share ctxbusy with the boot cpu; the
1242: * cpu will replace it as soon as it runs (and can
1243: * probe the number of available contexts itself).
1244: * Until then only context 0 (aka kernel) will be
1245: * referenced anyway.
1246: */
1247: ci->ci_numctx = curcpu()->ci_numctx;
1248: ci->ci_ctxbusy = curcpu()->ci_ctxbusy;
1249: }
1250:
1.290 palle 1251: if (CPU_ISSUN4V) {
1252: ci->ci_tsb_desc = (struct tsb_desc *)kdata_alloc(
1.282 palle 1253: sizeof(struct tsb_desc), 16);
1.290 palle 1254: memset(ci->ci_tsb_desc, 0, sizeof(struct tsb_desc));
1.282 palle 1255: /* 8K page size used for TSB index computation */
1.290 palle 1256: ci->ci_tsb_desc->td_idxpgsz = 0;
1257: ci->ci_tsb_desc->td_assoc = 1;
1258: ci->ci_tsb_desc->td_size = TSBENTS;
1259: ci->ci_tsb_desc->td_ctxidx = -1;
1260: ci->ci_tsb_desc->td_pgsz = 0xf;
1261: ci->ci_tsb_desc->td_pa = pmap_kextract((vaddr_t)ci->ci_tsb_dmmu);
1.282 palle 1262: BDPRINTF(PDB_BOOT1, ("cpu %d: TSB descriptor allocated at %p "
1263: "size %08x - td_pa at %p\n",
1.290 palle 1264: ci->ci_index, ci->ci_tsb_desc, sizeof(struct tsb_desc),
1265: ci->ci_tsb_desc->td_pa));
1.282 palle 1266: }
1267:
1.210 martin 1268: BDPRINTF(PDB_BOOT1, ("cpu %d: TSB allocated at %p/%p size %08x\n",
1269: ci->ci_index, ci->ci_tsb_dmmu, ci->ci_tsb_immu, TSBSIZE));
1270: }
1271:
1272: /*
1.264 mrg 1273: * Initialize the per CPU parts for the cpu running this code.
1.210 martin 1274: */
1275: void
1276: cpu_pmap_init(struct cpu_info *ci)
1277: {
1278: size_t ctxsize;
1279:
1.264 mrg 1280: /*
1281: * We delay initialising ci_ctx_lock here as LOCKDEBUG isn't
1282: * running for cpu0 yet.
1283: */
1.210 martin 1284: ci->ci_pmap_next_ctx = 1;
1.282 palle 1285: /* all SUN4U use 13 bit contexts - SUN4V use at least 13 bit contexts */
1286: ci->ci_numctx = 0x2000;
1.210 martin 1287: ctxsize = sizeof(paddr_t)*ci->ci_numctx;
1288: ci->ci_ctxbusy = (paddr_t *)kdata_alloc(ctxsize, sizeof(uint64_t));
1289: memset(ci->ci_ctxbusy, 0, ctxsize);
1290: LIST_INIT(&ci->ci_pmap_ctxlist);
1291:
1292: /* mark kernel context as busy */
1293: ci->ci_ctxbusy[0] = pmap_kernel()->pm_physaddr;
1294: }
1295:
1296: /*
1.1 eeh 1297: * Initialize anything else for pmap handling.
1298: * Called during vm_init().
1299: */
1300: void
1.234 cegger 1301: pmap_init(void)
1.1 eeh 1302: {
1.127 chs 1303: struct vm_page *pg;
1304: struct pglist pglist;
1.170 cdi 1305: uint64_t data;
1.54 eeh 1306: paddr_t pa;
1307: psize_t size;
1308: vaddr_t va;
1309:
1.156 pk 1310: BDPRINTF(PDB_BOOT1, ("pmap_init()\n"));
1.54 eeh 1311:
1312: size = sizeof(struct pv_entry) * physmem;
1313: if (uvm_pglistalloc((psize_t)size, (paddr_t)0, (paddr_t)-1,
1.138 thorpej 1314: (paddr_t)PAGE_SIZE, (paddr_t)0, &pglist, 1, 0) != 0)
1.152 petrov 1315: panic("pmap_init: no memory");
1.54 eeh 1316:
1.161 yamt 1317: va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY);
1.54 eeh 1318: if (va == 0)
1.152 petrov 1319: panic("pmap_init: no memory");
1.54 eeh 1320:
1321: /* Map the pages */
1.218 ad 1322: TAILQ_FOREACH(pg, &pglist, pageq.queue) {
1.127 chs 1323: pa = VM_PAGE_TO_PHYS(pg);
1.54 eeh 1324: pmap_zero_page(pa);
1.156 pk 1325: data = TSB_DATA(0 /* global */,
1.98 eeh 1326: PGSZ_8K,
1.54 eeh 1327: pa,
1328: 1 /* priv */,
1329: 1 /* Write */,
1330: 1 /* Cacheable */,
1.56 eeh 1331: FORCE_ALIAS /* ALIAS -- Disable D$ */,
1.54 eeh 1332: 1 /* valid */,
1333: 0 /* IE */);
1.56 eeh 1334: pmap_enter_kpage(va, data);
1.138 thorpej 1335: va += PAGE_SIZE;
1.54 eeh 1336: }
1.73 eeh 1337:
1338: /*
1.127 chs 1339: * initialize the pmap pools.
1.73 eeh 1340: */
1.275 mrg 1341: pool_cache_bootstrap(&pmap_cache, sizeof(struct pmap),
1342: SPARC64_BLOCK_SIZE, 0, 0, "pmappl", NULL, IPL_NONE, NULL, NULL,
1343: NULL);
1.222 nakayama 1344: pool_cache_bootstrap(&pmap_pv_cache, sizeof(struct pv_entry), 0, 0,
1.223 nakayama 1345: PR_LARGECACHE, "pv_entry", NULL, IPL_NONE, NULL, NULL, NULL);
1.54 eeh 1346:
1.1 eeh 1347: vm_first_phys = avail_start;
1348: vm_num_phys = avail_end - avail_start;
1.206 martin 1349:
1.261 mrg 1350: mutex_init(&pmap_lock, MUTEX_DEFAULT, IPL_NONE);
1.259 mrg 1351: #if defined(USE_LOCKSAFE_PSEG_GETSET)
1352: mutex_init(&pseg_lock, MUTEX_SPIN, IPL_VM);
1353: #endif
1.250 martin 1354: lock_available = true;
1.140 martin 1355: }
1356:
1357: /*
1358: * How much virtual space is available to the kernel?
1359: */
1360: static vaddr_t kbreak; /* End of kernel VA */
1361: void
1.233 dsl 1362: pmap_virtual_space(vaddr_t *start, vaddr_t *end)
1.140 martin 1363: {
1364:
1365: /*
1.279 martin 1366: * Reserve one segment for kernel virtual memory.
1367: */
1368: #ifdef __arch64__
1369: /*
1370: * On 64 bit kernels, start it beyound firmware, so
1371: * we are basically unrestricted.
1372: */
1373: *start = kbreak = VM_KERNEL_MEM_VA_START;
1374: *end = VM_MAX_KERNEL_ADDRESS;
1375: #else
1376: /*
1377: * Reserve two pages for pmap_copy_page && /dev/mem, but otherwise
1378: * end it beyond the iospace and other special fixed addresses.
1.140 martin 1379: */
1380: *start = kbreak = (vaddr_t)(vmmap + 2*PAGE_SIZE);
1381: *end = VM_MAX_KERNEL_ADDRESS;
1.279 martin 1382: #endif
1.156 pk 1383: BDPRINTF(PDB_BOOT1, ("pmap_virtual_space: %x-%x\n", *start, *end));
1.1 eeh 1384: }
1385:
1.77 eeh 1386: /*
1387: * Preallocate kernel page tables to a specified VA.
1388: * This simply loops through the first TTE for each
1.156 pk 1389: * page table from the beginning of the kernel pmap,
1.77 eeh 1390: * reads the entry, and if the result is
1391: * zero (either invalid entry or no page table) it stores
1392: * a zero there, populating page tables in the process.
1393: * This is not the most efficient technique but I don't
1394: * expect it to be called that often.
1395: */
1.156 pk 1396: vaddr_t
1.232 dsl 1397: pmap_growkernel(vaddr_t maxkvaddr)
1.77 eeh 1398: {
1399: struct pmap *pm = pmap_kernel();
1.127 chs 1400: paddr_t pa;
1.156 pk 1401:
1.279 martin 1402: if (maxkvaddr >= VM_MAX_KERNEL_ADDRESS) {
1.98 eeh 1403: printf("WARNING: cannot extend kernel pmap beyond %p to %p\n",
1.279 martin 1404: (void *)VM_MAX_KERNEL_ADDRESS, (void *)maxkvaddr);
1.98 eeh 1405: return (kbreak);
1406: }
1.127 chs 1407: DPRINTF(PDB_GROW, ("pmap_growkernel(%lx...%lx)\n", kbreak, maxkvaddr));
1.77 eeh 1408: /* Align with the start of a page table */
1.127 chs 1409: for (kbreak &= (-1 << PDSHIFT); kbreak < maxkvaddr;
1410: kbreak += (1 << PDSHIFT)) {
1.254 mrg 1411: if (pseg_get(pm, kbreak) & TLB_V)
1.127 chs 1412: continue;
1.77 eeh 1413:
1.127 chs 1414: pa = 0;
1415: while (pseg_set(pm, kbreak, 0, pa) & 1) {
1.156 pk 1416: DPRINTF(PDB_GROW,
1.127 chs 1417: ("pmap_growkernel: extending %lx\n", kbreak));
1418: pa = 0;
1419: if (!pmap_get_page(&pa))
1.175 mrg 1420: panic("pmap_growkernel: no pages");
1.127 chs 1421: ENTER_STAT(ptpneeded);
1.77 eeh 1422: }
1423: }
1.78 eeh 1424: return (kbreak);
1.77 eeh 1425: }
1426:
1.1 eeh 1427: /*
1428: * Create and return a physical map.
1429: */
1.4 eeh 1430: struct pmap *
1.234 cegger 1431: pmap_create(void)
1.1 eeh 1432: {
1433: struct pmap *pm;
1.41 eeh 1434:
1.77 eeh 1435: DPRINTF(PDB_CREATE, ("pmap_create()\n"));
1.44 chs 1436:
1.203 ad 1437: pm = pool_cache_get(&pmap_cache, PR_WAITOK);
1.127 chs 1438: memset(pm, 0, sizeof *pm);
1439: DPRINTF(PDB_CREATE, ("pmap_create(): created %p\n", pm));
1.1 eeh 1440:
1.273 rmind 1441: mutex_init(&pm->pm_obj_lock, MUTEX_DEFAULT, IPL_NONE);
1442: uvm_obj_init(&pm->pm_obj, NULL, false, 1);
1443: uvm_obj_setlock(&pm->pm_obj, &pm->pm_obj_lock);
1444:
1.127 chs 1445: if (pm != pmap_kernel()) {
1.151 petrov 1446: while (!pmap_get_page(&pm->pm_physaddr)) {
1.127 chs 1447: uvm_wait("pmap_create");
1448: }
1.49 mrg 1449: pm->pm_segs = (paddr_t *)(u_long)pm->pm_physaddr;
1.1 eeh 1450: }
1.210 martin 1451: DPRINTF(PDB_CREATE, ("pmap_create(%p): ctx %d\n", pm, pmap_ctx(pm)));
1.127 chs 1452: return pm;
1.1 eeh 1453: }
1454:
1455: /*
1456: * Add a reference to the given pmap.
1457: */
1458: void
1.232 dsl 1459: pmap_reference(struct pmap *pm)
1.1 eeh 1460: {
1.73 eeh 1461:
1.203 ad 1462: atomic_inc_uint(&pm->pm_refs);
1.1 eeh 1463: }
1464:
1465: /*
1466: * Retire the given pmap from service.
1467: * Should only be called if the map contains no valid mappings.
1468: */
1469: void
1.232 dsl 1470: pmap_destroy(struct pmap *pm)
1.1 eeh 1471: {
1.212 nakayama 1472: #ifdef MULTIPROCESSOR
1473: struct cpu_info *ci;
1.262 mrg 1474: sparc64_cpuset_t pmap_cpus_active;
1475: #else
1476: #define pmap_cpus_active 0
1.212 nakayama 1477: #endif
1.127 chs 1478: struct vm_page *pg, *nextpg;
1.1 eeh 1479:
1.208 ad 1480: if ((int)atomic_dec_uint_nv(&pm->pm_refs) > 0) {
1.127 chs 1481: return;
1.73 eeh 1482: }
1.127 chs 1483: DPRINTF(PDB_DESTROY, ("pmap_destroy: freeing pmap %p\n", pm));
1.212 nakayama 1484: #ifdef MULTIPROCESSOR
1.262 mrg 1485: CPUSET_CLEAR(pmap_cpus_active);
1.212 nakayama 1486: for (ci = cpus; ci != NULL; ci = ci->ci_next) {
1.262 mrg 1487: /* XXXMRG: Move the lock inside one or both tests? */
1488: mutex_enter(&ci->ci_ctx_lock);
1489: if (CPUSET_HAS(cpus_active, ci->ci_index)) {
1490: if (pm->pm_ctx[ci->ci_index] > 0) {
1491: CPUSET_ADD(pmap_cpus_active, ci->ci_index);
1492: ctx_free(pm, ci);
1493: }
1494: }
1495: mutex_exit(&ci->ci_ctx_lock);
1.212 nakayama 1496: }
1497: #else
1.262 mrg 1498: if (pmap_ctx(pm)) {
1499: mutex_enter(&curcpu()->ci_ctx_lock);
1500: ctx_free(pm, curcpu());
1501: mutex_exit(&curcpu()->ci_ctx_lock);
1502: }
1.212 nakayama 1503: #endif
1.127 chs 1504:
1505: /* we could be a little smarter and leave pages zeroed */
1506: for (pg = TAILQ_FIRST(&pm->pm_obj.memq); pg != NULL; pg = nextpg) {
1.268 uebayasi 1507: #ifdef DIAGNOSTIC
1.267 uebayasi 1508: struct vm_page_md *md = VM_PAGE_TO_MD(pg);
1.268 uebayasi 1509: #endif
1.267 uebayasi 1510:
1.265 hannken 1511: KASSERT((pg->flags & PG_MARKER) == 0);
1.218 ad 1512: nextpg = TAILQ_NEXT(pg, listq.queue);
1513: TAILQ_REMOVE(&pm->pm_obj.memq, pg, listq.queue);
1.266 uebayasi 1514: KASSERT(md->mdpg_pvh.pv_pmap == NULL);
1.262 mrg 1515: dcache_flush_page_cpuset(VM_PAGE_TO_PHYS(pg), pmap_cpus_active);
1.127 chs 1516: uvm_pagefree(pg);
1.1 eeh 1517: }
1.262 mrg 1518: pmap_free_page((paddr_t)(u_long)pm->pm_segs, pmap_cpus_active);
1.273 rmind 1519:
1520: uvm_obj_destroy(&pm->pm_obj, false);
1521: mutex_destroy(&pm->pm_obj_lock);
1.203 ad 1522: pool_cache_put(&pmap_cache, pm);
1.1 eeh 1523: }
1524:
1525: /*
1526: * Copy the range specified by src_addr/len
1527: * from the source map to the range dst_addr/len
1528: * in the destination map.
1529: *
1530: * This routine is only advisory and need not do anything.
1531: */
1532: void
1.233 dsl 1533: pmap_copy(struct pmap *dst_pmap, struct pmap *src_pmap, vaddr_t dst_addr, vsize_t len, vaddr_t src_addr)
1.1 eeh 1534: {
1.127 chs 1535:
1536: DPRINTF(PDB_CREATE, ("pmap_copy(%p, %p, %p, %lx, %p)\n",
1537: dst_pmap, src_pmap, (void *)(u_long)dst_addr,
1538: (u_long)len, (void *)(u_long)src_addr));
1.1 eeh 1539: }
1540:
1541: /*
1542: * Activate the address space for the specified process. If the
1543: * process is the current process, load the new MMU context.
1544: */
1545: void
1.232 dsl 1546: pmap_activate(struct lwp *l)
1.1 eeh 1547: {
1.133 thorpej 1548: struct pmap *pmap = l->l_proc->p_vmspace->vm_map.pmap;
1.1 eeh 1549:
1.154 chs 1550: if (pmap == pmap_kernel()) {
1551: return;
1552: }
1553:
1.1 eeh 1554: /*
1.246 skrll 1555: * This is essentially the same thing that happens in cpu_switchto()
1.1 eeh 1556: * when the newly selected process is about to run, except that we
1557: * have to make sure to clean the register windows before we set
1558: * the new context.
1559: */
1560:
1.133 thorpej 1561: if (l != curlwp) {
1.127 chs 1562: return;
1563: }
1564: write_user_windows();
1565: pmap_activate_pmap(pmap);
1566: }
1567:
1568: void
1569: pmap_activate_pmap(struct pmap *pmap)
1570: {
1571:
1.210 martin 1572: if (pmap_ctx(pmap) == 0) {
1.127 chs 1573: (void) ctx_alloc(pmap);
1.1 eeh 1574: }
1.278 mrg 1575: DPRINTF(PDB_ACTIVATE,
1576: ("%s: cpu%d activating ctx %d\n", __func__,
1577: cpu_number(), pmap_ctx(pmap)));
1.210 martin 1578: dmmu_set_secondary_context(pmap_ctx(pmap));
1.1 eeh 1579: }
1580:
1581: /*
1582: * Deactivate the address space of the specified process.
1583: */
1584: void
1.232 dsl 1585: pmap_deactivate(struct lwp *l)
1.1 eeh 1586: {
1.278 mrg 1587:
1588: DPRINTF(PDB_ACTIVATE,
1589: ("%s: cpu%d deactivating ctx %d\n", __func__,
1590: cpu_number(), pmap_ctx(l->l_proc->p_vmspace->vm_map.pmap)));
1.1 eeh 1591: }
1592:
1.4 eeh 1593: /*
1594: * pmap_kenter_pa: [ INTERFACE ]
1595: *
1596: * Enter a va -> pa mapping into the kernel pmap without any
1597: * physical->virtual tracking.
1598: *
1599: * Note: no locking is necessary in this function.
1600: */
1601: void
1.242 cegger 1602: pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
1.4 eeh 1603: {
1604: pte_t tte;
1.127 chs 1605: paddr_t ptp;
1.4 eeh 1606: struct pmap *pm = pmap_kernel();
1.127 chs 1607: int i;
1.4 eeh 1608:
1.127 chs 1609: KASSERT(va < INTSTACK || va > EINTSTACK);
1610: KASSERT(va < kdata || va > ekdata);
1.102 eeh 1611:
1.4 eeh 1612: /*
1613: * Construct the TTE.
1614: */
1.127 chs 1615:
1616: ENTER_STAT(unmanaged);
1.177 martin 1617: if (pa & (PMAP_NVC|PMAP_NC)) {
1.127 chs 1618: ENTER_STAT(ci);
1.177 martin 1619: }
1.127 chs 1620:
1.106 eeh 1621: tte.data = TSB_DATA(0, PGSZ_8K, pa, 1 /* Privileged */,
1.127 chs 1622: (VM_PROT_WRITE & prot),
1623: !(pa & PMAP_NC), pa & (PMAP_NVC), 1, 0);
1624: /* We don't track mod/ref here. */
1625: if (prot & VM_PROT_WRITE)
1626: tte.data |= TLB_REAL_W|TLB_W;
1.190 martin 1627: if (prot & VM_PROT_EXECUTE)
1628: tte.data |= TLB_EXEC;
1.105 eeh 1629: tte.data |= TLB_TSB_LOCK; /* wired */
1.127 chs 1630: ptp = 0;
1631:
1632: retry:
1633: i = pseg_set(pm, va, tte.data, ptp);
1634: if (i & 1) {
1635: KASSERT((i & 4) == 0);
1636: ptp = 0;
1637: if (!pmap_get_page(&ptp))
1.111 eeh 1638: panic("pmap_kenter_pa: no pages");
1.127 chs 1639: ENTER_STAT(ptpneeded);
1640: goto retry;
1.4 eeh 1641: }
1.127 chs 1642: if (ptp && i == 0) {
1.86 eeh 1643: /* We allocated a spare page but didn't use it. Free it. */
1.156 pk 1644: printf("pmap_kenter_pa: freeing unused page %llx\n",
1.127 chs 1645: (long long)ptp);
1.262 mrg 1646: pmap_free_page_noflush(ptp);
1.86 eeh 1647: }
1.278 mrg 1648: #ifdef PMAP_DEBUG
1.4 eeh 1649: i = ptelookup_va(va);
1.125 mrg 1650: if (pmapdebug & PDB_ENTER)
1651: prom_printf("pmap_kenter_pa: va=%08x data=%08x:%08x "
1.156 pk 1652: "tsb_dmmu[%d]=%08x\n", va, (int)(tte.data>>32),
1.210 martin 1653: (int)tte.data, i, &curcpu()->ci_tsb_dmmu[i]);
1654: if (pmapdebug & PDB_MMU_STEAL && curcpu()->ci_tsb_dmmu[i].data) {
1.125 mrg 1655: prom_printf("pmap_kenter_pa: evicting entry tag=%x:%08x "
1.156 pk 1656: "data=%08x:%08x tsb_dmmu[%d]=%08x\n",
1.210 martin 1657: (int)(curcpu()->ci_tsb_dmmu[i].tag>>32), (int)curcpu()->ci_tsb_dmmu[i].tag,
1658: (int)(curcpu()->ci_tsb_dmmu[i].data>>32), (int)curcpu()->ci_tsb_dmmu[i].data,
1659: i, &curcpu()->ci_tsb_dmmu[i]);
1.156 pk 1660: prom_printf("with va=%08x data=%08x:%08x tsb_dmmu[%d]=%08x\n",
1.143 chs 1661: va, (int)(tte.data>>32), (int)tte.data, i,
1.210 martin 1662: &curcpu()->ci_tsb_dmmu[i]);
1.4 eeh 1663: }
1664: #endif
1665: }
1.91 eeh 1666:
1.4 eeh 1667: /*
1668: * pmap_kremove: [ INTERFACE ]
1669: *
1.94 thorpej 1670: * Remove a mapping entered with pmap_kenter_pa() starting at va,
1671: * for size bytes (assumed to be page rounded).
1.4 eeh 1672: */
1673: void
1.232 dsl 1674: pmap_kremove(vaddr_t va, vsize_t size)
1.4 eeh 1675: {
1676: struct pmap *pm = pmap_kernel();
1677: int64_t data;
1.127 chs 1678: paddr_t pa;
1.158 martin 1679: int rv;
1.185 thorpej 1680: bool flush = FALSE;
1.4 eeh 1681:
1.127 chs 1682: KASSERT(va < INTSTACK || va > EINTSTACK);
1683: KASSERT(va < kdata || va > ekdata);
1.102 eeh 1684:
1.127 chs 1685: DPRINTF(PDB_DEMAP, ("pmap_kremove: start 0x%lx size %lx\n", va, size));
1.138 thorpej 1686: for (; size >= PAGE_SIZE; va += PAGE_SIZE, size -= PAGE_SIZE) {
1.127 chs 1687:
1688: #ifdef DIAGNOSTIC
1.4 eeh 1689: /*
1690: * Is this part of the permanent 4MB mapping?
1691: */
1.127 chs 1692: if (va >= ktext && va < roundup(ekdata, 4*MEG))
1.156 pk 1693: panic("pmap_kremove: va=%08x in locked TLB", (u_int)va);
1.4 eeh 1694: #endif
1.127 chs 1695:
1696: data = pseg_get(pm, va);
1.254 mrg 1697: if ((data & TLB_V) == 0) {
1.127 chs 1698: continue;
1699: }
1700:
1701: flush = TRUE;
1702: pa = data & TLB_PA_MASK;
1703:
1704: /*
1705: * We need to flip the valid bit and
1706: * clear the access statistics.
1707: */
1708:
1.158 martin 1709: rv = pseg_set(pm, va, 0, 0);
1710: if (rv & 1)
1711: panic("pmap_kremove: pseg_set needs spare, rv=%d\n",
1712: rv);
1.127 chs 1713: DPRINTF(PDB_DEMAP, ("pmap_kremove: seg %x pdir %x pte %x\n",
1714: (int)va_to_seg(va), (int)va_to_dir(va),
1715: (int)va_to_pte(va)));
1716: REMOVE_STAT(removes);
1717:
1.212 nakayama 1718: tsb_invalidate(va, pm);
1.127 chs 1719: REMOVE_STAT(tflushes);
1720:
1721: /*
1722: * Here we assume nothing can get into the TLB
1723: * unless it has a PTE.
1724: */
1725:
1.212 nakayama 1726: tlb_flush_pte(va, pm);
1.255 mrg 1727: dcache_flush_page_all(pa);
1.4 eeh 1728: }
1.255 mrg 1729: if (flush)
1.127 chs 1730: REMOVE_STAT(flushes);
1.4 eeh 1731: }
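
/*
 * Illustrative sketch (commentary only, not code from this file) of
 * how pmap_kenter_pa()/pmap_kremove() pair up for a transient,
 * unmanaged kernel mapping; the function names here are invented for
 * the example, but the pattern mirrors what uvm_km-style consumers do.
 */
#ifdef NOTYET
static vaddr_t
example_map_phys(paddr_t pa, vsize_t len)
{
	vaddr_t va;
	vsize_t off;

	/* Reserve kernel VA only, then wire the pages in by hand. */
	va = uvm_km_alloc(kernel_map, len, 0, UVM_KMF_VAONLY);
	for (off = 0; off < len; off += PAGE_SIZE)
		pmap_kenter_pa(va + off, pa + off,
		    VM_PROT_READ | VM_PROT_WRITE, 0);
	pmap_update(pmap_kernel());
	return va;
}

static void
example_unmap_phys(vaddr_t va, vsize_t len)
{
	/* pmap_kremove() only undoes mappings made by pmap_kenter_pa(). */
	pmap_kremove(va, len);
	pmap_update(pmap_kernel());
	uvm_km_free(kernel_map, va, len, UVM_KMF_VAONLY);
}
#endif	/* NOTYET */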
1732:
1.1 eeh 1733: /*
1734: * Insert physical page at pa into the given pmap at virtual address va.
1735: * Supports 64-bit pa so we can map I/O space.
1736: */
1.143 chs 1737:
1.45 thorpej 1738: int
1.236 cegger 1739: pmap_enter(struct pmap *pm, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
1.1 eeh 1740: {
1741: pte_t tte;
1.127 chs 1742: int64_t data;
1.148 christos 1743: paddr_t opa = 0, ptp; /* XXX: gcc */
1.217 ad 1744: pv_entry_t pvh, npv = NULL, freepv;
1.127 chs 1745: struct vm_page *pg, *opg, *ptpg;
1.217 ad 1746: int s, i, uncached = 0, error = 0;
1.160 martin 1747: int size = PGSZ_8K; /* PMAP_SZ_TO_TTE(pa); */
1.185 thorpej 1748: bool wired = (flags & PMAP_WIRED) != 0;
1749: bool wasmapped = FALSE;
1750: bool dopv = TRUE;
1.1 eeh 1751:
1752: /*
1.102 eeh 1753: * Is this part of the permanent mappings?
1.1 eeh 1754: */
1.127 chs 1755: KASSERT(pm != pmap_kernel() || va < INTSTACK || va > EINTSTACK);
1756: KASSERT(pm != pmap_kernel() || va < kdata || va > ekdata);
1757:
1.217 ad 1758: /* Grab a spare PV. */
1759: freepv = pool_cache_get(&pmap_pv_cache, PR_NOWAIT);
1760: if (__predict_false(freepv == NULL)) {
1761: if (flags & PMAP_CANFAIL)
1762: return (ENOMEM);
1763: panic("pmap_enter: no pv entries available");
1764: }
1765: freepv->pv_next = NULL;
1766:
1.1 eeh 1767: /*
1.127 chs 1768: * If a mapping at this address already exists, check if we're
 1769: 	 * entering the same PA again.  If it's different, remove it.
1.38 eeh 1770: */
1.127 chs 1771:
1.203 ad 1772: mutex_enter(&pmap_lock);
1.127 chs 1773: data = pseg_get(pm, va);
1774: if (data & TLB_V) {
1775: wasmapped = TRUE;
1776: opa = data & TLB_PA_MASK;
1777: if (opa != pa) {
1778: opg = PHYS_TO_VM_PAGE(opa);
1779: if (opg != NULL) {
1780: npv = pmap_remove_pv(pm, va, opg);
1781: }
1782: }
1.41 eeh 1783: }
1784:
1.38 eeh 1785: /*
1.1 eeh 1786: * Construct the TTE.
1787: */
1.127 chs 1788: pg = PHYS_TO_VM_PAGE(pa);
1789: if (pg) {
1.266 uebayasi 1790: struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
1791:
1792: pvh = &md->mdpg_pvh;
1.127 chs 1793: uncached = (pvh->pv_va & (PV_ALIAS|PV_NVC));
1.38 eeh 1794: #ifdef DIAGNOSTIC
1.45 thorpej 1795: if ((flags & VM_PROT_ALL) & ~prot)
1.38 eeh 1796: panic("pmap_enter: access_type exceeds prot");
1797: #endif
1.127 chs 1798: /*
 1799: 		 * If we don't have the trap handler do it,
1800: * set the ref/mod bits now.
1801: */
1.119 eeh 1802: if (flags & VM_PROT_ALL)
1.127 chs 1803: pvh->pv_va |= PV_REF;
1.119 eeh 1804: if (flags & VM_PROT_WRITE)
1.127 chs 1805: pvh->pv_va |= PV_MOD;
1806:
1807: /*
1808: * make sure we have a pv entry ready if we need one.
1809: */
1810: if (pvh->pv_pmap == NULL || (wasmapped && opa == pa)) {
1811: if (npv != NULL) {
1.217 ad 1812: /* free it */
1813: npv->pv_next = freepv;
1814: freepv = npv;
1.127 chs 1815: npv = NULL;
1816: }
1817: if (wasmapped && opa == pa) {
1818: dopv = FALSE;
1819: }
1820: } else if (npv == NULL) {
1.217 ad 1821: /* use the pre-allocated pv */
1822: npv = freepv;
1823: freepv = freepv->pv_next;
1.127 chs 1824: }
1825: ENTER_STAT(managed);
1.1 eeh 1826: } else {
1.127 chs 1827: ENTER_STAT(unmanaged);
1828: dopv = FALSE;
1829: if (npv != NULL) {
1.217 ad 1830: /* free it */
1831: npv->pv_next = freepv;
1832: freepv = npv;
1.127 chs 1833: npv = NULL;
1834: }
1.1 eeh 1835: }
1.127 chs 1836:
1837: #ifndef NO_VCACHE
1838: if (pa & PMAP_NVC)
1.4 eeh 1839: #endif
1.127 chs 1840: uncached = 1;
1.177 martin 1841: if (uncached) {
1.127 chs 1842: ENTER_STAT(ci);
1.177 martin 1843: }
1.105 eeh 1844: tte.data = TSB_DATA(0, size, pa, pm == pmap_kernel(),
1.156 pk 1845: flags & VM_PROT_WRITE, !(pa & PMAP_NC),
1.127 chs 1846: uncached, 1, pa & PMAP_LITTLE);
1.41 eeh 1847: #ifdef HWREF
1.127 chs 1848: if (prot & VM_PROT_WRITE)
1849: tte.data |= TLB_REAL_W;
1.143 chs 1850: if (prot & VM_PROT_EXECUTE)
1851: tte.data |= TLB_EXEC;
1.73 eeh 1852: #else
1853: /* If it needs ref accounting do nothing. */
1.119 eeh 1854: if (!(flags & VM_PROT_READ)) {
1.203 ad 1855: mutex_exit(&pmap_lock);
1.217 ad 1856: goto out;
1.73 eeh 1857: }
1.41 eeh 1858: #endif
1.119 eeh 1859: if (flags & VM_PROT_EXECUTE) {
1860: if ((flags & (VM_PROT_READ|VM_PROT_WRITE)) == 0)
1861: tte.data |= TLB_EXEC_ONLY|TLB_EXEC;
1862: else
1863: tte.data |= TLB_EXEC;
1864: }
1.127 chs 1865: if (wired)
1866: tte.data |= TLB_TSB_LOCK;
1867: ptp = 0;
1868:
1869: retry:
1870: i = pseg_set(pm, va, tte.data, ptp);
1.281 martin 1871: 	if (i == -2) {
 1872: 		if (flags & PMAP_CANFAIL) {
 			/* don't leak pmap_lock or the spare pv on failure */
 			mutex_exit(&pmap_lock);
 			if (npv != NULL) {
 				npv->pv_next = freepv;
 				freepv = npv;
 			}
 			error = ENOMEM;
 			goto out;
 		}
 1874: 		panic("pmap_enter: invalid VA (inside hole)");
 1875: 	}
1.127 chs 1876: if (i & 4) {
1877: /* ptp used as L3 */
1878: KASSERT(ptp != 0);
1879: KASSERT((i & 3) == 0);
1880: ptpg = PHYS_TO_VM_PAGE(ptp);
1881: if (ptpg) {
1882: ptpg->offset = (uint64_t)va & (0xfffffLL << 23);
1.218 ad 1883: TAILQ_INSERT_TAIL(&pm->pm_obj.memq, ptpg, listq.queue);
1.127 chs 1884: } else {
1885: KASSERT(pm == pmap_kernel());
1886: }
1887: }
1888: if (i & 2) {
1889: /* ptp used as L2 */
1890: KASSERT(ptp != 0);
1891: KASSERT((i & 4) == 0);
1892: ptpg = PHYS_TO_VM_PAGE(ptp);
1893: if (ptpg) {
1894: ptpg->offset = (((uint64_t)va >> 43) & 0x3ffLL) << 13;
1.218 ad 1895: TAILQ_INSERT_TAIL(&pm->pm_obj.memq, ptpg, listq.queue);
1.127 chs 1896: } else {
1897: KASSERT(pm == pmap_kernel());
1898: }
1899: }
1900: if (i & 1) {
1901: KASSERT((i & 4) == 0);
1902: ptp = 0;
1903: if (!pmap_get_page(&ptp)) {
1.217 ad 1904: mutex_exit(&pmap_lock);
1.127 chs 1905: if (flags & PMAP_CANFAIL) {
1906: if (npv != NULL) {
1.217 ad 1907: /* free it */
1908: npv->pv_next = freepv;
1909: freepv = npv;
1.127 chs 1910: }
1.217 ad 1911: error = ENOMEM;
1912: goto out;
1.127 chs 1913: } else {
1.111 eeh 1914: panic("pmap_enter: no pages");
1.127 chs 1915: }
1.111 eeh 1916: }
1.127 chs 1917: ENTER_STAT(ptpneeded);
1918: goto retry;
1919: }
1920: if (ptp && i == 0) {
1921: /* We allocated a spare page but didn't use it. Free it. */
1922: printf("pmap_enter: freeing unused page %llx\n",
1923: (long long)ptp);
1.262 mrg 1924: pmap_free_page_noflush(ptp);
1.127 chs 1925: }
1926: if (dopv) {
1927: pmap_enter_pv(pm, va, pa, pg, npv);
1.1 eeh 1928: }
1929:
1.203 ad 1930: mutex_exit(&pmap_lock);
1.278 mrg 1931: #ifdef PMAP_DEBUG
1.1 eeh 1932: i = ptelookup_va(va);
1.125 mrg 1933: if (pmapdebug & PDB_ENTER)
1934: prom_printf("pmap_enter: va=%08x data=%08x:%08x "
1.156 pk 1935: "tsb_dmmu[%d]=%08x\n", va, (int)(tte.data>>32),
1.210 martin 1936: (int)tte.data, i, &curcpu()->ci_tsb_dmmu[i]);
1937: if (pmapdebug & PDB_MMU_STEAL && curcpu()->ci_tsb_dmmu[i].data) {
1.125 mrg 1938: prom_printf("pmap_enter: evicting entry tag=%x:%08x "
1.156 pk 1939: "data=%08x:%08x tsb_dmmu[%d]=%08x\n",
1.210 martin 1940: (int)(curcpu()->ci_tsb_dmmu[i].tag>>32), (int)curcpu()->ci_tsb_dmmu[i].tag,
1941: (int)(curcpu()->ci_tsb_dmmu[i].data>>32), (int)curcpu()->ci_tsb_dmmu[i].data, i,
1942: &curcpu()->ci_tsb_dmmu[i]);
1.156 pk 1943: prom_printf("with va=%08x data=%08x:%08x tsb_dmmu[%d]=%08x\n",
1.143 chs 1944: va, (int)(tte.data>>32), (int)tte.data, i,
1.210 martin 1945: &curcpu()->ci_tsb_dmmu[i]);
1.1 eeh 1946: }
1947: #endif
1.143 chs 1948:
1949: if (flags & (VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE)) {
1.127 chs 1950:
1951: /*
1952: * preload the TSB with the new entry,
1953: * since we're going to need it immediately anyway.
1954: */
1955:
1.210 martin 1956: KASSERT(pmap_ctx(pm)>=0);
1.127 chs 1957: i = ptelookup_va(va);
1.210 martin 1958: tte.tag = TSB_TAG(0, pmap_ctx(pm), va);
1.127 chs 1959: s = splhigh();
1.216 martin 1960: if (wasmapped && pmap_is_on_mmu(pm)) {
1.212 nakayama 1961: tsb_invalidate(va, pm);
1.144 chs 1962: }
1.143 chs 1963: if (flags & (VM_PROT_READ | VM_PROT_WRITE)) {
1.210 martin 1964: curcpu()->ci_tsb_dmmu[i].tag = tte.tag;
1.166 perry 1965: __asm volatile("" : : : "memory");
1.210 martin 1966: curcpu()->ci_tsb_dmmu[i].data = tte.data;
1.143 chs 1967: }
1968: if (flags & VM_PROT_EXECUTE) {
1.210 martin 1969: curcpu()->ci_tsb_immu[i].tag = tte.tag;
1.166 perry 1970: __asm volatile("" : : : "memory");
1.210 martin 1971: curcpu()->ci_tsb_immu[i].data = tte.data;
1.143 chs 1972: }
1.127 chs 1973:
1974: /*
1975: * it's only necessary to flush the TLB if this page was
1976: * previously mapped, but for some reason it's a lot faster
1977: * for the fork+exit microbenchmark if we always do it.
1978: */
1979:
1.210 martin 1980: KASSERT(pmap_ctx(pm)>=0);
1.212 nakayama 1981: #ifdef MULTIPROCESSOR
1.216 martin 1982: if (wasmapped && pmap_is_on_mmu(pm))
1.212 nakayama 1983: tlb_flush_pte(va, pm);
1.252 mrg 1984: else
1985: sp_tlb_flush_pte(va, pmap_ctx(pm));
1.212 nakayama 1986: #else
1987: tlb_flush_pte(va, pm);
1988: #endif
1.127 chs 1989: splx(s);
1.216 martin 1990: } else if (wasmapped && pmap_is_on_mmu(pm)) {
1.41 eeh 1991: /* Force reload -- protections may be changed */
1.210 martin 1992: KASSERT(pmap_ctx(pm)>=0);
1.212 nakayama 1993: tsb_invalidate(va, pm);
1994: tlb_flush_pte(va, pm);
1.30 eeh 1995: }
1.70 eeh 1996:
1.1 eeh 1997: /* We will let the fast mmu miss interrupt load the new translation */
1998: pv_check();
1.217 ad 1999: out:
2000: /* Catch up on deferred frees. */
2001: for (; freepv != NULL; freepv = npv) {
2002: npv = freepv->pv_next;
2003: pool_cache_put(&pmap_pv_cache, freepv);
2004: }
2005: return error;
1.1 eeh 2006: }
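
/*
 * Hedged usage sketch (not part of this pmap): how an MI caller is
 * expected to drive pmap_enter() with PMAP_CANFAIL, waiting for the
 * pagedaemon and retrying instead of panicking.  The function name
 * and argument set are assumptions made for illustration only.
 */
#ifdef NOTYET
static int
example_enter_retry(struct pmap *pm, vaddr_t va, paddr_t pa, vm_prot_t prot)
{
	int error;

	for (;;) {
		error = pmap_enter(pm, va, pa, prot, prot | PMAP_CANFAIL);
		if (error != ENOMEM)
			break;
		uvm_wait("pmapenter");	/* wait for free pages, then retry */
	}
	pmap_update(pm);
	return error;
}
#endif	/* NOTYET */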
2007:
1.127 chs 2008: void
1.232 dsl 2009: pmap_remove_all(struct pmap *pm)
1.127 chs 2010: {
1.212 nakayama 2011: #ifdef MULTIPROCESSOR
2012: struct cpu_info *ci;
1.258 mrg 2013: sparc64_cpuset_t pmap_cpus_active;
1.212 nakayama 2014: #endif
1.127 chs 2015:
2016: if (pm == pmap_kernel()) {
2017: return;
2018: }
1.188 martin 2019: write_user_windows();
1.127 chs 2020: pm->pm_refs = 0;
1.258 mrg 2021:
1.262 mrg 2022: /*
2023: * XXXMRG: pmap_destroy() does exactly the same dance here.
2024: * surely one of them isn't necessary?
2025: */
1.212 nakayama 2026: #ifdef MULTIPROCESSOR
1.258 mrg 2027: CPUSET_CLEAR(pmap_cpus_active);
1.212 nakayama 2028: for (ci = cpus; ci != NULL; ci = ci->ci_next) {
1.262 mrg 2029: /* XXXMRG: Move the lock inside one or both tests? */
2030: mutex_enter(&ci->ci_ctx_lock);
1.258 mrg 2031: if (CPUSET_HAS(cpus_active, ci->ci_index)) {
1.262 mrg 2032: if (pm->pm_ctx[ci->ci_index] > 0) {
1.258 mrg 2033: CPUSET_ADD(pmap_cpus_active, ci->ci_index);
1.262 mrg 2034: ctx_free(pm, ci);
2035: }
1.258 mrg 2036: }
1.262 mrg 2037: mutex_exit(&ci->ci_ctx_lock);
1.212 nakayama 2038: }
2039: #else
1.262 mrg 2040: if (pmap_ctx(pm)) {
2041: mutex_enter(&curcpu()->ci_ctx_lock);
2042: ctx_free(pm, curcpu());
2043: mutex_exit(&curcpu()->ci_ctx_lock);
2044: }
1.212 nakayama 2045: #endif
1.258 mrg 2046:
1.188 martin 2047: REMOVE_STAT(flushes);
1.262 mrg 2048: /*
2049: * XXXMRG: couldn't we do something less severe here, and
2050: * only flush the right context on each CPU?
2051: */
1.270 mrg 2052: blast_dcache();
1.127 chs 2053: }
2054:
1.1 eeh 2055: /*
2056: * Remove the given range of mapping entries.
2057: */
2058: void
1.233 dsl 2059: pmap_remove(struct pmap *pm, vaddr_t va, vaddr_t endva)
1.1 eeh 2060: {
2061: int64_t data;
1.127 chs 2062: paddr_t pa;
2063: struct vm_page *pg;
1.217 ad 2064: pv_entry_t pv, freepv = NULL;
1.158 martin 2065: int rv;
1.185 thorpej 2066: bool flush = FALSE;
1.1 eeh 2067:
1.156 pk 2068: /*
1.1 eeh 2069: * In here we should check each pseg and if there are no more entries,
 2070: 	 * free it.  It's just that linear scans of 8K pages get expensive.
2071: */
2072:
1.127 chs 2073: KASSERT(pm != pmap_kernel() || endva < INTSTACK || va > EINTSTACK);
2074: KASSERT(pm != pmap_kernel() || endva < kdata || va > ekdata);
1.102 eeh 2075:
1.203 ad 2076: mutex_enter(&pmap_lock);
1.127 chs 2077: DPRINTF(PDB_REMOVE, ("pmap_remove(pm=%p, va=%p, endva=%p):", pm,
2078: (void *)(u_long)va, (void *)(u_long)endva));
2079: REMOVE_STAT(calls);
1.1 eeh 2080:
2081: /* Now do the real work */
1.138 thorpej 2082: for (; va < endva; va += PAGE_SIZE) {
1.127 chs 2083: #ifdef DIAGNOSTIC
1.1 eeh 2084: /*
2085: * Is this part of the permanent 4MB mapping?
2086: */
1.156 pk 2087: if (pm == pmap_kernel() && va >= ktext &&
1.98 eeh 2088: va < roundup(ekdata, 4*MEG))
1.156 pk 2089: panic("pmap_remove: va=%08llx in locked TLB",
1.127 chs 2090: (long long)va);
1.4 eeh 2091: #endif
1.73 eeh 2092:
1.127 chs 2093: data = pseg_get(pm, va);
1.254 mrg 2094: if ((data & TLB_V) == 0) {
1.127 chs 2095: continue;
2096: }
2097:
2098: flush = TRUE;
2099: /* First remove the pv entry, if there is one */
2100: pa = data & TLB_PA_MASK;
2101: pg = PHYS_TO_VM_PAGE(pa);
2102: if (pg) {
2103: pv = pmap_remove_pv(pm, va, pg);
2104: if (pv != NULL) {
1.217 ad 2105: /* free it */
2106: pv->pv_next = freepv;
2107: freepv = pv;
1.1 eeh 2108: }
1.127 chs 2109: }
2110:
2111: /*
2112: * We need to flip the valid bit and
2113: * clear the access statistics.
2114: */
2115:
1.158 martin 2116: rv = pseg_set(pm, va, 0, 0);
2117: if (rv & 1)
2118: panic("pmap_remove: pseg_set needed spare, rv=%d!\n",
2119: rv);
2120:
1.127 chs 2121: DPRINTF(PDB_REMOVE, (" clearing seg %x pte %x\n",
2122: (int)va_to_seg(va), (int)va_to_pte(va)));
2123: REMOVE_STAT(removes);
2124:
1.216 martin 2125: if (pm != pmap_kernel() && !pmap_has_ctx(pm))
1.127 chs 2126: continue;
1.143 chs 2127:
2128: /*
1.262 mrg 2129: 		 * If the pmap is being torn down, don't bother flushing;
 2130: 		 * we have already done so.
1.143 chs 2131: */
2132:
2133: if (!pm->pm_refs)
2134: continue;
1.127 chs 2135:
2136: /*
2137: * Here we assume nothing can get into the TLB
2138: * unless it has a PTE.
2139: */
2140:
1.210 martin 2141: KASSERT(pmap_ctx(pm)>=0);
1.212 nakayama 2142: tsb_invalidate(va, pm);
1.143 chs 2143: REMOVE_STAT(tflushes);
1.212 nakayama 2144: tlb_flush_pte(va, pm);
1.255 mrg 2145: dcache_flush_page_all(pa);
1.4 eeh 2146: }
1.255 mrg 2147: if (flush && pm->pm_refs)
1.127 chs 2148: REMOVE_STAT(flushes);
2149: DPRINTF(PDB_REMOVE, ("\n"));
1.1 eeh 2150: pv_check();
1.203 ad 2151: mutex_exit(&pmap_lock);
1.217 ad 2152:
2153: /* Catch up on deferred frees. */
2154: for (; freepv != NULL; freepv = pv) {
2155: pv = freepv->pv_next;
2156: pool_cache_put(&pmap_pv_cache, freepv);
2157: }
1.1 eeh 2158: }
2159:
2160: /*
2161: * Change the protection on the specified range of this pmap.
2162: */
2163: void
1.233 dsl 2164: pmap_protect(struct pmap *pm, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
1.1 eeh 2165: {
1.4 eeh 2166: paddr_t pa;
1.1 eeh 2167: int64_t data;
1.127 chs 2168: struct vm_page *pg;
2169: pv_entry_t pv;
1.158 martin 2170: int rv;
1.143 chs 2171:
1.127 chs 2172: KASSERT(pm != pmap_kernel() || eva < INTSTACK || sva > EINTSTACK);
2173: KASSERT(pm != pmap_kernel() || eva < kdata || sva > ekdata);
1.102 eeh 2174:
1.1 eeh 2175: if (prot == VM_PROT_NONE) {
2176: pmap_remove(pm, sva, eva);
2177: return;
2178: }
1.156 pk 2179:
1.240 nakayama 2180: sva = trunc_page(sva);
1.277 mrg 2181: mutex_enter(&pmap_lock);
1.138 thorpej 2182: for (; sva < eva; sva += PAGE_SIZE) {
1.278 mrg 2183: #ifdef PMAP_DEBUG
1.1 eeh 2184: /*
2185: * Is this part of the permanent 4MB mapping?
2186: */
1.156 pk 2187: if (pm == pmap_kernel() && sva >= ktext &&
1.127 chs 2188: sva < roundup(ekdata, 4 * MEG)) {
1.277 mrg 2189: mutex_exit(&pmap_lock);
1.156 pk 2190: prom_printf("pmap_protect: va=%08x in locked TLB\n",
1.127 chs 2191: sva);
1.156 pk 2192: prom_abort();
1.1 eeh 2193: return;
2194: }
1.127 chs 2195: #endif
2196: DPRINTF(PDB_CHANGEPROT, ("pmap_protect: va %p\n",
2197: (void *)(u_long)sva));
2198: data = pseg_get(pm, sva);
2199: if ((data & TLB_V) == 0) {
2200: continue;
2201: }
2202:
2203: pa = data & TLB_PA_MASK;
2204: DPRINTF(PDB_CHANGEPROT|PDB_REF,
2205: ("pmap_protect: va=%08x data=%08llx "
1.156 pk 2206: "seg=%08x pte=%08x\n",
1.127 chs 2207: (u_int)sva, (long long)pa, (int)va_to_seg(sva),
2208: (int)va_to_pte(sva)));
1.1 eeh 2209:
1.127 chs 2210: pg = PHYS_TO_VM_PAGE(pa);
2211: if (pg) {
1.266 uebayasi 2212: struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
2213:
1.127 chs 2214: /* Save REF/MOD info */
1.266 uebayasi 2215: pv = &md->mdpg_pvh;
1.127 chs 2216: if (data & TLB_ACCESS)
2217: pv->pv_va |= PV_REF;
1.156 pk 2218: if (data & TLB_MODIFY)
1.127 chs 2219: pv->pv_va |= PV_MOD;
2220: }
2221:
2222: /* Just do the pmap and TSB, not the pv_list */
1.143 chs 2223: if ((prot & VM_PROT_WRITE) == 0)
2224: data &= ~(TLB_W|TLB_REAL_W);
2225: if ((prot & VM_PROT_EXECUTE) == 0)
2226: data &= ~(TLB_EXEC);
2227:
1.158 martin 2228: rv = pseg_set(pm, sva, data, 0);
2229: if (rv & 1)
2230: panic("pmap_protect: pseg_set needs spare! rv=%d\n",
2231: rv);
1.73 eeh 2232:
1.216 martin 2233: if (pm != pmap_kernel() && !pmap_has_ctx(pm))
1.127 chs 2234: continue;
1.143 chs 2235:
1.210 martin 2236: KASSERT(pmap_ctx(pm)>=0);
1.212 nakayama 2237: tsb_invalidate(sva, pm);
2238: tlb_flush_pte(sva, pm);
1.1 eeh 2239: }
2240: pv_check();
1.277 mrg 2241: mutex_exit(&pmap_lock);
1.1 eeh 2242: }
2243:
2244: /*
2245: * Extract the physical page address associated
2246: * with the given map/virtual_address pair.
2247: */
1.185 thorpej 2248: bool
1.232 dsl 2249: pmap_extract(struct pmap *pm, vaddr_t va, paddr_t *pap)
1.1 eeh 2250: {
1.4 eeh 2251: paddr_t pa;
1.197 martin 2252: int64_t data = 0;
1.1 eeh 2253:
1.127 chs 2254: if (pm == pmap_kernel() && va >= kdata && va < roundup(ekdata, 4*MEG)) {
1.56 eeh 2255: /* Need to deal w/locked TLB entry specially. */
1.168 cdi 2256: pa = pmap_kextract(va);
1.127 chs 2257: DPRINTF(PDB_EXTRACT, ("pmap_extract: va=%lx pa=%llx\n",
2258: (u_long)va, (unsigned long long)pa));
1.225 nakayama 2259: if (pap != NULL)
2260: *pap = pa;
2261: return TRUE;
1.127 chs 2262: } else if (pm == pmap_kernel() && va >= ktext && va < ektext) {
1.1 eeh 2263: /* Need to deal w/locked TLB entry specially. */
1.168 cdi 2264: pa = pmap_kextract(va);
1.127 chs 2265: DPRINTF(PDB_EXTRACT, ("pmap_extract: va=%lx pa=%llx\n",
2266: (u_long)va, (unsigned long long)pa));
1.225 nakayama 2267: if (pap != NULL)
2268: *pap = pa;
2269: return TRUE;
1.178 mrg 2270: } else if (pm == pmap_kernel() && va >= INTSTACK && va < (INTSTACK + 64*KB)) {
1.174 mrg 2271: pa = (paddr_t)(curcpu()->ci_paddr - INTSTACK + va);
1.178 mrg 2272: DPRINTF(PDB_EXTRACT, ("pmap_extract (intstack): va=%lx pa=%llx\n",
2273: (u_long)va, (unsigned long long)pa));
1.183 martin 2274: if (pap != NULL)
2275: *pap = pa;
2276: return TRUE;
1.127 chs 2277: } else {
1.197 martin 2278: data = pseg_get(pm, va);
2279: pa = data & TLB_PA_MASK;
1.1 eeh 2280: if (pmapdebug & PDB_EXTRACT) {
1.127 chs 2281: paddr_t npa = ldxa((vaddr_t)&pm->pm_segs[va_to_seg(va)],
2282: ASI_PHYS_CACHED);
2283: printf("pmap_extract: va=%p segs[%ld]=%llx",
2284: (void *)(u_long)va, (long)va_to_seg(va),
2285: (unsigned long long)npa);
1.123 eeh 2286: if (npa) {
1.127 chs 2287: npa = (paddr_t)
2288: ldxa((vaddr_t)&((paddr_t *)(u_long)npa)
2289: [va_to_dir(va)],
2290: ASI_PHYS_CACHED);
2291: printf(" segs[%ld][%ld]=%lx",
2292: (long)va_to_seg(va),
2293: (long)va_to_dir(va), (long)npa);
1.8 eeh 2294: }
1.123 eeh 2295: if (npa) {
1.127 chs 2296: npa = (paddr_t)
2297: ldxa((vaddr_t)&((paddr_t *)(u_long)npa)
2298: [va_to_pte(va)],
2299: ASI_PHYS_CACHED);
2300: printf(" segs[%ld][%ld][%ld]=%lx",
1.156 pk 2301: (long)va_to_seg(va),
1.127 chs 2302: (long)va_to_dir(va),
2303: (long)va_to_pte(va), (long)npa);
1.8 eeh 2304: }
1.25 eeh 2305: printf(" pseg_get: %lx\n", (long)pa);
1.1 eeh 2306: }
2307: }
1.198 martin 2308: if ((data & TLB_V) == 0)
2309: return (FALSE);
1.42 thorpej 2310: if (pap != NULL)
1.127 chs 2311: *pap = pa + (va & PGOFSET);
1.42 thorpej 2312: return (TRUE);
1.1 eeh 2313: }
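
/*
 * Minimal usage sketch for pmap_extract() (an assumed caller, not
 * code from this file): translate a kernel VA into its backing PA,
 * failing cleanly when no valid mapping exists.
 */
#ifdef NOTYET
static int
example_kva_to_pa(vaddr_t va, paddr_t *pap)
{
	paddr_t pa;

	if (!pmap_extract(pmap_kernel(), va, &pa))
		return EFAULT;	/* no valid mapping at va */
	*pap = pa;		/* pa already includes the page offset */
	return 0;
}
#endif	/* NOTYET */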
2314:
2315: /*
1.143 chs 2316: * Change protection on a kernel address.
2317: * This should only be called from MD code.
2318: */
2319: void
1.232 dsl 2320: pmap_kprotect(vaddr_t va, vm_prot_t prot)
1.143 chs 2321: {
2322: struct pmap *pm = pmap_kernel();
2323: int64_t data;
1.158 martin 2324: int rv;
1.143 chs 2325:
2326: data = pseg_get(pm, va);
1.158 martin 2327: KASSERT(data & TLB_V);
1.143 chs 2328: if (prot & VM_PROT_WRITE) {
2329: data |= (TLB_W|TLB_REAL_W);
2330: } else {
2331: data &= ~(TLB_W|TLB_REAL_W);
2332: }
1.158 martin 2333: rv = pseg_set(pm, va, data, 0);
2334: if (rv & 1)
2335: panic("pmap_kprotect: pseg_set needs spare! rv=%d", rv);
1.210 martin 2336: KASSERT(pmap_ctx(pm)>=0);
1.212 nakayama 2337: tsb_invalidate(va, pm);
2338: tlb_flush_pte(va, pm);
1.143 chs 2339: }
2340:
2341: /*
1.1 eeh 2342: * Return the number bytes that pmap_dumpmmu() will dump.
2343: */
2344: int
1.234 cegger 2345: pmap_dumpsize(void)
1.1 eeh 2346: {
1.2 eeh 2347: int sz;
2348:
2349: sz = ALIGN(sizeof(kcore_seg_t)) + ALIGN(sizeof(cpu_kcore_hdr_t));
1.295 martin 2350: sz += kernel_dtlb_slots * sizeof(struct cpu_kcore_4mbseg);
1.168 cdi 2351: sz += phys_installed_size * sizeof(phys_ram_seg_t);
1.2 eeh 2352:
2353: return btodb(sz + DEV_BSIZE - 1);
1.1 eeh 2354: }
2355:
2356: /*
2357: * Write the mmu contents to the dump device.
2358: * This gets appended to the end of a crash dump since
 2359: * there is no in-core copy of kernel memory mappings.
1.2 eeh 2360: *
2361: * Write the core dump headers and MD data to the dump device.
2362: * We dump the following items:
1.156 pk 2363: *
1.2 eeh 2364: *	kcore_seg_t		 (MI header defined in <sys/kcore.h>)
 2365: *	cpu_kcore_hdr_t		 (MD header defined in <machine/kcore.h>)
1.168 cdi 2366: * phys_ram_seg_t[phys_installed_size] physical memory segments
1.1 eeh 2367: */
2368: int
1.186 christos 2369: pmap_dumpmmu(int (*dump)(dev_t, daddr_t, void *, size_t), daddr_t blkno)
1.1 eeh 2370: {
1.28 eeh 2371: kcore_seg_t *kseg;
2372: cpu_kcore_hdr_t *kcpu;
1.2 eeh 2373: phys_ram_seg_t memseg;
1.201 martin 2374: struct cpu_kcore_4mbseg ktlb;
1.127 chs 2375: int error = 0;
1.201 martin 2376: int i;
1.127 chs 2377: int buffer[dbtob(1) / sizeof(int)];
2378: int *bp, *ep;
1.2 eeh 2379:
2380: #define EXPEDITE(p,n) do { \
1.141 nakayama 2381: int *sp = (void *)(p); \
1.2 eeh 2382: int sz = (n); \
2383: while (sz > 0) { \
2384: *bp++ = *sp++; \
2385: if (bp >= ep) { \
2386: error = (*dump)(dumpdev, blkno, \
1.186 christos 2387: (void *)buffer, dbtob(1)); \
1.2 eeh 2388: if (error != 0) \
2389: return (error); \
2390: ++blkno; \
2391: bp = buffer; \
2392: } \
2393: sz -= 4; \
2394: } \
2395: } while (0)
2396:
2397: /* Setup bookkeeping pointers */
2398: bp = buffer;
2399: ep = &buffer[sizeof(buffer) / sizeof(buffer[0])];
2400:
2401: /* Fill in MI segment header */
1.28 eeh 2402: kseg = (kcore_seg_t *)bp;
2403: CORE_SETMAGIC(*kseg, KCORE_MAGIC, MID_MACHINE, CORE_CPU);
2404: kseg->c_size = dbtob(pmap_dumpsize()) - ALIGN(sizeof(kcore_seg_t));
1.2 eeh 2405:
2406: /* Fill in MD segment header (interpreted by MD part of libkvm) */
1.28 eeh 2407: kcpu = (cpu_kcore_hdr_t *)((long)bp + ALIGN(sizeof(kcore_seg_t)));
1.271 mrg 2408: kcpu->cputype = cputyp;
1.170 cdi 2409: kcpu->kernbase = (uint64_t)KERNBASE;
2410: kcpu->cpubase = (uint64_t)CPUINFO_VA;
1.70 eeh 2411:
2412: /* Describe the locked text segment */
1.170 cdi 2413: kcpu->ktextbase = (uint64_t)ktext;
2414: kcpu->ktextp = (uint64_t)ktextp;
1.201 martin 2415: kcpu->ktextsz = (uint64_t)ektext - ktext;
2416: if (kcpu->ktextsz > 4*MEG)
2417: kcpu->ktextsz = 0; /* old version can not work */
1.70 eeh 2418:
2419: /* Describe locked data segment */
1.170 cdi 2420: kcpu->kdatabase = (uint64_t)kdata;
2421: kcpu->kdatap = (uint64_t)kdatap;
2422: kcpu->kdatasz = (uint64_t)ekdatap - kdatap;
1.70 eeh 2423:
1.201 martin 2424: /* new version of locked segments description */
2425: kcpu->newmagic = SPARC64_KCORE_NEWMAGIC;
1.295 martin 2426: kcpu->num4mbsegs = kernel_dtlb_slots;
1.201 martin 2427: kcpu->off4mbsegs = ALIGN(sizeof(cpu_kcore_hdr_t));
2428:
1.209 martin 2429: /* description of per-cpu mappings */
2430: kcpu->numcpuinfos = sparc_ncpus;
2431: kcpu->percpusz = 64 * 1024; /* used to be 128k for some time */
2432: kcpu->thiscpu = cpu_number(); /* which cpu is doing this dump */
2433: kcpu->cpusp = cpu0paddr - 64 * 1024 * sparc_ncpus;
2434:
1.70 eeh 2435: /* Now the memsegs */
1.168 cdi 2436: kcpu->nmemseg = phys_installed_size;
1.201 martin 2437: kcpu->memsegoffset = kcpu->off4mbsegs
1.295 martin 2438: + kernel_dtlb_slots * sizeof(struct cpu_kcore_4mbseg);
1.70 eeh 2439:
2440: /* Now we need to point this at our kernel pmap. */
1.28 eeh 2441: kcpu->nsegmap = STSZ;
1.170 cdi 2442: kcpu->segmapoffset = (uint64_t)pmap_kernel()->pm_physaddr;
1.2 eeh 2443:
2444: /* Note: we have assumed everything fits in buffer[] so far... */
1.28 eeh 2445: bp = (int *)((long)kcpu + ALIGN(sizeof(cpu_kcore_hdr_t)));
1.2 eeh 2446:
1.201 martin 2447: /* write locked kernel 4MB TLBs */
1.295 martin 2448: for (i = 0; i < kernel_dtlb_slots; i++) {
1.201 martin 2449: ktlb.va = kernel_tlbs[i].te_va;
2450: ktlb.pa = kernel_tlbs[i].te_pa;
2451: EXPEDITE(&ktlb, sizeof(ktlb));
2452: }
2453:
2454: /* write memsegs */
1.168 cdi 2455: for (i = 0; i < phys_installed_size; i++) {
2456: memseg.start = phys_installed[i].start;
2457: memseg.size = phys_installed[i].size;
1.2 eeh 2458: EXPEDITE(&memseg, sizeof(phys_ram_seg_t));
2459: }
2460:
2461: if (bp != buffer)
1.186 christos 2462: error = (*dump)(dumpdev, blkno++, (void *)buffer, dbtob(1));
1.2 eeh 2463:
2464: return (error);
1.1 eeh 2465: }
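
/*
 * For reference, a sketch (derived from the code above) of the image
 * that pmap_dumpmmu() appends to the crash dump:
 *
 *	kcore_seg_t					(MI header)
 *	cpu_kcore_hdr_t					(MD header)
 *	struct cpu_kcore_4mbseg [kernel_dtlb_slots]	at off4mbsegs
 *	phys_ram_seg_t [phys_installed_size]		at memsegoffset
 *
 * all streamed through EXPEDITE() in dbtob(1)-sized blocks.
 */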
2466:
2467: /*
1.184 wiz 2468: * Determine (non)existence of physical page
1.1 eeh 2469: */
1.127 chs 2470: int
1.171 cdi 2471: pmap_pa_exists(paddr_t pa)
1.1 eeh 2472: {
1.168 cdi 2473: int i;
1.1 eeh 2474:
2475: /* Just go through physical memory list & see if we're there */
1.168 cdi 2476: for (i = 0; i < phys_installed_size; i++) {
2477: if ((phys_installed[i].start <= pa) &&
2478: (phys_installed[i].start +
2479: phys_installed[i].size >= pa))
1.1 eeh 2480: return 1;
1.168 cdi 2481: }
1.1 eeh 2482: return 0;
2483: }
2484:
2485: /*
2486: * Lookup the appropriate TSB entry.
2487: *
2488: * Here is the full official pseudo code:
2489: *
2490: */
2491:
2492: #ifdef NOTYET
2493: int64 GenerateTSBPointer(
2494: int64 va, /* Missing VA */
2495: PointerType type, /* 8K_POINTER or 16K_POINTER */
2496: int64 TSBBase, /* TSB Register[63:13] << 13 */
2497: Boolean split, /* TSB Register[12] */
2498: int TSBSize) /* TSB Register[2:0] */
2499: {
2500: int64 vaPortion;
2501: int64 TSBBaseMask;
2502: int64 splitMask;
1.156 pk 2503:
1.1 eeh 2504: /* TSBBaseMask marks the bits from TSB Base Reg */
2505: TSBBaseMask = 0xffffffffffffe000 <<
2506: (split? (TSBsize + 1) : TSBsize);
2507:
2508: /* Shift va towards lsb appropriately and */
2509: /* zero out the original va page offset */
2510: vaPortion = (va >> ((type == 8K_POINTER)? 9: 12)) &
2511: 0xfffffffffffffff0;
1.156 pk 2512:
1.1 eeh 2513: if (split) {
2514: /* There's only one bit in question for split */
2515: splitMask = 1 << (13 + TSBsize);
2516: if (type == 8K_POINTER)
2517: /* Make sure we're in the lower half */
2518: vaPortion &= ~splitMask;
2519: else
2520: /* Make sure we're in the upper half */
2521: vaPortion |= splitMask;
2522: }
2523: return (TSBBase & TSBBaseMask) | (vaPortion & ~TSBBaseMask);
2524: }
2525: #endif
2526: /*
2527: * Of course, since we are not using a split TSB or variable page sizes,
1.156 pk 2528: * we can optimize this a bit.
1.1 eeh 2529: *
2530: * The following only works for a unified 8K TSB. It will find the slot
2531: * for that particular va and return it. IT MAY BE FOR ANOTHER MAPPING!
2532: */
2533: int
1.170 cdi 2534: ptelookup_va(vaddr_t va)
1.1 eeh 2535: {
1.9 eeh 2536: long tsbptr;
1.127 chs 2537: #define TSBBASEMASK (0xffffffffffffe000LL << tsbsize)
1.1 eeh 2538:
1.127 chs 2539: tsbptr = (((va >> 9) & 0xfffffffffffffff0LL) & ~TSBBASEMASK);
2540: return (tsbptr / sizeof(pte_t));
1.1 eeh 2541: }
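
/*
 * Worked example (assuming the usual tsbsize == 0, i.e. a 512-entry
 * unified TSB, and 16-byte pte_t entries): TSBBASEMASK is then
 * 0xffffffffffffe000, so the expression above keeps only bits <12:4>
 * of (va >> 9), and the whole computation reduces to
 *
 *	index = (va >> 13) & 0x1ff;
 *
 * e.g. va 0x4000 and va 0x40004000 both hash to slot 2, which is why
 * the entry found there "MAY BE FOR ANOTHER MAPPING".
 */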
2542:
1.5 eeh 2543: /*
2544: * Do whatever is needed to sync the MOD/REF flags
2545: */
2546:
1.185 thorpej 2547: bool
1.232 dsl 2548: pmap_clear_modify(struct vm_page *pg)
1.1 eeh 2549: {
1.266 uebayasi 2550: struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
1.127 chs 2551: pv_entry_t pv;
1.158 martin 2552: int rv;
1.143 chs 2553: int changed = 0;
1.51 mrg 2554: #ifdef DEBUG
1.43 eeh 2555: int modified = 0;
1.127 chs 2556:
2557: DPRINTF(PDB_CHANGEPROT|PDB_REF, ("pmap_clear_modify(%p)\n", pg));
1.1 eeh 2558:
1.43 eeh 2559: modified = pmap_is_modified(pg);
2560: #endif
1.203 ad 2561: mutex_enter(&pmap_lock);
1.1 eeh 2562: /* Clear all mappings */
1.266 uebayasi 2563: pv = &md->mdpg_pvh;
1.4 eeh 2564: #ifdef DEBUG
2565: if (pv->pv_va & PV_MOD)
2566: pv->pv_va |= PV_WE; /* Remember this was modified */
2567: #endif
1.225 nakayama 2568: if (pv->pv_va & PV_MOD) {
1.4 eeh 2569: changed |= 1;
1.225 nakayama 2570: pv->pv_va &= ~PV_MOD;
2571: }
1.127 chs 2572: #ifdef DEBUG
1.17 eeh 2573: if (pv->pv_next && !pv->pv_pmap) {
2574: printf("pmap_clear_modify: npv but no pmap for pv %p\n", pv);
2575: Debugger();
2576: }
2577: #endif
1.137 thorpej 2578: if (pv->pv_pmap != NULL) {
1.1 eeh 2579: for (; pv; pv = pv->pv_next) {
2580: int64_t data;
1.127 chs 2581: struct pmap *pmap = pv->pv_pmap;
2582: vaddr_t va = pv->pv_va & PV_VAMASK;
1.4 eeh 2583:
2584: /* First clear the mod bit in the PTE and make it R/O */
1.127 chs 2585: data = pseg_get(pmap, va);
1.158 martin 2586: KASSERT(data & TLB_V);
1.1 eeh 2587: /* Need to both clear the modify and write bits */
1.127 chs 2588: if (data & TLB_MODIFY)
1.4 eeh 2589: changed |= 1;
1.41 eeh 2590: #ifdef HWREF
1.110 chs 2591: data &= ~(TLB_MODIFY|TLB_W);
1.41 eeh 2592: #else
2593: data &= ~(TLB_MODIFY|TLB_W|TLB_REAL_W);
2594: #endif
1.158 martin 2595: rv = pseg_set(pmap, va, data, 0);
2596: if (rv & 1)
2597: printf("pmap_clear_modify: pseg_set needs"
2598: " spare! rv=%d\n", rv);
1.216 martin 2599: if (pmap_is_on_mmu(pmap)) {
1.210 martin 2600: KASSERT(pmap_ctx(pmap)>=0);
1.212 nakayama 2601: tsb_invalidate(va, pmap);
2602: tlb_flush_pte(va, pmap);
1.41 eeh 2603: }
1.4 eeh 2604: /* Then clear the mod bit in the pv */
1.225 nakayama 2605: if (pv->pv_va & PV_MOD) {
1.4 eeh 2606: changed |= 1;
1.225 nakayama 2607: pv->pv_va &= ~PV_MOD;
2608: }
1.4 eeh 2609: }
1.137 thorpej 2610: }
1.1 eeh 2611: pv_check();
1.203 ad 2612: mutex_exit(&pmap_lock);
1.5 eeh 2613: #ifdef DEBUG
1.127 chs 2614: DPRINTF(PDB_CHANGEPROT|PDB_REF, ("pmap_clear_modify: pg %p %s\n", pg,
2615: (changed ? "was modified" : "was not modified")));
1.280 hannken 2616: if (modified && modified != changed) {
1.127 chs 2617: printf("pmap_clear_modify: modified %d changed %d\n",
2618: modified, changed);
1.43 eeh 2619: Debugger();
1.280 hannken 2620: }
1.25 eeh 2621: #endif
1.4 eeh 2622: return (changed);
1.1 eeh 2623: }
2624:
1.185 thorpej 2625: bool
1.232 dsl 2626: pmap_clear_reference(struct vm_page *pg)
1.1 eeh 2627: {
1.266 uebayasi 2628: struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
1.127 chs 2629: pv_entry_t pv;
1.158 martin 2630: int rv;
1.143 chs 2631: int changed = 0;
1.294 mrg 2632: #if defined(DEBUG) && !defined(MULTIPROCESSOR)
1.43 eeh 2633: int referenced = 0;
1.51 mrg 2634: #endif
1.1 eeh 2635:
1.203 ad 2636: mutex_enter(&pmap_lock);
1.294 mrg 2637: #if defined(DEBUG) && !defined(MULTIPROCESSOR)
1.127 chs 2638: DPRINTF(PDB_CHANGEPROT|PDB_REF, ("pmap_clear_reference(%p)\n", pg));
1.221 martin 2639: referenced = pmap_is_referenced_locked(pg);
1.43 eeh 2640: #endif
1.1 eeh 2641: /* Clear all references */
1.266 uebayasi 2642: pv = &md->mdpg_pvh;
1.225 nakayama 2643: if (pv->pv_va & PV_REF) {
1.4 eeh 2644: changed |= 1;
1.225 nakayama 2645: pv->pv_va &= ~PV_REF;
2646: }
1.156 pk 2647: #ifdef DEBUG
1.17 eeh 2648: if (pv->pv_next && !pv->pv_pmap) {
2649: printf("pmap_clear_reference: npv but no pmap for pv %p\n", pv);
2650: Debugger();
2651: }
2652: #endif
1.25 eeh 2653: if (pv->pv_pmap != NULL) {
1.1 eeh 2654: for (; pv; pv = pv->pv_next) {
2655: int64_t data;
1.127 chs 2656: struct pmap *pmap = pv->pv_pmap;
2657: vaddr_t va = pv->pv_va & PV_VAMASK;
1.73 eeh 2658:
1.127 chs 2659: data = pseg_get(pmap, va);
1.158 martin 2660: KASSERT(data & TLB_V);
1.127 chs 2661: DPRINTF(PDB_CHANGEPROT,
2662: ("clearing ref pm:%p va:%p ctx:%lx data:%llx\n",
1.210 martin 2663: pmap, (void *)(u_long)va,
2664: (u_long)pmap_ctx(pmap),
1.127 chs 2665: (long long)data));
1.25 eeh 2666: #ifdef HWREF
1.225 nakayama 2667: if (data & TLB_ACCESS) {
1.4 eeh 2668: changed |= 1;
1.225 nakayama 2669: data &= ~TLB_ACCESS;
2670: }
1.25 eeh 2671: #else
2672: if (data < 0)
2673: changed |= 1;
2674: data = 0;
1.1 eeh 2675: #endif
1.158 martin 2676: rv = pseg_set(pmap, va, data, 0);
2677: if (rv & 1)
2678: panic("pmap_clear_reference: pseg_set needs"
2679: " spare! rv=%d\n", rv);
1.216 martin 2680: if (pmap_is_on_mmu(pmap)) {
1.210 martin 2681: KASSERT(pmap_ctx(pmap)>=0);
1.212 nakayama 2682: tsb_invalidate(va, pmap);
2683: tlb_flush_pte(va, pmap);
1.41 eeh 2684: }
1.225 nakayama 2685: if (pv->pv_va & PV_REF) {
1.4 eeh 2686: changed |= 1;
1.225 nakayama 2687: pv->pv_va &= ~PV_REF;
2688: }
1.25 eeh 2689: }
2690: }
1.255 mrg 2691: dcache_flush_page_all(VM_PAGE_TO_PHYS(pg));
1.1 eeh 2692: pv_check();
1.294 mrg 2693: #if defined(DEBUG) && !defined(MULTIPROCESSOR)
1.221 martin 2694: if (pmap_is_referenced_locked(pg)) {
1.266 uebayasi 2695: pv = &md->mdpg_pvh;
1.215 martin 2696: printf("pmap_clear_reference(): %p still referenced "
2697: "(pmap = %p, ctx = %d)\n", pg, pv->pv_pmap,
2698: pv->pv_pmap ? pmap_ctx(pv->pv_pmap) : 0);
1.5 eeh 2699: Debugger();
2700: }
1.127 chs 2701: DPRINTF(PDB_CHANGEPROT|PDB_REF,
2702: ("pmap_clear_reference: pg %p %s\n", pg,
2703: (changed ? "was referenced" : "was not referenced")));
1.43 eeh 2704: if (referenced != changed) {
1.127 chs 2705: printf("pmap_clear_reference: referenced %d changed %d\n",
2706: referenced, changed);
1.43 eeh 2707: Debugger();
1.203 ad 2708: } else {
2709: mutex_exit(&pmap_lock);
2710: return (referenced);
2711: }
1.25 eeh 2712: #endif
1.203 ad 2713: mutex_exit(&pmap_lock);
1.4 eeh 2714: return (changed);
1.1 eeh 2715: }
2716:
1.185 thorpej 2717: bool
1.232 dsl 2718: pmap_is_modified(struct vm_page *pg)
1.1 eeh 2719: {
1.266 uebayasi 2720: struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
1.127 chs 2721: pv_entry_t pv, npv;
1.221 martin 2722: bool res = false;
2723:
1.1 eeh 2724: /* Check if any mapping has been modified */
1.266 uebayasi 2725: pv = &md->mdpg_pvh;
1.127 chs 2726: if (pv->pv_va & PV_MOD)
1.221 martin 2727: res = true;
1.1 eeh 2728: #ifdef HWREF
1.127 chs 2729: #ifdef DEBUG
1.17 eeh 2730: if (pv->pv_next && !pv->pv_pmap) {
2731: printf("pmap_is_modified: npv but no pmap for pv %p\n", pv);
2732: Debugger();
2733: }
2734: #endif
1.221 martin 2735: if (!res && pv->pv_pmap != NULL) {
2736: mutex_enter(&pmap_lock);
2737: for (npv = pv; !res && npv && npv->pv_pmap;
1.127 chs 2738: npv = npv->pv_next) {
1.1 eeh 2739: int64_t data;
1.127 chs 2740:
2741: data = pseg_get(npv->pv_pmap, npv->pv_va & PV_VAMASK);
1.158 martin 2742: KASSERT(data & TLB_V);
1.127 chs 2743: if (data & TLB_MODIFY)
1.221 martin 2744: res = true;
1.127 chs 2745:
1.38 eeh 2746: /* Migrate modify info to head pv */
1.225 nakayama 2747: if (npv->pv_va & PV_MOD) {
1.221 martin 2748: res = true;
1.225 nakayama 2749: npv->pv_va &= ~PV_MOD;
2750: }
1.1 eeh 2751: }
1.221 martin 2752: /* Save modify info */
2753: if (res)
2754: pv->pv_va |= PV_MOD;
1.4 eeh 2755: #ifdef DEBUG
1.221 martin 2756: if (res)
2757: pv->pv_va |= PV_WE;
1.4 eeh 2758: #endif
1.221 martin 2759: mutex_exit(&pmap_lock);
2760: }
1.237 martin 2761: #endif
1.1 eeh 2762:
1.221 martin 2763: DPRINTF(PDB_CHANGEPROT|PDB_REF, ("pmap_is_modified(%p) = %d\n", pg,
2764: res));
1.1 eeh 2765: pv_check();
1.221 martin 2766: return res;
1.1 eeh 2767: }
2768:
1.221 martin 2769: /*
 2770: * Variant of pmap_is_referenced() where the caller already holds pmap_lock
2771: */
2772: static bool
2773: pmap_is_referenced_locked(struct vm_page *pg)
1.1 eeh 2774: {
1.266 uebayasi 2775: struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
1.127 chs 2776: pv_entry_t pv, npv;
1.221 martin 2777: bool res = false;
2778:
2779: KASSERT(mutex_owned(&pmap_lock));
1.1 eeh 2780:
2781: /* Check if any mapping has been referenced */
1.266 uebayasi 2782: pv = &md->mdpg_pvh;
1.127 chs 2783: if (pv->pv_va & PV_REF)
1.221 martin 2784: return true;
2785:
1.156 pk 2786: #ifdef HWREF
1.127 chs 2787: #ifdef DEBUG
1.17 eeh 2788: if (pv->pv_next && !pv->pv_pmap) {
2789: printf("pmap_is_referenced: npv but no pmap for pv %p\n", pv);
2790: Debugger();
2791: }
2792: #endif
1.221 martin 2793: if (pv->pv_pmap == NULL)
2794: return false;
2795:
2796: for (npv = pv; npv; npv = npv->pv_next) {
2797: int64_t data;
1.156 pk 2798:
1.221 martin 2799: data = pseg_get(npv->pv_pmap, npv->pv_va & PV_VAMASK);
2800: KASSERT(data & TLB_V);
2801: if (data & TLB_ACCESS)
2802: res = true;
1.127 chs 2803:
1.221 martin 2804: /* Migrate ref info to head pv */
1.225 nakayama 2805: if (npv->pv_va & PV_REF) {
1.221 martin 2806: res = true;
1.225 nakayama 2807: npv->pv_va &= ~PV_REF;
2808: }
1.221 martin 2809: }
1.38 eeh 2810: /* Save ref info */
1.221 martin 2811: if (res)
1.127 chs 2812: pv->pv_va |= PV_REF;
1.1 eeh 2813: #endif
2814:
1.127 chs 2815: DPRINTF(PDB_CHANGEPROT|PDB_REF,
1.221 martin 2816: ("pmap_is_referenced(%p) = %d\n", pg, res));
2817: pv_check();
2818: return res;
2819: }
2820:
2821: bool
2822: pmap_is_referenced(struct vm_page *pg)
2823: {
1.266 uebayasi 2824: struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
1.221 martin 2825: pv_entry_t pv;
2826: bool res = false;
2827:
2828: /* Check if any mapping has been referenced */
1.266 uebayasi 2829: pv = &md->mdpg_pvh;
1.221 martin 2830: if (pv->pv_va & PV_REF)
2831: return true;
2832:
2833: #ifdef HWREF
2834: #ifdef DEBUG
2835: if (pv->pv_next && !pv->pv_pmap) {
2836: printf("pmap_is_referenced: npv but no pmap for pv %p\n", pv);
2837: Debugger();
2838: }
2839: #endif
2840: if (pv->pv_pmap != NULL) {
2841: mutex_enter(&pmap_lock);
2842: res = pmap_is_referenced_locked(pg);
2843: mutex_exit(&pmap_lock);
2844: }
2845: #endif
2846:
2847: DPRINTF(PDB_CHANGEPROT|PDB_REF,
2848: ("pmap_is_referenced(%p) = %d\n", pg, res));
1.1 eeh 2849: pv_check();
1.221 martin 2850: return res;
1.1 eeh 2851: }
2852:
2853:
2854:
2855: /*
1.40 thorpej 2856: * Routine: pmap_unwire
2857: * Function: Clear the wired attribute for a map/virtual-address
1.1 eeh 2858: * pair.
2859: * In/out conditions:
2860: * The mapping must already exist in the pmap.
2861: */
2862: void
1.232 dsl 2863: pmap_unwire(pmap_t pmap, vaddr_t va)
1.1 eeh 2864: {
2865: int64_t data;
1.158 martin 2866: int rv;
1.127 chs 2867:
2868: DPRINTF(PDB_MMU_STEAL, ("pmap_unwire(%p, %lx)\n", pmap, va));
1.1 eeh 2869:
2870: #ifdef DEBUG
2871: /*
2872: * Is this part of the permanent 4MB mapping?
2873: */
1.156 pk 2874: if (pmap == pmap_kernel() && va >= ktext &&
1.98 eeh 2875: va < roundup(ekdata, 4*MEG)) {
1.156 pk 2876: prom_printf("pmap_unwire: va=%08x in locked TLB\n", va);
2877: prom_abort();
1.1 eeh 2878: return;
2879: }
1.127 chs 2880: #endif
2881: data = pseg_get(pmap, va & PV_VAMASK);
1.158 martin 2882: KASSERT(data & TLB_V);
1.40 thorpej 2883: data &= ~TLB_TSB_LOCK;
1.158 martin 2884: rv = pseg_set(pmap, va & PV_VAMASK, data, 0);
2885: if (rv & 1)
2886: panic("pmap_unwire: pseg_set needs spare! rv=%d\n", rv);
1.1 eeh 2887: pv_check();
2888: }
2889:
2890: /*
2891: * Lower the protection on the specified physical page.
2892: *
2893: * Never enable writing as it will break COW
2894: */
1.4 eeh 2895:
2896: void
1.232 dsl 2897: pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
1.1 eeh 2898: {
1.266 uebayasi 2899: struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
1.127 chs 2900: int64_t clear, set;
2901: int64_t data = 0;
1.158 martin 2902: int rv;
1.225 nakayama 2903: pv_entry_t pv, npv, freepv = NULL;
1.127 chs 2904: struct pmap *pmap;
2905: vaddr_t va;
1.185 thorpej 2906: bool needflush = FALSE;
1.1 eeh 2907:
1.127 chs 2908: DPRINTF(PDB_CHANGEPROT,
2909: ("pmap_page_protect: pg %p prot %x\n", pg, prot));
1.1 eeh 2910:
1.203 ad 2911: mutex_enter(&pmap_lock);
1.266 uebayasi 2912: pv = &md->mdpg_pvh;
1.1 eeh 2913: if (prot & (VM_PROT_READ|VM_PROT_EXECUTE)) {
2914: /* copy_on_write */
2915:
2916: set = TLB_V;
1.9 eeh 2917: clear = TLB_REAL_W|TLB_W;
1.38 eeh 2918: if (VM_PROT_EXECUTE & prot)
1.1 eeh 2919: set |= TLB_EXEC;
2920: else
2921: clear |= TLB_EXEC;
1.38 eeh 2922: if (VM_PROT_EXECUTE == prot)
1.1 eeh 2923: set |= TLB_EXEC_ONLY;
2924:
1.156 pk 2925: #ifdef DEBUG
1.17 eeh 2926: if (pv->pv_next && !pv->pv_pmap) {
1.127 chs 2927: printf("pmap_page_protect: no pmap for pv %p\n", pv);
1.17 eeh 2928: Debugger();
2929: }
2930: #endif
1.1 eeh 2931: if (pv->pv_pmap != NULL) {
2932: for (; pv; pv = pv->pv_next) {
1.127 chs 2933: pmap = pv->pv_pmap;
2934: va = pv->pv_va & PV_VAMASK;
2935:
2936: DPRINTF(PDB_CHANGEPROT | PDB_REF,
2937: ("pmap_page_protect: "
2938: "RO va %p of pg %p...\n",
2939: (void *)(u_long)pv->pv_va, pg));
2940: data = pseg_get(pmap, va);
1.158 martin 2941: KASSERT(data & TLB_V);
1.73 eeh 2942:
2943: /* Save REF/MOD info */
1.127 chs 2944: if (data & TLB_ACCESS)
2945: pv->pv_va |= PV_REF;
2946: if (data & TLB_MODIFY)
1.73 eeh 2947: pv->pv_va |= PV_MOD;
2948:
1.127 chs 2949: data &= ~clear;
2950: data |= set;
1.158 martin 2951: rv = pseg_set(pmap, va, data, 0);
2952: if (rv & 1)
2953: panic("pmap_page_protect: "
2954: "pseg_set needs spare! rv=%d\n",
2955: rv);
1.216 martin 2956: if (pmap_is_on_mmu(pmap)) {
1.210 martin 2957: KASSERT(pmap_ctx(pmap)>=0);
1.212 nakayama 2958: tsb_invalidate(va, pmap);
2959: tlb_flush_pte(va, pmap);
1.41 eeh 2960: }
1.1 eeh 2961: }
2962: }
2963: } else {
2964: /* remove mappings */
1.127 chs 2965: DPRINTF(PDB_REMOVE,
2966: ("pmap_page_protect: demapping pg %p\n", pg));
2967:
1.159 chs 2968: /* First remove the entire list of continuation pv's */
1.1 eeh 2969: for (npv = pv->pv_next; npv; npv = pv->pv_next) {
1.127 chs 2970: pmap = npv->pv_pmap;
2971: va = npv->pv_va & PV_VAMASK;
2972:
1.1 eeh 2973: /* We're removing npv from pv->pv_next */
1.156 pk 2974: DPRINTF(PDB_CHANGEPROT|PDB_REF|PDB_REMOVE,
1.127 chs 2975: ("pmap_page_protect: "
2976: "demap va %p of pg %p in pmap %p...\n",
2977: (void *)(u_long)va, pg, pmap));
2978:
1.1 eeh 2979: /* clear the entry in the page table */
1.127 chs 2980: data = pseg_get(pmap, va);
1.158 martin 2981: KASSERT(data & TLB_V);
1.1 eeh 2982:
2983: /* Save ref/mod info */
1.127 chs 2984: if (data & TLB_ACCESS)
1.225 nakayama 2985: pv->pv_va |= PV_REF;
1.127 chs 2986: if (data & TLB_MODIFY)
1.225 nakayama 2987: pv->pv_va |= PV_MOD;
1.1 eeh 2988: /* Clear mapping */
1.158 martin 2989: rv = pseg_set(pmap, va, 0, 0);
2990: if (rv & 1)
2991: panic("pmap_page_protect: pseg_set needs"
2992: " spare! rv=%d\n", rv);
1.216 martin 2993: if (pmap_is_on_mmu(pmap)) {
1.210 martin 2994: KASSERT(pmap_ctx(pmap)>=0);
1.212 nakayama 2995: tsb_invalidate(va, pmap);
2996: tlb_flush_pte(va, pmap);
1.41 eeh 2997: }
1.127 chs 2998: if (pmap->pm_refs > 0) {
2999: needflush = TRUE;
3000: }
1.73 eeh 3001:
1.1 eeh 3002: /* free the pv */
3003: pv->pv_next = npv->pv_next;
1.203 ad 3004: npv->pv_next = freepv;
3005: freepv = npv;
1.1 eeh 3006: }
3007:
3008: /* Then remove the primary pv */
1.156 pk 3009: #ifdef DEBUG
1.17 eeh 3010: if (pv->pv_next && !pv->pv_pmap) {
1.127 chs 3011: printf("pmap_page_protect: no pmap for pv %p\n", pv);
1.17 eeh 3012: Debugger();
3013: }
3014: #endif
1.1 eeh 3015: if (pv->pv_pmap != NULL) {
1.127 chs 3016: pmap = pv->pv_pmap;
3017: va = pv->pv_va & PV_VAMASK;
3018:
3019: DPRINTF(PDB_CHANGEPROT|PDB_REF|PDB_REMOVE,
3020: ("pmap_page_protect: "
3021: "demap va %p of pg %p from pm %p...\n",
3022: (void *)(u_long)va, pg, pmap));
3023:
3024: data = pseg_get(pmap, va);
1.158 martin 3025: KASSERT(data & TLB_V);
1.1 eeh 3026: /* Save ref/mod info */
1.156 pk 3027: if (data & TLB_ACCESS)
1.1 eeh 3028: pv->pv_va |= PV_REF;
1.127 chs 3029: if (data & TLB_MODIFY)
1.1 eeh 3030: pv->pv_va |= PV_MOD;
1.158 martin 3031: rv = pseg_set(pmap, va, 0, 0);
3032: if (rv & 1)
3033: panic("pmap_page_protect: pseg_set needs"
3034: " spare! rv=%d\n", rv);
1.216 martin 3035: if (pmap_is_on_mmu(pmap)) {
1.210 martin 3036: KASSERT(pmap_ctx(pmap)>=0);
1.212 nakayama 3037: tsb_invalidate(va, pmap);
3038: tlb_flush_pte(va, pmap);
1.41 eeh 3039: }
1.127 chs 3040: if (pmap->pm_refs > 0) {
3041: needflush = TRUE;
3042: }
1.1 eeh 3043: npv = pv->pv_next;
3044: /* dump the first pv */
3045: if (npv) {
3046: /* First save mod/ref bits */
1.159 chs 3047: pv->pv_pmap = npv->pv_pmap;
1.225 nakayama 3048: pv->pv_va = (pv->pv_va & PV_MASK) | npv->pv_va;
1.73 eeh 3049: pv->pv_next = npv->pv_next;
1.203 ad 3050: npv->pv_next = freepv;
3051: freepv = npv;
1.1 eeh 3052: } else {
3053: pv->pv_pmap = NULL;
3054: pv->pv_next = NULL;
3055: }
3056: }
1.255 mrg 3057: if (needflush)
3058: dcache_flush_page_all(VM_PAGE_TO_PHYS(pg));
1.1 eeh 3059: }
3060: /* We should really only flush the pages we demapped. */
3061: pv_check();
1.203 ad 3062: mutex_exit(&pmap_lock);
3063:
3064: /* Catch up on deferred frees. */
3065: for (; freepv != NULL; freepv = npv) {
3066: npv = freepv->pv_next;
3067: pool_cache_put(&pmap_pv_cache, freepv);
3068: }
1.1 eeh 3069: }
3070:
1.135 martin 3071: #ifdef PMAP_COUNT_DEBUG
1.1 eeh 3072: /*
3073: * count pages in pmap -- this can be slow.
3074: */
3075: int
1.171 cdi 3076: pmap_count_res(struct pmap *pm)
1.1 eeh 3077: {
1.127 chs 3078: int64_t data;
1.8 eeh 3079: paddr_t *pdir, *ptbl;
1.127 chs 3080: int i, j, k, n;
3081:
1.14 eeh 3082: /* Don't want one of these pages reused while we're reading it. */
1.203 ad 3083: mutex_enter(&pmap_lock);
1.1 eeh 3084: n = 0;
1.127 chs 3085: for (i = 0; i < STSZ; i++) {
3086: pdir = (paddr_t *)(u_long)ldxa((vaddr_t)&pm->pm_segs[i],
3087: ASI_PHYS_CACHED);
3088: if (pdir == NULL) {
3089: continue;
3090: }
3091: for (k = 0; k < PDSZ; k++) {
3092: ptbl = (paddr_t *)(u_long)ldxa((vaddr_t)&pdir[k],
3093: ASI_PHYS_CACHED);
3094: if (ptbl == NULL) {
3095: continue;
3096: }
3097: for (j = 0; j < PTSZ; j++) {
3098: data = (int64_t)ldxa((vaddr_t)&ptbl[j],
3099: ASI_PHYS_CACHED);
3100: if (data & TLB_V)
3101: n++;
1.112 chs 3102: }
3103: }
3104: }
1.203 ad 3105: mutex_exit(&pmap_lock);
1.135 martin 3106:
1.156 pk 3107: if (pm->pm_stats.resident_count != n)
3108: printf("pmap_count_resident: pm_stats = %ld, counted: %d\n",
1.135 martin 3109: pm->pm_stats.resident_count, n);
3110:
1.112 chs 3111: return n;
3112: }
3113:
3114: /*
3115: * count wired pages in pmap -- this can be slow.
3116: */
3117: int
1.171 cdi 3118: pmap_count_wired(struct pmap *pm)
1.112 chs 3119: {
1.127 chs 3120: int64_t data;
1.112 chs 3121: paddr_t *pdir, *ptbl;
1.127 chs 3122: int i, j, k, n;
3123:
1.112 chs 3124: /* Don't want one of these pages reused while we're reading it. */
1.273 rmind 3125: mutex_enter(&pmap_lock); /* XXX uvmplock */
1.112 chs 3126: n = 0;
3127: for (i = 0; i < STSZ; i++) {
1.127 chs 3128: pdir = (paddr_t *)(u_long)ldxa((vaddr_t)&pm->pm_segs[i],
3129: ASI_PHYS_CACHED);
3130: if (pdir == NULL) {
3131: continue;
3132: }
3133: for (k = 0; k < PDSZ; k++) {
3134: ptbl = (paddr_t *)(u_long)ldxa((vaddr_t)&pdir[k],
3135: ASI_PHYS_CACHED);
3136: if (ptbl == NULL) {
3137: continue;
3138: }
3139: for (j = 0; j < PTSZ; j++) {
3140: data = (int64_t)ldxa((vaddr_t)&ptbl[j],
3141: ASI_PHYS_CACHED);
3142: if (data & TLB_TSB_LOCK)
3143: n++;
1.1 eeh 3144: }
3145: }
3146: }
1.273 rmind 3147: mutex_exit(&pmap_lock); /* XXX uvmplock */
1.135 martin 3148:
1.156 pk 3149: if (pm->pm_stats.wired_count != n)
3150: printf("pmap_count_wired: pm_stats = %ld, counted: %d\n",
1.135 martin 3151: pm->pm_stats.wired_count, n);
3152:
1.1 eeh 3153: return n;
3154: }
1.135 martin 3155: #endif /* PMAP_COUNT_DEBUG */
1.1 eeh 3156:
1.127 chs 3157: void
3158: pmap_procwr(struct proc *p, vaddr_t va, size_t len)
3159: {
3160:
3161: blast_icache();
3162: }
3163:
1.1 eeh 3164: /*
1.127 chs 3165: * Allocate a hardware context to the given pmap.
1.1 eeh 3166: */
1.212 nakayama 3167: static int
1.171 cdi 3168: ctx_alloc(struct pmap *pm)
1.1 eeh 3169: {
1.127 chs 3170: int i, ctx;
3171:
1.132 chs 3172: KASSERT(pm != pmap_kernel());
3173: KASSERT(pm == curproc->p_vmspace->vm_map.pmap);
1.262 mrg 3174: mutex_enter(&curcpu()->ci_ctx_lock);
1.210 martin 3175: ctx = curcpu()->ci_pmap_next_ctx++;
1.127 chs 3176:
3177: /*
3178: * if we have run out of contexts, remove all user entries from
3179: * the TSB, TLB and dcache and start over with context 1 again.
3180: */
1.1 eeh 3181:
1.210 martin 3182: if (ctx == curcpu()->ci_numctx) {
1.247 mrg 3183: DPRINTF(PDB_CTX_ALLOC|PDB_CTX_FLUSHALL,
1.212 nakayama 3184: ("ctx_alloc: cpu%d run out of contexts %d\n",
3185: cpu_number(), curcpu()->ci_numctx));
1.127 chs 3186: write_user_windows();
1.210 martin 3187: while (!LIST_EMPTY(&curcpu()->ci_pmap_ctxlist)) {
1.212 nakayama 3188: #ifdef MULTIPROCESSOR
1.213 nakayama 3189: KASSERT(pmap_ctx(LIST_FIRST(&curcpu()->ci_pmap_ctxlist)) != 0);
1.257 mrg 3190: #endif
1.212 nakayama 3191: ctx_free(LIST_FIRST(&curcpu()->ci_pmap_ctxlist),
3192: curcpu());
1.25 eeh 3193: }
1.127 chs 3194: for (i = TSBENTS - 1; i >= 0; i--) {
1.210 martin 3195: if (TSB_TAG_CTX(curcpu()->ci_tsb_dmmu[i].tag) != 0) {
3196: clrx(&curcpu()->ci_tsb_dmmu[i].data);
1.143 chs 3197: }
1.210 martin 3198: if (TSB_TAG_CTX(curcpu()->ci_tsb_immu[i].tag) != 0) {
3199: clrx(&curcpu()->ci_tsb_immu[i].data);
1.127 chs 3200: }
3201: }
1.252 mrg 3202: sp_tlb_flush_all();
1.127 chs 3203: ctx = 1;
1.210 martin 3204: curcpu()->ci_pmap_next_ctx = 2;
1.127 chs 3205: }
1.210 martin 3206: curcpu()->ci_ctxbusy[ctx] = pm->pm_physaddr;
1.257 mrg 3207: LIST_INSERT_HEAD(&curcpu()->ci_pmap_ctxlist, pm, pm_list[cpu_number()]);
1.210 martin 3208: pmap_ctx(pm) = ctx;
1.262 mrg 3209: mutex_exit(&curcpu()->ci_ctx_lock);
1.247 mrg 3210: DPRINTF(PDB_CTX_ALLOC, ("ctx_alloc: cpu%d allocated ctx %d\n",
3211: cpu_number(), ctx));
1.127 chs 3212: return ctx;
1.1 eeh 3213: }
3214:
3215: /*
3216: * Give away a context.
3217: */
1.212 nakayama 3218: static void
3219: ctx_free(struct pmap *pm, struct cpu_info *ci)
3220: {
3221: int oldctx;
1.257 mrg 3222: int cpunum;
1.212 nakayama 3223:
1.263 mrg 3224: KASSERT(mutex_owned(&ci->ci_ctx_lock));
1.257 mrg 3225:
3226: #ifdef MULTIPROCESSOR
3227: cpunum = ci->ci_index;
3228: #else
3229: /* Give the compiler a hint.. */
3230: cpunum = 0;
3231: #endif
3232:
3233: oldctx = pm->pm_ctx[cpunum];
1.212 nakayama 3234: if (oldctx == 0)
3235: return;
3236:
3237: #ifdef DIAGNOSTIC
3238: if (pm == pmap_kernel())
3239: panic("ctx_free: freeing kernel context");
3240: if (ci->ci_ctxbusy[oldctx] == 0)
3241: printf("ctx_free: freeing free context %d\n", oldctx);
3242: if (ci->ci_ctxbusy[oldctx] != pm->pm_physaddr) {
3243: printf("ctx_free: freeing someone else's context\n "
3244: "ctxbusy[%d] = %p, pm(%p)->pm_ctx = %p\n",
3245: oldctx, (void *)(u_long)ci->ci_ctxbusy[oldctx], pm,
3246: (void *)(u_long)pm->pm_physaddr);
3247: Debugger();
3248: }
3249: #endif
3250: /* We should verify it has not been stolen and reallocated... */
1.247 mrg 3251: DPRINTF(PDB_CTX_ALLOC, ("ctx_free: cpu%d freeing ctx %d\n",
3252: cpu_number(), oldctx));
1.212 nakayama 3253: ci->ci_ctxbusy[oldctx] = 0UL;
1.257 mrg 3254: pm->pm_ctx[cpunum] = 0;
3255: LIST_REMOVE(pm, pm_list[cpunum]);
1.212 nakayama 3256: }
1.1 eeh 3257:
3258: /*
1.41 eeh 3259: * Enter the pmap and virtual address into the
3260: * physical to virtual map table.
1.86 eeh 3261: *
3262: * We enter here with the pmap locked.
1.41 eeh 3263: */
1.127 chs 3264:
1.41 eeh 3265: void
1.170 cdi 3266: pmap_enter_pv(struct pmap *pmap, vaddr_t va, paddr_t pa, struct vm_page *pg,
3267: pv_entry_t npv)
1.41 eeh 3268: {
1.266 uebayasi 3269: struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
1.127 chs 3270: pv_entry_t pvh;
3271:
1.203 ad 3272: KASSERT(mutex_owned(&pmap_lock));
3273:
1.266 uebayasi 3274: pvh = &md->mdpg_pvh;
1.127 chs 3275: DPRINTF(PDB_ENTER, ("pmap_enter: pvh %p: was %lx/%p/%p\n",
3276: pvh, pvh->pv_va, pvh->pv_pmap, pvh->pv_next));
3277: if (pvh->pv_pmap == NULL) {
1.41 eeh 3278:
3279: /*
3280: * No entries yet, use header as the first entry
3281: */
1.127 chs 3282: DPRINTF(PDB_ENTER, ("pmap_enter: first pv: pmap %p va %lx\n",
3283: pmap, va));
3284: ENTER_STAT(firstpv);
3285: PV_SETVA(pvh, va);
3286: pvh->pv_pmap = pmap;
3287: pvh->pv_next = NULL;
3288: KASSERT(npv == NULL);
1.41 eeh 3289: } else {
1.127 chs 3290: if (pg->loan_count == 0 && !(pvh->pv_va & PV_ALIAS)) {
3291:
1.41 eeh 3292: /*
3293: * There is at least one other VA mapping this page.
 3294: 			 * Check if they are cache index compatible. If not,
 3295: 			 * remove all mappings, flush the cache and set the page
 3296: 			 * to be mapped uncached. Caching will be restored
 3297: 			 * when pages are mapped compatibly again.
3298: */
1.127 chs 3299: if ((pvh->pv_va ^ va) & VA_ALIAS_MASK) {
3300: pvh->pv_va |= PV_ALIAS;
1.86 eeh 3301: pmap_page_cache(pmap, pa, 0);
1.127 chs 3302: ENTER_STAT(ci);
1.41 eeh 3303: }
3304: }
1.127 chs 3305:
1.41 eeh 3306: /*
3307: * There is at least one other VA mapping this page.
3308: * Place this entry after the header.
3309: */
3310:
1.127 chs 3311: DPRINTF(PDB_ENTER, ("pmap_enter: new pv: pmap %p va %lx\n",
3312: pmap, va));
1.41 eeh 3313: npv->pv_pmap = pmap;
1.127 chs 3314: npv->pv_va = va & PV_VAMASK;
3315: npv->pv_next = pvh->pv_next;
3316: pvh->pv_next = npv;
3317:
1.177 martin 3318: if (!npv->pv_next) {
1.127 chs 3319: ENTER_STAT(secondpv);
1.177 martin 3320: }
1.41 eeh 3321: }
3322: }
3323:
3324: /*
1.1 eeh 3325: * Remove a physical to virtual address translation.
3326: */
3327:
1.127 chs 3328: pv_entry_t
1.170 cdi 3329: pmap_remove_pv(struct pmap *pmap, vaddr_t va, struct vm_page *pg)
1.1 eeh 3330: {
1.266 uebayasi 3331: struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
1.127 chs 3332: pv_entry_t pvh, npv, pv;
3333: int64_t data = 0;
1.1 eeh 3334:
1.203 ad 3335: KASSERT(mutex_owned(&pmap_lock));
3336:
1.266 uebayasi 3337: pvh = &md->mdpg_pvh;
1.127 chs 3338:
3339: DPRINTF(PDB_REMOVE, ("pmap_remove_pv(pm=%p, va=%p, pg=%p)\n", pmap,
3340: (void *)(u_long)va, pg));
1.1 eeh 3341: pv_check();
1.127 chs 3342:
1.1 eeh 3343: 	/*
1.127 chs 3344: 	 * Remove the mapping from the page's PV list.  If it is
1.1 eeh 3345: 	 * the first entry, it lives in the header and we copy the
 3346: 	 * following entry up into the header.  Otherwise we search
 3347: 	 * the list for the entry.  Either way, the now-unused entry
 3348: 	 * is handed back to the caller to free.
 3349: 	 */
1.127 chs 3350: if (pmap == pvh->pv_pmap && PV_MATCH(pvh, va)) {
3351: data = pseg_get(pvh->pv_pmap, pvh->pv_va & PV_VAMASK);
1.158 martin 3352: KASSERT(data & TLB_V);
1.127 chs 3353: npv = pvh->pv_next;
1.1 eeh 3354: if (npv) {
3355: /* First save mod/ref bits */
1.127 chs 3356: pvh->pv_va = (pvh->pv_va & PV_MASK) | npv->pv_va;
3357: pvh->pv_next = npv->pv_next;
3358: pvh->pv_pmap = npv->pv_pmap;
1.1 eeh 3359: } else {
1.127 chs 3360: pvh->pv_pmap = NULL;
3361: pvh->pv_next = NULL;
3362: pvh->pv_va &= (PV_REF|PV_MOD);
1.1 eeh 3363: }
1.159 chs 3364: REMOVE_STAT(pvfirst);
1.1 eeh 3365: } else {
1.127 chs 3366: for (pv = pvh, npv = pvh->pv_next; npv;
3367: pv = npv, npv = npv->pv_next) {
1.159 chs 3368: REMOVE_STAT(pvsearch);
1.127 chs 3369: if (pmap == npv->pv_pmap && PV_MATCH(npv, va))
3370: break;
1.1 eeh 3371: }
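		/* The entry must be on the list; pseg_get below relies on it. */
		KASSERT(npv != NULL);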
3372: pv->pv_next = npv->pv_next;
1.127 chs 3373: data = pseg_get(npv->pv_pmap, npv->pv_va & PV_VAMASK);
1.158 martin 3374: KASSERT(data & TLB_V);
1.1 eeh 3375: }
3376:
3377: /* Save ref/mod info */
1.156 pk 3378: if (data & TLB_ACCESS)
1.127 chs 3379: pvh->pv_va |= PV_REF;
3380: if (data & TLB_MODIFY)
3381: pvh->pv_va |= PV_MOD;
1.86 eeh 3382:
3383: /* Check to see if the alias went away */
1.127 chs 3384: if (pvh->pv_va & PV_ALIAS) {
3385: pvh->pv_va &= ~PV_ALIAS;
3386: for (pv = pvh; pv; pv = pv->pv_next) {
3387: if ((pv->pv_va ^ pvh->pv_va) & VA_ALIAS_MASK) {
3388: pvh->pv_va |= PV_ALIAS;
3389: break;
1.86 eeh 3390: }
3391: }
1.127 chs 3392: if (!(pvh->pv_va & PV_ALIAS))
3393: pmap_page_cache(pmap, VM_PAGE_TO_PHYS(pg), 1);
1.86 eeh 3394: }
1.1 eeh 3395: pv_check();
1.127 chs 3396: return npv;
1.41 eeh 3397: }
3398:
 3399: /*
 3400:  * pmap_page_cache:
 3401:  *
 3402:  * Change all mappings of a page to cached (mode != 0) or uncached
 3403:  * (mode == 0); PV_NC (I/O) and PV_NVC mappings stay uncached.
 */
3404: void
1.170 cdi 3405: pmap_page_cache(struct pmap *pm, paddr_t pa, int mode)
1.41 eeh 3406: {
1.127 chs 3407: struct vm_page *pg;
1.266 uebayasi 3408: struct vm_page_md *md;
1.41 eeh 3409: pv_entry_t pv;
1.127 chs 3410: vaddr_t va;
1.158 martin 3411: int rv;
1.73 eeh 3412:
1.271 mrg 3413: #if 0
3414: /*
3415: * Why is this?
3416: */
3417: if (CPU_ISSUN4US || CPU_ISSUN4V)
3418: return;
3419: #endif
3420:
1.203 ad 3421: KASSERT(mutex_owned(&pmap_lock));
3422:
1.127 chs 3423: 	DPRINTF(PDB_ENTER, ("pmap_page_cache(%llx)\n",
3424: (unsigned long long)pa));
3425: pg = PHYS_TO_VM_PAGE(pa);
1.266 uebayasi 3426: md = VM_PAGE_TO_MD(pg);
3427: pv = &md->mdpg_pvh;
1.41 eeh 3428: while (pv) {
1.127 chs 3429: va = pv->pv_va & PV_VAMASK;
1.86 eeh 3430: if (pv->pv_va & PV_NC) {
1.158 martin 3431: int64_t data;
3432:
1.86 eeh 3433: /* Non-cached -- I/O mapping */
1.158 martin 3434: data = pseg_get(pv->pv_pmap, va);
3435: KASSERT(data & TLB_V);
3436: rv = pseg_set(pv->pv_pmap, va,
3437: data & ~(TLB_CV|TLB_CP), 0);
3438: if (rv & 1)
3439: panic("pmap_page_cache: pseg_set needs"
3440: " spare! rv=%d\n", rv);
1.86 eeh 3441: } else if (mode && (!(pv->pv_va & PV_NVC))) {
1.158 martin 3442: int64_t data;
3443:
1.41 eeh 3444: /* Enable caching */
1.158 martin 3445: data = pseg_get(pv->pv_pmap, va);
3446: KASSERT(data & TLB_V);
3447: rv = pseg_set(pv->pv_pmap, va, data | TLB_CV, 0);
3448: if (rv & 1)
3449: panic("pmap_page_cache: pseg_set needs"
3450: " spare! rv=%d\n", rv);
1.41 eeh 3451: } else {
1.158 martin 3452: int64_t data;
3453:
1.41 eeh 3454: /* Disable caching */
1.158 martin 3455: data = pseg_get(pv->pv_pmap, va);
1.254 mrg 3456: KASSERT(data & TLB_V);
1.158 martin 3457: rv = pseg_set(pv->pv_pmap, va, data & ~TLB_CV, 0);
3458: if (rv & 1)
3459: panic("pmap_page_cache: pseg_set needs"
3460: " spare! rv=%d\n", rv);
1.41 eeh 3461: }
1.216 martin 3462: if (pmap_is_on_mmu(pv->pv_pmap)) {
1.127 chs 3463: /* Force reload -- cache bits have changed */
1.210 martin 3464: 			KASSERT(pmap_ctx(pv->pv_pmap) >= 0);
1.212 nakayama 3465: tsb_invalidate(va, pv->pv_pmap);
3466: tlb_flush_pte(va, pv->pv_pmap);
1.41 eeh 3467: }
3468: pv = pv->pv_next;
3469: }
1.1 eeh 3470: }
3471:
1.263 mrg 3472: /*
3473: * Some routines to allocate and free PTPs.
3474: */
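/*
 * Before uvm.page_init_done, the VM page allocator is not usable yet,
 * so pmap_get_page() falls back to uvm_page_physget() and zeroes the
 * page by hand; later it uses uvm_pagealloc() with UVM_PGA_ZERO.
 */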
1.111 eeh 3475: static int
1.127 chs 3476: pmap_get_page(paddr_t *p)
1.1 eeh 3477: {
1.111 eeh 3478: struct vm_page *pg;
3479: paddr_t pa;
3480:
1.127 chs 3481: if (uvm.page_init_done) {
3482: pg = uvm_pagealloc(NULL, 0, NULL,
3483: UVM_PGA_ZERO | UVM_PGA_USERESERVE);
3484: if (pg == NULL)
3485: return (0);
3486: pa = VM_PAGE_TO_PHYS(pg);
1.111 eeh 3487: } else {
3488: if (!uvm_page_physget(&pa))
3489: return (0);
3490: pmap_zero_page(pa);
1.47 eeh 3491: }
1.111 eeh 3492: *p = pa;
3493: return (1);
1.1 eeh 3494: }
3495:
1.111 eeh 3496: static void
1.262 mrg 3497: pmap_free_page(paddr_t pa, sparc64_cpuset_t cs)
1.1 eeh 3498: {
1.111 eeh 3499: struct vm_page *pg = PHYS_TO_VM_PAGE(pa);
3500:
1.262 mrg 3501: dcache_flush_page_cpuset(pa, cs);
1.127 chs 3502: uvm_pagefree(pg);
1.1 eeh 3503: }
3504:
1.262 mrg 3505: static void
3506: pmap_free_page_noflush(paddr_t pa)
3507: {
3508: struct vm_page *pg = PHYS_TO_VM_PAGE(pa);
3509:
3510: uvm_pagefree(pg);
3511: }
1.111 eeh 3512:
1.8 eeh 3513: #ifdef DDB
1.1 eeh 3514:
1.170 cdi 3515: void db_dump_pv(db_expr_t, int, db_expr_t, const char *);
1.1 eeh 3516: void
1.170 cdi 3517: db_dump_pv(db_expr_t addr, int have_addr, db_expr_t count, const char *modif)
1.1 eeh 3518: {
1.127 chs 3519: struct vm_page *pg;
1.266 uebayasi 3520: struct vm_page_md *md;
1.1 eeh 3521: struct pv_entry *pv;
3522:
3523: if (!have_addr) {
3524: db_printf("Need addr for pv\n");
3525: return;
3526: }
3527:
1.127 chs 3528: pg = PHYS_TO_VM_PAGE((paddr_t)addr);
3529: if (pg == NULL) {
3530: db_printf("page is not managed\n");
3531: return;
3532: }
1.266 uebayasi 3533: md = VM_PAGE_TO_MD(pg);
3534: for (pv = &md->mdpg_pvh; pv; pv = pv->pv_next)
1.81 fvdl 3535: db_printf("pv@%p: next=%p pmap=%p va=0x%llx\n",
3536: pv, pv->pv_next, pv->pv_pmap,
3537: (unsigned long long)pv->pv_va);
1.1 eeh 3538: }
3539:
3540: #endif
3541:
1.43 eeh 3542: #ifdef DEBUG
 3543: /*
1.111 eeh 3544:  * Test ref/modify handling.
 */
1.170 cdi 3545: void pmap_testout(void);
1.43 eeh 3546: void
1.234 cegger 3547: pmap_testout(void)
1.43 eeh 3548: {
3549: vaddr_t va;
3550: volatile int *loc;
3551: int val = 0;
1.230 martin 3552: paddr_t pa;
1.43 eeh 3553: struct vm_page *pg;
3554: int ref, mod;
3555:
3556: /* Allocate a page */
1.138 thorpej 3557: va = (vaddr_t)(vmmap - PAGE_SIZE);
1.147 kleink 3558: KASSERT(va != 0);
1.43 eeh 3559: 	loc = (int *)va;
3560:
1.127 chs 3561: pmap_get_page(&pa);
1.111 eeh 3562: pg = PHYS_TO_VM_PAGE(pa);
1.45 thorpej 3563: pmap_enter(pmap_kernel(), va, pa, VM_PROT_ALL, VM_PROT_ALL);
1.108 chris 3564: pmap_update(pmap_kernel());
1.43 eeh 3565:
3566: /* Now clear reference and modify */
3567: ref = pmap_clear_reference(pg);
3568: mod = pmap_clear_modify(pg);
3569: printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
1.82 mrg 3570: (void *)(u_long)va, (long)pa,
1.43 eeh 3571: ref, mod);
3572:
3573: /* Check it's properly cleared */
3574: ref = pmap_is_referenced(pg);
3575: mod = pmap_is_modified(pg);
3576: printf("Checking cleared page: ref %d, mod %d\n",
3577: ref, mod);
3578:
3579: /* Reference page */
3580: val = *loc;
3581:
3582: ref = pmap_is_referenced(pg);
3583: mod = pmap_is_modified(pg);
3584: printf("Referenced page: ref %d, mod %d val %x\n",
3585: ref, mod, val);
3586:
3587: /* Now clear reference and modify */
3588: ref = pmap_clear_reference(pg);
3589: mod = pmap_clear_modify(pg);
3590: printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
1.82 mrg 3591: (void *)(u_long)va, (long)pa,
1.43 eeh 3592: ref, mod);
1.156 pk 3593:
1.43 eeh 3594: /* Modify page */
3595: *loc = 1;
3596:
3597: ref = pmap_is_referenced(pg);
3598: mod = pmap_is_modified(pg);
3599: printf("Modified page: ref %d, mod %d\n",
3600: ref, mod);
3601:
3602: /* Now clear reference and modify */
3603: ref = pmap_clear_reference(pg);
3604: mod = pmap_clear_modify(pg);
3605: printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
1.82 mrg 3606: (void *)(u_long)va, (long)pa,
1.43 eeh 3607: ref, mod);
3608:
3609: /* Check it's properly cleared */
3610: ref = pmap_is_referenced(pg);
3611: mod = pmap_is_modified(pg);
3612: printf("Checking cleared page: ref %d, mod %d\n",
3613: ref, mod);
3614:
3615: /* Modify page */
3616: *loc = 1;
3617:
3618: ref = pmap_is_referenced(pg);
3619: mod = pmap_is_modified(pg);
3620: printf("Modified page: ref %d, mod %d\n",
1.73 eeh 3621: ref, mod);
3622:
3623: /* Check pmap_protect() */
3624: pmap_protect(pmap_kernel(), va, va+1, VM_PROT_READ);
1.108 chris 3625: pmap_update(pmap_kernel());
1.73 eeh 3626: ref = pmap_is_referenced(pg);
3627: mod = pmap_is_modified(pg);
3628: printf("pmap_protect(VM_PROT_READ): ref %d, mod %d\n",
3629: ref, mod);
3630:
3631: /* Now clear reference and modify */
3632: ref = pmap_clear_reference(pg);
3633: mod = pmap_clear_modify(pg);
3634: printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
1.82 mrg 3635: (void *)(u_long)va, (long)pa,
1.73 eeh 3636: ref, mod);
3637:
3638: /* Modify page */
3639: pmap_enter(pmap_kernel(), va, pa, VM_PROT_ALL, VM_PROT_ALL);
1.108 chris 3640: pmap_update(pmap_kernel());
1.73 eeh 3641: *loc = 1;
3642:
3643: ref = pmap_is_referenced(pg);
3644: mod = pmap_is_modified(pg);
3645: printf("Modified page: ref %d, mod %d\n",
3646: ref, mod);
3647:
3648: /* Check pmap_protect() */
3649: pmap_protect(pmap_kernel(), va, va+1, VM_PROT_NONE);
1.108 chris 3650: pmap_update(pmap_kernel());
1.73 eeh 3651: ref = pmap_is_referenced(pg);
3652: mod = pmap_is_modified(pg);
 3653: 	printf("pmap_protect(VM_PROT_NONE): ref %d, mod %d\n",
3654: ref, mod);
3655:
3656: /* Now clear reference and modify */
3657: ref = pmap_clear_reference(pg);
3658: mod = pmap_clear_modify(pg);
3659: printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
1.82 mrg 3660: (void *)(u_long)va, (long)pa,
1.73 eeh 3661: ref, mod);
3662:
3663: /* Modify page */
3664: pmap_enter(pmap_kernel(), va, pa, VM_PROT_ALL, VM_PROT_ALL);
1.108 chris 3665: pmap_update(pmap_kernel());
1.73 eeh 3666: *loc = 1;
3667:
3668: ref = pmap_is_referenced(pg);
3669: mod = pmap_is_modified(pg);
3670: printf("Modified page: ref %d, mod %d\n",
3671: ref, mod);
3672:
 3673: 	/* Check pmap_page_protect() */
3674: pmap_page_protect(pg, VM_PROT_READ);
3675: ref = pmap_is_referenced(pg);
3676: mod = pmap_is_modified(pg);
 3677: 	printf("pmap_page_protect(): ref %d, mod %d\n",
3678: ref, mod);
3679:
3680: /* Now clear reference and modify */
3681: ref = pmap_clear_reference(pg);
3682: mod = pmap_clear_modify(pg);
3683: printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
1.82 mrg 3684: (void *)(u_long)va, (long)pa,
1.73 eeh 3685: ref, mod);
3686:
3687:
3688: /* Modify page */
3689: pmap_enter(pmap_kernel(), va, pa, VM_PROT_ALL, VM_PROT_ALL);
1.108 chris 3690: pmap_update(pmap_kernel());
1.73 eeh 3691: *loc = 1;
3692:
3693: ref = pmap_is_referenced(pg);
3694: mod = pmap_is_modified(pg);
3695: printf("Modified page: ref %d, mod %d\n",
3696: ref, mod);
3697:
 3698: 	/* Check pmap_page_protect() */
3699: pmap_page_protect(pg, VM_PROT_NONE);
3700: ref = pmap_is_referenced(pg);
3701: mod = pmap_is_modified(pg);
 3702: 	printf("pmap_page_protect(): ref %d, mod %d\n",
3703: ref, mod);
3704:
3705: /* Now clear reference and modify */
3706: ref = pmap_clear_reference(pg);
3707: mod = pmap_clear_modify(pg);
3708: printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
1.82 mrg 3709: (void *)(u_long)va, (long)pa,
1.43 eeh 3710: ref, mod);
3711:
3712: /* Unmap page */
3713: pmap_remove(pmap_kernel(), va, va+1);
1.108 chris 3714: pmap_update(pmap_kernel());
1.43 eeh 3715: ref = pmap_is_referenced(pg);
3716: mod = pmap_is_modified(pg);
1.82 mrg 3717: printf("Unmapped page: ref %d, mod %d\n", ref, mod);
1.43 eeh 3718:
3719: /* Now clear reference and modify */
3720: ref = pmap_clear_reference(pg);
3721: mod = pmap_clear_modify(pg);
3722: printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
1.82 mrg 3723: (void *)(u_long)va, (long)pa, ref, mod);
1.43 eeh 3724:
3725: /* Check it's properly cleared */
3726: ref = pmap_is_referenced(pg);
3727: mod = pmap_is_modified(pg);
3728: printf("Checking cleared page: ref %d, mod %d\n",
3729: ref, mod);
3730:
3731: pmap_remove(pmap_kernel(), va, va+1);
1.108 chris 3732: pmap_update(pmap_kernel());
1.262 mrg 3733: pmap_free_page(pa, cpus_active);
1.43 eeh 3734: }
3735: #endif
1.155 chs 3736:
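/*
 * pmap_update(): if this pmap's activation was deferred while updates
 * were pending (pm_refs <= 0), activate it now; an already-active
 * pmap (pm_refs > 0) needs nothing further here.
 */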
3737: void
3738: pmap_update(struct pmap *pmap)
3739: {
3740:
3741: if (pmap->pm_refs > 0) {
3742: return;
3743: }
3744: pmap->pm_refs = 1;
3745: pmap_activate_pmap(pmap);
3746: }
1.255 mrg 3747:
3748: /*
3749: * pmap_copy_page()/pmap_zero_page()
3750: *
3751: * we make sure that the destination page is flushed from all D$'s
3752: * before we perform the copy/zero.
3753: */
3754: extern int cold;
3755: void
3756: pmap_copy_page(paddr_t src, paddr_t dst)
3757: {
3758:
3759: if (!cold)
3760: dcache_flush_page_all(dst);
3761: pmap_copy_page_phys(src, dst);
3762: }
3763:
3764: void
3765: pmap_zero_page(paddr_t pa)
3766: {
3767:
3768: if (!cold)
3769: dcache_flush_page_all(pa);
3770: pmap_zero_page_phys(pa);
3771: }
1.281 martin 3772:
3773: #ifdef _LP64
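/*
 * Check a mapping range against the unimplemented "hole" in the
 * 64-bit address space: a range with eaddr <= hole_start or with
 * addr >= hole_end is accepted, anything overlapping the hole is
 * rejected with EINVAL.
 */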
3774: int
3775: sparc64_mmap_range_test(vaddr_t addr, vaddr_t eaddr)
3776: {
3777: const vaddr_t hole_start = 0x000007ffffffffff;
3778: const vaddr_t hole_end = 0xfffff80000000000;
3779:
3780: if (addr >= hole_end)
3781: return 0;
3782: if (eaddr <= hole_start)
3783: return 0;
3784:
3785: return EINVAL;
3786: }
3787: #endif
1.283 palle 3788:
3789: #ifdef SUN4V
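/*
 * Give the interrupt stack a permanent (locked) 64KB DTLB mapping via
 * the hypervisor, so interrupt handlers never take an MMU miss on it.
 */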
3790: void
3791: pmap_setup_intstack_sun4v(paddr_t pa)
3792: {
3793: int64_t hv_rc;
3794: int64_t data;
3795: data = SUN4V_TSB_DATA(
3796: 0 /* global */,
3797: PGSZ_64K,
3798: pa,
3799: 1 /* priv */,
3800: 1 /* Write */,
3801: 1 /* Cacheable */,
3802: FORCE_ALIAS /* ALIAS -- Disable D$ */,
3803: 1 /* valid */,
3804: 0 /* IE */);
3805: hv_rc = hv_mmu_map_perm_addr(INTSTACK, data, MAP_DTLB);
 3806: 	if (hv_rc != H_EOK) {
1.284 nakayama 3807: panic("hv_mmu_map_perm_addr() failed - rc = %" PRId64 "\n",
3808: hv_rc);
1.283 palle 3809: }
3810: }
3811:
3812: void
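/*
 * Register the TSB descriptor with the hypervisor for both context
 * zero and non-zero contexts; the first argument to the
 * hv_mmu_tsb_ctx*() calls is the number of descriptors being passed.
 */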
1.290 palle 3813: pmap_setup_tsb_sun4v(struct tsb_desc* tsb_desc)
1.283 palle 3814: {
3815: int err;
3816: paddr_t tsb_desc_p;
3817: tsb_desc_p = pmap_kextract((vaddr_t)tsb_desc);
1.290 palle 3818: if (!tsb_desc_p) {
1.283 palle 3819: panic("pmap_setup_tsb_sun4v() pmap_kextract() failed");
3820: }
3821: err = hv_mmu_tsb_ctx0(1, tsb_desc_p);
3822: if (err != H_EOK) {
3823: prom_printf("hv_mmu_tsb_ctx0() err: %d\n", err);
3824: panic("pmap_setup_tsb_sun4v() hv_mmu_tsb_ctx0() failed");
3825: }
3826: err = hv_mmu_tsb_ctxnon0(1, tsb_desc_p);
3827: if (err != H_EOK) {
3828: prom_printf("hv_mmu_tsb_ctxnon0() err: %d\n", err);
3829: panic("pmap_setup_tsb_sun4v() hv_mmu_tsb_ctxnon0() failed");
3830: }
3831: }
3832:
3833: #endif