Annotation of src/sys/arch/sparc/sparc/pmap.c, Revision 1.139
1.139 ! chs 1: /* $NetBSD: pmap.c,v 1.138 1999/03/12 22:42:30 perry Exp $ */
1.22 deraadt 2:
1.1 deraadt 3: /*
1.55 pk 4: * Copyright (c) 1996
1.57 abrown 5: * The President and Fellows of Harvard College. All rights reserved.
1.1 deraadt 6: * Copyright (c) 1992, 1993
7: * The Regents of the University of California. All rights reserved.
8: *
9: * This software was developed by the Computer Systems Engineering group
10: * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
11: * contributed to Berkeley.
12: *
13: * All advertising materials mentioning features or use of this software
14: * must display the following acknowledgement:
1.55 pk 15: * This product includes software developed by Harvard University.
1.1 deraadt 16: * This product includes software developed by the University of
17: * California, Lawrence Berkeley Laboratory.
18: *
19: * Redistribution and use in source and binary forms, with or without
20: * modification, are permitted provided that the following conditions
21: * are met:
1.55 pk 22: *
1.1 deraadt 23: * 1. Redistributions of source code must retain the above copyright
24: * notice, this list of conditions and the following disclaimer.
25: * 2. Redistributions in binary form must reproduce the above copyright
26: * notice, this list of conditions and the following disclaimer in the
27: * documentation and/or other materials provided with the distribution.
28: * 3. All advertising materials mentioning features or use of this software
29: * must display the following acknowledgement:
1.55 pk 30: * This product includes software developed by Aaron Brown and
31: * Harvard University.
32: * This product includes software developed by the University of
33: * California, Berkeley and its contributors.
1.1 deraadt 34: * 4. Neither the name of the University nor the names of its contributors
35: * may be used to endorse or promote products derived from this software
36: * without specific prior written permission.
37: *
38: * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
39: * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
40: * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
41: * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
42: * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
43: * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
44: * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
45: * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
46: * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
47: * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
48: * SUCH DAMAGE.
49: *
1.22 deraadt 50: * @(#)pmap.c 8.4 (Berkeley) 2/5/94
1.55 pk 51: *
1.1 deraadt 52: */
53:
54: /*
55: * SPARC physical map management code.
56: * Does not function on multiprocessors (yet).
57: */
1.112 mrg 58:
1.119 jonathan 59: #include "opt_ddb.h"
1.112 mrg 60: #include "opt_uvm.h"
1.1 deraadt 61:
62: #include <sys/param.h>
63: #include <sys/systm.h>
64: #include <sys/device.h>
65: #include <sys/proc.h>
1.43 pk 66: #include <sys/queue.h>
1.1 deraadt 67: #include <sys/malloc.h>
1.87 pk 68: #include <sys/lock.h>
1.121 pk 69: #include <sys/pool.h>
1.67 pk 70: #include <sys/exec.h>
71: #include <sys/core.h>
72: #include <sys/kcore.h>
1.1 deraadt 73:
74: #include <vm/vm.h>
75: #include <vm/vm_kern.h>
76: #include <vm/vm_prot.h>
77: #include <vm/vm_page.h>
78:
1.110 mrg 79: #if defined(UVM)
80: #include <uvm/uvm.h>
1.128 pk 81: #else
82: #define uvm_pglistalloc(s,l,h,a,b,m,n,f) \
83: vm_page_alloc_memory(s,l,h,a,b,m,n,f)
84: #define uvm_km_alloc(m,s) kmem_alloc_pageable(m,s)
1.110 mrg 85: #endif
86:
1.1 deraadt 87: #include <machine/autoconf.h>
88: #include <machine/bsd_openprom.h>
1.19 deraadt 89: #include <machine/oldmon.h>
1.1 deraadt 90: #include <machine/cpu.h>
91: #include <machine/ctlreg.h>
1.67 pk 92: #include <machine/kcore.h>
1.1 deraadt 93:
94: #include <sparc/sparc/asm.h>
95: #include <sparc/sparc/cache.h>
1.3 deraadt 96: #include <sparc/sparc/vaddrs.h>
1.69 pk 97: #include <sparc/sparc/cpuvar.h>
1.1 deraadt 98:
99: #ifdef DEBUG
100: #define PTE_BITS "\20\40V\37W\36S\35NC\33IO\32U\31M"
1.55 pk 101: #define PTE_BITS4M "\20\10C\7M\6R\5ACC3\4ACC2\3ACC1\2TYP2\1TYP1"
1.1 deraadt 102: #endif
103:
104: /*
105: * The SPARCstation offers us the following challenges:
106: *
107: * 1. A virtual address cache. This is, strictly speaking, not
108: * part of the architecture, but the code below assumes one.
109: * This is a write-through cache on the 4c and a write-back cache
110: * on others.
111: *
1.55 pk 112: * 2. (4/4c only) An MMU that acts like a cache. There is not enough
113: * space in the MMU to map everything all the time. Instead, we need
1.1 deraadt 114: 	 *	to load the MMU with the `working set' of translations for each
1.55 pk 115: * process. The sun4m does not act like a cache; tables are maintained
116: * in physical memory.
1.1 deraadt 117: *
118: * 3. Segmented virtual and physical spaces. The upper 12 bits of
119: * a virtual address (the virtual segment) index a segment table,
120: * giving a physical segment. The physical segment selects a
121: * `Page Map Entry Group' (PMEG) and the virtual page number---the
122: * next 5 or 6 bits of the virtual address---select the particular
123: * `Page Map Entry' for the page. We call the latter a PTE and
124: * call each Page Map Entry Group a pmeg (for want of a better name).
1.55 pk 125: * Note that the sun4m has an unsegmented 36-bit physical space.
1.1 deraadt 126: *
127: * Since there are no valid bits in the segment table, the only way
128: * to have an invalid segment is to make one full pmeg of invalid PTEs.
1.55 pk 129: * We use the last one (since the ROM does as well) (sun4/4c only)
1.1 deraadt 130: *
131: * 4. Discontiguous physical pages. The Mach VM expects physical pages
132: * to be in one sequential lump.
133: *
134: * 5. The MMU is always on: it is not possible to disable it. This is
135: * mainly a startup hassle.
136: */
137:
138: struct pmap_stats {
139: int ps_unlink_pvfirst; /* # of pv_unlinks on head */
140: int ps_unlink_pvsearch; /* # of pv_unlink searches */
141: int ps_changeprots; /* # of calls to changeprot */
142: int ps_useless_changeprots; /* # of changeprots for wiring */
143: int ps_enter_firstpv; /* pv heads entered */
144: int ps_enter_secondpv; /* pv nonheads entered */
145: int ps_useless_changewire; /* useless wiring changes */
146: int ps_npg_prot_all; /* # of active pages protected */
147: int ps_npg_prot_actual; /* # pages actually affected */
1.70 pk 148: int ps_npmeg_free; /* # of free pmegs */
149: int ps_npmeg_locked; /* # of pmegs on locked list */
150: int ps_npmeg_lru; /* # of pmegs on lru list */
1.1 deraadt 151: } pmap_stats;
152:
153: #ifdef DEBUG
154: #define PDB_CREATE 0x0001
155: #define PDB_DESTROY 0x0002
156: #define PDB_REMOVE 0x0004
157: #define PDB_CHANGEPROT 0x0008
158: #define PDB_ENTER 0x0010
1.90 pk 159: #define PDB_FOLLOW 0x0020
1.1 deraadt 160:
161: #define PDB_MMU_ALLOC 0x0100
162: #define PDB_MMU_STEAL 0x0200
163: #define PDB_CTX_ALLOC 0x0400
164: #define PDB_CTX_STEAL 0x0800
1.43 pk 165: #define PDB_MMUREG_ALLOC 0x1000
166: #define PDB_MMUREG_STEAL 0x2000
1.55 pk 167: #define PDB_CACHESTUFF 0x4000
1.72 pk 168: #define PDB_SWITCHMAP 0x8000
169: #define PDB_SANITYCHK 0x10000
1.55 pk 170: int pmapdebug = 0;
1.1 deraadt 171: #endif
172:
1.55 pk 173: #if 0
1.10 deraadt 174: #define splpmap() splimp()
1.55 pk 175: #endif
1.1 deraadt 176:
177: /*
178: * First and last managed physical addresses.
179: */
1.124 pk 180: paddr_t vm_first_phys, vm_num_phys;
1.1 deraadt 181:
182: /*
183: * For each managed physical page, there is a list of all currently
184: * valid virtual mappings of that page. Since there is usually one
185: * (or zero) mapping per page, the table begins with an initial entry,
186: * rather than a pointer; this head entry is empty iff its pv_pmap
187: * field is NULL.
188: *
189: * Note that these are per machine independent page (so there may be
190: * only one for every two hardware pages, e.g.). Since the virtual
191: * address is aligned on a page boundary, the low order bits are free
192: * for storing flags. Only the head of each list has flags.
193: *
194: * THIS SHOULD BE PART OF THE CORE MAP
195: */
196: struct pvlist {
1.84 pk 197: struct pvlist *pv_next; /* next pvlist, if any */
198: struct pmap *pv_pmap; /* pmap of this va */
1.124 pk 199: vaddr_t pv_va; /* virtual address */
1.84 pk 200: int pv_flags; /* flags (below) */
1.1 deraadt 201: };
202:
203: /*
204: * Flags in pv_flags. Note that PV_MOD must be 1 and PV_REF must be 2
205: * since they must line up with the bits in the hardware PTEs (see pte.h).
1.115 pk 206: * SUN4M bits are at a slightly different location in the PTE.
207: * Note: the REF, MOD and ANC flag bits occur only in the head of a pvlist.
208: * The cacheable bit (either PV_NC or PV_C4M) is meaningful in each
209: * individual pv entry.
210: */
211: #define PV_MOD 1 /* page modified */
212: #define PV_REF 2 /* page referenced */
213: #define PV_NC 4 /* page cannot be cached */
214: #define PV_REF4M 1 /* page referenced (SRMMU) */
215: #define PV_MOD4M 2 /* page modified (SRMMU) */
216: #define PV_C4M 4 /* page _can_ be cached (SRMMU) */
217: #define PV_ANC 0x10 /* page has incongruent aliases */
1.1 deraadt 218:
219: struct pvlist *pv_table; /* array of entries, one per physical page */
220:
1.124 pk 221: #define pvhead(pa) (&pv_table[((pa) - vm_first_phys) >> PGSHIFT])
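/*
 * pvhead() is the usual route from a physical address to the head of
 * that page's pvlist; e.g. the ref/mod bookkeeping in me_alloc() and
 * me_free() below does
 *
 *	if (managed(pa))
 *		pvhead(pa)->pv_flags |= MR4_4C(tpte);
 */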
1.1 deraadt 222:
1.124 pk 223: static vsize_t pv_table_map __P((paddr_t, int));
224: static paddr_t pv_physmem;
1.122 pk 225: static struct pool pv_pool;
226:
227:
1.1 deraadt 228: /*
229: * Each virtual segment within each pmap is either valid or invalid.
230: * It is valid if pm_npte[VA_VSEG(va)] is not 0. This does not mean
231: * it is in the MMU, however; that is true iff pm_segmap[VA_VSEG(va)]
232: * does not point to the invalid PMEG.
233: *
1.55 pk 234: * In the older SPARC architectures (pre-4m), page tables are cached in the
235: * MMU. The following discussion applies to these architectures:
236: *
1.1 deraadt 237: * If a virtual segment is valid and loaded, the correct PTEs appear
238: * in the MMU only. If it is valid and unloaded, the correct PTEs appear
239: * in the pm_pte[VA_VSEG(va)] only. However, some effort is made to keep
240: * the software copies consistent enough with the MMU so that libkvm can
241: * do user address translations. In particular, pv_changepte() and
242: * pmap_enu() maintain consistency, while less critical changes are
243: * not maintained. pm_pte[VA_VSEG(va)] always points to space for those
244: * PTEs, unless this is the kernel pmap, in which case pm_pte[x] is not
245: * used (sigh).
246: *
247: * Each PMEG in the MMU is either free or contains PTEs corresponding to
248: * some pmap and virtual segment. If it contains some PTEs, it also contains
249: * reference and modify bits that belong in the pv_table. If we need
250: * to steal a PMEG from some process (if we need one and none are free)
251: * we must copy the ref and mod bits, and update pm_segmap in the other
252: * pmap to show that its virtual segment is no longer in the MMU.
253: *
254: * There are 128 PMEGs in a small Sun-4, of which only a few dozen are
255: * tied down permanently, leaving `about' 100 to be spread among
256: * running processes. These are managed as an LRU cache. Before
257: * calling the VM paging code for a user page fault, the fault handler
258: * calls mmu_load(pmap, va) to try to get a set of PTEs put into the
259: * MMU. mmu_load will check the validity of the segment and tell whether
260: * it did something.
261: *
262: * Since I hate the name PMEG I call this data structure an `mmu entry'.
263: * Each mmuentry is on exactly one of three `usage' lists: free, LRU,
264: * or locked. The LRU list is for user processes; the locked list is
265: * for kernel entries; both are doubly linked queues headed by `mmuhd's.
266: * The free list is a simple list, headed by a free list pointer.
1.55 pk 267: *
268: * In the sun4m architecture using the SPARC Reference MMU (SRMMU), three
269: * levels of page tables are maintained in physical memory. We use the same
270: * structures as with the 3-level old-style MMU (pm_regmap, pm_segmap,
271: * rg_segmap, sg_pte, etc) to maintain kernel-edible page tables; we also
272: * build a parallel set of physical tables that can be used by the MMU.
273: * (XXX: This seems redundant, but is it necessary for the unified kernel?)
274: *
275: * If a virtual segment is valid, its entries will be in both parallel lists.
276: * If it is not valid, then its entry in the kernel tables will be zero, and
277: * its entry in the MMU tables will either be nonexistent or zero as well.
1.72 pk 278: *
279: * The Reference MMU generally uses a Translation Look-aside Buffer (TLB)
280: * to cache the result of recently executed page table walks. When
281: * manipulating page tables, we need to ensure consistency of the
282: * in-memory and TLB copies of the page table entries. This is handled
283: * by flushing (and invalidating) a TLB entry when appropriate before
284: * altering an in-memory page table entry.
1.1 deraadt 285: */
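/*
 * The flush-before-update discipline described above shows up
 * concretely in setpte4m() below, which always does
 *
 *	tlb_flush_page(va);
 *	setpgt4m(sm->sg_pte + VA_SUN4M_VPG(va), pte);
 *
 * i.e. the (possibly stale) TLB entry is invalidated before the
 * in-memory PTE the MMU would reload from is rewritten.
 */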
286: struct mmuentry {
1.43 pk 287: TAILQ_ENTRY(mmuentry) me_list; /* usage list link */
288: TAILQ_ENTRY(mmuentry) me_pmchain; /* pmap owner link */
1.1 deraadt 289: struct pmap *me_pmap; /* pmap, if in use */
1.43 pk 290: u_short me_vreg; /* associated virtual region/segment */
291: u_short me_vseg; /* associated virtual region/segment */
1.45 pk 292: u_short me_cookie; /* hardware SMEG/PMEG number */
1.1 deraadt 293: };
1.43 pk 294: struct mmuentry *mmusegments; /* allocated in pmap_bootstrap */
295: struct mmuentry *mmuregions; /* allocated in pmap_bootstrap */
1.1 deraadt 296:
1.43 pk 297: struct mmuhd segm_freelist, segm_lru, segm_locked;
298: struct mmuhd region_freelist, region_lru, region_locked;
1.1 deraadt 299:
1.69 pk 300: int seginval; /* [4/4c] the invalid segment number */
301: int reginval; /* [4/3mmu] the invalid region number */
1.1 deraadt 302:
303: /*
1.55 pk 304: * (sun4/4c)
1.1 deraadt 305: * A context is simply a small number that dictates which set of 4096
306: * segment map entries the MMU uses. The Sun 4c has eight such sets.
               307: 	 * These are allotted in an `almost MRU' fashion.
1.55 pk 308: * (sun4m)
309: * A context is simply a small number that indexes the context table, the
310: * root-level page table mapping 4G areas. Each entry in this table points
311: * to a 1st-level region table. A SPARC reference MMU will usually use 16
312: * such contexts, but some offer as many as 64k contexts; the theoretical
313: * maximum is 2^32 - 1, but this would create overlarge context tables.
1.1 deraadt 314: *
315: * Each context is either free or attached to a pmap.
316: *
317: * Since the virtual address cache is tagged by context, when we steal
318: * a context we have to flush (that part of) the cache.
319: */
320: union ctxinfo {
321: union ctxinfo *c_nextfree; /* free list (if free) */
322: struct pmap *c_pmap; /* pmap (if busy) */
323: };
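/*
 * A context is thus either on the free list (linked through
 * c_nextfree) or owned by exactly one pmap (via c_pmap).  ctx_alloc()
 * and ctx_free(), declared just below, move contexts between those
 * two states; when the free list is empty a busy context is stolen,
 * with ctx_kick/ctx_kickdir acting as the rover.
 */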
1.69 pk 324:
325: #define ncontext (cpuinfo.mmu_ncontext)
326: #define ctx_kick (cpuinfo.ctx_kick)
327: #define ctx_kickdir (cpuinfo.ctx_kickdir)
328: #define ctx_freelist (cpuinfo.ctx_freelist)
329:
1.122 pk 330: void ctx_alloc __P((struct pmap *));
331: void ctx_free __P((struct pmap *));
332:
1.69 pk 333: #if 0
1.1 deraadt 334: union ctxinfo *ctxinfo;		/* allocated in pmap_bootstrap */
335:
336: union ctxinfo *ctx_freelist; /* context free list */
337: int ctx_kick; /* allocation rover when none free */
338: int ctx_kickdir; /* ctx_kick roves both directions */
339:
1.69 pk 340: char *ctxbusyvector; /* [4m] tells what contexts are busy (XXX)*/
341: #endif
1.55 pk 342:
1.1 deraadt 343: caddr_t vpage[2]; /* two reserved MD virtual pages */
1.101 pk 344: #if defined(SUN4M)
345: int *vpage_pte[2]; /* pte location of vpage[] */
346: #endif
1.41 mycroft 347: caddr_t vmmap; /* one reserved MI vpage for /dev/mem */
1.55 pk 348: caddr_t vdumppages; /* 32KB worth of reserved dump pages */
1.1 deraadt 349:
1.69 pk 350: smeg_t tregion; /* [4/3mmu] Region for temporary mappings */
351:
1.43 pk 352: struct pmap kernel_pmap_store; /* the kernel's pmap */
353: struct regmap kernel_regmap_store[NKREG]; /* the kernel's regmap */
354: struct segmap kernel_segmap_store[NKREG*NSEGRG];/* the kernel's segmaps */
1.1 deraadt 355:
1.69 pk 356: #if defined(SUN4M)
1.55 pk 357: u_int *kernel_regtable_store; /* 1k of storage to map the kernel */
358: u_int *kernel_segtable_store; /* 2k of storage to map the kernel */
359: u_int *kernel_pagtable_store; /* 128k of storage to map the kernel */
360:
361: u_int *kernel_iopte_table; /* 64k of storage for iommu */
362: u_int kernel_iopte_table_pa;
1.121 pk 363:
364: /*
365: * Memory pools and back-end supplier for SRMMU page tables.
366: * Share a pool between the level 2 and level 3 page tables,
367: * since these are equal in size.
368: */
369: static struct pool L1_pool;
370: static struct pool L23_pool;
371:
372: static void *pgt_page_alloc __P((unsigned long, int, int));
373: static void pgt_page_free __P((void *, unsigned long, int));
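/*
 * pgt_page_alloc()/pgt_page_free(), defined further down, are the
 * page-level back-end hooks for these pools; the allocator flushes
 * and uncaches a freshly allocated page when the CPU cannot keep
 * page tables in its cache (CPUFLG_CACHEPAGETABLES clear).
 */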
374:
1.55 pk 375: #endif
376:
1.30 pk 377: #define MA_SIZE 32 /* size of memory descriptor arrays */
1.1 deraadt 378: struct memarr pmemarr[MA_SIZE];/* physical memory regions */
379: int npmemarr; /* number of entries in pmemarr */
1.124 pk 380: /*static*/ paddr_t avail_start; /* first free physical page */
381: /*static*/ paddr_t avail_end; /* last free physical page */
382: /*static*/ paddr_t unavail_gap_start;/* first stolen free phys page */
383: /*static*/ paddr_t unavail_gap_end;/* last stolen free physical page */
384: /*static*/ vaddr_t virtual_avail; /* first free virtual page number */
385: /*static*/ vaddr_t virtual_end; /* last free virtual page number */
1.29 pk 386:
1.107 pk 387: static void pmap_page_upload __P((void));
1.118 thorpej 388: void pmap_pinit __P((pmap_t));
389: void pmap_release __P((pmap_t));
1.107 pk 390:
1.45 pk 391: int mmu_has_hole;
392:
1.124 pk 393: vaddr_t prom_vstart; /* For /dev/kmem */
394: vaddr_t prom_vend;
1.1 deraadt 395:
1.134 thorpej 396: /*
397: * Memory pool for pmap structures.
398: */
399: static struct pool pmap_pmap_pool;
400:
1.55 pk 401: #if defined(SUN4)
1.31 pk 402: /*
1.55 pk 403: * [sun4]: segfixmask: on some systems (4/110) "getsegmap()" returns a
404: * partly invalid value. getsegmap returns a 16 bit value on the sun4,
405: * but only the first 8 or so bits are valid (the rest are *supposed* to
               406: 	 * be zero).  On the 4/110 the bits that are supposed to be zero are
407: * all one instead. e.g. KERNBASE is usually mapped by pmeg number zero.
408: * On a 4/300 getsegmap(KERNBASE) == 0x0000, but
1.31 pk 409: 	 * on a 4/100 getsegmap(KERNBASE) == 0xff00.
410: *
1.55 pk 411: * This confuses mmu_reservemon() and causes it to not reserve the PROM's
412: * pmegs. Then the PROM's pmegs get used during autoconfig and everything
1.31 pk 413: * falls apart! (not very fun to debug, BTW.)
414: *
1.43 pk 415: 	 * solution: mask the invalid bits in the getsegmap macro.
1.31 pk 416: */
417:
418: static u_long segfixmask = 0xffffffff; /* all bits valid to start */
1.55 pk 419: #else
420: #define segfixmask 0xffffffff /* It's in getsegmap's scope */
1.31 pk 421: #endif
422:
1.1 deraadt 423: /*
424: * pseudo-functions for mnemonic value
425: */
1.55 pk 426: #define getsegmap(va) (CPU_ISSUN4C \
427: ? lduba(va, ASI_SEGMAP) \
428: : (lduha(va, ASI_SEGMAP) & segfixmask))
429: #define setsegmap(va, pmeg) (CPU_ISSUN4C \
430: ? stba(va, ASI_SEGMAP, pmeg) \
431: : stha(va, ASI_SEGMAP, pmeg))
432:
433: /* 3-level sun4 MMU only: */
434: #define getregmap(va) ((unsigned)lduha((va)+2, ASI_REGMAP) >> 8)
435: #define setregmap(va, smeg) stha((va)+2, ASI_REGMAP, (smeg << 8))
436:
437: #if defined(SUN4M)
1.132 pk 438: void setpgt4m __P((int *ptep, int pte));
439: void setpte4m __P((vaddr_t va, int pte));
440: void setptesw4m __P((struct pmap *pm, vaddr_t va, int pte));
1.124 pk 441: static u_int getptesw4m __P((struct pmap *pm, vaddr_t va));
1.55 pk 442: #endif
443:
444: /* Function pointer messiness for supporting multiple sparc architectures
445: * within a single kernel: notice that there are two versions of many of the
446: * functions within this file/module, one for the sun4/sun4c and the other
447: * for the sun4m. For performance reasons (since things like pte bits don't
448: * map nicely between the two architectures), there are separate functions
449: * rather than unified functions which test the cputyp variable. If only
450: * one architecture is being used, then the non-suffixed function calls
451: * are macro-translated into the appropriate xxx4_4c or xxx4m call. If
452: * multiple architectures are defined, the calls translate to (*xxx_p),
453: * i.e. they indirect through function pointers initialized as appropriate
454: * to the run-time architecture in pmap_bootstrap. See also pmap.h.
455: */
456:
457: #if defined(SUN4M)
1.71 pk 458: static void mmu_setup4m_L1 __P((int, struct pmap *));
459: static void mmu_setup4m_L2 __P((int, struct regmap *));
460: static void mmu_setup4m_L3 __P((int, struct segmap *));
1.77 pk 461: /*static*/ void mmu_reservemon4m __P((struct pmap *));
1.58 pk 462:
1.124 pk 463: /*static*/ void pmap_rmk4m __P((struct pmap *, vaddr_t, vaddr_t, int, int));
464: /*static*/ void pmap_rmu4m __P((struct pmap *, vaddr_t, vaddr_t, int, int));
465: /*static*/ void pmap_enk4m __P((struct pmap *, vaddr_t, vm_prot_t,
466: int, struct pvlist *, int));
467: /*static*/ void pmap_enu4m __P((struct pmap *, vaddr_t, vm_prot_t,
468: int, struct pvlist *, int));
1.55 pk 469: /*static*/ void pv_changepte4m __P((struct pvlist *, int, int));
470: /*static*/ int pv_syncflags4m __P((struct pvlist *));
1.124 pk 471: /*static*/ int pv_link4m __P((struct pvlist *, struct pmap *, vaddr_t, int));
472: /*static*/ void pv_unlink4m __P((struct pvlist *, struct pmap *, vaddr_t));
1.55 pk 473: #endif
474:
475: #if defined(SUN4) || defined(SUN4C)
1.58 pk 476: /*static*/ void mmu_reservemon4_4c __P((int *, int *));
1.124 pk 477: /*static*/ void pmap_rmk4_4c __P((struct pmap *, vaddr_t, vaddr_t, int, int));
478: /*static*/ void pmap_rmu4_4c __P((struct pmap *, vaddr_t, vaddr_t, int, int));
479: /*static*/ void pmap_enk4_4c __P((struct pmap *, vaddr_t, vm_prot_t,
480: int, struct pvlist *, int));
481: /*static*/ void pmap_enu4_4c __P((struct pmap *, vaddr_t, vm_prot_t,
482: int, struct pvlist *, int));
1.55 pk 483: /*static*/ void pv_changepte4_4c __P((struct pvlist *, int, int));
484: /*static*/ int pv_syncflags4_4c __P((struct pvlist *));
1.124 pk 485: /*static*/ int pv_link4_4c __P((struct pvlist *, struct pmap *, vaddr_t, int));
486: /*static*/ void pv_unlink4_4c __P((struct pvlist *, struct pmap *, vaddr_t));
1.55 pk 487: #endif
488:
489: #if !defined(SUN4M) && (defined(SUN4) || defined(SUN4C))
490: #define pmap_rmk pmap_rmk4_4c
491: #define pmap_rmu pmap_rmu4_4c
492:
493: #elif defined(SUN4M) && !(defined(SUN4) || defined(SUN4C))
494: #define pmap_rmk pmap_rmk4m
495: #define pmap_rmu pmap_rmu4m
496:
497: #else /* must use function pointers */
498:
499: /* function pointer declarations */
500: /* from pmap.h: */
1.124 pk 501: void (*pmap_clear_modify_p) __P((paddr_t pa));
502: void (*pmap_clear_reference_p) __P((paddr_t pa));
503: void (*pmap_copy_page_p) __P((paddr_t, paddr_t));
504: void (*pmap_enter_p) __P((pmap_t, vaddr_t, paddr_t,
505: vm_prot_t, boolean_t));
506: paddr_t (*pmap_extract_p) __P((pmap_t, vaddr_t));
507: boolean_t (*pmap_is_modified_p) __P((paddr_t pa));
508: boolean_t (*pmap_is_referenced_p) __P((paddr_t pa));
509: void (*pmap_page_protect_p) __P((paddr_t, vm_prot_t));
510: void (*pmap_protect_p) __P((pmap_t, vaddr_t, vaddr_t, vm_prot_t));
511: void (*pmap_zero_page_p) __P((paddr_t));
512: void (*pmap_changeprot_p) __P((pmap_t, vaddr_t, vm_prot_t, int));
1.55 pk 513: /* local: */
1.124 pk 514: void (*pmap_rmk_p) __P((struct pmap *, vaddr_t, vaddr_t, int, int));
515: void (*pmap_rmu_p) __P((struct pmap *, vaddr_t, vaddr_t, int, int));
1.55 pk 516:
517: #define pmap_rmk (*pmap_rmk_p)
518: #define pmap_rmu (*pmap_rmu_p)
519:
520: #endif
521:
522: /* --------------------------------------------------------------*/
523:
524: /*
525: * Next we have some Sun4m-specific routines which have no 4/4c
526: * counterparts, or which are 4/4c macros.
527: */
528:
529: #if defined(SUN4M)
530:
531: /* Macros which implement SRMMU TLB flushing/invalidation */
532:
533: #define tlb_flush_page(va) sta((va & ~0xfff) | ASI_SRMMUFP_L3, ASI_SRMMUFP,0)
534: #define tlb_flush_segment(vreg, vseg) sta((vreg << RGSHIFT) | (vseg << SGSHIFT)\
535: | ASI_SRMMUFP_L2, ASI_SRMMUFP,0)
536: #define tlb_flush_context() sta(ASI_SRMMUFP_L1, ASI_SRMMUFP, 0)
537: #define tlb_flush_all() sta(ASI_SRMMUFP_LN, ASI_SRMMUFP, 0)
538:
539: static u_int VA2PA __P((caddr_t));
1.97 pk 540: static u_long srmmu_bypass_read __P((u_long));
1.55 pk 541:
542: /*
543: * VA2PA(addr) -- converts a virtual address to a physical address using
544: * the MMU's currently-installed page tables. As a side effect, the address
545: * translation used may cause the associated pte to be encached. The correct
546: * context for VA must be set before this is called.
547: *
548: * This routine should work with any level of mapping, as it is used
549: * during bootup to interact with the ROM's initial L1 mapping of the kernel.
550: */
551: static __inline u_int
552: VA2PA(addr)
1.124 pk 553: caddr_t addr;
1.55 pk 554: {
1.124 pk 555: u_int pte;
1.55 pk 556:
557: /* we'll use that handy SRMMU flush/probe! %%%: make consts below! */
558: /* Try each level in turn until we find a valid pte. Otherwise panic */
559:
560: pte = lda(((u_int)addr & ~0xfff) | ASI_SRMMUFP_L3, ASI_SRMMUFP);
1.137 pk 561: (void)lda(SRMMU_SFSR, ASI_SRMMU);
1.55 pk 562: if ((pte & SRMMU_TETYPE) == SRMMU_TEPTE)
563: return (((pte & SRMMU_PPNMASK) << SRMMU_PPNPASHIFT) |
564: ((u_int)addr & 0xfff));
1.60 pk 565:
566: /* A `TLB Flush Entire' is required before any L0, L1 or L2 probe */
567: tlb_flush_all();
568:
1.55 pk 569: pte = lda(((u_int)addr & ~0xfff) | ASI_SRMMUFP_L2, ASI_SRMMUFP);
570: if ((pte & SRMMU_TETYPE) == SRMMU_TEPTE)
571: return (((pte & SRMMU_PPNMASK) << SRMMU_PPNPASHIFT) |
572: ((u_int)addr & 0x3ffff));
573: pte = lda(((u_int)addr & ~0xfff) | ASI_SRMMUFP_L1, ASI_SRMMUFP);
574: if ((pte & SRMMU_TETYPE) == SRMMU_TEPTE)
575: return (((pte & SRMMU_PPNMASK) << SRMMU_PPNPASHIFT) |
576: ((u_int)addr & 0xffffff));
577: pte = lda(((u_int)addr & ~0xfff) | ASI_SRMMUFP_L0, ASI_SRMMUFP);
578: if ((pte & SRMMU_TETYPE) == SRMMU_TEPTE)
579: return (((pte & SRMMU_PPNMASK) << SRMMU_PPNPASHIFT) |
580: ((u_int)addr & 0xffffffff));
581:
582: panic("VA2PA: Asked to translate unmapped VA %p", addr);
583: }
584:
585: /*
586: * Get the page table entry (PTE) for va by looking it up in the software
587: * page tables. These are the same tables that are used by the MMU; this
588: * routine allows easy access to the page tables even if the context
589: * corresponding to the table is not loaded or selected.
590: * This routine should NOT be used if there is any chance that the desired
591: * pte is in the TLB cache, since it will return stale data in that case.
592: * For that case, and for general use, use getpte4m, which is much faster
593: * and avoids walking in-memory page tables if the page is in the cache.
594: * Note also that this routine only works if a kernel mapping has been
595: * installed for the given page!
596: */
597: __inline u_int
598: getptesw4m(pm, va) /* Assumes L3 mapping! */
1.124 pk 599: struct pmap *pm;
600: vaddr_t va;
1.55 pk 601: {
1.124 pk 602: struct regmap *rm;
603: struct segmap *sm;
1.55 pk 604:
605: rm = &pm->pm_regmap[VA_VREG(va)];
606: #ifdef DEBUG
607: if (rm == NULL)
1.58 pk 608: panic("getptesw4m: no regmap entry");
1.55 pk 609: #endif
610: sm = &rm->rg_segmap[VA_VSEG(va)];
611: #ifdef DEBUG
612: if (sm == NULL)
1.58 pk 613: panic("getptesw4m: no segmap");
1.55 pk 614: #endif
615: return (sm->sg_pte[VA_SUN4M_VPG(va)]); /* return pte */
616: }
617:
1.85 pk 618: __inline void
619: setpgt4m(ptep, pte)
620: int *ptep;
621: int pte;
622: {
1.130 pk 623: swap(ptep, pte);
1.103 pk 624: #if 1
1.121 pk 625: /* XXX - uncaching in pgt_page_alloc() below is not yet quite Okay */
1.103 pk 626: if (cpuinfo.cpu_type == CPUTYP_SS1_MBUS_NOMXCC)
1.85 pk 627: cpuinfo.pcache_flush_line((int)ptep, VA2PA((caddr_t)ptep));
1.100 pk 628: #endif
1.85 pk 629: }
630:
1.55 pk 631: /*
632: * Set the page table entry for va to pte. Only affects software MMU page-
633: * tables (the in-core pagetables read by the MMU). Ignores TLB, and
634: * thus should _not_ be called if the pte translation could be in the TLB.
635: * In this case, use setpte4m().
636: */
637: __inline void
638: setptesw4m(pm, va, pte)
1.124 pk 639: struct pmap *pm;
640: vaddr_t va;
641: int pte;
1.55 pk 642: {
1.124 pk 643: struct regmap *rm;
644: struct segmap *sm;
1.55 pk 645:
646: rm = &pm->pm_regmap[VA_VREG(va)];
647:
648: #ifdef DEBUG
649: if (pm->pm_regmap == NULL || rm == NULL)
1.82 pk 650: panic("setptesw4m: no regmap entry");
1.55 pk 651: #endif
652: sm = &rm->rg_segmap[VA_VSEG(va)];
653:
654: #ifdef DEBUG
655: if (rm->rg_segmap == NULL || sm == NULL || sm->sg_pte == NULL)
1.82 pk 656: panic("setptesw4m: no segmap for va %p", (caddr_t)va);
1.55 pk 657: #endif
1.85 pk 658: setpgt4m(sm->sg_pte + VA_SUN4M_VPG(va), pte);
1.72 pk 659: }
660:
1.92 pk 661: /* Set the page table entry for va to pte. */
1.55 pk 662: __inline void
663: setpte4m(va, pte)
1.124 pk 664: vaddr_t va;
1.115 pk 665: int pte;
1.55 pk 666: {
1.115 pk 667: struct pmap *pm;
668: struct regmap *rm;
669: struct segmap *sm;
1.55 pk 670:
1.100 pk 671: if (getcontext4m() != 0)
672: panic("setpte4m: user context");
673:
674: pm = pmap_kernel();
1.55 pk 675:
676: /* Note: inline version of setptesw4m() */
677: #ifdef DEBUG
678: if (pm->pm_regmap == NULL)
679: panic("setpte4m: no regmap entry");
1.43 pk 680: #endif
1.55 pk 681: rm = &pm->pm_regmap[VA_VREG(va)];
682: sm = &rm->rg_segmap[VA_VSEG(va)];
1.1 deraadt 683:
1.55 pk 684: #ifdef DEBUG
1.100 pk 685: if (rm->rg_segmap == NULL)
686: panic("setpte4m: no segmap for va %p (rp=%p)",
687: (caddr_t)va, (caddr_t)rm);
688:
689: if (sm->sg_pte == NULL)
690: panic("setpte4m: no pte for va %p (rp=%p, sp=%p)",
691: (caddr_t)va, rm, sm);
1.55 pk 692: #endif
693: tlb_flush_page(va);
1.72 pk 694: setpgt4m(sm->sg_pte + VA_SUN4M_VPG(va), pte);
1.55 pk 695: }
1.72 pk 696:
1.100 pk 697: void pcache_flush __P((caddr_t, caddr_t, int));
698: void
699: pcache_flush(va, pa, n)
700: caddr_t va, pa;
701: int n;
702: {
1.109 pk 703: void (*f)__P((int,int)) = cpuinfo.pcache_flush_line;
704:
1.100 pk 705: while ((n -= 4) >= 0)
1.109 pk 706: (*f)((u_int)va+n, (u_int)pa+n);
1.100 pk 707: }
708:
709: /*
1.121 pk 710: * Page table pool back-end.
711: */
1.100 pk 712: void *
1.121 pk 713: pgt_page_alloc(sz, flags, mtype)
714: unsigned long sz;
715: int flags;
716: int mtype;
1.100 pk 717: {
1.121 pk 718: caddr_t p;
1.100 pk 719:
1.126 pk 720: #if defined(UVM)
1.121 pk 721: p = (caddr_t)uvm_km_kmemalloc(kernel_map, uvm.kernel_object,
1.124 pk 722: (vsize_t)sz, UVM_KMF_NOWAIT);
1.126 pk 723: #else
724: p = (caddr_t)kmem_malloc(kmem_map, (vsize_t)sz, 0);
725: #endif
1.100 pk 726:
1.121 pk 727: if ((cpuinfo.flags & CPUFLG_CACHEPAGETABLES) == 0) {
728: pcache_flush(p, (caddr_t)VA2PA(p), sz);
729: kvm_uncache(p, sz/NBPG);
1.100 pk 730: }
731: return (p);
1.121 pk 732: }
733:
1.100 pk 734: void
1.121 pk 735: pgt_page_free(v, sz, mtype)
736: void *v;
737: unsigned long sz;
738: int mtype;
1.100 pk 739: {
1.126 pk 740: #if defined(UVM)
1.124 pk 741: uvm_km_free(kernel_map, (vaddr_t)v, sz);
1.126 pk 742: #else
743: kmem_free(kernel_map, (vaddr_t)v, sz);
744: #endif
1.100 pk 745: }
1.55 pk 746: #endif /* 4m only */
1.1 deraadt 747:
748: /*----------------------------------------------------------------*/
749:
1.72 pk 750: /*
751: * The following three macros are to be used in sun4/sun4c code only.
752: */
1.69 pk 753: #if defined(SUN4_MMU3L)
754: #define CTX_USABLE(pm,rp) ( \
1.72 pk 755: ((pm)->pm_ctx != NULL && \
756: (!HASSUN4_MMU3L || (rp)->rg_smeg != reginval)) \
1.69 pk 757: )
1.43 pk 758: #else
1.55 pk 759: #define CTX_USABLE(pm,rp) ((pm)->pm_ctx != NULL )
1.43 pk 760: #endif
761:
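/*
 * The GAP macros below maintain pm_gap_start/pm_gap_end, a range of
 * user virtual regions holding no mappings, so loops over regions can
 * skip the hole.  GAP_WIDEN grows the range when a region adjoining it
 * becomes empty; GAP_SHRINK is used when a region inside the gap gains
 * a mapping and pulls in whichever boundary lies on that region's side
 * of the gap's midpoint.
 */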
1.55 pk 762: #define GAP_WIDEN(pm,vr) do if (CPU_ISSUN4OR4C) { \
763: if (vr + 1 == pm->pm_gap_start) \
764: pm->pm_gap_start = vr; \
765: if (vr == pm->pm_gap_end) \
766: pm->pm_gap_end = vr + 1; \
1.43 pk 767: } while (0)
768:
1.55 pk 769: #define GAP_SHRINK(pm,vr) do if (CPU_ISSUN4OR4C) { \
1.124 pk 770: int x; \
1.43 pk 771: x = pm->pm_gap_start + (pm->pm_gap_end - pm->pm_gap_start) / 2; \
772: if (vr > x) { \
773: if (vr < pm->pm_gap_end) \
774: pm->pm_gap_end = vr; \
775: } else { \
776: if (vr >= pm->pm_gap_start && x != pm->pm_gap_start) \
777: pm->pm_gap_start = vr + 1; \
778: } \
779: } while (0)
780:
1.72 pk 781:
1.122 pk 782: static void get_phys_mem __P((void));
1.53 christos 783: static void sortm __P((struct memarr *, int));
784: void pv_flushcache __P((struct pvlist *));
785: void kvm_iocache __P((caddr_t, int));
1.122 pk 786:
1.53 christos 787: #ifdef DEBUG
788: void pm_check __P((char *, struct pmap *));
789: void pm_check_k __P((char *, struct pmap *));
790: void pm_check_u __P((char *, struct pmap *));
791: #endif
792:
793:
1.2 deraadt 794: /*
1.122 pk 795: * Grab physical memory list and use it to compute `physmem' and
1.136 pk 796: * `avail_end'. The latter is used in conjunction with
1.122 pk 797: * `avail_start' to dispatch left-over physical pages to the
798: * VM system.
799: */
800: void
801: get_phys_mem()
802: {
803: struct memarr *mp;
804: int i;
805:
806: npmemarr = makememarr(pmemarr, MA_SIZE, MEMARR_AVAILPHYS);
807: sortm(pmemarr, npmemarr);
1.136 pk 808: if (pmemarr[0].addr != 0)
809: panic("pmap_bootstrap: no memory?!");
810:
1.122 pk 811: avail_end = pmemarr[npmemarr-1].addr + pmemarr[npmemarr-1].len;
812: for (physmem = 0, mp = pmemarr, i = npmemarr; --i >= 0; mp++)
813: physmem += btoc(mp->len);
814: }
815:
816: /*
1.2 deraadt 817: * Sort a memory array by address.
818: */
819: static void
820: sortm(mp, n)
1.124 pk 821: struct memarr *mp;
822: int n;
1.2 deraadt 823: {
1.124 pk 824: struct memarr *mpj;
825: int i, j;
826: paddr_t addr;
827: psize_t len;
1.2 deraadt 828:
829: /* Insertion sort. This is O(n^2), but so what? */
830: for (i = 1; i < n; i++) {
831: /* save i'th entry */
832: addr = mp[i].addr;
833: len = mp[i].len;
834: /* find j such that i'th entry goes before j'th */
835: for (j = 0, mpj = mp; j < i; j++, mpj++)
836: if (addr < mpj->addr)
837: break;
838: /* slide up any additional entries */
1.138 perry 839: memmove(mpj + 1, mpj, (i - j) * sizeof(*mp));
1.2 deraadt 840: mpj->addr = addr;
841: mpj->len = len;
842: }
843: }
844:
1.29 pk 845: /*
1.106 thorpej 846: * Support functions for vm_page_bootstrap().
1.29 pk 847: */
848:
849: /*
850: * How much virtual space does this kernel have?
851: * (After mapping kernel text, data, etc.)
852: */
853: void
854: pmap_virtual_space(v_start, v_end)
1.124 pk 855: vaddr_t *v_start;
856: vaddr_t *v_end;
1.29 pk 857: {
858: *v_start = virtual_avail;
859: *v_end = virtual_end;
860: }
861:
862: /*
1.107 pk 863: * Helper routine that hands off available physical pages to the VM system.
1.29 pk 864: */
1.107 pk 865: static void
866: pmap_page_upload()
1.29 pk 867: {
1.124 pk 868: int n = 0;
869: paddr_t start, end, avail_next;
1.29 pk 870:
1.108 pk 871: avail_next = avail_start;
872: if (unavail_gap_start != 0) {
1.107 pk 873: /* First, the gap we created in pmap_bootstrap() */
1.108 pk 874: if (avail_next != unavail_gap_start)
875: /* Avoid empty ranges */
1.110 mrg 876: #if defined(UVM)
877: uvm_page_physload(
878: atop(avail_next),
879: atop(unavail_gap_start),
880: atop(avail_next),
1.120 thorpej 881: atop(unavail_gap_start),
882: VM_FREELIST_DEFAULT);
1.110 mrg 883: #else
1.108 pk 884: vm_page_physload(
885: atop(avail_next),
886: atop(unavail_gap_start),
887: atop(avail_next),
888: atop(unavail_gap_start));
1.110 mrg 889: #endif
1.108 pk 890: avail_next = unavail_gap_end;
1.107 pk 891: }
1.29 pk 892:
1.107 pk 893: for (n = 0; n < npmemarr; n++) {
894: /*
895: * Assume `avail_next' is always in the first segment; we
               896: 	 * already made that assumption in pmap_bootstrap().
897: */
898: start = (n == 0) ? avail_next : pmemarr[n].addr;
899: end = pmemarr[n].addr + pmemarr[n].len;
900: if (start == end)
901: continue;
1.29 pk 902:
1.110 mrg 903: #if defined(UVM)
904: uvm_page_physload(
905: atop(start),
906: atop(end),
907: atop(start),
1.120 thorpej 908: atop(end), VM_FREELIST_DEFAULT);
1.110 mrg 909: #else
1.107 pk 910: vm_page_physload(
911: atop(start),
912: atop(end),
913: atop(start),
914: atop(end));
1.110 mrg 915: #endif
1.29 pk 916: }
917:
918: }
919:
1.124 pk 920: #if 0
1.29 pk 921: /*
922: * pmap_page_index()
923: *
924: * Given a physical address, return a page index.
925: *
926: * There can be some values that we never return (i.e. a hole)
927: * as long as the range of indices returned by this function
928: * is smaller than the value returned by pmap_free_pages().
929: * The returned index does NOT need to start at zero.
930: *
931: */
1.50 christos 932: int
1.29 pk 933: pmap_page_index(pa)
1.124 pk 934: paddr_t pa;
1.29 pk 935: {
1.124 pk 936: paddr_t idx;
1.29 pk 937: int nmem;
1.124 pk 938: struct memarr *mp;
1.29 pk 939:
940: #ifdef DIAGNOSTIC
941: if (pa < avail_start || pa >= avail_end)
1.54 christos 942: panic("pmap_page_index: pa=0x%lx", pa);
1.29 pk 943: #endif
944:
945: for (idx = 0, mp = pmemarr, nmem = npmemarr; --nmem >= 0; mp++) {
946: if (pa >= mp->addr && pa < mp->addr + mp->len)
947: break;
948: idx += atop(mp->len);
949: }
950:
1.124 pk 951: return (int)(idx + atop(pa - mp->addr));
1.29 pk 952: }
1.124 pk 953: #endif
1.39 pk 954:
955: int
956: pmap_pa_exists(pa)
1.124 pk 957: paddr_t pa;
1.39 pk 958: {
1.124 pk 959: int nmem;
960: struct memarr *mp;
1.39 pk 961:
962: for (mp = pmemarr, nmem = npmemarr; --nmem >= 0; mp++) {
963: if (pa >= mp->addr && pa < mp->addr + mp->len)
964: return 1;
965: }
966:
967: return 0;
968: }
1.29 pk 969:
1.1 deraadt 970: /* update pv_flags given a valid pte */
1.55 pk 971: #define MR4_4C(pte) (((pte) >> PG_M_SHIFT) & (PV_MOD | PV_REF))
972: #define MR4M(pte) (((pte) >> PG_M_SHIFT4M) & (PV_MOD4M | PV_REF4M))
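/*
 * These work only because of the arrangement noted in the pv_flags
 * comment above: PV_MOD/PV_REF (and PV_MOD4M/PV_REF4M) line up with
 * the hardware modified/referenced PTE bits once shifted down by
 * PG_M_SHIFT (resp. PG_M_SHIFT4M), so a PTE's ref/mod state can be
 * or'ed straight into a pvlist head's pv_flags.
 */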
1.1 deraadt 973:
974: /*----------------------------------------------------------------*/
975:
976: /*
977: * Agree with the monitor ROM as to how many MMU entries are
978: * to be reserved, and map all of its segments into all contexts.
979: *
980: * Unfortunately, while the Version 0 PROM had a nice linked list of
981: * taken virtual memory, the Version 2 PROM provides instead a convoluted
982: * description of *free* virtual memory. Rather than invert this, we
983: * resort to two magic constants from the PROM vector description file.
984: */
1.55 pk 985: #if defined(SUN4) || defined(SUN4C)
1.43 pk 986: void
1.58 pk 987: mmu_reservemon4_4c(nrp, nsp)
1.124 pk 988: int *nrp, *nsp;
1.1 deraadt 989: {
1.124 pk 990: u_int va = 0, eva = 0;
991: int mmuseg, i, nr, ns, vr, lastvr;
1.69 pk 992: #if defined(SUN4_MMU3L)
1.124 pk 993: int mmureg;
1.53 christos 994: #endif
1.124 pk 995: struct regmap *rp;
1.1 deraadt 996:
1.55 pk 997: #if defined(SUN4M)
998: if (CPU_ISSUN4M) {
1.81 pk 999: panic("mmu_reservemon4_4c called on Sun4M machine");
1.55 pk 1000: return;
1001: }
1002: #endif
1003:
1.20 deraadt 1004: #if defined(SUN4)
1.55 pk 1005: if (CPU_ISSUN4) {
1.29 pk 1006: prom_vstart = va = OLDMON_STARTVADDR;
1007: prom_vend = eva = OLDMON_ENDVADDR;
1.20 deraadt 1008: }
1009: #endif
1010: #if defined(SUN4C)
1.55 pk 1011: if (CPU_ISSUN4C) {
1.29 pk 1012: prom_vstart = va = OPENPROM_STARTVADDR;
1013: prom_vend = eva = OPENPROM_ENDVADDR;
1.19 deraadt 1014: }
1.20 deraadt 1015: #endif
1.43 pk 1016: ns = *nsp;
1017: nr = *nrp;
1018: lastvr = 0;
1.1 deraadt 1019: while (va < eva) {
1.43 pk 1020: vr = VA_VREG(va);
1021: rp = &pmap_kernel()->pm_regmap[vr];
1022:
1.69 pk 1023: #if defined(SUN4_MMU3L)
1024: if (HASSUN4_MMU3L && vr != lastvr) {
1.43 pk 1025: lastvr = vr;
1026: mmureg = getregmap(va);
1027: if (mmureg < nr)
1028: rp->rg_smeg = nr = mmureg;
1029: /*
1030: * On 3-level MMU machines, we distribute regions,
1031: * rather than segments, amongst the contexts.
1032: */
1033: for (i = ncontext; --i > 0;)
1.137 pk 1034: prom_setcontext(i, (caddr_t)va, mmureg);
1.43 pk 1035: }
1036: #endif
1.1 deraadt 1037: mmuseg = getsegmap(va);
1.43 pk 1038: if (mmuseg < ns)
1039: ns = mmuseg;
1.69 pk 1040:
1041: if (!HASSUN4_MMU3L)
1.43 pk 1042: for (i = ncontext; --i > 0;)
1.137 pk 1043: prom_setcontext(i, (caddr_t)va, mmuseg);
1.43 pk 1044:
1.1 deraadt 1045: if (mmuseg == seginval) {
1046: va += NBPSG;
1047: continue;
1048: }
1.43 pk 1049: /*
1050: * Another PROM segment. Enter into region map.
1051: * Assume the entire segment is valid.
1052: */
1053: rp->rg_nsegmap += 1;
1054: rp->rg_segmap[VA_VSEG(va)].sg_pmeg = mmuseg;
1055: rp->rg_segmap[VA_VSEG(va)].sg_npte = NPTESG;
1056:
1.1 deraadt 1057: /* PROM maps its memory user-accessible: fix it. */
1058: for (i = NPTESG; --i >= 0; va += NBPG)
1.55 pk 1059: setpte4(va, getpte4(va) | PG_S);
1.1 deraadt 1060: }
1.43 pk 1061: *nsp = ns;
1062: *nrp = nr;
1063: return;
1.1 deraadt 1064: }
1.55 pk 1065: #endif
1066:
1067: #if defined(SUN4M) /* Sun4M versions of above */
1068:
1.97 pk 1069: u_long
1070: srmmu_bypass_read(paddr)
1071: u_long paddr;
1072: {
1073: unsigned long v;
1074:
1075: if (/*cpuinfo.cpu_impl == 4 && */cpuinfo.mxcc) {
1076: /*
1077: * We're going to have to use MMU passthrough. If we're on
1078: * a Viking MicroSparc without an mbus, we need to turn
1079: * off traps and set the AC bit at 0x8000 in the MMU's
1080: * control register. Ugh.
1081: */
1082:
1083: unsigned long s = lda(SRMMU_PCR,ASI_SRMMU);
1084:
1085: /* set MMU AC bit */
1086: sta(SRMMU_PCR, ASI_SRMMU, s | VIKING_PCR_AC);
1087: v = lda(paddr, ASI_BYPASS);
1088: sta(SRMMU_PCR, ASI_SRMMU, s);
1089: } else
1090: v = lda(paddr, ASI_BYPASS);
1091:
1092: return (v);
1093: }
1094:
1095:
1.55 pk 1096: /*
1097: * Take the monitor's initial page table layout, convert it to 3rd-level pte's
1098: * (it starts out as a L1 mapping), and install it along with a set of kernel
1099: * mapping tables as the kernel's initial page table setup. Also create and
1100: * enable a context table. I suppose we also want to block user-mode access
1101: * to the new kernel/ROM mappings.
1102: */
1103:
1.58 pk 1104: /*
1105: * mmu_reservemon4m(): Copies the existing (ROM) page tables to kernel space,
1.55 pk 1106: * converting any L1/L2 PTEs to L3 PTEs. Does *not* copy the L1 entry mapping
1.96 pk 1107: * the kernel at KERNBASE since we don't want to map 16M of physical
1108: * memory for the kernel. Thus the kernel must be installed later!
1.55 pk 1109: * Also installs ROM mappings into the kernel pmap.
1110: * NOTE: This also revokes all user-mode access to the mapped regions.
1111: */
1112: void
1.77 pk 1113: mmu_reservemon4m(kpmap)
1.55 pk 1114: struct pmap *kpmap;
1115: {
1.71 pk 1116: unsigned int rom_ctxtbl;
1.124 pk 1117: int te;
1.55 pk 1118:
1.97 pk 1119: prom_vstart = OPENPROM_STARTVADDR;
1120: prom_vend = OPENPROM_ENDVADDR;
1.55 pk 1121:
1122: /*
1123: * XXX: although the Sun4M can handle 36 bits of physical
1124: * address space, we assume that all these page tables, etc
1125: * are in the lower 4G (32-bits) of address space, i.e. out of I/O
1126: * space. Eventually this should be changed to support the 36 bit
1127: * physical addressing, in case some crazed ROM designer decides to
1128: * stick the pagetables up there. In that case, we should use MMU
1129: * transparent mode, (i.e. ASI 0x20 to 0x2f) to access
1130: * physical memory.
1131: */
1132:
1.71 pk 1133: rom_ctxtbl = (lda(SRMMU_CXTPTR,ASI_SRMMU) << SRMMU_PPNPASHIFT);
1.55 pk 1134:
1.97 pk 1135: te = srmmu_bypass_read(rom_ctxtbl); /* i.e. context 0 */
1.69 pk 1136:
1.55 pk 1137: switch (te & SRMMU_TETYPE) {
1.62 pk 1138: case SRMMU_TEINVALID:
1.69 pk 1139: cpuinfo.ctx_tbl[0] = SRMMU_TEINVALID;
1.77 pk 1140: panic("mmu_reservemon4m: no existing L0 mapping! "
              1141: 		    "(How are we running?)");
1.55 pk 1142: break;
1.62 pk 1143: case SRMMU_TEPTE:
1.55 pk 1144: #ifdef DEBUG
1.66 christos 1145: printf("mmu_reservemon4m: trying to remap 4G segment!\n");
1.55 pk 1146: #endif
1147: panic("mmu_reservemon4m: can't handle ROM 4G page size");
1148: /* XXX: Should make this work, however stupid it is */
1149: break;
1.62 pk 1150: case SRMMU_TEPTD:
1.71 pk 1151: mmu_setup4m_L1(te, kpmap);
1.55 pk 1152: break;
1.62 pk 1153: default:
1.55 pk 1154: panic("mmu_reservemon4m: unknown pagetable entry type");
1155: }
1156: }
1157:
1158: void
1.71 pk 1159: mmu_setup4m_L1(regtblptd, kpmap)
1.55 pk 1160: int regtblptd; /* PTD for region table to be remapped */
1161: struct pmap *kpmap;
1162: {
1.124 pk 1163: unsigned int regtblrover;
1164: int i;
1.55 pk 1165: unsigned int te;
1.71 pk 1166: struct regmap *rp;
1.55 pk 1167: int j, k;
1168:
1.69 pk 1169: /*
1170: * Here we scan the region table to copy any entries which appear.
1.55 pk 1171: * We are only concerned with regions in kernel space and above
1.96 pk 1172: * (i.e. regions VA_VREG(KERNBASE)+1 to 0xff). We ignore the first
1173: * region (at VA_VREG(KERNBASE)), since that is the 16MB L1 mapping
1174: * that the ROM used to map the kernel in initially. Later, we will
1175: * rebuild a new L3 mapping for the kernel and install it before
1176: * switching to the new pagetables.
1.55 pk 1177: */
1.71 pk 1178: regtblrover =
1179: ((regtblptd & ~SRMMU_TETYPE) << SRMMU_PPNPASHIFT) +
1180: (VA_VREG(KERNBASE)+1) * sizeof(long); /* kernel only */
1.55 pk 1181:
1182: for (i = VA_VREG(KERNBASE) + 1; i < SRMMU_L1SIZE;
1183: i++, regtblrover += sizeof(long)) {
1.71 pk 1184:
1185: /* The region we're dealing with */
1186: rp = &kpmap->pm_regmap[i];
1187:
1.97 pk 1188: te = srmmu_bypass_read(regtblrover);
1.55 pk 1189: switch(te & SRMMU_TETYPE) {
1.62 pk 1190: case SRMMU_TEINVALID:
1.55 pk 1191: break;
1.71 pk 1192:
1.62 pk 1193: case SRMMU_TEPTE:
1.55 pk 1194: #ifdef DEBUG
1.81 pk 1195: printf("mmu_setup4m_L1: "
1.77 pk 1196: "converting region 0x%x from L1->L3\n", i);
1.55 pk 1197: #endif
1.71 pk 1198: /*
1199: * This region entry covers 64MB of memory -- or
1200: * (NSEGRG * NPTESG) pages -- which we must convert
1201: * into a 3-level description.
1.55 pk 1202: */
1.71 pk 1203:
1.55 pk 1204: for (j = 0; j < SRMMU_L2SIZE; j++) {
1.71 pk 1205: struct segmap *sp = &rp->rg_segmap[j];
1.55 pk 1206:
1207: for (k = 0; k < SRMMU_L3SIZE; k++) {
1.71 pk 1208: sp->sg_npte++;
1.97 pk 1209: setpgt4m(&sp->sg_pte[k],
1210: (te & SRMMU_L1PPNMASK) |
1211: (j << SRMMU_L2PPNSHFT) |
1212: (k << SRMMU_L3PPNSHFT) |
1213: (te & SRMMU_PGBITSMSK) |
1214: ((te & SRMMU_PROT_MASK) |
1215: PPROT_U2S_OMASK) |
1216: SRMMU_TEPTE);
1.55 pk 1217: }
1218: }
1219: break;
1.71 pk 1220:
1.62 pk 1221: case SRMMU_TEPTD:
1.71 pk 1222: mmu_setup4m_L2(te, rp);
1.55 pk 1223: break;
1.71 pk 1224:
1.62 pk 1225: default:
1.55 pk 1226: panic("mmu_setup4m_L1: unknown pagetable entry type");
1227: }
1228: }
1229: }
1230:
1231: void
1.71 pk 1232: mmu_setup4m_L2(segtblptd, rp)
1.55 pk 1233: int segtblptd;
1.71 pk 1234: struct regmap *rp;
1.55 pk 1235: {
1.124 pk 1236: unsigned int segtblrover;
1237: int i, k;
1.55 pk 1238: unsigned int te;
1.71 pk 1239: struct segmap *sp;
1.55 pk 1240:
1241: segtblrover = (segtblptd & ~SRMMU_TETYPE) << SRMMU_PPNPASHIFT;
1242: for (i = 0; i < SRMMU_L2SIZE; i++, segtblrover += sizeof(long)) {
1.71 pk 1243:
1244: sp = &rp->rg_segmap[i];
1245:
1.97 pk 1246: te = srmmu_bypass_read(segtblrover);
1.55 pk 1247: switch(te & SRMMU_TETYPE) {
1.62 pk 1248: case SRMMU_TEINVALID:
1.55 pk 1249: break;
1.71 pk 1250:
1.62 pk 1251: case SRMMU_TEPTE:
1.55 pk 1252: #ifdef DEBUG
1.81 pk 1253: printf("mmu_setup4m_L2: converting L2 entry at segment 0x%x to L3\n",i);
1.55 pk 1254: #endif
1.71 pk 1255: /*
1256: * This segment entry covers 256KB of memory -- or
1257: * (NPTESG) pages -- which we must convert
1258: * into a 3-level description.
1259: */
1.55 pk 1260: for (k = 0; k < SRMMU_L3SIZE; k++) {
1.71 pk 1261: sp->sg_npte++;
1.97 pk 1262: setpgt4m(&sp->sg_pte[k],
1263: (te & SRMMU_L1PPNMASK) |
1264: (te & SRMMU_L2PPNMASK) |
1265: (k << SRMMU_L3PPNSHFT) |
1266: (te & SRMMU_PGBITSMSK) |
1267: ((te & SRMMU_PROT_MASK) |
1268: PPROT_U2S_OMASK) |
1269: SRMMU_TEPTE);
1.55 pk 1270: }
1271: break;
1.71 pk 1272:
1.62 pk 1273: case SRMMU_TEPTD:
1.71 pk 1274: mmu_setup4m_L3(te, sp);
1.55 pk 1275: break;
1.71 pk 1276:
1.62 pk 1277: default:
1.55 pk 1278: panic("mmu_setup4m_L2: unknown pagetable entry type");
1279: }
1280: }
1281: }
1282:
1.71 pk 1283: void
1284: mmu_setup4m_L3(pagtblptd, sp)
1.124 pk 1285: int pagtblptd;
1.71 pk 1286: struct segmap *sp;
1.55 pk 1287: {
1.124 pk 1288: unsigned int pagtblrover;
1289: int i;
1290: unsigned int te;
1.55 pk 1291:
1292: pagtblrover = (pagtblptd & ~SRMMU_TETYPE) << SRMMU_PPNPASHIFT;
1293: for (i = 0; i < SRMMU_L3SIZE; i++, pagtblrover += sizeof(long)) {
1.97 pk 1294: te = srmmu_bypass_read(pagtblrover);
1.55 pk 1295: switch(te & SRMMU_TETYPE) {
1.62 pk 1296: case SRMMU_TEINVALID:
1.55 pk 1297: break;
1.62 pk 1298: case SRMMU_TEPTE:
1.71 pk 1299: sp->sg_npte++;
1.97 pk 1300: setpgt4m(&sp->sg_pte[i], te | PPROT_U2S_OMASK);
1.55 pk 1301: break;
1.62 pk 1302: case SRMMU_TEPTD:
1.55 pk 1303: panic("mmu_setup4m_L3: PTD found in L3 page table");
1.62 pk 1304: default:
1.55 pk 1305: panic("mmu_setup4m_L3: unknown pagetable entry type");
1306: }
1307: }
1308: }
1309: #endif /* defined SUN4M */
1.1 deraadt 1310:
1311: /*----------------------------------------------------------------*/
1312:
1313: /*
1314: * MMU management.
1315: */
1.43 pk 1316: struct mmuentry *me_alloc __P((struct mmuhd *, struct pmap *, int, int));
1317: void me_free __P((struct pmap *, u_int));
1318: struct mmuentry *region_alloc __P((struct mmuhd *, struct pmap *, int));
1319: void region_free __P((struct pmap *, u_int));
1.1 deraadt 1320:
1321: /*
1322: * Change contexts. We need the old context number as well as the new
1323: * one. If the context is changing, we must write all user windows
1324: * first, lest an interrupt cause them to be written to the (other)
1325: * user whose context we set here.
1326: */
1327: #define CHANGE_CONTEXTS(old, new) \
1328: if ((old) != (new)) { \
1329: write_user_windows(); \
1330: setcontext(new); \
1331: }
1332:
1.55 pk 1333: #if defined(SUN4) || defined(SUN4C) /* This is old sun MMU stuff */
1.1 deraadt 1334: /*
1335: * Allocate an MMU entry (i.e., a PMEG).
1336: * If necessary, steal one from someone else.
1337: * Put it on the tail of the given queue
1338: * (which is either the LRU list or the locked list).
1339: * The locked list is not actually ordered, but this is easiest.
1340: * Also put it on the given (new) pmap's chain,
1341: * enter its pmeg number into that pmap's segmap,
1342: * and store the pmeg's new virtual segment number (me->me_vseg).
1343: *
1344: * This routine is large and complicated, but it must be fast
1345: * since it implements the dynamic allocation of MMU entries.
1346: */
1347: struct mmuentry *
1.43 pk 1348: me_alloc(mh, newpm, newvreg, newvseg)
1.124 pk 1349: struct mmuhd *mh;
1350: struct pmap *newpm;
1351: int newvreg, newvseg;
1352: {
1353: struct mmuentry *me;
1354: struct pmap *pm;
1355: int i, va, pa, *pte, tpte;
1.1 deraadt 1356: int ctx;
1.43 pk 1357: struct regmap *rp;
1358: struct segmap *sp;
1.1 deraadt 1359:
1360: /* try free list first */
1.43 pk 1361: if ((me = segm_freelist.tqh_first) != NULL) {
1362: TAILQ_REMOVE(&segm_freelist, me, me_list);
1.1 deraadt 1363: #ifdef DEBUG
1364: if (me->me_pmap != NULL)
1365: panic("me_alloc: freelist entry has pmap");
1366: if (pmapdebug & PDB_MMU_ALLOC)
1.66 christos 1367: printf("me_alloc: got pmeg %d\n", me->me_cookie);
1.1 deraadt 1368: #endif
1.43 pk 1369: TAILQ_INSERT_TAIL(mh, me, me_list);
1.1 deraadt 1370:
              1371: 		/* onto new pmap chain; pmap is already locked, if needed */
1.43 pk 1372: TAILQ_INSERT_TAIL(&newpm->pm_seglist, me, me_pmchain);
1.70 pk 1373: #ifdef DIAGNOSTIC
1374: pmap_stats.ps_npmeg_free--;
1375: if (mh == &segm_locked)
1376: pmap_stats.ps_npmeg_locked++;
1377: else
1378: pmap_stats.ps_npmeg_lru++;
1379: #endif
1.1 deraadt 1380:
1381: /* into pmap segment table, with backpointers */
1.43 pk 1382: newpm->pm_regmap[newvreg].rg_segmap[newvseg].sg_pmeg = me->me_cookie;
1.1 deraadt 1383: me->me_pmap = newpm;
1384: me->me_vseg = newvseg;
1.43 pk 1385: me->me_vreg = newvreg;
1.1 deraadt 1386:
1387: return (me);
1388: }
1389:
1390: /* no luck, take head of LRU list */
1.43 pk 1391: if ((me = segm_lru.tqh_first) == NULL)
1.1 deraadt 1392: panic("me_alloc: all pmegs gone");
1.43 pk 1393:
1.1 deraadt 1394: pm = me->me_pmap;
1395: if (pm == NULL)
1396: panic("me_alloc: LRU entry has no pmap");
1.42 mycroft 1397: if (pm == pmap_kernel())
1.1 deraadt 1398: panic("me_alloc: stealing from kernel");
1.12 pk 1399: #ifdef DEBUG
1.1 deraadt 1400: if (pmapdebug & (PDB_MMU_ALLOC | PDB_MMU_STEAL))
1.91 fair 1401: printf("me_alloc: stealing pmeg 0x%x from pmap %p\n",
1.43 pk 1402: me->me_cookie, pm);
1.1 deraadt 1403: #endif
1404: /*
1405: * Remove from LRU list, and insert at end of new list
1406: * (probably the LRU list again, but so what?).
1407: */
1.43 pk 1408: TAILQ_REMOVE(&segm_lru, me, me_list);
1409: TAILQ_INSERT_TAIL(mh, me, me_list);
1410:
1.70 pk 1411: #ifdef DIAGNOSTIC
1412: if (mh == &segm_locked) {
1413: pmap_stats.ps_npmeg_lru--;
1414: pmap_stats.ps_npmeg_locked++;
1415: }
1416: #endif
1417:
1.43 pk 1418: rp = &pm->pm_regmap[me->me_vreg];
1419: if (rp->rg_segmap == NULL)
1420: panic("me_alloc: LRU entry's pmap has no segments");
1421: sp = &rp->rg_segmap[me->me_vseg];
1422: pte = sp->sg_pte;
1423: if (pte == NULL)
1424: panic("me_alloc: LRU entry's pmap has no ptes");
1.1 deraadt 1425:
1426: /*
1427: * The PMEG must be mapped into some context so that we can
1428: * read its PTEs. Use its current context if it has one;
1429: * if not, and since context 0 is reserved for the kernel,
1430: * the simplest method is to switch to 0 and map the PMEG
1431: * to virtual address 0---which, being a user space address,
1432: * is by definition not in use.
1433: *
1434: * XXX for ncpus>1 must use per-cpu VA?
1435: * XXX do not have to flush cache immediately
1436: */
1.71 pk 1437: ctx = getcontext4();
1.43 pk 1438: if (CTX_USABLE(pm,rp)) {
1.1 deraadt 1439: CHANGE_CONTEXTS(ctx, pm->pm_ctxnum);
1.69 pk 1440: cache_flush_segment(me->me_vreg, me->me_vseg);
1.43 pk 1441: va = VSTOVA(me->me_vreg,me->me_vseg);
1.1 deraadt 1442: } else {
1443: CHANGE_CONTEXTS(ctx, 0);
1.69 pk 1444: if (HASSUN4_MMU3L)
1.43 pk 1445: setregmap(0, tregion);
1446: setsegmap(0, me->me_cookie);
1.1 deraadt 1447: /*
1448: * No cache flush needed: it happened earlier when
1449: * the old context was taken.
1450: */
1451: va = 0;
1452: }
1453:
1454: /*
1455: * Record reference and modify bits for each page,
1456: * and copy PTEs into kernel memory so that they can
1457: * be reloaded later.
1458: */
1459: i = NPTESG;
1460: do {
1.55 pk 1461: tpte = getpte4(va);
1.33 pk 1462: if ((tpte & (PG_V | PG_TYPE)) == (PG_V | PG_OBMEM)) {
1.60 pk 1463: pa = ptoa(tpte & PG_PFNUM);
1.1 deraadt 1464: if (managed(pa))
1.55 pk 1465: pvhead(pa)->pv_flags |= MR4_4C(tpte);
1.1 deraadt 1466: }
1467: *pte++ = tpte & ~(PG_U|PG_M);
1468: va += NBPG;
1469: } while (--i > 0);
1470:
1471: /* update segment tables */
1472: simple_lock(&pm->pm_lock); /* what if other cpu takes mmuentry ?? */
1.43 pk 1473: if (CTX_USABLE(pm,rp))
1474: setsegmap(VSTOVA(me->me_vreg,me->me_vseg), seginval);
1475: sp->sg_pmeg = seginval;
1.1 deraadt 1476:
1477: /* off old pmap chain */
1.43 pk 1478: TAILQ_REMOVE(&pm->pm_seglist, me, me_pmchain);
1.1 deraadt 1479: simple_unlock(&pm->pm_lock);
1.71 pk 1480: setcontext4(ctx); /* done with old context */
1.1 deraadt 1481:
1482: /* onto new pmap chain; new pmap is already locked, if needed */
1.43 pk 1483: TAILQ_INSERT_TAIL(&newpm->pm_seglist, me, me_pmchain);
1.1 deraadt 1484:
1485: /* into new segment table, with backpointers */
1.43 pk 1486: newpm->pm_regmap[newvreg].rg_segmap[newvseg].sg_pmeg = me->me_cookie;
1.1 deraadt 1487: me->me_pmap = newpm;
1488: me->me_vseg = newvseg;
1.43 pk 1489: me->me_vreg = newvreg;
1.1 deraadt 1490:
1491: return (me);
1492: }
1493:
1494: /*
1495: * Free an MMU entry.
1496: *
1497: * Assumes the corresponding pmap is already locked.
1498: * Does NOT flush cache, but does record ref and mod bits.
1499: * The rest of each PTE is discarded.
1500: * CALLER MUST SET CONTEXT to pm->pm_ctxnum (if pmap has
1501: * a context) or to 0 (if not). Caller must also update
1502: * pm->pm_segmap and (possibly) the hardware.
1503: */
1504: void
1505: me_free(pm, pmeg)
1.124 pk 1506: struct pmap *pm;
1507: u_int pmeg;
1.1 deraadt 1508: {
1.124 pk 1509: struct mmuentry *me = &mmusegments[pmeg];
1510: int i, va, pa, tpte;
1511: int vr;
1512: struct regmap *rp;
1.43 pk 1513:
1514: vr = me->me_vreg;
1.1 deraadt 1515:
1516: #ifdef DEBUG
1517: if (pmapdebug & PDB_MMU_ALLOC)
1.66 christos 1518: printf("me_free: freeing pmeg %d from pmap %p\n",
1.43 pk 1519: me->me_cookie, pm);
1520: if (me->me_cookie != pmeg)
1.1 deraadt 1521: panic("me_free: wrong mmuentry");
1522: if (pm != me->me_pmap)
1523: panic("me_free: pm != me_pmap");
1524: #endif
1525:
1.43 pk 1526: rp = &pm->pm_regmap[vr];
1527:
1.1 deraadt 1528: /* just like me_alloc, but no cache flush, and context already set */
1.43 pk 1529: if (CTX_USABLE(pm,rp)) {
1530: va = VSTOVA(vr,me->me_vseg);
1531: } else {
1532: #ifdef DEBUG
1.71 pk 1533: if (getcontext4() != 0) panic("me_free: ctx != 0");
1.43 pk 1534: #endif
1.69 pk 1535: if (HASSUN4_MMU3L)
1.43 pk 1536: setregmap(0, tregion);
1537: setsegmap(0, me->me_cookie);
1.1 deraadt 1538: va = 0;
1539: }
1540: i = NPTESG;
1541: do {
1.55 pk 1542: tpte = getpte4(va);
1.33 pk 1543: if ((tpte & (PG_V | PG_TYPE)) == (PG_V | PG_OBMEM)) {
1.60 pk 1544: pa = ptoa(tpte & PG_PFNUM);
1.1 deraadt 1545: if (managed(pa))
1.55 pk 1546: pvhead(pa)->pv_flags |= MR4_4C(tpte);
1.1 deraadt 1547: }
1548: va += NBPG;
1549: } while (--i > 0);
1550:
1551: /* take mmu entry off pmap chain */
1.43 pk 1552: TAILQ_REMOVE(&pm->pm_seglist, me, me_pmchain);
1553: /* ... and remove from segment map */
1554: if (rp->rg_segmap == NULL)
1555: panic("me_free: no segments in pmap");
1556: rp->rg_segmap[me->me_vseg].sg_pmeg = seginval;
1557:
1558: /* off LRU or lock chain */
1559: if (pm == pmap_kernel()) {
1560: TAILQ_REMOVE(&segm_locked, me, me_list);
1.70 pk 1561: #ifdef DIAGNOSTIC
1562: pmap_stats.ps_npmeg_locked--;
1563: #endif
1.43 pk 1564: } else {
1565: TAILQ_REMOVE(&segm_lru, me, me_list);
1.70 pk 1566: #ifdef DIAGNOSTIC
1567: pmap_stats.ps_npmeg_lru--;
1568: #endif
1.43 pk 1569: }
1570:
1571: /* no associated pmap; on free list */
1572: me->me_pmap = NULL;
1573: TAILQ_INSERT_TAIL(&segm_freelist, me, me_list);
1.70 pk 1574: #ifdef DIAGNOSTIC
1575: pmap_stats.ps_npmeg_free++;
1576: #endif
1.43 pk 1577: }
1578:
1.69 pk 1579: #if defined(SUN4_MMU3L)
1.43 pk 1580:
1581: /* XXX - Merge with segm_alloc/segm_free ? */
1582:
1583: struct mmuentry *
1584: region_alloc(mh, newpm, newvr)
1.124 pk 1585: struct mmuhd *mh;
1586: struct pmap *newpm;
1587: int newvr;
1.43 pk 1588: {
1.124 pk 1589: struct mmuentry *me;
1590: struct pmap *pm;
1.43 pk 1591: int ctx;
1592: struct regmap *rp;
1593:
1594: /* try free list first */
1595: if ((me = region_freelist.tqh_first) != NULL) {
1596: TAILQ_REMOVE(®ion_freelist, me, me_list);
1597: #ifdef DEBUG
1598: if (me->me_pmap != NULL)
1599: panic("region_alloc: freelist entry has pmap");
1600: if (pmapdebug & PDB_MMUREG_ALLOC)
1.91 fair 1601: printf("region_alloc: got smeg 0x%x\n", me->me_cookie);
1.43 pk 1602: #endif
1603: TAILQ_INSERT_TAIL(mh, me, me_list);
1604:
1605: 		/* onto pmap chain; pmap is already locked, if needed */
1606: TAILQ_INSERT_TAIL(&newpm->pm_reglist, me, me_pmchain);
1607:
1608: /* into pmap segment table, with backpointers */
1609: newpm->pm_regmap[newvr].rg_smeg = me->me_cookie;
1610: me->me_pmap = newpm;
1611: me->me_vreg = newvr;
1612:
1613: return (me);
1614: }
1615:
1616: /* no luck, take head of LRU list */
1617: if ((me = region_lru.tqh_first) == NULL)
1618: panic("region_alloc: all smegs gone");
1619:
1620: pm = me->me_pmap;
1621: if (pm == NULL)
1622: panic("region_alloc: LRU entry has no pmap");
1623: if (pm == pmap_kernel())
1624: panic("region_alloc: stealing from kernel");
1625: #ifdef DEBUG
1626: if (pmapdebug & (PDB_MMUREG_ALLOC | PDB_MMUREG_STEAL))
1.91 fair 1627: printf("region_alloc: stealing smeg 0x%x from pmap %p\n",
1.43 pk 1628: me->me_cookie, pm);
1629: #endif
1630: /*
1631: * Remove from LRU list, and insert at end of new list
1632: * (probably the LRU list again, but so what?).
1633: */
1634: TAILQ_REMOVE(®ion_lru, me, me_list);
1635: TAILQ_INSERT_TAIL(mh, me, me_list);
1636:
1637: rp = &pm->pm_regmap[me->me_vreg];
1.71 pk 1638: ctx = getcontext4();
1.43 pk 1639: if (pm->pm_ctx) {
1640: CHANGE_CONTEXTS(ctx, pm->pm_ctxnum);
1.69 pk 1641: cache_flush_region(me->me_vreg);
1.43 pk 1642: }
1643:
1644: /* update region tables */
1645: simple_lock(&pm->pm_lock); /* what if other cpu takes mmuentry ?? */
1646: if (pm->pm_ctx)
1647: setregmap(VRTOVA(me->me_vreg), reginval);
1648: rp->rg_smeg = reginval;
1649:
1650: /* off old pmap chain */
1651: TAILQ_REMOVE(&pm->pm_reglist, me, me_pmchain);
1652: simple_unlock(&pm->pm_lock);
1.71 pk 1653: setcontext4(ctx); /* done with old context */
1.43 pk 1654:
1655: /* onto new pmap chain; new pmap is already locked, if needed */
1656: TAILQ_INSERT_TAIL(&newpm->pm_reglist, me, me_pmchain);
1657:
1658: /* into new segment table, with backpointers */
1659: newpm->pm_regmap[newvr].rg_smeg = me->me_cookie;
1660: me->me_pmap = newpm;
1661: me->me_vreg = newvr;
1662:
1663: return (me);
1664: }
1665:
1666: /*
1667: * Free an MMU entry.
1668: *
1669: * Assumes the corresponding pmap is already locked.
1670:  * Flushes the cache for the region if the pmap has a context.
1671: * CALLER MUST SET CONTEXT to pm->pm_ctxnum (if pmap has
1672: * a context) or to 0 (if not). Caller must also update
1673: * pm->pm_regmap and (possibly) the hardware.
1674: */
1675: void
1676: region_free(pm, smeg)
1.124 pk 1677: struct pmap *pm;
1678: u_int smeg;
1.43 pk 1679: {
1.124 pk 1680: struct mmuentry *me = &mmuregions[smeg];
1.43 pk 1681:
1682: #ifdef DEBUG
1683: if (pmapdebug & PDB_MMUREG_ALLOC)
1.91 fair 1684: printf("region_free: freeing smeg 0x%x from pmap %p\n",
1.43 pk 1685: me->me_cookie, pm);
1686: if (me->me_cookie != smeg)
1687: panic("region_free: wrong mmuentry");
1688: if (pm != me->me_pmap)
1689: panic("region_free: pm != me_pmap");
1690: #endif
1691:
1692: if (pm->pm_ctx)
1.69 pk 1693: cache_flush_region(me->me_vreg);
1.43 pk 1694:
1695: /* take mmu entry off pmap chain */
1696: TAILQ_REMOVE(&pm->pm_reglist, me, me_pmchain);
1.1 deraadt 1697: /* ... and remove from segment map */
1.43 pk  1698: 	pm->pm_regmap[me->me_vreg].rg_smeg = reginval;
1.1 deraadt 1699:
1700: /* off LRU or lock chain */
1.43 pk 1701: if (pm == pmap_kernel()) {
1702: TAILQ_REMOVE(®ion_locked, me, me_list);
1703: } else {
1704: TAILQ_REMOVE(®ion_lru, me, me_list);
1705: }
1.1 deraadt 1706:
1707: /* no associated pmap; on free list */
1708: me->me_pmap = NULL;
1.43 pk 1709: TAILQ_INSERT_TAIL(®ion_freelist, me, me_list);
1.1 deraadt 1710: }
1.43 pk 1711: #endif
1.1 deraadt 1712:
1713: /*
1714: * `Page in' (load or inspect) an MMU entry; called on page faults.
1715: * Returns 1 if we reloaded the segment, -1 if the segment was
1716: * already loaded and the page was marked valid (in which case the
1717: * fault must be a bus error or something), or 0 (segment loaded but
1718: * PTE not valid, or segment not loaded at all).
1719: */
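/*
 * [Editor's sketch, not part of the original source.]  The return value
 * convention is easiest to see from a caller's point of view.  The label
 * and helper names below are hypothetical; the real caller lives in the
 * trap-handling code, not in this file.
 */
#if 0
	switch (mmu_pagein(pm, va, VM_PROT_READ)) {
	case 1:		/* segment (re)loaded: just retry the faulting access */
		goto out;
	case -1:	/* segment and PTE were already valid: a hard fault */
		goto hard_fault;	/* e.g. deliver SIGSEGV/bus error */
	default:	/* 0: nothing resident; use the full VM fault path */
		break;
	}
#endif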
1720: int
1.61 pk 1721: mmu_pagein(pm, va, prot)
1.124 pk 1722: struct pmap *pm;
1723: int va, prot;
1.1 deraadt 1724: {
1.124 pk 1725: int *pte;
1726: int vr, vs, pmeg, i, s, bits;
1.43 pk 1727: struct regmap *rp;
1728: struct segmap *sp;
1729:
1.45 pk 1730: if (prot != VM_PROT_NONE)
1731: bits = PG_V | ((prot & VM_PROT_WRITE) ? PG_W : 0);
1732: else
1733: bits = 0;
1734:
1.43 pk 1735: vr = VA_VREG(va);
1736: vs = VA_VSEG(va);
1737: rp = &pm->pm_regmap[vr];
1738: #ifdef DEBUG
1739: if (pm == pmap_kernel())
1.91 fair 1740: printf("mmu_pagein: kernel wants map at va 0x%x, vr %d, vs %d\n", va, vr, vs);
1.43 pk 1741: #endif
1742:
1743: /* return 0 if we have no PMEGs to load */
1744: if (rp->rg_segmap == NULL)
1745: return (0);
1.69 pk 1746: #if defined(SUN4_MMU3L)
1747: if (HASSUN4_MMU3L && rp->rg_smeg == reginval) {
1.43 pk 1748: smeg_t smeg;
1749: unsigned int tva = VA_ROUNDDOWNTOREG(va);
1750: struct segmap *sp = rp->rg_segmap;
1751:
1752: s = splpmap(); /* paranoid */
1753: smeg = region_alloc(®ion_lru, pm, vr)->me_cookie;
1754: setregmap(tva, smeg);
1755: i = NSEGRG;
1756: do {
1757: setsegmap(tva, sp++->sg_pmeg);
1758: tva += NBPSG;
1759: } while (--i > 0);
1760: splx(s);
1761: }
1762: #endif
1763: sp = &rp->rg_segmap[vs];
1.1 deraadt 1764:
1765: /* return 0 if we have no PTEs to load */
1.43 pk 1766: if ((pte = sp->sg_pte) == NULL)
1.1 deraadt 1767: return (0);
1.43 pk 1768:
1.1 deraadt 1769: /* return -1 if the fault is `hard', 0 if not */
1.43 pk 1770: if (sp->sg_pmeg != seginval)
1.55 pk 1771: return (bits && (getpte4(va) & bits) == bits ? -1 : 0);
1.1 deraadt 1772:
1773: /* reload segment: write PTEs into a new LRU entry */
1774: va = VA_ROUNDDOWNTOSEG(va);
1775: s = splpmap(); /* paranoid */
1.43 pk 1776: pmeg = me_alloc(&segm_lru, pm, vr, vs)->me_cookie;
1.1 deraadt 1777: setsegmap(va, pmeg);
1778: i = NPTESG;
1779: do {
1.55 pk 1780: setpte4(va, *pte++);
1.1 deraadt 1781: va += NBPG;
1782: } while (--i > 0);
1783: splx(s);
1784: return (1);
1785: }
1.55 pk 1786: #endif /* defined SUN4 or SUN4C */
1787:
1.1 deraadt 1788: /*
1789: * Allocate a context. If necessary, steal one from someone else.
1790: * Changes hardware context number and loads segment map.
1791: *
1792: * This routine is only ever called from locore.s just after it has
1793: * saved away the previous process, so there are no active user windows.
1794: */
1795: void
1796: ctx_alloc(pm)
1.124 pk 1797: struct pmap *pm;
1.1 deraadt 1798: {
1.124 pk 1799: union ctxinfo *c;
1800: int s, cnum, i, doflush;
1801: struct regmap *rp;
1802: int gap_start, gap_end;
1803: unsigned long va;
1.1 deraadt 1804:
1.55 pk 1805: /*XXX-GCC!*/gap_start=gap_end=0;
1.1 deraadt 1806: #ifdef DEBUG
1807: if (pm->pm_ctx)
1808: panic("ctx_alloc pm_ctx");
1809: if (pmapdebug & PDB_CTX_ALLOC)
1.66 christos 1810: printf("ctx_alloc(%p)\n", pm);
1.1 deraadt 1811: #endif
1.55 pk 1812: if (CPU_ISSUN4OR4C) {
1813: gap_start = pm->pm_gap_start;
1814: gap_end = pm->pm_gap_end;
1815: }
1.13 pk 1816:
1.49 pk 1817: s = splpmap();
1.1 deraadt 1818: if ((c = ctx_freelist) != NULL) {
1819: ctx_freelist = c->c_nextfree;
1.69 pk 1820: cnum = c - cpuinfo.ctxinfo;
1.49 pk 1821: doflush = 0;
1.1 deraadt 1822: } else {
1823: if ((ctx_kick += ctx_kickdir) >= ncontext) {
1824: ctx_kick = ncontext - 1;
1825: ctx_kickdir = -1;
1826: } else if (ctx_kick < 1) {
1827: ctx_kick = 1;
1828: ctx_kickdir = 1;
1829: }
1.69 pk 1830: c = &cpuinfo.ctxinfo[cnum = ctx_kick];
1.1 deraadt 1831: #ifdef DEBUG
1832: if (c->c_pmap == NULL)
1833: panic("ctx_alloc cu_pmap");
1834: if (pmapdebug & (PDB_CTX_ALLOC | PDB_CTX_STEAL))
1.66 christos 1835: printf("ctx_alloc: steal context %d from %p\n",
1.1 deraadt 1836: cnum, c->c_pmap);
1837: #endif
1838: c->c_pmap->pm_ctx = NULL;
1.69 pk 1839: doflush = (CACHEINFO.c_vactype != VAC_NONE);
1.55 pk 1840: if (CPU_ISSUN4OR4C) {
1841: if (gap_start < c->c_pmap->pm_gap_start)
1842: gap_start = c->c_pmap->pm_gap_start;
1843: if (gap_end > c->c_pmap->pm_gap_end)
1844: gap_end = c->c_pmap->pm_gap_end;
1845: }
1.1 deraadt 1846: }
1.49 pk 1847:
1.1 deraadt 1848: c->c_pmap = pm;
1849: pm->pm_ctx = c;
1850: pm->pm_ctxnum = cnum;
1851:
1.55 pk 1852: if (CPU_ISSUN4OR4C) {
1853: /*
1854: * Write pmap's region (3-level MMU) or segment table into
1855: * the MMU.
1856: *
1857: * Only write those entries that actually map something in
1858: * this context by maintaining a pair of region numbers in
1859: * between which the pmap has no valid mappings.
1860: *
1861: * If a context was just allocated from the free list, trust
1862: * that all its pmeg numbers are `seginval'. We make sure this
1863: * is the case initially in pmap_bootstrap(). Otherwise, the
1864: * context was freed by calling ctx_free() in pmap_release(),
1865: * which in turn is supposedly called only when all mappings
1866: * have been removed.
1867: *
1868: * On the other hand, if the context had to be stolen from
1869: * another pmap, we possibly shrink the gap to be the
1870: 		 * disjunction of the new and the previous map.
1871: */
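		/*
		 * [Editor's example, not in the original.]  E.g. if the new
		 * pmap's gap is regions [10,200) and the previous owner's gap
		 * was [50,180), the code above leaves gap_start=50, gap_end=180:
		 * only regions that were invalid in BOTH pmaps may be skipped
		 * when rewriting the segment map below.
		 */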
1.43 pk 1872:
1.80 pk 1873: setcontext4(cnum);
1.55 pk 1874: splx(s);
1875: if (doflush)
1876: cache_flush_context();
1.43 pk 1877:
1.55 pk 1878: rp = pm->pm_regmap;
1879: for (va = 0, i = NUREG; --i >= 0; ) {
1880: if (VA_VREG(va) >= gap_start) {
1881: va = VRTOVA(gap_end);
1882: i -= gap_end - gap_start;
1883: rp += gap_end - gap_start;
1884: if (i < 0)
1885: break;
1886: /* mustn't re-enter this branch */
1887: gap_start = NUREG;
1888: }
1.69 pk 1889: if (HASSUN4_MMU3L) {
1.55 pk 1890: setregmap(va, rp++->rg_smeg);
1891: va += NBPRG;
1.69 pk 1892: } else {
1.124 pk 1893: int j;
1894: struct segmap *sp = rp->rg_segmap;
1.55 pk 1895: for (j = NSEGRG; --j >= 0; va += NBPSG)
1896: setsegmap(va,
1897: sp?sp++->sg_pmeg:seginval);
1898: rp++;
1899: }
1.43 pk 1900: }
1.55 pk 1901:
1902: } else if (CPU_ISSUN4M) {
1903:
1.80 pk 1904: #if defined(SUN4M)
1.55 pk 1905: /*
1906: * Reload page and context tables to activate the page tables
1907: * for this context.
1908: *
1909: * The gap stuff isn't really needed in the Sun4m architecture,
1910: * since we don't have to worry about excessive mappings (all
1911: * mappings exist since the page tables must be complete for
1912: * the mmu to be happy).
1913: *
1914: * If a context was just allocated from the free list, trust
1915: * that all of its mmu-edible page tables are zeroed out
1916: * (except for those associated with the kernel). We make
1917: * sure this is the case initially in pmap_bootstrap() and
1918: * pmap_init() (?).
1919: * Otherwise, the context was freed by calling ctx_free() in
1920: * pmap_release(), which in turn is supposedly called only
1921: * when all mappings have been removed.
1922: *
1923: * XXX: Do we have to flush cache after reloading ctx tbl?
1924: */
1925:
1.123 pk 1926: /* Do any cache flush needed on context switch */
1927: (*cpuinfo.pure_vcache_flush)();
1.79 pk 1928: #ifdef DEBUG
1.55 pk 1929: if (pm->pm_reg_ptps_pa == 0)
1930: panic("ctx_alloc: no region table in current pmap");
1931: #endif
1932: /*setcontext(0); * paranoia? can we modify curr. ctx? */
1.133 pk 1933: #if defined(MULTIPROCESSOR)
1934: for (i = 0; i < ncpu; i++) {
1935: struct cpu_info *cpi = cpus[i];
1936: if (cpi == NULL)
1937: continue;
1938:
1939: setpgt4m(&cpi->ctx_tbl[cnum],
1940: (pm->pm_reg_ptps_pa >> SRMMU_PPNPASHIFT) |
1941: SRMMU_TEPTD);
1942: }
1943: /* Fixup CPUINFO_VA region table entry for current CPU */
1944: setpgt4m(&pm->pm_reg_ptps[VA_VREG(CPUINFO_VA)],
1945: cpuinfo.cpu_seg_ptd);
1946: #else
1.79 pk 1947: setpgt4m(&cpuinfo.ctx_tbl[cnum],
1948: (pm->pm_reg_ptps_pa >> SRMMU_PPNPASHIFT) | SRMMU_TEPTD);
1.133 pk 1949: #endif
1.55 pk 1950:
1.80 pk 1951: setcontext4m(cnum);
1.55 pk 1952: if (doflush)
1953: cache_flush_context();
1954: tlb_flush_context(); /* remove any remnant garbage from tlb */
1.43 pk 1955: #endif
1.55 pk 1956: splx(s);
1.13 pk 1957: }
1.1 deraadt 1958: }
1959:
1960: /*
1961: * Give away a context. Flushes cache and sets current context to 0.
1962: */
1963: void
1964: ctx_free(pm)
1965: struct pmap *pm;
1966: {
1.124 pk 1967: union ctxinfo *c;
1968: int newc, oldc;
1.1 deraadt 1969:
1970: if ((c = pm->pm_ctx) == NULL)
1971: panic("ctx_free");
1972: pm->pm_ctx = NULL;
1973: oldc = getcontext();
1.55 pk 1974:
1.69 pk 1975: if (CACHEINFO.c_vactype != VAC_NONE) {
1.123 pk 1976: /* Do any cache flush needed on context switch */
1977: (*cpuinfo.pure_vcache_flush)();
1978:
1.1 deraadt 1979: newc = pm->pm_ctxnum;
1980: CHANGE_CONTEXTS(oldc, newc);
1981: cache_flush_context();
1.55 pk 1982: #if defined(SUN4M)
1983: if (CPU_ISSUN4M)
1984: tlb_flush_context();
1985: #endif
1.1 deraadt 1986: setcontext(0);
1987: } else {
1.55 pk 1988: #if defined(SUN4M)
1.88 pk 1989: if (CPU_ISSUN4M) {
1.123 pk 1990: /* Do any cache flush needed on context switch */
1991: (*cpuinfo.pure_vcache_flush)();
1.88 pk 1992: newc = pm->pm_ctxnum;
1993: CHANGE_CONTEXTS(oldc, newc);
1.55 pk 1994: tlb_flush_context();
1.88 pk 1995: }
1.55 pk 1996: #endif
1.1 deraadt 1997: CHANGE_CONTEXTS(oldc, 0);
1998: }
1999: c->c_nextfree = ctx_freelist;
2000: ctx_freelist = c;
1.55 pk 2001:
1.69 pk 2002: #if 0
1.55 pk 2003: #if defined(SUN4M)
2004: if (CPU_ISSUN4M) {
2005: /* Map kernel back into unused context */
2006: newc = pm->pm_ctxnum;
1.69 pk 2007: cpuinfo.ctx_tbl[newc] = cpuinfo.ctx_tbl[0];
1.55 pk 2008: if (newc)
2009: ctxbusyvector[newc] = 0; /* mark as free */
2010: }
2011: #endif
1.69 pk 2012: #endif
1.1 deraadt 2013: }
2014:
2015:
2016: /*----------------------------------------------------------------*/
2017:
2018: /*
2019: * pvlist functions.
2020: */
2021:
2022: /*
2023: * Walk the given pv list, and for each PTE, set or clear some bits
2024: * (e.g., PG_W or PG_NC).
2025: *
2026: * As a special case, this never clears PG_W on `pager' pages.
2027: * These, being kernel addresses, are always in hardware and have
2028: * a context.
2029: *
2030: * This routine flushes the cache for any page whose PTE changes,
2031: * as long as the process has a context; this is overly conservative.
2032: * It also copies ref and mod bits to the pvlist, on the theory that
2033: * this might save work later. (XXX should test this theory)
1.115 pk 2034: *
2035: * In addition, if the cacheable bit (PG_NC) is updated in the PTE
2036: * the corresponding PV_NC flag is also updated in each pv entry. This
2037: * is done so kvm_uncache() can use this routine and have the uncached
2038: * status stick.
1.1 deraadt 2039: */
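/*
 * [Editor's sketch, not part of the original source.]  The `bis'/`bic'
 * interface reduces to two call shapes, both of which appear later in
 * this file (in pv_link4_4c and pv_unlink4_4c respectively):
 */
#if 0
	pv_changepte4_4c(pv, PG_NC, 0);	/* set PG_NC: uncache every mapping */
	pv_changepte4_4c(pv, 0, PG_NC);	/* clear PG_NC: allow caching again */
#endif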
1.55 pk 2040:
2041: #if defined(SUN4) || defined(SUN4C)
2042:
1.1 deraadt 2043: void
1.55 pk 2044: pv_changepte4_4c(pv0, bis, bic)
1.115 pk 2045: struct pvlist *pv0;
2046: int bis, bic;
1.1 deraadt 2047: {
1.115 pk 2048: int *pte;
2049: struct pvlist *pv;
2050: struct pmap *pm;
2051: int va, vr, vs;
1.1 deraadt 2052: int ctx, s;
1.43 pk 2053: struct regmap *rp;
2054: struct segmap *sp;
1.1 deraadt 2055:
2056: write_user_windows(); /* paranoid? */
2057:
2058: s = splpmap(); /* paranoid? */
2059: if (pv0->pv_pmap == NULL) {
2060: splx(s);
2061: return;
2062: }
1.71 pk 2063: ctx = getcontext4();
1.1 deraadt 2064: for (pv = pv0; pv != NULL; pv = pv->pv_next) {
2065: pm = pv->pv_pmap;
1.81 pk 2066: #ifdef DIAGNOSTIC
2067: if(pm == NULL)
2068: panic("pv_changepte: pm == NULL");
2069: #endif
1.1 deraadt 2070: va = pv->pv_va;
1.43 pk 2071: vr = VA_VREG(va);
2072: vs = VA_VSEG(va);
2073: rp = &pm->pm_regmap[vr];
2074: if (rp->rg_segmap == NULL)
2075: panic("pv_changepte: no segments");
2076:
2077: sp = &rp->rg_segmap[vs];
2078: pte = sp->sg_pte;
2079:
2080: if (sp->sg_pmeg == seginval) {
2081: /* not in hardware: just fix software copy */
2082: if (pte == NULL)
1.81 pk 2083: panic("pv_changepte: pte == NULL");
1.43 pk 2084: pte += VA_VPG(va);
2085: *pte = (*pte | bis) & ~bic;
2086: } else {
1.124 pk 2087: int tpte;
1.1 deraadt 2088:
2089: /* in hardware: fix hardware copy */
1.43 pk 2090: if (CTX_USABLE(pm,rp)) {
1.110 mrg 2091: /*
2092: * Bizarreness: we never clear PG_W on
1.125 pk 2093: * pager pages.
1.110 mrg 2094: */
1.125 pk 2095: #if defined(UVM)
1.110 mrg 2096: if (bic == PG_W &&
2097: va >= uvm.pager_sva && va < uvm.pager_eva)
2098: continue;
2099: #else
1.124 pk 2100: extern vaddr_t pager_sva, pager_eva;
1.1 deraadt 2101: if (bic == PG_W &&
2102: va >= pager_sva && va < pager_eva)
1.3 deraadt 2103: continue;
1.110 mrg 2104: #endif
1.125 pk 2105:
1.71 pk 2106: setcontext4(pm->pm_ctxnum);
1.1 deraadt 2107: /* XXX should flush only when necessary */
1.55 pk 2108: tpte = getpte4(va);
1.88 pk 2109: /*
2110: * XXX: always flush cache; conservative, but
2111: * needed to invalidate cache tag protection
2112: * bits and when disabling caching.
2113: */
2114: cache_flush_page(va);
1.1 deraadt 2115: } else {
2116: /* XXX per-cpu va? */
1.71 pk 2117: setcontext4(0);
1.69 pk 2118: if (HASSUN4_MMU3L)
1.43 pk 2119: setregmap(0, tregion);
2120: setsegmap(0, sp->sg_pmeg);
1.18 deraadt 2121: va = VA_VPG(va) << PGSHIFT;
1.55 pk 2122: tpte = getpte4(va);
1.1 deraadt 2123: }
2124: if (tpte & PG_V)
1.115 pk 2125: pv0->pv_flags |= MR4_4C(tpte);
1.1 deraadt 2126: tpte = (tpte | bis) & ~bic;
1.55 pk 2127: setpte4(va, tpte);
1.1 deraadt 2128: if (pte != NULL) /* update software copy */
2129: pte[VA_VPG(va)] = tpte;
1.115 pk 2130:
2131: /* Update PV_NC flag if required */
2132: if (bis & PG_NC)
2133: pv->pv_flags |= PV_NC;
2134: if (bic & PG_NC)
2135: pv->pv_flags &= ~PV_NC;
1.1 deraadt 2136: }
2137: }
1.71 pk 2138: setcontext4(ctx);
1.1 deraadt 2139: splx(s);
2140: }
2141:
2142: /*
2143: * Sync ref and mod bits in pvlist (turns off same in hardware PTEs).
2144: * Returns the new flags.
2145: *
2146: * This is just like pv_changepte, but we never add or remove bits,
2147: * hence never need to adjust software copies.
2148: */
2149: int
1.55 pk 2150: pv_syncflags4_4c(pv0)
1.124 pk 2151: struct pvlist *pv0;
1.1 deraadt 2152: {
1.124 pk 2153: struct pvlist *pv;
2154: struct pmap *pm;
2155: int tpte, va, vr, vs, pmeg, flags;
1.1 deraadt 2156: int ctx, s;
1.43 pk 2157: struct regmap *rp;
2158: struct segmap *sp;
1.1 deraadt 2159:
2160: write_user_windows(); /* paranoid? */
2161:
2162: s = splpmap(); /* paranoid? */
2163: if (pv0->pv_pmap == NULL) { /* paranoid */
2164: splx(s);
2165: return (0);
2166: }
1.71 pk 2167: ctx = getcontext4();
1.1 deraadt 2168: flags = pv0->pv_flags;
2169: for (pv = pv0; pv != NULL; pv = pv->pv_next) {
2170: pm = pv->pv_pmap;
2171: va = pv->pv_va;
1.43 pk 2172: vr = VA_VREG(va);
2173: vs = VA_VSEG(va);
2174: rp = &pm->pm_regmap[vr];
2175: if (rp->rg_segmap == NULL)
2176: panic("pv_syncflags: no segments");
2177: sp = &rp->rg_segmap[vs];
2178:
2179: if ((pmeg = sp->sg_pmeg) == seginval)
1.1 deraadt 2180: continue;
1.43 pk 2181:
2182: if (CTX_USABLE(pm,rp)) {
1.71 pk 2183: setcontext4(pm->pm_ctxnum);
1.1 deraadt 2184: /* XXX should flush only when necessary */
1.55 pk 2185: tpte = getpte4(va);
1.69 pk 2186: if (tpte & PG_M)
1.34 pk 2187: cache_flush_page(va);
1.1 deraadt 2188: } else {
2189: /* XXX per-cpu va? */
1.71 pk 2190: setcontext4(0);
1.69 pk 2191: if (HASSUN4_MMU3L)
1.43 pk 2192: setregmap(0, tregion);
1.1 deraadt 2193: setsegmap(0, pmeg);
1.18 deraadt 2194: va = VA_VPG(va) << PGSHIFT;
1.55 pk 2195: tpte = getpte4(va);
1.1 deraadt 2196: }
2197: if (tpte & (PG_M|PG_U) && tpte & PG_V) {
1.86 pk 2198: flags |= MR4_4C(tpte);
1.1 deraadt 2199: tpte &= ~(PG_M|PG_U);
1.55 pk 2200: setpte4(va, tpte);
1.1 deraadt 2201: }
2202: }
2203: pv0->pv_flags = flags;
1.71 pk 2204: setcontext4(ctx);
1.1 deraadt 2205: splx(s);
2206: return (flags);
2207: }
2208:
2209: /*
2210: * pv_unlink is a helper function for pmap_remove.
2211: * It takes a pointer to the pv_table head for some physical address
2212: * and removes the appropriate (pmap, va) entry.
2213: *
2214: * Once the entry is removed, if the pv_table head has the cache
2215: * inhibit bit set, see if we can turn that off; if so, walk the
2216: * pvlist and turn off PG_NC in each PTE. (The pvlist is by
2217: * definition nonempty, since it must have at least two elements
2218:  * in it to have PV_ANC set, and we only remove one here.)
2219: */
1.43 pk 2220: /*static*/ void
1.55 pk 2221: pv_unlink4_4c(pv, pm, va)
1.124 pk 2222: struct pvlist *pv;
2223: struct pmap *pm;
2224: vaddr_t va;
1.1 deraadt 2225: {
1.124 pk 2226: struct pvlist *npv;
1.1 deraadt 2227:
1.11 pk 2228: #ifdef DIAGNOSTIC
2229: if (pv->pv_pmap == NULL)
2230: panic("pv_unlink0");
2231: #endif
1.1 deraadt 2232: /*
2233: * First entry is special (sigh).
2234: */
2235: npv = pv->pv_next;
2236: if (pv->pv_pmap == pm && pv->pv_va == va) {
2237: pmap_stats.ps_unlink_pvfirst++;
2238: if (npv != NULL) {
1.115 pk 2239: /*
2240: * Shift next entry into the head.
2241: * Make sure to retain the REF, MOD and ANC flags.
2242: */
1.1 deraadt 2243: pv->pv_next = npv->pv_next;
2244: pv->pv_pmap = npv->pv_pmap;
2245: pv->pv_va = npv->pv_va;
1.115 pk 2246: pv->pv_flags &= ~PV_NC;
2247: pv->pv_flags |= npv->pv_flags & PV_NC;
1.122 pk 2248: pool_put(&pv_pool, npv);
1.86 pk 2249: } else {
1.115 pk 2250: /*
2251: * No mappings left; we still need to maintain
2252: 			 * the REF and MOD flags, since pmap_is_modified()
2253: * can still be called for this page.
2254: */
1.1 deraadt 2255: pv->pv_pmap = NULL;
1.115 pk 2256: pv->pv_flags &= ~(PV_NC|PV_ANC);
1.86 pk 2257: return;
2258: }
1.1 deraadt 2259: } else {
1.124 pk 2260: struct pvlist *prev;
1.1 deraadt 2261:
2262: for (prev = pv;; prev = npv, npv = npv->pv_next) {
2263: pmap_stats.ps_unlink_pvsearch++;
2264: if (npv == NULL)
2265: panic("pv_unlink");
2266: if (npv->pv_pmap == pm && npv->pv_va == va)
2267: break;
2268: }
2269: prev->pv_next = npv->pv_next;
1.122 pk 2270: pool_put(&pv_pool, npv);
1.1 deraadt 2271: }
1.115 pk 2272: if (pv->pv_flags & PV_ANC && (pv->pv_flags & PV_NC) == 0) {
1.1 deraadt 2273: /*
2274: * Not cached: check to see if we can fix that now.
2275: */
2276: va = pv->pv_va;
2277: for (npv = pv->pv_next; npv != NULL; npv = npv->pv_next)
1.115 pk 2278: if (BADALIAS(va, npv->pv_va) || (npv->pv_flags & PV_NC))
1.1 deraadt 2279: return;
1.115 pk 2280: pv->pv_flags &= ~PV_ANC;
1.58 pk 2281: pv_changepte4_4c(pv, 0, PG_NC);
1.1 deraadt 2282: }
2283: }
2284:
2285: /*
2286: * pv_link is the inverse of pv_unlink, and is used in pmap_enter.
2287: * It returns PG_NC if the (new) pvlist says that the address cannot
2288: * be cached.
2289: */
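/*
 * [Editor's sketch, not part of the original source.]  Since the return
 * value is either PG_NC or 0, a caller such as pmap_enter (the exact
 * call site is not part of this excerpt; `pteproto' is assumed) simply
 * ORs it into the PTE it is about to install:
 */
#if 0
	pteproto |= pv_link4_4c(pvhead(pa), pm, va, (pteproto & PG_NC) != 0);
#endif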
1.43 pk 2290: /*static*/ int
1.115 pk 2291: pv_link4_4c(pv, pm, va, nc)
2292: struct pvlist *pv;
2293: struct pmap *pm;
1.124 pk 2294: vaddr_t va;
1.115 pk 2295: int nc;
1.1 deraadt 2296: {
1.115 pk 2297: struct pvlist *npv;
2298: int ret;
2299:
2300: ret = nc ? PG_NC : 0;
1.1 deraadt 2301:
2302: if (pv->pv_pmap == NULL) {
2303: /* no pvlist entries yet */
2304: pmap_stats.ps_enter_firstpv++;
2305: pv->pv_next = NULL;
2306: pv->pv_pmap = pm;
2307: pv->pv_va = va;
1.115 pk 2308: pv->pv_flags |= nc ? PV_NC : 0;
2309: return (ret);
1.1 deraadt 2310: }
2311: /*
2312: * Before entering the new mapping, see if
2313: * it will cause old mappings to become aliased
2314: * and thus need to be `discached'.
2315: */
2316: pmap_stats.ps_enter_secondpv++;
1.115 pk 2317: if (pv->pv_flags & (PV_NC|PV_ANC)) {
1.1 deraadt 2318: /* already uncached, just stay that way */
2319: ret = PG_NC;
2320: } else {
2321: for (npv = pv; npv != NULL; npv = npv->pv_next) {
1.115 pk 2322: if (npv->pv_flags & PV_NC) {
2323: ret = PG_NC;
2324: break;
2325: }
1.1 deraadt 2326: if (BADALIAS(va, npv->pv_va)) {
1.43 pk 2327: #ifdef DEBUG
1.84 pk 2328: if (pmapdebug & PDB_CACHESTUFF)
2329: printf(
1.91 fair 2330: "pv_link: badalias: pid %d, 0x%lx<=>0x%lx, pa 0x%lx\n",
1.84 pk 2331: curproc ? curproc->p_pid : -1,
2332: va, npv->pv_va,
2333: vm_first_phys + (pv-pv_table)*NBPG);
1.43 pk 2334: #endif
1.115 pk 2335: /* Mark list head `uncached due to aliases' */
2336: pv->pv_flags |= PV_ANC;
1.58 pk 2337: pv_changepte4_4c(pv, ret = PG_NC, 0);
1.1 deraadt 2338: break;
2339: }
2340: }
2341: }
1.122 pk 2342: npv = pool_get(&pv_pool, PR_WAITOK);
1.1 deraadt 2343: npv->pv_next = pv->pv_next;
2344: npv->pv_pmap = pm;
2345: npv->pv_va = va;
1.115 pk 2346: npv->pv_flags = nc ? PV_NC : 0;
1.1 deraadt 2347: pv->pv_next = npv;
2348: return (ret);
2349: }
2350:
1.55 pk 2351: #endif /* sun4, sun4c code */
2352:
2353: #if defined(SUN4M) /* Sun4M versions of above */
1.1 deraadt 2354: /*
1.55 pk 2355: * Walk the given pv list, and for each PTE, set or clear some bits
2356: * (e.g., PG_W or PG_NC).
2357: *
2358: * As a special case, this never clears PG_W on `pager' pages.
2359: * These, being kernel addresses, are always in hardware and have
2360: * a context.
2361: *
2362: * This routine flushes the cache for any page whose PTE changes,
2363: * as long as the process has a context; this is overly conservative.
2364: * It also copies ref and mod bits to the pvlist, on the theory that
2365: * this might save work later. (XXX should test this theory)
1.115 pk 2366: *
2367: * In addition, if the cacheable bit (SRMMU_PG_C) is updated in the PTE
2368: * the corresponding PV_C4M flag is also updated in each pv entry. This
2369: * is done so kvm_uncache() can use this routine and have the uncached
2370: * status stick.
1.1 deraadt 2371: */
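/*
 * [Editor's sketch, not part of the original source.]  Note the reversed
 * polarity with respect to the 4/4c version: SRMMU_PG_C set means the
 * page is cacheable.  The two call shapes used later in this file are:
 */
#if 0
	pv_changepte4m(pv, 0, SRMMU_PG_C);	/* clear PG_C: uncache mappings */
	pv_changepte4m(pv, SRMMU_PG_C, 0);	/* set PG_C: allow caching again */
#endif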
1.53 christos 2372: void
1.55 pk 2373: pv_changepte4m(pv0, bis, bic)
1.115 pk 2374: struct pvlist *pv0;
2375: int bis, bic;
1.55 pk 2376: {
1.115 pk 2377: struct pvlist *pv;
2378: struct pmap *pm;
2379: int va, vr;
1.55 pk 2380: int ctx, s;
2381: struct regmap *rp;
1.72 pk 2382: struct segmap *sp;
1.1 deraadt 2383:
1.55 pk 2384: write_user_windows(); /* paranoid? */
1.1 deraadt 2385:
1.55 pk 2386: s = splpmap(); /* paranoid? */
2387: if (pv0->pv_pmap == NULL) {
2388: splx(s);
2389: return;
1.1 deraadt 2390: }
1.71 pk 2391: ctx = getcontext4m();
1.55 pk 2392: for (pv = pv0; pv != NULL; pv = pv->pv_next) {
1.115 pk 2393: int tpte;
1.55 pk 2394: pm = pv->pv_pmap;
1.81 pk 2395: #ifdef DIAGNOSTIC
1.61 pk 2396: if (pm == NULL)
1.81 pk 2397: panic("pv_changepte: pm == NULL");
2398: #endif
1.55 pk 2399: va = pv->pv_va;
2400: vr = VA_VREG(va);
2401: rp = &pm->pm_regmap[vr];
2402: if (rp->rg_segmap == NULL)
2403: panic("pv_changepte: no segments");
2404:
1.72 pk 2405: sp = &rp->rg_segmap[VA_VSEG(va)];
2406:
2407: if (pm->pm_ctx) {
1.110 mrg 2408: #if defined(UVM)
2409: /*
2410: * Bizarreness: we never clear PG_W on
1.125 pk 2411: * pager pages.
1.110 mrg 2412: */
2413: if ((bic & PPROT_WRITE) &&
2414: va >= uvm.pager_sva && va < uvm.pager_eva)
2415: continue;
2416: #else
2417:
1.124 pk 2418: extern vaddr_t pager_sva, pager_eva;
1.1 deraadt 2419:
1.55 pk 2420: /*
2421: * Bizarreness: we never clear PG_W on
1.125 pk 2422: * pager pages.
1.55 pk 2423: */
2424: if ((bic & PPROT_WRITE) &&
2425: va >= pager_sva && va < pager_eva)
1.60 pk 2426: continue;
1.110 mrg 2427: #endif
1.72 pk 2428:
1.88 pk 2429: setcontext4m(pm->pm_ctxnum);
2430:
2431: /*
2432: * XXX: always flush cache; conservative, but
2433: * needed to invalidate cache tag protection
2434: * bits and when disabling caching.
2435: */
2436: cache_flush_page(va);
2437:
1.72 pk 2438: /* Flush TLB so memory copy is up-to-date */
2439: tlb_flush_page(va);
1.88 pk 2440:
1.72 pk 2441: }
2442:
2443: tpte = sp->sg_pte[VA_SUN4M_VPG(va)];
2444: if ((tpte & SRMMU_TETYPE) != SRMMU_TEPTE) {
2445: printf("pv_changepte: invalid PTE for 0x%x\n", va);
2446: continue;
1.55 pk 2447: }
2448:
1.115 pk 2449: pv0->pv_flags |= MR4M(tpte);
1.55 pk 2450: tpte = (tpte | bis) & ~bic;
1.115 pk 2451: setpgt4m(&sp->sg_pte[VA_SUN4M_VPG(va)], tpte);
2452:
2453: /* Update PV_C4M flag if required */
2454: if (bis & SRMMU_PG_C)
2455: pv->pv_flags |= PV_C4M;
2456: if (bic & SRMMU_PG_C)
2457: pv->pv_flags &= ~PV_C4M;
1.55 pk 2458:
2459: }
1.71 pk 2460: setcontext4m(ctx);
1.55 pk 2461: splx(s);
2462: }
2463:
2464: /*
2465: * Sync ref and mod bits in pvlist. If page has been ref'd or modified,
2466: * update ref/mod bits in pvlist, and clear the hardware bits.
2467: *
2468: * Return the new flags.
2469: */
2470: int
2471: pv_syncflags4m(pv0)
1.124 pk 2472: struct pvlist *pv0;
1.55 pk 2473: {
1.124 pk 2474: struct pvlist *pv;
2475: struct pmap *pm;
2476: int tpte, va, vr, vs, flags;
1.55 pk 2477: int ctx, s;
2478: struct regmap *rp;
2479: struct segmap *sp;
2480:
2481: write_user_windows(); /* paranoid? */
2482:
2483: s = splpmap(); /* paranoid? */
2484: if (pv0->pv_pmap == NULL) { /* paranoid */
2485: splx(s);
2486: return (0);
2487: }
1.71 pk 2488: ctx = getcontext4m();
1.55 pk 2489: flags = pv0->pv_flags;
2490: for (pv = pv0; pv != NULL; pv = pv->pv_next) {
2491: pm = pv->pv_pmap;
2492: va = pv->pv_va;
2493: vr = VA_VREG(va);
2494: vs = VA_VSEG(va);
2495: rp = &pm->pm_regmap[vr];
2496: if (rp->rg_segmap == NULL)
2497: panic("pv_syncflags: no segments");
2498: sp = &rp->rg_segmap[vs];
2499:
2500: if (sp->sg_pte == NULL) /* invalid */
1.60 pk 2501: continue;
1.55 pk 2502:
1.62 pk 2503: /*
2504: * We need the PTE from memory as the TLB version will
2505: * always have the SRMMU_PG_R bit on.
2506: */
1.72 pk 2507: if (pm->pm_ctx) {
1.71 pk 2508: setcontext4m(pm->pm_ctxnum);
1.55 pk 2509: tlb_flush_page(va);
2510: }
1.72 pk 2511: tpte = sp->sg_pte[VA_SUN4M_VPG(va)];
1.62 pk 2512:
1.55 pk 2513: if ((tpte & SRMMU_TETYPE) == SRMMU_TEPTE && /* if valid pte */
2514: (tpte & (SRMMU_PG_M|SRMMU_PG_R))) { /* and mod/refd */
1.72 pk 2515:
1.115 pk 2516: flags |= MR4M(tpte);
1.72 pk 2517:
2518: if (pm->pm_ctx && (tpte & SRMMU_PG_M)) {
2519: cache_flush_page(va); /* XXX: do we need this?*/
2520: tlb_flush_page(va); /* paranoid? */
2521: }
2522:
2523: /* Clear mod/ref bits from PTE and write it back */
1.55 pk 2524: tpte &= ~(SRMMU_PG_M | SRMMU_PG_R);
1.72 pk 2525: setpgt4m(&sp->sg_pte[VA_SUN4M_VPG(va)], tpte);
1.55 pk 2526: }
2527: }
2528: pv0->pv_flags = flags;
1.71 pk 2529: setcontext4m(ctx);
1.55 pk 2530: splx(s);
2531: return (flags);
2532: }
2533:
2534: void
2535: pv_unlink4m(pv, pm, va)
1.124 pk 2536: struct pvlist *pv;
2537: struct pmap *pm;
2538: vaddr_t va;
1.55 pk 2539: {
1.124 pk 2540: struct pvlist *npv;
1.55 pk 2541:
2542: #ifdef DIAGNOSTIC
2543: if (pv->pv_pmap == NULL)
2544: panic("pv_unlink0");
2545: #endif
2546: /*
2547: * First entry is special (sigh).
2548: */
2549: npv = pv->pv_next;
2550: if (pv->pv_pmap == pm && pv->pv_va == va) {
2551: pmap_stats.ps_unlink_pvfirst++;
2552: if (npv != NULL) {
1.115 pk 2553: /*
2554: * Shift next entry into the head.
2555: * Make sure to retain the REF, MOD and ANC flags.
2556: */
1.55 pk 2557: pv->pv_next = npv->pv_next;
2558: pv->pv_pmap = npv->pv_pmap;
2559: pv->pv_va = npv->pv_va;
1.115 pk 2560: pv->pv_flags &= ~PV_C4M;
2561: pv->pv_flags |= (npv->pv_flags & PV_C4M);
1.122 pk 2562: pool_put(&pv_pool, npv);
1.86 pk 2563: } else {
1.115 pk 2564: /*
2565: * No mappings left; we still need to maintain
2566: 			 * the REF and MOD flags, since pmap_is_modified()
2567: * can still be called for this page.
2568: */
1.55 pk 2569: pv->pv_pmap = NULL;
1.115 pk 2570: pv->pv_flags &= ~(PV_C4M|PV_ANC);
1.86 pk 2571: return;
2572: }
1.55 pk 2573: } else {
1.124 pk 2574: struct pvlist *prev;
1.55 pk 2575:
2576: for (prev = pv;; prev = npv, npv = npv->pv_next) {
2577: pmap_stats.ps_unlink_pvsearch++;
2578: if (npv == NULL)
2579: panic("pv_unlink");
2580: if (npv->pv_pmap == pm && npv->pv_va == va)
2581: break;
2582: }
2583: prev->pv_next = npv->pv_next;
1.122 pk 2584: pool_put(&pv_pool, npv);
1.55 pk 2585: }
1.115 pk 2586: if ((pv->pv_flags & (PV_C4M|PV_ANC)) == (PV_C4M|PV_ANC)) {
1.55 pk 2587: /*
2588: * Not cached: check to see if we can fix that now.
2589: */
2590: va = pv->pv_va;
2591: for (npv = pv->pv_next; npv != NULL; npv = npv->pv_next)
1.115 pk 2592: if (BADALIAS(va, npv->pv_va) ||
2593: (npv->pv_flags & PV_C4M) == 0)
1.55 pk 2594: return;
1.115 pk 2595: 		pv->pv_flags &= ~PV_ANC;
1.55 pk 2596: pv_changepte4m(pv, SRMMU_PG_C, 0);
2597: }
2598: }
2599:
2600: /*
2601: * pv_link is the inverse of pv_unlink, and is used in pmap_enter.
2602: * It returns SRMMU_PG_C if the (new) pvlist says that the address cannot
2603:  * be cached (i.e. its result must be (& ~)'d into the new PTE).
2604: */
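/*
 * [Editor's sketch, not part of the original source.]  "(& ~)'d in"
 * means a caller clears the cacheable bit with the return value while
 * building the new PTE; the `pteproto' name and call shape below are
 * assumed, the real pmap_enter4m code is not part of this excerpt:
 */
#if 0
	pteproto &= ~pv_link4m(pvhead(pa), pm, va, (pteproto & SRMMU_PG_C) == 0);
#endif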
2605: /*static*/ int
1.115 pk 2606: pv_link4m(pv, pm, va, nc)
2607: struct pvlist *pv;
2608: struct pmap *pm;
1.124 pk 2609: vaddr_t va;
1.115 pk 2610: int nc;
1.55 pk 2611: {
1.115 pk 2612: struct pvlist *npv;
2613: int ret;
2614:
2615: ret = nc ? SRMMU_PG_C : 0;
1.55 pk 2616:
2617: if (pv->pv_pmap == NULL) {
2618: /* no pvlist entries yet */
2619: pmap_stats.ps_enter_firstpv++;
2620: pv->pv_next = NULL;
2621: pv->pv_pmap = pm;
2622: pv->pv_va = va;
1.115 pk 2623: pv->pv_flags |= nc ? 0 : PV_C4M;
2624: return (ret);
1.55 pk 2625: }
2626: /*
2627: * Before entering the new mapping, see if
2628: * it will cause old mappings to become aliased
2629: * and thus need to be `discached'.
2630: */
2631: pmap_stats.ps_enter_secondpv++;
1.115 pk 2632: if ((pv->pv_flags & PV_ANC) != 0 || (pv->pv_flags & PV_C4M) == 0) {
1.55 pk 2633: /* already uncached, just stay that way */
2634: ret = SRMMU_PG_C;
2635: } else {
2636: for (npv = pv; npv != NULL; npv = npv->pv_next) {
1.115 pk 2637: if ((npv->pv_flags & PV_C4M) == 0) {
2638: ret = SRMMU_PG_C;
2639: break;
2640: }
1.55 pk 2641: if (BADALIAS(va, npv->pv_va)) {
2642: #ifdef DEBUG
1.84 pk 2643: if (pmapdebug & PDB_CACHESTUFF)
2644: printf(
1.91 fair 2645: "pv_link: badalias: pid %d, 0x%lx<=>0x%lx, pa 0x%lx\n",
1.84 pk 2646: curproc ? curproc->p_pid : -1,
2647: va, npv->pv_va,
2648: vm_first_phys + (pv-pv_table)*NBPG);
1.55 pk 2649: #endif
1.115 pk 2650: /* Mark list head `uncached due to aliases' */
2651: pv->pv_flags |= PV_ANC;
1.58 pk 2652: pv_changepte4m(pv, 0, ret = SRMMU_PG_C);
1.55 pk 2653: /* cache_flush_page(va); XXX: needed? */
2654: break;
2655: }
2656: }
2657: }
1.122 pk 2658: npv = pool_get(&pv_pool, PR_WAITOK);
1.55 pk 2659: npv->pv_next = pv->pv_next;
2660: npv->pv_pmap = pm;
2661: npv->pv_va = va;
1.115 pk 2662: npv->pv_flags = nc ? 0 : PV_C4M;
1.55 pk 2663: pv->pv_next = npv;
2664: return (ret);
2665: }
2666: #endif
2667:
2668: /*
2669: * Walk the given list and flush the cache for each (MI) page that is
2670: * potentially in the cache. Called only if vactype != VAC_NONE.
2671: */
2672: void
2673: pv_flushcache(pv)
1.124 pk 2674: struct pvlist *pv;
1.55 pk 2675: {
1.124 pk 2676: struct pmap *pm;
2677: int s, ctx;
1.55 pk 2678:
2679: write_user_windows(); /* paranoia? */
2680:
2681: s = splpmap(); /* XXX extreme paranoia */
2682: if ((pm = pv->pv_pmap) != NULL) {
2683: ctx = getcontext();
2684: for (;;) {
2685: if (pm->pm_ctx) {
2686: setcontext(pm->pm_ctxnum);
2687: cache_flush_page(pv->pv_va);
2688: }
2689: pv = pv->pv_next;
2690: if (pv == NULL)
2691: break;
2692: pm = pv->pv_pmap;
2693: }
2694: setcontext(ctx);
2695: }
2696: splx(s);
2697: }
2698:
1.124 pk 2699: vsize_t
1.122 pk 2700: pv_table_map(base, mapit)
1.124 pk 2701: paddr_t base;
1.122 pk 2702: int mapit;
2703: {
2704: int nmem;
2705: struct memarr *mp;
1.124 pk 2706: vsize_t s;
2707: vaddr_t sva, va, eva;
2708: paddr_t pa;
1.122 pk 2709:
2710: /*
2711: * Map pv_table[] as a `sparse' array. pv_table_map() is called
2712: * twice: the first time `mapit' is 0, and the number of
2713: * physical pages needed to map the used pieces of pv_table[]
2714: * is computed; the second time those pages are used to
2715: * actually map pv_table[].
2716: * In both cases, this function returns the amount of physical
2717: * memory needed.
2718: */
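	/*
	 * [Editor's sketch, not part of the original source.]  The two-pass
	 * protocol as seen from the boot code; the first call appears in
	 * pmap_bootstrap*() below, while the second (mapit != 0) call is
	 * assumed here and is not part of this excerpt.
	 */
#if 0
	p += pv_table_map((paddr_t)p - KERNBASE, 0);	/* pass 1: size it */
	/* ... later, once pmap_enter() works ... */
	(void)pv_table_map(pv_physmem, 1);		/* pass 2: map it */
#endif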
2719:
2720: if (!mapit)
2721: /* Mark physical pages for pv_table[] */
2722: pv_physmem = base;
2723:
2724: pa = pv_physmem; /* XXX - always init `pa' to appease gcc */
2725:
2726: s = 0;
2727: sva = eva = 0;
2728: for (mp = pmemarr, nmem = npmemarr; --nmem >= 0; mp++) {
2729: int len;
1.124 pk 2730: paddr_t addr;
1.122 pk 2731:
2732: len = mp->len;
2733: if ((addr = mp->addr) < base) {
2734: /*
2735: * pv_table[] covers everything above `avail_start'.
2736: */
2737: 			len -= base - addr;
2738: 			addr = base;
2739: }
2740:
2741: /* Calculate stretch of pv_table */
2742: len = sizeof(struct pvlist) * btoc(len);
1.124 pk 2743: va = (vaddr_t)&pv_table[btoc(addr - base)];
1.122 pk 2744: sva = trunc_page(va);
2745:
2746: if (sva < eva) {
2747: /* This chunk overlaps the previous in pv_table[] */
2748: sva += NBPG;
2749: if (sva < eva)
2750: panic("pv_table_map: sva(0x%lx)<eva(0x%lx)",
2751: sva, eva);
2752: }
2753: eva = roundup(va + len, NBPG);
2754:
2755: /* Add this range to the total */
2756: s += eva - sva;
2757:
2758: if (mapit) {
2759: /* Map this piece of pv_table[] */
2760: for (va = sva; va < eva; va += PAGE_SIZE) {
2761: pmap_enter(pmap_kernel(), va, pa,
2762: VM_PROT_READ|VM_PROT_WRITE, 1);
2763: pa += PAGE_SIZE;
2764: }
2765: bzero((caddr_t)sva, eva - sva);
2766: }
2767: }
2768: return (s);
2769: }
2770:
1.55 pk 2771: /*----------------------------------------------------------------*/
2772:
2773: /*
2774: * At last, pmap code.
2775: */
1.1 deraadt 2776:
1.99 fair 2777: #if defined(SUN4) && (defined(SUN4C) || defined(SUN4M))
1.18 deraadt 2778: int nptesg;
2779: #endif
2780:
1.55 pk 2781: #if defined(SUN4M)
2782: static void pmap_bootstrap4m __P((void));
2783: #endif
2784: #if defined(SUN4) || defined(SUN4C)
2785: static void pmap_bootstrap4_4c __P((int, int, int));
2786: #endif
2787:
1.1 deraadt 2788: /*
2789: * Bootstrap the system enough to run with VM enabled.
2790: *
1.43 pk 2791: * nsegment is the number of mmu segment entries (``PMEGs'');
2792: * nregion is the number of mmu region entries (``SMEGs'');
1.1 deraadt 2793: * nctx is the number of contexts.
2794: */
2795: void
1.43 pk 2796: pmap_bootstrap(nctx, nregion, nsegment)
2797: int nsegment, nctx, nregion;
1.1 deraadt 2798: {
1.55 pk 2799:
1.110 mrg 2800: #if defined(UVM)
2801: uvmexp.pagesize = NBPG;
2802: uvm_setpagesize();
2803: #else
1.55 pk 2804: cnt.v_page_size = NBPG;
2805: vm_set_page_size();
1.110 mrg 2806: #endif
1.55 pk 2807:
2808: #if defined(SUN4) && (defined(SUN4C) || defined(SUN4M))
2809: /* In this case NPTESG is not a #define */
2810: nptesg = (NBPSG >> pgshift);
2811: #endif
2812:
1.69 pk 2813: #if 0
1.55 pk 2814: ncontext = nctx;
1.69 pk 2815: #endif
1.55 pk 2816:
2817: #if defined(SUN4M)
2818: if (CPU_ISSUN4M) {
2819: pmap_bootstrap4m();
2820: return;
2821: }
2822: #endif
2823: #if defined(SUN4) || defined(SUN4C)
2824: if (CPU_ISSUN4OR4C) {
2825: pmap_bootstrap4_4c(nctx, nregion, nsegment);
2826: return;
2827: }
2828: #endif
2829: }
2830:
2831: #if defined(SUN4) || defined(SUN4C)
2832: void
2833: pmap_bootstrap4_4c(nctx, nregion, nsegment)
2834: int nsegment, nctx, nregion;
2835: {
1.122 pk 2836: union ctxinfo *ci;
2837: struct mmuentry *mmuseg;
1.77 pk 2838: #if defined(SUN4_MMU3L)
1.122 pk 2839: struct mmuentry *mmureg;
1.53 christos 2840: #endif
1.43 pk 2841: struct regmap *rp;
1.122 pk 2842: int i, j;
2843: int npte, zseg, vr, vs;
2844: int rcookie, scookie;
2845: caddr_t p;
1.1 deraadt 2846: int lastpage;
1.139 ! chs 2847: vaddr_t va;
1.1 deraadt 2848: extern char end[];
1.7 pk 2849: #ifdef DDB
2850: extern char *esym;
2851: #endif
1.1 deraadt 2852:
1.45 pk 2853: switch (cputyp) {
2854: case CPU_SUN4C:
2855: mmu_has_hole = 1;
2856: break;
2857: case CPU_SUN4:
1.69 pk 2858: if (cpuinfo.cpu_type != CPUTYP_4_400) {
1.45 pk 2859: mmu_has_hole = 1;
2860: break;
2861: }
2862: }
2863:
1.110 mrg 2864: #if defined(UVM)
2865: uvmexp.pagesize = NBPG;
2866: uvm_setpagesize();
2867: #else
1.19 deraadt 2868: cnt.v_page_size = NBPG;
2869: vm_set_page_size();
1.110 mrg 2870: #endif
1.19 deraadt 2871:
1.31 pk 2872: #if defined(SUN4)
2873: /*
2874: * set up the segfixmask to mask off invalid bits
2875: */
1.43 pk 2876: segfixmask = nsegment - 1; /* assume nsegment is a power of 2 */
2877: #ifdef DIAGNOSTIC
2878: if (((nsegment & segfixmask) | (nsegment & ~segfixmask)) != nsegment) {
1.66 christos 2879: printf("pmap_bootstrap: unsuitable number of segments (%d)\n",
1.43 pk 2880: nsegment);
2881: callrom();
2882: }
2883: #endif
1.31 pk 2884: #endif
2885:
1.55 pk 2886: #if defined(SUN4M) /* We're in a dual-arch kernel. Setup 4/4c fn. ptrs */
2887: pmap_clear_modify_p = pmap_clear_modify4_4c;
2888: pmap_clear_reference_p = pmap_clear_reference4_4c;
2889: pmap_copy_page_p = pmap_copy_page4_4c;
2890: pmap_enter_p = pmap_enter4_4c;
2891: pmap_extract_p = pmap_extract4_4c;
2892: pmap_is_modified_p = pmap_is_modified4_4c;
2893: pmap_is_referenced_p = pmap_is_referenced4_4c;
2894: pmap_page_protect_p = pmap_page_protect4_4c;
2895: pmap_protect_p = pmap_protect4_4c;
2896: pmap_zero_page_p = pmap_zero_page4_4c;
2897: pmap_changeprot_p = pmap_changeprot4_4c;
2898: pmap_rmk_p = pmap_rmk4_4c;
2899: pmap_rmu_p = pmap_rmu4_4c;
2900: #endif /* defined SUN4M */
1.43 pk 2901:
1.1 deraadt 2902: /*
2903: * Last segment is the `invalid' one (one PMEG of pte's with !pg_v).
2904: * It will never be used for anything else.
2905: */
1.43 pk 2906: seginval = --nsegment;
2907:
1.69 pk 2908: #if defined(SUN4_MMU3L)
2909: if (HASSUN4_MMU3L)
1.43 pk 2910: reginval = --nregion;
2911: #endif
2912:
2913: 	 * Initialize the kernel pmap.
2914: * Intialize the kernel pmap.
2915: */
2916: /* kernel_pmap_store.pm_ctxnum = 0; */
1.111 chs 2917: simple_lock_init(&kernel_pmap_store.pm_lock);
1.43 pk 2918: kernel_pmap_store.pm_refcount = 1;
1.69 pk 2919: #if defined(SUN4_MMU3L)
1.43 pk 2920: TAILQ_INIT(&kernel_pmap_store.pm_reglist);
2921: #endif
2922: TAILQ_INIT(&kernel_pmap_store.pm_seglist);
2923:
2924: kernel_pmap_store.pm_regmap = &kernel_regmap_store[-NUREG];
2925: for (i = NKREG; --i >= 0;) {
1.69 pk 2926: #if defined(SUN4_MMU3L)
1.43 pk 2927: kernel_regmap_store[i].rg_smeg = reginval;
2928: #endif
2929: kernel_regmap_store[i].rg_segmap =
2930: &kernel_segmap_store[i * NSEGRG];
2931: for (j = NSEGRG; --j >= 0;)
2932: kernel_segmap_store[i * NSEGRG + j].sg_pmeg = seginval;
2933: }
1.1 deraadt 2934:
2935: /*
2936: * Preserve the monitor ROM's reserved VM region, so that
2937: * we can use L1-A or the monitor's debugger. As a side
2938: * effect we map the ROM's reserved VM into all contexts
2939: * (otherwise L1-A crashes the machine!).
2940: */
1.43 pk 2941:
1.58 pk 2942: mmu_reservemon4_4c(&nregion, &nsegment);
1.43 pk 2943:
1.69 pk 2944: #if defined(SUN4_MMU3L)
1.43 pk 2945: /* Reserve one region for temporary mappings */
2946: tregion = --nregion;
2947: #endif
1.1 deraadt 2948:
2949: /*
1.43 pk 2950: * Allocate and clear mmu entries and context structures.
1.1 deraadt 2951: */
2952: p = end;
1.7 pk 2953: #ifdef DDB
2954: if (esym != 0)
1.78 pk 2955: p = esym;
1.7 pk 2956: #endif
1.69 pk 2957: #if defined(SUN4_MMU3L)
1.43 pk 2958: mmuregions = mmureg = (struct mmuentry *)p;
2959: p += nregion * sizeof(struct mmuentry);
1.78 pk 2960: bzero(mmuregions, nregion * sizeof(struct mmuentry));
1.43 pk 2961: #endif
2962: mmusegments = mmuseg = (struct mmuentry *)p;
2963: p += nsegment * sizeof(struct mmuentry);
1.78 pk 2964: bzero(mmusegments, nsegment * sizeof(struct mmuentry));
2965:
1.69 pk 2966: pmap_kernel()->pm_ctx = cpuinfo.ctxinfo = ci = (union ctxinfo *)p;
1.1 deraadt 2967: p += nctx * sizeof *ci;
2968:
1.43 pk 2969: /* Initialize MMU resource queues */
1.69 pk 2970: #if defined(SUN4_MMU3L)
1.43 pk 2971: TAILQ_INIT(®ion_freelist);
2972: TAILQ_INIT(®ion_lru);
2973: TAILQ_INIT(®ion_locked);
2974: #endif
2975: TAILQ_INIT(&segm_freelist);
2976: TAILQ_INIT(&segm_lru);
2977: TAILQ_INIT(&segm_locked);
2978:
1.1 deraadt 2979: /*
2980: * Set up the `constants' for the call to vm_init()
2981: * in main(). All pages beginning at p (rounded up to
2982: * the next whole page) and continuing through the number
2983: * of available pages are free, but they start at a higher
2984: * virtual address. This gives us two mappable MD pages
2985: * for pmap_zero_page and pmap_copy_page, and one MI page
2986: * for /dev/mem, all with no associated physical memory.
2987: */
2988: p = (caddr_t)(((u_int)p + NBPG - 1) & ~PGOFSET);
1.36 pk 2989:
2990: /*
1.122 pk 2991: * Grab physical memory list.
1.36 pk 2992: */
1.122 pk 2993: get_phys_mem();
2994:
2995: /* Allocate physical memory for pv_table[] */
1.124 pk 2996: p += pv_table_map((paddr_t)p - KERNBASE, 0);
2997: avail_start = (paddr_t)p - KERNBASE;
1.38 pk 2998:
2999: i = (int)p;
3000: vpage[0] = p, p += NBPG;
3001: vpage[1] = p, p += NBPG;
1.41 mycroft 3002: vmmap = p, p += NBPG;
1.38 pk 3003: p = reserve_dumppages(p);
1.39 pk 3004:
1.122 pk 3005: /* Allocate virtual memory for pv_table[]. */
1.37 pk 3006: pv_table = (struct pvlist *)p;
3007: p += round_page(sizeof(struct pvlist) * atop(avail_end - avail_start));
1.36 pk 3008:
1.124 pk 3009: virtual_avail = (vaddr_t)p;
1.1 deraadt 3010: virtual_end = VM_MAX_KERNEL_ADDRESS;
3011:
3012: p = (caddr_t)i; /* retract to first free phys */
3013:
3014: /*
3015: * All contexts are free except the kernel's.
3016: *
3017: * XXX sun4c could use context 0 for users?
3018: */
1.42 mycroft 3019: ci->c_pmap = pmap_kernel();
1.1 deraadt 3020: ctx_freelist = ci + 1;
3021: for (i = 1; i < ncontext; i++) {
3022: ci++;
3023: ci->c_nextfree = ci + 1;
3024: }
3025: ci->c_nextfree = NULL;
3026: ctx_kick = 0;
3027: ctx_kickdir = -1;
3028:
3029: /*
3030: * Init mmu entries that map the kernel physical addresses.
3031: *
3032: * All the other MMU entries are free.
3033: *
3034: * THIS ASSUMES SEGMENT i IS MAPPED BY MMU ENTRY i DURING THE
3035: * BOOT PROCESS
3036: */
1.43 pk 3037:
3038: zseg = ((((u_int)p + NBPSG - 1) & ~SGOFSET) - KERNBASE) >> SGSHIFT;
1.1 deraadt 3039: lastpage = VA_VPG(p);
3040: if (lastpage == 0)
1.43 pk 3041: /*
3042: * If the page bits in p are 0, we filled the last segment
3043: * exactly (now how did that happen?); if not, it is
3044: * the last page filled in the last segment.
3045: */
1.1 deraadt 3046: lastpage = NPTESG;
1.43 pk 3047:
1.1 deraadt 3048: p = (caddr_t)KERNBASE; /* first va */
3049: vs = VA_VSEG(KERNBASE); /* first virtual segment */
1.43 pk 3050: vr = VA_VREG(KERNBASE); /* first virtual region */
3051: rp = &pmap_kernel()->pm_regmap[vr];
3052:
3053: for (rcookie = 0, scookie = 0;;) {
3054:
1.1 deraadt 3055: /*
1.43 pk 3056: * Distribute each kernel region/segment into all contexts.
1.1 deraadt 3057: * This is done through the monitor ROM, rather than
3058: * directly here: if we do a setcontext we will fault,
3059: * as we are not (yet) mapped in any other context.
3060: */
1.43 pk 3061:
3062: if ((vs % NSEGRG) == 0) {
3063: /* Entering a new region */
3064: if (VA_VREG(p) > vr) {
3065: #ifdef DEBUG
1.66 christos 3066: printf("note: giant kernel!\n");
1.43 pk 3067: #endif
3068: vr++, rp++;
3069: }
1.69 pk 3070: #if defined(SUN4_MMU3L)
3071: if (HASSUN4_MMU3L) {
1.43 pk 3072: for (i = 1; i < nctx; i++)
1.137 pk 3073: prom_setcontext(i, p, rcookie);
1.43 pk 3074:
3075: TAILQ_INSERT_TAIL(®ion_locked,
3076: mmureg, me_list);
3077: TAILQ_INSERT_TAIL(&pmap_kernel()->pm_reglist,
3078: mmureg, me_pmchain);
3079: mmureg->me_cookie = rcookie;
3080: mmureg->me_pmap = pmap_kernel();
3081: mmureg->me_vreg = vr;
3082: rp->rg_smeg = rcookie;
3083: mmureg++;
3084: rcookie++;
3085: }
3086: #endif
3087: }
3088:
1.69 pk 3089: #if defined(SUN4_MMU3L)
3090: if (!HASSUN4_MMU3L)
1.43 pk 3091: #endif
3092: for (i = 1; i < nctx; i++)
1.137 pk 3093: prom_setcontext(i, p, scookie);
1.1 deraadt 3094:
3095: /* set up the mmu entry */
1.43 pk 3096: TAILQ_INSERT_TAIL(&segm_locked, mmuseg, me_list);
3097: TAILQ_INSERT_TAIL(&pmap_kernel()->pm_seglist, mmuseg, me_pmchain);
1.70 pk 3098: pmap_stats.ps_npmeg_locked++;
1.43 pk 3099: mmuseg->me_cookie = scookie;
3100: mmuseg->me_pmap = pmap_kernel();
3101: mmuseg->me_vreg = vr;
3102: mmuseg->me_vseg = vs % NSEGRG;
3103: rp->rg_segmap[vs % NSEGRG].sg_pmeg = scookie;
3104: npte = ++scookie < zseg ? NPTESG : lastpage;
3105: rp->rg_segmap[vs % NSEGRG].sg_npte = npte;
3106: rp->rg_nsegmap += 1;
3107: mmuseg++;
1.1 deraadt 3108: vs++;
1.43 pk 3109: if (scookie < zseg) {
1.1 deraadt 3110: p += NBPSG;
3111: continue;
3112: }
1.43 pk 3113:
1.1 deraadt 3114: /*
3115: * Unmap the pages, if any, that are not part of
3116: * the final segment.
3117: */
1.43 pk 3118: for (p += npte << PGSHIFT; npte < NPTESG; npte++, p += NBPG)
1.55 pk 3119: setpte4(p, 0);
1.43 pk 3120:
1.69 pk 3121: #if defined(SUN4_MMU3L)
3122: if (HASSUN4_MMU3L) {
1.43 pk 3123: /*
3124: * Unmap the segments, if any, that are not part of
3125: * the final region.
3126: */
3127: for (i = rp->rg_nsegmap; i < NSEGRG; i++, p += NBPSG)
3128: setsegmap(p, seginval);
1.139 ! chs 3129:
! 3130: /*
! 3131: * Unmap any kernel regions that we aren't using.
! 3132: */
! 3133: for (i = 0; i < nctx; i++) {
! 3134: setcontext4(i);
! 3135: for (va = (vaddr_t)p;
! 3136: va < (OPENPROM_STARTVADDR & ~(NBPRG - 1));
! 3137: va += NBPRG)
! 3138: setregmap(va, reginval);
! 3139: }
! 3140:
! 3141: } else
! 3142: #endif
! 3143: {
! 3144: /*
! 3145: * Unmap any kernel segments that we aren't using.
! 3146: */
! 3147: for (i = 0; i < nctx; i++) {
! 3148: setcontext4(i);
! 3149: for (va = (vaddr_t)p;
! 3150: va < (OPENPROM_STARTVADDR & ~(NBPSG - 1));
! 3151: va += NBPSG)
! 3152: setsegmap(va, seginval);
! 3153: }
1.43 pk 3154: }
1.1 deraadt 3155: break;
3156: }
1.43 pk 3157:
1.69 pk 3158: #if defined(SUN4_MMU3L)
3159: if (HASSUN4_MMU3L)
1.43 pk 3160: for (; rcookie < nregion; rcookie++, mmureg++) {
3161: mmureg->me_cookie = rcookie;
3162: TAILQ_INSERT_TAIL(®ion_freelist, mmureg, me_list);
3163: }
3164: #endif
3165:
3166: for (; scookie < nsegment; scookie++, mmuseg++) {
3167: mmuseg->me_cookie = scookie;
3168: TAILQ_INSERT_TAIL(&segm_freelist, mmuseg, me_list);
1.70 pk 3169: pmap_stats.ps_npmeg_free++;
1.1 deraadt 3170: }
3171:
1.13 pk 3172: /* Erase all spurious user-space segmaps */
3173: for (i = 1; i < ncontext; i++) {
1.71 pk 3174: setcontext4(i);
1.69 pk 3175: if (HASSUN4_MMU3L)
1.43 pk 3176: for (p = 0, j = NUREG; --j >= 0; p += NBPRG)
3177: setregmap(p, reginval);
3178: else
3179: for (p = 0, vr = 0; vr < NUREG; vr++) {
1.45 pk 3180: if (VA_INHOLE(p)) {
3181: p = (caddr_t)MMU_HOLE_END;
3182: vr = VA_VREG(p);
1.43 pk 3183: }
3184: for (j = NSEGRG; --j >= 0; p += NBPSG)
3185: setsegmap(p, seginval);
3186: }
1.13 pk 3187: }
1.71 pk 3188: setcontext4(0);
1.13 pk 3189:
1.1 deraadt 3190: /*
3191: * write protect & encache kernel text;
3192: * set red zone at kernel base; enable cache on message buffer.
3193: */
3194: {
1.23 deraadt 3195: extern char etext[];
1.1 deraadt 3196: #ifdef KGDB
1.124 pk 3197: int mask = ~PG_NC; /* XXX chgkprot is busted */
1.1 deraadt 3198: #else
1.124 pk 3199: int mask = ~(PG_W | PG_NC);
1.1 deraadt 3200: #endif
1.2 deraadt 3201:
1.23 deraadt 3202: for (p = (caddr_t)trapbase; p < etext; p += NBPG)
1.55 pk 3203: setpte4(p, getpte4(p) & mask);
1.1 deraadt 3204: }
1.107 pk 3205: pmap_page_upload();
1.1 deraadt 3206: }
1.55 pk 3207: #endif
1.1 deraadt 3208:
1.55 pk 3209: #if defined(SUN4M) /* Sun4M version of pmap_bootstrap */
3210: /*
3211: * Bootstrap the system enough to run with VM enabled on a Sun4M machine.
3212: *
3213: * Switches from ROM to kernel page tables, and sets up initial mappings.
3214: */
3215: static void
3216: pmap_bootstrap4m(void)
1.36 pk 3217: {
1.124 pk 3218: int i, j;
1.122 pk 3219: caddr_t p, q;
3220: union ctxinfo *ci;
3221: int reg, seg;
1.71 pk 3222: unsigned int ctxtblsize;
1.79 pk 3223: caddr_t pagetables_start, pagetables_end;
1.55 pk 3224: extern char end[];
3225: extern char etext[];
1.78 pk 3226: extern caddr_t reserve_dumppages(caddr_t);
1.55 pk 3227: #ifdef DDB
3228: extern char *esym;
3229: #endif
1.36 pk 3230:
1.55 pk 3231: #if defined(SUN4) || defined(SUN4C) /* setup 4M fn. ptrs for dual-arch kernel */
3232: pmap_clear_modify_p = pmap_clear_modify4m;
3233: pmap_clear_reference_p = pmap_clear_reference4m;
3234: pmap_copy_page_p = pmap_copy_page4m;
3235: pmap_enter_p = pmap_enter4m;
3236: pmap_extract_p = pmap_extract4m;
3237: pmap_is_modified_p = pmap_is_modified4m;
3238: pmap_is_referenced_p = pmap_is_referenced4m;
3239: pmap_page_protect_p = pmap_page_protect4m;
3240: pmap_protect_p = pmap_protect4m;
3241: pmap_zero_page_p = pmap_zero_page4m;
3242: pmap_changeprot_p = pmap_changeprot4m;
3243: pmap_rmk_p = pmap_rmk4m;
3244: pmap_rmu_p = pmap_rmu4m;
3245: #endif /* defined Sun4/Sun4c */
1.37 pk 3246:
1.36 pk 3247: /*
1.55 pk  3248: 	 * Initialize the kernel pmap.
3249: */
3250: /* kernel_pmap_store.pm_ctxnum = 0; */
1.87 pk 3251: simple_lock_init(&kernel_pmap_store.pm_lock);
1.55 pk 3252: kernel_pmap_store.pm_refcount = 1;
1.71 pk 3253:
3254: /*
3255: * Set up pm_regmap for kernel to point NUREG *below* the beginning
1.55 pk 3256: * of kernel regmap storage. Since the kernel only uses regions
3257: * above NUREG, we save storage space and can index kernel and
3258: * user regions in the same way
1.36 pk 3259: */
1.55 pk 3260: kernel_pmap_store.pm_regmap = &kernel_regmap_store[-NUREG];
3261: kernel_pmap_store.pm_reg_ptps = NULL;
3262: kernel_pmap_store.pm_reg_ptps_pa = 0;
3263: bzero(kernel_regmap_store, NKREG * sizeof(struct regmap));
3264: bzero(kernel_segmap_store, NKREG * NSEGRG * sizeof(struct segmap));
3265: for (i = NKREG; --i >= 0;) {
3266: kernel_regmap_store[i].rg_segmap =
3267: &kernel_segmap_store[i * NSEGRG];
3268: kernel_regmap_store[i].rg_seg_ptps = NULL;
3269: for (j = NSEGRG; --j >= 0;)
3270: kernel_segmap_store[i * NSEGRG + j].sg_pte = NULL;
3271: }
1.38 pk 3272:
1.55 pk 3273: p = end; /* p points to top of kernel mem */
3274: #ifdef DDB
3275: if (esym != 0)
1.78 pk 3276: p = esym;
1.55 pk 3277: #endif
3278:
1.77 pk 3279:
1.71 pk 3280: /* Allocate context administration */
1.69 pk 3281: pmap_kernel()->pm_ctx = cpuinfo.ctxinfo = ci = (union ctxinfo *)p;
1.55 pk 3282: p += ncontext * sizeof *ci;
1.69 pk 3283: bzero((caddr_t)ci, (u_int)p - (u_int)ci);
1.77 pk 3284: #if 0
1.55 pk 3285: ctxbusyvector = p;
3286: p += ncontext;
3287: bzero(ctxbusyvector, ncontext);
3288: ctxbusyvector[0] = 1; /* context 0 is always in use */
1.69 pk 3289: #endif
1.55 pk 3290:
1.77 pk 3291:
3292: /*
3293: * Set up the `constants' for the call to vm_init()
3294: * in main(). All pages beginning at p (rounded up to
3295: * the next whole page) and continuing through the number
3296: * of available pages are free.
3297: */
3298: p = (caddr_t)(((u_int)p + NBPG - 1) & ~PGOFSET);
1.122 pk 3299:
1.77 pk 3300: /*
1.122 pk 3301: * Grab physical memory list.
1.77 pk 3302: */
1.122 pk 3303: get_phys_mem();
3304:
3305: /* Allocate physical memory for pv_table[] */
1.124 pk 3306: p += pv_table_map((paddr_t)p - KERNBASE, 0);
3307: avail_start = (paddr_t)p - KERNBASE;
1.77 pk 3308:
3309: /*
3310: * Reserve memory for MMU pagetables. Some of these have severe
3311: * alignment restrictions. We allocate in a sequence that
3312: * minimizes alignment gaps.
3313: * The amount of physical memory that becomes unavailable for
1.108 pk 3314: * general VM use is marked by [unavail_gap_start, unavail_gap_end>.
1.77 pk 3315: */
3316:
1.55 pk 3317: /*
1.71 pk 3318: * Reserve memory for I/O pagetables. This takes 64k of memory
1.55 pk 3319: * since we want to have 64M of dvma space (this actually depends
1.125 pk 3320: * on the definition of IOMMU_DVMA_BASE...we may drop it back to 32M).
3321: 	 * The table must be aligned to its own size, i.e. on a
1.77 pk 3322: 	 * ((0 - IOMMU_DVMA_BASE) / 1024)-byte boundary (64K for 64M of dvma space).
1.55 pk 3323: */
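	/*
	 * Worked example (editor-added, not in the original source),
	 * assuming 4K pages and 4-byte IOPTEs: a 64M DVMA range spans
	 * 64M/4K = 16384 pages, so the IOPTE table takes 16384 * 4 = 64K,
	 * which is exactly (0 - IOMMU_DVMA_BASE) / 1024 -- the size and
	 * alignment used in the roundup() below.
	 */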
3324: #ifdef DEBUG
1.125 pk 3325: if ((0 - IOMMU_DVMA_BASE) % (16*1024*1024))
3326: panic("pmap_bootstrap4m: invalid IOMMU_DVMA_BASE of 0x%x",
3327: IOMMU_DVMA_BASE);
1.55 pk 3328: #endif
3329:
1.125 pk 3330: p = (caddr_t) roundup((u_int)p, (0 - IOMMU_DVMA_BASE) / 1024);
1.124 pk 3331: unavail_gap_start = (paddr_t)p - KERNBASE;
1.55 pk 3332:
3333: kernel_iopte_table = (u_int *)p;
3334: kernel_iopte_table_pa = VA2PA((caddr_t)kernel_iopte_table);
1.125 pk 3335: p += (0 - IOMMU_DVMA_BASE) / 1024;
1.55 pk 3336:
1.79 pk 3337: pagetables_start = p;
1.55 pk 3338: /*
1.77 pk 3339: * Allocate context table.
1.71 pk 3340: 	 * To keep supersparc happy, minimum alignment is on a 4K boundary.
3341: */
3342: ctxtblsize = max(ncontext,1024) * sizeof(int);
3343: cpuinfo.ctx_tbl = (int *)roundup((u_int)p, ctxtblsize);
1.128 pk 3344: cpuinfo.ctx_tbl_pa = (paddr_t)cpuinfo.ctx_tbl - KERNBASE;
1.71 pk 3345: p = (caddr_t)((u_int)cpuinfo.ctx_tbl + ctxtblsize);
3346:
3347: /*
3348: * Reserve memory for segment and page tables needed to map the entire
1.96 pk 3349: * kernel. This takes (2k + NKREG * 16k) of space, but
1.55 pk 3350: * unfortunately is necessary since pmap_enk *must* be able to enter
3351: * a kernel mapping without resorting to malloc, or else the
3352: * possibility of deadlock arises (pmap_enk4m is called to enter a
3353: * mapping; it needs to malloc a page table; malloc then calls
3354: * pmap_enk4m to enter the new malloc'd page; pmap_enk4m needs to
3355: * malloc a page table to enter _that_ mapping; malloc deadlocks since
3356: * it is already allocating that object).
3357: */
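	/*
	 * Editor-added note on the "(2k + NKREG * 16k)" figure above,
	 * assuming the SRMMU table sizes used below (SRMMU_L1SIZE = 256,
	 * SRMMU_L2SIZE = SRMMU_L3SIZE = 64 four-byte entries, NSEGRG = 64):
	 * the dominant term is the L3 page tables, NSEGRG * SRMMU_L3SIZE *
	 * sizeof(int) = 16K per kernel region; the remainder covers the 1K
	 * L1 region table and one 256-byte L2 segment table per region,
	 * plus alignment padding.
	 */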
1.122 pk 3358: p = (caddr_t) roundup((u_int)p, SRMMU_L1SIZE * sizeof(u_int));
3359: qzero(p, SRMMU_L1SIZE * sizeof(u_int));
1.77 pk 3360: kernel_regtable_store = (u_int *)p;
1.122 pk 3361: p += SRMMU_L1SIZE * sizeof(u_int);
1.77 pk 3362:
1.122 pk 3363: p = (caddr_t) roundup((u_int)p, SRMMU_L2SIZE * sizeof(u_int));
3364: qzero(p, (SRMMU_L2SIZE * sizeof(u_int)) * NKREG);
1.77 pk 3365: kernel_segtable_store = (u_int *)p;
1.122 pk 3366: p += (SRMMU_L2SIZE * sizeof(u_int)) * NKREG;
1.77 pk 3367:
1.122 pk 3368: p = (caddr_t) roundup((u_int)p, SRMMU_L3SIZE * sizeof(u_int));
3369: /* zero it: all will be SRMMU_TEINVALID */
3370: qzero(p, ((SRMMU_L3SIZE * sizeof(u_int)) * NKREG) * NSEGRG);
1.77 pk 3371: kernel_pagtable_store = (u_int *)p;
1.122 pk 3372: p += ((SRMMU_L3SIZE * sizeof(u_int)) * NKREG) * NSEGRG;
1.77 pk 3373:
3374: /* Round to next page and mark end of stolen pages */
3375: p = (caddr_t)(((u_int)p + NBPG - 1) & ~PGOFSET);
1.79 pk 3376: pagetables_end = p;
1.124 pk 3377: unavail_gap_end = (paddr_t)p - KERNBASE;
1.71 pk 3378:
3379: /*
3380: * Since we've statically allocated space to map the entire kernel,
3381: * we might as well pre-wire the mappings to save time in pmap_enter.
3382: * This also gets around nasty problems with caching of L1/L2 ptp's.
3383: *
3384: * XXX WHY DO WE HAVE THIS CACHING PROBLEM WITH L1/L2 PTPS????? %%%
3385: */
3386:
3387: pmap_kernel()->pm_reg_ptps = (int *) kernel_regtable_store;
3388: pmap_kernel()->pm_reg_ptps_pa =
3389: VA2PA((caddr_t)pmap_kernel()->pm_reg_ptps);
3390:
3391: /* Install L1 table in context 0 */
1.79 pk 3392: setpgt4m(&cpuinfo.ctx_tbl[0],
3393: (pmap_kernel()->pm_reg_ptps_pa >> SRMMU_PPNPASHIFT) | SRMMU_TEPTD);
1.71 pk 3394:
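	/*
	 * Editor-added note: the setpgt4m() call above stores a page table
	 * descriptor (PTD): the physical address of the next-level table
	 * shifted into the table-pointer field (SRMMU_PPNPASHIFT) and
	 * tagged with SRMMU_TEPTD.  The loop below installs the L2 and L3
	 * links using the same encoding.
	 */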
1.96 pk 3395: for (reg = 0; reg < NKREG; reg++) {
1.77 pk 3396: struct regmap *rp;
1.71 pk 3397: caddr_t kphyssegtbl;
1.133 pk 3398: u_int ptd;
1.71 pk 3399:
3400: /*
1.77 pk 3401: * Entering new region; install & build segtbl
1.71 pk 3402: */
3403:
1.96 pk 3404: rp = &pmap_kernel()->pm_regmap[reg + VA_VREG(KERNBASE)];
1.71 pk 3405:
3406: kphyssegtbl = (caddr_t)
1.96 pk 3407: &kernel_segtable_store[reg * SRMMU_L2SIZE];
1.71 pk 3408:
1.133 pk 3409: ptd = (VA2PA(kphyssegtbl) >> SRMMU_PPNPASHIFT) | SRMMU_TEPTD;
1.96 pk 3410: setpgt4m(&pmap_kernel()->pm_reg_ptps[reg + VA_VREG(KERNBASE)],
1.133 pk 3411: ptd);
1.71 pk 3412:
3413: rp->rg_seg_ptps = (int *)kphyssegtbl;
3414:
1.133 pk 3415: if (reg + VA_VREG(KERNBASE) == VA_VREG(CPUINFO_VA)) {
3416: /*
3417: * Store the segment page table descriptor
3418: * corresponding to CPUINFO_VA, so we can install
3419: * this CPU-dependent translation into user pmaps
3420: * at context switch time.
3421: */
3422: cpuinfo.cpu_seg_ptd = ptd;
3423: }
3424:
1.71 pk 3425: if (rp->rg_segmap == NULL) {
3426: printf("rp->rg_segmap == NULL!\n");
1.96 pk 3427: rp->rg_segmap = &kernel_segmap_store[reg * NSEGRG];
1.71 pk 3428: }
3429:
3430: for (seg = 0; seg < NSEGRG; seg++) {
1.77 pk 3431: struct segmap *sp;
1.71 pk 3432: caddr_t kphyspagtbl;
3433:
3434: rp->rg_nsegmap++;
3435:
3436: sp = &rp->rg_segmap[seg];
3437: kphyspagtbl = (caddr_t)
3438: &kernel_pagtable_store
1.96 pk 3439: [((reg * NSEGRG) + seg) * SRMMU_L3SIZE];
1.71 pk 3440:
1.77 pk 3441: setpgt4m(&rp->rg_seg_ptps[seg],
3442: (VA2PA(kphyspagtbl) >> SRMMU_PPNPASHIFT) |
3443: SRMMU_TEPTD);
1.71 pk 3444: sp->sg_pte = (int *) kphyspagtbl;
3445: }
3446: }
3447:
3448: /*
3449: * Preserve the monitor ROM's reserved VM region, so that
3450: * we can use L1-A or the monitor's debugger.
1.55 pk 3451: */
1.77 pk 3452: mmu_reservemon4m(&kernel_pmap_store);
1.55 pk 3453:
3454: /*
1.77 pk 3455: * Reserve virtual address space for two mappable MD pages
3456: * for pmap_zero_page and pmap_copy_page, one MI page
3457: * for /dev/mem, and some more for dumpsys().
1.55 pk 3458: */
1.77 pk 3459: q = p;
1.55 pk 3460: vpage[0] = p, p += NBPG;
3461: vpage[1] = p, p += NBPG;
3462: vmmap = p, p += NBPG;
3463: p = reserve_dumppages(p);
3464:
1.101 pk 3465: 	/* Find PTE locations of vpage[] to optimize zero_fill() et al. */
3466: for (i = 0; i < 2; i++) {
3467: struct regmap *rp;
3468: struct segmap *sp;
3469: rp = &pmap_kernel()->pm_regmap[VA_VREG(vpage[i])];
3470: sp = &rp->rg_segmap[VA_VSEG(vpage[i])];
3471: vpage_pte[i] = &sp->sg_pte[VA_SUN4M_VPG(vpage[i])];
3472: }
3473:
1.122 pk 3474: /* Allocate virtual memory for pv_table[]. */
1.55 pk 3475: pv_table = (struct pvlist *)p;
3476: p += round_page(sizeof(struct pvlist) * atop(avail_end - avail_start));
3477:
1.124 pk 3478: virtual_avail = (vaddr_t)p;
1.55 pk 3479: virtual_end = VM_MAX_KERNEL_ADDRESS;
3480:
1.77 pk 3481: p = q; /* retract to first free phys */
1.55 pk 3482:
1.69 pk 3483: /*
3484: * Set up the ctxinfo structures (freelist of contexts)
1.55 pk 3485: */
3486: ci->c_pmap = pmap_kernel();
3487: ctx_freelist = ci + 1;
3488: for (i = 1; i < ncontext; i++) {
3489: ci++;
3490: ci->c_nextfree = ci + 1;
3491: }
3492: ci->c_nextfree = NULL;
3493: ctx_kick = 0;
3494: ctx_kickdir = -1;
3495:
1.69 pk 3496: /*
3497: * Now map the kernel into our new set of page tables, then
1.55 pk 3498: * (finally) switch over to our running page tables.
3499: * We map from KERNBASE to p into context 0's page tables (and
3500: * the kernel pmap).
3501: */
3502: #ifdef DEBUG /* Sanity checks */
3503: if ((u_int)p % NBPG != 0)
1.69 pk 3504: panic("pmap_bootstrap4m: p misaligned?!?");
1.55 pk 3505: if (KERNBASE % NBPRG != 0)
1.69 pk 3506: panic("pmap_bootstrap4m: KERNBASE not region-aligned");
1.55 pk 3507: #endif
1.69 pk 3508:
3509: for (q = (caddr_t) KERNBASE; q < p; q += NBPG) {
1.77 pk 3510: struct regmap *rp;
3511: struct segmap *sp;
3512: int pte;
3513:
1.79 pk 3514: if ((int)q >= KERNBASE + avail_start &&
1.108 pk 3515: (int)q < KERNBASE + unavail_gap_start)
1.77 pk 3516: /* This gap is part of VM-managed pages */
3517: continue;
3518:
1.69 pk 3519: /*
1.71 pk 3520: * Now install entry for current page.
1.69 pk 3521: */
1.77 pk 3522: rp = &pmap_kernel()->pm_regmap[VA_VREG(q)];
3523: sp = &rp->rg_segmap[VA_VSEG(q)];
3524: sp->sg_npte++;
3525:
3526: pte = ((int)q - KERNBASE) >> SRMMU_PPNPASHIFT;
1.122 pk 3527: pte |= PPROT_N_RX | SRMMU_TEPTE;
3528:
3529: /* Deal with the cacheable bit for pagetable memory */
3530: if ((cpuinfo.flags & CPUFLG_CACHEPAGETABLES) != 0 ||
3531: q < pagetables_start || q >= pagetables_end)
3532: pte |= SRMMU_PG_C;
3533:
1.77 pk 3534: 		/* write-protect kernel text; anything outside [trapbase, etext) is writable */
3535: if (q < (caddr_t) trapbase || q >= etext)
3536: pte |= PPROT_WRITE;
3537:
3538: setpgt4m(&sp->sg_pte[VA_VPG(q)], pte);
1.69 pk 3539: }
3540:
1.77 pk 3541: #if 0
1.55 pk 3542: /*
3543: * We also install the kernel mapping into all other contexts by
1.69 pk 3544: * copying the context 0 L1 PTP from cpuinfo.ctx_tbl[0] into the
1.55 pk 3545: * remainder of the context table (i.e. we share the kernel page-
3546: * tables). Each user pmap automatically gets the kernel mapped
3547: * into it when it is created, but we do this extra step early on
3548: * in case some twit decides to switch to a context with no user
3549: * pmap associated with it.
3550: */
3551: for (i = 1; i < ncontext; i++)
1.69 pk 3552: cpuinfo.ctx_tbl[i] = cpuinfo.ctx_tbl[0];
3553: #endif
1.55 pk 3554:
1.100 pk 3555: if ((cpuinfo.flags & CPUFLG_CACHEPAGETABLES) == 0)
3556: /* Flush page tables from cache */
3557: pcache_flush(pagetables_start, (caddr_t)VA2PA(pagetables_start),
3558: pagetables_end - pagetables_start);
3559:
1.55 pk 3560: /*
3561: * Now switch to kernel pagetables (finally!)
3562: */
1.69 pk 3563: mmu_install_tables(&cpuinfo);
1.79 pk 3564:
1.107 pk 3565: pmap_page_upload();
1.69 pk 3566: }
3567:
1.97 pk 3568: static u_long prom_ctxreg;
3569:
1.69 pk 3570: void
3571: mmu_install_tables(sc)
1.127 pk 3572: struct cpu_info *sc;
1.69 pk 3573: {
3574:
3575: #ifdef DEBUG
3576: printf("pmap_bootstrap: installing kernel page tables...");
3577: #endif
1.71 pk 3578: setcontext4m(0); /* paranoia? %%%: Make 0x3 a define! below */
1.69 pk 3579:
3580: /* Enable MMU tablewalk caching, flush TLB */
3581: if (sc->mmu_enable != 0)
3582: sc->mmu_enable();
3583:
3584: tlb_flush_all();
1.97 pk 3585: prom_ctxreg = lda(SRMMU_CXTPTR, ASI_SRMMU);
1.69 pk 3586:
3587: sta(SRMMU_CXTPTR, ASI_SRMMU,
1.128 pk 3588: (sc->ctx_tbl_pa >> SRMMU_PPNPASHIFT) & ~0x3);
1.69 pk 3589:
3590: tlb_flush_all();
3591:
3592: #ifdef DEBUG
3593: printf("done.\n");
3594: #endif
3595: }
1.55 pk 3596:
1.97 pk 3597: void srmmu_restore_prom_ctx __P((void));
3598:
3599: void
3600: srmmu_restore_prom_ctx()
3601: {
3602: tlb_flush_all();
3603: sta(SRMMU_CXTPTR, ASI_SRMMU, prom_ctxreg);
3604: tlb_flush_all();
3605: }
3606:
1.69 pk 3607: /*
1.128 pk 3608: * Allocate per-CPU page tables. One region, segment and page table
3609: * is needed to map CPUINFO_VA to different physical addresses on
3610: * each CPU. Since the kernel region and segment tables are all
3611:  * pre-wired (in pmap_bootstrap4m() above) and we also assume that the
3612: * first segment (256K) of kernel space is fully populated with
3613: * pages from the start, these per-CPU tables will never need
3614: * to be updated when mapping kernel virtual memory.
3615: *
1.69 pk 3616: * Note: this routine is called in the context of the boot CPU
3617: * during autoconfig.
3618: */
3619: void
3620: pmap_alloc_cpu(sc)
1.127 pk 3621: struct cpu_info *sc;
1.69 pk 3622: {
1.128 pk 3623: vaddr_t va;
3624: u_int *ctxtable, *regtable, *segtable, *pagtable;
1.72 pk 3625: int vr, vs, vpg;
3626: struct regmap *rp;
3627: struct segmap *sp;
1.128 pk 3628: int ctxsize;
3629: struct pglist mlist;
3630: vm_page_t m;
3631: int cachebit;
3632:
1.129 pk 3633: cachebit = (sc->flags & CPUFLG_CACHEPAGETABLES) != 0;
1.128 pk 3634:
3635: /*
3636: 	 * Allocate properly aligned and contiguous physical memory
3637: * for the context table.
3638: */
3639: TAILQ_INIT(&mlist);
3640: ctxsize = sc->mmu_ncontext * sizeof(int);
3641: if (uvm_pglistalloc(ctxsize, vm_first_phys, vm_first_phys+vm_num_phys,
3642: ctxsize, 0, &mlist, 1, 0) != 0)
3643: panic("pmap_alloc_cpu: no memory");
3644:
3645: va = uvm_km_valloc(kernel_map, ctxsize);
3646: if (va == 0)
3647: panic("pmap_alloc_cpu: no memory");
3648:
3649: ctxtable = (int *)va;
3650:
3651: m = TAILQ_FIRST(&mlist);
3652: sc->ctx_tbl_pa = VM_PAGE_TO_PHYS(m);
3653:
3654: /* Map the pages */
3655: for (; m != NULL; m = TAILQ_NEXT(m,pageq)) {
3656: paddr_t pa = VM_PAGE_TO_PHYS(m);
1.129 pk 3657: pmap_enter(pmap_kernel(), va, pa | (cachebit ? 0 : PMAP_NC),
1.128 pk 3658: VM_PROT_READ|VM_PROT_WRITE, 1);
3659: va += NBPG;
3660: }
1.72 pk 3661:
1.128 pk 3662: /*
3663: * Get memory for a region, segment and page table.
3664: */
3665: va = uvm_km_alloc(kernel_map, NBPG);
3666: if (va == 0)
3667: panic("pmap_alloc_cpu: no memory");
3668: if (cachebit == 0)
3669: kvm_uncache((caddr_t)va, 1);
3670:
3671: regtable = (u_int *)va;
3672: segtable = regtable + SRMMU_L1SIZE;
3673: pagtable = segtable + SRMMU_L2SIZE;
1.72 pk 3674:
1.133 pk 3675: /*
3676: * Store the segment page table descriptor corresponding
3677: * to CPUINFO_VA, so we can install this CPU-dependent
3678: * translation into user pmaps at context switch time.
3679: * Note that the region table we allocate here (`regtable')
3680: * is only used in context 0 on this CPU. Non-zero context
3681: 	 * numbers always have a user pmap associated with them.
3682: * That pmap's region table is fixed up at context switch
3683: * time so that the entry corresponding to CPUINFO_VA points
3684: * at the correct CPU's segment table (and hence the per-CPU
3685: * page table).
3686: */
3687: sc->cpu_seg_ptd =
3688: (VA2PA((caddr_t)segtable) >> SRMMU_PPNPASHIFT) | SRMMU_TEPTD;
3689:
1.72 pk 3690: vr = VA_VREG(CPUINFO_VA);
3691: vs = VA_VSEG(CPUINFO_VA);
3692: vpg = VA_VPG(CPUINFO_VA);
3693: rp = &pmap_kernel()->pm_regmap[vr];
3694: sp = &rp->rg_segmap[vs];
3695:
3696: /*
3697: * Copy page tables, then modify entry for CPUINFO_VA so that
3698: * it points at the per-CPU pages.
3699: */
1.69 pk 3700:
1.128 pk 3701: setpgt4m(&ctxtable[0],
3702: (VA2PA((caddr_t)regtable) >> SRMMU_PPNPASHIFT) | SRMMU_TEPTD);
3703:
3704: qcopy(pmap_kernel()->pm_reg_ptps, regtable, SRMMU_L1SIZE * sizeof(int));
3705: 	setpgt4m(&regtable[vr],
3706: (VA2PA((caddr_t)segtable) >> SRMMU_PPNPASHIFT) | SRMMU_TEPTD);
3707:
3708: qcopy(rp->rg_seg_ptps, segtable, SRMMU_L2SIZE * sizeof(int));
3709: setpgt4m(&segtable[vs],
3710: (VA2PA((caddr_t)pagtable) >> SRMMU_PPNPASHIFT) | SRMMU_TEPTD);
3711:
3712: qcopy(sp->sg_pte, pagtable, SRMMU_L3SIZE * sizeof(int));
3713: setpgt4m(&pagtable[vpg],
3714: (VA2PA((caddr_t)sc) >> SRMMU_PPNPASHIFT) |
1.129 pk 3715: (SRMMU_TEPTE | PPROT_N_RWX | SRMMU_PG_C));
1.72 pk 3716:
3717: sc->ctx_tbl = ctxtable;
1.55 pk 3718: }
1.97 pk 3719: #endif /* SUN4M */
1.55 pk 3720:
1.69 pk 3721:
1.55 pk 3722: void
3723: pmap_init()
3724: {
3725:
3726: if (PAGE_SIZE != NBPG)
3727: 		panic("pmap_init: PAGE_SIZE != NBPG");
3728:
1.122 pk 3729: /* Map pv_table[] */
3730: (void)pv_table_map(avail_start, 1);
1.55 pk 3731:
1.38 pk 3732: vm_first_phys = avail_start;
3733: vm_num_phys = avail_end - avail_start;
1.121 pk 3734:
1.122 pk 3735: /* Setup a pool for additional pvlist structures */
3736: pool_init(&pv_pool, sizeof(struct pvlist), 0, 0, 0, "pvtable", 0,
1.121 pk 3737: NULL, NULL, 0);
3738:
1.134 thorpej 3739: /* Setup a pool for pmap structures. */
3740: pool_init(&pmap_pmap_pool, sizeof(struct pmap), 0, 0, 0, "pmappl",
3741: 0, pool_page_alloc_nointr, pool_page_free_nointr, M_VMPMAP);
3742:
1.121 pk 3743: #if defined(SUN4M)
3744: if (CPU_ISSUN4M) {
3745: /*
3746: * The SRMMU only ever needs chunks in one of two sizes:
3747: * 1024 (for region level tables) and 256 (for segment
3748: * and page level tables).
3749: */
3750: int n;
3751:
3752: n = SRMMU_L1SIZE * sizeof(int);
3753: pool_init(&L1_pool, n, n, 0, 0, "L1 pagetable", 0,
3754: pgt_page_alloc, pgt_page_free, 0);
3755:
3756: n = SRMMU_L2SIZE * sizeof(int);
3757: pool_init(&L23_pool, n, n, 0, 0, "L2/L3 pagetable", 0,
3758: pgt_page_alloc, pgt_page_free, 0);
3759: }
3760: #endif
1.36 pk 3761: }
3762:
1.1 deraadt 3763:
3764: /*
3765: * Map physical addresses into kernel VM.
3766: */
1.124 pk 3767: vaddr_t
1.1 deraadt 3768: pmap_map(va, pa, endpa, prot)
1.124 pk 3769: vaddr_t va;
3770: paddr_t pa, endpa;
3771: int prot;
1.1 deraadt 3772: {
1.124 pk 3773: int pgsize = PAGE_SIZE;
1.1 deraadt 3774:
3775: while (pa < endpa) {
1.42 mycroft 3776: pmap_enter(pmap_kernel(), va, pa, prot, 1);
1.1 deraadt 3777: va += pgsize;
3778: pa += pgsize;
3779: }
3780: return (va);
3781: }
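#if 0
/*
 * Editor-added usage sketch (not part of the original source); the
 * physical address below is a made-up placeholder.  pmap_map() enters
 * one mapping per page and returns the next unused virtual address,
 * so a typical bootstrap-time caller advances its VA cursor with the
 * return value:
 */
	vaddr_t va = virtual_avail;
	paddr_t pa = 0x10000000;	/* hypothetical device base */

	va = pmap_map(va, pa, pa + 4 * NBPG, VM_PROT_READ | VM_PROT_WRITE);
	virtual_avail = va;		/* four pages of VA space consumed */
#endif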
3782:
3783: /*
3784: * Create and return a physical map.
3785: *
3786: * If size is nonzero, the map is useless. (ick)
3787: */
3788: struct pmap *
3789: pmap_create(size)
1.124 pk 3790: vsize_t size;
1.1 deraadt 3791: {
1.124 pk 3792: struct pmap *pm;
1.1 deraadt 3793:
3794: if (size)
3795: return (NULL);
1.134 thorpej 3796: pm = pool_get(&pmap_pmap_pool, PR_WAITOK);
1.1 deraadt 3797: #ifdef DEBUG
3798: if (pmapdebug & PDB_CREATE)
1.66 christos 3799: printf("pmap_create: created %p\n", pm);
1.1 deraadt 3800: #endif
1.134 thorpej 3801: bzero(pm, sizeof *pm);
1.1 deraadt 3802: pmap_pinit(pm);
3803: return (pm);
3804: }
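#if 0
/*
 * Editor-added usage sketch (not part of the original source): a
 * typical caller obtains a fresh pmap and later drops its reference,
 * at which point pmap_release() tears it down:
 */
	struct pmap *pm;

	pm = pmap_create(0);		/* size must be 0, or NULL is returned */
	/* ... pmap_enter()/pmap_remove() mappings as needed ... */
	pmap_destroy(pm);		/* refcount drops to 0 -> pmap_release() */
#endif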
3805:
3806: /*
3807: * Initialize a preallocated and zeroed pmap structure,
3808: * such as one in a vmspace structure.
3809: */
3810: void
3811: pmap_pinit(pm)
1.124 pk 3812: struct pmap *pm;
1.1 deraadt 3813: {
1.124 pk 3814: int size;
1.43 pk 3815: void *urp;
1.1 deraadt 3816:
3817: #ifdef DEBUG
3818: if (pmapdebug & PDB_CREATE)
1.66 christos 3819: printf("pmap_pinit(%p)\n", pm);
1.1 deraadt 3820: #endif
1.13 pk 3821:
1.43 pk 3822: size = NUREG * sizeof(struct regmap);
1.55 pk 3823:
1.43 pk 3824: pm->pm_regstore = urp = malloc(size, M_VMPMAP, M_WAITOK);
1.55 pk 3825: qzero((caddr_t)urp, size);
1.1 deraadt 3826: /* pm->pm_ctx = NULL; */
3827: simple_lock_init(&pm->pm_lock);
3828: pm->pm_refcount = 1;
1.43 pk 3829: pm->pm_regmap = urp;
1.55 pk 3830:
3831: if (CPU_ISSUN4OR4C) {
3832: TAILQ_INIT(&pm->pm_seglist);
1.69 pk 3833: #if defined(SUN4_MMU3L)
1.55 pk 3834: TAILQ_INIT(&pm->pm_reglist);
1.69 pk 3835: if (HASSUN4_MMU3L) {
3836: int i;
3837: for (i = NUREG; --i >= 0;)
3838: pm->pm_regmap[i].rg_smeg = reginval;
3839: }
1.43 pk 3840: #endif
1.100 pk 3841: pm->pm_gap_end = VA_VREG(VM_MAXUSER_ADDRESS);
1.55 pk 3842: }
3843: #if defined(SUN4M)
3844: else {
1.79 pk 3845: int i;
3846:
1.55 pk 3847: /*
3848: * We must allocate and initialize hardware-readable (MMU)
3849: * pagetables. We must also map the kernel regions into this
3850: * pmap's pagetables, so that we can access the kernel from
1.89 pk 3851: * this user context.
1.55 pk 3852: *
3853: * Note: pm->pm_regmap's have been zeroed already, so we don't
3854: * need to explicitly mark them as invalid (a null
3855: * rg_seg_ptps pointer indicates invalid for the 4m)
3856: */
1.121 pk 3857: urp = pool_get(&L1_pool, PR_WAITOK);
1.55 pk 3858: pm->pm_reg_ptps = urp;
3859: pm->pm_reg_ptps_pa = VA2PA(urp);
1.89 pk 3860: for (i = 0; i < NUREG; i++)
3861: setpgt4m(&pm->pm_reg_ptps[i], SRMMU_TEINVALID);
1.55 pk 3862:
1.79 pk 3863: /* Copy kernel regions */
3864: for (i = 0; i < NKREG; i++) {
1.133 pk 3865: int *kpt, *upt;
3866: kpt = &pmap_kernel()->pm_reg_ptps[VA_VREG(KERNBASE)];
3867: upt = &pm->pm_reg_ptps[VA_VREG(KERNBASE)];
3868: setpgt4m(&upt[i], kpt[i]);
1.79 pk 3869: }
1.55 pk 3870: }
3871: #endif
3872:
1.43 pk 3873: return;
1.1 deraadt 3874: }
3875:
3876: /*
3877: * Retire the given pmap from service.
3878: * Should only be called if the map contains no valid mappings.
3879: */
3880: void
3881: pmap_destroy(pm)
1.124 pk 3882: struct pmap *pm;
1.1 deraadt 3883: {
3884: int count;
3885:
3886: if (pm == NULL)
3887: return;
3888: #ifdef DEBUG
3889: if (pmapdebug & PDB_DESTROY)
1.66 christos 3890: printf("pmap_destroy(%p)\n", pm);
1.1 deraadt 3891: #endif
3892: simple_lock(&pm->pm_lock);
3893: count = --pm->pm_refcount;
3894: simple_unlock(&pm->pm_lock);
3895: if (count == 0) {
3896: pmap_release(pm);
1.134 thorpej 3897: pool_put(&pmap_pmap_pool, pm);
1.1 deraadt 3898: }
3899: }
3900:
3901: /*
3902: * Release any resources held by the given physical map.
3903: * Called when a pmap initialized by pmap_pinit is being released.
3904: */
3905: void
3906: pmap_release(pm)
1.124 pk 3907: struct pmap *pm;
1.1 deraadt 3908: {
1.124 pk 3909: union ctxinfo *c;
3910: int s = splpmap(); /* paranoia */
1.1 deraadt 3911:
3912: #ifdef DEBUG
3913: if (pmapdebug & PDB_DESTROY)
1.66 christos 3914: printf("pmap_release(%p)\n", pm);
1.1 deraadt 3915: #endif
1.55 pk 3916:
3917: if (CPU_ISSUN4OR4C) {
1.69 pk 3918: #if defined(SUN4_MMU3L)
1.55 pk 3919: if (pm->pm_reglist.tqh_first)
3920: panic("pmap_release: region list not empty");
1.43 pk 3921: #endif
1.55 pk 3922: if (pm->pm_seglist.tqh_first)
3923: panic("pmap_release: segment list not empty");
3924:
3925: if ((c = pm->pm_ctx) != NULL) {
3926: if (pm->pm_ctxnum == 0)
3927: panic("pmap_release: releasing kernel");
3928: ctx_free(pm);
3929: }
1.1 deraadt 3930: }
1.102 pk 3931:
3932: #if defined(SUN4M)
3933: if (CPU_ISSUN4M) {
3934: if ((c = pm->pm_ctx) != NULL) {
3935: if (pm->pm_ctxnum == 0)
3936: panic("pmap_release: releasing kernel");
3937: ctx_free(pm);
3938: }
1.121 pk 3939: pool_put(&L1_pool, pm->pm_reg_ptps);
1.102 pk 3940: pm->pm_reg_ptps = NULL;
3941: pm->pm_reg_ptps_pa = 0;
3942: }
3943: #endif
1.1 deraadt 3944: splx(s);
1.55 pk 3945:
1.43 pk 3946: #ifdef DEBUG
1.55 pk 3947: if (pmapdebug) {
1.43 pk 3948: int vs, vr;
3949: for (vr = 0; vr < NUREG; vr++) {
3950: struct regmap *rp = &pm->pm_regmap[vr];
3951: if (rp->rg_nsegmap != 0)
1.66 christos 3952: printf("pmap_release: %d segments remain in "
1.43 pk 3953: "region %d\n", rp->rg_nsegmap, vr);
3954: if (rp->rg_segmap != NULL) {
1.66 christos 3955: printf("pmap_release: segments still "
1.43 pk 3956: "allocated in region %d\n", vr);
3957: for (vs = 0; vs < NSEGRG; vs++) {
3958: struct segmap *sp = &rp->rg_segmap[vs];
3959: if (sp->sg_npte != 0)
1.66 christos 3960: printf("pmap_release: %d ptes "
1.43 pk 3961: "remain in segment %d\n",
3962: sp->sg_npte, vs);
3963: if (sp->sg_pte != NULL) {
1.66 christos 3964: printf("pmap_release: ptes still "
1.43 pk 3965: "allocated in segment %d\n", vs);
3966: }
3967: }
3968: }
3969: }
3970: }
3971: #endif
1.102 pk 3972:
1.43 pk 3973: if (pm->pm_regstore)
1.49 pk 3974: free(pm->pm_regstore, M_VMPMAP);
1.1 deraadt 3975: }
3976:
3977: /*
3978: * Add a reference to the given pmap.
3979: */
3980: void
3981: pmap_reference(pm)
3982: struct pmap *pm;
3983: {
3984:
3985: if (pm != NULL) {
3986: simple_lock(&pm->pm_lock);
3987: pm->pm_refcount++;
3988: simple_unlock(&pm->pm_lock);
3989: }
3990: }
3991:
3992: /*
3993: * Remove the given range of mapping entries.
3994: * The starting and ending addresses are already rounded to pages.
3995: * Sheer lunacy: pmap_remove is often asked to remove nonexistent
3996: * mappings.
3997: */
3998: void
3999: pmap_remove(pm, va, endva)
1.124 pk 4000: struct pmap *pm;
4001: vaddr_t va, endva;
1.1 deraadt 4002: {
1.124 pk 4003: vaddr_t nva;
4004: int vr, vs, s, ctx;
4005: void (*rm)(struct pmap *, vaddr_t, vaddr_t, int, int);
1.1 deraadt 4006:
4007: if (pm == NULL)
4008: return;
1.13 pk 4009:
1.1 deraadt 4010: #ifdef DEBUG
4011: if (pmapdebug & PDB_REMOVE)
1.91 fair 4012: printf("pmap_remove(%p, 0x%lx, 0x%lx)\n", pm, va, endva);
1.1 deraadt 4013: #endif
4014:
1.42 mycroft 4015: if (pm == pmap_kernel()) {
1.1 deraadt 4016: /*
4017: * Removing from kernel address space.
4018: */
4019: rm = pmap_rmk;
4020: } else {
4021: /*
4022: * Removing from user address space.
4023: */
4024: write_user_windows();
4025: rm = pmap_rmu;
4026: }
4027:
4028: ctx = getcontext();
4029: s = splpmap(); /* XXX conservative */
4030: simple_lock(&pm->pm_lock);
4031: for (; va < endva; va = nva) {
4032: /* do one virtual segment at a time */
1.43 pk 4033: vr = VA_VREG(va);
4034: vs = VA_VSEG(va);
4035: nva = VSTOVA(vr, vs + 1);
1.1 deraadt 4036: if (nva == 0 || nva > endva)
4037: nva = endva;
1.76 pk 4038: if (pm->pm_regmap[vr].rg_nsegmap != 0)
4039: (*rm)(pm, va, nva, vr, vs);
1.1 deraadt 4040: }
4041: simple_unlock(&pm->pm_lock);
4042: splx(s);
4043: setcontext(ctx);
4044: }
4045:
4046: /*
4047: * The following magic number was chosen because:
4048: * 1. It is the same amount of work to cache_flush_page 4 pages
4049: * as to cache_flush_segment 1 segment (so at 4 the cost of
4050: * flush is the same).
4051: * 2. Flushing extra pages is bad (causes cache not to work).
4052: * 3. The current code, which malloc()s 5 pages for each process
4053: * for a user vmspace/pmap, almost never touches all 5 of those
4054: * pages.
4055: */
1.13 pk 4056: #if 0
4057: #define PMAP_RMK_MAGIC (cacheinfo.c_hwflush?5:64) /* if > magic, use cache_flush_segment */
4058: #else
1.1 deraadt 4059: #define PMAP_RMK_MAGIC 5 /* if > magic, use cache_flush_segment */
1.13 pk 4060: #endif
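/*
 * Editor-added illustration: when pmap_rmk() removes, say, a 3-page
 * range, npg = 3 <= PMAP_RMK_MAGIC, so each (cacheable) page is
 * flushed individually; for a 16-page range, npg > PMAP_RMK_MAGIC and
 * a single cache_flush_segment() replaces 16 per-page flushes.
 */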
1.1 deraadt 4061:
4062: /*
4063: * Remove a range contained within a single segment.
4064: * These are egregiously complicated routines.
4065: */
4066:
1.55 pk 4067: #if defined(SUN4) || defined(SUN4C)
4068:
1.43 pk 4069: /* remove from kernel */
1.55 pk 4070: /*static*/ void
4071: pmap_rmk4_4c(pm, va, endva, vr, vs)
1.124 pk 4072: struct pmap *pm;
4073: vaddr_t va, endva;
4074: int vr, vs;
4075: {
4076: int i, tpte, perpage, npg;
4077: struct pvlist *pv;
4078: int nleft, pmeg;
1.43 pk 4079: struct regmap *rp;
4080: struct segmap *sp;
4081:
4082: rp = &pm->pm_regmap[vr];
4083: sp = &rp->rg_segmap[vs];
4084:
4085: if (rp->rg_nsegmap == 0)
4086: return;
4087:
4088: #ifdef DEBUG
4089: if (rp->rg_segmap == NULL)
4090: panic("pmap_rmk: no segments");
4091: #endif
4092:
4093: if ((nleft = sp->sg_npte) == 0)
4094: return;
4095:
4096: pmeg = sp->sg_pmeg;
1.1 deraadt 4097:
4098: #ifdef DEBUG
4099: if (pmeg == seginval)
4100: panic("pmap_rmk: not loaded");
4101: if (pm->pm_ctx == NULL)
4102: panic("pmap_rmk: lost context");
4103: #endif
4104:
1.71 pk 4105: setcontext4(0);
1.1 deraadt 4106: /* decide how to flush cache */
4107: npg = (endva - va) >> PGSHIFT;
4108: if (npg > PMAP_RMK_MAGIC) {
4109: /* flush the whole segment */
4110: perpage = 0;
1.69 pk 4111: cache_flush_segment(vr, vs);
1.1 deraadt 4112: } else {
4113: /* flush each page individually; some never need flushing */
1.69 pk 4114: perpage = (CACHEINFO.c_vactype != VAC_NONE);
1.1 deraadt 4115: }
4116: while (va < endva) {
1.55 pk 4117: tpte = getpte4(va);
1.1 deraadt 4118: if ((tpte & PG_V) == 0) {
1.63 pk 4119: va += NBPG;
1.1 deraadt 4120: continue;
4121: }
1.35 pk 4122: if ((tpte & PG_TYPE) == PG_OBMEM) {
4123: /* if cacheable, flush page as needed */
4124: if (perpage && (tpte & PG_NC) == 0)
1.1 deraadt 4125: cache_flush_page(va);
1.60 pk 4126: i = ptoa(tpte & PG_PFNUM);
1.1 deraadt 4127: if (managed(i)) {
4128: pv = pvhead(i);
1.55 pk 4129: pv->pv_flags |= MR4_4C(tpte);
1.58 pk 4130: pv_unlink4_4c(pv, pm, va);
1.1 deraadt 4131: }
4132: }
4133: nleft--;
1.131 pk 4134: #ifdef DIAGNOSTIC
4135: if (nleft < 0)
4136: panic("pmap_rmk: too many PTEs in segment; "
4137: "va 0x%lx; endva 0x%lx", va, endva);
4138: #endif
1.55 pk 4139: setpte4(va, 0);
1.1 deraadt 4140: va += NBPG;
4141: }
4142:
4143: /*
4144: * If the segment is all gone, remove it from everyone and
4145: * free the MMU entry.
4146: */
1.43 pk 4147: if ((sp->sg_npte = nleft) == 0) {
4148: va = VSTOVA(vr,vs); /* retract */
1.69 pk 4149: #if defined(SUN4_MMU3L)
4150: if (HASSUN4_MMU3L)
1.1 deraadt 4151: setsegmap(va, seginval);
1.43 pk 4152: else
4153: #endif
4154: for (i = ncontext; --i >= 0;) {
1.71 pk 4155: setcontext4(i);
1.43 pk 4156: setsegmap(va, seginval);
4157: }
4158: me_free(pm, pmeg);
4159: if (--rp->rg_nsegmap == 0) {
1.69 pk 4160: #if defined(SUN4_MMU3L)
4161: if (HASSUN4_MMU3L) {
1.43 pk 4162: for (i = ncontext; --i >= 0;) {
1.71 pk 4163: setcontext4(i);
1.43 pk 4164: setregmap(va, reginval);
4165: }
4166: /* note: context is 0 */
4167: region_free(pm, rp->rg_smeg);
4168: }
4169: #endif
1.1 deraadt 4170: }
4171: }
4172: }
4173:
1.55 pk 4174: #endif /* sun4, sun4c */
1.1 deraadt 4175:
1.55 pk 4176: #if defined(SUN4M) /* 4M version of pmap_rmk */
4177: /* remove from kernel (4m)*/
4178: /*static*/ void
4179: pmap_rmk4m(pm, va, endva, vr, vs)
1.124 pk 4180: struct pmap *pm;
4181: vaddr_t va, endva;
4182: int vr, vs;
4183: {
4184: int i, tpte, perpage, npg;
4185: struct pvlist *pv;
4186: int nleft;
1.43 pk 4187: struct regmap *rp;
4188: struct segmap *sp;
4189:
4190: rp = &pm->pm_regmap[vr];
1.55 pk 4191: sp = &rp->rg_segmap[vs];
4192:
1.43 pk 4193: if (rp->rg_nsegmap == 0)
4194: return;
1.55 pk 4195:
4196: #ifdef DEBUG
1.43 pk 4197: if (rp->rg_segmap == NULL)
1.55 pk 4198: panic("pmap_rmk: no segments");
4199: #endif
1.43 pk 4200:
4201: if ((nleft = sp->sg_npte) == 0)
4202: return;
4203:
1.55 pk 4204: #ifdef DEBUG
4205: if (sp->sg_pte == NULL || rp->rg_seg_ptps == NULL)
4206: panic("pmap_rmk: segment/region does not exist");
4207: if (pm->pm_ctx == NULL)
4208: panic("pmap_rmk: lost context");
4209: #endif
1.43 pk 4210:
1.71 pk 4211: setcontext4m(0);
1.55 pk 4212: /* decide how to flush cache */
4213: npg = (endva - va) >> PGSHIFT;
4214: if (npg > PMAP_RMK_MAGIC) {
4215: /* flush the whole segment */
4216: perpage = 0;
1.69 pk 4217: if (CACHEINFO.c_vactype != VAC_NONE)
1.55 pk 4218: cache_flush_segment(vr, vs);
4219: } else {
4220: /* flush each page individually; some never need flushing */
1.69 pk 4221: perpage = (CACHEINFO.c_vactype != VAC_NONE);
1.55 pk 4222: }
4223: while (va < endva) {
1.72 pk 4224: tpte = sp->sg_pte[VA_SUN4M_VPG(va)];
1.55 pk 4225: if ((tpte & SRMMU_TETYPE) != SRMMU_TEPTE) {
1.72 pk 4226: #ifdef DEBUG
4227: if ((pmapdebug & PDB_SANITYCHK) &&
4228: (getpte4m(va) & SRMMU_TETYPE) == SRMMU_TEPTE)
1.91 fair 4229: panic("pmap_rmk: Spurious kTLB entry for 0x%lx",
1.81 pk 4230: va);
1.72 pk 4231: #endif
1.61 pk 4232: va += NBPG;
1.55 pk 4233: continue;
4234: }
4235: if ((tpte & SRMMU_PGTYPE) == PG_SUN4M_OBMEM) {
4236: /* if cacheable, flush page as needed */
4237: if (perpage && (tpte & SRMMU_PG_C))
1.69 pk 4238: cache_flush_page(va);
1.60 pk 4239: i = ptoa((tpte & SRMMU_PPNMASK) >> SRMMU_PPNSHIFT);
1.55 pk 4240: if (managed(i)) {
4241: pv = pvhead(i);
4242: pv->pv_flags |= MR4M(tpte);
1.58 pk 4243: pv_unlink4m(pv, pm, va);
1.55 pk 4244: }
4245: }
1.72 pk 4246: tlb_flush_page(va);
4247: setpgt4m(&sp->sg_pte[VA_SUN4M_VPG(va)], SRMMU_TEINVALID);
1.131 pk 4248: nleft--;
4249: #ifdef DIAGNOSTIC
4250: if (nleft < 0)
4251: panic("pmap_rmk: too many PTEs in segment; "
4252: "va 0x%lx; endva 0x%lx", va, endva);
4253: #endif
1.55 pk 4254: va += NBPG;
4255: }
4256:
4257: /*
4258: * If the segment is all gone, remove it from everyone and
4259: * flush the TLB.
4260: */
4261: if ((sp->sg_npte = nleft) == 0) {
4262: va = VSTOVA(vr,vs); /* retract */
4263:
4264: tlb_flush_segment(vr, vs); /* Paranoia? */
4265:
1.58 pk 4266: /*
4267: * We need to free the segment table. The problem is that
1.55 pk 4268: * we can't free the initial (bootstrap) mapping, so
4269: * we have to explicitly check for this case (ugh).
4270: */
4271: if (va < virtual_avail) {
4272: #ifdef DEBUG
1.66 christos 4273: printf("pmap_rmk4m: attempt to free base kernel alloc\n");
1.55 pk 4274: #endif
4275: /* sp->sg_pte = NULL; */
4276: sp->sg_npte = 0;
4277: return;
4278: }
4279: /* no need to free the table; it is statically allocated */
4280: qzero(sp->sg_pte, SRMMU_L3SIZE * sizeof(long));
4281: }
4282: /* if we're done with a region, leave it wired */
4283: }
4284: #endif /* sun4m */
4285: /*
4286:  * Just like PMAP_RMK_MAGIC, but we have a different threshold.
4287: * Note that this may well deserve further tuning work.
4288: */
4289: #if 0
4290: #define PMAP_RMU_MAGIC (cacheinfo.c_hwflush?4:64) /* if > magic, use cache_flush_segment */
4291: #else
4292: #define PMAP_RMU_MAGIC 4 /* if > magic, use cache_flush_segment */
4293: #endif
4294:
4295: #if defined(SUN4) || defined(SUN4C)
4296:
4297: /* remove from user */
4298: /*static*/ void
4299: pmap_rmu4_4c(pm, va, endva, vr, vs)
1.124 pk 4300: struct pmap *pm;
4301: vaddr_t va, endva;
4302: int vr, vs;
4303: {
4304: int *pte0, i, pteva, tpte, perpage, npg;
4305: struct pvlist *pv;
4306: int nleft, pmeg;
1.55 pk 4307: struct regmap *rp;
4308: struct segmap *sp;
4309:
4310: rp = &pm->pm_regmap[vr];
4311: if (rp->rg_nsegmap == 0)
4312: return;
4313: if (rp->rg_segmap == NULL)
4314: panic("pmap_rmu: no segments");
4315:
4316: sp = &rp->rg_segmap[vs];
4317: if ((nleft = sp->sg_npte) == 0)
4318: return;
4319: if (sp->sg_pte == NULL)
4320: panic("pmap_rmu: no pages");
4321:
4322:
4323: pmeg = sp->sg_pmeg;
4324: pte0 = sp->sg_pte;
1.1 deraadt 4325:
4326: if (pmeg == seginval) {
1.124 pk 4327: int *pte = pte0 + VA_VPG(va);
1.1 deraadt 4328:
4329: /*
4330: * PTEs are not in MMU. Just invalidate software copies.
4331: */
1.63 pk 4332: for (; va < endva; pte++, va += NBPG) {
1.1 deraadt 4333: tpte = *pte;
4334: if ((tpte & PG_V) == 0) {
4335: /* nothing to remove (braindead VM layer) */
4336: continue;
4337: }
4338: if ((tpte & PG_TYPE) == PG_OBMEM) {
1.60 pk 4339: i = ptoa(tpte & PG_PFNUM);
1.21 deraadt 4340: if (managed(i))
1.58 pk 4341: pv_unlink4_4c(pvhead(i), pm, va);
1.1 deraadt 4342: }
4343: nleft--;
1.131 pk 4344: #ifdef DIAGNOSTIC
4345: if (nleft < 0)
4346: panic("pmap_rmu: too many PTEs in segment; "
4347: "va 0x%lx; endva 0x%lx", va, endva);
4348: #endif
1.1 deraadt 4349: *pte = 0;
4350: }
1.43 pk 4351: if ((sp->sg_npte = nleft) == 0) {
1.49 pk 4352: free(pte0, M_VMPMAP);
1.43 pk 4353: sp->sg_pte = NULL;
4354: if (--rp->rg_nsegmap == 0) {
1.49 pk 4355: free(rp->rg_segmap, M_VMPMAP);
1.43 pk 4356: rp->rg_segmap = NULL;
1.69 pk 4357: #if defined(SUN4_MMU3L)
4358: if (HASSUN4_MMU3L && rp->rg_smeg != reginval) {
1.43 pk 4359: if (pm->pm_ctx) {
1.71 pk 4360: setcontext4(pm->pm_ctxnum);
1.43 pk 4361: setregmap(va, reginval);
4362: } else
1.71 pk 4363: setcontext4(0);
1.43 pk 4364: region_free(pm, rp->rg_smeg);
4365: }
4366: #endif
4367: }
1.1 deraadt 4368: }
1.43 pk 4369: return;
1.1 deraadt 4370: }
4371:
4372: /*
4373: * PTEs are in MMU. Invalidate in hardware, update ref &
4374: * mod bits, and flush cache if required.
4375: */
1.43 pk 4376: if (CTX_USABLE(pm,rp)) {
1.1 deraadt 4377: /* process has a context, must flush cache */
4378: npg = (endva - va) >> PGSHIFT;
1.71 pk 4379: setcontext4(pm->pm_ctxnum);
1.1 deraadt 4380: if (npg > PMAP_RMU_MAGIC) {
4381: perpage = 0; /* flush the whole segment */
1.69 pk 4382: cache_flush_segment(vr, vs);
1.1 deraadt 4383: } else
1.69 pk 4384: perpage = (CACHEINFO.c_vactype != VAC_NONE);
1.1 deraadt 4385: pteva = va;
4386: } else {
4387: /* no context, use context 0; cache flush unnecessary */
1.71 pk 4388: setcontext4(0);
1.69 pk 4389: if (HASSUN4_MMU3L)
1.43 pk 4390: setregmap(0, tregion);
1.1 deraadt 4391: /* XXX use per-cpu pteva? */
4392: setsegmap(0, pmeg);
1.18 deraadt 4393: pteva = VA_VPG(va) << PGSHIFT;
1.1 deraadt 4394: perpage = 0;
4395: }
1.63 pk 4396: for (; va < endva; pteva += NBPG, va += NBPG) {
1.55 pk 4397: tpte = getpte4(pteva);
1.1 deraadt 4398: if ((tpte & PG_V) == 0)
4399: continue;
1.35 pk 4400: if ((tpte & PG_TYPE) == PG_OBMEM) {
4401: /* if cacheable, flush page as needed */
4402: if (perpage && (tpte & PG_NC) == 0)
1.1 deraadt 4403: cache_flush_page(va);
1.60 pk 4404: i = ptoa(tpte & PG_PFNUM);
1.1 deraadt 4405: if (managed(i)) {
4406: pv = pvhead(i);
1.55 pk 4407: pv->pv_flags |= MR4_4C(tpte);
1.58 pk 4408: pv_unlink4_4c(pv, pm, va);
1.1 deraadt 4409: }
4410: }
4411: nleft--;
1.131 pk 4412: #ifdef DIAGNOSTIC
4413: if (nleft < 0)
4414: panic("pmap_rmu: too many PTEs in segment; "
4415: "va 0x%lx; endva 0x%lx; pmeg %d", va, endva, pmeg);
4416: #endif
1.55 pk 4417: setpte4(pteva, 0);
1.43 pk 4418: pte0[VA_VPG(pteva)] = 0;
1.1 deraadt 4419: }
4420:
4421: /*
4422: * If the segment is all gone, and the context is loaded, give
4423: * the segment back.
4424: */
1.43 pk 4425: if ((sp->sg_npte = nleft) == 0 /* ??? && pm->pm_ctx != NULL*/) {
4426: #ifdef DEBUG
4427: if (pm->pm_ctx == NULL) {
1.66 christos 4428: printf("pmap_rmu: no context here...");
1.43 pk 4429: }
4430: #endif
4431: va = VSTOVA(vr,vs); /* retract */
4432: if (CTX_USABLE(pm,rp))
4433: setsegmap(va, seginval);
1.69 pk 4434: else if (HASSUN4_MMU3L && rp->rg_smeg != reginval) {
1.43 pk 4435: /* note: context already set earlier */
4436: setregmap(0, rp->rg_smeg);
4437: setsegmap(vs << SGSHIFT, seginval);
4438: }
1.49 pk 4439: free(pte0, M_VMPMAP);
1.43 pk 4440: sp->sg_pte = NULL;
1.1 deraadt 4441: me_free(pm, pmeg);
1.13 pk 4442:
1.43 pk 4443: if (--rp->rg_nsegmap == 0) {
1.49 pk 4444: free(rp->rg_segmap, M_VMPMAP);
1.43 pk 4445: rp->rg_segmap = NULL;
4446: GAP_WIDEN(pm,vr);
4447:
1.69 pk 4448: #if defined(SUN4_MMU3L)
4449: if (HASSUN4_MMU3L && rp->rg_smeg != reginval) {
1.43 pk 4450: /* note: context already set */
4451: if (pm->pm_ctx)
4452: setregmap(va, reginval);
4453: region_free(pm, rp->rg_smeg);
4454: }
4455: #endif
4456: }
1.13 pk 4457:
1.1 deraadt 4458: }
4459: }
4460:
1.55 pk 4461: #endif /* sun4,4c */
4462:
4463: #if defined(SUN4M) /* 4M version of pmap_rmu */
4464: /* remove from user */
4465: /*static*/ void
4466: pmap_rmu4m(pm, va, endva, vr, vs)
1.124 pk 4467: struct pmap *pm;
4468: vaddr_t va, endva;
4469: int vr, vs;
4470: {
4471: int *pte0, i, perpage, npg;
4472: struct pvlist *pv;
4473: int nleft;
1.55 pk 4474: struct regmap *rp;
4475: struct segmap *sp;
4476:
4477: rp = &pm->pm_regmap[vr];
4478: if (rp->rg_nsegmap == 0)
4479: return;
4480: if (rp->rg_segmap == NULL)
4481: panic("pmap_rmu: no segments");
4482:
4483: sp = &rp->rg_segmap[vs];
4484: if ((nleft = sp->sg_npte) == 0)
4485: return;
1.76 pk 4486:
1.55 pk 4487: if (sp->sg_pte == NULL)
4488: panic("pmap_rmu: no pages");
4489:
4490: pte0 = sp->sg_pte;
4491:
4492: /*
4493: * Invalidate PTE in MMU pagetables. Flush cache if necessary.
4494: */
1.72 pk 4495: if (pm->pm_ctx) {
1.55 pk 4496: /* process has a context, must flush cache */
1.71 pk 4497: setcontext4m(pm->pm_ctxnum);
1.69 pk 4498: if (CACHEINFO.c_vactype != VAC_NONE) {
1.63 pk 4499: npg = (endva - va) >> PGSHIFT;
4500: if (npg > PMAP_RMU_MAGIC) {
4501: perpage = 0; /* flush the whole segment */
1.55 pk 4502: cache_flush_segment(vr, vs);
1.63 pk 4503: } else
4504: perpage = 1;
1.55 pk 4505: } else
1.63 pk 4506: perpage = 0;
1.55 pk 4507: } else {
4508: /* no context; cache flush unnecessary */
4509: perpage = 0;
4510: }
1.63 pk 4511: for (; va < endva; va += NBPG) {
1.100 pk 4512: int tpte;
4513:
4514: if (pm->pm_ctx)
4515: tlb_flush_page(va);
1.72 pk 4516:
1.100 pk 4517: tpte = pte0[VA_SUN4M_VPG(va)];
1.72 pk 4518:
4519: if ((tpte & SRMMU_TETYPE) != SRMMU_TEPTE) {
4520: #ifdef DEBUG
4521: if ((pmapdebug & PDB_SANITYCHK) &&
4522: pm->pm_ctx &&
4523: 		    (getpte4m(va) & SRMMU_TETYPE) == SRMMU_TEPTE)
1.91 fair 4524: panic("pmap_rmu: Spurious uTLB entry for 0x%lx",
1.81 pk 4525: va);
1.72 pk 4526: #endif
1.55 pk 4527: continue;
1.72 pk 4528: }
4529:
1.55 pk 4530: if ((tpte & SRMMU_PGTYPE) == PG_SUN4M_OBMEM) {
4531: /* if cacheable, flush page as needed */
4532: if (perpage && (tpte & SRMMU_PG_C))
1.60 pk 4533: cache_flush_page(va);
4534: i = ptoa((tpte & SRMMU_PPNMASK) >> SRMMU_PPNSHIFT);
1.55 pk 4535: if (managed(i)) {
4536: pv = pvhead(i);
4537: pv->pv_flags |= MR4M(tpte);
1.58 pk 4538: pv_unlink4m(pv, pm, va);
1.55 pk 4539: }
4540: }
4541: nleft--;
1.131 pk 4542: #ifdef DIAGNOSTIC
4543: if (nleft < 0)
4544: panic("pmap_rmu: too many PTEs in segment; "
4545: "va 0x%lx; endva 0x%lx", va, endva);
4546: #endif
1.72 pk 4547: setpgt4m(&pte0[VA_SUN4M_VPG(va)], SRMMU_TEINVALID);
1.55 pk 4548: }
4549:
4550: /*
4551: * If the segment is all gone, and the context is loaded, give
4552: * the segment back.
4553: */
1.72 pk 4554: if ((sp->sg_npte = nleft) == 0) {
1.55 pk 4555: #ifdef DEBUG
4556: if (pm->pm_ctx == NULL) {
1.66 christos 4557: printf("pmap_rmu: no context here...");
1.55 pk 4558: }
4559: #endif
4560: va = VSTOVA(vr,vs); /* retract */
4561:
1.88 pk 4562: if (pm->pm_ctx)
4563: tlb_flush_segment(vr, vs); /* Paranoia? */
1.73 pk 4564: setpgt4m(&rp->rg_seg_ptps[vs], SRMMU_TEINVALID);
1.121 pk 4565: pool_put(&L23_pool, pte0);
1.55 pk 4566: sp->sg_pte = NULL;
4567:
4568: if (--rp->rg_nsegmap == 0) {
1.88 pk 4569: if (pm->pm_ctx)
4570: tlb_flush_context(); /* Paranoia? */
4571: setpgt4m(&pm->pm_reg_ptps[vr], SRMMU_TEINVALID);
1.55 pk 4572: free(rp->rg_segmap, M_VMPMAP);
4573: rp->rg_segmap = NULL;
1.121 pk 4574: pool_put(&L23_pool, rp->rg_seg_ptps);
1.55 pk 4575: }
4576: }
4577: }
4578: #endif /* sun4m */
4579:
1.1 deraadt 4580: /*
4581: * Lower (make more strict) the protection on the specified
4582: * physical page.
4583: *
4584: * There are only two cases: either the protection is going to 0
4585: * (in which case we do the dirty work here), or it is going from
4586: * to read-only (in which case pv_changepte does the trick).
4587: */
1.55 pk 4588:
4589: #if defined(SUN4) || defined(SUN4C)
1.1 deraadt 4590: void
1.55 pk 4591: pmap_page_protect4_4c(pa, prot)
1.124 pk 4592: paddr_t pa;
1.1 deraadt 4593: vm_prot_t prot;
4594: {
1.124 pk 4595: struct pvlist *pv, *pv0, *npv;
4596: struct pmap *pm;
4597: int va, vr, vs, pteva, tpte;
4598: int flags, nleft, i, s, ctx;
1.43 pk 4599: struct regmap *rp;
4600: struct segmap *sp;
1.1 deraadt 4601:
4602: #ifdef DEBUG
4603: if ((pmapdebug & PDB_CHANGEPROT) ||
4604: (pmapdebug & PDB_REMOVE && prot == VM_PROT_NONE))
1.91 fair 4605: printf("pmap_page_protect(0x%lx, 0x%x)\n", pa, prot);
1.1 deraadt 4606: #endif
4607: /*
4608: * Skip unmanaged pages, or operations that do not take
4609: * away write permission.
4610: */
1.82 pk 4611: if ((pa & (PMAP_TNC_4 & ~PMAP_NC)) ||
1.34 pk 4612: !managed(pa) || prot & VM_PROT_WRITE)
1.1 deraadt 4613: return;
4614: write_user_windows(); /* paranoia */
4615: if (prot & VM_PROT_READ) {
1.58 pk 4616: pv_changepte4_4c(pvhead(pa), 0, PG_W);
1.1 deraadt 4617: return;
4618: }
4619:
4620: /*
4621: * Remove all access to all people talking to this page.
4622: * Walk down PV list, removing all mappings.
4623: * The logic is much like that for pmap_remove,
4624: * but we know we are removing exactly one page.
4625: */
4626: pv = pvhead(pa);
4627: s = splpmap();
4628: if ((pm = pv->pv_pmap) == NULL) {
4629: splx(s);
4630: return;
4631: }
1.71 pk 4632: ctx = getcontext4();
1.1 deraadt 4633: pv0 = pv;
4634: flags = pv->pv_flags & ~PV_NC;
4635: for (;; pm = pv->pv_pmap) {
4636: va = pv->pv_va;
1.43 pk 4637: vr = VA_VREG(va);
4638: vs = VA_VSEG(va);
4639: rp = &pm->pm_regmap[vr];
4640: if (rp->rg_nsegmap == 0)
4641: panic("pmap_remove_all: empty vreg");
4642: sp = &rp->rg_segmap[vs];
4643: if ((nleft = sp->sg_npte) == 0)
1.1 deraadt 4644: panic("pmap_remove_all: empty vseg");
4645: nleft--;
1.43 pk 4646: sp->sg_npte = nleft;
4647:
4648: if (sp->sg_pmeg == seginval) {
4649: /* Definitely not a kernel map */
1.1 deraadt 4650: if (nleft) {
1.43 pk 4651: sp->sg_pte[VA_VPG(va)] = 0;
1.1 deraadt 4652: } else {
1.49 pk 4653: free(sp->sg_pte, M_VMPMAP);
1.43 pk 4654: sp->sg_pte = NULL;
4655: if (--rp->rg_nsegmap == 0) {
1.49 pk 4656: free(rp->rg_segmap, M_VMPMAP);
1.43 pk 4657: rp->rg_segmap = NULL;
4658: GAP_WIDEN(pm,vr);
1.69 pk 4659: #if defined(SUN4_MMU3L)
4660: if (HASSUN4_MMU3L && rp->rg_smeg != reginval) {
1.43 pk 4661: if (pm->pm_ctx) {
1.71 pk 4662: setcontext4(pm->pm_ctxnum);
1.43 pk 4663: setregmap(va, reginval);
4664: } else
1.71 pk 4665: setcontext4(0);
1.43 pk 4666: region_free(pm, rp->rg_smeg);
4667: }
4668: #endif
4669: }
1.1 deraadt 4670: }
4671: goto nextpv;
4672: }
1.84 pk 4673:
1.43 pk 4674: if (CTX_USABLE(pm,rp)) {
1.71 pk 4675: setcontext4(pm->pm_ctxnum);
1.1 deraadt 4676: pteva = va;
1.69 pk 4677: cache_flush_page(va);
1.1 deraadt 4678: } else {
1.71 pk 4679: setcontext4(0);
1.1 deraadt 4680: /* XXX use per-cpu pteva? */
1.69 pk 4681: if (HASSUN4_MMU3L)
1.43 pk 4682: setregmap(0, tregion);
4683: setsegmap(0, sp->sg_pmeg);
1.18 deraadt 4684: pteva = VA_VPG(va) << PGSHIFT;
1.1 deraadt 4685: }
1.43 pk 4686:
1.55 pk 4687: tpte = getpte4(pteva);
1.43 pk 4688: if ((tpte & PG_V) == 0)
1.91 fair 4689: panic("pmap_page_protect !PG_V: ctx %d, va 0x%x, pte 0x%x",
1.84 pk 4690: pm->pm_ctxnum, va, tpte);
1.55 pk 4691: flags |= MR4_4C(tpte);
1.43 pk 4692:
1.1 deraadt 4693: if (nleft) {
1.55 pk 4694: setpte4(pteva, 0);
1.44 pk 4695: if (sp->sg_pte != NULL)
4696: sp->sg_pte[VA_VPG(pteva)] = 0;
1.84 pk 4697: goto nextpv;
4698: }
4699:
4700: /* Entire segment is gone */
4701: if (pm == pmap_kernel()) {
4702: #if defined(SUN4_MMU3L)
4703: if (!HASSUN4_MMU3L)
1.43 pk 4704: #endif
1.84 pk 4705: for (i = ncontext; --i >= 0;) {
4706: setcontext4(i);
4707: setsegmap(va, seginval);
4708: }
4709: me_free(pm, sp->sg_pmeg);
4710: if (--rp->rg_nsegmap == 0) {
1.69 pk 4711: #if defined(SUN4_MMU3L)
1.84 pk 4712: if (HASSUN4_MMU3L) {
1.43 pk 4713: for (i = ncontext; --i >= 0;) {
1.71 pk 4714: setcontext4(i);
1.84 pk 4715: setregmap(va, reginval);
1.43 pk 4716: }
1.84 pk 4717: region_free(pm, rp->rg_smeg);
4718: }
1.43 pk 4719: #endif
1.84 pk 4720: }
4721: } else {
4722: if (CTX_USABLE(pm,rp))
4723: /* `pteva'; we might be using tregion */
4724: setsegmap(pteva, seginval);
1.69 pk 4725: #if defined(SUN4_MMU3L)
1.84 pk 4726: else if (HASSUN4_MMU3L &&
4727: rp->rg_smeg != reginval) {
4728: /* note: context already set earlier */
4729: setregmap(0, rp->rg_smeg);
4730: setsegmap(vs << SGSHIFT, seginval);
4731: }
1.43 pk 4732: #endif
1.84 pk 4733: free(sp->sg_pte, M_VMPMAP);
4734: sp->sg_pte = NULL;
4735: me_free(pm, sp->sg_pmeg);
1.43 pk 4736:
1.84 pk 4737: if (--rp->rg_nsegmap == 0) {
1.69 pk 4738: #if defined(SUN4_MMU3L)
1.84 pk 4739: if (HASSUN4_MMU3L &&
4740: rp->rg_smeg != reginval) {
4741: if (pm->pm_ctx)
4742: setregmap(va, reginval);
4743: region_free(pm, rp->rg_smeg);
4744: }
1.43 pk 4745: #endif
1.84 pk 4746: free(rp->rg_segmap, M_VMPMAP);
4747: rp->rg_segmap = NULL;
4748: GAP_WIDEN(pm,vr);
1.1 deraadt 4749: }
4750: }
1.84 pk 4751:
1.1 deraadt 4752: nextpv:
4753: npv = pv->pv_next;
4754: if (pv != pv0)
1.122 pk 4755: pool_put(&pv_pool, pv);
1.1 deraadt 4756: if ((pv = npv) == NULL)
4757: break;
4758: }
4759: pv0->pv_pmap = NULL;
1.11 pk 4760: pv0->pv_next = NULL; /* ? */
1.1 deraadt 4761: pv0->pv_flags = flags;
1.71 pk 4762: setcontext4(ctx);
1.1 deraadt 4763: splx(s);
4764: }
4765:
4766: /*
4767: * Lower (make more strict) the protection on the specified
4768: * range of this pmap.
4769: *
4770: * There are only two cases: either the protection is going to 0
4771: * (in which case we call pmap_remove to do the dirty work), or
4772: * it is going from read/write to read-only. The latter is
4773: * fairly easy.
4774: */
4775: void
1.55 pk 4776: pmap_protect4_4c(pm, sva, eva, prot)
1.124 pk 4777: struct pmap *pm;
4778: vaddr_t sva, eva;
1.1 deraadt 4779: vm_prot_t prot;
4780: {
1.124 pk 4781: int va, nva, vr, vs;
4782: int s, ctx;
1.43 pk 4783: struct regmap *rp;
4784: struct segmap *sp;
1.1 deraadt 4785:
4786: if (pm == NULL || prot & VM_PROT_WRITE)
4787: return;
1.43 pk 4788:
1.1 deraadt 4789: if ((prot & VM_PROT_READ) == 0) {
4790: pmap_remove(pm, sva, eva);
4791: return;
4792: }
4793:
4794: write_user_windows();
1.71 pk 4795: ctx = getcontext4();
1.1 deraadt 4796: s = splpmap();
4797: simple_lock(&pm->pm_lock);
4798:
4799: for (va = sva; va < eva;) {
1.43 pk 4800: vr = VA_VREG(va);
4801: vs = VA_VSEG(va);
4802: rp = &pm->pm_regmap[vr];
4803: nva = VSTOVA(vr,vs + 1);
1.1 deraadt 4804: if (nva == 0) panic("pmap_protect: last segment"); /* cannot happen */
4805: if (nva > eva)
4806: nva = eva;
1.43 pk 4807: if (rp->rg_nsegmap == 0) {
1.1 deraadt 4808: va = nva;
4809: continue;
4810: }
1.43 pk 4811: #ifdef DEBUG
4812: if (rp->rg_segmap == NULL)
4813: panic("pmap_protect: no segments");
4814: #endif
4815: sp = &rp->rg_segmap[vs];
4816: if (sp->sg_npte == 0) {
4817: va = nva;
4818: continue;
4819: }
4820: #ifdef DEBUG
4821: if (pm != pmap_kernel() && sp->sg_pte == NULL)
4822: panic("pmap_protect: no pages");
4823: #endif
4824: if (sp->sg_pmeg == seginval) {
1.124 pk 4825: int *pte = &sp->sg_pte[VA_VPG(va)];
1.1 deraadt 4826:
4827: /* not in MMU; just clear PG_W from core copies */
4828: for (; va < nva; va += NBPG)
4829: *pte++ &= ~PG_W;
4830: } else {
4831: /* in MMU: take away write bits from MMU PTEs */
1.43 pk 4832: if (CTX_USABLE(pm,rp)) {
1.124 pk 4833: int tpte;
1.1 deraadt 4834:
4835: /*
4836: * Flush cache so that any existing cache
4837: * tags are updated. This is really only
4838: * needed for PTEs that lose PG_W.
4839: */
1.71 pk 4840: setcontext4(pm->pm_ctxnum);
1.1 deraadt 4841: for (; va < nva; va += NBPG) {
1.55 pk 4842: tpte = getpte4(va);
1.1 deraadt 4843: pmap_stats.ps_npg_prot_all++;
1.35 pk 4844: if ((tpte & (PG_W|PG_TYPE)) ==
4845: (PG_W|PG_OBMEM)) {
1.1 deraadt 4846: pmap_stats.ps_npg_prot_actual++;
1.69 pk 4847: cache_flush_page(va);
1.55 pk 4848: setpte4(va, tpte & ~PG_W);
1.1 deraadt 4849: }
4850: }
4851: } else {
1.124 pk 4852: int pteva;
1.1 deraadt 4853:
4854: /*
4855: * No context, hence not cached;
4856: * just update PTEs.
4857: */
1.71 pk 4858: setcontext4(0);
1.1 deraadt 4859: /* XXX use per-cpu pteva? */
1.69 pk 4860: if (HASSUN4_MMU3L)
1.43 pk 4861: setregmap(0, tregion);
4862: setsegmap(0, sp->sg_pmeg);
1.18 deraadt 4863: pteva = VA_VPG(va) << PGSHIFT;
1.1 deraadt 4864: for (; va < nva; pteva += NBPG, va += NBPG)
1.55 pk 4865: setpte4(pteva, getpte4(pteva) & ~PG_W);
1.1 deraadt 4866: }
4867: }
4868: }
4869: simple_unlock(&pm->pm_lock);
1.12 pk 4870: splx(s);
1.71 pk 4871: setcontext4(ctx);
1.1 deraadt 4872: }
4873:
4874: /*
4875: * Change the protection and/or wired status of the given (MI) virtual page.
4876: * XXX: should have separate function (or flag) telling whether only wiring
4877: * is changing.
4878: */
4879: void
1.55 pk 4880: pmap_changeprot4_4c(pm, va, prot, wired)
1.124 pk 4881: struct pmap *pm;
4882: vaddr_t va;
1.1 deraadt 4883: vm_prot_t prot;
4884: int wired;
4885: {
1.124 pk 4886: int vr, vs, tpte, newprot, ctx, s;
1.43 pk 4887: struct regmap *rp;
4888: struct segmap *sp;
1.1 deraadt 4889:
4890: #ifdef DEBUG
4891: if (pmapdebug & PDB_CHANGEPROT)
1.91 fair 4892: printf("pmap_changeprot(%p, 0x%lx, 0x%x, 0x%x)\n",
1.1 deraadt 4893: pm, va, prot, wired);
4894: #endif
4895:
4896: write_user_windows(); /* paranoia */
4897:
1.64 pk 4898: va &= ~(NBPG-1);
1.42 mycroft 4899: if (pm == pmap_kernel())
1.1 deraadt 4900: newprot = prot & VM_PROT_WRITE ? PG_S|PG_W : PG_S;
4901: else
4902: newprot = prot & VM_PROT_WRITE ? PG_W : 0;
1.43 pk 4903: vr = VA_VREG(va);
4904: vs = VA_VSEG(va);
1.1 deraadt 4905: s = splpmap(); /* conservative */
1.43 pk 4906: rp = &pm->pm_regmap[vr];
4907: 	if (rp->rg_nsegmap == 0) {
1.66 christos 4908: 		printf("pmap_changeprot: no segments in %d\n", vr);
		splx(s);
1.43 pk 4909: 		return;
4910: 	}
4911: 	if (rp->rg_segmap == NULL) {
1.66 christos 4912: 		printf("pmap_changeprot: no segmap in region %d\n", vr);
		splx(s);
1.43 pk 4913: 		return;
4914: 	}
4915: sp = &rp->rg_segmap[vs];
4916:
1.1 deraadt 4917: pmap_stats.ps_changeprots++;
4918:
1.43 pk 4919: #ifdef DEBUG
4920: if (pm != pmap_kernel() && sp->sg_pte == NULL)
4921: panic("pmap_changeprot: no pages");
4922: #endif
4923:
1.1 deraadt 4924: /* update PTEs in software or hardware */
1.43 pk 4925: if (sp->sg_pmeg == seginval) {
1.124 pk 4926: int *pte = &sp->sg_pte[VA_VPG(va)];
1.1 deraadt 4927:
4928: /* update in software */
4929: if ((*pte & PG_PROT) == newprot)
4930: goto useless;
4931: *pte = (*pte & ~PG_PROT) | newprot;
4932: } else {
4933: /* update in hardware */
1.71 pk 4934: ctx = getcontext4();
1.43 pk 4935: if (CTX_USABLE(pm,rp)) {
1.88 pk 4936: /*
4937: * Use current context.
4938: * Flush cache if page has been referenced to
4939: * avoid stale protection bits in the cache tags.
4940: */
1.71 pk 4941: setcontext4(pm->pm_ctxnum);
1.55 pk 4942: tpte = getpte4(va);
1.11 pk 4943: if ((tpte & PG_PROT) == newprot) {
1.71 pk 4944: setcontext4(ctx);
1.1 deraadt 4945: goto useless;
1.11 pk 4946: }
1.88 pk 4947: if ((tpte & (PG_U|PG_NC|PG_TYPE)) == (PG_U|PG_OBMEM))
1.1 deraadt 4948: cache_flush_page((int)va);
4949: } else {
1.71 pk 4950: setcontext4(0);
1.1 deraadt 4951: /* XXX use per-cpu va? */
1.69 pk 4952: if (HASSUN4_MMU3L)
1.43 pk 4953: setregmap(0, tregion);
4954: setsegmap(0, sp->sg_pmeg);
1.18 deraadt 4955: va = VA_VPG(va) << PGSHIFT;
1.55 pk 4956: tpte = getpte4(va);
1.11 pk 4957: if ((tpte & PG_PROT) == newprot) {
1.71 pk 4958: setcontext4(ctx);
1.1 deraadt 4959: goto useless;
1.11 pk 4960: }
1.1 deraadt 4961: }
4962: tpte = (tpte & ~PG_PROT) | newprot;
1.55 pk 4963: setpte4(va, tpte);
1.71 pk 4964: setcontext4(ctx);
1.1 deraadt 4965: }
4966: splx(s);
4967: return;
4968:
4969: useless:
4970: /* only wiring changed, and we ignore wiring */
4971: pmap_stats.ps_useless_changeprots++;
4972: splx(s);
4973: }
4974:
1.55 pk 4975: #endif /* sun4, 4c */
4976:
4977: #if defined(SUN4M) /* 4M version of protection routines above */
1.1 deraadt 4978: /*
1.55 pk 4979: * Lower (make more strict) the protection on the specified
4980: * physical page.
1.1 deraadt 4981: *
1.55 pk 4982: * There are only two cases: either the protection is going to 0
4983: * (in which case we do the dirty work here), or it is going
4984: * to read-only (in which case pv_changepte does the trick).
1.1 deraadt 4985: */
4986: void
1.55 pk 4987: pmap_page_protect4m(pa, prot)
1.124 pk 4988: paddr_t pa;
1.1 deraadt 4989: vm_prot_t prot;
4990: {
1.124 pk 4991: struct pvlist *pv, *pv0, *npv;
4992: struct pmap *pm;
4993: int va, vr, vs, tpte;
4994: int flags, nleft, s, ctx;
1.55 pk 4995: struct regmap *rp;
4996: struct segmap *sp;
1.45 pk 4997:
4998: #ifdef DEBUG
1.55 pk 4999: if ((pmapdebug & PDB_CHANGEPROT) ||
5000: (pmapdebug & PDB_REMOVE && prot == VM_PROT_NONE))
1.91 fair 5001: printf("pmap_page_protect(0x%lx, 0x%x)\n", pa, prot);
1.45 pk 5002: #endif
1.55 pk 5003: /*
5004: * Skip unmanaged pages, or operations that do not take
5005: * away write permission.
5006: */
5007: if (!managed(pa) || prot & VM_PROT_WRITE)
5008: return;
5009: write_user_windows(); /* paranoia */
5010: if (prot & VM_PROT_READ) {
5011: pv_changepte4m(pvhead(pa), 0, PPROT_WRITE);
1.45 pk 5012: return;
5013: }
1.39 pk 5014:
1.1 deraadt 5015: /*
1.55 pk 5016: * Remove all access to all people talking to this page.
5017: * Walk down PV list, removing all mappings.
5018: * The logic is much like that for pmap_remove,
5019: * but we know we are removing exactly one page.
1.1 deraadt 5020: */
1.55 pk 5021: pv = pvhead(pa);
5022: s = splpmap();
5023: if ((pm = pv->pv_pmap) == NULL) {
5024: splx(s);
5025: return;
1.1 deraadt 5026: }
1.71 pk 5027: ctx = getcontext4m();
1.55 pk 5028: pv0 = pv;
5029: flags = pv->pv_flags /*| PV_C4M*/; /* %%%: ???? */
5030: for (;; pm = pv->pv_pmap) {
5031: va = pv->pv_va;
5032: vr = VA_VREG(va);
5033: vs = VA_VSEG(va);
5034: rp = &pm->pm_regmap[vr];
5035: if (rp->rg_nsegmap == 0)
5036: panic("pmap_remove_all: empty vreg");
5037: sp = &rp->rg_segmap[vs];
5038: if ((nleft = sp->sg_npte) == 0)
5039: panic("pmap_remove_all: empty vseg");
5040: nleft--;
5041: sp->sg_npte = nleft;
1.1 deraadt 5042:
1.55 pk 5043: /* Invalidate PTE in MMU pagetables. Flush cache if necessary */
1.72 pk 5044: if (pm->pm_ctx) {
1.71 pk 5045: setcontext4m(pm->pm_ctxnum);
1.69 pk 5046: cache_flush_page(va);
1.55 pk 5047: tlb_flush_page(va);
1.72 pk 5048: }
5049:
5050: tpte = sp->sg_pte[VA_SUN4M_VPG(va)];
1.1 deraadt 5051:
1.55 pk 5052: if ((tpte & SRMMU_TETYPE) != SRMMU_TEPTE)
5053: panic("pmap_page_protect !PG_V");
1.72 pk 5054:
1.55 pk 5055: flags |= MR4M(tpte);
1.43 pk 5056:
1.83 pk 5057: if (nleft) {
5058: setpgt4m(&sp->sg_pte[VA_SUN4M_VPG(va)], SRMMU_TEINVALID);
5059: goto nextpv;
5060: }
5061:
5062: /* Entire segment is gone */
5063: if (pm == pmap_kernel()) {
5064: tlb_flush_segment(vr, vs); /* Paranoid? */
1.72 pk 5065: setpgt4m(&sp->sg_pte[VA_SUN4M_VPG(va)], SRMMU_TEINVALID);
1.83 pk 5066: if (va < virtual_avail) {
1.55 pk 5067: #ifdef DEBUG
1.83 pk 5068: printf(
5069: "pmap_page_protect: attempt to free"
5070: " base kernel allocation\n");
1.55 pk 5071: #endif
1.83 pk 5072: goto nextpv;
5073: }
1.72 pk 5074: #if 0 /* no need for this */
1.83 pk 5075: /* no need to free the table; it is static */
5076: qzero(sp->sg_pte, SRMMU_L3SIZE * sizeof(int));
1.72 pk 5077: #endif
1.43 pk 5078:
1.83 pk 5079: /* if we're done with a region, leave it */
1.55 pk 5080:
1.83 pk 5081: } else { /* User mode mapping */
5082: if (pm->pm_ctx)
5083: tlb_flush_segment(vr, vs);
5084: setpgt4m(&rp->rg_seg_ptps[vs], SRMMU_TEINVALID);
1.121 pk 5085: pool_put(&L23_pool, sp->sg_pte);
1.83 pk 5086: sp->sg_pte = NULL;
1.55 pk 5087:
1.83 pk 5088: if (--rp->rg_nsegmap == 0) {
1.88 pk 5089: if (pm->pm_ctx)
5090: tlb_flush_context();
5091: setpgt4m(&pm->pm_reg_ptps[vr], SRMMU_TEINVALID);
1.83 pk 5092: free(rp->rg_segmap, M_VMPMAP);
5093: rp->rg_segmap = NULL;
1.121 pk 5094: pool_put(&L23_pool, rp->rg_seg_ptps);
1.55 pk 5095: }
5096: }
1.83 pk 5097:
1.55 pk 5098: nextpv:
5099: npv = pv->pv_next;
5100: if (pv != pv0)
1.122 pk 5101: pool_put(&pv_pool, pv);
1.55 pk 5102: if ((pv = npv) == NULL)
5103: break;
5104: }
5105: pv0->pv_pmap = NULL;
5106: pv0->pv_next = NULL; /* ? */
5107: pv0->pv_flags = flags;
1.71 pk 5108: setcontext4m(ctx);
1.55 pk 5109: splx(s);
5110: }
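/*
 * Illustrative sketch only (not part of the original source): the MI VM
 * code reaches the routine above through the pmap_page_protect() interface,
 * typically in one of two forms:
 *
 *	pmap_page_protect(pa, VM_PROT_READ);	revoke write access only
 *	pmap_page_protect(pa, VM_PROT_NONE);	remove every mapping of pa
 *
 * The first form is handled by pv_changepte4m(); the second walks and
 * frees the page's entire pv list as above.
 */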
5111:
5112: /*
5113: * Lower (make more strict) the protection on the specified
5114: * range of this pmap.
5115: *
5116: * There are only two cases: either the protection is going to 0
5117: * (in which case we call pmap_remove to do the dirty work), or
5118: * it is going from read/write to read-only. The latter is
5119: * fairly easy.
5120: */
5121: void
5122: pmap_protect4m(pm, sva, eva, prot)
1.124 pk 5123: struct pmap *pm;
5124: vaddr_t sva, eva;
1.55 pk 5125: vm_prot_t prot;
5126: {
1.124 pk 5127: int va, nva, vr, vs;
5128: int s, ctx;
1.55 pk 5129: struct regmap *rp;
5130: struct segmap *sp;
5131:
5132: if (pm == NULL || prot & VM_PROT_WRITE)
5133: return;
5134:
5135: if ((prot & VM_PROT_READ) == 0) {
5136: pmap_remove(pm, sva, eva);
5137: return;
5138: }
5139:
5140: write_user_windows();
1.71 pk 5141: ctx = getcontext4m();
1.55 pk 5142: s = splpmap();
5143: simple_lock(&pm->pm_lock);
5144:
5145: for (va = sva; va < eva;) {
5146: vr = VA_VREG(va);
5147: vs = VA_VSEG(va);
5148: rp = &pm->pm_regmap[vr];
5149: nva = VSTOVA(vr,vs + 1);
5150: if (nva == 0) /* XXX */
5151: panic("pmap_protect: last segment"); /* cannot happen (why?) */
5152: if (nva > eva)
5153: nva = eva;
5154: if (rp->rg_nsegmap == 0) {
5155: va = nva;
5156: continue;
5157: }
5158: #ifdef DEBUG
5159: if (rp->rg_segmap == NULL)
5160: panic("pmap_protect: no segments");
5161: #endif
5162: sp = &rp->rg_segmap[vs];
5163: if (sp->sg_npte == 0) {
5164: va = nva;
5165: continue;
5166: }
5167: #ifdef DEBUG
5168: if (sp->sg_pte == NULL)
5169: panic("pmap_protect: no pages");
5170: #endif
1.72 pk 5171: /* pages loaded: take away write bits from MMU PTEs */
5172: if (pm->pm_ctx)
5173: setcontext4m(pm->pm_ctxnum);
5174:
5175: pmap_stats.ps_npg_prot_all = (nva - va) >> PGSHIFT;
5176: for (; va < nva; va += NBPG) {
5177: int tpte;
1.100 pk 5178:
5179: if (pm->pm_ctx) {
5180: /* Flush TLB entry */
5181: tlb_flush_page(va);
5182: }
5183:
1.72 pk 5184: tpte = sp->sg_pte[VA_SUN4M_VPG(va)];
1.55 pk 5185: /*
5186: * Flush cache so that any existing cache
5187: * tags are updated. This is really only
5188: * needed for PTEs that lose PG_W.
5189: */
1.72 pk 5190: if ((tpte & (PPROT_WRITE|SRMMU_PGTYPE)) ==
5191: (PPROT_WRITE|PG_SUN4M_OBMEM)) {
5192: pmap_stats.ps_npg_prot_actual++;
5193: if (pm->pm_ctx) {
1.69 pk 5194: cache_flush_page(va);
1.55 pk 5195: }
1.72 pk 5196: setpgt4m(&sp->sg_pte[VA_SUN4M_VPG(va)],
5197: tpte & ~PPROT_WRITE);
1.55 pk 5198: }
5199: }
5200: }
5201: simple_unlock(&pm->pm_lock);
5202: splx(s);
1.71 pk 5203: setcontext4m(ctx);
1.55 pk 5204: }
5205:
5206: /*
5207: * Change the protection and/or wired status of the given (MI) virtual page.
5208: * XXX: should have separate function (or flag) telling whether only wiring
5209: * is changing.
5210: */
5211: void
5212: pmap_changeprot4m(pm, va, prot, wired)
1.124 pk 5213: struct pmap *pm;
5214: vaddr_t va;
1.55 pk 5215: vm_prot_t prot;
5216: int wired;
5217: {
1.124 pk 5218: int pte, newprot, ctx, s;
1.100 pk 5219: struct regmap *rp;
5220: struct segmap *sp;
1.55 pk 5221:
5222: #ifdef DEBUG
5223: if (pmapdebug & PDB_CHANGEPROT)
1.91 fair 5224: printf("pmap_changeprot(%p, 0x%lx, 0x%x, 0x%x)\n",
1.55 pk 5225: pm, va, prot, wired);
5226: #endif
5227:
5228: write_user_windows(); /* paranoia */
5229:
1.64 pk 5230: va &= ~(NBPG-1);
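	/*
	 * Select the new SRMMU access bits: kernel mappings get the
	 * supervisor-only protection codes (PPROT_N_*), while user
	 * mappings get codes usable from both user and supervisor mode.
	 */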
1.55 pk 5231: if (pm == pmap_kernel())
5232: newprot = prot & VM_PROT_WRITE ? PPROT_N_RWX : PPROT_N_RX;
5233: else
5234: newprot = prot & VM_PROT_WRITE ? PPROT_RWX_RWX : PPROT_RX_RX;
5235:
5236: pmap_stats.ps_changeprots++;
5237:
5238: s = splpmap(); /* conservative */
1.100 pk 5239:
5240: rp = &pm->pm_regmap[VA_VREG(va)];
5241: sp = &rp->rg_segmap[VA_VSEG(va)];
5242:
1.71 pk 5243: ctx = getcontext4m();
1.55 pk 5244: if (pm->pm_ctx) {
1.100 pk 5245: /* Flush TLB entry */
5246: setcontext4m(pm->pm_ctxnum);
5247: tlb_flush_page(va);
5248: }
5249: pte = sp->sg_pte[VA_SUN4M_VPG(va)];
5250:
5251: if ((pte & SRMMU_PROT_MASK) == newprot) {
5252: /* only wiring changed, and we ignore wiring */
5253: pmap_stats.ps_useless_changeprots++;
5254: goto out;
5255: }
5256:
5257: if (pm->pm_ctx) {
1.88 pk 5258: /*
5259: * Use current context.
5260: * Flush cache if page has been referenced to
5261: * avoid stale protection bits in the cache tags.
5262: */
1.100 pk 5263: if ((pte & (SRMMU_PG_C|SRMMU_PGTYPE)) ==
1.88 pk 5264: (SRMMU_PG_C|PG_SUN4M_OBMEM))
5265: cache_flush_page(va);
1.55 pk 5266: }
1.100 pk 5267:
5268: setpgt4m(&sp->sg_pte[VA_SUN4M_VPG(va)],
5269: (pte & ~SRMMU_PROT_MASK) | newprot);
1.72 pk 5270: out:
1.71 pk 5271: setcontext4m(ctx);
1.55 pk 5272: splx(s);
5273: }
5274: #endif /* 4m */
5275:
5276: /*
5277: * Insert (MI) physical page pa at virtual address va in the given pmap.
5278: * NB: the pa parameter includes type bits PMAP_OBIO, PMAP_NC as necessary.
5279: *
5280: * If pa is not in the `managed' range it will not be `bank mapped'.
5281: * This works during bootstrap only because the first 4MB happens to
5282: * map one-to-one.
5283: *
5284: * There may already be something else there, or we might just be
5285: * changing protections and/or wiring on an existing mapping.
5286: * XXX should have different entry points for changing!
5287: */
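/*
 * Illustrative sketch only (not from the original source): a caller encodes
 * the address space and cacheability of the target page in the low type
 * bits of `pa'; e.g., to map a page of on-board I/O space uncached:
 */
#if 0
	pmap_enter(pmap_kernel(), va, pa | PMAP_OBIO | PMAP_NC,
	    VM_PROT_READ | VM_PROT_WRITE, 1);
#endif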
5288:
5289: #if defined(SUN4) || defined(SUN4C)
5290:
5291: void
5292: pmap_enter4_4c(pm, va, pa, prot, wired)
1.124 pk 5293: struct pmap *pm;
5294: vaddr_t va;
5295: paddr_t pa;
1.55 pk 5296: vm_prot_t prot;
5297: int wired;
5298: {
1.124 pk 5299: struct pvlist *pv;
5300: int pteproto, ctx;
1.55 pk 5301:
5302: if (pm == NULL)
5303: return;
5304:
5305: if (VA_INHOLE(va)) {
5306: #ifdef DEBUG
1.91 fair 5307: printf("pmap_enter: pm %p, va 0x%lx, pa 0x%lx: in MMU hole\n",
1.55 pk 5308: pm, va, pa);
5309: #endif
5310: return;
5311: }
5312:
5313: #ifdef DEBUG
5314: if (pmapdebug & PDB_ENTER)
1.91 fair 5315: printf("pmap_enter(%p, 0x%lx, 0x%lx, 0x%x, 0x%x)\n",
1.55 pk 5316: pm, va, pa, prot, wired);
5317: #endif
5318:
1.82 pk 5319: pteproto = PG_V | PMAP_T2PTE_4(pa);
5320: pa &= ~PMAP_TNC_4;
1.55 pk 5321: /*
5322: * Set up prototype for new PTE. Cannot set PG_NC from PV_NC yet
5323: * since the pvlist no-cache bit might change as a result of the
5324: * new mapping.
5325: */
5326: if ((pteproto & PG_TYPE) == PG_OBMEM && managed(pa)) {
5327: #ifdef DIAGNOSTIC
5328: if (!pmap_pa_exists(pa))
1.91 fair 5329: panic("pmap_enter: no such address: 0x%lx", pa);
1.55 pk 5330: #endif
5331: pv = pvhead(pa);
5332: } else {
5333: pv = NULL;
5334: }
1.60 pk 5335: pteproto |= atop(pa) & PG_PFNUM;
1.55 pk 5336: if (prot & VM_PROT_WRITE)
5337: pteproto |= PG_W;
5338:
1.71 pk 5339: ctx = getcontext4();
1.55 pk 5340: if (pm == pmap_kernel())
5341: pmap_enk4_4c(pm, va, prot, wired, pv, pteproto | PG_S);
5342: else
5343: pmap_enu4_4c(pm, va, prot, wired, pv, pteproto);
1.71 pk 5344: setcontext4(ctx);
1.55 pk 5345: }
5346:
5347: /* enter new (or change existing) kernel mapping */
5348: void
5349: pmap_enk4_4c(pm, va, prot, wired, pv, pteproto)
1.124 pk 5350: struct pmap *pm;
5351: vaddr_t va;
1.55 pk 5352: vm_prot_t prot;
5353: int wired;
1.124 pk 5354: struct pvlist *pv;
5355: int pteproto;
1.55 pk 5356: {
1.124 pk 5357: int vr, vs, tpte, i, s;
1.55 pk 5358: struct regmap *rp;
5359: struct segmap *sp;
5360:
5361: vr = VA_VREG(va);
5362: vs = VA_VSEG(va);
5363: rp = &pm->pm_regmap[vr];
5364: sp = &rp->rg_segmap[vs];
5365: s = splpmap(); /* XXX way too conservative */
5366:
1.69 pk 5367: #if defined(SUN4_MMU3L)
5368: if (HASSUN4_MMU3L && rp->rg_smeg == reginval) {
1.124 pk 5369: vaddr_t tva;
1.55 pk 5370: rp->rg_smeg = region_alloc(&region_locked, pm, vr)->me_cookie;
5371: i = ncontext - 1;
5372: do {
1.71 pk 5373: setcontext4(i);
1.55 pk 5374: setregmap(va, rp->rg_smeg);
5375: } while (--i >= 0);
1.1 deraadt 5376:
1.43 pk 5377: /* install this region's segment maps; unallocated segments map to seginval */
5378: tva = VA_ROUNDDOWNTOREG(va);
5379: for (i = 0; i < NSEGRG; i++) {
5380: setsegmap(tva, rp->rg_segmap[i].sg_pmeg);
5381: tva += NBPSG;
5382: };
5383: }
5384: #endif
1.55 pk 5385: if (sp->sg_pmeg != seginval && (tpte = getpte4(va)) & PG_V) {
1.124 pk 5386: int addr;
1.1 deraadt 5387:
1.34 pk 5388: /* old mapping exists, and is of the same pa type */
5389: if ((tpte & (PG_PFNUM|PG_TYPE)) ==
5390: (pteproto & (PG_PFNUM|PG_TYPE))) {
1.1 deraadt 5391: /* just changing protection and/or wiring */
5392: splx(s);
1.81 pk 5393: pmap_changeprot4_4c(pm, va, prot, wired);
1.1 deraadt 5394: return;
5395: }
5396:
1.34 pk 5397: if ((tpte & PG_TYPE) == PG_OBMEM) {
1.43 pk 5398: #ifdef DEBUG
1.91 fair 5399: printf("pmap_enk: changing existing va=>pa entry: va 0x%lx, pteproto 0x%x\n",
1.43 pk 5400: va, pteproto);
5401: #endif
1.34 pk 5402: /*
5403: * Switcheroo: changing pa for this va.
5404: * If old pa was managed, remove from pvlist.
5405: * If old page was cached, flush cache.
5406: */
1.60 pk 5407: addr = ptoa(tpte & PG_PFNUM);
1.31 pk 5408: if (managed(addr))
1.58 pk 5409: pv_unlink4_4c(pvhead(addr), pm, va);
1.34 pk 5410: if ((tpte & PG_NC) == 0) {
1.71 pk 5411: setcontext4(0); /* ??? */
1.69 pk 5412: cache_flush_page((int)va);
1.34 pk 5413: }
1.1 deraadt 5414: }
5415: } else {
5416: /* adding new entry */
1.43 pk 5417: sp->sg_npte++;
1.1 deraadt 5418: }
5419:
5420: /*
5421: * If the new mapping is for a managed PA, enter into pvlist.
5422: * Note that the mapping for a malloc page will always be
5423: * unique (hence will never cause a second call to malloc).
5424: */
5425: if (pv != NULL)
1.115 pk 5426: pteproto |= pv_link4_4c(pv, pm, va, pteproto & PG_NC);
1.1 deraadt 5427:
1.43 pk 5428: if (sp->sg_pmeg == seginval) {
1.124 pk 5429: int tva;
1.1 deraadt 5430:
5431: /*
5432: * Allocate an MMU entry now (on locked list),
5433: * and map it into every context. Set all its
5434: * PTEs invalid (we will then overwrite one, but
5435: * this is more efficient than looping twice).
5436: */
5437: #ifdef DEBUG
5438: if (pm->pm_ctx == NULL || pm->pm_ctxnum != 0)
5439: panic("pmap_enk: kern seg but no kern ctx");
5440: #endif
1.43 pk 5441: sp->sg_pmeg = me_alloc(&segm_locked, pm, vr, vs)->me_cookie;
5442: rp->rg_nsegmap++;
5443:
1.69 pk 5444: #if defined(SUN4_MMU3L)
5445: if (HASSUN4_MMU3L)
1.43 pk 5446: setsegmap(va, sp->sg_pmeg);
5447: else
5448: #endif
5449: {
5450: i = ncontext - 1;
5451: do {
1.71 pk 5452: setcontext4(i);
1.43 pk 5453: setsegmap(va, sp->sg_pmeg);
5454: } while (--i >= 0);
5455: }
1.1 deraadt 5456:
5457: /* set all PTEs to invalid, then overwrite one PTE below */
5458: tva = VA_ROUNDDOWNTOSEG(va);
5459: i = NPTESG;
5460: do {
1.55 pk 5461: setpte4(tva, 0);
1.1 deraadt 5462: tva += NBPG;
5463: } while (--i > 0);
5464: }
5465:
5466: /* ptes kept in hardware only */
1.55 pk 5467: setpte4(va, pteproto);
1.1 deraadt 5468: splx(s);
5469: }
5470:
5471: /* enter new (or change existing) user mapping */
1.53 christos 5472: void
1.55 pk 5473: pmap_enu4_4c(pm, va, prot, wired, pv, pteproto)
1.124 pk 5474: struct pmap *pm;
5475: vaddr_t va;
1.1 deraadt 5476: vm_prot_t prot;
5477: int wired;
1.124 pk 5478: struct pvlist *pv;
5479: int pteproto;
1.1 deraadt 5480: {
1.124 pk 5481: int vr, vs, *pte, tpte, pmeg, s, doflush;
1.43 pk 5482: struct regmap *rp;
5483: struct segmap *sp;
1.1 deraadt 5484:
5485: write_user_windows(); /* XXX conservative */
1.43 pk 5486: vr = VA_VREG(va);
5487: vs = VA_VSEG(va);
5488: rp = &pm->pm_regmap[vr];
1.1 deraadt 5489: s = splpmap(); /* XXX conservative */
5490:
5491: /*
5492: * If there is no space in which the PTEs can be written
5493: * while they are not in the hardware, this must be a new
5494: * virtual segment. Get PTE space and count the segment.
5495: *
5496: * TO SPEED UP CTX ALLOC, PUT SEGMENT BOUNDS STUFF HERE
5497: * AND IN pmap_rmu()
5498: */
1.13 pk 5499:
1.43 pk 5500: GAP_SHRINK(pm,vr);
1.13 pk 5501:
5502: #ifdef DEBUG
5503: if (pm->pm_gap_end < pm->pm_gap_start) {
1.91 fair 5504: printf("pmap_enu: gap_start 0x%x, gap_end 0x%x",
1.13 pk 5505: pm->pm_gap_start, pm->pm_gap_end);
5506: panic("pmap_enu: gap botch");
5507: }
5508: #endif
5509:
1.43 pk 5510: rretry:
5511: if (rp->rg_segmap == NULL) {
5512: /* definitely a new mapping */
1.124 pk 5513: int i;
5514: int size = NSEGRG * sizeof (struct segmap);
1.43 pk 5515:
5516: sp = (struct segmap *)malloc((u_long)size, M_VMPMAP, M_WAITOK);
5517: if (rp->rg_segmap != NULL) {
1.66 christos 5518: printf("pmap_enter: segment filled during sleep\n"); /* can this happen? */
1.49 pk 5519: free(sp, M_VMPMAP);
1.43 pk 5520: goto rretry;
5521: }
1.55 pk 5522: qzero((caddr_t)sp, size);
1.43 pk 5523: rp->rg_segmap = sp;
5524: rp->rg_nsegmap = 0;
5525: for (i = NSEGRG; --i >= 0;)
5526: sp++->sg_pmeg = seginval;
5527: }
5528:
5529: sp = &rp->rg_segmap[vs];
5530:
5531: sretry:
5532: if ((pte = sp->sg_pte) == NULL) {
1.1 deraadt 5533: /* definitely a new mapping */
1.124 pk 5534: int size = NPTESG * sizeof *pte;
1.1 deraadt 5535:
5536: pte = (int *)malloc((u_long)size, M_VMPMAP, M_WAITOK);
1.43 pk 5537: if (sp->sg_pte != NULL) {
1.66 christos 5538: printf("pmap_enter: pte filled during sleep\n"); /* can this happen? */
1.49 pk 5539: free(pte, M_VMPMAP);
1.43 pk 5540: goto sretry;
1.1 deraadt 5541: }
5542: #ifdef DEBUG
1.43 pk 5543: if (sp->sg_pmeg != seginval)
1.1 deraadt 5544: panic("pmap_enter: new ptes, but not seginval");
5545: #endif
1.55 pk 5546: qzero((caddr_t)pte, size);
1.43 pk 5547: sp->sg_pte = pte;
5548: sp->sg_npte = 1;
5549: rp->rg_nsegmap++;
1.1 deraadt 5550: } else {
5551: /* might be a change: fetch old pte */
5552: doflush = 0;
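		/*
		 * The existing PTE may live only in the software sg_pte
		 * array (no hardware pmeg for this segment), or in the MMU
		 * itself.  In the latter case it is read either in the
		 * pmap's own context or, when that context cannot be used,
		 * by temporarily mapping the pmeg at virtual address 0 in
		 * context 0.
		 */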
1.55 pk 5553: if ((pmeg = sp->sg_pmeg) == seginval) {
5554: /* software pte */
5555: tpte = pte[VA_VPG(va)];
5556: } else {
5557: /* hardware pte */
5558: if (CTX_USABLE(pm,rp)) {
1.71 pk 5559: setcontext4(pm->pm_ctxnum);
1.55 pk 5560: tpte = getpte4(va);
1.69 pk 5561: doflush = CACHEINFO.c_vactype != VAC_NONE;
1.55 pk 5562: } else {
1.71 pk 5563: setcontext4(0);
1.55 pk 5564: /* XXX use per-cpu pteva? */
1.69 pk 5565: if (HASSUN4_MMU3L)
1.55 pk 5566: setregmap(0, tregion);
5567: setsegmap(0, pmeg);
5568: tpte = getpte4(VA_VPG(va) << PGSHIFT);
5569: }
5570: }
5571: if (tpte & PG_V) {
1.124 pk 5572: int addr;
1.55 pk 5573:
5574: /* old mapping exists, and is of the same pa type */
5575: if ((tpte & (PG_PFNUM|PG_TYPE)) ==
5576: (pteproto & (PG_PFNUM|PG_TYPE))) {
5577: /* just changing prot and/or wiring */
5578: splx(s);
5579: /* caller should call this directly: */
1.60 pk 5580: pmap_changeprot4_4c(pm, va, prot, wired);
1.55 pk 5581: if (wired)
5582: pm->pm_stats.wired_count++;
5583: else
5584: pm->pm_stats.wired_count--;
5585: return;
5586: }
5587: /*
5588: * Switcheroo: changing pa for this va.
5589: * If old pa was managed, remove from pvlist.
5590: * If old page was cached, flush cache.
5591: */
1.65 christos 5592: #if 0
1.91 fair 5593: printf("%s[%d]: pmap_enu: changing existing va(0x%x)=>pa entry\n",
1.65 christos 5594: curproc->p_comm, curproc->p_pid, va);
5595: #endif
1.55 pk 5596: if ((tpte & PG_TYPE) == PG_OBMEM) {
1.60 pk 5597: addr = ptoa(tpte & PG_PFNUM);
1.55 pk 5598: if (managed(addr))
1.58 pk 5599: pv_unlink4_4c(pvhead(addr), pm, va);
1.69 pk 5600: if (doflush && (tpte & PG_NC) == 0)
1.55 pk 5601: cache_flush_page((int)va);
5602: }
5603: } else {
5604: /* adding new entry */
5605: sp->sg_npte++;
5606:
5607: /*
5608: * Increment counters
5609: */
5610: if (wired)
5611: pm->pm_stats.wired_count++;
5612: }
5613: }
5614:
5615: if (pv != NULL)
1.115 pk 5616: pteproto |= pv_link4_4c(pv, pm, va, pteproto & PG_NC);
1.55 pk 5617:
5618: /*
5619: * Update hardware & software PTEs.
5620: */
5621: if ((pmeg = sp->sg_pmeg) != seginval) {
1.81 pk 5622: /* ptes are in hardware */
1.55 pk 5623: if (CTX_USABLE(pm,rp))
1.71 pk 5624: setcontext4(pm->pm_ctxnum);
1.55 pk 5625: else {
1.71 pk 5626: setcontext4(0);
1.55 pk 5627: /* XXX use per-cpu pteva? */
1.69 pk 5628: if (HASSUN4_MMU3L)
1.55 pk 5629: setregmap(0, tregion);
5630: setsegmap(0, pmeg);
5631: va = VA_VPG(va) << PGSHIFT;
5632: }
5633: setpte4(va, pteproto);
5634: }
5635: /* update software copy */
5636: pte += VA_VPG(va);
5637: *pte = pteproto;
5638:
5639: splx(s);
5640: }
5641:
5642: #endif /*sun4,4c*/
5643:
5644: #if defined(SUN4M) /* Sun4M versions of enter routines */
5645: /*
5646: * Insert (MI) physical page pa at virtual address va in the given pmap.
5647: * NB: the pa parameter includes type bits PMAP_OBIO, PMAP_NC as necessary.
5648: *
5649: * If pa is not in the `managed' range it will not be `bank mapped'.
5650: * This works during bootstrap only because the first 4MB happens to
5651: * map one-to-one.
5652: *
5653: * There may already be something else there, or we might just be
5654: * changing protections and/or wiring on an existing mapping.
5655: * XXX should have different entry points for changing!
5656: */
5657:
5658: void
5659: pmap_enter4m(pm, va, pa, prot, wired)
1.124 pk 5660: struct pmap *pm;
5661: vaddr_t va;
5662: paddr_t pa;
1.55 pk 5663: vm_prot_t prot;
5664: int wired;
5665: {
1.124 pk 5666: struct pvlist *pv;
5667: int pteproto, ctx;
1.55 pk 5668:
5669: if (pm == NULL)
5670: return;
5671:
5672: #ifdef DEBUG
5673: if (pmapdebug & PDB_ENTER)
1.91 fair 5674: printf("pmap_enter(%p, 0x%lx, 0x%lx, 0x%x, 0x%x)\n",
1.55 pk 5675: pm, va, pa, prot, wired);
5676: #endif
1.60 pk 5677:
5678: /* Initialise pteproto with cache bit */
5679: pteproto = (pa & PMAP_NC) == 0 ? SRMMU_PG_C : 0;
1.55 pk 5680:
1.82 pk 5681: #ifdef DEBUG
5682: if (pa & PMAP_TYPE_SRMMU) { /* this page goes in an iospace */
1.69 pk 5683: if (cpuinfo.cpu_type == CPUTYP_MS1)
1.58 pk 5684: panic("pmap_enter4m: attempt to use 36-bit iospace on"
5685: " MicroSPARC");
1.55 pk 5686: }
1.82 pk 5687: #endif
5688: pteproto |= PMAP_T2PTE_SRMMU(pa);
1.55 pk 5689:
5690: /* Make sure we get a pte with appropriate perms! */
5691: pteproto |= SRMMU_TEPTE | PPROT_RX_RX;
5692:
1.82 pk 5693: pa &= ~PMAP_TNC_SRMMU;
1.55 pk 5694: /*
5695: * Set up prototype for new PTE. Cannot set PG_NC from PV_NC yet
5696: * since the pvlist no-cache bit might change as a result of the
5697: * new mapping.
5698: */
5699: if ((pteproto & SRMMU_PGTYPE) == PG_SUN4M_OBMEM && managed(pa)) {
5700: #ifdef DIAGNOSTIC
5701: if (!pmap_pa_exists(pa))
1.91 fair 5702: panic("pmap_enter: no such address: 0x%lx", pa);
1.55 pk 5703: #endif
5704: pv = pvhead(pa);
5705: } else {
5706: pv = NULL;
5707: }
1.60 pk 5708: pteproto |= (atop(pa) << SRMMU_PPNSHIFT);
1.55 pk 5709:
5710: if (prot & VM_PROT_WRITE)
5711: pteproto |= PPROT_WRITE;
5712:
1.71 pk 5713: ctx = getcontext4m();
1.55 pk 5714:
5715: if (pm == pmap_kernel())
1.58 pk 5716: pmap_enk4m(pm, va, prot, wired, pv, pteproto | PPROT_S);
1.55 pk 5717: else
1.58 pk 5718: pmap_enu4m(pm, va, prot, wired, pv, pteproto);
1.55 pk 5719:
1.71 pk 5720: setcontext4m(ctx);
1.55 pk 5721: }
5722:
5723: /* enter new (or change existing) kernel mapping */
5724: void
5725: pmap_enk4m(pm, va, prot, wired, pv, pteproto)
1.124 pk 5726: struct pmap *pm;
5727: vaddr_t va;
1.55 pk 5728: vm_prot_t prot;
5729: int wired;
1.124 pk 5730: struct pvlist *pv;
5731: int pteproto;
1.55 pk 5732: {
1.124 pk 5733: int vr, vs, tpte, s;
1.55 pk 5734: struct regmap *rp;
5735: struct segmap *sp;
5736:
5737: #ifdef DEBUG
5738: if (va < KERNBASE)
1.72 pk 5739: panic("pmap_enk4m: can't enter va 0x%lx below KERNBASE", va);
1.55 pk 5740: #endif
5741: vr = VA_VREG(va);
5742: vs = VA_VSEG(va);
5743: rp = &pm->pm_regmap[vr];
5744: sp = &rp->rg_segmap[vs];
5745:
5746: s = splpmap(); /* XXX way too conservative */
5747:
5748: if (rp->rg_seg_ptps == NULL) /* enter new region */
1.91 fair 5749: panic("pmap_enk4m: missing kernel region table for va 0x%lx",va);
1.55 pk 5750:
1.72 pk 5751: tpte = sp->sg_pte[VA_SUN4M_VPG(va)];
5752: if ((tpte & SRMMU_TETYPE) == SRMMU_TEPTE) {
1.124 pk 5753: int addr;
1.55 pk 5754:
5755: /* old mapping exists, and is of the same pa type */
5756:
5757: if ((tpte & SRMMU_PPNMASK) == (pteproto & SRMMU_PPNMASK)) {
5758: /* just changing protection and/or wiring */
5759: splx(s);
1.81 pk 5760: pmap_changeprot4m(pm, va, prot, wired);
1.55 pk 5761: return;
5762: }
5763:
5764: if ((tpte & SRMMU_PGTYPE) == PG_SUN4M_OBMEM) {
5765: #ifdef DEBUG
1.91 fair 5766: printf("pmap_enk4m: changing existing va=>pa entry: va 0x%lx, pteproto 0x%x, "
5767: "oldpte 0x%x\n", va, pteproto, tpte);
1.55 pk 5768: #endif
5769: /*
5770: * Switcheroo: changing pa for this va.
5771: * If old pa was managed, remove from pvlist.
5772: * If old page was cached, flush cache.
5773: */
1.60 pk 5774: addr = ptoa((tpte & SRMMU_PPNMASK) >> SRMMU_PPNSHIFT);
1.55 pk 5775: if (managed(addr))
1.58 pk 5776: pv_unlink4m(pvhead(addr), pm, va);
1.55 pk 5777: if (tpte & SRMMU_PG_C) {
1.71 pk 5778: setcontext4m(0); /* ??? */
1.69 pk 5779: cache_flush_page((int)va);
1.55 pk 5780: }
5781: }
5782: } else {
5783: /* adding new entry */
5784: sp->sg_npte++;
5785: }
5786:
5787: /*
5788: * If the new mapping is for a managed PA, enter into pvlist.
5789: * Note that the mapping for a malloc page will always be
5790: * unique (hence will never cause a second call to malloc).
5791: */
5792: if (pv != NULL)
1.115 pk 5793: pteproto &= ~(pv_link4m(pv, pm, va, (pteproto & SRMMU_PG_C) == 0));
1.55 pk 5794:
1.72 pk 5795: #ifdef DEBUG
1.55 pk 5796: if (sp->sg_pte == NULL) /* If no existing pagetable */
1.60 pk 5797: panic("pmap_enk4m: missing segment table for va 0x%lx",va);
1.72 pk 5798: #endif
1.55 pk 5799:
1.72 pk 5800: tlb_flush_page(va);
5801: setpgt4m(&sp->sg_pte[VA_SUN4M_VPG(va)], pteproto);
1.55 pk 5802:
5803: splx(s);
5804: }
5805:
5806: /* enter new (or change existing) user mapping */
5807: void
5808: pmap_enu4m(pm, va, prot, wired, pv, pteproto)
1.124 pk 5809: struct pmap *pm;
5810: vaddr_t va;
1.55 pk 5811: vm_prot_t prot;
5812: int wired;
1.124 pk 5813: struct pvlist *pv;
5814: int pteproto;
1.55 pk 5815: {
1.124 pk 5816: int vr, vs, *pte, tpte, s;
1.55 pk 5817: struct regmap *rp;
5818: struct segmap *sp;
5819:
1.72 pk 5820: #ifdef DEBUG
5821: if (KERNBASE < va)
5822: panic("pmap_enu4m: can't enter va 0x%lx above KERNBASE", va);
5823: #endif
5824:
1.55 pk 5825: write_user_windows(); /* XXX conservative */
5826: vr = VA_VREG(va);
5827: vs = VA_VSEG(va);
5828: rp = &pm->pm_regmap[vr];
5829: s = splpmap(); /* XXX conservative */
5830:
5831: rretry:
5832: if (rp->rg_segmap == NULL) {
5833: /* definitely a new mapping */
1.124 pk 5834: int size = NSEGRG * sizeof (struct segmap);
1.55 pk 5835:
5836: sp = (struct segmap *)malloc((u_long)size, M_VMPMAP, M_WAITOK);
5837: if (rp->rg_segmap != NULL) {
5838: #ifdef DEBUG
1.66 christos 5839: printf("pmap_enu4m: segment filled during sleep\n"); /* can this happen? */
1.55 pk 5840: #endif
5841: free(sp, M_VMPMAP);
5842: goto rretry;
5843: }
5844: qzero((caddr_t)sp, size);
5845: rp->rg_segmap = sp;
5846: rp->rg_nsegmap = 0;
5847: rp->rg_seg_ptps = NULL;
5848: }
5849: rgretry:
5850: if (rp->rg_seg_ptps == NULL) {
5851: /* Need a segment table */
1.100 pk 5852: int i, *ptd;
1.73 pk 5853:
1.121 pk 5854: ptd = pool_get(&L23_pool, PR_WAITOK);
1.55 pk 5855: if (rp->rg_seg_ptps != NULL) {
5856: #ifdef DEBUG
1.66 christos 5857: printf("pmap_enu4m: bizarre segment table fill during sleep\n");
1.55 pk 5858: #endif
1.121 pk 5859: pool_put(&L23_pool, ptd);
1.55 pk 5860: goto rgretry;
5861: }
5862:
1.73 pk 5863: rp->rg_seg_ptps = ptd;
5864: for (i = 0; i < SRMMU_L2SIZE; i++)
1.74 pk 5865: setpgt4m(&ptd[i], SRMMU_TEINVALID);
1.72 pk 5866: setpgt4m(&pm->pm_reg_ptps[vr],
1.73 pk 5867: (VA2PA((caddr_t)ptd) >> SRMMU_PPNPASHIFT) | SRMMU_TEPTD);
1.55 pk 5868: }
5869:
5870: sp = &rp->rg_segmap[vs];
5871:
5872: sretry:
5873: if ((pte = sp->sg_pte) == NULL) {
5874: /* definitely a new mapping */
1.100 pk 5875: int i;
1.55 pk 5876:
1.121 pk 5877: pte = pool_get(&L23_pool, PR_WAITOK);
1.55 pk 5878: if (sp->sg_pte != NULL) {
1.66 christos 5879: printf("pmap_enter: pte filled during sleep\n"); /* can this happen? */
1.121 pk 5880: pool_put(&L23_pool, pte);
1.55 pk 5881: goto sretry;
5882: }
5883:
5884: sp->sg_pte = pte;
5885: sp->sg_npte = 1;
5886: rp->rg_nsegmap++;
1.74 pk 5887: for (i = 0; i < SRMMU_L3SIZE; i++)
5888: setpgt4m(&pte[i], SRMMU_TEINVALID);
1.72 pk 5889: setpgt4m(&rp->rg_seg_ptps[vs],
5890: (VA2PA((caddr_t)pte) >> SRMMU_PPNPASHIFT) | SRMMU_TEPTD);
1.55 pk 5891: } else {
1.72 pk 5892: /*
5893: * Might be a change: fetch old pte
5894: */
1.100 pk 5895: tlb_flush_page(va);
1.72 pk 5896: tpte = pte[VA_SUN4M_VPG(va)];
1.55 pk 5897:
5898: if ((tpte & SRMMU_TETYPE) == SRMMU_TEPTE) {
1.124 pk 5899: int addr;
1.1 deraadt 5900:
1.34 pk 5901: /* old mapping exists, and is of the same pa type */
1.55 pk 5902: if ((tpte & SRMMU_PPNMASK) ==
5903: (pteproto & SRMMU_PPNMASK)) {
1.1 deraadt 5904: /* just changing prot and/or wiring */
5905: splx(s);
5906: /* caller should call this directly: */
1.60 pk 5907: pmap_changeprot4m(pm, va, prot, wired);
1.15 deraadt 5908: if (wired)
5909: pm->pm_stats.wired_count++;
5910: else
5911: pm->pm_stats.wired_count--;
1.1 deraadt 5912: return;
5913: }
5914: /*
5915: * Switcheroo: changing pa for this va.
5916: * If old pa was managed, remove from pvlist.
5917: * If old page was cached, flush cache.
5918: */
1.60 pk 5919: #ifdef DEBUG
1.72 pk 5920: if (pmapdebug & PDB_SWITCHMAP)
1.91 fair 5921: printf("%s[%d]: pmap_enu: changing existing va(0x%x)=>pa(pte=0x%x) entry\n",
1.72 pk 5922: curproc->p_comm, curproc->p_pid, (int)va, (int)pte);
1.60 pk 5923: #endif
1.55 pk 5924: if ((tpte & SRMMU_PGTYPE) == PG_SUN4M_OBMEM) {
1.60 pk 5925: addr = ptoa( (tpte & SRMMU_PPNMASK) >>
5926: SRMMU_PPNSHIFT);
1.100 pk 5927: if (managed(addr)) {
5928: pvhead(addr)->pv_flags |= MR4M(tpte);
1.58 pk 5929: pv_unlink4m(pvhead(addr), pm, va);
1.100 pk 5930: }
1.72 pk 5931: if (pm->pm_ctx && (tpte & SRMMU_PG_C))
1.34 pk 5932: cache_flush_page((int)va);
1.31 pk 5933: }
1.1 deraadt 5934: } else {
5935: /* adding new entry */
1.43 pk 5936: sp->sg_npte++;
1.15 deraadt 5937:
5938: /*
5939: * Increment counters
5940: */
5941: if (wired)
5942: pm->pm_stats.wired_count++;
1.1 deraadt 5943: }
5944: }
5945: if (pv != NULL)
1.115 pk 5946: pteproto &= ~(pv_link4m(pv, pm, va, (pteproto & SRMMU_PG_C) == 0));
1.1 deraadt 5947:
5948: /*
1.72 pk 5949: * Update PTEs, flush TLB as necessary.
1.1 deraadt 5950: */
1.72 pk 5951: if (pm->pm_ctx) {
1.71 pk 5952: setcontext4m(pm->pm_ctxnum);
1.72 pk 5953: tlb_flush_page(va);
5954: }
5955: setpgt4m(&sp->sg_pte[VA_SUN4M_VPG(va)], pteproto);
1.1 deraadt 5956:
5957: splx(s);
5958: }
1.55 pk 5959: #endif /* sun4m */
1.1 deraadt 5960:
5961: /*
5962: * Change the wiring attribute for a map/virtual-address pair.
5963: */
5964: /* ARGSUSED */
5965: void
5966: pmap_change_wiring(pm, va, wired)
5967: struct pmap *pm;
1.124 pk 5968: vaddr_t va;
1.1 deraadt 5969: int wired;
5970: {
5971:
5972: pmap_stats.ps_useless_changewire++;
5973: }
5974:
5975: /*
5976: * Extract the physical page address associated
5977: * with the given map/virtual_address pair.
5978: * GRR, the vm code knows; we should not have to do this!
5979: */
1.55 pk 5980:
5981: #if defined(SUN4) || defined(SUN4C)
1.124 pk 5982: paddr_t
1.55 pk 5983: pmap_extract4_4c(pm, va)
1.124 pk 5984: struct pmap *pm;
5985: vaddr_t va;
1.1 deraadt 5986: {
1.124 pk 5987: int tpte;
5988: int vr, vs;
1.43 pk 5989: struct regmap *rp;
5990: struct segmap *sp;
1.1 deraadt 5991:
5992: if (pm == NULL) {
1.90 pk 5993: #ifdef DEBUG
5994: if (pmapdebug & PDB_FOLLOW)
5995: printf("pmap_extract: null pmap\n");
5996: #endif
1.1 deraadt 5997: return (0);
5998: }
1.43 pk 5999: vr = VA_VREG(va);
6000: vs = VA_VSEG(va);
6001: rp = &pm->pm_regmap[vr];
6002: if (rp->rg_segmap == NULL) {
1.90 pk 6003: #ifdef DEBUG
6004: if (pmapdebug & PDB_FOLLOW)
6005: printf("pmap_extract: invalid segment (%d)\n", vr);
6006: #endif
1.43 pk 6007: return (0);
6008: }
6009: sp = &rp->rg_segmap[vs];
6010:
6011: if (sp->sg_pmeg != seginval) {
1.124 pk 6012: int ctx = getcontext4();
1.1 deraadt 6013:
1.43 pk 6014: if (CTX_USABLE(pm,rp)) {
1.61 pk 6015: CHANGE_CONTEXTS(ctx, pm->pm_ctxnum);
1.55 pk 6016: tpte = getpte4(va);
1.1 deraadt 6017: } else {
1.61 pk 6018: CHANGE_CONTEXTS(ctx, 0);
1.69 pk 6019: if (HASSUN4_MMU3L)
1.43 pk 6020: setregmap(0, tregion);
6021: setsegmap(0, sp->sg_pmeg);
1.55 pk 6022: tpte = getpte4(VA_VPG(va) << PGSHIFT);
1.1 deraadt 6023: }
1.71 pk 6024: setcontext4(ctx);
1.1 deraadt 6025: } else {
1.124 pk 6026: int *pte = sp->sg_pte;
1.1 deraadt 6027:
6028: if (pte == NULL) {
1.90 pk 6029: #ifdef DEBUG
6030: if (pmapdebug & PDB_FOLLOW)
6031: printf("pmap_extract: invalid segment\n");
6032: #endif
1.1 deraadt 6033: return (0);
6034: }
6035: tpte = pte[VA_VPG(va)];
6036: }
6037: if ((tpte & PG_V) == 0) {
1.90 pk 6038: #ifdef DEBUG
6039: if (pmapdebug & PDB_FOLLOW)
6040: printf("pmap_extract: invalid pte\n");
6041: #endif
1.1 deraadt 6042: return (0);
6043: }
6044: tpte &= PG_PFNUM;
1.1 deraadt 6046: return ((tpte << PGSHIFT) | (va & PGOFSET));
6047: }
1.55 pk 6048: #endif /*4,4c*/
6049:
6050: #if defined(SUN4M) /* 4m version of pmap_extract */
6051: /*
6052: * Extract the physical page address associated
6053: * with the given map/virtual_address pair.
6054: * GRR, the vm code knows; we should not have to do this!
6055: */
1.124 pk 6056: paddr_t
1.55 pk 6057: pmap_extract4m(pm, va)
1.124 pk 6058: struct pmap *pm;
6059: vaddr_t va;
1.55 pk 6060: {
1.90 pk 6061: struct regmap *rm;
6062: struct segmap *sm;
6063: int pte;
1.55 pk 6064:
6065: if (pm == NULL) {
1.90 pk 6066: #ifdef DEBUG
6067: if (pmapdebug & PDB_FOLLOW)
6068: printf("pmap_extract: null pmap\n");
6069: #endif
1.55 pk 6070: return (0);
6071: }
6072:
1.113 pk 6073: if ((rm = pm->pm_regmap) == NULL) {
1.90 pk 6074: #ifdef DEBUG
6075: if (pmapdebug & PDB_FOLLOW)
1.92 pk 6076: printf("pmap_extract: no regmap entry\n");
1.90 pk 6077: #endif
6078: return (0);
6079: }
1.113 pk 6080:
6081: rm += VA_VREG(va);
6082: if ((sm = rm->rg_segmap) == NULL) {
1.90 pk 6083: #ifdef DEBUG
6084: if (pmapdebug & PDB_FOLLOW)
1.92 pk 6085: printf("pmap_extract: no segmap\n");
1.90 pk 6086: #endif
6087: return (0);
6088: }
1.113 pk 6089:
6090: sm += VA_VSEG(va);
6091: if (sm->sg_pte == NULL) {
6092: #ifdef DEBUG
6093: if (pmapdebug & PDB_FOLLOW)
6094: printf("pmap_extract: no ptes\n");
6095: #endif
6096: return (0);
6097: }
6098:
1.90 pk 6099: pte = sm->sg_pte[VA_SUN4M_VPG(va)];
6100: if ((pte & SRMMU_TETYPE) != SRMMU_TEPTE) {
1.72 pk 6101: #ifdef DEBUG
1.90 pk 6102: if (pmapdebug & PDB_FOLLOW)
6103: printf("pmap_extract: invalid pte of type %d\n",
6104: pte & SRMMU_TETYPE);
6105: #endif
1.72 pk 6106: return (0);
6107: }
1.55 pk 6108:
1.79 pk 6109: return (ptoa((pte & SRMMU_PPNMASK) >> SRMMU_PPNSHIFT) | VA_OFF(va));
1.55 pk 6110: }
6111: #endif /* sun4m */
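/*
 * Illustrative sketch only (not from the original source): callers use the
 * MI pmap_extract() interface and must treat a return value of 0 as
 * `no valid mapping', e.g.:
 */
#if 0
	paddr_t pa;

	pa = pmap_extract(pmap_kernel(), va);
	if (pa == 0)
		panic("pmap_extract failed for va 0x%lx", va);
#endif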
1.1 deraadt 6112:
6113: /*
6114: * Copy the range specified by src_addr/len
6115: * from the source map to the range dst_addr/len
6116: * in the destination map.
6117: *
6118: * This routine is only advisory and need not do anything.
6119: */
6120: /* ARGSUSED */
1.94 pk 6121: int pmap_copy_disabled=0;
1.1 deraadt 6122: void
6123: pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
6124: struct pmap *dst_pmap, *src_pmap;
1.124 pk 6125: vaddr_t dst_addr;
6126: vsize_t len;
6127: vaddr_t src_addr;
1.1 deraadt 6128: {
1.94 pk 6129: #if notyet
1.92 pk 6130: struct regmap *rm;
6131: struct segmap *sm;
6132:
1.94 pk 6133: if (pmap_copy_disabled)
6134: return;
1.92 pk 6135: #ifdef DIAGNOSTIC
6136: if (VA_OFF(src_addr) != 0)
6137: printf("pmap_copy: addr not page aligned: 0x%lx\n", src_addr);
6138: if ((len & (NBPG-1)) != 0)
6139: printf("pmap_copy: length not page aligned: 0x%lx\n", len);
6140: #endif
6141:
6142: if (src_pmap == NULL)
6143: return;
6144:
1.55 pk 6145: if (CPU_ISSUN4M) {
1.92 pk 6146: int i, npg, pte;
1.124 pk 6147: paddr_t pa;
1.92 pk 6148:
6149: npg = len >> PGSHIFT;
6150: for (i = 0; i < npg; i++) {
6151: tlb_flush_page(src_addr);
1.115 pk 6152: if ((rm = src_pmap->pm_regmap) == NULL)
6153: continue;
6154: rm += VA_VREG(src_addr);
6155:
6156: if ((sm = rm->rg_segmap) == NULL)
1.92 pk 6157: continue;
1.115 pk 6158: sm += VA_VSEG(src_addr);
6159: if (sm->sg_npte == 0)
1.92 pk 6160: continue;
1.115 pk 6161:
1.92 pk 6162: pte = sm->sg_pte[VA_SUN4M_VPG(src_addr)];
6163: if ((pte & SRMMU_TETYPE) != SRMMU_TEPTE)
6164: continue;
6165:
6166: pa = ptoa((pte & SRMMU_PPNMASK) >> SRMMU_PPNSHIFT);
1.55 pk 6167: pmap_enter(dst_pmap, dst_addr,
1.92 pk 6168: pa,
1.60 pk 6169: (pte & PPROT_WRITE)
1.92 pk 6170: ? (VM_PROT_WRITE | VM_PROT_READ)
1.60 pk 6171: : VM_PROT_READ,
1.55 pk 6172: 0);
6173: src_addr += NBPG;
6174: dst_addr += NBPG;
6175: }
6176: }
6177: #endif
1.1 deraadt 6178: }
6179:
6180: /*
6181: * Require that all active physical maps contain no
6182: * incorrect entries NOW. [This update includes
6183: * forcing updates of any address map caching.]
6184: */
6185: void
6186: pmap_update()
6187: {
1.55 pk 6188: #if defined(SUN4M)
6189: if (CPU_ISSUN4M)
6190: tlb_flush_all(); /* %%%: Extreme Paranoia? */
6191: #endif
1.1 deraadt 6192: }
6193:
6194: /*
6195: * Garbage collects the physical map system for
6196: * pages which are no longer used.
6197: * Success need not be guaranteed -- that is, there
6198: * may well be pages which are not referenced, but
6199: * others may be collected.
6200: * Called by the pageout daemon when pages are scarce.
6201: */
6202: /* ARGSUSED */
6203: void
6204: pmap_collect(pm)
6205: struct pmap *pm;
6206: {
6207: }
6208:
1.55 pk 6209: #if defined(SUN4) || defined(SUN4C)
6210:
1.1 deraadt 6211: /*
6212: * Clear the modify bit for the given physical page.
6213: */
6214: void
1.55 pk 6215: pmap_clear_modify4_4c(pa)
1.124 pk 6216: paddr_t pa;
1.1 deraadt 6217: {
1.124 pk 6218: struct pvlist *pv;
1.1 deraadt 6219:
1.82 pk 6220: if ((pa & (PMAP_TNC_4 & ~PMAP_NC)) == 0 && managed(pa)) {
1.1 deraadt 6221: pv = pvhead(pa);
1.58 pk 6222: (void) pv_syncflags4_4c(pv);
1.1 deraadt 6223: pv->pv_flags &= ~PV_MOD;
6224: }
6225: }
6226:
6227: /*
6228: * Tell whether the given physical page has been modified.
6229: */
6230: int
1.55 pk 6231: pmap_is_modified4_4c(pa)
1.124 pk 6232: paddr_t pa;
1.1 deraadt 6233: {
1.124 pk 6234: struct pvlist *pv;
1.1 deraadt 6235:
1.82 pk 6236: if ((pa & (PMAP_TNC_4 & ~PMAP_NC)) == 0 && managed(pa)) {
1.1 deraadt 6237: pv = pvhead(pa);
1.58 pk 6238: if (pv->pv_flags & PV_MOD || pv_syncflags4_4c(pv) & PV_MOD)
1.1 deraadt 6239: return (1);
6240: }
6241: return (0);
6242: }
6243:
6244: /*
6245: * Clear the reference bit for the given physical page.
6246: */
6247: void
1.55 pk 6248: pmap_clear_reference4_4c(pa)
1.124 pk 6249: paddr_t pa;
1.1 deraadt 6250: {
1.124 pk 6251: struct pvlist *pv;
1.1 deraadt 6252:
1.82 pk 6253: if ((pa & (PMAP_TNC_4 & ~PMAP_NC)) == 0 && managed(pa)) {
1.1 deraadt 6254: pv = pvhead(pa);
1.58 pk 6255: (void) pv_syncflags4_4c(pv);
1.1 deraadt 6256: pv->pv_flags &= ~PV_REF;
6257: }
6258: }
6259:
6260: /*
6261: * Tell whether the given physical page has been referenced.
6262: */
6263: int
1.55 pk 6264: pmap_is_referenced4_4c(pa)
1.124 pk 6265: paddr_t pa;
1.1 deraadt 6266: {
1.124 pk 6267: struct pvlist *pv;
1.1 deraadt 6268:
1.82 pk 6269: if ((pa & (PMAP_TNC_4 & ~PMAP_NC)) == 0 && managed(pa)) {
1.1 deraadt 6270: pv = pvhead(pa);
1.58 pk 6271: if (pv->pv_flags & PV_REF || pv_syncflags4_4c(pv) & PV_REF)
1.1 deraadt 6272: return (1);
6273: }
6274: return (0);
6275: }
1.55 pk 6276: #endif /*4,4c*/
6277:
1.58 pk 6278: #if defined(SUN4M)
6279:
6280: /*
6281: * 4m versions of bit test/set routines
6282: *
6283: * Note that the 4m-specific routines should eventually service these
6284: * requests from their page tables, and the whole pvlist bit mess should
6285: * be dropped for the 4m (unless this causes a performance hit from
6286: * tracing down pagetables/regmap/segmaps).
6287: */
6288:
1.55 pk 6289: /*
6290: * Clear the modify bit for the given physical page.
6291: */
6292: void
6293: pmap_clear_modify4m(pa) /* XXX %%%: Should service from swpagetbl for 4m */
1.124 pk 6294: paddr_t pa;
1.55 pk 6295: {
1.124 pk 6296: struct pvlist *pv;
1.55 pk 6297:
1.82 pk 6298: if ((pa & (PMAP_TNC_SRMMU & ~PMAP_NC)) == 0 && managed(pa)) {
1.55 pk 6299: pv = pvhead(pa);
1.58 pk 6300: (void) pv_syncflags4m(pv);
1.55 pk 6301: pv->pv_flags &= ~PV_MOD4M;
6302: }
6303: }
6304:
6305: /*
6306: * Tell whether the given physical page has been modified.
6307: */
6308: int
6309: pmap_is_modified4m(pa) /* Test performance with SUN4M && SUN4/4C. XXX */
1.124 pk 6310: paddr_t pa;
1.55 pk 6311: {
1.124 pk 6312: struct pvlist *pv;
1.55 pk 6313:
1.82 pk 6314: if ((pa & (PMAP_TNC_SRMMU & ~PMAP_NC)) == 0 && managed(pa)) {
1.55 pk 6315: pv = pvhead(pa);
6316: if (pv->pv_flags & PV_MOD4M || pv_syncflags4m(pv) & PV_MOD4M)
6317: return(1);
6318: }
6319: return (0);
6320: }
6321:
6322: /*
6323: * Clear the reference bit for the given physical page.
6324: */
6325: void
6326: pmap_clear_reference4m(pa)
1.124 pk 6327: paddr_t pa;
1.55 pk 6328: {
1.124 pk 6329: struct pvlist *pv;
1.55 pk 6330:
1.82 pk 6331: if ((pa & (PMAP_TNC_SRMMU & ~PMAP_NC)) == 0 && managed(pa)) {
1.55 pk 6332: pv = pvhead(pa);
1.58 pk 6333: (void) pv_syncflags4m(pv);
1.55 pk 6334: pv->pv_flags &= ~PV_REF4M;
6335: }
6336: }
6337:
6338: /*
6339: * Tell whether the given physical page has been referenced.
6340: */
6341: int
6342: pmap_is_referenced4m(pa)
1.124 pk 6343: paddr_t pa;
1.55 pk 6344: {
1.124 pk 6345: struct pvlist *pv;
1.55 pk 6346:
1.82 pk 6347: if ((pa & (PMAP_TNC_SRMMU & ~PMAP_NC)) == 0 && managed(pa)) {
1.55 pk 6348: pv = pvhead(pa);
6349: if (pv->pv_flags & PV_REF4M || pv_syncflags4m(pv) & PV_REF4M)
6350: return(1);
6351: }
6352: return (0);
6353: }
6354: #endif /* 4m */
1.1 deraadt 6355:
6356: /*
6357: * Make the specified pages (by pmap, offset) pageable (or not) as requested.
6358: *
6359: * A page which is not pageable may not take a fault; therefore, its page
6360: * table entry must remain valid for the duration (or at least, the trap
6361: * handler must not call vm_fault).
6362: *
6363: * This routine is merely advisory; pmap_enter will specify that these pages
6364: * are to be wired down (or not) as appropriate.
6365: */
6366: /* ARGSUSED */
6367: void
6368: pmap_pageable(pm, start, end, pageable)
6369: struct pmap *pm;
1.124 pk 6370: vaddr_t start, end;
1.1 deraadt 6371: int pageable;
6372: {
1.2 deraadt 6373: }
6374:
6375: /*
1.1 deraadt 6376: * Fill the given MI physical page with zero bytes.
6377: *
6378: * We avoid stomping on the cache.
6379: * XXX might be faster to use destination's context and allow cache to fill?
6380: */
1.55 pk 6381:
6382: #if defined(SUN4) || defined(SUN4C)
6383:
1.1 deraadt 6384: void
1.55 pk 6385: pmap_zero_page4_4c(pa)
1.124 pk 6386: paddr_t pa;
1.1 deraadt 6387: {
1.124 pk 6388: caddr_t va;
6389: int pte;
1.1 deraadt 6390:
1.82 pk 6391: if (((pa & (PMAP_TNC_4 & ~PMAP_NC)) == 0) && managed(pa)) {
1.1 deraadt 6392: /*
6393: * The following might not be necessary since the page
6394: * is being cleared because it is about to be allocated,
6395: * i.e., is in use by no one.
6396: */
1.69 pk 6397: pv_flushcache(pvhead(pa));
1.60 pk 6398: }
6399: pte = PG_V | PG_S | PG_W | PG_NC | (atop(pa) & PG_PFNUM);
1.1 deraadt 6400:
6401: va = vpage[0];
1.55 pk 6402: setpte4(va, pte);
1.1 deraadt 6403: qzero(va, NBPG);
1.55 pk 6404: setpte4(va, 0);
1.1 deraadt 6405: }
6406:
6407: /*
6408: * Copy the given MI physical source page to its destination.
6409: *
6410: * We avoid stomping on the cache as above (with same `XXX' note).
6411: * We must first flush any write-back cache for the source page.
6412: * We go ahead and stomp on the kernel's virtual cache for the
6413: * source page, since the cache can read memory MUCH faster than
6414: * the processor.
6415: */
6416: void
1.55 pk 6417: pmap_copy_page4_4c(src, dst)
1.124 pk 6418: paddr_t src, dst;
1.1 deraadt 6419: {
1.124 pk 6420: caddr_t sva, dva;
6421: int spte, dpte;
1.1 deraadt 6422:
6423: if (managed(src)) {
1.69 pk 6424: if (CACHEINFO.c_vactype == VAC_WRITEBACK)
1.1 deraadt 6425: pv_flushcache(pvhead(src));
1.60 pk 6426: }
6427: spte = PG_V | PG_S | (atop(src) & PG_PFNUM);
1.1 deraadt 6428:
6429: if (managed(dst)) {
6430: /* similar `might not be necessary' comment applies */
1.69 pk 6431: if (CACHEINFO.c_vactype != VAC_NONE)
1.1 deraadt 6432: pv_flushcache(pvhead(dst));
1.60 pk 6433: }
6434: dpte = PG_V | PG_S | PG_W | PG_NC | (atop(dst) & PG_PFNUM);
1.1 deraadt 6435:
6436: sva = vpage[0];
6437: dva = vpage[1];
1.55 pk 6438: setpte4(sva, spte);
6439: setpte4(dva, dpte);
1.1 deraadt 6440: qcopy(sva, dva, NBPG); /* loads cache, so we must ... */
1.69 pk 6441: cache_flush_page((int)sva);
1.55 pk 6442: setpte4(sva, 0);
6443: setpte4(dva, 0);
6444: }
6445: #endif /* 4, 4c */
6446:
6447: #if defined(SUN4M) /* Sun4M version of copy/zero routines */
6448: /*
6449: * Fill the given MI physical page with zero bytes.
6450: *
6451: * We avoid stomping on the cache.
6452: * XXX might be faster to use destination's context and allow cache to fill?
6453: */
6454: void
6455: pmap_zero_page4m(pa)
1.124 pk 6456: paddr_t pa;
1.55 pk 6457: {
1.124 pk 6458: caddr_t va;
6459: int pte;
1.55 pk 6460:
1.82 pk 6461: if (((pa & (PMAP_TNC_SRMMU & ~PMAP_NC)) == 0) && managed(pa)) {
1.55 pk 6462: /*
6463: * The following might not be necessary since the page
6464: * is being cleared because it is about to be allocated,
6465: * i.e., is in use by no one.
6466: */
1.69 pk 6467: if (CACHEINFO.c_vactype != VAC_NONE)
1.55 pk 6468: pv_flushcache(pvhead(pa));
1.60 pk 6469: }
1.68 abrown 6470: pte = (SRMMU_TEPTE | PPROT_S | PPROT_WRITE |
6471: (atop(pa) << SRMMU_PPNSHIFT));
1.69 pk 6472: if (cpuinfo.flags & CPUFLG_CACHE_MANDATORY)
1.68 abrown 6473: pte |= SRMMU_PG_C;
6474: else
6475: pte &= ~SRMMU_PG_C;
6476:
1.55 pk 6477: va = vpage[0];
1.101 pk 6478: *vpage_pte[0] = pte;
1.55 pk 6479: qzero(va, NBPG);
1.101 pk 6480: /* Remove temporary mapping */
6481: tlb_flush_page((int)va);
6482: *vpage_pte[0] = SRMMU_TEINVALID;
1.55 pk 6483: }
6484:
6485: /*
6486: * Copy the given MI physical source page to its destination.
6487: *
6488: * We avoid stomping on the cache as above (with same `XXX' note).
6489: * We must first flush any write-back cache for the source page.
6490: * We go ahead and stomp on the kernel's virtual cache for the
6491: * source page, since the cache can read memory MUCH faster than
6492: * the processor.
6493: */
6494: void
6495: pmap_copy_page4m(src, dst)
1.124 pk 6496: paddr_t src, dst;
1.55 pk 6497: {
1.124 pk 6498: caddr_t sva, dva;
6499: int spte, dpte;
1.55 pk 6500:
6501: if (managed(src)) {
1.69 pk 6502: if (CACHEINFO.c_vactype == VAC_WRITEBACK)
1.55 pk 6503: pv_flushcache(pvhead(src));
1.60 pk 6504: }
6505: spte = SRMMU_TEPTE | SRMMU_PG_C | PPROT_S |
6506: (atop(src) << SRMMU_PPNSHIFT);
1.55 pk 6507:
6508: if (managed(dst)) {
6509: /* similar `might not be necessary' comment applies */
1.69 pk 6510: if (CACHEINFO.c_vactype != VAC_NONE)
1.55 pk 6511: pv_flushcache(pvhead(dst));
1.60 pk 6512: }
1.68 abrown 6513: dpte = (SRMMU_TEPTE | PPROT_S | PPROT_WRITE |
6514: (atop(dst) << SRMMU_PPNSHIFT));
1.69 pk 6515: if (cpuinfo.flags & CPUFLG_CACHE_MANDATORY)
1.68 abrown 6516: dpte |= SRMMU_PG_C;
6517: else
6518: dpte &= ~SRMMU_PG_C;
1.60 pk 6519:
1.55 pk 6520: sva = vpage[0];
6521: dva = vpage[1];
1.101 pk 6522: *vpage_pte[0] = spte;
6523: *vpage_pte[1] = dpte;
1.55 pk 6524: qcopy(sva, dva, NBPG); /* loads cache, so we must ... */
1.69 pk 6525: cache_flush_page((int)sva);
1.101 pk 6526: *vpage_pte[0] = SRMMU_TEINVALID;
6527: *vpage_pte[1] = SRMMU_TEINVALID;
6528: tlb_flush_page((int)sva);
6529: tlb_flush_page((int)dva);
1.1 deraadt 6530: }
1.55 pk 6531: #endif /* Sun4M */
1.1 deraadt 6532:
6533: /*
6534: * Turn a cdevsw d_mmap value into a byte address for pmap_enter.
6535: * XXX this should almost certainly be done differently, and
6536: * elsewhere, or even not at all
6537: */
1.124 pk 6538: paddr_t
1.1 deraadt 6539: pmap_phys_address(x)
6540: int x;
6541: {
6542:
1.124 pk 6543: return ((paddr_t)x);
1.1 deraadt 6544: }
6545:
6546: /*
6547: * Turn off cache for a given (va, number of pages).
6548: *
6549: * We assert PG_NC (or, on sun4m, clear SRMMU_PG_C) in each PTE; the
6550: * addresses must reside in locked kernel space. A cache flush is also done.
6551: */
1.53 christos 6552: void
1.1 deraadt 6553: kvm_uncache(va, npages)
1.115 pk 6554: caddr_t va;
6555: int npages;
1.1 deraadt 6556: {
1.115 pk 6557: int pte;
1.124 pk 6558: paddr_t pa;
1.88 pk 6559:
1.55 pk 6560: if (CPU_ISSUN4M) {
6561: #if defined(SUN4M)
1.100 pk 6562: int ctx = getcontext4m();
6563:
6564: setcontext4m(0);
1.55 pk 6565: for (; --npages >= 0; va += NBPG) {
1.124 pk 6566: pte = getpte4m((vaddr_t) va);
1.55 pk 6567: if ((pte & SRMMU_TETYPE) != SRMMU_TEPTE)
6568: panic("kvm_uncache: table entry not pte");
1.115 pk 6569:
1.128 pk 6570: if ((pte & SRMMU_PGTYPE) == PG_SUN4M_OBMEM)
6571: cache_flush_page((int)va);
1.115 pk 6572: pa = ptoa((pte & SRMMU_PPNMASK) >> SRMMU_PPNSHIFT);
6573: if ((pte & SRMMU_PGTYPE) == PG_SUN4M_OBMEM &&
6574: managed(pa)) {
6575: pv_changepte4m(pvhead(pa), 0, SRMMU_PG_C);
6576: }
1.116 pk 6577: pte &= ~SRMMU_PG_C;
1.124 pk 6578: setpte4m((vaddr_t) va, pte);
1.116 pk 6579:
1.55 pk 6580: }
1.100 pk 6581: setcontext4m(ctx);
1.55 pk 6582: #endif
6583: } else {
6584: #if defined(SUN4) || defined(SUN4C)
6585: for (; --npages >= 0; va += NBPG) {
6586: pte = getpte4(va);
6587: if ((pte & PG_V) == 0)
6588: panic("kvm_uncache !pg_v");
1.115 pk 6589:
6590: pa = ptoa(pte & PG_PFNUM);
6591: if ((pte & PG_TYPE) == PG_OBMEM &&
6592: managed(pa)) {
6593: pv_changepte4_4c(pvhead(pa), PG_NC, 0);
6594: }
1.116 pk 6595: pte |= PG_NC;
6596: setpte4(va, pte);
6597: if ((pte & PG_TYPE) == PG_OBMEM)
6598: cache_flush_page((int)va);
1.55 pk 6599: }
6600: #endif
1.1 deraadt 6601: }
1.21 deraadt 6602: }
6603:
1.46 pk 6604: /*
6605: * Turn on IO cache for a given (va, number of pages).
6606: *
6607: * We just assert PG_IOC for each PTE; the addresses must reside
6608: * in locked kernel space. Unlike kvm_uncache(), no cache flush is done.
6609: */
1.53 christos 6610: void
1.46 pk 6611: kvm_iocache(va, npages)
1.124 pk 6612: caddr_t va;
6613: int npages;
1.46 pk 6614: {
6615:
1.55 pk 6616: #ifdef SUN4M
6617: if (CPU_ISSUN4M) /* %%%: Implement! */
6618: panic("kvm_iocache: 4m iocache not implemented");
6619: #endif
6620: #if defined(SUN4) || defined(SUN4C)
1.46 pk 6621: for (; --npages >= 0; va += NBPG) {
1.124 pk 6622: int pte = getpte4(va);
1.46 pk 6623: if ((pte & PG_V) == 0)
6624: panic("kvm_iocache !pg_v");
6625: pte |= PG_IOC;
1.55 pk 6626: setpte4(va, pte);
1.46 pk 6627: }
1.55 pk 6628: #endif
1.46 pk 6629: }
6630:
1.21 deraadt 6631: int
6632: pmap_count_ptes(pm)
1.124 pk 6633: struct pmap *pm;
1.21 deraadt 6634: {
1.124 pk 6635: int idx, total;
6636: struct regmap *rp;
6637: struct segmap *sp;
1.21 deraadt 6638:
1.43 pk 6639: if (pm == pmap_kernel()) {
6640: rp = &pm->pm_regmap[NUREG];
6641: idx = NKREG;
6642: } else {
6643: rp = pm->pm_regmap;
6644: idx = NUREG;
6645: }
1.21 deraadt 6646: for (total = 0; idx;)
1.43 pk 6647: if ((sp = rp[--idx].rg_segmap) != NULL)
6648: total += sp->sg_npte;
1.21 deraadt 6649: pm->pm_stats.resident_count = total;
6650: return (total);
1.24 pk 6651: }
6652:
6653: /*
1.51 gwr 6654: * Find first virtual address >= *va that is
6655: * least likely to cause cache aliases.
6656: * (This will just seg-align mappings.)
1.24 pk 6657: */
1.51 gwr 6658: void
1.52 pk 6659: pmap_prefer(foff, vap)
1.124 pk 6660: vaddr_t foff;
6661: vaddr_t *vap;
1.24 pk 6662: {
1.124 pk 6663: vaddr_t va = *vap;
6664: long d, m;
1.52 pk 6665:
6666: if (VA_INHOLE(va))
6667: va = MMU_HOLE_END;
1.24 pk 6668:
1.48 pk 6669: m = CACHE_ALIAS_DIST;
6670: if (m == 0) /* m=0 => no cache aliasing */
1.51 gwr 6671: return;
1.24 pk 6672:
1.52 pk 6673: d = foff - va;
6674: d &= (m - 1);
6675: *vap = va + d;
1.23 deraadt 6676: }
6677:
1.53 christos 6678: void
1.23 deraadt 6679: pmap_redzone()
6680: {
1.100 pk 6681: pmap_remove(pmap_kernel(), KERNBASE, KERNBASE+NBPG);
1.104 thorpej 6682: }
6683:
6684: /*
6685: * Activate the address space for the specified process. If the
6686: * process is the current process, load the new MMU context.
6687: */
6688: void
6689: pmap_activate(p)
6690: struct proc *p;
6691: {
6692: pmap_t pmap = p->p_vmspace->vm_map.pmap;
6693: int s;
6694:
6695: /*
6696: * This is essentially the same thing that happens in cpu_switch()
6697: * when the newly selected process is about to run, except that we
6698: * have to make sure to clean the register windows before we set
6699: * the new context.
6700: */
6701:
6702: s = splpmap();
6703: if (p == curproc) {
6704: write_user_windows();
1.123 pk 6705: if (pmap->pm_ctx == NULL) {
1.104 thorpej 6706: ctx_alloc(pmap); /* performs setcontext() */
1.123 pk 6707: } else {
6708: /* Do any cache flush needed on context switch */
6709: (*cpuinfo.pure_vcache_flush)();
1.104 thorpej 6710: setcontext(pmap->pm_ctxnum);
1.123 pk 6711: }
1.104 thorpej 6712: }
6713: splx(s);
6714: }
6715:
6716: /*
6717: * Deactivate the address space of the specified process.
6718: */
6719: void
6720: pmap_deactivate(p)
6721: struct proc *p;
6722: {
1.1 deraadt 6723: }
1.43 pk 6724:
6725: #ifdef DEBUG
6726: /*
6727: * Check consistency of a pmap (time consuming!).
6728: */
1.53 christos 6729: void
1.43 pk 6730: pm_check(s, pm)
6731: char *s;
6732: struct pmap *pm;
6733: {
6734: if (pm == pmap_kernel())
6735: pm_check_k(s, pm);
6736: else
6737: pm_check_u(s, pm);
6738: }
6739:
1.53 christos 6740: void
1.43 pk 6741: pm_check_u(s, pm)
6742: char *s;
6743: struct pmap *pm;
6744: {
6745: struct regmap *rp;
6746: struct segmap *sp;
6747: int n, vs, vr, j, m, *pte;
6748:
1.55 pk 6749: if (pm->pm_regmap == NULL)
1.72 pk 6750: panic("%s: CHK(pmap %p): no region mapping", s, pm);
1.55 pk 6751:
6752: #if defined(SUN4M)
6753: if (CPU_ISSUN4M &&
6754: (pm->pm_reg_ptps == NULL ||
6755: pm->pm_reg_ptps_pa != VA2PA((caddr_t)pm->pm_reg_ptps)))
1.72 pk 6756: panic("%s: CHK(pmap %p): no SRMMU region table or bad pa: "
6757: "tblva=%p, tblpa=0x%x",
6758: s, pm, pm->pm_reg_ptps, pm->pm_reg_ptps_pa);
1.55 pk 6759:
6760: if (CPU_ISSUN4M && pm->pm_ctx != NULL &&
1.69 pk 6761: (cpuinfo.ctx_tbl[pm->pm_ctxnum] != ((VA2PA((caddr_t)pm->pm_reg_ptps)
1.55 pk 6762: >> SRMMU_PPNPASHIFT) |
6763: SRMMU_TEPTD)))
1.91 fair 6764: panic("%s: CHK(pmap %p): SRMMU region table at 0x%x not installed "
1.55 pk 6765: "for context %d", s, pm, pm->pm_reg_ptps_pa, pm->pm_ctxnum);
6766: #endif
6767:
1.43 pk 6768: for (vr = 0; vr < NUREG; vr++) {
6769: rp = &pm->pm_regmap[vr];
6770: if (rp->rg_nsegmap == 0)
6771: continue;
6772: if (rp->rg_segmap == NULL)
6773: panic("%s: CHK(vr %d): nsegmap = %d; sp==NULL",
6774: s, vr, rp->rg_nsegmap);
1.55 pk 6775: #if defined(SUN4M)
6776: if (CPU_ISSUN4M && rp->rg_seg_ptps == NULL)
6777: panic("%s: CHK(vr %d): nsegmap=%d; no SRMMU segment table",
6778: s, vr, rp->rg_nsegmap);
6779: if (CPU_ISSUN4M &&
6780: pm->pm_reg_ptps[vr] != ((VA2PA((caddr_t)rp->rg_seg_ptps) >>
6781: SRMMU_PPNPASHIFT) | SRMMU_TEPTD))
6782: panic("%s: CHK(vr %d): SRMMU segtbl not installed",s,vr);
6783: #endif
1.43 pk 6784: if ((unsigned int)rp < KERNBASE)
1.54 christos 6785: panic("%s: rp=%p", s, rp);
1.43 pk 6786: n = 0;
6787: for (vs = 0; vs < NSEGRG; vs++) {
6788: sp = &rp->rg_segmap[vs];
6789: if ((unsigned int)sp < KERNBASE)
1.54 christos 6790: panic("%s: sp=%p", s, sp);
1.43 pk 6791: if (sp->sg_npte != 0) {
6792: n++;
6793: if (sp->sg_pte == NULL)
6794: panic("%s: CHK(vr %d, vs %d): npte=%d, "
6795: "pte=NULL", s, vr, vs, sp->sg_npte);
1.55 pk 6796: #if defined(SUN4M)
6797: if (CPU_ISSUN4M &&
6798: rp->rg_seg_ptps[vs] !=
6799: ((VA2PA((caddr_t)sp->sg_pte)
6800: >> SRMMU_PPNPASHIFT) |
6801: SRMMU_TEPTD))
6802: panic("%s: CHK(vr %d, vs %d): SRMMU page "
6803: "table not installed correctly",s,vr,
6804: vs);
6805: #endif
1.43 pk 6806: pte=sp->sg_pte;
6807: m = 0;
6808: for (j=0; j<NPTESG; j++,pte++)
1.55 pk 6809: if ((CPU_ISSUN4M
6810: ?((*pte & SRMMU_TETYPE) == SRMMU_TEPTE)
6811: :(*pte & PG_V)))
6812: m++;
1.43 pk 6813: if (m != sp->sg_npte)
6814: /*if (pmapdebug & 0x10000)*/
1.66 christos 6815: printf("%s: user CHK(vr %d, vs %d): "
1.43 pk 6816: "npte(%d) != # valid(%d)\n",
6817: s, vr, vs, sp->sg_npte, m);
6818: }
6819: }
6820: if (n != rp->rg_nsegmap)
6821: panic("%s: CHK(vr %d): inconsistent "
6822: "# of pte's: %d, should be %d",
6823: s, vr, rp->rg_nsegmap, n);
6824: }
1.53 christos 6825: return;
1.43 pk 6826: }
6827:
1.53 christos 6828: void
1.55 pk 6829: pm_check_k(s, pm) /* Note: not as extensive as pm_check_u. */
1.43 pk 6830: char *s;
6831: struct pmap *pm;
6832: {
6833: struct regmap *rp;
6834: int vr, vs, n;
6835:
1.55 pk 6836: if (pm->pm_regmap == NULL)
1.122 pk 6837: panic("%s: CHK(pmap %p): no region mapping", s, pm);
1.55 pk 6838:
6839: #if defined(SUN4M)
6840: if (CPU_ISSUN4M &&
6841: (pm->pm_reg_ptps == NULL ||
6842: pm->pm_reg_ptps_pa != VA2PA((caddr_t)pm->pm_reg_ptps)))
1.91 fair 6843: panic("%s: CHK(pmap %p): no SRMMU region table or bad pa: tblva=%p, tblpa=0x%x",
1.55 pk 6844: s, pm, pm->pm_reg_ptps, pm->pm_reg_ptps_pa);
6845:
6846: if (CPU_ISSUN4M &&
1.69 pk 6847: (cpuinfo.ctx_tbl[0] != ((VA2PA((caddr_t)pm->pm_reg_ptps) >>
1.55 pk 6848: SRMMU_PPNPASHIFT) | SRMMU_TEPTD)))
1.91 fair 6849: panic("%s: CHK(pmap %p): SRMMU region table at 0x%x not installed "
1.55 pk 6850: "for context %d", s, pm, pm->pm_reg_ptps_pa, 0);
6851: #endif
1.43 pk 6852: for (vr = NUREG; vr < NUREG+NKREG; vr++) {
6853: rp = &pm->pm_regmap[vr];
6854: if (rp->rg_segmap == NULL)
6855: panic("%s: CHK(vr %d): nsegmap = %d; sp==NULL",
6856: s, vr, rp->rg_nsegmap);
6857: if (rp->rg_nsegmap == 0)
6858: continue;
1.55 pk 6859: #if defined(SUN4M)
6860: if (CPU_ISSUN4M && rp->rg_seg_ptps == NULL)
6861: panic("%s: CHK(vr %d): nsegmap=%d; no SRMMU segment table",
6862: s, vr, rp->rg_nsegmap);
6863: if (CPU_ISSUN4M &&
6864: pm->pm_reg_ptps[vr] != ((VA2PA((caddr_t)rp->rg_seg_ptps) >>
6865: SRMMU_PPNPASHIFT) | SRMMU_TEPTD))
6866: panic("%s: CHK(vr %d): SRMMU segtbl not installed",s,vr);
6867: #endif
1.72 pk 6868: if (CPU_ISSUN4M) {
6869: n = NSEGRG;
6870: } else {
6871: for (n = 0, vs = 0; vs < NSEGRG; vs++) {
6872: if (rp->rg_segmap[vs].sg_npte)
6873: n++;
6874: }
1.43 pk 6875: }
6876: if (n != rp->rg_nsegmap)
1.66 christos 6877: printf("%s: kernel CHK(vr %d): inconsistent "
1.43 pk 6878: "# of pte's: %d, should be %d\n",
6879: s, vr, rp->rg_nsegmap, n);
6880: }
1.53 christos 6881: return;
1.43 pk 6882: }
6883: #endif
1.46 pk 6884:
6885: /*
1.98 pk 6886: * Return the number of disk blocks that pmap_dumpmmu() will dump.
1.46 pk 6887: */
6888: int
6889: pmap_dumpsize()
6890: {
1.98 pk 6891: int sz;
1.67 pk 6892:
6893: sz = ALIGN(sizeof(kcore_seg_t)) + ALIGN(sizeof(cpu_kcore_hdr_t));
6894: sz += npmemarr * sizeof(phys_ram_seg_t);
1.98 pk 6895: sz += sizeof(kernel_segmap_store);
1.55 pk 6896:
6897: if (CPU_ISSUN4OR4C)
1.98 pk 6898: /* For each pmeg in the MMU, we'll write NPTESG PTEs. */
1.67 pk 6899: sz += (seginval + 1) * NPTESG * sizeof(int);
6900:
1.98 pk 6901: return btodb(sz + DEV_BSIZE - 1);
1.46 pk 6902: }
6903:
6904: /*
1.98 pk 6905: * Write the core dump headers and MD data to the dump device.
6906: * We dump the following items:
6907: *
6908: * kcore_seg_t MI header defined in <sys/kcore.h>)
6909: * cpu_kcore_hdr_t MD header defined in <machine/kcore.h>)
6910: * phys_ram_seg_t[npmemarr] physical memory segments
6911: * segmap_t[NKREG*NSEGRG] the kernel's segment map
6912: * the MMU pmegs on sun4/sun4c
1.46 pk 6913: */
6914: int
6915: pmap_dumpmmu(dump, blkno)
1.124 pk 6916: daddr_t blkno;
6917: int (*dump) __P((dev_t, daddr_t, caddr_t, size_t));
1.46 pk 6918: {
1.67 pk 6919: kcore_seg_t *ksegp;
6920: cpu_kcore_hdr_t *kcpup;
6921: phys_ram_seg_t memseg;
1.124 pk 6922: int error = 0;
6923: int i, memsegoffset, segmapoffset, pmegoffset;
1.67 pk 6924: int buffer[dbtob(1) / sizeof(int)];
6925: int *bp, *ep;
1.55 pk 6926: #if defined(SUN4C) || defined(SUN4)
1.124 pk 6927: int pmeg;
1.55 pk 6928: #endif
1.46 pk 6929:
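/*
 * EXPEDITE copies `n' bytes worth of ints from `p' into the local block
 * buffer; each time the buffer fills one disk block it is written to the
 * dump device at `blkno' (which is then advanced) before copying resumes.
 */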
1.67 pk 6930: #define EXPEDITE(p,n) do { \
6931: int *sp = (int *)(p); \
6932: int sz = (n); \
6933: while (sz > 0) { \
6934: *bp++ = *sp++; \
6935: if (bp >= ep) { \
6936: error = (*dump)(dumpdev, blkno, \
6937: (caddr_t)buffer, dbtob(1)); \
6938: if (error != 0) \
6939: return (error); \
6940: ++blkno; \
6941: bp = buffer; \
6942: } \
6943: sz -= 4; \
6944: } \
6945: } while (0)
6946:
6947: setcontext(0);
6948:
6949: /* Setup bookkeeping pointers */
6950: bp = buffer;
6951: ep = &buffer[sizeof(buffer) / sizeof(buffer[0])];
6952:
6953: /* Fill in MI segment header */
6954: ksegp = (kcore_seg_t *)bp;
6955: CORE_SETMAGIC(*ksegp, KCORE_MAGIC, MID_MACHINE, CORE_CPU);
1.98 pk 6956: ksegp->c_size = dbtob(pmap_dumpsize()) - ALIGN(sizeof(kcore_seg_t));
1.67 pk 6957:
6958: /* Fill in MD segment header (interpreted by MD part of libkvm) */
6959: kcpup = (cpu_kcore_hdr_t *)((int)bp + ALIGN(sizeof(kcore_seg_t)));
6960: kcpup->cputype = cputyp;
1.98 pk 6961: kcpup->kernbase = KERNBASE;
1.67 pk 6962: kcpup->nmemseg = npmemarr;
6963: kcpup->memsegoffset = memsegoffset = ALIGN(sizeof(cpu_kcore_hdr_t));
1.98 pk 6964: kcpup->nsegmap = NKREG*NSEGRG;
6965: kcpup->segmapoffset = segmapoffset =
6966: memsegoffset + npmemarr * sizeof(phys_ram_seg_t);
6967:
1.67 pk 6968: kcpup->npmeg = (CPU_ISSUN4OR4C) ? seginval + 1 : 0;
6969: kcpup->pmegoffset = pmegoffset =
1.98 pk 6970: segmapoffset + kcpup->nsegmap * sizeof(struct segmap);
1.67 pk 6971:
6972: /* Note: we have assumed everything fits in buffer[] so far... */
1.98 pk 6973: bp = (int *)((int)kcpup + ALIGN(sizeof(cpu_kcore_hdr_t)));
1.67 pk 6974:
1.98 pk 6975: #if 0
1.67 pk 6976: /* Align storage for upcoming quad-aligned segment array */
6977: while (bp != (int *)ALIGN(bp)) {
6978: int dummy = 0;
6979: EXPEDITE(&dummy, 4);
6980: }
1.98 pk 6981: #endif
6982:
1.67 pk 6983: for (i = 0; i < npmemarr; i++) {
6984: memseg.start = pmemarr[i].addr;
6985: memseg.size = pmemarr[i].len;
6986: EXPEDITE(&memseg, sizeof(phys_ram_seg_t));
6987: }
1.98 pk 6988:
6989: EXPEDITE(&kernel_segmap_store, sizeof(kernel_segmap_store));
1.67 pk 6990:
6991: if (CPU_ISSUN4M)
6992: goto out;
1.55 pk 6993:
6994: #if defined(SUN4C) || defined(SUN4)
1.46 pk 6995: /*
6996: * Dump the hardware page table entries (pmegs).
6997: *
6998: * We dump each pmeg in order (by pmeg number). Since the MMU
6999: * translates a virtual segment to a pmeg through the segment map,
7000: * we install each pmeg in turn in an otherwise unused virtual
7001: * segment (at VA 0) and read its PTEs with getpte() through that
7002: * fixed virtual address; the slot is restored to seginval when done.
7003: */
1.55 pk 7004:
1.46 pk 7005: /*
7006: * Go through the pmegs and dump each one.
7007: */
7008: for (pmeg = 0; pmeg <= seginval; ++pmeg) {
1.124 pk 7009: int va = 0;
1.46 pk 7010:
7011: setsegmap(va, pmeg);
7012: i = NPTESG;
7013: do {
1.67 pk 7014: int pte = getpte4(va);
7015: EXPEDITE(&pte, sizeof(pte));
1.46 pk 7016: va += NBPG;
7017: } while (--i > 0);
7018: }
7019: setsegmap(0, seginval);
1.67 pk 7020: #endif
1.46 pk 7021:
1.67 pk 7022: out:
7023: if (bp != buffer)
1.46 pk 7024: error = (*dump)(dumpdev, blkno++, (caddr_t)buffer, dbtob(1));
7025:
7026: return (error);
1.92 pk 7027: }
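/*
 * A sketch of how a dump consumer (e.g. the sparc libkvm backend) is
 * expected to use the offsets recorded above.  All of them are relative to
 * the start of the cpu_kcore_hdr_t, since memsegoffset begins at
 * ALIGN(sizeof(cpu_kcore_hdr_t)).  The variable names below are
 * illustrative only:
 *
 *	cpu_kcore_hdr_t *h;			   -- start of the MD header
 *	phys_ram_seg_t *memsegs =
 *	    (phys_ram_seg_t *)((char *)h + h->memsegoffset);
 *	struct segmap *segmaps =
 *	    (struct segmap *)((char *)h + h->segmapoffset);
 *	int *pmegs =
 *	    (int *)((char *)h + h->pmegoffset);	   -- sun4/sun4c only
 */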
7028:
7029: /*
7030: * Helper function for debuggers: write one byte through a temporarily
 * writable mapping of an otherwise read-only page.
7031: */
7032: void
7033: pmap_writetext(dst, ch)
7034: unsigned char *dst;
7035: int ch;
7036: {
1.95 pk 7037: int s, pte0, pte, ctx;
1.124 pk 7038: vaddr_t va;
1.92 pk 7039:
7040: s = splpmap();
7041: va = (unsigned long)dst & (~PGOFSET);
7042: cpuinfo.cache_flush(dst, 1);
7043:
1.95 pk 7044: ctx = getcontext();
7045: setcontext(0);
7046:
1.92 pk 7047: #if defined(SUN4M)
7048: if (CPU_ISSUN4M) {
7049: pte0 = getpte4m(va);
7050: if ((pte0 & SRMMU_TETYPE) != SRMMU_TEPTE) {
setcontext(ctx); /* restore the context saved above before bailing out */
7051: splx(s);
7052: return;
7053: }
7054: pte = pte0 | PPROT_WRITE;
7055: setpte4m(va, pte);
7056: *dst = (unsigned char)ch;
7057: setpte4m(va, pte0);
7058:
7059: }
7060: #endif
7061: #if defined(SUN4) || defined(SUN4C)
7062: if (CPU_ISSUN4C || CPU_ISSUN4) {
7063: pte0 = getpte4(va);
7064: if ((pte0 & PG_V) == 0) {
setcontext(ctx); /* restore the context saved above before bailing out */
7065: splx(s);
7066: return;
7067: }
7068: pte = pte0 | PG_W;
7069: setpte4(va, pte);
7070: *dst = (unsigned char)ch;
7071: setpte4(va, pte0);
7072: }
7073: #endif
7074: cpuinfo.cache_flush(dst, 1);
1.95 pk 7075: setcontext(ctx);
1.92 pk 7076: splx(s);
1.55 pk 7077: }
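/*
 * The routine above temporarily turns on write permission for the page
 * containing `dst', patches a single byte, restores the original PTE and
 * flushes the cache on either side of the write.  A typical (hypothetical)
 * caller would be kernel-debugger breakpoint code, e.g.:
 *
 *	pmap_writetext((unsigned char *)textaddr, patch_byte);
 *
 * where `textaddr' and `patch_byte' stand for the instruction address being
 * patched and the byte to store there.
 */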
7078:
7079: #ifdef EXTREME_DEBUG
7080:
7081: static void test_region __P((int, int, int));
7082:
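/*
 * Dump the sun4m context table, the context-0 region table and a few PROM
 * vectors to the console, then cross-check the hardware MMU translation
 * against the software table walk (VA2PAsw) for regions 0xfe, 0xff and the
 * kernel region.
 */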
7083: void
7084: debug_pagetables()
7085: {
1.124 pk 7086: int i;
7087: int *regtbl;
7088: int te;
1.55 pk 7089:
1.66 christos 7090: printf("\nncontext=%d. ",ncontext);
7091: printf("Context table is at va 0x%x. Level 0 PTP: 0x%x\n",
1.69 pk 7092: cpuinfo.ctx_tbl, cpuinfo.ctx_tbl[0]);
1.66 christos 7093: printf("Context 0 region table is at va 0x%x, pa 0x%x. Contents:\n",
1.55 pk 7094: pmap_kernel()->pm_reg_ptps, pmap_kernel()->pm_reg_ptps_pa);
7095:
7096: regtbl = pmap_kernel()->pm_reg_ptps;
7097:
1.66 christos 7098: printf("PROM vector is at 0x%x\n",promvec);
7099: printf("PROM reboot routine is at 0x%x\n",promvec->pv_reboot);
7100: printf("PROM abort routine is at 0x%x\n",promvec->pv_abort);
7101: printf("PROM halt routine is at 0x%x\n",promvec->pv_halt);
1.55 pk 7102:
1.66 christos 7103: printf("Testing region 0xfe: ");
1.55 pk 7104: test_region(0xfe,0,16*1024*1024);
1.66 christos 7105: printf("Testing region 0xff: ");
1.55 pk 7106: test_region(0xff,0,16*1024*1024);
1.96 pk 7107: printf("Testing kernel region 0x%x: ", VA_VREG(KERNBASE));
7108: test_region(VA_VREG(KERNBASE), 4096, avail_start);
1.55 pk 7109: cngetc();
7110:
7111: for (i = 0; i < SRMMU_L1SIZE; i++) {
7112: te = regtbl[i];
7113: if ((te & SRMMU_TETYPE) == SRMMU_TEINVALID)
7114: continue;
1.66 christos 7115: printf("Region 0x%x: PTE=0x%x <%s> L2PA=0x%x kernL2VA=0x%x\n",
1.55 pk 7116: i, te, ((te & SRMMU_TETYPE) == SRMMU_TEPTE ? "pte" :
7117: ((te & SRMMU_TETYPE) == SRMMU_TEPTD ? "ptd" :
7118: ((te & SRMMU_TETYPE) == SRMMU_TEINVALID ?
7119: "invalid" : "reserved"))),
7120: (te & ~0x3) << SRMMU_PPNPASHIFT,
7121: pmap_kernel()->pm_regmap[i].rg_seg_ptps);
7122: }
1.66 christos 7123: printf("Press q to halt...\n");
1.55 pk 7124: if (cngetc()=='q')
7125: callrom();
7126: }
7127:
7128: static u_int
7129: VA2PAsw(ctx, addr, pte)
1.124 pk 7130: int ctx;
7131: caddr_t addr;
1.55 pk 7132: int *pte;
7133: {
1.124 pk 7134: int *curtbl;
7135: int curpte;
1.55 pk 7136:
7137: #ifdef EXTREME_EXTREME_DEBUG
1.66 christos 7138: printf("Looking up addr 0x%x in context 0x%x\n",addr,ctx);
1.55 pk 7139: #endif
7140: /* L0 */
1.69 pk 7141: *pte = curpte = cpuinfo.ctx_tbl[ctx];
1.55 pk 7142: #ifdef EXTREME_EXTREME_DEBUG
1.66 christos 7143: printf("Got L0 pte 0x%x\n",curpte);
1.55 pk 7144: #endif
7145: if ((curpte & SRMMU_TETYPE) == SRMMU_TEPTE) {
7146: return (((curpte & SRMMU_PPNMASK) << SRMMU_PPNPASHIFT) |
7147: ((u_int)addr & 0xffffffff));
7148: }
7149: if ((curpte & SRMMU_TETYPE) != SRMMU_TEPTD) {
1.66 christos 7150: printf("Bad context table entry 0x%x for context 0x%x\n",
1.55 pk 7151: curpte, ctx);
7152: return 0;
7153: }
7154: /* L1 */
1.96 pk 7155: curtbl = ((curpte & ~0x3) << 4) | KERNBASE; /* correct for krn*/
1.55 pk 7156: *pte = curpte = curtbl[VA_VREG(addr)];
7157: #ifdef EXTREME_EXTREME_DEBUG
1.66 christos 7158: printf("L1 table at 0x%x.\nGot L1 pte 0x%x\n",curtbl,curpte);
1.55 pk 7159: #endif
7160: if ((curpte & SRMMU_TETYPE) == SRMMU_TEPTE)
7161: return (((curpte & SRMMU_PPNMASK) << SRMMU_PPNPASHIFT) |
7162: ((u_int)addr & 0xffffff));
7163: if ((curpte & SRMMU_TETYPE) != SRMMU_TEPTD) {
1.66 christos 7164: printf("Bad region table entry 0x%x for region 0x%x\n",
1.55 pk 7165: curpte, VA_VREG(addr));
7166: return 0;
7167: }
7168: /* L2 */
1.96 pk 7169: curtbl = ((curpte & ~0x3) << 4) | KERNBASE; /* correct for krn*/
1.55 pk 7170: *pte = curpte = curtbl[VA_VSEG(addr)];
7171: #ifdef EXTREME_EXTREME_DEBUG
1.66 christos 7172: printf("L2 table at 0x%x.\nGot L2 pte 0x%x\n",curtbl,curpte);
1.55 pk 7173: #endif
7174: if ((curpte & SRMMU_TETYPE) == SRMMU_TEPTE)
7175: return (((curpte & SRMMU_PPNMASK) << SRMMU_PPNPASHIFT) |
7176: ((u_int)addr & 0x3ffff));
7177: if ((curpte & SRMMU_TETYPE) != SRMMU_TEPTD) {
1.66 christos 7178: printf("Bad segment table entry 0x%x for reg 0x%x, seg 0x%x\n",
1.55 pk 7179: curpte, VA_VREG(addr), VA_VSEG(addr));
7180: return 0;
7181: }
7182: /* L3 */
1.96 pk 7183: curtbl = ((curpte & ~0x3) << 4) | KERNBASE; /* correct for krn*/
1.55 pk 7184: *pte = curpte = curtbl[VA_VPG(addr)];
7185: #ifdef EXTREME_EXTREME_DEBUG
1.66 christos 7186: printf("L3 table at 0x%x.\nGot L3 pte 0x%x\n",curtbl,curpte);
1.55 pk 7187: #endif
7188: if ((curpte & SRMMU_TETYPE) == SRMMU_TEPTE)
7189: return (((curpte & SRMMU_PPNMASK) << SRMMU_PPNPASHIFT) |
7190: ((u_int)addr & 0xfff));
7191: else {
1.66 christos 7192: printf("Bad L3 pte 0x%x for reg 0x%x, seg 0x%x, pg 0x%x\n",
1.55 pk 7193: curpte, VA_VREG(addr), VA_VSEG(addr), VA_VPG(addr));
7194: return 0;
7195: }
1.66 christos 7196: printf("Bizarreness with address 0x%x!\n",addr);
1.55 pk 7197: }
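/*
 * A worked example of the walk above, using the SRMMU geometry the offset
 * masks in the code imply (16MB regions, 256KB segments, 4KB pages): for
 * the virtual address 0xfe001234,
 *
 *	VA_VREG(0xfe001234) == 0xfe	region table index
 *	VA_VSEG(0xfe001234) == 0x00	segment table index within the region
 *	VA_VPG(0xfe001234)  == 0x01	page table index within the segment
 *
 * and, if the L3 entry is a PTE, the physical address returned is
 * ((pte & SRMMU_PPNMASK) << SRMMU_PPNPASHIFT) | 0x234.
 */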
7198:
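/*
 * For every page in the given virtual region, compare the translation
 * obtained from the hardware table walk (the ASI_SRMMUFP probe / VA2PA)
 * with the one computed in software by VA2PAsw(), and likewise the
 * protection bits; a mismatch presumably indicates a corrupted or
 * inconsistent page table.  Kernel-region protections are skipped, since
 * they legitimately differ.
 */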
7199: static void test_region(reg, start, stop)
1.124 pk 7200: int reg;
7201: int start, stop;
1.55 pk 7202: {
1.124 pk 7203: int i;
7204: int addr;
7205: int pte;
1.55 pk 7206: int ptesw;
7207: /* int cnt=0;
7208: */
7209:
7210: for (i = start; i < stop; i+= NBPG) {
7211: addr = (reg << RGSHIFT) | i;
7212: pte=lda(((u_int)(addr)) | ASI_SRMMUFP_LN, ASI_SRMMUFP);
7213: if (pte) {
1.66 christos 7214: /* printf("Valid address 0x%x\n",addr);
1.55 pk 7215: if (++cnt == 20) {
7216: cngetc();
7217: cnt=0;
7218: }
7219: */
7220: if (VA2PA(addr) != VA2PAsw(0,addr,&ptesw)) {
1.66 christos 7221: printf("Mismatch at address 0x%x.\n",addr);
1.55 pk 7222: if (cngetc()=='q') break;
7223: }
1.96 pk 7224: if (reg == VA_VREG(KERNBASE))
7225: /* kernel permissions are different */
7226: continue;
1.55 pk 7227: if ((pte&SRMMU_PROT_MASK)!=(ptesw&SRMMU_PROT_MASK)) {
1.66 christos 7228: printf("Mismatched protections at address "
1.55 pk 7229: "0x%x; pte=0x%x, ptesw=0x%x\n",
7230: addr,pte,ptesw);
7231: if (cngetc()=='q') break;
7232: }
7233: }
7234: }
1.66 christos 7235: printf("done.\n");
1.46 pk 7236: }
1.55 pk 7237:
7238:
7239: void print_fe_map(void)
7240: {
7241: u_int i, pte;
7242:
1.66 christos 7243: printf("map of region 0xfe:\n");
1.55 pk 7244: for (i = 0xfe000000; i < 0xff000000; i+=4096) {
7245: if (((pte = getpte4m(i)) & SRMMU_TETYPE) != SRMMU_TEPTE)
7246: continue;
1.91 fair 7247: printf("0x%x -> 0x%x%08x (pte 0x%x)\n", i, pte >> 28,
1.55 pk 7248: (pte & ~0xff) << 4, pte);
7249: }
1.66 christos 7250: printf("done\n");
1.55 pk 7251: }
7252:
7253: #endif