Annotation of src/sys/arch/sparc/sparc/pmap.c, Revision 1.199.4.17
1.199.4.17! martin 1: /* $NetBSD: pmap.c,v 1.199.4.16 2003/01/06 22:12:31 martin Exp $ */
1.199.4.2 pk 2:
3: /*
4: * Copyright (c) 1996
5: * The President and Fellows of Harvard College. All rights reserved.
6: * Copyright (c) 1992, 1993
7: * The Regents of the University of California. All rights reserved.
8: *
9: * This software was developed by the Computer Systems Engineering group
10: * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
11: * contributed to Berkeley.
12: *
13: * All advertising materials mentioning features or use of this software
14: * must display the following acknowledgement:
15: * This product includes software developed by Harvard University.
16: * This product includes software developed by the University of
17: * California, Lawrence Berkeley Laboratory.
18: *
19: * Redistribution and use in source and binary forms, with or without
20: * modification, are permitted provided that the following conditions
21: * are met:
22: *
23: * 1. Redistributions of source code must retain the above copyright
24: * notice, this list of conditions and the following disclaimer.
25: * 2. Redistributions in binary form must reproduce the above copyright
26: * notice, this list of conditions and the following disclaimer in the
27: * documentation and/or other materials provided with the distribution.
28: * 3. All advertising materials mentioning features or use of this software
29: * must display the following acknowledgement:
30: * This product includes software developed by Aaron Brown and
31: * Harvard University.
32: * This product includes software developed by the University of
33: * California, Berkeley and its contributors.
34: * 4. Neither the name of the University nor the names of its contributors
35: * may be used to endorse or promote products derived from this software
36: * without specific prior written permission.
37: *
38: * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
39: * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
40: * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
41: * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
42: * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
43: * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
44: * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
45: * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
46: * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
47: * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
48: * SUCH DAMAGE.
49: *
50: * @(#)pmap.c 8.4 (Berkeley) 2/5/94
51: *
52: */
53:
54: /*
55: * SPARC physical map management code.
56: * Does not function on multiprocessors (yet).
57: */
58:
59: #include "opt_ddb.h"
60: #include "opt_kgdb.h"
61: #include "opt_multiprocessor.h"
1.199.4.3 nathanw 62: #include "opt_sparc_arch.h"
1.199.4.2 pk 63:
64: #include <sys/param.h>
65: #include <sys/systm.h>
66: #include <sys/device.h>
67: #include <sys/proc.h>
68: #include <sys/queue.h>
69: #include <sys/malloc.h>
70: #include <sys/lock.h>
71: #include <sys/pool.h>
72: #include <sys/exec.h>
73: #include <sys/core.h>
74: #include <sys/kcore.h>
75: #include <sys/kernel.h>
76:
77: #include <uvm/uvm.h>
78:
79: #include <machine/autoconf.h>
80: #include <machine/bsd_openprom.h>
81: #include <machine/oldmon.h>
82: #include <machine/cpu.h>
83: #include <machine/ctlreg.h>
84: #include <machine/kcore.h>
85:
86: #include <sparc/sparc/asm.h>
87: #include <sparc/sparc/cache.h>
88: #include <sparc/sparc/vaddrs.h>
89: #include <sparc/sparc/cpuvar.h>
90:
91: /*
92: * The SPARCstation offers us the following challenges:
93: *
94: * 1. A virtual address cache. This is, strictly speaking, not
95: * part of the architecture, but the code below assumes one.
96: * This is a write-through cache on the 4c and a write-back cache
97: * on others.
98: *
99: * 2. (4/4c only) An MMU that acts like a cache. There is not enough
100: * space in the MMU to map everything all the time. Instead, we need
    101: 	 *	to load the MMU with the `working set' of translations for each
102: * process. The sun4m does not act like a cache; tables are maintained
103: * in physical memory.
104: *
105: * 3. Segmented virtual and physical spaces. The upper 12 bits of
106: * a virtual address (the virtual segment) index a segment table,
107: * giving a physical segment. The physical segment selects a
108: * `Page Map Entry Group' (PMEG) and the virtual page number---the
109: * next 5 or 6 bits of the virtual address---select the particular
110: * `Page Map Entry' for the page. We call the latter a PTE and
111: * call each Page Map Entry Group a pmeg (for want of a better name).
112: * Note that the sun4m has an unsegmented 36-bit physical space.
113: *
114: * Since there are no valid bits in the segment table, the only way
115: * to have an invalid segment is to make one full pmeg of invalid PTEs.
    116: 	 *	We use the last one (since the ROM does as well) (sun4/4c only).
117: *
118: * 4. Discontiguous physical pages. The Mach VM expects physical pages
119: * to be in one sequential lump.
120: *
121: * 5. The MMU is always on: it is not possible to disable it. This is
122: * mainly a startup hassle.
123: */
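/*
 * Illustrative sketch only (not used by the code below): how a sun4c
 * virtual address decomposes under item 3 above. The real kernel uses
 * the VA_VREG()/VA_VSEG()/VA_VPG() macros from the pmap headers; the
 * shifts and masks here are assumptions for illustration only.
 */
#if 0
static __inline__ int
example_va_vseg(vaddr_t va)
{
	return ((va >> 18) & 0xfff);	/* top 12 bits: virtual segment */
}

static __inline__ int
example_va_vpg(vaddr_t va)
{
	return ((va >> 12) & 0x3f);	/* next 6 bits: page within segment */
}
#endif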
124:
125: struct pmap_stats {
126: int ps_unlink_pvfirst; /* # of pv_unlinks on head */
127: int ps_unlink_pvsearch; /* # of pv_unlink searches */
128: int ps_changeprots; /* # of calls to changeprot */
129: int ps_useless_changeprots; /* # of changeprots for wiring */
130: int ps_enter_firstpv; /* pv heads entered */
131: int ps_enter_secondpv; /* pv nonheads entered */
132: int ps_useless_changewire; /* useless wiring changes */
133: int ps_npg_prot_all; /* # of active pages protected */
134: int ps_npg_prot_actual; /* # pages actually affected */
135: int ps_npmeg_free; /* # of free pmegs */
136: int ps_npmeg_locked; /* # of pmegs on locked list */
137: int ps_npmeg_lru; /* # of pmegs on lru list */
138: } pmap_stats;
139:
140: #ifdef DEBUG
141: #define PDB_CREATE 0x0001
142: #define PDB_DESTROY 0x0002
143: #define PDB_REMOVE 0x0004
144: #define PDB_CHANGEPROT 0x0008
145: #define PDB_ENTER 0x0010
146: #define PDB_FOLLOW 0x0020
147:
148: #define PDB_MMU_ALLOC 0x0100
149: #define PDB_MMU_STEAL 0x0200
150: #define PDB_CTX_ALLOC 0x0400
151: #define PDB_CTX_STEAL 0x0800
152: #define PDB_MMUREG_ALLOC 0x1000
153: #define PDB_MMUREG_STEAL 0x2000
154: #define PDB_CACHESTUFF 0x4000
155: #define PDB_SWITCHMAP 0x8000
156: #define PDB_SANITYCHK 0x10000
157: int pmapdebug = 0;
158: #endif
159:
160: /*
161: * Bounds on managed physical addresses. Used by (MD) users
162: * of uvm_pglistalloc() to provide search hints.
163: */
164: paddr_t vm_first_phys = (paddr_t)-1;
165: paddr_t vm_last_phys = 0;
166: psize_t vm_num_phys;
167:
168: /*
169: * For each managed physical page, there is a list of all currently
170: * valid virtual mappings of that page. Since there is usually one
171: * (or zero) mapping per page, the table begins with an initial entry,
172: * rather than a pointer; this head entry is empty iff its pv_pmap
173: * field is NULL.
174: *
    175: 	 * Note that there is one of these per machine-independent page (so
    176: 	 * there may be only one for every two hardware pages, e.g.). Since the virtual
177: * address is aligned on a page boundary, the low order bits are free
178: * for storing flags. Only the head of each list has flags.
179: *
180: * THIS SHOULD BE PART OF THE CORE MAP
181: */
182: struct pvlist {
183: struct pvlist *pv_next; /* next pvlist, if any */
184: struct pmap *pv_pmap; /* pmap of this va */
185: vaddr_t pv_va; /* virtual address */
186: int pv_flags; /* flags (below) */
187: };
188:
189: /*
190: * Flags in pv_flags. Note that PV_MOD must be 1 and PV_REF must be 2
191: * since they must line up with the bits in the hardware PTEs (see pte.h).
192: * SUN4M bits are at a slightly different location in the PTE.
193: * Note: the REF, MOD and ANC flag bits occur only in the head of a pvlist.
194: * The NC bit is meaningful in each individual pv entry and reflects the
195: * requested non-cacheability at the time the entry was made through
196: * pv_link() or when subsequently altered by kvm_uncache() (but the latter
197: * does not happen in kernels as of the time of this writing (March 2001)).
198: */
199: #define PV_MOD 1 /* page modified */
200: #define PV_REF 2 /* page referenced */
201: #define PV_NC 4 /* page cannot be cached */
202: #define PV_REF4M 1 /* page referenced (SRMMU) */
203: #define PV_MOD4M 2 /* page modified (SRMMU) */
204: #define PV_ANC 0x10 /* page has incongruent aliases */
205:
206: static struct pool pv_pool;
207:
208: static int pmap_initialized; /* XXX - allow pmap_enter() before the
209: * pv tables are allocated.
210: */
211:
212: static struct pvlist *pvhead(paddr_t pfn)
213: {
214: int bank, off;
215:
216: #ifdef DIAGNOSTIC
217: if (pmap_initialized == 0)
218: panic("pvhead: not initialized");
219: #endif
220:
221: bank = vm_physseg_find(pfn, &off);
222: if (bank == -1)
223: return (NULL);
224:
225: return (&vm_physmem[bank].pmseg.pvhead[off]);
226: }
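/*
 * Usage sketch (hypothetical helper, mirroring what me_alloc() does
 * further below): fold the hardware ref/mod bits of a valid sun4/4c
 * PTE into the pv list head of the page it maps, if the page is
 * managed. MR4_4C() is defined later in this file.
 */
#if 0
static void
example_sync_mr(int tpte)
{
	struct pvlist *pv;

	if ((pv = pvhead(tpte & PG_PFNUM)) != NULL)
		pv->pv_flags |= MR4_4C(tpte);
}
#endif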
227:
228: /*
229: * Each virtual segment within each pmap is either valid or invalid.
230: * It is valid if pm_npte[VA_VSEG(va)] is not 0. This does not mean
231: * it is in the MMU, however; that is true iff pm_segmap[VA_VSEG(va)]
232: * does not point to the invalid PMEG.
233: *
1.199.4.4 nathanw 234: * In the older SPARC architectures (sun4/sun4c), page tables are cached in
235: * the MMU. The following discussion applies to these architectures:
1.199.4.2 pk 236: *
237: * If a virtual segment is valid and loaded, the correct PTEs appear
238: * in the MMU only. If it is valid and unloaded, the correct PTEs appear
239: * in the pm_pte[VA_VSEG(va)] only. However, some effort is made to keep
240: * the software copies consistent enough with the MMU so that libkvm can
241: * do user address translations. In particular, pv_changepte() and
242: * pmap_enu() maintain consistency, while less critical changes are
243: * not maintained. pm_pte[VA_VSEG(va)] always points to space for those
244: * PTEs, unless this is the kernel pmap, in which case pm_pte[x] is not
245: * used (sigh).
246: *
247: * Each PMEG in the MMU is either free or contains PTEs corresponding to
248: * some pmap and virtual segment. If it contains some PTEs, it also contains
249: * reference and modify bits that belong in the pv_table. If we need
250: * to steal a PMEG from some process (if we need one and none are free)
251: * we must copy the ref and mod bits, and update pm_segmap in the other
252: * pmap to show that its virtual segment is no longer in the MMU.
253: *
254: * There are 128 PMEGs in a small Sun-4, of which only a few dozen are
255: * tied down permanently, leaving `about' 100 to be spread among
256: * running processes. These are managed as an LRU cache. Before
257: * calling the VM paging code for a user page fault, the fault handler
258: * calls mmu_load(pmap, va) to try to get a set of PTEs put into the
259: * MMU. mmu_load will check the validity of the segment and tell whether
260: * it did something.
261: *
262: * Since I hate the name PMEG I call this data structure an `mmu entry'.
263: * Each mmuentry is on exactly one of three `usage' lists: free, LRU,
264: * or locked. The LRU list is for user processes; the locked list is
265: * for kernel entries; both are doubly linked queues headed by `mmuhd's.
266: * The free list is a simple list, headed by a free list pointer.
267: *
1.199.4.4 nathanw 268: *
1.199.4.2 pk 269: * In the sun4m architecture using the SPARC Reference MMU (SRMMU), three
270: * levels of page tables are maintained in physical memory. We use the same
271: * structures as with the 3-level old-style MMU (pm_regmap, pm_segmap,
272: * rg_segmap, sg_pte, etc) to maintain kernel-edible page tables; we also
273: * build a parallel set of physical tables that can be used by the MMU.
274: * (XXX: This seems redundant, but is it necessary for the unified kernel?)
275: *
276: * If a virtual segment is valid, its entries will be in both parallel lists.
277: * If it is not valid, then its entry in the kernel tables will be zero, and
278: * its entry in the MMU tables will either be nonexistent or zero as well.
279: *
280: * The Reference MMU generally uses a Translation Look-aside Buffer (TLB)
281: * to cache the result of recently executed page table walks. When
282: * manipulating page tables, we need to ensure consistency of the
283: * in-memory and TLB copies of the page table entries. This is handled
284: * by flushing (and invalidating) a TLB entry when appropriate before
285: * altering an in-memory page table entry.
286: */
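/*
 * Sketch of the consistency rule stated above (uniprocessor case, with
 * a hypothetical helper): invalidate the TLB copy before changing the
 * in-memory PTE, so the MMU cannot write ref/mod bits into a stale
 * entry. The setpgt4m_va() macro below packages this sequence.
 */
#if 0
static void
example_update_pte(vaddr_t va, int ctx, int *ptep, int newpte)
{
	tlb_flush_page(va, ctx);	/* drop any cached translation */
	setpgt4m(ptep, newpte);		/* then install the new PTE */
}
#endif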
287: struct mmuentry {
288: TAILQ_ENTRY(mmuentry) me_list; /* usage list link */
289: TAILQ_ENTRY(mmuentry) me_pmchain; /* pmap owner link */
290: struct pmap *me_pmap; /* pmap, if in use */
    291: 	u_short	me_vreg;		/* associated virtual region */
    292: 	u_short	me_vseg;		/* associated virtual segment */
293: u_short me_cookie; /* hardware SMEG/PMEG number */
294: };
295: struct mmuentry *mmusegments; /* allocated in pmap_bootstrap */
296: struct mmuentry *mmuregions; /* allocated in pmap_bootstrap */
297:
298: struct mmuhd segm_freelist, segm_lru, segm_locked;
299: struct mmuhd region_freelist, region_lru, region_locked;
300:
301: int seginval; /* [4/4c] the invalid segment number */
302: int reginval; /* [4/3mmu] the invalid region number */
303:
304: /*
305: * (sun4/4c)
306: * A context is simply a small number that dictates which set of 4096
1.199.4.4 nathanw 307: * segment map entries the MMU uses. The Sun 4c has eight (SS1,IPC) or
    308: 	 * sixteen (SS2,IPX) such sets. These are allotted in an `almost MRU' fashion.
1.199.4.2 pk 309: * (sun4m)
310: * A context is simply a small number that indexes the context table, the
311: * root-level page table mapping 4G areas. Each entry in this table points
312: * to a 1st-level region table. A SPARC reference MMU will usually use 16
313: * such contexts, but some offer as many as 64k contexts; the theoretical
314: * maximum is 2^32 - 1, but this would create overlarge context tables.
315: *
316: * Each context is either free or attached to a pmap.
317: *
318: * Since the virtual address cache is tagged by context, when we steal
319: * a context we have to flush (that part of) the cache.
320: */
321: union ctxinfo {
322: union ctxinfo *c_nextfree; /* free list (if free) */
323: struct pmap *c_pmap; /* pmap (if busy) */
324: };
325:
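/*
 * Allocation sketch (simplified from ctx_alloc() elsewhere in this
 * file, with a hypothetical helper name): a free context is popped off
 * ctx_freelist (declared just below) under ctx_lock and attached to
 * the requesting pmap; when the list is empty, ctx_kick selects a
 * victim context to steal instead.
 */
#if 0
static union ctxinfo *
example_ctx_grab(struct pmap *pm)
{
	union ctxinfo *c;

	simple_lock(&ctx_lock);
	if ((c = ctx_freelist) != NULL) {
		ctx_freelist = c->c_nextfree;
		c->c_pmap = pm;			/* context is now busy */
	}
	simple_unlock(&ctx_lock);
	return (c);	/* NULL: caller must steal via ctx_kick */
}
#endif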
326: static struct simplelock ctx_lock; /* lock for below */
    327: union ctxinfo *ctxinfo;		/* allocated in pmap_bootstrap */
328: union ctxinfo *ctx_freelist; /* context free list */
329: int ctx_kick; /* allocation rover when none free */
330: int ctx_kickdir; /* ctx_kick roves both directions */
331: int ncontext; /* sizeof ctx_freelist */
332:
333: void ctx_alloc __P((struct pmap *));
334: void ctx_free __P((struct pmap *));
335:
336: caddr_t vpage[2]; /* two reserved MD virtual pages */
1.199.4.9 nathanw 337: #if defined(SUN4M) || defined(SUN4D)
1.199.4.2 pk 338: int *vpage_pte[2]; /* pte location of vpage[] */
339: #endif
340: caddr_t vmmap; /* one reserved MI vpage for /dev/mem */
341: caddr_t vdumppages; /* 32KB worth of reserved dump pages */
342:
343: smeg_t tregion; /* [4/3mmu] Region for temporary mappings */
344:
345: struct pmap kernel_pmap_store; /* the kernel's pmap */
346: struct regmap kernel_regmap_store[NKREG]; /* the kernel's regmap */
347: struct segmap kernel_segmap_store[NKREG*NSEGRG];/* the kernel's segmaps */
348:
1.199.4.9 nathanw 349: #if defined(SUN4M) || defined(SUN4D)
1.199.4.2 pk 350: u_int *kernel_regtable_store; /* 1k of storage to map the kernel */
351: u_int *kernel_segtable_store; /* 2k of storage to map the kernel */
352: u_int *kernel_pagtable_store; /* 128k of storage to map the kernel */
353:
354: /*
355: * Memory pools and back-end supplier for SRMMU page tables.
356: * Share a pool between the level 2 and level 3 page tables,
357: * since these are equal in size.
358: */
359: static struct pool L1_pool;
360: static struct pool L23_pool;
361:
1.199.4.5 nathanw 362: static void *pgt_page_alloc __P((struct pool *, int));
363: static void pgt_page_free __P((struct pool *, void *));
364:
365: static struct pool_allocator pgt_page_allocator = {
366: pgt_page_alloc, pgt_page_free, 0,
367: };
1.199.4.2 pk 368:
369: #endif
370:
371: #define MA_SIZE 32 /* size of memory descriptor arrays */
372: struct memarr pmemarr[MA_SIZE];/* physical memory regions */
373: int npmemarr; /* number of entries in pmemarr */
374:
375: static paddr_t avail_start; /* first available physical page, other
376: than the `etext gap' defined below */
377: static vaddr_t etext_gap_start;/* start of gap between text & data */
378: static vaddr_t etext_gap_end; /* end of gap between text & data */
379: static vaddr_t virtual_avail; /* first free kernel virtual address */
380: static vaddr_t virtual_end; /* last free kernel virtual address */
381:
382: static void pmap_page_upload __P((void));
383:
384: int mmu_has_hole;
385:
386: vaddr_t prom_vstart; /* For /dev/kmem */
387: vaddr_t prom_vend;
388:
389: /*
390: * Memory pool for pmap structures.
391: */
392: static struct pool pmap_pmap_pool;
393: static struct pool_cache pmap_pmap_pool_cache;
394: static int pmap_pmap_pool_ctor __P((void *, void *, int));
395: static void pmap_pmap_pool_dtor __P((void *, void *));
396:
397: #if defined(SUN4)
398: /*
399: * [sun4]: segfixmask: on some systems (4/110) "getsegmap()" returns a
400: * partly invalid value. getsegmap returns a 16 bit value on the sun4,
401: * but only the first 8 or so bits are valid (the rest are *supposed* to
    402: 	 * be zero). On the 4/110 the bits that are supposed to be zero are
403: * all one instead. e.g. KERNBASE is usually mapped by pmeg number zero.
404: * On a 4/300 getsegmap(KERNBASE) == 0x0000, but
405: * on a 4/100 getsegmap(KERNBASE) == 0xff00
406: *
407: * This confuses mmu_reservemon() and causes it to not reserve the PROM's
408: * pmegs. Then the PROM's pmegs get used during autoconfig and everything
409: * falls apart! (not very fun to debug, BTW.)
410: *
    411: 	 * solution: mask the invalid bits in the getsegmap macro.
412: */
413:
414: static u_long segfixmask = 0xffffffff; /* all bits valid to start */
415: #else
416: #define segfixmask 0xffffffff /* It's in getsegmap's scope */
417: #endif
418:
419: /*
420: * pseudo-functions for mnemonic value
421: */
422: #define getsegmap(va) (CPU_ISSUN4C \
423: ? lduba(va, ASI_SEGMAP) \
424: : (lduha(va, ASI_SEGMAP) & segfixmask))
425: #define setsegmap(va, pmeg) (CPU_ISSUN4C \
426: ? stba(va, ASI_SEGMAP, pmeg) \
427: : stha(va, ASI_SEGMAP, pmeg))
428:
429: /* 3-level sun4 MMU only: */
430: #define getregmap(va) ((unsigned)lduha((va)+2, ASI_REGMAP) >> 8)
431: #define setregmap(va, smeg) stha((va)+2, ASI_REGMAP, (smeg << 8))
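/*
 * Usage sketch (hypothetical helper): detach a virtual segment from
 * the MMU on sun4/4c by pointing it at the invalid PMEG, as
 * me_alloc()/me_free() do below.
 */
#if 0
static void
example_unmap_segment(int vr, int vs)
{
	setsegmap(VSTOVA(vr, vs), seginval);
}
#endif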
432:
1.199.4.9 nathanw 433: #if defined(SUN4M) || defined(SUN4D)
1.199.4.2 pk 434: void setpgt4m __P((int *ptep, int pte));
435: void setpte4m __P((vaddr_t va, int pte));
436:
437: #ifdef MULTIPROCESSOR
1.199.4.14 thorpej 438: void setpgt4m_va __P((vaddr_t, int *, int, int, int));
1.199.4.2 pk 439: #else
1.199.4.14 thorpej 440: #define setpgt4m_va(va, ptep, pte, pageflush, ctx) do { \
1.199.4.2 pk 441: if ((pageflush)) \
1.199.4.14 thorpej 442: tlb_flush_page(va, ctx); \
1.199.4.2 pk 443: setpgt4m((ptep), (pte)); \
444: } while (0)
445: #endif
446:
447: #endif
448:
449: /* Function pointer messiness for supporting multiple sparc architectures
450: * within a single kernel: notice that there are two versions of many of the
451: * functions within this file/module, one for the sun4/sun4c and the other
452: * for the sun4m. For performance reasons (since things like pte bits don't
453: * map nicely between the two architectures), there are separate functions
454: * rather than unified functions which test the cputyp variable. If only
455: * one architecture is being used, then the non-suffixed function calls
456: * are macro-translated into the appropriate xxx4_4c or xxx4m call. If
457: * multiple architectures are defined, the calls translate to (*xxx_p),
458: * i.e. they indirect through function pointers initialized as appropriate
459: * to the run-time architecture in pmap_bootstrap. See also pmap.h.
460: */
461:
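/*
 * Illustrative pattern only (the real definitions are below and in
 * pmap.h): a single-architecture kernel binds each call at compile
 * time, while a multi-architecture kernel indirects through a function
 * pointer that pmap_bootstrap() fills in.
 */
#if 0
#define pmap_rmk	pmap_rmk4_4c	/* sun4/4c-only kernel */
/* versus */
void (*pmap_rmk_p)(struct pmap *, vaddr_t, vaddr_t, int, int);
#define pmap_rmk	(*pmap_rmk_p)	/* multi-architecture kernel */
#endif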
1.199.4.9 nathanw 462: #if defined(SUN4M) || defined(SUN4D)
1.199.4.2 pk 463: static void mmu_setup4m_L1 __P((int, struct pmap *));
464: static void mmu_setup4m_L2 __P((int, struct regmap *));
465: static void mmu_setup4m_L3 __P((int, struct segmap *));
466: /*static*/ void mmu_reservemon4m __P((struct pmap *));
467:
468: /*static*/ void pmap_rmk4m __P((struct pmap *, vaddr_t, vaddr_t, int, int));
469: /*static*/ void pmap_rmu4m __P((struct pmap *, vaddr_t, vaddr_t, int, int));
470: /*static*/ int pmap_enk4m __P((struct pmap *, vaddr_t, vm_prot_t,
471: int, struct pvlist *, int));
472: /*static*/ int pmap_enu4m __P((struct pmap *, vaddr_t, vm_prot_t,
473: int, struct pvlist *, int));
474: /*static*/ void pv_changepte4m __P((struct pvlist *, int, int));
475: /*static*/ int pv_syncflags4m __P((struct pvlist *));
476: /*static*/ int pv_link4m __P((struct pvlist *, struct pmap *, vaddr_t, int));
477: /*static*/ void pv_unlink4m __P((struct pvlist *, struct pmap *, vaddr_t));
478: #endif
479:
480: #if defined(SUN4) || defined(SUN4C)
481: /*static*/ void mmu_reservemon4_4c __P((int *, int *));
482: /*static*/ void pmap_rmk4_4c __P((struct pmap *, vaddr_t, vaddr_t, int, int));
483: /*static*/ void pmap_rmu4_4c __P((struct pmap *, vaddr_t, vaddr_t, int, int));
484: /*static*/ int pmap_enk4_4c __P((struct pmap *, vaddr_t, vm_prot_t,
485: int, struct pvlist *, int));
486: /*static*/ int pmap_enu4_4c __P((struct pmap *, vaddr_t, vm_prot_t,
487: int, struct pvlist *, int));
488: /*static*/ void pv_changepte4_4c __P((struct pvlist *, int, int));
489: /*static*/ int pv_syncflags4_4c __P((struct pvlist *));
490: /*static*/ int pv_link4_4c __P((struct pvlist *, struct pmap *, vaddr_t, int));
491: /*static*/ void pv_unlink4_4c __P((struct pvlist *, struct pmap *, vaddr_t));
492: #endif
493:
1.199.4.9 nathanw 494: #if !(defined(SUN4M) || defined(SUN4D)) && (defined(SUN4) || defined(SUN4C))
1.199.4.2 pk 495: #define pmap_rmk pmap_rmk4_4c
496: #define pmap_rmu pmap_rmu4_4c
497:
1.199.4.9 nathanw 498: #elif (defined(SUN4M) || defined(SUN4D)) && !(defined(SUN4) || defined(SUN4C))
1.199.4.2 pk 499: #define pmap_rmk pmap_rmk4m
500: #define pmap_rmu pmap_rmu4m
501:
502: #else /* must use function pointers */
503:
504: /* function pointer declarations */
505: /* from pmap.h: */
506: boolean_t (*pmap_clear_modify_p) __P((struct vm_page *));
507: boolean_t (*pmap_clear_reference_p) __P((struct vm_page *));
508: int (*pmap_enter_p) __P((pmap_t, vaddr_t, paddr_t, vm_prot_t, int));
509: boolean_t (*pmap_extract_p) __P((pmap_t, vaddr_t, paddr_t *));
510: boolean_t (*pmap_is_modified_p) __P((struct vm_page *));
511: boolean_t (*pmap_is_referenced_p) __P((struct vm_page *));
512: void (*pmap_kenter_pa_p) __P((vaddr_t, paddr_t, vm_prot_t));
513: void (*pmap_kremove_p) __P((vaddr_t, vsize_t));
514: void (*pmap_page_protect_p) __P((struct vm_page *, vm_prot_t));
515: void (*pmap_protect_p) __P((pmap_t, vaddr_t, vaddr_t, vm_prot_t));
516: void (*pmap_changeprot_p) __P((pmap_t, vaddr_t, vm_prot_t, int));
517: /* local: */
518: void (*pmap_rmk_p) __P((struct pmap *, vaddr_t, vaddr_t, int, int));
519: void (*pmap_rmu_p) __P((struct pmap *, vaddr_t, vaddr_t, int, int));
520:
521: #define pmap_rmk (*pmap_rmk_p)
522: #define pmap_rmu (*pmap_rmu_p)
523:
524: #endif
525:
526: /* --------------------------------------------------------------*/
527:
528: /*
1.199.4.9 nathanw 529: * Next we have some sun4m/4d-specific routines which have no 4/4c
1.199.4.2 pk 530: * counterparts, or which are 4/4c macros.
531: */
532:
1.199.4.9 nathanw 533: #if defined(SUN4M) || defined(SUN4D)
1.199.4.13 thorpej 534: /*
1.199.4.17! martin    535: 	 * Single-processor (SP) versions of the TLB flush operations.
! 536: *
! 537: * Turn off traps to prevent register window overflows
! 538: * from writing user windows to the wrong stack.
1.199.4.13 thorpej 539: */
1.199.4.17! martin 540: static void sp_tlb_flush(int va, int ctx, int lvl)
1.199.4.13 thorpej 541: {
1.199.4.17! martin 542: /* Traps off */
! 543: __asm("rd %psr, %o3");
! 544: __asm("wr %%o3, %0, %%psr" :: "n" (PSR_ET));
1.199.4.13 thorpej 545:
1.199.4.17! martin 546: /* Save context */
! 547: __asm("mov %0, %%o4" :: "n"(SRMMU_CXR));
! 548: __asm("lda [%%o4]%0, %%o5" :: "n"(ASI_SRMMU));
1.199.4.13 thorpej 549:
1.199.4.17! martin 550: /* Set new context and flush type bits */
! 551: __asm("andn %o0, 0xfff, %o0");
! 552: __asm("sta %%o1, [%%o4]%0" :: "n"(ASI_SRMMU));
! 553: __asm("or %o0, %o2, %o0");
1.199.4.13 thorpej 554:
1.199.4.17! martin 555: /* Do the TLB flush */
! 556: __asm("sta %%g0, [%%o0]%0" :: "n"(ASI_SRMMUFP));
! 557:
! 558: /* Restore context */
! 559: __asm("sta %%o5, [%%o4]%0" :: "n"(ASI_SRMMU));
! 560:
! 561: /* and turn traps on again */
! 562: __asm("wr %o3, 0, %psr");
! 563: __asm("nop");
! 564: __asm("retl");
! 565: __asm("nop");
1.199.4.13 thorpej 566: }
567:
568: static __inline__ void sp_tlb_flush_all(void)
569: {
1.199.4.17! martin 570: sta(ASI_SRMMUFP_LN, ASI_SRMMUFP, 0);
1.199.4.13 thorpej 571: }
1.199.4.2 pk 572:
573: #if defined(MULTIPROCESSOR)
574: /*
1.199.4.15 thorpej    575: 	 * The SMP versions of the TLB flush routines. On sun4m systems we only
    576: 	 * need to do a cross call for these; the cross-call mechanism itself
    577: 	 * ensures that only one flush is in progress at a time.
578: * For the sun4d case, we provide a special lock.
1.199.4.2 pk 579: */
1.199.4.15 thorpej 580:
581: #if defined(SUN4D)
582: static struct simplelock sun4d_tlb_lock = SIMPLELOCK_INITIALIZER;
583: #define LOCK_4DTLB() simple_lock(&sun4d_tlb_lock);
584: #define UNLOCK_4DTLB() simple_unlock(&sun4d_tlb_lock);
585: #else
586: #define LOCK_4DTLB() /* nothing */
587: #define UNLOCK_4DTLB() /* nothing */
588: #endif
589:
1.199.4.14 thorpej 590: static __inline__ void smp_tlb_flush_context __P((int ctx));
591: static __inline__ void smp_tlb_flush_region __P((int va, int ctx));
592: static __inline__ void smp_tlb_flush_segment __P((int va, int ctx));
593: static __inline__ void smp_tlb_flush_page __P((int va, int ctx));
1.199.4.2 pk 594: static __inline__ void smp_tlb_flush_all __P((void));
595:
596: static __inline__ void
1.199.4.14 thorpej 597: smp_tlb_flush_page(int va, int ctx)
1.199.4.2 pk 598: {
1.199.4.15 thorpej 599: if (CPU_ISSUN4D) {
600: LOCK_4DTLB();
1.199.4.17! martin 601: sp_tlb_flush(va, ctx, ASI_SRMMUFP_L3);
1.199.4.15 thorpej 602: UNLOCK_4DTLB();
603: } else
1.199.4.17! martin 604: XCALL3(sp_tlb_flush, va, ctx, ASI_SRMMUFP_L3, CPUSET_ALL);
1.199.4.2 pk 605: }
606:
607: static __inline__ void
1.199.4.14 thorpej 608: smp_tlb_flush_segment(int va, int ctx)
1.199.4.2 pk 609: {
1.199.4.15 thorpej 610: if (CPU_ISSUN4D) {
611: LOCK_4DTLB();
1.199.4.17! martin 612: sp_tlb_flush(va, ctx, ASI_SRMMUFP_L2);
1.199.4.15 thorpej 613: UNLOCK_4DTLB();
614: } else
1.199.4.17! martin 615: XCALL3(sp_tlb_flush, va, ctx, ASI_SRMMUFP_L2, CPUSET_ALL);
1.199.4.2 pk 616: }
617:
618: static __inline__ void
1.199.4.14 thorpej 619: smp_tlb_flush_region(int va, int ctx)
1.199.4.2 pk 620: {
1.199.4.15 thorpej 621: if (CPU_ISSUN4D) {
622: LOCK_4DTLB();
1.199.4.17! martin 623: sp_tlb_flush(va, ctx, ASI_SRMMUFP_L1);
1.199.4.15 thorpej 624: UNLOCK_4DTLB();
625: } else
1.199.4.17! martin 626: XCALL3(sp_tlb_flush, va, ctx, ASI_SRMMUFP_L1, CPUSET_ALL);
1.199.4.2 pk 627: }
628:
629: static __inline__ void
1.199.4.14 thorpej 630: smp_tlb_flush_context(int ctx)
1.199.4.2 pk 631: {
1.199.4.15 thorpej 632: if (CPU_ISSUN4D) {
633: LOCK_4DTLB();
1.199.4.17! martin    634: 		sp_tlb_flush(0, ctx, ASI_SRMMUFP_L0);
1.199.4.15 thorpej 635: UNLOCK_4DTLB();
636: } else
1.199.4.17! martin 637: XCALL3(sp_tlb_flush, 0, ctx, ASI_SRMMUFP_L0, CPUSET_ALL);
1.199.4.2 pk 638: }
639:
640: static __inline__ void
641: smp_tlb_flush_all()
642: {
1.199.4.15 thorpej 643: if (CPU_ISSUN4D) {
644: LOCK_4DTLB();
645: sp_tlb_flush_all();
646: UNLOCK_4DTLB();
647: } else
648: XCALL0(sp_tlb_flush_all, CPUSET_ALL);
1.199.4.2 pk 649: }
1.199.4.17! martin 650: #endif /* MULTIPROCESSOR */
1.199.4.2 pk 651:
1.199.4.13 thorpej 652: #if defined(MULTIPROCESSOR)
1.199.4.14 thorpej 653: #define tlb_flush_page(va,ctx) smp_tlb_flush_page(va,ctx)
654: #define tlb_flush_segment(va,ctx) smp_tlb_flush_segment(va,ctx)
655: #define tlb_flush_region(va,ctx) smp_tlb_flush_region(va,ctx)
656: #define tlb_flush_context(ctx) smp_tlb_flush_context(ctx)
1.199.4.2 pk 657: #define tlb_flush_all() smp_tlb_flush_all()
658: #else
1.199.4.17! martin 659: #define tlb_flush_page(va,ctx) sp_tlb_flush(va,ctx,ASI_SRMMUFP_L3)
! 660: #define tlb_flush_segment(va,ctx) sp_tlb_flush(va,ctx,ASI_SRMMUFP_L2)
! 661: #define tlb_flush_region(va,ctx) sp_tlb_flush(va,ctx,ASI_SRMMUFP_L1)
!           662: #define tlb_flush_context(ctx)	sp_tlb_flush(0,ctx,ASI_SRMMUFP_L0)
1.199.4.13 thorpej 663: #define tlb_flush_all() sp_tlb_flush_all()
1.199.4.2 pk 664: #endif
665:
666: /*
667: * Atomically update a PTE entry, coping with hardware updating the
668: * PTE at the same time we are. This is the procedure that is
669: * recommended in the SuperSPARC user's manual.
670: */
1.199.4.14 thorpej 671: int updatepte4m __P((vaddr_t, int *, int, int, int));
1.199.4.2 pk 672: static struct simplelock pte4m_lock = SIMPLELOCK_INITIALIZER;
673:
674: int
1.199.4.14 thorpej 675: updatepte4m(va, pte, bic, bis, ctx)
1.199.4.2 pk 676: vaddr_t va;
677: int *pte;
678: int bic;
679: int bis;
1.199.4.14 thorpej 680: int ctx;
1.199.4.2 pk 681: {
682: int oldval, swapval;
683: volatile int *vpte = (volatile int *)pte;
684:
685: /*
    686: 	 * Only one of these can be in progress in the system
    687: 	 * at any one time.
688: */
689: simple_lock(&pte4m_lock);
690:
691: /*
692: * The idea is to loop swapping zero into the pte, flushing
693: * it, and repeating until it stays zero. At this point,
694: * there should be no more hardware accesses to this PTE
695: * so we can modify it without losing any mod/ref info.
696: */
697: oldval = 0;
698: do {
699: swapval = 0;
700: swap(vpte, swapval);
1.199.4.14 thorpej 701: tlb_flush_page(va, ctx);
1.199.4.2 pk 702: oldval |= swapval;
703: } while (*vpte != 0);
704:
705: swapval = (oldval & ~bic) | bis;
706: swap(vpte, swapval);
707:
708: simple_unlock(&pte4m_lock);
709:
710: return (oldval);
711: }
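/*
 * Usage sketch (hypothetical helper; the SRMMU modified-bit name is
 * assumed from pte.h): atomically clear the modified bit of a mapped
 * page, picking up any ref/mod updates the hardware made concurrently.
 */
#if 0
static int
example_clear_mod(vaddr_t va, int *ptep, int ctx)
{
	/* returns nonzero if the page was modified */
	return (updatepte4m(va, ptep, SRMMU_PG_M, 0, ctx) & SRMMU_PG_M);
}
#endif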
712:
713: static u_int VA2PA __P((caddr_t));
714: static u_long srmmu_bypass_read __P((u_long));
715:
716: /*
717: * VA2PA(addr) -- converts a virtual address to a physical address using
718: * the MMU's currently-installed page tables. As a side effect, the address
719: * translation used may cause the associated pte to be encached. The correct
720: * context for VA must be set before this is called.
721: *
722: * This routine should work with any level of mapping, as it is used
723: * during bootup to interact with the ROM's initial L1 mapping of the kernel.
724: */
725: static u_int
726: VA2PA(addr)
727: caddr_t addr;
728: {
729: u_int pte;
730:
1.199.4.14 thorpej 731: /*
732: * We'll use that handy SRMMU flush/probe.
733: * Try each level in turn until we find a valid pte. Otherwise panic.
734: */
1.199.4.2 pk 735:
736: pte = lda(((u_int)addr & ~0xfff) | ASI_SRMMUFP_L3, ASI_SRMMUFP);
737: /* Unlock fault status; required on Hypersparc modules */
738: (void)lda(SRMMU_SFSR, ASI_SRMMU);
739: if ((pte & SRMMU_TETYPE) == SRMMU_TEPTE)
740: return (((pte & SRMMU_PPNMASK) << SRMMU_PPNPASHIFT) |
741: ((u_int)addr & 0xfff));
742:
743: /* A `TLB Flush Entire' is required before any L0, L1 or L2 probe */
744: tlb_flush_all_real();
745:
746: pte = lda(((u_int)addr & ~0xfff) | ASI_SRMMUFP_L2, ASI_SRMMUFP);
747: if ((pte & SRMMU_TETYPE) == SRMMU_TEPTE)
748: return (((pte & SRMMU_PPNMASK) << SRMMU_PPNPASHIFT) |
749: ((u_int)addr & 0x3ffff));
750: pte = lda(((u_int)addr & ~0xfff) | ASI_SRMMUFP_L1, ASI_SRMMUFP);
751: if ((pte & SRMMU_TETYPE) == SRMMU_TEPTE)
752: return (((pte & SRMMU_PPNMASK) << SRMMU_PPNPASHIFT) |
753: ((u_int)addr & 0xffffff));
754: pte = lda(((u_int)addr & ~0xfff) | ASI_SRMMUFP_L0, ASI_SRMMUFP);
755: if ((pte & SRMMU_TETYPE) == SRMMU_TEPTE)
756: return (((pte & SRMMU_PPNMASK) << SRMMU_PPNPASHIFT) |
757: ((u_int)addr & 0xffffffff));
758:
759: #ifdef DIAGNOSTIC
760: panic("VA2PA: Asked to translate unmapped VA %p", addr);
761: #else
762: return (0);
763: #endif
764: }
765:
766: __inline void
767: setpgt4m(ptep, pte)
768: int *ptep;
769: int pte;
770: {
771:
772: swap(ptep, pte);
773: }
774:
775: #ifdef MULTIPROCESSOR
776: __inline void
1.199.4.14 thorpej 777: setpgt4m_va(va, ptep, pte, pageflush, ctx)
1.199.4.2 pk 778: vaddr_t va;
779: int *ptep;
780: int pte;
1.199.4.14 thorpej 781: int pageflush;
782: int ctx;
1.199.4.2 pk 783: {
784:
1.199.4.14 thorpej 785: updatepte4m(va, ptep, 0xffffffff, pte, pageflush ? ctx : 0);
1.199.4.2 pk 786: }
787: #endif
788:
789: /* Set the page table entry for va to pte. */
790: __inline void
791: setpte4m(va, pte)
792: vaddr_t va;
793: int pte;
794: {
795: struct pmap *pm;
796: struct regmap *rm;
797: struct segmap *sm;
798:
799: if (getcontext4m() != 0)
800: panic("setpte4m: user context");
801:
802: pm = pmap_kernel();
803:
804: /* Note: inline version of setptesw4m() */
805: #ifdef DEBUG
806: if (pm->pm_regmap == NULL)
807: panic("setpte4m: no regmap entry");
808: #endif
809: rm = &pm->pm_regmap[VA_VREG(va)];
810: sm = &rm->rg_segmap[VA_VSEG(va)];
811:
812: #ifdef DEBUG
813: if (rm->rg_segmap == NULL)
814: panic("setpte4m: no segmap for va %p (rp=%p)",
815: (caddr_t)va, (caddr_t)rm);
816:
817: if (sm->sg_pte == NULL)
818: panic("setpte4m: no pte for va %p (rp=%p, sp=%p)",
819: (caddr_t)va, rm, sm);
820: #endif
1.199.4.14 thorpej 821: tlb_flush_page(va, 0);
1.199.4.2 pk 822: setpgt4m(sm->sg_pte + VA_SUN4M_VPG(va), pte);
823: }
824:
825: /*
826: * Page table pool back-end.
827: */
828: void *
1.199.4.5 nathanw 829: pgt_page_alloc(struct pool *pp, int flags)
1.199.4.2 pk 830: {
831: int cacheit = (cpuinfo.flags & CPUFLG_CACHEPAGETABLES) != 0;
832: struct vm_page *pg;
833: vaddr_t va;
834: paddr_t pa;
835:
836: /* Allocate a page of physical memory */
837: if ((pg = uvm_pagealloc(NULL, 0, NULL, 0)) == NULL)
838: return (NULL);
839:
840: /* Allocate virtual memory */
841: va = uvm_km_valloc(kernel_map, PAGE_SIZE);
842: if (va == 0) {
843: uvm_pagefree(pg);
844: return (NULL);
845: }
846:
847: /*
848: * On systems with a physical data cache we need to flush this page
849: * from the cache if the pagetables cannot be cached.
850: * On systems with a virtually indexed data cache, we only need
851: * to map it non-cacheable, since the page is not currently mapped.
852: */
853: pa = VM_PAGE_TO_PHYS(pg);
854: if (cacheit == 0)
855: pcache_flush_page(pa, 1);
856:
857: /* Map the page */
858: pmap_kenter_pa(va, pa | (cacheit ? 0 : PMAP_NC),
859: VM_PROT_READ | VM_PROT_WRITE);
860: pmap_update(pmap_kernel());
861:
862: return ((void *)va);
863: }
864:
865: void
1.199.4.5 nathanw 866: pgt_page_free(struct pool *pp, void *v)
1.199.4.2 pk 867: {
868: vaddr_t va;
869: paddr_t pa;
870: boolean_t rv;
871:
872: va = (vaddr_t)v;
873: rv = pmap_extract(pmap_kernel(), va, &pa);
874: KASSERT(rv);
875: uvm_pagefree(PHYS_TO_VM_PAGE(pa));
1.199.4.5 nathanw 876: pmap_kremove(va, PAGE_SIZE);
877: uvm_km_free(kernel_map, va, PAGE_SIZE);
1.199.4.2 pk 878: }
1.199.4.13 thorpej 879: #endif /* SUN4M || SUN4D */
1.199.4.2 pk 880:
881: /*----------------------------------------------------------------*/
882:
883: /*
884: * The following three macros are to be used in sun4/sun4c code only.
885: */
886: #if defined(SUN4_MMU3L)
887: #define CTX_USABLE(pm,rp) ( \
888: ((pm)->pm_ctx != NULL && \
889: (!HASSUN4_MMU3L || (rp)->rg_smeg != reginval)) \
890: )
891: #else
892: #define CTX_USABLE(pm,rp) ((pm)->pm_ctx != NULL )
893: #endif
894:
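/*
 * [4/4c] Each user pmap records a `gap' of virtual regions known to
 * contain no mappings (pm_gap_start .. pm_gap_end) so that loops over
 * the region table can skip it. GAP_WIDEN grows the gap when a
 * bordering region empties; GAP_SHRINK narrows it around a region that
 * is about to gain a mapping, keeping whichever side of the gap the
 * midpoint test below says is larger.
 */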
1.199.4.9 nathanw 895: #define GAP_WIDEN(pm,vr) do if (CPU_ISSUN4 || CPU_ISSUN4C) { \
896: if (vr + 1 == pm->pm_gap_start) \
897: pm->pm_gap_start = vr; \
898: if (vr == pm->pm_gap_end) \
899: pm->pm_gap_end = vr + 1; \
1.199.4.2 pk 900: } while (0)
901:
1.199.4.9 nathanw 902: #define GAP_SHRINK(pm,vr) do if (CPU_ISSUN4 || CPU_ISSUN4C) { \
1.199.4.2 pk 903: int x; \
904: x = pm->pm_gap_start + (pm->pm_gap_end - pm->pm_gap_start) / 2; \
905: if (vr > x) { \
906: if (vr < pm->pm_gap_end) \
907: pm->pm_gap_end = vr; \
908: } else { \
909: if (vr >= pm->pm_gap_start && x != pm->pm_gap_start) \
910: pm->pm_gap_start = vr + 1; \
911: } \
912: } while (0)
913:
914:
915: static void get_phys_mem __P((void));
916: void pv_flushcache __P((struct pvlist *));
917: void pv_uncache __P((struct pvlist *));
918: void kvm_iocache __P((caddr_t, int));
919:
920: #ifdef DEBUG
921: void pm_check __P((char *, struct pmap *));
922: void pm_check_k __P((char *, struct pmap *));
923: void pm_check_u __P((char *, struct pmap *));
924: #endif
925:
926: /*
927: * During the PMAP bootstrap, we can use a simple translation to map a
    928: 	 * kernel virtual address to a physical memory address (this is arranged
929: * in locore). Usually, KERNBASE maps to physical address 0. This is always
930: * the case on sun4 and sun4c machines. On sun4m machines -- if no memory is
931: * installed in the bank corresponding to physical address 0 -- the PROM may
932: * elect to load us at some other address, presumably at the start of
    933: 	 * the first memory bank that is available. We set up the variable
934: * `va2pa_offset' to hold the physical address corresponding to KERNBASE.
935: */
936:
937: static u_long va2pa_offset = KERNBASE;
938: #define PMAP_BOOTSTRAP_VA2PA(v) ((paddr_t)((u_long)(v) - va2pa_offset))
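/*
 * Worked example of the usual case described above: with the kernel
 * loaded at physical address 0, va2pa_offset == KERNBASE, so during
 * bootstrap PMAP_BOOTSTRAP_VA2PA(KERNBASE + 0x4000) == 0x4000.
 */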
939:
940: /*
941: * Grab physical memory list.
942: * While here, compute `physmem'.
943: */
944: void
945: get_phys_mem()
946: {
947: struct memarr *mp;
948: int i;
949:
950: npmemarr = makememarr(pmemarr, MA_SIZE, MEMARR_AVAILPHYS);
951:
952: for (physmem = 0, mp = pmemarr, i = npmemarr; --i >= 0; mp++)
953: physmem += btoc(mp->len);
954: }
955:
956:
957: /*
958: * Support functions for vm_page_bootstrap().
959: */
960:
961: /*
962: * How much virtual space does this kernel have?
963: * (After mapping kernel text, data, etc.)
964: */
965: void
966: pmap_virtual_space(v_start, v_end)
967: vaddr_t *v_start;
968: vaddr_t *v_end;
969: {
970: *v_start = virtual_avail;
971: *v_end = virtual_end;
972: }
973:
974: /*
975: * Helper routine that hands off available physical pages to the VM system.
976: */
977: static void
978: pmap_page_upload()
979: {
980: int n;
981: paddr_t start, end;
982:
983: /* First, the `etext gap' */
984: start = PMAP_BOOTSTRAP_VA2PA(etext_gap_start);
985: end = PMAP_BOOTSTRAP_VA2PA(etext_gap_end);
986: #ifdef DIAGNOSTIC
987: if (avail_start <= start)
988: panic("pmap_page_upload: etext gap overlap: %lx < %lx",
989: (u_long)avail_start, (u_long)start);
990: #endif
991: if (etext_gap_start < etext_gap_end) {
992: vm_first_phys = start;
993: uvm_page_physload(
994: atop(start),
995: atop(end),
996: atop(start),
997: atop(end), VM_FREELIST_DEFAULT);
998: }
999:
1000: for (n = 0; n < npmemarr; n++) {
1001:
1002: start = pmemarr[n].addr;
1003: end = start + pmemarr[n].len;
1004:
1005: /*
1006: * If this segment contains `avail_start', we must exclude
1007: * the range of initial kernel memory as computed by
1008: * pmap_bootstrap(). Note that this will also exclude
1009: * the `etext gap' range already uploaded above.
1010: */
1011: if (start <= avail_start && avail_start < end)
1012: start = avail_start;
1013:
1014: if (start == end)
1015: continue;
1016:
    1017: 		/* Update vm_{first,last}_phys */
1018: if (vm_first_phys > start)
1019: vm_first_phys = start;
1020:
1021: if (vm_last_phys < end)
1022: vm_last_phys = end;
1023:
1024: uvm_page_physload(
1025: atop(start),
1026: atop(end),
1027: atop(start),
1028: atop(end), VM_FREELIST_DEFAULT);
1029: }
1030: }
1031:
1032: /*
1033: * This routine is used by mmrw() to validate access to `/dev/mem'.
1034: */
1035: int
1036: pmap_pa_exists(pa)
1037: paddr_t pa;
1038: {
1039: int nmem;
1040: struct memarr *mp;
1041:
1042: for (mp = pmemarr, nmem = npmemarr; --nmem >= 0; mp++) {
1043: if (pa >= mp->addr && pa < mp->addr + mp->len)
1044: return 1;
1045: }
1046:
1047: return 0;
1048: }
1049:
1050: /* update pv_flags given a valid pte */
1051: #define MR4_4C(pte) (((pte) >> PG_M_SHIFT) & (PV_MOD | PV_REF))
1052: #define MR4M(pte) (((pte) >> PG_M_SHIFT4M) & (PV_MOD4M | PV_REF4M))
1053:
1054: /*----------------------------------------------------------------*/
1055:
1056: /*
1057: * Agree with the monitor ROM as to how many MMU entries are
1058: * to be reserved, and map all of its segments into all contexts.
1059: *
1060: * Unfortunately, while the Version 0 PROM had a nice linked list of
1061: * taken virtual memory, the Version 2 PROM provides instead a convoluted
1062: * description of *free* virtual memory. Rather than invert this, we
1063: * resort to two magic constants from the PROM vector description file.
1064: */
1065: #if defined(SUN4) || defined(SUN4C)
1066: void
1067: mmu_reservemon4_4c(nrp, nsp)
1068: int *nrp, *nsp;
1069: {
1070: u_int va = 0, eva = 0;
1071: int mmuseg, i, nr, ns, vr, lastvr;
1072: #if defined(SUN4_MMU3L)
1073: int mmureg;
1074: #endif
1075: struct regmap *rp;
1076:
1.199.4.9 nathanw 1077: #if defined(SUN4M) || defined(SUN4D)
1078: if (CPU_HAS_SRMMU) {
1079: panic("mmu_reservemon4_4c called on SRMMU machine");
1.199.4.2 pk 1080: return;
1081: }
1082: #endif
1083:
1084: #if defined(SUN4)
1085: if (CPU_ISSUN4) {
1086: prom_vstart = va = OLDMON_STARTVADDR;
1087: prom_vend = eva = OLDMON_ENDVADDR;
1088: }
1089: #endif
1090: #if defined(SUN4C)
1091: if (CPU_ISSUN4C) {
1092: prom_vstart = va = OPENPROM_STARTVADDR;
1093: prom_vend = eva = OPENPROM_ENDVADDR;
1094: }
1095: #endif
1096: ns = *nsp;
1097: nr = *nrp;
1098: lastvr = 0;
1099: while (va < eva) {
1100: vr = VA_VREG(va);
1101: rp = &pmap_kernel()->pm_regmap[vr];
1102:
1103: #if defined(SUN4_MMU3L)
1104: if (HASSUN4_MMU3L && vr != lastvr) {
1105: lastvr = vr;
1106: mmureg = getregmap(va);
1107: if (mmureg < nr)
1108: rp->rg_smeg = nr = mmureg;
1109: /*
1110: * On 3-level MMU machines, we distribute regions,
1111: * rather than segments, amongst the contexts.
1112: */
1113: for (i = ncontext; --i > 0;)
1114: prom_setcontext(i, (caddr_t)va, mmureg);
1115: }
1116: #endif
1117: mmuseg = getsegmap(va);
1118: if (mmuseg < ns)
1119: ns = mmuseg;
1120:
1121: if (!HASSUN4_MMU3L)
1122: for (i = ncontext; --i > 0;)
1123: prom_setcontext(i, (caddr_t)va, mmuseg);
1124:
1125: if (mmuseg == seginval) {
1126: va += NBPSG;
1127: continue;
1128: }
1129: /*
1130: * Another PROM segment. Enter into region map.
1131: * Assume the entire segment is valid.
1132: */
1133: rp->rg_nsegmap += 1;
1134: rp->rg_segmap[VA_VSEG(va)].sg_pmeg = mmuseg;
1135: rp->rg_segmap[VA_VSEG(va)].sg_npte = NPTESG;
1136:
1137: /* PROM maps its memory user-accessible: fix it. */
1138: for (i = NPTESG; --i >= 0; va += NBPG)
1139: setpte4(va, getpte4(va) | PG_S);
1140: }
1141: *nsp = ns;
1142: *nrp = nr;
1143: return;
1144: }
1145: #endif
1146:
1.199.4.9 nathanw 1147: #if defined(SUN4M) || defined(SUN4D) /* SRMMU versions of above */
1.199.4.2 pk 1148:
1149: u_long
1150: srmmu_bypass_read(paddr)
1151: u_long paddr;
1152: {
1153: unsigned long v;
1154:
1155: if (cpuinfo.mxcc) {
1156: /*
1157: * We're going to have to use MMU passthrough. If we're on
1158: * a Viking SuperSPARC with a MultiCache Controller, we
1159: * need to set the AC (Alternate Cacheable) bit in the MMU's
1160: * control register in order to not by-pass the cache.
1161: */
1162:
1163: unsigned long s = lda(SRMMU_PCR, ASI_SRMMU);
1164:
1165: /* set MMU AC bit */
1166: sta(SRMMU_PCR, ASI_SRMMU, s | VIKING_PCR_AC);
1167: v = lda(paddr, ASI_BYPASS);
1168: sta(SRMMU_PCR, ASI_SRMMU, s);
1169: } else
1170: v = lda(paddr, ASI_BYPASS);
1171:
1172: return (v);
1173: }
1174:
1175:
1176: /*
1177: * Take the monitor's initial page table layout, convert it to 3rd-level pte's
1178: * (it starts out as a L1 mapping), and install it along with a set of kernel
1179: * mapping tables as the kernel's initial page table setup. Also create and
1180: * enable a context table. I suppose we also want to block user-mode access
1181: * to the new kernel/ROM mappings.
1182: */
1183:
1184: /*
1185: * mmu_reservemon4m(): Copies the existing (ROM) page tables to kernel space,
1186: * converting any L1/L2 PTEs to L3 PTEs. Does *not* copy the L1 entry mapping
1187: * the kernel at KERNBASE since we don't want to map 16M of physical
1188: * memory for the kernel. Thus the kernel must be installed later!
1189: * Also installs ROM mappings into the kernel pmap.
1190: * NOTE: This also revokes all user-mode access to the mapped regions.
1191: */
1192: void
1193: mmu_reservemon4m(kpmap)
1194: struct pmap *kpmap;
1195: {
1196: unsigned int rom_ctxtbl;
1197: int te;
1198:
1199: prom_vstart = OPENPROM_STARTVADDR;
1200: prom_vend = OPENPROM_ENDVADDR;
1201:
1202: /*
1.199.4.4 nathanw 1203: * XXX: although the sun4m can handle 36 bits of physical
1.199.4.2 pk 1204: * address space, we assume that all these page tables, etc
1205: * are in the lower 4G (32-bits) of address space, i.e. out of I/O
1206: * space. Eventually this should be changed to support the 36 bit
1207: * physical addressing, in case some crazed ROM designer decides to
1208: * stick the pagetables up there. In that case, we should use MMU
1209: * transparent mode, (i.e. ASI 0x20 to 0x2f) to access
1210: * physical memory.
1211: */
1212:
1213: rom_ctxtbl = (lda(SRMMU_CXTPTR,ASI_SRMMU) << SRMMU_PPNPASHIFT);
1214:
1215: te = srmmu_bypass_read(rom_ctxtbl); /* i.e. context 0 */
1216:
1217: switch (te & SRMMU_TETYPE) {
1218: case SRMMU_TEINVALID:
1219: cpuinfo.ctx_tbl[0] = SRMMU_TEINVALID;
1220: panic("mmu_reservemon4m: no existing L0 mapping! "
    1221: 		      "(How are we running?)");
1222: break;
1223: case SRMMU_TEPTE:
1224: panic("mmu_reservemon4m: can't handle ROM 4G page size");
1225: /* XXX: Should make this work, however stupid it is */
1226: break;
1227: case SRMMU_TEPTD:
1228: mmu_setup4m_L1(te, kpmap);
1229: break;
1230: default:
1231: panic("mmu_reservemon4m: unknown pagetable entry type");
1232: }
1233: }
1234:
1235: void
1236: mmu_setup4m_L1(regtblptd, kpmap)
1237: int regtblptd; /* PTD for region table to be remapped */
1238: struct pmap *kpmap;
1239: {
1240: unsigned int regtblrover;
1241: int i;
1242: unsigned int te;
1243: struct regmap *rp;
1244: int j, k;
1245:
1246: /*
1247: * Here we scan the region table to copy any entries which appear.
1248: * We are only concerned with regions in kernel space and above
1249: * (i.e. regions VA_VREG(KERNBASE)+1 to 0xff). We ignore the first
1250: * region (at VA_VREG(KERNBASE)), since that is the 16MB L1 mapping
1251: * that the ROM used to map the kernel in initially. Later, we will
1252: * rebuild a new L3 mapping for the kernel and install it before
1253: * switching to the new pagetables.
1254: */
1255: regtblrover =
1256: ((regtblptd & ~SRMMU_TETYPE) << SRMMU_PPNPASHIFT) +
1257: (VA_VREG(KERNBASE)+1) * sizeof(long); /* kernel only */
1258:
1259: for (i = VA_VREG(KERNBASE) + 1; i < SRMMU_L1SIZE;
1260: i++, regtblrover += sizeof(long)) {
1261:
1262: /* The region we're dealing with */
1263: rp = &kpmap->pm_regmap[i];
1264:
1265: te = srmmu_bypass_read(regtblrover);
1266: switch(te & SRMMU_TETYPE) {
1267: case SRMMU_TEINVALID:
1268: break;
1269:
1270: case SRMMU_TEPTE:
1271: #ifdef DEBUG
1272: printf("mmu_setup4m_L1: "
1273: "converting region 0x%x from L1->L3\n", i);
1274: #endif
1275: /*
1276: * This region entry covers 64MB of memory -- or
1277: * (NSEGRG * NPTESG) pages -- which we must convert
1278: * into a 3-level description.
1279: */
1280:
1281: for (j = 0; j < SRMMU_L2SIZE; j++) {
1282: struct segmap *sp = &rp->rg_segmap[j];
1283:
1284: for (k = 0; k < SRMMU_L3SIZE; k++) {
1285: sp->sg_npte++;
1286: setpgt4m(&sp->sg_pte[k],
1287: (te & SRMMU_L1PPNMASK) |
1288: (j << SRMMU_L2PPNSHFT) |
1289: (k << SRMMU_L3PPNSHFT) |
1290: (te & SRMMU_PGBITSMSK) |
1291: ((te & SRMMU_PROT_MASK) |
1292: PPROT_U2S_OMASK) |
1293: SRMMU_TEPTE);
1294: }
1295: }
1296: break;
1297:
1298: case SRMMU_TEPTD:
1299: mmu_setup4m_L2(te, rp);
1300: break;
1301:
1302: default:
1303: panic("mmu_setup4m_L1: unknown pagetable entry type");
1304: }
1305: }
1306: }
1307:
1308: void
1309: mmu_setup4m_L2(segtblptd, rp)
1310: int segtblptd;
1311: struct regmap *rp;
1312: {
1313: unsigned int segtblrover;
1314: int i, k;
1315: unsigned int te;
1316: struct segmap *sp;
1317:
1318: segtblrover = (segtblptd & ~SRMMU_TETYPE) << SRMMU_PPNPASHIFT;
1319: for (i = 0; i < SRMMU_L2SIZE; i++, segtblrover += sizeof(long)) {
1320:
1321: sp = &rp->rg_segmap[i];
1322:
1323: te = srmmu_bypass_read(segtblrover);
1324: switch(te & SRMMU_TETYPE) {
1325: case SRMMU_TEINVALID:
1326: break;
1327:
1328: case SRMMU_TEPTE:
1329: #ifdef DEBUG
1330: printf("mmu_setup4m_L2: converting L2 entry at segment 0x%x to L3\n",i);
1331: #endif
1332: /*
1333: * This segment entry covers 256KB of memory -- or
1334: * (NPTESG) pages -- which we must convert
1335: * into a 3-level description.
1336: */
1337: for (k = 0; k < SRMMU_L3SIZE; k++) {
1338: sp->sg_npte++;
1339: setpgt4m(&sp->sg_pte[k],
1340: (te & SRMMU_L1PPNMASK) |
1341: (te & SRMMU_L2PPNMASK) |
1342: (k << SRMMU_L3PPNSHFT) |
1343: (te & SRMMU_PGBITSMSK) |
1344: ((te & SRMMU_PROT_MASK) |
1345: PPROT_U2S_OMASK) |
1346: SRMMU_TEPTE);
1347: }
1348: break;
1349:
1350: case SRMMU_TEPTD:
1351: mmu_setup4m_L3(te, sp);
1352: break;
1353:
1354: default:
1355: panic("mmu_setup4m_L2: unknown pagetable entry type");
1356: }
1357: }
1358: }
1359:
1360: void
1361: mmu_setup4m_L3(pagtblptd, sp)
1362: int pagtblptd;
1363: struct segmap *sp;
1364: {
1365: unsigned int pagtblrover;
1366: int i;
1367: unsigned int te;
1368:
1369: pagtblrover = (pagtblptd & ~SRMMU_TETYPE) << SRMMU_PPNPASHIFT;
1370: for (i = 0; i < SRMMU_L3SIZE; i++, pagtblrover += sizeof(long)) {
1371: te = srmmu_bypass_read(pagtblrover);
1372: switch(te & SRMMU_TETYPE) {
1373: case SRMMU_TEINVALID:
1374: break;
1375: case SRMMU_TEPTE:
1376: sp->sg_npte++;
1377: setpgt4m(&sp->sg_pte[i], te | PPROT_U2S_OMASK);
1378: break;
1379: case SRMMU_TEPTD:
1380: panic("mmu_setup4m_L3: PTD found in L3 page table");
1381: default:
1382: panic("mmu_setup4m_L3: unknown pagetable entry type");
1383: }
1384: }
1385: }
1.199.4.9 nathanw 1386: #endif /* defined SUN4M || defined SUN4D */
1.199.4.2 pk 1387:
1388: /*----------------------------------------------------------------*/
1389:
1390: /*
1391: * MMU management.
1392: */
1393: struct mmuentry *me_alloc __P((struct mmuhd *, struct pmap *, int, int));
1394: void me_free __P((struct pmap *, u_int));
1395: struct mmuentry *region_alloc __P((struct mmuhd *, struct pmap *, int));
1396: void region_free __P((struct pmap *, u_int));
1397:
1398: /*
1399: * Change contexts. We need the old context number as well as the new
1400: * one. If the context is changing, we must write all user windows
1401: * first, lest an interrupt cause them to be written to the (other)
1402: * user whose context we set here.
1403: */
1404: #define CHANGE_CONTEXTS(old, new) \
1405: if ((old) != (new)) { \
1406: write_user_windows(); \
1407: setcontext(new); \
1408: }
1409:
1410: #if defined(SUN4) || defined(SUN4C) /* This is old sun MMU stuff */
1411: /*
1412: * Allocate an MMU entry (i.e., a PMEG).
1413: * If necessary, steal one from someone else.
1414: * Put it on the tail of the given queue
1415: * (which is either the LRU list or the locked list).
1416: * The locked list is not actually ordered, but this is easiest.
1417: * Also put it on the given (new) pmap's chain,
1418: * enter its pmeg number into that pmap's segmap,
1419: * and store the pmeg's new virtual segment number (me->me_vseg).
1420: *
1421: * This routine is large and complicated, but it must be fast
1422: * since it implements the dynamic allocation of MMU entries.
1423: */
1424: struct mmuentry *
1425: me_alloc(mh, newpm, newvreg, newvseg)
1426: struct mmuhd *mh;
1427: struct pmap *newpm;
1428: int newvreg, newvseg;
1429: {
1430: struct mmuentry *me;
1431: struct pmap *pm;
1432: int i, va, *pte, tpte;
1433: int ctx;
1434: struct regmap *rp;
1435: struct segmap *sp;
1436:
1437: /* try free list first */
1438: if ((me = segm_freelist.tqh_first) != NULL) {
1439: TAILQ_REMOVE(&segm_freelist, me, me_list);
1440: #ifdef DEBUG
1441: if (me->me_pmap != NULL)
1442: panic("me_alloc: freelist entry has pmap");
1443: if (pmapdebug & PDB_MMU_ALLOC)
1444: printf("me_alloc: got pmeg %d\n", me->me_cookie);
1445: #endif
1446: TAILQ_INSERT_TAIL(mh, me, me_list);
1447:
    1448: 		/* onto new pmap chain; pmap is already locked, if needed */
1449: TAILQ_INSERT_TAIL(&newpm->pm_seglist, me, me_pmchain);
1450: #ifdef DIAGNOSTIC
1451: pmap_stats.ps_npmeg_free--;
1452: if (mh == &segm_locked)
1453: pmap_stats.ps_npmeg_locked++;
1454: else
1455: pmap_stats.ps_npmeg_lru++;
1456: #endif
1457:
1458: /* into pmap segment table, with backpointers */
1459: newpm->pm_regmap[newvreg].rg_segmap[newvseg].sg_pmeg = me->me_cookie;
1460: me->me_pmap = newpm;
1461: me->me_vseg = newvseg;
1462: me->me_vreg = newvreg;
1463:
1464: return (me);
1465: }
1466:
1467: /* no luck, take head of LRU list */
1468: if ((me = segm_lru.tqh_first) == NULL)
1469: panic("me_alloc: all pmegs gone");
1470:
1471: pm = me->me_pmap;
1472: #ifdef DEBUG
1473: if (pmapdebug & (PDB_MMU_ALLOC | PDB_MMU_STEAL))
1474: printf("me_alloc: stealing pmeg 0x%x from pmap %p\n",
1475: me->me_cookie, pm);
1476: #endif
1477: /*
1478: * Remove from LRU list, and insert at end of new list
1479: * (probably the LRU list again, but so what?).
1480: */
1481: TAILQ_REMOVE(&segm_lru, me, me_list);
1482: TAILQ_INSERT_TAIL(mh, me, me_list);
1483:
1484: #ifdef DIAGNOSTIC
1485: if (mh == &segm_locked) {
1486: pmap_stats.ps_npmeg_lru--;
1487: pmap_stats.ps_npmeg_locked++;
1488: }
1489: #endif
1490:
1491: rp = &pm->pm_regmap[me->me_vreg];
1492: sp = &rp->rg_segmap[me->me_vseg];
1493: pte = sp->sg_pte;
1494:
1495: /*
1496: * The PMEG must be mapped into some context so that we can
1497: * read its PTEs. Use its current context if it has one;
1498: * if not, and since context 0 is reserved for the kernel,
1499: * the simplest method is to switch to 0 and map the PMEG
1500: * to virtual address 0---which, being a user space address,
1501: * is by definition not in use.
1502: *
1503: * XXX for ncpus>1 must use per-cpu VA?
1504: * XXX do not have to flush cache immediately
1505: */
1506: ctx = getcontext4();
1507: if (CTX_USABLE(pm,rp)) {
1508: CHANGE_CONTEXTS(ctx, pm->pm_ctxnum);
1.199.4.12 thorpej 1509: cache_flush_segment(me->me_vreg, me->me_vseg, pm->pm_ctxnum);
1.199.4.2 pk 1510: va = VSTOVA(me->me_vreg,me->me_vseg);
1511: } else {
1512: CHANGE_CONTEXTS(ctx, 0);
1513: if (HASSUN4_MMU3L)
1514: setregmap(0, tregion);
1515: setsegmap(0, me->me_cookie);
1516: /*
1517: * No cache flush needed: it happened earlier when
1518: * the old context was taken.
1519: */
1520: va = 0;
1521: }
1522:
1523: /*
1524: * Record reference and modify bits for each page,
1525: * and copy PTEs into kernel memory so that they can
1526: * be reloaded later.
1527: */
1528: i = NPTESG;
1529: do {
1530: tpte = getpte4(va);
1531: if ((tpte & (PG_V | PG_TYPE)) == (PG_V | PG_OBMEM)) {
1532: u_int pfn = tpte & PG_PFNUM;
1533: struct pvlist *pv;
1534: if ((pv = pvhead(pfn)) != NULL)
1535: pv->pv_flags |= MR4_4C(tpte);
1536: }
1537: *pte++ = tpte & ~(PG_U|PG_M);
1538: va += NBPG;
1539: } while (--i > 0);
1540:
1541: /* update segment tables */
1542: simple_lock(&pm->pm_lock); /* what if other cpu takes mmuentry ?? */
1543: if (CTX_USABLE(pm,rp))
1544: setsegmap(VSTOVA(me->me_vreg,me->me_vseg), seginval);
1545: sp->sg_pmeg = seginval;
1546:
1547: /* off old pmap chain */
1548: TAILQ_REMOVE(&pm->pm_seglist, me, me_pmchain);
1549: simple_unlock(&pm->pm_lock);
1550: setcontext4(ctx);
1551:
1552: /* onto new pmap chain; new pmap is already locked, if needed */
1553: TAILQ_INSERT_TAIL(&newpm->pm_seglist, me, me_pmchain);
1554:
1555: /* into new segment table, with backpointers */
1556: newpm->pm_regmap[newvreg].rg_segmap[newvseg].sg_pmeg = me->me_cookie;
1557: me->me_pmap = newpm;
1558: me->me_vseg = newvseg;
1559: me->me_vreg = newvreg;
1560:
1561: return (me);
1562: }
1563:
1564: /*
1565: * Free an MMU entry.
1566: *
1567: * Assumes the corresponding pmap is already locked.
1568: * Does NOT flush cache, but does record ref and mod bits.
1569: * The rest of each PTE is discarded.
1570: * CALLER MUST SET CONTEXT to pm->pm_ctxnum (if pmap has
1571: * a context) or to 0 (if not). Caller must also update
1572: * pm->pm_segmap and (possibly) the hardware.
1573: */
1574: void
1575: me_free(pm, pmeg)
1576: struct pmap *pm;
1577: u_int pmeg;
1578: {
1579: struct mmuentry *me = &mmusegments[pmeg];
1580: int i, va, tpte;
1581: int vr;
1582: struct regmap *rp;
1583:
1584: vr = me->me_vreg;
1585:
1586: #ifdef DEBUG
1587: if (pmapdebug & PDB_MMU_ALLOC)
1588: printf("me_free: freeing pmeg %d from pmap %p\n",
1589: me->me_cookie, pm);
1590: if (me->me_cookie != pmeg)
1591: panic("me_free: wrong mmuentry");
1592: if (pm != me->me_pmap)
1593: panic("me_free: pm != me_pmap");
1594: #endif
1595:
1596: rp = &pm->pm_regmap[vr];
1597:
1598: /* just like me_alloc, but no cache flush, and context already set */
1599: if (CTX_USABLE(pm,rp)) {
1600: va = VSTOVA(vr,me->me_vseg);
1601: } else {
1602: #ifdef DEBUG
1603: if (getcontext4() != 0) panic("me_free: ctx != 0");
1604: #endif
1605: if (HASSUN4_MMU3L)
1606: setregmap(0, tregion);
1607: setsegmap(0, me->me_cookie);
1608: va = 0;
1609: }
1610: i = NPTESG;
1611: do {
1612: tpte = getpte4(va);
1613: if ((tpte & (PG_V | PG_TYPE)) == (PG_V | PG_OBMEM)) {
1614: u_int pfn = tpte & PG_PFNUM;
1615: struct pvlist *pv;
1616: if ((pv = pvhead(pfn)) != NULL)
1617: pv->pv_flags |= MR4_4C(tpte);
1618: }
1619: va += NBPG;
1620: } while (--i > 0);
1621:
1622: /* take mmu entry off pmap chain */
1623: TAILQ_REMOVE(&pm->pm_seglist, me, me_pmchain);
1624: /* ... and remove from segment map */
1625: if (rp->rg_segmap == NULL)
1626: panic("me_free: no segments in pmap");
1627: rp->rg_segmap[me->me_vseg].sg_pmeg = seginval;
1628:
1629: /* off LRU or lock chain */
1630: if (pm == pmap_kernel()) {
1631: TAILQ_REMOVE(&segm_locked, me, me_list);
1632: #ifdef DIAGNOSTIC
1633: pmap_stats.ps_npmeg_locked--;
1634: #endif
1635: } else {
1636: TAILQ_REMOVE(&segm_lru, me, me_list);
1637: #ifdef DIAGNOSTIC
1638: pmap_stats.ps_npmeg_lru--;
1639: #endif
1640: }
1641:
1642: /* no associated pmap; on free list */
1643: me->me_pmap = NULL;
1644: TAILQ_INSERT_TAIL(&segm_freelist, me, me_list);
1645: #ifdef DIAGNOSTIC
1646: pmap_stats.ps_npmeg_free++;
1647: #endif
1648: }
1649:
1650: #if defined(SUN4_MMU3L)
1651:
1652: /* XXX - Merge with segm_alloc/segm_free ? */
1653:
1654: struct mmuentry *
1655: region_alloc(mh, newpm, newvr)
1656: struct mmuhd *mh;
1657: struct pmap *newpm;
1658: int newvr;
1659: {
1660: struct mmuentry *me;
1661: struct pmap *pm;
1662: int ctx;
1663: struct regmap *rp;
1664:
1665: /* try free list first */
1666: if ((me = region_freelist.tqh_first) != NULL) {
1667: TAILQ_REMOVE(&region_freelist, me, me_list);
1668: #ifdef DEBUG
1669: if (me->me_pmap != NULL)
1670: panic("region_alloc: freelist entry has pmap");
1671: if (pmapdebug & PDB_MMUREG_ALLOC)
1672: printf("region_alloc: got smeg 0x%x\n", me->me_cookie);
1673: #endif
1674: TAILQ_INSERT_TAIL(mh, me, me_list);
1675:
1676: /* onto new pmap chain; pmap is already locked, if needed */
1677: TAILQ_INSERT_TAIL(&newpm->pm_reglist, me, me_pmchain);
1678:
1679: /* into pmap segment table, with backpointers */
1680: newpm->pm_regmap[newvr].rg_smeg = me->me_cookie;
1681: me->me_pmap = newpm;
1682: me->me_vreg = newvr;
1683:
1684: return (me);
1685: }
1686:
1687: /* no luck, take head of LRU list */
1688: if ((me = region_lru.tqh_first) == NULL)
1689: panic("region_alloc: all smegs gone");
1690:
1691: pm = me->me_pmap;
1692: if (pm == NULL)
1693: panic("region_alloc: LRU entry has no pmap");
1694: if (pm == pmap_kernel())
1695: panic("region_alloc: stealing from kernel");
1696: #ifdef DEBUG
1697: if (pmapdebug & (PDB_MMUREG_ALLOC | PDB_MMUREG_STEAL))
1698: printf("region_alloc: stealing smeg 0x%x from pmap %p\n",
1699: me->me_cookie, pm);
1700: #endif
1701: /*
1702: * Remove from LRU list, and insert at end of new list
1703: * (probably the LRU list again, but so what?).
1704: */
1705: TAILQ_REMOVE(&region_lru, me, me_list);
1706: TAILQ_INSERT_TAIL(mh, me, me_list);
1707:
1708: rp = &pm->pm_regmap[me->me_vreg];
1709: ctx = getcontext4();
1710: if (pm->pm_ctx) {
1711: CHANGE_CONTEXTS(ctx, pm->pm_ctxnum);
1.199.4.12 thorpej 1712: cache_flush_region(me->me_vreg, pm->pm_ctxnum);
1.199.4.2 pk 1713: }
1714:
1715: /* update region tables */
1716: simple_lock(&pm->pm_lock); /* what if other cpu takes mmuentry ?? */
1717: if (pm->pm_ctx)
1718: setregmap(VRTOVA(me->me_vreg), reginval);
1719: rp->rg_smeg = reginval;
1720:
1721: /* off old pmap chain */
1722: TAILQ_REMOVE(&pm->pm_reglist, me, me_pmchain);
1723: simple_unlock(&pm->pm_lock);
1724: setcontext4(ctx); /* done with old context */
1725:
1726: /* onto new pmap chain; new pmap is already locked, if needed */
1727: TAILQ_INSERT_TAIL(&newpm->pm_reglist, me, me_pmchain);
1728:
1729: /* into new segment table, with backpointers */
1730: newpm->pm_regmap[newvr].rg_smeg = me->me_cookie;
1731: me->me_pmap = newpm;
1732: me->me_vreg = newvr;
1733:
1734: return (me);
1735: }
1736:
1737: /*
1738: * Free an MMU entry.
1739: *
1740: * Assumes the corresponding pmap is already locked.
1741: * Flushes the cache if the pmap has a context (unlike me_free).
1742: * CALLER MUST SET CONTEXT to pm->pm_ctxnum (if pmap has
1743: * a context) or to 0 (if not). Caller must also update
1744: * pm->pm_regmap and (possibly) the hardware.
1745: */
1746: void
1747: region_free(pm, smeg)
1748: struct pmap *pm;
1749: u_int smeg;
1750: {
1751: struct mmuentry *me = &mmuregions[smeg];
1752:
1753: #ifdef DEBUG
1754: if (pmapdebug & PDB_MMUREG_ALLOC)
1755: printf("region_free: freeing smeg 0x%x from pmap %p\n",
1756: me->me_cookie, pm);
1757: if (me->me_cookie != smeg)
1758: panic("region_free: wrong mmuentry");
1759: if (pm != me->me_pmap)
1760: panic("region_free: pm != me_pmap");
1761: #endif
1762:
1763: if (pm->pm_ctx)
1.199.4.12 thorpej 1764: cache_flush_region(me->me_vreg, pm->pm_ctxnum);
1.199.4.2 pk 1765:
1766: /* take mmu entry off pmap chain */
1767: TAILQ_REMOVE(&pm->pm_reglist, me, me_pmchain);
1768: /* ... and remove from segment map */
1769: pm->pm_regmap[me->me_vreg].rg_smeg = reginval;
1770:
1771: /* off LRU or lock chain */
1772: if (pm == pmap_kernel()) {
1773: TAILQ_REMOVE(&region_locked, me, me_list);
1774: } else {
1775: TAILQ_REMOVE(&region_lru, me, me_list);
1776: }
1777:
1778: /* no associated pmap; on free list */
1779: me->me_pmap = NULL;
1780: TAILQ_INSERT_TAIL(&region_freelist, me, me_list);
1781: }
1782: #endif
1783:
1784: /*
1785: * `Page in' (load or inspect) an MMU entry; called on page faults.
1786: * Returns 1 if we reloaded the segment, -1 if the segment was
1787: * already loaded and the page was marked valid (in which case the
1788: * fault must be a bus error or something), or 0 (segment loaded but
1789: * PTE not valid, or segment not loaded at all).
1790: */
1791: int
1792: mmu_pagein(pm, va, prot)
1793: struct pmap *pm;
1794: vaddr_t va;
1795: int prot;
1796: {
1797: int *pte;
1798: int vr, vs, pmeg, i, s, bits;
1799: struct regmap *rp;
1800: struct segmap *sp;
1801:
1802: if (va >= (unsigned long)KERNBASE)
1803: return (0);
1804:
1805: if (prot != VM_PROT_NONE)
1806: bits = PG_V | ((prot & VM_PROT_WRITE) ? PG_W : 0);
1807: else
1808: bits = 0;
1809:
1810: vr = VA_VREG(va);
1811: vs = VA_VSEG(va);
1812: rp = &pm->pm_regmap[vr];
1813: #ifdef DEBUG
1814: if (pm == pmap_kernel())
1815: printf("mmu_pagein: kernel wants map at va 0x%lx, vr %d, vs %d\n",
1816: (u_long)va, vr, vs);
1817: #endif
1818:
1819: /* return 0 if we have no PMEGs to load */
1820: if (rp->rg_segmap == NULL)
1821: return (0);
1822:
1823: #if defined(SUN4_MMU3L)
1824: if (HASSUN4_MMU3L && rp->rg_smeg == reginval) {
1825: smeg_t smeg;
1826: unsigned int tva = VA_ROUNDDOWNTOREG(va);
1827: struct segmap *sp = rp->rg_segmap;
1828:
1829: s = splvm(); /* paranoid */
1830: smeg = region_alloc(&region_lru, pm, vr)->me_cookie;
1831: setregmap(tva, smeg);
1832: i = NSEGRG;
1833: do {
1834: setsegmap(tva, sp++->sg_pmeg);
1835: tva += NBPSG;
1836: } while (--i > 0);
1837: splx(s);
1838: }
1839: #endif
1840: sp = &rp->rg_segmap[vs];
1841:
1842: /* return 0 if we have no PTEs to load */
1843: if ((pte = sp->sg_pte) == NULL)
1844: return (0);
1845:
1846: /* return -1 if the fault is `hard', 0 if not */
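/*
 * If the segment is resident and the PTE already grants every bit
 * the access needs, reloading cannot help; the fault is genuine
 * (e.g. a protection violation or bus error).
 */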
1847: if (sp->sg_pmeg != seginval)
1848: return (bits && (getpte4(va) & bits) == bits ? -1 : 0);
1849:
1850: /* reload segment: write PTEs into a new LRU entry */
1851: va = VA_ROUNDDOWNTOSEG(va);
1852: s = splvm(); /* paranoid */
1853: pmeg = me_alloc(&segm_lru, pm, vr, vs)->me_cookie;
1854: setsegmap(va, pmeg);
1855: i = NPTESG;
1856: do {
1857: setpte4(va, *pte++);
1858: va += NBPG;
1859: } while (--i > 0);
1860: splx(s);
1861: return (1);
1862: }
1863: #endif /* defined SUN4 or SUN4C */
1864:
1865: /*
1866: * Allocate a context. If necessary, steal one from someone else.
1867: * Changes hardware context number and loads segment map.
1868: *
1869: * This routine is only ever called from locore.s just after it has
1870: * saved away the previous process, so there are no active user windows.
1871: */
1872: void
1873: ctx_alloc(pm)
1874: struct pmap *pm;
1875: {
1876: union ctxinfo *c;
1877: int s, cnum, i, doflush;
1878: struct regmap *rp;
1879: int gap_start, gap_end;
1880: vaddr_t va;
1881:
1882: /*XXX-GCC!*/gap_start=gap_end=0;
1883: #ifdef DEBUG
1884: if (pm->pm_ctx)
1885: panic("ctx_alloc pm_ctx");
1886: if (pmapdebug & PDB_CTX_ALLOC)
1.199.4.17! martin 1887: printf("ctx_alloc[%d](%p)\n", cpu_number(), pm);
1.199.4.2 pk 1888: #endif
1.199.4.9 nathanw 1889: if (CPU_ISSUN4 || CPU_ISSUN4C) {
1.199.4.2 pk 1890: gap_start = pm->pm_gap_start;
1891: gap_end = pm->pm_gap_end;
1892: }
1893:
1894: s = splvm();
1895: simple_lock(&ctx_lock);
1896: if ((c = ctx_freelist) != NULL) {
1897: ctx_freelist = c->c_nextfree;
1898: cnum = c - ctxinfo;
1899: doflush = 0;
1900: } else {
1901: if ((ctx_kick += ctx_kickdir) >= ncontext) {
1902: ctx_kick = ncontext - 1;
1903: ctx_kickdir = -1;
1904: } else if (ctx_kick < 1) {
1905: ctx_kick = 1;
1906: ctx_kickdir = 1;
1907: }
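/*
 * ctx_kick thus sweeps back and forth across contexts 1..ncontext-1,
 * picking victims round-robin; context 0 is reserved for the kernel
 * and is never stolen.
 */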
1908: c = &ctxinfo[cnum = ctx_kick];
1909: #ifdef DEBUG
1910: if (c->c_pmap == NULL)
1911: panic("ctx_alloc cu_pmap");
1912: if (pmapdebug & (PDB_CTX_ALLOC | PDB_CTX_STEAL))
1.199.4.17! martin 1913: printf("ctx_alloc[%d]: steal context %d from %p\n",
! 1914: cpu_number(), cnum, c->c_pmap);
1.199.4.2 pk 1915: #endif
1916: c->c_pmap->pm_ctx = NULL;
1.199.4.14 thorpej 1917: c->c_pmap->pm_ctxnum = 0;
1.199.4.2 pk 1918: doflush = (CACHEINFO.c_vactype != VAC_NONE);
1.199.4.9 nathanw 1919: if (CPU_ISSUN4 || CPU_ISSUN4C) {
1.199.4.2 pk 1920: if (gap_start < c->c_pmap->pm_gap_start)
1921: gap_start = c->c_pmap->pm_gap_start;
1922: if (gap_end > c->c_pmap->pm_gap_end)
1923: gap_end = c->c_pmap->pm_gap_end;
1924: }
1925: }
1926: simple_unlock(&ctx_lock);
1927:
1928: c->c_pmap = pm;
1929: pm->pm_ctx = c;
1930: pm->pm_ctxnum = cnum;
1931:
1.199.4.9 nathanw 1932: if (CPU_ISSUN4 || CPU_ISSUN4C) {
1.199.4.2 pk 1933: /*
1934: * Write pmap's region (3-level MMU) or segment table into
1935: * the MMU.
1936: *
1937: * Only write those entries that actually map something in
1938: * this context by maintaining a pair of region numbers in
1939: * between which the pmap has no valid mappings.
1940: *
1941: * If a context was just allocated from the free list, trust
1942: * that all its pmeg numbers are `seginval'. We make sure this
1943: * is the case initially in pmap_bootstrap(). Otherwise, the
1944: * context was freed by calling ctx_free() in pmap_release(),
1945: * which in turn is supposedly called only when all mappings
1946: * have been removed.
1947: *
1948: * On the other hand, if the context had to be stolen from
1949: * another pmap, we possibly shrink the gap to be the
1950: * disjunction of the new and the previous map, i.e. the intersection of the two gaps.
1951: */
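/*
 * Example: a new pmap with gap [8,200) stealing from a pmap with
 * gap [50,240) proceeds with the combined gap [50,200); all regions
 * outside that range are (re)written below.
 */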
1952:
1953: setcontext4(cnum);
1954: splx(s);
1955: if (doflush)
1.199.4.12 thorpej 1956: cache_flush_context(cnum);
1.199.4.2 pk 1957:
1958: rp = pm->pm_regmap;
1959: for (va = 0, i = NUREG; --i >= 0; ) {
1960: if (VA_VREG(va) >= gap_start) {
1961: va = VRTOVA(gap_end);
1962: i -= gap_end - gap_start;
1963: rp += gap_end - gap_start;
1964: if (i < 0)
1965: break;
1966: /* mustn't re-enter this branch */
1967: gap_start = NUREG;
1968: }
1969: if (HASSUN4_MMU3L) {
1970: setregmap(va, rp++->rg_smeg);
1971: va += NBPRG;
1972: } else {
1973: int j;
1974: struct segmap *sp = rp->rg_segmap;
1975: for (j = NSEGRG; --j >= 0; va += NBPSG)
1976: setsegmap(va,
1977: sp?sp++->sg_pmeg:seginval);
1978: rp++;
1979: }
1980: }
1981:
1.199.4.9 nathanw 1982: } else if (CPU_HAS_SRMMU) {
1.199.4.2 pk 1983:
1.199.4.9 nathanw 1984: #if defined(SUN4M) || defined(SUN4D)
1.199.4.2 pk 1985: /*
1986: * Reload page and context tables to activate the page tables
1987: * for this context.
1988: *
1.199.4.4 nathanw 1989: * The gap stuff isn't really needed in the sun4m architecture,
1.199.4.2 pk 1990: * since we don't have to worry about excessive mappings (all
1991: * mappings exist since the page tables must be complete for
1992: * the mmu to be happy).
1993: *
1994: * If a context was just allocated from the free list, trust
1995: * that all of its mmu-edible page tables are zeroed out
1996: * (except for those associated with the kernel). We make
1997: * sure this is the case initially in pmap_bootstrap() and
1998: * pmap_init() (?).
1999: * Otherwise, the context was freed by calling ctx_free() in
2000: * pmap_release(), which in turn is supposedly called only
2001: * when all mappings have been removed.
2002: *
2003: * XXX: Do we have to flush cache after reloading ctx tbl?
2004: */
2005:
2006: /*
2007: * We need to flush the cache only when stealing a context
2008: * from another pmap. In that case it's Ok to switch the
1.199.4.14 thorpej 2009: * context and leave it set, since the context table
1.199.4.2 pk 2010: * will have a valid region table entry for this context
2011: * number.
2012: *
2013: * Otherwise, we switch to the new context after loading
2014: * the context table entry with the new pmap's region.
2015: */
2016: if (doflush) {
1.199.4.12 thorpej 2017: cache_flush_context(cnum);
1.199.4.2 pk 2018: }
2019:
2020: /*
2021: * The context allocated to a process is the same on all CPUs.
2022: * Here we install the per-CPU region table in each CPU's
2023: * context table slot.
2024: *
2025: * Note on multi-threaded processes: a context must remain
2026: * valid as long as any thread is still running on a cpu.
2027: */
2028: simple_lock(&pm->pm_lock);
2029: #if defined(MULTIPROCESSOR)
2030: for (i = 0; i < ncpu; i++)
2031: #else
2032: i = 0;
2033: #endif
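/* On a uniprocessor kernel this `loop' body runs exactly once, with i == 0 */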
2034: {
2035: struct cpu_info *cpi = cpus[i];
2036: #if defined(MULTIPROCESSOR)
2037: if (cpi == NULL)
2038: continue;
2039: #endif
2040: setpgt4m(&cpi->ctx_tbl[cnum],
2041: (pm->pm_reg_ptps_pa[i] >> SRMMU_PPNPASHIFT) |
2042: SRMMU_TEPTD);
2043: }
2044: simple_unlock(&pm->pm_lock);
2045:
1.199.4.14 thorpej 2046: /* And finally switch to the new context */
2047: (*cpuinfo.pure_vcache_flush)();
2048: setcontext4m(cnum);
2049: #endif /* SUN4M || SUN4D */
1.199.4.2 pk 2050: splx(s);
2051: }
2052: }
2053:
2054: /*
1.199.4.14 thorpej 2055: * Give away a context. Always called in the context of proc0 (reaper)
1.199.4.2 pk 2056: */
2057: void
2058: ctx_free(pm)
2059: struct pmap *pm;
2060: {
2061: union ctxinfo *c;
1.199.4.14 thorpej 2062: int ctx;
1.199.4.2 pk 2063:
2064: c = pm->pm_ctx;
1.199.4.14 thorpej 2065: ctx = pm->pm_ctxnum;
1.199.4.2 pk 2066: pm->pm_ctx = NULL;
1.199.4.14 thorpej 2067: pm->pm_ctxnum = 0;
2068: #if defined(SUN4) || defined(SUN4C)
2069: if (CPU_ISSUN4 || CPU_ISSUN4C) {
2070: int octx = getcontext4();
2071: cache_flush_context(ctx);
2072: setcontext(octx);
2073: }
2074: #endif /* SUN4 || SUN4C */
2075:
1.199.4.9 nathanw 2076: #if defined(SUN4M) || defined(SUN4D)
1.199.4.14 thorpej 2077: if (CPU_HAS_SRMMU) {
2078: cache_flush_context(ctx);
2079: tlb_flush_context(ctx);
1.199.4.2 pk 2080: }
1.199.4.14 thorpej 2081: #endif
1.199.4.2 pk 2082:
2083: simple_lock(&ctx_lock);
2084: c->c_nextfree = ctx_freelist;
2085: ctx_freelist = c;
2086: simple_unlock(&ctx_lock);
2087: }
2088:
2089:
2090: /*----------------------------------------------------------------*/
2091:
2092: /*
2093: * pvlist functions.
2094: */
2095:
2096: /*
2097: * Walk the given pv list, and for each PTE, set or clear some bits
2098: * (e.g., PG_W or PG_NC).
2099: *
2100: * This routine flushes the cache for any page whose PTE changes,
2101: * as long as the process has a context; this is overly conservative.
2102: * It also copies ref and mod bits to the pvlist, on the theory that
2103: * this might save work later. (XXX should test this theory)
2104: */
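/*
 * For example, pv_uncache() below uses these to set PG_NC (or clear
 * SRMMU_PG_C) in every mapping of a page that must become uncacheable.
 */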
2105:
2106: #if defined(SUN4) || defined(SUN4C)
2107:
2108: void
2109: pv_changepte4_4c(pv0, bis, bic)
2110: struct pvlist *pv0;
2111: int bis, bic;
2112: {
2113: int *pte;
2114: struct pvlist *pv;
2115: struct pmap *pm;
2116: int va, vr, vs;
2117: int ctx, s;
2118: struct regmap *rp;
2119: struct segmap *sp;
2120:
2121: write_user_windows(); /* paranoid? */
2122: s = splvm(); /* paranoid? */
2123: if (pv0->pv_pmap == NULL) {
2124: splx(s);
2125: return;
2126: }
2127: ctx = getcontext4();
2128: for (pv = pv0; pv != NULL; pv = pv->pv_next) {
2129: pm = pv->pv_pmap;
2130: va = pv->pv_va;
2131: vr = VA_VREG(va);
2132: vs = VA_VSEG(va);
2133: rp = &pm->pm_regmap[vr];
2134: sp = &rp->rg_segmap[vs];
2135: pte = sp->sg_pte;
2136:
2137: if (sp->sg_pmeg == seginval) {
2138: /* not in hardware: just fix software copy */
2139: pte += VA_VPG(va);
2140: *pte = (*pte | bis) & ~bic;
2141: } else {
2142: int tpte;
2143:
2144: /* in hardware: fix hardware copy */
2145: if (CTX_USABLE(pm,rp)) {
2146: setcontext4(pm->pm_ctxnum);
2147: /* XXX should flush only when necessary */
2148: tpte = getpte4(va);
2149: /*
2150: * XXX: always flush cache; conservative, but
2151: * needed to invalidate cache tag protection
2152: * bits and when disabling caching.
2153: */
1.199.4.12 thorpej 2154: cache_flush_page(va, pm->pm_ctxnum);
1.199.4.2 pk 2155: } else {
2156: /* XXX per-cpu va? */
2157: setcontext4(0);
2158: if (HASSUN4_MMU3L)
2159: setregmap(0, tregion);
2160: setsegmap(0, sp->sg_pmeg);
2161: va = VA_VPG(va) << PGSHIFT;
2162: tpte = getpte4(va);
2163: }
2164: if (tpte & PG_V)
2165: pv0->pv_flags |= MR4_4C(tpte);
2166: tpte = (tpte | bis) & ~bic;
2167: setpte4(va, tpte);
2168: if (pte != NULL) /* update software copy */
2169: pte[VA_VPG(va)] = tpte;
2170: }
2171: }
2172: setcontext4(ctx);
2173: splx(s);
2174: }
2175:
2176: /*
2177: * Sync ref and mod bits in pvlist (turns off same in hardware PTEs).
2178: * Returns the new flags.
2179: *
2180: * This is just like pv_changepte, but we never add or remove bits,
2181: * hence never need to adjust software copies.
2182: */
2183: int
2184: pv_syncflags4_4c(pv0)
2185: struct pvlist *pv0;
2186: {
2187: struct pvlist *pv;
2188: struct pmap *pm;
2189: int tpte, va, vr, vs, pmeg, flags;
2190: int ctx, s;
2191: struct regmap *rp;
2192: struct segmap *sp;
2193:
2194: s = splvm(); /* paranoid? */
2195: if (pv0->pv_pmap == NULL) { /* paranoid */
2196: splx(s);
2197: return (0);
2198: }
2199: ctx = getcontext4();
2200: flags = pv0->pv_flags;
2201: for (pv = pv0; pv != NULL; pv = pv->pv_next) {
2202: pm = pv->pv_pmap;
2203: va = pv->pv_va;
2204: vr = VA_VREG(va);
2205: vs = VA_VSEG(va);
2206: rp = &pm->pm_regmap[vr];
2207: sp = &rp->rg_segmap[vs];
2208: if ((pmeg = sp->sg_pmeg) == seginval)
2209: continue;
2210: if (CTX_USABLE(pm,rp)) {
2211: setcontext4(pm->pm_ctxnum);
2212: /* XXX should flush only when necessary */
2213: tpte = getpte4(va);
2214: if (tpte & PG_M)
1.199.4.12 thorpej 2215: cache_flush_page(va, pm->pm_ctxnum);
1.199.4.2 pk 2216: } else {
2217: /* XXX per-cpu va? */
2218: setcontext4(0);
2219: if (HASSUN4_MMU3L)
2220: setregmap(0, tregion);
2221: setsegmap(0, pmeg);
2222: va = VA_VPG(va) << PGSHIFT;
2223: tpte = getpte4(va);
2224: }
2225: if (tpte & (PG_M|PG_U) && tpte & PG_V) {
2226: flags |= MR4_4C(tpte);
2227: tpte &= ~(PG_M|PG_U);
2228: setpte4(va, tpte);
2229: }
2230: }
2231: pv0->pv_flags = flags;
2232: setcontext4(ctx);
2233: splx(s);
2234: return (flags);
2235: }
2236:
2237: /*
2238: * pv_unlink is a helper function for pmap_remove.
2239: * It takes a pointer to the pv_table head for some physical address
2240: * and removes the appropriate (pmap, va) entry.
2241: *
2242: * Once the entry is removed, if the pv_table head has the cache
2243: * inhibit bit set, see if we can turn that off; if so, walk the
2244: * pvlist and turn off PG_NC in each PTE. (The pvlist is by
2245: * definition nonempty, since it must have at least two elements
2246: * in it to have PV_ANC set, and we only remove one here.)
2247: */
2248: /*static*/ void
2249: pv_unlink4_4c(pv, pm, va)
2250: struct pvlist *pv;
2251: struct pmap *pm;
2252: vaddr_t va;
2253: {
2254: struct pvlist *npv;
2255:
2256: /*
2257: * First entry is special (sigh).
2258: */
2259: npv = pv->pv_next;
2260: if (pv->pv_pmap == pm && pv->pv_va == va) {
2261: pmap_stats.ps_unlink_pvfirst++;
2262: if (npv != NULL) {
2263: /*
2264: * Shift next entry into the head.
2265: * Make sure to retain the REF, MOD and ANC flags.
2266: */
2267: pv->pv_next = npv->pv_next;
2268: pv->pv_pmap = npv->pv_pmap;
2269: pv->pv_va = npv->pv_va;
2270: pv->pv_flags &= ~PV_NC;
2271: pv->pv_flags |= (npv->pv_flags & PV_NC);
2272: pool_put(&pv_pool, npv);
2273: } else {
2274: /*
2275: * No mappings left; we still need to maintain
2276: * the REF and MOD flags, since pmap_is_modified()
2277: * can still be called for this page.
2278: */
2279: pv->pv_pmap = NULL;
2280: pv->pv_flags &= ~(PV_NC|PV_ANC);
2281: return;
2282: }
2283: } else {
2284: struct pvlist *prev;
2285:
2286: for (prev = pv;; prev = npv, npv = npv->pv_next) {
2287: pmap_stats.ps_unlink_pvsearch++;
2288: if (npv->pv_pmap == pm && npv->pv_va == va)
2289: break;
2290: }
2291: prev->pv_next = npv->pv_next;
2292: pool_put(&pv_pool, npv);
2293: }
2294: if ((pv->pv_flags & (PV_NC|PV_ANC)) == PV_ANC) {
2295: /*
2296: * Not cached: check to see if we can fix that now.
2297: */
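/*
 * BADALIAS() is true when two virtual addresses differ in the bits
 * that index the virtually addressed cache, i.e. the same physical
 * page would occupy two distinct cache lines. Caching can resume
 * only if no remaining pair aliases and no mapping is individually
 * marked PV_NC.
 */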
2298: va = pv->pv_va;
2299: for (npv = pv->pv_next; npv != NULL; npv = npv->pv_next)
2300: if (BADALIAS(va, npv->pv_va) ||
2301: (npv->pv_flags & PV_NC) != 0)
2302: return;
2303: pv->pv_flags &= ~PV_ANC;
2304: pv_changepte4_4c(pv, 0, PG_NC);
2305: }
2306: }
2307:
2308: /*
2309: * pv_link is the inverse of pv_unlink, and is used in pmap_enter.
2310: * It returns PG_NC if the (new) pvlist says that the address cannot
2311: * be cached.
2312: */
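/*
 * (Illustrative use, patterned on the pmap_enter4_4c() call site:
 * the caller ORs the result into the PTE being constructed, e.g.
 *	pteproto |= pv_link4_4c(pv, pm, va, (pteproto & PG_NC) != 0);
 * so a non-cacheable verdict propagates into the hardware mapping.)
 */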
2313: /*static*/ int
2314: pv_link4_4c(pv, pm, va, nc)
2315: struct pvlist *pv;
2316: struct pmap *pm;
2317: vaddr_t va;
2318: int nc;
2319: {
2320: struct pvlist *npv;
2321: int ret;
2322:
2323: ret = nc ? PG_NC : 0;
2324:
2325: if (pv->pv_pmap == NULL) {
2326: /* no pvlist entries yet */
2327: pmap_stats.ps_enter_firstpv++;
2328: pv->pv_next = NULL;
2329: pv->pv_pmap = pm;
2330: pv->pv_va = va;
2331: pv->pv_flags |= nc ? PV_NC : 0;
2332: return (ret);
2333: }
2334: /*
2335: * Before entering the new mapping, see if
2336: * it will cause old mappings to become aliased
2337: * and thus need to be `discached'.
2338: */
2339: pmap_stats.ps_enter_secondpv++;
2340: if (pv->pv_flags & PV_ANC) {
2341: /* already uncached, just stay that way */
2342: ret = PG_NC;
2343: } else {
2344: for (npv = pv; npv != NULL; npv = npv->pv_next) {
2345: if (npv->pv_flags & PV_NC) {
2346: ret = PG_NC;
2347: #ifdef DEBUG
2348: /* Check currently illegal condition */
2349: if (nc == 0)
2350: printf("pv_link: proc %s, va=0x%lx: "
2351: "unexpected uncached mapping at 0x%lx\n",
2352: curproc ? curproc->p_comm : "--",
2353: va, npv->pv_va);
2354: #endif
2355: }
2356: if (BADALIAS(va, npv->pv_va)) {
2357: #ifdef DEBUG
2358: if (pmapdebug & PDB_CACHESTUFF)
2359: printf(
2360: "pv_link: badalias: proc %s, 0x%lx<=>0x%lx, pv %p\n",
2361: curproc ? curproc->p_comm : "--",
2362: va, npv->pv_va, pv);
2363: #endif
2364: /* Mark list head `uncached due to aliases' */
2365: pv->pv_flags |= PV_ANC;
2366: pv_changepte4_4c(pv, ret = PG_NC, 0);
2367: break;
2368: }
2369: }
2370: }
2371: npv = pool_get(&pv_pool, PR_NOWAIT);
2372: if (npv == NULL)
2373: panic("pv_link: pv_pool exhausted");
2374: npv->pv_next = pv->pv_next;
2375: npv->pv_pmap = pm;
2376: npv->pv_va = va;
2377: npv->pv_flags = nc ? PV_NC : 0;
2378: pv->pv_next = npv;
2379: return (ret);
2380: }
2381:
2382: #endif /* sun4, sun4c code */
2383:
1.199.4.9 nathanw 2384: #if defined(SUN4M) || defined(SUN4D) /* SRMMU versions of above */
1.199.4.2 pk 2385: /*
2386: * Walk the given pv list, and for each PTE, set or clear some bits
2387: * (e.g., PG_W or PG_NC).
2388: *
2389: * This routine flushes the cache for any page whose PTE changes,
2390: * as long as the process has a context; this is overly conservative.
2391: * It also copies ref and mod bits to the pvlist, on the theory that
2392: * this might save work later. (XXX should test this theory)
2393: */
2394: void
2395: pv_changepte4m(pv0, bis, bic)
2396: struct pvlist *pv0;
2397: int bis, bic;
2398: {
2399: struct pvlist *pv;
2400: struct pmap *pm;
2401: int va, vr;
1.199.4.14 thorpej 2402: int s;
1.199.4.2 pk 2403: struct regmap *rp;
2404: struct segmap *sp;
2405:
2406: write_user_windows(); /* paranoid? */
2407: s = splvm(); /* paranoid? */
2408: if (pv0->pv_pmap == NULL) {
2409: splx(s);
2410: return;
2411: }
2412: for (pv = pv0; pv != NULL; pv = pv->pv_next) {
2413: int tpte;
2414: pm = pv->pv_pmap;
2415: va = pv->pv_va;
2416: vr = VA_VREG(va);
2417: rp = &pm->pm_regmap[vr];
2418: sp = &rp->rg_segmap[VA_VSEG(va)];
2419:
2420: if (pm->pm_ctx) {
2421: /*
2422: * XXX: always flush cache; conservative, but
2423: * needed to invalidate cache tag protection
2424: * bits and when disabling caching.
2425: */
1.199.4.12 thorpej 2426: cache_flush_page(va, pm->pm_ctxnum);
1.199.4.2 pk 2427:
2428: #if !defined(MULTIPROCESSOR) /* XXX? done in updatepte4m() */
2429: /* Flush TLB so memory copy is up-to-date */
1.199.4.14 thorpej 2430: tlb_flush_page(va, pm->pm_ctxnum);
1.199.4.2 pk 2431: #endif
2432: }
2433:
2434: tpte = sp->sg_pte[VA_SUN4M_VPG(va)];
2435: KASSERT((tpte & SRMMU_TETYPE) == SRMMU_TEPTE);
2436: pv0->pv_flags |= MR4M(updatepte4m(va,
1.199.4.14 thorpej 2437: &sp->sg_pte[VA_SUN4M_VPG(va)], bic, bis, pm->pm_ctxnum));
1.199.4.2 pk 2438: }
2439: splx(s);
2440: }
2441:
2442: /*
2443: * Sync ref and mod bits in pvlist. If page has been ref'd or modified,
2444: * update ref/mod bits in pvlist, and clear the hardware bits.
2445: *
2446: * Return the new flags.
2447: */
2448: int
2449: pv_syncflags4m(pv0)
2450: struct pvlist *pv0;
2451: {
2452: struct pvlist *pv;
2453: struct pmap *pm;
2454: int tpte, va, vr, vs, flags;
1.199.4.14 thorpej 2455: int s;
1.199.4.2 pk 2456: struct regmap *rp;
2457: struct segmap *sp;
2458: boolean_t doflush;
2459:
2460: write_user_windows(); /* paranoid? */
2461: s = splvm(); /* paranoid? */
2462: if (pv0->pv_pmap == NULL) { /* paranoid */
2463: splx(s);
2464: return (0);
2465: }
2466: flags = pv0->pv_flags;
2467: for (pv = pv0; pv != NULL; pv = pv->pv_next) {
2468: pm = pv->pv_pmap;
2469: va = pv->pv_va;
2470: vr = VA_VREG(va);
2471: vs = VA_VSEG(va);
2472: rp = &pm->pm_regmap[vr];
2473: sp = &rp->rg_segmap[vs];
2474: if (sp->sg_pte == NULL) {
2475: continue;
2476: }
2477:
2478: /*
2479: * We need the PTE from memory as the TLB version will
2480: * always have the SRMMU_PG_R bit on.
2481: */
2482:
1.199.4.14 thorpej 2483: if (pm->pm_ctx)
2484: tlb_flush_page(va, pm->pm_ctxnum);
2485:
1.199.4.2 pk 2486: tpte = sp->sg_pte[VA_SUN4M_VPG(va)];
2487: if ((tpte & SRMMU_TETYPE) == SRMMU_TEPTE && /* if valid pte */
2488: (tpte & (SRMMU_PG_M|SRMMU_PG_R))) { /* and mod/refd */
2489: flags |= MR4M(tpte);
2490:
2491: /*
2492: * Clear mod/ref bits from PTE and write it back.
2493: * We must do this before flushing the cache to
2494: * avoid races with another cpu setting the M bit
2495: * and creating dirty cache lines again.
2496: */
2497:
2498: doflush = pm->pm_ctx && (tpte & SRMMU_PG_M);
1.199.4.14 thorpej 2499: updatepte4m(va, &sp->sg_pte[VA_SUN4M_VPG(va)],
2500: SRMMU_PG_M | SRMMU_PG_R,
2501: 0, pm->pm_ctxnum);
1.199.4.2 pk 2502: if (doflush) {
2503:
2504: /* Only do this for write-back caches? */
1.199.4.12 thorpej 2505: cache_flush_page(va, pm->pm_ctxnum);
1.199.4.2 pk 2506:
2507: /*
2508: * VIPT caches might use the TLB when
2509: * flushing, so we flush the TLB again.
2510: */
1.199.4.14 thorpej 2511: tlb_flush_page(va, pm->pm_ctxnum);
1.199.4.2 pk 2512: }
2513: }
2514: }
2515: pv0->pv_flags = flags;
2516: splx(s);
2517: return (flags);
2518: }
2519:
2520: /*
2521: * Should be called with pmap already locked.
2522: */
2523: void
2524: pv_unlink4m(pv, pm, va)
2525: struct pvlist *pv;
2526: struct pmap *pm;
2527: vaddr_t va;
2528: {
2529: struct pvlist *npv;
2530:
2531: /*
2532: * First entry is special (sigh).
2533: */
2534:
2535: npv = pv->pv_next;
2536: if (pv->pv_pmap == pm && pv->pv_va == va) {
2537: pmap_stats.ps_unlink_pvfirst++;
2538: if (npv != NULL) {
2539: /*
2540: * Shift next entry into the head.
2541: * Make sure to retain the REF, MOD and ANC flags
2542: * on the list head.
2543: */
2544: pv->pv_next = npv->pv_next;
2545: pv->pv_pmap = npv->pv_pmap;
2546: pv->pv_va = npv->pv_va;
2547: pv->pv_flags &= ~PV_NC;
2548: pv->pv_flags |= (npv->pv_flags & PV_NC);
2549: pool_put(&pv_pool, npv);
2550: } else {
2551: /*
2552: * No mappings left; we need to maintain
2553: * the REF and MOD flags, since pmap_is_modified()
2554: * can still be called for this page.
2555: */
2556: pv->pv_pmap = NULL;
2557: pv->pv_flags &= ~(PV_NC|PV_ANC);
2558: return;
2559: }
2560: } else {
2561: struct pvlist *prev;
2562:
2563: for (prev = pv;; prev = npv, npv = npv->pv_next) {
2564: pmap_stats.ps_unlink_pvsearch++;
2565: if (npv->pv_pmap == pm && npv->pv_va == va)
2566: break;
2567: }
2568: prev->pv_next = npv->pv_next;
2569: pool_put(&pv_pool, npv);
2570: }
2571: if ((pv->pv_flags & (PV_NC|PV_ANC)) == PV_ANC) {
2572:
2573: /*
2574: * Not cached: check to see if we can fix that now.
2575: */
2576:
2577: va = pv->pv_va;
2578: for (npv = pv->pv_next; npv != NULL; npv = npv->pv_next)
2579: if (BADALIAS(va, npv->pv_va) ||
2580: (npv->pv_flags & PV_NC) != 0)
2581: return;
2582: #ifdef DEBUG
2583: if (pmapdebug & PDB_CACHESTUFF)
2584: printf(
2585: "pv_unlink: alias ok: proc %s, va 0x%lx, pv %p\n",
2586: curproc ? curproc->p_comm : "--",
2587: va, pv);
2588: #endif
2589: pv->pv_flags &= ~PV_ANC;
2590: pv_changepte4m(pv, SRMMU_PG_C, 0);
2591: }
2592: }
2593:
2594: /*
2595: * pv_link is the inverse of pv_unlink, and is used in pmap_enter.
2596: * It returns SRMMU_PG_C if the (new) pvlist says that the address cannot
2597: * be cached (i.e. its result must be (& ~)'d in).
2598: */
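/*
 * (Illustrative use, mirroring the 4/4c case: the caller clears the
 * cacheable bit from its PTE prototype, e.g.
 *	pteproto &= ~pv_link4m(pv, pm, va, nc);
 * since an SRMMU page is cached only while SRMMU_PG_C is set.)
 */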
2599: /*static*/ int
2600: pv_link4m(pv, pm, va, nc)
2601: struct pvlist *pv;
2602: struct pmap *pm;
2603: vaddr_t va;
2604: int nc;
2605: {
2606: struct pvlist *npv;
2607: int ret;
2608:
2609: ret = nc ? SRMMU_PG_C : 0;
2610:
2611: if (pv->pv_pmap == NULL) {
2612: /* no pvlist entries yet */
2613: pmap_stats.ps_enter_firstpv++;
2614: pv->pv_next = NULL;
2615: pv->pv_pmap = pm;
2616: pv->pv_va = va;
2617: pv->pv_flags |= nc ? PV_NC : 0;
2618: return (ret);
2619: }
2620:
2621: /*
2622: * Before entering the new mapping, see if
2623: * it will cause old mappings to become aliased
2624: * and thus need to be `discached'.
2625: */
2626:
2627: pmap_stats.ps_enter_secondpv++;
2628: if ((pv->pv_flags & PV_ANC) != 0) {
2629: /* already uncached, just stay that way */
2630: ret = SRMMU_PG_C;
2631: } else {
2632: for (npv = pv; npv != NULL; npv = npv->pv_next) {
2633: if ((npv->pv_flags & PV_NC) != 0) {
2634: ret = SRMMU_PG_C;
2635: #ifdef DEBUG
2636: /* Check currently illegal condition */
2637: if (nc == 0)
2638: printf("pv_link: proc %s, va=0x%lx: "
2639: "unexpected uncached mapping at 0x%lx\n",
2640: curproc ? curproc->p_comm : "--",
2641: va, npv->pv_va);
2642: #endif
2643: }
2644: if (BADALIAS(va, npv->pv_va)) {
2645: #ifdef DEBUG
2646: if (pmapdebug & PDB_CACHESTUFF)
2647: printf(
2648: "pv_link: badalias: proc %s, 0x%lx<=>0x%lx, pv %p\n",
2649: curproc ? curproc->p_comm : "--",
2650: va, npv->pv_va, pv);
2651: #endif
2652: /* Mark list head `uncached due to aliases' */
2653: pv->pv_flags |= PV_ANC;
2654: pv_changepte4m(pv, 0, ret = SRMMU_PG_C);
2655: break;
2656: }
2657: }
2658: }
2659: npv = pool_get(&pv_pool, PR_NOWAIT);
2660: if (npv == NULL)
2661: panic("pv_link: pv_pool exhausted");
2662: npv->pv_next = pv->pv_next;
2663: npv->pv_pmap = pm;
2664: npv->pv_va = va;
2665: npv->pv_flags = nc ? PV_NC : 0;
2666: pv->pv_next = npv;
2667: return (ret);
2668: }
2669: #endif
2670:
2671: /*
2672: * Uncache all entries on behalf of kvm_uncache(). In addition to
2673: * removing the cache bit from the PTE, we are also setting PV_NC
2674: * in each entry to stop pv_unlink() from re-caching (i.e. when a
2675: * bad alias is going away).
2676: */
2677: void
2678: pv_uncache(pv0)
2679: struct pvlist *pv0;
2680: {
2681: struct pvlist *pv;
2682:
2683: for (pv = pv0; pv != NULL; pv = pv->pv_next)
2684: pv->pv_flags |= PV_NC;
2685:
1.199.4.9 nathanw 2686: #if defined(SUN4M) || defined(SUN4D)
2687: if (CPU_HAS_SRMMU)
1.199.4.2 pk 2688: pv_changepte4m(pv0, 0, SRMMU_PG_C);
2689: #endif
2690: #if defined(SUN4) || defined(SUN4C)
1.199.4.9 nathanw 2691: if (CPU_ISSUN4 || CPU_ISSUN4C)
1.199.4.2 pk 2692: pv_changepte4_4c(pv0, PG_NC, 0);
2693: #endif
2694: }
2695:
2696: /*
2697: * Walk the given list and flush the cache for each (MI) page that is
2698: * potentially in the cache. Called only if vactype != VAC_NONE.
2699: */
2700: void
2701: pv_flushcache(pv)
2702: struct pvlist *pv;
2703: {
2704: struct pmap *pm;
2705: int s, ctx;
2706:
2707: write_user_windows(); /* paranoia? */
2708: s = splvm(); /* XXX extreme paranoia */
2709: if ((pm = pv->pv_pmap) != NULL) {
2710: ctx = getcontext();
2711: for (;;) {
2712: if (pm->pm_ctx) {
2713: setcontext(pm->pm_ctxnum);
1.199.4.12 thorpej 2714: cache_flush_page(pv->pv_va, pm->pm_ctxnum);
1.199.4.9 nathanw 2715: #if defined(SUN4M) || defined(SUN4D)
2716: if (CPU_HAS_SRMMU)
1.199.4.14 thorpej 2717: tlb_flush_page(pv->pv_va, pm->pm_ctxnum);
1.199.4.2 pk 2718: #endif
2719: }
2720: pv = pv->pv_next;
2721: if (pv == NULL)
2722: break;
2723: pm = pv->pv_pmap;
2724: }
2725: setcontext(ctx);
2726: }
2727: splx(s);
2728: }
2729:
2730: /*----------------------------------------------------------------*/
2731:
2732: /*
2733: * At last, pmap code.
2734: */
2735:
1.199.4.9 nathanw 2736: #if defined(SUN4) && (defined(SUN4C) || defined(SUN4M) || defined(SUN4D))
1.199.4.2 pk 2737: int nptesg;
2738: #endif
2739:
1.199.4.9 nathanw 2740: #if defined(SUN4M) || defined(SUN4D)
1.199.4.2 pk 2741: static void pmap_bootstrap4m __P((void));
2742: #endif
2743: #if defined(SUN4) || defined(SUN4C)
2744: static void pmap_bootstrap4_4c __P((int, int, int));
2745: #endif
2746:
2747: /*
2748: * Bootstrap the system enough to run with VM enabled.
2749: *
2750: * nsegment is the number of mmu segment entries (``PMEGs'');
2751: * nregion is the number of mmu region entries (``SMEGs'');
2752: * nctx is the number of contexts.
2753: */
2754: void
2755: pmap_bootstrap(nctx, nregion, nsegment)
2756: int nsegment, nctx, nregion;
2757: {
2758: extern char etext[], kernel_data_start[];
2759:
1.199.4.9 nathanw 2760: #if defined(SUN4M) || defined(SUN4D)
2761: if (CPU_HAS_SRMMU) {
1.199.4.2 pk 2762: /*
2763: * Compute `va2pa_offset'.
2764: * Since the kernel is loaded at address 0x4000 within
2765: * the memory bank, we'll use the corresponding VA
2766: * (i.e. `kernel_text') to fetch the physical load
2767: * address, just in case those first 4 pages aren't mapped.
2768: */
2769: extern char kernel_text[];
2770: int offset = (vaddr_t)kernel_text - (vaddr_t)KERNBASE;
2771: va2pa_offset -= (VA2PA(kernel_text) - offset);
2772: }
2773: #endif
2774:
2775: uvmexp.pagesize = NBPG;
2776: uvm_setpagesize();
2777:
1.199.4.9 nathanw 2778: #if defined(SUN4) && (defined(SUN4C) || defined(SUN4M) || defined(SUN4D))
1.199.4.2 pk 2779: /* In this case NPTESG is a variable */
2780: nptesg = (NBPSG >> pgshift);
2781: #endif
2782:
2783: /*
2784: * Grab physical memory list.
2785: */
2786: get_phys_mem();
2787:
2788: /*
2789: * The data segment in sparc ELF images is aligned to a 64KB
2790: * (the maximum page size defined by the ELF/sparc ABI) boundary.
2791: * This results in an unused portion of physical memory in between
2792: * the text/rodata and the data segment. We pick up that gap
2793: * here to remove it from the kernel map and give it to the
2794: * VM manager later.
2795: */
2796: etext_gap_start = (vaddr_t)(etext + NBPG - 1) & ~PGOFSET;
2797: etext_gap_end = (vaddr_t)kernel_data_start & ~PGOFSET;
2798:
1.199.4.9 nathanw 2799: if (CPU_HAS_SRMMU) {
2800: #if defined(SUN4M) || defined(SUN4D)
1.199.4.2 pk 2801: pmap_bootstrap4m();
2802: #endif
1.199.4.9 nathanw 2803: } else if (CPU_ISSUN4 || CPU_ISSUN4C) {
1.199.4.2 pk 2804: #if defined(SUN4) || defined(SUN4C)
2805: pmap_bootstrap4_4c(nctx, nregion, nsegment);
2806: #endif
2807: }
2808:
2809: pmap_page_upload();
2810: }
2811:
2812: #if defined(SUN4) || defined(SUN4C)
2813: void
2814: pmap_bootstrap4_4c(nctx, nregion, nsegment)
2815: int nsegment, nctx, nregion;
2816: {
2817: union ctxinfo *ci;
2818: struct mmuentry *mmuseg;
2819: #if defined(SUN4_MMU3L)
2820: struct mmuentry *mmureg;
2821: #endif
2822: struct regmap *rp;
2823: int i, j;
2824: int npte, zseg, vr, vs;
2825: int rcookie, scookie;
2826: caddr_t p;
2827: int lastpage;
2828: vaddr_t va;
1.199.4.4 nathanw 2829: extern char *kernel_top;
1.199.4.2 pk 2830:
2831: ncontext = nctx;
2832:
2833: switch (cputyp) {
2834: case CPU_SUN4C:
2835: mmu_has_hole = 1;
2836: break;
2837: case CPU_SUN4:
2838: if (cpuinfo.cpu_type != CPUTYP_4_400) {
2839: mmu_has_hole = 1;
2840: break;
2841: }
2842: }
2843:
2844: #if defined(SUN4)
2845: /*
2846: * set up the segfixmask to mask off invalid bits
2847: */
2848: segfixmask = nsegment - 1; /* assume nsegment is a power of 2 */
2849: #ifdef DIAGNOSTIC
2850: if ((nsegment & segfixmask) != 0) { /* nsegment not a power of 2 */
2851: printf("pmap_bootstrap: unsuitable number of segments (%d)\n",
2852: nsegment);
2853: callrom();
2854: }
2855: #endif
2856: #endif
2857:
1.199.4.9 nathanw 2858: #if defined(SUN4M) || defined(SUN4D) /* We're in a dual-arch kernel.
2859: Setup 4/4c fn. ptrs */
1.199.4.2 pk 2860: pmap_clear_modify_p = pmap_clear_modify4_4c;
2861: pmap_clear_reference_p = pmap_clear_reference4_4c;
2862: pmap_enter_p = pmap_enter4_4c;
2863: pmap_extract_p = pmap_extract4_4c;
2864: pmap_is_modified_p = pmap_is_modified4_4c;
2865: pmap_is_referenced_p = pmap_is_referenced4_4c;
2866: pmap_kenter_pa_p = pmap_kenter_pa4_4c;
2867: pmap_kremove_p = pmap_kremove4_4c;
2868: pmap_page_protect_p = pmap_page_protect4_4c;
2869: pmap_protect_p = pmap_protect4_4c;
2870: pmap_changeprot_p = pmap_changeprot4_4c;
2871: pmap_rmk_p = pmap_rmk4_4c;
2872: pmap_rmu_p = pmap_rmu4_4c;
1.199.4.9 nathanw 2873: #endif /* defined SUN4M || defined SUN4D */
1.199.4.2 pk 2874:
2875: /*
2876: * Last segment is the `invalid' one (one PMEG of pte's with !pg_v).
2877: * It will never be used for anything else.
2878: */
2879: seginval = --nsegment;
2880:
2881: #if defined(SUN4_MMU3L)
2882: if (HASSUN4_MMU3L)
2883: reginval = --nregion;
2884: #endif
2885:
2886: /*
2887: * Initialize the kernel pmap.
2888: */
2889: /* kernel_pmap_store.pm_ctxnum = 0; */
2890: simple_lock_init(&kernel_pmap_store.pm_lock);
2891: kernel_pmap_store.pm_refcount = 1;
2892: #if defined(SUN4_MMU3L)
2893: TAILQ_INIT(&kernel_pmap_store.pm_reglist);
2894: #endif
2895: TAILQ_INIT(&kernel_pmap_store.pm_seglist);
2896:
2897: /*
2898: * Set up pm_regmap for kernel to point NUREG *below* the beginning
2899: * of kernel regmap storage. Since the kernel only uses regions
2900: * above NUREG, we save storage space and can index kernel and
2901: * user regions in the same way.
2902: */
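/* e.g. pm_regmap[NUREG], the first kernel region, is kernel_regmap_store[0] */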
2903: kernel_pmap_store.pm_regmap = &kernel_regmap_store[-NUREG];
2904: for (i = NKREG; --i >= 0;) {
2905: #if defined(SUN4_MMU3L)
2906: kernel_regmap_store[i].rg_smeg = reginval;
2907: #endif
2908: kernel_regmap_store[i].rg_segmap =
2909: &kernel_segmap_store[i * NSEGRG];
2910: for (j = NSEGRG; --j >= 0;)
2911: kernel_segmap_store[i * NSEGRG + j].sg_pmeg = seginval;
2912: }
2913:
2914: /*
2915: * Preserve the monitor ROM's reserved VM region, so that
2916: * we can use L1-A or the monitor's debugger. As a side
2917: * effect we map the ROM's reserved VM into all contexts
2918: * (otherwise L1-A crashes the machine!).
2919: */
2920:
2921: mmu_reservemon4_4c(&nregion, &nsegment);
2922:
2923: #if defined(SUN4_MMU3L)
2924: /* Reserve one region for temporary mappings */
2925: if (HASSUN4_MMU3L)
2926: tregion = --nregion;
2927: #endif
2928:
2929: /*
2930: * Allocate and clear mmu entries and context structures.
2931: */
1.199.4.4 nathanw 2932: p = kernel_top;
2933:
1.199.4.2 pk 2934: #if defined(SUN4_MMU3L)
2935: mmuregions = mmureg = (struct mmuentry *)p;
2936: p += nregion * sizeof(struct mmuentry);
2937: bzero(mmuregions, nregion * sizeof(struct mmuentry));
2938: #endif
2939: mmusegments = mmuseg = (struct mmuentry *)p;
2940: p += nsegment * sizeof(struct mmuentry);
2941: bzero(mmusegments, nsegment * sizeof(struct mmuentry));
2942:
2943: pmap_kernel()->pm_ctx = ctxinfo = ci = (union ctxinfo *)p;
2944: p += nctx * sizeof *ci;
2945:
2946: /* Initialize MMU resource queues */
2947: #if defined(SUN4_MMU3L)
2948: TAILQ_INIT(&region_freelist);
2949: TAILQ_INIT(&region_lru);
2950: TAILQ_INIT(&region_locked);
2951: #endif
2952: TAILQ_INIT(&segm_freelist);
2953: TAILQ_INIT(&segm_lru);
2954: TAILQ_INIT(&segm_locked);
2955:
2956: /*
2957: * Set up the `constants' for the call to vm_init()
2958: * in main(). All pages beginning at p (rounded up to
2959: * the next whole page) and continuing through the number
2960: * of available pages are free, but they start at a higher
2961: * virtual address. This gives us two mappable MD pages
2962: * for pmap_zero_page and pmap_copy_page, and one MI page
2963: * for /dev/mem, all with no associated physical memory.
2964: */
2965: p = (caddr_t)(((u_int)p + NBPG - 1) & ~PGOFSET);
2966:
2967: avail_start = PMAP_BOOTSTRAP_VA2PA(p);
2968:
2969: i = (int)p;
2970: vpage[0] = p, p += NBPG;
2971: vpage[1] = p, p += NBPG;
2972: vmmap = p, p += NBPG;
2973: p = reserve_dumppages(p);
2974:
2975: virtual_avail = (vaddr_t)p;
2976: virtual_end = VM_MAX_KERNEL_ADDRESS;
2977:
2978: p = (caddr_t)i; /* retract to first free phys */
2979:
2980: /*
2981: * All contexts are free except the kernel's.
2982: *
2983: * XXX sun4c could use context 0 for users?
2984: */
2985: simple_lock_init(&ctx_lock);
2986: ci->c_pmap = pmap_kernel();
2987: ctx_freelist = ci + 1;
2988: for (i = 1; i < ncontext; i++) {
2989: ci++;
2990: ci->c_nextfree = ci + 1;
2991: }
2992: ci->c_nextfree = NULL;
2993: ctx_kick = 0;
2994: ctx_kickdir = -1;
2995:
2996: /*
2997: * Init mmu entries that map the kernel physical addresses.
2998: *
2999: * All the other MMU entries are free.
3000: *
3001: * THIS ASSUMES SEGMENT i IS MAPPED BY MMU ENTRY i DURING THE
3002: * BOOT PROCESS
3003: */
3004:
3005: /* Compute the number of segments used by the kernel */
3006: zseg = ((((u_int)p + NBPSG - 1) & ~SGOFSET) - KERNBASE) >> SGSHIFT;
3007: lastpage = VA_VPG(p);
3008: if (lastpage == 0)
3009: /*
3010: * If the page bits in p are 0, we filled the last segment
3011: * exactly (now how did that happen?); if not, it is
3012: * the last page filled in the last segment.
3013: */
3014: lastpage = NPTESG;
3015:
3016: p = (caddr_t)KERNBASE; /* first va */
3017: vs = VA_VSEG(KERNBASE); /* first virtual segment */
3018: vr = VA_VREG(KERNBASE); /* first virtual region */
3019: rp = &pmap_kernel()->pm_regmap[vr];
3020:
3021: for (rcookie = 0, scookie = 0;;) {
3022:
3023: /*
3024: * Distribute each kernel region/segment into all contexts.
3025: * This is done through the monitor ROM, rather than
3026: * directly here: if we do a setcontext we will fault,
3027: * as we are not (yet) mapped in any other context.
3028: */
3029:
3030: if ((vs % NSEGRG) == 0) {
3031: /* Entering a new region */
3032: if (VA_VREG(p) > vr) {
3033: #ifdef DEBUG
3034: printf("note: giant kernel!\n");
3035: #endif
3036: vr++, rp++;
3037: }
3038: #if defined(SUN4_MMU3L)
3039: if (HASSUN4_MMU3L) {
3040: for (i = 1; i < nctx; i++)
3041: prom_setcontext(i, p, rcookie);
3042:
3043: TAILQ_INSERT_TAIL(&region_locked,
3044: mmureg, me_list);
3045: TAILQ_INSERT_TAIL(&pmap_kernel()->pm_reglist,
3046: mmureg, me_pmchain);
3047: mmureg->me_cookie = rcookie;
3048: mmureg->me_pmap = pmap_kernel();
3049: mmureg->me_vreg = vr;
3050: rp->rg_smeg = rcookie;
3051: mmureg++;
3052: rcookie++;
3053: }
3054: #endif
3055: }
3056:
3057: #if defined(SUN4_MMU3L)
3058: if (!HASSUN4_MMU3L)
3059: #endif
3060: for (i = 1; i < nctx; i++)
3061: prom_setcontext(i, p, scookie);
3062:
3063: /* set up the mmu entry */
3064: TAILQ_INSERT_TAIL(&segm_locked, mmuseg, me_list);
3065: TAILQ_INSERT_TAIL(&pmap_kernel()->pm_seglist, mmuseg, me_pmchain);
3066: pmap_stats.ps_npmeg_locked++;
3067: mmuseg->me_cookie = scookie;
3068: mmuseg->me_pmap = pmap_kernel();
3069: mmuseg->me_vreg = vr;
3070: mmuseg->me_vseg = vs % NSEGRG;
3071: rp->rg_segmap[vs % NSEGRG].sg_pmeg = scookie;
3072: npte = ++scookie < zseg ? NPTESG : lastpage;
3073: rp->rg_segmap[vs % NSEGRG].sg_npte = npte;
3074: rp->rg_nsegmap += 1;
3075: mmuseg++;
3076: vs++;
3077: if (scookie < zseg) {
3078: p += NBPSG;
3079: continue;
3080: }
3081:
3082: /*
3083: * Unmap the pages, if any, that are not part of
3084: * the final segment.
3085: */
3086: for (p += npte << PGSHIFT; npte < NPTESG; npte++, p += NBPG)
3087: setpte4(p, 0);
3088:
3089: #if defined(SUN4_MMU3L)
3090: if (HASSUN4_MMU3L) {
3091: /*
3092: * Unmap the segments, if any, that are not part of
3093: * the final region.
3094: */
3095: for (i = rp->rg_nsegmap; i < NSEGRG; i++, p += NBPSG)
3096: setsegmap(p, seginval);
3097:
3098: /*
3099: * Unmap any kernel regions that we aren't using.
3100: */
3101: for (i = 0; i < nctx; i++) {
3102: setcontext4(i);
3103: for (va = (vaddr_t)p;
3104: va < (OPENPROM_STARTVADDR & ~(NBPRG - 1));
3105: va += NBPRG)
3106: setregmap(va, reginval);
3107: }
3108:
3109: } else
3110: #endif
3111: {
3112: /*
3113: * Unmap any kernel segments that we aren't using.
3114: */
3115: for (i = 0; i < nctx; i++) {
3116: setcontext4(i);
3117: for (va = (vaddr_t)p;
3118: va < (OPENPROM_STARTVADDR & ~(NBPSG - 1));
3119: va += NBPSG)
3120: setsegmap(va, seginval);
3121: }
3122: }
3123: break;
3124: }
3125:
3126: #if defined(SUN4_MMU3L)
3127: if (HASSUN4_MMU3L)
3128: for (; rcookie < nregion; rcookie++, mmureg++) {
3129: mmureg->me_cookie = rcookie;
3130: TAILQ_INSERT_TAIL(&region_freelist, mmureg, me_list);
3131: }
3132: #endif
3133:
3134: for (; scookie < nsegment; scookie++, mmuseg++) {
3135: mmuseg->me_cookie = scookie;
3136: TAILQ_INSERT_TAIL(&segm_freelist, mmuseg, me_list);
3137: pmap_stats.ps_npmeg_free++;
3138: }
3139:
3140: /* Erase all spurious user-space segmaps */
3141: for (i = 1; i < ncontext; i++) {
3142: setcontext4(i);
3143: if (HASSUN4_MMU3L)
3144: for (p = 0, j = NUREG; --j >= 0; p += NBPRG)
3145: setregmap(p, reginval);
3146: else
3147: for (p = 0, vr = 0; vr < NUREG; vr++) {
3148: if (VA_INHOLE(p)) {
3149: p = (caddr_t)MMU_HOLE_END;
3150: vr = VA_VREG(p);
3151: }
3152: for (j = NSEGRG; --j >= 0; p += NBPSG)
3153: setsegmap(p, seginval);
3154: }
3155: }
3156: setcontext4(0);
3157:
3158: /*
3159: * write protect & encache kernel text;
3160: * set red zone at kernel base; enable cache on message buffer.
3161: */
3162: {
3163: extern char etext[];
3164: #ifdef KGDB
3165: int mask = ~PG_NC; /* XXX chgkprot is busted */
3166: #else
3167: int mask = ~(PG_W | PG_NC);
3168: #endif
3169:
3170: for (p = (caddr_t)trapbase; p < etext; p += NBPG)
3171: setpte4(p, getpte4(p) & mask);
3172:
3173: /*
3174: * Unmap the `etext gap'; it'll be made available
3175: * to the VM manager.
3176: */
3177: for (p = (caddr_t)etext_gap_start;
3178: p < (caddr_t)etext_gap_end;
3179: p += NBPG)
3180: setpte4(p, 0);
3181: }
3182: }
3183: #endif
3184:
1.199.4.9 nathanw 3185: #if defined(SUN4M) || defined(SUN4D) /* SRMMU version of pmap_bootstrap */
1.199.4.2 pk 3186: /*
1.199.4.4 nathanw 3187: * Bootstrap the system enough to run with VM enabled on a sun4m machine.
1.199.4.2 pk 3188: *
3189: * Switches from ROM to kernel page tables, and sets up initial mappings.
3190: */
3191: static void
3192: pmap_bootstrap4m(void)
3193: {
3194: int i, j;
3195: caddr_t p, q;
3196: union ctxinfo *ci;
3197: int reg, seg;
3198: unsigned int ctxtblsize;
3199: caddr_t pagetables_start, pagetables_end;
3200: paddr_t pagetables_start_pa;
1.199.4.4 nathanw 3201: extern char *kernel_top;
1.199.4.2 pk 3202: extern char etext[];
3203: extern caddr_t reserve_dumppages(caddr_t);
3204:
3205: ncontext = cpuinfo.mmu_ncontext;
3206:
1.199.4.9 nathanw 3207: #if defined(SUN4) || defined(SUN4C) /* setup SRMMU fn. ptrs for dual-arch
3208: kernel */
1.199.4.2 pk 3209: pmap_clear_modify_p = pmap_clear_modify4m;
3210: pmap_clear_reference_p = pmap_clear_reference4m;
3211: pmap_enter_p = pmap_enter4m;
3212: pmap_extract_p = pmap_extract4m;
3213: pmap_is_modified_p = pmap_is_modified4m;
3214: pmap_is_referenced_p = pmap_is_referenced4m;
3215: pmap_kenter_pa_p = pmap_kenter_pa4m;
3216: pmap_kremove_p = pmap_kremove4m;
3217: pmap_page_protect_p = pmap_page_protect4m;
3218: pmap_protect_p = pmap_protect4m;
3219: pmap_changeprot_p = pmap_changeprot4m;
3220: pmap_rmk_p = pmap_rmk4m;
3221: pmap_rmu_p = pmap_rmu4m;
1.199.4.4 nathanw 3222: #endif /* defined SUN4/SUN4C */
1.199.4.2 pk 3223:
3224: /*
3225: * p points to top of kernel mem
3226: */
1.199.4.4 nathanw 3227: p = kernel_top;
1.199.4.2 pk 3228:
3229: /*
3230: * Initialize the kernel pmap.
3231: */
3232: /* kernel_pmap_store.pm_ctxnum = 0; */
3233: simple_lock_init(&kernel_pmap_store.pm_lock);
3234: kernel_pmap_store.pm_refcount = 1;
3235:
3236: /*
3237: * Set up pm_regmap for kernel to point NUREG *below* the beginning
3238: * of kernel regmap storage. Since the kernel only uses regions
3239: * above NUREG, we save storage space and can index kernel and
3240: * user regions in the same way.
3241: */
3242: kernel_pmap_store.pm_regmap = &kernel_regmap_store[-NUREG];
3243: bzero(kernel_regmap_store, NKREG * sizeof(struct regmap));
3244: bzero(kernel_segmap_store, NKREG * NSEGRG * sizeof(struct segmap));
3245: for (i = NKREG; --i >= 0;) {
3246: kernel_regmap_store[i].rg_segmap =
3247: &kernel_segmap_store[i * NSEGRG];
3248: kernel_regmap_store[i].rg_seg_ptps = NULL;
3249: for (j = NSEGRG; --j >= 0;)
3250: kernel_segmap_store[i * NSEGRG + j].sg_pte = NULL;
3251: }
3252:
3253: /* Allocate kernel region pointer tables */
3254: pmap_kernel()->pm_reg_ptps = (int **)(q = p);
3255: p += ncpu * sizeof(int **);
3256: bzero(q, (u_int)p - (u_int)q);
3257:
3258: pmap_kernel()->pm_reg_ptps_pa = (int *)(q = p);
3259: p += ncpu * sizeof(int *);
3260: bzero(q, (u_int)p - (u_int)q);
3261:
3262: /* Allocate context administration */
3263: pmap_kernel()->pm_ctx = ctxinfo = ci = (union ctxinfo *)p;
3264: p += ncontext * sizeof *ci;
3265: bzero((caddr_t)ci, (u_int)p - (u_int)ci);
3266:
3267:
3268: /*
3269: * Set up the `constants' for the call to vm_init()
3270: * in main(). All pages beginning at p (rounded up to
3271: * the next whole page) and continuing through the number
3272: * of available pages are free.
3273: */
3274: p = (caddr_t)(((u_int)p + NBPG - 1) & ~PGOFSET);
3275:
3276: /*
3277: * Reserve memory for MMU pagetables. Some of these have severe
3278: * alignment restrictions. We allocate in a sequence that
3279: * minimizes alignment gaps.
3280: */
3281:
3282: pagetables_start = p;
3283: pagetables_start_pa = PMAP_BOOTSTRAP_VA2PA(p);
3284:
3285: /*
3286: * Allocate context table.
3287: * To keep supersparc happy, minimum alignment is on a 4K boundary.
3288: */
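/*
 * With fewer than 1024 contexts the table is padded up to 4KB
 * (1024 * sizeof(int)) so that the roundup() below also yields 4K
 * alignment; with more, it is aligned to its own (larger) size.
 */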
3289: ctxtblsize = max(ncontext,1024) * sizeof(int);
3290: cpuinfo.ctx_tbl = (int *)roundup((u_int)p, ctxtblsize);
3291: cpuinfo.ctx_tbl_pa = PMAP_BOOTSTRAP_VA2PA(cpuinfo.ctx_tbl);
3292: p = (caddr_t)((u_int)cpuinfo.ctx_tbl + ctxtblsize);
3293:
3294: /*
3295: * Reserve memory for segment and page tables needed to map the entire
3296: * kernel. This takes (2K + NKREG * 16K) of space, but unfortunately
3297: * is necessary since pmap_enter() *must* be able to enter a kernel
3298: * mapping without delay.
3299: */
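/*
 * (Arithmetic sketch, assuming the usual SRMMU geometry of 256 L1,
 * 64 L2 and 64 L3 entries of 4 bytes each: the L1 table is 1KB,
 * each region needs a 256-byte L2 table, and its NSEGRG page tables
 * of 256 bytes each add up to 16KB per kernel region.)
 */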
3300: p = (caddr_t) roundup((u_int)p, SRMMU_L1SIZE * sizeof(u_int));
3301: qzero(p, SRMMU_L1SIZE * sizeof(u_int));
3302: kernel_regtable_store = (u_int *)p;
3303: p += SRMMU_L1SIZE * sizeof(u_int);
3304:
3305: p = (caddr_t) roundup((u_int)p, SRMMU_L2SIZE * sizeof(u_int));
3306: qzero(p, (SRMMU_L2SIZE * sizeof(u_int)) * NKREG);
3307: kernel_segtable_store = (u_int *)p;
3308: p += (SRMMU_L2SIZE * sizeof(u_int)) * NKREG;
3309:
3310: p = (caddr_t) roundup((u_int)p, SRMMU_L3SIZE * sizeof(u_int));
3311: /* zero it: all will be SRMMU_TEINVALID */
3312: qzero(p, ((SRMMU_L3SIZE * sizeof(u_int)) * NKREG) * NSEGRG);
3313: kernel_pagtable_store = (u_int *)p;
3314: p += ((SRMMU_L3SIZE * sizeof(u_int)) * NKREG) * NSEGRG;
3315:
3316: /* Round to next page and mark end of pre-wired kernel space */
3317: p = (caddr_t)(((u_int)p + NBPG - 1) & ~PGOFSET);
3318: pagetables_end = p;
3319:
3320: avail_start = PMAP_BOOTSTRAP_VA2PA(p);
3321:
3322: /*
3323: * Now wire the region and segment tables of the kernel map.
3324: */
3325: pmap_kernel()->pm_reg_ptps[0] = (int *) kernel_regtable_store;
3326: pmap_kernel()->pm_reg_ptps_pa[0] =
3327: VA2PA((caddr_t)pmap_kernel()->pm_reg_ptps[0]);
3328:
3329: /* Install L1 table in context 0 */
3330: setpgt4m(&cpuinfo.ctx_tbl[0],
3331: (pmap_kernel()->pm_reg_ptps_pa[0] >> SRMMU_PPNPASHIFT) | SRMMU_TEPTD);
3332:
3333: for (reg = 0; reg < NKREG; reg++) {
3334: struct regmap *rp;
3335: caddr_t kphyssegtbl;
3336:
3337: /*
3338: * Entering new region; install & build segtbl
3339: */
3340:
3341: rp = &pmap_kernel()->pm_regmap[reg + VA_VREG(KERNBASE)];
3342:
3343: kphyssegtbl = (caddr_t)
3344: &kernel_segtable_store[reg * SRMMU_L2SIZE];
3345:
3346: setpgt4m(&pmap_kernel()->pm_reg_ptps[0][reg + VA_VREG(KERNBASE)],
3347: (VA2PA(kphyssegtbl) >> SRMMU_PPNPASHIFT) | SRMMU_TEPTD);
3348:
3349: rp->rg_seg_ptps = (int *)kphyssegtbl;
3350:
3351: for (seg = 0; seg < NSEGRG; seg++) {
3352: struct segmap *sp;
3353: caddr_t kphyspagtbl;
3354:
3355: rp->rg_nsegmap++;
3356:
3357: sp = &rp->rg_segmap[seg];
3358: kphyspagtbl = (caddr_t)
3359: &kernel_pagtable_store
3360: [((reg * NSEGRG) + seg) * SRMMU_L3SIZE];
3361:
3362: setpgt4m(&rp->rg_seg_ptps[seg],
3363: (VA2PA(kphyspagtbl) >> SRMMU_PPNPASHIFT) |
3364: SRMMU_TEPTD);
3365: sp->sg_pte = (int *) kphyspagtbl;
3366: }
3367: }
3368:
3369: /*
3370: * Preserve the monitor ROM's reserved VM region, so that
3371: * we can use L1-A or the monitor's debugger.
3372: */
3373: mmu_reservemon4m(&kernel_pmap_store);
3374:
3375: /*
3376: * Reserve virtual address space for two mappable MD pages
3377: * for pmap_zero_page and pmap_copy_page, one MI page
3378: * for /dev/mem, and some more for dumpsys().
3379: */
3380: q = p;
3381: vpage[0] = p, p += NBPG;
3382: vpage[1] = p, p += NBPG;
3383: vmmap = p, p += NBPG;
3384: p = reserve_dumppages(p);
3385:
3386: /* Find PTE locations of vpage[] to optimize zero_fill() et al. */
3387: for (i = 0; i < 2; i++) {
3388: struct regmap *rp;
3389: struct segmap *sp;
3390: rp = &pmap_kernel()->pm_regmap[VA_VREG(vpage[i])];
3391: sp = &rp->rg_segmap[VA_VSEG(vpage[i])];
3392: vpage_pte[i] = &sp->sg_pte[VA_SUN4M_VPG(vpage[i])];
3393: }
3394:
3395: virtual_avail = (vaddr_t)p;
3396: virtual_end = VM_MAX_KERNEL_ADDRESS;
3397:
3398: p = q; /* retract to first free phys */
3399:
3400: /*
3401: * Set up the ctxinfo structures (freelist of contexts)
3402: */
3403: simple_lock_init(&ctx_lock);
3404: ci->c_pmap = pmap_kernel();
3405: ctx_freelist = ci + 1;
3406: for (i = 1; i < ncontext; i++) {
3407: ci++;
3408: ci->c_nextfree = ci + 1;
3409: }
3410: ci->c_nextfree = NULL;
3411: ctx_kick = 0;
3412: ctx_kickdir = -1;
3413:
3414: /*
3415: * Now map the kernel into our new set of page tables, then
3416: * (finally) switch over to our running page tables.
3417: * We map from KERNBASE to p into context 0's page tables (and
3418: * the kernel pmap).
3419: */
3420: #ifdef DEBUG /* Sanity checks */
3421: if ((u_int)p % NBPG != 0)
3422: panic("pmap_bootstrap4m: p misaligned?!?");
3423: if (KERNBASE % NBPRG != 0)
3424: panic("pmap_bootstrap4m: KERNBASE not region-aligned");
3425: #endif
3426:
3427: for (q = (caddr_t) KERNBASE; q < p; q += NBPG) {
3428: struct regmap *rp;
3429: struct segmap *sp;
3430: int pte, *ptep;
3431:
3432: /*
3433: * Now install entry for current page.
3434: */
3435: rp = &pmap_kernel()->pm_regmap[VA_VREG(q)];
3436: sp = &rp->rg_segmap[VA_VSEG(q)];
3437: ptep = &sp->sg_pte[VA_VPG(q)];
3438:
3439: /*
3440: * Unmap the `etext gap'; it'll be made available
3441: * to the VM manager.
3442: */
3443: if (q >= (caddr_t)etext_gap_start &&
3444: q < (caddr_t)etext_gap_end) {
3445: setpgt4m(ptep, 0);
3446: continue;
3447: }
3448:
3449: sp->sg_npte++;
3450:
3451: pte = PMAP_BOOTSTRAP_VA2PA(q) >> SRMMU_PPNPASHIFT;
3452: pte |= PPROT_N_RX | SRMMU_TEPTE;
3453:
3454: /* Deal with the cacheable bit for pagetable memory */
3455: if ((cpuinfo.flags & CPUFLG_CACHEPAGETABLES) != 0 ||
3456: q < pagetables_start || q >= pagetables_end)
3457: pte |= SRMMU_PG_C;
3458:
3459: 		/* write-protect kernel text; all other pages get PPROT_WRITE */
3460: if (q < (caddr_t) trapbase || q >= etext)
3461: pte |= PPROT_WRITE;
3462:
3463: setpgt4m(ptep, pte);
3464: }
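	/*
	 * Illustrative summary of the PTEs built above: a cacheable
	 * kernel text page at physical address pa ends up as
	 *	(pa >> SRMMU_PPNPASHIFT) | PPROT_N_RX | SRMMU_TEPTE | SRMMU_PG_C
	 * i.e. a valid, cached, supervisor read/execute entry; pages
	 * outside the text region additionally get PPROT_WRITE.
	 */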
3465:
3466: if ((cpuinfo.flags & CPUFLG_CACHEPAGETABLES) == 0) {
3467: /*
3468: 		 * The page tables have been set up. Since we're still
3469: * running on the PROM's memory map, the memory we
3470: * allocated for our page tables might still be cached.
3471: * Flush it now, and don't touch it again until we
3472: * switch to our own tables (will be done immediately below).
3473: */
3474: int size = pagetables_end - pagetables_start;
3475: if (CACHEINFO.c_vactype != VAC_NONE) {
3476: 			vaddr_t va = (vaddr_t)pagetables_start;
3477: while (size != 0) {
1.199.4.12 thorpej 3478: cache_flush_page(va, 0);
1.199.4.2 pk 3479: va += NBPG;
3480: size -= NBPG;
3481: }
3482: } else if (cpuinfo.pcache_flush_page != NULL) {
3483: 			paddr_t pa = pagetables_start_pa;
3484: while (size != 0) {
3485: pcache_flush_page(pa, 0);
3486: pa += NBPG;
3487: size -= NBPG;
3488: }
3489: }
3490: }
3491:
3492: /*
3493: * Now switch to kernel pagetables (finally!)
3494: */
3495: mmu_install_tables(&cpuinfo);
3496: }
3497:
3498: static u_long prom_ctxreg;
3499:
3500: void
3501: mmu_install_tables(sc)
3502: struct cpu_info *sc;
3503: {
3504:
3505: #ifdef DEBUG
3506: printf("pmap_bootstrap: installing kernel page tables...");
3507: #endif
3508: 	setcontext4m(0);	/* paranoia? %%%: make the ~0x3 mask below a #define */
3509:
3510: /* Enable MMU tablewalk caching, flush TLB */
3511: if (sc->mmu_enable != 0)
3512: sc->mmu_enable();
3513:
3514: tlb_flush_all_real();
3515: prom_ctxreg = lda(SRMMU_CXTPTR, ASI_SRMMU);
3516:
3517: sta(SRMMU_CXTPTR, ASI_SRMMU,
3518: (sc->ctx_tbl_pa >> SRMMU_PPNPASHIFT) & ~0x3);
3519:
3520: tlb_flush_all_real();
3521:
3522: #ifdef DEBUG
3523: printf("done.\n");
3524: #endif
3525: }
3526:
3527: void srmmu_restore_prom_ctx __P((void));
3528:
3529: void
3530: srmmu_restore_prom_ctx()
3531: {
3532: tlb_flush_all();
3533: sta(SRMMU_CXTPTR, ASI_SRMMU, prom_ctxreg);
3534: tlb_flush_all();
3535: }
3536:
3537: /*
3538: * Globalize the boot cpu's cpu_info structure.
3539: */
3540: void
3541: pmap_globalize_boot_cpuinfo(cpi)
3542: struct cpu_info *cpi;
3543: {
3544: vaddr_t va;
3545: vsize_t off;
3546:
3547: off = 0;
3548: for (va = (vaddr_t)cpi; off < sizeof(*cpi); va += NBPG, off += NBPG) {
3549: paddr_t pa = VA2PA((caddr_t)CPUINFO_VA + off);
3550: pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE);
3551: }
3552: pmap_update(pmap_kernel());
3553: }
3554:
3555: /*
3556:  * Allocate per-CPU page tables. One region, one segment and one page
3557:  * table are needed to map CPUINFO_VA to a different physical address
3558:  * on each CPU. Since the kernel region and segment tables are all
3559: * pre-wired (in bootstrap() above) and we also assume that the
3560: * first segment (256K) of kernel space is fully populated with
3561: * pages from the start, these per-CPU tables will never need
3562: * to be updated when mapping kernel virtual memory.
3563: *
3564: * Note: this routine is called in the context of the boot CPU
3565: * during autoconfig.
3566: */
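/*
 * Sketch of the allocation made below (one physically contiguous
 * chunk, aligned to the context table size):
 *
 *	| context table (ctxsize) | L1 region | L2 segment | L3 page |
 *
 * With the usual SRMMU geometry the three level tables take
 * 1024 + 256 + 256 bytes and fit in the single page following
 * the context table.
 */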
3567: void
3568: pmap_alloc_cpu(sc)
3569: struct cpu_info *sc;
3570: {
3571: vaddr_t va;
3572: paddr_t pa;
3573: paddr_t alignment;
3574: u_int *ctxtable, *regtable, *segtable, *pagtable;
3575: u_int *ctxtable_pa, *regtable_pa, *segtable_pa, *pagtable_pa;
3576: psize_t ctxsize, size;
3577: int vr, vs, vpg;
3578: struct regmap *rp;
3579: struct segmap *sp;
3580: struct pglist mlist;
3581: int cachebit;
3582: int pagesz = NBPG;
3583:
1.199.4.14 thorpej 3584: cachebit = (cpuinfo.flags & CPUFLG_CACHEPAGETABLES) != 0;
1.199.4.2 pk 3585:
3586: /*
3587: 	 * Allocate properly aligned and physically contiguous memory
3588: * for the PTE tables.
3589: */
3590: ctxsize = (sc->mmu_ncontext * sizeof(int) + pagesz - 1) & -pagesz;
3591: alignment = ctxsize;
3592:
3593: /* The region, segment and page table we need fit in one page */
3594: size = ctxsize + pagesz;
3595:
3596: if (uvm_pglistalloc(size, vm_first_phys, vm_first_phys+vm_num_phys,
3597: alignment, 0, &mlist, 1, 0) != 0)
3598: panic("pmap_alloc_cpu: no memory");
3599:
3600: pa = VM_PAGE_TO_PHYS(TAILQ_FIRST(&mlist));
3601:
3602: /* Allocate virtual memory */
3603: va = uvm_km_valloc(kernel_map, size);
3604: if (va == 0)
3605: panic("pmap_alloc_cpu: no memory");
3606:
3607: /*
3608: * Layout the page tables in our chunk of memory
3609: */
3610: ctxtable = (u_int *)va;
3611: regtable = (u_int *)(va + ctxsize);
3612: segtable = regtable + SRMMU_L1SIZE;
3613: pagtable = segtable + SRMMU_L2SIZE;
3614:
3615: ctxtable_pa = (u_int *)pa;
3616: regtable_pa = (u_int *)(pa + ctxsize);
3617: segtable_pa = regtable_pa + SRMMU_L1SIZE;
3618: pagtable_pa = segtable_pa + SRMMU_L2SIZE;
3619:
3620: /* Map the pages */
3621: while (size != 0) {
3622: pmap_kenter_pa(va, pa | (cachebit ? 0 : PMAP_NC),
3623: VM_PROT_READ | VM_PROT_WRITE);
3624: va += pagesz;
3625: pa += pagesz;
3626: size -= pagesz;
3627: }
3628: pmap_update(pmap_kernel());
3629:
3630: /*
3631: * Store the region table pointer (and its corresponding physical
3632: * address) in the CPU's slot in the kernel pmap region table
3633: * pointer table.
3634: */
3635: pmap_kernel()->pm_reg_ptps[sc->ci_cpuid] = regtable;
3636: pmap_kernel()->pm_reg_ptps_pa[sc->ci_cpuid] = (paddr_t)regtable_pa;
3637:
3638: vr = VA_VREG(CPUINFO_VA);
3639: vs = VA_VSEG(CPUINFO_VA);
3640: vpg = VA_VPG(CPUINFO_VA);
3641: rp = &pmap_kernel()->pm_regmap[vr];
3642: sp = &rp->rg_segmap[vs];
3643:
3644: /*
3645: * Copy page tables from CPU #0, then modify entry for CPUINFO_VA
3646: * so that it points at the per-CPU pages.
3647: */
3648: qcopy(pmap_kernel()->pm_reg_ptps[0], regtable,
3649: SRMMU_L1SIZE * sizeof(int));
3650: qcopy(rp->rg_seg_ptps, segtable, SRMMU_L2SIZE * sizeof(int));
3651: qcopy(sp->sg_pte, pagtable, SRMMU_L3SIZE * sizeof(int));
3652:
3653: setpgt4m(&ctxtable[0],
3654: ((u_long)regtable_pa >> SRMMU_PPNPASHIFT) | SRMMU_TEPTD);
3655: 	setpgt4m(&regtable[vr],
3656: ((u_long)segtable_pa >> SRMMU_PPNPASHIFT) | SRMMU_TEPTD);
3657: setpgt4m(&segtable[vs],
3658: ((u_long)pagtable_pa >> SRMMU_PPNPASHIFT) | SRMMU_TEPTD);
3659: setpgt4m(&pagtable[vpg],
3660: (VA2PA((caddr_t)sc) >> SRMMU_PPNPASHIFT) |
3661: (SRMMU_TEPTE | PPROT_N_RWX | SRMMU_PG_C));
3662:
3663: /* Install this CPU's context table */
3664: sc->ctx_tbl = ctxtable;
3665: sc->ctx_tbl_pa = (paddr_t)ctxtable_pa;
3666: }
1.199.4.9 nathanw 3667: #endif /* SUN4M || SUN4D */
1.199.4.2 pk 3668:
3669:
3670: void
3671: pmap_init()
3672: {
3673: u_int sizeof_pmap;
3674: int n, npages;
3675: vsize_t s;
3676: vaddr_t va;
3677:
3678: if (PAGE_SIZE != NBPG)
1.199.4.4 nathanw 3679: panic("pmap_init: PAGE_SIZE!=NBPG");
1.199.4.2 pk 3680:
3681: npages = 0;
3682: for (n = 0; n < vm_nphysseg; n++)
3683: npages += (vm_physmem[n].end - vm_physmem[n].start);
3684:
3685: s = (vsize_t)round_page(npages * sizeof(struct pvlist));
3686: va = (vaddr_t)uvm_km_zalloc(kernel_map, s);
3687: if (va == 0)
3688: panic("pmap_init: pv_table");
3689:
3690: for (n = 0; n < vm_nphysseg; n++) {
3691: vm_physmem[n].pmseg.pvhead = (struct pvlist *)va;
3692: va += (vm_physmem[n].end - vm_physmem[n].start) *
3693: sizeof(struct pvlist);
3694: }
3695:
3696: vm_num_phys = vm_last_phys - vm_first_phys;
3697:
3698: 	/* Set up a pool for additional pvlist structures */
1.199.4.5 nathanw 3699: pool_init(&pv_pool, sizeof(struct pvlist), 0, 0, 0, "pvtable", NULL);
1.199.4.2 pk 3700:
3701: /*
3702: 	 * Set up a pool for pmap structures.
3703: * The pool size includes space for an array of per-cpu
3704: * region table pointers & physical addresses
3705: */
3706: sizeof_pmap = ALIGN(sizeof(struct pmap)) +
3707: ALIGN(NUREG * sizeof(struct regmap)) +
3708: ncpu * sizeof(int *) + /* pm_reg_ptps */
3709: ncpu * sizeof(int); /* pm_reg_ptps_pa */
3710: pool_init(&pmap_pmap_pool, sizeof_pmap, 0, 0, 0, "pmappl",
1.199.4.5 nathanw 3711: &pool_allocator_nointr);
1.199.4.2 pk 3712: pool_cache_init(&pmap_pmap_pool_cache, &pmap_pmap_pool,
3713: pmap_pmap_pool_ctor, pmap_pmap_pool_dtor, NULL);
3714:
1.199.4.9 nathanw 3715: #if defined(SUN4M) || defined(SUN4D)
3716: if (CPU_HAS_SRMMU) {
1.199.4.2 pk 3717: /*
3718: * The SRMMU only ever needs chunks in one of two sizes:
3719: * 1024 (for region level tables) and 256 (for segment
3720: * and page level tables).
3721: */
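		/*
		 * That is, with the usual SRMMU geometry:
		 * 256 L1 entries * sizeof(int) = 1024 bytes, and
		 * 64 L2/L3 entries * sizeof(int) = 256 bytes.
		 */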
3722: int n;
3723:
3724: n = SRMMU_L1SIZE * sizeof(int);
1.199.4.5 nathanw 3725: pool_init(&L1_pool, n, n, 0, 0, "L1 pagetable",
3726: &pgt_page_allocator);
1.199.4.2 pk 3727:
3728: n = SRMMU_L2SIZE * sizeof(int);
1.199.4.5 nathanw 3729: pool_init(&L23_pool, n, n, 0, 0, "L2/L3 pagetable",
3730: &pgt_page_allocator);
1.199.4.2 pk 3731: }
3732: #endif
3733:
3734: pmap_initialized = 1;
3735: }
3736:
3737:
3738: /*
3739: * Map physical addresses into kernel VM.
3740: */
3741: vaddr_t
3742: pmap_map(va, pa, endpa, prot)
3743: vaddr_t va;
3744: paddr_t pa, endpa;
3745: int prot;
3746: {
3747: int pgsize = PAGE_SIZE;
3748:
3749: while (pa < endpa) {
3750: pmap_kenter_pa(va, pa, prot);
3751: va += pgsize;
3752: pa += pgsize;
3753: }
3754: pmap_update(pmap_kernel());
3755: return (va);
3756: }
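/*
 * Illustrative use, e.g. from early attach code: map a physical
 * range [pa, pa+len) read/write and get back the first VA past it:
 *
 *	va = pmap_map(va, pa, pa + len, VM_PROT_READ | VM_PROT_WRITE);
 */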
3757:
3758: #ifdef DEBUG
3759: /*
3760: * Check a pmap for spuriously lingering mappings
3761: */
3762: static __inline__ void
3763: pmap_quiet_check(struct pmap *pm)
3764: {
3765: int vs, vr;
3766:
1.199.4.9 nathanw 3767: if (CPU_ISSUN4 || CPU_ISSUN4C) {
1.199.4.2 pk 3768: #if defined(SUN4_MMU3L)
3769: if (pm->pm_reglist.tqh_first)
3770: panic("pmap_destroy: region list not empty");
3771: #endif
3772: if (pm->pm_seglist.tqh_first)
3773: panic("pmap_destroy: segment list not empty");
3774: }
3775:
3776: for (vr = 0; vr < NUREG; vr++) {
3777: struct regmap *rp = &pm->pm_regmap[vr];
3778:
1.199.4.9 nathanw 3779: if (CPU_ISSUN4 || CPU_ISSUN4C) {
1.199.4.2 pk 3780: #if defined(SUN4_MMU3L)
3781: if (HASSUN4_MMU3L) {
3782: if (rp->rg_smeg != reginval)
3783: printf("pmap_chk: spurious smeg in "
3784: "user region %d\n", vr);
3785: }
3786: #endif
3787: }
1.199.4.9 nathanw 3788: if (CPU_HAS_SRMMU) {
1.199.4.2 pk 3789: int n;
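		/*
		 * Note: the braced block below is the body of the
		 * per-CPU loop under MULTIPROCESSOR; otherwise it
		 * runs exactly once with n = 0.
		 */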
3790: #if defined(MULTIPROCESSOR)
3791: for (n = 0; n < ncpu; n++)
3792: #else
3793: n = 0;
3794: #endif
3795: {
3796: if (pm->pm_reg_ptps[n][vr] != SRMMU_TEINVALID)
3797: printf("pmap_chk: spurious PTP in user "
3798: "region %d on cpu %d\n", vr, n);
3799: }
3800: }
3801: if (rp->rg_nsegmap != 0)
3802: printf("pmap_chk: %d segments remain in "
3803: "region %d\n", rp->rg_nsegmap, vr);
3804: if (rp->rg_segmap != NULL) {
3805: printf("pmap_chk: segments still "
3806: "allocated in region %d\n", vr);
3807: for (vs = 0; vs < NSEGRG; vs++) {
3808: struct segmap *sp = &rp->rg_segmap[vs];
3809: if (sp->sg_npte != 0)
3810: printf("pmap_chk: %d ptes "
3811: "remain in segment %d\n",
3812: sp->sg_npte, vs);
3813: if (sp->sg_pte != NULL) {
3814: printf("pmap_chk: ptes still "
3815: "allocated in segment %d\n", vs);
3816: }
1.199.4.11 thorpej 3817: if (CPU_ISSUN4 || CPU_ISSUN4C) {
3818: if (sp->sg_pmeg != seginval)
3819: printf("pmap_chk: pm %p(%d,%d) "
3820: "spurious soft pmeg %d\n",
3821: pm, vr, vs, sp->sg_pmeg);
3822: }
1.199.4.2 pk 3823: }
3824: }
1.199.4.11 thorpej 3825:
3826: /* Check for spurious pmeg entries in the MMU */
3827: if (pm->pm_ctx == NULL)
3828: continue;
3829: if ((CPU_ISSUN4 || CPU_ISSUN4C)) {
3830: int ctx;
3831: 			if (mmu_has_hole && (vr >= 32 && vr < (256 - 32)))
3832: continue;
3833: ctx = getcontext4();
3834: setcontext4(pm->pm_ctxnum);
3835: for (vs = 0; vs < NSEGRG; vs++) {
3836: vaddr_t va = VSTOVA(vr,vs);
3837: int pmeg = getsegmap(va);
3838: if (pmeg != seginval)
3839: printf("pmap_chk: pm %p(%d,%d:%x): "
3840: "spurious pmeg %d\n",
3841: pm, vr, vs, (u_int)va, pmeg);
3842: }
3843: setcontext4(ctx);
3844: }
1.199.4.2 pk 3845: }
3846: }
3847: #endif /* DEBUG */
3848:
3849: int
3850: pmap_pmap_pool_ctor(void *arg, void *object, int flags)
3851: {
3852: struct pmap *pm = object;
3853: u_long addr;
3854:
3855: bzero(pm, sizeof *pm);
3856:
3857: /*
3858: * `pmap_pool' entries include space for the per-CPU
3859: * region table pointer arrays.
3860: */
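	/*
	 * Resulting pool entry layout (cf. sizeof_pmap in pmap_init):
	 *	| struct pmap | regmap[NUREG] | reg_ptps[ncpu] | reg_ptps_pa[ncpu] |
	 */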
3861: addr = (u_long)pm + ALIGN(sizeof(struct pmap));
3862: pm->pm_regmap = (void *)addr;
3863: addr += ALIGN(NUREG * sizeof(struct regmap));
3864: pm->pm_reg_ptps = (int **)addr;
3865: addr += ncpu * sizeof(int *);
3866: pm->pm_reg_ptps_pa = (int *)addr;
3867:
3868: qzero((caddr_t)pm->pm_regmap, NUREG * sizeof(struct regmap));
3869:
1.199.4.11 thorpej 3870: /* pm->pm_ctx = NULL; // already done */
1.199.4.2 pk 3871: simple_lock_init(&pm->pm_lock);
3872:
1.199.4.9 nathanw 3873: if (CPU_ISSUN4 || CPU_ISSUN4C) {
1.199.4.2 pk 3874: TAILQ_INIT(&pm->pm_seglist);
3875: #if defined(SUN4_MMU3L)
3876: TAILQ_INIT(&pm->pm_reglist);
3877: if (HASSUN4_MMU3L) {
3878: int i;
3879: for (i = NUREG; --i >= 0;)
3880: pm->pm_regmap[i].rg_smeg = reginval;
3881: }
3882: #endif
1.199.4.11 thorpej 3883: /* pm->pm_gap_start = 0; // already done */
1.199.4.2 pk 3884: pm->pm_gap_end = VA_VREG(VM_MAXUSER_ADDRESS);
3885: }
1.199.4.9 nathanw 3886: #if defined(SUN4M) || defined(SUN4D)
1.199.4.2 pk 3887: else {
3888: int i, n;
3889:
3890: /*
3891: * We must allocate and initialize hardware-readable (MMU)
3892: * pagetables. We must also map the kernel regions into this
3893: * pmap's pagetables, so that we can access the kernel from
3894: * this user context.
3895: */
3896: #if defined(MULTIPROCESSOR)
3897: for (n = 0; n < ncpu; n++)
3898: #else
3899: n = 0;
3900: #endif
3901: {
3902: int *upt, *kpt;
3903:
3904: upt = pool_get(&L1_pool, flags);
3905: pm->pm_reg_ptps[n] = upt;
3906: pm->pm_reg_ptps_pa[n] = VA2PA((char *)upt);
3907:
3908: /* Invalidate user space regions */
3909: for (i = 0; i < NUREG; i++)
3910: setpgt4m(upt++, SRMMU_TEINVALID);
3911:
3912: /* Copy kernel regions */
3913: kpt = &pmap_kernel()->pm_reg_ptps[n][VA_VREG(KERNBASE)];
1.199.4.14 thorpej 3914: for (i = 0; i < NKREG; i++)
3915: setpgt4m(upt++, kpt[i]);
1.199.4.2 pk 3916: }
3917: }
3918: #endif
3919:
3920: return (0);
3921: }
3922:
3923: void
3924: pmap_pmap_pool_dtor(void *arg, void *object)
3925: {
3926: struct pmap *pm = object;
3927: union ctxinfo *c;
3928: int s = splvm(); /* paranoia */
3929:
3930: #ifdef DEBUG
3931: if (pmapdebug & PDB_DESTROY)
3932: printf("pmap_pmap_pool_dtor(%p)\n", pm);
3933: #endif
3934:
3935: if ((c = pm->pm_ctx) != NULL) {
3936: ctx_free(pm);
3937: }
3938:
1.199.4.9 nathanw 3939: #if defined(SUN4M) || defined(SUN4D)
3940: if (CPU_HAS_SRMMU) {
1.199.4.2 pk 3941: int n;
3942:
3943: #if defined(MULTIPROCESSOR)
3944: for (n = 0; n < ncpu; n++)
3945: #else
3946: n = 0;
3947: #endif
3948: {
3949: int *pt = pm->pm_reg_ptps[n];
3950: pm->pm_reg_ptps[n] = NULL;
3951: pm->pm_reg_ptps_pa[n] = 0;
3952: pool_put(&L1_pool, pt);
3953: }
3954: }
3955: #endif
3956: splx(s);
3957: }
3958:
3959: /*
3960: * Create and return a physical map.
3961: */
3962: struct pmap *
3963: pmap_create()
3964: {
3965: struct pmap *pm;
3966:
3967: pm = pool_cache_get(&pmap_pmap_pool_cache, PR_WAITOK);
3968: pm->pm_refcount = 1;
3969: #ifdef DEBUG
3970: if (pmapdebug & PDB_CREATE)
1.199.4.17! martin 3971: printf("pmap_create[%d]: created %p\n", cpu_number(), pm);
1.199.4.2 pk 3972: pmap_quiet_check(pm);
3973: #endif
3974: return (pm);
3975: }
3976:
3977: /*
3978: * Retire the given pmap from service.
3979: * Should only be called if the map contains no valid mappings.
3980: */
3981: void
3982: pmap_destroy(pm)
3983: struct pmap *pm;
3984: {
3985: int count;
3986:
3987: #ifdef DEBUG
3988: if (pmapdebug & PDB_DESTROY)
1.199.4.17! martin 3989: printf("pmap_destroy[%d](%p)\n", cpu_number(), pm);
1.199.4.2 pk 3990: #endif
3991: simple_lock(&pm->pm_lock);
3992: count = --pm->pm_refcount;
3993: simple_unlock(&pm->pm_lock);
3994: if (count == 0) {
1.199.4.11 thorpej 3995: if (CPU_ISSUN4 || CPU_ISSUN4C) {
3996: /* reset the region gap */
3997: pm->pm_gap_start = 0;
3998: pm->pm_gap_end = VA_VREG(VM_MAXUSER_ADDRESS);
3999: }
1.199.4.2 pk 4000: #ifdef DEBUG
4001: pmap_quiet_check(pm);
4002: #endif
4003: pool_cache_put(&pmap_pmap_pool_cache, pm);
4004: }
4005: }
4006:
4007: /*
4008: * Add a reference to the given pmap.
4009: */
4010: void
4011: pmap_reference(pm)
4012: struct pmap *pm;
4013: {
4014: simple_lock(&pm->pm_lock);
4015: pm->pm_refcount++;
4016: simple_unlock(&pm->pm_lock);
4017: }
4018:
4019: /*
4020: * Remove the given range of mapping entries.
4021: * The starting and ending addresses are already rounded to pages.
4022: * Sheer lunacy: pmap_remove is often asked to remove nonexistent
4023: * mappings.
4024: */
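/*
 * The work is chunked one virtual segment at a time so that the
 * segment-granular MMU and cache operations in pmap_rmk/pmap_rmu
 * need to be applied at most once per segment.
 */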
4025: void
4026: pmap_remove(pm, va, endva)
4027: struct pmap *pm;
4028: vaddr_t va, endva;
4029: {
4030: vaddr_t nva;
4031: int vr, vs, s, ctx;
4032: void (*rm)(struct pmap *, vaddr_t, vaddr_t, int, int);
4033:
4034: #ifdef DEBUG
4035: if (pmapdebug & PDB_REMOVE)
1.199.4.17! martin 4036: printf("pmap_remove[%d](%p, 0x%lx, 0x%lx)\n",
! 4037: cpu_number(), pm, va, endva);
1.199.4.2 pk 4038: #endif
4039:
4040: if (pm == pmap_kernel()) {
4041: /*
4042: * Removing from kernel address space.
4043: */
4044: rm = pmap_rmk;
4045: } else {
4046: /*
4047: * Removing from user address space.
4048: */
4049: write_user_windows();
4050: rm = pmap_rmu;
4051: }
4052:
4053: ctx = getcontext();
4054: s = splvm(); /* XXX conservative */
4055: simple_lock(&pm->pm_lock);
4056: for (; va < endva; va = nva) {
4057: /* do one virtual segment at a time */
4058: vr = VA_VREG(va);
4059: vs = VA_VSEG(va);
4060: nva = VSTOVA(vr, vs + 1);
4061: if (nva == 0 || nva > endva)
4062: nva = endva;
4063: if (pm->pm_regmap[vr].rg_nsegmap != 0)
4064: (*rm)(pm, va, nva, vr, vs);
4065: }
4066: simple_unlock(&pm->pm_lock);
4067: splx(s);
4068: setcontext(ctx);
4069: }
4070:
4071: /*
4072: * The following magic number was chosen because:
4073: * 1. It is the same amount of work to cache_flush_page 4 pages
4074: * as to cache_flush_segment 1 segment (so at 4 the cost of
4075: * flush is the same).
4076: * 2. Flushing extra pages is bad (causes cache not to work).
4077: * 3. The current code, which malloc()s 5 pages for each process
4078: * for a user vmspace/pmap, almost never touches all 5 of those
4079: * pages.
4080: */
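/*
 * Example: with PMAP_RMK_MAGIC == 5, removing 8 pages from a segment
 * does a single cache_flush_segment(), while removing 3 pages flushes
 * at most those 3 pages individually.
 */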
4081: #if 0
4082: #define PMAP_RMK_MAGIC (cacheinfo.c_hwflush?5:64) /* if > magic, use cache_flush_segment */
4083: #else
4084: #define PMAP_RMK_MAGIC 5 /* if > magic, use cache_flush_segment */
4085: #endif
4086:
4087: /*
4088: * Remove a range contained within a single segment.
4089: * These are egregiously complicated routines.
4090: */
4091:
4092: #if defined(SUN4) || defined(SUN4C)
4093:
4094: /* remove from kernel */
4095: /*static*/ void
4096: pmap_rmk4_4c(pm, va, endva, vr, vs)
4097: struct pmap *pm;
4098: vaddr_t va, endva;
4099: int vr, vs;
4100: {
4101: int i, tpte, perpage, npg;
4102: struct pvlist *pv;
4103: int nleft, pmeg;
4104: struct regmap *rp;
4105: struct segmap *sp;
4106:
4107: rp = &pm->pm_regmap[vr];
4108: sp = &rp->rg_segmap[vs];
4109:
4110: if (rp->rg_nsegmap == 0)
4111: return;
4112: if ((nleft = sp->sg_npte) == 0)
4113: return;
4114: pmeg = sp->sg_pmeg;
4115: setcontext4(0);
4116: /* decide how to flush cache */
4117: npg = (endva - va) >> PGSHIFT;
4118: if (npg > PMAP_RMK_MAGIC) {
4119: /* flush the whole segment */
4120: perpage = 0;
1.199.4.12 thorpej 4121: cache_flush_segment(vr, vs, 0);
1.199.4.2 pk 4122: } else {
4123: /* flush each page individually; some never need flushing */
4124: perpage = (CACHEINFO.c_vactype != VAC_NONE);
4125: }
4126: while (va < endva) {
4127: tpte = getpte4(va);
4128: if ((tpte & PG_V) == 0) {
4129: va += NBPG;
4130: continue;
4131: }
4132: if ((tpte & PG_TYPE) == PG_OBMEM) {
4133: u_int pfn = tpte & PG_PFNUM;
4134: /* if cacheable, flush page as needed */
4135: if (perpage && (tpte & PG_NC) == 0)
1.199.4.12 thorpej 4136: cache_flush_page(va, 0);
1.199.4.2 pk 4137: if ((pv = pvhead(pfn)) != NULL) {
4138: pv->pv_flags |= MR4_4C(tpte);
4139: pv_unlink4_4c(pv, pm, va);
4140: }
4141: }
4142: nleft--;
4143: #ifdef DIAGNOSTIC
4144: if (nleft < 0)
4145: panic("pmap_rmk: too many PTEs in segment; "
4146: "va 0x%lx; endva 0x%lx", va, endva);
4147: #endif
4148: setpte4(va, 0);
4149: va += NBPG;
4150: }
4151:
4152: /*
4153: * If the segment is all gone, remove it from everyone and
4154: * free the MMU entry.
4155: */
4156: if ((sp->sg_npte = nleft) == 0) {
4157: va = VSTOVA(vr,vs); /* retract */
4158: #if defined(SUN4_MMU3L)
4159: if (HASSUN4_MMU3L)
4160: setsegmap(va, seginval);
4161: else
4162: #endif
4163: for (i = ncontext; --i >= 0;) {
4164: setcontext4(i);
4165: setsegmap(va, seginval);
4166: }
4167: me_free(pm, pmeg);
4168: if (--rp->rg_nsegmap == 0) {
4169: #if defined(SUN4_MMU3L)
4170: if (HASSUN4_MMU3L) {
4171: for (i = ncontext; --i >= 0;) {
4172: setcontext4(i);
4173: setregmap(va, reginval);
4174: }
4175: /* note: context is 0 */
4176: region_free(pm, rp->rg_smeg);
4177: }
4178: #endif
4179: }
4180: }
4181: }
4182:
4183: #endif /* sun4, sun4c */
4184:
1.199.4.9 nathanw 4185: #if defined(SUN4M) || defined(SUN4D) /* SRMMU version of pmap_rmk */
1.199.4.2 pk 4186: /* remove from kernel (4m) */
4187: /* Note: pm is already locked */
4188: /*static*/ void
4189: pmap_rmk4m(pm, va, endva, vr, vs)
4190: struct pmap *pm;
4191: vaddr_t va, endva;
4192: int vr, vs;
4193: {
4194: int tpte, perpage, npg;
4195: struct pvlist *pv;
4196: int nleft;
4197: struct regmap *rp;
4198: struct segmap *sp;
4199:
4200: rp = &pm->pm_regmap[vr];
4201: sp = &rp->rg_segmap[vs];
4202: if (rp->rg_nsegmap == 0)
4203: return;
4204: if ((nleft = sp->sg_npte) == 0)
4205: return;
4206: /* decide how to flush cache */
4207: npg = (endva - va) >> PGSHIFT;
4208: if (npg > PMAP_RMK_MAGIC) {
4209: /* flush the whole segment */
4210: perpage = 0;
4211: if (CACHEINFO.c_vactype != VAC_NONE)
1.199.4.12 thorpej 4212: cache_flush_segment(vr, vs, 0);
1.199.4.2 pk 4213: } else {
4214: /* flush each page individually; some never need flushing */
4215: perpage = (CACHEINFO.c_vactype != VAC_NONE);
4216: }
4217: while (va < endva) {
4218: tpte = sp->sg_pte[VA_SUN4M_VPG(va)];
4219: if ((tpte & SRMMU_TETYPE) != SRMMU_TEPTE) {
4220: #ifdef DEBUG
4221: if ((pmapdebug & PDB_SANITYCHK) &&
4222: (getpte4m(va) & SRMMU_TETYPE) == SRMMU_TEPTE)
4223: panic("pmap_rmk: Spurious kTLB entry for 0x%lx",
4224: va);
4225: #endif
4226: va += NBPG;
4227: continue;
4228: }
4229: if ((tpte & SRMMU_PGTYPE) == PG_SUN4M_OBMEM) {
4230: u_int pfn = (tpte & SRMMU_PPNMASK) >> SRMMU_PPNSHIFT;
4231: /* if cacheable, flush page as needed */
4232: if (perpage && (tpte & SRMMU_PG_C))
1.199.4.12 thorpej 4233: cache_flush_page(va, 0);
1.199.4.2 pk 4234: if ((pv = pvhead(pfn)) != NULL) {
4235: pv->pv_flags |= MR4M(tpte);
4236: pv_unlink4m(pv, pm, va);
4237: }
4238: }
4239: setpgt4m_va(va, &sp->sg_pte[VA_SUN4M_VPG(va)],
1.199.4.14 thorpej 4240: SRMMU_TEINVALID, 1, 0);
1.199.4.2 pk 4241: nleft--;
4242: #ifdef DIAGNOSTIC
4243: if (nleft < 0)
4244: panic("pmap_rmk: too many PTEs in segment; "
4245: "va 0x%lx; endva 0x%lx", va, endva);
4246: #endif
4247: va += NBPG;
4248: }
4249:
4250: sp->sg_npte = nleft;
4251: }
1.199.4.9 nathanw 4252: #endif /* SUN4M || SUN4D */
1.199.4.2 pk 4253:
4254: /*
4255:  * Just like PMAP_RMK_MAGIC, but with a different threshold.
4256: * Note that this may well deserve further tuning work.
4257: */
4258: #if 0
4259: #define PMAP_RMU_MAGIC (cacheinfo.c_hwflush?4:64) /* if > magic, use cache_flush_segment */
4260: #else
4261: #define PMAP_RMU_MAGIC 4 /* if > magic, use cache_flush_segment */
4262: #endif
4263:
4264: #if defined(SUN4) || defined(SUN4C)
4265:
4266: /* remove from user */
4267: /*static*/ void
4268: pmap_rmu4_4c(pm, va, endva, vr, vs)
4269: struct pmap *pm;
4270: vaddr_t va, endva;
4271: int vr, vs;
4272: {
4273: int *pte0, pteva, tpte, perpage, npg;
4274: struct pvlist *pv;
4275: int nleft, pmeg;
4276: struct regmap *rp;
4277: struct segmap *sp;
4278:
4279: rp = &pm->pm_regmap[vr];
4280: if (rp->rg_nsegmap == 0)
4281: return;
4282: sp = &rp->rg_segmap[vs];
4283: if ((nleft = sp->sg_npte) == 0)
4284: return;
4285: pmeg = sp->sg_pmeg;
4286: pte0 = sp->sg_pte;
4287: if (pmeg == seginval) {
4288: int *pte = pte0 + VA_VPG(va);
4289:
4290: /*
4291: * PTEs are not in MMU. Just invalidate software copies.
4292: */
4293: for (; va < endva; pte++, va += NBPG) {
4294: tpte = *pte;
4295: if ((tpte & PG_V) == 0) {
4296: /* nothing to remove (braindead VM layer) */
4297: continue;
4298: }
4299: if ((tpte & PG_TYPE) == PG_OBMEM) {
4300: u_int pfn = tpte & PG_PFNUM;
4301: if ((pv = pvhead(pfn)) != NULL)
4302: pv_unlink4_4c(pv, pm, va);
4303: }
4304: nleft--;
4305: #ifdef DIAGNOSTIC
4306: if (nleft < 0)
4307: panic("pmap_rmu: too many PTEs in segment; "
4308: "va 0x%lx; endva 0x%lx", va, endva);
4309: #endif
4310: *pte = 0;
4311: }
4312: if ((sp->sg_npte = nleft) == 0) {
4313: free(pte0, M_VMPMAP);
4314: sp->sg_pte = NULL;
4315: if (--rp->rg_nsegmap == 0) {
4316: free(rp->rg_segmap, M_VMPMAP);
4317: rp->rg_segmap = NULL;
4318: #if defined(SUN4_MMU3L)
4319: if (HASSUN4_MMU3L && rp->rg_smeg != reginval) {
4320: if (pm->pm_ctx) {
4321: setcontext4(pm->pm_ctxnum);
4322: setregmap(va, reginval);
4323: } else
4324: setcontext4(0);
4325: region_free(pm, rp->rg_smeg);
4326: }
4327: #endif
4328: }
4329: }
4330: return;
4331: }
4332:
4333: /*
4334: * PTEs are in MMU. Invalidate in hardware, update ref &
4335: * mod bits, and flush cache if required.
4336: */
4337: if (CTX_USABLE(pm,rp)) {
4338: /* process has a context, must flush cache */
4339: npg = (endva - va) >> PGSHIFT;
4340: setcontext4(pm->pm_ctxnum);
4341: if (npg > PMAP_RMU_MAGIC) {
4342: perpage = 0; /* flush the whole segment */
1.199.4.12 thorpej 4343: cache_flush_segment(vr, vs, pm->pm_ctxnum);
1.199.4.2 pk 4344: } else
4345: perpage = (CACHEINFO.c_vactype != VAC_NONE);
4346: pteva = va;
4347: } else {
4348: /* no context, use context 0; cache flush unnecessary */
4349: setcontext4(0);
4350: if (HASSUN4_MMU3L)
4351: setregmap(0, tregion);
4352: /* XXX use per-cpu pteva? */
4353: setsegmap(0, pmeg);
4354: pteva = VA_VPG(va) << PGSHIFT;
4355: perpage = 0;
4356: }
4357: for (; va < endva; pteva += NBPG, va += NBPG) {
4358: tpte = getpte4(pteva);
4359: if ((tpte & PG_V) == 0)
4360: continue;
4361: if ((tpte & PG_TYPE) == PG_OBMEM) {
4362: u_int pfn = tpte & PG_PFNUM;
4363: /* if cacheable, flush page as needed */
4364: if (perpage && (tpte & PG_NC) == 0)
1.199.4.12 thorpej 4365: cache_flush_page(va, pm->pm_ctxnum);
1.199.4.2 pk 4366: if ((pv = pvhead(pfn)) != NULL) {
4367: pv->pv_flags |= MR4_4C(tpte);
4368: pv_unlink4_4c(pv, pm, va);
4369: }
4370: }
4371: nleft--;
4372: #ifdef DIAGNOSTIC
4373: if (nleft < 0)
4374: panic("pmap_rmu: too many PTEs in segment; "
4375: "va 0x%lx; endva 0x%lx; pmeg %d", va, endva, pmeg);
4376: #endif
4377: setpte4(pteva, 0);
4378: pte0[VA_VPG(pteva)] = 0;
4379: }
4380:
4381: /*
4382: * If the segment is all gone, and the context is loaded, give
4383: * the segment back.
4384: */
4385:
4386: sp->sg_npte = nleft;
4387: if (nleft == 0) {
4388: va = VSTOVA(vr,vs);
4389: if (CTX_USABLE(pm,rp))
4390: setsegmap(va, seginval);
4391: else if (HASSUN4_MMU3L && rp->rg_smeg != reginval) {
4392: /* note: context already set earlier */
4393: setregmap(0, rp->rg_smeg);
4394: setsegmap(vs << SGSHIFT, seginval);
4395: }
4396: free(pte0, M_VMPMAP);
4397: sp->sg_pte = NULL;
4398: me_free(pm, pmeg);
4399:
4400: if (--rp->rg_nsegmap == 0) {
4401: free(rp->rg_segmap, M_VMPMAP);
4402: rp->rg_segmap = NULL;
4403: GAP_WIDEN(pm,vr);
4404:
4405: #if defined(SUN4_MMU3L)
4406: if (HASSUN4_MMU3L && rp->rg_smeg != reginval) {
4407: /* note: context already set */
4408: if (pm->pm_ctx)
4409: setregmap(va, reginval);
4410: region_free(pm, rp->rg_smeg);
4411: }
4412: #endif
4413: }
4414: }
4415: }
4416:
4417: #endif /* sun4,4c */
4418:
1.199.4.9 nathanw 4419: #if defined(SUN4M) || defined(SUN4D) /* SRMMU version of pmap_rmu */
1.199.4.2 pk 4420: /* remove from user */
4421: /* Note: pm is already locked */
4422: /*static*/ void
4423: pmap_rmu4m(pm, va, endva, vr, vs)
4424: struct pmap *pm;
4425: vaddr_t va, endva;
4426: int vr, vs;
4427: {
4428: int *pte0, perpage, npg;
4429: struct pvlist *pv;
4430: int nleft;
4431: struct regmap *rp;
4432: struct segmap *sp;
4433:
4434: rp = &pm->pm_regmap[vr];
4435: if (rp->rg_nsegmap == 0)
4436: return;
4437: sp = &rp->rg_segmap[vs];
4438: if ((nleft = sp->sg_npte) == 0)
4439: return;
4440: pte0 = sp->sg_pte;
4441:
4442: /*
4443: * Invalidate PTE in MMU pagetables. Flush cache if necessary.
4444: */
4445: if (pm->pm_ctx) {
4446: /* process has a context, must flush cache */
4447: if (CACHEINFO.c_vactype != VAC_NONE) {
4448: npg = (endva - va) >> PGSHIFT;
4449: if (npg > PMAP_RMU_MAGIC) {
4450: perpage = 0; /* flush the whole segment */
1.199.4.12 thorpej 4451: cache_flush_segment(vr, vs, pm->pm_ctxnum);
1.199.4.2 pk 4452: } else
4453: perpage = 1;
4454: } else
4455: perpage = 0;
4456: } else {
4457: /* no context; cache flush unnecessary */
4458: perpage = 0;
4459: }
4460: for (; va < endva; va += NBPG) {
4461: int tpte;
4462:
4463: tpte = pte0[VA_SUN4M_VPG(va)];
4464:
4465: if ((tpte & SRMMU_TETYPE) != SRMMU_TEPTE) {
4466: #ifdef DEBUG
4467: if ((pmapdebug & PDB_SANITYCHK) &&
4468: pm->pm_ctx &&
4469: 		    (getpte4m(va) & SRMMU_TETYPE) == SRMMU_TEPTE)
4470: panic("pmap_rmu: Spurious uTLB entry for 0x%lx",
4471: va);
4472: #endif
4473: continue;
4474: }
4475:
4476: if ((tpte & SRMMU_PGTYPE) == PG_SUN4M_OBMEM) {
4477: u_int pfn = (tpte & SRMMU_PPNMASK) >> SRMMU_PPNSHIFT;
4478: /* if cacheable, flush page as needed */
4479: if (perpage && (tpte & SRMMU_PG_C))
1.199.4.12 thorpej 4480: cache_flush_page(va, pm->pm_ctxnum);
1.199.4.2 pk 4481: if ((pv = pvhead(pfn)) != NULL) {
4482: pv->pv_flags |= MR4M(tpte);
4483: pv_unlink4m(pv, pm, va);
4484: }
4485: }
4486: nleft--;
4487: #ifdef DIAGNOSTIC
4488: if (nleft < 0)
4489: panic("pmap_rmu: too many PTEs in segment; "
4490: "va 0x%lx; endva 0x%lx", va, endva);
4491: #endif
4492: setpgt4m_va(va, &pte0[VA_SUN4M_VPG(va)], SRMMU_TEINVALID,
1.199.4.14 thorpej 4493: pm->pm_ctx != NULL, pm->pm_ctxnum);
1.199.4.2 pk 4494: }
4495:
4496: /*
4497: * If the segment is all gone, and the context is loaded, give
4498: * the segment back.
4499: */
4500: if ((sp->sg_npte = nleft) == 0) {
4501: va = VSTOVA(vr,vs);
4502:
4503: if (pm->pm_ctx)
1.199.4.14 thorpej 4504: tlb_flush_segment(va, pm->pm_ctxnum);/* Paranoia? */
4505: setpgt4m_va(va, &rp->rg_seg_ptps[vs], SRMMU_TEINVALID, 0,
4506: pm->pm_ctxnum);
1.199.4.2 pk 4507: sp->sg_pte = NULL;
4508: pool_put(&L23_pool, pte0);
4509:
4510: if (--rp->rg_nsegmap == 0) {
4511: int n;
4512:
4513: if (pm->pm_ctx)
1.199.4.14 thorpej 4514: tlb_flush_region(va, pm->pm_ctxnum);/* Paranoia? */
1.199.4.2 pk 4515: #ifdef MULTIPROCESSOR
4516: for (n = 0; n < ncpu; n++)
4517: #else
4518: n = 0;
4519: #endif
4520: {
4521: setpgt4m(&pm->pm_reg_ptps[n][vr],
4522: SRMMU_TEINVALID);
4523: }
4524: free(rp->rg_segmap, M_VMPMAP);
4525: rp->rg_segmap = NULL;
4526: pool_put(&L23_pool, rp->rg_seg_ptps);
4527: }
4528: }
4529: }
1.199.4.9 nathanw 4530: #endif /* SUN4M || SUN4D */
1.199.4.2 pk 4531:
4532: /*
4533: * Lower (make more strict) the protection on the specified
4534: * physical page.
4535: *
4536: * There are only two cases: either the protection is going to 0
4537:  * (in which case we do the dirty work here), or it is going
4538:  * to read-only (in which case pv_changepte does the trick).
4539: */
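/*
 * In VM_PROT terms: prot == VM_PROT_NONE tears down every mapping of
 * the page right here, while a read-only prot merely clears the write
 * enable bit in each mapping via pv_changepte.
 */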
4540:
4541: #if defined(SUN4) || defined(SUN4C)
4542: void
4543: pmap_page_protect4_4c(pg, prot)
4544: struct vm_page *pg;
4545: vm_prot_t prot;
4546: {
4547: struct pvlist *pv, *pv0, *npv;
4548: struct pmap *pm;
4549: int va, vr, vs, pteva, tpte;
4550: int flags, nleft, i, s, ctx;
4551: struct regmap *rp;
4552: struct segmap *sp;
4553: paddr_t pa = VM_PAGE_TO_PHYS(pg);
4554:
4555: #ifdef DEBUG
4556: if ((pmapdebug & PDB_CHANGEPROT) ||
4557: (pmapdebug & PDB_REMOVE && prot == VM_PROT_NONE))
4558: printf("pmap_page_protect(0x%lx, 0x%x)\n", pa, prot);
4559: #endif
4560: /*
4561: * Skip unmanaged pages.
4562: */
4563: if ((pv = pvhead(atop(pa))) == NULL)
4564: return;
4565:
4566: write_user_windows(); /* paranoia */
4567: if (prot & VM_PROT_READ) {
4568: pv_changepte4_4c(pv, 0, PG_W);
4569: return;
4570: }
4571:
4572: /*
4573: * Remove all access to all people talking to this page.
4574: * Walk down PV list, removing all mappings.
4575: * The logic is much like that for pmap_remove,
4576: * but we know we are removing exactly one page.
4577: */
4578: s = splvm();
4579: if (pv->pv_pmap == NULL) {
4580: splx(s);
4581: return;
4582: }
4583: ctx = getcontext4();
4584: pv0 = pv;
4585:
4586: /* This pv head will become empty, so clear caching state flags */
4587: flags = pv->pv_flags & ~(PV_NC|PV_ANC);
4588:
4589: while (pv != NULL) {
4590: pm = pv->pv_pmap;
4591: va = pv->pv_va;
4592: vr = VA_VREG(va);
4593: vs = VA_VSEG(va);
4594: rp = &pm->pm_regmap[vr];
4595: sp = &rp->rg_segmap[vs];
4596: nleft = sp->sg_npte;
4597: sp->sg_npte = --nleft;
4598:
4599: if (sp->sg_pmeg == seginval) {
4600: /* Definitely not a kernel map */
4601: if (nleft) {
4602: sp->sg_pte[VA_VPG(va)] = 0;
4603: } else {
4604: free(sp->sg_pte, M_VMPMAP);
4605: sp->sg_pte = NULL;
4606: if (--rp->rg_nsegmap == 0) {
4607: free(rp->rg_segmap, M_VMPMAP);
4608: rp->rg_segmap = NULL;
4609: GAP_WIDEN(pm,vr);
4610: #if defined(SUN4_MMU3L)
4611: if (HASSUN4_MMU3L && rp->rg_smeg != reginval) {
4612: if (pm->pm_ctx) {
4613: setcontext4(pm->pm_ctxnum);
4614: setregmap(va, reginval);
4615: } else
4616: setcontext4(0);
4617: region_free(pm, rp->rg_smeg);
4618: }
4619: #endif
4620: }
4621: }
4622: goto nextpv;
4623: }
4624:
4625: if (CTX_USABLE(pm,rp)) {
4626: setcontext4(pm->pm_ctxnum);
4627: pteva = va;
1.199.4.12 thorpej 4628: cache_flush_page(va, pm->pm_ctxnum);
1.199.4.2 pk 4629: } else {
4630: setcontext4(0);
4631: /* XXX use per-cpu pteva? */
4632: if (HASSUN4_MMU3L)
4633: setregmap(0, tregion);
4634: setsegmap(0, sp->sg_pmeg);
4635: pteva = VA_VPG(va) << PGSHIFT;
4636: }
4637:
4638: tpte = getpte4(pteva);
4639: #ifdef DIAGNOSTIC
4640: if ((tpte & PG_V) == 0)
4641: panic("pmap_page_protect !PG_V: "
4642: "ctx %d, va 0x%x, pte 0x%x",
4643: pm->pm_ctxnum, va, tpte);
4644: #endif
4645: flags |= MR4_4C(tpte);
4646:
4647: if (nleft) {
4648: setpte4(pteva, 0);
4649: if (sp->sg_pte != NULL)
4650: sp->sg_pte[VA_VPG(pteva)] = 0;
4651: goto nextpv;
4652: }
4653:
4654: /* Entire segment is gone */
4655: if (pm == pmap_kernel()) {
4656: #if defined(SUN4_MMU3L)
4657: if (!HASSUN4_MMU3L)
4658: #endif
4659: for (i = ncontext; --i >= 0;) {
4660: setcontext4(i);
4661: setsegmap(va, seginval);
4662: }
4663: me_free(pm, sp->sg_pmeg);
4664: if (--rp->rg_nsegmap == 0) {
4665: #if defined(SUN4_MMU3L)
4666: if (HASSUN4_MMU3L) {
4667: for (i = ncontext; --i >= 0;) {
4668: setcontext4(i);
4669: setregmap(va, reginval);
4670: }
4671: region_free(pm, rp->rg_smeg);
4672: }
4673: #endif
4674: }
4675: } else {
4676: if (CTX_USABLE(pm,rp))
4677: /* `pteva'; we might be using tregion */
4678: setsegmap(pteva, seginval);
4679: #if defined(SUN4_MMU3L)
4680: else if (HASSUN4_MMU3L &&
4681: rp->rg_smeg != reginval) {
4682: /* note: context already set earlier */
4683: setregmap(0, rp->rg_smeg);
4684: setsegmap(vs << SGSHIFT, seginval);
4685: }
4686: #endif
4687: free(sp->sg_pte, M_VMPMAP);
4688: sp->sg_pte = NULL;
4689: me_free(pm, sp->sg_pmeg);
4690:
4691: if (--rp->rg_nsegmap == 0) {
4692: #if defined(SUN4_MMU3L)
4693: if (HASSUN4_MMU3L &&
4694: rp->rg_smeg != reginval) {
4695: if (pm->pm_ctx)
4696: setregmap(va, reginval);
4697: region_free(pm, rp->rg_smeg);
4698: }
4699: #endif
4700: free(rp->rg_segmap, M_VMPMAP);
4701: rp->rg_segmap = NULL;
4702: GAP_WIDEN(pm,vr);
4703: }
4704: }
4705:
4706: nextpv:
4707: npv = pv->pv_next;
4708: if (pv != pv0)
4709: pool_put(&pv_pool, pv);
4710: pv = npv;
4711: }
4712:
4713: /* Finally, update pv head */
4714: pv0->pv_pmap = NULL;
4715: pv0->pv_next = NULL;
4716: pv0->pv_flags = flags;
4717: setcontext4(ctx);
4718: splx(s);
4719: }
4720:
4721: /*
4722: * Lower (make more strict) the protection on the specified
4723: * range of this pmap.
4724: *
4725: * There are only two cases: either the protection is going to 0
4726: * (in which case we call pmap_remove to do the dirty work), or
4727: * it is going from read/write to read-only. The latter is
4728: * fairly easy.
4729: */
4730: void
4731: pmap_protect4_4c(pm, sva, eva, prot)
4732: struct pmap *pm;
4733: vaddr_t sva, eva;
4734: vm_prot_t prot;
4735: {
4736: int va, nva, vr, vs;
4737: int s, ctx;
4738: struct regmap *rp;
4739: struct segmap *sp;
4740:
4741: if ((prot & VM_PROT_READ) == 0) {
4742: pmap_remove(pm, sva, eva);
4743: return;
4744: }
4745:
4746: write_user_windows();
4747: ctx = getcontext4();
4748: s = splvm();
4749: simple_lock(&pm->pm_lock);
4750: for (va = sva; va < eva;) {
4751: vr = VA_VREG(va);
4752: vs = VA_VSEG(va);
4753: rp = &pm->pm_regmap[vr];
4754: nva = VSTOVA(vr,vs + 1);
4755: if (nva > eva)
4756: nva = eva;
4757: if (rp->rg_nsegmap == 0) {
4758: va = nva;
4759: continue;
4760: }
4761: #ifdef DEBUG
4762: if (rp->rg_segmap == NULL)
4763: panic("pmap_protect: no segments");
4764: #endif
4765: sp = &rp->rg_segmap[vs];
4766: if (sp->sg_npte == 0) {
4767: va = nva;
4768: continue;
4769: }
4770: #ifdef DEBUG
4771: if (pm != pmap_kernel() && sp->sg_pte == NULL)
4772: panic("pmap_protect: no pages");
4773: #endif
4774: if (sp->sg_pmeg == seginval) {
4775: int *pte = &sp->sg_pte[VA_VPG(va)];
4776:
4777: /* not in MMU; just clear PG_W from core copies */
4778: for (; va < nva; va += NBPG)
4779: *pte++ &= ~PG_W;
4780: } else {
4781: /* in MMU: take away write bits from MMU PTEs */
4782: if (CTX_USABLE(pm,rp)) {
4783: int tpte;
4784:
4785: /*
4786: * Flush cache so that any existing cache
4787: * tags are updated. This is really only
4788: * needed for PTEs that lose PG_W.
4789: */
4790: setcontext4(pm->pm_ctxnum);
4791: for (; va < nva; va += NBPG) {
4792: tpte = getpte4(va);
4793: pmap_stats.ps_npg_prot_all++;
4794: if ((tpte & (PG_W|PG_TYPE)) ==
4795: (PG_W|PG_OBMEM)) {
4796: pmap_stats.ps_npg_prot_actual++;
1.199.4.12 thorpej 4797: cache_flush_page(va, pm->pm_ctxnum);
1.199.4.2 pk 4798: setpte4(va, tpte & ~PG_W);
4799: }
4800: }
4801: } else {
4802: int pteva;
4803:
4804: /*
4805: * No context, hence not cached;
4806: * just update PTEs.
4807: */
4808: setcontext4(0);
4809: /* XXX use per-cpu pteva? */
4810: if (HASSUN4_MMU3L)
4811: setregmap(0, tregion);
4812: setsegmap(0, sp->sg_pmeg);
4813: pteva = VA_VPG(va) << PGSHIFT;
4814: for (; va < nva; pteva += NBPG, va += NBPG)
4815: setpte4(pteva, getpte4(pteva) & ~PG_W);
4816: }
4817: }
4818: }
4819: simple_unlock(&pm->pm_lock);
4820: splx(s);
4821: setcontext4(ctx);
4822: }
4823:
4824: /*
4825: * Change the protection and/or wired status of the given (MI) virtual page.
4826: * XXX: should have separate function (or flag) telling whether only wiring
4827: * is changing.
4828: */
4829: void
4830: pmap_changeprot4_4c(pm, va, prot, wired)
4831: struct pmap *pm;
4832: vaddr_t va;
4833: vm_prot_t prot;
4834: int wired;
4835: {
4836: int vr, vs, tpte, newprot, ctx, s;
4837: struct regmap *rp;
4838: struct segmap *sp;
4839:
4840: #ifdef DEBUG
4841: if (pmapdebug & PDB_CHANGEPROT)
4842: printf("pmap_changeprot(%p, 0x%lx, 0x%x, 0x%x)\n",
4843: pm, va, prot, wired);
4844: #endif
4845:
4846: write_user_windows(); /* paranoia */
4847:
4848: va &= ~(NBPG-1);
4849: if (pm == pmap_kernel())
4850: newprot = prot & VM_PROT_WRITE ? PG_S|PG_W : PG_S;
4851: else
4852: newprot = prot & VM_PROT_WRITE ? PG_W : 0;
4853: vr = VA_VREG(va);
4854: vs = VA_VSEG(va);
4855: s = splvm(); /* conservative */
4856: rp = &pm->pm_regmap[vr];
4857: sp = &rp->rg_segmap[vs];
4858: pmap_stats.ps_changeprots++;
4859:
4860: /* update PTEs in software or hardware */
4861: if (sp->sg_pmeg == seginval) {
4862: int *pte = &sp->sg_pte[VA_VPG(va)];
4863:
4864: /* update in software */
4865: *pte = (*pte & ~PG_PROT) | newprot;
4866: } else {
4867: /* update in hardware */
4868: ctx = getcontext4();
4869: if (CTX_USABLE(pm,rp)) {
4870: /*
4871: * Use current context.
4872: * Flush cache if page has been referenced to
4873: * avoid stale protection bits in the cache tags.
4874: */
4875: setcontext4(pm->pm_ctxnum);
4876: tpte = getpte4(va);
4877: if ((tpte & (PG_U|PG_NC|PG_TYPE)) == (PG_U|PG_OBMEM))
1.199.4.12 thorpej 4878: cache_flush_page(va, pm->pm_ctxnum);
1.199.4.2 pk 4879: } else {
4880: setcontext4(0);
4881: /* XXX use per-cpu va? */
4882: if (HASSUN4_MMU3L)
4883: setregmap(0, tregion);
4884: setsegmap(0, sp->sg_pmeg);
4885: va = VA_VPG(va) << PGSHIFT;
4886: tpte = getpte4(va);
4887: }
4888: tpte = (tpte & ~PG_PROT) | newprot;
4889: setpte4(va, tpte);
4890: setcontext4(ctx);
4891: }
4892: splx(s);
4893: }
4894:
4895: #endif /* sun4, 4c */
4896:
1.199.4.9 nathanw 4897: #if defined(SUN4M) || defined(SUN4D) /* SRMMU version of protection routines above */
1.199.4.2 pk 4898: /*
4899: * Lower (make more strict) the protection on the specified
4900: * physical page.
4901: *
4902: * There are only two cases: either the protection is going to 0
4903: * (in which case we do the dirty work here), or it is going
4904: * to read-only (in which case pv_changepte does the trick).
4905: */
4906: void
4907: pmap_page_protect4m(pg, prot)
4908: struct vm_page *pg;
4909: vm_prot_t prot;
4910: {
4911: struct pvlist *pv, *pv0, *npv;
4912: struct pmap *pm;
4913: int va, vr, vs, tpte;
1.199.4.14 thorpej 4914: int flags, nleft, s;
1.199.4.2 pk 4915: struct regmap *rp;
4916: struct segmap *sp;
4917: paddr_t pa = VM_PAGE_TO_PHYS(pg);
4918:
4919: #ifdef DEBUG
4920: if ((pmapdebug & PDB_CHANGEPROT) ||
4921: (pmapdebug & PDB_REMOVE && prot == VM_PROT_NONE))
1.199.4.17! martin 4922: printf("pmap_page_protect[%d](0x%lx, 0x%x)\n",
! 4923: cpu_number(), pa, prot);
1.199.4.2 pk 4924: #endif
4925: /*
4926: * Skip unmanaged pages, or operations that do not take
4927: * away write permission.
4928: */
4929: if ((pv = pvhead(atop(pa))) == NULL || prot & VM_PROT_WRITE)
4930: return;
4931:
4932: write_user_windows(); /* paranoia */
4933: if (prot & VM_PROT_READ) {
4934: pv_changepte4m(pv, 0, PPROT_WRITE);
4935: return;
4936: }
4937:
4938: /*
4939: * Remove all access to all people talking to this page.
4940: * Walk down PV list, removing all mappings.
4941: * The logic is much like that for pmap_remove,
4942: * but we know we are removing exactly one page.
4943: */
4944: s = splvm();
4945: if (pv->pv_pmap == NULL) {
4946: splx(s);
4947: return;
4948: }
4949: pv0 = pv;
4950:
4951: /* This pv head will become empty, so clear caching state flags */
4952: flags = pv->pv_flags & ~(PV_NC|PV_ANC);
4953:
4954: while (pv != NULL) {
4955:
4956: pm = pv->pv_pmap;
4957: simple_lock(&pm->pm_lock);
4958: va = pv->pv_va;
4959: vr = VA_VREG(va);
4960: vs = VA_VSEG(va);
4961: rp = &pm->pm_regmap[vr];
4962: if (rp->rg_nsegmap == 0)
4963: 			panic("pmap_page_protect: empty vreg");
4964: sp = &rp->rg_segmap[vs];
4965: if ((nleft = sp->sg_npte) == 0)
4966: 			panic("pmap_page_protect: empty vseg");
4967: sp->sg_npte = --nleft;
4968:
4969: /* Invalidate PTE in MMU pagetables. Flush cache if necessary */
4970: if (pm->pm_ctx) {
1.199.4.12 thorpej 4971: cache_flush_page(va, pm->pm_ctxnum);
1.199.4.2 pk 4972: }
4973:
4974: tpte = sp->sg_pte[VA_SUN4M_VPG(va)];
4975: setpgt4m_va(va, &sp->sg_pte[VA_SUN4M_VPG(va)], SRMMU_TEINVALID,
1.199.4.14 thorpej 4976: pm->pm_ctx != NULL, pm->pm_ctxnum);
1.199.4.2 pk 4977:
4978: if ((tpte & SRMMU_TETYPE) != SRMMU_TEPTE)
4979: panic("pmap_page_protect !PG_V");
4980:
4981: flags |= MR4M(tpte);
4982:
4983: if (nleft == 0 && pm != pmap_kernel()) {
4984: /*
4985: * Entire user mode segment is gone
4986: */
4987: if (pm->pm_ctx)
1.199.4.14 thorpej 4988: tlb_flush_segment(va, pm->pm_ctxnum);
4989: setpgt4m_va(va, &rp->rg_seg_ptps[vs], SRMMU_TEINVALID, 0, pm->pm_ctxnum);
1.199.4.2 pk 4990: pool_put(&L23_pool, sp->sg_pte);
4991: sp->sg_pte = NULL;
4992:
4993: if (--rp->rg_nsegmap == 0) {
4994: int n;
4995: if (pm->pm_ctx)
1.199.4.14 thorpej 4996: tlb_flush_region(va, pm->pm_ctxnum);
1.199.4.2 pk 4997:
4998: /*
4999: * Replicate segment de-allocation in each
5000: * CPU's region table.
5001: */
5002: #ifdef MULTIPROCESSOR
5003: for (n = 0; n < ncpu; n++)
5004: #else
5005: n = 0;
5006: #endif
5007: {
5008: setpgt4m(&pm->pm_reg_ptps[n][vr],
5009: SRMMU_TEINVALID);
5010: }
5011: free(rp->rg_segmap, M_VMPMAP);
5012: rp->rg_segmap = NULL;
5013: pool_put(&L23_pool, rp->rg_seg_ptps);
5014: }
5015: }
5016:
5017: npv = pv->pv_next;
5018: if (pv != pv0)
5019: pool_put(&pv_pool, pv);
5020: simple_unlock(&pm->pm_lock);
5021: pv = npv;
5022: }
5023:
5024: /* Finally, update pv head */
5025: pv0->pv_pmap = NULL;
5026: pv0->pv_next = NULL;
5027: pv0->pv_flags = flags;
5028: splx(s);
5029: }
5030:
5031: /*
5032: * Lower (make more strict) the protection on the specified
5033: * range of this pmap.
5034: *
5035: * There are only two cases: either the protection is going to 0
5036: * (in which case we call pmap_remove to do the dirty work), or
5037: * it is going from read/write to read-only. The latter is
5038: * fairly easy.
5039: */
5040: void
5041: pmap_protect4m(pm, sva, eva, prot)
5042: struct pmap *pm;
5043: vaddr_t sva, eva;
5044: vm_prot_t prot;
5045: {
5046: vaddr_t va, nva;
1.199.4.14 thorpej 5047: int s, vr, vs;
1.199.4.2 pk 5048: struct regmap *rp;
5049: struct segmap *sp;
5050:
5051: if ((prot & VM_PROT_READ) == 0) {
5052: pmap_remove(pm, sva, eva);
5053: return;
5054: }
5055:
5056: #ifdef DEBUG
5057: if (pmapdebug & PDB_CHANGEPROT)
1.199.4.17! martin 5058: printf("pmap_protect[%d][curpid %d, ctx %d,%d](%lx, %lx, %x)\n",
! 5059: 		    cpu_number(), curproc==NULL ? -1 : curproc->p_pid,
1.199.4.2 pk 5060: 		    getcontext4m(),
5061: pm->pm_ctx ? pm->pm_ctxnum : -1, sva, eva, prot);
5062: #endif
5063:
5064: write_user_windows();
5065: s = splvm();
5066: simple_lock(&pm->pm_lock);
5067:
5068: for (va = sva; va < eva;) {
5069: vr = VA_VREG(va);
5070: vs = VA_VSEG(va);
5071: rp = &pm->pm_regmap[vr];
5072: nva = VSTOVA(vr,vs + 1);
5073: if (nva == 0) /* XXX */
5074: 			panic("pmap_protect: last segment");	/* cannot happen (why?) */
5075: if (nva > eva)
5076: nva = eva;
5077: if (rp->rg_nsegmap == 0) {
5078: va = nva;
5079: continue;
5080: }
5081: #ifdef DEBUG
5082: if (rp->rg_segmap == NULL)
5083: panic("pmap_protect: no segments");
5084: #endif
5085: sp = &rp->rg_segmap[vs];
5086: if (sp->sg_npte == 0) {
5087: va = nva;
5088: continue;
5089: }
5090: #ifdef DEBUG
5091: if (sp->sg_pte == NULL)
5092: panic("pmap_protect: no pages");
5093: #endif
5094: /*
5095: * pages loaded: take away write bits from MMU PTEs
5096: */
5097:
5098: 		pmap_stats.ps_npg_prot_all += (nva - va) >> PGSHIFT;
5099: for (; va < nva; va += NBPG) {
5100: int tpte;
5101:
5102: tpte = sp->sg_pte[VA_SUN4M_VPG(va)];
5103: /*
5104: * Flush cache so that any existing cache
5105: * tags are updated. This is really only
5106: 			 * needed for PTEs that lose PPROT_WRITE.
5107: */
5108: if ((tpte & (PPROT_WRITE|SRMMU_PGTYPE)) ==
5109: (PPROT_WRITE|PG_SUN4M_OBMEM)) {
5110: pmap_stats.ps_npg_prot_actual++;
5111: if (pm->pm_ctx) {
1.199.4.12 thorpej 5112: cache_flush_page(va, pm->pm_ctxnum);
1.199.4.2 pk 5113: #if !defined(MULTIPROCESSOR)
5114: /* Flush TLB entry */
1.199.4.14 thorpej 5115: tlb_flush_page(va, pm->pm_ctxnum);
1.199.4.2 pk 5116: #endif
5117: }
1.199.4.14 thorpej 5118: updatepte4m(va, &sp->sg_pte[VA_SUN4M_VPG(va)], PPROT_WRITE, 0, pm->pm_ctxnum);
1.199.4.2 pk 5119: }
5120: }
5121: }
5122: simple_unlock(&pm->pm_lock);
5123: splx(s);
5124: }
5125:
5126: /*
5127: * Change the protection and/or wired status of the given (MI) virtual page.
5128: * XXX: should have separate function (or flag) telling whether only wiring
5129: * is changing.
5130: */
5131: void
5132: pmap_changeprot4m(pm, va, prot, wired)
5133: struct pmap *pm;
5134: vaddr_t va;
5135: vm_prot_t prot;
5136: int wired;
5137: {
1.199.4.14 thorpej 5138: int pte, newprot, s;
1.199.4.2 pk 5139: struct regmap *rp;
5140: struct segmap *sp;
5141:
5142: #ifdef DEBUG
5143: if (pmapdebug & PDB_CHANGEPROT)
1.199.4.17! martin 5144: printf("pmap_changeprot[%d](%p, 0x%lx, 0x%x, 0x%x)\n",
! 5145: cpu_number(), pm, va, prot, wired);
1.199.4.2 pk 5146: #endif
5147:
5148: write_user_windows(); /* paranoia */
5149:
5150: va &= ~(NBPG-1);
5151: if (pm == pmap_kernel())
5152: newprot = prot & VM_PROT_WRITE ? PPROT_N_RWX : PPROT_N_RX;
5153: else
5154: newprot = prot & VM_PROT_WRITE ? PPROT_RWX_RWX : PPROT_RX_RX;
5155:
5156: pmap_stats.ps_changeprots++;
5157:
5158: s = splvm(); /* conservative */
5159: simple_lock(&pm->pm_lock);
5160:
5161: rp = &pm->pm_regmap[VA_VREG(va)];
5162: sp = &rp->rg_segmap[VA_VSEG(va)];
5163:
5164: pte = sp->sg_pte[VA_SUN4M_VPG(va)];
5165: if ((pte & SRMMU_PROT_MASK) == newprot) {
5166: /* only wiring changed, and we ignore wiring */
5167: pmap_stats.ps_useless_changeprots++;
5168: goto out;
5169: }
5170:
5171: if (pm->pm_ctx) {
5172: /*
5173: * Use current context.
5174: * Flush cache if page has been referenced to
5175: * avoid stale protection bits in the cache tags.
5176: */
5177:
5178: if ((pte & (SRMMU_PG_C|SRMMU_PGTYPE)) ==
5179: (SRMMU_PG_C|PG_SUN4M_OBMEM))
1.199.4.12 thorpej 5180: cache_flush_page(va, pm->pm_ctxnum);
1.199.4.2 pk 5181: }
5182:
5183: setpgt4m_va(va, &sp->sg_pte[VA_SUN4M_VPG(va)],
5184: (pte & ~SRMMU_PROT_MASK) | newprot,
1.199.4.14 thorpej 5185: pm->pm_ctx != NULL, pm->pm_ctxnum);
1.199.4.2 pk 5186:
5187: out:
5188: simple_unlock(&pm->pm_lock);
5189: splx(s);
5190: }
1.199.4.9 nathanw 5191: #endif /* SUN4M || SUN4D */
1.199.4.2 pk 5192:
5193: /*
5194: * Insert (MI) physical page pa at virtual address va in the given pmap.
5195: * NB: the pa parameter includes type bits PMAP_OBIO, PMAP_NC as necessary.
5196: *
5197: * If pa is not in the `managed' range it will not be `bank mapped'.
5198: * This works during bootstrap only because the first 4MB happens to
5199: * map one-to-one.
5200: *
5201: * There may already be something else there, or we might just be
5202: * changing protections and/or wiring on an existing mapping.
5203: * XXX should have different entry points for changing!
5204: */
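/*
 * For example, a pa of (0x10000 | PMAP_NC) requests an uncached
 * mapping of physical page 0x10000: the type bits are turned into
 * PTE bits (PMAP_T2PTE_4() below) and masked off with PMAP_TNC_4
 * before the page frame number is extracted.
 */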
5205:
5206: #if defined(SUN4) || defined(SUN4C)
5207:
5208: int
5209: pmap_enter4_4c(pm, va, pa, prot, flags)
5210: struct pmap *pm;
5211: vaddr_t va;
5212: paddr_t pa;
5213: vm_prot_t prot;
5214: int flags;
5215: {
5216: struct pvlist *pv;
5217: int pteproto, ctx;
5218: u_int pfn;
5219: int error;
5220:
5221: if (VA_INHOLE(va)) {
5222: #ifdef DEBUG
5223: printf("pmap_enter: pm %p, va 0x%lx, pa 0x%lx: in MMU hole\n",
5224: pm, va, pa);
5225: #endif
5226: return 0;
5227: }
5228:
5229: #ifdef DEBUG
5230: if (pmapdebug & PDB_ENTER)
5231: printf("pmap_enter(%p, 0x%lx, 0x%lx, 0x%x, 0x%x)\n",
5232: pm, va, pa, prot, flags);
5233: #endif
5234:
5235: pteproto = PG_V | PMAP_T2PTE_4(pa);
5236: pa &= ~PMAP_TNC_4;
5237: pfn = atop(pa);
5238: /*
5239: * Set up prototype for new PTE. Cannot set PG_NC from PV_NC yet
5240: * since the pvlist no-cache bit might change as a result of the
5241: * new mapping.
5242: */
5243: if ((pteproto & PG_TYPE) == PG_OBMEM && pmap_initialized) {
5244: pv = pvhead(pfn);
5245: } else {
5246: pv = NULL;
5247: }
5248: pteproto |= pfn & PG_PFNUM;
5249: if (prot & VM_PROT_WRITE)
5250: pteproto |= PG_W;
5251:
5252: ctx = getcontext4();
5253: if (pm == pmap_kernel())
5254: error = pmap_enk4_4c(pm, va, prot, flags, pv, pteproto | PG_S);
5255: else
5256: error = pmap_enu4_4c(pm, va, prot, flags, pv, pteproto);
5257: setcontext4(ctx);
5258: return (error);
5259: }
5260:
5261: /* enter new (or change existing) kernel mapping */
5262: int
5263: pmap_enk4_4c(pm, va, prot, flags, pv, pteproto)
5264: struct pmap *pm;
5265: vaddr_t va;
5266: vm_prot_t prot;
5267: int flags;
5268: struct pvlist *pv;
5269: int pteproto;
5270: {
5271: int vr, vs, tpte, i, s;
5272: struct regmap *rp;
5273: struct segmap *sp;
5274:
5275: vr = VA_VREG(va);
5276: vs = VA_VSEG(va);
5277: rp = &pm->pm_regmap[vr];
5278: sp = &rp->rg_segmap[vs];
5279: s = splvm(); /* XXX way too conservative */
5280:
5281: #if defined(SUN4_MMU3L)
5282: if (HASSUN4_MMU3L && rp->rg_smeg == reginval) {
5283: vaddr_t tva;
5284: 		rp->rg_smeg = region_alloc(&region_locked, pm, vr)->me_cookie;
5285: i = ncontext - 1;
5286: do {
5287: setcontext4(i);
5288: setregmap(va, rp->rg_smeg);
5289: } while (--i >= 0);
5290:
5291: 		/* install the region's segment maps (mostly still seginval) */
5292: tva = VA_ROUNDDOWNTOREG(va);
5293: for (i = 0; i < NSEGRG; i++) {
5294: setsegmap(tva, rp->rg_segmap[i].sg_pmeg);
5295: tva += NBPSG;
5296: }
5297: }
5298: #endif
5299: if (sp->sg_pmeg != seginval && (tpte = getpte4(va)) & PG_V) {
5300:
5301: /* old mapping exists, and is of the same pa type */
5302: if ((tpte & (PG_PFNUM|PG_TYPE)) ==
5303: (pteproto & (PG_PFNUM|PG_TYPE))) {
5304: /* just changing protection and/or wiring */
5305: splx(s);
5306: pmap_changeprot4_4c(pm, va, prot,
5307: (flags & PMAP_WIRED) != 0);
5308: return (0);
5309: }
5310:
5311: if ((tpte & PG_TYPE) == PG_OBMEM) {
1.199.4.10 nathanw 5312: struct pvlist *opv;
1.199.4.2 pk 5313: u_int pfn;
5314:
5315: /*
5316: * Switcheroo: changing pa for this va.
5317: * If old pa was managed, remove from pvlist.
5318: * If old page was cached, flush cache.
5319: */
5320:
5321: pfn = tpte & PG_PFNUM;
1.199.4.10 nathanw 5322: if ((opv = pvhead(pfn)) != NULL)
5323: pv_unlink4_4c(opv, pm, va);
1.199.4.2 pk 5324: if ((tpte & PG_NC) == 0) {
5325: setcontext4(0); /* ??? */
1.199.4.12 thorpej 5326: cache_flush_page(va, 0);
1.199.4.2 pk 5327: }
5328: }
5329: } else {
5330: /* adding new entry */
5331: sp->sg_npte++;
5332: }
5333:
5334: /*
5335: * If the new mapping is for a managed PA, enter into pvlist.
5336: * Note that the mapping for a malloc page will always be
5337: * unique (hence will never cause a second call to malloc).
5338: */
5339: if (pv != NULL)
5340: pteproto |= pv_link4_4c(pv, pm, va, pteproto & PG_NC);
5341:
5342: if (sp->sg_pmeg == seginval) {
5343: int tva;
5344:
5345: /*
5346: * Allocate an MMU entry now (on locked list),
5347: * and map it into every context. Set all its
5348: * PTEs invalid (we will then overwrite one, but
5349: * this is more efficient than looping twice).
5350: */
5351: #ifdef DEBUG
5352: if (pm->pm_ctx == NULL || pm->pm_ctxnum != 0)
5353: panic("pmap_enk: kern seg but no kern ctx");
5354: #endif
5355: sp->sg_pmeg = me_alloc(&segm_locked, pm, vr, vs)->me_cookie;
5356: rp->rg_nsegmap++;
5357:
5358: #if defined(SUN4_MMU3L)
5359: if (HASSUN4_MMU3L)
5360: setsegmap(va, sp->sg_pmeg);
5361: else
5362: #endif
5363: {
5364: i = ncontext - 1;
5365: do {
5366: setcontext4(i);
5367: setsegmap(va, sp->sg_pmeg);
5368: } while (--i >= 0);
5369: }
5370:
5371: /* set all PTEs to invalid, then overwrite one PTE below */
5372: tva = VA_ROUNDDOWNTOSEG(va);
5373: i = NPTESG;
5374: do {
5375: setpte4(tva, 0);
5376: tva += NBPG;
5377: } while (--i > 0);
5378: }
5379:
5380: /* ptes kept in hardware only */
5381: setpte4(va, pteproto);
5382: splx(s);
5383: return (0);
5384: }
5385:
5386: /* enter new (or change existing) user mapping */
5387: int
5388: pmap_enu4_4c(pm, va, prot, flags, pv, pteproto)
5389: struct pmap *pm;
5390: vaddr_t va;
5391: vm_prot_t prot;
5392: int flags;
5393: struct pvlist *pv;
5394: int pteproto;
5395: {
5396: int vr, vs, *pte, tpte, pmeg, s, doflush;
5397: int error = 0;
5398: struct regmap *rp;
5399: struct segmap *sp;
5400:
5401: write_user_windows(); /* XXX conservative */
5402: vr = VA_VREG(va);
5403: vs = VA_VSEG(va);
5404: rp = &pm->pm_regmap[vr];
5405: s = splvm(); /* XXX conservative */
5406:
5407: /*
5408: * If there is no space in which the PTEs can be written
5409: * while they are not in the hardware, this must be a new
5410: * virtual segment. Get PTE space and count the segment.
5411: *
5412: * TO SPEED UP CTX ALLOC, PUT SEGMENT BOUNDS STUFF HERE
5413: * AND IN pmap_rmu()
5414: */
5415:
5416: GAP_SHRINK(pm,vr);
5417:
5418: #ifdef DEBUG
5419: if (pm->pm_gap_end < pm->pm_gap_start) {
5420: printf("pmap_enu: gap_start 0x%x, gap_end 0x%x",
5421: pm->pm_gap_start, pm->pm_gap_end);
5422: panic("pmap_enu: gap botch");
5423: }
5424: #endif
5425:
5426: if (rp->rg_segmap == NULL) {
5427: /* definitely a new mapping */
5428: int i;
5429: int size = NSEGRG * sizeof (struct segmap);
5430: int mflag = M_NOWAIT;
5431:
5432: rretry:
5433: sp = (struct segmap *)malloc((u_long)size, M_VMPMAP, mflag);
5434: if (sp == NULL) {
5435: if ((flags & PMAP_CANFAIL) != 0) {
5436: error = ENOMEM;
5437: goto out;
5438: }
5439: mflag = M_WAITOK;
5440: goto rretry;
5441: }
5442: #ifdef DEBUG
5443: if (rp->rg_segmap != NULL)
5444: panic("pmap_enter: segment filled during sleep");
5445: #endif
5446: qzero((caddr_t)sp, size);
5447: rp->rg_segmap = sp;
5448: rp->rg_nsegmap = 0;
5449: for (i = NSEGRG; --i >= 0;)
5450: sp++->sg_pmeg = seginval;
5451: }
5452:
5453: sp = &rp->rg_segmap[vs];
5454:
5455: if ((pte = sp->sg_pte) == NULL) {
5456: /* definitely a new mapping */
5457: int size = NPTESG * sizeof *pte;
5458: int mflag = M_NOWAIT;
5459:
5460: sretry:
5461: pte = (int *)malloc((u_long)size, M_VMPMAP, mflag);
5462: if (pte == NULL) {
5463: if ((flags & PMAP_CANFAIL) != 0) {
5464: error = ENOMEM;
5465: goto out;
5466: }
5467: mflag = M_WAITOK;
5468: goto sretry;
5469: }
5470: #ifdef DEBUG
5471: if (sp->sg_pte != NULL)
5472: panic("pmap_enter: pte filled during sleep");
5473: if (sp->sg_pmeg != seginval)
5474: panic("pmap_enter: new ptes, but not seginval");
5475: #endif
5476: qzero((caddr_t)pte, size);
5477: sp->sg_pte = pte;
5478: sp->sg_npte = 1;
5479: rp->rg_nsegmap++;
5480: } else {
5481: /* might be a change: fetch old pte */
5482: doflush = 0;
5483: if ((pmeg = sp->sg_pmeg) == seginval) {
5484: /* software pte */
5485: tpte = pte[VA_VPG(va)];
5486: } else {
5487: /* hardware pte */
5488: if (CTX_USABLE(pm,rp)) {
5489: setcontext4(pm->pm_ctxnum);
5490: tpte = getpte4(va);
5491: doflush = CACHEINFO.c_vactype != VAC_NONE;
5492: } else {
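				/*
				 * No context to borrow: temporarily map the
				 * pmeg at virtual segment 0 in context 0 and
				 * fetch the old PTE through that window.
				 */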
5493: setcontext4(0);
5494: /* XXX use per-cpu pteva? */
5495: if (HASSUN4_MMU3L)
5496: setregmap(0, tregion);
5497: setsegmap(0, pmeg);
5498: tpte = getpte4(VA_VPG(va) << PGSHIFT);
5499: }
5500: }
5501:
5502: if (tpte & PG_V) {
5503: /* old mapping exists, and is of the same pa type */
5504: if ((tpte & (PG_PFNUM|PG_TYPE)) ==
5505: (pteproto & (PG_PFNUM|PG_TYPE))) {
5506: /* just changing prot and/or wiring */
5507: splx(s);
5508: /* caller should call this directly: */
5509: pmap_changeprot4_4c(pm, va, prot,
5510: (flags & PMAP_WIRED) != 0);
5511: if ((flags & PMAP_WIRED) != 0)
5512: pm->pm_stats.wired_count++;
5513: else
5514: pm->pm_stats.wired_count--;
5515: return (0);
5516: }
5517: /*
5518: * Switcheroo: changing pa for this va.
5519: * If old pa was managed, remove from pvlist.
5520: * If old page was cached, flush cache.
5521: */
5522: #if 0
5523: printf("%s[%d]: pmap_enu: changing existing "
5524: "va(0x%x)=>pa entry\n",
5525: curproc->p_comm, curproc->p_pid, va);
5526: #endif
5527: if ((tpte & PG_TYPE) == PG_OBMEM) {
1.199.4.10 nathanw 5528: struct pvlist *opv;
1.199.4.2 pk 5529: u_int pfn = tpte & PG_PFNUM;
1.199.4.10 nathanw 5530: if ((opv = pvhead(pfn)) != NULL)
5531: pv_unlink4_4c(opv, pm, va);
1.199.4.2 pk 5532: if (doflush && (tpte & PG_NC) == 0)
1.199.4.12 thorpej 5533: cache_flush_page(va, pm->pm_ctxnum);
1.199.4.2 pk 5534: }
5535: } else {
5536: /* adding new entry */
5537: sp->sg_npte++;
5538:
5539: /*
5540: * Increment counters
5541: */
5542: if ((flags & PMAP_WIRED) != 0)
5543: pm->pm_stats.wired_count++;
5544: }
5545: }
5546:
5547: if (pv != NULL)
5548: pteproto |= pv_link4_4c(pv, pm, va, pteproto & PG_NC);
5549:
5550: /*
5551: * Update hardware & software PTEs.
5552: */
5553: if ((pmeg = sp->sg_pmeg) != seginval) {
5554: /* ptes are in hardware */
5555: if (CTX_USABLE(pm,rp))
5556: setcontext4(pm->pm_ctxnum);
5557: else {
5558: setcontext4(0);
5559: /* XXX use per-cpu pteva? */
5560: if (HASSUN4_MMU3L)
5561: setregmap(0, tregion);
5562: setsegmap(0, pmeg);
5563: va = VA_VPG(va) << PGSHIFT;
5564: }
5565: setpte4(va, pteproto);
5566: }
5567: /* update software copy */
5568: pte += VA_VPG(va);
5569: *pte = pteproto;
5570:
5571: out:
5572: splx(s);
5573: return (error);
5574: }
5575:
5576: void
5577: pmap_kenter_pa4_4c(va, pa, prot)
5578: vaddr_t va;
5579: paddr_t pa;
5580: vm_prot_t prot;
5581: {
5582: struct pmap *pm = pmap_kernel();
5583: struct regmap *rp;
5584: struct segmap *sp;
5585: int vr, vs, i, s;
5586: int pteproto, ctx;
5587:
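	/*
	 * Build the PTE: PMAP_T2PTE_4() turns the space/cache hints
	 * encoded in `pa' (e.g. PMAP_OBIO, PMAP_NC) into PTE type
	 * bits; the hint bits are then stripped before taking the PFN.
	 */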
1.199.4.4 nathanw 5588: pteproto = PG_S | PG_V | PMAP_T2PTE_4(pa);
1.199.4.2 pk 5589: pa &= ~PMAP_TNC_4;
5590: pteproto |= atop(pa) & PG_PFNUM;
5591: if (prot & VM_PROT_WRITE)
5592: pteproto |= PG_W;
5593:
5594: vr = VA_VREG(va);
5595: vs = VA_VSEG(va);
5596: rp = &pm->pm_regmap[vr];
5597: sp = &rp->rg_segmap[vs];
5598:
5599: ctx = getcontext4();
5600: s = splvm();
5601: #if defined(SUN4_MMU3L)
5602: if (HASSUN4_MMU3L && rp->rg_smeg == reginval) {
5603: vaddr_t tva;
5604: 		rp->rg_smeg = region_alloc(&region_locked, pm, vr)->me_cookie;
5605: i = ncontext - 1;
5606: do {
5607: setcontext4(i);
5608: setregmap(va, rp->rg_smeg);
5609: } while (--i >= 0);
5610:
5611: /* set all PTEs to invalid, then overwrite one PTE below */
5612: tva = VA_ROUNDDOWNTOREG(va);
5613: for (i = 0; i < NSEGRG; i++) {
5614: setsegmap(tva, rp->rg_segmap[i].sg_pmeg);
5615: tva += NBPSG;
5616: }
5617: }
5618: #endif
5619: KASSERT(sp->sg_pmeg == seginval || (getpte4(va) & PG_V) == 0);
5620: if (sp->sg_pmeg == seginval) {
5621: int tva;
5622:
5623: /*
5624: * Allocate an MMU entry now (on locked list),
5625: * and map it into every context. Set all its
5626: * PTEs invalid (we will then overwrite one, but
5627: * this is more efficient than looping twice).
5628: */
5629:
5630: sp->sg_pmeg = me_alloc(&segm_locked, pm, vr, vs)->me_cookie;
5631: rp->rg_nsegmap++;
5632:
5633: #if defined(SUN4_MMU3L)
5634: if (HASSUN4_MMU3L)
5635: setsegmap(va, sp->sg_pmeg);
5636: else
5637: #endif
5638: {
5639: i = ncontext - 1;
5640: do {
5641: setcontext4(i);
5642: setsegmap(va, sp->sg_pmeg);
5643: } while (--i >= 0);
5644: }
5645:
5646: /* set all PTEs to invalid, then overwrite one PTE below */
5647: tva = VA_ROUNDDOWNTOSEG(va);
5648: i = NPTESG;
5649: do {
5650: setpte4(tva, 0);
5651: tva += NBPG;
5652: } while (--i > 0);
5653: }
5654:
5655: /* ptes kept in hardware only */
5656: setpte4(va, pteproto);
5657: sp->sg_npte++;
5658: splx(s);
5659: setcontext4(ctx);
5660: }
5661:
5662: void
5663: pmap_kremove4_4c(va, len)
5664: vaddr_t va;
5665: vsize_t len;
5666: {
5667: struct pmap *pm = pmap_kernel();
5668: struct regmap *rp;
5669: struct segmap *sp;
5670: vaddr_t nva, endva;
5671: int i, tpte, perpage, npg;
5672: int nleft, pmeg;
5673: int vr, vs, s, ctx;
5674:
5675: endva = va + len;
5676: #ifdef DEBUG
5677: if (pmapdebug & PDB_REMOVE)
5678: printf("pmap_kremove(0x%lx, 0x%lx)\n", va, endva);
5679: #endif
5680:
5681: s = splvm();
5682: ctx = getcontext();
5683: simple_lock(&pm->pm_lock);
5684: for (; va < endva; va = nva) {
5685: /* do one virtual segment at a time */
5686: vr = VA_VREG(va);
5687: vs = VA_VSEG(va);
5688: nva = VSTOVA(vr, vs + 1);
5689: if (nva == 0 || nva > endva)
5690: nva = endva;
5691:
5692: rp = &pm->pm_regmap[vr];
5693: sp = &rp->rg_segmap[vs];
5694:
5695: if (rp->rg_nsegmap == 0)
5696: continue;
5697: nleft = sp->sg_npte;
5698: if (nleft == 0)
5699: continue;
5700: pmeg = sp->sg_pmeg;
5701: KASSERT(pmeg != seginval);
5702: setcontext4(0);
5703: /* decide how to flush cache */
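		/*
		 * Past PMAP_RMK_MAGIC pages it is cheaper to flush the
		 * whole segment once than to flush each page separately.
		 */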
1.199.4.6 nathanw 5704: npg = (nva - va) >> PGSHIFT;
1.199.4.2 pk 5705: if (npg > PMAP_RMK_MAGIC) {
5706: /* flush the whole segment */
5707: perpage = 0;
1.199.4.12 thorpej 5708: cache_flush_segment(vr, vs, 0);
1.199.4.2 pk 5709: } else {
5710: /*
5711: * flush each page individually;
5712: * some never need flushing
5713: */
5714: perpage = (CACHEINFO.c_vactype != VAC_NONE);
5715: }
5716: while (va < nva) {
5717: tpte = getpte4(va);
5718: if ((tpte & PG_V) == 0) {
5719: va += NBPG;
5720: continue;
5721: }
5722: if ((tpte & PG_TYPE) == PG_OBMEM) {
5723: /* if cacheable, flush page as needed */
5724: if (perpage && (tpte & PG_NC) == 0)
1.199.4.12 thorpej 5725: cache_flush_page(va, 0);
1.199.4.2 pk 5726: }
5727: nleft--;
5728: #ifdef DIAGNOSTIC
5729: if (nleft < 0)
5730: panic("pmap_kremove: too many PTEs in segment; "
5731: "va 0x%lx; endva 0x%lx", va, endva);
5732: #endif
5733: setpte4(va, 0);
5734: va += NBPG;
5735: }
5736:
5737: /*
5738: * If the segment is all gone, remove it from everyone and
5739: * free the MMU entry.
5740: */
5741:
5742: sp->sg_npte = nleft;
5743: if (nleft == 0) {
5744: va = VSTOVA(vr, vs);
5745: #if defined(SUN4_MMU3L)
5746: if (HASSUN4_MMU3L)
5747: setsegmap(va, seginval);
5748: else
5749: #endif
5750: for (i = ncontext; --i >= 0;) {
5751: setcontext4(i);
5752: setsegmap(va, seginval);
5753: }
5754: me_free(pm, pmeg);
5755: if (--rp->rg_nsegmap == 0) {
5756: #if defined(SUN4_MMU3L)
5757: if (HASSUN4_MMU3L) {
5758: for (i = ncontext; --i >= 0;) {
5759: setcontext4(i);
5760: setregmap(va, reginval);
5761: }
5762: /* note: context is 0 */
5763: region_free(pm, rp->rg_smeg);
5764: }
5765: #endif
5766: }
5767: }
5768: }
5769: simple_unlock(&pm->pm_lock);
1.199.4.10 nathanw 5770: setcontext4(ctx);
1.199.4.2 pk 5771: splx(s);
5772: }
5773:
5774: #endif /*sun4,4c*/
5775:
1.199.4.9 nathanw 5776: #if defined(SUN4M) || defined(SUN4D) /* SRMMU versions of enter routines */
1.199.4.2 pk 5777: /*
5778: * Insert (MI) physical page pa at virtual address va in the given pmap.
5779: * NB: the pa parameter includes type bits PMAP_OBIO, PMAP_NC as necessary.
5780: *
5781: * If pa is not in the `managed' range it will not be `bank mapped'.
5782: * This works during bootstrap only because the first 4MB happens to
5783: * map one-to-one.
5784: *
5785: * There may already be something else there, or we might just be
5786: * changing protections and/or wiring on an existing mapping.
5787: * XXX should have different entry points for changing!
5788: */
5789:
5790: int
5791: pmap_enter4m(pm, va, pa, prot, flags)
5792: struct pmap *pm;
5793: vaddr_t va;
5794: paddr_t pa;
5795: vm_prot_t prot;
5796: int flags;
5797: {
5798: struct pvlist *pv;
1.199.4.14 thorpej 5799: int pteproto;
1.199.4.2 pk 5800: u_int pfn;
5801: int error;
5802:
5803: #ifdef DEBUG
5804: if (pmapdebug & PDB_ENTER)
1.199.4.17! martin 5805: printf("pmap_enter[curcpu %d, curpid %d, ctx %d,%d]"
1.199.4.2 pk 5806: "(%p, 0x%lx, 0x%lx, 0x%x, 0x%x)\n",
1.199.4.17! martin 5807: cpu_number(), curproc==NULL ? -1 : curproc->p_pid,
! 5808: getcontext4m(), pm->pm_ctx==NULL ? -1 : pm->pm_ctxnum,
1.199.4.2 pk 5809: pm, va, pa, prot, flags);
5810: #endif
5811:
5812: /* Initialise pteproto with cache bit */
5813: pteproto = (pa & PMAP_NC) == 0 ? SRMMU_PG_C : 0;
5814:
5815: #ifdef DEBUG
5816: if (pa & PMAP_TYPE_SRMMU) { /* this page goes in an iospace */
5817: if (cpuinfo.cpu_type == CPUTYP_MS1)
5818: panic("pmap_enter4m: attempt to use 36-bit iospace on"
5819: " MicroSPARC");
5820: }
5821: #endif
5822: pteproto |= PMAP_T2PTE_SRMMU(pa);
5823:
5824: /* Make sure we get a pte with appropriate perms! */
5825: pteproto |= SRMMU_TEPTE | PPROT_RX_RX;
5826:
5827: pa &= ~PMAP_TNC_SRMMU;
5828: /*
5829: * Set up prototype for new PTE. Cannot set PG_NC from PV_NC yet
5830: * since the pvlist no-cache bit might change as a result of the
5831: * new mapping.
5832: */
5833: pfn = atop(pa);
5834: if ((pteproto & SRMMU_PGTYPE) == PG_SUN4M_OBMEM && pmap_initialized) {
5835: pv = pvhead(pfn);
5836: } else {
5837: pv = NULL;
5838: }
5839: pteproto |= (pfn << SRMMU_PPNSHIFT);
5840:
5841: if (prot & VM_PROT_WRITE)
5842: pteproto |= PPROT_WRITE;
5843:
5844: if (pm == pmap_kernel())
5845: error = pmap_enk4m(pm, va, prot, flags, pv, pteproto | PPROT_S);
5846: else
5847: error = pmap_enu4m(pm, va, prot, flags, pv, pteproto);
5848:
5849: return (error);
5850: }
5851:
5852: /* enter new (or change existing) kernel mapping */
5853: int
5854: pmap_enk4m(pm, va, prot, flags, pv, pteproto)
5855: struct pmap *pm;
5856: vaddr_t va;
5857: vm_prot_t prot;
5858: int flags;
5859: struct pvlist *pv;
5860: int pteproto;
5861: {
5862: int vr, vs, tpte, s;
5863: struct regmap *rp;
5864: struct segmap *sp;
5865:
5866: #ifdef DEBUG
5867: if (va < KERNBASE)
5868: panic("pmap_enk4m: can't enter va 0x%lx below KERNBASE", va);
5869: #endif
5870: vr = VA_VREG(va);
5871: vs = VA_VSEG(va);
5872: rp = &pm->pm_regmap[vr];
5873: sp = &rp->rg_segmap[vs];
5874:
5875: s = splvm(); /* XXX way too conservative */
5876:
5877: if (rp->rg_seg_ptps == NULL) /* enter new region */
5878: panic("pmap_enk4m: missing kernel region table for va 0x%lx",va);
5879:
5880: tpte = sp->sg_pte[VA_SUN4M_VPG(va)];
5881: if ((tpte & SRMMU_TETYPE) == SRMMU_TEPTE) {
5882:
5883: /* old mapping exists, and is of the same pa type */
5884:
5885: if ((tpte & SRMMU_PPNMASK) == (pteproto & SRMMU_PPNMASK)) {
5886: /* just changing protection and/or wiring */
5887: splx(s);
5888: pmap_changeprot4m(pm, va, prot,
5889: (flags & PMAP_WIRED) != 0);
5890: return (0);
5891: }
5892:
5893: if ((tpte & SRMMU_PGTYPE) == PG_SUN4M_OBMEM) {
1.199.4.10 nathanw 5894: struct pvlist *opv;
1.199.4.2 pk 5895: u_int pfn = (tpte & SRMMU_PPNMASK) >> SRMMU_PPNSHIFT;
5896: #ifdef DEBUG
5897: printf("pmap_enk4m: changing existing va=>pa entry: va 0x%lx, pteproto 0x%x, "
5898: "oldpte 0x%x\n", va, pteproto, tpte);
5899: #endif
5900: /*
5901: * Switcheroo: changing pa for this va.
5902: * If old pa was managed, remove from pvlist.
5903: * If old page was cached, flush cache.
5904: */
1.199.4.10 nathanw 5905: if ((opv = pvhead(pfn)) != NULL)
5906: pv_unlink4m(opv, pm, va);
1.199.4.2 pk 5907: if (tpte & SRMMU_PG_C) {
1.199.4.12 thorpej 5908: cache_flush_page(va, 0);
1.199.4.2 pk 5909: }
5910: }
5911: } else {
5912: /* adding new entry */
5913: sp->sg_npte++;
5914: }
5915:
5916: /*
5917: * If the new mapping is for a managed PA, enter into pvlist.
5918: * Note that the mapping for a malloc page will always be
5919: * unique (hence will never cause a second call to malloc).
5920: */
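	/*
	 * (Unlike the 4/4c variant, pv_link4m() returns a mask of
	 * bits to clear: SRMMU_PG_C comes back when an illegal VAC
	 * alias forces this mapping to be entered uncached.)
	 */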
5921: if (pv != NULL)
5922: pteproto &= ~(pv_link4m(pv, pm, va, (pteproto & SRMMU_PG_C) == 0));
5923:
5924: #ifdef DEBUG
5925: if (sp->sg_pte == NULL) /* If no existing pagetable */
5926: panic("pmap_enk4m: missing segment table for va 0x%lx",va);
5927: #endif
5928:
1.199.4.14 thorpej 5929: setpgt4m_va(va, &sp->sg_pte[VA_SUN4M_VPG(va)], pteproto, 1, 0);
1.199.4.2 pk 5930:
5931: splx(s);
5932: return (0);
5933: }
5934:
5935: /* enter new (or change existing) user mapping */
5936: int
5937: pmap_enu4m(pm, va, prot, flags, pv, pteproto)
5938: struct pmap *pm;
5939: vaddr_t va;
5940: vm_prot_t prot;
5941: int flags;
5942: struct pvlist *pv;
5943: int pteproto;
5944: {
5945: int vr, vs, *pte, tpte, s;
5946: int error = 0;
5947: struct regmap *rp;
5948: struct segmap *sp;
5949:
5950: #ifdef DEBUG
5951: if (KERNBASE < va)
5952: panic("pmap_enu4m: can't enter va 0x%lx above KERNBASE", va);
5953: #endif
5954:
5955: write_user_windows(); /* XXX conservative */
5956: vr = VA_VREG(va);
5957: vs = VA_VSEG(va);
5958: rp = &pm->pm_regmap[vr];
5959: s = splvm(); /* XXX conservative */
5960:
5961: if (rp->rg_segmap == NULL) {
5962: /* definitely a new mapping */
5963: int size = NSEGRG * sizeof (struct segmap);
5964: int mflag = M_NOWAIT;
5965:
5966: rretry:
5967: sp = (struct segmap *)malloc((u_long)size, M_VMPMAP, mflag);
5968: if (sp == NULL) {
5969: if ((flags & PMAP_CANFAIL) != 0) {
5970: error = ENOMEM;
5971: goto out;
5972: }
5973: mflag = M_WAITOK;
5974: goto rretry;
5975: }
5976: #ifdef DEBUG
5977: if (rp->rg_segmap != NULL)
5978: panic("pmap_enu4m: segment filled during sleep");
5979: #endif
5980: qzero((caddr_t)sp, size);
5981: rp->rg_segmap = sp;
5982: rp->rg_nsegmap = 0;
5983: rp->rg_seg_ptps = NULL;
5984: }
5985: if (rp->rg_seg_ptps == NULL) {
5986: /* Need a segment table */
5987: int i, *ptd;
5988: int mflag = PR_NOWAIT;
5989:
5990: rgretry:
5991: ptd = pool_get(&L23_pool, mflag);
5992: if (ptd == NULL) {
5993: if ((flags & PMAP_CANFAIL) != 0) {
5994: error = ENOMEM;
5995: goto out;
5996: }
5997: mflag = PR_WAITOK;
5998: goto rgretry;
5999: }
6000: #ifdef DEBUG
6001: if (rp->rg_seg_ptps != NULL)
6002: 			panic("pmap_enu4m: segment table filled during sleep");
6003: #endif
6004:
6005: rp->rg_seg_ptps = ptd;
6006: for (i = 0; i < SRMMU_L2SIZE; i++)
6007: setpgt4m(&ptd[i], SRMMU_TEINVALID);
6008:
6009: /* Replicate segment allocation in each CPU's region table */
6010: #ifdef MULTIPROCESSOR
6011: for (i = 0; i < ncpu; i++)
6012: #else
6013: i = 0;
6014: #endif
6015: {
6016: setpgt4m(&pm->pm_reg_ptps[i][vr],
6017: (VA2PA((caddr_t)ptd) >> SRMMU_PPNPASHIFT) |
6018: SRMMU_TEPTD);
6019: }
6020: }
6021:
6022: sp = &rp->rg_segmap[vs];
6023:
6024: if ((pte = sp->sg_pte) == NULL) {
6025: /* definitely a new mapping */
6026: int i;
6027: int mflag = PR_NOWAIT;
6028:
6029: sretry:
6030: pte = pool_get(&L23_pool, mflag);
6031: if (pte == NULL) {
6032: if ((flags & PMAP_CANFAIL) != 0) {
6033: error = ENOMEM;
6034: goto out;
6035: }
6036: mflag = PR_WAITOK;
6037: goto sretry;
6038: }
6039: #ifdef DEBUG
6040: if (sp->sg_pte != NULL)
6041: 			panic("pmap_enu4m: pte filled during sleep");
6042: #endif
6043:
6044: sp->sg_pte = pte;
6045: sp->sg_npte = 1;
6046: rp->rg_nsegmap++;
6047: for (i = 0; i < SRMMU_L3SIZE; i++)
6048: setpgt4m(&pte[i], SRMMU_TEINVALID);
6049: setpgt4m(&rp->rg_seg_ptps[vs],
6050: (VA2PA((caddr_t)pte) >> SRMMU_PPNPASHIFT) | SRMMU_TEPTD);
6051: } else {
6052: /*
6053: * Might be a change: fetch old pte
6054: */
1.199.4.14 thorpej 6055: if (pm->pm_ctx)
6056: tlb_flush_page(va, pm->pm_ctxnum);
1.199.4.2 pk 6057: tpte = pte[VA_SUN4M_VPG(va)];
6058:
6059: if ((tpte & SRMMU_TETYPE) == SRMMU_TEPTE) {
6060:
6061: /* old mapping exists, and is of the same pa type */
6062: if ((tpte & SRMMU_PPNMASK) ==
6063: (pteproto & SRMMU_PPNMASK)) {
6064: /* just changing prot and/or wiring */
6065: splx(s);
6066: /* caller should call this directly: */
6067: pmap_changeprot4m(pm, va, prot,
6068: (flags & PMAP_WIRED) != 0);
6069: if ((flags & PMAP_WIRED) != 0)
6070: pm->pm_stats.wired_count++;
6071: else
6072: pm->pm_stats.wired_count--;
6073: return (0);
6074: }
6075: /*
6076: * Switcheroo: changing pa for this va.
6077: * If old pa was managed, remove from pvlist.
6078: * If old page was cached, flush cache.
6079: */
6080: #ifdef DEBUG
6081: if (pmapdebug & PDB_SWITCHMAP)
6082: printf("%s[%d]: pmap_enu: changing existing "
6083: "va 0x%x: pte 0x%x=>0x%x\n",
6084: curproc->p_comm, curproc->p_pid,
6085: (int)va, tpte, pteproto);
6086: #endif
6087: if ((tpte & SRMMU_PGTYPE) == PG_SUN4M_OBMEM) {
1.199.4.10 nathanw 6088: struct pvlist *opv;
1.199.4.2 pk 6089: u_int pfn =
6090: (tpte & SRMMU_PPNMASK) >> SRMMU_PPNSHIFT;
1.199.4.10 nathanw 6091: if ((opv = pvhead(pfn)) != NULL) {
6092: opv->pv_flags |= MR4M(tpte);
6093: pv_unlink4m(opv, pm, va);
1.199.4.2 pk 6094: }
6095: if (pm->pm_ctx && (tpte & SRMMU_PG_C))
1.199.4.12 thorpej 6096: cache_flush_page(va, pm->pm_ctxnum);
1.199.4.2 pk 6097: }
6098: } else {
6099: /* adding new entry */
6100: sp->sg_npte++;
6101:
6102: /*
6103: * Increment counters
6104: */
6105: if ((flags & PMAP_WIRED) != 0)
6106: pm->pm_stats.wired_count++;
6107: }
6108: }
6109: if (pv != NULL)
6110: pteproto &= ~(pv_link4m(pv, pm, va, (pteproto & SRMMU_PG_C) == 0));
6111:
6112: /*
6113: * Update PTEs, flush TLB as necessary.
6114: */
6115: setpgt4m_va(va, &sp->sg_pte[VA_SUN4M_VPG(va)], pteproto,
1.199.4.14 thorpej 6116: pm->pm_ctx != NULL, pm->pm_ctxnum);
1.199.4.2 pk 6117:
6118: out:
6119: splx(s);
6120: return (error);
6121: }
6122:
6123: void
6124: pmap_kenter_pa4m(va, pa, prot)
6125: vaddr_t va;
6126: paddr_t pa;
6127: vm_prot_t prot;
6128: {
6129: struct pmap *pm = pmap_kernel();
6130: struct regmap *rp;
6131: struct segmap *sp;
6132: int pteproto, vr, vs, tpte;
6133:
6134: /* Initialise pteproto with cache bit */
6135: pteproto = (pa & PMAP_NC) == 0 ? SRMMU_PG_C : 0;
6136: pteproto |= PMAP_T2PTE_SRMMU(pa);
6137: pteproto |= SRMMU_TEPTE | PPROT_RX_RX;
6138: pteproto |= (atop(pa & ~PMAP_TNC_SRMMU) << SRMMU_PPNSHIFT);
6139: if (prot & VM_PROT_WRITE)
6140: pteproto |= PPROT_WRITE;
6141: pteproto |= PPROT_S;
6142:
6143: vr = VA_VREG(va);
6144: vs = VA_VSEG(va);
6145: rp = &pm->pm_regmap[vr];
6146: sp = &rp->rg_segmap[vs];
6147:
6148: tpte = sp->sg_pte[VA_SUN4M_VPG(va)];
6149: KASSERT((tpte & SRMMU_TETYPE) != SRMMU_TEPTE);
6150:
6151: sp->sg_npte++;
1.199.4.14 thorpej 6152: setpgt4m_va(va, &sp->sg_pte[VA_SUN4M_VPG(va)], pteproto, 1, 0);
1.199.4.2 pk 6153: }
6154:
6155: void
6156: pmap_kremove4m(va, len)
6157: vaddr_t va;
6158: vsize_t len;
6159: {
6160: struct pmap *pm = pmap_kernel();
6161: struct regmap *rp;
6162: struct segmap *sp;
6163: vaddr_t endva, nva;
1.199.4.14 thorpej 6164: int vr, vs;
1.199.4.2 pk 6165: int tpte, perpage, npg;
6166: int nleft;
6167:
6168: endva = va + len;
6169: simple_lock(&pm->pm_lock);
6170: for (; va < endva; va = nva) {
6171: /* do one virtual segment at a time */
6172: vr = VA_VREG(va);
6173: vs = VA_VSEG(va);
6174: nva = VSTOVA(vr, vs + 1);
6175: if (nva == 0 || nva > endva) {
6176: nva = endva;
6177: }
6178:
6179: rp = &pm->pm_regmap[vr];
6180: if (rp->rg_nsegmap == 0) {
6181: continue;
6182: }
6183:
6184: sp = &rp->rg_segmap[vs];
6185: nleft = sp->sg_npte;
6186: if (nleft == 0) {
6187: continue;
6188: }
6189:
6190: /* decide how to flush cache */
6191: npg = (nva - va) >> PGSHIFT;
6192: if (npg > PMAP_RMK_MAGIC) {
6193: /* flush the whole segment */
6194: perpage = 0;
6195: if (CACHEINFO.c_vactype != VAC_NONE) {
1.199.4.12 thorpej 6196: cache_flush_segment(vr, vs, 0);
1.199.4.2 pk 6197: }
6198: } else {
6199:
6200: /*
6201: * flush each page individually;
6202: * some never need flushing
6203: */
6204:
6205: perpage = (CACHEINFO.c_vactype != VAC_NONE);
6206: }
6207: for (; va < nva; va += NBPG) {
6208: tpte = sp->sg_pte[VA_SUN4M_VPG(va)];
6209: if ((tpte & SRMMU_TETYPE) != SRMMU_TEPTE) {
6210: continue;
6211: }
6212: if ((tpte & SRMMU_PGTYPE) == PG_SUN4M_OBMEM) {
6213: /* if cacheable, flush page as needed */
6214: if (perpage && (tpte & SRMMU_PG_C))
1.199.4.12 thorpej 6215: cache_flush_page(va, 0);
1.199.4.2 pk 6216: }
6217: setpgt4m_va(va, &sp->sg_pte[VA_SUN4M_VPG(va)],
1.199.4.14 thorpej 6218: SRMMU_TEINVALID, 1, 0);
1.199.4.2 pk 6219: nleft--;
6220: }
6221: sp->sg_npte = nleft;
6222: }
6223: simple_unlock(&pm->pm_lock);
6224: }
6225:
1.199.4.9 nathanw 6226: #endif /* SUN4M || SUN4D */
1.199.4.2 pk 6227:
6228: /*
6229: * Clear the wiring attribute for a map/virtual-address pair.
6230: */
6231: /* ARGSUSED */
6232: void
6233: pmap_unwire(pm, va)
6234: struct pmap *pm;
6235: vaddr_t va;
6236: {
6237:
6238: pmap_stats.ps_useless_changewire++;
6239: }
6240:
6241: /*
6242: * Extract the physical page address associated
6243: * with the given map/virtual_address pair.
6244: * GRR, the vm code knows; we should not have to do this!
6245: */
6246:
6247: #if defined(SUN4) || defined(SUN4C)
6248: boolean_t
6249: pmap_extract4_4c(pm, va, pap)
6250: struct pmap *pm;
6251: vaddr_t va;
6252: paddr_t *pap;
6253: {
6254: int tpte;
6255: int vr, vs;
6256: struct regmap *rp;
6257: struct segmap *sp;
6258:
6259: vr = VA_VREG(va);
6260: vs = VA_VSEG(va);
6261: rp = &pm->pm_regmap[vr];
6262: if (rp->rg_segmap == NULL) {
6263: #ifdef DEBUG
6264: if (pmapdebug & PDB_FOLLOW)
6265: printf("pmap_extract: invalid segment (%d)\n", vr);
6266: #endif
6267: return (FALSE);
6268: }
6269: sp = &rp->rg_segmap[vs];
6270:
6271: if (sp->sg_pmeg != seginval) {
6272: int ctx = getcontext4();
6273:
6274: if (CTX_USABLE(pm,rp)) {
6275: CHANGE_CONTEXTS(ctx, pm->pm_ctxnum);
6276: tpte = getpte4(va);
6277: } else {
6278: CHANGE_CONTEXTS(ctx, 0);
6279: if (HASSUN4_MMU3L)
6280: setregmap(0, tregion);
6281: setsegmap(0, sp->sg_pmeg);
6282: tpte = getpte4(VA_VPG(va) << PGSHIFT);
6283: }
6284: setcontext4(ctx);
6285: } else {
6286: int *pte = sp->sg_pte;
6287:
6288: if (pte == NULL) {
6289: #ifdef DEBUG
6290: if (pmapdebug & PDB_FOLLOW)
6291: printf("pmap_extract: invalid segment\n");
6292: #endif
6293: return (FALSE);
6294: }
6295: tpte = pte[VA_VPG(va)];
6296: }
6297: if ((tpte & PG_V) == 0) {
6298: #ifdef DEBUG
6299: if (pmapdebug & PDB_FOLLOW)
6300: printf("pmap_extract: invalid pte\n");
6301: #endif
6302: return (FALSE);
6303: }
6304: tpte &= PG_PFNUM;
6306: if (pap != NULL)
6307: *pap = (tpte << PGSHIFT) | (va & PGOFSET);
6308: return (TRUE);
6309: }
6310: #endif /*4,4c*/
6311:
1.199.4.9 nathanw 6312: #if defined(SUN4M) || defined(SUN4D) /* SRMMU version of pmap_extract */
1.199.4.2 pk 6313: /*
6314: * Extract the physical page address associated
6315: * with the given map/virtual_address pair.
6316: * GRR, the vm code knows; we should not have to do this!
6317: */
6318: boolean_t
6319: pmap_extract4m(pm, va, pap)
6320: struct pmap *pm;
6321: vaddr_t va;
6322: paddr_t *pap;
6323: {
6324: struct regmap *rm;
6325: struct segmap *sm;
6326: int pte;
6327:
6328: if ((rm = pm->pm_regmap) == NULL) {
6329: #ifdef DEBUG
6330: if (pmapdebug & PDB_FOLLOW)
6331: printf("pmap_extract: no regmap entry\n");
6332: #endif
6333: return (FALSE);
6334: }
6335:
6336: rm += VA_VREG(va);
6337: if ((sm = rm->rg_segmap) == NULL) {
6338: #ifdef DEBUG
6339: if (pmapdebug & PDB_FOLLOW)
6340: printf("pmap_extract: no segmap\n");
6341: #endif
6342: return (FALSE);
6343: }
6344:
6345: sm += VA_VSEG(va);
6346: if (sm->sg_pte == NULL) {
6347: #ifdef DEBUG
6348: if (pmapdebug & PDB_FOLLOW)
6349: printf("pmap_extract: no ptes\n");
6350: #endif
6351: return (FALSE);
6352: }
6353:
6354: pte = sm->sg_pte[VA_SUN4M_VPG(va)];
6355: if ((pte & SRMMU_TETYPE) != SRMMU_TEPTE) {
6356: #ifdef DEBUG
6357: if (pmapdebug & PDB_FOLLOW)
6358: printf("pmap_extract: invalid pte of type %d\n",
6359: pte & SRMMU_TETYPE);
6360: #endif
6361: return (FALSE);
6362: }
6363:
6364: if (pap != NULL)
6365: *pap = ptoa((pte & SRMMU_PPNMASK) >> SRMMU_PPNSHIFT) |
6366: VA_OFF(va);
6367: return (TRUE);
6368: }
6369: #endif /* SUN4M || SUN4D */
6370:
6371: /*
6372: * Copy the range specified by src_addr/len
6373: * from the source map to the range dst_addr/len
6374: * in the destination map.
6375: *
6376: * This routine is only advisory and need not do anything.
6377: */
6378: /* ARGSUSED */
6379: int pmap_copy_disabled=0;
6380: void
6381: pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
6382: struct pmap *dst_pmap, *src_pmap;
6383: vaddr_t dst_addr;
6384: vsize_t len;
6385: vaddr_t src_addr;
6386: {
6387: #if notyet
6388: struct regmap *rm;
6389: struct segmap *sm;
6390:
6391: if (pmap_copy_disabled)
6392: return;
6393: #ifdef DIAGNOSTIC
6394: if (VA_OFF(src_addr) != 0)
6395: printf("pmap_copy: addr not page aligned: 0x%lx\n", src_addr);
6396: if ((len & (NBPG-1)) != 0)
6397: printf("pmap_copy: length not page aligned: 0x%lx\n", len);
6398: #endif
6399:
6400: if (src_pmap == NULL)
6401: return;
6402:
1.199.4.9 nathanw 6403: if (CPU_HAS_SRMMU) {
1.199.4.2 pk 6404: int i, npg, pte;
6405: paddr_t pa;
6406:
6407: npg = len >> PGSHIFT;
6408: for (i = 0; i < npg; i++) {
1.199.4.14 thorpej 6409: tlb_flush_page(src_addr, getcontext4m());
1.199.4.2 pk 6410: if ((rm = src_pmap->pm_regmap) == NULL)
6411: continue;
6412: rm += VA_VREG(src_addr);
6413:
6414: if ((sm = rm->rg_segmap) == NULL)
6415: continue;
6416: sm += VA_VSEG(src_addr);
6417: if (sm->sg_npte == 0)
6418: continue;
6419:
6420: pte = sm->sg_pte[VA_SUN4M_VPG(src_addr)];
6421: if ((pte & SRMMU_TETYPE) != SRMMU_TEPTE)
6422: continue;
6423:
6424: pa = ptoa((pte & SRMMU_PPNMASK) >> SRMMU_PPNSHIFT);
6425: pmap_enter(dst_pmap, dst_addr,
6426: pa,
6427: (pte & PPROT_WRITE)
6428: ? (VM_PROT_WRITE | VM_PROT_READ)
6429: : VM_PROT_READ,
6430: 0);
6431: src_addr += NBPG;
6432: dst_addr += NBPG;
6433: }
6434: pmap_update(dst_pmap);
6435: }
6436: #endif
6437: }
6438:
6439: /*
6440: * Garbage collects the physical map system for
6441: * pages which are no longer used.
6442: * Success need not be guaranteed -- that is, there
6443: * may well be pages which are not referenced, but
6444: * others may be collected.
6445: * Called by the pageout daemon when pages are scarce.
6446: */
6447: /* ARGSUSED */
6448: void
6449: pmap_collect(pm)
6450: struct pmap *pm;
6451: {
6452: }
6453:
6454: #if defined(SUN4) || defined(SUN4C)
6455:
6456: /*
6457: * Clear the modify bit for the given physical page.
6458: */
6459: boolean_t
6460: pmap_clear_modify4_4c(pg)
6461: struct vm_page *pg;
6462: {
6463: struct pvlist *pv;
6464: u_int pfn = atop(VM_PAGE_TO_PHYS(pg));
6465: boolean_t rv;
6466:
6467: if ((pv = pvhead(pfn)) == NULL)
6468: return (0);
6469:
6470: (void) pv_syncflags4_4c(pv);
6471: rv = pv->pv_flags & PV_MOD;
6472: pv->pv_flags &= ~PV_MOD;
6473: return (rv);
6474: }
6475:
6476: /*
6477: * Tell whether the given physical page has been modified.
6478: */
6479: boolean_t
6480: pmap_is_modified4_4c(pg)
6481: struct vm_page *pg;
6482: {
6483: struct pvlist *pv;
6484: u_int pfn = atop(VM_PAGE_TO_PHYS(pg));
6485:
6486: if ((pv = pvhead(pfn)) == NULL)
6487: return (0);
6488:
6489: return (pv->pv_flags & PV_MOD || pv_syncflags4_4c(pv) & PV_MOD);
6490: }
6491:
6492: /*
6493: * Clear the reference bit for the given physical page.
6494: */
6495: boolean_t
6496: pmap_clear_reference4_4c(pg)
6497: struct vm_page *pg;
6498: {
6499: struct pvlist *pv;
6500: u_int pfn = atop(VM_PAGE_TO_PHYS(pg));
6501: boolean_t rv;
6502:
6503: if ((pv = pvhead(pfn)) == NULL)
6504: return (0);
6505:
6506: (void) pv_syncflags4_4c(pv);
6507: rv = pv->pv_flags & PV_REF;
6508: pv->pv_flags &= ~PV_REF;
6509: return (rv);
6510: }
6511:
6512: /*
6513: * Tell whether the given physical page has been referenced.
6514: */
6515: boolean_t
6516: pmap_is_referenced4_4c(pg)
6517: struct vm_page *pg;
6518: {
6519: struct pvlist *pv;
6520: u_int pfn = atop(VM_PAGE_TO_PHYS(pg));
6521:
6522: if ((pv = pvhead(pfn)) == NULL)
6523: return (0);
6524:
6525: return (pv->pv_flags & PV_REF || pv_syncflags4_4c(pv) & PV_REF);
6526: }
6527: #endif /*4,4c*/
6528:
1.199.4.9 nathanw 6529: #if defined(SUN4M) || defined(SUN4D)
1.199.4.2 pk 6530:
6531: /*
1.199.4.9 nathanw 6532: * SRMMU versions of bit test/set routines
1.199.4.2 pk 6533: *
6534: * Note that the 4m-specific routines should eventually service these
6535: * requests from their page tables, and the whole pvlist bit mess should
6536: * be dropped for the 4m (unless this causes a performance hit from
6537: * tracing down pagetables/regmap/segmaps).
6538: */
6539:
6540: /*
6541: * Clear the modify bit for the given physical page.
6542: */
6543: boolean_t
6544: pmap_clear_modify4m(pg) /* XXX %%%: Should service from swpagetbl for 4m */
6545: struct vm_page *pg;
6546: {
6547: struct pvlist *pv;
6548: u_int pfn = atop(VM_PAGE_TO_PHYS(pg));
6549: boolean_t rv;
6550:
6551: if ((pv = pvhead(pfn)) == NULL)
6552: return (0);
6553:
6554: (void) pv_syncflags4m(pv);
6555: rv = pv->pv_flags & PV_MOD4M;
6556: pv->pv_flags &= ~PV_MOD4M;
6557: return (rv);
6558: }
6559:
6560: /*
6561: * Tell whether the given physical page has been modified.
6562: */
6563: boolean_t
6564: pmap_is_modified4m(pg) /* Test performance with SUN4M && SUN4/4C. XXX */
6565: struct vm_page *pg;
6566: {
6567: struct pvlist *pv;
6568: u_int pfn = atop(VM_PAGE_TO_PHYS(pg));
6569:
6570: if ((pv = pvhead(pfn)) == NULL)
6571: return (0);
6572:
6573: return (pv->pv_flags & PV_MOD4M || pv_syncflags4m(pv) & PV_MOD4M);
6574: }
6575:
6576: /*
6577: * Clear the reference bit for the given physical page.
6578: */
6579: boolean_t
6580: pmap_clear_reference4m(pg)
6581: struct vm_page *pg;
6582: {
6583: struct pvlist *pv;
6584: u_int pfn = atop(VM_PAGE_TO_PHYS(pg));
6585: boolean_t rv;
6586:
6587: if ((pv = pvhead(pfn)) == NULL)
6588: return (0);
6589:
6590: (void) pv_syncflags4m(pv);
6591: rv = pv->pv_flags & PV_REF4M;
6592: pv->pv_flags &= ~PV_REF4M;
6593: return (rv);
6594: }
6595:
6596: /*
6597: * Tell whether the given physical page has been referenced.
6598: */
6599: boolean_t
6600: pmap_is_referenced4m(pg)
6601: struct vm_page *pg;
6602: {
6603: struct pvlist *pv;
6604: u_int pfn = atop(VM_PAGE_TO_PHYS(pg));
6605:
6606: if ((pv = pvhead(pfn)) == NULL)
6607: return (0);
6608:
6609: return (pv->pv_flags & PV_REF4M || pv_syncflags4m(pv) & PV_REF4M);
6610: }
6611: #endif /* SUN4M || SUN4D */
6612:
6613: /*
6614: * Fill the given MI physical page with zero bytes.
6615: *
6616: * We avoid stomping on the cache.
6617: * XXX might be faster to use destination's context and allow cache to fill?
6618: */
6619:
6620: #if defined(SUN4) || defined(SUN4C)
6621:
6622: void
6623: pmap_zero_page4_4c(pa)
6624: paddr_t pa;
6625: {
6626: u_int pfn = atop(pa);
6627: struct pvlist *pv;
6628: caddr_t va;
6629: int pte;
6630:
6631: if ((pv = pvhead(pfn)) != NULL) {
6632: /*
6633: * The following might not be necessary since the page
6634: * is being cleared because it is about to be allocated,
6635: * i.e., is in use by no one.
6636: */
6637: pv_flushcache(pv);
6638: }
6639: pte = PG_V | PG_S | PG_W | PG_NC | (pfn & PG_PFNUM);
6640:
6641: va = vpage[0];
6642: setpte4(va, pte);
6643: qzero(va, NBPG);
6644: setpte4(va, 0);
6645: }
6646:
6647: /*
6648: * Copy the given MI physical source page to its destination.
6649: *
6650: * We avoid stomping on the cache as above (with same `XXX' note).
6651: * We must first flush any write-back cache for the source page.
6652: * We go ahead and stomp on the kernel's virtual cache for the
6653: * source page, since the cache can read memory MUCH faster than
6654: * the processor.
6655: */
6656: void
6657: pmap_copy_page4_4c(src, dst)
6658: paddr_t src, dst;
6659: {
6660: struct pvlist *pv;
6661: caddr_t sva, dva;
6662: u_int srcpfn, dstpfn;
6663: int spte, dpte;
6664:
6665: srcpfn = atop(src);
6666: dstpfn = atop(dst);
6667: if ((pv = pvhead(srcpfn)) != NULL) {
6668: if (CACHEINFO.c_vactype == VAC_WRITEBACK)
6669: pv_flushcache(pv);
6670: }
6671: spte = PG_V | PG_S | (srcpfn & PG_PFNUM);
6672:
6673: if ((pv = pvhead(dstpfn)) != NULL) {
6674: /* similar `might not be necessary' comment applies */
6675: if (CACHEINFO.c_vactype != VAC_NONE)
6676: pv_flushcache(pv);
6677: }
6678: dpte = PG_V | PG_S | PG_W | PG_NC | (dstpfn & PG_PFNUM);
6679:
6680: sva = vpage[0];
6681: dva = vpage[1];
6682: setpte4(sva, spte);
6683: setpte4(dva, dpte);
6684: qcopy(sva, dva, NBPG); /* loads cache, so we must ... */
1.199.4.12 thorpej 6685: cache_flush_page((vaddr_t)sva, getcontext4());
1.199.4.2 pk 6686: setpte4(sva, 0);
6687: setpte4(dva, 0);
6688: }
6689: #endif /* 4, 4c */
6690:
1.199.4.9 nathanw 6691: #if defined(SUN4M) || defined(SUN4D) /* SRMMU version of copy/zero routines */
1.199.4.2 pk 6692: /*
6693: * Fill the given MI physical page with zero bytes.
6694: *
6695: * We avoid stomping on the cache.
6696: * XXX might be faster to use destination's context and allow cache to fill?
6697: */
6698: void
6699: pmap_zero_page4m(pa)
6700: paddr_t pa;
6701: {
6702: u_int pfn = atop(pa);
6703: struct pvlist *pv;
6704: caddr_t va;
6705: int pte;
6706:
6707: if ((pv = pvhead(pfn)) != NULL) {
6708: /*
6709: * The following VAC flush might not be necessary since the
6710: * page is being cleared because it is about to be allocated,
6711: * i.e., is in use by no one.
6712: * In the case of a physical cache, a flush (or just an
6713: * invalidate, if possible) is usually necessary when using
6714: * uncached access to clear it.
6715: */
6716: if (CACHEINFO.c_vactype != VAC_NONE)
6717: pv_flushcache(pv);
6718: else
6719: pcache_flush_page(pa, 1);
6720: }
6721: pte = SRMMU_TEPTE | PPROT_N_RWX | (pfn << SRMMU_PPNSHIFT);
6722: if (cpuinfo.flags & CPUFLG_CACHE_MANDATORY)
6723: pte |= SRMMU_PG_C;
6724:
6725: va = vpage[0];
6726: setpgt4m(vpage_pte[0], pte);
6727: qzero(va, NBPG);
6728: /* Remove temporary mapping */
1.199.4.14 thorpej 6729: tlb_flush_page((int)va, getcontext4m());
1.199.4.2 pk 6730: setpgt4m(vpage_pte[0], SRMMU_TEINVALID);
6731: }
6732:
6733: /*
6734: * Viking/MXCC specific version of pmap_zero_page
6735: */
6736: void
6737: pmap_zero_page_viking_mxcc(pa)
6738: paddr_t pa;
6739: {
6740: u_int offset;
6741: u_int stream_data_addr = MXCC_STREAM_DATA;
6742: u_int64_t v = (u_int64_t)pa;
6743:
6744: /* Load MXCC stream data register with 0 (bottom 32 bytes only) */
6745: stda(stream_data_addr+0, ASI_CONTROL, 0);
6746: stda(stream_data_addr+8, ASI_CONTROL, 0);
6747: stda(stream_data_addr+16, ASI_CONTROL, 0);
6748: stda(stream_data_addr+24, ASI_CONTROL, 0);
6749:
6750: /* Then write the stream data register to each block in the page */
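	/* (MXCC_STREAM_C makes the stream operation cache-coherent) */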
6751: v |= MXCC_STREAM_C;
6752: for (offset = 0; offset < NBPG; offset += MXCC_STREAM_BLKSZ) {
6753: stda(MXCC_STREAM_DST, ASI_CONTROL, v | offset);
6754: }
6755: }
6756:
6757: /*
6758: * HyperSPARC/RT625 specific version of pmap_zero_page
6759: */
6760: void
6761: pmap_zero_page_hypersparc(pa)
6762: paddr_t pa;
6763: {
6764: u_int pfn = atop(pa);
6765: struct pvlist *pv;
6766: caddr_t va;
6767: int pte;
6768: int offset;
6769:
6770: /*
6771: * We still have to map the page, since ASI_BLOCKFILL
6772: * takes virtual addresses. This also means we have to
6773: * consider cache aliasing; therefore we still need
6774: * to flush the cache here. All we gain is the speed-up
6775: 	 * in the zero-fill loop itself.
6776: */
6777: if ((pv = pvhead(pfn)) != NULL) {
6778: /*
6779: * The following might not be necessary since the page
6780: * is being cleared because it is about to be allocated,
6781: * i.e., is in use by no one.
6782: */
6783: if (CACHEINFO.c_vactype != VAC_NONE)
6784: pv_flushcache(pv);
6785: }
6786: pte = SRMMU_TEPTE | SRMMU_PG_C | PPROT_N_RWX | (pfn << SRMMU_PPNSHIFT);
6787:
6788: va = vpage[0];
6789: setpgt4m(vpage_pte[0], pte);
6790: for (offset = 0; offset < NBPG; offset += 32) {
6791: sta(va + offset, ASI_BLOCKFILL, 0);
6792: }
6793: /* Remove temporary mapping */
1.199.4.14 thorpej 6794: tlb_flush_page((int)va, getcontext4m());
1.199.4.2 pk 6795: setpgt4m(vpage_pte[0], SRMMU_TEINVALID);
6796: }
6797:
6798: /*
6799: * Copy the given MI physical source page to its destination.
6800: *
6801: * We avoid stomping on the cache as above (with same `XXX' note).
6802: * We must first flush any write-back cache for the source page.
6803: * We go ahead and stomp on the kernel's virtual cache for the
6804: * source page, since the cache can read memory MUCH faster than
6805: * the processor.
6806: */
6807: void
6808: pmap_copy_page4m(src, dst)
6809: paddr_t src, dst;
6810: {
6811: struct pvlist *pv;
6812: caddr_t sva, dva;
6813: u_int srcpfn, dstpfn;
6814: int spte, dpte;
6815:
6816: srcpfn = atop(src);
6817: dstpfn = atop(dst);
6818: if ((pv = pvhead(srcpfn)) != NULL) {
6819: if (CACHEINFO.c_vactype == VAC_WRITEBACK)
6820: pv_flushcache(pv);
6821: }
6822:
6823: spte = SRMMU_TEPTE | SRMMU_PG_C | PPROT_N_RX |
6824: (srcpfn << SRMMU_PPNSHIFT);
6825:
6826: if ((pv = pvhead(dstpfn)) != NULL) {
6827: /* similar `might not be necessary' comment applies */
6828: if (CACHEINFO.c_vactype != VAC_NONE)
6829: pv_flushcache(pv);
6830: else
6831: pcache_flush_page(dst, 1);
6832: }
6833:
6834: dpte = SRMMU_TEPTE | PPROT_N_RWX | (dstpfn << SRMMU_PPNSHIFT);
6835: if (cpuinfo.flags & CPUFLG_CACHE_MANDATORY)
6836: dpte |= SRMMU_PG_C;
6837:
6838: sva = vpage[0];
6839: dva = vpage[1];
6840: setpgt4m(vpage_pte[0], spte);
6841: setpgt4m(vpage_pte[1], dpte);
6842: qcopy(sva, dva, NBPG); /* loads cache, so we must ... */
1.199.4.12 thorpej 6843: cache_flush_page((vaddr_t)sva, getcontext4m());
1.199.4.14 thorpej 6844: tlb_flush_page((int)sva, getcontext4m());
1.199.4.2 pk 6845: setpgt4m(vpage_pte[0], SRMMU_TEINVALID);
1.199.4.14 thorpej 6846: tlb_flush_page((int)dva, getcontext4m());
1.199.4.2 pk 6847: setpgt4m(vpage_pte[1], SRMMU_TEINVALID);
6848: }
6849:
6850: /*
6851: * Viking/MXCC specific version of pmap_copy_page
6852: */
6853: void
6854: pmap_copy_page_viking_mxcc(src, dst)
6855: paddr_t src, dst;
6856: {
6857: u_int offset;
6858: u_int64_t v1 = (u_int64_t)src;
6859: u_int64_t v2 = (u_int64_t)dst;
6860:
6861: /* Enable cache-coherency */
6862: v1 |= MXCC_STREAM_C;
6863: v2 |= MXCC_STREAM_C;
6864:
6865: /* Copy through stream data register */
6866: for (offset = 0; offset < NBPG; offset += MXCC_STREAM_BLKSZ) {
6867: stda(MXCC_STREAM_SRC, ASI_CONTROL, v1 | offset);
6868: stda(MXCC_STREAM_DST, ASI_CONTROL, v2 | offset);
6869: }
6870: }
6871:
6872: /*
6873: * HyperSPARC/RT625 specific version of pmap_copy_page
6874: */
6875: void
6876: pmap_copy_page_hypersparc(src, dst)
6877: paddr_t src, dst;
6878: {
6879: struct pvlist *pv;
6880: caddr_t sva, dva;
6881: u_int srcpfn, dstpfn;
6882: int spte, dpte;
6883: int offset;
6884:
6885: /*
6886: * We still have to map the pages, since ASI_BLOCKCOPY
6887: * takes virtual addresses. This also means we have to
6888: * consider cache aliasing; therefore we still need
6889: * to flush the cache here. All we gain is the speed-up
6890: 	 * in the copy loop itself.
6891: */
6892:
6893: srcpfn = atop(src);
6894: dstpfn = atop(dst);
6895: if ((pv = pvhead(srcpfn)) != NULL) {
6896: if (CACHEINFO.c_vactype == VAC_WRITEBACK)
6897: pv_flushcache(pv);
6898: }
6899:
6900: spte = SRMMU_TEPTE | SRMMU_PG_C | PPROT_N_RX |
6901: (srcpfn << SRMMU_PPNSHIFT);
6902:
6903: if ((pv = pvhead(dstpfn)) != NULL) {
6904: /* similar `might not be necessary' comment applies */
6905: if (CACHEINFO.c_vactype != VAC_NONE)
6906: pv_flushcache(pv);
6907: }
6908:
6909: dpte = SRMMU_TEPTE | SRMMU_PG_C | PPROT_N_RWX |
6910: (dstpfn << SRMMU_PPNSHIFT);
6911:
6912: sva = vpage[0];
6913: dva = vpage[1];
6914: setpgt4m(vpage_pte[0], spte);
6915: setpgt4m(vpage_pte[1], dpte);
6916:
6917: for (offset = 0; offset < NBPG; offset += 32) {
6918: sta(dva + offset, ASI_BLOCKCOPY, sva + offset);
6919: }
6920:
1.199.4.14 thorpej 6921: tlb_flush_page((int)sva, getcontext4m());
1.199.4.2 pk 6922: setpgt4m(vpage_pte[0], SRMMU_TEINVALID);
1.199.4.14 thorpej 6923: tlb_flush_page((int)dva, getcontext4m());
1.199.4.2 pk 6924: setpgt4m(vpage_pte[1], SRMMU_TEINVALID);
6925: }
1.199.4.9 nathanw 6926: #endif /* SUN4M || SUN4D */
1.199.4.2 pk 6927:
6928: /*
6929: * Turn a cdevsw d_mmap value into a byte address for pmap_enter.
6930: * XXX this should almost certainly be done differently, and
6931: * elsewhere, or even not at all
6932: */
6933: paddr_t
6934: pmap_phys_address(x)
6935: int x;
6936: {
6937:
6938: return ((paddr_t)x);
6939: }
6940:
6941: /*
6942: * Turn off cache for a given (va, number of pages).
6943: *
6944: * We just assert PG_NC for each PTE; the addresses must reside
6945: * in locked kernel space. A cache flush is also done.
6946: */
6947: void
6948: kvm_uncache(va, npages)
6949: caddr_t va;
6950: int npages;
6951: {
6952: struct pvlist *pv;
6953: int pte;
6954: u_int pfn;
6955:
1.199.4.9 nathanw 6956: if (CPU_HAS_SRMMU) {
6957: #if defined(SUN4M) || defined(SUN4D)
1.199.4.2 pk 6958: for (; --npages >= 0; va += NBPG) {
6959: pte = getpte4m((vaddr_t) va);
6960: if ((pte & SRMMU_TETYPE) != SRMMU_TEPTE)
6961: panic("kvm_uncache: table entry not pte");
6962:
6963: if ((pte & SRMMU_PGTYPE) == PG_SUN4M_OBMEM) {
6964: pfn = (pte & SRMMU_PPNMASK) >> SRMMU_PPNSHIFT;
6965: if ((pv = pvhead(pfn)) != NULL) {
6966: pv_uncache(pv);
6967: return;
6968: }
1.199.4.12 thorpej 6969: cache_flush_page((vaddr_t)va, 0);
1.199.4.2 pk 6970: }
6971:
6972: pte &= ~SRMMU_PG_C;
6973: setpte4m((vaddr_t)va, pte);
6974: }
6975: #endif
6976: } else {
6977: #if defined(SUN4) || defined(SUN4C)
6978: for (; --npages >= 0; va += NBPG) {
6979: pte = getpte4(va);
6980: if ((pte & PG_V) == 0)
6981: panic("kvm_uncache !pg_v");
6982:
6983: if ((pte & PG_TYPE) == PG_OBMEM) {
6984: pfn = pte & PG_PFNUM;
6985: if ((pv = pvhead(pfn)) != NULL) {
6986: pv_uncache(pv);
6987: return;
6988: }
1.199.4.12 thorpej 6989: cache_flush_page((vaddr_t)va, 0);
1.199.4.2 pk 6990: }
6991: pte |= PG_NC;
6992: setpte4(va, pte);
6993: }
6994: #endif
6995: }
6996: }
6997:
6998: /*
6999: * Turn on IO cache for a given (va, number of pages).
7000: *
7001:  * We just assert PG_IOC for each PTE; the addresses must reside
7002:  * in locked kernel space.
7003: */
7004: void
7005: kvm_iocache(va, npages)
7006: caddr_t va;
7007: int npages;
7008: {
7009:
1.199.4.9 nathanw 7010: #if defined(SUN4M)
1.199.4.2 pk 7011: if (CPU_ISSUN4M) /* %%%: Implement! */
7012: panic("kvm_iocache: 4m iocache not implemented");
7013: #endif
1.199.4.9 nathanw 7014: #if defined(SUN4D)
7015: if (CPU_ISSUN4D) /* %%%: Implement! */
7016: panic("kvm_iocache: 4d iocache not implemented");
7017: #endif
1.199.4.2 pk 7018: #if defined(SUN4) || defined(SUN4C)
7019: for (; --npages >= 0; va += NBPG) {
7020: int pte = getpte4(va);
7021: if ((pte & PG_V) == 0)
7022: panic("kvm_iocache !pg_v");
7023: pte |= PG_IOC;
7024: setpte4(va, pte);
7025: }
7026: #endif
7027: }
7028:
7029: int
7030: pmap_count_ptes(pm)
7031: struct pmap *pm;
7032: {
1.199.4.3 nathanw 7033: int idx, vs, total;
1.199.4.2 pk 7034: struct regmap *rp;
7035: struct segmap *sp;
7036:
7037: if (pm == pmap_kernel()) {
7038: rp = &pm->pm_regmap[NUREG];
7039: idx = NKREG;
7040: } else {
7041: rp = pm->pm_regmap;
7042: idx = NUREG;
7043: }
1.199.4.3 nathanw 7044: for (total = 0; idx;) {
7045: if ((sp = rp[--idx].rg_segmap) == NULL) {
7046: continue;
7047: }
7048: for (vs = 0; vs < NSEGRG; vs++) {
7049: total += sp[vs].sg_npte;
7050: }
7051: }
1.199.4.2 pk 7052: pm->pm_stats.resident_count = total;
7053: return (total);
7054: }
7055:
7056: /*
7057: * Find first virtual address >= *va that is
7058: * least likely to cause cache aliases.
7059: * (This will just seg-align mappings.)
7060: */
7061: void
7062: pmap_prefer(foff, vap)
7063: vaddr_t foff;
7064: vaddr_t *vap;
7065: {
7066: vaddr_t va = *vap;
7067: long d, m;
7068:
7069: if (VA_INHOLE(va))
7070: va = MMU_HOLE_END;
7071:
7072: m = CACHE_ALIAS_DIST;
7073: if (m == 0) /* m=0 => no cache aliasing */
7074: return;
7075:
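	/*
	 * Advance va to the next address congruent to foff modulo
	 * the alias distance: afterwards (va + d) % m == foff % m.
	 */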
7076: d = foff - va;
7077: d &= (m - 1);
7078: *vap = va + d;
7079: }
7080:
7081: void
7082: pmap_redzone()
7083: {
7084: pmap_remove(pmap_kernel(), KERNBASE, KERNBASE+NBPG);
7085: }
7086:
7087: /*
7088: * Activate the address space for the specified process. If the
7089: * process is the current process, load the new MMU context.
7090: */
7091: void
1.199.4.8 nathanw 7092: pmap_activate(l)
7093: struct lwp *l;
1.199.4.2 pk 7094: {
1.199.4.8 nathanw 7095: pmap_t pmap = l->l_proc->p_vmspace->vm_map.pmap;
1.199.4.2 pk 7096: int s;
7097:
7098: /*
7099: * This is essentially the same thing that happens in cpu_switch()
7100: * when the newly selected process is about to run, except that we
7101: * have to make sure to clean the register windows before we set
7102: * the new context.
7103: */
7104:
7105: s = splvm();
1.199.4.8 nathanw 7106: if (l->l_proc == curproc) {
1.199.4.2 pk 7107: write_user_windows();
7108: if (pmap->pm_ctx == NULL) {
7109: ctx_alloc(pmap); /* performs setcontext() */
7110: } else {
7111: /* Do any cache flush needed on context switch */
7112: (*cpuinfo.pure_vcache_flush)();
7113: setcontext(pmap->pm_ctxnum);
7114: }
7115: }
7116: splx(s);
7117: }
7118:
7119: /*
7120: * Deactivate the address space of the specified process.
7121: */
7122: void
1.199.4.8 nathanw 7123: pmap_deactivate(l)
7124: struct lwp *l;
1.199.4.2 pk 7125: {
7126: }
7127:
7128: #ifdef DEBUG
7129: /*
7130: * Check consistency of a pmap (time consuming!).
7131: */
7132: void
7133: pm_check(s, pm)
7134: char *s;
7135: struct pmap *pm;
7136: {
7137: if (pm == pmap_kernel())
7138: pm_check_k(s, pm);
7139: else
7140: pm_check_u(s, pm);
7141: }
7142:
7143: void
7144: pm_check_u(s, pm)
7145: char *s;
7146: struct pmap *pm;
7147: {
7148: struct regmap *rp;
7149: struct segmap *sp;
1.199.4.14 thorpej 7150: int cpu, n, vs, vr, j, m, *pte;
7151:
7152: cpu = cpuinfo.ci_cpuid;
1.199.4.2 pk 7153:
7154: if (pm->pm_regmap == NULL)
1.199.4.14 thorpej 7155: panic("%s: cpu %d: CHK(pmap %p): no region mapping",
7156: s, cpu, pm);
1.199.4.2 pk 7157:
1.199.4.9 nathanw 7158: #if defined(SUN4M) || defined(SUN4D)
7159: if (CPU_HAS_SRMMU &&
1.199.4.14 thorpej 7160: (pm->pm_reg_ptps[cpu] == NULL ||
7161: pm->pm_reg_ptps_pa[cpu] != VA2PA((caddr_t)pm->pm_reg_ptps[cpu])))
7162: panic("%s: cpu %d: CHK(pmap %p): no SRMMU region table or bad pa: "
1.199.4.2 pk 7163: "tblva=%p, tblpa=0x%x",
1.199.4.14 thorpej 7164: s, cpu, pm, pm->pm_reg_ptps[cpu], pm->pm_reg_ptps_pa[cpu]);
1.199.4.2 pk 7165:
1.199.4.9 nathanw 7166: if (CPU_HAS_SRMMU && pm->pm_ctx != NULL &&
1.199.4.14 thorpej 7167: (cpuinfo.ctx_tbl[pm->pm_ctxnum] != ((VA2PA((caddr_t)pm->pm_reg_ptps[cpu])
1.199.4.2 pk 7168: >> SRMMU_PPNPASHIFT) |
7169: SRMMU_TEPTD)))
1.199.4.14 thorpej 7170: panic("%s: cpu %d: CHK(pmap %p): SRMMU region table at 0x%x not installed "
7171: "for context %d", s, cpu, pm, pm->pm_reg_ptps_pa[cpu], pm->pm_ctxnum);
1.199.4.2 pk 7172: #endif
7173:
7174: for (vr = 0; vr < NUREG; vr++) {
7175: rp = &pm->pm_regmap[vr];
7176: if (rp->rg_nsegmap == 0)
7177: continue;
7178: if (rp->rg_segmap == NULL)
1.199.4.14 thorpej 7179: panic("%s: cpu %d: CHK(vr %d): nsegmap = %d; sp==NULL",
7180: s, cpu, vr, rp->rg_nsegmap);
1.199.4.9 nathanw 7181: #if defined(SUN4M) || defined(SUN4D)
7182: if (CPU_HAS_SRMMU && rp->rg_seg_ptps == NULL)
1.199.4.14 thorpej 7183: panic("%s: cpu %d: CHK(vr %d): nsegmap=%d; no SRMMU segment table",
7184: s, cpu, vr, rp->rg_nsegmap);
1.199.4.9 nathanw 7185: if (CPU_HAS_SRMMU &&
1.199.4.14 thorpej 7186: pm->pm_reg_ptps[cpu][vr] != ((VA2PA((caddr_t)rp->rg_seg_ptps) >>
1.199.4.2 pk 7187: SRMMU_PPNPASHIFT) | SRMMU_TEPTD))
1.199.4.14 thorpej 7188: panic("%s: cpu %d: CHK(vr %d): SRMMU segtbl not installed",
7189: s, cpu, vr);
1.199.4.2 pk 7190: #endif
7191: if ((unsigned int)rp < KERNBASE)
1.199.4.14 thorpej 7192: panic("%s: cpu %d: rp=%p", s, cpu, rp);
1.199.4.2 pk 7193: n = 0;
7194: for (vs = 0; vs < NSEGRG; vs++) {
7195: sp = &rp->rg_segmap[vs];
7196: if ((unsigned int)sp < KERNBASE)
1.199.4.14 thorpej 7197: panic("%s: cpu %d: sp=%p", s, cpu, sp);
1.199.4.2 pk 7198: if (sp->sg_npte != 0) {
7199: n++;
7200: if (sp->sg_pte == NULL)
1.199.4.14 thorpej 7201: panic("%s: cpu %d: CHK(vr %d, vs %d): npte=%d, "
7202: "pte=NULL", s, cpu, vr, vs, sp->sg_npte);
1.199.4.9 nathanw 7203: #if defined(SUN4M) || defined(SUN4D)
7204: if (CPU_HAS_SRMMU &&
1.199.4.2 pk 7205: rp->rg_seg_ptps[vs] !=
7206: ((VA2PA((caddr_t)sp->sg_pte)
7207: >> SRMMU_PPNPASHIFT) |
7208: SRMMU_TEPTD))
1.199.4.14 thorpej 7209: panic("%s: cpu %d: CHK(vr %d, vs %d): SRMMU page "
7210: "table not installed correctly",
7211: s, cpu, vr, vs);
1.199.4.2 pk 7212: #endif
7213: pte=sp->sg_pte;
7214: m = 0;
7215: for (j=0; j<NPTESG; j++,pte++)
1.199.4.9 nathanw 7216: if ((CPU_HAS_SRMMU
1.199.4.2 pk 7217: ?((*pte & SRMMU_TETYPE) == SRMMU_TEPTE)
7218: :(*pte & PG_V)))
7219: m++;
7220: if (m != sp->sg_npte)
1.199.4.14 thorpej 7221: printf("%s: cpu %d: user CHK(vr %d, vs %d): "
1.199.4.2 pk 7222: "npte(%d) != # valid(%d)\n",
1.199.4.14 thorpej 7223: s, cpu, vr, vs, sp->sg_npte, m);
1.199.4.2 pk 7224: }
7225: }
7226: if (n != rp->rg_nsegmap)
1.199.4.14 thorpej 7227: panic("%s: cpu %d: CHK(vr %d): inconsistent "
1.199.4.2 pk 7228: "# of pte's: %d, should be %d",
1.199.4.14 thorpej 7229: s, cpu, vr, rp->rg_nsegmap, n);
1.199.4.2 pk 7230: }
7231: return;
7232: }
7233:
7234: void
7235: pm_check_k(s, pm) /* Note: not as extensive as pm_check_u. */
7236: char *s;
7237: struct pmap *pm;
7238: {
7239: struct regmap *rp;
1.199.4.14 thorpej 7240: int cpu, vr, vs, n;
7241:
1.199.4.17! martin 7242: cpu = cpu_number();
1.199.4.2 pk 7243:
7244: if (pm->pm_regmap == NULL)
7245: panic("%s: CHK(pmap %p): no region mapping", s, pm);
7246:
1.199.4.9 nathanw 7247: #if defined(SUN4M) || defined(SUN4D)
7248: if (CPU_HAS_SRMMU &&
1.199.4.14 thorpej 7249: (pm->pm_reg_ptps[cpu] == NULL ||
7250: pm->pm_reg_ptps_pa[cpu] != VA2PA((caddr_t)pm->pm_reg_ptps[cpu])))
7251: panic("%s: cpu %d: CHK(pmap %p): no SRMMU region table or bad pa: tblva=%p, tblpa=0x%x",
7252: s, cpu, pm, pm->pm_reg_ptps[cpu], pm->pm_reg_ptps_pa[cpu]);
1.199.4.2 pk 7253:
1.199.4.9 nathanw 7254: if (CPU_HAS_SRMMU &&
1.199.4.14 thorpej 7255: (cpuinfo.ctx_tbl[0] != ((VA2PA((caddr_t)pm->pm_reg_ptps[cpu]) >>
1.199.4.2 pk 7256: SRMMU_PPNPASHIFT) | SRMMU_TEPTD)))
1.199.4.14 thorpej 7257: panic("%s: cpu %d: CHK(pmap %p): SRMMU region table at 0x%x not installed "
7258: "for context %d", s, cpu, pm, pm->pm_reg_ptps_pa[cpu], 0);
1.199.4.2 pk 7259: #endif
7260: for (vr = NUREG; vr < NUREG+NKREG; vr++) {
7261: rp = &pm->pm_regmap[vr];
7262: if (rp->rg_segmap == NULL)
1.199.4.14 thorpej 7263: panic("%s: cpu %d: CHK(vr %d): nsegmap = %d; sp==NULL",
7264: s, cpu, vr, rp->rg_nsegmap);
1.199.4.2 pk 7265: if (rp->rg_nsegmap == 0)
7266: continue;
1.199.4.9 nathanw 7267: #if defined(SUN4M) || defined(SUN4D)
7268: if (CPU_HAS_SRMMU && rp->rg_seg_ptps == NULL)
1.199.4.14 thorpej 7269: panic("%s: cpu %d: CHK(vr %d): nsegmap=%d; no SRMMU segment table",
7270: s, cpu, vr, rp->rg_nsegmap);
7271:
7272: if (CPU_HAS_SRMMU && vr != NUREG /* 1st kseg is per cpu */ &&
7273: pm->pm_reg_ptps[cpu][vr] != ((VA2PA((caddr_t)rp->rg_seg_ptps) >>
1.199.4.2 pk 7274: SRMMU_PPNPASHIFT) | SRMMU_TEPTD))
1.199.4.14 thorpej 7275: panic("%s: cpu %d: CHK(vr %d): SRMMU segtbl not installed",
7276: s, cpu, vr);
1.199.4.2 pk 7277: #endif
1.199.4.9 nathanw 7278: if (CPU_HAS_SRMMU) {
1.199.4.2 pk 7279: n = NSEGRG;
7280: } else {
7281: for (n = 0, vs = 0; vs < NSEGRG; vs++) {
7282: if (rp->rg_segmap[vs].sg_npte)
7283: n++;
7284: }
7285: }
7286: if (n != rp->rg_nsegmap)
1.199.4.14 thorpej 7287: printf("%s: cpu %d: kernel CHK(vr %d): inconsistent "
1.199.4.2 pk 7288: "# of pte's: %d, should be %d\n",
1.199.4.14 thorpej 7289: s, cpu, vr, rp->rg_nsegmap, n);
1.199.4.2 pk 7290: }
7291: return;
7292: }
7293: #endif
7294:
7295: /*
7296: * Return the number of disk blocks that pmap_dumpmmu() will dump.
7297: */
7298: int
7299: pmap_dumpsize()
7300: {
7301: int sz;
7302:
7303: sz = ALIGN(sizeof(kcore_seg_t)) + ALIGN(sizeof(cpu_kcore_hdr_t));
7304: sz += npmemarr * sizeof(phys_ram_seg_t);
7305: sz += sizeof(kernel_segmap_store);
7306:
1.199.4.9 nathanw 7307: if (CPU_ISSUN4 || CPU_ISSUN4C)
1.199.4.2 pk 7308: /* For each pmeg in the MMU, we'll write NPTESG PTEs. */
7309: sz += (seginval + 1) * NPTESG * sizeof(int);
7310:
7311: return btodb(sz + DEV_BSIZE - 1);
7312: }
7313:
7314: /*
7315: * Write the core dump headers and MD data to the dump device.
7316: * We dump the following items:
7317: *
7318: * kcore_seg_t MI header defined in <sys/kcore.h>)
7319: * cpu_kcore_hdr_t MD header defined in <machine/kcore.h>)
7320: * phys_ram_seg_t[npmemarr] physical memory segments
7321: * segmap_t[NKREG*NSEGRG] the kernel's segment map
7322: * the MMU pmegs on sun4/sun4c
7323: */
7324: int
7325: pmap_dumpmmu(dump, blkno)
7326: daddr_t blkno;
7327: int (*dump) __P((dev_t, daddr_t, caddr_t, size_t));
7328: {
7329: kcore_seg_t *ksegp;
7330: cpu_kcore_hdr_t *kcpup;
7331: phys_ram_seg_t memseg;
7332: int error = 0;
7333: int i, memsegoffset, segmapoffset, pmegoffset;
7334: int buffer[dbtob(1) / sizeof(int)];
7335: int *bp, *ep;
7336: #if defined(SUN4C) || defined(SUN4)
7337: int pmeg;
7338: #endif
7339:
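/*
 * EXPEDITE() stages `n' bytes from `p' into the one-disk-block
 * buffer[], writing the buffer to the dump device each time it fills.
 */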
7340: #define EXPEDITE(p,n) do { \
7341: int *sp = (int *)(p); \
7342: int sz = (n); \
7343: while (sz > 0) { \
7344: *bp++ = *sp++; \
7345: if (bp >= ep) { \
7346: error = (*dump)(dumpdev, blkno, \
7347: (caddr_t)buffer, dbtob(1)); \
7348: if (error != 0) \
7349: return (error); \
7350: ++blkno; \
7351: bp = buffer; \
7352: } \
7353: sz -= 4; \
7354: } \
7355: } while (0)
7356:
7357: setcontext(0);
7358:
7359: /* Setup bookkeeping pointers */
7360: bp = buffer;
7361: ep = &buffer[sizeof(buffer) / sizeof(buffer[0])];
7362:
7363: /* Fill in MI segment header */
7364: ksegp = (kcore_seg_t *)bp;
7365: CORE_SETMAGIC(*ksegp, KCORE_MAGIC, MID_MACHINE, CORE_CPU);
7366: ksegp->c_size = dbtob(pmap_dumpsize()) - ALIGN(sizeof(kcore_seg_t));
7367:
7368: /* Fill in MD segment header (interpreted by MD part of libkvm) */
7369: kcpup = (cpu_kcore_hdr_t *)((int)bp + ALIGN(sizeof(kcore_seg_t)));
7370: kcpup->cputype = cputyp;
7371: kcpup->kernbase = KERNBASE;
7372: kcpup->nmemseg = npmemarr;
7373: kcpup->memsegoffset = memsegoffset = ALIGN(sizeof(cpu_kcore_hdr_t));
7374: kcpup->nsegmap = NKREG*NSEGRG;
7375: kcpup->segmapoffset = segmapoffset =
7376: memsegoffset + npmemarr * sizeof(phys_ram_seg_t);
7377:
1.199.4.9 nathanw 7378: kcpup->npmeg = (CPU_ISSUN4 || CPU_ISSUN4C) ? seginval + 1 : 0;
1.199.4.2 pk 7379: kcpup->pmegoffset = pmegoffset =
7380: segmapoffset + kcpup->nsegmap * sizeof(struct segmap);
7381:
7382: /* Note: we have assumed everything fits in buffer[] so far... */
7383: bp = (int *)((int)kcpup + ALIGN(sizeof(cpu_kcore_hdr_t)));
7384:
7385: #if 0
7386: /* Align storage for upcoming quad-aligned segment array */
7387: while (bp != (int *)ALIGN(bp)) {
7388: int dummy = 0;
7389: EXPEDITE(&dummy, 4);
7390: }
7391: #endif
7392:
7393: for (i = 0; i < npmemarr; i++) {
7394: memseg.start = pmemarr[i].addr;
7395: memseg.size = pmemarr[i].len;
7396: EXPEDITE(&memseg, sizeof(phys_ram_seg_t));
7397: }
7398:
7399: EXPEDITE(&kernel_segmap_store, sizeof(kernel_segmap_store));
7400:
1.199.4.9 nathanw 7401: if (CPU_HAS_SRMMU)
1.199.4.2 pk 7402: goto out;
7403:
7404: #if defined(SUN4C) || defined(SUN4)
7405: /*
7406: * dump page table entries
7407: *
7408:  * We dump each pmeg in order (by pmeg number). Since the MMU
7409:  * translates a virtual segment through its pmeg, we point one
7410:  * fixed, otherwise unused virtual segment at each pmeg in turn
7411:  * and read that pmeg's PTEs through getpte() on addresses within
7412:  * that segment.
7413: */
7414:
7415: /*
7416: * Go through the pmegs and dump each one.
7417: */
7418: for (pmeg = 0; pmeg <= seginval; ++pmeg) {
7419: int va = 0;
7420:
7421: setsegmap(va, pmeg);
7422: i = NPTESG;
7423: do {
7424: int pte = getpte4(va);
7425: EXPEDITE(&pte, sizeof(pte));
7426: va += NBPG;
7427: } while (--i > 0);
7428: }
7429: setsegmap(0, seginval);
7430: #endif
7431:
7432: out:
7433: if (bp != buffer)
7434: error = (*dump)(dumpdev, blkno++, (caddr_t)buffer, dbtob(1));
7435:
7436: return (error);
7437: }
7438:
7439: /*
7440: * Helper function for debuggers.
7441: */
7442: void
7443: pmap_writetext(dst, ch)
7444: unsigned char *dst;
7445: int ch;
7446: {
7447: int s, pte0, pte, ctx;
7448: vaddr_t va;
7449:
7450: s = splvm();
7451: va = (unsigned long)dst & (~PGOFSET);
1.199.4.12 thorpej 7452: cache_flush(dst, 1);
1.199.4.2 pk 7453:
7454: ctx = getcontext();
7455: setcontext(0);
7456:
1.199.4.9 nathanw 7457: #if defined(SUN4M) || defined(SUN4D)
7458: if (CPU_HAS_SRMMU) {
1.199.4.2 pk 7459: pte0 = getpte4m(va);
7460: if ((pte0 & SRMMU_TETYPE) != SRMMU_TEPTE) {
7461: splx(s);
7462: return;
7463: }
7464: pte = pte0 | PPROT_WRITE;
7465: setpte4m(va, pte);
7466: *dst = (unsigned char)ch;
7467: setpte4m(va, pte0);
7468:
7469: }
7470: #endif
7471: #if defined(SUN4) || defined(SUN4C)
7472: if (CPU_ISSUN4C || CPU_ISSUN4) {
7473: pte0 = getpte4(va);
7474: if ((pte0 & PG_V) == 0) {
7475: splx(s);
7476: return;
7477: }
7478: pte = pte0 | PG_W;
7479: setpte4(va, pte);
7480: *dst = (unsigned char)ch;
7481: setpte4(va, pte0);
7482: }
7483: #endif
1.199.4.12 thorpej 7484: cache_flush(dst, 1);
1.199.4.2 pk 7485: setcontext(ctx);
7486: splx(s);
7487: }
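/*
 * Illustrative use of pmap_writetext() above (hypothetical values, not
 * from this file): a debugger patching a 4-byte instruction in
 * write-protected kernel text one byte at a time:
 *
 *	unsigned char *insn = ...;	hypothetical target address
 *	u_int32_t op = ...;		hypothetical opcode
 *	for (i = 0; i < 4; i++)
 *		pmap_writetext(insn + i, (op >> (24 - 8 * i)) & 0xff);
 */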
7488:
7489: #ifdef EXTREME_DEBUG
7490:
7491: static void test_region __P((int, int, int));
7492:
7493: void
7494: debug_pagetables()
7495: {
7496: int i;
7497: int *regtbl;
7498: int te;
7499:
7500: printf("\nncontext=%d. ",ncontext);
7501: printf("Context table is at va 0x%x. Level 0 PTP: 0x%x\n",
7502: cpuinfo.ctx_tbl, cpuinfo.ctx_tbl[0]);
7503: printf("Context 0 region table is at va 0x%x, pa 0x%x. Contents:\n",
7504: pmap_kernel()->pm_reg_ptps[0], pmap_kernel()->pm_reg_ptps_pa[0]);
7505:
7506: regtbl = pmap_kernel()->pm_reg_ptps[0];
7507:
7508: printf("PROM vector is at 0x%x\n",promvec);
7509: printf("PROM reboot routine is at 0x%x\n",promvec->pv_reboot);
7510: printf("PROM abort routine is at 0x%x\n",promvec->pv_abort);
7511: printf("PROM halt routine is at 0x%x\n",promvec->pv_halt);
7512:
7513: printf("Testing region 0xfe: ");
7514: test_region(0xfe,0,16*1024*1024);
7515: printf("Testing region 0xff: ");
7516: test_region(0xff,0,16*1024*1024);
7517: printf("Testing kernel region 0x%x: ", VA_VREG(KERNBASE));
7518: test_region(VA_VREG(KERNBASE), 4096, avail_start);
7519: cngetc();
7520:
7521: for (i = 0; i < SRMMU_L1SIZE; i++) {
7522: te = regtbl[i];
7523: if ((te & SRMMU_TETYPE) == SRMMU_TEINVALID)
7524: continue;
7525: printf("Region 0x%x: PTE=0x%x <%s> L2PA=0x%x kernL2VA=0x%x\n",
7526: i, te, ((te & SRMMU_TETYPE) == SRMMU_TEPTE ? "pte" :
7527: ((te & SRMMU_TETYPE) == SRMMU_TEPTD ? "ptd" :
7528: ((te & SRMMU_TETYPE) == SRMMU_TEINVALID ?
7529: "invalid" : "reserved"))),
7530: (te & ~0x3) << SRMMU_PPNPASHIFT,
7531: pmap_kernel()->pm_regmap[i].rg_seg_ptps);
7532: }
7533: printf("Press q to halt...\n");
7534: if (cngetc()=='q')
7535: callrom();
7536: }
7537:
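/*
 * VA2PAsw: translate `addr' in context `ctx' to a physical address by
 * walking the SRMMU tables in software, returning the final table
 * entry via `pte'.  The masks used below reflect the sun4m VA
 * decomposition:
 *
 *	bits 31-24	region index  (VA_VREG, L1)
 *	bits 23-18	segment index (VA_VSEG, L2)
 *	bits 17-12	page index    (VA_VPG,  L3)
 *	bits 11-0	byte offset within the 4KB page
 */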
7538: static u_int
7539: VA2PAsw(ctx, addr, pte)
7540: int ctx;
7541: caddr_t addr;
7542: int *pte;
7543: {
7544: int *curtbl;
7545: int curpte;
7546:
7547: #ifdef EXTREME_EXTREME_DEBUG
7548: printf("Looking up addr 0x%x in context 0x%x\n",addr,ctx);
7549: #endif
7550: /* L0 */
7551: *pte = curpte = cpuinfo.ctx_tbl[ctx];
7552: #ifdef EXTREME_EXTREME_DEBUG
7553: 	printf("Got L0 pte 0x%x\n", curpte);
7554: #endif
7555: if ((curpte & SRMMU_TETYPE) == SRMMU_TEPTE) {
7556: return (((curpte & SRMMU_PPNMASK) << SRMMU_PPNPASHIFT) |
7557: ((u_int)addr & 0xffffffff));
7558: }
7559: if ((curpte & SRMMU_TETYPE) != SRMMU_TEPTD) {
7560: printf("Bad context table entry 0x%x for context 0x%x\n",
7561: curpte, ctx);
7562: return 0;
7563: }
7564: /* L1 */
7565: 	curtbl = (int *)(((curpte & ~0x3) << 4) | KERNBASE); /* table PA => kernel VA */
7566: *pte = curpte = curtbl[VA_VREG(addr)];
7567: #ifdef EXTREME_EXTREME_DEBUG
7568: printf("L1 table at 0x%x.\nGot L1 pte 0x%x\n",curtbl,curpte);
7569: #endif
7570: if ((curpte & SRMMU_TETYPE) == SRMMU_TEPTE)
7571: return (((curpte & SRMMU_PPNMASK) << SRMMU_PPNPASHIFT) |
7572: ((u_int)addr & 0xffffff));
7573: if ((curpte & SRMMU_TETYPE) != SRMMU_TEPTD) {
7574: printf("Bad region table entry 0x%x for region 0x%x\n",
7575: curpte, VA_VREG(addr));
7576: return 0;
7577: }
7578: /* L2 */
7579: 	curtbl = (int *)(((curpte & ~0x3) << 4) | KERNBASE); /* table PA => kernel VA */
7580: *pte = curpte = curtbl[VA_VSEG(addr)];
7581: #ifdef EXTREME_EXTREME_DEBUG
7582: printf("L2 table at 0x%x.\nGot L2 pte 0x%x\n",curtbl,curpte);
7583: #endif
7584: if ((curpte & SRMMU_TETYPE) == SRMMU_TEPTE)
7585: return (((curpte & SRMMU_PPNMASK) << SRMMU_PPNPASHIFT) |
7586: ((u_int)addr & 0x3ffff));
7587: if ((curpte & SRMMU_TETYPE) != SRMMU_TEPTD) {
7588: printf("Bad segment table entry 0x%x for reg 0x%x, seg 0x%x\n",
7589: curpte, VA_VREG(addr), VA_VSEG(addr));
7590: return 0;
7591: }
7592: /* L3 */
7593: 	curtbl = (int *)(((curpte & ~0x3) << 4) | KERNBASE); /* table PA => kernel VA */
7594: *pte = curpte = curtbl[VA_VPG(addr)];
7595: #ifdef EXTREME_EXTREME_DEBUG
7596: printf("L3 table at 0x%x.\nGot L3 pte 0x%x\n",curtbl,curpte);
7597: #endif
7598: if ((curpte & SRMMU_TETYPE) == SRMMU_TEPTE)
7599: return (((curpte & SRMMU_PPNMASK) << SRMMU_PPNPASHIFT) |
7600: ((u_int)addr & 0xfff));
7601: else {
7602: printf("Bad L3 pte 0x%x for reg 0x%x, seg 0x%x, pg 0x%x\n",
7603: curpte, VA_VREG(addr), VA_VSEG(addr), VA_VPG(addr));
7604: return 0;
7605: }
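	/* NOTREACHED -- both branches above return */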
7606: printf("Bizarreness with address 0x%x!\n",addr);
7607: }
7608:
7609: static void test_region(reg, start, stop)
7610: int reg;
7611: int start, stop;
7612: {
7613: int i;
7614: int addr;
7615: int pte;
7616: int ptesw;
7617: /* int cnt=0;
7618: */
7619:
7620: for (i = start; i < stop; i+= NBPG) {
7621: addr = (reg << RGSHIFT) | i;
7622: pte=lda(((u_int)(addr)) | ASI_SRMMUFP_LN, ASI_SRMMUFP);
7623: if (pte) {
7624: /* printf("Valid address 0x%x\n",addr);
7625: if (++cnt == 20) {
7626: cngetc();
7627: cnt=0;
7628: }
7629: */
7630: if (VA2PA(addr) != VA2PAsw(0,addr,&ptesw)) {
7631: printf("Mismatch at address 0x%x.\n",addr);
7632: if (cngetc()=='q') break;
7633: }
7634: if (reg == VA_VREG(KERNBASE))
7635: /* kernel permissions are different */
7636: continue;
7637: if ((pte&SRMMU_PROT_MASK)!=(ptesw&SRMMU_PROT_MASK)) {
7638: printf("Mismatched protections at address "
7639: "0x%x; pte=0x%x, ptesw=0x%x\n",
7640: addr,pte,ptesw);
7641: if (cngetc()=='q') break;
7642: }
7643: }
7644: }
7645: printf("done.\n");
7646: }
7647:
7648:
7649: void print_fe_map(void)
7650: {
7651: u_int i, pte;
7652:
7653: printf("map of region 0xfe:\n");
7654: for (i = 0xfe000000; i < 0xff000000; i+=4096) {
7655: if (((pte = getpte4m(i)) & SRMMU_TETYPE) != SRMMU_TEPTE)
7656: continue;
7657: 		printf("0x%x -> 0x%x%08x (pte 0x%x)\n", i, pte >> 28,
7658: (pte & ~0xff) << 4, pte);
7659: }
7660: printf("done\n");
7661: }
7662:
7663: #endif