Annotation of src/sys/arch/sparc/sparc/pmap.c, Revision 1.47
1.47 ! pk 1: /* $NetBSD: pmap.c,v 1.46 1995/07/05 16:35:42 pk Exp $ */
1.22 deraadt 2:
1.1 deraadt 3: /*
4: * Copyright (c) 1992, 1993
5: * The Regents of the University of California. All rights reserved.
6: *
7: * This software was developed by the Computer Systems Engineering group
8: * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
9: * contributed to Berkeley.
10: *
11: * All advertising materials mentioning features or use of this software
12: * must display the following acknowledgement:
13: * This product includes software developed by the University of
14: * California, Lawrence Berkeley Laboratory.
15: *
16: * Redistribution and use in source and binary forms, with or without
17: * modification, are permitted provided that the following conditions
18: * are met:
19: * 1. Redistributions of source code must retain the above copyright
20: * notice, this list of conditions and the following disclaimer.
21: * 2. Redistributions in binary form must reproduce the above copyright
22: * notice, this list of conditions and the following disclaimer in the
23: * documentation and/or other materials provided with the distribution.
24: * 3. All advertising materials mentioning features or use of this software
25: * must display the following acknowledgement:
26: * This product includes software developed by the University of
27: * California, Berkeley and its contributors.
28: * 4. Neither the name of the University nor the names of its contributors
29: * may be used to endorse or promote products derived from this software
30: * without specific prior written permission.
31: *
32: * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
33: * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
34: * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
35: * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
36: * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
37: * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
38: * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
39: * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
40: * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
41: * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
42: * SUCH DAMAGE.
43: *
1.22 deraadt 44: * @(#)pmap.c 8.4 (Berkeley) 2/5/94
1.1 deraadt 45: */
46:
47: /*
48: * SPARC physical map management code.
49: * Does not function on multiprocessors (yet).
50: */
51:
52: #include <sys/param.h>
53: #include <sys/systm.h>
54: #include <sys/device.h>
55: #include <sys/proc.h>
1.43 pk 56: #include <sys/queue.h>
1.1 deraadt 57: #include <sys/malloc.h>
58:
59: #include <vm/vm.h>
60: #include <vm/vm_kern.h>
61: #include <vm/vm_prot.h>
62: #include <vm/vm_page.h>
63:
64: #include <machine/autoconf.h>
65: #include <machine/bsd_openprom.h>
1.19 deraadt 66: #include <machine/oldmon.h>
1.1 deraadt 67: #include <machine/cpu.h>
68: #include <machine/ctlreg.h>
69:
70: #include <sparc/sparc/asm.h>
71: #include <sparc/sparc/cache.h>
1.3 deraadt 72: #include <sparc/sparc/vaddrs.h>
1.1 deraadt 73:
74: #ifdef DEBUG
75: #define PTE_BITS "\20\40V\37W\36S\35NC\33IO\32U\31M"
76: #endif
77:
78: extern struct promvec *promvec;
79:
80: /*
81: * The SPARCstation offers us the following challenges:
82: *
83: * 1. A virtual address cache. This is, strictly speaking, not
84: * part of the architecture, but the code below assumes one.
85: * This is a write-through cache on the 4c and a write-back cache
86: * on others.
87: *
88: * 2. An MMU that acts like a cache. There is not enough space
89: * in the MMU to map everything all the time. Instead, we need
90: * to load MMU with the `working set' of translations for each
91: * process.
92: *
93: * 3. Segmented virtual and physical spaces. The upper 12 bits of
94: * a virtual address (the virtual segment) index a segment table,
95: * giving a physical segment. The physical segment selects a
96: * `Page Map Entry Group' (PMEG) and the virtual page number---the
97: * next 5 or 6 bits of the virtual address---select the particular
98: * `Page Map Entry' for the page. We call the latter a PTE and
99: * call each Page Map Entry Group a pmeg (for want of a better name).
100: *
101: * Since there are no valid bits in the segment table, the only way
102: * to have an invalid segment is to make one full pmeg of invalid PTEs.
103: * We use the last one (since the ROM does as well).
104: *
105: * 4. Discontiguous physical pages. The Mach VM expects physical pages
106: * to be in one sequential lump.
107: *
108: * 5. The MMU is always on: it is not possible to disable it. This is
109: * mainly a startup hassle.
110: */
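/*
 * An illustrative sketch of the two-level translation described in
 * point 3 above, assuming 12 segment bits and 6 page bits (the page
 * field is only 5 bits wide on some models):
 *
 *	va:  | vseg (12 bits) | vpg (5-6 bits) | byte offset |
 *
 *	pmeg = segment_map[context][vseg];	(hardware segment map)
 *	pte  = pmeg_ptes[pmeg][vpg];		(PTEs in that PMEG)
 *
 * `segment_map' and `pmeg_ptes' are illustrative names only; the real
 * code reaches these tables through the getsegmap()/getpte() macros
 * defined below.
 */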
111:
112: struct pmap_stats {
113: int ps_unlink_pvfirst; /* # of pv_unlinks on head */
114: int ps_unlink_pvsearch; /* # of pv_unlink searches */
115: int ps_changeprots; /* # of calls to changeprot */
116: int ps_useless_changeprots; /* # of changeprots for wiring */
117: int ps_enter_firstpv; /* pv heads entered */
118: int ps_enter_secondpv; /* pv nonheads entered */
119: int ps_useless_changewire; /* useless wiring changes */
120: int ps_npg_prot_all; /* # of active pages protected */
121: int ps_npg_prot_actual; /* # pages actually affected */
122: } pmap_stats;
123:
124: #ifdef DEBUG
125: #define PDB_CREATE 0x0001
126: #define PDB_DESTROY 0x0002
127: #define PDB_REMOVE 0x0004
128: #define PDB_CHANGEPROT 0x0008
129: #define PDB_ENTER 0x0010
130:
131: #define PDB_MMU_ALLOC 0x0100
132: #define PDB_MMU_STEAL 0x0200
133: #define PDB_CTX_ALLOC 0x0400
134: #define PDB_CTX_STEAL 0x0800
1.43 pk 135: #define PDB_MMUREG_ALLOC 0x1000
136: #define PDB_MMUREG_STEAL 0x2000
1.1 deraadt 137: int pmapdebug = 0x0;
138: #endif
139:
1.10 deraadt 140: #define splpmap() splimp()
1.1 deraadt 141:
142: /*
 143: * First managed physical address, and the extent (in bytes) of the
 143a: * managed range.
144: */
145: vm_offset_t vm_first_phys, vm_num_phys;
146:
147: /*
148: * For each managed physical page, there is a list of all currently
149: * valid virtual mappings of that page. Since there is usually one
150: * (or zero) mapping per page, the table begins with an initial entry,
151: * rather than a pointer; this head entry is empty iff its pv_pmap
152: * field is NULL.
153: *
 154: * Note that there is one of these per machine-independent page (so
 155: * there may be only one for every two hardware pages, e.g.).  Since the virtual
156: * address is aligned on a page boundary, the low order bits are free
157: * for storing flags. Only the head of each list has flags.
158: *
159: * THIS SHOULD BE PART OF THE CORE MAP
160: */
161: struct pvlist {
162: struct pvlist *pv_next; /* next pvlist, if any */
163: struct pmap *pv_pmap; /* pmap of this va */
164: int pv_va; /* virtual address */
165: int pv_flags; /* flags (below) */
166: };
167:
168: /*
169: * Flags in pv_flags. Note that PV_MOD must be 1 and PV_REF must be 2
170: * since they must line up with the bits in the hardware PTEs (see pte.h).
171: */
172: #define PV_MOD 1 /* page modified */
173: #define PV_REF 2 /* page referenced */
174: #define PV_NC 4 /* page cannot be cached */
175: /*efine PV_ALLF 7 ** all of the above */
176:
177: struct pvlist *pv_table; /* array of entries, one per physical page */
178:
179: #define pvhead(pa) (&pv_table[atop((pa) - vm_first_phys)])
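/*
 * E.g., pvhead() maps a physical address to its list head by the
 * machine-independent page number relative to vm_first_phys; it is
 * only meaningful for managed pages, so callers check managed(pa)
 * first (see me_alloc()/me_free() below).
 */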
180:
181: /*
182: * Each virtual segment within each pmap is either valid or invalid.
 183: * It is valid if its sg_npte (in pm_regmap[VA_VREG(va)].rg_segmap) is
 184: * not 0.  This does not mean it is in the MMU, however; that is true
 185: * iff its sg_pmeg does not point to the invalid PMEG.
186: *
187: * If a virtual segment is valid and loaded, the correct PTEs appear
188: * in the MMU only. If it is valid and unloaded, the correct PTEs appear
 189: * in the software sg_pte array only.  However, some effort is made to keep
190: * the software copies consistent enough with the MMU so that libkvm can
191: * do user address translations. In particular, pv_changepte() and
192: * pmap_enu() maintain consistency, while less critical changes are
 193: * not maintained.  sg_pte always points to space for those
 194: * PTEs, unless this is the kernel pmap, in which case sg_pte is not
 195: * used (sigh).
196: *
197: * Each PMEG in the MMU is either free or contains PTEs corresponding to
198: * some pmap and virtual segment. If it contains some PTEs, it also contains
199: * reference and modify bits that belong in the pv_table. If we need
200: * to steal a PMEG from some process (if we need one and none are free)
201: * we must copy the ref and mod bits, and update pm_segmap in the other
202: * pmap to show that its virtual segment is no longer in the MMU.
203: *
204: * There are 128 PMEGs in a small Sun-4, of which only a few dozen are
205: * tied down permanently, leaving `about' 100 to be spread among
206: * running processes. These are managed as an LRU cache. Before
207: * calling the VM paging code for a user page fault, the fault handler
208: * calls mmu_load(pmap, va) to try to get a set of PTEs put into the
209: * MMU. mmu_load will check the validity of the segment and tell whether
210: * it did something.
211: *
212: * Since I hate the name PMEG I call this data structure an `mmu entry'.
213: * Each mmuentry is on exactly one of three `usage' lists: free, LRU,
214: * or locked. The LRU list is for user processes; the locked list is
215: * for kernel entries; both are doubly linked queues headed by `mmuhd's.
216: * The free list is a simple list, headed by a free list pointer.
217: */
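/*
 * A sketch of the allocation policy implemented by me_alloc() below:
 * take the head of the free list if it is nonempty; otherwise steal
 * the head of the LRU list, first saving the victim's ref/mod bits
 * into the pv_table and its PTEs into the owner's software copy.
 */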
218: struct mmuentry {
1.43 pk 219: TAILQ_ENTRY(mmuentry) me_list; /* usage list link */
220: TAILQ_ENTRY(mmuentry) me_pmchain; /* pmap owner link */
1.1 deraadt 221: struct pmap *me_pmap; /* pmap, if in use */
1.43 pk 222: u_short me_vreg; /* associated virtual region/segment */
223: u_short me_vseg; /* associated virtual region/segment */
1.45 pk 224: u_short me_cookie; /* hardware SMEG/PMEG number */
1.1 deraadt 225: };
1.43 pk 226: struct mmuentry *mmusegments; /* allocated in pmap_bootstrap */
227: struct mmuentry *mmuregions; /* allocated in pmap_bootstrap */
1.1 deraadt 228:
1.43 pk 229: struct mmuhd segm_freelist, segm_lru, segm_locked;
230: struct mmuhd region_freelist, region_lru, region_locked;
1.1 deraadt 231:
232: int seginval; /* the invalid segment number */
1.43 pk 233: #ifdef MMU_3L
234: int reginval; /* the invalid region number */
235: #endif
1.1 deraadt 236:
237: /*
238: * A context is simply a small number that dictates which set of 4096
239: * segment map entries the MMU uses. The Sun 4c has eight such sets.
 240: * These are allotted in an `almost MRU' fashion.
241: *
242: * Each context is either free or attached to a pmap.
243: *
244: * Since the virtual address cache is tagged by context, when we steal
245: * a context we have to flush (that part of) the cache.
246: */
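/*
 * A sketch of the `almost MRU' policy as implemented by ctx_alloc()
 * below: take a free context if one exists; otherwise the rover
 * ctx_kick sweeps back and forth over contexts 1..ncontext-1
 * (context 0 belongs to the kernel) choosing a victim to steal.
 */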
247: union ctxinfo {
248: union ctxinfo *c_nextfree; /* free list (if free) */
249: struct pmap *c_pmap; /* pmap (if busy) */
250: };
 251: union ctxinfo *ctxinfo; /* allocated in pmap_bootstrap */
252: int ncontext;
253:
254: union ctxinfo *ctx_freelist; /* context free list */
255: int ctx_kick; /* allocation rover when none free */
256: int ctx_kickdir; /* ctx_kick roves both directions */
257:
258: caddr_t vpage[2]; /* two reserved MD virtual pages */
1.41 mycroft 259: caddr_t vmmap; /* one reserved MI vpage for /dev/mem */
1.1 deraadt 260: caddr_t vdumppages; /* 32KB worth of reserved dump pages */
261:
1.43 pk 262: #ifdef MMU_3L
263: smeg_t tregion;
264: #endif
265: struct pmap kernel_pmap_store; /* the kernel's pmap */
266: struct regmap kernel_regmap_store[NKREG]; /* the kernel's regmap */
267: struct segmap kernel_segmap_store[NKREG*NSEGRG];/* the kernel's segmaps */
1.1 deraadt 268:
1.30 pk 269: #define MA_SIZE 32 /* size of memory descriptor arrays */
1.1 deraadt 270: struct memarr pmemarr[MA_SIZE];/* physical memory regions */
271: int npmemarr; /* number of entries in pmemarr */
1.29 pk 272: int cpmemarr; /* pmap_next_page() state */
273: /*static*/ vm_offset_t avail_start; /* first free physical page */
274: /*static*/ vm_offset_t avail_end; /* last free physical page */
275: /*static*/ vm_offset_t avail_next; /* pmap_next_page() state:
276: next free physical page */
277: /*static*/ vm_offset_t virtual_avail; /* first free virtual page number */
278: /*static*/ vm_offset_t virtual_end; /* last free virtual page number */
279:
1.45 pk 280: int mmu_has_hole;
281:
1.29 pk 282: vm_offset_t prom_vstart; /* For /dev/kmem */
283: vm_offset_t prom_vend;
1.1 deraadt 284:
1.31 pk 285: #ifdef SUN4
286: /*
287: * segfixmask: on some systems (4/110) "getsegmap()" returns a partly
288: * invalid value. getsegmap returns a 16 bit value on the sun4, but
289: * only the first 8 or so bits are valid (the rest are *supposed* to
 290: * be zero). on the 4/110 the bits that are supposed to be zero are
291: * all one instead. e.g. KERNBASE is usually mapped by pmeg number zero.
1.36 pk 292: * on a 4/300 getsegmap(KERNBASE) == 0x0000, but
1.31 pk 293: * on a 4/100 getsegmap(KERNBASE) == 0xff00
294: *
295: * this confuses mmu_reservemon() and causes it to not reserve the PROM's
296: * pmegs. then the PROM's pmegs get used during autoconfig and everything
297: * falls apart! (not very fun to debug, BTW.)
298: *
1.43 pk 299: * solution: mask the invalid bits in the getsegmap macro.
1.31 pk 300: */
301:
302: static u_long segfixmask = 0xffffffff; /* all bits valid to start */
303: #endif
304:
1.1 deraadt 305: /*
306: * pseudo-functions for mnemonic value
307: * NB: setsegmap should be stba for 4c, but stha works and makes the
308: * code right for the Sun-4 as well.
309: */
310: #define getcontext() lduba(AC_CONTEXT, ASI_CONTROL)
311: #define setcontext(c) stba(AC_CONTEXT, ASI_CONTROL, c)
1.18 deraadt 312: #if defined(SUN4) && !defined(SUN4C)
1.31 pk 313: #define getsegmap(va) (lduha(va, ASI_SEGMAP) & segfixmask)
1.1 deraadt 314: #define setsegmap(va, pmeg) stha(va, ASI_SEGMAP, pmeg)
1.18 deraadt 315: #endif
316: #if !defined(SUN4) && defined(SUN4C)
1.1 deraadt 317: #define getsegmap(va) lduba(va, ASI_SEGMAP)
318: #define setsegmap(va, pmeg) stba(va, ASI_SEGMAP, pmeg)
319: #endif
1.18 deraadt 320: #if defined(SUN4) && defined(SUN4C)
1.20 deraadt 321: #define getsegmap(va) (cputyp==CPU_SUN4C ? lduba(va, ASI_SEGMAP) \
1.31 pk 322: : (lduha(va, ASI_SEGMAP) & segfixmask))
1.20 deraadt 323: #define setsegmap(va, pmeg) (cputyp==CPU_SUN4C ? stba(va, ASI_SEGMAP, pmeg) \
1.18 deraadt 324: : stha(va, ASI_SEGMAP, pmeg))
325: #endif
1.43 pk 326: #if defined(SUN4) && defined(MMU_3L)
327: #define getregmap(va) ((unsigned)lduha(va+2, ASI_REGMAP) >> 8)
328: #define setregmap(va, smeg) stha(va+2, ASI_REGMAP, (smeg << 8))
329: #endif
1.1 deraadt 330:
331: #define getpte(va) lda(va, ASI_PTE)
332: #define setpte(va, pte) sta(va, ASI_PTE, pte)
333:
334: /*----------------------------------------------------------------*/
335:
1.29 pk 336: #define HWTOSW(pg) (pg)
337: #define SWTOHW(pg) (pg)
1.1 deraadt 338:
1.43 pk 339: #ifdef MMU_3L
340: #define CTX_USABLE(pm,rp) ((pm)->pm_ctx && \
341: (!mmu_3l || (rp)->rg_smeg != reginval))
342: #else
343: #define CTX_USABLE(pm,rp) ((pm)->pm_ctx)
344: #endif
345:
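/*
 * pm_gap_start..pm_gap_end-1 is a range of virtual region numbers in
 * which a user pmap is known to have no valid mappings; ctx_alloc()
 * uses it to skip writing segment map entries that map nothing.
 * GAP_WIDEN grows the gap when region vr becomes empty; GAP_SHRINK
 * narrows the gap around a region that comes into use.  Worked
 * example for GAP_SHRINK: with a gap of [8,24) and vr = 20, the
 * midpoint x is 16 and vr > x, so the gap shrinks to [8,20).
 */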
346: #define GAP_WIDEN(pm,vr) do { \
347: if (vr + 1 == pm->pm_gap_start) \
348: pm->pm_gap_start = vr; \
349: if (vr == pm->pm_gap_end) \
350: pm->pm_gap_end = vr + 1;\
351: } while (0)
352:
353: #define GAP_SHRINK(pm,vr) do { \
354: register int x; \
355: x = pm->pm_gap_start + (pm->pm_gap_end - pm->pm_gap_start) / 2; \
356: if (vr > x) { \
357: if (vr < pm->pm_gap_end) \
358: pm->pm_gap_end = vr; \
359: } else { \
360: if (vr >= pm->pm_gap_start && x != pm->pm_gap_start) \
361: pm->pm_gap_start = vr + 1; \
362: } \
363: } while (0)
364:
1.2 deraadt 365: /*
366: * Sort a memory array by address.
367: */
368: static void
369: sortm(mp, n)
370: register struct memarr *mp;
371: register int n;
372: {
373: register struct memarr *mpj;
374: register int i, j;
375: register u_int addr, len;
376:
377: /* Insertion sort. This is O(n^2), but so what? */
378: for (i = 1; i < n; i++) {
379: /* save i'th entry */
380: addr = mp[i].addr;
381: len = mp[i].len;
382: /* find j such that i'th entry goes before j'th */
383: for (j = 0, mpj = mp; j < i; j++, mpj++)
384: if (addr < mpj->addr)
385: break;
386: /* slide up any additional entries */
387: ovbcopy(mpj, mpj + 1, (i - j) * sizeof(*mp));
388: mpj->addr = addr;
389: mpj->len = len;
390: }
391: }
392:
1.29 pk 393: /*
394: * For our convenience, vm_page.c implements:
395: * pmap_startup(), pmap_steal_memory()
396: * using the functions:
397: * pmap_virtual_space(), pmap_free_pages(), pmap_next_page(),
398: * which are much simpler to implement.
399: */
400:
401: /*
402: * How much virtual space does this kernel have?
403: * (After mapping kernel text, data, etc.)
404: */
405: void
406: pmap_virtual_space(v_start, v_end)
407: vm_offset_t *v_start;
408: vm_offset_t *v_end;
409: {
410: *v_start = virtual_avail;
411: *v_end = virtual_end;
412: }
413:
414: /*
415: * Return the number of page indices in the range of
416: * possible return values for pmap_page_index() for
417: * all addresses provided by pmap_next_page(). This
418: * return value is used to allocate per-page data.
419: *
420: */
421: u_int
422: pmap_free_pages()
423: {
 424: long bytes;
425: int nmem;
426: register struct memarr *mp;
427:
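	/*
	 * Sum all physical memory chunks, then deduct the [0..avail_start)
	 * range occupied by the kernel (pmemarr[0] starts at physical
	 * address 0; pmap_bootstrap() checks this).
	 */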
1.36 pk 428: bytes = -avail_start;
429: for (mp = pmemarr, nmem = npmemarr; --nmem >= 0; mp++)
1.29 pk 430: bytes += mp->len;
431:
432: return atop(bytes);
433: }
434:
435: /*
436: * If there are still physical pages available, put the address of
437: * the next available one at paddr and return TRUE. Otherwise,
438: * return FALSE to indicate that there are no more free pages.
439: * Note that avail_next is set to avail_start in pmap_bootstrap().
440: *
 441: * Important: The page indices of the pages returned here must be
442: * in ascending order.
443: */
444: int
445: pmap_next_page(paddr)
446: vm_offset_t *paddr;
447: {
448:
449: /* Is it time to skip over a hole? */
450: if (avail_next == pmemarr[cpmemarr].addr + pmemarr[cpmemarr].len) {
451: if (++cpmemarr == npmemarr)
452: return FALSE;
453: avail_next = pmemarr[cpmemarr].addr;
454: }
455:
456: #ifdef DIAGNOSTIC
457: /* Any available memory remaining? */
458: if (avail_next >= avail_end) {
1.30 pk 459: panic("pmap_next_page: too much memory?!\n");
1.29 pk 460: }
461: #endif
462:
463: /* Have memory, will travel... */
464: *paddr = avail_next;
465: avail_next += NBPG;
466: return TRUE;
467: }
468:
469: /*
470: * pmap_page_index()
471: *
472: * Given a physical address, return a page index.
473: *
474: * There can be some values that we never return (i.e. a hole)
475: * as long as the range of indices returned by this function
476: * is smaller than the value returned by pmap_free_pages().
477: * The returned index does NOT need to start at zero.
478: *
479: */
480: u_long
481: pmap_page_index(pa)
482: vm_offset_t pa;
483: {
484: int idx;
485: int nmem;
486: register struct memarr *mp;
487:
488: #ifdef DIAGNOSTIC
489: if (pa < avail_start || pa >= avail_end)
490: panic("pmap_page_index: pa=0x%x", pa);
491: #endif
492:
493: for (idx = 0, mp = pmemarr, nmem = npmemarr; --nmem >= 0; mp++) {
494: if (pa >= mp->addr && pa < mp->addr + mp->len)
495: break;
496: idx += atop(mp->len);
497: }
498:
499: return (idx + atop(pa - mp->addr));
500: }
1.39 pk 501:
502: int
503: pmap_pa_exists(pa)
504: vm_offset_t pa;
505: {
506: register int nmem;
507: register struct memarr *mp;
508:
509: for (mp = pmemarr, nmem = npmemarr; --nmem >= 0; mp++) {
510: if (pa >= mp->addr && pa < mp->addr + mp->len)
511: return 1;
512: }
513:
514: return 0;
515: }
1.29 pk 516:
1.1 deraadt 517: /* update pv_flags given a valid pte */
518: #define MR(pte) (((pte) >> PG_M_SHIFT) & (PV_MOD | PV_REF))
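/*
 * This relies on PV_MOD and PV_REF lining up with the hardware
 * modified/referenced PTE bits once shifted down by PG_M_SHIFT
 * (see the note at the PV_* definitions above).
 */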
519:
520: /*----------------------------------------------------------------*/
521:
522: /*
523: * Agree with the monitor ROM as to how many MMU entries are
524: * to be reserved, and map all of its segments into all contexts.
525: *
526: * Unfortunately, while the Version 0 PROM had a nice linked list of
527: * taken virtual memory, the Version 2 PROM provides instead a convoluted
528: * description of *free* virtual memory. Rather than invert this, we
529: * resort to two magic constants from the PROM vector description file.
530: */
1.43 pk 531: void
532: mmu_reservemon(nrp, nsp)
533: register int *nrp, *nsp;
1.1 deraadt 534: {
535: register u_int va, eva;
1.43 pk 536: register int mmureg, mmuseg, i, nr, ns, vr, lastvr;
537: register struct regmap *rp;
1.1 deraadt 538:
1.20 deraadt 539: #if defined(SUN4)
1.31 pk 540: if (cputyp == CPU_SUN4) {
1.29 pk 541: prom_vstart = va = OLDMON_STARTVADDR;
542: prom_vend = eva = OLDMON_ENDVADDR;
1.20 deraadt 543: }
544: #endif
545: #if defined(SUN4C)
1.31 pk 546: if (cputyp == CPU_SUN4C) {
1.29 pk 547: prom_vstart = va = OPENPROM_STARTVADDR;
548: prom_vend = eva = OPENPROM_ENDVADDR;
1.19 deraadt 549: }
1.20 deraadt 550: #endif
1.43 pk 551: ns = *nsp;
552: nr = *nrp;
553: lastvr = 0;
1.1 deraadt 554: while (va < eva) {
1.43 pk 555: vr = VA_VREG(va);
556: rp = &pmap_kernel()->pm_regmap[vr];
557:
558: #ifdef MMU_3L
559: if (mmu_3l && vr != lastvr) {
560: lastvr = vr;
561: mmureg = getregmap(va);
562: if (mmureg < nr)
563: rp->rg_smeg = nr = mmureg;
564: /*
565: * On 3-level MMU machines, we distribute regions,
566: * rather than segments, amongst the contexts.
567: */
568: for (i = ncontext; --i > 0;)
569: (*promvec->pv_setctxt)(i, (caddr_t)va, mmureg);
570: }
571: #endif
1.1 deraadt 572: mmuseg = getsegmap(va);
1.43 pk 573: if (mmuseg < ns)
574: ns = mmuseg;
575: #ifdef MMU_3L
576: if (!mmu_3l)
577: #endif
578: for (i = ncontext; --i > 0;)
579: (*promvec->pv_setctxt)(i, (caddr_t)va, mmuseg);
580:
1.1 deraadt 581: if (mmuseg == seginval) {
582: va += NBPSG;
583: continue;
584: }
1.43 pk 585: /*
586: * Another PROM segment. Enter into region map.
587: * Assume the entire segment is valid.
588: */
589: rp->rg_nsegmap += 1;
590: rp->rg_segmap[VA_VSEG(va)].sg_pmeg = mmuseg;
591: rp->rg_segmap[VA_VSEG(va)].sg_npte = NPTESG;
592:
1.1 deraadt 593: /* PROM maps its memory user-accessible: fix it. */
594: for (i = NPTESG; --i >= 0; va += NBPG)
595: setpte(va, getpte(va) | PG_S);
596: }
1.43 pk 597: *nsp = ns;
598: *nrp = nr;
599: return;
1.1 deraadt 600: }
601:
602: /*
603: * TODO: agree with the ROM on physical pages by taking them away
604: * from the page list, rather than having a dinky BTSIZE above.
605: */
606:
607: /*----------------------------------------------------------------*/
608:
609: /*
610: * MMU management.
611: */
1.43 pk 612: struct mmuentry *me_alloc __P((struct mmuhd *, struct pmap *, int, int));
613: void me_free __P((struct pmap *, u_int));
614: struct mmuentry *region_alloc __P((struct mmuhd *, struct pmap *, int));
615: void region_free __P((struct pmap *, u_int));
1.1 deraadt 616:
617: /*
618: * Change contexts. We need the old context number as well as the new
619: * one. If the context is changing, we must write all user windows
620: * first, lest an interrupt cause them to be written to the (other)
621: * user whose context we set here.
622: */
623: #define CHANGE_CONTEXTS(old, new) \
624: if ((old) != (new)) { \
625: write_user_windows(); \
626: setcontext(new); \
627: }
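/*
 * (Note: this expands to a bare `if' statement, so it cannot safely
 * be used as the `if' branch of an outer if/else without braces.)
 */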
628:
629: /*
630: * Allocate an MMU entry (i.e., a PMEG).
631: * If necessary, steal one from someone else.
632: * Put it on the tail of the given queue
633: * (which is either the LRU list or the locked list).
634: * The locked list is not actually ordered, but this is easiest.
635: * Also put it on the given (new) pmap's chain,
636: * enter its pmeg number into that pmap's segmap,
637: * and store the pmeg's new virtual segment number (me->me_vseg).
638: *
639: * This routine is large and complicated, but it must be fast
640: * since it implements the dynamic allocation of MMU entries.
641: */
642: struct mmuentry *
1.43 pk 643: me_alloc(mh, newpm, newvreg, newvseg)
1.1 deraadt 644: register struct mmuhd *mh;
645: register struct pmap *newpm;
1.43 pk 646: register int newvreg, newvseg;
1.1 deraadt 647: {
648: register struct mmuentry *me;
649: register struct pmap *pm;
650: register int i, va, pa, *pte, tpte;
651: int ctx;
1.43 pk 652: struct regmap *rp;
653: struct segmap *sp;
1.1 deraadt 654:
655: /* try free list first */
1.43 pk 656: if ((me = segm_freelist.tqh_first) != NULL) {
657: TAILQ_REMOVE(&segm_freelist, me, me_list);
1.1 deraadt 658: #ifdef DEBUG
659: if (me->me_pmap != NULL)
660: panic("me_alloc: freelist entry has pmap");
661: if (pmapdebug & PDB_MMU_ALLOC)
1.43 pk 662: printf("me_alloc: got pmeg %d\n", me->me_cookie);
1.1 deraadt 663: #endif
1.43 pk 664: TAILQ_INSERT_TAIL(mh, me, me_list);
1.1 deraadt 665:
 666: /* onto pmap chain; pmap is already locked, if needed */
1.43 pk 667: TAILQ_INSERT_TAIL(&newpm->pm_seglist, me, me_pmchain);
1.1 deraadt 668:
669: /* into pmap segment table, with backpointers */
1.43 pk 670: newpm->pm_regmap[newvreg].rg_segmap[newvseg].sg_pmeg = me->me_cookie;
1.1 deraadt 671: me->me_pmap = newpm;
672: me->me_vseg = newvseg;
1.43 pk 673: me->me_vreg = newvreg;
1.1 deraadt 674:
675: return (me);
676: }
677:
678: /* no luck, take head of LRU list */
1.43 pk 679: if ((me = segm_lru.tqh_first) == NULL)
1.1 deraadt 680: panic("me_alloc: all pmegs gone");
1.43 pk 681:
1.1 deraadt 682: pm = me->me_pmap;
683: if (pm == NULL)
684: panic("me_alloc: LRU entry has no pmap");
1.42 mycroft 685: if (pm == pmap_kernel())
1.1 deraadt 686: panic("me_alloc: stealing from kernel");
1.12 pk 687: #ifdef DEBUG
1.1 deraadt 688: if (pmapdebug & (PDB_MMU_ALLOC | PDB_MMU_STEAL))
689: printf("me_alloc: stealing pmeg %x from pmap %x\n",
1.43 pk 690: me->me_cookie, pm);
1.1 deraadt 691: #endif
692: /*
693: * Remove from LRU list, and insert at end of new list
694: * (probably the LRU list again, but so what?).
695: */
1.43 pk 696: TAILQ_REMOVE(&segm_lru, me, me_list);
697: TAILQ_INSERT_TAIL(mh, me, me_list);
698:
699: rp = &pm->pm_regmap[me->me_vreg];
700: if (rp->rg_segmap == NULL)
701: panic("me_alloc: LRU entry's pmap has no segments");
702: sp = &rp->rg_segmap[me->me_vseg];
703: pte = sp->sg_pte;
704: if (pte == NULL)
705: panic("me_alloc: LRU entry's pmap has no ptes");
1.1 deraadt 706:
707: /*
708: * The PMEG must be mapped into some context so that we can
709: * read its PTEs. Use its current context if it has one;
710: * if not, and since context 0 is reserved for the kernel,
711: * the simplest method is to switch to 0 and map the PMEG
712: * to virtual address 0---which, being a user space address,
713: * is by definition not in use.
714: *
715: * XXX for ncpus>1 must use per-cpu VA?
716: * XXX do not have to flush cache immediately
717: */
718: ctx = getcontext();
1.43 pk 719: if (CTX_USABLE(pm,rp)) {
1.1 deraadt 720: CHANGE_CONTEXTS(ctx, pm->pm_ctxnum);
721: if (vactype != VAC_NONE)
1.43 pk 722: cache_flush_segment(me->me_vreg, me->me_vseg);
723: va = VSTOVA(me->me_vreg,me->me_vseg);
1.1 deraadt 724: } else {
725: CHANGE_CONTEXTS(ctx, 0);
1.43 pk 726: #ifdef MMU_3L
727: if (mmu_3l)
728: setregmap(0, tregion);
729: #endif
730: setsegmap(0, me->me_cookie);
1.1 deraadt 731: /*
732: * No cache flush needed: it happened earlier when
733: * the old context was taken.
734: */
735: va = 0;
736: }
737:
738: /*
739: * Record reference and modify bits for each page,
740: * and copy PTEs into kernel memory so that they can
741: * be reloaded later.
742: */
743: i = NPTESG;
744: do {
745: tpte = getpte(va);
1.33 pk 746: if ((tpte & (PG_V | PG_TYPE)) == (PG_V | PG_OBMEM)) {
1.1 deraadt 747: pa = ptoa(HWTOSW(tpte & PG_PFNUM));
748: if (managed(pa))
749: pvhead(pa)->pv_flags |= MR(tpte);
750: }
751: *pte++ = tpte & ~(PG_U|PG_M);
752: va += NBPG;
753: } while (--i > 0);
754:
755: /* update segment tables */
756: simple_lock(&pm->pm_lock); /* what if other cpu takes mmuentry ?? */
1.43 pk 757: if (CTX_USABLE(pm,rp))
758: setsegmap(VSTOVA(me->me_vreg,me->me_vseg), seginval);
759: sp->sg_pmeg = seginval;
1.1 deraadt 760:
761: /* off old pmap chain */
1.43 pk 762: TAILQ_REMOVE(&pm->pm_seglist, me, me_pmchain);
1.1 deraadt 763: simple_unlock(&pm->pm_lock);
764: setcontext(ctx); /* done with old context */
765:
766: /* onto new pmap chain; new pmap is already locked, if needed */
1.43 pk 767: TAILQ_INSERT_TAIL(&newpm->pm_seglist, me, me_pmchain);
1.1 deraadt 768:
769: /* into new segment table, with backpointers */
1.43 pk 770: newpm->pm_regmap[newvreg].rg_segmap[newvseg].sg_pmeg = me->me_cookie;
1.1 deraadt 771: me->me_pmap = newpm;
772: me->me_vseg = newvseg;
1.43 pk 773: me->me_vreg = newvreg;
1.1 deraadt 774:
775: return (me);
776: }
777:
778: /*
779: * Free an MMU entry.
780: *
781: * Assumes the corresponding pmap is already locked.
782: * Does NOT flush cache, but does record ref and mod bits.
783: * The rest of each PTE is discarded.
784: * CALLER MUST SET CONTEXT to pm->pm_ctxnum (if pmap has
785: * a context) or to 0 (if not). Caller must also update
786: * pm->pm_segmap and (possibly) the hardware.
787: */
788: void
789: me_free(pm, pmeg)
790: register struct pmap *pm;
791: register u_int pmeg;
792: {
1.43 pk 793: register struct mmuentry *me = &mmusegments[pmeg];
1.1 deraadt 794: register int i, va, pa, tpte;
1.43 pk 795: register int vr;
796: register struct regmap *rp;
797:
798: vr = me->me_vreg;
1.1 deraadt 799:
800: #ifdef DEBUG
801: if (pmapdebug & PDB_MMU_ALLOC)
1.43 pk 802: printf("me_free: freeing pmeg %d from pmap %x\n",
803: me->me_cookie, pm);
804: if (me->me_cookie != pmeg)
1.1 deraadt 805: panic("me_free: wrong mmuentry");
806: if (pm != me->me_pmap)
807: panic("me_free: pm != me_pmap");
808: #endif
809:
1.43 pk 810: rp = &pm->pm_regmap[vr];
811:
1.1 deraadt 812: /* just like me_alloc, but no cache flush, and context already set */
1.43 pk 813: if (CTX_USABLE(pm,rp)) {
814: va = VSTOVA(vr,me->me_vseg);
815: } else {
816: #ifdef DEBUG
817: if (getcontext() != 0) panic("me_free: ctx != 0");
818: #endif
819: #ifdef MMU_3L
820: if (mmu_3l)
821: setregmap(0, tregion);
822: #endif
823: setsegmap(0, me->me_cookie);
1.1 deraadt 824: va = 0;
825: }
826: i = NPTESG;
827: do {
828: tpte = getpte(va);
1.33 pk 829: if ((tpte & (PG_V | PG_TYPE)) == (PG_V | PG_OBMEM)) {
1.1 deraadt 830: pa = ptoa(HWTOSW(tpte & PG_PFNUM));
831: if (managed(pa))
832: pvhead(pa)->pv_flags |= MR(tpte);
833: }
834: va += NBPG;
835: } while (--i > 0);
836:
837: /* take mmu entry off pmap chain */
1.43 pk 838: TAILQ_REMOVE(&pm->pm_seglist, me, me_pmchain);
839: /* ... and remove from segment map */
840: if (rp->rg_segmap == NULL)
841: panic("me_free: no segments in pmap");
842: rp->rg_segmap[me->me_vseg].sg_pmeg = seginval;
843:
844: /* off LRU or lock chain */
845: if (pm == pmap_kernel()) {
846: TAILQ_REMOVE(&segm_locked, me, me_list);
847: } else {
848: TAILQ_REMOVE(&segm_lru, me, me_list);
849: }
850:
851: /* no associated pmap; on free list */
852: me->me_pmap = NULL;
853: TAILQ_INSERT_TAIL(&segm_freelist, me, me_list);
854: }
855:
856: #ifdef MMU_3L
857:
858: /* XXX - Merge with segm_alloc/segm_free ? */
859:
860: struct mmuentry *
861: region_alloc(mh, newpm, newvr)
862: register struct mmuhd *mh;
863: register struct pmap *newpm;
864: register int newvr;
865: {
866: register struct mmuentry *me;
867: register struct pmap *pm;
868: register int i, va, pa;
869: int ctx;
870: struct regmap *rp;
871: struct segmap *sp;
872:
873: /* try free list first */
874: if ((me = region_freelist.tqh_first) != NULL) {
 875: TAILQ_REMOVE(&region_freelist, me, me_list);
876: #ifdef DEBUG
877: if (me->me_pmap != NULL)
878: panic("region_alloc: freelist entry has pmap");
879: if (pmapdebug & PDB_MMUREG_ALLOC)
880: printf("region_alloc: got smeg %x\n", me->me_cookie);
881: #endif
882: TAILQ_INSERT_TAIL(mh, me, me_list);
883:
 884: /* onto pmap chain; pmap is already locked, if needed */
885: TAILQ_INSERT_TAIL(&newpm->pm_reglist, me, me_pmchain);
886:
 887: /* into pmap region table, with backpointers */
888: newpm->pm_regmap[newvr].rg_smeg = me->me_cookie;
889: me->me_pmap = newpm;
890: me->me_vreg = newvr;
891:
892: return (me);
893: }
894:
895: /* no luck, take head of LRU list */
896: if ((me = region_lru.tqh_first) == NULL)
897: panic("region_alloc: all smegs gone");
898:
899: pm = me->me_pmap;
900: if (pm == NULL)
901: panic("region_alloc: LRU entry has no pmap");
902: if (pm == pmap_kernel())
903: panic("region_alloc: stealing from kernel");
904: #ifdef DEBUG
905: if (pmapdebug & (PDB_MMUREG_ALLOC | PDB_MMUREG_STEAL))
906: printf("region_alloc: stealing smeg %x from pmap %x\n",
907: me->me_cookie, pm);
908: #endif
909: /*
910: * Remove from LRU list, and insert at end of new list
911: * (probably the LRU list again, but so what?).
912: */
 913: TAILQ_REMOVE(&region_lru, me, me_list);
914: TAILQ_INSERT_TAIL(mh, me, me_list);
915:
916: rp = &pm->pm_regmap[me->me_vreg];
917: ctx = getcontext();
918: if (pm->pm_ctx) {
919: CHANGE_CONTEXTS(ctx, pm->pm_ctxnum);
920: if (vactype != VAC_NONE)
921: cache_flush_region(me->me_vreg);
922: }
923:
924: /* update region tables */
925: simple_lock(&pm->pm_lock); /* what if other cpu takes mmuentry ?? */
926: if (pm->pm_ctx)
927: setregmap(VRTOVA(me->me_vreg), reginval);
928: rp->rg_smeg = reginval;
929:
930: /* off old pmap chain */
931: TAILQ_REMOVE(&pm->pm_reglist, me, me_pmchain);
932: simple_unlock(&pm->pm_lock);
933: setcontext(ctx); /* done with old context */
934:
935: /* onto new pmap chain; new pmap is already locked, if needed */
936: TAILQ_INSERT_TAIL(&newpm->pm_reglist, me, me_pmchain);
937:
 938: /* into new region table, with backpointers */
939: newpm->pm_regmap[newvr].rg_smeg = me->me_cookie;
940: me->me_pmap = newpm;
941: me->me_vreg = newvr;
942:
943: return (me);
944: }
945:
946: /*
947: * Free an MMU entry.
948: *
949: * Assumes the corresponding pmap is already locked.
950: * Does NOT flush cache. ???
951: * CALLER MUST SET CONTEXT to pm->pm_ctxnum (if pmap has
952: * a context) or to 0 (if not). Caller must also update
953: * pm->pm_regmap and (possibly) the hardware.
954: */
955: void
956: region_free(pm, smeg)
957: register struct pmap *pm;
958: register u_int smeg;
959: {
960: register struct mmuentry *me = &mmuregions[smeg];
961:
962: #ifdef DEBUG
963: if (pmapdebug & PDB_MMUREG_ALLOC)
964: printf("region_free: freeing smeg %x from pmap %x\n",
965: me->me_cookie, pm);
966: if (me->me_cookie != smeg)
967: panic("region_free: wrong mmuentry");
968: if (pm != me->me_pmap)
969: panic("region_free: pm != me_pmap");
970: #endif
971:
972: if (pm->pm_ctx)
973: if (vactype != VAC_NONE)
974: cache_flush_region(me->me_vreg);
975:
976: /* take mmu entry off pmap chain */
977: TAILQ_REMOVE(&pm->pm_reglist, me, me_pmchain);
1.1 deraadt 978: /* ... and remove from region map */
1.43 pk 979: pm->pm_regmap[me->me_vreg].rg_smeg = reginval;
1.1 deraadt 980:
981: /* off LRU or lock chain */
1.43 pk 982: if (pm == pmap_kernel()) {
 983: TAILQ_REMOVE(&region_locked, me, me_list);
984: } else {
 985: TAILQ_REMOVE(&region_lru, me, me_list);
986: }
1.1 deraadt 987:
988: /* no associated pmap; on free list */
989: me->me_pmap = NULL;
1.43 pk 990: TAILQ_INSERT_TAIL(&region_freelist, me, me_list);
1.1 deraadt 991: }
1.43 pk 992: #endif
1.1 deraadt 993:
994: /*
995: * `Page in' (load or inspect) an MMU entry; called on page faults.
996: * Returns 1 if we reloaded the segment, -1 if the segment was
997: * already loaded and the page was marked valid (in which case the
998: * fault must be a bus error or something), or 0 (segment loaded but
999: * PTE not valid, or segment not loaded at all).
1000: */
1001: int
1.45 pk 1002: mmu_pagein(pm, va, prot)
1.1 deraadt 1003: register struct pmap *pm;
1.45 pk 1004: register int va, prot;
1.1 deraadt 1005: {
1006: register int *pte;
1007: register struct mmuentry *me;
1.45 pk 1008: register int vr, vs, pmeg, i, s, bits;
1.43 pk 1009: struct regmap *rp;
1010: struct segmap *sp;
1011:
1.45 pk 1012: if (prot != VM_PROT_NONE)
1013: bits = PG_V | ((prot & VM_PROT_WRITE) ? PG_W : 0);
1014: else
1015: bits = 0;
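	/*
	 * `bits' is the set of PTE bits (valid, plus writable for a
	 * write fault) that must already be set for the fault to be
	 * considered `hard'; see the sg_pmeg check below.
	 */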
1016:
1.43 pk 1017: vr = VA_VREG(va);
1018: vs = VA_VSEG(va);
1019: rp = &pm->pm_regmap[vr];
1020: #ifdef DEBUG
1021: if (pm == pmap_kernel())
1022: printf("mmu_pagein: kernel wants map at va %x, vr %d, vs %d\n", va, vr, vs);
1023: #endif
1024:
1025: /* return 0 if we have no PMEGs to load */
1026: if (rp->rg_segmap == NULL)
1027: return (0);
1028: #ifdef MMU_3L
1029: if (mmu_3l && rp->rg_smeg == reginval) {
1030: smeg_t smeg;
1031: unsigned int tva = VA_ROUNDDOWNTOREG(va);
1032: struct segmap *sp = rp->rg_segmap;
1033:
1034: s = splpmap(); /* paranoid */
 1035: smeg = region_alloc(&region_lru, pm, vr)->me_cookie;
1036: setregmap(tva, smeg);
1037: i = NSEGRG;
1038: do {
1039: setsegmap(tva, sp++->sg_pmeg);
1040: tva += NBPSG;
1041: } while (--i > 0);
1042: splx(s);
1043: }
1044: #endif
1045: sp = &rp->rg_segmap[vs];
1.1 deraadt 1046:
1047: /* return 0 if we have no PTEs to load */
1.43 pk 1048: if ((pte = sp->sg_pte) == NULL)
1.1 deraadt 1049: return (0);
1.43 pk 1050:
1.1 deraadt 1051: /* return -1 if the fault is `hard', 0 if not */
1.43 pk 1052: if (sp->sg_pmeg != seginval)
1.1 deraadt 1053: return (bits && (getpte(va) & bits) == bits ? -1 : 0);
1054:
1055: /* reload segment: write PTEs into a new LRU entry */
1056: va = VA_ROUNDDOWNTOSEG(va);
1057: s = splpmap(); /* paranoid */
1.43 pk 1058: pmeg = me_alloc(&segm_lru, pm, vr, vs)->me_cookie;
1.1 deraadt 1059: setsegmap(va, pmeg);
1060: i = NPTESG;
1061: do {
1062: setpte(va, *pte++);
1063: va += NBPG;
1064: } while (--i > 0);
1065: splx(s);
1066: return (1);
1067: }
1068:
1069: /*
1070: * Allocate a context. If necessary, steal one from someone else.
1071: * Changes hardware context number and loads segment map.
1072: *
1073: * This routine is only ever called from locore.s just after it has
1074: * saved away the previous process, so there are no active user windows.
1075: */
1076: void
1077: ctx_alloc(pm)
1078: register struct pmap *pm;
1079: {
1080: register union ctxinfo *c;
1.13 pk 1081: register int cnum, i;
1.43 pk 1082: register struct regmap *rp;
1.13 pk 1083: register int gap_start, gap_end;
1084: register unsigned long va;
1.1 deraadt 1085:
1086: #ifdef DEBUG
1087: if (pm->pm_ctx)
1088: panic("ctx_alloc pm_ctx");
1089: if (pmapdebug & PDB_CTX_ALLOC)
1090: printf("ctx_alloc(%x)\n", pm);
1091: #endif
1.13 pk 1092: gap_start = pm->pm_gap_start;
1093: gap_end = pm->pm_gap_end;
1094:
1.1 deraadt 1095: if ((c = ctx_freelist) != NULL) {
1096: ctx_freelist = c->c_nextfree;
1097: cnum = c - ctxinfo;
1098: setcontext(cnum);
1099: } else {
1100: if ((ctx_kick += ctx_kickdir) >= ncontext) {
1101: ctx_kick = ncontext - 1;
1102: ctx_kickdir = -1;
1103: } else if (ctx_kick < 1) {
1104: ctx_kick = 1;
1105: ctx_kickdir = 1;
1106: }
1107: c = &ctxinfo[cnum = ctx_kick];
1108: #ifdef DEBUG
1109: if (c->c_pmap == NULL)
1110: panic("ctx_alloc cu_pmap");
1111: if (pmapdebug & (PDB_CTX_ALLOC | PDB_CTX_STEAL))
1112: printf("ctx_alloc: steal context %x from %x\n",
1113: cnum, c->c_pmap);
1114: #endif
1115: c->c_pmap->pm_ctx = NULL;
1116: setcontext(cnum);
1117: if (vactype != VAC_NONE)
1118: cache_flush_context();
1.13 pk 1119: if (gap_start < c->c_pmap->pm_gap_start)
1120: gap_start = c->c_pmap->pm_gap_start;
1121: if (gap_end > c->c_pmap->pm_gap_end)
1122: gap_end = c->c_pmap->pm_gap_end;
1.1 deraadt 1123: }
1124: c->c_pmap = pm;
1125: pm->pm_ctx = c;
1126: pm->pm_ctxnum = cnum;
1127:
1.13 pk 1128: /*
1.43 pk 1129: * Write pmap's region (3-level MMU) or segment table into the MMU.
1.13 pk 1130: *
1.43 pk 1131: * Only write those entries that actually map something in this
1132: * context by maintaining a pair of region numbers in between
1133: * which the pmap has no valid mappings.
1.13 pk 1134: *
1135: * If a context was just allocated from the free list, trust that
1136: * all its pmeg numbers are `seginval'. We make sure this is the
1137: * case initially in pmap_bootstrap(). Otherwise, the context was
1138: * freed by calling ctx_free() in pmap_release(), which in turn is
1139: * supposedly called only when all mappings have been removed.
1140: *
1141: * On the other hand, if the context had to be stolen from another
 1142: * pmap, we possibly shrink the gap to be the intersection of the gaps
 1143: * of the new and the previous map.
1144: */
1.43 pk 1145:
1146:
1147: rp = pm->pm_regmap;
1148: for (va = 0, i = NUREG; --i >= 0; ) {
1149: if (VA_VREG(va) >= gap_start) {
1150: va = VRTOVA(gap_end);
1.13 pk 1151: i -= gap_end - gap_start;
1.43 pk 1152: rp += gap_end - gap_start;
1.13 pk 1153: if (i < 0)
1154: break;
1.43 pk 1155: gap_start = NUREG; /* mustn't re-enter this branch */
1156: }
1157: #ifdef MMU_3L
1158: if (mmu_3l) {
1159: setregmap(va, rp++->rg_smeg);
1160: va += NBPRG;
1161: } else
1162: #endif
1163: {
1164: register int j;
1165: register struct segmap *sp = rp->rg_segmap;
1166: for (j = NSEGRG; --j >= 0; va += NBPSG)
1167: setsegmap(va, sp?sp++->sg_pmeg:seginval);
1168: rp++;
1.13 pk 1169: }
1170: }
1.1 deraadt 1171: }
1172:
1173: /*
1174: * Give away a context. Flushes cache and sets current context to 0.
1175: */
1176: void
1177: ctx_free(pm)
1178: struct pmap *pm;
1179: {
1180: register union ctxinfo *c;
1181: register int newc, oldc;
1182:
1183: if ((c = pm->pm_ctx) == NULL)
1184: panic("ctx_free");
1185: pm->pm_ctx = NULL;
1186: oldc = getcontext();
1187: if (vactype != VAC_NONE) {
1188: newc = pm->pm_ctxnum;
1189: CHANGE_CONTEXTS(oldc, newc);
1190: cache_flush_context();
1191: setcontext(0);
1192: } else {
1193: CHANGE_CONTEXTS(oldc, 0);
1194: }
1195: c->c_nextfree = ctx_freelist;
1196: ctx_freelist = c;
1197: }
1198:
1199:
1200: /*----------------------------------------------------------------*/
1201:
1202: /*
1203: * pvlist functions.
1204: */
1205:
1206: /*
1207: * Walk the given pv list, and for each PTE, set or clear some bits
1208: * (e.g., PG_W or PG_NC).
1209: *
1210: * As a special case, this never clears PG_W on `pager' pages.
1211: * These, being kernel addresses, are always in hardware and have
1212: * a context.
1213: *
1214: * This routine flushes the cache for any page whose PTE changes,
1215: * as long as the process has a context; this is overly conservative.
1216: * It also copies ref and mod bits to the pvlist, on the theory that
1217: * this might save work later. (XXX should test this theory)
1218: */
1219: void
1220: pv_changepte(pv0, bis, bic)
1221: register struct pvlist *pv0;
1222: register int bis, bic;
1223: {
1224: register int *pte;
1225: register struct pvlist *pv;
1226: register struct pmap *pm;
1.43 pk 1227: register int va, vr, vs, i, flags;
1.1 deraadt 1228: int ctx, s;
1.43 pk 1229: struct regmap *rp;
1230: struct segmap *sp;
1.1 deraadt 1231:
1232: write_user_windows(); /* paranoid? */
1233:
1234: s = splpmap(); /* paranoid? */
1235: if (pv0->pv_pmap == NULL) {
1236: splx(s);
1237: return;
1238: }
1239: ctx = getcontext();
1240: flags = pv0->pv_flags;
1241: for (pv = pv0; pv != NULL; pv = pv->pv_next) {
1242: pm = pv->pv_pmap;
 1243: if (pm == NULL) panic("pv_changepte 1");
1244: va = pv->pv_va;
1.43 pk 1245: vr = VA_VREG(va);
1246: vs = VA_VSEG(va);
1247: rp = &pm->pm_regmap[vr];
1248: if (rp->rg_segmap == NULL)
1249: panic("pv_changepte: no segments");
1250:
1251: sp = &rp->rg_segmap[vs];
1252: pte = sp->sg_pte;
1253:
1254: if (sp->sg_pmeg == seginval) {
1255: /* not in hardware: just fix software copy */
1256: if (pte == NULL)
1257: panic("pv_changepte 2");
1258: pte += VA_VPG(va);
1259: *pte = (*pte | bis) & ~bic;
1260: } else {
1.1 deraadt 1261: register int tpte;
1262:
1263: /* in hardware: fix hardware copy */
1.43 pk 1264: if (CTX_USABLE(pm,rp)) {
1.1 deraadt 1265: extern vm_offset_t pager_sva, pager_eva;
1266:
1.8 pk 1267: /*
1268: * Bizarreness: we never clear PG_W on
1269: * pager pages, nor PG_NC on DVMA pages.
1270: */
1.1 deraadt 1271: if (bic == PG_W &&
1272: va >= pager_sva && va < pager_eva)
1.3 deraadt 1273: continue;
1274: if (bic == PG_NC &&
1275: va >= DVMA_BASE && va < DVMA_END)
1.1 deraadt 1276: continue;
1277: setcontext(pm->pm_ctxnum);
1278: /* XXX should flush only when necessary */
1.13 pk 1279: tpte = getpte(va);
1.34 pk 1280: if (vactype != VAC_NONE && (tpte & PG_M))
1.43 pk 1281: cache_flush_page(va);
1.1 deraadt 1282: } else {
1283: /* XXX per-cpu va? */
1284: setcontext(0);
1.43 pk 1285: #ifdef MMU_3L
1286: if (mmu_3l)
1287: setregmap(0, tregion);
1288: #endif
1289: setsegmap(0, sp->sg_pmeg);
1.18 deraadt 1290: va = VA_VPG(va) << PGSHIFT;
1.13 pk 1291: tpte = getpte(va);
1.1 deraadt 1292: }
1293: if (tpte & PG_V)
1294: flags |= (tpte >> PG_M_SHIFT) &
1295: (PV_MOD|PV_REF);
1296: tpte = (tpte | bis) & ~bic;
1297: setpte(va, tpte);
1298: if (pte != NULL) /* update software copy */
1299: pte[VA_VPG(va)] = tpte;
1300: }
1301: }
1302: pv0->pv_flags = flags;
1303: setcontext(ctx);
1304: splx(s);
1305: }
1306:
1307: /*
1308: * Sync ref and mod bits in pvlist (turns off same in hardware PTEs).
1309: * Returns the new flags.
1310: *
1311: * This is just like pv_changepte, but we never add or remove bits,
1312: * hence never need to adjust software copies.
1313: */
1314: int
1315: pv_syncflags(pv0)
1316: register struct pvlist *pv0;
1317: {
1318: register struct pvlist *pv;
1319: register struct pmap *pm;
1.43 pk 1320: register int tpte, va, vr, vs, pmeg, i, flags;
1.1 deraadt 1321: int ctx, s;
1.43 pk 1322: struct regmap *rp;
1323: struct segmap *sp;
1.1 deraadt 1324:
1325: write_user_windows(); /* paranoid? */
1326:
1327: s = splpmap(); /* paranoid? */
1328: if (pv0->pv_pmap == NULL) { /* paranoid */
1329: splx(s);
1330: return (0);
1331: }
1332: ctx = getcontext();
1333: flags = pv0->pv_flags;
1334: for (pv = pv0; pv != NULL; pv = pv->pv_next) {
1335: pm = pv->pv_pmap;
1336: va = pv->pv_va;
1.43 pk 1337: vr = VA_VREG(va);
1338: vs = VA_VSEG(va);
1339: rp = &pm->pm_regmap[vr];
1340: if (rp->rg_segmap == NULL)
1341: panic("pv_syncflags: no segments");
1342: sp = &rp->rg_segmap[vs];
1343:
1344: if ((pmeg = sp->sg_pmeg) == seginval)
1.1 deraadt 1345: continue;
1.43 pk 1346:
1347: if (CTX_USABLE(pm,rp)) {
1.1 deraadt 1348: setcontext(pm->pm_ctxnum);
1349: /* XXX should flush only when necessary */
1.13 pk 1350: tpte = getpte(va);
1.34 pk 1351: if (vactype != VAC_NONE && (tpte & PG_M))
1352: cache_flush_page(va);
1.1 deraadt 1353: } else {
1354: /* XXX per-cpu va? */
1355: setcontext(0);
1.43 pk 1356: #ifdef MMU_3L
1357: if (mmu_3l)
1358: setregmap(0, tregion);
1359: #endif
1.1 deraadt 1360: setsegmap(0, pmeg);
1.18 deraadt 1361: va = VA_VPG(va) << PGSHIFT;
1.13 pk 1362: tpte = getpte(va);
1.1 deraadt 1363: }
1364: if (tpte & (PG_M|PG_U) && tpte & PG_V) {
1365: flags |= (tpte >> PG_M_SHIFT) &
1366: (PV_MOD|PV_REF);
1367: tpte &= ~(PG_M|PG_U);
1368: setpte(va, tpte);
1369: }
1370: }
1371: pv0->pv_flags = flags;
1372: setcontext(ctx);
1373: splx(s);
1374: return (flags);
1375: }
1376:
1377: /*
1378: * pv_unlink is a helper function for pmap_remove.
1379: * It takes a pointer to the pv_table head for some physical address
1380: * and removes the appropriate (pmap, va) entry.
1381: *
1382: * Once the entry is removed, if the pv_table head has the cache
1383: * inhibit bit set, see if we can turn that off; if so, walk the
1384: * pvlist and turn off PG_NC in each PTE. (The pvlist is by
1385: * definition nonempty, since it must have at least two elements
1386: * in it to have PV_NC set, and we only remove one here.)
1387: */
1.43 pk 1388: /*static*/ void
1.1 deraadt 1389: pv_unlink(pv, pm, va)
1390: register struct pvlist *pv;
1391: register struct pmap *pm;
1392: register vm_offset_t va;
1393: {
1394: register struct pvlist *npv;
1395:
1.11 pk 1396: #ifdef DIAGNOSTIC
1397: if (pv->pv_pmap == NULL)
1398: panic("pv_unlink0");
1399: #endif
1.1 deraadt 1400: /*
1401: * First entry is special (sigh).
1402: */
1403: npv = pv->pv_next;
1404: if (pv->pv_pmap == pm && pv->pv_va == va) {
1405: pmap_stats.ps_unlink_pvfirst++;
1406: if (npv != NULL) {
1407: pv->pv_next = npv->pv_next;
1408: pv->pv_pmap = npv->pv_pmap;
1409: pv->pv_va = npv->pv_va;
1410: free((caddr_t)npv, M_VMPVENT);
1411: } else
1412: pv->pv_pmap = NULL;
1413: } else {
1414: register struct pvlist *prev;
1415:
1416: for (prev = pv;; prev = npv, npv = npv->pv_next) {
1417: pmap_stats.ps_unlink_pvsearch++;
1418: if (npv == NULL)
1419: panic("pv_unlink");
1420: if (npv->pv_pmap == pm && npv->pv_va == va)
1421: break;
1422: }
1423: prev->pv_next = npv->pv_next;
1424: free((caddr_t)npv, M_VMPVENT);
1425: }
1426: if (pv->pv_flags & PV_NC) {
1427: /*
1428: * Not cached: check to see if we can fix that now.
1429: */
1430: va = pv->pv_va;
1431: for (npv = pv->pv_next; npv != NULL; npv = npv->pv_next)
1432: if (BADALIAS(va, npv->pv_va))
1433: return;
1434: pv->pv_flags &= ~PV_NC;
1435: pv_changepte(pv, 0, PG_NC);
1436: }
1437: }
1438:
1439: /*
1440: * pv_link is the inverse of pv_unlink, and is used in pmap_enter.
1441: * It returns PG_NC if the (new) pvlist says that the address cannot
1442: * be cached.
1443: */
1.43 pk 1444: /*static*/ int
1.1 deraadt 1445: pv_link(pv, pm, va)
1446: register struct pvlist *pv;
1447: register struct pmap *pm;
1448: register vm_offset_t va;
1449: {
1450: register struct pvlist *npv;
1451: register int ret;
1452:
1453: if (pv->pv_pmap == NULL) {
1454: /* no pvlist entries yet */
1455: pmap_stats.ps_enter_firstpv++;
1456: pv->pv_next = NULL;
1457: pv->pv_pmap = pm;
1458: pv->pv_va = va;
1459: return (0);
1460: }
1461: /*
1462: * Before entering the new mapping, see if
1463: * it will cause old mappings to become aliased
1464: * and thus need to be `discached'.
1465: */
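	/*
	 * (BADALIAS comes from the machine headers; presumably it tests
	 * whether the two virtual addresses differ in the bits that
	 * index the virtually-addressed cache.  All that is assumed
	 * here is that a nonzero result means the mappings would alias
	 * and so must be made uncacheable.)
	 */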
1466: ret = 0;
1467: pmap_stats.ps_enter_secondpv++;
1468: if (pv->pv_flags & PV_NC) {
1469: /* already uncached, just stay that way */
1470: ret = PG_NC;
1471: } else {
1472: /* MAY NEED TO DISCACHE ANYWAY IF va IS IN DVMA SPACE? */
1473: for (npv = pv; npv != NULL; npv = npv->pv_next) {
1474: if (BADALIAS(va, npv->pv_va)) {
1.43 pk 1475: #ifdef DEBUG
1476: if (pmapdebug) printf(
1477: "pv_link: badalias: pid %d, %x<=>%x, pa %x\n",
1478: curproc?curproc->p_pid:-1, va, npv->pv_va,
1479: vm_first_phys + (pv-pv_table)*NBPG);
1480: #endif
1.1 deraadt 1481: pv->pv_flags |= PV_NC;
1482: pv_changepte(pv, ret = PG_NC, 0);
1483: break;
1484: }
1485: }
1486: }
1487: npv = (struct pvlist *)malloc(sizeof *npv, M_VMPVENT, M_WAITOK);
1488: npv->pv_next = pv->pv_next;
1489: npv->pv_pmap = pm;
1490: npv->pv_va = va;
1491: pv->pv_next = npv;
1492: return (ret);
1493: }
1494:
1495: /*
1496: * Walk the given list and flush the cache for each (MI) page that is
1.31 pk 1497: * potentially in the cache. Called only if vactype != VAC_NONE.
1.1 deraadt 1498: */
1499: pv_flushcache(pv)
1500: register struct pvlist *pv;
1501: {
1502: register struct pmap *pm;
1503: register int i, s, ctx;
1504:
1505: write_user_windows(); /* paranoia? */
1506:
1507: s = splpmap(); /* XXX extreme paranoia */
1508: if ((pm = pv->pv_pmap) != NULL) {
1509: ctx = getcontext();
1510: for (;;) {
1511: if (pm->pm_ctx) {
1512: setcontext(pm->pm_ctxnum);
1513: cache_flush_page(pv->pv_va);
1514: }
1515: pv = pv->pv_next;
1516: if (pv == NULL)
1517: break;
1518: pm = pv->pv_pmap;
1519: }
1520: setcontext(ctx);
1521: }
1522: splx(s);
1523: }
1524:
1525: /*----------------------------------------------------------------*/
1526:
1527: /*
1528: * At last, pmap code.
1529: */
1530:
1.18 deraadt 1531: #if defined(SUN4) && defined(SUN4C)
1532: int nptesg;
1533: #endif
1534:
1.1 deraadt 1535: /*
1536: * Bootstrap the system enough to run with VM enabled.
1537: *
1.43 pk 1538: * nsegment is the number of mmu segment entries (``PMEGs'');
1539: * nregion is the number of mmu region entries (``SMEGs'');
1.1 deraadt 1540: * nctx is the number of contexts.
1541: */
1542: void
1.43 pk 1543: pmap_bootstrap(nctx, nregion, nsegment)
1544: int nsegment, nctx, nregion;
1.1 deraadt 1545: {
1546: register union ctxinfo *ci;
1.43 pk 1547: register struct mmuentry *mmuseg, *mmureg;
1548: struct regmap *rp;
1549: register int i, j;
1550: register int npte, zseg, vr, vs;
1551: register int rcookie, scookie;
1.1 deraadt 1552: register caddr_t p;
1.37 pk 1553: register struct memarr *mp;
1.1 deraadt 1554: register void (*rom_setmap)(int ctx, caddr_t va, int pmeg);
1555: int lastpage;
1556: extern char end[];
1.7 pk 1557: #ifdef DDB
1558: extern char *esym;
1559: char *theend = end;
1560: #endif
1.1 deraadt 1561: extern caddr_t reserve_dumppages(caddr_t);
1562:
1.45 pk 1563: switch (cputyp) {
1564: case CPU_SUN4C:
1565: mmu_has_hole = 1;
1566: break;
1567: case CPU_SUN4:
1568: if (cpumod != SUN4_400) {
1569: mmu_has_hole = 1;
1570: break;
1571: }
1572: }
1573:
1.19 deraadt 1574: cnt.v_page_size = NBPG;
1575: vm_set_page_size();
1576:
1.18 deraadt 1577: #if defined(SUN4) && defined(SUN4C)
1578: /* In this case NPTESG is not a #define */
1579: nptesg = (NBPSG >> pgshift);
1580: #endif
1581:
1.31 pk 1582: #if defined(SUN4)
1583: /*
1584: * set up the segfixmask to mask off invalid bits
1585: */
1.43 pk 1586: segfixmask = nsegment - 1; /* assume nsegment is a power of 2 */
1587: #ifdef DIAGNOSTIC
 1588: if ((nsegment & segfixmask) != 0) {
1589: printf("pmap_bootstrap: unsuitable number of segments (%d)\n",
1590: nsegment);
1591: callrom();
1592: }
1593: #endif
1.31 pk 1594: #endif
1595:
1.43 pk 1596: ncontext = nctx;
1597:
1.1 deraadt 1598: /*
1599: * Last segment is the `invalid' one (one PMEG of pte's with !pg_v).
1600: * It will never be used for anything else.
1601: */
1.43 pk 1602: seginval = --nsegment;
1603:
1604: #ifdef MMU_3L
1605: if (mmu_3l)
1606: reginval = --nregion;
1607: #endif
1608:
1609: /*
 1610: * Initialize the kernel pmap.
1611: */
1612: /* kernel_pmap_store.pm_ctxnum = 0; */
1613: simple_lock_init(kernel_pmap_store.pm_lock);
1614: kernel_pmap_store.pm_refcount = 1;
1615: #ifdef MMU_3L
1616: TAILQ_INIT(&kernel_pmap_store.pm_reglist);
1617: #endif
1618: TAILQ_INIT(&kernel_pmap_store.pm_seglist);
1619:
1620: kernel_pmap_store.pm_regmap = &kernel_regmap_store[-NUREG];
1621: for (i = NKREG; --i >= 0;) {
1622: #ifdef MMU_3L
1623: kernel_regmap_store[i].rg_smeg = reginval;
1624: #endif
1625: kernel_regmap_store[i].rg_segmap =
1626: &kernel_segmap_store[i * NSEGRG];
1627: for (j = NSEGRG; --j >= 0;)
1628: kernel_segmap_store[i * NSEGRG + j].sg_pmeg = seginval;
1629: }
1.1 deraadt 1630:
1631: /*
1632: * Preserve the monitor ROM's reserved VM region, so that
1633: * we can use L1-A or the monitor's debugger. As a side
1634: * effect we map the ROM's reserved VM into all contexts
1635: * (otherwise L1-A crashes the machine!).
1636: */
1.43 pk 1637:
1638: mmu_reservemon(&nregion, &nsegment);
1639:
1640: #ifdef MMU_3L
1641: /* Reserve one region for temporary mappings */
1642: tregion = --nregion;
1643: #endif
1.1 deraadt 1644:
1645: /*
1.43 pk 1646: * Allocate and clear mmu entries and context structures.
1.1 deraadt 1647: */
1648: p = end;
1.7 pk 1649: #ifdef DDB
1650: if (esym != 0)
1651: theend = p = esym;
1652: #endif
1.43 pk 1653: #ifdef MMU_3L
1654: mmuregions = mmureg = (struct mmuentry *)p;
1655: p += nregion * sizeof(struct mmuentry);
1656: #endif
1657: mmusegments = mmuseg = (struct mmuentry *)p;
1658: p += nsegment * sizeof(struct mmuentry);
1659: pmap_kernel()->pm_ctx = ctxinfo = ci = (union ctxinfo *)p;
1.1 deraadt 1660: p += nctx * sizeof *ci;
1.7 pk 1661: #ifdef DDB
1662: bzero(theend, p - theend);
1663: #else
1.1 deraadt 1664: bzero(end, p - end);
1.7 pk 1665: #endif
1.1 deraadt 1666:
1.43 pk 1667: /* Initialize MMU resource queues */
1668: #ifdef MMU_3L
 1669: TAILQ_INIT(&region_freelist);
 1670: TAILQ_INIT(&region_lru);
 1671: TAILQ_INIT(&region_locked);
1672: #endif
1673: TAILQ_INIT(&segm_freelist);
1674: TAILQ_INIT(&segm_lru);
1675: TAILQ_INIT(&segm_locked);
1676:
1.1 deraadt 1677: /*
1678: * Set up the `constants' for the call to vm_init()
1679: * in main(). All pages beginning at p (rounded up to
1680: * the next whole page) and continuing through the number
1681: * of available pages are free, but they start at a higher
1682: * virtual address. This gives us two mappable MD pages
1683: * for pmap_zero_page and pmap_copy_page, and one MI page
1684: * for /dev/mem, all with no associated physical memory.
1685: */
1686: p = (caddr_t)(((u_int)p + NBPG - 1) & ~PGOFSET);
1687: avail_start = (int)p - KERNBASE;
1.36 pk 1688:
1689: /*
1690: * Grab physical memory list, so pmap_next_page() can do its bit.
1691: */
1692: npmemarr = makememarr(pmemarr, MA_SIZE, MEMARR_AVAILPHYS);
1693: sortm(pmemarr, npmemarr);
1694: if (pmemarr[0].addr != 0) {
1695: printf("pmap_bootstrap: no kernel memory?!\n");
1696: callrom();
1697: }
1698: avail_end = pmemarr[npmemarr-1].addr + pmemarr[npmemarr-1].len;
1.38 pk 1699: avail_next = avail_start;
1700: for (physmem = 0, mp = pmemarr, j = npmemarr; --j >= 0; mp++)
1701: physmem += btoc(mp->len);
1702:
1703: i = (int)p;
1704: vpage[0] = p, p += NBPG;
1705: vpage[1] = p, p += NBPG;
1.41 mycroft 1706: vmmap = p, p += NBPG;
1.38 pk 1707: p = reserve_dumppages(p);
1.39 pk 1708:
1.37 pk 1709: /*
1.38 pk 1710: * Allocate virtual memory for pv_table[], which will be mapped
1711: * sparsely in pmap_init().
1.37 pk 1712: */
1713: pv_table = (struct pvlist *)p;
1714: p += round_page(sizeof(struct pvlist) * atop(avail_end - avail_start));
1.36 pk 1715:
1.1 deraadt 1716: virtual_avail = (vm_offset_t)p;
1717: virtual_end = VM_MAX_KERNEL_ADDRESS;
1718:
1719: p = (caddr_t)i; /* retract to first free phys */
1720:
1721: /*
1722: * All contexts are free except the kernel's.
1723: *
1724: * XXX sun4c could use context 0 for users?
1725: */
1.42 mycroft 1726: ci->c_pmap = pmap_kernel();
1.1 deraadt 1727: ctx_freelist = ci + 1;
1728: for (i = 1; i < ncontext; i++) {
1729: ci++;
1730: ci->c_nextfree = ci + 1;
1731: }
1732: ci->c_nextfree = NULL;
1733: ctx_kick = 0;
1734: ctx_kickdir = -1;
1735:
1736: /*
1737: * Init mmu entries that map the kernel physical addresses.
1738: *
1739: * All the other MMU entries are free.
1740: *
1741: * THIS ASSUMES SEGMENT i IS MAPPED BY MMU ENTRY i DURING THE
1742: * BOOT PROCESS
1743: */
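	/*
	 * (Outline of the loop below: walk kernel VA space segment by
	 * segment, handing out MMU cookies in order -- so segment i
	 * keeps PMEG i, matching the boot-time assumption above --
	 * entering each one on the locked lists and into every context
	 * via the ROM, then invalidating whatever is left over past the
	 * end of the kernel image.)
	 */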
1.43 pk 1744:
1745: rom_setmap = promvec->pv_setctxt;
1746: zseg = ((((u_int)p + NBPSG - 1) & ~SGOFSET) - KERNBASE) >> SGSHIFT;
1.1 deraadt 1747: lastpage = VA_VPG(p);
1748: if (lastpage == 0)
1.43 pk 1749: /*
1750: * If the page bits in p are 0, we filled the last segment
1751: * exactly (now how did that happen?); if not, it is
1752: * the last page filled in the last segment.
1753: */
1.1 deraadt 1754: lastpage = NPTESG;
1.43 pk 1755:
1.1 deraadt 1756: p = (caddr_t)KERNBASE; /* first va */
1757: vs = VA_VSEG(KERNBASE); /* first virtual segment */
1.43 pk 1758: vr = VA_VREG(KERNBASE); /* first virtual region */
1759: rp = &pmap_kernel()->pm_regmap[vr];
1760:
1761: for (rcookie = 0, scookie = 0;;) {
1762:
1.1 deraadt 1763: /*
1.43 pk 1764: * Distribute each kernel region/segment into all contexts.
1.1 deraadt 1765: * This is done through the monitor ROM, rather than
1766: * directly here: if we do a setcontext we will fault,
1767: * as we are not (yet) mapped in any other context.
1768: */
1.43 pk 1769:
1770: if ((vs % NSEGRG) == 0) {
1771: /* Entering a new region */
1772: if (VA_VREG(p) > vr) {
1773: #ifdef DEBUG
1774: printf("note: giant kernel!\n");
1775: #endif
1776: vr++, rp++;
1777: }
1778: #ifdef MMU_3L
1779: if (mmu_3l) {
1780: for (i = 1; i < nctx; i++)
1781: rom_setmap(i, p, rcookie);
1782:
1783: 				TAILQ_INSERT_TAIL(&region_locked,
1784: mmureg, me_list);
1785: TAILQ_INSERT_TAIL(&pmap_kernel()->pm_reglist,
1786: mmureg, me_pmchain);
1787: mmureg->me_cookie = rcookie;
1788: mmureg->me_pmap = pmap_kernel();
1789: mmureg->me_vreg = vr;
1790: rp->rg_smeg = rcookie;
1791: mmureg++;
1792: rcookie++;
1793: }
1794: #endif
1795: }
1796:
1797: #ifdef MMU_3L
1798: if (!mmu_3l)
1799: #endif
1800: for (i = 1; i < nctx; i++)
1801: rom_setmap(i, p, scookie);
1.1 deraadt 1802:
1803: /* set up the mmu entry */
1.43 pk 1804: TAILQ_INSERT_TAIL(&segm_locked, mmuseg, me_list);
1805: TAILQ_INSERT_TAIL(&pmap_kernel()->pm_seglist, mmuseg, me_pmchain);
1806: mmuseg->me_cookie = scookie;
1807: mmuseg->me_pmap = pmap_kernel();
1808: mmuseg->me_vreg = vr;
1809: mmuseg->me_vseg = vs % NSEGRG;
1810: rp->rg_segmap[vs % NSEGRG].sg_pmeg = scookie;
1811: npte = ++scookie < zseg ? NPTESG : lastpage;
1812: rp->rg_segmap[vs % NSEGRG].sg_npte = npte;
1813: rp->rg_nsegmap += 1;
1814: mmuseg++;
1.1 deraadt 1815: vs++;
1.43 pk 1816: if (scookie < zseg) {
1.1 deraadt 1817: p += NBPSG;
1818: continue;
1819: }
1.43 pk 1820:
1.1 deraadt 1821: /*
1822: * Unmap the pages, if any, that are not part of
1823: * the final segment.
1824: */
1.43 pk 1825: for (p += npte << PGSHIFT; npte < NPTESG; npte++, p += NBPG)
1.1 deraadt 1826: setpte(p, 0);
1.43 pk 1827:
1828: #ifdef MMU_3L
1829: if (mmu_3l) {
1830: /*
1831: * Unmap the segments, if any, that are not part of
1832: * the final region.
1833: */
1834: for (i = rp->rg_nsegmap; i < NSEGRG; i++, p += NBPSG)
1835: setsegmap(p, seginval);
1836: }
1837: #endif
1.1 deraadt 1838: break;
1839: }
1.43 pk 1840:
1841: #ifdef MMU_3L
1842: if (mmu_3l)
1843: for (; rcookie < nregion; rcookie++, mmureg++) {
1844: mmureg->me_cookie = rcookie;
1845: 			TAILQ_INSERT_TAIL(&region_freelist, mmureg, me_list);
1846: }
1847: #endif
1848:
1849: for (; scookie < nsegment; scookie++, mmuseg++) {
1850: mmuseg->me_cookie = scookie;
1851: TAILQ_INSERT_TAIL(&segm_freelist, mmuseg, me_list);
1.1 deraadt 1852: }
1853:
1.13 pk 1854: /* Erase all spurious user-space segmaps */
1855: for (i = 1; i < ncontext; i++) {
1856: setcontext(i);
1.43 pk 1857: #ifdef MMU_3L
1858: if (mmu_3l)
1859: for (p = 0, j = NUREG; --j >= 0; p += NBPRG)
1860: setregmap(p, reginval);
1861: else
1862: #endif
1863: for (p = 0, vr = 0; vr < NUREG; vr++) {
1.45 pk 1864: if (VA_INHOLE(p)) {
1865: p = (caddr_t)MMU_HOLE_END;
1866: vr = VA_VREG(p);
1.43 pk 1867: }
1868: for (j = NSEGRG; --j >= 0; p += NBPSG)
1869: setsegmap(p, seginval);
1870: }
1.13 pk 1871: }
1872: setcontext(0);
1873:
1.1 deraadt 1874: /*
1875: * write protect & encache kernel text;
1876: * set red zone at kernel base; enable cache on message buffer.
1877: */
1878: {
1.23 deraadt 1879: extern char etext[];
1.1 deraadt 1880: #ifdef KGDB
1881: register int mask = ~PG_NC; /* XXX chgkprot is busted */
1882: #else
1883: register int mask = ~(PG_W | PG_NC);
1884: #endif
1.2 deraadt 1885:
1.23 deraadt 1886: for (p = (caddr_t)trapbase; p < etext; p += NBPG)
1.1 deraadt 1887: setpte(p, getpte(p) & mask);
1888: }
1889: }
1890:
1.36 pk 1891: void
1892: pmap_init()
1893: {
1894: register vm_size_t s;
1.38 pk 1895: int pass1, nmem;
1.36 pk 1896: register struct memarr *mp;
1.37 pk 1897: vm_offset_t sva, va, eva;
1898: vm_offset_t pa;
1.36 pk 1899:
1900: if (PAGE_SIZE != NBPG)
1901: panic("pmap_init: CLSIZE!=1");
1.37 pk 1902:
1.36 pk 1903: /*
1.38 pk 1904: * Map pv_table[] as a `sparse' array. This requires two passes
1905: * over the `pmemarr': (1) to determine the number of physical
1906: * pages needed, and (2), to map the correct pieces of virtual
1907: * memory allocated to pv_table[].
1.36 pk 1908: */
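	/*
	 * (Indexing sketch: a managed physical address pa is described
	 * by pv_table[atop(pa - avail_start)]; only the pieces of the
	 * array covering actual `pmemarr' chunks get backing pages.)
	 */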
1.38 pk 1909:
1910: s = 0;
1911: pass1 = 1;
1912:
1913: pass2:
1.37 pk 1914: sva = eva = 0;
1.36 pk 1915: for (mp = pmemarr, nmem = npmemarr; --nmem >= 0; mp++) {
1.37 pk 1916: int len;
1917: vm_offset_t addr;
1.36 pk 1918:
1.37 pk 1919: len = mp->len;
1.38 pk 1920: if ((addr = mp->addr) < avail_start) {
1.36 pk 1921: /*
1.38 pk 1922: * pv_table[] covers everything above `avail_start'.
1.36 pk 1923: */
1.38 pk 1924: addr = avail_start;
1925: len -= avail_start;
1.36 pk 1926: }
1.37 pk 1927: len = sizeof(struct pvlist) * atop(len);
1928:
1.38 pk 1929: if (addr < avail_start || addr >= avail_end)
1.36 pk 1930: panic("pmap_init: unmanaged address: 0x%x", addr);
1931:
1.38 pk 1932: va = (vm_offset_t)&pv_table[atop(addr - avail_start)];
1.37 pk 1933: sva = trunc_page(va);
1934: if (sva < eva) {
1.38 pk 1935: #ifdef DEBUG
1936: printf("note: crowded chunk at 0x%x\n", mp->addr);
1937: #endif
1.37 pk 1938: sva += PAGE_SIZE;
1939: if (sva < eva)
1.38 pk 1940: panic("pmap_init: sva(%x) < eva(%x)", sva, eva);
1.37 pk 1941: }
1942: eva = round_page(va + len);
1.38 pk 1943: if (pass1) {
1944: /* Just counting */
1945: s += eva - sva;
1946: continue;
1947: }
1948:
1949: /* Map this piece of pv_table[] */
1.37 pk 1950: for (va = sva; va < eva; va += PAGE_SIZE) {
1.42 mycroft 1951: pmap_enter(pmap_kernel(), va, pa,
1.36 pk 1952: VM_PROT_READ|VM_PROT_WRITE, 1);
1953: pa += PAGE_SIZE;
1954: }
1.38 pk 1955: bzero((caddr_t)sva, eva - sva);
1956: }
1.36 pk 1957:
1.38 pk 1958: if (pass1) {
1.42 mycroft 1959: pa = pmap_extract(pmap_kernel(), kmem_alloc(kernel_map, s));
1.38 pk 1960: pass1 = 0;
1961: goto pass2;
1.36 pk 1962: }
1.38 pk 1963:
1964: vm_first_phys = avail_start;
1965: vm_num_phys = avail_end - avail_start;
1.36 pk 1966: }
1967:
1.1 deraadt 1968:
1969: /*
1970: * Map physical addresses into kernel VM.
1971: */
1972: vm_offset_t
1973: pmap_map(va, pa, endpa, prot)
1974: register vm_offset_t va, pa, endpa;
1975: register int prot;
1976: {
1977: register int pgsize = PAGE_SIZE;
1978:
1979: while (pa < endpa) {
1.42 mycroft 1980: pmap_enter(pmap_kernel(), va, pa, prot, 1);
1.1 deraadt 1981: va += pgsize;
1982: pa += pgsize;
1983: }
1984: return (va);
1985: }
1986:
1987: /*
1988: * Create and return a physical map.
1989: *
1990: * If size is nonzero, the map is useless. (ick)
1991: */
1992: struct pmap *
1993: pmap_create(size)
1994: vm_size_t size;
1995: {
1996: register struct pmap *pm;
1997:
1998: if (size)
1999: return (NULL);
2000: pm = (struct pmap *)malloc(sizeof *pm, M_VMPMAP, M_WAITOK);
2001: #ifdef DEBUG
2002: if (pmapdebug & PDB_CREATE)
2003: printf("pmap_create: created %x\n", pm);
2004: #endif
2005: bzero((caddr_t)pm, sizeof *pm);
2006: pmap_pinit(pm);
2007: return (pm);
2008: }
2009:
2010: /*
2011: * Initialize a preallocated and zeroed pmap structure,
2012: * such as one in a vmspace structure.
2013: */
2014: void
2015: pmap_pinit(pm)
2016: register struct pmap *pm;
2017: {
1.43 pk 2018: register int i, size;
2019: void *urp;
1.1 deraadt 2020:
2021: #ifdef DEBUG
2022: if (pmapdebug & PDB_CREATE)
2023: printf("pmap_pinit(%x)\n", pm);
2024: #endif
1.13 pk 2025:
1.43 pk 2026: size = NUREG * sizeof(struct regmap);
2027: pm->pm_regstore = urp = malloc(size, M_VMPMAP, M_WAITOK);
2028: bzero((caddr_t)urp, size);
1.1 deraadt 2029: /* pm->pm_ctx = NULL; */
2030: simple_lock_init(&pm->pm_lock);
2031: pm->pm_refcount = 1;
1.43 pk 2032: #ifdef MMU_3L
2033: TAILQ_INIT(&pm->pm_reglist);
2034: #endif
2035: TAILQ_INIT(&pm->pm_seglist);
2036: pm->pm_regmap = urp;
2037: #ifdef MMU_3L
2038: if (mmu_3l)
2039: for (i = NUREG; --i >= 0;)
2040: pm->pm_regmap[i].rg_smeg = reginval;
2041: #endif
2042: pm->pm_gap_end = VA_VREG(VM_MAXUSER_ADDRESS);
2043:
2044: return;
1.1 deraadt 2045: }
2046:
2047: /*
2048: * Retire the given pmap from service.
2049: * Should only be called if the map contains no valid mappings.
2050: */
2051: void
2052: pmap_destroy(pm)
2053: register struct pmap *pm;
2054: {
2055: int count;
2056:
2057: if (pm == NULL)
2058: return;
2059: #ifdef DEBUG
2060: if (pmapdebug & PDB_DESTROY)
2061: printf("pmap_destroy(%x)\n", pm);
2062: #endif
2063: simple_lock(&pm->pm_lock);
2064: count = --pm->pm_refcount;
2065: simple_unlock(&pm->pm_lock);
2066: if (count == 0) {
2067: pmap_release(pm);
2068: free((caddr_t)pm, M_VMPMAP);
2069: }
2070: }
2071:
2072: /*
2073: * Release any resources held by the given physical map.
2074: * Called when a pmap initialized by pmap_pinit is being released.
2075: */
2076: void
2077: pmap_release(pm)
2078: register struct pmap *pm;
2079: {
2080: register union ctxinfo *c;
2081: register int s = splpmap(); /* paranoia */
2082:
2083: #ifdef DEBUG
2084: if (pmapdebug & PDB_DESTROY)
2085: printf("pmap_release(%x)\n", pm);
2086: #endif
1.43 pk 2087: #ifdef MMU_3L
2088: if (pm->pm_reglist.tqh_first)
2089: panic("pmap_release: region list not empty");
2090: #endif
2091: if (pm->pm_seglist.tqh_first)
2092: panic("pmap_release: segment list not empty");
1.1 deraadt 2093: if ((c = pm->pm_ctx) != NULL) {
2094: if (pm->pm_ctxnum == 0)
2095: panic("pmap_release: releasing kernel");
2096: ctx_free(pm);
2097: }
2098: splx(s);
1.43 pk 2099: #ifdef DEBUG
2100: {
2101: int vs, vr;
2102: for (vr = 0; vr < NUREG; vr++) {
2103: struct regmap *rp = &pm->pm_regmap[vr];
2104: if (rp->rg_nsegmap != 0)
2105: printf("pmap_release: %d segments remain in "
2106: "region %d\n", rp->rg_nsegmap, vr);
2107: if (rp->rg_segmap != NULL) {
2108: printf("pmap_release: segments still "
2109: "allocated in region %d\n", vr);
2110: for (vs = 0; vs < NSEGRG; vs++) {
2111: struct segmap *sp = &rp->rg_segmap[vs];
2112: if (sp->sg_npte != 0)
2113: printf("pmap_release: %d ptes "
2114: "remain in segment %d\n",
2115: sp->sg_npte, vs);
2116: if (sp->sg_pte != NULL) {
2117: printf("pmap_release: ptes still "
2118: "allocated in segment %d\n", vs);
2119: }
2120: }
2121: }
2122: }
2123: }
2124: #endif
2125: if (pm->pm_regstore)
2126: free((caddr_t)pm->pm_regstore, M_VMPMAP);
1.1 deraadt 2127: }
2128:
2129: /*
2130: * Add a reference to the given pmap.
2131: */
2132: void
2133: pmap_reference(pm)
2134: struct pmap *pm;
2135: {
2136:
2137: if (pm != NULL) {
2138: simple_lock(&pm->pm_lock);
2139: pm->pm_refcount++;
2140: simple_unlock(&pm->pm_lock);
2141: }
2142: }
2143:
1.43 pk 2144: static void pmap_rmk __P((struct pmap *, vm_offset_t, vm_offset_t,
2145: int, int));
2146: static void pmap_rmu __P((struct pmap *, vm_offset_t, vm_offset_t,
2147: int, int));
1.1 deraadt 2148:
2149: /*
2150: * Remove the given range of mapping entries.
2151: * The starting and ending addresses are already rounded to pages.
2152: * Sheer lunacy: pmap_remove is often asked to remove nonexistent
2153: * mappings.
2154: */
2155: void
2156: pmap_remove(pm, va, endva)
2157: register struct pmap *pm;
2158: register vm_offset_t va, endva;
2159: {
2160: register vm_offset_t nva;
1.43 pk 2161: register int vr, vs, s, ctx;
2162: register void (*rm)(struct pmap *, vm_offset_t, vm_offset_t, int, int);
1.1 deraadt 2163:
2164: if (pm == NULL)
2165: return;
1.13 pk 2166:
1.1 deraadt 2167: #ifdef DEBUG
2168: if (pmapdebug & PDB_REMOVE)
2169: printf("pmap_remove(%x, %x, %x)\n", pm, va, endva);
2170: #endif
2171:
1.42 mycroft 2172: if (pm == pmap_kernel()) {
1.1 deraadt 2173: /*
2174: * Removing from kernel address space.
2175: */
2176: rm = pmap_rmk;
2177: } else {
2178: /*
2179: * Removing from user address space.
2180: */
2181: write_user_windows();
2182: rm = pmap_rmu;
2183: }
2184:
2185: ctx = getcontext();
2186: s = splpmap(); /* XXX conservative */
2187: simple_lock(&pm->pm_lock);
2188: for (; va < endva; va = nva) {
2189: /* do one virtual segment at a time */
1.43 pk 2190: vr = VA_VREG(va);
2191: vs = VA_VSEG(va);
2192: nva = VSTOVA(vr, vs + 1);
1.1 deraadt 2193: if (nva == 0 || nva > endva)
2194: nva = endva;
1.43 pk 2195: (*rm)(pm, va, nva, vr, vs);
1.1 deraadt 2196: }
2197: simple_unlock(&pm->pm_lock);
2198: splx(s);
2199: setcontext(ctx);
2200: }
2201:
2202: /*
2203: * The following magic number was chosen because:
2204: * 1. It is the same amount of work to cache_flush_page 4 pages
2205: * as to cache_flush_segment 1 segment (so at 4 the cost of
2206: * flush is the same).
2207: * 2. Flushing extra pages is bad (causes cache not to work).
2208: * 3. The current code, which malloc()s 5 pages for each process
2209: * for a user vmspace/pmap, almost never touches all 5 of those
2210: * pages.
2211: */
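/*
 * (Worked example of point 1: removing a 6-page range costs six page
 * flushes done page-by-page, but only the equivalent of four as one
 * segment flush; so for npg > PMAP_RMK_MAGIC we take the segment-flush
 * path below.)
 */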
1.13 pk 2212: #if 0
2213: #define PMAP_RMK_MAGIC (cacheinfo.c_hwflush?5:64) /* if > magic, use cache_flush_segment */
2214: #else
1.1 deraadt 2215: #define PMAP_RMK_MAGIC 5 /* if > magic, use cache_flush_segment */
1.13 pk 2216: #endif
1.1 deraadt 2217:
2218: /*
2219: * Remove a range contained within a single segment.
2220: * These are egregiously complicated routines.
2221: */
2222:
1.43 pk 2223: /* remove from kernel */
2224: static void
2225: pmap_rmk(pm, va, endva, vr, vs)
1.1 deraadt 2226: register struct pmap *pm;
2227: register vm_offset_t va, endva;
1.43 pk 2228: register int vr, vs;
1.1 deraadt 2229: {
2230: register int i, tpte, perpage, npg;
2231: register struct pvlist *pv;
1.43 pk 2232: register int nleft, pmeg;
2233: struct regmap *rp;
2234: struct segmap *sp;
2235:
2236: rp = &pm->pm_regmap[vr];
2237: sp = &rp->rg_segmap[vs];
2238:
2239: if (rp->rg_nsegmap == 0)
2240: return;
2241:
2242: #ifdef DEBUG
2243: if (rp->rg_segmap == NULL)
2244: panic("pmap_rmk: no segments");
2245: #endif
2246:
2247: if ((nleft = sp->sg_npte) == 0)
2248: return;
2249:
2250: pmeg = sp->sg_pmeg;
1.1 deraadt 2251:
2252: #ifdef DEBUG
2253: if (pmeg == seginval)
2254: panic("pmap_rmk: not loaded");
2255: if (pm->pm_ctx == NULL)
2256: panic("pmap_rmk: lost context");
2257: #endif
2258:
2259: setcontext(0);
2260: /* decide how to flush cache */
2261: npg = (endva - va) >> PGSHIFT;
2262: if (npg > PMAP_RMK_MAGIC) {
2263: /* flush the whole segment */
2264: perpage = 0;
2265: if (vactype != VAC_NONE)
1.43 pk 2266: cache_flush_segment(vr, vs);
1.1 deraadt 2267: } else {
2268: /* flush each page individually; some never need flushing */
1.31 pk 2269: perpage = (vactype != VAC_NONE);
1.1 deraadt 2270: }
2271: while (va < endva) {
2272: tpte = getpte(va);
2273: if ((tpte & PG_V) == 0) {
2274: va += PAGE_SIZE;
2275: continue;
2276: }
1.35 pk 2277: if ((tpte & PG_TYPE) == PG_OBMEM) {
2278: /* if cacheable, flush page as needed */
2279: if (perpage && (tpte & PG_NC) == 0)
1.1 deraadt 2280: cache_flush_page(va);
2281: i = ptoa(HWTOSW(tpte & PG_PFNUM));
2282: if (managed(i)) {
2283: pv = pvhead(i);
2284: pv->pv_flags |= MR(tpte);
2285: pv_unlink(pv, pm, va);
2286: }
2287: }
2288: nleft--;
2289: setpte(va, 0);
2290: va += NBPG;
2291: }
2292:
2293: /*
2294: * If the segment is all gone, remove it from everyone and
2295: * free the MMU entry.
2296: */
1.43 pk 2297: if ((sp->sg_npte = nleft) == 0) {
2298: va = VSTOVA(vr,vs); /* retract */
2299: #ifdef MMU_3L
2300: if (mmu_3l)
1.1 deraadt 2301: setsegmap(va, seginval);
1.43 pk 2302: else
2303: #endif
2304: for (i = ncontext; --i >= 0;) {
2305: setcontext(i);
2306: setsegmap(va, seginval);
2307: }
2308: me_free(pm, pmeg);
2309: if (--rp->rg_nsegmap == 0) {
2310: #ifdef MMU_3L
2311: if (mmu_3l) {
2312: for (i = ncontext; --i >= 0;) {
2313: setcontext(i);
2314: setregmap(va, reginval);
2315: }
2316: /* note: context is 0 */
2317: region_free(pm, rp->rg_smeg);
2318: }
2319: #endif
1.1 deraadt 2320: }
2321: }
2322: }
2323:
2324: /*
2325: * Just like pmap_rmk_magic, but we have a different threshold.
2326: * Note that this may well deserve further tuning work.
2327: */
1.13 pk 2328: #if 0
2329: #define PMAP_RMU_MAGIC (cacheinfo.c_hwflush?4:64) /* if > magic, use cache_flush_segment */
2330: #else
1.1 deraadt 2331: #define PMAP_RMU_MAGIC 4 /* if > magic, use cache_flush_segment */
1.13 pk 2332: #endif
1.1 deraadt 2333:
2334: /* remove from user */
1.43 pk 2335: static void
2336: pmap_rmu(pm, va, endva, vr, vs)
1.1 deraadt 2337: register struct pmap *pm;
2338: register vm_offset_t va, endva;
1.43 pk 2339: register int vr, vs;
1.1 deraadt 2340: {
2341: register int *pte0, i, pteva, tpte, perpage, npg;
2342: register struct pvlist *pv;
1.43 pk 2343: register int nleft, pmeg;
2344: struct regmap *rp;
2345: struct segmap *sp;
2346:
2347: rp = &pm->pm_regmap[vr];
2348: if (rp->rg_nsegmap == 0)
2349: return;
2350: if (rp->rg_segmap == NULL)
2351: panic("pmap_rmu: no segments");
2352:
2353: sp = &rp->rg_segmap[vs];
2354: if ((nleft = sp->sg_npte) == 0)
2355: return;
2356: if (sp->sg_pte == NULL)
2357: panic("pmap_rmu: no pages");
2358:
2359:
2360: pmeg = sp->sg_pmeg;
2361: pte0 = sp->sg_pte;
1.1 deraadt 2362:
2363: if (pmeg == seginval) {
2364: register int *pte = pte0 + VA_VPG(va);
2365:
2366: /*
2367: * PTEs are not in MMU. Just invalidate software copies.
2368: */
2369: for (; va < endva; pte++, va += PAGE_SIZE) {
2370: tpte = *pte;
2371: if ((tpte & PG_V) == 0) {
2372: /* nothing to remove (braindead VM layer) */
2373: continue;
2374: }
2375: if ((tpte & PG_TYPE) == PG_OBMEM) {
2376: i = ptoa(HWTOSW(tpte & PG_PFNUM));
1.21 deraadt 2377: if (managed(i))
1.1 deraadt 2378: pv_unlink(pvhead(i), pm, va);
2379: }
2380: nleft--;
2381: *pte = 0;
2382: }
1.43 pk 2383: if ((sp->sg_npte = nleft) == 0) {
1.1 deraadt 2384: free((caddr_t)pte0, M_VMPMAP);
1.43 pk 2385: sp->sg_pte = NULL;
2386: if (--rp->rg_nsegmap == 0) {
2387: free((caddr_t)rp->rg_segmap, M_VMPMAP);
2388: rp->rg_segmap = NULL;
2389: #ifdef MMU_3L
2390: if (mmu_3l && rp->rg_smeg != reginval) {
2391: if (pm->pm_ctx) {
2392: setcontext(pm->pm_ctxnum);
2393: setregmap(va, reginval);
2394: } else
2395: setcontext(0);
2396: region_free(pm, rp->rg_smeg);
2397: }
2398: #endif
2399: }
1.1 deraadt 2400: }
1.43 pk 2401: return;
1.1 deraadt 2402: }
2403:
2404: /*
2405: * PTEs are in MMU. Invalidate in hardware, update ref &
2406: * mod bits, and flush cache if required.
2407: */
1.43 pk 2408: if (CTX_USABLE(pm,rp)) {
1.1 deraadt 2409: /* process has a context, must flush cache */
2410: npg = (endva - va) >> PGSHIFT;
2411: setcontext(pm->pm_ctxnum);
2412: if (npg > PMAP_RMU_MAGIC) {
2413: perpage = 0; /* flush the whole segment */
2414: if (vactype != VAC_NONE)
1.43 pk 2415: cache_flush_segment(vr, vs);
1.1 deraadt 2416: } else
1.31 pk 2417: perpage = (vactype != VAC_NONE);
1.1 deraadt 2418: pteva = va;
2419: } else {
2420: /* no context, use context 0; cache flush unnecessary */
2421: setcontext(0);
1.43 pk 2422: #ifdef MMU_3L
2423: if (mmu_3l)
2424: setregmap(0, tregion);
2425: #endif
1.1 deraadt 2426: /* XXX use per-cpu pteva? */
2427: setsegmap(0, pmeg);
1.18 deraadt 2428: pteva = VA_VPG(va) << PGSHIFT;
1.1 deraadt 2429: perpage = 0;
2430: }
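	/*
	 * (The trick above: with no usable context, the PMEG cannot be
	 * reached through this pmap's own addresses, so it is borrowed
	 * into segment 0 of context 0 and the PTEs are reached through
	 * `pteva', the page offset within the segment.)
	 */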
2431: for (; va < endva; pteva += PAGE_SIZE, va += PAGE_SIZE) {
2432: tpte = getpte(pteva);
2433: if ((tpte & PG_V) == 0)
2434: continue;
1.35 pk 2435: if ((tpte & PG_TYPE) == PG_OBMEM) {
2436: /* if cacheable, flush page as needed */
2437: if (perpage && (tpte & PG_NC) == 0)
1.1 deraadt 2438: cache_flush_page(va);
2439: i = ptoa(HWTOSW(tpte & PG_PFNUM));
2440: if (managed(i)) {
2441: pv = pvhead(i);
2442: pv->pv_flags |= MR(tpte);
2443: pv_unlink(pv, pm, va);
2444: }
2445: }
2446: nleft--;
2447: setpte(pteva, 0);
1.43 pk 2448: #define PMAP_PTESYNC
2449: #ifdef PMAP_PTESYNC
2450: pte0[VA_VPG(pteva)] = 0;
2451: #endif
1.1 deraadt 2452: }
2453:
2454: /*
2455: * If the segment is all gone, and the context is loaded, give
2456: * the segment back.
2457: */
1.43 pk 2458: if ((sp->sg_npte = nleft) == 0 /* ??? && pm->pm_ctx != NULL*/) {
2459: #ifdef DEBUG
2460: if (pm->pm_ctx == NULL) {
2461: printf("pmap_rmu: no context here...");
2462: }
2463: #endif
2464: va = VSTOVA(vr,vs); /* retract */
2465: if (CTX_USABLE(pm,rp))
2466: setsegmap(va, seginval);
2467: #ifdef MMU_3L
2468: else if (mmu_3l && rp->rg_smeg != reginval) {
2469: /* note: context already set earlier */
2470: setregmap(0, rp->rg_smeg);
2471: setsegmap(vs << SGSHIFT, seginval);
2472: }
2473: #endif
1.1 deraadt 2474: free((caddr_t)pte0, M_VMPMAP);
1.43 pk 2475: sp->sg_pte = NULL;
1.1 deraadt 2476: me_free(pm, pmeg);
1.13 pk 2477:
1.43 pk 2478: if (--rp->rg_nsegmap == 0) {
2479: free((caddr_t)rp->rg_segmap, M_VMPMAP);
2480: rp->rg_segmap = NULL;
2481: GAP_WIDEN(pm,vr);
2482:
2483: #ifdef MMU_3L
2484: if (mmu_3l && rp->rg_smeg != reginval) {
2485: /* note: context already set */
2486: if (pm->pm_ctx)
2487: setregmap(va, reginval);
2488: region_free(pm, rp->rg_smeg);
2489: }
2490: #endif
2491: }
1.13 pk 2492:
1.1 deraadt 2493: }
2494: }
2495:
2496: /*
2497: * Lower (make more strict) the protection on the specified
2498: * physical page.
2499: *
2500: * There are only two cases: either the protection is going to 0
2501: * (in which case we do the dirty work here), or it is going from
2502: * to read-only (in which case pv_changepte does the trick).
2503: */
2504: void
2505: pmap_page_protect(pa, prot)
2506: vm_offset_t pa;
2507: vm_prot_t prot;
2508: {
2509: register struct pvlist *pv, *pv0, *npv;
2510: register struct pmap *pm;
1.43 pk 2511: register int va, vr, vs, pteva, tpte;
2512: register int flags, nleft, i, s, ctx, doflush;
2513: struct regmap *rp;
2514: struct segmap *sp;
1.1 deraadt 2515:
2516: #ifdef DEBUG
1.43 pk 2517: if (!pmap_pa_exists(pa))
2518: panic("pmap_page_protect: no such address: %x", pa);
1.1 deraadt 2519: if ((pmapdebug & PDB_CHANGEPROT) ||
2520: (pmapdebug & PDB_REMOVE && prot == VM_PROT_NONE))
2521: printf("pmap_page_protect(%x, %x)\n", pa, prot);
2522: #endif
2523: /*
2524: * Skip unmanaged pages, or operations that do not take
2525: * away write permission.
2526: */
1.34 pk 2527: if ((pa & (PMAP_TNC & ~PMAP_NC)) ||
2528: !managed(pa) || prot & VM_PROT_WRITE)
1.1 deraadt 2529: return;
2530: write_user_windows(); /* paranoia */
2531: if (prot & VM_PROT_READ) {
2532: pv_changepte(pvhead(pa), 0, PG_W);
2533: return;
2534: }
2535:
2536: /*
2537: * Remove all access to all people talking to this page.
2538: * Walk down PV list, removing all mappings.
2539: * The logic is much like that for pmap_remove,
2540: * but we know we are removing exactly one page.
2541: */
2542: pv = pvhead(pa);
2543: s = splpmap();
2544: if ((pm = pv->pv_pmap) == NULL) {
2545: splx(s);
2546: return;
2547: }
2548: ctx = getcontext();
2549: pv0 = pv;
2550: flags = pv->pv_flags & ~PV_NC;
2551: for (;; pm = pv->pv_pmap) {
2552: va = pv->pv_va;
1.43 pk 2553: vr = VA_VREG(va);
2554: vs = VA_VSEG(va);
2555: rp = &pm->pm_regmap[vr];
2556: if (rp->rg_nsegmap == 0)
2557: 			panic("pmap_page_protect: empty vreg");
2558: sp = &rp->rg_segmap[vs];
2559: if ((nleft = sp->sg_npte) == 0)
1.1 deraadt 2560: 			panic("pmap_page_protect: empty vseg");
2561: nleft--;
1.43 pk 2562: sp->sg_npte = nleft;
2563:
2564: if (sp->sg_pmeg == seginval) {
2565: /* Definitely not a kernel map */
1.1 deraadt 2566: if (nleft) {
1.43 pk 2567: sp->sg_pte[VA_VPG(va)] = 0;
1.1 deraadt 2568: } else {
1.43 pk 2569: free((caddr_t)sp->sg_pte, M_VMPMAP);
2570: sp->sg_pte = NULL;
2571: if (--rp->rg_nsegmap == 0) {
2572: free((caddr_t)rp->rg_segmap, M_VMPMAP);
2573: rp->rg_segmap = NULL;
2574: GAP_WIDEN(pm,vr);
2575: #ifdef MMU_3L
2576: if (mmu_3l && rp->rg_smeg != reginval) {
2577: if (pm->pm_ctx) {
2578: setcontext(pm->pm_ctxnum);
2579: setregmap(va, reginval);
2580: } else
2581: setcontext(0);
2582: region_free(pm, rp->rg_smeg);
2583: }
2584: #endif
2585: }
1.1 deraadt 2586: }
2587: goto nextpv;
2588: }
1.43 pk 2589: if (CTX_USABLE(pm,rp)) {
1.1 deraadt 2590: setcontext(pm->pm_ctxnum);
2591: pteva = va;
1.43 pk 2592: if (vactype != VAC_NONE)
2593: cache_flush_page(va);
1.1 deraadt 2594: } else {
2595: setcontext(0);
2596: /* XXX use per-cpu pteva? */
1.43 pk 2597: #ifdef MMU_3L
2598: if (mmu_3l)
2599: setregmap(0, tregion);
2600: #endif
2601: setsegmap(0, sp->sg_pmeg);
1.18 deraadt 2602: pteva = VA_VPG(va) << PGSHIFT;
1.1 deraadt 2603: }
1.43 pk 2604:
2605: tpte = getpte(pteva);
2606: if ((tpte & PG_V) == 0)
2607: panic("pmap_page_protect !PG_V");
2608: flags |= MR(tpte);
2609:
1.1 deraadt 2610: if (nleft) {
2611: setpte(pteva, 0);
1.43 pk 2612: #ifdef PMAP_PTESYNC
1.44 pk 2613: if (sp->sg_pte != NULL)
2614: sp->sg_pte[VA_VPG(pteva)] = 0;
1.43 pk 2615: #endif
1.1 deraadt 2616: } else {
1.43 pk 2617: if (pm == pmap_kernel()) {
2618: #ifdef MMU_3L
2619: if (!mmu_3l)
2620: #endif
2621: for (i = ncontext; --i >= 0;) {
1.1 deraadt 2622: setcontext(i);
2623: setsegmap(va, seginval);
2624: }
1.43 pk 2625: me_free(pm, sp->sg_pmeg);
2626: if (--rp->rg_nsegmap == 0) {
2627: #ifdef MMU_3L
2628: if (mmu_3l) {
2629: for (i = ncontext; --i >= 0;) {
2630: setcontext(i);
2631: setregmap(va, reginval);
2632: }
2633: region_free(pm, rp->rg_smeg);
2634: }
2635: #endif
2636: }
2637: } else {
2638: if (CTX_USABLE(pm,rp))
2639: /* `pteva'; we might be using tregion */
2640: setsegmap(pteva, seginval);
2641: #ifdef MMU_3L
2642: else if (mmu_3l && rp->rg_smeg != reginval) {
2643: /* note: context already set earlier */
2644: setregmap(0, rp->rg_smeg);
2645: setsegmap(vs << SGSHIFT, seginval);
2646: }
2647: #endif
2648: free((caddr_t)sp->sg_pte, M_VMPMAP);
2649: sp->sg_pte = NULL;
2650: me_free(pm, sp->sg_pmeg);
2651:
2652: if (--rp->rg_nsegmap == 0) {
2653: #ifdef MMU_3L
2654: if (mmu_3l && rp->rg_smeg != reginval) {
2655: if (pm->pm_ctx)
2656: setregmap(va, reginval);
2657: region_free(pm, rp->rg_smeg);
2658: }
2659: #endif
2660: free((caddr_t)rp->rg_segmap, M_VMPMAP);
2661: rp->rg_segmap = NULL;
2662: GAP_WIDEN(pm,vr);
1.1 deraadt 2663: }
2664: }
2665: }
2666: nextpv:
2667: npv = pv->pv_next;
2668: if (pv != pv0)
2669: free((caddr_t)pv, M_VMPVENT);
2670: if ((pv = npv) == NULL)
2671: break;
2672: }
2673: pv0->pv_pmap = NULL;
1.11 pk 2674: pv0->pv_next = NULL; /* ? */
1.1 deraadt 2675: pv0->pv_flags = flags;
2676: setcontext(ctx);
2677: splx(s);
2678: }
2679:
2680: /*
2681: * Lower (make more strict) the protection on the specified
2682: * range of this pmap.
2683: *
2684: * There are only two cases: either the protection is going to 0
2685: * (in which case we call pmap_remove to do the dirty work), or
2686: * it is going from read/write to read-only. The latter is
2687: * fairly easy.
2688: */
2689: void
2690: pmap_protect(pm, sva, eva, prot)
2691: register struct pmap *pm;
2692: vm_offset_t sva, eva;
2693: vm_prot_t prot;
2694: {
1.43 pk 2695: register int va, nva, vr, vs, pteva;
1.1 deraadt 2696: register int s, ctx;
1.43 pk 2697: struct regmap *rp;
2698: struct segmap *sp;
1.1 deraadt 2699:
2700: if (pm == NULL || prot & VM_PROT_WRITE)
2701: return;
1.43 pk 2702:
1.1 deraadt 2703: if ((prot & VM_PROT_READ) == 0) {
2704: pmap_remove(pm, sva, eva);
2705: return;
2706: }
2707:
2708: write_user_windows();
2709: ctx = getcontext();
2710: s = splpmap();
2711: simple_lock(&pm->pm_lock);
2712:
2713: for (va = sva; va < eva;) {
1.43 pk 2714: vr = VA_VREG(va);
2715: vs = VA_VSEG(va);
2716: rp = &pm->pm_regmap[vr];
2717: nva = VSTOVA(vr,vs + 1);
1.1 deraadt 2718: if (nva == 0) panic("pmap_protect: last segment"); /* cannot happen */
2719: if (nva > eva)
2720: nva = eva;
1.43 pk 2721: if (rp->rg_nsegmap == 0) {
1.1 deraadt 2722: va = nva;
2723: continue;
2724: }
1.43 pk 2725: #ifdef DEBUG
2726: if (rp->rg_segmap == NULL)
2727: panic("pmap_protect: no segments");
2728: #endif
2729: sp = &rp->rg_segmap[vs];
2730: if (sp->sg_npte == 0) {
2731: va = nva;
2732: continue;
2733: }
2734: #ifdef DEBUG
2735: if (pm != pmap_kernel() && sp->sg_pte == NULL)
2736: panic("pmap_protect: no pages");
2737: #endif
2738: if (sp->sg_pmeg == seginval) {
2739: register int *pte = &sp->sg_pte[VA_VPG(va)];
1.1 deraadt 2740:
2741: /* not in MMU; just clear PG_W from core copies */
2742: for (; va < nva; va += NBPG)
2743: *pte++ &= ~PG_W;
2744: } else {
2745: /* in MMU: take away write bits from MMU PTEs */
1.43 pk 2746: if (CTX_USABLE(pm,rp)) {
1.1 deraadt 2747: register int tpte;
2748:
2749: /*
2750: * Flush cache so that any existing cache
2751: * tags are updated. This is really only
2752: * needed for PTEs that lose PG_W.
2753: */
2754: setcontext(pm->pm_ctxnum);
2755: for (; va < nva; va += NBPG) {
2756: tpte = getpte(va);
2757: pmap_stats.ps_npg_prot_all++;
1.35 pk 2758: if ((tpte & (PG_W|PG_TYPE)) ==
2759: (PG_W|PG_OBMEM)) {
1.1 deraadt 2760: pmap_stats.ps_npg_prot_actual++;
1.34 pk 2761: if (vactype != VAC_NONE)
2762: cache_flush_page(va);
1.1 deraadt 2763: setpte(va, tpte & ~PG_W);
2764: }
2765: }
2766: } else {
2767: register int pteva;
2768:
2769: /*
2770: * No context, hence not cached;
2771: * just update PTEs.
2772: */
2773: setcontext(0);
2774: /* XXX use per-cpu pteva? */
1.43 pk 2775: #ifdef MMU_3L
2776: if (mmu_3l)
2777: setregmap(0, tregion);
2778: #endif
2779: setsegmap(0, sp->sg_pmeg);
1.18 deraadt 2780: pteva = VA_VPG(va) << PGSHIFT;
1.1 deraadt 2781: for (; va < nva; pteva += NBPG, va += NBPG)
2782: setpte(pteva, getpte(pteva) & ~PG_W);
2783: }
2784: }
2785: }
2786: simple_unlock(&pm->pm_lock);
1.12 pk 2787: splx(s);
1.11 pk 2788: setcontext(ctx);
1.1 deraadt 2789: }
2790:
2791: /*
2792: * Change the protection and/or wired status of the given (MI) virtual page.
2793: * XXX: should have separate function (or flag) telling whether only wiring
2794: * is changing.
2795: */
2796: void
2797: pmap_changeprot(pm, va, prot, wired)
2798: register struct pmap *pm;
2799: register vm_offset_t va;
2800: vm_prot_t prot;
2801: int wired;
2802: {
1.43 pk 2803: register int vr, vs, tpte, newprot, ctx, i, s;
2804: struct regmap *rp;
2805: struct segmap *sp;
1.1 deraadt 2806:
2807: #ifdef DEBUG
2808: if (pmapdebug & PDB_CHANGEPROT)
2809: printf("pmap_changeprot(%x, %x, %x, %x)\n",
2810: pm, va, prot, wired);
2811: #endif
2812:
2813: write_user_windows(); /* paranoia */
2814:
1.42 mycroft 2815: if (pm == pmap_kernel())
1.1 deraadt 2816: newprot = prot & VM_PROT_WRITE ? PG_S|PG_W : PG_S;
2817: else
2818: newprot = prot & VM_PROT_WRITE ? PG_W : 0;
1.43 pk 2819: vr = VA_VREG(va);
2820: vs = VA_VSEG(va);
1.1 deraadt 2821: s = splpmap(); /* conservative */
1.43 pk 2822: rp = &pm->pm_regmap[vr];
2823: if (rp->rg_nsegmap == 0) {
2824: printf("pmap_changeprot: no segments in %d\n", vr);
2825: return;
2826: }
2827: if (rp->rg_segmap == NULL) {
2828: printf("pmap_changeprot: no segments in %d!\n", vr);
2829: return;
2830: }
2831: sp = &rp->rg_segmap[vs];
2832:
1.1 deraadt 2833: pmap_stats.ps_changeprots++;
2834:
1.43 pk 2835: #ifdef DEBUG
2836: if (pm != pmap_kernel() && sp->sg_pte == NULL)
2837: panic("pmap_changeprot: no pages");
2838: #endif
2839:
1.1 deraadt 2840: /* update PTEs in software or hardware */
1.43 pk 2841: if (sp->sg_pmeg == seginval) {
2842: register int *pte = &sp->sg_pte[VA_VPG(va)];
1.1 deraadt 2843:
2844: /* update in software */
2845: if ((*pte & PG_PROT) == newprot)
2846: goto useless;
2847: *pte = (*pte & ~PG_PROT) | newprot;
2848: } else {
2849: /* update in hardware */
2850: ctx = getcontext();
1.43 pk 2851: if (CTX_USABLE(pm,rp)) {
1.1 deraadt 2852: /* use current context; flush writeback cache */
2853: setcontext(pm->pm_ctxnum);
2854: tpte = getpte(va);
1.11 pk 2855: if ((tpte & PG_PROT) == newprot) {
2856: setcontext(ctx);
1.1 deraadt 2857: goto useless;
1.11 pk 2858: }
1.1 deraadt 2859: if (vactype == VAC_WRITEBACK &&
1.35 pk 2860: (tpte & (PG_U|PG_NC|PG_TYPE)) == (PG_U|PG_OBMEM))
1.1 deraadt 2861: cache_flush_page((int)va);
2862: } else {
2863: setcontext(0);
2864: /* XXX use per-cpu va? */
1.43 pk 2865: #ifdef MMU_3L
2866: if (mmu_3l)
2867: setregmap(0, tregion);
2868: #endif
2869: setsegmap(0, sp->sg_pmeg);
1.18 deraadt 2870: va = VA_VPG(va) << PGSHIFT;
1.1 deraadt 2871: tpte = getpte(va);
1.11 pk 2872: if ((tpte & PG_PROT) == newprot) {
2873: setcontext(ctx);
1.1 deraadt 2874: goto useless;
1.11 pk 2875: }
1.1 deraadt 2876: }
2877: tpte = (tpte & ~PG_PROT) | newprot;
2878: setpte(va, tpte);
2879: setcontext(ctx);
2880: }
2881: splx(s);
2882: return;
2883:
2884: useless:
2885: /* only wiring changed, and we ignore wiring */
2886: pmap_stats.ps_useless_changeprots++;
2887: splx(s);
2888: }
2889:
2890: /*
2891: * Insert (MI) physical page pa at virtual address va in the given pmap.
2892: * NB: the pa parameter includes type bits PMAP_OBIO, PMAP_NC as necessary.
2893: *
2894: * If pa is not in the `managed' range it will not be `bank mapped'.
2895: * This works during bootstrap only because the first 4MB happens to
2896: * map one-to-one.
2897: *
2898: * There may already be something else there, or we might just be
2899: * changing protections and/or wiring on an existing mapping.
2900: * XXX should have different entry points for changing!
2901: */
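/*
 * (PTE prototype assembly, per the code below: PG_V plus the type and
 * no-cache bits carried in `pa'; then the page frame number, pushed
 * through SWTOHW() for managed memory; then PG_W if writable, and
 * PG_S added for kernel mappings.)
 */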
2902: void
2903: pmap_enter(pm, va, pa, prot, wired)
2904: register struct pmap *pm;
2905: vm_offset_t va, pa;
2906: vm_prot_t prot;
2907: int wired;
2908: {
2909: register struct pvlist *pv;
2910: register int pteproto, ctx;
2911:
2912: if (pm == NULL)
2913: return;
1.45 pk 2914:
2915: if (VA_INHOLE(va)) {
2916: #ifdef DEBUG
2917: printf("pmap_enter: pm %x, va %x, pa %x: in MMU hole\n",
2918: pm, va, pa);
2919: #endif
2920: return;
2921: }
1.39 pk 2922:
1.1 deraadt 2923: #ifdef DEBUG
2924: if (pmapdebug & PDB_ENTER)
2925: printf("pmap_enter(%x, %x, %x, %x, %x)\n",
2926: pm, va, pa, prot, wired);
2927: #endif
2928:
2929: pteproto = PG_V | ((pa & PMAP_TNC) << PG_TNC_SHIFT);
2930: pa &= ~PMAP_TNC;
2931: /*
2932: * Set up prototype for new PTE. Cannot set PG_NC from PV_NC yet
2933: * since the pvlist no-cache bit might change as a result of the
2934: * new mapping.
2935: */
1.31 pk 2936: if ((pteproto & PG_TYPE) == PG_OBMEM && managed(pa)) {
1.39 pk 2937: #ifdef DIAGNOSTIC
2938: if (!pmap_pa_exists(pa))
2939: panic("pmap_enter: no such address: %x", pa);
2940: #endif
1.1 deraadt 2941: pteproto |= SWTOHW(atop(pa));
2942: pv = pvhead(pa);
2943: } else {
2944: pteproto |= atop(pa) & PG_PFNUM;
2945: pv = NULL;
2946: }
2947: if (prot & VM_PROT_WRITE)
2948: pteproto |= PG_W;
2949:
2950: ctx = getcontext();
1.42 mycroft 2951: if (pm == pmap_kernel())
1.1 deraadt 2952: pmap_enk(pm, va, prot, wired, pv, pteproto | PG_S);
2953: else
2954: pmap_enu(pm, va, prot, wired, pv, pteproto);
2955: setcontext(ctx);
2956: }
2957:
2958: /* enter new (or change existing) kernel mapping */
2959: pmap_enk(pm, va, prot, wired, pv, pteproto)
2960: register struct pmap *pm;
2961: vm_offset_t va;
2962: vm_prot_t prot;
2963: int wired;
2964: register struct pvlist *pv;
2965: register int pteproto;
2966: {
1.43 pk 2967: register int vr, vs, tpte, i, s;
2968: struct regmap *rp;
2969: struct segmap *sp;
2970:
2971: vr = VA_VREG(va);
2972: vs = VA_VSEG(va);
2973: rp = &pm->pm_regmap[vr];
2974: sp = &rp->rg_segmap[vs];
2975: s = splpmap(); /* XXX way too conservative */
2976:
2977: #ifdef MMU_3L
2978: if (mmu_3l && rp->rg_smeg == reginval) {
2979: vm_offset_t tva;
2980: 		rp->rg_smeg = region_alloc(&region_locked, pm, vr)->me_cookie;
2981: i = ncontext - 1;
2982: do {
2983: setcontext(i);
2984: setregmap(va, rp->rg_smeg);
2985: } while (--i >= 0);
1.1 deraadt 2986:
1.43 pk 2987: 		/* install this region's segment maps into the new smeg */
2988: tva = VA_ROUNDDOWNTOREG(va);
2989: for (i = 0; i < NSEGRG; i++) {
2990: setsegmap(tva, rp->rg_segmap[i].sg_pmeg);
2991: tva += NBPSG;
2992: 		}
2993: }
2994: #endif
2995: if (sp->sg_pmeg != seginval && (tpte = getpte(va)) & PG_V) {
1.34 pk 2996: register int addr;
1.1 deraadt 2997:
1.34 pk 2998: /* old mapping exists, and is of the same pa type */
2999: if ((tpte & (PG_PFNUM|PG_TYPE)) ==
3000: (pteproto & (PG_PFNUM|PG_TYPE))) {
1.1 deraadt 3001: /* just changing protection and/or wiring */
3002: splx(s);
3003: pmap_changeprot(pm, va, prot, wired);
3004: return;
3005: }
3006:
1.34 pk 3007: if ((tpte & PG_TYPE) == PG_OBMEM) {
1.43 pk 3008: #ifdef DEBUG
3009: printf("pmap_enk: changing existing va=>pa entry: va %x, pteproto %x\n",
3010: va, pteproto);
3011: #endif
1.34 pk 3012: /*
3013: * Switcheroo: changing pa for this va.
3014: * If old pa was managed, remove from pvlist.
3015: * If old page was cached, flush cache.
3016: */
3017: addr = ptoa(HWTOSW(tpte & PG_PFNUM));
1.31 pk 3018: if (managed(addr))
3019: pv_unlink(pvhead(addr), pm, va);
1.34 pk 3020: if ((tpte & PG_NC) == 0) {
3021: setcontext(0); /* ??? */
3022: if (vactype != VAC_NONE)
3023: cache_flush_page((int)va);
3024: }
1.1 deraadt 3025: }
3026: } else {
3027: /* adding new entry */
1.43 pk 3028: sp->sg_npte++;
1.1 deraadt 3029: }
3030:
3031: /*
3032: * If the new mapping is for a managed PA, enter into pvlist.
3033: * Note that the mapping for a malloc page will always be
3034: * unique (hence will never cause a second call to malloc).
3035: */
3036: if (pv != NULL)
3037: pteproto |= pv_link(pv, pm, va);
3038:
1.43 pk 3039: if (sp->sg_pmeg == seginval) {
1.1 deraadt 3040: register int tva;
3041:
3042: /*
3043: * Allocate an MMU entry now (on locked list),
3044: * and map it into every context. Set all its
3045: * PTEs invalid (we will then overwrite one, but
3046: * this is more efficient than looping twice).
3047: */
3048: #ifdef DEBUG
3049: if (pm->pm_ctx == NULL || pm->pm_ctxnum != 0)
3050: panic("pmap_enk: kern seg but no kern ctx");
3051: #endif
1.43 pk 3052: sp->sg_pmeg = me_alloc(&segm_locked, pm, vr, vs)->me_cookie;
3053: rp->rg_nsegmap++;
3054:
3055: #ifdef MMU_3L
3056: if (mmu_3l)
3057: setsegmap(va, sp->sg_pmeg);
3058: else
3059: #endif
3060: {
3061: i = ncontext - 1;
3062: do {
3063: setcontext(i);
3064: setsegmap(va, sp->sg_pmeg);
3065: } while (--i >= 0);
3066: }
1.1 deraadt 3067:
3068: /* set all PTEs to invalid, then overwrite one PTE below */
3069: tva = VA_ROUNDDOWNTOSEG(va);
3070: i = NPTESG;
3071: do {
3072: setpte(tva, 0);
3073: tva += NBPG;
3074: } while (--i > 0);
3075: }
3076:
3077: /* ptes kept in hardware only */
3078: setpte(va, pteproto);
3079: splx(s);
3080: }
3081:
3082: /* enter new (or change existing) user mapping */
3083: pmap_enu(pm, va, prot, wired, pv, pteproto)
3084: register struct pmap *pm;
3085: vm_offset_t va;
3086: vm_prot_t prot;
3087: int wired;
3088: register struct pvlist *pv;
3089: register int pteproto;
3090: {
1.43 pk 3091: register int vr, vs, *pte, tpte, pmeg, s, doflush;
3092: struct regmap *rp;
3093: struct segmap *sp;
1.1 deraadt 3094:
3095: write_user_windows(); /* XXX conservative */
1.43 pk 3096: vr = VA_VREG(va);
3097: vs = VA_VSEG(va);
3098: rp = &pm->pm_regmap[vr];
1.1 deraadt 3099: s = splpmap(); /* XXX conservative */
3100:
3101: /*
3102: * If there is no space in which the PTEs can be written
3103: * while they are not in the hardware, this must be a new
3104: * virtual segment. Get PTE space and count the segment.
3105: *
3106: * TO SPEED UP CTX ALLOC, PUT SEGMENT BOUNDS STUFF HERE
3107: * AND IN pmap_rmu()
3108: */
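	/*
	 * (pm_gap_start/pm_gap_end bracket a hole of empty regions in
	 * the user map, presumably so context loading can skip them;
	 * entering a mapping in region `vr' may shrink that hole, hence
	 * GAP_SHRINK below, and pmap_rmu() widens it again on free.)
	 */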
1.13 pk 3109:
1.43 pk 3110: GAP_SHRINK(pm,vr);
1.13 pk 3111:
3112: #ifdef DEBUG
3113: if (pm->pm_gap_end < pm->pm_gap_start) {
3114: printf("pmap_enu: gap_start %x, gap_end %x",
3115: pm->pm_gap_start, pm->pm_gap_end);
3116: panic("pmap_enu: gap botch");
3117: }
3118: #endif
3119:
1.43 pk 3120: rretry:
3121: if (rp->rg_segmap == NULL) {
3122: /* definitely a new mapping */
3123: register int i;
3124: register int size = NSEGRG * sizeof (struct segmap);
3125:
3126: sp = (struct segmap *)malloc((u_long)size, M_VMPMAP, M_WAITOK);
3127: if (rp->rg_segmap != NULL) {
3128: printf("pmap_enter: segment filled during sleep\n"); /* can this happen? */
3129: free((caddr_t)sp, M_VMPMAP);
3130: goto rretry;
3131: }
3132: bzero((caddr_t)sp, size);
3133: rp->rg_segmap = sp;
3134: rp->rg_nsegmap = 0;
3135: for (i = NSEGRG; --i >= 0;)
3136: sp++->sg_pmeg = seginval;
3137: }
3138:
3139: sp = &rp->rg_segmap[vs];
3140:
3141: sretry:
3142: if ((pte = sp->sg_pte) == NULL) {
1.1 deraadt 3143: /* definitely a new mapping */
3144: register int size = NPTESG * sizeof *pte;
3145:
3146: pte = (int *)malloc((u_long)size, M_VMPMAP, M_WAITOK);
1.43 pk 3147: if (sp->sg_pte != NULL) {
1.1 deraadt 3148: printf("pmap_enter: pte filled during sleep\n"); /* can this happen? */
3149: free((caddr_t)pte, M_VMPMAP);
1.43 pk 3150: goto sretry;
1.1 deraadt 3151: }
3152: #ifdef DEBUG
1.43 pk 3153: if (sp->sg_pmeg != seginval)
1.1 deraadt 3154: panic("pmap_enter: new ptes, but not seginval");
3155: #endif
3156: bzero((caddr_t)pte, size);
1.43 pk 3157: sp->sg_pte = pte;
3158: sp->sg_npte = 1;
3159: rp->rg_nsegmap++;
1.1 deraadt 3160: } else {
3161: /* might be a change: fetch old pte */
3162: doflush = 0;
1.43 pk 3163: if ((pmeg = sp->sg_pmeg) == seginval) {
3164: /* software pte */
3165: tpte = pte[VA_VPG(va)];
3166: } else {
3167: /* hardware pte */
3168: if (CTX_USABLE(pm,rp)) {
1.1 deraadt 3169: setcontext(pm->pm_ctxnum);
3170: tpte = getpte(va);
3171: doflush = 1;
3172: } else {
3173: setcontext(0);
3174: /* XXX use per-cpu pteva? */
1.43 pk 3175: #ifdef MMU_3L
3176: if (mmu_3l)
3177: setregmap(0, tregion);
3178: #endif
1.1 deraadt 3179: setsegmap(0, pmeg);
1.18 deraadt 3180: tpte = getpte(VA_VPG(va) << PGSHIFT);
1.1 deraadt 3181: }
3182: }
3183: if (tpte & PG_V) {
1.34 pk 3184: register int addr;
1.1 deraadt 3185:
1.34 pk 3186: /* old mapping exists, and is of the same pa type */
3187: if ((tpte & (PG_PFNUM|PG_TYPE)) ==
3188: (pteproto & (PG_PFNUM|PG_TYPE))) {
1.1 deraadt 3189: /* just changing prot and/or wiring */
3190: splx(s);
3191: /* caller should call this directly: */
3192: pmap_changeprot(pm, va, prot, wired);
1.15 deraadt 3193: if (wired)
3194: pm->pm_stats.wired_count++;
3195: else
3196: pm->pm_stats.wired_count--;
1.1 deraadt 3197: return;
3198: }
3199: /*
3200: * Switcheroo: changing pa for this va.
3201: * If old pa was managed, remove from pvlist.
3202: * If old page was cached, flush cache.
3203: */
3204: /*printf("%s[%d]: pmap_enu: changing existing va(%x)=>pa entry\n",
3205: curproc->p_comm, curproc->p_pid, va);*/
1.34 pk 3206: if ((tpte & PG_TYPE) == PG_OBMEM) {
3207: addr = ptoa(HWTOSW(tpte & PG_PFNUM));
1.31 pk 3208: if (managed(addr))
3209: pv_unlink(pvhead(addr), pm, va);
1.34 pk 3210: if (vactype != VAC_NONE &&
3211: doflush && (tpte & PG_NC) == 0)
3212: cache_flush_page((int)va);
1.31 pk 3213: }
1.1 deraadt 3214: } else {
3215: /* adding new entry */
1.43 pk 3216: sp->sg_npte++;
1.15 deraadt 3217:
3218: /*
3219: * Increment counters
3220: */
3221: if (wired)
3222: pm->pm_stats.wired_count++;
1.1 deraadt 3223: }
3224: }
3225:
3226: if (pv != NULL)
3227: pteproto |= pv_link(pv, pm, va);
3228:
3229: /*
1.43 pk 3230: * Update hardware & software PTEs.
1.1 deraadt 3231: */
1.43 pk 3232: if ((pmeg = sp->sg_pmeg) != seginval) {
1.1 deraadt 3233: 		/* PTEs are in hardware */
1.43 pk 3234: if (CTX_USABLE(pm,rp))
1.1 deraadt 3235: setcontext(pm->pm_ctxnum);
3236: else {
3237: setcontext(0);
3238: /* XXX use per-cpu pteva? */
1.43 pk 3239: #ifdef MMU_3L
3240: if (mmu_3l)
3241: setregmap(0, tregion);
3242: #endif
1.1 deraadt 3243: setsegmap(0, pmeg);
1.18 deraadt 3244: va = VA_VPG(va) << PGSHIFT;
1.1 deraadt 3245: }
3246: setpte(va, pteproto);
3247: }
3248: /* update software copy */
3249: pte += VA_VPG(va);
3250: *pte = pteproto;
3251:
3252: splx(s);
3253: }
3254:
3255: /*
3256: * Change the wiring attribute for a map/virtual-address pair.
3257: */
3258: /* ARGSUSED */
3259: void
3260: pmap_change_wiring(pm, va, wired)
3261: struct pmap *pm;
3262: vm_offset_t va;
3263: int wired;
3264: {
3265:
3266: pmap_stats.ps_useless_changewire++;
3267: }
3268:
3269: /*
3270: * Extract the physical page address associated
3271: * with the given map/virtual_address pair.
3272: * GRR, the vm code knows; we should not have to do this!
3273: */
3274: vm_offset_t
3275: pmap_extract(pm, va)
3276: register struct pmap *pm;
3277: vm_offset_t va;
3278: {
3279: register int tpte;
1.43 pk 3280: register int vr, vs;
3281: struct regmap *rp;
3282: struct segmap *sp;
1.1 deraadt 3283:
3284: if (pm == NULL) {
3285: printf("pmap_extract: null pmap\n");
3286: return (0);
3287: }
1.43 pk 3288: vr = VA_VREG(va);
3289: vs = VA_VSEG(va);
3290: rp = &pm->pm_regmap[vr];
3291: if (rp->rg_segmap == NULL) {
3292: printf("pmap_extract: invalid segment (%d)\n", vr);
3293: return (0);
3294: }
3295: sp = &rp->rg_segmap[vs];
3296:
3297: if (sp->sg_pmeg != seginval) {
1.1 deraadt 3298: register int ctx = getcontext();
3299:
1.43 pk 3300: if (CTX_USABLE(pm,rp)) {
1.1 deraadt 3301: setcontext(pm->pm_ctxnum);
3302: tpte = getpte(va);
3303: } else {
3304: setcontext(0);
1.43 pk 3305: #ifdef MMU_3L
3306: if (mmu_3l)
3307: setregmap(0, tregion);
3308: #endif
3309: setsegmap(0, sp->sg_pmeg);
1.18 deraadt 3310: tpte = getpte(VA_VPG(va) << PGSHIFT);
1.1 deraadt 3311: }
3312: setcontext(ctx);
3313: } else {
1.43 pk 3314: register int *pte = sp->sg_pte;
1.1 deraadt 3315:
3316: if (pte == NULL) {
1.43 pk 3317: printf("pmap_extract: invalid segment\n");
1.1 deraadt 3318: return (0);
3319: }
3320: tpte = pte[VA_VPG(va)];
3321: }
3322: if ((tpte & PG_V) == 0) {
3323: printf("pmap_extract: invalid pte\n");
3324: return (0);
3325: }
3326: tpte &= PG_PFNUM;
3327: tpte = HWTOSW(tpte);
3328: return ((tpte << PGSHIFT) | (va & PGOFSET));
3329: }
3330:
3331: /*
3332: * Copy the range specified by src_addr/len
3333: * from the source map to the range dst_addr/len
3334: * in the destination map.
3335: *
3336: * This routine is only advisory and need not do anything.
3337: */
3338: /* ARGSUSED */
3339: void
3340: pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
3341: struct pmap *dst_pmap, *src_pmap;
3342: vm_offset_t dst_addr;
3343: vm_size_t len;
3344: vm_offset_t src_addr;
3345: {
3346: }
3347:
3348: /*
3349: * Require that all active physical maps contain no
3350: * incorrect entries NOW. [This update includes
3351: * forcing updates of any address map caching.]
3352: */
3353: void
3354: pmap_update()
3355: {
3356: }
3357:
3358: /*
3359: * Garbage collects the physical map system for
3360: * pages which are no longer used.
3361: * Success need not be guaranteed -- that is, there
3362: * may well be pages which are not referenced, but
3363: * others may be collected.
3364: * Called by the pageout daemon when pages are scarce.
3365: */
3366: /* ARGSUSED */
3367: void
3368: pmap_collect(pm)
3369: struct pmap *pm;
3370: {
3371: }
3372:
3373: /*
3374: * Clear the modify bit for the given physical page.
3375: */
3376: void
3377: pmap_clear_modify(pa)
3378: register vm_offset_t pa;
3379: {
3380: register struct pvlist *pv;
3381:
1.34 pk 3382: if ((pa & (PMAP_TNC & ~PMAP_NC)) == 0 && managed(pa)) {
1.1 deraadt 3383: pv = pvhead(pa);
3384: (void) pv_syncflags(pv);
3385: pv->pv_flags &= ~PV_MOD;
3386: }
3387: }
3388:
3389: /*
3390: * Tell whether the given physical page has been modified.
3391: */
3392: int
3393: pmap_is_modified(pa)
3394: register vm_offset_t pa;
3395: {
3396: register struct pvlist *pv;
3397:
1.34 pk 3398: if ((pa & (PMAP_TNC & ~PMAP_NC)) == 0 && managed(pa)) {
1.1 deraadt 3399: pv = pvhead(pa);
3400: if (pv->pv_flags & PV_MOD || pv_syncflags(pv) & PV_MOD)
3401: return (1);
3402: }
3403: return (0);
3404: }
3405:
3406: /*
3407: * Clear the reference bit for the given physical page.
3408: */
3409: void
3410: pmap_clear_reference(pa)
3411: vm_offset_t pa;
3412: {
3413: register struct pvlist *pv;
3414:
1.34 pk 3415: if ((pa & (PMAP_TNC & ~PMAP_NC)) == 0 && managed(pa)) {
1.1 deraadt 3416: pv = pvhead(pa);
3417: (void) pv_syncflags(pv);
3418: pv->pv_flags &= ~PV_REF;
3419: }
3420: }
3421:
3422: /*
3423: * Tell whether the given physical page has been referenced.
3424: */
3425: int
3426: pmap_is_referenced(pa)
3427: vm_offset_t pa;
3428: {
3429: register struct pvlist *pv;
3430:
1.34 pk 3431: if ((pa & (PMAP_TNC & ~PMAP_NC)) == 0 && managed(pa)) {
1.1 deraadt 3432: pv = pvhead(pa);
3433: if (pv->pv_flags & PV_REF || pv_syncflags(pv) & PV_REF)
3434: return (1);
3435: }
3436: return (0);
3437: }
3438:
3439: /*
3440: * Make the specified pages (by pmap, offset) pageable (or not) as requested.
3441: *
3442: * A page which is not pageable may not take a fault; therefore, its page
3443: * table entry must remain valid for the duration (or at least, the trap
3444: * handler must not call vm_fault).
3445: *
3446: * This routine is merely advisory; pmap_enter will specify that these pages
3447: * are to be wired down (or not) as appropriate.
3448: */
3449: /* ARGSUSED */
3450: void
3451: pmap_pageable(pm, start, end, pageable)
3452: struct pmap *pm;
3453: vm_offset_t start, end;
3454: int pageable;
3455: {
1.2 deraadt 3456: }
3457:
3458: /*
1.1 deraadt 3459: * Fill the given MI physical page with zero bytes.
3460: *
3461: * We avoid stomping on the cache.
3462: * XXX might be faster to use destination's context and allow cache to fill?
3463: */
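/*
 * (Mechanism, per the code below: the page is mapped uncached (PG_NC)
 * at the reserved virtual page vpage[0], cleared with qzero(), and
 * unmapped again, so no cache lines are disturbed.)
 */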
3464: void
3465: pmap_zero_page(pa)
3466: register vm_offset_t pa;
3467: {
3468: register caddr_t va;
3469: register int pte;
3470:
1.34 pk 3471: if (((pa & (PMAP_TNC & ~PMAP_NC)) == 0) && managed(pa)) {
1.1 deraadt 3472: /*
3473: * The following might not be necessary since the page
3474: * is being cleared because it is about to be allocated,
3475: * i.e., is in use by no one.
3476: */
3477: if (vactype != VAC_NONE)
3478: pv_flushcache(pvhead(pa));
3479: pte = PG_V | PG_S | PG_W | PG_NC | SWTOHW(atop(pa));
3480: } else
3481: pte = PG_V | PG_S | PG_W | PG_NC | (atop(pa) & PG_PFNUM);
3482:
3483: va = vpage[0];
3484: setpte(va, pte);
3485: qzero(va, NBPG);
3486: setpte(va, 0);
3487: }
3488:
3489: /*
3490: * Copy the given MI physical source page to its destination.
3491: *
3492: * We avoid stomping on the cache as above (with same `XXX' note).
3493: * We must first flush any write-back cache for the source page.
3494: * We go ahead and stomp on the kernel's virtual cache for the
3495: * source page, since the cache can read memory MUCH faster than
3496: * the processor.
3497: */
3498: void
3499: pmap_copy_page(src, dst)
3500: vm_offset_t src, dst;
3501: {
3502: register caddr_t sva, dva;
3503: register int spte, dpte;
3504:
3505: if (managed(src)) {
3506: if (vactype == VAC_WRITEBACK)
3507: pv_flushcache(pvhead(src));
3508: spte = PG_V | PG_S | SWTOHW(atop(src));
3509: } else
3510: spte = PG_V | PG_S | (atop(src) & PG_PFNUM);
3511:
3512: if (managed(dst)) {
3513: /* similar `might not be necessary' comment applies */
3514: if (vactype != VAC_NONE)
3515: pv_flushcache(pvhead(dst));
3516: dpte = PG_V | PG_S | PG_W | PG_NC | SWTOHW(atop(dst));
3517: } else
3518: dpte = PG_V | PG_S | PG_W | PG_NC | (atop(dst) & PG_PFNUM);
3519:
3520: sva = vpage[0];
3521: dva = vpage[1];
3522: setpte(sva, spte);
3523: setpte(dva, dpte);
3524: qcopy(sva, dva, NBPG); /* loads cache, so we must ... */
1.31 pk 3525: if (vactype != VAC_NONE)
3526: cache_flush_page((int)sva);
1.1 deraadt 3527: setpte(sva, 0);
3528: setpte(dva, 0);
3529: }
3530:
3531: /*
3532: * Turn a cdevsw d_mmap value into a byte address for pmap_enter.
3533: * XXX this should almost certainly be done differently, and
3534: * elsewhere, or even not at all
3535: */
3536: vm_offset_t
3537: pmap_phys_address(x)
3538: int x;
3539: {
3540:
3541: return (x);
3542: }
3543:
3544: /*
3545: * Turn off cache for a given (va, number of pages).
3546: *
3547: * We just assert PG_NC for each PTE; the addresses must reside
3548: * in locked kernel space. A cache flush is also done.
3549: */
3550: kvm_uncache(va, npages)
3551: register caddr_t va;
3552: register int npages;
3553: {
3554: register int pte;
3555:
3556: for (; --npages >= 0; va += NBPG) {
3557: pte = getpte(va);
3558: if ((pte & PG_V) == 0)
3559: panic("kvm_uncache !pg_v");
3560: pte |= PG_NC;
3561: setpte(va, pte);
1.35 pk 3562: if (vactype != VAC_NONE && (pte & PG_TYPE) == PG_OBMEM)
1.31 pk 3563: cache_flush_page((int)va);
1.1 deraadt 3564: }
1.21 deraadt 3565: }
3566:
1.46 pk 3567: /*
3568: * Turn on IO cache for a given (va, number of pages).
3569: *
3570:  * We just assert PG_IOC for each PTE; the addresses must reside
3571:  * in locked kernel space.  No cache flush is needed.
3572: */
3573: kvm_iocache(va, npages)
3574: register caddr_t va;
3575: register int npages;
3576: {
3577: register int pte;
3578:
3579: for (; --npages >= 0; va += NBPG) {
3580: pte = getpte(va);
3581: if ((pte & PG_V) == 0)
3582: panic("kvm_iocache !pg_v");
3583: pte |= PG_IOC;
3584: setpte(va, pte);
3585: }
3586: }
3587:
1.21 deraadt 3588: int
3589: pmap_count_ptes(pm)
3590: register struct pmap *pm;
3591: {
3592: register int idx, total;
1.43 pk 3593: register struct regmap *rp;
3594: register struct segmap *sp;
1.21 deraadt 3595:
1.43 pk 3596: if (pm == pmap_kernel()) {
3597: rp = &pm->pm_regmap[NUREG];
3598: idx = NKREG;
3599: } else {
3600: rp = pm->pm_regmap;
3601: idx = NUREG;
3602: }
1.21 deraadt 3603: for (total = 0; idx;)
1.43 pk 3604: if ((sp = rp[--idx].rg_segmap) != NULL)
3605: total += sp->sg_npte;
1.21 deraadt 3606: pm->pm_stats.resident_count = total;
3607: return (total);
1.24 pk 3608: }
3609:
3610: /*
3611: * Find first virtual address >= va that doesn't cause
3612: * a cache alias on physical address pa.
3613: */
3614: vm_offset_t
3615: pmap_prefer(pa, va)
3616: register vm_offset_t pa;
3617: register vm_offset_t va;
3618: {
3619: register struct pvlist *pv;
3620: register long m, d;
3621:
1.25 deraadt 3622: if (cputyp == CPU_SUN4M)
3623: /* does the sun4m have the cache alias problem? */
1.24 pk 3624: return va;
1.25 deraadt 3625:
3626: m = CACHE_ALIAS_DIST;
1.24 pk 3627:
1.34 pk 3628: if ((pa & (PMAP_TNC & ~PMAP_NC)) || !managed(pa))
1.24 pk 3629: return va;
3630:
3631: pv = pvhead(pa);
1.43 pk 3632: if (pv->pv_pmap == NULL) {
3633: #if 0
3634: return ((va + m - 1) & ~(m - 1));
3635: #else
1.24 pk 3636: /* Unusable, tell caller to try another one */
3637: return (vm_offset_t)-1;
1.43 pk 3638: #endif
3639: }
1.24 pk 3640:
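	/*
	 * (Example, if m were 64K: va = 0x20300 with pv_va offset 0x100
	 * gives d = 0x100 - 0x300 = -0x200 < 0, so va becomes
	 * 0x20300 + 0x10000 - 0x200 = 0x30100: no smaller than the
	 * original va, and at the same offset modulo m as pv_va, so the
	 * two mappings do not alias.)
	 */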
3641: d = (long)(pv->pv_va & (m - 1)) - (long)(va & (m - 1));
3642: if (d < 0)
3643: va += m;
3644: va += d;
3645:
3646: return va;
1.23 deraadt 3647: }
3648:
3649: pmap_redzone()
3650: {
3651: setpte(KERNBASE, 0);
1.1 deraadt 3652: }
1.43 pk 3653:
3654: #ifdef DEBUG
3655: /*
3656: * Check consistency of a pmap (time consuming!).
3657: */
3658: int
3659: pm_check(s, pm)
3660: char *s;
3661: struct pmap *pm;
3662: {
3663: if (pm == pmap_kernel())
3664: pm_check_k(s, pm);
3665: else
3666: pm_check_u(s, pm);
3667: }
3668:
3669: int
3670: pm_check_u(s, pm)
3671: char *s;
3672: struct pmap *pm;
3673: {
3674: struct regmap *rp;
3675: struct segmap *sp;
3676: int n, vs, vr, j, m, *pte;
3677:
3678: for (vr = 0; vr < NUREG; vr++) {
3679: rp = &pm->pm_regmap[vr];
3680: if (rp->rg_nsegmap == 0)
3681: continue;
3682: if (rp->rg_segmap == NULL)
3683: panic("%s: CHK(vr %d): nsegmap = %d; sp==NULL",
3684: s, vr, rp->rg_nsegmap);
3685: if ((unsigned int)rp < KERNBASE)
3686: panic("%s: rp=%x", s, rp);
3687: n = 0;
3688: for (vs = 0; vs < NSEGRG; vs++) {
3689: sp = &rp->rg_segmap[vs];
3690: if ((unsigned int)sp < KERNBASE)
3691: panic("%s: sp=%x", s, sp);
3692: if (sp->sg_npte != 0) {
3693: n++;
3694: if (sp->sg_pte == NULL)
3695: panic("%s: CHK(vr %d, vs %d): npte=%d, "
3696: "pte=NULL", s, vr, vs, sp->sg_npte);
3697:
3698: pte=sp->sg_pte;
3699: m = 0;
3700: for (j=0; j<NPTESG; j++,pte++)
3701: if (*pte & PG_V)
3702: m++;
3703: if (m != sp->sg_npte)
3704: /*if (pmapdebug & 0x10000)*/
3705: printf("%s: user CHK(vr %d, vs %d): "
3706: "npte(%d) != # valid(%d)\n",
3707: s, vr, vs, sp->sg_npte, m);
3708: }
3709: }
3710: if (n != rp->rg_nsegmap)
3711: panic("%s: CHK(vr %d): inconsistent "
 3712: 			    "# of segments: %d, should be %d",
3713: s, vr, rp->rg_nsegmap, n);
3714: }
3715: return 0;
3716: }
3717:
3718: int
3719: pm_check_k(s, pm)
3720: char *s;
3721: struct pmap *pm;
3722: {
3723: struct regmap *rp;
3724: struct segmap *sp;
3725: int vr, vs, n;
3726:
3727: for (vr = NUREG; vr < NUREG+NKREG; vr++) {
3728: rp = &pm->pm_regmap[vr];
3729: if (rp->rg_segmap == NULL)
3730: panic("%s: CHK(vr %d): nsegmap = %d; sp==NULL",
3731: s, vr, rp->rg_nsegmap);
3732: if (rp->rg_nsegmap == 0)
3733: continue;
3734: n = 0;
3735: for (vs = 0; vs < NSEGRG; vs++) {
3736: if (rp->rg_segmap[vs].sg_npte)
3737: n++;
3738: }
3739: if (n != rp->rg_nsegmap)
3740: printf("%s: kernel CHK(vr %d): inconsistent "
 3741: 			    "# of segments: %d, should be %d\n",
3742: s, vr, rp->rg_nsegmap, n);
3743: }
3744: return 0;
3745: }
3746: #endif
1.46 pk 3747:
3748: /*
 3749:  * Return the number of pages (clicks) that pmap_dumpmmu() will dump.
3750: * For each pmeg in the MMU, we'll write NPTESG PTEs.
3751: * The last page or two contains stuff so libkvm can bootstrap.
3752: */
3753: int
3754: pmap_dumpsize()
3755: {
3756: return btoc(((seginval + 1) * NPTESG * sizeof(int)) +
 3757: 		    sizeof(seginval) + sizeof(npmemarr) +
3758: sizeof(pmemarr) +
3759: sizeof(kernel_segmap_store));
3760: }
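/*
 * Layout of the dump image written by pmap_dumpmmu() below
 * (editorial note, derived from that function):
 *
 *	(seginval + 1) * NPTESG ints	PTE copies, one pmeg at a time
 *	1 int				seginval
 *	1 int				npmemarr
 *	sizeof(pmemarr) bytes		physical memory map
 *	sizeof(kernel_segmap_store)	kernel segment map
 *
 * Everything is funneled through one disk-block-sized buffer, so the
 * records are not individually block aligned.
 */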
3761:
3762: /*
3763: * Write the mmu contents to the dump device.
3764: * This gets appended to the end of a crash dump since
3765: * there is no in-core copy of kernel memory mappings.
3766: */
3767: int
3768: pmap_dumpmmu(dump, blkno)
3769: register daddr_t blkno;
3770: register int (*dump) __P((dev_t, daddr_t, caddr_t, size_t));
3771: {
3772: register int pmeg;
3774: register int i;
3775: register int *pte, *ptend;
3776: register int error;
3777: register int *kp;
3778: int buffer[dbtob(1) / sizeof(int)];
3779:
3780: /*
3781: * dump page table entries
3782: *
 3783: 	 * We dump each pmeg in order (by pmeg number).  Since getpte()
 3784: 	 * takes a virtual address, we map each pmeg in turn at a fixed,
 3785: 	 * otherwise unused virtual segment (va 0 in context 0) using
 3786: 	 * setsegmap(), then read out its NPTESG PTEs through that window.
3788: */
3789: setcontext(0);
3790:
3791: /*
3792: * Go through the pmegs and dump each one.
3793: */
3794: pte = buffer;
3795: ptend = &buffer[sizeof(buffer) / sizeof(buffer[0])];
3796: for (pmeg = 0; pmeg <= seginval; ++pmeg) {
3797: register int va = 0;
3798:
3799: setsegmap(va, pmeg);
3800: i = NPTESG;
3801: do {
3802: *pte++ = getpte(va);
3803: if (pte >= ptend) {
3804: /*
3805: * Note that we'll dump the last block
3806: * the last time through the loops because
3807: * all the PMEGs occupy 32KB which is
3808: * a multiple of the block size.
3809: */
3810: error = (*dump)(dumpdev, blkno,
3811: (caddr_t)buffer,
3812: dbtob(1));
3813: if (error != 0)
3814: return (error);
3815: ++blkno;
3816: pte = buffer;
3817: }
3818: va += NBPG;
3819: } while (--i > 0);
3820: }
3821: setsegmap(0, seginval);
3822:
3823: /*
 3824: 	 * Next, dump seginval (whence the number of pmegs), the size and
 3825: 	 * contents of the physical memory table, and the kernel's segment map.
3826: */
3827: pte = buffer;
3828: *pte++ = seginval;
3829: *pte++ = npmemarr;
3830: bcopy((char *)pmemarr, (char *)pte, sizeof(pmemarr));
3831: pte = (int *)((int)pte + sizeof(pmemarr));
3832: kp = (int *)kernel_segmap_store;
3833: i = sizeof(kernel_segmap_store) / sizeof(int);
3834: do {
3835: *pte++ = *kp++;
3836: if (pte >= ptend) {
3837: error = (*dump)(dumpdev, blkno, (caddr_t)buffer,
3838: dbtob(1));
3839: if (error != 0)
3840: return (error);
3841: ++blkno;
3842: pte = buffer;
3843: }
3844: } while (--i > 0);
3845: if (pte != buffer)
3846: error = (*dump)(dumpdev, blkno++, (caddr_t)buffer, dbtob(1));
3847:
3848: return (error);
3849: }
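/*
 * Consumption sketch (editorial addition, hypothetical): because the
 * PTE block comes first and its length depends on seginval, a reader
 * (e.g. libkvm's sparc backend) locates the trailer from the total
 * size reported by pmap_dumpsize(), then picks up seginval, npmemarr,
 * pmemarr and the kernel segment map in that order to translate
 * kernel virtual addresses in the dump.
 */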