Annotation of src/sys/arch/sun2/sun2/pmap.c, Revision 1.23
1.23 ! martin 1: /* $NetBSD: pmap.c,v 1.22 2003/05/10 21:10:41 thorpej Exp $ */
1.1 fredette 2:
3: /*-
4: * Copyright (c) 1996 The NetBSD Foundation, Inc.
5: * All rights reserved.
6: *
7: * This code is derived from software contributed to The NetBSD Foundation
8: * by Adam Glass, Gordon W. Ross, and Matthew Fredette.
9: *
10: * Redistribution and use in source and binary forms, with or without
11: * modification, are permitted provided that the following conditions
12: * are met:
13: * 1. Redistributions of source code must retain the above copyright
14: * notice, this list of conditions and the following disclaimer.
15: * 2. Redistributions in binary form must reproduce the above copyright
16: * notice, this list of conditions and the following disclaimer in the
17: * documentation and/or other materials provided with the distribution.
18: * 3. All advertising materials mentioning features or use of this software
19: * must display the following acknowledgement:
20: * This product includes software developed by the NetBSD
21: * Foundation, Inc. and its contributors.
22: * 4. Neither the name of The NetBSD Foundation nor the names of its
23: * contributors may be used to endorse or promote products derived
24: * from this software without specific prior written permission.
25: *
26: * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27: * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28: * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29: * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30: * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31: * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32: * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33: * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34: * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35: * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36: * POSSIBILITY OF SUCH DAMAGE.
37: */
38:
39: /*
40: * Some notes:
41: *
42: * sun2s have contexts (8). In this pmap design, the kernel is mapped
43: * into context zero. Processes take up a known portion of the context,
44: * and compete for the available contexts on a LRU basis.
45: *
46: * sun2s also have this evil "PMEG" crapola. Essentially each "context"'s
47: * address space is defined by the 512 one-byte entries in the segment map.
48: * Each of these 1-byte entries points to a "Page Map Entry Group" (PMEG)
49: * which contains the mappings for that virtual segment. (This strange
50: * terminology invented by Sun and preserved here for consistency.)
51: * Each PMEG maps a segment of 32Kb length, with 16 pages of 2Kb each.
52: *
53: * As you might guess, these PMEGs are in short supply and heavy demand.
54: * PMEGs allocated to the kernel are "static" in the sense that they can't
55: * be stolen from it. PMEGs allocated to a particular segment of a
56: * pmap's virtual space will be fought over by the other pmaps.
57: *
1.16 fredette 58: * This pmap was once sys/arch/sun3/sun3/pmap.c revision 1.135.
1.1 fredette 59: */
60:
61: /*
62: * Cache management:
63: * sun2's don't have cache implementations, but for now the caching
64: * code remains in. it's harmless (and, due to our 0 definitions of
65: * PG_NC and BADALIAS, should optimize away), and keeping it in makes
66: * it easier to diff this file against its cousin, sys/arch/sun3/sun3/pmap.c.
67: */
68:
69: /*
70: * wanted attributes:
71: * pmegs that aren't needed by a pmap remain in the MMU.
72: * quick context switches between pmaps
73: */
74:
75: /*
76: * Project1: Use a "null" context for processes that have not
77: * touched any user-space address recently. This is efficient
78: * for things that stay in the kernel for a while, waking up
79: * to handle some I/O then going back to sleep (i.e. nfsd).
80: * If and when such a process returns to user-mode, it will
81: * fault and be given a real context at that time.
82: *
83: * This also lets context switch be fast, because all we need
84: * to do there for the MMU is slam the context register.
85: *
86: * Project2: Use a private pool of PV elements. This pool can be
87: * fixed size because the total mapped virtual space supported by
88: * the MMU H/W (and this pmap) is fixed for all time.
89: */
90:
91: #include "opt_ddb.h"
1.23 ! martin 92: #include "opt_pmap_debug.h"
1.1 fredette 93:
94: #include <sys/param.h>
95: #include <sys/systm.h>
96: #include <sys/proc.h>
97: #include <sys/malloc.h>
1.16 fredette 98: #include <sys/pool.h>
1.1 fredette 99: #include <sys/user.h>
100: #include <sys/queue.h>
101: #include <sys/kcore.h>
102:
103: #include <uvm/uvm.h>
104:
105: #include <machine/cpu.h>
106: #include <machine/dvma.h>
107: #include <machine/idprom.h>
108: #include <machine/kcore.h>
1.3 fredette 109: #include <machine/promlib.h>
1.1 fredette 110: #include <machine/pmap.h>
111: #include <machine/pte.h>
112: #include <machine/vmparam.h>
113:
114: #include <sun2/sun2/control.h>
115: #include <sun2/sun2/fc.h>
116: #include <sun2/sun2/machdep.h>
117:
118: #ifdef DDB
119: #include <ddb/db_output.h>
120: #else
121: #define db_printf printf
122: #endif
123:
/* Verify this correspondence between definitions. */
#if (PMAP_OBIO << PG_MOD_SHIFT) != PGT_OBIO
#error "PMAP_XXX definitions don't match pte.h!"
#endif

/* Type bits in a "pseudo" physical address. (XXX: pmap.h?) */
#define PMAP_TYPE	PMAP_MBIO

/*
 * Local convenience macros
 */

/* End of the DVMA-mapped region of kernel VA space. */
#define DVMA_MAP_END	(DVMA_MAP_BASE + DVMA_MAP_AVAIL)

/* User segments are all of them. */
#define	NUSEG	(NSEGMAP)

/* Segment number of a virtual address. */
#define VA_SEGNUM(x)	((u_int)(x) >> SEGSHIFT)

/*
 * Only "main memory" pages are registered in the pv_lists.
 * This macro is used to determine if a given pte refers to
 * "main memory" or not.  One slight hack here deserves more
 * explanation:  On the Sun-2, the bwtwo and zs1 appear
 * as PG_OBMEM devices at 0x00700000 and 0x00780000,
 * respectively.  We do not want to consider these as
 * "main memory" so the macro below treats obmem addresses
 * >= 0x00700000 as device addresses.  NB: this means for now,
 * you can't have a headless Sun-2 with 8MB of main memory.
 */
#define IS_MAIN_MEM(pte) (((pte) & PG_TYPE) == 0 && PG_PA(pte) < 0x00700000)

/* Does this (pseudo) PA represent device space? */
#define PA_IS_DEV(pa) (((pa) & PMAP_TYPE) != 0 || (pa) >= 0x00700000)

/*
 * Is there a Virtually Addressed Cache (VAC) alias problem
 * if one page is mapped at both a1 and a2?
 * (Always zero: the Sun-2 has no cache; see note at top of file.)
 */
#define	BADALIAS(a1, a2)	(0)
164:
165:
/*
 * Debugging support.
 * These bits select which pmap events are traced when the
 * kernel is built with PMAP_DEBUG and pmap_debug is set.
 */
#define	PMD_ENTER	1
#define	PMD_LINK	2
#define	PMD_PROTECT	4
#define	PMD_SWITCH	8
#define	PMD_COW		0x10
#define	PMD_MODBIT	0x20
#define	PMD_REFBIT	0x40
#define	PMD_WIRING	0x80
#define	PMD_CONTEXT	0x100
#define	PMD_CREATE	0x200
#define	PMD_SEGMAP	0x400
#define	PMD_SETPTE	0x800
#define	PMD_FAULT	0x1000
#define	PMD_KMAP	0x2000

/* Remove/unlink events share the enter/link trace bits. */
#define	PMD_REMOVE	PMD_ENTER
#define	PMD_UNLINK	PMD_LINK

#ifdef	PMAP_DEBUG
int pmap_debug = 0;		/* OR of PMD_* bits to trace */
int pmap_db_watchva = -1;	/* break when this VA is touched */
int pmap_db_watchpmeg = -1;	/* break when this PMEG is touched */
#endif	/* PMAP_DEBUG */

/*
 * Miscellaneous variables.
 *
 * For simplicity, this interface retains the variables
 * that were used in the old interface (without NONCONTIG).
 * These are set in pmap_bootstrap() and used in
 * pmap_next_page().
 */
vaddr_t virtual_avail, virtual_end;
paddr_t avail_start, avail_end;
/* A PA is "managed" (has a pv_list entry) iff it is in [avail_start, avail_end). */
#define	managed(pa)	(((pa) >= avail_start) && ((pa) < avail_end))

/* used to skip a single hole in RAM */
static vaddr_t hole_start, hole_size;

/* This is for pmap_next_page() */
static paddr_t avail_next;

/* This is where we map a PMEG without a context. */
static vaddr_t temp_seg_va;
#ifdef DIAGNOSTIC
static int temp_seg_inuse;
#endif

/*
 * Location to store virtual addresses
 * to be used in copy/zero operations.
 */
vaddr_t tmp_vpages[2] = {
	PAGE_SIZE * 8,
	PAGE_SIZE * 9 };
int tmp_vpages_inuse;

/* Bumped to invalidate stale PMEG ownership; see pmeg_cache(). */
static int pmap_version = 1;
struct pmap kernel_pmap_store;
#define kernel_pmap (&kernel_pmap_store)
/* Software copy of the kernel's hardware segment map. */
static u_char kernel_segmap[NSEGMAP];

/* memory pool for pmap structures */
struct pool	pmap_pmap_pool;

/* statistics... */
struct pmap_stats {
	int	ps_enter_firstpv;	/* pv heads entered */
	int	ps_enter_secondpv;	/* pv nonheads entered */
	int	ps_unlink_pvfirst;	/* of pv_unlinks on head */
	int	ps_unlink_pvsearch;	/* of pv_unlink searches */
	int	ps_pmeg_faultin;	/* pmegs reloaded */
	int	ps_changeprots;		/* of calls to changeprot */
	int	ps_changewire;		/* useless wiring changes */
	int	ps_npg_prot_all;	/* of active pages protected */
	int	ps_npg_prot_actual;	/* pages actually affected */
	int	ps_vac_uncached;	/* non-cached due to bad alias */
	int	ps_vac_recached;	/* re-cached when bad alias gone */
} pmap_stats;

/* Reference counting and locking convenience wrappers. */
#define	pmap_lock(pmap)	simple_lock(&pmap->pm_lock)
#define	pmap_unlock(pmap)	simple_unlock(&pmap->pm_lock)
#define	pmap_add_ref(pmap)	++pmap->pm_refcount
#define	pmap_del_ref(pmap)	--pmap->pm_refcount
#define	pmap_refcount(pmap)	pmap->pm_refcount

#ifdef	PMAP_DEBUG
/* Assert that interrupts are masked to at least IPL 4. */
#define	CHECK_SPL() do { \
	if ((getsr() & PSL_IPL) < PSL_IPL4) \
		panic("pmap: bad spl, line %d", __LINE__); \
} while (0)
#else	/* PMAP_DEBUG */
#define	CHECK_SPL() (void)0
#endif	/* PMAP_DEBUG */
263:
264:
/*
 * PV support.
 * (i.e. Find all virtual mappings of a physical page.)
 */

int pv_initialized = 0;

/* One of these for each mapped virtual page. */
struct pv_entry {
	struct pv_entry *pv_next;	/* next mapping of the same page */
	pmap_t	       pv_pmap;		/* pmap holding this mapping */
	vaddr_t        pv_va;		/* VA of the mapping */
};
typedef struct pv_entry *pv_entry_t;

/* Table of PV list heads (per physical page). */
static struct pv_entry **pv_head_tbl;

/* Free list of PV entries. */
static struct pv_entry *pv_free_list;

/* Table of flags (per physical page). */
static u_char *pv_flags_tbl;

/* These are as in the MMU but shifted by PV_SHIFT. */
#define PV_SHIFT	20
#define PV_VALID  (PG_VALID >> PV_SHIFT)
#define PV_NC     (PG_NC >> PV_SHIFT)
#define PV_TYPE   (PG_TYPE >> PV_SHIFT)
#define PV_REF    (PG_REF >> PV_SHIFT)
#define PV_MOD    (PG_MOD >> PV_SHIFT)


/*
 * context structures, and queues
 */

struct context_state {
	TAILQ_ENTRY(context_state) context_link;	/* free/active queue */
	int            context_num;		/* hardware context number */
	struct pmap   *context_upmap;		/* user pmap using this context */
};
typedef struct context_state *context_t;

#define INVALID_CONTEXT -1	/* impossible value */
#define EMPTY_CONTEXT 0
#define KERNEL_CONTEXT 0
#define FIRST_CONTEXT 1
/*
 * True iff the pmap's context assignment is consistent:
 * user pmaps must have a non-empty context, the kernel pmap
 * must have the empty (kernel) context.
 */
#define	has_context(pmap)	(((pmap)->pm_ctxnum != EMPTY_CONTEXT) == ((pmap) != kernel_pmap))

TAILQ_HEAD(context_tailq, context_state)
	context_free_queue, context_active_queue;

static struct context_state context_array[NCONTEXT];


/*
 * PMEG structures, queues, and macros
 */
#define PMEGQ_FREE     0
#define PMEGQ_INACTIVE 1
#define PMEGQ_ACTIVE   2
#define PMEGQ_KERNEL   3
#define PMEGQ_NONE     4

struct pmeg_state {
	TAILQ_ENTRY(pmeg_state) pmeg_link;	/* queue selected by pmeg_qstate */
	int            pmeg_index;	/* hardware segment-map entry number */
	pmap_t         pmeg_owner;	/* pmap this PMEG belongs to */
	int            pmeg_version;	/* owner's pm_version at assignment */
	vaddr_t        pmeg_va;		/* segment VA this PMEG maps */
	int            pmeg_wired;	/* per-page wired bitmask */
	int            pmeg_reserved;	/* reserved (PROM etc.), never stolen */
	int            pmeg_vpages;	/* count of valid pages mapped */
	int            pmeg_qstate;	/* which PMEGQ_* queue we are on */
};

typedef struct pmeg_state *pmeg_t;

#define PMEG_INVAL (NPMEG-1)
#define PMEG_NULL (pmeg_t) NULL

/* XXX - Replace pmeg_kernel_queue with pmeg_wired_queue ? */
TAILQ_HEAD(pmeg_tailq, pmeg_state)
	pmeg_free_queue, pmeg_inactive_queue,
	pmeg_active_queue, pmeg_kernel_queue;

static struct pmeg_state pmeg_array[NPMEG];
353:
354:
/*
 * prototypes
 */

/* Raw PTE access through the temporary segment. */
static int get_pte_pmeg __P((int, int));
static void set_pte_pmeg __P((int, int, int));

/* Context management. */
static void context_allocate __P((pmap_t));
static void context_free __P((pmap_t));
static void context_init __P((void));

/* PMEG management. */
static void pmeg_init __P((void));
static void pmeg_reserve __P((int));

static pmeg_t pmeg_allocate __P((pmap_t, vaddr_t));
static void pmeg_mon_init __P((vaddr_t, vaddr_t, int));
static void pmeg_release __P((pmeg_t));
static void pmeg_free __P((pmeg_t));
static pmeg_t pmeg_cache __P((pmap_t, vaddr_t));
static void pmeg_set_wiring __P((pmeg_t, vaddr_t, int));

/* Physical-to-virtual (PV) list maintenance. */
static int  pv_link   __P((pmap_t, int, vaddr_t));
static void pv_unlink __P((pmap_t, int, vaddr_t));
static void pv_remove_all __P((paddr_t));
static void pv_changepte __P((paddr_t, int, int));
static u_int pv_syncflags __P((pv_entry_t));
static void pv_init __P((void));

static void pmeg_clean __P((pmeg_t));
static void pmeg_clean_free __P((void));

/* Bootstrap-time pmap setup. */
static void pmap_common_init __P((pmap_t));
static void pmap_kernel_init __P((pmap_t));
static void pmap_user_init __P((pmap_t));
static void pmap_page_upload __P((void));

static void pmap_enter_kernel __P((vaddr_t, int, boolean_t));
static void pmap_enter_user __P((pmap_t, vaddr_t, int, boolean_t));

static void pmap_protect1 __P((pmap_t, vaddr_t, vaddr_t));
static void pmap_protect_mmu __P((pmap_t, vaddr_t, vaddr_t));
static void pmap_protect_noctx __P((pmap_t, vaddr_t, vaddr_t));

static void pmap_remove1 __P((pmap_t, vaddr_t, vaddr_t));
static void pmap_remove_mmu __P((pmap_t, vaddr_t, vaddr_t));
static void pmap_remove_noctx __P((pmap_t, vaddr_t, vaddr_t));

static int  pmap_fault_reload __P((struct pmap *, vaddr_t, int));

/* Called only from locore.s and pmap.c */
void	_pmap_switch __P((pmap_t));

#ifdef	PMAP_DEBUG
void pmap_print __P((pmap_t));
void pv_print __P((struct vm_page *));
void pmeg_print __P((pmeg_t));
static void pmeg_verify_empty __P((vaddr_t));
#endif	/* PMAP_DEBUG */
void pmap_pinit __P((pmap_t));
void pmap_release __P((pmap_t));
414:
415: /*
416: * Various in-line helper functions.
417: */
418:
419: static inline pmap_t
420: current_pmap __P((void))
421: {
422: struct vmspace *vm;
1.11 chs 423: struct vm_map *map;
1.1 fredette 424: pmap_t pmap;
425:
1.19 thorpej 426: if (curlwp == NULL)
1.1 fredette 427: pmap = kernel_pmap;
428: else {
1.19 thorpej 429: vm = curproc->p_vmspace;
1.1 fredette 430: map = &vm->vm_map;
431: pmap = vm_map_pmap(map);
432: }
433:
434: return (pmap);
435: }
436:
437: static inline struct pv_entry **
1.16 fredette 438: pa_to_pvhead(paddr_t pa)
1.1 fredette 439: {
440: int idx;
441:
442: idx = PA_PGNUM(pa);
443: #ifdef DIAGNOSTIC
444: if (PA_IS_DEV(pa) || (idx >= physmem))
445: panic("pmap:pa_to_pvhead: bad pa=0x%lx", pa);
446: #endif
447: return (&pv_head_tbl[idx]);
448: }
449:
450: static inline u_char *
1.16 fredette 451: pa_to_pvflags(paddr_t pa)
1.1 fredette 452: {
453: int idx;
454:
455: idx = PA_PGNUM(pa);
456: #ifdef DIAGNOSTIC
457: if (PA_IS_DEV(pa) || (idx >= physmem))
458: panic("pmap:pa_to_pvflags: bad pa=0x%lx", pa);
459: #endif
460: return (&pv_flags_tbl[idx]);
461: }
462:
1.16 fredette 463: /*
464: * Save the MOD bit from the given PTE using its PA
465: */
466: static inline void
467: save_modref_bits(int pte)
468: {
469: u_char *pv_flags;
470:
471: pv_flags = pa_to_pvflags(PG_PA(pte));
472: *pv_flags |= ((pte & PG_MODREF) >> PV_SHIFT);
473: }
474:
1.1 fredette 475: static inline pmeg_t
476: pmeg_p(int sme)
477: {
478: #ifdef DIAGNOSTIC
479: if (sme < 0 || sme >= SEGINV)
480: panic("pmeg_p: bad sme");
481: #endif
482: return &pmeg_array[sme];
483: }
484:
485: #define is_pmeg_wired(pmegp) (pmegp->pmeg_wired != 0)
486:
487: static void
488: pmeg_set_wiring(pmegp, va, flag)
489: pmeg_t pmegp;
1.16 fredette 490: vaddr_t va;
1.1 fredette 491: int flag;
492: {
493: int idx, mask;
494:
495: idx = VA_PTE_NUM(va);
496: mask = 1 << idx;
497:
498: if (flag)
499: pmegp->pmeg_wired |= mask;
500: else
501: pmegp->pmeg_wired &= ~mask;
502: }
503:
504: /****************************************************************
505: * Context management functions.
506: */
507:
508: /* part of pmap_bootstrap */
509: static void
510: context_init()
511: {
512: int i;
513:
514: TAILQ_INIT(&context_free_queue);
515: TAILQ_INIT(&context_active_queue);
516:
517: /* Leave EMPTY_CONTEXT out of the free list. */
518: context_array[0].context_upmap = kernel_pmap;
519:
520: for (i = 1; i < NCONTEXT; i++) {
521: context_array[i].context_num = i;
522: context_array[i].context_upmap = NULL;
523: TAILQ_INSERT_TAIL(&context_free_queue, &context_array[i],
524: context_link);
525: #ifdef PMAP_DEBUG
526: if (pmap_debug & PMD_CONTEXT)
527: printf("context_init: sizeof(context_array[0])=%d\n",
528: sizeof(context_array[0]));
529: #endif
530: }
531: }
532:
/*
 * Get us a context (steal one if necessary).
 * Assigns a hardware context to the given user pmap,
 * evicting the LRU active context if none are free.
 * Must be called at splvm or above (CHECK_SPL).
 */
static void
context_allocate(pmap)
	pmap_t pmap;
{
	context_t context;

	CHECK_SPL();

#ifdef	DIAGNOSTIC
	if (pmap == kernel_pmap)
		panic("context_allocate: kernel_pmap");
	if (has_context(pmap))
		panic("pmap: pmap already has context allocated to it");
#endif

	context = TAILQ_FIRST(&context_free_queue);
	if (context == NULL) {
		/* Steal the head of the active queue (oldest user). */
		context = TAILQ_FIRST(&context_active_queue);
		if (context == NULL)
			panic("pmap: no contexts left?");
#ifdef	PMAP_DEBUG
		if (pmap_debug & PMD_CONTEXT)
			printf("context_allocate: steal ctx %d from pmap %p\n",
				   context->context_num, context->context_upmap);
#endif
		/* context_free() moves the victim onto the free queue. */
		context_free(context->context_upmap);
		context = TAILQ_FIRST(&context_free_queue);
	}
	TAILQ_REMOVE(&context_free_queue, context, context_link);

#ifdef DIAGNOSTIC
	if (context->context_upmap != NULL)
		panic("pmap: context in use???");
#endif

	/* Bind the context to the pmap and mark it active (MRU at tail). */
	context->context_upmap = pmap;
	pmap->pm_ctxnum = context->context_num;

	TAILQ_INSERT_TAIL(&context_active_queue, context, context_link);

	/*
	 * We could reload the MMU here, but that would
	 * artificially move PMEGs from the inactive queue
	 * to the active queue, so do lazy reloading.
	 * XXX - Need to reload wired pmegs though...
	 * XXX: Verify the context it is empty?
	 */
}
583:
/*
 * Unload the context and put it on the free queue.
 * All PMEGs are removed from the hardware segment map for this
 * context (releasing them to the inactive queue) but remain in
 * the pmap's software segmap so they can be reclaimed later by
 * pmeg_cache().  Must be called at splvm or above.
 */
static void
context_free(pmap)		/* :) */
	pmap_t pmap;
{
	int saved_ctxnum, ctxnum;
	int i, sme;
	context_t contextp;
	vaddr_t va;

	CHECK_SPL();

	ctxnum = pmap->pm_ctxnum;
	if (ctxnum < FIRST_CONTEXT || ctxnum >= NCONTEXT)
		panic("pmap: context_free ctxnum");
	contextp = &context_array[ctxnum];

	/* Temporary context change. */
	saved_ctxnum = get_context();
	set_context(ctxnum);

	/* Before unloading translations, flush cache. */
#ifdef	HAVECACHE
	if (cache_size)
		cache_flush_context();
#endif

	/* Unload MMU (but keep in SW segmap). */
	for (i=0, va=0; i < NUSEG; i++, va+=NBSG) {

#if !defined(PMAP_DEBUG)
		/* Short-cut using the S/W segmap (if !debug). */
		if (pmap->pm_segmap[i] == SEGINV)
			continue;
#endif

		/* Check the H/W segmap. */
		sme = get_segmap(va);
		if (sme == SEGINV)
			continue;

		/* Found valid PMEG in the segmap. */
#ifdef	PMAP_DEBUG
		if (pmap_debug & PMD_SEGMAP)
			printf("pmap: set_segmap ctx=%d v=0x%lx old=0x%x new=ff (cf)\n",
				   ctxnum, va, sme);
#endif
#ifdef	DIAGNOSTIC
		/* With PMAP_DEBUG the short-cut above is skipped, so
		 * the H/W and S/W segmaps are cross-checked here. */
		if (sme != pmap->pm_segmap[i])
			panic("context_free: unknown sme at va=0x%lx", va);
#endif
		/* Did cache flush above (whole context). */
		set_segmap(va, SEGINV);
		/* In this case, do not clear pm_segmap. */
		/* XXX: Maybe inline this call? */
		pmeg_release(pmeg_p(sme));
	}

	/* Restore previous context. */
	set_context(saved_ctxnum);

	/* Dequeue, update, requeue. */
	TAILQ_REMOVE(&context_active_queue, contextp, context_link);
	pmap->pm_ctxnum = EMPTY_CONTEXT;
	contextp->context_upmap = NULL;
	TAILQ_INSERT_TAIL(&context_free_queue, contextp, context_link);
}
653:
654:
655: /****************************************************************
656: * PMEG management functions.
657: */
658:
659: static void
660: pmeg_init()
661: {
662: int x;
663:
664: /* clear pmeg array, put it all on the free pmeq queue */
665:
666: TAILQ_INIT(&pmeg_free_queue);
667: TAILQ_INIT(&pmeg_inactive_queue);
668: TAILQ_INIT(&pmeg_active_queue);
669: TAILQ_INIT(&pmeg_kernel_queue);
670:
1.16 fredette 671: memset(pmeg_array, 0, NPMEG*sizeof(struct pmeg_state));
1.1 fredette 672: for (x =0 ; x<NPMEG; x++) {
673: TAILQ_INSERT_TAIL(&pmeg_free_queue, &pmeg_array[x],
674: pmeg_link);
675: pmeg_array[x].pmeg_qstate = PMEGQ_FREE;
676: pmeg_array[x].pmeg_index = x;
677: }
678:
679: /* The last pmeg is not usable. */
680: pmeg_reserve(SEGINV);
681: }
682:
683: /*
684: * Reserve a pmeg (forever) for use by PROM, etc.
685: * Contents are left as-is. Called very early...
686: */
687: void
688: pmeg_reserve(sme)
689: int sme;
690: {
691: pmeg_t pmegp;
692:
693: /* Can not use pmeg_p() because it fails on SEGINV. */
694: pmegp = &pmeg_array[sme];
695:
696: if (pmegp->pmeg_reserved) {
1.3 fredette 697: prom_printf("pmeg_reserve: already reserved\n");
698: prom_abort();
1.1 fredette 699: }
700: if (pmegp->pmeg_owner) {
1.3 fredette 701: prom_printf("pmeg_reserve: already owned\n");
702: prom_abort();
1.1 fredette 703: }
704:
705: /* Owned by kernel, but not really usable... */
706: pmegp->pmeg_owner = kernel_pmap;
707: pmegp->pmeg_reserved++; /* keep count, just in case */
708: TAILQ_REMOVE(&pmeg_free_queue, pmegp, pmeg_link);
709: pmegp->pmeg_qstate = PMEGQ_NONE;
710: }
711:
/*
 * Examine PMEGs used by the monitor, and either
 * reserve them (keep=1) or clear them (keep=0)
 * Walks [sva, eva) one hardware segment at a time; a PMEG is
 * only reserved if at least one of its PTEs is still valid.
 */
static void
pmeg_mon_init(sva, eva, keep)
	vaddr_t sva, eva;
	int keep;	/* true: steal, false: clear */
{
	vaddr_t pgva, endseg;
	int pte, valid;
	unsigned char sme;

#ifdef	PMAP_DEBUG
	if (pmap_debug & PMD_SEGMAP)
		prom_printf("pmeg_mon_init(0x%x, 0x%x, %d)\n",
				   sva, eva, keep);
#endif

	/* Round the start down to a segment boundary. */
	sva &= ~(NBSG-1);

	while (sva < eva) {
		sme = get_segmap(sva);
		if (sme != SEGINV) {
			/* Count the valid pages in this segment. */
			valid = 0;
			endseg = sva + NBSG;
			for (pgva = sva; pgva < endseg; pgva += PAGE_SIZE) {
				pte = get_pte(pgva);
				if (pte & PG_VALID) {
					valid++;
				}
			}
#ifdef	PMAP_DEBUG
			if (pmap_debug & PMD_SEGMAP)
				prom_printf(" sva=0x%x seg=0x%x valid=%d\n",
					   sva, sme, valid);
#endif
			if (keep && valid)
				pmeg_reserve(sme);
			else set_segmap(sva, SEGINV);
		}
		sva += NBSG;
	}
}
756:
757: /*
758: * This is used only during pmap_bootstrap, so we can
759: * get away with borrowing a slot in the segmap.
760: */
761: static void
762: pmeg_clean(pmegp)
763: pmeg_t pmegp;
764: {
765: int sme;
1.16 fredette 766: vaddr_t va;
1.1 fredette 767:
1.9 fredette 768: sme = get_segmap(temp_seg_va);
1.1 fredette 769: if (sme != SEGINV)
770: panic("pmeg_clean");
771:
772: sme = pmegp->pmeg_index;
1.9 fredette 773: set_segmap(temp_seg_va, sme);
1.1 fredette 774:
1.20 thorpej 775: for (va = 0; va < NBSG; va += PAGE_SIZE)
1.9 fredette 776: set_pte(temp_seg_va + va, PG_INVAL);
1.1 fredette 777:
1.9 fredette 778: set_segmap(temp_seg_va, SEGINV);
1.1 fredette 779: }
780:
/*
 * This routine makes sure that pmegs on the pmeg_free_queue contain
 * no valid ptes.  It pulls things off the queue, cleans them, and
 * puts them at the end.  The ending condition is finding the first
 * queue element at the head of the queue again.
 */
static void
pmeg_clean_free()
{
	pmeg_t pmegp, pmegp_first;

	pmegp = TAILQ_FIRST(&pmeg_free_queue);
	if (pmegp == NULL)
		panic("pmap: no free pmegs available to clean");

	pmegp_first = NULL;

	for (;;) {
		/* Rotate: pop the head, clean it, append at the tail. */
		pmegp = TAILQ_FIRST(&pmeg_free_queue);
		TAILQ_REMOVE(&pmeg_free_queue, pmegp, pmeg_link);

		/* Mark it off-queue while its PTEs are invalidated. */
		pmegp->pmeg_qstate = PMEGQ_NONE;
		pmeg_clean(pmegp);
		pmegp->pmeg_qstate = PMEGQ_FREE;

		TAILQ_INSERT_TAIL(&pmeg_free_queue, pmegp, pmeg_link);

		/*
		 * Stop when the first pmeg we cleaned comes back
		 * around to the head.  pmegp_first is still NULL on
		 * the first pass, so the break cannot fire early.
		 */
		if (pmegp == pmegp_first)
			break;
		if (pmegp_first == NULL)
			pmegp_first = pmegp;
	}
}
814:
815: /*
816: * Allocate a PMEG by whatever means necessary.
817: * (May invalidate some mappings!)
818: */
819: static pmeg_t
820: pmeg_allocate(pmap, va)
821: pmap_t pmap;
1.16 fredette 822: vaddr_t va;
1.1 fredette 823: {
824: pmeg_t pmegp;
825:
826: CHECK_SPL();
827:
828: #ifdef DIAGNOSTIC
829: if (va & SEGOFSET) {
830: panic("pmap:pmeg_allocate: va=0x%lx", va);
831: }
832: #endif
833:
834: /* Get one onto the free list if necessary. */
835: pmegp = TAILQ_FIRST(&pmeg_free_queue);
836: if (!pmegp) {
837: /* Try inactive queue... */
838: pmegp = TAILQ_FIRST(&pmeg_inactive_queue);
839: if (!pmegp) {
840: /* Try active queue... */
841: pmegp = TAILQ_FIRST(&pmeg_active_queue);
842: }
843: if (!pmegp) {
844: panic("pmeg_allocate: failed");
845: }
846: /*
847: * Remove mappings to free-up a pmeg
848: * (so it will go onto the free list).
849: * XXX - Skip this one if it is wired?
850: */
851: pmap_remove1(pmegp->pmeg_owner,
852: pmegp->pmeg_va,
853: pmegp->pmeg_va + NBSG);
854: }
855:
856: /* OK, free list has something for us to take. */
857: pmegp = TAILQ_FIRST(&pmeg_free_queue);
858: #ifdef DIAGNOSTIC
859: if (pmegp == NULL)
860: panic("pmeg_allocagte: still none free?");
861: if ((pmegp->pmeg_qstate != PMEGQ_FREE) ||
862: (pmegp->pmeg_index == SEGINV) ||
863: (pmegp->pmeg_vpages))
864: panic("pmeg_allocate: bad pmegp=%p", pmegp);
865: #endif
866: #ifdef PMAP_DEBUG
867: if (pmegp->pmeg_index == pmap_db_watchpmeg) {
868: db_printf("pmeg_allocate: watch pmegp=%p\n", pmegp);
869: Debugger();
870: }
871: #endif
872:
873: TAILQ_REMOVE(&pmeg_free_queue, pmegp, pmeg_link);
874:
875: /* Reassign this PMEG for the caller. */
876: pmegp->pmeg_owner = pmap;
877: pmegp->pmeg_version = pmap->pm_version;
878: pmegp->pmeg_va = va;
879: pmegp->pmeg_wired = 0;
880: pmegp->pmeg_reserved = 0;
881: pmegp->pmeg_vpages = 0;
882: if (pmap == kernel_pmap) {
883: TAILQ_INSERT_TAIL(&pmeg_kernel_queue, pmegp, pmeg_link);
884: pmegp->pmeg_qstate = PMEGQ_KERNEL;
885: } else {
886: TAILQ_INSERT_TAIL(&pmeg_active_queue, pmegp, pmeg_link);
887: pmegp->pmeg_qstate = PMEGQ_ACTIVE;
888: }
889: /* Caller will verify that it's empty (if debugging). */
890: return pmegp;
891: }
892:
893: /*
894: * Put pmeg on the inactive queue, leaving its contents intact.
895: * This happens when we loose our context. We may reclaim
896: * this pmeg later if it is still in the inactive queue.
897: */
898: static void
899: pmeg_release(pmegp)
900: pmeg_t pmegp;
901: {
902:
903: CHECK_SPL();
904:
905: #ifdef DIAGNOSTIC
906: if ((pmegp->pmeg_owner == kernel_pmap) ||
907: (pmegp->pmeg_qstate != PMEGQ_ACTIVE))
908: panic("pmeg_release: bad pmeg=%p", pmegp);
909: #endif
910:
911: TAILQ_REMOVE(&pmeg_active_queue, pmegp, pmeg_link);
912: pmegp->pmeg_qstate = PMEGQ_INACTIVE;
913: TAILQ_INSERT_TAIL(&pmeg_inactive_queue, pmegp, pmeg_link);
914: }
915:
916: /*
917: * Move the pmeg to the free queue from wherever it is.
918: * The pmeg will be clean. It might be in kernel_pmap.
919: */
920: static void
921: pmeg_free(pmegp)
922: pmeg_t pmegp;
923: {
924:
925: CHECK_SPL();
926:
927: #ifdef DIAGNOSTIC
928: /* Caller should verify that it's empty. */
929: if (pmegp->pmeg_vpages != 0)
930: panic("pmeg_free: vpages");
931: #endif
932:
933: switch (pmegp->pmeg_qstate) {
934: case PMEGQ_ACTIVE:
935: TAILQ_REMOVE(&pmeg_active_queue, pmegp, pmeg_link);
936: break;
937: case PMEGQ_INACTIVE:
938: TAILQ_REMOVE(&pmeg_inactive_queue, pmegp, pmeg_link);
939: break;
940: case PMEGQ_KERNEL:
941: TAILQ_REMOVE(&pmeg_kernel_queue, pmegp, pmeg_link);
942: break;
943: default:
944: panic("pmeg_free: releasing bad pmeg");
945: break;
946: }
947:
948: #ifdef PMAP_DEBUG
949: if (pmegp->pmeg_index == pmap_db_watchpmeg) {
950: db_printf("pmeg_free: watch pmeg 0x%x\n",
951: pmegp->pmeg_index);
952: Debugger();
953: }
954: #endif
955:
956: pmegp->pmeg_owner = NULL;
957: pmegp->pmeg_qstate = PMEGQ_FREE;
958: TAILQ_INSERT_TAIL(&pmeg_free_queue, pmegp, pmeg_link);
959: }
960:
961: /*
962: * Find a PMEG that was put on the inactive queue when we
963: * had our context stolen. If found, move to active queue.
964: */
965: static pmeg_t
966: pmeg_cache(pmap, va)
967: pmap_t pmap;
1.16 fredette 968: vaddr_t va;
1.1 fredette 969: {
970: int sme, segnum;
971: pmeg_t pmegp;
972:
973: CHECK_SPL();
974:
975: #ifdef DIAGNOSTIC
976: if (pmap == kernel_pmap)
977: panic("pmeg_cache: kernel_pmap");
978: if (va & SEGOFSET) {
979: panic("pmap:pmeg_cache: va=0x%lx", va);
980: }
981: #endif
982:
983: if (pmap->pm_segmap == NULL)
984: return PMEG_NULL;
985:
986: segnum = VA_SEGNUM(va);
987: if (segnum > NUSEG) /* out of range */
988: return PMEG_NULL;
989:
990: sme = pmap->pm_segmap[segnum];
991: if (sme == SEGINV) /* nothing cached */
992: return PMEG_NULL;
993:
994: pmegp = pmeg_p(sme);
995:
996: #ifdef PMAP_DEBUG
997: if (pmegp->pmeg_index == pmap_db_watchpmeg) {
998: db_printf("pmeg_cache: watch pmeg 0x%x\n", pmegp->pmeg_index);
999: Debugger();
1000: }
1001: #endif
1002:
1003: /*
1004: * Our segmap named a PMEG. If it is no longer ours,
1005: * invalidate that entry in our segmap and return NULL.
1006: */
1007: if ((pmegp->pmeg_owner != pmap) ||
1008: (pmegp->pmeg_version != pmap->pm_version) ||
1009: (pmegp->pmeg_va != va))
1010: {
1011: #ifdef PMAP_DEBUG
1012: db_printf("pmap:pmeg_cache: invalid pmeg: sme=0x%x\n", sme);
1013: pmeg_print(pmegp);
1014: Debugger();
1015: #endif
1016: pmap->pm_segmap[segnum] = SEGINV;
1017: return PMEG_NULL; /* cache lookup failed */
1018: }
1019:
1020: #ifdef DIAGNOSTIC
1021: /* Make sure it is on the inactive queue. */
1022: if (pmegp->pmeg_qstate != PMEGQ_INACTIVE)
1023: panic("pmeg_cache: pmeg was taken: %p", pmegp);
1024: #endif
1025:
1026: TAILQ_REMOVE(&pmeg_inactive_queue, pmegp, pmeg_link);
1027: pmegp->pmeg_qstate = PMEGQ_ACTIVE;
1028: TAILQ_INSERT_TAIL(&pmeg_active_queue, pmegp, pmeg_link);
1029:
1030: return pmegp;
1031: }
1032:
#ifdef	PMAP_DEBUG
/*
 * Debug check: panic if any PTE in the segment currently
 * mapped at "va" is still valid.
 */
static void
pmeg_verify_empty(va)
	vaddr_t va;
{
	vaddr_t limit;

	for (limit = va + NBSG; va < limit; va += PAGE_SIZE) {
		if (get_pte(va) & PG_VALID)
			panic("pmeg_verify_empty");
	}
}
#endif	/* PMAP_DEBUG */
1048:
1049:
1050: /****************************************************************
 * Physical-to-virtual lookup support
1052: *
1053: * Need memory for the pv_alloc/pv_free list heads
1054: * and elements. We know how many to allocate since
1055: * there is one list head for each physical page, and
1056: * at most one element for each PMEG slot.
1057: */
1058: static void
1059: pv_init()
1060: {
1061: int npp, nvp, sz;
1062: pv_entry_t pv;
1063: char *p;
1064:
1065: /* total allocation size */
1066: sz = 0;
1067:
1068: /*
1069: * Data for each physical page.
1070: * Each "mod/ref" flag is a char.
1071: * Each PV head is a pointer.
1072: * Note physmem is in pages.
1073: */
1074: npp = ALIGN(physmem);
1075: sz += (npp * sizeof(*pv_flags_tbl));
1076: sz += (npp * sizeof(*pv_head_tbl));
1077:
1078: /*
1079: * Data for each virtual page (all PMEGs).
1080: * One pv_entry for each page frame.
1081: */
1082: nvp = NPMEG * NPAGSEG;
1083: sz += (nvp * sizeof(*pv_free_list));
1084:
1085: /* Now allocate the whole thing. */
1086: sz = m68k_round_page(sz);
1.16 fredette 1087: p = (char *)uvm_km_alloc(kernel_map, sz);
1.1 fredette 1088: if (p == NULL)
1089: panic("pmap:pv_init: alloc failed");
1.16 fredette 1090: memset(p, 0, sz);
1.1 fredette 1091:
1092: /* Now divide up the space. */
1093: pv_flags_tbl = (void *) p;
1094: p += (npp * sizeof(*pv_flags_tbl));
1095: pv_head_tbl = (void*) p;
1096: p += (npp * sizeof(*pv_head_tbl));
1.16 fredette 1097: pv_free_list = (void *)p;
1.1 fredette 1098: p += (nvp * sizeof(*pv_free_list));
1099:
1100: /* Finally, make pv_free_list into a list. */
1.16 fredette 1101: for (pv = pv_free_list; (char *)pv < p; pv++)
1.1 fredette 1102: pv->pv_next = &pv[1];
1103: pv[-1].pv_next = 0;
1104:
1105: pv_initialized++;
1106: }
1107:
1108: /*
1109: * Set or clear bits in all PTEs mapping a page.
1110: * Also does syncflags work while we are there...
1111: */
/*
 * For every mapping of physical page pa, set the bits in set_bits and
 * clear the bits in clear_bits in the hardware PTE.  Any mod/ref bits
 * found along the way are harvested into the page's pv_flags byte.
 *
 * Only PG_WRITE, PG_NC, PG_REF and PG_MOD may be cleared.
 * Temporarily switches MMU contexts; restores the caller's context
 * before returning.  Must be called at splvm.
 */
static void
pv_changepte(pa, set_bits, clear_bits)
	paddr_t pa;
	int set_bits;
	int clear_bits;
{
	pv_entry_t *head, pv;
	u_char *pv_flags;
	pmap_t pmap;
	vaddr_t va;
	int pte, sme;
	int saved_ctx;
	boolean_t in_ctx;
	u_int flags;

	pv_flags = pa_to_pvflags(pa);
	head = pa_to_pvhead(pa);

	/* If no mappings, no work to do. */
	if (*head == NULL)
		return;

#ifdef DIAGNOSTIC
	/* This function should only clear these bits: */
	if (clear_bits & ~(PG_WRITE | PG_NC | PG_REF | PG_MOD))
		panic("pv_changepte: clear=0x%x", clear_bits);
#endif

	flags = 0;
	saved_ctx = get_context();
	for (pv = *head; pv != NULL; pv = pv->pv_next) {
		pmap = pv->pv_pmap;
		va = pv->pv_va;

#ifdef DIAGNOSTIC
		if (pmap->pm_segmap == NULL)
			panic("pv_changepte: null segmap");
#endif

		/* Is the PTE currently accessible in some context? */
		in_ctx = FALSE;
		sme = SEGINV;	/* kill warning */
		if (pmap == kernel_pmap) {
			/* Kernel mappings are always visible in the kernel context. */
			set_context(KERNEL_CONTEXT);
			in_ctx = TRUE;
		}
		else if (has_context(pmap)) {
			/* PMEG may be inactive. */
			set_context(pmap->pm_ctxnum);
			sme = get_segmap(va);
			if (sme != SEGINV)
				in_ctx = TRUE;
		}

		if (in_ctx == TRUE) {
			/*
			 * The PTE is in the current context.
			 * Make sure PTE is up-to-date with VAC.
			 */
#ifdef HAVECACHE
			if (cache_size)
				cache_flush_page(va);
#endif
			pte = get_pte(va);
		} else {

			/*
			 * The PTE is not in any context; reach the PMEG
			 * through the temporary mapping instead.
			 */

			sme = pmap->pm_segmap[VA_SEGNUM(va)];
#ifdef DIAGNOSTIC
			if (sme == SEGINV)
				panic("pv_changepte: SEGINV");
#endif
			pte = get_pte_pmeg(sme, VA_PTE_NUM(va));
		}

#ifdef DIAGNOSTIC
		/* PV entries point only to valid mappings. */
		if ((pte & PG_VALID) == 0)
			panic("pv_changepte: not PG_VALID at va=0x%lx", va);
#endif
		/* Get these while it's easy. */
		if (pte & PG_MODREF) {
			flags |= (pte & PG_MODREF);
			pte &= ~PG_MODREF;
		}

		/* Finally, set and clear some bits. */
		pte |= set_bits;
		pte &= ~clear_bits;

		if (in_ctx == TRUE) {
			/* Did cache flush above. */
			set_pte(va, pte);
		} else {
			set_pte_pmeg(sme, VA_PTE_NUM(va), pte);
		}
	}
	/* Restore the context we came in on. */
	set_context(saved_ctx);

	/* Fold the harvested hardware mod/ref bits into the pv flags. */
	*pv_flags |= (flags >> PV_SHIFT);
}
1216:
1217: /*
1218: * Return ref and mod bits from pvlist,
1219: * and turns off same in hardware PTEs.
1220: */
/*
 * Walk the pv list, harvest the hardware mod/ref bits from every PTE
 * mapping the page (clearing them in the PTEs as we go), and return
 * the accumulated bits shifted into pv-flag position.
 *
 * Temporarily switches MMU contexts; restores the caller's context
 * before returning.  Must be called at splvm.
 */
static u_int
pv_syncflags(pv)
	pv_entry_t pv;
{
	pmap_t pmap;
	vaddr_t va;
	int pte, sme;
	int saved_ctx;
	boolean_t in_ctx;
	u_int flags;

	/* If no mappings, no work to do. */
	if (pv == NULL)
		return (0);

	flags = 0;
	saved_ctx = get_context();
	for (; pv != NULL; pv = pv->pv_next) {
		pmap = pv->pv_pmap;
		va = pv->pv_va;
		sme = SEGINV;

#ifdef DIAGNOSTIC
		/*
		 * Only the head may have a null pmap, and
		 * we checked for that above.
		 */
		if (pmap->pm_segmap == NULL)
			panic("pv_syncflags: null segmap");
#endif

		/* Is the PTE currently accessible in some context? */
		in_ctx = FALSE;
		if (pmap == kernel_pmap) {
			/* Kernel mappings are always visible in the kernel context. */
			set_context(KERNEL_CONTEXT);
			in_ctx = TRUE;
		}
		else if (has_context(pmap)) {
			/* PMEG may be inactive. */
			set_context(pmap->pm_ctxnum);
			sme = get_segmap(va);
			if (sme != SEGINV)
				in_ctx = TRUE;
		}

		if (in_ctx == TRUE) {

			/*
			 * The PTE is in the current context.
			 * Make sure PTE is up-to-date with VAC.
			 */

#ifdef HAVECACHE
			if (cache_size)
				cache_flush_page(va);
#endif
			pte = get_pte(va);
		} else {

			/*
			 * The PTE is not in any context; reach the PMEG
			 * through the temporary mapping instead.
			 */

			sme = pmap->pm_segmap[VA_SEGNUM(va)];
#ifdef DIAGNOSTIC
			if (sme == SEGINV)
				panic("pv_syncflags: SEGINV");
#endif
			pte = get_pte_pmeg(sme, VA_PTE_NUM(va));
		}

#ifdef DIAGNOSTIC
		/* PV entries point only to valid mappings. */
		if ((pte & PG_VALID) == 0)
			panic("pv_syncflags: not PG_VALID at va=0x%lx", va);
#endif
		/* OK, do what we came here for... */
		if (pte & PG_MODREF) {
			flags |= (pte & PG_MODREF);
			pte &= ~PG_MODREF;
		}

		if (in_ctx == TRUE) {
			/* Did cache flush above. */
			set_pte(va, pte);
		} else {
			set_pte_pmeg(sme, VA_PTE_NUM(va), pte);
		}
	}
	/* Restore the context we came in on. */
	set_context(saved_ctx);

	return (flags >> PV_SHIFT);
}
1314:
1315: /* Remove all mappings for the physical page. */
1316: static void
1317: pv_remove_all(pa)
1.16 fredette 1318: paddr_t pa;
1.1 fredette 1319: {
1320: pv_entry_t *head, pv;
1321: pmap_t pmap;
1.16 fredette 1322: vaddr_t va;
1.1 fredette 1323:
1324: CHECK_SPL();
1325:
1326: #ifdef PMAP_DEBUG
1327: if (pmap_debug & PMD_REMOVE)
1328: printf("pv_remove_all(0x%lx)\n", pa);
1329: #endif
1330:
1331: head = pa_to_pvhead(pa);
1332: while ((pv = *head) != NULL) {
1333: pmap = pv->pv_pmap;
1334: va = pv->pv_va;
1.20 thorpej 1335: pmap_remove1(pmap, va, va + PAGE_SIZE);
1.1 fredette 1336: #ifdef PMAP_DEBUG
1337: /* Make sure it went away. */
1338: if (pv == *head) {
1339: db_printf("pv_remove_all: head unchanged for pa=0x%lx\n", pa);
1340: Debugger();
1341: }
1342: #endif
1343: }
1344: }
1345:
1346: /*
1347: * The pmap system is asked to lookup all mappings that point to a
1348: * given physical memory address. This function adds a new element
1349: * to the list of mappings maintained for the given physical address.
1350: * Returns PV_NC if the (new) pvlist says that the address cannot
1351: * be cached.
1352: */
/*
 * Add a (pmap, va) mapping of physical page PG_PA(pte) to the page's
 * pv list.  Returns PV_NC if the page must now be mapped non-cached
 * (because this mapping, or an existing one, forces it).
 * Must be called at splvm.
 */
static int
pv_link(pmap, pte, va)
	pmap_t pmap;
	int pte;
	vaddr_t va;
{
	paddr_t pa;
	pv_entry_t *head, pv;
	u_char *pv_flags;
	int flags;

	/* Before pv_init() runs, there is nothing to track. */
	if (!pv_initialized)
		return 0;

	CHECK_SPL();

	/* Only the non-cached bit is of interest here. */
	flags = (pte & PG_NC) ? PV_NC : 0;
	pa = PG_PA(pte);

#ifdef PMAP_DEBUG
	if ((pmap_debug & PMD_LINK) || (va == pmap_db_watchva)) {
		printf("pv_link(%p, 0x%x, 0x%lx)\n", pmap, pte, va);
		/* pv_print(pa); */
	}
#endif

	pv_flags = pa_to_pvflags(pa);
	head = pa_to_pvhead(pa);

#ifdef DIAGNOSTIC
	/* See if this mapping is already in the list. */
	for (pv = *head; pv != NULL; pv = pv->pv_next) {
		if ((pv->pv_pmap == pmap) && (pv->pv_va == va))
			panic("pv_link: duplicate entry for PA=0x%lx", pa);
	}
#endif
#ifdef HAVECACHE

	/*
	 * Does this new mapping cause VAC alias problems?
	 * If so, mark the whole page non-cached and update
	 * every existing PTE for it.
	 */

	*pv_flags |= flags;
	if ((*pv_flags & PV_NC) == 0) {
		for (pv = *head; pv != NULL; pv = pv->pv_next) {
			if (BADALIAS(va, pv->pv_va)) {
				*pv_flags |= PV_NC;
				pv_changepte(pa, PG_NC, 0);
				pmap_stats.ps_vac_uncached++;
				break;
			}
		}
	}
#endif

	/* Allocate a PV element (pv_alloc()). */
	pv = pv_free_list;
	if (pv == NULL)
		panic("pv_link: pv_alloc");
	pv_free_list = pv->pv_next;
	pv->pv_next = 0;

	/* Insert new entry at the head. */
	pv->pv_pmap = pmap;
	pv->pv_va = va;
	pv->pv_next = *head;
	*head = pv;

	return (*pv_flags & PV_NC);
}
1424:
1425: /*
1426: * pv_unlink is a helper function for pmap_remove.
1427: * It removes the appropriate (pmap, pa, va) entry.
1428: *
1429: * Once the entry is removed, if the pv_table head has the cache
1430: * inhibit bit set, see if we can turn that off; if so, walk the
1431: * pvlist and turn off PG_NC in each PTE. (The pvlist is by
1432: * definition nonempty, since it must have at least two elements
1433: * in it to have PV_NC set, and we only remove one here.)
1434: */
1435: static void
1436: pv_unlink(pmap, pte, va)
1437: pmap_t pmap;
1438: int pte;
1.16 fredette 1439: vaddr_t va;
1.1 fredette 1440: {
1.16 fredette 1441: paddr_t pa;
1.1 fredette 1442: pv_entry_t *head, *ppv, pv;
1443: u_char *pv_flags;
1444:
1445: CHECK_SPL();
1446:
1447: pa = PG_PA(pte);
1448: #ifdef PMAP_DEBUG
1449: if ((pmap_debug & PMD_LINK) || (va == pmap_db_watchva)) {
1450: printf("pv_unlink(%p, 0x%x, 0x%lx)\n", pmap, pte, va);
1451: /* pv_print(pa); */
1452: }
1453: #endif
1454:
1455: pv_flags = pa_to_pvflags(pa);
1456: head = pa_to_pvhead(pa);
1457:
1458: /*
1459: * Find the entry.
1460: */
1461: ppv = head;
1462: pv = *ppv;
1463: while (pv) {
1464: if ((pv->pv_pmap == pmap) && (pv->pv_va == va))
1465: goto found;
1466: ppv = &pv->pv_next;
1467: pv = pv->pv_next;
1468: }
1469: #ifdef PMAP_DEBUG
1470: db_printf("pv_unlink: not found (pa=0x%lx,va=0x%lx)\n", pa, va);
1471: Debugger();
1472: #endif
1473: return;
1474:
1475: found:
1476: /* Unlink this entry from the list and clear it. */
1477: *ppv = pv->pv_next;
1478: pv->pv_pmap = NULL;
1479: pv->pv_va = 0;
1480:
1481: /* Insert it on the head of the free list. (pv_free()) */
1482: pv->pv_next = pv_free_list;
1483: pv_free_list = pv;
1484: pv = NULL;
1485:
1486: /* Do any non-cached mappings remain? */
1487: if ((*pv_flags & PV_NC) == 0)
1488: return;
1489: if ((pv = *head) == NULL)
1490: return;
1491:
1492: /*
1493: * Have non-cached mappings. See if we can fix that now.
1494: */
1495: va = pv->pv_va;
1496: for (pv = pv->pv_next; pv != NULL; pv = pv->pv_next) {
1497: /* If there is a DVMA mapping, leave it NC. */
1498: if (va >= DVMA_MAP_BASE)
1499: return;
1500: /* If there are VAC alias problems, leave NC. */
1501: if (BADALIAS(va, pv->pv_va))
1502: return;
1503: }
1504: /* OK, there are no "problem" mappings. */
1505: *pv_flags &= ~PV_NC;
1506: pv_changepte(pa, 0, PG_NC);
1507: pmap_stats.ps_vac_recached++;
1508: }
1509:
1510:
1511: /****************************************************************
1512: * Bootstrap and Initialization, etc.
1513: */
1514:
1515: void
1516: pmap_common_init(pmap)
1517: pmap_t pmap;
1518: {
1.16 fredette 1519: memset(pmap, 0, sizeof(struct pmap));
1520: pmap->pm_refcount = 1;
1.1 fredette 1521: pmap->pm_version = pmap_version++;
1522: pmap->pm_ctxnum = EMPTY_CONTEXT;
1523: simple_lock_init(&pmap->pm_lock);
1524: }
1525:
1526: /*
1527: * Prepare the kernel for VM operations.
1528: * This is called by locore2.c:_vm_init()
1529: * after the "start/end" globals are set.
1530: * This function must NOT leave context zero.
1531: */
void
pmap_bootstrap(nextva)
	vaddr_t nextva;
{
	vaddr_t va, eva;
	int i, pte, sme;
	extern char etext[];

	nextva = m68k_round_page(nextva);

	/* Steal some special-purpose, already mapped pages? */

	/*
	 * Determine the range of kernel virtual space available.
	 * It is segment-aligned to simplify PMEG management.
	 */
	virtual_avail = m68k_round_seg(nextva);
	virtual_end = VM_MAX_KERNEL_ADDRESS;

	/*
	 * Determine the range of physical memory available.
	 * (The PROM reports the total memory size.)
	 */
	avail_start = nextva;
	avail_end = prom_memsize();
	avail_end = m68k_trunc_page(avail_end);

	/*
	 * Report the actual amount of physical memory,
	 * even though the PROM takes a few pages.
	 * (Rounded up to a multiple of 16 pages.)
	 */
	physmem = (btoc(avail_end) + 0xF) & ~0xF;

	/*
	 * Done allocating PAGES of virtual space, so
	 * clean out the rest of the last used segment.
	 */
	for (va = nextva; va < virtual_avail; va += PAGE_SIZE)
		set_pte(va, PG_INVAL);

	/*
	 * Now that we are done stealing physical pages, etc.
	 * figure out which PMEGs are used by those mappings
	 * and either reserve them or clear them out.
	 * -- but first, init PMEG management.
	 * This puts all PMEGs in the free list.
	 * We will allocate the in-use ones.
	 */
	pmeg_init();

	/*
	 * Reserve PMEGS for kernel text/data/bss
	 * and the misc pages taken above.
	 * VA range: [KERNBASE .. virtual_avail]
	 */
	for (va = KERNBASE; va < virtual_avail; va += NBSG) {
		sme = get_segmap(va);
		if (sme == SEGINV) {
			prom_printf("kernel text/data/bss not mapped\n");
			prom_abort();
		}
		pmeg_reserve(sme);
	}

	/*
	 * Unmap kernel virtual space.  Make sure to leave no valid
	 * segmap entries in the MMU unless pmeg_array records them.
	 * VA range: [vseg_avail .. virtual_end]
	 */
	for ( ; va < virtual_end; va += NBSG)
		set_segmap(va, SEGINV);

	/*
	 * Reserve PMEGs used by the PROM monitor (device mappings).
	 * Free up any pmegs in this range which have no mappings.
	 * VA range: [0x00E00000 .. 0x00F00000]
	 */
	pmeg_mon_init(SUN2_MONSTART, SUN2_MONEND, TRUE);

	/*
	 * Unmap any pmegs left in DVMA space by the PROM.
	 * DO NOT kill the last one! (owned by the PROM!)
	 * VA range: [0x00F00000 .. 0x00FE0000]
	 */
	pmeg_mon_init(SUN2_MONEND, SUN2_MONEND + DVMA_MAP_SIZE, FALSE);

	/*
	 * Done reserving PMEGs and/or clearing out mappings.
	 *
	 * Now verify the mapping protections and such for the
	 * important parts of the address space (in VA order).
	 * Note that the Sun PROM usually leaves the memory
	 * mapped with everything non-cached...
	 */

	/*
	 * On a Sun2, the boot loader loads the kernel exactly where
	 * it is linked, at physical/virtual 0x6000 (KERNBASE).  This
	 * means there are twelve physical/virtual pages before the
	 * kernel text begins.
	 */
	va = 0;

	/*
	 * Physical/virtual pages zero through three are used by the
	 * PROM.  prom_init has already saved the PTEs, but we don't
	 * want to unmap the pages until we've installed our own
	 * vector table - just in case something happens before then
	 * and we drop into the PROM.
	 */
	eva = va + PAGE_SIZE * 4;
	va = eva;

	/*
	 * We use pages four through seven for the msgbuf.
	 * Mark them system, writable, and non-cached.
	 */
	eva = va + PAGE_SIZE * 4;
	for(; va < eva; va += PAGE_SIZE) {
		pte = get_pte(va);
		pte |= (PG_SYSTEM | PG_WRITE | PG_NC);
		set_pte(va, pte);
	}
	/* Initialize msgbufaddr later, in machdep.c */

	/*
	 * On the Sun3, two of the three dead pages in SUN3_MONSHORTSEG
	 * are used for tmp_vpages.  The Sun2 doesn't have this
	 * short-segment concept, so we reserve virtual pages eight
	 * and nine for this.
	 */
	set_pte(va, PG_INVAL);
	va += PAGE_SIZE;
	set_pte(va, PG_INVAL);
	va += PAGE_SIZE;

	/*
	 * Pages ten and eleven remain for the temporary kernel stack,
	 * which is set up by locore.s.  Hopefully this is enough space.
	 */
	eva = va + PAGE_SIZE * 2;
	for(; va < eva ; va += PAGE_SIZE) {
		pte = get_pte(va);
		pte &= ~(PG_NC);
		pte |= (PG_SYSTEM | PG_WRITE);
		set_pte(va, pte);
	}

	/*
	 * Next is the kernel text.
	 *
	 * Verify protection bits on kernel text/data/bss
	 * All of kernel text, data, and bss are cached.
	 * Text is read-only (except in db_write_ktext).
	 */
	eva = m68k_trunc_page(etext);
	while (va < eva) {
		pte = get_pte(va);
		if ((pte & (PG_VALID|PG_TYPE)) != PG_VALID) {
			prom_printf("invalid page at 0x%x\n", va);
		}
		pte &= ~(PG_WRITE|PG_NC);
		/* Kernel text is read-only */
		pte |= (PG_SYSTEM);
		set_pte(va, pte);
		va += PAGE_SIZE;
	}
	/* data, bss, etc. are cached, system, and writable. */
	while (va < nextva) {
		pte = get_pte(va);
		if ((pte & (PG_VALID|PG_TYPE)) != PG_VALID) {
			prom_printf("invalid page at 0x%x\n", va);
		}
		pte &= ~(PG_NC);
		pte |= (PG_SYSTEM | PG_WRITE);
		set_pte(va, pte);
		va += PAGE_SIZE;
	}

	/*
	 * Initialize all of the other contexts:
	 * invalidate every segmap entry in each.
	 */
#ifdef DIAGNOSTIC
	/* Near the beginning of locore.s we set context zero. */
	if (get_context() != 0) {
		prom_printf("pmap_bootstrap: not in context zero?\n");
		prom_abort();
	}
#endif /* DIAGNOSTIC */
	for (va = 0; va < (vaddr_t) (NBSG * NSEGMAP); va += NBSG) {
		for (i = 1; i < NCONTEXT; i++) {
			set_context(i);
			set_segmap(va, SEGINV);
		}
	}
	set_context(KERNEL_CONTEXT);

	/*
	 * Reserve a segment for the kernel to use to access a pmeg
	 * that is not currently mapped into any context/segmap.
	 * The kernel temporarily maps such a pmeg into this segment.
	 */
	temp_seg_va = virtual_avail;
	virtual_avail += NBSG;
#ifdef DIAGNOSTIC
	if (temp_seg_va & SEGOFSET) {
		prom_printf("pmap_bootstrap: temp_seg_va\n");
		prom_abort();
	}
#endif

	/* Initialization for pmap_next_page() */
	avail_next = avail_start;

	uvmexp.pagesize = NBPG;
	uvm_setpagesize();

	/* after setting up some structures */

	pmap_common_init(kernel_pmap);
	pmap_kernel_init(kernel_pmap);

	context_init();

	pmeg_clean_free();

	/* Hand the managed pages to the VM system. */
	pmap_page_upload();
}
1758:
1759: /*
1760: * Give the kernel pmap a segmap, just so there are not
1761: * so many special cases required. Maybe faster too,
1762: * because this lets pmap_remove() and pmap_protect()
1763: * use a S/W copy of the segmap to avoid function calls.
1764: */
1765: void
1766: pmap_kernel_init(pmap)
1767: pmap_t pmap;
1768: {
1.16 fredette 1769: vaddr_t va;
1.1 fredette 1770: int i, sme;
1771:
1772: for (i=0, va=0; i < NSEGMAP; i++, va+=NBSG) {
1773: sme = get_segmap(va);
1774: kernel_segmap[i] = sme;
1775: }
1776: pmap->pm_segmap = kernel_segmap;
1777: }
1778:
1779:
1780: /****************************************************************
1781: * PMAP interface functions.
1782: */
1783:
1784: /*
1785: * Support functions for vm_page_bootstrap().
1786: */
1.22 thorpej 1787:
1788: /*
1789: * How much virtual space does this kernel have?
1790: * (After mapping kernel text, data, etc.)
1791: */
1792: void
1793: pmap_virtual_space(v_start, v_end)
1794: vaddr_t *v_start;
1795: vaddr_t *v_end;
1796: {
1797: *v_start = virtual_avail;
1798: *v_end = virtual_end;
1799: }
1.1 fredette 1800:
1801: /* Provide memory to the VM system. */
1802: static void
1803: pmap_page_upload()
1804: {
1805: int a, b, c, d;
1806:
1807: if (hole_size) {
1808: /*
1809: * Supply the memory in two segments so the
1810: * reserved memory (3/50 video ram at 1MB)
1811: * can be carved from the front of the 2nd.
1812: */
1813: a = atop(avail_start);
1814: b = atop(hole_start);
1815: uvm_page_physload(a, b, a, b, VM_FREELIST_DEFAULT);
1816: c = atop(hole_start + hole_size);
1817: d = atop(avail_end);
1818: uvm_page_physload(b, d, c, d, VM_FREELIST_DEFAULT);
1819: } else {
1820: a = atop(avail_start);
1821: d = atop(avail_end);
1822: uvm_page_physload(a, d, a, d, VM_FREELIST_DEFAULT);
1823: }
1824: }
1825:
1826: /*
1827: * Initialize the pmap module.
1828: * Called by vm_init, to initialize any structures that the pmap
1829: * system needs to map virtual memory.
1830: */
1831: void
1832: pmap_init()
1833: {
1.16 fredette 1834: pv_init();
1.1 fredette 1835:
1.2 fredette 1836: /* Initialize the pmap pool. */
1837: pool_init(&pmap_pmap_pool, sizeof(struct pmap), 0, 0, 0, "pmappl",
1.17 thorpej 1838: &pool_allocator_nointr);
1.1 fredette 1839: }
1840:
1841: /*
1842: * Map a range of kernel virtual address space.
1843: * This might be used for device mappings, or to
1844: * record the mapping for kernel text/data/bss.
1845: * Return VA following the mapped range.
1846: */
1.16 fredette 1847: vaddr_t
1.1 fredette 1848: pmap_map(va, pa, endpa, prot)
1.16 fredette 1849: vaddr_t va;
1850: paddr_t pa;
1851: paddr_t endpa;
1.1 fredette 1852: int prot;
1853: {
1854: int sz;
1855:
1856: sz = endpa - pa;
1857: do {
1858: pmap_enter(kernel_pmap, va, pa, prot, 0);
1.20 thorpej 1859: va += PAGE_SIZE;
1860: pa += PAGE_SIZE;
1861: sz -= PAGE_SIZE;
1.1 fredette 1862: } while (sz > 0);
1.14 chris 1863: pmap_update(kernel_pmap);
1.1 fredette 1864: return(va);
1865: }
1866:
1867: void
1868: pmap_user_init(pmap)
1869: pmap_t pmap;
1870: {
1871: int i;
1872: pmap->pm_segmap = malloc(sizeof(char)*NUSEG, M_VMPMAP, M_WAITOK);
1.16 fredette 1873: for (i = 0; i < NUSEG; i++) {
1.1 fredette 1874: pmap->pm_segmap[i] = SEGINV;
1875: }
1876: }
1877:
1878: /*
1879: * Create and return a physical map.
1880: *
1881: * If the size specified for the map
1882: * is zero, the map is an actual physical
1883: * map, and may be referenced by the
1884: * hardware.
1885: *
1886: * If the size specified is non-zero,
1887: * the map will be used in software only, and
1888: * is bounded by that size.
1889: */
1890: pmap_t
1891: pmap_create()
1892: {
1893: pmap_t pmap;
1894:
1.2 fredette 1895: pmap = pool_get(&pmap_pmap_pool, PR_WAITOK);
1.1 fredette 1896: pmap_pinit(pmap);
1897: return pmap;
1898: }
1899:
1900: /*
1901: * Release any resources held by the given physical map.
1902: * Called when a pmap initialized by pmap_pinit is being released.
1903: * Should only be called if the map contains no valid mappings.
1904: */
1905: void
1906: pmap_release(pmap)
1907: struct pmap *pmap;
1908: {
1909: int s;
1910:
1.2 fredette 1911: s = splvm();
1.1 fredette 1912:
1913: if (pmap == kernel_pmap)
1914: panic("pmap_release: kernel_pmap!");
1915:
1916: if (has_context(pmap)) {
1917: #ifdef PMAP_DEBUG
1918: if (pmap_debug & PMD_CONTEXT)
1919: printf("pmap_release(%p): free ctx %d\n",
1920: pmap, pmap->pm_ctxnum);
1921: #endif
1922: context_free(pmap);
1923: }
1924: free(pmap->pm_segmap, M_VMPMAP);
1925: pmap->pm_segmap = NULL;
1926:
1927: splx(s);
1928: }
1929:
1930:
1931: /*
1932: * Retire the given physical map from service.
1933: * Should only be called if the map contains
1934: * no valid mappings.
1935: */
1936: void
1937: pmap_destroy(pmap)
1938: pmap_t pmap;
1939: {
1940: int count;
1941:
1942: #ifdef PMAP_DEBUG
1943: if (pmap_debug & PMD_CREATE)
1944: printf("pmap_destroy(%p)\n", pmap);
1945: #endif
1946: if (pmap == kernel_pmap)
1947: panic("pmap_destroy: kernel_pmap!");
1948: pmap_lock(pmap);
1949: count = pmap_del_ref(pmap);
1950: pmap_unlock(pmap);
1951: if (count == 0) {
1952: pmap_release(pmap);
1.2 fredette 1953: pool_put(&pmap_pmap_pool, pmap);
1.1 fredette 1954: }
1955: }
1956:
1957: /*
1958: * Add a reference to the specified pmap.
1959: */
1960: void
1961: pmap_reference(pmap)
1962: pmap_t pmap;
1963: {
1.16 fredette 1964: pmap_lock(pmap);
1965: pmap_add_ref(pmap);
1966: pmap_unlock(pmap);
1.1 fredette 1967: }
1968:
1969:
1970: /*
1971: * Insert the given physical page (p) at
1972: * the specified virtual address (v) in the
1973: * target physical map with the protection requested.
1974: *
1975: * The physical address is page aligned, but may have some
1976: * low bits set indicating an OBIO or VME bus page, or just
1977: * that the non-cache bit should be set (i.e PMAP_NC).
1978: *
1979: * If specified, the page will be wired down, meaning
1980: * that the related pte can not be reclaimed.
1981: *
1982: * NB: This is the only routine which MAY NOT lazy-evaluate
1983: * or lose information. That is, this routine must actually
1984: * insert this page into the given map NOW.
1985: */
1986: int
1987: pmap_enter(pmap, va, pa, prot, flags)
1988: pmap_t pmap;
1.16 fredette 1989: vaddr_t va;
1990: paddr_t pa;
1.1 fredette 1991: vm_prot_t prot;
1992: int flags;
1993: {
1994: int new_pte, s;
1995: boolean_t wired = (flags & PMAP_WIRED) != 0;
1996:
1997: #ifdef PMAP_DEBUG
1998: if ((pmap_debug & PMD_ENTER) ||
1999: (va == pmap_db_watchva))
2000: printf("pmap_enter(%p, 0x%lx, 0x%lx, 0x%x, 0x%x)\n",
2001: pmap, va, pa, prot, wired);
2002: #endif
2003:
2004: /* Get page-type bits from low part of the PA... */
2005: new_pte = (pa & PMAP_SPEC) << PG_MOD_SHIFT;
2006:
2007: /* ...now the valid and writable bits... */
2008: new_pte |= PG_VALID;
2009: if (prot & VM_PROT_WRITE)
2010: new_pte |= PG_WRITE;
2011:
2012: /* ...and finally the page-frame number. */
2013: new_pte |= PA_PGNUM(pa);
2014:
2015: /*
2016: * treatment varies significantly:
1.3 fredette 2017: * kernel ptes are always in the mmu
1.1 fredette 2018: * user ptes may not necessarily? be in the mmu. pmap may not
2019: * be in the mmu either.
2020: *
2021: */
1.2 fredette 2022: s = splvm();
1.1 fredette 2023: if (pmap == kernel_pmap) {
2024: new_pte |= PG_SYSTEM;
2025: pmap_enter_kernel(va, new_pte, wired);
2026: } else {
2027: pmap_enter_user(pmap, va, new_pte, wired);
2028: }
2029: splx(s);
1.16 fredette 2030: return 0;
1.1 fredette 2031: }
2032:
/*
 * Enter a mapping in the kernel pmap at pgva with the fully-formed
 * PTE new_pte (PG_VALID|PG_SYSTEM already set).  Allocates a PMEG
 * for the segment if needed, reconciles any existing mapping at
 * the address (mod/ref harvesting, pv unlink), and maintains the
 * VAC non-cached state via the pv list.  Runs in the kernel
 * context; restores the caller's context on exit.
 */
static void
pmap_enter_kernel(pgva, new_pte, wired)
	vaddr_t pgva;
	int new_pte;
	boolean_t wired;
{
	pmap_t pmap = kernel_pmap;
	pmeg_t pmegp;
	int do_pv, old_pte, sme;
	vaddr_t segva;
	int saved_ctx;

	/*
	 * Notes:
	 * - may need to allocate additional pmegs, which must not be
	 *   stealable from the kernel;
	 * - new pmegs are mapped in the kernel context, and the rest
	 *   of the pmeg must be null;
	 * - pv handling and caching issues apply, and changes to
	 *   existing mappings must be handled too.
	 */
	saved_ctx = get_context();
	set_context(KERNEL_CONTEXT);

	/*
	 * In detail:
	 *
	 * (a) lock pmap
	 * (b) Is the VA in an already mapped segment?  If so, look to
	 *     see if that VA address is "valid".  If it is, then the
	 *     action is a change to an existing pte.
	 * (c) if not mapped segment, need to allocate pmeg
	 * (d) if adding pte entry or changing physaddr of existing one,
	 *     use pv_stuff; for change, pmap_remove() possibly.
	 * (e) change/add pte
	 */

#ifdef DIAGNOSTIC
	if ((pgva < virtual_avail) || (pgva >= DVMA_MAP_END))
		panic("pmap_enter_kernel: bad va=0x%lx", pgva);
	if ((new_pte & (PG_VALID | PG_SYSTEM)) != (PG_VALID | PG_SYSTEM))
		panic("pmap_enter_kernel: bad pte");
#endif

	if (pgva >= DVMA_MAP_BASE) {
		/* This is DVMA space.  Always want it non-cached. */
		new_pte |= PG_NC;
	}

	segva = m68k_trunc_seg(pgva);
	do_pv = TRUE;

	/* Do we have a PMEG? */
	sme = get_segmap(segva);
	if (sme != SEGINV) {
		/* Found a PMEG in the segmap.  Cool. */
		pmegp = pmeg_p(sme);
#ifdef DIAGNOSTIC
		/* Make sure it is the right PMEG. */
		if (sme != pmap->pm_segmap[VA_SEGNUM(segva)])
			panic("pmap_enter_kernel: wrong sme at VA=0x%lx", segva);
		/* Make sure it is ours. */
		if (pmegp->pmeg_owner != pmap)
			panic("pmap_enter_kernel: MMU has bad pmeg 0x%x", sme);
#endif
	} else {
		/* No PMEG in the segmap.  Have to allocate one. */
		pmegp = pmeg_allocate(pmap, segva);
		sme = pmegp->pmeg_index;
		pmap->pm_segmap[VA_SEGNUM(segva)] = sme;
		set_segmap(segva, sme);
#ifdef PMAP_DEBUG
		pmeg_verify_empty(segva);
		if (pmap_debug & PMD_SEGMAP) {
			printf("pmap: set_segmap pmap=%p va=0x%lx sme=0x%x (ek)\n",
			    pmap, segva, sme);
		}
#endif
		/* There are no existing mappings to deal with. */
		old_pte = 0;
		goto add_pte;
	}

	/*
	 * We have a PMEG.  Is the VA already mapped to somewhere?
	 *	(a) if so, is it same pa? (really a protection change)
	 *	(b) if not same pa, then we have to unlink from old pa
	 */
	old_pte = get_pte(pgva);
	if ((old_pte & PG_VALID) == 0)
		goto add_pte;

	/* Have valid translation.  Flush cache before changing it. */
#ifdef HAVECACHE
	if (cache_size) {
		cache_flush_page(pgva);
		/* Get fresh mod/ref bits from write-back. */
		old_pte = get_pte(pgva);
	}
#endif

	/* XXX - removing valid page here, way lame... -glass */
	pmegp->pmeg_vpages--;

	if (!IS_MAIN_MEM(old_pte)) {
		/* Was not main memory, so no pv_entry for it. */
		goto add_pte;
	}

	/* Old mapping was main memory.  Save mod/ref bits. */
	save_modref_bits(old_pte);

	/*
	 * If not changing the type or pfnum then re-use pv_entry.
	 * Note we get here only with old_pte having PGT_OBMEM.
	 */
	if ((old_pte & (PG_TYPE|PG_FRAME)) == (new_pte & (PG_TYPE|PG_FRAME))) {
		do_pv = FALSE;		/* re-use pv_entry */
		new_pte |= (old_pte & PG_NC);
		goto add_pte;
	}

	/* OK, different type or PA, have to kill old pv_entry. */
	pv_unlink(pmap, old_pte, pgva);

add_pte:	/* can be destructive */
	pmeg_set_wiring(pmegp, pgva, wired);

	/* Anything but MAIN_MEM is mapped non-cached. */
	if (!IS_MAIN_MEM(new_pte)) {
		new_pte |= PG_NC;
		do_pv = FALSE;
	}
	if (do_pv == TRUE) {
		/* Linking into the pv list may force the page non-cached. */
		if (pv_link(pmap, new_pte, pgva) & PV_NC)
			new_pte |= PG_NC;
	}
#ifdef PMAP_DEBUG
	if ((pmap_debug & PMD_SETPTE) || (pgva == pmap_db_watchva)) {
		printf("pmap: set_pte pmap=%p va=0x%lx old=0x%x new=0x%x (ek)\n",
		    pmap, pgva, old_pte, new_pte);
	}
#endif
	/* cache flush done above */
	set_pte(pgva, new_pte);
	set_context(saved_ctx);
	pmegp->pmeg_vpages++;
}
2179:
2180:
/*
 * Enter a user-space mapping at pgva with the given (already built)
 * PTE.  Only works on the current pmap; mappings for a non-current
 * pmap are simply dropped and faulted in later.  Wiring of user
 * pages is ignored (see XXX notes below).
 */
static void
pmap_enter_user(pmap, pgva, new_pte, wired)
	pmap_t pmap;
	vaddr_t pgva;
	int new_pte;
	boolean_t wired;
{
	int do_pv, old_pte, sme;
	vaddr_t segva;
	pmeg_t pmegp;

#ifdef DIAGNOSTIC
	if (pgva >= VM_MAXUSER_ADDRESS)
		panic("pmap_enter_user: bad va=0x%lx", pgva);
	if ((new_pte & (PG_VALID | PG_SYSTEM)) != PG_VALID)
		panic("pmap_enter_user: bad pte");
#endif
#ifdef PMAP_DEBUG
	/*
	 * Some user pages are wired here, and a later
	 * call to pmap_unwire() will unwire them.
	 * XXX - Need a separate list for wired user pmegs
	 * so they can not be stolen from the active list.
	 * XXX - Note: vm_fault.c assumes pmap_extract will
	 * work on wired mappings, so must preserve them...
	 * XXX: Maybe keep a list of wired PMEGs?
	 */
	if (wired && (pmap_debug & PMD_WIRING)) {
		db_printf("pmap_enter_user: attempt to wire user page, ignored\n");
		Debugger();
	}
#endif

	/* Validate this assumption. */
	if (pmap != current_pmap()) {
#ifdef PMAP_DEBUG
		/* Apparently, this never happens. */
		db_printf("pmap_enter_user: not curlwp\n");
		Debugger();
#endif
		/* Just throw it out (fault it in later). */
		/* XXX: But must remember it if wired... */
		return;
	}

	segva = m68k_trunc_seg(pgva);
	do_pv = TRUE;

	/*
	 * If this pmap was sharing the "empty" context,
	 * allocate a real context for its exclusive use.
	 */
	if (!has_context(pmap)) {
		context_allocate(pmap);
#ifdef PMAP_DEBUG
		if (pmap_debug & PMD_CONTEXT)
			printf("pmap_enter(%p) got context %d\n",
				   pmap, pmap->pm_ctxnum);
#endif
		set_context(pmap->pm_ctxnum);
	} else {
#ifdef PMAP_DEBUG
		/* Make sure context is correct. */
		if (pmap->pm_ctxnum != get_context()) {
			db_printf("pmap_enter_user: wrong context\n");
			Debugger();
			/* XXX: OK to proceed? */
			set_context(pmap->pm_ctxnum);
		}
#endif
	}

	/*
	 * We have a context. Do we have a PMEG?
	 */
	sme = get_segmap(segva);
	if (sme != SEGINV) {
		/* Found a PMEG in the segmap. Cool. */
		pmegp = pmeg_p(sme);
#ifdef DIAGNOSTIC
		/* Make sure it is the right PMEG. */
		if (sme != pmap->pm_segmap[VA_SEGNUM(segva)])
			panic("pmap_enter_user: wrong sme at VA=0x%lx", segva);
		/* Make sure it is ours. */
		if (pmegp->pmeg_owner != pmap)
			panic("pmap_enter_user: MMU has bad pmeg 0x%x", sme);
#endif
	} else {
		/* Not in the segmap. Try the S/W cache. */
		pmegp = pmeg_cache(pmap, segva);
		if (pmegp) {
			/* Found PMEG in cache. Just reload it. */
			sme = pmegp->pmeg_index;
			set_segmap(segva, sme);
		} else {
			/* PMEG not in cache, so allocate one. */
			pmegp = pmeg_allocate(pmap, segva);
			sme = pmegp->pmeg_index;
			pmap->pm_segmap[VA_SEGNUM(segva)] = sme;
			set_segmap(segva, sme);
#ifdef PMAP_DEBUG
			pmeg_verify_empty(segva);
#endif
		}
#ifdef PMAP_DEBUG
		if (pmap_debug & PMD_SEGMAP) {
			printf("pmap: set_segmap pmap=%p va=0x%lx sme=0x%x (eu)\n",
				   pmap, segva, sme);
		}
#endif
	}

	/*
	 * We have a PMEG. Is the VA already mapped to somewhere?
	 *	(a) if so, is it same pa? (really a protection change)
	 *	(b) if not same pa, then we have to unlink from old pa
	 */
	old_pte = get_pte(pgva);
	if ((old_pte & PG_VALID) == 0)
		goto add_pte;

	/* Have valid translation.  Flush cache before changing it. */
#ifdef HAVECACHE
	if (cache_size) {
		cache_flush_page(pgva);
		/* Get fresh mod/ref bits from write-back. */
		old_pte = get_pte(pgva);
	}
#endif

	/* XXX - removing valid page here, way lame... -glass */
	pmegp->pmeg_vpages--;

	if (!IS_MAIN_MEM(old_pte)) {
		/* Was not main memory, so no pv_entry for it. */
		goto add_pte;
	}

	/* Old mapping was main memory.  Save mod/ref bits. */
	save_modref_bits(old_pte);

	/*
	 * If not changing the type or pfnum then re-use pv_entry.
	 * Note we get here only with old_pte having PGT_OBMEM.
	 */
	if ((old_pte & (PG_TYPE|PG_FRAME)) == (new_pte & (PG_TYPE|PG_FRAME))) {
		do_pv = FALSE;		/* re-use pv_entry */
		new_pte |= (old_pte & PG_NC);
		goto add_pte;
	}

	/* OK, different type or PA, have to kill old pv_entry. */
	pv_unlink(pmap, old_pte, pgva);

add_pte:
	/* XXX - Wiring changes on user pmaps? */
	/* pmeg_set_wiring(pmegp, pgva, wired); */

	/* Anything but MAIN_MEM is mapped non-cached. */
	if (!IS_MAIN_MEM(new_pte)) {
		new_pte |= PG_NC;
		do_pv = FALSE;
	}
	if (do_pv == TRUE) {
		/* pv_link may force the mapping non-cacheable (aliasing). */
		if (pv_link(pmap, new_pte, pgva) & PV_NC)
			new_pte |= PG_NC;
	}
#ifdef PMAP_DEBUG
	if ((pmap_debug & PMD_SETPTE) || (pgva == pmap_db_watchva)) {
		printf("pmap: set_pte pmap=%p va=0x%lx old=0x%x new=0x%x (eu)\n",
			   pmap, pgva, old_pte, new_pte);
	}
#endif
	/* cache flush done above */
	set_pte(pgva, new_pte);
	pmegp->pmeg_vpages++;
}
2358:
2359: void
2360: pmap_kenter_pa(va, pa, prot)
2361: vaddr_t va;
2362: paddr_t pa;
2363: vm_prot_t prot;
2364: {
1.16 fredette 2365: int new_pte, s;
2366: pmap_t pmap = kernel_pmap;
2367: pmeg_t pmegp;
2368: int sme;
2369: vaddr_t segva;
2370: int saved_ctx;
2371:
2372: #ifdef PMAP_DEBUG
2373: if ((pmap_debug & PMD_ENTER) ||
2374: (va == pmap_db_watchva))
2375: printf("pmap_kenter_pa(0x%lx, 0x%lx, 0x%x)\n",
2376: va, pa, prot);
2377: #endif
2378:
2379: /* Get page-type bits from low part of the PA... */
2380: new_pte = (pa & PMAP_SPEC) << PG_MOD_SHIFT;
2381:
2382: /* ...now the valid and writable bits... */
2383: new_pte |= PG_SYSTEM|PG_VALID;
2384: if (prot & VM_PROT_WRITE)
2385: new_pte |= PG_WRITE;
2386:
2387: /* ...and finally the page-frame number. */
2388: new_pte |= PA_PGNUM(pa);
2389:
2390: /*
2391: * need to handle possibly allocating additional pmegs
2392: * need to make sure they cant be stolen from the kernel;
2393: * map any new pmegs into context zero, make sure rest of pmeg is null;
2394: * deal with pv_stuff; possibly caching problems;
2395: * must also deal with changes too.
2396: */
2397: saved_ctx = get_context();
2398: set_context(KERNEL_CONTEXT);
2399:
2400: /*
2401: * In detail:
2402: *
2403: * (a) lock pmap
2404: * (b) Is the VA in a already mapped segment, if so
2405: * look to see if that VA address is "valid". If it is, then
2406: * action is a change to an existing pte
2407: * (c) if not mapped segment, need to allocate pmeg
2408: * (d) change/add pte
2409: */
2410:
2411: #ifdef DIAGNOSTIC
2412: if ((va < virtual_avail) || (va >= DVMA_MAP_END))
2413: panic("pmap_kenter_pa: bad va=0x%lx", va);
2414: #endif
2415:
2416: if (va >= DVMA_MAP_BASE) {
2417: /* This is DVMA space. Always want it non-cached. */
2418: new_pte |= PG_NC;
2419: }
2420:
2421: segva = m68k_trunc_seg(va);
2422:
2423: s = splvm();
2424:
2425: /* Do we have a PMEG? */
2426: sme = get_segmap(segva);
2427: if (sme != SEGINV) {
2428: KASSERT((get_pte(va) & PG_VALID) == 0);
2429:
2430: /* Found a PMEG in the segmap. Cool. */
2431: pmegp = pmeg_p(sme);
2432: #ifdef DIAGNOSTIC
2433: /* Make sure it is the right PMEG. */
2434: if (sme != pmap->pm_segmap[VA_SEGNUM(segva)])
2435: panic("pmap_kenter_pa: wrong sme at VA=0x%lx", segva);
2436: /* Make sure it is ours. */
2437: if (pmegp->pmeg_owner != pmap)
2438: panic("pmap_kenter_pa: MMU has bad pmeg 0x%x", sme);
2439: #endif
2440: } else {
2441:
2442: /* No PMEG in the segmap. Have to allocate one. */
2443: pmegp = pmeg_allocate(pmap, segva);
2444: sme = pmegp->pmeg_index;
2445: pmap->pm_segmap[VA_SEGNUM(segva)] = sme;
2446: set_segmap(segva, sme);
2447: #ifdef PMAP_DEBUG
2448: pmeg_verify_empty(segva);
2449: if (pmap_debug & PMD_SEGMAP) {
2450: printf("pmap: set_segmap pmap=%p va=0x%lx sme=0x%x (ek)\n",
2451: pmap, segva, sme);
2452: }
2453: #endif
2454: }
2455:
2456: pmeg_set_wiring(pmegp, va, TRUE);
2457:
2458: /* Anything but MAIN_MEM is mapped non-cached. */
2459: if (!IS_MAIN_MEM(new_pte)) {
2460: new_pte |= PG_NC;
2461: }
2462: #ifdef PMAP_DEBUG
2463: if ((pmap_debug & PMD_SETPTE) || (va == pmap_db_watchva)) {
2464: printf("pmap: set_pte pmap=%p va=0x%lx old=0x%x new=0x%x (ek)\n",
2465: pmap, va, old_pte, new_pte);
2466: }
2467: #endif
2468: /* cache flush done above */
2469: set_pte(va, new_pte);
2470: set_context(saved_ctx);
2471: pmegp->pmeg_vpages++;
2472: splx(s);
1.1 fredette 2473: }
2474:
/*
 * Remove [va, va+len) from the kernel map, segment by segment,
 * releasing any PMEG that becomes empty.  Counterpart of
 * pmap_kenter_pa(); runs entirely in the kernel context.
 */
void
pmap_kremove(va, len)
	vaddr_t va;
	vsize_t len;
{
	pmap_t pmap = kernel_pmap;
	vaddr_t eva, neva, pgva, segva, segnum;
	int pte, sme;
	pmeg_t pmegp;
#ifdef	HAVECACHE
	int flush_by_page = 0;
#endif
	int s;
	int saved_ctx;

	s = splvm();
	saved_ctx = get_context();
	set_context(KERNEL_CONTEXT);
	segnum = VA_SEGNUM(va);
	/* Walk one MMU segment (NBSG bytes) per iteration. */
	for (eva = va + len; va < eva; va = neva, segnum++) {
		neva = m68k_trunc_seg(va) + NBSG;
		if (neva > eva) {
			neva = eva;
		}
		if (pmap->pm_segmap[segnum] == SEGINV) {
			continue;
		}

		segva = m68k_trunc_seg(va);
		sme = get_segmap(segva);
		pmegp = pmeg_p(sme);

#ifdef	HAVECACHE
		if (cache_size) {

			/*
			 * If the range to be removed is larger than the cache,
			 * it will be cheaper to flush this segment entirely.
			 *
			 * NOTE(review): flush_by_page is never reset between
			 * segment iterations, so once set, all later segments
			 * are also flushed page-by-page.  Conservative (page
			 * flushes are always safe) but possibly unintended.
			 */

			if (cache_size < (eva - va)) {
				/* cheaper to flush whole segment */
				cache_flush_segment(segva);
			} else {
				flush_by_page = 1;
			}
		}
#endif

		/* Invalidate the PTEs in the given range. */
		for (pgva = va; pgva < neva; pgva += PAGE_SIZE) {
			pte = get_pte(pgva);
			if (pte & PG_VALID) {
#ifdef	HAVECACHE
				if (flush_by_page) {
					cache_flush_page(pgva);
					/* Get fresh mod/ref bits from write-back. */
					pte = get_pte(pgva);
				}
#endif
#ifdef	PMAP_DEBUG
				if ((pmap_debug & PMD_SETPTE) || (pgva == pmap_db_watchva)) {
					printf("pmap: set_pte pmap=%p va=0x%lx"
						   " old=0x%x new=0x%x (rrmmu)\n",
						   pmap, pgva, pte, PG_INVAL);
				}
#endif
				set_pte(pgva, PG_INVAL);
				KASSERT(pmegp->pmeg_vpages > 0);
				pmegp->pmeg_vpages--;
			}
		}
		KASSERT(pmegp->pmeg_vpages >= 0);
		if (pmegp->pmeg_vpages == 0) {
			/* We are done with this pmeg. */
#ifdef	PMAP_DEBUG
			if (is_pmeg_wired(pmegp)) {
				if (pmap_debug & PMD_WIRING) {
					db_printf("pmap: removing wired pmeg: %p\n", pmegp);
					Debugger();
				}
			}
			if (pmap_debug & PMD_SEGMAP) {
				printf("pmap: set_segmap ctx=%d v=0x%lx old=0x%x new=ff (rm)\n",
					   pmap->pm_ctxnum, segva, pmegp->pmeg_index);
			}
			pmeg_verify_empty(segva);
#endif

			/* Remove it from the MMU. */
			set_segmap(segva, SEGINV);
			pmap->pm_segmap[VA_SEGNUM(segva)] = SEGINV;

			/* Now, put it on the free list. */
			pmeg_free(pmegp);
		}
	}
	set_context(saved_ctx);
	splx(s);
}
2575:
2576:
2577: /*
2578: * The trap handler calls this so we can try to resolve
2579: * user-level faults by reloading a PMEG.
 * If that does not produce a valid mapping,
2581: * call vm_fault as usual.
2582: *
2583: * XXX: Merge this with the next function?
2584: */
2585: int
2586: _pmap_fault(map, va, ftype)
1.11 chs 2587: struct vm_map *map;
1.16 fredette 2588: vaddr_t va;
1.1 fredette 2589: vm_prot_t ftype;
2590: {
2591: pmap_t pmap;
2592: int rv;
2593:
2594: pmap = vm_map_pmap(map);
2595: if (map == kernel_map) {
2596: /* Do not allow faults below the "managed" space. */
2597: if (va < virtual_avail) {
2598: /*
2599: * Most pages below virtual_avail are read-only,
2600: * so I will assume it is a protection failure.
2601: */
1.2 fredette 2602: return EACCES;
1.1 fredette 2603: }
2604: } else {
2605: /* User map. Try reload shortcut. */
2606: if (pmap_fault_reload(pmap, va, ftype))
1.2 fredette 2607: return 0;
1.1 fredette 2608: }
2609: rv = uvm_fault(map, va, 0, ftype);
2610:
2611: #ifdef PMAP_DEBUG
2612: if (pmap_debug & PMD_FAULT) {
2613: printf("pmap_fault(%p, 0x%lx, 0x%x) -> 0x%x\n",
2614: map, va, ftype, rv);
2615: }
2616: #endif
2617:
2618: return (rv);
2619: }
2620:
2621: /*
2622: * This is a shortcut used by the trap handler to
2623: * reload PMEGs into a user segmap without calling
2624: * the actual VM fault handler. Returns TRUE if:
2625: * the PMEG was reloaded, and
2626: * it has a valid PTE at va.
2627: * Otherwise return zero and let VM code handle it.
2628: */
int
pmap_fault_reload(pmap, pgva, ftype)
	pmap_t pmap;
	vaddr_t pgva;
	vm_prot_t ftype;
{
	int rv, s, pte, chkpte, sme;
	vaddr_t segva;
	pmeg_t pmegp;

	/* Only user addresses can be handled here. */
	if (pgva >= VM_MAXUSER_ADDRESS)
		return (0);
	if (pmap->pm_segmap == NULL) {
#ifdef	PMAP_DEBUG
		db_printf("pmap_fault_reload: null segmap\n");
		Debugger();
#endif
		return (0);
	}

	/* Short-cut using the S/W segmap. */
	if (pmap->pm_segmap[VA_SEGNUM(pgva)] == SEGINV)
		return (0);

	segva = m68k_trunc_seg(pgva);
	/* The reloaded PTE must be valid, and writable for write faults. */
	chkpte = PG_VALID;
	if (ftype & VM_PROT_WRITE)
		chkpte |= PG_WRITE;
	rv = 0;

	s = splvm();

	/*
	 * Given that we faulted on a user-space address, we will
	 * probably need a context.  Get a context now so we can
	 * try to resolve the fault with a segmap reload.
	 */
	if (!has_context(pmap)) {
		context_allocate(pmap);
#ifdef PMAP_DEBUG
		if (pmap_debug & PMD_CONTEXT)
			printf("pmap_fault(%p) got context %d\n",
				   pmap, pmap->pm_ctxnum);
#endif
		set_context(pmap->pm_ctxnum);
	} else {
#ifdef	PMAP_DEBUG
		/* Make sure context is correct. */
		if (pmap->pm_ctxnum != get_context()) {
			db_printf("pmap_fault_reload: wrong context\n");
			Debugger();
			/* XXX: OK to proceed? */
			set_context(pmap->pm_ctxnum);
		}
#endif
	}

	/*
	 * If the H/W segmap already has a PMEG, the fault was not a
	 * missing segment, so leave rv == 0 and let uvm_fault() run.
	 */
	sme = get_segmap(segva);
	if (sme == SEGINV) {
		/* See if there is something to reload. */
		pmegp = pmeg_cache(pmap, segva);
		if (pmegp) {
			/* Found one!  OK, reload it. */
			pmap_stats.ps_pmeg_faultin++;
			sme = pmegp->pmeg_index;
			set_segmap(segva, sme);
			pte = get_pte(pgva);
			if (pte & chkpte)
				rv = 1;
		}
	}

	splx(s);
	return (rv);
}
2704:
2705:
2706: /*
2707: * Clear the modify bit for the given physical page.
2708: */
2709: boolean_t
2710: pmap_clear_modify(pg)
2711: struct vm_page *pg;
2712: {
2713: paddr_t pa = VM_PAGE_TO_PHYS(pg);
2714: pv_entry_t *head;
2715: u_char *pv_flags;
2716: int s;
2717: boolean_t rv;
2718:
2719: pv_flags = pa_to_pvflags(pa);
2720: head = pa_to_pvhead(pa);
2721:
1.2 fredette 2722: s = splvm();
1.1 fredette 2723: *pv_flags |= pv_syncflags(*head);
2724: rv = *pv_flags & PV_MOD;
2725: *pv_flags &= ~PV_MOD;
2726: splx(s);
2727: return rv;
2728: }
2729:
2730: /*
2731: * Tell whether the given physical page has been modified.
2732: */
1.16 fredette 2733: boolean_t
1.1 fredette 2734: pmap_is_modified(pg)
2735: struct vm_page *pg;
2736: {
2737: paddr_t pa = VM_PAGE_TO_PHYS(pg);
2738: pv_entry_t *head;
2739: u_char *pv_flags;
1.16 fredette 2740: int s;
2741: boolean_t rv;
1.1 fredette 2742:
2743: pv_flags = pa_to_pvflags(pa);
2744: head = pa_to_pvhead(pa);
2745:
1.2 fredette 2746: s = splvm();
1.1 fredette 2747: if ((*pv_flags & PV_MOD) == 0)
2748: *pv_flags |= pv_syncflags(*head);
2749: rv = (*pv_flags & PV_MOD);
2750: splx(s);
2751: return (rv);
2752: }
2753:
2754: /*
2755: * Clear the reference bit for the given physical page.
2756: * It's OK to just remove mappings if that's easier.
2757: */
2758: boolean_t
2759: pmap_clear_reference(pg)
2760: struct vm_page *pg;
2761: {
2762: paddr_t pa = VM_PAGE_TO_PHYS(pg);
2763: pv_entry_t *head;
2764: u_char *pv_flags;
2765: int s;
2766: boolean_t rv;
2767:
2768: pv_flags = pa_to_pvflags(pa);
2769: head = pa_to_pvhead(pa);
2770:
1.2 fredette 2771: s = splvm();
1.1 fredette 2772: *pv_flags |= pv_syncflags(*head);
2773: rv = *pv_flags & PV_REF;
2774: *pv_flags &= ~PV_REF;
2775: splx(s);
2776: return rv;
2777: }
2778:
2779: /*
2780: * Tell whether the given physical page has been referenced.
2781: * It's OK to just return FALSE if page is not mapped.
2782: */
2783: boolean_t
2784: pmap_is_referenced(pg)
2785: struct vm_page *pg;
2786: {
2787: paddr_t pa = VM_PAGE_TO_PHYS(pg);
2788: pv_entry_t *head;
2789: u_char *pv_flags;
2790: int s;
2791: boolean_t rv;
2792:
2793: pv_flags = pa_to_pvflags(pa);
2794: head = pa_to_pvhead(pa);
2795:
1.2 fredette 2796: s = splvm();
1.1 fredette 2797: if ((*pv_flags & PV_REF) == 0)
2798: *pv_flags |= pv_syncflags(*head);
2799: rv = (*pv_flags & PV_REF);
2800: splx(s);
2801: return (rv);
2802: }
2803:
2804:
2805: /*
2806: * This is called by locore.s:cpu_switch() when it is
2807: * switching to a new process. Load new translations.
2808: */
2809: void
2810: _pmap_switch(pmap)
2811: pmap_t pmap;
2812: {
2813:
2814: /*
2815: * Since we maintain completely separate user and kernel address
2816: * spaces, whenever we switch to a process, we need to make sure
2817: * that it has a context allocated.
2818: */
2819: if (!has_context(pmap)) {
2820: context_allocate(pmap);
2821: #ifdef PMAP_DEBUG
2822: if (pmap_debug & PMD_CONTEXT)
2823: printf("_pmap_switch(%p) got context %d\n",
2824: pmap, pmap->pm_ctxnum);
2825: #endif
2826: }
2827: set_context(pmap->pm_ctxnum);
2828: }
2829:
2830: /*
2831: * Exported version of pmap_activate(). This is called from the
2832: * machine-independent VM code when a process is given a new pmap.
1.19 thorpej 2833: * If (p == curlwp) do like cpu_switch would do; otherwise just
1.1 fredette 2834: * take this as notification that the process has a new pmap.
2835: */
2836: void
1.19 thorpej 2837: pmap_activate(l)
2838: struct lwp *l;
1.1 fredette 2839: {
1.19 thorpej 2840: pmap_t pmap = l->l_proc->p_vmspace->vm_map.pmap;
1.1 fredette 2841:
1.19 thorpej 2842: if (curlwp && l->l_proc == curproc) {
1.1 fredette 2843: _pmap_switch(pmap);
2844: }
2845: }
2846:
2847: /*
2848: * Deactivate the address space of the specified process.
2849: */
void
pmap_deactivate(l)
	struct lwp *l;
{
	/*
	 * Nothing to do.  The next pmap_activate()/_pmap_switch()
	 * simply loads the new pmap's context.
	 */
}
2856:
2857: /*
2858: * Routine: pmap_unwire
2859: * Function: Clear the wired attribute for a map/virtual-address
2860: * pair.
2861: * In/out conditions:
2862: * The mapping must already exist in the pmap.
2863: */
void
pmap_unwire(pmap, va)
	pmap_t pmap;
	vaddr_t va;
{
	int s, sme;
	int wiremask, ptenum;
	pmeg_t pmegp;
	int saved_ctx;

#ifdef PMAP_DEBUG
	if (pmap_debug & PMD_WIRING)
		printf("pmap_unwire(pmap=%p, va=0x%lx)\n",
			   pmap, va);
#endif
	/*
	 * We are asked to unwire pages that were wired when
	 * pmap_enter() was called and we ignored wiring.
	 * (VM code appears to wire a stack page during fork.)
	 * So only kernel mappings carry real wiring state.
	 */
	if (pmap != kernel_pmap) {
#ifdef PMAP_DEBUG
		if (pmap_debug & PMD_WIRING) {
			db_printf("  (user pmap -- ignored)\n");
			Debugger();
		}
#endif
		return;
	}

	/* One wiring bit per PTE slot within the PMEG. */
	ptenum = VA_PTE_NUM(va);
	wiremask = 1 << ptenum;

	s = splvm();
	/* The kernel segmap must be read in the kernel context. */
	saved_ctx = get_context();
	set_context(KERNEL_CONTEXT);
	sme = get_segmap(va);
	set_context(saved_ctx);
	pmegp = pmeg_p(sme);
	pmegp->pmeg_wired &= ~wiremask;
	splx(s);
}
2906:
2907: /*
2908: * Copy the range specified by src_addr/len
2909: * from the source map to the range dst_addr/len
2910: * in the destination map.
2911: *
2912: * This routine is only advisory and need not do anything.
2913: */
void
pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
	pmap_t dst_pmap;
	pmap_t src_pmap;
	vaddr_t dst_addr;
	vsize_t len;
	vaddr_t src_addr;
{
	/* Advisory only; intentionally does nothing. */
}
2923:
2924: /*
1.3 fredette 2925: * This extracts the PMEG associated with the given map/virtual
2926: * address pair. Returns SEGINV if VA not valid.
2927: */
2928: int
2929: _pmap_extract_pmeg(pmap, va)
2930: pmap_t pmap;
1.16 fredette 2931: vaddr_t va;
1.3 fredette 2932: {
2933: int s, saved_ctx, segnum, sme;
2934:
2935: s = splvm();
2936:
2937: if (pmap == kernel_pmap) {
2938: saved_ctx = get_context();
2939: set_context(KERNEL_CONTEXT);
2940: sme = get_segmap(va);
2941: set_context(saved_ctx);
2942: } else {
2943: /* This is rare, so do it the easy way. */
2944: segnum = VA_SEGNUM(va);
2945: sme = pmap->pm_segmap[segnum];
2946: }
2947:
2948: splx(s);
2949: return (sme);
2950: }
2951:
2952: /*
1.1 fredette 2953: * Routine: pmap_extract
2954: * Function:
2955: * Extract the physical page address associated
2956: * with the given map/virtual_address pair.
2957: * Returns zero if VA not valid.
2958: */
boolean_t
pmap_extract(pmap, va, pap)
	pmap_t pmap;
	vaddr_t va;
	paddr_t *pap;
{
	int s, sme, segnum, ptenum, pte;
	paddr_t pa;
	int saved_ctx;

	pte = 0;
	s = splvm();
	if (pmap == kernel_pmap) {
		/* Kernel: read the live PTE via the H/W maps. */
		saved_ctx = get_context();
		set_context(KERNEL_CONTEXT);
		sme = get_segmap(va);
		if (sme != SEGINV)
			pte = get_pte(va);
		set_context(saved_ctx);
	} else {
		/* This is rare, so do it the easy way. */
		segnum = VA_SEGNUM(va);
		sme = pmap->pm_segmap[segnum];
		if (sme != SEGINV) {
			ptenum = VA_PTE_NUM(va);
			pte = get_pte_pmeg(sme, ptenum);
		}
	}
	splx(s);

	/* pte == 0 here if the segment was invalid. */
	if ((pte & PG_VALID) == 0) {
#ifdef PMAP_DEBUG
		db_printf("pmap_extract: invalid va=0x%lx\n", va);
		Debugger();
#endif
		return (FALSE);
	}
	pa = PG_PA(pte);
#ifdef	DIAGNOSTIC
	/* Only main-memory (type 0) pages have meaningful PAs. */
	if (pte & PG_TYPE) {
		panic("pmap_extract: not main mem, va=0x%lx", va);
	}
#endif
	if (pap != NULL)
		*pap = pa;
	return (TRUE);
}
3006:
3007:
3008: /*
3009: * pmap_page_protect:
3010: *
3011: * Lower the permission for all mappings to a given page.
3012: */
3013: void
3014: pmap_page_protect(pg, prot)
3015: struct vm_page *pg;
3016: vm_prot_t prot;
3017: {
3018: paddr_t pa = VM_PAGE_TO_PHYS(pg);
3019: int s;
3020:
1.2 fredette 3021: s = splvm();
1.1 fredette 3022: #ifdef PMAP_DEBUG
3023: if (pmap_debug & PMD_PROTECT)
3024: printf("pmap_page_protect(0x%lx, 0x%lx)\n", pa, prot);
3025: #endif
3026: switch (prot) {
3027: case VM_PROT_ALL:
3028: break;
3029: case VM_PROT_READ:
3030: case VM_PROT_READ|VM_PROT_EXECUTE:
3031: pv_changepte(pa, 0, PG_WRITE);
3032: break;
3033: default:
3034: /* remove mapping for all pmaps that have it */
3035: pv_remove_all(pa);
3036: break;
3037: }
3038: splx(s);
3039: }
3040:
3041: /*
3042: * Initialize a preallocated and zeroed pmap structure,
3043: * such as one in a vmspace structure.
3044: */
void
pmap_pinit(pmap)
	pmap_t pmap;
{
	/* Shared bookkeeping first, then the user-space segmap setup. */
	pmap_common_init(pmap);
	pmap_user_init(pmap);
}
3052:
3053: /*
3054: * Reduce the permissions on the specified
3055: * range of this map as requested.
3056: * (Make pages read-only.)
3057: */
void
pmap_protect(pmap, sva, eva, prot)
	pmap_t pmap;
	vaddr_t sva, eva;
	vm_prot_t prot;
{
	vaddr_t va, neva;
	int segnum;

	/* If leaving writable, nothing to do. */
	if (prot & VM_PROT_WRITE)
		return;

	/* If removing all permissions, just unmap. */
	if ((prot & VM_PROT_READ) == 0) {
		pmap_remove(pmap, sva, eva);
		return;
	}

#ifdef	PMAP_DEBUG
	if ((pmap_debug & PMD_PROTECT) ||
		((sva <= pmap_db_watchva && eva > pmap_db_watchva)))
		printf("pmap_protect(%p, 0x%lx, 0x%lx)\n", pmap, sva, eva);
#endif

	/* Kernel ranges stay in managed space; user ranges below the limit. */
	KASSERT((pmap == kernel_pmap) ?
			sva >= virtual_avail && eva < DVMA_MAP_END :
			eva <= VM_MAXUSER_ADDRESS);
	va = sva;
	segnum = VA_SEGNUM(va);
	/* Walk the range one MMU segment at a time. */
	while (va < eva) {
		neva = m68k_trunc_seg(va) + NBSG;
		if (neva > eva)
			neva = eva;
		if (pmap->pm_segmap[segnum] != SEGINV)
			pmap_protect1(pmap, va, neva);
		va = neva;
		segnum++;
	}
}
3098:
3099: /*
3100: * Remove write permissions in given range.
3101: * (guaranteed to be within one segment)
3102: * similar to pmap_remove1()
3103: */
void
pmap_protect1(pmap, sva, eva)
	pmap_t pmap;
	vaddr_t sva, eva;
{
	int old_ctx, s, sme;
	boolean_t in_ctx;

	s = splvm();

#ifdef	DIAGNOSTIC
	if (m68k_trunc_seg(sva) != m68k_trunc_seg(eva-1))
		panic("pmap_protect1: bad range!");
#endif

	if (pmap == kernel_pmap) {
		/* Kernel mappings live in the kernel context. */
		old_ctx = get_context();
		set_context(KERNEL_CONTEXT);
		sme = get_segmap(sva);
		if (sme != SEGINV)
			pmap_protect_mmu(pmap, sva, eva);
		set_context(old_ctx);
		goto out;
	}
	/* It is a user pmap. */

	/* There is a PMEG, but maybe not active. */
	old_ctx = INVALID_CONTEXT;
	in_ctx = FALSE;
	if (has_context(pmap)) {
		/* Temporary context change. */
		old_ctx = get_context();
		set_context(pmap->pm_ctxnum);
		sme = get_segmap(sva);
		if (sme != SEGINV)
			in_ctx = TRUE;
	}

	/* In-context PMEGs can be touched directly; others need temp_seg. */
	if (in_ctx == TRUE)
		pmap_protect_mmu(pmap, sva, eva);
	else
		pmap_protect_noctx(pmap, sva, eva);

	if (old_ctx != INVALID_CONTEXT) {
		/* Restore previous context. */
		set_context(old_ctx);
	}

out:
	splx(s);
}
3155:
3156: /*
3157: * Remove write permissions, all in one PMEG,
3158: * where that PMEG is currently in the MMU.
3159: * The current context is already correct.
3160: */
void
pmap_protect_mmu(pmap, sva, eva)
	pmap_t pmap;
	vaddr_t sva, eva;
{
	pmeg_t pmegp;
	vaddr_t pgva, segva;
	int pte, sme;
#ifdef	HAVECACHE
	int flush_by_page = 0;
#endif

	CHECK_SPL();

#ifdef	DIAGNOSTIC
	if (pmap->pm_ctxnum != get_context())
		panic("pmap_protect_mmu: wrong context");
#endif

	segva = m68k_trunc_seg(sva);
	sme = get_segmap(segva);

#ifdef	DIAGNOSTIC
	/* Make sure it is valid and known. */
	if (sme == SEGINV)
		panic("pmap_protect_mmu: SEGINV");
	if (pmap->pm_segmap && (pmap->pm_segmap[VA_SEGNUM(segva)] != sme))
		panic("pmap_protect_mmu: incorrect sme, va=0x%lx", segva);
#endif

	pmegp = pmeg_p(sme);
	/* have pmeg, will travel */

#ifdef	DIAGNOSTIC
	/* Make sure we own the pmeg, right va, etc. */
	if ((pmegp->pmeg_va != segva) ||
		(pmegp->pmeg_owner != pmap) ||
		(pmegp->pmeg_version != pmap->pm_version))
	{
		panic("pmap_protect_mmu: bad pmeg=%p", pmegp);
	}
	if (pmegp->pmeg_vpages < 0)
		panic("pmap_protect_mmu: npages corrupted");
	if (pmegp->pmeg_vpages == 0)
		panic("pmap_protect_mmu: no valid pages?");
#endif

#ifdef	HAVECACHE
	if (cache_size) {
		/*
		 * If the range to be removed is larger than the cache,
		 * it will be cheaper to flush this segment entirely.
		 */
		if (cache_size < (eva - sva)) {
			/* cheaper to flush whole segment */
			cache_flush_segment(segva);
		} else {
			flush_by_page = 1;
		}
	}
#endif

	/* Remove write permission in the given range. */
	for (pgva = sva; pgva < eva; pgva += PAGE_SIZE) {
		pte = get_pte(pgva);
		if (pte & PG_VALID) {
#ifdef	HAVECACHE
			if (flush_by_page) {
				cache_flush_page(pgva);
				/* Get fresh mod/ref bits from write-back. */
				pte = get_pte(pgva);
			}
#endif
			/* Record mod/ref before they are cleared below. */
			if (IS_MAIN_MEM(pte)) {
				save_modref_bits(pte);
			}
			pte &= ~(PG_WRITE | PG_MODREF);
			set_pte(pgva, pte);
		}
	}
}
3242:
3243: /*
3244: * Remove write permissions, all in one PMEG,
3245: * where it is not currently in any context.
3246: */
void
pmap_protect_noctx(pmap, sva, eva)
	pmap_t pmap;
	vaddr_t sva, eva;
{
	int old_ctx, pte, sme, segnum;
	vaddr_t pgva, segva;

#ifdef	DIAGNOSTIC
	/* Kernel always in a context (actually, in context zero). */
	if (pmap == kernel_pmap)
		panic("pmap_protect_noctx: kernel_pmap");
	if (pmap->pm_segmap == NULL)
		panic("pmap_protect_noctx: null segmap");
#endif

	segva = m68k_trunc_seg(sva);
	segnum = VA_SEGNUM(segva);
	sme = pmap->pm_segmap[segnum];
	if (sme == SEGINV)
		return;

	/*
	 * Switch to the kernel context so we can access the PMEG
	 * using the temporary segment.
	 */
	old_ctx = get_context();
	set_context(KERNEL_CONTEXT);
#ifdef	DIAGNOSTIC
	if (temp_seg_inuse)
		panic("pmap_protect_noctx: temp_seg_inuse");
	temp_seg_inuse++;
#endif
	set_segmap(temp_seg_va, sme);
	/* Rebase the range onto the temporary segment's window. */
	sva += (temp_seg_va - segva);
	eva += (temp_seg_va - segva);

	/* Remove write permission in the given range. */
	for (pgva = sva; pgva < eva; pgva += PAGE_SIZE) {
		pte = get_pte(pgva);
		if (pte & PG_VALID) {
			/* No cache flush needed. */
			if (IS_MAIN_MEM(pte)) {
				save_modref_bits(pte);
			}
			pte &= ~(PG_WRITE | PG_MODREF);
			set_pte(pgva, pte);
		}
	}

	/*
	 * Release the temporary segment, and
	 * restore the previous context.
	 */
	set_segmap(temp_seg_va, SEGINV);
#ifdef	DIAGNOSTIC
	temp_seg_inuse--;
#endif
	set_context(old_ctx);
}
3307:
3308:
3309: /*
3310: * Remove the given range of addresses from the specified map.
3311: *
3312: * It is assumed that the start and end are properly
3313: * rounded to the page size.
3314: */
3315: void
3316: pmap_remove(pmap, sva, eva)
3317: pmap_t pmap;
1.16 fredette 3318: vaddr_t sva, eva;
1.1 fredette 3319: {
1.16 fredette 3320: vaddr_t va, neva;
1.1 fredette 3321: int segnum;
3322:
3323: #ifdef PMAP_DEBUG
3324: if ((pmap_debug & PMD_REMOVE) ||
3325: ((sva <= pmap_db_watchva && eva > pmap_db_watchva)))
3326: printf("pmap_remove(%p, 0x%lx, 0x%lx)\n", pmap, sva, eva);
3327: #endif
3328:
1.16 fredette 3329:
3330: KASSERT((pmap == kernel_pmap) ?
3331: sva >= virtual_avail && eva < DVMA_MAP_END :
3332: eva <= VM_MAXUSER_ADDRESS);
1.1 fredette 3333: va = sva;
3334: segnum = VA_SEGNUM(va);
3335: while (va < eva) {
3336: neva = m68k_trunc_seg(va) + NBSG;
3337: if (neva > eva)
3338: neva = eva;
3339: if (pmap->pm_segmap[segnum] != SEGINV)
3340: pmap_remove1(pmap, va, neva);
3341: va = neva;
3342: segnum++;
3343: }
3344: }
3345:
/*
 * Remove user mappings, all within one segment.
 *
 * Chooses how the segment's PMEG is reached: the kernel pmap is
 * always accessed via KERNEL_CONTEXT; a user pmap is accessed in
 * its own context when it has one and the segment is loaded there,
 * otherwise through the no-context (temporary segment) path.
 */
void
pmap_remove1(pmap, sva, eva)
	pmap_t pmap;
	vaddr_t sva, eva;
{
	int old_ctx, s, sme;
	boolean_t in_ctx;

	/* Block interrupt-level activity that could touch the MMU state. */
	s = splvm();

#ifdef DIAGNOSTIC
	/* The entire range must fall within a single segment. */
	if (m68k_trunc_seg(sva) != m68k_trunc_seg(eva-1))
		panic("pmap_remove1: bad range!");
#endif

	if (pmap == kernel_pmap) {
		/* Kernel mappings are reached via the kernel context. */
		old_ctx = get_context();
		set_context(KERNEL_CONTEXT);
		sme = get_segmap(sva);
		if (sme != SEGINV)
			pmap_remove_mmu(pmap, sva, eva);
		set_context(old_ctx);
		goto out;
	}
	/* It is a user pmap. */

	/* There is a PMEG, but maybe not active. */
	old_ctx = INVALID_CONTEXT;
	in_ctx = FALSE;
	if (has_context(pmap)) {
		/* Temporary context change. */
		old_ctx = get_context();
		set_context(pmap->pm_ctxnum);
		sme = get_segmap(sva);
		if (sme != SEGINV)
			in_ctx = TRUE;
	}

	if (in_ctx == TRUE)
		pmap_remove_mmu(pmap, sva, eva);
	else
		pmap_remove_noctx(pmap, sva, eva);

	if (old_ctx != INVALID_CONTEXT) {
		/* Restore previous context. */
		set_context(old_ctx);
	}

out:
	splx(s);
}
3400:
3401: /*
3402: * Remove some mappings, all in one PMEG,
3403: * where that PMEG is currently in the MMU.
3404: * The current context is already correct.
3405: * If no PTEs remain valid in the PMEG, free it.
3406: */
3407: void
3408: pmap_remove_mmu(pmap, sva, eva)
3409: pmap_t pmap;
1.16 fredette 3410: vaddr_t sva, eva;
1.1 fredette 3411: {
3412: pmeg_t pmegp;
1.16 fredette 3413: vaddr_t pgva, segva;
1.1 fredette 3414: int pte, sme;
3415: #ifdef HAVECACHE
3416: int flush_by_page = 0;
3417: #endif
3418:
3419: CHECK_SPL();
3420:
3421: #ifdef DIAGNOSTIC
3422: if (pmap->pm_ctxnum != get_context())
3423: panic("pmap_remove_mmu: wrong context");
3424: #endif
3425:
3426: segva = m68k_trunc_seg(sva);
3427: sme = get_segmap(segva);
3428:
3429: #ifdef DIAGNOSTIC
3430: /* Make sure it is valid and known. */
3431: if (sme == SEGINV)
3432: panic("pmap_remove_mmu: SEGINV");
3433: if (pmap->pm_segmap && (pmap->pm_segmap[VA_SEGNUM(segva)] != sme))
3434: panic("pmap_remove_mmu: incorrect sme, va=0x%lx", segva);
3435: #endif
3436:
3437: pmegp = pmeg_p(sme);
3438: /* have pmeg, will travel */
3439:
3440: #ifdef DIAGNOSTIC
3441: /* Make sure we own the pmeg, right va, etc. */
3442: if ((pmegp->pmeg_va != segva) ||
3443: (pmegp->pmeg_owner != pmap) ||
3444: (pmegp->pmeg_version != pmap->pm_version))
3445: {
3446: panic("pmap_remove_mmu: bad pmeg=%p", pmegp);
3447: }
1.16 fredette 3448: if (pmegp->pmeg_vpages < 0)
3449: panic("pmap_remove_mmu: npages corrupted");
3450: if (pmegp->pmeg_vpages == 0)
1.1 fredette 3451: panic("pmap_remove_mmu: no valid pages?");
3452: #endif
3453:
3454: #ifdef HAVECACHE
3455: if (cache_size) {
3456: /*
3457: * If the range to be removed is larger than the cache,
3458: * it will be cheaper to flush this segment entirely.
3459: */
3460: if (cache_size < (eva - sva)) {
3461: /* cheaper to flush whole segment */
3462: cache_flush_segment(segva);
3463: } else {
3464: flush_by_page = 1;
3465: }
3466: }
3467: #endif
3468:
3469: /* Invalidate the PTEs in the given range. */
1.20 thorpej 3470: for (pgva = sva; pgva < eva; pgva += PAGE_SIZE) {
1.1 fredette 3471: pte = get_pte(pgva);
3472: if (pte & PG_VALID) {
3473: #ifdef HAVECACHE
3474: if (flush_by_page) {
3475: cache_flush_page(pgva);
3476: /* Get fresh mod/ref bits from write-back. */
3477: pte = get_pte(pgva);
3478: }
3479: #endif
3480: if (IS_MAIN_MEM(pte)) {
3481: save_modref_bits(pte);
3482: pv_unlink(pmap, pte, pgva);
3483: }
3484: #ifdef PMAP_DEBUG
3485: if ((pmap_debug & PMD_SETPTE) || (pgva == pmap_db_watchva)) {
3486: printf("pmap: set_pte pmap=%p va=0x%lx"
3487: " old=0x%x new=0x%x (rrmmu)\n",
3488: pmap, pgva, pte, PG_INVAL);
3489: }
3490: #endif
3491: set_pte(pgva, PG_INVAL);
1.16 fredette 3492: KASSERT(pmegp->pmeg_vpages > 0);
1.1 fredette 3493: pmegp->pmeg_vpages--;
3494: }
3495: }
3496:
1.16 fredette 3497: KASSERT(pmegp->pmeg_vpages >= 0);
3498: if (pmegp->pmeg_vpages == 0) {
1.1 fredette 3499: /* We are done with this pmeg. */
3500: if (is_pmeg_wired(pmegp)) {
3501: #ifdef PMAP_DEBUG
3502: if (pmap_debug & PMD_WIRING) {
3503: db_printf("pmap: removing wired pmeg: %p\n", pmegp);
3504: Debugger();
3505: }
3506: #endif /* PMAP_DEBUG */
3507: }
3508:
3509: #ifdef PMAP_DEBUG
3510: if (pmap_debug & PMD_SEGMAP) {
3511: printf("pmap: set_segmap ctx=%d v=0x%lx old=0x%x new=ff (rm)\n",
3512: pmap->pm_ctxnum, segva, pmegp->pmeg_index);
3513: }
3514: pmeg_verify_empty(segva);
3515: #endif
3516:
3517: /* Remove it from the MMU. */
3518: if (kernel_pmap == pmap) {
3519: /* Did cache flush above. */
3520: set_segmap(segva, SEGINV);
3521: } else {
3522: /* Did cache flush above. */
3523: set_segmap(segva, SEGINV);
3524: }
3525: pmap->pm_segmap[VA_SEGNUM(segva)] = SEGINV;
3526: /* Now, put it on the free list. */
3527: pmeg_free(pmegp);
3528: }
3529: }
3530:
/*
 * Remove some mappings, all in one PMEG,
 * where it is not currently in any context.
 *
 * The PMEG is temporarily mapped at temp_seg_va in the kernel
 * context so its PTEs can be read and invalidated.  If no valid
 * pages remain, the PMEG is released to the free list.
 */
void
pmap_remove_noctx(pmap, sva, eva)
	pmap_t pmap;
	vaddr_t sva, eva;
{
	pmeg_t pmegp;
	int old_ctx, pte, sme, segnum;
	vaddr_t pgva, segva;

	CHECK_SPL();

#ifdef DIAGNOSTIC
	/* Kernel always in a context (actually, in context zero). */
	if (pmap == kernel_pmap)
		panic("pmap_remove_noctx: kernel_pmap");
	if (pmap->pm_segmap == NULL)
		panic("pmap_remove_noctx: null segmap");
#endif

	segva = m68k_trunc_seg(sva);
	segnum = VA_SEGNUM(segva);
	sme = pmap->pm_segmap[segnum];
	if (sme == SEGINV)
		return;		/* Nothing mapped in this segment. */
	pmegp = pmeg_p(sme);

	/*
	 * Switch to the kernel context so we can access the PMEG
	 * using the temporary segment.
	 */
	old_ctx = get_context();
	set_context(KERNEL_CONTEXT);
#ifdef DIAGNOSTIC
	if (temp_seg_inuse)
		panic("pmap_remove_noctx: temp_seg_inuse");
	temp_seg_inuse++;
#endif
	set_segmap(temp_seg_va, sme);
	/* Shift the VA range into the temporary segment's window. */
	sva += (temp_seg_va - segva);
	eva += (temp_seg_va - segva);

	/* Invalidate the PTEs in the given range. */
	for (pgva = sva; pgva < eva; pgva += PAGE_SIZE) {
		pte = get_pte(pgva);
		if (pte & PG_VALID) {
			/* No cache flush needed. */
			if (IS_MAIN_MEM(pte)) {
				save_modref_bits(pte);
				/*
				 * Translate back to the pmap's original VA
				 * before touching the PV list.
				 */
				pv_unlink(pmap, pte, pgva - (temp_seg_va - segva));
			}
#ifdef PMAP_DEBUG
			if ((pmap_debug & PMD_SETPTE) || (pgva == pmap_db_watchva)) {
				printf("pmap: set_pte pmap=%p va=0x%lx"
					   " old=0x%x new=0x%x (rrncx)\n",
					   pmap, pgva, pte, PG_INVAL);
			}
#endif
			set_pte(pgva, PG_INVAL);
			KASSERT(pmegp->pmeg_vpages > 0);
			pmegp->pmeg_vpages--;
		}
	}

	/*
	 * Release the temporary segment, and
	 * restore the previous context.
	 */
	set_segmap(temp_seg_va, SEGINV);
#ifdef DIAGNOSTIC
	temp_seg_inuse--;
#endif
	set_context(old_ctx);

	KASSERT(pmegp->pmeg_vpages >= 0);
	if (pmegp->pmeg_vpages == 0) {
		/* We are done with this pmeg. */
		if (is_pmeg_wired(pmegp)) {
#ifdef PMAP_DEBUG
			if (pmap_debug & PMD_WIRING) {
				db_printf("pmap: removing wired pmeg: %p\n", pmegp);
				Debugger();
			}
#endif	/* PMAP_DEBUG */
		}

		pmap->pm_segmap[segnum] = SEGINV;
		pmeg_free(pmegp);
	}
}
3624:
3625:
3626: /*
3627: * Count resident pages in this pmap.
3628: * See: kern_sysctl.c:pmap_resident_count
3629: */
3630: segsz_t
3631: pmap_resident_pages(pmap)
3632: pmap_t pmap;
3633: {
3634: int i, sme, pages;
3635: pmeg_t pmeg;
3636:
3637: if (pmap->pm_segmap == 0)
3638: return (0);
3639:
3640: pages = 0;
3641: for (i = 0; i < NUSEG; i++) {
3642: sme = pmap->pm_segmap[i];
3643: if (sme != SEGINV) {
3644: pmeg = pmeg_p(sme);
3645: pages += pmeg->pmeg_vpages;
3646: }
3647: }
3648: return (pages);
3649: }
3650:
3651: /*
3652: * Count wired pages in this pmap.
3653: * See vm_mmap.c:pmap_wired_count
3654: */
3655: segsz_t
3656: pmap_wired_pages(pmap)
3657: pmap_t pmap;
3658: {
3659: int i, mask, sme, pages;
3660: pmeg_t pmeg;
3661:
3662: if (pmap->pm_segmap == 0)
3663: return (0);
3664:
3665: pages = 0;
3666: for (i = 0; i < NUSEG; i++) {
3667: sme = pmap->pm_segmap[i];
3668: if (sme != SEGINV) {
3669: pmeg = pmeg_p(sme);
3670: mask = 0x8000;
3671: do {
3672: if (pmeg->pmeg_wired & mask)
3673: pages++;
3674: mask = (mask >> 1);
3675: } while (mask);
3676: }
3677: }
3678: return (pages);
3679: }
3680:
3681:
/*
 * pmap_copy_page copies the specified (machine independent)
 * page by mapping the page into virtual memory and using
 * bcopy to copy the page, one machine dependent page at a
 * time.
 *
 * Both physical pages are briefly mapped (non-cached, via
 * PG_PERM) at tmp_vpages[0]/tmp_vpages[1] in the kernel context.
 */
void
pmap_copy_page(src, dst)
	paddr_t src, dst;
{
	int pte;
	int s;
	int saved_ctx;

	/* Keep interrupts from using the tmp_vpages underneath us. */
	s = splvm();

#ifdef PMAP_DEBUG
	if (pmap_debug & PMD_COW)
		printf("pmap_copy_page: 0x%lx -> 0x%lx\n", src, dst);
#endif

	/*
	 * Temporarily switch to the kernel context to use the
	 * tmp_vpages.
	 */
	saved_ctx = get_context();
	set_context(KERNEL_CONTEXT);
#ifdef DIAGNOSTIC
	if (tmp_vpages_inuse)
		panic("pmap_copy_page: vpages inuse");
	tmp_vpages_inuse++;
#endif

	/* PG_PERM is short for (PG_VALID|PG_WRITE|PG_SYSTEM|PG_NC) */
	/* All mappings to vmp_vpages are non-cached, so no flush. */
	pte = PG_PERM | PA_PGNUM(src);
	set_pte(tmp_vpages[0], pte);
	pte = PG_PERM | PA_PGNUM(dst);
	set_pte(tmp_vpages[1], pte);
	copypage((char *) tmp_vpages[0], (char *) tmp_vpages[1]);
	/* Tear down the temporary mappings before leaving. */
	set_pte(tmp_vpages[0], PG_INVAL);
	set_pte(tmp_vpages[1], PG_INVAL);

#ifdef DIAGNOSTIC
	tmp_vpages_inuse--;
#endif
	set_context(saved_ctx);

	splx(s);
}
3732:
/*
 * pmap_zero_page zeros the specified (machine independent)
 * page by mapping the page into virtual memory and using
 * bzero to clear its contents, one machine dependent page
 * at a time.
 *
 * The physical page is briefly mapped (non-cached, via PG_PERM)
 * at tmp_vpages[0] in the kernel context.
 */
void
pmap_zero_page(pa)
	paddr_t pa;
{
	int pte;
	int s;
	int saved_ctx;

	/* Keep interrupts from using the tmp_vpages underneath us. */
	s = splvm();

#ifdef PMAP_DEBUG
	if (pmap_debug & PMD_COW)
		printf("pmap_zero_page: 0x%lx\n", pa);
#endif

	/*
	 * Temporarily switch to the kernel context to use the
	 * tmp_vpages.
	 */
	saved_ctx = get_context();
	set_context(KERNEL_CONTEXT);
#ifdef DIAGNOSTIC
	if (tmp_vpages_inuse)
		panic("pmap_zero_page: vpages inuse");
	tmp_vpages_inuse++;
#endif

	/* PG_PERM is short for (PG_VALID|PG_WRITE|PG_SYSTEM|PG_NC) */
	/* All mappings to vmp_vpages are non-cached, so no flush. */
	pte = PG_PERM | PA_PGNUM(pa);
	set_pte(tmp_vpages[0], pte);
	zeropage((char *) tmp_vpages[0]);
	/* Tear down the temporary mapping before leaving. */
	set_pte(tmp_vpages[0], PG_INVAL);

#ifdef DIAGNOSTIC
	tmp_vpages_inuse--;
#endif
	set_context(saved_ctx);

	splx(s);
}
3780:
/*
 * Routine:	pmap_collect
 * Function:
 *	Garbage collects the physical map system for
 *	pages which are no longer used.
 *	Success need not be guaranteed -- that is, there
 *	may well be pages which are not referenced, but
 *	others may be collected.
 * Usage:
 *	Called by the pageout daemon when pages are scarce.
 */
void
pmap_collect(pmap)
	pmap_t pmap;
{
	/* Empty: no garbage collection is performed by this pmap. */
}
3797:
3798: /*
3799: * Find first virtual address >= *va that is
3800: * least likely to cause cache aliases.
3801: * (This will just seg-align mappings.)
3802: */
3803: void
3804: pmap_prefer(fo, va)
1.16 fredette 3805: vaddr_t fo;
3806: vaddr_t *va;
1.1 fredette 3807: {
1.16 fredette 3808: long d;
1.1 fredette 3809:
3810: d = fo - *va;
3811: d &= SEGOFSET;
3812: *va += d;
3813: }
3814:
/*
 * Fill in the sun2-specific part of the kernel core header
 * for dumpsys().  (See machdep.c for the rest.)
 *
 * Records the segment/page layout constants and a snapshot of the
 * kernel segmap so the dump can later be interpreted.
 */
void
pmap_kcore_hdr(sh)
	struct sun2_kcore_hdr *sh;
{
	vaddr_t va;
	u_char *cp, *ep;
	int saved_ctx;

	sh->segshift = SEGSHIFT;
	sh->pg_frame = PG_FRAME;
	sh->pg_valid = PG_VALID;

	/* Copy the kernel segmap (256 bytes). */
	va = KERNBASE;
	cp = sh->ksegmap;
	ep = cp + sizeof(sh->ksegmap);
	/* Must read the segmap from the kernel context. */
	saved_ctx = get_context();
	set_context(KERNEL_CONTEXT);
	do {
		*cp = get_segmap(va);
		va += NBSG;
		cp++;
	} while (cp < ep);
	set_context(saved_ctx);
}
3844:
/*
 * Copy the pagemap RAM into the passed buffer (one page)
 * starting at OFF in the pagemap RAM.
 *
 * Each PMEG is mapped in turn at temp_seg_va (kernel context)
 * and its PTEs are read out into the buffer.
 */
void
pmap_get_pagemap(pt, off)
	int *pt;
	int off;
{
	vaddr_t va, va_end;
	int sme, sme_end;	/* SegMap Entry numbers */
	int saved_ctx;

	sme = (off / (NPAGSEG * sizeof(*pt)));	/* PMEG to start on */
	sme_end =
		sme + (PAGE_SIZE / (NPAGSEG * sizeof(*pt)));	/* where to stop */
	va_end = temp_seg_va + NBSG;

	saved_ctx = get_context();
	set_context(KERNEL_CONTEXT);
	do {
		/* Map this PMEG at the temporary segment and read it. */
		set_segmap(temp_seg_va, sme);
		va = temp_seg_va;
		do {
			*pt++ = get_pte(va);
			va += PAGE_SIZE;
		} while (va < va_end);
		sme++;
	} while (sme < sme_end);
	set_segmap(temp_seg_va, SEGINV);
	set_context(saved_ctx);
}
3877:
3878:
/*
 * Helper functions for changing unloaded PMEGs
 */

/*
 * Read the PTE for page page_num of PMEG pmeg_num by briefly
 * mapping that PMEG at the kernel's temporary segment.
 */
static int
get_pte_pmeg(int pmeg_num, int page_num)
{
	vaddr_t va;
	int pte;
	int saved_ctx;

	CHECK_SPL();
	saved_ctx = get_context();
	set_context(KERNEL_CONTEXT);
#ifdef DIAGNOSTIC
	if (temp_seg_inuse)
		panic("get_pte_pmeg: temp_seg_inuse");
	temp_seg_inuse++;
#endif

	va = temp_seg_va;
	set_segmap(temp_seg_va, pmeg_num);
	va += PAGE_SIZE*page_num;
	pte = get_pte(va);
	set_segmap(temp_seg_va, SEGINV);

#ifdef DIAGNOSTIC
	temp_seg_inuse--;
#endif
	set_context(saved_ctx);
	return pte;
}
3911:
/*
 * Write the PTE for page page_num of PMEG pmeg_num by briefly
 * mapping that PMEG at the kernel's temporary segment.
 */
static void
set_pte_pmeg(int pmeg_num, int page_num, int pte)
{
	vaddr_t va;
	int saved_ctx;

	CHECK_SPL();
	saved_ctx = get_context();
	set_context(KERNEL_CONTEXT);
#ifdef DIAGNOSTIC
	if (temp_seg_inuse)
		panic("set_pte_pmeg: temp_seg_inuse");
	temp_seg_inuse++;
#endif

	/* We never access data in temp_seg_va so no need to flush. */
	va = temp_seg_va;
	set_segmap(temp_seg_va, pmeg_num);
	va += PAGE_SIZE*page_num;
	set_pte(va, pte);
	set_segmap(temp_seg_va, SEGINV);

#ifdef DIAGNOSTIC
	temp_seg_inuse--;
#endif
	set_context(saved_ctx);
}
3939:
/*
 * Routine:	pmap_procwr
 *
 * Function:
 *	Synchronize caches corresponding to [addr, addr+len) in p.
 */
void
pmap_procwr(p, va, len)
	struct proc *p;
	vaddr_t va;
	size_t len;
{
	/* Empty: no cache synchronization is done here on sun2. */
}
3953:
3954:
3955: #ifdef PMAP_DEBUG
3956: /* Things to call from the debugger. */
3957:
/*
 * Debugger helper: dump a pmap's context number, version,
 * and segmap pointer.
 */
void
pmap_print(pmap)
	pmap_t pmap;
{
	db_printf(" pm_ctxnum=%d\n", pmap->pm_ctxnum);
	db_printf(" pm_version=0x%x\n", pmap->pm_version);
	db_printf(" pm_segmap=%p\n", pmap->pm_segmap);
}
3966:
/*
 * Debugger helper: dump a PMEG descriptor's queue links,
 * ownership, and state fields.
 */
void
pmeg_print(pmegp)
	pmeg_t pmegp;
{
	db_printf("link_next=%p link_prev=%p\n",
			  pmegp->pmeg_link.tqe_next,
			  pmegp->pmeg_link.tqe_prev);
	db_printf("index=0x%x owner=%p own_vers=0x%x\n",
			  pmegp->pmeg_index, pmegp->pmeg_owner, pmegp->pmeg_version);
	db_printf("va=0x%lx wired=0x%x reserved=0x%x vpgs=0x%x qstate=0x%x\n",
			  pmegp->pmeg_va, pmegp->pmeg_wired,
			  pmegp->pmeg_reserved, pmegp->pmeg_vpages,
			  pmegp->pmeg_qstate);
}
3981:
3982: void
3983: pv_print(pa)
1.16 fredette 3984: paddr_t pa;
1.1 fredette 3985: {
3986: pv_entry_t pv;
3987: int idx;
3988:
3989: idx = PA_PGNUM(pa);
3990: if (idx >= physmem) {
3991: db_printf("bad address\n");
3992: return;
3993: }
3994: db_printf("pa=0x%lx, flags=0x%x\n",
3995: pa, pv_flags_tbl[idx]);
3996:
3997: pv = pv_head_tbl[idx];
3998: while (pv) {
3999: db_printf(" pv_entry %p pmap %p va 0x%lx next %p\n",
4000: pv, pv->pv_pmap, pv->pv_va, pv->pv_next);
4001: pv = pv->pv_next;
4002: }
4003: }
4004: #endif /* PMAP_DEBUG */
4005:
4006: /*
4007: * Local Variables:
4008: * tab-width: 4
4009: * End:
4010: */
CVSweb <webmaster@jp.NetBSD.org>