Annotation of src/sys/arch/m68k/m68k/pmap_motorola.c, Revision 1.60.2.1
1.60.2.1! jruoho 1: /* $NetBSD: pmap_motorola.c,v 1.61 2011/06/03 17:03:52 tsutsui Exp $ */
1.1 chs 2:
3: /*-
4: * Copyright (c) 1999 The NetBSD Foundation, Inc.
5: * All rights reserved.
6: *
7: * This code is derived from software contributed to The NetBSD Foundation
8: * by Jason R. Thorpe.
9: *
10: * Redistribution and use in source and binary forms, with or without
11: * modification, are permitted provided that the following conditions
12: * are met:
13: * 1. Redistributions of source code must retain the above copyright
14: * notice, this list of conditions and the following disclaimer.
15: * 2. Redistributions in binary form must reproduce the above copyright
16: * notice, this list of conditions and the following disclaimer in the
17: * documentation and/or other materials provided with the distribution.
18: *
19: * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20: * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21: * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22: * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23: * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24: * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25: * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26: * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27: * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28: * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29: * POSSIBILITY OF SUCH DAMAGE.
30: */
31:
32: /*
33: * Copyright (c) 1991, 1993
34: * The Regents of the University of California. All rights reserved.
35: *
36: * This code is derived from software contributed to Berkeley by
37: * the Systems Programming Group of the University of Utah Computer
38: * Science Department.
39: *
40: * Redistribution and use in source and binary forms, with or without
41: * modification, are permitted provided that the following conditions
42: * are met:
43: * 1. Redistributions of source code must retain the above copyright
44: * notice, this list of conditions and the following disclaimer.
45: * 2. Redistributions in binary form must reproduce the above copyright
46: * notice, this list of conditions and the following disclaimer in the
47: * documentation and/or other materials provided with the distribution.
1.6 agc 48: * 3. Neither the name of the University nor the names of its contributors
1.1 chs 49: * may be used to endorse or promote products derived from this software
50: * without specific prior written permission.
51: *
52: * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
53: * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
54: * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
55: * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
56: * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
57: * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
58: * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
59: * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
60: * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
61: * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
62: * SUCH DAMAGE.
63: *
64: * @(#)pmap.c 8.6 (Berkeley) 5/27/94
65: */
66:
67: /*
68: * Motorola m68k-family physical map management code.
69: *
70: * Supports:
71: * 68020 with 68851 MMU
72: * 68030 with on-chip MMU
73: * 68040 with on-chip MMU
74: * 68060 with on-chip MMU
75: *
76: * Notes:
77: * Don't even pay lip service to multiprocessor support.
78: *
79: * We assume TLB entries don't have process tags (except for the
80: * supervisor/user distinction) so we only invalidate TLB entries
81: * when changing mappings for the current (or kernel) pmap. This is
82: * technically not true for the 68851 but we flush the TLB on every
83: * context switch, so it effectively winds up that way.
84: *
85: * Bitwise and/or operations are significantly faster than bitfield
86: * references so we use them when accessing STE/PTEs in the pmap_pte_*
87: * macros. Note also that the two are not always equivalent; e.g.:
88: * (*pte & PG_PROT) [4] != pte->pg_prot [1]
89: * and a couple of routines that deal with protection and wiring take
90: * some shortcuts that assume the and/or definitions.
91: */
92:
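/*
 * For illustration, a minimal sketch of the two access styles the
 * note above compares.  The bitfield layout shown here is
 * hypothetical, not the real pte.h definition:
 *
 *	pt_entry_t *pte = pmap_pte(pmap, va);
 *	if (*pte & PG_V)			(one AND of the whole word)
 *		...
 *
 *	struct pte_bits { unsigned int pg_prot : 2, pg_v : 1; };
 *	if (((struct pte_bits *)pte)->pg_v)	(shift/mask sequence)
 *		...
 */
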
93: /*
94: * Manages physical address maps.
95: *
96: * In addition to hardware address maps, this
97: * module is called upon to provide software-use-only
98: * maps which may or may not be stored in the same
99: * form as hardware maps. These pseudo-maps are
100: * used to store intermediate results from copy
101: * operations to and from address spaces.
102: *
103: * Since the information managed by this module is
104: * also stored by the logical address mapping module,
105: * this module may throw away valid virtual-to-physical
106: * mappings at almost any time. However, invalidations
107: * of virtual-to-physical mappings must be done as
108: * requested.
109: *
110: * In order to cope with hardware architectures which
111: * make virtual-to-physical map invalidates expensive,
 112:  * this module may delay invalidate or reduce-protection
113: * operations until such time as they are actually
114: * necessary. This module is given full information as
115: * to which processors are currently using which maps,
116: * and to when physical maps must be made correct.
117: */
118:
1.56 mrg 119: #include "opt_m68k_arch.h"
120:
1.1 chs 121: #include <sys/cdefs.h>
1.60.2.1! jruoho 122: __KERNEL_RCSID(0, "$NetBSD: pmap_motorola.c,v 1.61 2011/06/03 17:03:52 tsutsui Exp $");
1.1 chs 123:
124: #include <sys/param.h>
125: #include <sys/systm.h>
126: #include <sys/proc.h>
127: #include <sys/malloc.h>
128: #include <sys/pool.h>
1.60 rmind 129: #include <sys/cpu.h>
1.60.2.1! jruoho 130: #include <sys/atomic.h>
1.1 chs 131:
132: #include <machine/pte.h>
1.60 rmind 133: #include <machine/pcb.h>
1.1 chs 134:
135: #include <uvm/uvm.h>
136:
137: #include <m68k/cacheops.h>
138:
139: #ifdef DEBUG
140: #define PDB_FOLLOW 0x0001
141: #define PDB_INIT 0x0002
142: #define PDB_ENTER 0x0004
143: #define PDB_REMOVE 0x0008
144: #define PDB_CREATE 0x0010
145: #define PDB_PTPAGE 0x0020
146: #define PDB_CACHE 0x0040
147: #define PDB_BITS 0x0080
148: #define PDB_COLLECT 0x0100
149: #define PDB_PROTECT 0x0200
150: #define PDB_SEGTAB 0x0400
151: #define PDB_MULTIMAP 0x0800
152: #define PDB_PARANOIA 0x2000
153: #define PDB_WIRING 0x4000
154: #define PDB_PVDUMP 0x8000
155:
156: int debugmap = 0;
157: int pmapdebug = PDB_PARANOIA;
158:
159: #define PMAP_DPRINTF(l, x) if (pmapdebug & (l)) printf x
160: #else /* ! DEBUG */
161: #define PMAP_DPRINTF(l, x) /* nothing */
162: #endif /* DEBUG */
163:
164: /*
165: * Get STEs and PTEs for user/kernel address space
166: */
167: #if defined(M68040) || defined(M68060)
168: #define pmap_ste1(m, v) \
169: (&((m)->pm_stab[(vaddr_t)(v) >> SG4_SHIFT1]))
170: /* XXX assumes physically contiguous ST pages (if more than one) */
171: #define pmap_ste2(m, v) \
172: (&((m)->pm_stab[(st_entry_t *)(*(u_int *)pmap_ste1(m, v) & SG4_ADDR1) \
173: - (m)->pm_stpa + (((v) & SG4_MASK2) >> SG4_SHIFT2)]))
174: #if defined(M68020) || defined(M68030)
175: #define pmap_ste(m, v) \
176: (&((m)->pm_stab[(vaddr_t)(v) \
177: >> (mmutype == MMU_68040 ? SG4_SHIFT1 : SG_ISHIFT)]))
178: #define pmap_ste_v(m, v) \
179: (mmutype == MMU_68040 \
180: ? ((*pmap_ste1(m, v) & SG_V) && \
181: (*pmap_ste2(m, v) & SG_V)) \
182: : (*pmap_ste(m, v) & SG_V))
183: #else
184: #define pmap_ste(m, v) \
185: (&((m)->pm_stab[(vaddr_t)(v) >> SG4_SHIFT1]))
186: #define pmap_ste_v(m, v) \
187: ((*pmap_ste1(m, v) & SG_V) && (*pmap_ste2(m, v) & SG_V))
188: #endif
189: #else
190: #define pmap_ste(m, v) (&((m)->pm_stab[(vaddr_t)(v) >> SG_ISHIFT]))
191: #define pmap_ste_v(m, v) (*pmap_ste(m, v) & SG_V)
192: #endif
193:
194: #define pmap_pte(m, v) (&((m)->pm_ptab[(vaddr_t)(v) >> PG_SHIFT]))
195: #define pmap_pte_pa(pte) (*(pte) & PG_FRAME)
196: #define pmap_pte_w(pte) (*(pte) & PG_W)
197: #define pmap_pte_ci(pte) (*(pte) & PG_CI)
198: #define pmap_pte_m(pte) (*(pte) & PG_M)
199: #define pmap_pte_u(pte) (*(pte) & PG_U)
200: #define pmap_pte_prot(pte) (*(pte) & PG_PROT)
201: #define pmap_pte_v(pte) (*(pte) & PG_V)
202:
203: #define pmap_pte_set_w(pte, v) \
204: if (v) *(pte) |= PG_W; else *(pte) &= ~PG_W
205: #define pmap_pte_set_prot(pte, v) \
206: if (v) *(pte) |= PG_PROT; else *(pte) &= ~PG_PROT
207: #define pmap_pte_w_chg(pte, nw) ((nw) ^ pmap_pte_w(pte))
208: #define pmap_pte_prot_chg(pte, np) ((np) ^ pmap_pte_prot(pte))
209:
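/*
 * A sketch of how the macros above decompose a virtual address on
 * a two-level 68040 table (illustrative; the shifts and masks are
 * the SG4_ and PG_ constants from pte.h):
 *
 *	st_entry_t *ste1 = pmap_ste1(pmap, va);    level 1: va >> SG4_SHIFT1
 *	st_entry_t *ste2 = pmap_ste2(pmap, va);    level 2: table from ste1,
 *						   indexed by
 *						   (va & SG4_MASK2) >> SG4_SHIFT2
 *	pt_entry_t *pte  = pmap_pte(pmap, va);     page:    va >> PG_SHIFT
 *
 * and a wiring change then reduces to plain integer operations:
 *
 *	if (pmap_pte_v(pte) && pmap_pte_w_chg(pte, PG_W))
 *		pmap_pte_set_w(pte, 1);
 */
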
210: /*
211: * Given a map and a machine independent protection code,
212: * convert to an m68k protection code.
213: */
214: #define pte_prot(m, p) (protection_codes[p])
1.42 tsutsui 215: u_int protection_codes[8];
1.1 chs 216:
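/*
 * pte_prot() is a plain table lookup: the VM_PROT_* values are bit
 * flags, so READ, WRITE and EXECUTE OR together into an index 0-7.
 * Illustrative results, given the table as filled in by
 * pmap_bootstrap_finalize():
 *
 *	pte_prot(pmap, VM_PROT_READ)			-> PG_RO
 *	pte_prot(pmap, VM_PROT_READ|VM_PROT_WRITE)	-> PG_RW
 */
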
217: /*
218: * Kernel page table page management.
219: */
220: struct kpt_page {
221: struct kpt_page *kpt_next; /* link on either used or free list */
222: vaddr_t kpt_va; /* always valid kernel VA */
223: paddr_t kpt_pa; /* PA of this page (for speed) */
224: };
225: struct kpt_page *kpt_free_list, *kpt_used_list;
226: struct kpt_page *kpt_pages;
227:
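/*
 * kpt_free_list and kpt_used_list are simple LIFO lists chained
 * through kpt_next; a sketch of the discipline used by the KPT
 * code in this file (illustrative, not a real helper):
 *
 *	kpt = kpt_free_list;		pop a free PT page
 *	kpt_free_list = kpt->kpt_next;
 *	kpt->kpt_next = kpt_used_list;	push it on the used list
 *	kpt_used_list = kpt;
 */
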
228: /*
229: * Kernel segment/page table and page table map.
230: * The page table map gives us a level of indirection we need to dynamically
231: * expand the page table. It is essentially a copy of the segment table
232: * with PTEs instead of STEs. All are initialized in locore at boot time.
233: * Sysmap will initially contain VM_KERNEL_PT_PAGES pages of PTEs.
 234:  * Segtabzero is an empty segment table which all processes share until they
235: * reference something.
236: */
1.54 tsutsui 237: paddr_t Sysseg_pa;
1.1 chs 238: st_entry_t *Sysseg;
239: pt_entry_t *Sysmap, *Sysptmap;
240: st_entry_t *Segtabzero, *Segtabzeropa;
241: vsize_t Sysptsize = VM_KERNEL_PT_PAGES;
242:
1.41 tsutsui 243: static struct pmap kernel_pmap_store;
1.40 tsutsui 244: struct pmap *const kernel_pmap_ptr = &kernel_pmap_store;
1.1 chs 245: struct vm_map *st_map, *pt_map;
1.12 yamt 246: struct vm_map_kernel st_map_store, pt_map_store;
1.1 chs 247:
1.52 tsutsui 248: vaddr_t lwp0uarea; /* lwp0 u-area VA, initialized in bootstrap */
249:
1.1 chs 250: paddr_t avail_start; /* PA of first available physical page */
251: paddr_t avail_end; /* PA of last available physical page */
252: vsize_t mem_size; /* memory size in bytes */
1.5 thorpej 253: vaddr_t virtual_avail; /* VA of first avail page (after kernel bss)*/
254: vaddr_t virtual_end; /* VA of last avail page (end of kernel AS) */
1.1 chs 255: int page_cnt; /* number of pages managed by VM system */
256:
1.25 thorpej 257: bool pmap_initialized = false; /* Has pmap_init completed? */
1.46 thorpej 258:
259: struct pv_header {
260: struct pv_entry pvh_first; /* first PV entry */
261: uint16_t pvh_attrs; /* attributes:
262: bits 0-7: PTE bits
263: bits 8-15: flags */
264: uint16_t pvh_cimappings; /* # caller-specified CI
265: mappings */
266: };
267:
268: #define PVH_CI 0x10 /* all entries are cache-inhibited */
269: #define PVH_PTPAGE 0x20 /* entry maps a page table page */
270:
271: struct pv_header *pv_table;
1.1 chs 272: TAILQ_HEAD(pv_page_list, pv_page) pv_page_freelist;
273: int pv_nfree;
274:
1.59 tsutsui 275: #ifdef CACHE_HAVE_VAC
1.58 tsutsui 276: u_int	pmap_aliasmask;	/* separation at which VA aliasing ok */
1.1 chs 277: #endif
278: #if defined(M68040) || defined(M68060)
1.53 tsutsui 279: u_int protostfree; /* prototype (default) free ST map */
1.1 chs 280: #endif
281:
282: pt_entry_t *caddr1_pte; /* PTE for CADDR1 */
283: pt_entry_t *caddr2_pte; /* PTE for CADDR2 */
284:
285: struct pool pmap_pmap_pool; /* memory pool for pmap structures */
286:
1.20 tsutsui 287: struct pv_entry *pmap_alloc_pv(void);
288: void pmap_free_pv(struct pv_entry *);
1.1 chs 289:
1.45 thorpej 290: #define PAGE_IS_MANAGED(pa) (pmap_initialized && uvm_pageismanaged(pa))
1.1 chs 291:
1.46 thorpej 292: static inline struct pv_header *
1.38 tsutsui 293: pa_to_pvh(paddr_t pa)
294: {
295: int bank, pg = 0; /* XXX gcc4 -Wuninitialized */
296:
297: bank = vm_physseg_find(atop((pa)), &pg);
1.57 uebayasi 298: return &VM_PHYSMEM_PTR(bank)->pmseg.pvheader[pg];
1.38 tsutsui 299: }
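
/*
 * Typical use of the lookup above: check PAGE_IS_MANAGED() first,
 * since pa_to_pvh() assumes vm_physseg_find() succeeds.  A minimal
 * sketch:
 *
 *	if (PAGE_IS_MANAGED(pa)) {
 *		struct pv_header *pvh = pa_to_pvh(pa);
 *		struct pv_entry *pv = &pvh->pvh_first;
 *		...
 *	}
 */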
1.1 chs 300:
301: /*
302: * Internal routines
303: */
1.20 tsutsui 304: void pmap_remove_mapping(pmap_t, vaddr_t, pt_entry_t *, int);
1.23 thorpej 305: bool pmap_testbit(paddr_t, int);
306: bool pmap_changebit(paddr_t, int, int);
1.24 tsutsui 307: int pmap_enter_ptpage(pmap_t, vaddr_t, bool);
1.20 tsutsui 308: void pmap_ptpage_addref(vaddr_t);
309: int pmap_ptpage_delref(vaddr_t);
310: void pmap_pinit(pmap_t);
311: void pmap_release(pmap_t);
1.1 chs 312:
313: #ifdef DEBUG
1.20 tsutsui 314: void pmap_pvdump(paddr_t);
315: void pmap_check_wiring(const char *, vaddr_t);
1.1 chs 316: #endif
317:
318: /* pmap_remove_mapping flags */
319: #define PRM_TFLUSH 0x01
320: #define PRM_CFLUSH 0x02
321: #define PRM_KEEPPTPAGE 0x04
322:
323: /*
1.52 tsutsui 324: * pmap_bootstrap_finalize: [ INTERFACE ]
325: *
326: * Initialize lwp0 uarea, curlwp, and curpcb after MMU is turned on,
327: * using lwp0uarea variable saved during pmap_bootstrap().
328: */
329: void
330: pmap_bootstrap_finalize(void)
331: {
332:
1.55 tsutsui 333: #if !defined(amiga) && !defined(atari)
334: /*
335: * XXX
336: * amiga and atari have different pmap initialization functions
337: * and they require this earlier.
338: */
339: uvmexp.pagesize = NBPG;
340: uvm_setpagesize();
341: #endif
342:
1.54 tsutsui 343: /*
344: * Initialize protection array.
 345:	 * XXX: Could this have port-specific values? Can't this be static?
346: */
347: protection_codes[VM_PROT_NONE|VM_PROT_NONE|VM_PROT_NONE] = 0;
348: protection_codes[VM_PROT_READ|VM_PROT_NONE|VM_PROT_NONE] = PG_RO;
349: protection_codes[VM_PROT_READ|VM_PROT_NONE|VM_PROT_EXECUTE] = PG_RO;
350: protection_codes[VM_PROT_NONE|VM_PROT_NONE|VM_PROT_EXECUTE] = PG_RO;
351: protection_codes[VM_PROT_NONE|VM_PROT_WRITE|VM_PROT_NONE] = PG_RW;
352: protection_codes[VM_PROT_NONE|VM_PROT_WRITE|VM_PROT_EXECUTE] = PG_RW;
353: protection_codes[VM_PROT_READ|VM_PROT_WRITE|VM_PROT_NONE] = PG_RW;
354: protection_codes[VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE] = PG_RW;
355:
356: /*
357: * Initialize pmap_kernel().
358: */
359: pmap_kernel()->pm_stpa = (st_entry_t *)Sysseg_pa;
360: pmap_kernel()->pm_stab = Sysseg;
361: pmap_kernel()->pm_ptab = Sysmap;
362: #if defined(M68040) || defined(M68060)
363: if (mmutype == MMU_68040)
364: pmap_kernel()->pm_stfree = protostfree;
365: #endif
366: pmap_kernel()->pm_count = 1;
367:
368: /*
369: * Initialize lwp0 uarea, curlwp, and curpcb.
370: */
1.52 tsutsui 371: memset((void *)lwp0uarea, 0, USPACE);
372: uvm_lwp_setuarea(&lwp0, lwp0uarea);
373: curlwp = &lwp0;
374: curpcb = lwp_getpcb(&lwp0);
375: }
376:
377: /*
1.5 thorpej 378: * pmap_virtual_space: [ INTERFACE ]
379: *
380: * Report the range of available kernel virtual address
381: * space to the VM system during bootstrap.
382: *
383: * This is only an interface function if we do not use
384: * pmap_steal_memory()!
385: *
386: * Note: no locking is necessary in this function.
387: */
388: void
1.43 dsl 389: pmap_virtual_space(vaddr_t *vstartp, vaddr_t *vendp)
1.5 thorpej 390: {
391:
392: *vstartp = virtual_avail;
393: *vendp = virtual_end;
394: }
395:
396: /*
1.1 chs 397: * pmap_init: [ INTERFACE ]
398: *
399: * Initialize the pmap module. Called by vm_init(), to initialize any
400: * structures that the pmap system needs to map virtual memory.
401: *
402: * Note: no locking is necessary in this function.
403: */
404: void
1.20 tsutsui 405: pmap_init(void)
1.1 chs 406: {
407: vaddr_t addr, addr2;
408: vsize_t s;
1.46 thorpej 409: struct pv_header *pvh;
1.1 chs 410: int rv;
411: int npages;
412: int bank;
413:
414: PMAP_DPRINTF(PDB_FOLLOW, ("pmap_init()\n"));
415:
416: /*
417: * Before we do anything else, initialize the PTE pointers
418: * used by pmap_zero_page() and pmap_copy_page().
419: */
420: caddr1_pte = pmap_pte(pmap_kernel(), CADDR1);
421: caddr2_pte = pmap_pte(pmap_kernel(), CADDR2);
422:
423: PMAP_DPRINTF(PDB_INIT,
424: ("pmap_init: Sysseg %p, Sysmap %p, Sysptmap %p\n",
425: Sysseg, Sysmap, Sysptmap));
426: PMAP_DPRINTF(PDB_INIT,
427: (" pstart %lx, pend %lx, vstart %lx, vend %lx\n",
428: avail_start, avail_end, virtual_avail, virtual_end));
429:
430: /*
431: * Allocate memory for random pmap data structures. Includes the
432: * initial segment table, pv_head_table and pmap_attributes.
433: */
434: for (page_cnt = 0, bank = 0; bank < vm_nphysseg; bank++)
1.57 uebayasi 435: page_cnt += VM_PHYSMEM_PTR(bank)->end - VM_PHYSMEM_PTR(bank)->start;
1.1 chs 436: s = M68K_STSIZE; /* Segtabzero */
1.46 thorpej 437: s += page_cnt * sizeof(struct pv_header); /* pv table */
1.1 chs 438: s = round_page(s);
1.14 yamt 439: addr = uvm_km_alloc(kernel_map, s, 0, UVM_KMF_WIRED | UVM_KMF_ZERO);
1.1 chs 440: if (addr == 0)
441: panic("pmap_init: can't allocate data structures");
442:
1.20 tsutsui 443: Segtabzero = (st_entry_t *)addr;
444: (void)pmap_extract(pmap_kernel(), addr,
445: (paddr_t *)(void *)&Segtabzeropa);
1.1 chs 446: addr += M68K_STSIZE;
447:
1.46 thorpej 448: pv_table = (struct pv_header *) addr;
449: addr += page_cnt * sizeof(struct pv_header);
1.1 chs 450:
451: PMAP_DPRINTF(PDB_INIT, ("pmap_init: %lx bytes: page_cnt %x s0 %p(%p) "
1.46 thorpej 452: "tbl %p\n",
1.1 chs 453: s, page_cnt, Segtabzero, Segtabzeropa,
1.46 thorpej 454: pv_table));
1.1 chs 455:
456: /*
457: * Now that the pv and attribute tables have been allocated,
458: * assign them to the memory segments.
459: */
1.46 thorpej 460: pvh = pv_table;
1.1 chs 461: for (bank = 0; bank < vm_nphysseg; bank++) {
1.57 uebayasi 462: npages = VM_PHYSMEM_PTR(bank)->end - VM_PHYSMEM_PTR(bank)->start;
463: VM_PHYSMEM_PTR(bank)->pmseg.pvheader = pvh;
1.46 thorpej 464: pvh += npages;
1.1 chs 465: }
466:
467: /*
1.5 thorpej 468: * Allocate physical memory for kernel PT pages and their management.
469: * We need 1 PT page per possible task plus some slop.
470: */
471: npages = min(atop(M68K_MAX_KPTSIZE), maxproc+16);
472: s = ptoa(npages) + round_page(npages * sizeof(struct kpt_page));
473:
474: /*
475: * Verify that space will be allocated in region for which
476: * we already have kernel PT pages.
477: */
478: addr = 0;
479: rv = uvm_map(kernel_map, &addr, s, NULL, UVM_UNKNOWN_OFFSET, 0,
1.20 tsutsui 480: UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
481: UVM_ADV_RANDOM, UVM_FLAG_NOMERGE));
1.5 thorpej 482: if (rv != 0 || (addr + s) >= (vaddr_t)Sysmap)
483: panic("pmap_init: kernel PT too small");
484: uvm_unmap(kernel_map, addr, addr + s);
485:
486: /*
487: * Now allocate the space and link the pages together to
488: * form the KPT free list.
489: */
1.14 yamt 490: addr = uvm_km_alloc(kernel_map, s, 0, UVM_KMF_WIRED | UVM_KMF_ZERO);
1.5 thorpej 491: if (addr == 0)
492: panic("pmap_init: cannot allocate KPT free list");
493: s = ptoa(npages);
494: addr2 = addr + s;
495: kpt_pages = &((struct kpt_page *)addr2)[npages];
496: kpt_free_list = NULL;
497: do {
498: addr2 -= PAGE_SIZE;
499: (--kpt_pages)->kpt_next = kpt_free_list;
500: kpt_free_list = kpt_pages;
501: kpt_pages->kpt_va = addr2;
502: (void) pmap_extract(pmap_kernel(), addr2,
503: (paddr_t *)&kpt_pages->kpt_pa);
504: } while (addr != addr2);
505:
506: PMAP_DPRINTF(PDB_INIT, ("pmap_init: KPT: %ld pages from %lx to %lx\n",
507: atop(s), addr, addr + s));
508:
509: /*
1.1 chs 510: * Allocate the segment table map and the page table map.
511: */
512: s = maxproc * M68K_STSIZE;
1.25 thorpej 513: st_map = uvm_km_suballoc(kernel_map, &addr, &addr2, s, 0, false,
1.1 chs 514: &st_map_store);
515:
516: addr = M68K_PTBASE;
517: if ((M68K_PTMAXSIZE / M68K_MAX_PTSIZE) < maxproc) {
518: s = M68K_PTMAXSIZE;
519: /*
520: * XXX We don't want to hang when we run out of
521: * page tables, so we lower maxproc so that fork()
522: * will fail instead. Note that root could still raise
523: * this value via sysctl(3).
524: */
525: maxproc = (M68K_PTMAXSIZE / M68K_MAX_PTSIZE);
526: } else
527: s = (maxproc * M68K_MAX_PTSIZE);
528: pt_map = uvm_km_suballoc(kernel_map, &addr, &addr2, s, 0,
1.25 thorpej 529: true, &pt_map_store);
1.1 chs 530:
531: #if defined(M68040) || defined(M68060)
532: if (mmutype == MMU_68040) {
533: protostfree = ~l2tobm(0);
534: for (rv = MAXUL2SIZE; rv < sizeof(protostfree)*NBBY; rv++)
535: protostfree &= ~l2tobm(rv);
536: }
537: #endif
538:
539: /*
540: * Initialize the pmap pools.
541: */
542: pool_init(&pmap_pmap_pool, sizeof(struct pmap), 0, 0, 0, "pmappl",
1.28 ad 543: &pool_allocator_nointr, IPL_NONE);
1.1 chs 544:
545: /*
546: * Now that this is done, mark the pages shared with the
547: * hardware page table search as non-CCB (actually, as CI).
548: *
549: * XXX Hm. Given that this is in the kernel map, can't we just
550: * use the va's?
551: */
552: #ifdef M68060
553: #if defined(M68020) || defined(M68030) || defined(M68040)
554: if (cputype == CPU_68060)
555: #endif
556: {
557: struct kpt_page *kptp = kpt_free_list;
558: paddr_t paddr;
559:
560: while (kptp) {
561: pmap_changebit(kptp->kpt_pa, PG_CI, ~PG_CCB);
562: kptp = kptp->kpt_next;
563: }
564:
565: paddr = (paddr_t)Segtabzeropa;
566: while (paddr < (paddr_t)Segtabzeropa + M68K_STSIZE) {
567: pmap_changebit(paddr, PG_CI, ~PG_CCB);
1.3 thorpej 568: paddr += PAGE_SIZE;
1.1 chs 569: }
570:
571: DCIS();
572: }
573: #endif
574:
575: /*
576: * Now it is safe to enable pv_table recording.
577: */
1.25 thorpej 578: pmap_initialized = true;
1.1 chs 579: }
580:
581: /*
582: * pmap_alloc_pv:
583: *
584: * Allocate a pv_entry.
585: */
586: struct pv_entry *
1.20 tsutsui 587: pmap_alloc_pv(void)
1.1 chs 588: {
589: struct pv_page *pvp;
590: struct pv_entry *pv;
591: int i;
592:
593: if (pv_nfree == 0) {
1.14 yamt 594: pvp = (struct pv_page *)uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
595: UVM_KMF_WIRED | UVM_KMF_ZERO);
1.39 tsutsui 596: if (pvp == NULL)
1.14 yamt 597: panic("pmap_alloc_pv: uvm_km_alloc() failed");
1.1 chs 598: pvp->pvp_pgi.pgi_freelist = pv = &pvp->pvp_pv[1];
599: for (i = NPVPPG - 2; i; i--, pv++)
600: pv->pv_next = pv + 1;
1.39 tsutsui 601: pv->pv_next = NULL;
1.1 chs 602: pv_nfree += pvp->pvp_pgi.pgi_nfree = NPVPPG - 1;
603: TAILQ_INSERT_HEAD(&pv_page_freelist, pvp, pvp_pgi.pgi_list);
604: pv = &pvp->pvp_pv[0];
605: } else {
606: --pv_nfree;
1.39 tsutsui 607: pvp = TAILQ_FIRST(&pv_page_freelist);
1.1 chs 608: if (--pvp->pvp_pgi.pgi_nfree == 0) {
609: TAILQ_REMOVE(&pv_page_freelist, pvp, pvp_pgi.pgi_list);
610: }
611: pv = pvp->pvp_pgi.pgi_freelist;
612: #ifdef DIAGNOSTIC
1.39 tsutsui 613: if (pv == NULL)
1.1 chs 614: panic("pmap_alloc_pv: pgi_nfree inconsistent");
615: #endif
616: pvp->pvp_pgi.pgi_freelist = pv->pv_next;
617: }
618: return pv;
619: }
620:
621: /*
622: * pmap_free_pv:
623: *
624: * Free a pv_entry.
625: */
626: void
1.20 tsutsui 627: pmap_free_pv(struct pv_entry *pv)
1.1 chs 628: {
629: struct pv_page *pvp;
630:
1.20 tsutsui 631: pvp = (struct pv_page *)trunc_page((vaddr_t)pv);
1.1 chs 632: switch (++pvp->pvp_pgi.pgi_nfree) {
633: case 1:
634: TAILQ_INSERT_TAIL(&pv_page_freelist, pvp, pvp_pgi.pgi_list);
635: default:
636: pv->pv_next = pvp->pvp_pgi.pgi_freelist;
637: pvp->pvp_pgi.pgi_freelist = pv;
638: ++pv_nfree;
639: break;
640: case NPVPPG:
641: pv_nfree -= NPVPPG - 1;
642: TAILQ_REMOVE(&pv_page_freelist, pvp, pvp_pgi.pgi_list);
1.14 yamt 643: uvm_km_free(kernel_map, (vaddr_t)pvp, PAGE_SIZE, UVM_KMF_WIRED);
1.1 chs 644: break;
645: }
646: }
647:
648: /*
649: * pmap_collect_pv:
650: *
651: * Perform compaction on the PV list, called via pmap_collect().
652: */
1.49 rmind 653: #ifdef notyet
654: void
1.20 tsutsui 655: pmap_collect_pv(void)
1.1 chs 656: {
657: struct pv_page_list pv_page_collectlist;
658: struct pv_page *pvp, *npvp;
659: struct pv_entry *ph, *ppv, *pv, *npv;
1.46 thorpej 660: struct pv_header *pvh;
1.1 chs 661: int s;
662:
663: TAILQ_INIT(&pv_page_collectlist);
664:
1.39 tsutsui 665: for (pvp = TAILQ_FIRST(&pv_page_freelist); pvp != NULL; pvp = npvp) {
1.1 chs 666: if (pv_nfree < NPVPPG)
667: break;
1.39 tsutsui 668: npvp = TAILQ_NEXT(&pvp->pvp_pgi, pgi_list);
1.1 chs 669: if (pvp->pvp_pgi.pgi_nfree > NPVPPG / 3) {
670: TAILQ_REMOVE(&pv_page_freelist, pvp, pvp_pgi.pgi_list);
671: TAILQ_INSERT_TAIL(&pv_page_collectlist, pvp,
672: pvp_pgi.pgi_list);
673: pv_nfree -= NPVPPG;
674: pvp->pvp_pgi.pgi_nfree = -1;
675: }
676: }
677:
1.39 tsutsui 678: if (TAILQ_FIRST(&pv_page_collectlist) == NULL)
1.1 chs 679: return;
680:
1.46 thorpej 681: for (pvh = &pv_table[page_cnt - 1]; pvh >= &pv_table[0]; pvh--) {
682: ph = &pvh->pvh_first;
1.39 tsutsui 683: if (ph->pv_pmap == NULL)
1.1 chs 684: continue;
685: s = splvm();
1.39 tsutsui 686: for (ppv = ph; (pv = ppv->pv_next) != NULL; ) {
1.1 chs 687: pvp = (struct pv_page *) trunc_page((vaddr_t)pv);
688: if (pvp->pvp_pgi.pgi_nfree == -1) {
1.39 tsutsui 689: pvp = TAILQ_FIRST(&pv_page_freelist);
1.1 chs 690: if (--pvp->pvp_pgi.pgi_nfree == 0) {
691: TAILQ_REMOVE(&pv_page_freelist, pvp,
692: pvp_pgi.pgi_list);
693: }
694: npv = pvp->pvp_pgi.pgi_freelist;
695: #ifdef DIAGNOSTIC
1.39 tsutsui 696: if (npv == NULL)
1.20 tsutsui 697: panic("pmap_collect_pv: "
698: "pgi_nfree inconsistent");
1.1 chs 699: #endif
700: pvp->pvp_pgi.pgi_freelist = npv->pv_next;
701: *npv = *pv;
702: ppv->pv_next = npv;
703: ppv = npv;
704: } else
705: ppv = pv;
706: }
707: splx(s);
708: }
709:
1.39 tsutsui 710: for (pvp = TAILQ_FIRST(&pv_page_collectlist); pvp != NULL; pvp = npvp) {
711: npvp = TAILQ_NEXT(&pvp->pvp_pgi, pgi_list);
1.14 yamt 712: uvm_km_free(kernel_map, (vaddr_t)pvp, PAGE_SIZE, UVM_KMF_WIRED);
1.1 chs 713: }
714: }
1.49 rmind 715: #endif
1.1 chs 716:
717: /*
718: * pmap_map:
719: *
720: * Used to map a range of physical addresses into kernel
721: * virtual address space.
722: *
723: * For now, VM is already on, we only need to map the
724: * specified memory.
725: *
726: * Note: THIS FUNCTION IS DEPRECATED, AND SHOULD BE REMOVED!
727: */
728: vaddr_t
1.20 tsutsui 729: pmap_map(vaddr_t va, paddr_t spa, paddr_t epa, int prot)
1.1 chs 730: {
731:
732: PMAP_DPRINTF(PDB_FOLLOW,
733: ("pmap_map(%lx, %lx, %lx, %x)\n", va, spa, epa, prot));
734:
735: while (spa < epa) {
736: pmap_enter(pmap_kernel(), va, spa, prot, 0);
1.3 thorpej 737: va += PAGE_SIZE;
738: spa += PAGE_SIZE;
1.1 chs 739: }
740: pmap_update(pmap_kernel());
1.20 tsutsui 741: return va;
1.1 chs 742: }
743:
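/*
 * A hypothetical boot-time caller of pmap_map(), mapping a device
 * register block into kernel VA space (the addresses are made up):
 *
 *	va = pmap_map(virtual_avail, 0x00d00000, 0x00d10000,
 *	    VM_PROT_READ|VM_PROT_WRITE);
 *
 * The return value is the first VA past the new mappings, so a
 * caller allocating from virtual_avail would advance it to va.
 */
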
744: /*
745: * pmap_create: [ INTERFACE ]
746: *
747: * Create and return a physical map.
748: *
749: * Note: no locking is necessary in this function.
750: */
751: pmap_t
1.20 tsutsui 752: pmap_create(void)
1.1 chs 753: {
754: struct pmap *pmap;
755:
756: PMAP_DPRINTF(PDB_FOLLOW|PDB_CREATE,
757: ("pmap_create()\n"));
758:
759: pmap = pool_get(&pmap_pmap_pool, PR_WAITOK);
760: memset(pmap, 0, sizeof(*pmap));
761: pmap_pinit(pmap);
1.20 tsutsui 762: return pmap;
1.1 chs 763: }
764:
765: /*
766: * pmap_pinit:
767: *
768: * Initialize a preallocated and zeroed pmap structure.
769: *
770: * Note: THIS FUNCTION SHOULD BE MOVED INTO pmap_create()!
771: */
772: void
1.20 tsutsui 773: pmap_pinit(struct pmap *pmap)
1.1 chs 774: {
775:
776: PMAP_DPRINTF(PDB_FOLLOW|PDB_CREATE,
777: ("pmap_pinit(%p)\n", pmap));
778:
779: /*
780: * No need to allocate page table space yet but we do need a
781: * valid segment table. Initially, we point everyone at the
782: * "null" segment table. On the first pmap_enter, a real
783: * segment table will be allocated.
784: */
785: pmap->pm_stab = Segtabzero;
786: pmap->pm_stpa = Segtabzeropa;
787: #if defined(M68040) || defined(M68060)
788: #if defined(M68020) || defined(M68030)
789: if (mmutype == MMU_68040)
790: #endif
791: pmap->pm_stfree = protostfree;
792: #endif
793: pmap->pm_count = 1;
794: }
795:
796: /*
797: * pmap_destroy: [ INTERFACE ]
798: *
799: * Drop the reference count on the specified pmap, releasing
800: * all resources if the reference count drops to zero.
801: */
802: void
1.20 tsutsui 803: pmap_destroy(pmap_t pmap)
1.1 chs 804: {
805: int count;
806:
807: PMAP_DPRINTF(PDB_FOLLOW, ("pmap_destroy(%p)\n", pmap));
808:
1.60.2.1! jruoho 809: count = atomic_dec_uint_nv(&pmap->pm_count);
1.1 chs 810: if (count == 0) {
811: pmap_release(pmap);
812: pool_put(&pmap_pmap_pool, pmap);
813: }
814: }
815:
816: /*
817: * pmap_release:
818: *
 819:  *	Release the resources held by a pmap.
820: *
821: * Note: THIS FUNCTION SHOULD BE MOVED INTO pmap_destroy().
822: */
823: void
1.20 tsutsui 824: pmap_release(pmap_t pmap)
1.1 chs 825: {
826:
827: PMAP_DPRINTF(PDB_FOLLOW, ("pmap_release(%p)\n", pmap));
828:
829: #ifdef notdef /* DIAGNOSTIC */
830: /* count would be 0 from pmap_destroy... */
831: if (pmap->pm_count != 1)
832: panic("pmap_release count");
833: #endif
834:
835: if (pmap->pm_ptab) {
836: pmap_remove(pmap_kernel(), (vaddr_t)pmap->pm_ptab,
837: (vaddr_t)pmap->pm_ptab + M68K_MAX_PTSIZE);
1.14 yamt 838: uvm_km_pgremove((vaddr_t)pmap->pm_ptab,
839: (vaddr_t)pmap->pm_ptab + M68K_MAX_PTSIZE);
840: uvm_km_free(pt_map, (vaddr_t)pmap->pm_ptab,
841: M68K_MAX_PTSIZE, UVM_KMF_VAONLY);
1.1 chs 842: }
843: KASSERT(pmap->pm_stab == Segtabzero);
844: }
845:
846: /*
847: * pmap_reference: [ INTERFACE ]
848: *
849: * Add a reference to the specified pmap.
850: */
851: void
1.20 tsutsui 852: pmap_reference(pmap_t pmap)
1.1 chs 853: {
854: PMAP_DPRINTF(PDB_FOLLOW, ("pmap_reference(%p)\n", pmap));
855:
1.60.2.1! jruoho 856: atomic_inc_uint(&pmap->pm_count);
1.1 chs 857: }
858:
859: /*
860: * pmap_activate: [ INTERFACE ]
861: *
862: * Activate the pmap used by the specified process. This includes
863: * reloading the MMU context if the current process, and marking
864: * the pmap in use by the processor.
865: *
866: * Note: we may only use spin locks here, since we are called
867: * by a critical section in cpu_switch()!
868: */
869: void
1.20 tsutsui 870: pmap_activate(struct lwp *l)
1.1 chs 871: {
1.20 tsutsui 872: pmap_t pmap = l->l_proc->p_vmspace->vm_map.pmap;
1.1 chs 873:
874: PMAP_DPRINTF(PDB_FOLLOW|PDB_SEGTAB,
1.2 thorpej 875: ("pmap_activate(%p)\n", l));
1.1 chs 876:
1.30 mhitch 877: PMAP_ACTIVATE(pmap, (curlwp->l_flag & LW_IDLE) != 0 ||
878: l->l_proc == curproc);
1.1 chs 879: }
880:
881: /*
882: * pmap_deactivate: [ INTERFACE ]
883: *
884: * Mark that the pmap used by the specified process is no longer
885: * in use by the processor.
886: *
887: * The comment above pmap_activate() wrt. locking applies here,
888: * as well.
889: */
890: void
1.20 tsutsui 891: pmap_deactivate(struct lwp *l)
1.1 chs 892: {
893:
894: /* No action necessary in this pmap implementation. */
895: }
896:
897: /*
898: * pmap_remove: [ INTERFACE ]
899: *
900: * Remove the given range of addresses from the specified map.
901: *
902: * It is assumed that the start and end are properly
903: * rounded to the page size.
904: */
905: void
1.20 tsutsui 906: pmap_remove(pmap_t pmap, vaddr_t sva, vaddr_t eva)
1.1 chs 907: {
908: vaddr_t nssva;
909: pt_entry_t *pte;
910: int flags;
1.59 tsutsui 911: #ifdef CACHE_HAVE_VAC
1.25 thorpej 912: bool firstpage = true, needcflush = false;
1.1 chs 913: #endif
914:
915: PMAP_DPRINTF(PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT,
916: ("pmap_remove(%p, %lx, %lx)\n", pmap, sva, eva));
917:
918: flags = active_pmap(pmap) ? PRM_TFLUSH : 0;
919: while (sva < eva) {
920: nssva = m68k_trunc_seg(sva) + NBSEG;
921: if (nssva == 0 || nssva > eva)
922: nssva = eva;
923:
924: /*
925: * Invalidate every valid mapping within this segment.
926: */
927:
928: pte = pmap_pte(pmap, sva);
929: while (sva < nssva) {
930:
931: /*
932: * If this segment is unallocated,
933: * skip to the next segment boundary.
934: */
935:
936: if (!pmap_ste_v(pmap, sva)) {
937: sva = nssva;
938: break;
939: }
940:
1.49 rmind 941: if (pmap_pte_v(pte)) {
1.59 tsutsui 942: #ifdef CACHE_HAVE_VAC
1.1 chs 943: if (pmap_aliasmask) {
944:
945: /*
946: * Purge kernel side of VAC to ensure
947: * we get the correct state of any
948: * hardware maintained bits.
949: */
950:
951: if (firstpage) {
952: DCIS();
953: }
954:
955: /*
956: * Remember if we may need to
957: * flush the VAC due to a non-CI
958: * mapping.
959: */
960:
961: if (!needcflush && !pmap_pte_ci(pte))
1.25 thorpej 962: needcflush = true;
1.1 chs 963:
964: }
1.25 thorpej 965: firstpage = false;
1.1 chs 966: #endif
967: pmap_remove_mapping(pmap, sva, pte, flags);
968: }
969: pte++;
1.3 thorpej 970: sva += PAGE_SIZE;
1.1 chs 971: }
972: }
973:
1.59 tsutsui 974: #ifdef CACHE_HAVE_VAC
1.1 chs 975:
976: /*
977: * Didn't do anything, no need for cache flushes
978: */
979:
980: if (firstpage)
981: return;
982:
983: /*
984: * In a couple of cases, we don't need to worry about flushing
985: * the VAC:
986: * 1. if this is a kernel mapping,
987: * we have already done it
988: * 2. if it is a user mapping not for the current process,
989: * it won't be there
990: */
991:
992: if (pmap_aliasmask && !active_user_pmap(pmap))
1.25 thorpej 993: needcflush = false;
1.1 chs 994: if (needcflush) {
995: if (pmap == pmap_kernel()) {
996: DCIS();
997: } else {
998: DCIU();
999: }
1000: }
1001: #endif
1002: }
1003:
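/*
 * The per-segment walk above is the standard pattern in this file;
 * a condensed sketch of the boundary arithmetic (NBSEG is the span
 * mapped by one segment table entry):
 *
 *	nssva = m68k_trunc_seg(sva) + NBSEG;	next segment boundary
 *	if (nssva == 0 || nssva > eva)		0 catches wraparound
 *		nssva = eva;
 *	if (!pmap_ste_v(pmap, sva))		whole segment invalid:
 *		sva = nssva;			skip it in one step
 */
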
1004: /*
1005: * pmap_page_protect: [ INTERFACE ]
1006: *
1007: * Lower the permission for all mappings to a given page to
1008: * the permissions specified.
1009: */
1010: void
1.20 tsutsui 1011: pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
1.1 chs 1012: {
1013: paddr_t pa = VM_PAGE_TO_PHYS(pg);
1.46 thorpej 1014: struct pv_header *pvh;
1.1 chs 1015: struct pv_entry *pv;
1016: pt_entry_t *pte;
1017: int s;
1018:
1019: #ifdef DEBUG
1020: if ((pmapdebug & (PDB_FOLLOW|PDB_PROTECT)) ||
1021: (prot == VM_PROT_NONE && (pmapdebug & PDB_REMOVE)))
1022: printf("pmap_page_protect(%p, %x)\n", pg, prot);
1023: #endif
1024:
1025: switch (prot) {
1026: case VM_PROT_READ|VM_PROT_WRITE:
1027: case VM_PROT_ALL:
1028: return;
1029:
1030: /* copy_on_write */
1031: case VM_PROT_READ:
1032: case VM_PROT_READ|VM_PROT_EXECUTE:
1033: pmap_changebit(pa, PG_RO, ~0);
1034: return;
1035:
1036: /* remove_all */
1037: default:
1038: break;
1039: }
1040:
1.46 thorpej 1041: pvh = pa_to_pvh(pa);
1042: pv = &pvh->pvh_first;
1.1 chs 1043: s = splvm();
1044: while (pv->pv_pmap != NULL) {
1045:
1046: pte = pmap_pte(pv->pv_pmap, pv->pv_va);
1047: #ifdef DEBUG
1048: if (!pmap_ste_v(pv->pv_pmap, pv->pv_va) ||
1049: pmap_pte_pa(pte) != pa)
1050: panic("pmap_page_protect: bad mapping");
1051: #endif
1052: pmap_remove_mapping(pv->pv_pmap, pv->pv_va,
1053: pte, PRM_TFLUSH|PRM_CFLUSH);
1054: }
1055: splx(s);
1056: }
1057:
1058: /*
1059: * pmap_protect: [ INTERFACE ]
1060: *
1.29 tnn 1061: * Set the physical protection on the specified range of this map
1.1 chs 1062: * as requested.
1063: */
1064: void
1.20 tsutsui 1065: pmap_protect(pmap_t pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
1.1 chs 1066: {
1067: vaddr_t nssva;
1068: pt_entry_t *pte;
1.23 thorpej 1069: bool firstpage, needtflush;
1.1 chs 1070: int isro;
1071:
1072: PMAP_DPRINTF(PDB_FOLLOW|PDB_PROTECT,
1073: ("pmap_protect(%p, %lx, %lx, %x)\n",
1074: pmap, sva, eva, prot));
1075:
1076: #ifdef PMAPSTATS
1077: protect_stats.calls++;
1078: #endif
1079: if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
1080: pmap_remove(pmap, sva, eva);
1081: return;
1082: }
1083: isro = pte_prot(pmap, prot);
1084: needtflush = active_pmap(pmap);
1.25 thorpej 1085: firstpage = true;
1.1 chs 1086: while (sva < eva) {
1087: nssva = m68k_trunc_seg(sva) + NBSEG;
1088: if (nssva == 0 || nssva > eva)
1089: nssva = eva;
1090:
1091: /*
1092: * If VA belongs to an unallocated segment,
1093: * skip to the next segment boundary.
1094: */
1095:
1096: if (!pmap_ste_v(pmap, sva)) {
1097: sva = nssva;
1098: continue;
1099: }
1100:
1101: /*
1102: * Change protection on mapping if it is valid and doesn't
1103: * already have the correct protection.
1104: */
1105:
1106: pte = pmap_pte(pmap, sva);
1107: while (sva < nssva) {
1108: if (pmap_pte_v(pte) && pmap_pte_prot_chg(pte, isro)) {
1.59 tsutsui 1109: #ifdef CACHE_HAVE_VAC
1.1 chs 1110:
1111: /*
1112: * Purge kernel side of VAC to ensure we
1113: * get the correct state of any hardware
1114: * maintained bits.
1115: *
1116: * XXX do we need to clear the VAC in
1117: * general to reflect the new protection?
1118: */
1119:
1120: if (firstpage && pmap_aliasmask)
1121: DCIS();
1122: #endif
1123:
1124: #if defined(M68040) || defined(M68060)
1125:
1126: /*
1127: * Clear caches if making RO (see section
1128: * "7.3 Cache Coherency" in the manual).
1129: */
1130:
1131: #if defined(M68020) || defined(M68030)
1132: if (isro && mmutype == MMU_68040)
1133: #else
1134: if (isro)
1135: #endif
1136: {
1137: paddr_t pa = pmap_pte_pa(pte);
1138:
1139: DCFP(pa);
1140: ICPP(pa);
1141: }
1142: #endif
1143: pmap_pte_set_prot(pte, isro);
1144: if (needtflush)
1145: TBIS(sva);
1.25 thorpej 1146: firstpage = false;
1.1 chs 1147: }
1148: pte++;
1.3 thorpej 1149: sva += PAGE_SIZE;
1.1 chs 1150: }
1151: }
1152: }
1153:
1154: /*
1155: * pmap_enter: [ INTERFACE ]
1156: *
1157: * Insert the given physical page (pa) at
1158: * the specified virtual address (va) in the
1159: * target physical map with the protection requested.
1160: *
1161: * If specified, the page will be wired down, meaning
1162: * that the related pte cannot be reclaimed.
1163: *
1164: * Note: This is the only routine which MAY NOT lazy-evaluate
 1165:  *	or lose information.  That is, this routine must actually
1166: * insert this page into the given map NOW.
1167: */
1168: int
1.44 cegger 1169: pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
1.1 chs 1170: {
1171: pt_entry_t *pte;
1172: int npte;
1173: paddr_t opa;
1.25 thorpej 1174: bool cacheable = true;
1175: bool checkpv = true;
1.23 thorpej 1176: bool wired = (flags & PMAP_WIRED) != 0;
1177: bool can_fail = (flags & PMAP_CANFAIL) != 0;
1.1 chs 1178:
1179: PMAP_DPRINTF(PDB_FOLLOW|PDB_ENTER,
1180: ("pmap_enter(%p, %lx, %lx, %x, %x)\n",
1181: pmap, va, pa, prot, wired));
1182:
1183: #ifdef DIAGNOSTIC
1184: /*
1185: * pmap_enter() should never be used for CADDR1 and CADDR2.
1186: */
1187: if (pmap == pmap_kernel() &&
1188: (va == (vaddr_t)CADDR1 || va == (vaddr_t)CADDR2))
1189: panic("pmap_enter: used for CADDR1 or CADDR2");
1190: #endif
1191:
1192: /*
1193: * For user mapping, allocate kernel VM resources if necessary.
1194: */
1.22 martin 1195: if (pmap->pm_ptab == NULL) {
1.1 chs 1196: pmap->pm_ptab = (pt_entry_t *)
1.14 yamt 1197: uvm_km_alloc(pt_map, M68K_MAX_PTSIZE, 0,
1.22 martin 1198: UVM_KMF_VAONLY |
1199: (can_fail ? UVM_KMF_NOWAIT : UVM_KMF_WAITVA));
1200: if (pmap->pm_ptab == NULL)
1201: return ENOMEM;
1202: }
1.1 chs 1203:
1204: /*
1205: * Segment table entry not valid, we need a new PT page
1206: */
1.22 martin 1207: if (!pmap_ste_v(pmap, va)) {
1208: int err = pmap_enter_ptpage(pmap, va, can_fail);
1209: if (err)
1210: return err;
1211: }
1.1 chs 1212:
1213: pa = m68k_trunc_page(pa);
1214: pte = pmap_pte(pmap, va);
1215: opa = pmap_pte_pa(pte);
1216:
1217: PMAP_DPRINTF(PDB_ENTER, ("enter: pte %p, *pte %x\n", pte, *pte));
1218:
1219: /*
1220: * Mapping has not changed, must be protection or wiring change.
1221: */
1222: if (opa == pa) {
1223: /*
1224: * Wiring change, just update stats.
1225: * We don't worry about wiring PT pages as they remain
1226: * resident as long as there are valid mappings in them.
1227: * Hence, if a user page is wired, the PT page will be also.
1228: */
1229: if (pmap_pte_w_chg(pte, wired ? PG_W : 0)) {
1230: PMAP_DPRINTF(PDB_ENTER,
1231: ("enter: wiring change -> %x\n", wired));
1232: if (wired)
1233: pmap->pm_stats.wired_count++;
1234: else
1235: pmap->pm_stats.wired_count--;
1236: }
1237: /*
1238: * Retain cache inhibition status
1239: */
1.25 thorpej 1240: checkpv = false;
1.1 chs 1241: if (pmap_pte_ci(pte))
1.25 thorpej 1242: cacheable = false;
1.1 chs 1243: goto validate;
1244: }
1245:
1246: /*
1247: * Mapping has changed, invalidate old range and fall through to
1248: * handle validating new mapping.
1249: */
1250: if (opa) {
1251: PMAP_DPRINTF(PDB_ENTER,
1252: ("enter: removing old mapping %lx\n", va));
1253: pmap_remove_mapping(pmap, va, pte,
1254: PRM_TFLUSH|PRM_CFLUSH|PRM_KEEPPTPAGE);
1255: }
1256:
1257: /*
1258: * If this is a new user mapping, increment the wiring count
1259: * on this PT page. PT pages are wired down as long as there
1260: * is a valid mapping in the page.
1261: */
1262: if (pmap != pmap_kernel())
1263: pmap_ptpage_addref(trunc_page((vaddr_t)pte));
1264:
1265: /*
1266: * Enter on the PV list if part of our managed memory
1267: * Note that we raise IPL while manipulating pv_table
1268: * since pmap_enter can be called at interrupt time.
1269: */
1270: if (PAGE_IS_MANAGED(pa)) {
1.46 thorpej 1271: struct pv_header *pvh;
1.1 chs 1272: struct pv_entry *pv, *npv;
1273: int s;
1274:
1.46 thorpej 1275: pvh = pa_to_pvh(pa);
1276: pv = &pvh->pvh_first;
1.1 chs 1277: s = splvm();
1278:
1279: PMAP_DPRINTF(PDB_ENTER,
1280: ("enter: pv at %p: %lx/%p/%p\n",
1281: pv, pv->pv_va, pv->pv_pmap, pv->pv_next));
1282: /*
1283: * No entries yet, use header as the first entry
1284: */
1285: if (pv->pv_pmap == NULL) {
1286: pv->pv_va = va;
1287: pv->pv_pmap = pmap;
1288: pv->pv_next = NULL;
1289: pv->pv_ptste = NULL;
1290: pv->pv_ptpmap = NULL;
1.46 thorpej 1291: pvh->pvh_attrs = 0;
1.1 chs 1292: }
1293: /*
1294: * There is at least one other VA mapping this page.
1295: * Place this entry after the header.
1296: */
1297: else {
1298: #ifdef DEBUG
1299: for (npv = pv; npv; npv = npv->pv_next)
1300: if (pmap == npv->pv_pmap && va == npv->pv_va)
1301: panic("pmap_enter: already in pv_tab");
1302: #endif
1303: npv = pmap_alloc_pv();
1304: npv->pv_va = va;
1305: npv->pv_pmap = pmap;
1306: npv->pv_next = pv->pv_next;
1307: npv->pv_ptste = NULL;
1308: npv->pv_ptpmap = NULL;
1309: pv->pv_next = npv;
1310:
1.59 tsutsui 1311: #ifdef CACHE_HAVE_VAC
1.1 chs 1312:
1313: /*
1314: * Since there is another logical mapping for the
1315: * same page we may need to cache-inhibit the
1316: * descriptors on those CPUs with external VACs.
1317: * We don't need to CI if:
1318: *
1319: * - No two mappings belong to the same user pmaps.
1320: * Since the cache is flushed on context switches
1321: * there is no problem between user processes.
1322: *
1323: * - Mappings within a single pmap are a certain
1324: * magic distance apart. VAs at these appropriate
1325: * boundaries map to the same cache entries or
1326: * otherwise don't conflict.
1327: *
1328: * To keep it simple, we only check for these special
1329: * cases if there are only two mappings, otherwise we
1330: * punt and always CI.
1331: *
1332: * Note that there are no aliasing problems with the
1333: * on-chip data-cache when the WA bit is set.
1334: */
1335:
1336: if (pmap_aliasmask) {
1.46 thorpej 1337: if (pvh->pvh_attrs & PVH_CI) {
1.1 chs 1338: PMAP_DPRINTF(PDB_CACHE,
1339: ("enter: pa %lx already CI'ed\n",
1340: pa));
1.25 thorpej 1341: checkpv = cacheable = false;
1.1 chs 1342: } else if (npv->pv_next ||
1343: ((pmap == pv->pv_pmap ||
1344: pmap == pmap_kernel() ||
1345: pv->pv_pmap == pmap_kernel()) &&
1346: ((pv->pv_va & pmap_aliasmask) !=
1347: (va & pmap_aliasmask)))) {
1348: PMAP_DPRINTF(PDB_CACHE,
1349: ("enter: pa %lx CI'ing all\n",
1350: pa));
1.25 thorpej 1351: cacheable = false;
1.46 thorpej 1352: pvh->pvh_attrs |= PVH_CI;
1.1 chs 1353: }
1354: }
1355: #endif
1356: }
1357:
1358: /*
1359: * Speed pmap_is_referenced() or pmap_is_modified() based
1360: * on the hint provided in access_type.
1361: */
1362: #ifdef DIAGNOSTIC
1363: if ((flags & VM_PROT_ALL) & ~prot)
1364: panic("pmap_enter: access_type exceeds prot");
1365: #endif
1366: if (flags & VM_PROT_WRITE)
1.46 thorpej 1367: pvh->pvh_attrs |= (PG_U|PG_M);
1.1 chs 1368: else if (flags & VM_PROT_ALL)
1.46 thorpej 1369: pvh->pvh_attrs |= PG_U;
1.1 chs 1370:
1371: splx(s);
1372: }
1373: /*
1374: * Assumption: if it is not part of our managed memory
1375: * then it must be device memory which may be volitile.
 1376:	 * then it must be device memory which may be volatile.
1377: else if (pmap_initialized) {
1.25 thorpej 1378: checkpv = cacheable = false;
1.1 chs 1379: }
1380:
1381: /*
1382: * Increment counters
1383: */
1384: pmap->pm_stats.resident_count++;
1385: if (wired)
1386: pmap->pm_stats.wired_count++;
1387:
1388: validate:
1.59 tsutsui 1389: #ifdef CACHE_HAVE_VAC
1.1 chs 1390: /*
1391: * Purge kernel side of VAC to ensure we get correct state
1392: * of HW bits so we don't clobber them.
1393: */
1394: if (pmap_aliasmask)
1395: DCIS();
1396: #endif
1397:
1398: /*
1399: * Build the new PTE.
1400: */
1401:
1402: npte = pa | pte_prot(pmap, prot) | (*pte & (PG_M|PG_U)) | PG_V;
1403: if (wired)
1404: npte |= PG_W;
1405: if (!checkpv && !cacheable)
1406: #if defined(M68040) || defined(M68060)
1407: #if defined(M68020) || defined(M68030)
1408: npte |= (mmutype == MMU_68040 ? PG_CIN : PG_CI);
1409: #else
1410: npte |= PG_CIN;
1411: #endif
1412: #else
1413: npte |= PG_CI;
1414: #endif
1415: #if defined(M68040) || defined(M68060)
1416: #if defined(M68020) || defined(M68030)
1417: else if (mmutype == MMU_68040 && (npte & (PG_PROT|PG_CI)) == PG_RW)
1418: #else
1419: else if ((npte & (PG_PROT|PG_CI)) == PG_RW)
1420: #endif
1421: npte |= PG_CCB;
1422: #endif
1423:
1424: PMAP_DPRINTF(PDB_ENTER, ("enter: new pte value %x\n", npte));
1425:
1426: /*
1427: * Remember if this was a wiring-only change.
1428: * If so, we need not flush the TLB and caches.
1429: */
1430:
1431: wired = ((*pte ^ npte) == PG_W);
1432: #if defined(M68040) || defined(M68060)
1433: #if defined(M68020) || defined(M68030)
1434: if (mmutype == MMU_68040 && !wired)
1435: #else
1436: if (!wired)
1437: #endif
1438: {
1439: DCFP(pa);
1440: ICPP(pa);
1441: }
1442: #endif
1443: *pte = npte;
1444: if (!wired && active_pmap(pmap))
1445: TBIS(va);
1.59 tsutsui 1446: #ifdef CACHE_HAVE_VAC
1.1 chs 1447: /*
1448: * The following is executed if we are entering a second
1449: * (or greater) mapping for a physical page and the mappings
1450: * may create an aliasing problem. In this case we must
1451: * cache inhibit the descriptors involved and flush any
1452: * external VAC.
1453: */
1454: if (checkpv && !cacheable) {
1455: pmap_changebit(pa, PG_CI, ~0);
1456: DCIA();
1457: #ifdef DEBUG
1458: if ((pmapdebug & (PDB_CACHE|PDB_PVDUMP)) ==
1459: (PDB_CACHE|PDB_PVDUMP))
1460: pmap_pvdump(pa);
1461: #endif
1462: }
1463: #endif
1464: #ifdef DEBUG
1465: if ((pmapdebug & PDB_WIRING) && pmap != pmap_kernel())
1466: pmap_check_wiring("enter", trunc_page((vaddr_t)pte));
1467: #endif
1468:
1469: return 0;
1470: }
1471:
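/*
 * An illustrative caller of pmap_enter(), entering a wired, writable
 * mapping and tolerating resource shortage (the values are made up):
 *
 *	error = pmap_enter(pmap, va, pa, VM_PROT_READ|VM_PROT_WRITE,
 *	    VM_PROT_WRITE | PMAP_WIRED | PMAP_CANFAIL);
 *	if (error == ENOMEM)
 *		(back off, wait for memory, retry)
 *
 * The VM_PROT_* bits in the flags argument are the access-type hint
 * used above to pre-set the PG_U/PG_M attributes.
 */
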
1472: void
1.50 cegger 1473: pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
1.1 chs 1474: {
1.20 tsutsui 1475: pmap_t pmap = pmap_kernel();
1.1 chs 1476: pt_entry_t *pte;
1477: int s, npte;
1478:
1479: PMAP_DPRINTF(PDB_FOLLOW|PDB_ENTER,
1480: ("pmap_kenter_pa(%lx, %lx, %x)\n", va, pa, prot));
1481:
1482: /*
1483: * Segment table entry not valid, we need a new PT page
1484: */
1485:
1486: if (!pmap_ste_v(pmap, va)) {
1487: s = splvm();
1.25 thorpej 1488: pmap_enter_ptpage(pmap, va, false);
1.1 chs 1489: splx(s);
1490: }
1491:
1492: pa = m68k_trunc_page(pa);
1493: pte = pmap_pte(pmap, va);
1494:
1495: PMAP_DPRINTF(PDB_ENTER, ("enter: pte %p, *pte %x\n", pte, *pte));
1496: KASSERT(!pmap_pte_v(pte));
1497:
1498: /*
1499: * Increment counters
1500: */
1501:
1502: pmap->pm_stats.resident_count++;
1503: pmap->pm_stats.wired_count++;
1504:
1505: /*
1506: * Build the new PTE.
1507: */
1508:
1509: npte = pa | pte_prot(pmap, prot) | PG_V | PG_W;
1510: #if defined(M68040) || defined(M68060)
1511: #if defined(M68020) || defined(M68030)
1512: if (mmutype == MMU_68040 && (npte & PG_PROT) == PG_RW)
1513: #else
1514: if ((npte & PG_PROT) == PG_RW)
1515: #endif
1516: npte |= PG_CCB;
1517:
1518: if (mmutype == MMU_68040) {
1519: DCFP(pa);
1520: ICPP(pa);
1521: }
1522: #endif
1523:
1524: *pte = npte;
1525: TBIS(va);
1526: }
1527:
1528: void
1.20 tsutsui 1529: pmap_kremove(vaddr_t va, vsize_t size)
1.1 chs 1530: {
1.20 tsutsui 1531: pmap_t pmap = pmap_kernel();
1.1 chs 1532: pt_entry_t *pte;
1533: vaddr_t nssva;
1534: vaddr_t eva = va + size;
1.59 tsutsui 1535: #ifdef CACHE_HAVE_VAC
1.23 thorpej 1536: bool firstpage, needcflush;
1.1 chs 1537: #endif
1538:
1539: PMAP_DPRINTF(PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT,
1540: ("pmap_kremove(%lx, %lx)\n", va, size));
1541:
1.59 tsutsui 1542: #ifdef CACHE_HAVE_VAC
1.25 thorpej 1543: firstpage = true;
1544: needcflush = false;
1.1 chs 1545: #endif
1546: while (va < eva) {
1547: nssva = m68k_trunc_seg(va) + NBSEG;
1548: if (nssva == 0 || nssva > eva)
1549: nssva = eva;
1550:
1551: /*
1552: * If VA belongs to an unallocated segment,
1553: * skip to the next segment boundary.
1554: */
1555:
1556: if (!pmap_ste_v(pmap, va)) {
1557: va = nssva;
1558: continue;
1559: }
1560:
1561: /*
1562: * Invalidate every valid mapping within this segment.
1563: */
1564:
1565: pte = pmap_pte(pmap, va);
1566: while (va < nssva) {
1567: if (!pmap_pte_v(pte)) {
1568: pte++;
1.3 thorpej 1569: va += PAGE_SIZE;
1.1 chs 1570: continue;
1571: }
1.59 tsutsui 1572: #ifdef CACHE_HAVE_VAC
1.1 chs 1573: if (pmap_aliasmask) {
1574:
1575: /*
1576: * Purge kernel side of VAC to ensure
1577: * we get the correct state of any
1578: * hardware maintained bits.
1579: */
1580:
1581: if (firstpage) {
1582: DCIS();
1.25 thorpej 1583: firstpage = false;
1.1 chs 1584: }
1585:
1586: /*
1587: * Remember if we may need to
1588: * flush the VAC.
1589: */
1590:
1.25 thorpej 1591: needcflush = true;
1.1 chs 1592: }
1593: #endif
1594: pmap->pm_stats.wired_count--;
1595: pmap->pm_stats.resident_count--;
1596: *pte = PG_NV;
1597: TBIS(va);
1598: pte++;
1.3 thorpej 1599: va += PAGE_SIZE;
1.1 chs 1600: }
1601: }
1602:
1.59 tsutsui 1603: #ifdef CACHE_HAVE_VAC
1.1 chs 1604:
1605: /*
1606: * In a couple of cases, we don't need to worry about flushing
1607: * the VAC:
1608: * 1. if this is a kernel mapping,
1609: * we have already done it
1610: * 2. if it is a user mapping not for the current process,
1611: * it won't be there
1612: */
1613:
1614: if (pmap_aliasmask && !active_user_pmap(pmap))
1.25 thorpej 1615: needcflush = false;
1.1 chs 1616: if (needcflush) {
1617: if (pmap == pmap_kernel()) {
1618: DCIS();
1619: } else {
1620: DCIU();
1621: }
1622: }
1623: #endif
1624: }
1625:
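/*
 * pmap_kenter_pa() and pmap_kremove() manage unmanaged, wired
 * kernel mappings and are used in matched pairs; a sketch:
 *
 *	pmap_kenter_pa(va, pa, VM_PROT_READ|VM_PROT_WRITE, 0);
 *	(use the mapping)
 *	pmap_kremove(va, PAGE_SIZE);
 *	pmap_update(pmap_kernel());
 */
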
1626: /*
1627: * pmap_unwire: [ INTERFACE ]
1628: *
1629: * Clear the wired attribute for a map/virtual-address pair.
1630: *
1631: * The mapping must already exist in the pmap.
1632: */
1633: void
1.20 tsutsui 1634: pmap_unwire(pmap_t pmap, vaddr_t va)
1.1 chs 1635: {
1636: pt_entry_t *pte;
1637:
1638: PMAP_DPRINTF(PDB_FOLLOW,
1639: ("pmap_unwire(%p, %lx)\n", pmap, va));
1640:
1641: pte = pmap_pte(pmap, va);
1642:
1643: /*
1644: * If wiring actually changed (always?) clear the wire bit and
1645: * update the wire count. Note that wiring is not a hardware
1646: * characteristic so there is no need to invalidate the TLB.
1647: */
1648:
1649: if (pmap_pte_w_chg(pte, 0)) {
1.25 thorpej 1650: pmap_pte_set_w(pte, false);
1.1 chs 1651: pmap->pm_stats.wired_count--;
1652: }
1653: }
1654:
1655: /*
1656: * pmap_extract: [ INTERFACE ]
1657: *
1658: * Extract the physical address associated with the given
1659: * pmap/virtual address pair.
1660: */
1.23 thorpej 1661: bool
1.20 tsutsui 1662: pmap_extract(pmap_t pmap, vaddr_t va, paddr_t *pap)
1.1 chs 1663: {
1664: paddr_t pa;
1665: u_int pte;
1.8 cl 1666:
1.1 chs 1667: PMAP_DPRINTF(PDB_FOLLOW,
1668: ("pmap_extract(%p, %lx) -> ", pmap, va));
1669:
1670: if (pmap_ste_v(pmap, va)) {
1671: pte = *(u_int *)pmap_pte(pmap, va);
1672: if (pte) {
1673: pa = (pte & PG_FRAME) | (va & ~PG_FRAME);
1674: if (pap != NULL)
1675: *pap = pa;
1.9 mycroft 1676: #ifdef DEBUG
1677: if (pmapdebug & PDB_FOLLOW)
1678: printf("%lx\n", pa);
1679: #endif
1.25 thorpej 1680: return true;
1.1 chs 1681: }
1682: }
1683: #ifdef DEBUG
1.9 mycroft 1684: if (pmapdebug & PDB_FOLLOW)
1685: printf("failed\n");
1.1 chs 1686: #endif
1.25 thorpej 1687: return false;
1.1 chs 1688: }
1689:
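/*
 * Typical use of pmap_extract(): translate a VA and handle the
 * no-mapping case.  A minimal sketch:
 *
 *	paddr_t pa;
 *
 *	if (!pmap_extract(pmap_kernel(), va, &pa))
 *		panic("pmap_extract: no mapping for %lx", va);
 */
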
1690: /*
1691: * pmap_copy: [ INTERFACE ]
1692: *
1693: * Copy the mapping range specified by src_addr/len
1694: * from the source map to the range dst_addr/len
1695: * in the destination map.
1696: *
1697: * This routine is only advisory and need not do anything.
1698: */
1699: void
1.20 tsutsui 1700: pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vaddr_t dst_addr, vsize_t len,
1701: vaddr_t src_addr)
1.1 chs 1702: {
1703:
1704: PMAP_DPRINTF(PDB_FOLLOW,
1705: ("pmap_copy(%p, %p, %lx, %lx, %lx)\n",
1706: dst_pmap, src_pmap, dst_addr, len, src_addr));
1707: }
1708:
1709: /*
1710: * pmap_collect1():
1711: *
1712: * Garbage-collect KPT pages. Helper for the above (bogus)
1713: * pmap_collect().
1714: *
1715: * Note: THIS SHOULD GO AWAY, AND BE REPLACED WITH A BETTER
1716: * WAY OF HANDLING PT PAGES!
1717: */
1.49 rmind 1718: static inline void
1.20 tsutsui 1719: pmap_collect1(pmap_t pmap, paddr_t startpa, paddr_t endpa)
1.1 chs 1720: {
1721: paddr_t pa;
1.46 thorpej 1722: struct pv_header *pvh;
1.1 chs 1723: struct pv_entry *pv;
1724: pt_entry_t *pte;
1725: paddr_t kpa;
1726: #ifdef DEBUG
1727: st_entry_t *ste;
1728: int opmapdebug = 0;
1729: #endif
1730:
1.3 thorpej 1731: for (pa = startpa; pa < endpa; pa += PAGE_SIZE) {
1.1 chs 1732: struct kpt_page *kpt, **pkpt;
1733:
1734: /*
1735: * Locate physical pages which are being used as kernel
1736: * page table pages.
1737: */
1738:
1.46 thorpej 1739: pvh = pa_to_pvh(pa);
1740: pv = &pvh->pvh_first;
1741: if (pv->pv_pmap != pmap_kernel() ||
1742: !(pvh->pvh_attrs & PVH_PTPAGE))
1.1 chs 1743: continue;
1744: do {
1745: if (pv->pv_ptste && pv->pv_ptpmap == pmap_kernel())
1746: break;
1747: } while ((pv = pv->pv_next));
1748: if (pv == NULL)
1749: continue;
1750: #ifdef DEBUG
1751: if (pv->pv_va < (vaddr_t)Sysmap ||
1752: pv->pv_va >= (vaddr_t)Sysmap + M68K_MAX_PTSIZE) {
1753: printf("collect: kernel PT VA out of range\n");
1754: pmap_pvdump(pa);
1755: continue;
1756: }
1757: #endif
1.3 thorpej 1758: pte = (pt_entry_t *)(pv->pv_va + PAGE_SIZE);
1.1 chs 1759: while (--pte >= (pt_entry_t *)pv->pv_va && *pte == PG_NV)
1760: ;
1761: if (pte >= (pt_entry_t *)pv->pv_va)
1762: continue;
1763:
1764: #ifdef DEBUG
1765: if (pmapdebug & (PDB_PTPAGE|PDB_COLLECT)) {
1766: printf("collect: freeing KPT page at %lx (ste %x@%p)\n",
1.20 tsutsui 1767: pv->pv_va, *pv->pv_ptste, pv->pv_ptste);
1.1 chs 1768: opmapdebug = pmapdebug;
1769: pmapdebug |= PDB_PTPAGE;
1770: }
1771:
1772: ste = pv->pv_ptste;
1773: #endif
1774: /*
1775: * If all entries were invalid we can remove the page.
1776: * We call pmap_remove_entry to take care of invalidating
1777: * ST and Sysptmap entries.
1778: */
1779:
1780: (void) pmap_extract(pmap, pv->pv_va, &kpa);
1781: pmap_remove_mapping(pmap, pv->pv_va, NULL,
1782: PRM_TFLUSH|PRM_CFLUSH);
1783:
1784: /*
1785: * Use the physical address to locate the original
1786: * (kmem_alloc assigned) address for the page and put
1787: * that page back on the free list.
1788: */
1789:
1790: for (pkpt = &kpt_used_list, kpt = *pkpt;
1791: kpt != NULL;
1792: pkpt = &kpt->kpt_next, kpt = *pkpt)
1793: if (kpt->kpt_pa == kpa)
1794: break;
1795: #ifdef DEBUG
1796: if (kpt == NULL)
1797: panic("pmap_collect: lost a KPT page");
1798: if (pmapdebug & (PDB_PTPAGE|PDB_COLLECT))
1799: printf("collect: %lx (%lx) to free list\n",
1.20 tsutsui 1800: kpt->kpt_va, kpa);
1.1 chs 1801: #endif
1802: *pkpt = kpt->kpt_next;
1803: kpt->kpt_next = kpt_free_list;
1804: kpt_free_list = kpt;
1805: #ifdef DEBUG
1806: if (pmapdebug & (PDB_PTPAGE|PDB_COLLECT))
1807: pmapdebug = opmapdebug;
1808:
1809: if (*ste != SG_NV)
1810: printf("collect: kernel STE at %p still valid (%x)\n",
1.20 tsutsui 1811: ste, *ste);
1.1 chs 1812: ste = &Sysptmap[ste - pmap_ste(pmap_kernel(), 0)];
1813: if (*ste != SG_NV)
1814: printf("collect: kernel PTmap at %p still valid (%x)\n",
1.20 tsutsui 1815: ste, *ste);
1.1 chs 1816: #endif
1817: }
1818: }
1819:
1820: /*
1.49 rmind 1821: * pmap_collect:
1822: *
1823: * Helper for pmap_enter_ptpage().
1824: *
1825: * Garbage collects the physical map system for pages which are no
1826: * longer used. Success need not be guaranteed -- that is, there
1827: * may well be pages which are not referenced, but others may be
1828: * collected.
1829: */
1830: static void
1831: pmap_collect(void)
1832: {
1833: int bank, s;
1834:
1835: /*
1836: * XXX This is very bogus. We should handle kernel PT
1837: * XXX pages much differently.
1838: */
1839:
1840: s = splvm();
1841: for (bank = 0; bank < vm_nphysseg; bank++) {
1.57 uebayasi 1842: pmap_collect1(pmap_kernel(), ptoa(VM_PHYSMEM_PTR(bank)->start),
1843: ptoa(VM_PHYSMEM_PTR(bank)->end));
1.49 rmind 1844: }
1845: splx(s);
1846:
1847: #ifdef notyet
1848: /* Go compact and garbage-collect the pv_table. */
1849: pmap_collect_pv();
1850: #endif
1851: }
1852:
1853: /*
1.1 chs 1854: * pmap_zero_page: [ INTERFACE ]
1855: *
1856: * Zero the specified (machine independent) page by mapping the page
1857: * into virtual memory and using zeropage() to clear its contents, one
1858: * machine dependent page at a time.
1859: *
1860: * Note: WE DO NOT CURRENTLY LOCK THE TEMPORARY ADDRESSES!
1861: * (Actually, we go to splvm(), and since we don't
1862: * support multiple processors, this is sufficient.)
1863: */
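/*
 * A minimal sketch of the temporary-mapping technique used here and
 * in pmap_copy_page() below: CADDR1 is a reserved kernel virtual
 * address whose PTE is caddr1_pte.  A new PTE for the target
 * physical page is stored and the stale TLB entry flushed before
 * the window is touched:
 *
 *	*caddr1_pte = phys | PG_V | (cache bits);
 *	TBIS((vaddr_t)CADDR1);		(invalidate old translation)
 *	zeropage(CADDR1);		(CADDR1 now aliases phys)
 */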
1864: void
1.20 tsutsui 1865: pmap_zero_page(paddr_t phys)
1.1 chs 1866: {
1867: int npte;
1868:
1869: PMAP_DPRINTF(PDB_FOLLOW, ("pmap_zero_page(%lx)\n", phys));
1870:
1871: npte = phys | PG_V;
1.59 tsutsui 1872: #ifdef CACHE_HAVE_VAC
1.1 chs 1873: if (pmap_aliasmask) {
1874:
1875: /*
1876: * Cache-inhibit the mapping on VAC machines, as we would
1877: * be wasting the cache load.
1878: */
1879:
1880: npte |= PG_CI;
1881: }
1882: #endif
1883:
1884: #if defined(M68040) || defined(M68060)
1885: #if defined(M68020) || defined(M68030)
1886: if (mmutype == MMU_68040)
1887: #endif
1888: {
1889: /*
1890: * Set copyback caching on the page; this is required
1891: * for cache consistency (since regular mappings are
1892: * copyback as well).
1893: */
1894:
1895: npte |= PG_CCB;
1896: }
1897: #endif
1898:
1899: *caddr1_pte = npte;
1900: TBIS((vaddr_t)CADDR1);
1901:
1902: zeropage(CADDR1);
1903:
1904: #ifdef DEBUG
1905: *caddr1_pte = PG_NV;
1906: TBIS((vaddr_t)CADDR1);
1907: #endif
1908: }
1909:
1910: /*
1911: * pmap_copy_page: [ INTERFACE ]
1912: *
1913: * Copy the specified (machine independent) page by mapping the page
1914: * into virtual memory and using copypage() to copy the page, one machine
1915: * dependent page at a time.
1916: *
1917: * Note: WE DO NOT CURRENTLY LOCK THE TEMPORARY ADDRESSES!
1918: * (Actually, we go to splvm(), and since we don't
1919: * support multiple processors, this is sufficient.)
1920: */
1921: void
1.20 tsutsui 1922: pmap_copy_page(paddr_t src, paddr_t dst)
1.1 chs 1923: {
1924: int npte1, npte2;
1925:
1926: PMAP_DPRINTF(PDB_FOLLOW, ("pmap_copy_page(%lx, %lx)\n", src, dst));
1927:
1928: npte1 = src | PG_RO | PG_V;
1929: npte2 = dst | PG_V;
1.59 tsutsui 1930: #ifdef CACHE_HAVE_VAC
1.1 chs 1931: if (pmap_aliasmask) {
1932:
1933: /*
1934: * Cache-inhibit the mapping on VAC machines, as we would
1935: * be wasting the cache load.
1936: */
1937:
1938: npte1 |= PG_CI;
1939: npte2 |= PG_CI;
1940: }
1941: #endif
1942:
1943: #if defined(M68040) || defined(M68060)
1944: #if defined(M68020) || defined(M68030)
1945: if (mmutype == MMU_68040)
1946: #endif
1947: {
1948: /*
1949: * Set copyback caching on the pages; this is required
1950: * for cache consistency (since regular mappings are
1951: * copyback as well).
1952: */
1953:
1954: npte1 |= PG_CCB;
1955: npte2 |= PG_CCB;
1956: }
1957: #endif
1958:
1959: *caddr1_pte = npte1;
1960: TBIS((vaddr_t)CADDR1);
1961:
1962: *caddr2_pte = npte2;
1963: TBIS((vaddr_t)CADDR2);
1964:
1965: copypage(CADDR1, CADDR2);
1966:
1967: #ifdef DEBUG
1968: *caddr1_pte = PG_NV;
1969: TBIS((vaddr_t)CADDR1);
1970:
1971: *caddr2_pte = PG_NV;
1972: TBIS((vaddr_t)CADDR2);
1973: #endif
1974: }
1975:
1976: /*
1977: * pmap_clear_modify: [ INTERFACE ]
1978: *
1979: * Clear the modify bits on the specified physical page.
1980: */
1.23 thorpej 1981: bool
1.20 tsutsui 1982: pmap_clear_modify(struct vm_page *pg)
1.1 chs 1983: {
1984: paddr_t pa = VM_PAGE_TO_PHYS(pg);
1985:
1986: PMAP_DPRINTF(PDB_FOLLOW, ("pmap_clear_modify(%p)\n", pg));
1987:
1988: return pmap_changebit(pa, 0, ~PG_M);
1989: }
1990:
1991: /*
1992: * pmap_clear_reference: [ INTERFACE ]
1993: *
1994: * Clear the reference bit on the specified physical page.
1995: */
1.23 thorpej 1996: bool
1.20 tsutsui 1997: pmap_clear_reference(struct vm_page *pg)
1.1 chs 1998: {
1999: paddr_t pa = VM_PAGE_TO_PHYS(pg);
2000:
2001: PMAP_DPRINTF(PDB_FOLLOW, ("pmap_clear_reference(%p)\n", pg));
2002:
2003: return pmap_changebit(pa, 0, ~PG_U);
2004: }
2005:
2006: /*
2007: * pmap_is_referenced: [ INTERFACE ]
2008: *
2009: * Return whether or not the specified physical page is referenced
2010: * by any physical maps.
2011: */
1.23 thorpej 2012: bool
1.20 tsutsui 2013: pmap_is_referenced(struct vm_page *pg)
1.1 chs 2014: {
2015: paddr_t pa = VM_PAGE_TO_PHYS(pg);
2016:
1.20 tsutsui 2017: return pmap_testbit(pa, PG_U);
1.1 chs 2018: }
2019:
2020: /*
2021: * pmap_is_modified: [ INTERFACE ]
2022: *
2023: * Return whether or not the specified physical page is modified
2024: * by any physical maps.
2025: */
1.23 thorpej 2026: bool
1.20 tsutsui 2027: pmap_is_modified(struct vm_page *pg)
1.1 chs 2028: {
2029: paddr_t pa = VM_PAGE_TO_PHYS(pg);
2030:
1.20 tsutsui 2031: return pmap_testbit(pa, PG_M);
1.1 chs 2032: }
2033:
2034: /*
2035: * pmap_phys_address: [ INTERFACE ]
2036: *
2037: * Return the physical address corresponding to the specified
2038: * cookie. Used by the device pager to decode a device driver's
2039: * mmap entry point return value.
2040: *
2041: * Note: no locking is necessary in this function.
2042: */
2043: paddr_t
1.32 macallan 2044: pmap_phys_address(paddr_t ppn)
1.1 chs 2045: {
1.20 tsutsui 2046: return m68k_ptob(ppn);
1.1 chs 2047: }
2048:
1.59 tsutsui 2049: #ifdef CACHE_HAVE_VAC
1.1 chs 2050: /*
2051: * pmap_prefer: [ INTERFACE ]
2052: *
2053: * Find the first virtual address >= *vap that does not
2054: * cause a virtually-addressed cache alias problem.
2055: */
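/*
 * Worked example (illustrative only; the mask value is assumed):
 * with a 16KB direct-mapped VAC, pmap_aliasmask == 0x3fff.  For
 * foff == 0x5000 and *vap == 0x10000:
 *
 *	d = (0x5000 - 0x10000) & 0x3fff = 0x1000
 *	*vap = 0x10000 + 0x1000 = 0x11000
 *
 * so that (*vap & pmap_aliasmask) == (foff & pmap_aliasmask), i.e.
 * the returned VA lands on the same cache index as the object
 * offset and cannot alias another mapping of it.
 */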
2056: void
1.20 tsutsui 2057: pmap_prefer(vaddr_t foff, vaddr_t *vap)
1.1 chs 2058: {
2059: vaddr_t va;
2060: vsize_t d;
2061:
2062: #ifdef M68K_MMU_MOTOROLA
2063: if (pmap_aliasmask)
2064: #endif
2065: {
2066: va = *vap;
2067: d = foff - va;
2068: d &= pmap_aliasmask;
2069: *vap = va + d;
2070: }
2071: }
1.59 tsutsui 2072: #endif /* CACHE_HAVE_VAC */
1.1 chs 2073:
2074: /*
2075: * Miscellaneous support routines follow
2076: */
2077:
2078: /*
2079: * pmap_remove_mapping:
2080: *
2081: * Invalidate a single page denoted by pmap/va.
2082: *
2083: * If (pte != NULL), it is the already computed PTE for the page.
2084: *
2085: * If (flags & PRM_TFLUSH), we must invalidate any TLB information.
2086: *
2087: * If (flags & PRM_CFLUSH), we must flush/invalidate any cache
2088: * information.
2089: *
2090: * If (flags & PRM_KEEPPTPAGE), we don't free the page table page
2091: * if the reference drops to zero.
2092: */
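/*
 * A typical invocation, as used in pmap_collect1() above when a
 * kernel PT page is released:
 *
 *	pmap_remove_mapping(pmap_kernel(), ptpva, NULL,
 *	    PRM_TFLUSH|PRM_CFLUSH);
 *
 * i.e. let the function look up the PTE itself and flush both the
 * TLB and the caches.
 */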
2093: /* static */
2094: void
1.20 tsutsui 2095: pmap_remove_mapping(pmap_t pmap, vaddr_t va, pt_entry_t *pte, int flags)
1.1 chs 2096: {
2097: paddr_t pa;
1.46 thorpej 2098: struct pv_header *pvh;
1.1 chs 2099: struct pv_entry *pv, *npv;
2100: struct pmap *ptpmap;
2101: st_entry_t *ste;
2102: int s, bits;
2103: #ifdef DEBUG
2104: pt_entry_t opte;
2105: #endif
2106:
2107: PMAP_DPRINTF(PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT,
2108: ("pmap_remove_mapping(%p, %lx, %p, %x)\n",
2109: pmap, va, pte, flags));
2110:
2111: /*
2112: * PTE not provided, compute it from pmap and va.
2113: */
2114:
2115: if (pte == NULL) {
2116: pte = pmap_pte(pmap, va);
2117: if (*pte == PG_NV)
2118: return;
2119: }
2120:
1.59 tsutsui 2121: #ifdef CACHE_HAVE_VAC
1.1 chs 2122: if (pmap_aliasmask && (flags & PRM_CFLUSH)) {
2123:
2124: /*
2125: * Purge kernel side of VAC to ensure we get the correct
2126: * state of any hardware maintained bits.
2127: */
2128:
2129: DCIS();
2130:
2131: /*
2132: * If this is a non-CI user mapping for the current process,
2133: * flush the VAC. Note that the kernel side was flushed
2134: * above so we don't worry about non-CI kernel mappings.
2135: */
2136:
2137: if (active_user_pmap(pmap) && !pmap_pte_ci(pte)) {
2138: DCIU();
2139: }
2140: }
2141: #endif
2142:
2143: pa = pmap_pte_pa(pte);
2144: #ifdef DEBUG
2145: opte = *pte;
2146: #endif
2147:
2148: /*
2149: * Update statistics
2150: */
2151:
2152: if (pmap_pte_w(pte))
2153: pmap->pm_stats.wired_count--;
2154: pmap->pm_stats.resident_count--;
2155:
2156: #if defined(M68040) || defined(M68060)
2157: #if defined(M68020) || defined(M68030)
2158: if (mmutype == MMU_68040)
2159: #endif
2160: if ((flags & PRM_CFLUSH)) {
2161: DCFP(pa);
2162: ICPP(pa);
2163: }
2164: #endif
2165:
2166: /*
2167: * Invalidate the PTE after saving the reference modify info.
2168: */
2169:
2170: PMAP_DPRINTF(PDB_REMOVE, ("remove: invalidating pte at %p\n", pte));
2171: bits = *pte & (PG_U|PG_M);
2172: *pte = PG_NV;
2173: if ((flags & PRM_TFLUSH) && active_pmap(pmap))
2174: TBIS(va);
2175:
2176: /*
2177: * For user mappings decrement the wiring count on
2178: * the PT page.
2179: */
2180:
2181: if (pmap != pmap_kernel()) {
2182: vaddr_t ptpva = trunc_page((vaddr_t)pte);
2183: int refs = pmap_ptpage_delref(ptpva);
2184: #ifdef DEBUG
2185: if (pmapdebug & PDB_WIRING)
2186: pmap_check_wiring("remove", ptpva);
2187: #endif
2188:
2189: /*
2190: * If reference count drops to 0, and we're not instructed
2191: * to keep it around, free the PT page.
2192: */
2193:
2194: if (refs == 0 && (flags & PRM_KEEPPTPAGE) == 0) {
2195: #ifdef DIAGNOSTIC
1.46 thorpej 2196: struct pv_header *ptppvh;
1.16 tsutsui 2197: struct pv_entry *ptppv;
1.1 chs 2198: #endif
1.15 tsutsui 2199: paddr_t ptppa;
1.1 chs 2200:
1.15 tsutsui 2201: ptppa = pmap_pte_pa(pmap_pte(pmap_kernel(), ptpva));
1.1 chs 2202: #ifdef DIAGNOSTIC
1.15 tsutsui 2203: if (PAGE_IS_MANAGED(ptppa) == 0)
1.1 chs 2204: panic("pmap_remove_mapping: unmanaged PT page");
1.46 thorpej 2205: ptppvh = pa_to_pvh(ptppa);
2206: ptppv = &ptppvh->pvh_first;
1.16 tsutsui 2207: if (ptppv->pv_ptste == NULL)
1.1 chs 2208: panic("pmap_remove_mapping: ptste == NULL");
1.16 tsutsui 2209: if (ptppv->pv_pmap != pmap_kernel() ||
2210: ptppv->pv_va != ptpva ||
2211: ptppv->pv_next != NULL)
1.1 chs 2212: panic("pmap_remove_mapping: "
2213: "bad PT page pmap %p, va 0x%lx, next %p",
1.16 tsutsui 2214: ptppv->pv_pmap, ptppv->pv_va,
2215: ptppv->pv_next);
1.1 chs 2216: #endif
2217: pmap_remove_mapping(pmap_kernel(), ptpva,
2218: NULL, PRM_TFLUSH|PRM_CFLUSH);
1.36 tsutsui 2219: mutex_enter(&uvm_kernel_object->vmobjlock);
1.15 tsutsui 2220: uvm_pagefree(PHYS_TO_VM_PAGE(ptppa));
1.36 tsutsui 2221: mutex_exit(&uvm_kernel_object->vmobjlock);
1.1 chs 2222: PMAP_DPRINTF(PDB_REMOVE|PDB_PTPAGE,
2223: ("remove: PT page 0x%lx (0x%lx) freed\n",
1.15 tsutsui 2224: ptpva, ptppa));
1.1 chs 2225: }
2226: }
2227:
2228: /*
2229: * If this isn't a managed page, we are all done.
2230: */
2231:
2232: if (PAGE_IS_MANAGED(pa) == 0)
2233: return;
2234:
2235: /*
2236: * Otherwise remove it from the PV table
2237: * (raise IPL since we may be called at interrupt time).
2238: */
2239:
1.46 thorpej 2240: pvh = pa_to_pvh(pa);
2241: pv = &pvh->pvh_first;
1.1 chs 2242: ste = NULL;
2243: s = splvm();
2244:
2245: /*
2246: * If it is the first entry on the list, it is actually
2247: * in the header and we must copy the following entry up
2248: * to the header. Otherwise we must search the list for
2249: * the entry. In either case we free the now unused entry.
2250: */
2251:
2252: if (pmap == pv->pv_pmap && va == pv->pv_va) {
2253: ste = pv->pv_ptste;
2254: ptpmap = pv->pv_ptpmap;
2255: npv = pv->pv_next;
2256: if (npv) {
2257: *pv = *npv;
2258: pmap_free_pv(npv);
2259: } else
2260: pv->pv_pmap = NULL;
2261: } else {
2262: for (npv = pv->pv_next; npv; npv = npv->pv_next) {
2263: if (pmap == npv->pv_pmap && va == npv->pv_va)
2264: break;
2265: pv = npv;
2266: }
2267: #ifdef DEBUG
2268: if (npv == NULL)
2269: panic("pmap_remove: PA not in pv_tab");
2270: #endif
2271: ste = npv->pv_ptste;
2272: ptpmap = npv->pv_ptpmap;
2273: pv->pv_next = npv->pv_next;
2274: pmap_free_pv(npv);
1.46 thorpej 2275: pvh = pa_to_pvh(pa);
2276: pv = &pvh->pvh_first;
1.1 chs 2277: }
2278:
1.59 tsutsui 2279: #ifdef CACHE_HAVE_VAC
1.1 chs 2280:
2281: /*
2282: * If only one mapping left we no longer need to cache inhibit
2283: */
2284:
2285: if (pmap_aliasmask &&
1.46 thorpej 2286: pv->pv_pmap && pv->pv_next == NULL && (pvh->pvh_attrs & PVH_CI)) {
1.1 chs 2287: PMAP_DPRINTF(PDB_CACHE,
2288: ("remove: clearing CI for pa %lx\n", pa));
1.46 thorpej 2289: pvh->pvh_attrs &= ~PVH_CI;
1.1 chs 2290: pmap_changebit(pa, 0, ~PG_CI);
2291: #ifdef DEBUG
2292: if ((pmapdebug & (PDB_CACHE|PDB_PVDUMP)) ==
2293: (PDB_CACHE|PDB_PVDUMP))
2294: pmap_pvdump(pa);
2295: #endif
2296: }
2297: #endif
2298:
2299: /*
2300: * If this was a PT page we must also remove the
2301: * mapping from the associated segment table.
2302: */
2303:
2304: if (ste) {
2305: PMAP_DPRINTF(PDB_REMOVE|PDB_PTPAGE,
2306: ("remove: ste was %x@%p pte was %x@%p\n",
2307: *ste, ste, opte, pmap_pte(pmap, va)));
2308: #if defined(M68040) || defined(M68060)
2309: #if defined(M68020) || defined(M68030)
2310: if (mmutype == MMU_68040)
2311: #endif
2312: {
2313: st_entry_t *este = &ste[NPTEPG/SG4_LEV3SIZE];
2314:
2315: while (ste < este)
2316: *ste++ = SG_NV;
2317: #ifdef DEBUG
2318: ste -= NPTEPG/SG4_LEV3SIZE;
2319: #endif
2320: }
2321: #if defined(M68020) || defined(M68030)
2322: else
2323: #endif
2324: #endif
2325: #if defined(M68020) || defined(M68030)
2326: *ste = SG_NV;
2327: #endif
2328:
2329: /*
2330: * If it was a user PT page, we decrement the
2331: * reference count on the segment table as well,
2332: * freeing it if it is now empty.
2333: */
2334:
2335: if (ptpmap != pmap_kernel()) {
2336: PMAP_DPRINTF(PDB_REMOVE|PDB_SEGTAB,
2337: ("remove: stab %p, refcnt %d\n",
2338: ptpmap->pm_stab, ptpmap->pm_sref - 1));
2339: #ifdef DEBUG
2340: if ((pmapdebug & PDB_PARANOIA) &&
2341: ptpmap->pm_stab !=
2342: (st_entry_t *)trunc_page((vaddr_t)ste))
2343: panic("remove: bogus ste");
2344: #endif
2345: if (--(ptpmap->pm_sref) == 0) {
2346: PMAP_DPRINTF(PDB_REMOVE|PDB_SEGTAB,
2347: ("remove: free stab %p\n",
2348: ptpmap->pm_stab));
1.14 yamt 2349: uvm_km_free(st_map, (vaddr_t)ptpmap->pm_stab,
2350: M68K_STSIZE, UVM_KMF_WIRED);
1.1 chs 2351: ptpmap->pm_stab = Segtabzero;
2352: ptpmap->pm_stpa = Segtabzeropa;
2353: #if defined(M68040) || defined(M68060)
2354: #if defined(M68020) || defined(M68030)
2355: if (mmutype == MMU_68040)
2356: #endif
2357: ptpmap->pm_stfree = protostfree;
2358: #endif
2359:
2360: /*
2361: * XXX may have changed segment table
2362: * pointer for current process so
2363: * update now to reload hardware.
2364: */
2365:
2366: if (active_user_pmap(ptpmap))
2367: PMAP_ACTIVATE(ptpmap, 1);
2368: }
2369: }
1.46 thorpej 2370: pvh->pvh_attrs &= ~PVH_PTPAGE;
1.1 chs 2371: ptpmap->pm_ptpages--;
2372: }
2373:
2374: /*
2375: * Update saved attributes for managed page
2376: */
2377:
1.46 thorpej 2378: pvh->pvh_attrs |= bits;
1.1 chs 2379: splx(s);
2380: }
2381:
2382: /*
2383: * pmap_testbit:
2384: *
2385: * Test the modified/referenced bits of a physical page.
2386: */
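/*
 * For example, pmap_is_modified() above is simply
 * pmap_testbit(pa, PG_M), and pmap_is_referenced() is
 * pmap_testbit(pa, PG_U).
 */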
2387: /* static */
1.23 thorpej 2388: bool
1.20 tsutsui 2389: pmap_testbit(paddr_t pa, int bit)
1.1 chs 2390: {
1.46 thorpej 2391: struct pv_header *pvh;
1.1 chs 2392: struct pv_entry *pv;
2393: pt_entry_t *pte;
2394: int s;
2395:
1.46 thorpej 2396: pvh = pa_to_pvh(pa);
2397: pv = &pvh->pvh_first;
1.1 chs 2398: s = splvm();
2399:
2400: /*
2401: * Check saved info first
2402: */
2403:
1.46 thorpej 2404: if (pvh->pvh_attrs & bit) {
1.1 chs 2405: splx(s);
1.25 thorpej 2406: return true;
1.1 chs 2407: }
2408:
1.59 tsutsui 2409: #ifdef CACHE_HAVE_VAC
1.1 chs 2410:
2411: /*
2412: * Flush VAC to get correct state of any hardware maintained bits.
2413: */
2414:
2415: if (pmap_aliasmask && (bit & (PG_U|PG_M)))
2416: DCIS();
2417: #endif
2418:
2419: /*
2420: * Not found. Check current mappings, returning immediately if
2421: * found. Cache a hit to speed future lookups.
2422: */
2423:
2424: if (pv->pv_pmap != NULL) {
2425: for (; pv; pv = pv->pv_next) {
2426: pte = pmap_pte(pv->pv_pmap, pv->pv_va);
2427: if (*pte & bit) {
1.46 thorpej 2428: pvh->pvh_attrs |= bit;
1.1 chs 2429: splx(s);
1.25 thorpej 2430: return true;
1.1 chs 2431: }
2432: }
2433: }
2434: splx(s);
1.25 thorpej 2435: return false;
1.1 chs 2436: }
2437:
2438: /*
2439: * pmap_changebit:
2440: *
2441: * Change the modified/referenced bits, or other PTE bits,
2442: * for a physical page.
2443: */
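/*
 * For example, pmap_clear_modify() above is
 * pmap_changebit(pa, 0, ~PG_M), and pmap_enter_ptpage() below uses
 * pmap_changebit(ptpa, PG_CI, ~PG_CCB) to switch a PT page from
 * copyback to cache-inhibited on the 68040/68060.
 */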
2444: /* static */
1.23 thorpej 2445: bool
1.20 tsutsui 2446: pmap_changebit(paddr_t pa, int set, int mask)
1.1 chs 2447: {
1.46 thorpej 2448: struct pv_header *pvh;
1.1 chs 2449: struct pv_entry *pv;
2450: pt_entry_t *pte, npte;
2451: vaddr_t va;
2452: int s;
1.59 tsutsui 2453: #if defined(CACHE_HAVE_VAC) || defined(M68040) || defined(M68060)
1.25 thorpej 2454: bool firstpage = true;
1.1 chs 2455: #endif
1.23 thorpej 2456: bool r;
1.1 chs 2457:
2458: PMAP_DPRINTF(PDB_BITS,
2459: ("pmap_changebit(%lx, %x, %x)\n", pa, set, mask));
2460:
1.46 thorpej 2461: pvh = pa_to_pvh(pa);
2462: pv = &pvh->pvh_first;
1.1 chs 2463: s = splvm();
2464:
2465: /*
2466: * Clear saved attributes (modify, reference)
2467: */
2468:
1.46 thorpej 2469: r = (pvh->pvh_attrs & ~mask) != 0;
2470: pvh->pvh_attrs &= mask;
1.1 chs 2471:
2472: /*
2473: * Loop over all current mappings, setting/clearing as appropriate.
2474: * If setting RO do we need to clear the VAC?
2475: */
2476:
2477: if (pv->pv_pmap != NULL) {
2478: #ifdef DEBUG
2479: int toflush = 0;
2480: #endif
2481: for (; pv; pv = pv->pv_next) {
2482: #ifdef DEBUG
2483: toflush |= (pv->pv_pmap == pmap_kernel()) ? 2 : 1;
2484: #endif
2485: va = pv->pv_va;
2486: pte = pmap_pte(pv->pv_pmap, va);
1.59 tsutsui 2487: #ifdef CACHE_HAVE_VAC
1.1 chs 2488:
2489: /*
2490: * Flush VAC to ensure we get correct state of HW bits
2491: * so we don't clobber them.
2492: */
2493:
2494: if (firstpage && pmap_aliasmask) {
1.25 thorpej 2495: firstpage = false;
1.1 chs 2496: DCIS();
2497: }
2498: #endif
2499: npte = (*pte | set) & mask;
2500: if (*pte != npte) {
1.25 thorpej 2501: r = true;
1.1 chs 2502: #if defined(M68040) || defined(M68060)
2503: /*
2504: * If we are changing caching status or
2505: * protection make sure the caches are
2506: * flushed (but only once).
2507: */
2508: if (firstpage &&
2509: #if defined(M68020) || defined(M68030)
2510: (mmutype == MMU_68040) &&
2511: #endif
2512: ((set == PG_RO) ||
2513: (set & PG_CMASK) ||
2514: (mask & PG_CMASK) == 0)) {
1.25 thorpej 2515: firstpage = false;
1.1 chs 2516: DCFP(pa);
2517: ICPP(pa);
2518: }
2519: #endif
2520: *pte = npte;
2521: if (active_pmap(pv->pv_pmap))
2522: TBIS(va);
2523: }
2524: }
2525: }
2526: splx(s);
1.20 tsutsui 2527: return r;
1.1 chs 2528: }
2529:
2530: /*
2531: * pmap_enter_ptpage:
2532: *
2533: * Allocate and map a PT page for the specified pmap/va pair.
2534: */
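/*
 * Returns 0 on success.  If can_fail is true and a new segment
 * table cannot be allocated without sleeping, returns ENOMEM and
 * leaves the pmap unchanged; in the other out-of-memory cases the
 * function waits for memory (or panics for kernel PT pages).
 */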
2535: /* static */
1.22 martin 2536: int
1.23 thorpej 2537: pmap_enter_ptpage(pmap_t pmap, vaddr_t va, bool can_fail)
1.1 chs 2538: {
2539: paddr_t ptpa;
2540: struct vm_page *pg;
1.46 thorpej 2541: struct pv_header *pvh;
1.1 chs 2542: struct pv_entry *pv;
2543: st_entry_t *ste;
2544: int s;
2545:
2546: PMAP_DPRINTF(PDB_FOLLOW|PDB_ENTER|PDB_PTPAGE,
2547: ("pmap_enter_ptpage: pmap %p, va %lx\n", pmap, va));
2548:
2549: /*
2550: * Allocate a segment table if necessary. Note that it is allocated
2551: * from a private map and not pt_map. This keeps user page tables
2552: * aligned on segment boundaries in the kernel address space.
2553: * The segment table is wired down. It will be freed whenever the
2554: * reference count drops to zero.
2555: */
2556: if (pmap->pm_stab == Segtabzero) {
2557: pmap->pm_stab = (st_entry_t *)
1.14 yamt 2558: uvm_km_alloc(st_map, M68K_STSIZE, 0,
1.22 martin 2559: UVM_KMF_WIRED | UVM_KMF_ZERO |
2560: (can_fail ? UVM_KMF_NOWAIT : 0));
2561: if (pmap->pm_stab == NULL) {
2562: pmap->pm_stab = Segtabzero;
2563: return ENOMEM;
2564: }
1.1 chs 2565: (void) pmap_extract(pmap_kernel(), (vaddr_t)pmap->pm_stab,
2566: (paddr_t *)&pmap->pm_stpa);
2567: #if defined(M68040) || defined(M68060)
2568: #if defined(M68020) || defined(M68030)
2569: if (mmutype == MMU_68040)
2570: #endif
2571: {
1.21 mhitch 2572: pt_entry_t *pte;
2573:
2574: pte = pmap_pte(pmap_kernel(), pmap->pm_stab);
2575: *pte = (*pte & ~PG_CMASK) | PG_CI;
1.1 chs 2576: pmap->pm_stfree = protostfree;
2577: }
2578: #endif
2579: /*
2580: * XXX may have changed segment table pointer for current
2581: * process so update now to reload hardware.
2582: */
2583: if (active_user_pmap(pmap))
2584: PMAP_ACTIVATE(pmap, 1);
2585:
2586: PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB,
2587: ("enter: pmap %p stab %p(%p)\n",
2588: pmap, pmap->pm_stab, pmap->pm_stpa));
2589: }
2590:
2591: ste = pmap_ste(pmap, va);
2592: #if defined(M68040) || defined(M68060)
2593: /*
2594: * Allocate level 2 descriptor block if necessary
2595: */
2596: #if defined(M68020) || defined(M68030)
2597: if (mmutype == MMU_68040)
2598: #endif
2599: {
2600: if (*ste == SG_NV) {
2601: int ix;
1.26 christos 2602: void *addr;
1.1 chs 2603:
2604: ix = bmtol2(pmap->pm_stfree);
2605: if (ix == -1)
2606: panic("enter: out of address space"); /* XXX */
2607: pmap->pm_stfree &= ~l2tobm(ix);
1.26 christos 2608: addr = (void *)&pmap->pm_stab[ix*SG4_LEV2SIZE];
1.1 chs 2609: memset(addr, 0, SG4_LEV2SIZE*sizeof(st_entry_t));
1.26 christos 2610: addr = (void *)&pmap->pm_stpa[ix*SG4_LEV2SIZE];
1.1 chs 2611: *ste = (u_int)addr | SG_RW | SG_U | SG_V;
2612:
2613: PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB,
2614: ("enter: alloc ste2 %d(%p)\n", ix, addr));
2615: }
2616: ste = pmap_ste2(pmap, va);
2617: /*
2618: * Since a level 2 descriptor maps a block of SG4_LEV3SIZE
2619: * level 3 descriptors, we need a chunk of NPTEPG/SG4_LEV3SIZE
1.3 thorpej 2620: * (16) such descriptors (PAGE_SIZE/SG4_LEV3SIZE bytes) to map a
1.1 chs 2621: * PT page--the unit of allocation. We set `ste' to point
2622: * to the first entry of that chunk which is validated in its
2623: * entirety below.
2624: */
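	/*
	 * Worked out for (assumed) 4KB pages and 4-byte descriptors:
	 * NPTEPG = 4096/4 = 1024 PTEs per PT page, so we need
	 * 1024/64 = 16 level 2 descriptors, occupying 16*4 = 64
	 * (= PAGE_SIZE/SG4_LEV3SIZE) bytes; the masking below rounds
	 * `ste' down to such a 64-byte chunk boundary.
	 */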
1.3 thorpej 2625: ste = (st_entry_t *)((int)ste & ~(PAGE_SIZE/SG4_LEV3SIZE-1));
1.1 chs 2626:
2627: PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB,
2628: ("enter: ste2 %p (%p)\n", pmap_ste2(pmap, va), ste));
2629: }
2630: #endif
2631: va = trunc_page((vaddr_t)pmap_pte(pmap, va));
2632:
2633: /*
2634: * In the kernel we allocate a page from the kernel PT page
2635: * free list and map it into the kernel page table map (via
2636: * pmap_enter).
2637: */
2638: if (pmap == pmap_kernel()) {
2639: struct kpt_page *kpt;
2640:
2641: s = splvm();
2642: if ((kpt = kpt_free_list) == NULL) {
2643: /*
2644: * No PT pages available.
2645: * Try once to free up unused ones.
2646: */
2647: PMAP_DPRINTF(PDB_COLLECT,
2648: ("enter: no KPT pages, collecting...\n"));
1.49 rmind 2649: pmap_collect();
1.1 chs 2650: if ((kpt = kpt_free_list) == NULL)
2651: panic("pmap_enter_ptpage: can't get KPT page");
2652: }
2653: kpt_free_list = kpt->kpt_next;
2654: kpt->kpt_next = kpt_used_list;
2655: kpt_used_list = kpt;
2656: ptpa = kpt->kpt_pa;
1.26 christos 2657: memset((void *)kpt->kpt_va, 0, PAGE_SIZE);
1.1 chs 2658: pmap_enter(pmap, va, ptpa, VM_PROT_READ | VM_PROT_WRITE,
2659: VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED);
2660: pmap_update(pmap);
2661: #ifdef DEBUG
2662: if (pmapdebug & (PDB_ENTER|PDB_PTPAGE)) {
2663: int ix = pmap_ste(pmap, va) - pmap_ste(pmap, 0);
2664:
2665: printf("enter: add &Sysptmap[%d]: %x (KPT page %lx)\n",
1.20 tsutsui 2666: ix, Sysptmap[ix], kpt->kpt_va);
1.1 chs 2667: }
2668: #endif
2669: splx(s);
2670: } else {
2671:
2672: /*
2673: * For user processes we just allocate a page from the
2674: * VM system. Note that we set the page "wired" count to 1,
2675: * which is what we use to check if the page can be freed.
2676: * See pmap_remove_mapping().
2677: *
2678: * Count the segment table reference first so that we won't
2679: * lose the segment table when low on memory.
2680: */
2681:
2682: pmap->pm_sref++;
2683: PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE,
2684: ("enter: about to alloc UPT pg at %lx\n", va));
1.36 tsutsui 2685: mutex_enter(&uvm_kernel_object->vmobjlock);
1.33 dogcow 2686: while ((pg = uvm_pagealloc(uvm_kernel_object,
1.1 chs 2687: va - vm_map_min(kernel_map),
2688: NULL, UVM_PGA_ZERO)) == NULL) {
1.36 tsutsui 2689: mutex_exit(&uvm_kernel_object->vmobjlock);
1.1 chs 2690: uvm_wait("ptpage");
1.36 tsutsui 2691: mutex_enter(&uvm_kernel_object->vmobjlock);
1.1 chs 2692: }
1.36 tsutsui 2693: mutex_exit(&uvm_kernel_object->vmobjlock);
1.1 chs 2694: pg->flags &= ~(PG_BUSY|PG_FAKE);
2695: UVM_PAGE_OWN(pg, NULL);
2696: ptpa = VM_PAGE_TO_PHYS(pg);
2697: pmap_enter(pmap_kernel(), va, ptpa,
2698: VM_PROT_READ | VM_PROT_WRITE,
2699: VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED);
2700: pmap_update(pmap_kernel());
2701: }
2702: #if defined(M68040) || defined(M68060)
2703: /*
2704: * Turn off copyback caching of page table pages,
2705: * could get ugly otherwise.
2706: */
2707: #if defined(M68020) || defined(M68030)
2708: if (mmutype == MMU_68040)
2709: #endif
2710: {
2711: #ifdef DEBUG
2712: pt_entry_t *pte = pmap_pte(pmap_kernel(), va);
2713: if ((pmapdebug & PDB_PARANOIA) && (*pte & PG_CCB) == 0)
2714: printf("%s PT no CCB: kva=%lx ptpa=%lx pte@%p=%x\n",
1.20 tsutsui 2715: pmap == pmap_kernel() ? "Kernel" : "User",
2716: va, ptpa, pte, *pte);
1.1 chs 2717: #endif
2718: if (pmap_changebit(ptpa, PG_CI, ~PG_CCB))
2719: DCIS();
2720: }
2721: #endif
2722: /*
2723: * Locate the PV entry in the kernel for this PT page and
2724: * record the STE address. This is so that we can invalidate
2725: * the STE when we remove the mapping for the page.
2726: */
1.46 thorpej 2727: pvh = pa_to_pvh(ptpa);
1.1 chs 2728: s = splvm();
1.46 thorpej 2729: if (pvh) {
2730: pv = &pvh->pvh_first;
2731: pvh->pvh_attrs |= PVH_PTPAGE;
1.1 chs 2732: do {
2733: if (pv->pv_pmap == pmap_kernel() && pv->pv_va == va)
2734: break;
2735: } while ((pv = pv->pv_next));
1.46 thorpej 2736: } else {
2737: pv = NULL;
1.1 chs 2738: }
2739: #ifdef DEBUG
2740: if (pv == NULL)
2741: panic("pmap_enter_ptpage: PT page not entered");
2742: #endif
2743: pv->pv_ptste = ste;
2744: pv->pv_ptpmap = pmap;
2745:
2746: PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE,
2747: ("enter: new PT page at PA %lx, ste at %p\n", ptpa, ste));
2748:
2749: /*
2750: * Map the new PT page into the segment table.
2751: * Also increment the reference count on the segment table if this
2752: * was a user page table page. Note that we don't use vm_map_pageable
2753: * to keep the count as we do for PT pages; this is mostly because
2754: * it would be difficult to identify ST pages in pmap_pageable to
2755: * release them. We also avoid the overhead of vm_map_pageable.
2756: */
2757: #if defined(M68040) || defined(M68060)
2758: #if defined(M68020) || defined(M68030)
2759: if (mmutype == MMU_68040)
2760: #endif
2761: {
2762: st_entry_t *este;
2763:
2764: for (este = &ste[NPTEPG/SG4_LEV3SIZE]; ste < este; ste++) {
2765: *ste = ptpa | SG_U | SG_RW | SG_V;
2766: ptpa += SG4_LEV3SIZE * sizeof(st_entry_t);
2767: }
2768: }
2769: #if defined(M68020) || defined(M68030)
2770: else
2771: *ste = (ptpa & SG_FRAME) | SG_RW | SG_V;
2772: #endif
2773: #else
2774: *ste = (ptpa & SG_FRAME) | SG_RW | SG_V;
2775: #endif
2776: if (pmap != pmap_kernel()) {
2777: PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB,
2778: ("enter: stab %p refcnt %d\n",
2779: pmap->pm_stab, pmap->pm_sref));
2780: }
2781: /*
2782: * Flush stale TLB info.
2783: */
2784: if (pmap == pmap_kernel())
2785: TBIAS();
2786: else
2787: TBIAU();
2788: pmap->pm_ptpages++;
2789: splx(s);
1.22 martin 2790:
2791: return 0;
1.1 chs 2792: }
2793:
2794: /*
2795: * pmap_ptpage_addref:
2796: *
2797: * Add a reference to the specified PT page.
2798: */
2799: void
1.20 tsutsui 2800: pmap_ptpage_addref(vaddr_t ptpva)
1.1 chs 2801: {
2802: struct vm_page *pg;
2803:
1.36 tsutsui 2804: mutex_enter(&uvm_kernel_object->vmobjlock);
1.33 dogcow 2805: pg = uvm_pagelookup(uvm_kernel_object, ptpva - vm_map_min(kernel_map));
1.1 chs 2806: pg->wire_count++;
2807: PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB,
2808: ("ptpage addref: pg %p now %d\n",
2809: pg, pg->wire_count));
1.36 tsutsui 2810: mutex_exit(&uvm_kernel_object->vmobjlock);
1.1 chs 2811: }
2812:
2813: /*
2814: * pmap_ptpage_delref:
2815: *
2816: * Delete a reference to the specified PT page.
2817: */
2818: int
1.20 tsutsui 2819: pmap_ptpage_delref(vaddr_t ptpva)
1.1 chs 2820: {
2821: struct vm_page *pg;
2822: int rv;
2823:
1.36 tsutsui 2824: mutex_enter(&uvm_kernel_object->vmobjlock);
1.33 dogcow 2825: pg = uvm_pagelookup(uvm_kernel_object, ptpva - vm_map_min(kernel_map));
1.1 chs 2826: rv = --pg->wire_count;
2827: PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB,
2828: ("ptpage delref: pg %p now %d\n",
2829: pg, pg->wire_count));
1.36 tsutsui 2830: mutex_exit(&uvm_kernel_object->vmobjlock);
1.20 tsutsui 2831: return rv;
1.1 chs 2832: }
2833:
2834: /*
2835: * Routine: pmap_procwr
2836: *
2837: * Function:
2838: * Synchronize caches corresponding to [va, va + len) in p.
2839: */
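/*
 * The magic operation word passed to cachectl1() is assumed here to
 * be CC_EXTPURGE|CC_IPURGE, i.e. purge the instruction cache (and
 * any external cache) over the range, which is what is required
 * after code has been written into a process's address space.
 */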
2840: void
1.20 tsutsui 2841: pmap_procwr(struct proc *p, vaddr_t va, size_t len)
1.1 chs 2842: {
1.20 tsutsui 2843:
1.1 chs 2844: (void)cachectl1(0x80000004, va, len, p);
2845: }
2846:
2847: void
1.20 tsutsui 2848: _pmap_set_page_cacheable(pmap_t pmap, vaddr_t va)
1.1 chs 2849: {
2850:
1.20 tsutsui 2851: if (!pmap_ste_v(pmap, va))
1.1 chs 2852: return;
2853:
2854: #if defined(M68040) || defined(M68060)
2855: #if defined(M68020) || defined(M68030)
1.20 tsutsui 2856: if (mmutype == MMU_68040) {
1.1 chs 2857: #endif
1.20 tsutsui 2858: if (pmap_changebit(pmap_pte_pa(pmap_pte(pmap, va)), PG_CCB, ~PG_CI))
1.1 chs 2859: DCIS();
2860:
2861: #if defined(M68020) || defined(M68030)
2862: } else
1.20 tsutsui 2863: pmap_changebit(pmap_pte_pa(pmap_pte(pmap, va)), 0, ~PG_CI);
1.1 chs 2864: #endif
2865: #else
1.20 tsutsui 2866: pmap_changebit(pmap_pte_pa(pmap_pte(pmap, va)), 0, ~PG_CI);
1.1 chs 2867: #endif
2868: }
2869:
2870: void
1.20 tsutsui 2871: _pmap_set_page_cacheinhibit(pmap_t pmap, vaddr_t va)
1.1 chs 2872: {
2873:
1.20 tsutsui 2874: if (!pmap_ste_v(pmap, va))
1.1 chs 2875: return;
2876:
2877: #if defined(M68040) || defined(M68060)
2878: #if defined(M68020) || defined(M68030)
1.20 tsutsui 2879: if (mmutype == MMU_68040) {
1.1 chs 2880: #endif
1.20 tsutsui 2881: if (pmap_changebit(pmap_pte_pa(pmap_pte(pmap, va)), PG_CI, ~PG_CCB))
1.1 chs 2882: DCIS();
2883: #if defined(M68020) || defined(M68030)
2884: } else
1.20 tsutsui 2885: pmap_changebit(pmap_pte_pa(pmap_pte(pmap, va)), PG_CI, ~0);
1.1 chs 2886: #endif
2887: #else
1.20 tsutsui 2888: pmap_changebit(pmap_pte_pa(pmap_pte(pmap, va)), PG_CI, ~0);
1.1 chs 2889: #endif
2890: }
2891:
2892: int
1.20 tsutsui 2893: _pmap_page_is_cacheable(pmap_t pmap, vaddr_t va)
1.1 chs 2894: {
2895:
1.20 tsutsui 2896: if (!pmap_ste_v(pmap, va))
2897: return 0;
1.1 chs 2898:
1.20 tsutsui 2899: return (pmap_pte_ci(pmap_pte(pmap, va)) == 0) ? 1 : 0;
1.1 chs 2900: }
2901:
2902: #ifdef DEBUG
2903: /*
2904: * pmap_pvdump:
2905: *
2906: * Dump the contents of the PV list for the specified physical page.
2907: */
2908: void
1.20 tsutsui 2909: pmap_pvdump(paddr_t pa)
1.1 chs 2910: {
1.46 thorpej 2911: struct pv_header *pvh;
1.1 chs 2912: struct pv_entry *pv;
2913:
2914: printf("pa %lx", pa);
1.46 thorpej 2915: pvh = pa_to_pvh(pa);
2916: for (pv = &pvh->pvh_first; pv; pv = pv->pv_next)
1.47 mhitch 2917: printf(" -> pmap %p, va %lx, ptste %p, ptpmap %p",
2918: pv->pv_pmap, pv->pv_va, pv->pv_ptste, pv->pv_ptpmap);
1.1 chs 2919: printf("\n");
2920: }
2921:
2922: /*
2923: * pmap_check_wiring:
2924: *
2925: * Count the number of valid mappings in the specified PT page,
2926: * and ensure that it is consistent with the number of wirings
2927: * to that page that the VM system has.
2928: */
2929: void
1.20 tsutsui 2930: pmap_check_wiring(const char *str, vaddr_t va)
1.1 chs 2931: {
2932: pt_entry_t *pte;
2933: paddr_t pa;
2934: struct vm_page *pg;
2935: int count;
2936:
2937: if (!pmap_ste_v(pmap_kernel(), va) ||
2938: !pmap_pte_v(pmap_pte(pmap_kernel(), va)))
2939: return;
2940:
2941: pa = pmap_pte_pa(pmap_pte(pmap_kernel(), va));
2942: pg = PHYS_TO_VM_PAGE(pa);
1.13 chs 2943: if (pg->wire_count > PAGE_SIZE / sizeof(pt_entry_t)) {
1.1 chs 2944: panic("*%s*: 0x%lx: wire count %d", str, va, pg->wire_count);
2945: }
2946:
2947: count = 0;
1.3 thorpej 2948: for (pte = (pt_entry_t *)va; pte < (pt_entry_t *)(va + PAGE_SIZE);
2949: pte++)
1.1 chs 2950: if (*pte)
2951: count++;
2952: if (pg->wire_count != count)
2953: panic("*%s*: 0x%lx: w%d/a%d",
2954: str, va, pg->wire_count, count);
2955: }
2956: #endif /* DEBUG */