Annotation of src/sys/arch/arm/arm32/pmap.c, Revision 1.51
1.51 ! chris 1: /* $NetBSD: pmap.c,v 1.50 2002/03/05 04:48:03 thorpej Exp $ */
1.12 chris 2:
3: /*
1.49 thorpej 4: * Copyright (c) 2002 Wasabi Systems, Inc.
1.12 chris 5: * Copyright (c) 2001 Richard Earnshaw
6: * Copyright (c) 2001 Christopher Gilbert
7: * All rights reserved.
8: *
9: * 1. Redistributions of source code must retain the above copyright
10: * notice, this list of conditions and the following disclaimer.
11: * 2. Redistributions in binary form must reproduce the above copyright
12: * notice, this list of conditions and the following disclaimer in the
13: * documentation and/or other materials provided with the distribution.
14: * 3. The name of the company nor the name of the author may be used to
15: * endorse or promote products derived from this software without specific
16: * prior written permission.
17: *
18: * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
19: * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
20: * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21: * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
22: * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
23: * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
24: * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25: * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26: * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27: * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28: * SUCH DAMAGE.
29: */
1.1 matt 30:
31: /*-
32: * Copyright (c) 1999 The NetBSD Foundation, Inc.
33: * All rights reserved.
34: *
35: * This code is derived from software contributed to The NetBSD Foundation
36: * by Charles M. Hannum.
37: *
38: * Redistribution and use in source and binary forms, with or without
39: * modification, are permitted provided that the following conditions
40: * are met:
41: * 1. Redistributions of source code must retain the above copyright
42: * notice, this list of conditions and the following disclaimer.
43: * 2. Redistributions in binary form must reproduce the above copyright
44: * notice, this list of conditions and the following disclaimer in the
45: * documentation and/or other materials provided with the distribution.
46: * 3. All advertising materials mentioning features or use of this software
47: * must display the following acknowledgement:
48: * This product includes software developed by the NetBSD
49: * Foundation, Inc. and its contributors.
50: * 4. Neither the name of The NetBSD Foundation nor the names of its
51: * contributors may be used to endorse or promote products derived
52: * from this software without specific prior written permission.
53: *
54: * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
55: * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
56: * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
57: * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
58: * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
59: * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
60: * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
61: * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
62: * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
63: * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
64: * POSSIBILITY OF SUCH DAMAGE.
65: */
66:
67: /*
68: * Copyright (c) 1994-1998 Mark Brinicombe.
69: * Copyright (c) 1994 Brini.
70: * All rights reserved.
71: *
72: * This code is derived from software written for Brini by Mark Brinicombe
73: *
74: * Redistribution and use in source and binary forms, with or without
75: * modification, are permitted provided that the following conditions
76: * are met:
77: * 1. Redistributions of source code must retain the above copyright
78: * notice, this list of conditions and the following disclaimer.
79: * 2. Redistributions in binary form must reproduce the above copyright
80: * notice, this list of conditions and the following disclaimer in the
81: * documentation and/or other materials provided with the distribution.
82: * 3. All advertising materials mentioning features or use of this software
83: * must display the following acknowledgement:
84: * This product includes software developed by Mark Brinicombe.
85: * 4. The name of the author may not be used to endorse or promote products
86: * derived from this software without specific prior written permission.
87: *
88: * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
89: * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
90: * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
91: * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
92: * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
93: * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
94: * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
95: * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
             96: * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
                 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
             97: *
98: * RiscBSD kernel project
99: *
100: * pmap.c
101: *
            102: * Machine dependent vm stuff
103: *
104: * Created : 20/09/94
105: */
106:
107: /*
108: * Performance improvements, UVM changes, overhauls and part-rewrites
109: * were contributed by Neil A. Carson <neil@causality.com>.
110: */
111:
112: /*
113: * The dram block info is currently referenced from the bootconfig.
114: * This should be placed in a separate structure.
115: */
116:
117: /*
118: * Special compilation symbols
119: * PMAP_DEBUG - Build in pmap_debug_level code
120: */
121:
122: /* Include header files */
123:
124: #include "opt_pmap_debug.h"
125: #include "opt_ddb.h"
126:
127: #include <sys/types.h>
128: #include <sys/param.h>
129: #include <sys/kernel.h>
130: #include <sys/systm.h>
131: #include <sys/proc.h>
132: #include <sys/malloc.h>
133: #include <sys/user.h>
1.10 chris 134: #include <sys/pool.h>
1.16 chris 135: #include <sys/cdefs.h>
136:
1.1 matt 137: #include <uvm/uvm.h>
138:
139: #include <machine/bootconfig.h>
140: #include <machine/bus.h>
141: #include <machine/pmap.h>
142: #include <machine/pcb.h>
143: #include <machine/param.h>
1.32 thorpej 144: #include <arm/arm32/katelib.h>
1.16 chris 145:
1.51 ! chris 146: __KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.50 2002/03/05 04:48:03 thorpej Exp $");
1.1 matt 147: #ifdef PMAP_DEBUG
148: #define PDEBUG(_lev_,_stat_) \
149: if (pmap_debug_level >= (_lev_)) \
150: ((_stat_))
151: int pmap_debug_level = -2;
1.48 chris 152: void pmap_dump_pvlist(vaddr_t phys, char *m);
1.17 chris 153:
154: /*
155: * for switching to potentially finer grained debugging
156: */
157: #define PDB_FOLLOW 0x0001
158: #define PDB_INIT 0x0002
159: #define PDB_ENTER 0x0004
160: #define PDB_REMOVE 0x0008
161: #define PDB_CREATE 0x0010
162: #define PDB_PTPAGE 0x0020
1.48 chris 163: #define PDB_GROWKERN 0x0040
1.17 chris 164: #define PDB_BITS 0x0080
165: #define PDB_COLLECT 0x0100
166: #define PDB_PROTECT 0x0200
1.48 chris 167: #define PDB_MAP_L1 0x0400
1.17 chris 168: #define PDB_BOOTSTRAP 0x1000
169: #define PDB_PARANOIA 0x2000
170: #define PDB_WIRING 0x4000
171: #define PDB_PVDUMP 0x8000
172:
173: int debugmap = 0;
174: int pmapdebug = PDB_PARANOIA | PDB_FOLLOW;
175: #define NPDEBUG(_lev_,_stat_) \
176: if (pmapdebug & (_lev_)) \
177: ((_stat_))
178:
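/*
 * Usage sketch (illustrative only, for a kernel built with PMAP_DEBUG):
 * select the classes of interest at run time, e.g. from ddb,
 *
 *	pmapdebug |= PDB_ENTER | PDB_REMOVE;
 *
 * and instrument a code path with
 *
 *	NPDEBUG(PDB_ENTER, printf("pmap_enter: pmap=%p va=%08lx\n", pmap, va));
 *
 * which prints only while the corresponding bit is set in pmapdebug.
 */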
1.1 matt 179: #else /* PMAP_DEBUG */
180: #define PDEBUG(_lev_,_stat_) /* Nothing */
1.48 chris 181: #define NPDEBUG(_lev_,_stat_) /* Nothing */
1.1 matt 182: #endif /* PMAP_DEBUG */
183:
184: struct pmap kernel_pmap_store;
185:
1.10 chris 186: /*
1.48 chris 187: * linked list of all non-kernel pmaps
188: */
189:
190: static struct pmap_head pmaps;
191:
192: /*
1.10 chris 193: * pool that pmap structures are allocated from
194: */
195:
196: struct pool pmap_pmap_pool;
197:
1.1 matt 198: pagehook_t page_hook0;
199: pagehook_t page_hook1;
200: char *memhook;
201: pt_entry_t msgbufpte;
202: extern caddr_t msgbufaddr;
203:
204: boolean_t pmap_initialized = FALSE; /* Has pmap_init completed? */
1.17 chris 205: /*
206: * locking data structures
207: */
1.1 matt 208:
1.17 chris 209: static struct lock pmap_main_lock;
210: static struct simplelock pvalloc_lock;
1.48 chris 211: static struct simplelock pmaps_lock;
1.17 chris 212: #ifdef LOCKDEBUG
213: #define PMAP_MAP_TO_HEAD_LOCK() \
214: (void) spinlockmgr(&pmap_main_lock, LK_SHARED, NULL)
215: #define PMAP_MAP_TO_HEAD_UNLOCK() \
216: (void) spinlockmgr(&pmap_main_lock, LK_RELEASE, NULL)
217:
218: #define PMAP_HEAD_TO_MAP_LOCK() \
219: (void) spinlockmgr(&pmap_main_lock, LK_EXCLUSIVE, NULL)
220: #define PMAP_HEAD_TO_MAP_UNLOCK() \
221: (void) spinlockmgr(&pmap_main_lock, LK_RELEASE, NULL)
222: #else
223: #define PMAP_MAP_TO_HEAD_LOCK() /* nothing */
224: #define PMAP_MAP_TO_HEAD_UNLOCK() /* nothing */
225: #define PMAP_HEAD_TO_MAP_LOCK() /* nothing */
226: #define PMAP_HEAD_TO_MAP_UNLOCK() /* nothing */
227: #endif /* LOCKDEBUG */
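/*
 * A sketch of the convention the macros above implement: operations that
 * walk from a pmap towards the PV lists take pmap_main_lock shared, so
 * several of them may run concurrently, while operations that walk from a
 * vm_page's PV list back towards the pmaps take it exclusive.  Without
 * LOCKDEBUG the macros compile away to nothing.
 */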
228:
229: /*
230: * pv_page management structures: locked by pvalloc_lock
231: */
1.1 matt 232:
1.17 chris 233: TAILQ_HEAD(pv_pagelist, pv_page);
234: static struct pv_pagelist pv_freepages; /* list of pv_pages with free entrys */
235: static struct pv_pagelist pv_unusedpgs; /* list of unused pv_pages */
236: static int pv_nfpvents; /* # of free pv entries */
237: static struct pv_page *pv_initpage; /* bootstrap page from kernel_map */
238: static vaddr_t pv_cachedva; /* cached VA for later use */
239:
240: #define PVE_LOWAT (PVE_PER_PVPAGE / 2) /* free pv_entry low water mark */
241: #define PVE_HIWAT (PVE_LOWAT + (PVE_PER_PVPAGE * 2))
242: /* high water mark */
243:
244: /*
245: * local prototypes
246: */
247:
248: static struct pv_entry *pmap_add_pvpage __P((struct pv_page *, boolean_t));
249: static struct pv_entry *pmap_alloc_pv __P((struct pmap *, int)); /* see codes below */
250: #define ALLOCPV_NEED 0 /* need PV now */
251: #define ALLOCPV_TRY 1 /* just try to allocate, don't steal */
252: #define ALLOCPV_NONEED 2 /* don't need PV, just growing cache */
253: static struct pv_entry *pmap_alloc_pvpage __P((struct pmap *, int));
1.49 thorpej 254: static void pmap_enter_pv __P((struct vm_page *,
1.17 chris 255: struct pv_entry *, struct pmap *,
256: vaddr_t, struct vm_page *, int));
257: static void pmap_free_pv __P((struct pmap *, struct pv_entry *));
258: static void pmap_free_pvs __P((struct pmap *, struct pv_entry *));
259: static void pmap_free_pv_doit __P((struct pv_entry *));
260: static void pmap_free_pvpage __P((void));
261: static boolean_t pmap_is_curpmap __P((struct pmap *));
1.49 thorpej 262: static struct pv_entry *pmap_remove_pv __P((struct vm_page *, struct pmap *,
1.17 chris 263: vaddr_t));
264: #define PMAP_REMOVE_ALL 0 /* remove all mappings */
265: #define PMAP_REMOVE_SKIPWIRED 1 /* skip wired mappings */
1.1 matt 266:
1.49 thorpej 267: static u_int pmap_modify_pv __P((struct pmap *, vaddr_t, struct vm_page *,
1.33 chris 268: u_int, u_int));
269:
270: static void pmap_free_l1pt __P((struct l1pt *));
271: static int pmap_allocpagedir __P((struct pmap *));
272: static int pmap_clean_page __P((struct pv_entry *, boolean_t));
1.49 thorpej 273: static void pmap_remove_all __P((struct vm_page *));
1.33 chris 274:
275:
1.2 matt 276: vsize_t npages;
1.1 matt 277:
1.17 chris 278: static struct vm_page *pmap_alloc_ptp __P((struct pmap *, vaddr_t, boolean_t));
279: static struct vm_page *pmap_get_ptp __P((struct pmap *, vaddr_t, boolean_t));
1.49 thorpej 280: __inline static void pmap_clearbit __P((struct vm_page *, unsigned int));
1.17 chris 281:
1.2 matt 282: extern paddr_t physical_start;
283: extern paddr_t physical_freestart;
284: extern paddr_t physical_end;
285: extern paddr_t physical_freeend;
1.1 matt 286: extern unsigned int free_pages;
287: extern int max_processes;
288:
289: vaddr_t virtual_start;
290: vaddr_t virtual_end;
1.48 chris 291: vaddr_t pmap_curmaxkvaddr;
1.1 matt 292:
293: vaddr_t avail_start;
294: vaddr_t avail_end;
295:
296: extern pv_addr_t systempage;
297:
298: #define ALLOC_PAGE_HOOK(x, s) \
299: x.va = virtual_start; \
1.15 chris 300: x.pte = (pt_entry_t *)pmap_pte(pmap_kernel(), virtual_start); \
1.1 matt 301: virtual_start += s;
302:
303: /* Variables used by the L1 page table queue code */
304: SIMPLEQ_HEAD(l1pt_queue, l1pt);
305: struct l1pt_queue l1pt_static_queue; /* head of our static l1 queue */
306: int l1pt_static_queue_count; /* items in the static l1 queue */
307: int l1pt_static_create_count; /* static l1 items created */
308: struct l1pt_queue l1pt_queue; /* head of our l1 queue */
309: int l1pt_queue_count; /* items in the l1 queue */
310: int l1pt_create_count; /* stat - L1's create count */
311: int l1pt_reuse_count; /* stat - L1's reused count */
312:
313: /* Local function prototypes (not used outside this file) */
1.15 chris 314: pt_entry_t *pmap_pte __P((struct pmap *pmap, vaddr_t va));
1.49 thorpej 315: void pmap_copy_on_write __P((struct vm_page *));
1.15 chris 316: void pmap_pinit __P((struct pmap *));
317: void pmap_freepagedir __P((struct pmap *));
1.1 matt 318:
319: /* Other function prototypes */
320: extern void bzero_page __P((vaddr_t));
321: extern void bcopy_page __P((vaddr_t, vaddr_t));
322:
323: struct l1pt *pmap_alloc_l1pt __P((void));
1.15 chris 324: static __inline void pmap_map_in_l1 __P((struct pmap *pmap, vaddr_t va,
1.17 chris 325: vaddr_t l2pa, boolean_t));
1.1 matt 326:
1.11 chris 327: static pt_entry_t *pmap_map_ptes __P((struct pmap *));
1.17 chris 328: static void pmap_unmap_ptes __P((struct pmap *));
1.11 chris 329:
1.49 thorpej 330: __inline static void pmap_vac_me_harder __P((struct pmap *, struct vm_page *,
1.25 rearnsha 331: pt_entry_t *, boolean_t));
1.49 thorpej 332: static void pmap_vac_me_kpmap __P((struct pmap *, struct vm_page *,
1.25 rearnsha 333: pt_entry_t *, boolean_t));
1.49 thorpej 334: static void pmap_vac_me_user __P((struct pmap *, struct vm_page *,
1.25 rearnsha 335: pt_entry_t *, boolean_t));
1.11 chris 336:
1.17 chris 337: /*
1.27 rearnsha 338: * Cache enable bits in PTE to use on pages that are cacheable.
            339: * On most machines this is cacheable/bufferable, but on some, e.g. arm10, we
            340: * can choose between write-through and write-back caching.
341: */
342: pt_entry_t pte_cache_mode = (PT_C | PT_B);
343:
344: /*
1.17 chris 345: * real definition of pv_entry.
346: */
347:
348: struct pv_entry {
349: struct pv_entry *pv_next; /* next pv_entry */
350: struct pmap *pv_pmap; /* pmap where mapping lies */
351: vaddr_t pv_va; /* virtual address for mapping */
352: int pv_flags; /* flags */
353: struct vm_page *pv_ptp; /* vm_page for the ptp */
354: };
355:
356: /*
357: * pv_entrys are dynamically allocated in chunks from a single page.
358: * we keep track of how many pv_entrys are in use for each page and
359: * we can free pv_entry pages if needed. there is one lock for the
360: * entire allocation system.
361: */
362:
363: struct pv_page_info {
364: TAILQ_ENTRY(pv_page) pvpi_list;
365: struct pv_entry *pvpi_pvfree;
366: int pvpi_nfree;
367: };
368:
369: /*
370: * number of pv_entry's in a pv_page
            371: * (note: won't work on systems where NBPG isn't a constant)
372: */
373:
374: #define PVE_PER_PVPAGE ((NBPG - sizeof(struct pv_page_info)) / \
375: sizeof(struct pv_entry))
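/*
 * Worked example (a sketch, assuming NBPG = 4096 and the 32-bit layouts
 * above): struct pv_page_info is 16 bytes and struct pv_entry is 20 bytes,
 * so PVE_PER_PVPAGE evaluates to (4096 - 16) / 20 = 204 entries per
 * pv_page, giving PVE_LOWAT = 102 and PVE_HIWAT = 510.
 */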
376:
377: /*
378: * a pv_page: where pv_entrys are allocated from
379: */
380:
381: struct pv_page {
382: struct pv_page_info pvinfo;
383: struct pv_entry pvents[PVE_PER_PVPAGE];
384: };
385:
1.1 matt 386: #ifdef MYCROFT_HACK
387: int mycroft_hack = 0;
388: #endif
389:
390: /* Function to set the debug level of the pmap code */
391:
392: #ifdef PMAP_DEBUG
393: void
394: pmap_debug(level)
395: int level;
396: {
397: pmap_debug_level = level;
398: printf("pmap_debug: level=%d\n", pmap_debug_level);
399: }
400: #endif /* PMAP_DEBUG */
401:
1.22 chris 402: __inline static boolean_t
1.17 chris 403: pmap_is_curpmap(struct pmap *pmap)
404: {
405: if ((curproc && curproc->p_vmspace->vm_map.pmap == pmap)
406: || (pmap == pmap_kernel()))
407: return (TRUE);
408: return (FALSE);
409: }
1.1 matt 410: #include "isadma.h"
411:
412: #if NISADMA > 0
413: /*
414: * Used to protect memory for ISA DMA bounce buffers. If, when loading
415: * pages into the system, memory intersects with any of these ranges,
416: * the intersecting memory will be loaded into a lower-priority free list.
417: */
418: bus_dma_segment_t *pmap_isa_dma_ranges;
419: int pmap_isa_dma_nranges;
420:
1.2 matt 421: boolean_t pmap_isa_dma_range_intersect __P((paddr_t, psize_t,
422: paddr_t *, psize_t *));
1.1 matt 423:
424: /*
425: * Check if a memory range intersects with an ISA DMA range, and
426: * return the page-rounded intersection if it does. The intersection
427: * will be placed on a lower-priority free list.
428: */
429: boolean_t
430: pmap_isa_dma_range_intersect(pa, size, pap, sizep)
1.2 matt 431: paddr_t pa;
432: psize_t size;
433: paddr_t *pap;
434: psize_t *sizep;
1.1 matt 435: {
436: bus_dma_segment_t *ds;
437: int i;
438:
439: if (pmap_isa_dma_ranges == NULL)
440: return (FALSE);
441:
442: for (i = 0, ds = pmap_isa_dma_ranges;
443: i < pmap_isa_dma_nranges; i++, ds++) {
444: if (ds->ds_addr <= pa && pa < (ds->ds_addr + ds->ds_len)) {
445: /*
446: * Beginning of region intersects with this range.
447: */
448: *pap = trunc_page(pa);
449: *sizep = round_page(min(pa + size,
450: ds->ds_addr + ds->ds_len) - pa);
451: return (TRUE);
452: }
453: if (pa < ds->ds_addr && ds->ds_addr < (pa + size)) {
454: /*
455: * End of region intersects with this range.
456: */
457: *pap = trunc_page(ds->ds_addr);
458: *sizep = round_page(min((pa + size) - ds->ds_addr,
459: ds->ds_len));
460: return (TRUE);
461: }
462: }
463:
464: /*
465: * No intersection found.
466: */
467: return (FALSE);
468: }
469: #endif /* NISADMA > 0 */
470:
471: /*
1.17 chris 472: * p v _ e n t r y f u n c t i o n s
473: */
474:
475: /*
476: * pv_entry allocation functions:
477: * the main pv_entry allocation functions are:
478: * pmap_alloc_pv: allocate a pv_entry structure
479: * pmap_free_pv: free one pv_entry
480: * pmap_free_pvs: free a list of pv_entrys
481: *
482: * the rest are helper functions
1.1 matt 483: */
484:
485: /*
1.17 chris 486: * pmap_alloc_pv: inline function to allocate a pv_entry structure
487: * => we lock pvalloc_lock
488: * => if we fail, we call out to pmap_alloc_pvpage
489: * => 3 modes:
490: * ALLOCPV_NEED = we really need a pv_entry, even if we have to steal it
491: * ALLOCPV_TRY = we want a pv_entry, but not enough to steal
492: * ALLOCPV_NONEED = we are trying to grow our free list, don't really need
493: * one now
494: *
495: * "try" is for optional functions like pmap_copy().
1.1 matt 496: */
1.17 chris 497:
498: __inline static struct pv_entry *
499: pmap_alloc_pv(pmap, mode)
500: struct pmap *pmap;
501: int mode;
1.1 matt 502: {
1.17 chris 503: struct pv_page *pvpage;
504: struct pv_entry *pv;
505:
506: simple_lock(&pvalloc_lock);
507:
1.51 ! chris 508: pvpage = TAILQ_FIRST(&pv_freepages);
! 509:
! 510: if (pvpage != NULL) {
1.17 chris 511: pvpage->pvinfo.pvpi_nfree--;
512: if (pvpage->pvinfo.pvpi_nfree == 0) {
513: /* nothing left in this one? */
514: TAILQ_REMOVE(&pv_freepages, pvpage, pvinfo.pvpi_list);
515: }
516: pv = pvpage->pvinfo.pvpi_pvfree;
1.51 ! chris 517: KASSERT(pv);
1.17 chris 518: pvpage->pvinfo.pvpi_pvfree = pv->pv_next;
519: pv_nfpvents--; /* took one from pool */
520: } else {
521: pv = NULL; /* need more of them */
522: }
523:
524: /*
525: * if below low water mark or we didn't get a pv_entry we try and
526: * create more pv_entrys ...
527: */
528:
529: if (pv_nfpvents < PVE_LOWAT || pv == NULL) {
530: if (pv == NULL)
531: pv = pmap_alloc_pvpage(pmap, (mode == ALLOCPV_TRY) ?
532: mode : ALLOCPV_NEED);
533: else
534: (void) pmap_alloc_pvpage(pmap, ALLOCPV_NONEED);
535: }
536:
537: simple_unlock(&pvalloc_lock);
538: return(pv);
539: }
540:
541: /*
542: * pmap_alloc_pvpage: maybe allocate a new pvpage
543: *
544: * if need_entry is false: try and allocate a new pv_page
545: * if need_entry is true: try and allocate a new pv_page and return a
546: * new pv_entry from it. if we are unable to allocate a pv_page
547: * we make a last ditch effort to steal a pv_page from some other
548: * mapping. if that fails, we panic...
549: *
550: * => we assume that the caller holds pvalloc_lock
551: */
552:
553: static struct pv_entry *
554: pmap_alloc_pvpage(pmap, mode)
555: struct pmap *pmap;
556: int mode;
557: {
558: struct vm_page *pg;
559: struct pv_page *pvpage;
1.1 matt 560: struct pv_entry *pv;
1.17 chris 561: int s;
562:
563: /*
564: * if we need_entry and we've got unused pv_pages, allocate from there
565: */
566:
1.51 ! chris 567: pvpage = TAILQ_FIRST(&pv_unusedpgs);
! 568: if (mode != ALLOCPV_NONEED && pvpage != NULL) {
1.17 chris 569:
570: /* move it to pv_freepages list */
571: TAILQ_REMOVE(&pv_unusedpgs, pvpage, pvinfo.pvpi_list);
572: TAILQ_INSERT_HEAD(&pv_freepages, pvpage, pvinfo.pvpi_list);
573:
574: /* allocate a pv_entry */
575: pvpage->pvinfo.pvpi_nfree--; /* can't go to zero */
576: pv = pvpage->pvinfo.pvpi_pvfree;
1.51 ! chris 577: KASSERT(pv);
1.17 chris 578: pvpage->pvinfo.pvpi_pvfree = pv->pv_next;
579:
580: pv_nfpvents--; /* took one from pool */
581: return(pv);
582: }
1.1 matt 583:
584: /*
1.17 chris 585: * see if we've got a cached unmapped VA that we can map a page in.
586: * if not, try to allocate one.
1.1 matt 587: */
588:
1.23 chs 589:
1.17 chris 590: if (pv_cachedva == 0) {
1.23 chs 591: s = splvm();
592: pv_cachedva = uvm_km_kmemalloc(kmem_map, NULL,
1.17 chris 593: PAGE_SIZE, UVM_KMF_TRYLOCK|UVM_KMF_VALLOC);
1.23 chs 594: splx(s);
1.17 chris 595: if (pv_cachedva == 0) {
596: return (NULL);
1.1 matt 597: }
598: }
1.17 chris 599:
1.23 chs 600: pg = uvm_pagealloc(NULL, pv_cachedva - vm_map_min(kernel_map), NULL,
601: UVM_PGA_USERESERVE);
1.17 chris 602:
603: if (pg == NULL)
604: return (NULL);
1.51 ! chris 605: pg->flags &= ~PG_BUSY; /* never busy */
1.17 chris 606:
607: /*
608: * add a mapping for our new pv_page and free its entrys (save one!)
609: *
610: * NOTE: If we are allocating a PV page for the kernel pmap, the
611: * pmap is already locked! (...but entering the mapping is safe...)
612: */
613:
1.51 ! chris 614: pmap_kenter_pa(pv_cachedva, VM_PAGE_TO_PHYS(pg),
! 615: VM_PROT_READ|VM_PROT_WRITE);
1.19 chris 616: pmap_update(pmap_kernel());
1.17 chris 617: pvpage = (struct pv_page *) pv_cachedva;
618: pv_cachedva = 0;
619: return (pmap_add_pvpage(pvpage, mode != ALLOCPV_NONEED));
1.1 matt 620: }
621:
622: /*
1.17 chris 623: * pmap_add_pvpage: add a pv_page's pv_entrys to the free list
624: *
625: * => caller must hold pvalloc_lock
626: * => if need_entry is true, we allocate and return one pv_entry
1.1 matt 627: */
628:
1.17 chris 629: static struct pv_entry *
630: pmap_add_pvpage(pvp, need_entry)
631: struct pv_page *pvp;
632: boolean_t need_entry;
1.1 matt 633: {
1.17 chris 634: int tofree, lcv;
635:
636: /* do we need to return one? */
637: tofree = (need_entry) ? PVE_PER_PVPAGE - 1 : PVE_PER_PVPAGE;
1.1 matt 638:
1.17 chris 639: pvp->pvinfo.pvpi_pvfree = NULL;
640: pvp->pvinfo.pvpi_nfree = tofree;
641: for (lcv = 0 ; lcv < tofree ; lcv++) {
642: pvp->pvents[lcv].pv_next = pvp->pvinfo.pvpi_pvfree;
643: pvp->pvinfo.pvpi_pvfree = &pvp->pvents[lcv];
1.1 matt 644: }
1.17 chris 645: if (need_entry)
646: TAILQ_INSERT_TAIL(&pv_freepages, pvp, pvinfo.pvpi_list);
647: else
648: TAILQ_INSERT_TAIL(&pv_unusedpgs, pvp, pvinfo.pvpi_list);
649: pv_nfpvents += tofree;
650: return((need_entry) ? &pvp->pvents[lcv] : NULL);
1.1 matt 651: }
652:
1.17 chris 653: /*
654: * pmap_free_pv_doit: actually free a pv_entry
655: *
656: * => do not call this directly! instead use either
657: * 1. pmap_free_pv ==> free a single pv_entry
658: * 2. pmap_free_pvs => free a list of pv_entrys
659: * => we must be holding pvalloc_lock
660: */
661:
662: __inline static void
663: pmap_free_pv_doit(pv)
664: struct pv_entry *pv;
1.1 matt 665: {
1.17 chris 666: struct pv_page *pvp;
1.1 matt 667:
1.17 chris 668: pvp = (struct pv_page *) arm_trunc_page((vaddr_t)pv);
669: pv_nfpvents++;
670: pvp->pvinfo.pvpi_nfree++;
1.1 matt 671:
1.17 chris 672: /* nfree == 1 => fully allocated page just became partly allocated */
673: if (pvp->pvinfo.pvpi_nfree == 1) {
674: TAILQ_INSERT_HEAD(&pv_freepages, pvp, pvinfo.pvpi_list);
1.1 matt 675: }
676:
1.17 chris 677: /* free it */
678: pv->pv_next = pvp->pvinfo.pvpi_pvfree;
679: pvp->pvinfo.pvpi_pvfree = pv;
1.1 matt 680:
1.17 chris 681: /*
682: * are all pv_page's pv_entry's free? move it to unused queue.
683: */
1.1 matt 684:
1.17 chris 685: if (pvp->pvinfo.pvpi_nfree == PVE_PER_PVPAGE) {
686: TAILQ_REMOVE(&pv_freepages, pvp, pvinfo.pvpi_list);
687: TAILQ_INSERT_HEAD(&pv_unusedpgs, pvp, pvinfo.pvpi_list);
1.1 matt 688: }
689: }
690:
691: /*
1.17 chris 692: * pmap_free_pv: free a single pv_entry
693: *
694: * => we gain the pvalloc_lock
1.1 matt 695: */
696:
1.17 chris 697: __inline static void
698: pmap_free_pv(pmap, pv)
1.15 chris 699: struct pmap *pmap;
1.1 matt 700: struct pv_entry *pv;
701: {
1.17 chris 702: simple_lock(&pvalloc_lock);
703: pmap_free_pv_doit(pv);
704:
705: /*
706: * Can't free the PV page if the PV entries were associated with
707: * the kernel pmap; the pmap is already locked.
708: */
1.51 ! chris 709: if (pv_nfpvents > PVE_HIWAT && TAILQ_FIRST(&pv_unusedpgs) != NULL &&
1.17 chris 710: pmap != pmap_kernel())
711: pmap_free_pvpage();
712:
713: simple_unlock(&pvalloc_lock);
714: }
1.1 matt 715:
1.17 chris 716: /*
717: * pmap_free_pvs: free a list of pv_entrys
718: *
719: * => we gain the pvalloc_lock
720: */
1.1 matt 721:
1.17 chris 722: __inline static void
723: pmap_free_pvs(pmap, pvs)
724: struct pmap *pmap;
725: struct pv_entry *pvs;
726: {
727: struct pv_entry *nextpv;
1.1 matt 728:
1.17 chris 729: simple_lock(&pvalloc_lock);
1.1 matt 730:
1.17 chris 731: for ( /* null */ ; pvs != NULL ; pvs = nextpv) {
732: nextpv = pvs->pv_next;
733: pmap_free_pv_doit(pvs);
1.1 matt 734: }
735:
1.17 chris 736: /*
737: * Can't free the PV page if the PV entries were associated with
738: * the kernel pmap; the pmap is already locked.
739: */
1.51 ! chris 740: if (pv_nfpvents > PVE_HIWAT && TAILQ_FIRST(&pv_unusedpgs) != NULL &&
1.17 chris 741: pmap != pmap_kernel())
742: pmap_free_pvpage();
1.1 matt 743:
1.17 chris 744: simple_unlock(&pvalloc_lock);
1.1 matt 745: }
746:
747:
748: /*
1.17 chris 749: * pmap_free_pvpage: try and free an unused pv_page structure
750: *
751: * => assume caller is holding the pvalloc_lock and that
752: * there is a page on the pv_unusedpgs list
753: * => if we can't get a lock on the kmem_map we try again later
1.1 matt 754: */
755:
1.17 chris 756: static void
757: pmap_free_pvpage()
1.1 matt 758: {
1.17 chris 759: int s;
760: struct vm_map *map;
761: struct vm_map_entry *dead_entries;
762: struct pv_page *pvp;
763:
764: s = splvm(); /* protect kmem_map */
1.1 matt 765:
1.51 ! chris 766: pvp = TAILQ_FIRST(&pv_unusedpgs);
1.1 matt 767:
768: /*
1.17 chris 769: * note: watch out for pv_initpage which is allocated out of
770: * kernel_map rather than kmem_map.
1.1 matt 771: */
1.17 chris 772: if (pvp == pv_initpage)
773: map = kernel_map;
774: else
775: map = kmem_map;
776: if (vm_map_lock_try(map)) {
777:
778: /* remove pvp from pv_unusedpgs */
779: TAILQ_REMOVE(&pv_unusedpgs, pvp, pvinfo.pvpi_list);
780:
781: /* unmap the page */
782: dead_entries = NULL;
783: uvm_unmap_remove(map, (vaddr_t)pvp, ((vaddr_t)pvp) + PAGE_SIZE,
784: &dead_entries);
785: vm_map_unlock(map);
786:
787: if (dead_entries != NULL)
788: uvm_unmap_detach(dead_entries, 0);
1.1 matt 789:
1.17 chris 790: pv_nfpvents -= PVE_PER_PVPAGE; /* update free count */
1.1 matt 791: }
1.17 chris 792: if (pvp == pv_initpage)
793: /* no more initpage, we've freed it */
794: pv_initpage = NULL;
1.1 matt 795:
796: splx(s);
797: }
798:
799: /*
1.17 chris 800: * main pv_entry manipulation functions:
1.49 thorpej 801: * pmap_enter_pv: enter a mapping onto a vm_page list
            802: * pmap_remove_pv: remove a mapping from a vm_page list
1.17 chris 803: *
804: * NOTE: pmap_enter_pv expects to lock the pvh itself
            805: * pmap_remove_pv expects the caller to lock the pvh before calling
806: */
807:
808: /*
1.49 thorpej 809: * pmap_enter_pv: enter a mapping onto a vm_page list
1.17 chris 810: *
811: * => caller should hold the proper lock on pmap_main_lock
812: * => caller should have pmap locked
1.49 thorpej 813: * => we will gain the lock on the vm_page and allocate the new pv_entry
1.17 chris 814: * => caller should adjust ptp's wire_count before calling
815: * => caller should not adjust pmap's wire_count
816: */
817:
818: __inline static void
1.49 thorpej 819: pmap_enter_pv(pg, pve, pmap, va, ptp, flags)
820: struct vm_page *pg;
1.17 chris 821: struct pv_entry *pve; /* preallocated pve for us to use */
822: struct pmap *pmap;
823: vaddr_t va;
824: struct vm_page *ptp; /* PTP in pmap that maps this VA */
825: int flags;
826: {
827: pve->pv_pmap = pmap;
828: pve->pv_va = va;
829: pve->pv_ptp = ptp; /* NULL for kernel pmap */
830: pve->pv_flags = flags;
1.49 thorpej 831: simple_lock(&pg->mdpage.pvh_slock); /* lock vm_page */
832: pve->pv_next = pg->mdpage.pvh_list; /* add to ... */
833: pg->mdpage.pvh_list = pve; /* ... locked list */
834: simple_unlock(&pg->mdpage.pvh_slock); /* unlock, done! */
1.17 chris 835: if (pve->pv_flags & PT_W)
836: ++pmap->pm_stats.wired_count;
837: }
838:
839: /*
840: * pmap_remove_pv: try to remove a mapping from a pv_list
841: *
842: * => caller should hold proper lock on pmap_main_lock
843: * => pmap should be locked
1.49 thorpej 844: * => caller should hold lock on vm_page [so that attrs can be adjusted]
1.17 chris 845: * => caller should adjust ptp's wire_count and free PTP if needed
846: * => caller should NOT adjust pmap's wire_count
847: * => we return the removed pve
848: */
849:
850: __inline static struct pv_entry *
1.49 thorpej 851: pmap_remove_pv(pg, pmap, va)
852: struct vm_page *pg;
1.17 chris 853: struct pmap *pmap;
854: vaddr_t va;
855: {
856: struct pv_entry *pve, **prevptr;
857:
1.49 thorpej 858: prevptr = &pg->mdpage.pvh_list; /* previous pv_entry pointer */
1.17 chris 859: pve = *prevptr;
860: while (pve) {
861: if (pve->pv_pmap == pmap && pve->pv_va == va) { /* match? */
862: *prevptr = pve->pv_next; /* remove it! */
863: if (pve->pv_flags & PT_W)
864: --pmap->pm_stats.wired_count;
865: break;
866: }
867: prevptr = &pve->pv_next; /* previous pointer */
868: pve = pve->pv_next; /* advance */
869: }
870: return(pve); /* return removed pve */
871: }
872:
873: /*
874: *
875: * pmap_modify_pv: Update pv flags
876: *
1.49 thorpej 877: * => caller should hold lock on vm_page [so that attrs can be adjusted]
1.17 chris 878: * => caller should NOT adjust pmap's wire_count
1.29 rearnsha 879: * => caller must call pmap_vac_me_harder() if writable status of a page
880: * may have changed.
1.17 chris 881: * => we return the old flags
882: *
1.1 matt 883: * Modify a physical-virtual mapping in the pv table
884: */
885:
1.33 chris 886: /*__inline */
887: static u_int
1.49 thorpej 888: pmap_modify_pv(pmap, va, pg, bic_mask, eor_mask)
1.15 chris 889: struct pmap *pmap;
1.1 matt 890: vaddr_t va;
1.49 thorpej 891: struct vm_page *pg;
1.1 matt 892: u_int bic_mask;
893: u_int eor_mask;
894: {
895: struct pv_entry *npv;
896: u_int flags, oflags;
897:
898: /*
899: * There is at least one VA mapping this page.
900: */
901:
1.49 thorpej 902: for (npv = pg->mdpage.pvh_list; npv; npv = npv->pv_next) {
1.1 matt 903: if (pmap == npv->pv_pmap && va == npv->pv_va) {
904: oflags = npv->pv_flags;
905: npv->pv_flags = flags =
906: ((oflags & ~bic_mask) ^ eor_mask);
907: if ((flags ^ oflags) & PT_W) {
908: if (flags & PT_W)
909: ++pmap->pm_stats.wired_count;
910: else
911: --pmap->pm_stats.wired_count;
912: }
913: return (oflags);
914: }
915: }
916: return (0);
917: }
918:
919: /*
920: * Map the specified level 2 pagetable into the level 1 page table for
921: * the given pmap to cover a chunk of virtual address space starting from the
922: * address specified.
923: */
924: static /*__inline*/ void
1.17 chris 925: pmap_map_in_l1(pmap, va, l2pa, selfref)
1.15 chris 926: struct pmap *pmap;
1.1 matt 927: vaddr_t va, l2pa;
1.17 chris 928: boolean_t selfref;
1.1 matt 929: {
930: vaddr_t ptva;
931:
932: /* Calculate the index into the L1 page table. */
933: ptva = (va >> PDSHIFT) & ~3;
934:
1.48 chris 935: NPDEBUG(PDB_MAP_L1, printf("wiring %08lx in to pd%p pte0x%lx va0x%lx\n", l2pa,
1.1 matt 936: pmap->pm_pdir, L1_PTE(l2pa), ptva));
937:
938: /* Map page table into the L1. */
939: pmap->pm_pdir[ptva + 0] = L1_PTE(l2pa + 0x000);
940: pmap->pm_pdir[ptva + 1] = L1_PTE(l2pa + 0x400);
941: pmap->pm_pdir[ptva + 2] = L1_PTE(l2pa + 0x800);
942: pmap->pm_pdir[ptva + 3] = L1_PTE(l2pa + 0xc00);
943:
944: /* Map the page table into the page table area. */
1.17 chris 945: if (selfref) {
1.48 chris 946: NPDEBUG(PDB_MAP_L1, printf("pt self reference %lx in %lx\n",
947: L2_PTE_NC_NB(l2pa, AP_KRW), pmap->pm_vptpt));
948: *((pt_entry_t *)(pmap->pm_vptpt + ptva)) =
949: L2_PTE_NC_NB(l2pa, AP_KRW);
1.17 chris 950: }
1.1 matt 951: /* XXX should be a purge */
952: /* cpu_tlb_flushD();*/
953: }
954:
955: #if 0
956: static /*__inline*/ void
957: pmap_unmap_in_l1(pmap, va)
1.15 chris 958: struct pmap *pmap;
1.1 matt 959: vaddr_t va;
960: {
961: vaddr_t ptva;
962:
963: /* Calculate the index into the L1 page table. */
964: ptva = (va >> PDSHIFT) & ~3;
965:
966: /* Unmap page table from the L1. */
967: pmap->pm_pdir[ptva + 0] = 0;
968: pmap->pm_pdir[ptva + 1] = 0;
969: pmap->pm_pdir[ptva + 2] = 0;
970: pmap->pm_pdir[ptva + 3] = 0;
971:
972: /* Unmap the page table from the page table area. */
973: *((pt_entry_t *)(pmap->pm_vptpt + ptva)) = 0;
974:
975: /* XXX should be a purge */
976: /* cpu_tlb_flushD();*/
977: }
978: #endif
979:
980: /*
981: * Used to map a range of physical addresses into kernel
982: * virtual address space.
983: *
984: * For now, VM is already on, we only need to map the
985: * specified memory.
986: */
987: vaddr_t
988: pmap_map(va, spa, epa, prot)
989: vaddr_t va, spa, epa;
990: int prot;
991: {
992: while (spa < epa) {
1.20 chris 993: pmap_kenter_pa(va, spa, prot);
1.1 matt 994: va += NBPG;
995: spa += NBPG;
996: }
1.19 chris 997: pmap_update(pmap_kernel());
1.1 matt 998: return(va);
999: }
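/*
 * Typical (illustrative) use during bootstrap, with hypothetical start_pa
 * and size values:
 *
 *	va = pmap_map(va, start_pa, start_pa + size,
 *	    VM_PROT_READ | VM_PROT_WRITE);
 *
 * The mappings are entered with pmap_kenter_pa() and are therefore
 * unmanaged, i.e. not tracked on the PV lists.
 */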
1000:
1001:
1002: /*
1.3 matt 1003: * void pmap_bootstrap(pd_entry_t *kernel_l1pt, pv_addr_t kernel_ptpt)
1.1 matt 1004: *
1005: * bootstrap the pmap system. This is called from initarm and allows
           1006: * the pmap system to initialise any structures it requires.
1007: *
1008: * Currently this sets up the kernel_pmap that is statically allocated
           1009: * and also allocates virtual addresses for certain page hooks.
           1010: * Currently only one page hook is allocated; it is used
1011: * to zero physical pages of memory.
1012: * It also initialises the start and end address of the kernel data space.
1013: */
1.2 matt 1014: extern paddr_t physical_freestart;
1015: extern paddr_t physical_freeend;
1.1 matt 1016:
1.17 chris 1017: char *boot_head;
1.1 matt 1018:
1019: void
1020: pmap_bootstrap(kernel_l1pt, kernel_ptpt)
1021: pd_entry_t *kernel_l1pt;
1022: pv_addr_t kernel_ptpt;
1023: {
1024: int loop;
1.2 matt 1025: paddr_t start, end;
1.1 matt 1026: #if NISADMA > 0
1.2 matt 1027: paddr_t istart;
1028: psize_t isize;
1.1 matt 1029: #endif
1030:
1.15 chris 1031: pmap_kernel()->pm_pdir = kernel_l1pt;
1032: pmap_kernel()->pm_pptpt = kernel_ptpt.pv_pa;
1033: pmap_kernel()->pm_vptpt = kernel_ptpt.pv_va;
1034: simple_lock_init(&pmap_kernel()->pm_lock);
1.16 chris 1035: pmap_kernel()->pm_obj.pgops = NULL;
1036: TAILQ_INIT(&(pmap_kernel()->pm_obj.memq));
1037: pmap_kernel()->pm_obj.uo_npages = 0;
1038: pmap_kernel()->pm_obj.uo_refs = 1;
1039:
1.1 matt 1040: /*
1041: * Initialize PAGE_SIZE-dependent variables.
1042: */
1043: uvm_setpagesize();
1044:
1045: npages = 0;
1046: loop = 0;
1047: while (loop < bootconfig.dramblocks) {
1.2 matt 1048: start = (paddr_t)bootconfig.dram[loop].address;
1.1 matt 1049: end = start + (bootconfig.dram[loop].pages * NBPG);
1050: if (start < physical_freestart)
1051: start = physical_freestart;
1052: if (end > physical_freeend)
1053: end = physical_freeend;
1054: #if 0
1055: printf("%d: %lx -> %lx\n", loop, start, end - 1);
1056: #endif
1057: #if NISADMA > 0
1058: if (pmap_isa_dma_range_intersect(start, end - start,
1059: &istart, &isize)) {
1060: /*
1061: * Place the pages that intersect with the
1062: * ISA DMA range onto the ISA DMA free list.
1063: */
1064: #if 0
1065: printf(" ISADMA 0x%lx -> 0x%lx\n", istart,
1066: istart + isize - 1);
1067: #endif
1068: uvm_page_physload(atop(istart),
1069: atop(istart + isize), atop(istart),
1070: atop(istart + isize), VM_FREELIST_ISADMA);
1071: npages += atop(istart + isize) - atop(istart);
1072:
1073: /*
1074: * Load the pieces that come before
1075: * the intersection into the default
1076: * free list.
1077: */
1078: if (start < istart) {
1079: #if 0
1080: printf(" BEFORE 0x%lx -> 0x%lx\n",
1081: start, istart - 1);
1082: #endif
1083: uvm_page_physload(atop(start),
1084: atop(istart), atop(start),
1085: atop(istart), VM_FREELIST_DEFAULT);
1086: npages += atop(istart) - atop(start);
1087: }
1088:
1089: /*
1090: * Load the pieces that come after
1091: * the intersection into the default
1092: * free list.
1093: */
1094: if ((istart + isize) < end) {
1095: #if 0
1096: printf(" AFTER 0x%lx -> 0x%lx\n",
1097: (istart + isize), end - 1);
1098: #endif
1099: uvm_page_physload(atop(istart + isize),
1100: atop(end), atop(istart + isize),
1101: atop(end), VM_FREELIST_DEFAULT);
1102: npages += atop(end) - atop(istart + isize);
1103: }
1104: } else {
1105: uvm_page_physload(atop(start), atop(end),
1106: atop(start), atop(end), VM_FREELIST_DEFAULT);
1107: npages += atop(end) - atop(start);
1108: }
1109: #else /* NISADMA > 0 */
1110: uvm_page_physload(atop(start), atop(end),
1111: atop(start), atop(end), VM_FREELIST_DEFAULT);
1112: npages += atop(end) - atop(start);
1113: #endif /* NISADMA > 0 */
1114: ++loop;
1115: }
1116:
1117: #ifdef MYCROFT_HACK
1118: printf("npages = %ld\n", npages);
1119: #endif
1120:
1121: virtual_start = KERNEL_VM_BASE;
1.48 chris 1122: virtual_end = KERNEL_VM_BASE + KERNEL_VM_SIZE - 1;
1.1 matt 1123:
1124: ALLOC_PAGE_HOOK(page_hook0, NBPG);
1125: ALLOC_PAGE_HOOK(page_hook1, NBPG);
1126:
1127: /*
1128: * The mem special device needs a virtual hook but we don't
1129: * need a pte
1130: */
1131: memhook = (char *)virtual_start;
1132: virtual_start += NBPG;
1133:
1134: msgbufaddr = (caddr_t)virtual_start;
1.15 chris 1135: msgbufpte = (pt_entry_t)pmap_pte(pmap_kernel(), virtual_start);
1.1 matt 1136: virtual_start += round_page(MSGBUFSIZE);
1137:
1.17 chris 1138: /*
1139: * init the static-global locks and global lists.
1140: */
1141: spinlockinit(&pmap_main_lock, "pmaplk", 0);
1142: simple_lock_init(&pvalloc_lock);
1.48 chris 1143: simple_lock_init(&pmaps_lock);
1144: LIST_INIT(&pmaps);
1.17 chris 1145: TAILQ_INIT(&pv_freepages);
1146: TAILQ_INIT(&pv_unusedpgs);
1.1 matt 1147:
1.10 chris 1148: /*
1149: * initialize the pmap pool.
1150: */
1151:
1152: pool_init(&pmap_pmap_pool, sizeof(struct pmap), 0, 0, 0, "pmappl",
1153: 0, pool_page_alloc_nointr, pool_page_free_nointr, M_VMPMAP);
1154:
1.36 thorpej 1155: cpu_dcache_wbinv_all();
1.1 matt 1156: }
1157:
1158: /*
1159: * void pmap_init(void)
1160: *
1161: * Initialize the pmap module.
1162: * Called by vm_init() in vm/vm_init.c in order to initialise
1163: * any structures that the pmap system needs to map virtual memory.
1164: */
1165:
1166: extern int physmem;
1167:
1168: void
1169: pmap_init()
1170: {
1171:
1172: /*
1173: * Set the available memory vars - These do not map to real memory
           1174: * addresses and cannot, as the physical memory is fragmented.
1175: * They are used by ps for %mem calculations.
1176: * One could argue whether this should be the entire memory or just
           1177: * the memory that is usable in a user process.
1178: */
1179: avail_start = 0;
1180: avail_end = physmem * NBPG;
1181:
1.17 chris 1182: /*
1183: * now we need to free enough pv_entry structures to allow us to get
1184: * the kmem_map/kmem_object allocated and inited (done after this
1185: * function is finished). to do this we allocate one bootstrap page out
1186: * of kernel_map and use it to provide an initial pool of pv_entry
1187: * structures. we never free this page.
1188: */
1189:
1190: pv_initpage = (struct pv_page *) uvm_km_alloc(kernel_map, PAGE_SIZE);
1191: if (pv_initpage == NULL)
1192: panic("pmap_init: pv_initpage");
1193: pv_cachedva = 0; /* a VA we have allocated but not used yet */
1194: pv_nfpvents = 0;
1195: (void) pmap_add_pvpage(pv_initpage, FALSE);
1196:
1.1 matt 1197: pmap_initialized = TRUE;
1198:
1199: /* Initialise our L1 page table queues and counters */
1200: SIMPLEQ_INIT(&l1pt_static_queue);
1201: l1pt_static_queue_count = 0;
1202: l1pt_static_create_count = 0;
1203: SIMPLEQ_INIT(&l1pt_queue);
1204: l1pt_queue_count = 0;
1205: l1pt_create_count = 0;
1206: l1pt_reuse_count = 0;
1207: }
1208:
1209: /*
1210: * pmap_postinit()
1211: *
1212: * This routine is called after the vm and kmem subsystems have been
1213: * initialised. This allows the pmap code to perform any initialisation
           1214: * that can only be done once the memory allocation is in place.
1215: */
1216:
1217: void
1218: pmap_postinit()
1219: {
1220: int loop;
1221: struct l1pt *pt;
1222:
1223: #ifdef PMAP_STATIC_L1S
1224: for (loop = 0; loop < PMAP_STATIC_L1S; ++loop) {
1225: #else /* PMAP_STATIC_L1S */
1226: for (loop = 0; loop < max_processes; ++loop) {
1227: #endif /* PMAP_STATIC_L1S */
1228: /* Allocate a L1 page table */
1229: pt = pmap_alloc_l1pt();
1230: if (!pt)
1231: panic("Cannot allocate static L1 page tables\n");
1232:
1233: /* Clean it */
1234: bzero((void *)pt->pt_va, PD_SIZE);
1235: pt->pt_flags |= (PTFLAG_STATIC | PTFLAG_CLEAN);
1236: /* Add the page table to the queue */
1237: SIMPLEQ_INSERT_TAIL(&l1pt_static_queue, pt, pt_queue);
1238: ++l1pt_static_queue_count;
1239: ++l1pt_static_create_count;
1240: }
1241: }
1242:
1243:
1244: /*
1245: * Create and return a physical map.
1246: *
1247: * If the size specified for the map is zero, the map is an actual physical
1248: * map, and may be referenced by the hardware.
1249: *
1250: * If the size specified is non-zero, the map will be used in software only,
1251: * and is bounded by that size.
1252: */
1253:
1254: pmap_t
1255: pmap_create()
1256: {
1.15 chris 1257: struct pmap *pmap;
1.1 matt 1258:
1.10 chris 1259: /*
1260: * Fetch pmap entry from the pool
1261: */
1262:
1263: pmap = pool_get(&pmap_pmap_pool, PR_WAITOK);
1.17 chris 1264: /* XXX is this really needed! */
1265: memset(pmap, 0, sizeof(*pmap));
1.1 matt 1266:
1.16 chris 1267: simple_lock_init(&pmap->pm_obj.vmobjlock);
1268: pmap->pm_obj.pgops = NULL; /* currently not a mappable object */
1269: TAILQ_INIT(&pmap->pm_obj.memq);
1270: pmap->pm_obj.uo_npages = 0;
1271: pmap->pm_obj.uo_refs = 1;
1272: pmap->pm_stats.wired_count = 0;
1273: pmap->pm_stats.resident_count = 1;
1274:
1.1 matt 1275: /* Now init the machine part of the pmap */
1276: pmap_pinit(pmap);
1277: return(pmap);
1278: }
1279:
1280: /*
1281: * pmap_alloc_l1pt()
1282: *
           1283: * This routine allocates physical and virtual memory for an L1 page table
           1284: * and wires it.
           1285: * An l1pt structure is returned to describe the allocated page table.
1286: *
1287: * This routine is allowed to fail if the required memory cannot be allocated.
1288: * In this case NULL is returned.
1289: */
1290:
1291: struct l1pt *
1292: pmap_alloc_l1pt(void)
1293: {
1.2 matt 1294: paddr_t pa;
1295: vaddr_t va;
1.1 matt 1296: struct l1pt *pt;
1297: int error;
1.9 chs 1298: struct vm_page *m;
1.11 chris 1299: pt_entry_t *ptes;
1.1 matt 1300:
1301: /* Allocate virtual address space for the L1 page table */
1302: va = uvm_km_valloc(kernel_map, PD_SIZE);
1303: if (va == 0) {
1304: #ifdef DIAGNOSTIC
1.26 rearnsha 1305: PDEBUG(0,
1306: printf("pmap: Cannot allocate pageable memory for L1\n"));
1.1 matt 1307: #endif /* DIAGNOSTIC */
1308: return(NULL);
1309: }
1310:
1311: /* Allocate memory for the l1pt structure */
1312: pt = (struct l1pt *)malloc(sizeof(struct l1pt), M_VMPMAP, M_WAITOK);
1313:
1314: /*
1315: * Allocate pages from the VM system.
1316: */
1317: TAILQ_INIT(&pt->pt_plist);
1318: error = uvm_pglistalloc(PD_SIZE, physical_start, physical_end,
1319: PD_SIZE, 0, &pt->pt_plist, 1, M_WAITOK);
1320: if (error) {
1321: #ifdef DIAGNOSTIC
1.26 rearnsha 1322: PDEBUG(0,
1323: printf("pmap: Cannot allocate physical mem for L1 (%d)\n",
1324: error));
1.1 matt 1325: #endif /* DIAGNOSTIC */
1326: /* Release the resources we already have claimed */
1327: free(pt, M_VMPMAP);
1328: uvm_km_free(kernel_map, va, PD_SIZE);
1329: return(NULL);
1330: }
1331:
1332: /* Map our physical pages into our virtual space */
1333: pt->pt_va = va;
1.51 ! chris 1334: m = TAILQ_FIRST(&pt->pt_plist);
1.11 chris 1335: ptes = pmap_map_ptes(pmap_kernel());
1.1 matt 1336: while (m && va < (pt->pt_va + PD_SIZE)) {
1337: pa = VM_PAGE_TO_PHYS(m);
1338:
1.20 chris 1339: pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE);
1.1 matt 1340:
1341: /* Revoke cacheability and bufferability */
1342: /* XXX should be done better than this */
1.11 chris 1343: ptes[arm_byte_to_page(va)] &= ~(PT_C | PT_B);
1.1 matt 1344:
1345: va += NBPG;
1346: m = m->pageq.tqe_next;
1347: }
1.11 chris 1348: pmap_unmap_ptes(pmap_kernel());
1.19 chris 1349: pmap_update(pmap_kernel());
1.1 matt 1350:
1351: #ifdef DIAGNOSTIC
1352: if (m)
1353: panic("pmap_alloc_l1pt: pglist not empty\n");
1354: #endif /* DIAGNOSTIC */
1355:
1356: pt->pt_flags = 0;
1357: return(pt);
1358: }
1359:
1360: /*
1361: * Free a L1 page table previously allocated with pmap_alloc_l1pt().
1362: */
1.33 chris 1363: static void
1.1 matt 1364: pmap_free_l1pt(pt)
1365: struct l1pt *pt;
1366: {
1367: /* Separate the physical memory for the virtual space */
1.20 chris 1368: pmap_kremove(pt->pt_va, PD_SIZE);
1.19 chris 1369: pmap_update(pmap_kernel());
1.1 matt 1370:
1371: /* Return the physical memory */
1372: uvm_pglistfree(&pt->pt_plist);
1373:
1374: /* Free the virtual space */
1375: uvm_km_free(kernel_map, pt->pt_va, PD_SIZE);
1376:
1377: /* Free the l1pt structure */
1378: free(pt, M_VMPMAP);
1379: }
1380:
1381: /*
1382: * Allocate a page directory.
1383: * This routine will either allocate a new page directory from the pool
1384: * of L1 page tables currently held by the kernel or it will allocate
1385: * a new one via pmap_alloc_l1pt().
1386: * It will then initialise the l1 page table for use.
1.48 chris 1387: *
1388: * XXX must tidy up and fix this code, not happy about how it does the pmaps_locking
1.1 matt 1389: */
1.33 chris 1390: static int
1.1 matt 1391: pmap_allocpagedir(pmap)
1392: struct pmap *pmap;
1393: {
1.2 matt 1394: paddr_t pa;
1.1 matt 1395: struct l1pt *pt;
1396: pt_entry_t *pte;
1397:
1398: PDEBUG(0, printf("pmap_allocpagedir(%p)\n", pmap));
1399:
1400: /* Do we have any spare L1's lying around ? */
1401: if (l1pt_static_queue_count) {
1402: --l1pt_static_queue_count;
1403: pt = l1pt_static_queue.sqh_first;
1404: SIMPLEQ_REMOVE_HEAD(&l1pt_static_queue, pt, pt_queue);
1405: } else if (l1pt_queue_count) {
1406: --l1pt_queue_count;
1407: pt = l1pt_queue.sqh_first;
1408: SIMPLEQ_REMOVE_HEAD(&l1pt_queue, pt, pt_queue);
1409: ++l1pt_reuse_count;
1410: } else {
1411: pt = pmap_alloc_l1pt();
1412: if (!pt)
1413: return(ENOMEM);
1414: ++l1pt_create_count;
1415: }
1416:
1417: /* Store the pointer to the l1 descriptor in the pmap. */
1418: pmap->pm_l1pt = pt;
1419:
1420: /* Get the physical address of the start of the l1 */
1.51 ! chris 1421: pa = VM_PAGE_TO_PHYS(TAILQ_FIRST(&pt->pt_plist));
1.1 matt 1422:
1423: /* Store the virtual address of the l1 in the pmap. */
1424: pmap->pm_pdir = (pd_entry_t *)pt->pt_va;
1425:
1426: /* Clean the L1 if it is dirty */
1427: if (!(pt->pt_flags & PTFLAG_CLEAN))
1428: bzero((void *)pmap->pm_pdir, (PD_SIZE - KERNEL_PD_SIZE));
1429:
1430: /* Allocate a page table to map all the page tables for this pmap */
1431:
1432: #ifdef DIAGNOSTIC
1433: if (pmap->pm_vptpt) {
1434: /* XXX What if we have one already ? */
1435: panic("pmap_allocpagedir: have pt already\n");
1436: }
1437: #endif /* DIAGNOSTIC */
1438: pmap->pm_vptpt = uvm_km_zalloc(kernel_map, NBPG);
1.5 toshii 1439: if (pmap->pm_vptpt == 0) {
1.48 chris 1440: pmap_freepagedir(pmap);
1441: return(ENOMEM);
1.5 toshii 1442: }
1443:
1.48 chris 1444: /* need to lock this all up for growkernel */
1445: simple_lock(&pmaps_lock);
1446: /* wish we didn't have to keep this locked... */
1447:
1448: /* Duplicate the kernel mapping i.e. all mappings 0xf0000000+ */
1449: bcopy((char *)pmap_kernel()->pm_pdir + (PD_SIZE - KERNEL_PD_SIZE),
1450: (char *)pmap->pm_pdir + (PD_SIZE - KERNEL_PD_SIZE),
1451: KERNEL_PD_SIZE);
1452:
1.15 chris 1453: (void) pmap_extract(pmap_kernel(), pmap->pm_vptpt, &pmap->pm_pptpt);
1.1 matt 1454: pmap->pm_pptpt &= PG_FRAME;
1455: /* Revoke cacheability and bufferability */
1456: /* XXX should be done better than this */
1.15 chris 1457: pte = pmap_pte(pmap_kernel(), pmap->pm_vptpt);
1.1 matt 1458: *pte = *pte & ~(PT_C | PT_B);
1459:
1460: /* Wire in this page table */
1.17 chris 1461: pmap_map_in_l1(pmap, PROCESS_PAGE_TBLS_BASE, pmap->pm_pptpt, TRUE);
1.1 matt 1462:
1463: pt->pt_flags &= ~PTFLAG_CLEAN; /* L1 is dirty now */
1.48 chris 1464:
1.1 matt 1465: /*
1466: * Map the kernel page tables for 0xf0000000 +
1467: * into the page table used to map the
1468: * pmap's page tables
1469: */
1470: bcopy((char *)(PROCESS_PAGE_TBLS_BASE
1471: + (PROCESS_PAGE_TBLS_BASE >> (PGSHIFT - 2))
1472: + ((PD_SIZE - KERNEL_PD_SIZE) >> 2)),
1473: (char *)pmap->pm_vptpt + ((PD_SIZE - KERNEL_PD_SIZE) >> 2),
1474: (KERNEL_PD_SIZE >> 2));
1475:
1.48 chris 1476: LIST_INSERT_HEAD(&pmaps, pmap, pm_list);
1477: simple_unlock(&pmaps_lock);
1478:
1.1 matt 1479: return(0);
1480: }
1481:
1482:
1483: /*
1484: * Initialize a preallocated and zeroed pmap structure,
1485: * such as one in a vmspace structure.
1486: */
1487:
1488: void
1489: pmap_pinit(pmap)
1490: struct pmap *pmap;
1491: {
1.26 rearnsha 1492: int backoff = 6;
1493: int retry = 10;
1494:
1.1 matt 1495: PDEBUG(0, printf("pmap_pinit(%p)\n", pmap));
1496:
1497: /* Keep looping until we succeed in allocating a page directory */
1498: while (pmap_allocpagedir(pmap) != 0) {
1499: /*
1500: * Ok we failed to allocate a suitable block of memory for an
1501: * L1 page table. This means that either:
1502: * 1. 16KB of virtual address space could not be allocated
1503: * 2. 16KB of physically contiguous memory on a 16KB boundary
1504: * could not be allocated.
1505: *
1506: * Since we cannot fail we will sleep for a while and try
1.17 chris 1507: * again.
1.26 rearnsha 1508: *
1509: * Searching for a suitable L1 PT is expensive:
1510: * to avoid hogging the system when memory is really
1511: * scarce, use an exponential back-off so that
1512: * eventually we won't retry more than once every 8
1513: * seconds. This should allow other processes to run
1514: * to completion and free up resources.
1.1 matt 1515: */
1.26 rearnsha 1516: (void) ltsleep(&lbolt, PVM, "l1ptwait", (hz << 3) >> backoff,
1517: NULL);
1518: if (--retry == 0) {
1519: retry = 10;
1520: if (backoff)
1521: --backoff;
1522: }
1.1 matt 1523: }
1524:
1525: /* Map zero page for the pmap. This will also map the L2 for it */
1526: pmap_enter(pmap, 0x00000000, systempage.pv_pa,
1527: VM_PROT_READ, VM_PROT_READ | PMAP_WIRED);
1.19 chris 1528: pmap_update(pmap);
1.1 matt 1529: }
1530:
1531:
1532: void
1533: pmap_freepagedir(pmap)
1.15 chris 1534: struct pmap *pmap;
1.1 matt 1535: {
1536: /* Free the memory used for the page table mapping */
1.5 toshii 1537: if (pmap->pm_vptpt != 0)
1538: uvm_km_free(kernel_map, (vaddr_t)pmap->pm_vptpt, NBPG);
1.1 matt 1539:
1540: /* junk the L1 page table */
1541: if (pmap->pm_l1pt->pt_flags & PTFLAG_STATIC) {
1542: /* Add the page table to the queue */
1543: SIMPLEQ_INSERT_TAIL(&l1pt_static_queue, pmap->pm_l1pt, pt_queue);
1544: ++l1pt_static_queue_count;
1545: } else if (l1pt_queue_count < 8) {
1546: /* Add the page table to the queue */
1547: SIMPLEQ_INSERT_TAIL(&l1pt_queue, pmap->pm_l1pt, pt_queue);
1548: ++l1pt_queue_count;
1549: } else
1550: pmap_free_l1pt(pmap->pm_l1pt);
1551: }
1552:
1553:
1554: /*
1555: * Retire the given physical map from service.
1556: * Should only be called if the map contains no valid mappings.
1557: */
1558:
1559: void
1560: pmap_destroy(pmap)
1.15 chris 1561: struct pmap *pmap;
1.1 matt 1562: {
1.17 chris 1563: struct vm_page *page;
1.1 matt 1564: int count;
1565:
1566: if (pmap == NULL)
1567: return;
1568:
1569: PDEBUG(0, printf("pmap_destroy(%p)\n", pmap));
1.17 chris 1570:
1571: /*
1572: * Drop reference count
1573: */
1574: simple_lock(&pmap->pm_obj.vmobjlock);
1.16 chris 1575: count = --pmap->pm_obj.uo_refs;
1.17 chris 1576: simple_unlock(&pmap->pm_obj.vmobjlock);
1577: if (count > 0) {
1578: return;
1.1 matt 1579: }
1580:
1.17 chris 1581: /*
1582: * reference count is zero, free pmap resources and then free pmap.
1583: */
1.48 chris 1584:
1585: /*
1586: * remove it from global list of pmaps
1587: */
1588:
1589: simple_lock(&pmaps_lock);
1590: LIST_REMOVE(pmap, pm_list);
1591: simple_unlock(&pmaps_lock);
1.17 chris 1592:
1.1 matt 1593: /* Remove the zero page mapping */
1594: pmap_remove(pmap, 0x00000000, 0x00000000 + NBPG);
1.19 chris 1595: pmap_update(pmap);
1.1 matt 1596:
1597: /*
1598: * Free any page tables still mapped
           1599: * This is only temporary until pmap_enter can count the number
1600: * of mappings made in a page table. Then pmap_remove() can
1601: * reduce the count and free the pagetable when the count
1.16 chris 1602: * reaches zero. Note that entries in this list should match the
           1603: * contents of the ptpt; however, this is faster than walking 1024
           1604: * entries looking for PTs
1605: * taken from i386 pmap.c
1.1 matt 1606: */
1.51 ! chris 1607: while ((page = TAILQ_FIRST(&pmap->pm_obj.memq)) != NULL) {
! 1608: KASSERT((page->flags & PG_BUSY) == 0);
1.16 chris 1609: page->wire_count = 0;
1610: uvm_pagefree(page);
1.1 matt 1611: }
1.16 chris 1612:
1.1 matt 1613: /* Free the page dir */
1614: pmap_freepagedir(pmap);
1.17 chris 1615:
1616: /* return the pmap to the pool */
1617: pool_put(&pmap_pmap_pool, pmap);
1.1 matt 1618: }
1619:
1620:
1621: /*
1.15 chris 1622: * void pmap_reference(struct pmap *pmap)
1.1 matt 1623: *
1624: * Add a reference to the specified pmap.
1625: */
1626:
1627: void
1628: pmap_reference(pmap)
1.15 chris 1629: struct pmap *pmap;
1.1 matt 1630: {
1631: if (pmap == NULL)
1632: return;
1633:
1634: simple_lock(&pmap->pm_lock);
1.16 chris 1635: pmap->pm_obj.uo_refs++;
1.1 matt 1636: simple_unlock(&pmap->pm_lock);
1637: }
1638:
1639: /*
1640: * void pmap_virtual_space(vaddr_t *start, vaddr_t *end)
1641: *
1642: * Return the start and end addresses of the kernel's virtual space.
           1643: * These values are set up in pmap_bootstrap and are updated as pages
1644: * are allocated.
1645: */
1646:
1647: void
1648: pmap_virtual_space(start, end)
1649: vaddr_t *start;
1650: vaddr_t *end;
1651: {
1652: *start = virtual_start;
1653: *end = virtual_end;
1654: }
1655:
1656:
1657: /*
1658: * Activate the address space for the specified process. If the process
1659: * is the current process, load the new MMU context.
1660: */
1661: void
1662: pmap_activate(p)
1663: struct proc *p;
1664: {
1.15 chris 1665: struct pmap *pmap = p->p_vmspace->vm_map.pmap;
1.1 matt 1666: struct pcb *pcb = &p->p_addr->u_pcb;
1667:
1.15 chris 1668: (void) pmap_extract(pmap_kernel(), (vaddr_t)pmap->pm_pdir,
1.1 matt 1669: (paddr_t *)&pcb->pcb_pagedir);
1670:
1671: PDEBUG(0, printf("pmap_activate: p=%p pmap=%p pcb=%p pdir=%p l1=%p\n",
1672: p, pmap, pcb, pmap->pm_pdir, pcb->pcb_pagedir));
1673:
1674: if (p == curproc) {
1675: PDEBUG(0, printf("pmap_activate: setting TTB\n"));
1676: setttb((u_int)pcb->pcb_pagedir);
1677: }
1678: #if 0
1679: pmap->pm_pdchanged = FALSE;
1680: #endif
1681: }
1682:
1683:
1684: /*
1685: * Deactivate the address space of the specified process.
1686: */
1687: void
1688: pmap_deactivate(p)
1689: struct proc *p;
1690: {
1691: }
1692:
1.31 thorpej 1693: /*
1694: * Perform any deferred pmap operations.
1695: */
1696: void
1697: pmap_update(struct pmap *pmap)
1698: {
1699:
1700: /*
1701: * We haven't deferred any pmap operations, but we do need to
1702: * make sure TLB/cache operations have completed.
1703: */
1704: cpu_cpwait();
1705: }
1.1 matt 1706:
1707: /*
1708: * pmap_clean_page()
1709: *
1710: * This is a local function used to work out the best strategy to clean
1711: * a single page referenced by its entry in the PV table. It's used by
               1712:  * pmap_copy_page, pmap_zero_page and maybe some others later on.
1713: *
1714: * Its policy is effectively:
1715: * o If there are no mappings, we don't bother doing anything with the cache.
1716: * o If there is one mapping, we clean just that page.
1717: * o If there are multiple mappings, we clean the entire cache.
1718: *
1719: * So that some functions can be further optimised, it returns 0 if it didn't
1720: * clean the entire cache, or 1 if it did.
1721: *
1722: * XXX One bug in this routine is that if the pv_entry has a single page
1723: * mapped at 0x00000000 a whole cache clean will be performed rather than
               1724:  * just the one page. This should not occur in everyday use, and if it
               1725:  * does, the only cost is a slightly less efficient clean for that page.
1726: */
1727: static int
1.17 chris 1728: pmap_clean_page(pv, is_src)
1.1 matt 1729: struct pv_entry *pv;
1.17 chris 1730: boolean_t is_src;
1.1 matt 1731: {
1.17 chris 1732: struct pmap *pmap;
1733: struct pv_entry *npv;
1.1 matt 1734: int cache_needs_cleaning = 0;
1735: vaddr_t page_to_clean = 0;
1736:
1.17 chris 1737: if (pv == NULL)
1738: /* nothing mapped in so nothing to flush */
1739: return (0);
1740:
1741: /* Since we flush the cache each time we change curproc, we
1742: * only need to flush the page if it is in the current pmap.
1743: */
1744: if (curproc)
1745: pmap = curproc->p_vmspace->vm_map.pmap;
1746: else
1747: pmap = pmap_kernel();
1748:
1749: for (npv = pv; npv; npv = npv->pv_next) {
1750: if (npv->pv_pmap == pmap) {
1751: /* The page is mapped non-cacheable in
1752: * this map. No need to flush the cache.
1753: */
1754: if (npv->pv_flags & PT_NC) {
1755: #ifdef DIAGNOSTIC
1756: if (cache_needs_cleaning)
1757: panic("pmap_clean_page: "
1758: "cache inconsistency");
1759: #endif
1760: break;
1761: }
1762: #if 0
1763: /* This doesn't work, because pmap_protect
1764: doesn't flush changes on pages that it
1765: has write-protected. */
1.21 chris 1766:
1.25 rearnsha 1767: /* If the page is not writable and this
1.17 chris 1768: is the source, then there is no need
1769: to flush it from the cache. */
1770: else if (is_src && ! (npv->pv_flags & PT_Wr))
1771: continue;
1772: #endif
1773: if (cache_needs_cleaning){
1774: page_to_clean = 0;
1775: break;
1776: }
1777: else
1778: page_to_clean = npv->pv_va;
1779: cache_needs_cleaning = 1;
1780: }
1.1 matt 1781: }
1782:
1783: if (page_to_clean)
1.36 thorpej 1784: cpu_idcache_wbinv_range(page_to_clean, NBPG);
1.1 matt 1785: else if (cache_needs_cleaning) {
1.36 thorpej 1786: cpu_idcache_wbinv_all();
1.1 matt 1787: return (1);
1788: }
1789: return (0);
1790: }
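/*
 * Illustrative sketch (not compiled) of how the callers below use
 * pmap_clean_page(): the pv list is taken from the vm_page, the pv head
 * lock is held around the call, and the return value ("already_flushed"
 * is a name made up here) says whether the whole cache was invalidated,
 * so a second per-page clean can be skipped, as pmap_copy_page() does for
 * its destination page.
 *
 *	simple_lock(&pg->mdpage.pvh_slock);
 *	already_flushed = pmap_clean_page(pg->mdpage.pvh_list, TRUE);
 *	simple_unlock(&pg->mdpage.pvh_slock);
 */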
1791:
1792: /*
1793: * pmap_zero_page()
1794: *
1795: * Zero a given physical page by mapping it at a page hook point.
               1796:  * In doing the zero page op, the page we zero is mapped cacheable, since
               1797:  * on StrongARM accesses to non-cached pages are non-burst, making writing
               1798:  * _any_ bulk data very slow.
1799: */
1800: void
1801: pmap_zero_page(phys)
1.2 matt 1802: paddr_t phys;
1.1 matt 1803: {
1.49 thorpej 1804: struct vm_page *pg;
1.1 matt 1805:
               1806: 	/* Get an entry for this page, and clean it. */
1.49 thorpej 1807: pg = PHYS_TO_VM_PAGE(phys);
1808: simple_lock(&pg->mdpage.pvh_slock);
1809: pmap_clean_page(pg->mdpage.pvh_list, FALSE);
1810: simple_unlock(&pg->mdpage.pvh_slock);
1.17 chris 1811:
1.1 matt 1812: /*
1813: * Hook in the page, zero it, and purge the cache for that
1814: * zeroed page. Invalidate the TLB as needed.
1815: */
1816: *page_hook0.pte = L2_PTE(phys & PG_FRAME, AP_KRW);
1817: cpu_tlb_flushD_SE(page_hook0.va);
1.32 thorpej 1818: cpu_cpwait();
1.1 matt 1819: bzero_page(page_hook0.va);
1.36 thorpej 1820: cpu_dcache_wbinv_range(page_hook0.va, NBPG);
1.1 matt 1821: }
1822:
1.17 chris 1823: /* pmap_pageidlezero()
1824: *
1825: * The same as above, except that we assume that the page is not
1826: * mapped. This means we never have to flush the cache first. Called
1827: * from the idle loop.
1828: */
1829: boolean_t
1830: pmap_pageidlezero(phys)
1831: paddr_t phys;
1832: {
1833: int i, *ptr;
1834: boolean_t rv = TRUE;
1835:
1836: #ifdef DIAGNOSTIC
1.49 thorpej 1837: struct vm_page *pg;
1.17 chris 1838:
1.49 thorpej 1839: pg = PHYS_TO_VM_PAGE(phys);
1840: if (pg->mdpage.pvh_list != NULL)
1.17 chris 1841: panic("pmap_pageidlezero: zeroing mapped page\n");
1842: #endif
1843:
1844: /*
1845: * Hook in the page, zero it, and purge the cache for that
1846: * zeroed page. Invalidate the TLB as needed.
1847: */
1848: *page_hook0.pte = L2_PTE(phys & PG_FRAME, AP_KRW);
1849: cpu_tlb_flushD_SE(page_hook0.va);
1.32 thorpej 1850: cpu_cpwait();
1851:
1.17 chris 1852: for (i = 0, ptr = (int *)page_hook0.va;
1853: i < (NBPG / sizeof(int)); i++) {
1854: if (sched_whichqs != 0) {
1855: /*
1856: * A process has become ready. Abort now,
1857: * so we don't keep it waiting while we
1858: * do slow memory access to finish this
1859: * page.
1860: */
1861: rv = FALSE;
1862: break;
1863: }
1864: *ptr++ = 0;
1865: }
1866:
1867: if (rv)
1868: /*
               1869: 		 * if we aborted, we'll rezero this page again later, so only
               1870: 		 * purge the cache if we finished zeroing the whole page
1871: */
1.36 thorpej 1872: cpu_dcache_wbinv_range(page_hook0.va, NBPG);
1.17 chris 1873: return (rv);
1874: }
1875:
1.1 matt 1876: /*
1877: * pmap_copy_page()
1878: *
1879: * Copy one physical page into another, by mapping the pages into
1880: * hook points. The same comment regarding cachability as in
1881: * pmap_zero_page also applies here.
1882: */
1883: void
1884: pmap_copy_page(src, dest)
1.2 matt 1885: paddr_t src;
1886: paddr_t dest;
1.1 matt 1887: {
1.49 thorpej 1888: struct vm_page *src_pg, *dest_pg;
1.20 chris 1889: boolean_t cleanedcache;
1.1 matt 1890:
1891: /* Get PV entries for the pages, and clean them if needed. */
1.49 thorpej 1892: src_pg = PHYS_TO_VM_PAGE(src);
1.20 chris 1893:
1.49 thorpej 1894: simple_lock(&src_pg->mdpage.pvh_slock);
1895: cleanedcache = pmap_clean_page(src_pg->mdpage.pvh_list, TRUE);
1896: simple_unlock(&src_pg->mdpage.pvh_slock);
1.1 matt 1897:
1.20 chris 1898: if (cleanedcache == 0) {
1.49 thorpej 1899: dest_pg = PHYS_TO_VM_PAGE(dest);
1900: simple_lock(&dest_pg->mdpage.pvh_slock);
1901: pmap_clean_page(dest_pg->mdpage.pvh_list, FALSE);
1902: simple_unlock(&dest_pg->mdpage.pvh_slock);
1.20 chris 1903: }
1.1 matt 1904: /*
1905: * Map the pages into the page hook points, copy them, and purge
1906: * the cache for the appropriate page. Invalidate the TLB
1907: * as required.
1908: */
1909: *page_hook0.pte = L2_PTE(src & PG_FRAME, AP_KRW);
1910: *page_hook1.pte = L2_PTE(dest & PG_FRAME, AP_KRW);
1911: cpu_tlb_flushD_SE(page_hook0.va);
1912: cpu_tlb_flushD_SE(page_hook1.va);
1.32 thorpej 1913: cpu_cpwait();
1.1 matt 1914: bcopy_page(page_hook0.va, page_hook1.va);
1.36 thorpej 1915: cpu_dcache_wbinv_range(page_hook0.va, NBPG);
1916: cpu_dcache_wbinv_range(page_hook1.va, NBPG);
1.1 matt 1917: }
1918:
1919: #if 0
1920: void
1921: pmap_pte_addref(pmap, va)
1.15 chris 1922: struct pmap *pmap;
1.1 matt 1923: vaddr_t va;
1924: {
1925: pd_entry_t *pde;
1.2 matt 1926: paddr_t pa;
1.1 matt 1927: struct vm_page *m;
1928:
1929: if (pmap == pmap_kernel())
1930: return;
1931:
1932: pde = pmap_pde(pmap, va & ~(3 << PDSHIFT));
1933: pa = pmap_pte_pa(pde);
1934: m = PHYS_TO_VM_PAGE(pa);
1935: ++m->wire_count;
1936: #ifdef MYCROFT_HACK
1937: printf("addref pmap=%p va=%08lx pde=%p pa=%08lx m=%p wire=%d\n",
1938: pmap, va, pde, pa, m, m->wire_count);
1939: #endif
1940: }
1941:
1942: void
1943: pmap_pte_delref(pmap, va)
1.15 chris 1944: struct pmap *pmap;
1.1 matt 1945: vaddr_t va;
1946: {
1947: pd_entry_t *pde;
1.2 matt 1948: paddr_t pa;
1.1 matt 1949: struct vm_page *m;
1950:
1951: if (pmap == pmap_kernel())
1952: return;
1953:
1954: pde = pmap_pde(pmap, va & ~(3 << PDSHIFT));
1955: pa = pmap_pte_pa(pde);
1956: m = PHYS_TO_VM_PAGE(pa);
1957: --m->wire_count;
1958: #ifdef MYCROFT_HACK
1959: printf("delref pmap=%p va=%08lx pde=%p pa=%08lx m=%p wire=%d\n",
1960: pmap, va, pde, pa, m, m->wire_count);
1961: #endif
1962: if (m->wire_count == 0) {
1963: #ifdef MYCROFT_HACK
1964: printf("delref pmap=%p va=%08lx pde=%p pa=%08lx m=%p\n",
1965: pmap, va, pde, pa, m);
1966: #endif
1967: pmap_unmap_in_l1(pmap, va);
1968: uvm_pagefree(m);
1969: --pmap->pm_stats.resident_count;
1970: }
1971: }
1972: #else
1973: #define pmap_pte_addref(pmap, va)
1974: #define pmap_pte_delref(pmap, va)
1975: #endif
1976:
1977: /*
1978: * Since we have a virtually indexed cache, we may need to inhibit caching if
1979: * there is more than one mapping and at least one of them is writable.
1980: * Since we purge the cache on every context switch, we only need to check for
1981: * other mappings within the same pmap, or kernel_pmap.
1982: * This function is also called when a page is unmapped, to possibly reenable
1983: * caching on any remaining mappings.
1.28 rearnsha 1984: *
1985: * The code implements the following logic, where:
1986: *
1987: * KW = # of kernel read/write pages
1988: * KR = # of kernel read only pages
1989: * UW = # of user read/write pages
1990: * UR = # of user read only pages
1991: * OW = # of user read/write pages in another pmap, then
1992: *
1993: * KC = kernel mapping is cacheable
1994: * UC = user mapping is cacheable
1995: *
1996: * KW=0,KR=0 KW=0,KR>0 KW=1,KR=0 KW>1,KR>=0
1997: * +---------------------------------------------
1998: * UW=0,UR=0,OW=0 | --- KC=1 KC=1 KC=0
1999: * UW=0,UR>0,OW=0 | UC=1 KC=1,UC=1 KC=0,UC=0 KC=0,UC=0
2000: * UW=0,UR>0,OW>0 | UC=1 KC=0,UC=1 KC=0,UC=0 KC=0,UC=0
2001: * UW=1,UR=0,OW=0 | UC=1 KC=0,UC=0 KC=0,UC=0 KC=0,UC=0
2002: * UW>1,UR>=0,OW>=0 | UC=0 KC=0,UC=0 KC=0,UC=0 KC=0,UC=0
1.11 chris 2003: *
               2004:  * Note that the pmap must have its PTEs mapped in, and passed with ptes.
1.1 matt 2005: */
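/*
 * Worked example of reading the table above (illustrative only): a page
 * with one kernel read/write mapping (KW=1,KR=0) and one user read-only
 * mapping (UW=0,UR>0,OW=0) lands in the KC=0,UC=0 cell, so both mappings
 * must be made non-cacheable to avoid virtual-cache aliasing; once the
 * user mapping goes away (UW=0,UR=0,OW=0), the kernel mapping may be
 * made cacheable again (KC=1).
 */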
1.25 rearnsha 2006: __inline static void
1.49 thorpej 2007: pmap_vac_me_harder(struct pmap *pmap, struct vm_page *pg, pt_entry_t *ptes,
1.12 chris 2008: boolean_t clear_cache)
1.1 matt 2009: {
1.25 rearnsha 2010: if (pmap == pmap_kernel())
1.49 thorpej 2011: pmap_vac_me_kpmap(pmap, pg, ptes, clear_cache);
1.25 rearnsha 2012: else
1.49 thorpej 2013: pmap_vac_me_user(pmap, pg, ptes, clear_cache);
1.25 rearnsha 2014: }
2015:
2016: static void
1.49 thorpej 2017: pmap_vac_me_kpmap(struct pmap *pmap, struct vm_page *pg, pt_entry_t *ptes,
1.25 rearnsha 2018: boolean_t clear_cache)
2019: {
2020: int user_entries = 0;
2021: int user_writable = 0;
2022: int user_cacheable = 0;
2023: int kernel_entries = 0;
2024: int kernel_writable = 0;
2025: int kernel_cacheable = 0;
2026: struct pv_entry *pv;
2027: struct pmap *last_pmap = pmap;
2028:
2029: #ifdef DIAGNOSTIC
2030: if (pmap != pmap_kernel())
2031: panic("pmap_vac_me_kpmap: pmap != pmap_kernel()");
2032: #endif
2033:
2034: /*
2035: * Pass one, see if there are both kernel and user pmaps for
2036: * this page. Calculate whether there are user-writable or
2037: * kernel-writable pages.
2038: */
1.49 thorpej 2039: for (pv = pg->mdpage.pvh_list; pv != NULL; pv = pv->pv_next) {
1.25 rearnsha 2040: if (pv->pv_pmap != pmap) {
2041: user_entries++;
2042: if (pv->pv_flags & PT_Wr)
2043: user_writable++;
2044: if ((pv->pv_flags & PT_NC) == 0)
2045: user_cacheable++;
2046: } else {
2047: kernel_entries++;
2048: if (pv->pv_flags & PT_Wr)
2049: kernel_writable++;
2050: if ((pv->pv_flags & PT_NC) == 0)
2051: kernel_cacheable++;
2052: }
2053: }
2054:
2055: /*
2056: * We know we have just been updating a kernel entry, so if
2057: * all user pages are already cacheable, then there is nothing
2058: * further to do.
2059: */
2060: if (kernel_entries == 0 &&
2061: user_cacheable == user_entries)
2062: return;
2063:
2064: if (user_entries) {
2065: /*
2066: * Scan over the list again, for each entry, if it
2067: * might not be set correctly, call pmap_vac_me_user
2068: * to recalculate the settings.
2069: */
1.49 thorpej 2070: for (pv = pg->mdpage.pvh_list; pv; pv = pv->pv_next) {
1.25 rearnsha 2071: /*
2072: * We know kernel mappings will get set
2073: * correctly in other calls. We also know
2074: * that if the pmap is the same as last_pmap
2075: * then we've just handled this entry.
2076: */
2077: if (pv->pv_pmap == pmap || pv->pv_pmap == last_pmap)
2078: continue;
2079: /*
2080: * If there are kernel entries and this page
2081: * is writable but non-cacheable, then we can
2082: * skip this entry also.
2083: */
2084: if (kernel_entries > 0 &&
2085: (pv->pv_flags & (PT_NC | PT_Wr)) ==
2086: (PT_NC | PT_Wr))
2087: continue;
2088: /*
2089: * Similarly if there are no kernel-writable
2090: * entries and the page is already
2091: * read-only/cacheable.
2092: */
2093: if (kernel_writable == 0 &&
2094: (pv->pv_flags & (PT_NC | PT_Wr)) == 0)
2095: continue;
2096: /*
2097: * For some of the remaining cases, we know
2098: * that we must recalculate, but for others we
2099: * can't tell if they are correct or not, so
2100: * we recalculate anyway.
2101: */
2102: pmap_unmap_ptes(last_pmap);
2103: last_pmap = pv->pv_pmap;
2104: ptes = pmap_map_ptes(last_pmap);
1.49 thorpej 2105: pmap_vac_me_user(last_pmap, pg, ptes,
1.25 rearnsha 2106: pmap_is_curpmap(last_pmap));
2107: }
2108: /* Restore the pte mapping that was passed to us. */
2109: if (last_pmap != pmap) {
2110: pmap_unmap_ptes(last_pmap);
2111: ptes = pmap_map_ptes(pmap);
2112: }
2113: if (kernel_entries == 0)
2114: return;
2115: }
2116:
1.49 thorpej 2117: pmap_vac_me_user(pmap, pg, ptes, clear_cache);
1.25 rearnsha 2118: return;
2119: }
2120:
2121: static void
1.49 thorpej 2122: pmap_vac_me_user(struct pmap *pmap, struct vm_page *pg, pt_entry_t *ptes,
1.25 rearnsha 2123: boolean_t clear_cache)
2124: {
2125: struct pmap *kpmap = pmap_kernel();
1.17 chris 2126: struct pv_entry *pv, *npv;
1.1 matt 2127: int entries = 0;
1.25 rearnsha 2128: int writable = 0;
1.12 chris 2129: int cacheable_entries = 0;
1.25 rearnsha 2130: int kern_cacheable = 0;
2131: int other_writable = 0;
1.1 matt 2132:
1.49 thorpej 2133: pv = pg->mdpage.pvh_list;
1.11 chris 2134: KASSERT(ptes != NULL);
1.1 matt 2135:
2136: /*
2137: * Count mappings and writable mappings in this pmap.
1.25 rearnsha 2138: * Include kernel mappings as part of our own.
1.1 matt 2139: * Keep a pointer to the first one.
2140: */
2141: for (npv = pv; npv; npv = npv->pv_next) {
2142: /* Count mappings in the same pmap */
1.25 rearnsha 2143: if (pmap == npv->pv_pmap ||
2144: kpmap == npv->pv_pmap) {
1.1 matt 2145: if (entries++ == 0)
2146: pv = npv;
1.12 chris 2147: /* Cacheable mappings */
1.25 rearnsha 2148: if ((npv->pv_flags & PT_NC) == 0) {
1.12 chris 2149: cacheable_entries++;
1.25 rearnsha 2150: if (kpmap == npv->pv_pmap)
2151: kern_cacheable++;
2152: }
2153: /* Writable mappings */
1.1 matt 2154: if (npv->pv_flags & PT_Wr)
1.25 rearnsha 2155: ++writable;
2156: } else if (npv->pv_flags & PT_Wr)
2157: other_writable = 1;
1.1 matt 2158: }
2159:
1.12 chris 2160: PDEBUG(3,printf("pmap_vac_me_harder: pmap %p Entries %d, "
1.25 rearnsha 2161: "writable %d cacheable %d %s\n", pmap, entries, writable,
1.12 chris 2162: cacheable_entries, clear_cache ? "clean" : "no clean"));
2163:
1.1 matt 2164: /*
2165: * Enable or disable caching as necessary.
1.25 rearnsha 2166: * Note: the first entry might be part of the kernel pmap,
2167: * so we can't assume this is indicative of the state of the
2168: * other (maybe non-kpmap) entries.
1.1 matt 2169: */
1.25 rearnsha 2170: if ((entries > 1 && writable) ||
2171: (entries > 0 && pmap == kpmap && other_writable)) {
1.12 chris 2172: if (cacheable_entries == 0)
2173: return;
1.25 rearnsha 2174: for (npv = pv; npv; npv = npv->pv_next) {
2175: if ((pmap == npv->pv_pmap
2176: || kpmap == npv->pv_pmap) &&
1.12 chris 2177: (npv->pv_flags & PT_NC) == 0) {
2178: ptes[arm_byte_to_page(npv->pv_va)] &=
1.11 chris 2179: ~(PT_C | PT_B);
1.12 chris 2180: npv->pv_flags |= PT_NC;
1.25 rearnsha 2181: /*
2182: * If this page needs flushing from the
2183: * cache, and we aren't going to do it
2184: * below, do it now.
2185: */
2186: if ((cacheable_entries < 4 &&
2187: (clear_cache || npv->pv_pmap == kpmap)) ||
2188: (npv->pv_pmap == kpmap &&
2189: !clear_cache && kern_cacheable < 4)) {
1.36 thorpej 2190: cpu_idcache_wbinv_range(npv->pv_va,
1.12 chris 2191: NBPG);
2192: cpu_tlb_flushID_SE(npv->pv_va);
2193: }
1.1 matt 2194: }
2195: }
1.25 rearnsha 2196: if ((clear_cache && cacheable_entries >= 4) ||
2197: kern_cacheable >= 4) {
1.36 thorpej 2198: cpu_idcache_wbinv_all();
1.12 chris 2199: cpu_tlb_flushID();
2200: }
1.32 thorpej 2201: cpu_cpwait();
1.1 matt 2202: } else if (entries > 0) {
1.25 rearnsha 2203: /*
               2204: 		 * Turn caching back on for some pages.  If it is a kernel
2205: * page, only do so if there are no other writable pages.
2206: */
2207: for (npv = pv; npv; npv = npv->pv_next) {
2208: if ((pmap == npv->pv_pmap ||
2209: (kpmap == npv->pv_pmap && other_writable == 0)) &&
2210: (npv->pv_flags & PT_NC)) {
1.11 chris 2211: ptes[arm_byte_to_page(npv->pv_va)] |=
1.27 rearnsha 2212: pte_cache_mode;
1.12 chris 2213: npv->pv_flags &= ~PT_NC;
1.1 matt 2214: }
2215: }
2216: }
2217: }
2218:
2219: /*
2220: * pmap_remove()
2221: *
2222: * pmap_remove is responsible for nuking a number of mappings for a range
2223: * of virtual address space in the current pmap. To do this efficiently
2224: * is interesting, because in a number of cases a wide virtual address
2225: * range may be supplied that contains few actual mappings. So, the
2226: * optimisations are:
2227: * 1. Try and skip over hunks of address space for which an L1 entry
2228: * does not exist.
2229: * 2. Build up a list of pages we've hit, up to a maximum, so we can
2230: * maybe do just a partial cache clean. This path of execution is
2231: * complicated by the fact that the cache must be flushed _before_
2232: * the PTE is nuked, being a VAC :-)
2233: * 3. Maybe later fast-case a single page, but I don't think this is
2234: * going to make _that_ much difference overall.
2235: */
2236:
2237: #define PMAP_REMOVE_CLEAN_LIST_SIZE 3
2238:
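/*
 * Illustrative behaviour of the clean list with the size defined above:
 * removing up to three mapped pages flushes each page from the cache and
 * TLB individually (when the pmap is active); hitting a fourth mapped
 * page triggers one full cache/TLB invalidation, after which per-page
 * cleaning is skipped for the rest of the range.
 */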
2239: void
2240: pmap_remove(pmap, sva, eva)
1.15 chris 2241: struct pmap *pmap;
1.1 matt 2242: vaddr_t sva;
2243: vaddr_t eva;
2244: {
2245: int cleanlist_idx = 0;
2246: struct pagelist {
2247: vaddr_t va;
2248: pt_entry_t *pte;
2249: } cleanlist[PMAP_REMOVE_CLEAN_LIST_SIZE];
1.11 chris 2250: pt_entry_t *pte = 0, *ptes;
1.2 matt 2251: paddr_t pa;
1.1 matt 2252: int pmap_active;
1.49 thorpej 2253: struct vm_page *pg;
1.1 matt 2254:
2255: /* Exit quick if there is no pmap */
2256: if (!pmap)
2257: return;
2258:
2259: PDEBUG(0, printf("pmap_remove: pmap=%p sva=%08lx eva=%08lx\n", pmap, sva, eva));
2260:
2261: sva &= PG_FRAME;
2262: eva &= PG_FRAME;
2263:
1.17 chris 2264: /*
1.49 thorpej 2265: * we lock in the pmap => vm_page direction
1.17 chris 2266: */
2267: PMAP_MAP_TO_HEAD_LOCK();
2268:
1.11 chris 2269: ptes = pmap_map_ptes(pmap);
1.1 matt 2270: /* Get a page table pointer */
2271: while (sva < eva) {
1.30 rearnsha 2272: if (pmap_pde_page(pmap_pde(pmap, sva)))
1.1 matt 2273: break;
2274: sva = (sva & PD_MASK) + NBPD;
2275: }
1.11 chris 2276:
2277: pte = &ptes[arm_byte_to_page(sva)];
1.1 matt 2278: /* Note if the pmap is active thus require cache and tlb cleans */
2279: if ((curproc && curproc->p_vmspace->vm_map.pmap == pmap)
1.15 chris 2280: || (pmap == pmap_kernel()))
1.1 matt 2281: pmap_active = 1;
2282: else
2283: pmap_active = 0;
2284:
2285: /* Now loop along */
2286: while (sva < eva) {
2287: /* Check if we can move to the next PDE (l1 chunk) */
2288: if (!(sva & PT_MASK))
1.30 rearnsha 2289: if (!pmap_pde_page(pmap_pde(pmap, sva))) {
1.1 matt 2290: sva += NBPD;
2291: pte += arm_byte_to_page(NBPD);
2292: continue;
2293: }
2294:
2295: /* We've found a valid PTE, so this page of PTEs has to go. */
2296: if (pmap_pte_v(pte)) {
2297: /* Update statistics */
2298: --pmap->pm_stats.resident_count;
2299:
2300: /*
2301: * Add this page to our cache remove list, if we can.
               2302: 			 * If, however, the cache remove list is totally full,
               2303: 			 * then do a complete cache invalidation, go back and
               2304: 			 * zero the PTEs recorded on the list so far, and ignore
               2305: 			 * the list from then on because there's no longer any
               2306: 			 * point in bothering with it (we've paid the
               2307: 			 * penalty, so will carry on unhindered). Otherwise,
               2308: 			 * when we fall out, we just clean the list.
2309: */
2310: PDEBUG(10, printf("remove: inv pte at %p(%x) ", pte, *pte));
2311: pa = pmap_pte_pa(pte);
2312:
2313: if (cleanlist_idx < PMAP_REMOVE_CLEAN_LIST_SIZE) {
2314: /* Add to the clean list. */
2315: cleanlist[cleanlist_idx].pte = pte;
2316: cleanlist[cleanlist_idx].va = sva;
2317: cleanlist_idx++;
2318: } else if (cleanlist_idx == PMAP_REMOVE_CLEAN_LIST_SIZE) {
2319: int cnt;
2320:
2321: /* Nuke everything if needed. */
2322: if (pmap_active) {
1.36 thorpej 2323: cpu_idcache_wbinv_all();
1.1 matt 2324: cpu_tlb_flushID();
2325: }
2326:
2327: /*
2328: * Roll back the previous PTE list,
2329: * and zero out the current PTE.
2330: */
2331: for (cnt = 0; cnt < PMAP_REMOVE_CLEAN_LIST_SIZE; cnt++) {
2332: *cleanlist[cnt].pte = 0;
2333: pmap_pte_delref(pmap, cleanlist[cnt].va);
2334: }
2335: *pte = 0;
2336: pmap_pte_delref(pmap, sva);
2337: cleanlist_idx++;
2338: } else {
2339: /*
2340: * We've already nuked the cache and
2341: * TLB, so just carry on regardless,
2342: * and we won't need to do it again
2343: */
2344: *pte = 0;
2345: pmap_pte_delref(pmap, sva);
2346: }
2347:
2348: /*
2349: * Update flags. In a number of circumstances,
2350: * we could cluster a lot of these and do a
2351: * number of sequential pages in one go.
2352: */
1.49 thorpej 2353: if ((pg = PHYS_TO_VM_PAGE(pa)) != NULL) {
1.17 chris 2354: struct pv_entry *pve;
1.49 thorpej 2355: simple_lock(&pg->mdpage.pvh_slock);
2356: pve = pmap_remove_pv(pg, pmap, sva);
1.17 chris 2357: pmap_free_pv(pmap, pve);
1.49 thorpej 2358: pmap_vac_me_harder(pmap, pg, ptes, FALSE);
2359: simple_unlock(&pg->mdpage.pvh_slock);
1.1 matt 2360: }
2361: }
2362: sva += NBPG;
2363: pte++;
2364: }
2365:
1.11 chris 2366: pmap_unmap_ptes(pmap);
1.1 matt 2367: /*
2368: * Now, if we've fallen through down to here, chances are that there
               2369: 	 * are fewer than PMAP_REMOVE_CLEAN_LIST_SIZE mappings left.
2370: */
2371: if (cleanlist_idx <= PMAP_REMOVE_CLEAN_LIST_SIZE) {
2372: u_int cnt;
2373:
2374: for (cnt = 0; cnt < cleanlist_idx; cnt++) {
2375: if (pmap_active) {
1.36 thorpej 2376: cpu_idcache_wbinv_range(cleanlist[cnt].va,
2377: NBPG);
1.1 matt 2378: *cleanlist[cnt].pte = 0;
2379: cpu_tlb_flushID_SE(cleanlist[cnt].va);
2380: } else
2381: *cleanlist[cnt].pte = 0;
2382: pmap_pte_delref(pmap, cleanlist[cnt].va);
2383: }
2384: }
1.17 chris 2385: PMAP_MAP_TO_HEAD_UNLOCK();
1.1 matt 2386: }
2387:
2388: /*
2389: * Routine: pmap_remove_all
2390: * Function:
2391: * Removes this physical page from
2392: * all physical maps in which it resides.
2393: * Reflects back modify bits to the pager.
2394: */
2395:
1.33 chris 2396: static void
1.49 thorpej 2397: pmap_remove_all(pg)
2398: struct vm_page *pg;
1.1 matt 2399: {
1.17 chris 2400: struct pv_entry *pv, *npv;
1.15 chris 2401: struct pmap *pmap;
1.11 chris 2402: pt_entry_t *pte, *ptes;
1.1 matt 2403:
1.49 thorpej 2404: PDEBUG(0, printf("pmap_remove_all: pa=%lx ", VM_PAGE_TO_PHYS(pg)));
1.1 matt 2405:
1.49 thorpej 2406: /* set vm_page => pmap locking */
1.17 chris 2407: PMAP_HEAD_TO_MAP_LOCK();
1.1 matt 2408:
1.49 thorpej 2409: simple_lock(&pg->mdpage.pvh_slock);
1.17 chris 2410:
1.49 thorpej 2411: pv = pg->mdpage.pvh_list;
2412: if (pv == NULL) {
2413: PDEBUG(0, printf("free page\n"));
2414: simple_unlock(&pg->mdpage.pvh_slock);
2415: PMAP_HEAD_TO_MAP_UNLOCK();
2416: return;
1.1 matt 2417: }
1.17 chris 2418: pmap_clean_page(pv, FALSE);
1.1 matt 2419:
2420: while (pv) {
2421: pmap = pv->pv_pmap;
1.11 chris 2422: ptes = pmap_map_ptes(pmap);
2423: pte = &ptes[arm_byte_to_page(pv->pv_va)];
1.1 matt 2424:
2425: PDEBUG(0, printf("[%p,%08x,%08lx,%08x] ", pmap, *pte,
2426: pv->pv_va, pv->pv_flags));
2427: #ifdef DEBUG
1.32 thorpej 2428: if (!pmap_pde_page(pmap_pde(pmap, pv->pv_va)) ||
1.30 rearnsha  2429: 		    !pmap_pte_v(pte) || pmap_pte_pa(pte) != VM_PAGE_TO_PHYS(pg))
1.1 matt 2430: panic("pmap_remove_all: bad mapping");
2431: #endif /* DEBUG */
2432:
2433: /*
2434: * Update statistics
2435: */
2436: --pmap->pm_stats.resident_count;
2437:
2438: /* Wired bit */
2439: if (pv->pv_flags & PT_W)
2440: --pmap->pm_stats.wired_count;
2441:
2442: /*
2443: * Invalidate the PTEs.
2444: * XXX: should cluster them up and invalidate as many
2445: * as possible at once.
2446: */
2447:
2448: #ifdef needednotdone
2449: reduce wiring count on page table pages as references drop
2450: #endif
2451:
2452: *pte = 0;
2453: pmap_pte_delref(pmap, pv->pv_va);
2454:
2455: npv = pv->pv_next;
1.17 chris 2456: pmap_free_pv(pmap, pv);
1.1 matt 2457: pv = npv;
1.11 chris 2458: pmap_unmap_ptes(pmap);
1.1 matt 2459: }
1.49 thorpej 2460: pg->mdpage.pvh_list = NULL;
2461: simple_unlock(&pg->mdpage.pvh_slock);
1.17 chris 2462: PMAP_HEAD_TO_MAP_UNLOCK();
1.1 matt 2463:
2464: PDEBUG(0, printf("done\n"));
2465: cpu_tlb_flushID();
1.32 thorpej 2466: cpu_cpwait();
1.1 matt 2467: }
2468:
2469:
2470: /*
2471: * Set the physical protection on the specified range of this map as requested.
2472: */
2473:
2474: void
2475: pmap_protect(pmap, sva, eva, prot)
1.15 chris 2476: struct pmap *pmap;
1.1 matt 2477: vaddr_t sva;
2478: vaddr_t eva;
2479: vm_prot_t prot;
2480: {
1.11 chris 2481: pt_entry_t *pte = NULL, *ptes;
1.49 thorpej 2482: struct vm_page *pg;
1.1 matt 2483: int armprot;
2484: int flush = 0;
1.2 matt 2485: paddr_t pa;
1.1 matt 2486:
2487: PDEBUG(0, printf("pmap_protect: pmap=%p %08lx->%08lx %x\n",
2488: pmap, sva, eva, prot));
2489:
2490: if (~prot & VM_PROT_READ) {
2491: /* Just remove the mappings. */
2492: pmap_remove(pmap, sva, eva);
1.33 chris 2493: /* pmap_update not needed as it should be called by the caller
2494: * of pmap_protect */
1.1 matt 2495: return;
2496: }
2497: if (prot & VM_PROT_WRITE) {
2498: /*
2499: * If this is a read->write transition, just ignore it and let
2500: * uvm_fault() take care of it later.
2501: */
2502: return;
2503: }
2504:
2505: sva &= PG_FRAME;
2506: eva &= PG_FRAME;
2507:
1.17 chris 2508: /* Need to lock map->head */
2509: PMAP_MAP_TO_HEAD_LOCK();
2510:
1.11 chris 2511: ptes = pmap_map_ptes(pmap);
1.1 matt 2512: /*
2513: * We need to acquire a pointer to a page table page before entering
2514: * the following loop.
2515: */
2516: while (sva < eva) {
1.30 rearnsha 2517: if (pmap_pde_page(pmap_pde(pmap, sva)))
1.1 matt 2518: break;
2519: sva = (sva & PD_MASK) + NBPD;
2520: }
1.11 chris 2521:
2522: pte = &ptes[arm_byte_to_page(sva)];
1.17 chris 2523:
1.1 matt 2524: while (sva < eva) {
2525: /* only check once in a while */
2526: if ((sva & PT_MASK) == 0) {
1.30 rearnsha 2527: if (!pmap_pde_page(pmap_pde(pmap, sva))) {
1.1 matt 2528: /* We can race ahead here, to the next pde. */
2529: sva += NBPD;
2530: pte += arm_byte_to_page(NBPD);
2531: continue;
2532: }
2533: }
2534:
2535: if (!pmap_pte_v(pte))
2536: goto next;
2537:
2538: flush = 1;
2539:
2540: armprot = 0;
2541: if (sva < VM_MAXUSER_ADDRESS)
2542: armprot |= PT_AP(AP_U);
2543: else if (sva < VM_MAX_ADDRESS)
2544: armprot |= PT_AP(AP_W); /* XXX Ekk what is this ? */
2545: *pte = (*pte & 0xfffff00f) | armprot;
2546:
2547: pa = pmap_pte_pa(pte);
2548:
2549: /* Get the physical page index */
2550:
2551: /* Clear write flag */
1.49 thorpej 2552: if ((pg = PHYS_TO_VM_PAGE(pa)) != NULL) {
2553: simple_lock(&pg->mdpage.pvh_slock);
2554: (void) pmap_modify_pv(pmap, sva, pg, PT_Wr, 0);
2555: pmap_vac_me_harder(pmap, pg, ptes, FALSE);
2556: simple_unlock(&pg->mdpage.pvh_slock);
1.1 matt 2557: }
2558:
2559: next:
2560: sva += NBPG;
2561: pte++;
2562: }
1.11 chris 2563: pmap_unmap_ptes(pmap);
1.17 chris 2564: PMAP_MAP_TO_HEAD_UNLOCK();
1.1 matt 2565: if (flush)
2566: cpu_tlb_flushID();
2567: }
2568:
2569: /*
1.15 chris 2570: * void pmap_enter(struct pmap *pmap, vaddr_t va, paddr_t pa, vm_prot_t prot,
1.1 matt 2571: * int flags)
2572: *
2573: * Insert the given physical page (p) at
2574: * the specified virtual address (v) in the
2575: * target physical map with the protection requested.
2576: *
2577: * If specified, the page will be wired down, meaning
2578: * that the related pte can not be reclaimed.
2579: *
2580: * NB: This is the only routine which MAY NOT lazy-evaluate
2581: * or lose information. That is, this routine must actually
2582: * insert this page into the given map NOW.
2583: */
2584:
2585: int
2586: pmap_enter(pmap, va, pa, prot, flags)
1.15 chris 2587: struct pmap *pmap;
1.1 matt 2588: vaddr_t va;
1.2 matt 2589: paddr_t pa;
1.1 matt 2590: vm_prot_t prot;
2591: int flags;
2592: {
1.11 chris 2593: pt_entry_t *pte, *ptes;
1.1 matt 2594: u_int npte;
1.2 matt 2595: paddr_t opa;
1.1 matt 2596: int nflags;
2597: boolean_t wired = (flags & PMAP_WIRED) != 0;
1.49 thorpej 2598: struct vm_page *pg;
1.17 chris 2599: struct pv_entry *pve;
2600: int error;
1.1 matt 2601:
2602: PDEBUG(5, printf("pmap_enter: V%08lx P%08lx in pmap %p prot=%08x, wired = %d\n",
2603: va, pa, pmap, prot, wired));
2604:
2605: #ifdef DIAGNOSTIC
2606: /* Valid address ? */
1.48 chris 2607: if (va >= (pmap_curmaxkvaddr))
1.1 matt 2608: panic("pmap_enter: too big");
2609: if (pmap != pmap_kernel() && va != 0) {
2610: if (va < VM_MIN_ADDRESS || va >= VM_MAXUSER_ADDRESS)
2611: panic("pmap_enter: kernel page in user map");
2612: } else {
2613: if (va >= VM_MIN_ADDRESS && va < VM_MAXUSER_ADDRESS)
2614: panic("pmap_enter: user page in kernel map");
2615: if (va >= VM_MAXUSER_ADDRESS && va < VM_MAX_ADDRESS)
2616: panic("pmap_enter: entering PT page");
2617: }
2618: #endif
1.49 thorpej 2619: /*
2620: * Get a pointer to the page. Later on in this function, we
2621: * test for a managed page by checking pg != NULL.
2622: */
2623: pg = PHYS_TO_VM_PAGE(pa);
2624:
1.17 chris 2625: /* get lock */
2626: PMAP_MAP_TO_HEAD_LOCK();
1.1 matt 2627: /*
2628: * Get a pointer to the pte for this virtual address. If the
2629: * pte pointer is NULL then we are missing the L2 page table
2630: * so we need to create one.
2631: */
1.24 chris 2632: /* XXX horrible hack to get us working with lockdebug */
2633: simple_lock(&pmap->pm_obj.vmobjlock);
1.1 matt 2634: pte = pmap_pte(pmap, va);
2635: if (!pte) {
1.17 chris 2636: struct vm_page *ptp;
1.48 chris 2637: KASSERT(pmap != pmap_kernel()); /* kernel should have pre-grown */
1.17 chris 2638:
2639: /* if failure is allowed then don't try too hard */
2640: ptp = pmap_get_ptp(pmap, va, flags & PMAP_CANFAIL);
2641: if (ptp == NULL) {
2642: if (flags & PMAP_CANFAIL) {
2643: error = ENOMEM;
2644: goto out;
2645: }
2646: panic("pmap_enter: get ptp failed");
1.1 matt 2647: }
1.16 chris 2648:
1.1 matt 2649: pte = pmap_pte(pmap, va);
2650: #ifdef DIAGNOSTIC
2651: if (!pte)
2652: panic("pmap_enter: no pte");
2653: #endif
2654: }
2655:
2656: nflags = 0;
2657: if (prot & VM_PROT_WRITE)
2658: nflags |= PT_Wr;
2659: if (wired)
2660: nflags |= PT_W;
2661:
2662: /* More debugging info */
2663: PDEBUG(5, printf("pmap_enter: pte for V%08lx = V%p (%08x)\n", va, pte,
2664: *pte));
2665:
2666: /* Is the pte valid ? If so then this page is already mapped */
2667: if (pmap_pte_v(pte)) {
2668: /* Get the physical address of the current page mapped */
2669: opa = pmap_pte_pa(pte);
2670:
2671: #ifdef MYCROFT_HACK
2672: printf("pmap_enter: pmap=%p va=%lx pa=%lx opa=%lx\n", pmap, va, pa, opa);
2673: #endif
2674:
2675: /* Are we mapping the same page ? */
2676: if (opa == pa) {
2677: /* All we must be doing is changing the protection */
2678: PDEBUG(0, printf("Case 02 in pmap_enter (V%08lx P%08lx)\n",
2679: va, pa));
2680:
2681: /* Has the wiring changed ? */
1.49 thorpej 2682: if (pg != NULL) {
2683: simple_lock(&pg->mdpage.pvh_slock);
2684: (void) pmap_modify_pv(pmap, va, pg,
1.1 matt 2685: PT_Wr | PT_W, nflags);
1.49 thorpej 2686: simple_unlock(&pg->mdpage.pvh_slock);
2687: }
1.1 matt 2688: } else {
1.49 thorpej 2689: struct vm_page *opg;
2690:
1.1 matt 2691: /* We are replacing the page with a new one. */
1.36 thorpej 2692: cpu_idcache_wbinv_range(va, NBPG);
1.1 matt 2693:
2694: PDEBUG(0, printf("Case 03 in pmap_enter (V%08lx P%08lx P%08lx)\n",
2695: va, pa, opa));
2696:
2697: /*
2698: * If it is part of our managed memory then we
2699: * must remove it from the PV list
2700: */
1.49 thorpej 2701: if ((opg = PHYS_TO_VM_PAGE(opa)) != NULL) {
2702: simple_lock(&opg->mdpage.pvh_slock);
2703: pve = pmap_remove_pv(opg, pmap, va);
2704: simple_unlock(&opg->mdpage.pvh_slock);
1.17 chris 2705: } else {
2706: pve = NULL;
1.1 matt 2707: }
2708:
2709: goto enter;
2710: }
2711: } else {
2712: opa = 0;
1.17 chris 2713: pve = NULL;
1.1 matt 2714: pmap_pte_addref(pmap, va);
2715:
2716: /* pte is not valid so we must be hooking in a new page */
2717: ++pmap->pm_stats.resident_count;
2718:
2719: enter:
2720: /*
2721: * Enter on the PV list if part of our managed memory
2722: */
1.49 thorpej 2723: if (pmap_initialized && pg != NULL) {
1.17 chris 2724: if (pve == NULL) {
2725: pve = pmap_alloc_pv(pmap, ALLOCPV_NEED);
2726: if (pve == NULL) {
2727: if (flags & PMAP_CANFAIL) {
2728: error = ENOMEM;
2729: goto out;
2730: }
2731: panic("pmap_enter: no pv entries available");
2732: }
2733: }
2734: /* enter_pv locks pvh when adding */
1.49 thorpej 2735: pmap_enter_pv(pg, pve, pmap, va, NULL, nflags);
1.17 chris 2736: } else {
1.49 thorpej 2737: pg = NULL;
1.17 chris 2738: if (pve != NULL)
2739: pmap_free_pv(pmap, pve);
1.1 matt 2740: }
2741: }
2742:
2743: #ifdef MYCROFT_HACK
2744: if (mycroft_hack)
               2745: 		printf("pmap_enter: pmap=%p va=%lx pa=%lx opa=%lx pg=%p\n", pmap, va, pa, opa, pg);
2746: #endif
2747:
2748: /* Construct the pte, giving the correct access. */
2749: npte = (pa & PG_FRAME);
2750:
2751: /* VA 0 is magic. */
2752: if (pmap != pmap_kernel() && va != 0)
2753: npte |= PT_AP(AP_U);
2754:
1.49 thorpej 2755: if (pmap_initialized && pg != NULL) {
1.1 matt 2756: #ifdef DIAGNOSTIC
2757: if ((flags & VM_PROT_ALL) & ~prot)
2758: panic("pmap_enter: access_type exceeds prot");
2759: #endif
1.27 rearnsha 2760: npte |= pte_cache_mode;
1.1 matt 2761: if (flags & VM_PROT_WRITE) {
2762: npte |= L2_SPAGE | PT_AP(AP_W);
1.49 thorpej 2763: pg->mdpage.pvh_attrs |= PT_H | PT_M;
1.1 matt 2764: } else if (flags & VM_PROT_ALL) {
2765: npte |= L2_SPAGE;
1.49 thorpej 2766: pg->mdpage.pvh_attrs |= PT_H;
1.1 matt 2767: } else
2768: npte |= L2_INVAL;
2769: } else {
2770: if (prot & VM_PROT_WRITE)
2771: npte |= L2_SPAGE | PT_AP(AP_W);
2772: else if (prot & VM_PROT_ALL)
2773: npte |= L2_SPAGE;
2774: else
2775: npte |= L2_INVAL;
2776: }
2777:
2778: #ifdef MYCROFT_HACK
2779: if (mycroft_hack)
2780: printf("pmap_enter: pmap=%p va=%lx pa=%lx prot=%x wired=%d access_type=%x npte=%08x\n", pmap, va, pa, prot, wired, flags & VM_PROT_ALL, npte);
2781: #endif
2782:
2783: *pte = npte;
2784:
1.49 thorpej 2785: if (pmap_initialized && pg != NULL) {
1.12 chris 2786: boolean_t pmap_active = FALSE;
1.11 chris 2787: /* XXX this will change once the whole of pmap_enter uses
2788: * map_ptes
2789: */
2790: ptes = pmap_map_ptes(pmap);
1.12 chris 2791: if ((curproc && curproc->p_vmspace->vm_map.pmap == pmap)
1.15 chris 2792: || (pmap == pmap_kernel()))
1.12 chris 2793: pmap_active = TRUE;
1.49 thorpej 2794: simple_lock(&pg->mdpage.pvh_slock);
2795: pmap_vac_me_harder(pmap, pg, ptes, pmap_active);
2796: simple_unlock(&pg->mdpage.pvh_slock);
1.11 chris 2797: pmap_unmap_ptes(pmap);
2798: }
1.1 matt 2799:
2800: /* Better flush the TLB ... */
2801: cpu_tlb_flushID_SE(va);
1.17 chris 2802: error = 0;
2803: out:
1.24 chris 2804: simple_unlock(&pmap->pm_obj.vmobjlock);
1.17 chris 2805: PMAP_MAP_TO_HEAD_UNLOCK();
1.1 matt 2806: PDEBUG(5, printf("pmap_enter: pte = V%p %08x\n", pte, *pte));
2807:
1.17 chris 2808: return error;
1.1 matt 2809: }
2810:
1.48 chris 2811: /*
2812: * pmap_kenter_pa: enter a kernel mapping
2813: *
               2814:  * => no need to lock anything; assume va is already allocated
2815: * => should be faster than normal pmap enter function
2816: */
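/*
 * Illustrative use from a hypothetical caller (not from this file):
 * wiring down a kernel buffer one page at a time and tearing it down
 * later.
 *
 *	for (off = 0; off < len; off += PAGE_SIZE)
 *		pmap_kenter_pa(kva + off, pa + off,
 *		    VM_PROT_READ | VM_PROT_WRITE);
 *	...
 *	pmap_kremove(kva, len);
 *	pmap_update(pmap_kernel());
 */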
1.1 matt 2817: void
2818: pmap_kenter_pa(va, pa, prot)
2819: vaddr_t va;
2820: paddr_t pa;
2821: vm_prot_t prot;
2822: {
1.13 chris 2823: pt_entry_t *pte;
2824:
2825: pte = vtopte(va);
1.14 chs 2826: KASSERT(!pmap_pte_v(pte));
1.13 chris 2827: *pte = L2_PTE(pa, AP_KRW);
1.1 matt 2828: }
2829:
2830: void
2831: pmap_kremove(va, len)
2832: vaddr_t va;
2833: vsize_t len;
2834: {
1.14 chs 2835: pt_entry_t *pte;
2836:
1.1 matt 2837: for (len >>= PAGE_SHIFT; len > 0; len--, va += PAGE_SIZE) {
1.13 chris 2838:
1.14 chs 2839: /*
2840: * We assume that we will only be called with small
2841: * regions of memory.
2842: */
2843:
1.30 rearnsha 2844: KASSERT(pmap_pde_page(pmap_pde(pmap_kernel(), va)));
1.13 chris 2845: pte = vtopte(va);
1.36 thorpej 2846: cpu_idcache_wbinv_range(va, PAGE_SIZE);
1.13 chris 2847: *pte = 0;
2848: cpu_tlb_flushID_SE(va);
1.1 matt 2849: }
2850: }
2851:
2852: /*
2853: * pmap_page_protect:
2854: *
2855: * Lower the permission for all mappings to a given page.
2856: */
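/*
 * For example, pmap_page_protect(pg, VM_PROT_READ) write-protects every
 * mapping of the page (via pmap_copy_on_write()), while
 * pmap_page_protect(pg, VM_PROT_NONE) removes them all (via
 * pmap_remove_all()).
 */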
2857:
2858: void
2859: pmap_page_protect(pg, prot)
2860: struct vm_page *pg;
2861: vm_prot_t prot;
2862: {
2863:
1.49 thorpej 2864: PDEBUG(0, printf("pmap_page_protect(pa=%lx, prot=%d)\n",
2865: VM_PAGE_TO_PHYS(pg), prot));
1.1 matt 2866:
2867: switch(prot) {
1.17 chris 2868: case VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE:
2869: case VM_PROT_READ|VM_PROT_WRITE:
2870: return;
2871:
1.1 matt 2872: case VM_PROT_READ:
2873: case VM_PROT_READ|VM_PROT_EXECUTE:
1.49 thorpej 2874: pmap_copy_on_write(pg);
1.1 matt 2875: break;
2876:
2877: default:
1.49 thorpej 2878: pmap_remove_all(pg);
1.1 matt 2879: break;
2880: }
2881: }
2882:
2883:
2884: /*
2885: * Routine: pmap_unwire
2886: * Function: Clear the wired attribute for a map/virtual-address
2887: * pair.
2888: * In/out conditions:
2889: * The mapping must already exist in the pmap.
2890: */
2891:
2892: void
2893: pmap_unwire(pmap, va)
1.15 chris 2894: struct pmap *pmap;
1.1 matt 2895: vaddr_t va;
2896: {
2897: pt_entry_t *pte;
1.2 matt 2898: paddr_t pa;
1.49 thorpej 2899: struct vm_page *pg;
1.1 matt 2900:
2901: /*
2902: * Make sure pmap is valid. -dct
2903: */
2904: if (pmap == NULL)
2905: return;
2906:
2907: /* Get the pte */
2908: pte = pmap_pte(pmap, va);
2909: if (!pte)
2910: return;
2911:
2912: /* Extract the physical address of the page */
2913: pa = pmap_pte_pa(pte);
2914:
1.49 thorpej 2915: if ((pg = PHYS_TO_VM_PAGE(pa)) == NULL)
1.1 matt 2916: return;
1.49 thorpej 2917:
2918: simple_lock(&pg->mdpage.pvh_slock);
1.1 matt 2919: /* Update the wired bit in the pv entry for this page. */
1.49 thorpej 2920: (void) pmap_modify_pv(pmap, va, pg, PT_W, 0);
2921: simple_unlock(&pg->mdpage.pvh_slock);
1.1 matt 2922: }
2923:
2924: /*
1.15 chris 2925: * pt_entry_t *pmap_pte(struct pmap *pmap, vaddr_t va)
1.1 matt 2926: *
2927: * Return the pointer to a page table entry corresponding to the supplied
2928: * virtual address.
2929: *
2930: * The page directory is first checked to make sure that a page table
2931: * for the address in question exists and if it does a pointer to the
2932: * entry is returned.
2933: *
               2934:  * The way this works is that the kernel page tables are mapped
2935: * into the memory map at ALT_PAGE_TBLS_BASE to ALT_PAGE_TBLS_BASE+4MB.
2936: * This allows page tables to be located quickly.
2937: */
2938: pt_entry_t *
2939: pmap_pte(pmap, va)
1.15 chris 2940: struct pmap *pmap;
1.1 matt 2941: vaddr_t va;
2942: {
2943: pt_entry_t *ptp;
2944: pt_entry_t *result;
2945:
2946: /* The pmap must be valid */
2947: if (!pmap)
2948: return(NULL);
2949:
2950: /* Return the address of the pte */
2951: PDEBUG(10, printf("pmap_pte: pmap=%p va=V%08lx pde = V%p (%08X)\n",
2952: pmap, va, pmap_pde(pmap, va), *(pmap_pde(pmap, va))));
2953:
2954: /* Do we have a valid pde ? If not we don't have a page table */
1.30 rearnsha 2955: if (!pmap_pde_page(pmap_pde(pmap, va))) {
1.39 thorpej 2956: PDEBUG(0, printf("pmap_pte: failed - pde = %p\n",
2957: pmap_pde(pmap, va)));
1.1 matt 2958: return(NULL);
2959: }
2960:
2961: PDEBUG(10, printf("pmap pagetable = P%08lx current = P%08x\n",
2962: pmap->pm_pptpt, (*((pt_entry_t *)(PROCESS_PAGE_TBLS_BASE
2963: + (PROCESS_PAGE_TBLS_BASE >> (PGSHIFT - 2)) +
2964: (PROCESS_PAGE_TBLS_BASE >> PDSHIFT))) & PG_FRAME)));
2965:
2966: /*
2967: * If the pmap is the kernel pmap or the pmap is the active one
2968: * then we can just return a pointer to entry relative to
2969: * PROCESS_PAGE_TBLS_BASE.
2970: * Otherwise we need to map the page tables to an alternative
2971: * address and reference them there.
2972: */
1.15 chris 2973: if (pmap == pmap_kernel() || pmap->pm_pptpt
1.1 matt 2974: == (*((pt_entry_t *)(PROCESS_PAGE_TBLS_BASE
2975: + ((PROCESS_PAGE_TBLS_BASE >> (PGSHIFT - 2)) &
2976: ~3) + (PROCESS_PAGE_TBLS_BASE >> PDSHIFT))) & PG_FRAME)) {
2977: ptp = (pt_entry_t *)PROCESS_PAGE_TBLS_BASE;
2978: } else {
2979: struct proc *p = curproc;
2980:
2981: /* If we don't have a valid curproc use proc0 */
2982: /* Perhaps we should just use kernel_pmap instead */
2983: if (p == NULL)
2984: p = &proc0;
2985: #ifdef DIAGNOSTIC
2986: /*
2987: * The pmap should always be valid for the process so
2988: * panic if it is not.
2989: */
2990: if (!p->p_vmspace || !p->p_vmspace->vm_map.pmap) {
2991: printf("pmap_pte: va=%08lx p=%p vm=%p\n",
2992: va, p, p->p_vmspace);
2993: console_debugger();
2994: }
2995: /*
2996: * The pmap for the current process should be mapped. If it
2997: * is not then we have a problem.
2998: */
2999: if (p->p_vmspace->vm_map.pmap->pm_pptpt !=
3000: (*((pt_entry_t *)(PROCESS_PAGE_TBLS_BASE
3001: + (PROCESS_PAGE_TBLS_BASE >> (PGSHIFT - 2)) +
3002: (PROCESS_PAGE_TBLS_BASE >> PDSHIFT))) & PG_FRAME)) {
3003: printf("pmap pagetable = P%08lx current = P%08x ",
3004: pmap->pm_pptpt, (*((pt_entry_t *)(PROCESS_PAGE_TBLS_BASE
3005: + (PROCESS_PAGE_TBLS_BASE >> (PGSHIFT - 2)) +
3006: (PROCESS_PAGE_TBLS_BASE >> PDSHIFT))) &
3007: PG_FRAME));
3008: printf("pptpt=%lx\n", p->p_vmspace->vm_map.pmap->pm_pptpt);
3009: panic("pmap_pte: current and pmap mismatch\n");
3010: }
3011: #endif
3012:
3013: ptp = (pt_entry_t *)ALT_PAGE_TBLS_BASE;
3014: pmap_map_in_l1(p->p_vmspace->vm_map.pmap, ALT_PAGE_TBLS_BASE,
1.17 chris 3015: pmap->pm_pptpt, FALSE);
1.1 matt 3016: cpu_tlb_flushD();
1.32 thorpej 3017: cpu_cpwait();
1.1 matt 3018: }
3019: PDEBUG(10, printf("page tables base = %p offset=%lx\n", ptp,
3020: ((va >> (PGSHIFT-2)) & ~3)));
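	/*
	 * The offset computed below is the byte offset of this va's PTE
	 * within the 4MB window of page tables at ptp: each PTE is 4 bytes,
	 * so it is (va >> PGSHIFT) * 4, which is what
	 * ((va >> (PGSHIFT - 2)) & ~3) evaluates to.  For example, assuming
	 * 4KB pages (PGSHIFT == 12), va 0x12345678 gives offset 0x48d14.
	 */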
3021: result = (pt_entry_t *)((char *)ptp + ((va >> (PGSHIFT-2)) & ~3));
3022: return(result);
3023: }
3024:
3025: /*
3026: * Routine: pmap_extract
3027: * Function:
3028: * Extract the physical page address associated
3029: * with the given map/virtual_address pair.
3030: */
3031: boolean_t
3032: pmap_extract(pmap, va, pap)
1.15 chris 3033: struct pmap *pmap;
1.1 matt 3034: vaddr_t va;
3035: paddr_t *pap;
3036: {
1.34 thorpej 3037: pd_entry_t *pde;
1.11 chris 3038: pt_entry_t *pte, *ptes;
1.1 matt 3039: paddr_t pa;
1.34 thorpej 3040: boolean_t rv = TRUE;
1.1 matt 3041:
3042: PDEBUG(5, printf("pmap_extract: pmap=%p, va=V%08lx\n", pmap, va));
3043:
3044: /*
1.11 chris 3045: * Get the pte for this virtual address.
1.1 matt 3046: */
1.34 thorpej 3047: pde = pmap_pde(pmap, va);
1.11 chris 3048: ptes = pmap_map_ptes(pmap);
3049: pte = &ptes[arm_byte_to_page(va)];
1.1 matt 3050:
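	/*
	 * Assuming the usual ARM mapping sizes (1MB sections, 64KB large
	 * pages, 4KB small pages), the cases below reconstruct the physical
	 * address as, for example:
	 *	section:	(*pde & PD_MASK)	    | (va & 0x000fffff)
	 *	large page:	(*pte & ~(L2_LPAGE_SIZE-1)) | (va & 0x0000ffff)
	 *	small page:	pmap_pte_pa(pte)	    | (va & ~PG_FRAME)
	 */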
1.34 thorpej 3051: if (pmap_pde_section(pde)) {
3052: pa = (*pde & PD_MASK) | (va & (L1_SEC_SIZE - 1));
3053: goto out;
3054: } else if (pmap_pde_page(pde) == 0 || pmap_pte_v(pte) == 0) {
3055: rv = FALSE;
3056: goto out;
1.11 chris 3057: }
1.1 matt 3058:
1.34 thorpej 3059: if ((*pte & L2_MASK) == L2_LPAGE) {
1.1 matt 3060: /* Extract the physical address from the pte */
1.34 thorpej 3061: pa = *pte & ~(L2_LPAGE_SIZE - 1);
1.1 matt 3062:
3063: PDEBUG(5, printf("pmap_extract: LPAGE pa = P%08lx\n",
3064: (pa | (va & (L2_LPAGE_SIZE - 1)))));
3065:
3066: if (pap != NULL)
3067: *pap = pa | (va & (L2_LPAGE_SIZE - 1));
1.34 thorpej 3068: goto out;
3069: }
3070:
3071: /* Extract the physical address from the pte */
3072: pa = pmap_pte_pa(pte);
1.1 matt 3073:
1.34 thorpej 3074: PDEBUG(5, printf("pmap_extract: SPAGE pa = P%08lx\n",
3075: (pa | (va & ~PG_FRAME))));
1.1 matt 3076:
1.34 thorpej 3077: if (pap != NULL)
3078: *pap = pa | (va & ~PG_FRAME);
3079: out:
1.11 chris 3080: pmap_unmap_ptes(pmap);
1.34 thorpej 3081: return (rv);
1.1 matt 3082: }
3083:
3084:
3085: /*
1.39 thorpej 3086: * Copy the range specified by src_addr/len from the source map to the
3087: * range dst_addr/len in the destination map.
1.1 matt 3088: *
1.39 thorpej 3089: * This routine is only advisory and need not do anything.
1.1 matt 3090: */
3091:
1.39 thorpej 3092: void
3093: pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
3094: struct pmap *dst_pmap;
3095: struct pmap *src_pmap;
3096: vaddr_t dst_addr;
3097: vsize_t len;
3098: vaddr_t src_addr;
3099: {
3100: PDEBUG(0, printf("pmap_copy(%p, %p, %lx, %lx, %lx)\n",
3101: dst_pmap, src_pmap, dst_addr, len, src_addr));
3102: }
1.1 matt 3103:
3104: #if defined(PMAP_DEBUG)
3105: void
3106: pmap_dump_pvlist(phys, m)
3107: vaddr_t phys;
3108: char *m;
3109: {
1.49 thorpej 3110: struct vm_page *pg;
1.1 matt 3111: struct pv_entry *pv;
3112:
1.49 thorpej 3113: if ((pg = PHYS_TO_VM_PAGE(phys)) == NULL) {
1.1 matt 3114: printf("INVALID PA\n");
3115: return;
3116: }
1.49 thorpej 3117: simple_lock(&pg->mdpage.pvh_slock);
1.1 matt 3118: printf("%s %08lx:", m, phys);
1.49 thorpej 3119: if (pg->mdpage.pvh_list == NULL) {
1.1 matt       3120: 		printf(" no mappings\n");
               		simple_unlock(&pg->mdpage.pvh_slock);
               3121: 		return;
3122: }
3123:
1.49 thorpej 3124: for (pv = pg->mdpage.pvh_list; pv; pv = pv->pv_next)
1.1 matt 3125: printf(" pmap %p va %08lx flags %08x", pv->pv_pmap,
3126: pv->pv_va, pv->pv_flags);
3127:
3128: printf("\n");
1.49 thorpej 3129: simple_unlock(&pg->mdpage.pvh_slock);
1.1 matt 3130: }
3131:
3132: #endif /* PMAP_DEBUG */
3133:
1.11 chris 3134: static pt_entry_t *
3135: pmap_map_ptes(struct pmap *pmap)
3136: {
1.17 chris 3137: struct proc *p;
3138:
3139: /* the kernel's pmap is always accessible */
3140: if (pmap == pmap_kernel()) {
3141: return (pt_entry_t *)PROCESS_PAGE_TBLS_BASE ;
3142: }
3143:
3144: if (pmap_is_curpmap(pmap)) {
3145: simple_lock(&pmap->pm_obj.vmobjlock);
3146: return (pt_entry_t *)PROCESS_PAGE_TBLS_BASE;
3147: }
3148:
3149: p = curproc;
3150:
3151: if (p == NULL)
3152: p = &proc0;
3153:
3154: /* need to lock both curpmap and pmap: use ordered locking */
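	/*
	 * The two locks are always taken in ascending order of pmap address,
	 * so any two callers locking the same pair of pmaps acquire them in
	 * the same order, which prevents a lock-ordering deadlock.
	 */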
3155: if ((unsigned) pmap < (unsigned) curproc->p_vmspace->vm_map.pmap) {
3156: simple_lock(&pmap->pm_obj.vmobjlock);
3157: simple_lock(&curproc->p_vmspace->vm_map.pmap->pm_obj.vmobjlock);
3158: } else {
3159: simple_lock(&curproc->p_vmspace->vm_map.pmap->pm_obj.vmobjlock);
3160: simple_lock(&pmap->pm_obj.vmobjlock);
3161: }
1.11 chris 3162:
1.17 chris 3163: pmap_map_in_l1(p->p_vmspace->vm_map.pmap, ALT_PAGE_TBLS_BASE,
3164: pmap->pm_pptpt, FALSE);
3165: cpu_tlb_flushD();
1.32 thorpej 3166: cpu_cpwait();
1.17 chris 3167: return (pt_entry_t *)ALT_PAGE_TBLS_BASE;
3168: }
3169:
3170: /*
3171: * pmap_unmap_ptes: unlock the PTE mapping of "pmap"
3172: */
3173:
3174: static void
3175: pmap_unmap_ptes(pmap)
3176: struct pmap *pmap;
3177: {
3178: if (pmap == pmap_kernel()) {
3179: return;
3180: }
3181: if (pmap_is_curpmap(pmap)) {
3182: simple_unlock(&pmap->pm_obj.vmobjlock);
3183: } else {
3184: simple_unlock(&pmap->pm_obj.vmobjlock);
3185: simple_unlock(&curproc->p_vmspace->vm_map.pmap->pm_obj.vmobjlock);
3186: }
1.11 chris 3187: }
1.1 matt 3188:
3189: /*
3190: * Modify pte bits for all ptes corresponding to the given physical address.
3191: * We use `maskbits' rather than `clearbits' because we're always passing
3192: * constants and the latter would require an extra inversion at run-time.
3193: */
3194:
1.22 chris 3195: static void
1.49 thorpej 3196: pmap_clearbit(pg, maskbits)
3197: struct vm_page *pg;
1.22 chris 3198: unsigned int maskbits;
1.1 matt 3199: {
3200: struct pv_entry *pv;
3201: pt_entry_t *pte;
3202: vaddr_t va;
1.49 thorpej 3203: int tlbentry;
1.1 matt 3204:
3205: PDEBUG(1, printf("pmap_clearbit: pa=%08lx mask=%08x\n",
1.49 thorpej 3206: VM_PAGE_TO_PHYS(pg), maskbits));
1.21 chris 3207:
3208: tlbentry = 0;
3209:
1.17 chris 3210: PMAP_HEAD_TO_MAP_LOCK();
1.49 thorpej 3211: simple_lock(&pg->mdpage.pvh_slock);
1.17 chris 3212:
1.1 matt 3213: /*
3214: * Clear saved attributes (modify, reference)
3215: */
1.49 thorpej 3216: pg->mdpage.pvh_attrs &= ~maskbits;
1.1 matt 3217:
1.49 thorpej 3218: if (pg->mdpage.pvh_list == NULL) {
3219: simple_unlock(&pg->mdpage.pvh_slock);
1.17 chris 3220: PMAP_HEAD_TO_MAP_UNLOCK();
1.1 matt 3221: return;
3222: }
3223:
3224: /*
               3225: 	 * Loop over all current mappings, setting/clearing as appropriate
3226: */
1.49 thorpej 3227: for (pv = pg->mdpage.pvh_list; pv; pv = pv->pv_next) {
1.1 matt 3228: va = pv->pv_va;
3229: pv->pv_flags &= ~maskbits;
3230: pte = pmap_pte(pv->pv_pmap, va);
1.17 chris 3231: KASSERT(pte != NULL);
1.29 rearnsha 3232: if (maskbits & (PT_Wr|PT_M)) {
3233: if ((pv->pv_flags & PT_NC)) {
3234: /*
3235: * Entry is not cacheable: reenable
3236: * the cache, nothing to flush
3237: *
3238: * Don't turn caching on again if this
3239: * is a modified emulation. This
               3240: 				 * would be inconsistent with the
3241: * settings created by
3242: * pmap_vac_me_harder().
3243: *
3244: * There's no need to call
3245: * pmap_vac_me_harder() here: all
               3246: 				 * pages are losing their write
3247: * permission.
3248: *
3249: */
3250: if (maskbits & PT_Wr) {
3251: *pte |= pte_cache_mode;
3252: pv->pv_flags &= ~PT_NC;
3253: }
3254: } else if (pmap_is_curpmap(pv->pv_pmap))
3255: /*
               3256: 			 * Entry is cacheable: check if the pmap is
               3257: 			 * current; if it is, flush the page from the
               3258: 			 * cache, otherwise it won't be in the cache.
3259: */
1.36 thorpej 3260: cpu_idcache_wbinv_range(pv->pv_va, NBPG);
1.29 rearnsha 3261:
3262: /* make the pte read only */
3263: *pte &= ~PT_AP(AP_W);
3264: }
3265:
3266: if (maskbits & PT_H)
3267: *pte = (*pte & ~L2_MASK) | L2_INVAL;
1.21 chris 3268:
1.29 rearnsha 3269: if (pmap_is_curpmap(pv->pv_pmap))
1.21 chris 3270: /*
1.29 rearnsha 3271: * if we had cacheable pte's we'd clean the
3272: * pte out to memory here
3273: *
1.21 chris 3274: * flush tlb entry as it's in the current pmap
3275: */
3276: cpu_tlb_flushID_SE(pv->pv_va);
1.29 rearnsha 3277: }
1.32 thorpej 3278: cpu_cpwait();
1.21 chris 3279:
1.49 thorpej 3280: simple_unlock(&pg->mdpage.pvh_slock);
1.17 chris 3281: PMAP_HEAD_TO_MAP_UNLOCK();
1.1 matt 3282: }
3283:
1.50 thorpej 3284: /*
3285: * pmap_clear_modify:
3286: *
3287: * Clear the "modified" attribute for a page.
3288: */
1.1 matt 3289: boolean_t
3290: pmap_clear_modify(pg)
3291: struct vm_page *pg;
3292: {
3293: boolean_t rv;
3294:
1.50 thorpej 3295: if (pg->mdpage.pvh_attrs & PT_M) {
3296: rv = TRUE;
3297: pmap_clearbit(pg, PT_M);
3298: } else
3299: rv = FALSE;
3300:
3301: PDEBUG(0, printf("pmap_clear_modify pa=%08lx -> %d\n",
3302: VM_PAGE_TO_PHYS(pg), rv));
3303:
3304: return (rv);
1.1 matt 3305: }
3306:
1.50 thorpej 3307: /*
3308: * pmap_clear_reference:
3309: *
3310: * Clear the "referenced" attribute for a page.
3311: */
1.1 matt 3312: boolean_t
3313: pmap_clear_reference(pg)
3314: struct vm_page *pg;
3315: {
3316: boolean_t rv;
3317:
1.50 thorpej 3318: if (pg->mdpage.pvh_attrs & PT_H) {
3319: rv = TRUE;
3320: pmap_clearbit(pg, PT_H);
3321: } else
3322: rv = FALSE;
3323:
3324: PDEBUG(0, printf("pmap_clear_reference pa=%08lx -> %d\n",
3325: VM_PAGE_TO_PHYS(pg), rv));
3326:
3327: return (rv);
1.1 matt 3328: }
3329:
3330:
3331: void
1.49 thorpej 3332: pmap_copy_on_write(pg)
3333: struct vm_page *pg;
1.39 thorpej 3334: {
1.49 thorpej 3335: PDEBUG(0, printf("pmap_copy_on_write pa=%08lx\n", VM_PAGE_TO_PHYS(pg)));
3336: pmap_clearbit(pg, PT_Wr);
1.39 thorpej 3337: }
3338:
1.50 thorpej 3339: /*
3340: * pmap_is_modified:
3341: *
3342: * Test if a page has the "modified" attribute.
3343: */
3344: /* See <arm/arm32/pmap.h> */
1.39 thorpej 3345:
1.50 thorpej 3346: /*
3347: * pmap_is_referenced:
3348: *
3349: * Test if a page has the "referenced" attribute.
3350: */
3351: /* See <arm/arm32/pmap.h> */
1.1 matt 3352:
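/*
 * Referenced/modified emulation: pages are entered with an invalid
 * (L2_INVAL) or read-only PTE so that the first access, or the first
 * write, faults.  A rough sketch of how fault-handling code is expected
 * to use the two routines below (illustrative only; the real call sites
 * are in the fault handling code):
 *
 *	if (pmap_modified_emulation(pmap, va))
 *		return;			(write fault handled, PT_M now set)
 *	if (pmap_handled_emulation(pmap, va))
 *		return;			(reference fault handled, PT_H now set)
 *	(otherwise hand the fault to uvm_fault())
 */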
3353: int
3354: pmap_modified_emulation(pmap, va)
1.15 chris 3355: struct pmap *pmap;
1.1 matt 3356: vaddr_t va;
3357: {
3358: pt_entry_t *pte;
1.2 matt 3359: paddr_t pa;
1.49 thorpej 3360: struct vm_page *pg;
1.1 matt 3361: u_int flags;
3362:
3363: PDEBUG(2, printf("pmap_modified_emulation\n"));
3364:
3365: /* Get the pte */
3366: pte = pmap_pte(pmap, va);
3367: if (!pte) {
3368: PDEBUG(2, printf("no pte\n"));
3369: return(0);
3370: }
3371:
3372: PDEBUG(1, printf("*pte=%08x\n", *pte));
3373:
3374: /* Check for a zero pte */
3375: if (*pte == 0)
3376: return(0);
3377:
3378: /* This can happen if user code tries to access kernel memory. */
3379: if ((*pte & PT_AP(AP_W)) != 0)
3380: return (0);
3381:
3382: /* Extract the physical address of the page */
3383: pa = pmap_pte_pa(pte);
1.49 thorpej 3384: if ((pg = PHYS_TO_VM_PAGE(pa)) == NULL)
1.1 matt 3385: return(0);
3386:
1.49 thorpej 3387: /* Get the current flags for this page. */
1.39 thorpej 3388: PMAP_HEAD_TO_MAP_LOCK();
1.49 thorpej 3389: simple_lock(&pg->mdpage.pvh_slock);
1.17 chris 3390:
1.49 thorpej 3391: flags = pmap_modify_pv(pmap, va, pg, 0, 0);
1.1 matt 3392: PDEBUG(2, printf("pmap_modified_emulation: flags = %08x\n", flags));
3393:
3394: /*
3395: * Do the flags say this page is writable ? If not then it is a
3396: * genuine write fault. If yes then the write fault is our fault
               3397: 	 * as we did not reflect the write access in the PTE. Now that we
               3398: 	 * know a write has occurred, we can correct this and also set the
               3399: 	 * modified bit.
3400: */
1.17 chris 3401: if (~flags & PT_Wr) {
1.49 thorpej 3402: simple_unlock(&pg->mdpage.pvh_slock);
1.17 chris 3403: PMAP_HEAD_TO_MAP_UNLOCK();
1.1 matt 3404: return(0);
1.17 chris 3405: }
1.1 matt 3406:
3407: PDEBUG(0, printf("pmap_modified_emulation: Got a hit va=%08lx, pte = %p (%08x)\n",
3408: va, pte, *pte));
1.49 thorpej 3409: pg->mdpage.pvh_attrs |= PT_H | PT_M;
1.29 rearnsha 3410:
3411: /*
3412: * Re-enable write permissions for the page. No need to call
3413: * pmap_vac_me_harder(), since this is just a
3414: * modified-emulation fault, and the PT_Wr bit isn't changing. We've
3415: * already set the cacheable bits based on the assumption that we
3416: * can write to this page.
3417: */
1.1 matt 3418: *pte = (*pte & ~L2_MASK) | L2_SPAGE | PT_AP(AP_W);
3419: PDEBUG(0, printf("->(%08x)\n", *pte));
3420:
1.49 thorpej 3421: simple_unlock(&pg->mdpage.pvh_slock);
1.17 chris 3422: PMAP_HEAD_TO_MAP_UNLOCK();
1.1 matt 3423: /* Return, indicating the problem has been dealt with */
3424: cpu_tlb_flushID_SE(va);
1.32 thorpej 3425: cpu_cpwait();
1.1 matt 3426: return(1);
3427: }
3428:
3429:
3430: int
3431: pmap_handled_emulation(pmap, va)
1.15 chris 3432: struct pmap *pmap;
1.1 matt 3433: vaddr_t va;
3434: {
3435: pt_entry_t *pte;
1.2 matt 3436: paddr_t pa;
1.49 thorpej 3437: struct vm_page *pg;
1.1 matt 3438:
3439: PDEBUG(2, printf("pmap_handled_emulation\n"));
3440:
3441: /* Get the pte */
3442: pte = pmap_pte(pmap, va);
3443: if (!pte) {
3444: PDEBUG(2, printf("no pte\n"));
3445: return(0);
3446: }
3447:
3448: PDEBUG(1, printf("*pte=%08x\n", *pte));
3449:
3450: /* Check for a zero pte */
3451: if (*pte == 0)
3452: return(0);
3453:
3454: /* This can happen if user code tries to access kernel memory. */
3455: if ((*pte & L2_MASK) != L2_INVAL)
3456: return (0);
3457:
3458: /* Extract the physical address of the page */
3459: pa = pmap_pte_pa(pte);
1.49 thorpej 3460: if ((pg = PHYS_TO_VM_PAGE(pa)) == NULL)
3461: return (0);
1.1 matt 3462:
3463: /*
               3464: 	 * OK, we just enable the pte and mark the attributes as handled.
3465: */
3466: PDEBUG(0, printf("pmap_handled_emulation: Got a hit va=%08lx pte = %p (%08x)\n",
3467: va, pte, *pte));
1.49 thorpej 3468: pg->mdpage.pvh_attrs |= PT_H;
1.1 matt 3469: *pte = (*pte & ~L2_MASK) | L2_SPAGE;
3470: PDEBUG(0, printf("->(%08x)\n", *pte));
3471:
3472: /* Return, indicating the problem has been dealt with */
3473: cpu_tlb_flushID_SE(va);
1.32 thorpej 3474: cpu_cpwait();
1.1 matt 3475: return(1);
3476: }
3477:
1.17 chris 3478:
3479:
3480:
1.1 matt 3481: /*
3482: * pmap_collect: free resources held by a pmap
3483: *
3484: * => optional function.
3485: * => called when a process is swapped out to free memory.
3486: */
3487:
3488: void
3489: pmap_collect(pmap)
1.15 chris 3490: struct pmap *pmap;
1.1 matt 3491: {
3492: }
3493:
3494: /*
3495: * Routine: pmap_procwr
3496: *
3497: * Function:
3498: * Synchronize caches corresponding to [addr, addr+len) in p.
3499: *
3500: */
3501: void
3502: pmap_procwr(p, va, len)
3503: struct proc *p;
3504: vaddr_t va;
1.3 matt 3505: int len;
1.1 matt 3506: {
3507: /* We only need to do anything if it is the current process. */
3508: if (p == curproc)
1.36 thorpej 3509: cpu_icache_sync_range(va, len);
1.17 chris 3510: }
3511: /*
3512: * PTP functions
3513: */
3514:
3515: /*
3516: * pmap_steal_ptp: Steal a PTP from somewhere else.
3517: *
3518:  * This is just a placeholder; for now we never steal.
3519: */
3520:
3521: static struct vm_page *
3522: pmap_steal_ptp(struct pmap *pmap, vaddr_t va)
3523: {
3524: return (NULL);
3525: }
3526:
3527: /*
3528: * pmap_get_ptp: get a PTP (if there isn't one, allocate a new one)
3529: *
3530: * => pmap should NOT be pmap_kernel()
3531: * => pmap should be locked
3532: */
3533:
3534: static struct vm_page *
3535: pmap_get_ptp(struct pmap *pmap, vaddr_t va, boolean_t just_try)
3536: {
3537: struct vm_page *ptp;
3538:
1.30 rearnsha 3539: if (pmap_pde_page(pmap_pde(pmap, va))) {
1.17 chris 3540:
3541: /* valid... check hint (saves us a PA->PG lookup) */
3542: #if 0
3543: if (pmap->pm_ptphint &&
3544: ((unsigned)pmap_pde(pmap, va) & PG_FRAME) ==
3545: VM_PAGE_TO_PHYS(pmap->pm_ptphint))
3546: return (pmap->pm_ptphint);
3547: #endif
3548: ptp = uvm_pagelookup(&pmap->pm_obj, va);
3549: #ifdef DIAGNOSTIC
3550: if (ptp == NULL)
3551: panic("pmap_get_ptp: unmanaged user PTP");
3552: #endif
3553: // pmap->pm_ptphint = ptp;
3554: return(ptp);
3555: }
3556:
3557: /* allocate a new PTP (updates ptphint) */
3558: return(pmap_alloc_ptp(pmap, va, just_try));
3559: }
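
/*
 * In short: if the L1 slot for "va" already points at a page table,
 * the corresponding PTP must be managed by pm_obj, so uvm_pagelookup()
 * recovers its vm_page; only when the slot is empty is a fresh PTP
 * allocated (and wired into the L1) by pmap_alloc_ptp().
 */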
3560:
3561: /*
3562: * pmap_alloc_ptp: allocate a PTP for a PMAP
3563: *
3564: * => pmap should already be locked by caller
3565: * => we use the ptp's wire_count to count the number of active mappings
3566: * in the PTP (we start it at one to prevent any chance this PTP
3567: * will ever leak onto the active/inactive queues)
3568: */
3569:
3570: /*__inline */ static struct vm_page *
3571: pmap_alloc_ptp(struct pmap *pmap, vaddr_t va, boolean_t just_try)
3572: {
3573: struct vm_page *ptp;
3574:
3575: ptp = uvm_pagealloc(&pmap->pm_obj, va, NULL,
3576: UVM_PGA_USERESERVE|UVM_PGA_ZERO);
3577: if (ptp == NULL) {
3578: if (just_try)
3579: return (NULL);
3580:
3581: ptp = pmap_steal_ptp(pmap, va);
3582:
3583: if (ptp == NULL)
3584: return (NULL);
3585: /* Stole a page, zero it. */
3586: pmap_zero_page(VM_PAGE_TO_PHYS(ptp));
3587: }
3588:
3589: /* got one! */
3590: ptp->flags &= ~PG_BUSY; /* never busy */
3591: ptp->wire_count = 1; /* no mappings yet */
3592: pmap_map_in_l1(pmap, va, VM_PAGE_TO_PHYS(ptp), TRUE);
3593: pmap->pm_stats.resident_count++; /* count PTP as resident */
3594: // pmap->pm_ptphint = ptp;
3595: return (ptp);
1.1 matt 3596: }
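
/*
 * Roughly how the wire_count set above is used: each mapping entered
 * into this PTP bumps ptp->wire_count and each removal drops it, so a
 * PTP whose wire_count falls back to its initial value of 1 holds no
 * mappings and is a candidate for being freed by the pmap.
 */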
1.48 chris 3597:
3598: vaddr_t
3599: pmap_growkernel(maxkvaddr)
3600: vaddr_t maxkvaddr;
3601: {
3602: struct pmap *kpm = pmap_kernel(), *pm;
3603: int s;
3604: paddr_t ptaddr;
3605: struct vm_page *ptp;
3606:
3607: if (maxkvaddr <= pmap_curmaxkvaddr)
3608: goto out; /* we are OK */
3609: NPDEBUG(PDB_GROWKERN, printf("pmap_growkernel: growing kernel from %lx to %lx\n",
3610: pmap_curmaxkvaddr, maxkvaddr));
3611:
3612: /*
3613: * whoops! we need to add kernel PTPs
3614: */
3615:
3616: s = splhigh(); /* to be safe */
3617: simple_lock(&kpm->pm_obj.vmobjlock);
3618: /* due to the way the arm pmap works we map 4MB at a time */
3619: for (/*null*/ ; pmap_curmaxkvaddr < maxkvaddr ; pmap_curmaxkvaddr += 4 * NBPD) {
3620:
3621: if (uvm.page_init_done == FALSE) {
3622:
3623: /*
3624: * we're growing the kernel pmap early (from
3625: * uvm_pageboot_alloc()). this case must be
3626: * handled a little differently.
3627: */
3628:
3629: if (uvm_page_physget(&ptaddr) == FALSE)
3630: panic("pmap_growkernel: out of memory");
3631: pmap_zero_page(ptaddr);
3632:
3633: /* map this page in */
3634: pmap_map_in_l1(kpm, (pmap_curmaxkvaddr + 1), ptaddr, TRUE);
3635:
3636: /* count PTP as resident */
3637: kpm->pm_stats.resident_count++;
3638: continue;
3639: }
3640:
3641: /*
3642: * THIS *MUST* BE CODED SO AS TO WORK IN THE
3643: * pmap_initialized == FALSE CASE! WE MAY BE
3644: * INVOKED WHILE pmap_init() IS RUNNING!
3645: */
3646:
3647: if ((ptp = pmap_alloc_ptp(kpm, (pmap_curmaxkvaddr + 1), FALSE)) == NULL) {
3648: panic("pmap_growkernel: alloc ptp failed");
3649: }
3650:
3651: /* distribute new kernel PTP to all active pmaps */
3652: simple_lock(&pmaps_lock);
3653: LIST_FOREACH(pm, &pmaps, pm_list) {
3654: pmap_map_in_l1(pm, (pmap_curmaxkvaddr + 1), VM_PAGE_TO_PHYS(ptp), TRUE);
3655: }
3656:
3657: simple_unlock(&pmaps_lock);
3658: }
3659:
3660: /*
3661:	 * flush out the TLB; this is expensive, but growkernel happens
3662:	 * so rarely that it does not matter
3663: */
3664: cpu_tlb_flushD();
3665: cpu_cpwait();
3666:
3667: simple_unlock(&kpm->pm_obj.vmobjlock);
3668: splx(s);
3669:
3670: out:
3671: return (pmap_curmaxkvaddr);
3672: }
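
/*
 * Why the loop above advances in steps of 4 * NBPD: an L1 slot covers
 * NBPD (1MB) of VA and points at a 1KB coarse L2 table, but PTPs are
 * allocated as whole 4KB pages.  pmap_map_in_l1() therefore wires one
 * PTP into four consecutive L1 slots, i.e. 4MB of kernel VA per
 * iteration, matching the "4MB at a time" comment.
 */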
3673:
3674:
1.1 matt 3675:
1.40 thorpej 3676: /************************ Bootstrapping routines ****************************/
3677:
3678: /*
1.46 thorpej 3679: * This list exists for the benefit of pmap_map_chunk(). It keeps track
3680: * of the kernel L2 tables during bootstrap, so that pmap_map_chunk() can
3681: * find them as necessary.
3682: *
3683: * Note that the data on this list is not valid after initarm() returns.
3684: */
3685: SLIST_HEAD(, pv_addr) kernel_pt_list = SLIST_HEAD_INITIALIZER(kernel_pt_list);
3686:
3687: static vaddr_t
3688: kernel_pt_lookup(paddr_t pa)
3689: {
3690: pv_addr_t *pv;
3691:
3692: SLIST_FOREACH(pv, &kernel_pt_list, pv_list) {
3693: if (pv->pv_pa == pa)
3694: return (pv->pv_va);
3695: }
3696: return (0);
3697: }
3698:
3699: /*
1.40 thorpej 3700: * pmap_map_section:
3701: *
3702: * Create a single section mapping.
3703: */
3704: void
3705: pmap_map_section(vaddr_t l1pt, vaddr_t va, paddr_t pa, int prot, int cache)
3706: {
3707: pd_entry_t *pde = (pd_entry_t *) l1pt;
1.43 thorpej 3708: pd_entry_t ap = (prot & VM_PROT_WRITE) ? AP_KRW : AP_KR;
3709: pd_entry_t fl = (cache == PTE_CACHE) ? pte_cache_mode : 0;
1.40 thorpej 3710:
3711: KASSERT(((va | pa) & (L1_SEC_SIZE - 1)) == 0);
3712:
1.43 thorpej 3713: pde[va >> PDSHIFT] = L1_SECPTE(pa & PD_MASK, ap, fl);
1.41 thorpej 3714: }
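
/*
 * A hedged usage sketch: the addresses, l1pt_va and the PTE_NOCACHE
 * flag below are illustrative assumptions, not taken from any real
 * port.  A 1MB-aligned, 1MB-sized window (e.g. device registers) can
 * be mapped with a single section entry and no L2 table at all.
 */
#if 0	/* illustrative only */
	pmap_map_section(l1pt_va, 0xf0000000, 0x40000000,
	    VM_PROT_READ | VM_PROT_WRITE, PTE_NOCACHE);
#endif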
3715:
3716: /*
3717: * pmap_map_entry:
3718: *
3719: * Create a single page mapping.
3720: */
3721: void
1.47 thorpej 3722: pmap_map_entry(vaddr_t l1pt, vaddr_t va, paddr_t pa, int prot, int cache)
1.41 thorpej 3723: {
1.47 thorpej 3724: pd_entry_t *pde = (pd_entry_t *) l1pt;
1.41 thorpej 3725: pt_entry_t ap = (prot & VM_PROT_WRITE) ? AP_KRW : AP_KR;
3726: pt_entry_t fl = (cache == PTE_CACHE) ? pte_cache_mode : 0;
1.47 thorpej 3727: pt_entry_t *pte;
1.41 thorpej 3728:
3729: KASSERT(((va | pa) & PGOFSET) == 0);
3730:
1.47 thorpej 3731: if ((pde[va >> PDSHIFT] & L1_MASK) != L1_PAGE)
3732: panic("pmap_map_entry: no L2 table for VA 0x%08lx", va);
3733:
3734: pte = (pt_entry_t *)
3735: kernel_pt_lookup(pde[va >> PDSHIFT] & PG_FRAME);
3736: if (pte == NULL)
3737: panic("pmap_map_entry: can't find L2 table for VA 0x%08lx", va);
3738:
1.41 thorpej 3739: pte[(va >> PGSHIFT) & 0x3ff] = L2_SPTE(pa & PG_FRAME, ap, fl);
1.42 thorpej 3740: }
3741:
3742: /*
3743: * pmap_link_l2pt:
3744: *
3745: * Link the L2 page table specified by "pa" into the L1
3746: * page table at the slot for "va".
3747: */
3748: void
1.46 thorpej 3749: pmap_link_l2pt(vaddr_t l1pt, vaddr_t va, pv_addr_t *l2pv)
1.42 thorpej 3750: {
3751: pd_entry_t *pde = (pd_entry_t *) l1pt;
3752: u_int slot = va >> PDSHIFT;
3753:
1.46 thorpej 3754: KASSERT((l2pv->pv_pa & PGOFSET) == 0);
3755:
3756: pde[slot + 0] = L1_PTE(l2pv->pv_pa + 0x000);
3757: pde[slot + 1] = L1_PTE(l2pv->pv_pa + 0x400);
3758: pde[slot + 2] = L1_PTE(l2pv->pv_pa + 0x800);
3759: pde[slot + 3] = L1_PTE(l2pv->pv_pa + 0xc00);
1.42 thorpej 3760:
1.46 thorpej 3761: SLIST_INSERT_HEAD(&kernel_pt_list, l2pv, pv_list);
1.43 thorpej 3762: }
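
/*
 * The four consecutive L1 entries written above mirror the hardware
 * layout: each L1 slot describes 1MB of VA and points at a 1KB coarse
 * L2 table, while the pmap hands out L2 tables as whole 4KB pages.
 * The 0x000/0x400/0x800/0xc00 offsets carve that page into its four
 * 1KB tables, so one physical page covers a naturally aligned 4MB
 * region of virtual space.
 */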
3763:
3764: /*
3765: * pmap_map_chunk:
3766: *
3767: * Map a chunk of memory using the most efficient mappings
3768: * possible (section, large page, small page) into the
3769: * provided L1 and L2 tables at the specified virtual address.
3770: */
3771: vsize_t
1.46 thorpej 3772: pmap_map_chunk(vaddr_t l1pt, vaddr_t va, paddr_t pa, vsize_t size,
3773: int prot, int cache)
1.43 thorpej 3774: {
3775: pd_entry_t *pde = (pd_entry_t *) l1pt;
3776: pt_entry_t ap = (prot & VM_PROT_WRITE) ? AP_KRW : AP_KR;
3777: pt_entry_t fl = (cache == PTE_CACHE) ? pte_cache_mode : 0;
1.46 thorpej 3778: pt_entry_t *pte;
1.43 thorpej 3779: vsize_t resid;
3780: int i;
3781:
3782: resid = (size + (NBPG - 1)) & ~(NBPG - 1);
3783:
1.44 thorpej 3784: if (l1pt == 0)
3785: panic("pmap_map_chunk: no L1 table provided");
3786:
1.43 thorpej 3787: #ifdef VERBOSE_INIT_ARM
3788: printf("pmap_map_chunk: pa=0x%lx va=0x%lx size=0x%lx resid=0x%lx "
3789: "prot=0x%x cache=%d\n", pa, va, size, resid, prot, cache);
3790: #endif
3791:
3792: size = resid;
3793:
3794: while (resid > 0) {
3795: /* See if we can use a section mapping. */
1.44 thorpej 3796: if (((pa | va) & (L1_SEC_SIZE - 1)) == 0 &&
1.43 thorpej 3797: resid >= L1_SEC_SIZE) {
3798: #ifdef VERBOSE_INIT_ARM
3799: printf("S");
3800: #endif
3801: pde[va >> PDSHIFT] = L1_SECPTE(pa, ap, fl);
3802: va += L1_SEC_SIZE;
3803: pa += L1_SEC_SIZE;
3804: resid -= L1_SEC_SIZE;
3805: continue;
3806: }
1.45 thorpej 3807:
3808: /*
3809: * Ok, we're going to use an L2 table. Make sure
3810: * one is actually in the corresponding L1 slot
3811: * for the current VA.
3812: */
3813: if ((pde[va >> PDSHIFT] & L1_MASK) != L1_PAGE)
1.46 thorpej 3814: panic("pmap_map_chunk: no L2 table for VA 0x%08lx", va);
3815:
3816: pte = (pt_entry_t *)
3817: kernel_pt_lookup(pde[va >> PDSHIFT] & PG_FRAME);
3818: if (pte == NULL)
3819: 			panic("pmap_map_chunk: can't find L2 table for VA "
3820: 			    "0x%08lx", va);
1.43 thorpej 3821:
3822: /* See if we can use a L2 large page mapping. */
3823: if (((pa | va) & (L2_LPAGE_SIZE - 1)) == 0 &&
3824: resid >= L2_LPAGE_SIZE) {
3825: #ifdef VERBOSE_INIT_ARM
3826: printf("L");
3827: #endif
3828: for (i = 0; i < 16; i++) {
3829: pte[((va >> PGSHIFT) & 0x3f0) + i] =
3830: L2_LPTE(pa, ap, fl);
3831: }
3832: va += L2_LPAGE_SIZE;
3833: pa += L2_LPAGE_SIZE;
3834: resid -= L2_LPAGE_SIZE;
3835: continue;
3836: }
3837:
3838: /* Use a small page mapping. */
3839: #ifdef VERBOSE_INIT_ARM
3840: printf("P");
3841: #endif
3842: pte[(va >> PGSHIFT) & 0x3ff] = L2_SPTE(pa, ap, fl);
3843: va += NBPG;
3844: pa += NBPG;
3845: resid -= NBPG;
3846: }
3847: #ifdef VERBOSE_INIT_ARM
3848: printf("\n");
3849: #endif
3850: return (size);
1.40 thorpej 3851: }
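
/*
 * A minimal bootstrap sketch with made-up symbols (kernel_l1pt,
 * kernel_pt_table, KERNEL_BASE, physical_start and kernel_size stand
 * in for whatever a port's initarm() really uses): the L2 table is
 * first linked into the L1 and onto kernel_pt_list by pmap_link_l2pt(),
 * after which pmap_map_chunk() can find it through kernel_pt_lookup()
 * while choosing section, large-page or small-page mappings.
 */
#if 0	/* illustrative only */
	pmap_link_l2pt(kernel_l1pt.pv_va, KERNEL_BASE, &kernel_pt_table[0]);
	pmap_map_chunk(kernel_l1pt.pv_va, KERNEL_BASE, physical_start,
	    kernel_size, VM_PROT_READ | VM_PROT_WRITE, PTE_CACHE);
#endif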