Annotation of src/sys/arch/arm/arm32/pmap.c, Revision 1.30.2.18
1.30.2.18! nathanw 1: /* $NetBSD: pmap.c,v 1.30.2.17 2002/11/11 21:56:35 nathanw Exp $ */
1.30.2.2 thorpej 2:
3: /*
1.30.2.6 nathanw 4: * Copyright (c) 2002 Wasabi Systems, Inc.
1.30.2.2 thorpej 5: * Copyright (c) 2001 Richard Earnshaw
1.30.2.18! nathanw 6: * Copyright (c) 2001-2002 Christopher Gilbert
1.30.2.2 thorpej 7: * All rights reserved.
 8: *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
9: * 1. Redistributions of source code must retain the above copyright
10: * notice, this list of conditions and the following disclaimer.
11: * 2. Redistributions in binary form must reproduce the above copyright
12: * notice, this list of conditions and the following disclaimer in the
13: * documentation and/or other materials provided with the distribution.
14: * 3. The name of the company nor the name of the author may be used to
15: * endorse or promote products derived from this software without specific
16: * prior written permission.
17: *
18: * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
19: * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
20: * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21: * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
22: * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
23: * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
24: * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25: * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26: * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27: * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28: * SUCH DAMAGE.
29: */
30:
31: /*-
32: * Copyright (c) 1999 The NetBSD Foundation, Inc.
33: * All rights reserved.
34: *
35: * This code is derived from software contributed to The NetBSD Foundation
36: * by Charles M. Hannum.
37: *
38: * Redistribution and use in source and binary forms, with or without
39: * modification, are permitted provided that the following conditions
40: * are met:
41: * 1. Redistributions of source code must retain the above copyright
42: * notice, this list of conditions and the following disclaimer.
43: * 2. Redistributions in binary form must reproduce the above copyright
44: * notice, this list of conditions and the following disclaimer in the
45: * documentation and/or other materials provided with the distribution.
46: * 3. All advertising materials mentioning features or use of this software
47: * must display the following acknowledgement:
48: * This product includes software developed by the NetBSD
49: * Foundation, Inc. and its contributors.
50: * 4. Neither the name of The NetBSD Foundation nor the names of its
51: * contributors may be used to endorse or promote products derived
52: * from this software without specific prior written permission.
53: *
54: * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
55: * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
56: * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
57: * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
58: * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
59: * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
60: * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
61: * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
62: * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
63: * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
64: * POSSIBILITY OF SUCH DAMAGE.
65: */
66:
67: /*
68: * Copyright (c) 1994-1998 Mark Brinicombe.
69: * Copyright (c) 1994 Brini.
70: * All rights reserved.
71: *
72: * This code is derived from software written for Brini by Mark Brinicombe
73: *
74: * Redistribution and use in source and binary forms, with or without
75: * modification, are permitted provided that the following conditions
76: * are met:
77: * 1. Redistributions of source code must retain the above copyright
78: * notice, this list of conditions and the following disclaimer.
79: * 2. Redistributions in binary form must reproduce the above copyright
80: * notice, this list of conditions and the following disclaimer in the
81: * documentation and/or other materials provided with the distribution.
82: * 3. All advertising materials mentioning features or use of this software
83: * must display the following acknowledgement:
84: * This product includes software developed by Mark Brinicombe.
85: * 4. The name of the author may not be used to endorse or promote products
86: * derived from this software without specific prior written permission.
87: *
88: * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
89: * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
90: * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
91: * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
92: * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
93: * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
94: * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
95: * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
96: * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 97: *
98: * RiscBSD kernel project
99: *
100: * pmap.c
101: *
 102: * Machine dependent vm stuff
103: *
104: * Created : 20/09/94
105: */
106:
107: /*
108: * Performance improvements, UVM changes, overhauls and part-rewrites
109: * were contributed by Neil A. Carson <neil@causality.com>.
110: */
111:
112: /*
113: * The dram block info is currently referenced from the bootconfig.
114: * This should be placed in a separate structure.
115: */
116:
117: /*
118: * Special compilation symbols
119: * PMAP_DEBUG - Build in pmap_debug_level code
120: */
121:
122: /* Include header files */
123:
124: #include "opt_pmap_debug.h"
125: #include "opt_ddb.h"
126:
127: #include <sys/types.h>
128: #include <sys/param.h>
129: #include <sys/kernel.h>
130: #include <sys/systm.h>
131: #include <sys/proc.h>
132: #include <sys/malloc.h>
133: #include <sys/user.h>
134: #include <sys/pool.h>
135: #include <sys/cdefs.h>
136:
137: #include <uvm/uvm.h>
138:
139: #include <machine/bootconfig.h>
140: #include <machine/bus.h>
141: #include <machine/pmap.h>
142: #include <machine/pcb.h>
143: #include <machine/param.h>
1.30.2.3 nathanw 144: #include <arm/arm32/katelib.h>
1.30.2.2 thorpej 145:
1.30.2.18! nathanw 146: __KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.30.2.17 2002/11/11 21:56:35 nathanw Exp $");
1.30.2.16 nathanw 147:
1.30.2.2 thorpej 148: #ifdef PMAP_DEBUG
149: #define PDEBUG(_lev_,_stat_) \
150: if (pmap_debug_level >= (_lev_)) \
151: ((_stat_))
152: int pmap_debug_level = -2;
1.30.2.6 nathanw 153: void pmap_dump_pvlist(vaddr_t phys, char *m);
1.30.2.2 thorpej 154:
155: /*
156: * for switching to potentially finer grained debugging
157: */
158: #define PDB_FOLLOW 0x0001
159: #define PDB_INIT 0x0002
160: #define PDB_ENTER 0x0004
161: #define PDB_REMOVE 0x0008
162: #define PDB_CREATE 0x0010
163: #define PDB_PTPAGE 0x0020
1.30.2.6 nathanw 164: #define PDB_GROWKERN 0x0040
1.30.2.2 thorpej 165: #define PDB_BITS 0x0080
166: #define PDB_COLLECT 0x0100
167: #define PDB_PROTECT 0x0200
1.30.2.6 nathanw 168: #define PDB_MAP_L1 0x0400
1.30.2.2 thorpej 169: #define PDB_BOOTSTRAP 0x1000
170: #define PDB_PARANOIA 0x2000
171: #define PDB_WIRING 0x4000
172: #define PDB_PVDUMP 0x8000
173:
174: int debugmap = 0;
175: int pmapdebug = PDB_PARANOIA | PDB_FOLLOW;
176: #define NPDEBUG(_lev_,_stat_) \
177: if (pmapdebug & (_lev_)) \
178: ((_stat_))
179:
180: #else /* PMAP_DEBUG */
181: #define PDEBUG(_lev_,_stat_) /* Nothing */
1.30.2.6 nathanw 182: #define NPDEBUG(_lev_,_stat_) /* Nothing */
1.30.2.2 thorpej 183: #endif /* PMAP_DEBUG */
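/*
 * Example (illustrative): with PMAP_DEBUG defined,
 *
 *	NPDEBUG(PDB_ENTER, printf("pmap_enter: va=%08lx\n", va));
 *
 * prints only when PDB_ENTER is set in pmapdebug, and PDEBUG(1, ...)
 * only when pmap_debug_level >= 1; without PMAP_DEBUG both macros
 * compile away to nothing.
 */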
184:
185: struct pmap kernel_pmap_store;
186:
187: /*
1.30.2.6 nathanw 188: * linked list of all non-kernel pmaps
189: */
190:
191: static LIST_HEAD(, pmap) pmaps;
192:
193: /*
1.30.2.2 thorpej 194: * pool that pmap structures are allocated from
195: */
196:
197: struct pool pmap_pmap_pool;
198:
1.30.2.14 thorpej 199: /*
200: * pool/cache that PT-PT's are allocated from
201: */
202:
203: struct pool pmap_ptpt_pool;
204: struct pool_cache pmap_ptpt_cache;
205: u_int pmap_ptpt_cache_generation;
206:
207: static void *pmap_ptpt_page_alloc(struct pool *, int);
208: static void pmap_ptpt_page_free(struct pool *, void *);
209:
210: struct pool_allocator pmap_ptpt_allocator = {
211: pmap_ptpt_page_alloc, pmap_ptpt_page_free,
212: };
213:
214: static int pmap_ptpt_ctor(void *, void *, int);
215:
1.30.2.6 nathanw 216: static pt_entry_t *csrc_pte, *cdst_pte;
217: static vaddr_t csrcp, cdstp;
218:
1.30.2.2 thorpej 219: char *memhook;
220: extern caddr_t msgbufaddr;
221:
222: boolean_t pmap_initialized = FALSE; /* Has pmap_init completed? */
223: /*
224: * locking data structures
225: */
226:
227: static struct lock pmap_main_lock;
228: static struct simplelock pvalloc_lock;
1.30.2.6 nathanw 229: static struct simplelock pmaps_lock;
1.30.2.2 thorpej 230: #ifdef LOCKDEBUG
231: #define PMAP_MAP_TO_HEAD_LOCK() \
232: (void) spinlockmgr(&pmap_main_lock, LK_SHARED, NULL)
233: #define PMAP_MAP_TO_HEAD_UNLOCK() \
234: (void) spinlockmgr(&pmap_main_lock, LK_RELEASE, NULL)
235:
236: #define PMAP_HEAD_TO_MAP_LOCK() \
237: (void) spinlockmgr(&pmap_main_lock, LK_EXCLUSIVE, NULL)
238: #define PMAP_HEAD_TO_MAP_UNLOCK() \
239: (void) spinlockmgr(&pmap_main_lock, LK_RELEASE, NULL)
240: #else
241: #define PMAP_MAP_TO_HEAD_LOCK() /* nothing */
242: #define PMAP_MAP_TO_HEAD_UNLOCK() /* nothing */
243: #define PMAP_HEAD_TO_MAP_LOCK() /* nothing */
244: #define PMAP_HEAD_TO_MAP_UNLOCK() /* nothing */
245: #endif /* LOCKDEBUG */
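/*
 * A sketch of the intent behind the two directions: map-to-PV-head
 * traversals take pmap_main_lock shared, PV-head-to-map traversals
 * take it exclusive, so walks in opposite directions serialise
 * against each other rather than deadlocking on the finer locks.
 */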
246:
247: /*
248: * pv_page management structures: locked by pvalloc_lock
249: */
250:
251: TAILQ_HEAD(pv_pagelist, pv_page);
252: static struct pv_pagelist pv_freepages; /* list of pv_pages with free entrys */
253: static struct pv_pagelist pv_unusedpgs; /* list of unused pv_pages */
254: static int pv_nfpvents; /* # of free pv entries */
255: static struct pv_page *pv_initpage; /* bootstrap page from kernel_map */
256: static vaddr_t pv_cachedva; /* cached VA for later use */
257:
258: #define PVE_LOWAT (PVE_PER_PVPAGE / 2) /* free pv_entry low water mark */
259: #define PVE_HIWAT (PVE_LOWAT + (PVE_PER_PVPAGE * 2))
260: /* high water mark */
261:
262: /*
263: * local prototypes
264: */
265:
266: static struct pv_entry *pmap_add_pvpage __P((struct pv_page *, boolean_t));
267: static struct pv_entry *pmap_alloc_pv __P((struct pmap *, int)); /* see codes below */
268: #define ALLOCPV_NEED 0 /* need PV now */
269: #define ALLOCPV_TRY 1 /* just try to allocate, don't steal */
270: #define ALLOCPV_NONEED 2 /* don't need PV, just growing cache */
271: static struct pv_entry *pmap_alloc_pvpage __P((struct pmap *, int));
1.30.2.6 nathanw 272: static void pmap_enter_pv __P((struct vm_page *,
1.30.2.2 thorpej 273: struct pv_entry *, struct pmap *,
274: vaddr_t, struct vm_page *, int));
275: static void pmap_free_pv __P((struct pmap *, struct pv_entry *));
276: static void pmap_free_pvs __P((struct pmap *, struct pv_entry *));
277: static void pmap_free_pv_doit __P((struct pv_entry *));
278: static void pmap_free_pvpage __P((void));
279: static boolean_t pmap_is_curpmap __P((struct pmap *));
1.30.2.6 nathanw 280: static struct pv_entry *pmap_remove_pv __P((struct vm_page *, struct pmap *,
1.30.2.2 thorpej 281: vaddr_t));
282: #define PMAP_REMOVE_ALL 0 /* remove all mappings */
283: #define PMAP_REMOVE_SKIPWIRED 1 /* skip wired mappings */
284:
1.30.2.6 nathanw 285: static u_int pmap_modify_pv __P((struct pmap *, vaddr_t, struct vm_page *,
1.30.2.4 nathanw 286: u_int, u_int));
287:
1.30.2.6 nathanw 288: /*
 289: * Structure that describes an L1 table.
290: */
291: struct l1pt {
292: SIMPLEQ_ENTRY(l1pt) pt_queue; /* Queue pointers */
293: struct pglist pt_plist; /* Allocated page list */
294: vaddr_t pt_va; /* Allocated virtual address */
295: int pt_flags; /* Flags */
296: };
297: #define PTFLAG_STATIC 0x01 /* Statically allocated */
298: #define PTFLAG_KPT 0x02 /* Kernel pt's are mapped */
299: #define PTFLAG_CLEAN 0x04 /* L1 is clean */
300:
1.30.2.4 nathanw 301: static void pmap_free_l1pt __P((struct l1pt *));
302: static int pmap_allocpagedir __P((struct pmap *));
303: static int pmap_clean_page __P((struct pv_entry *, boolean_t));
1.30.2.17 nathanw 304: static void pmap_page_remove __P((struct vm_page *));
1.30.2.4 nathanw 305:
1.30.2.6 nathanw 306: static struct vm_page *pmap_alloc_ptp __P((struct pmap *, vaddr_t));
307: static struct vm_page *pmap_get_ptp __P((struct pmap *, vaddr_t));
308: __inline static void pmap_clearbit __P((struct vm_page *, unsigned int));
1.30.2.2 thorpej 309:
310: extern paddr_t physical_start;
311: extern paddr_t physical_end;
312: extern unsigned int free_pages;
313: extern int max_processes;
314:
1.30.2.6 nathanw 315: vaddr_t virtual_avail;
1.30.2.2 thorpej 316: vaddr_t virtual_end;
1.30.2.6 nathanw 317: vaddr_t pmap_curmaxkvaddr;
1.30.2.2 thorpej 318:
319: vaddr_t avail_start;
320: vaddr_t avail_end;
321:
322: extern pv_addr_t systempage;
323:
324: /* Variables used by the L1 page table queue code */
325: SIMPLEQ_HEAD(l1pt_queue, l1pt);
1.30.2.6 nathanw 326: static struct l1pt_queue l1pt_static_queue; /* head of our static l1 queue */
327: static int l1pt_static_queue_count; /* items in the static l1 queue */
328: static int l1pt_static_create_count; /* static l1 items created */
329: static struct l1pt_queue l1pt_queue; /* head of our l1 queue */
330: static int l1pt_queue_count; /* items in the l1 queue */
331: static int l1pt_create_count; /* stat - L1's create count */
332: static int l1pt_reuse_count; /* stat - L1's reused count */
1.30.2.2 thorpej 333:
334: /* Local function prototypes (not used outside this file) */
335: void pmap_pinit __P((struct pmap *));
336: void pmap_freepagedir __P((struct pmap *));
337:
338: /* Other function prototypes */
339: extern void bzero_page __P((vaddr_t));
340: extern void bcopy_page __P((vaddr_t, vaddr_t));
341:
342: struct l1pt *pmap_alloc_l1pt __P((void));
343: static __inline void pmap_map_in_l1 __P((struct pmap *pmap, vaddr_t va,
1.30.2.14 thorpej 344: vaddr_t l2pa, int));
1.30.2.2 thorpej 345:
346: static pt_entry_t *pmap_map_ptes __P((struct pmap *));
347: static void pmap_unmap_ptes __P((struct pmap *));
348:
1.30.2.6 nathanw 349: __inline static void pmap_vac_me_harder __P((struct pmap *, struct vm_page *,
1.30.2.2 thorpej 350: pt_entry_t *, boolean_t));
1.30.2.6 nathanw 351: static void pmap_vac_me_kpmap __P((struct pmap *, struct vm_page *,
1.30.2.2 thorpej 352: pt_entry_t *, boolean_t));
1.30.2.6 nathanw 353: static void pmap_vac_me_user __P((struct pmap *, struct vm_page *,
1.30.2.2 thorpej 354: pt_entry_t *, boolean_t));
355:
356: /*
357: * real definition of pv_entry.
358: */
359:
360: struct pv_entry {
361: struct pv_entry *pv_next; /* next pv_entry */
362: struct pmap *pv_pmap; /* pmap where mapping lies */
363: vaddr_t pv_va; /* virtual address for mapping */
364: int pv_flags; /* flags */
365: struct vm_page *pv_ptp; /* vm_page for the ptp */
366: };
367:
368: /*
369: * pv_entrys are dynamically allocated in chunks from a single page.
370: * we keep track of how many pv_entrys are in use for each page and
371: * we can free pv_entry pages if needed. there is one lock for the
372: * entire allocation system.
373: */
374:
375: struct pv_page_info {
376: TAILQ_ENTRY(pv_page) pvpi_list;
377: struct pv_entry *pvpi_pvfree;
378: int pvpi_nfree;
379: };
380:
381: /*
382: * number of pv_entry's in a pv_page
 383: * (note: won't work on systems where NBPG isn't a constant)
384: */
385:
386: #define PVE_PER_PVPAGE ((NBPG - sizeof(struct pv_page_info)) / \
387: sizeof(struct pv_entry))
388:
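/*
 * Illustrative arithmetic, assuming ILP32 and NBPG = 4096: struct
 * pv_page_info is 16 bytes (two TAILQ pointers, the free-list head
 * and a count) and struct pv_entry is 20 bytes, so PVE_PER_PVPAGE
 * works out to (4096 - 16) / 20 = 204 entries per page.
 */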
389: /*
390: * a pv_page: where pv_entrys are allocated from
391: */
392:
393: struct pv_page {
394: struct pv_page_info pvinfo;
395: struct pv_entry pvents[PVE_PER_PVPAGE];
396: };
397:
398: #ifdef MYCROFT_HACK
399: int mycroft_hack = 0;
400: #endif
401:
402: /* Function to set the debug level of the pmap code */
403:
404: #ifdef PMAP_DEBUG
405: void
1.30.2.6 nathanw 406: pmap_debug(int level)
1.30.2.2 thorpej 407: {
408: pmap_debug_level = level;
409: printf("pmap_debug: level=%d\n", pmap_debug_level);
410: }
411: #endif /* PMAP_DEBUG */
412:
413: __inline static boolean_t
414: pmap_is_curpmap(struct pmap *pmap)
415: {
1.30.2.6 nathanw 416:
1.30.2.10 nathanw 417: if ((curproc && curproc->p_vmspace->vm_map.pmap == pmap) ||
1.30.2.6 nathanw 418: pmap == pmap_kernel())
419: return (TRUE);
420:
421: return (FALSE);
1.30.2.2 thorpej 422: }
1.30.2.6 nathanw 423:
1.30.2.2 thorpej 424: /*
1.30.2.14 thorpej 425: * PTE_SYNC_CURRENT:
426: *
427: * Make sure the pte is flushed to RAM. If the pmap is
428: * not the current pmap, then also evict the pte from
429: * any cache lines.
430: */
431: #define PTE_SYNC_CURRENT(pmap, pte) \
432: do { \
433: if (pmap_is_curpmap(pmap)) \
434: PTE_SYNC(pte); \
435: else \
436: PTE_FLUSH(pte); \
437: } while (/*CONSTCOND*/0)
438:
439: /*
440: * PTE_FLUSH_ALT:
441: *
442: * Make sure the pte is not in any cache lines. We expect
443: * this to be used only when a pte has not been modified.
444: */
445: #define PTE_FLUSH_ALT(pmap, pte) \
446: do { \
447: if (pmap_is_curpmap(pmap) == 0) \
448: PTE_FLUSH(pte); \
449: } while (/*CONSTCOND*/0)
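/*
 * Example (illustrative): after rewriting an entry in some pmap's
 * page table,
 *
 *	*pte = L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, prot);
 *	PTE_SYNC_CURRENT(pmap, pte);
 *
 * pushes the word out to RAM if pmap is current, or evicts the cache
 * line outright if the table belongs to another pmap.
 */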
450:
451: /*
1.30.2.2 thorpej 452: * p v _ e n t r y f u n c t i o n s
453: */
454:
455: /*
456: * pv_entry allocation functions:
457: * the main pv_entry allocation functions are:
458: * pmap_alloc_pv: allocate a pv_entry structure
459: * pmap_free_pv: free one pv_entry
460: * pmap_free_pvs: free a list of pv_entrys
461: *
462: * the rest are helper functions
463: */
464:
465: /*
466: * pmap_alloc_pv: inline function to allocate a pv_entry structure
467: * => we lock pvalloc_lock
468: * => if we fail, we call out to pmap_alloc_pvpage
469: * => 3 modes:
470: * ALLOCPV_NEED = we really need a pv_entry, even if we have to steal it
471: * ALLOCPV_TRY = we want a pv_entry, but not enough to steal
472: * ALLOCPV_NONEED = we are trying to grow our free list, don't really need
473: * one now
474: *
475: * "try" is for optional functions like pmap_copy().
476: */
477:
478: __inline static struct pv_entry *
1.30.2.6 nathanw 479: pmap_alloc_pv(struct pmap *pmap, int mode)
1.30.2.2 thorpej 480: {
481: struct pv_page *pvpage;
482: struct pv_entry *pv;
483:
484: simple_lock(&pvalloc_lock);
485:
1.30.2.6 nathanw 486: pvpage = TAILQ_FIRST(&pv_freepages);
487:
488: if (pvpage != NULL) {
1.30.2.2 thorpej 489: pvpage->pvinfo.pvpi_nfree--;
490: if (pvpage->pvinfo.pvpi_nfree == 0) {
491: /* nothing left in this one? */
492: TAILQ_REMOVE(&pv_freepages, pvpage, pvinfo.pvpi_list);
493: }
494: pv = pvpage->pvinfo.pvpi_pvfree;
1.30.2.6 nathanw 495: KASSERT(pv);
1.30.2.2 thorpej 496: pvpage->pvinfo.pvpi_pvfree = pv->pv_next;
497: pv_nfpvents--; /* took one from pool */
498: } else {
499: pv = NULL; /* need more of them */
500: }
501:
502: /*
503: * if below low water mark or we didn't get a pv_entry we try and
504: * create more pv_entrys ...
505: */
506:
507: if (pv_nfpvents < PVE_LOWAT || pv == NULL) {
508: if (pv == NULL)
509: pv = pmap_alloc_pvpage(pmap, (mode == ALLOCPV_TRY) ?
510: mode : ALLOCPV_NEED);
511: else
512: (void) pmap_alloc_pvpage(pmap, ALLOCPV_NONEED);
513: }
514:
515: simple_unlock(&pvalloc_lock);
516: return(pv);
517: }
518:
519: /*
520: * pmap_alloc_pvpage: maybe allocate a new pvpage
521: *
522: * if need_entry is false: try and allocate a new pv_page
523: * if need_entry is true: try and allocate a new pv_page and return a
524: * new pv_entry from it. if we are unable to allocate a pv_page
525: * we make a last ditch effort to steal a pv_page from some other
526: * mapping. if that fails, we panic...
527: *
528: * => we assume that the caller holds pvalloc_lock
529: */
530:
531: static struct pv_entry *
1.30.2.6 nathanw 532: pmap_alloc_pvpage(struct pmap *pmap, int mode)
1.30.2.2 thorpej 533: {
534: struct vm_page *pg;
535: struct pv_page *pvpage;
536: struct pv_entry *pv;
537: int s;
538:
539: /*
540: * if we need_entry and we've got unused pv_pages, allocate from there
541: */
542:
1.30.2.6 nathanw 543: pvpage = TAILQ_FIRST(&pv_unusedpgs);
544: if (mode != ALLOCPV_NONEED && pvpage != NULL) {
1.30.2.2 thorpej 545:
546: /* move it to pv_freepages list */
547: TAILQ_REMOVE(&pv_unusedpgs, pvpage, pvinfo.pvpi_list);
548: TAILQ_INSERT_HEAD(&pv_freepages, pvpage, pvinfo.pvpi_list);
549:
550: /* allocate a pv_entry */
551: pvpage->pvinfo.pvpi_nfree--; /* can't go to zero */
552: pv = pvpage->pvinfo.pvpi_pvfree;
1.30.2.6 nathanw 553: KASSERT(pv);
1.30.2.2 thorpej 554: pvpage->pvinfo.pvpi_pvfree = pv->pv_next;
555:
556: pv_nfpvents--; /* took one from pool */
557: return(pv);
558: }
559:
560: /*
561: * see if we've got a cached unmapped VA that we can map a page in.
562: * if not, try to allocate one.
563: */
564:
565:
566: if (pv_cachedva == 0) {
567: s = splvm();
568: pv_cachedva = uvm_km_kmemalloc(kmem_map, NULL,
569: PAGE_SIZE, UVM_KMF_TRYLOCK|UVM_KMF_VALLOC);
570: splx(s);
571: if (pv_cachedva == 0) {
572: return (NULL);
573: }
574: }
575:
576: pg = uvm_pagealloc(NULL, pv_cachedva - vm_map_min(kernel_map), NULL,
577: UVM_PGA_USERESERVE);
578:
579: if (pg == NULL)
580: return (NULL);
1.30.2.6 nathanw 581: pg->flags &= ~PG_BUSY; /* never busy */
1.30.2.2 thorpej 582:
583: /*
584: * add a mapping for our new pv_page and free its entrys (save one!)
585: *
586: * NOTE: If we are allocating a PV page for the kernel pmap, the
587: * pmap is already locked! (...but entering the mapping is safe...)
588: */
589:
1.30.2.6 nathanw 590: pmap_kenter_pa(pv_cachedva, VM_PAGE_TO_PHYS(pg),
591: VM_PROT_READ|VM_PROT_WRITE);
1.30.2.2 thorpej 592: pmap_update(pmap_kernel());
593: pvpage = (struct pv_page *) pv_cachedva;
594: pv_cachedva = 0;
595: return (pmap_add_pvpage(pvpage, mode != ALLOCPV_NONEED));
596: }
597:
598: /*
599: * pmap_add_pvpage: add a pv_page's pv_entrys to the free list
600: *
601: * => caller must hold pvalloc_lock
602: * => if need_entry is true, we allocate and return one pv_entry
603: */
604:
605: static struct pv_entry *
1.30.2.6 nathanw 606: pmap_add_pvpage(struct pv_page *pvp, boolean_t need_entry)
1.30.2.2 thorpej 607: {
608: int tofree, lcv;
609:
610: /* do we need to return one? */
611: tofree = (need_entry) ? PVE_PER_PVPAGE - 1 : PVE_PER_PVPAGE;
612:
613: pvp->pvinfo.pvpi_pvfree = NULL;
614: pvp->pvinfo.pvpi_nfree = tofree;
615: for (lcv = 0 ; lcv < tofree ; lcv++) {
616: pvp->pvents[lcv].pv_next = pvp->pvinfo.pvpi_pvfree;
617: pvp->pvinfo.pvpi_pvfree = &pvp->pvents[lcv];
618: }
619: if (need_entry)
620: TAILQ_INSERT_TAIL(&pv_freepages, pvp, pvinfo.pvpi_list);
621: else
622: TAILQ_INSERT_TAIL(&pv_unusedpgs, pvp, pvinfo.pvpi_list);
623: pv_nfpvents += tofree;
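	/*
	 * Note: when need_entry is true the loop above threaded all but
	 * the final slot, so pvents[lcv] (lcv == tofree) is the one
	 * unthreaded pv_entry left to hand back to the caller.
	 */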
624: return((need_entry) ? &pvp->pvents[lcv] : NULL);
625: }
626:
627: /*
628: * pmap_free_pv_doit: actually free a pv_entry
629: *
630: * => do not call this directly! instead use either
631: * 1. pmap_free_pv ==> free a single pv_entry
632: * 2. pmap_free_pvs => free a list of pv_entrys
633: * => we must be holding pvalloc_lock
634: */
635:
636: __inline static void
1.30.2.6 nathanw 637: pmap_free_pv_doit(struct pv_entry *pv)
1.30.2.2 thorpej 638: {
639: struct pv_page *pvp;
640:
641: pvp = (struct pv_page *) arm_trunc_page((vaddr_t)pv);
642: pv_nfpvents++;
643: pvp->pvinfo.pvpi_nfree++;
644:
645: /* nfree == 1 => fully allocated page just became partly allocated */
646: if (pvp->pvinfo.pvpi_nfree == 1) {
647: TAILQ_INSERT_HEAD(&pv_freepages, pvp, pvinfo.pvpi_list);
648: }
649:
650: /* free it */
651: pv->pv_next = pvp->pvinfo.pvpi_pvfree;
652: pvp->pvinfo.pvpi_pvfree = pv;
653:
654: /*
655: * are all pv_page's pv_entry's free? move it to unused queue.
656: */
657:
658: if (pvp->pvinfo.pvpi_nfree == PVE_PER_PVPAGE) {
659: TAILQ_REMOVE(&pv_freepages, pvp, pvinfo.pvpi_list);
660: TAILQ_INSERT_HEAD(&pv_unusedpgs, pvp, pvinfo.pvpi_list);
661: }
662: }
663:
664: /*
665: * pmap_free_pv: free a single pv_entry
666: *
667: * => we gain the pvalloc_lock
668: */
669:
670: __inline static void
1.30.2.6 nathanw 671: pmap_free_pv(struct pmap *pmap, struct pv_entry *pv)
1.30.2.2 thorpej 672: {
673: simple_lock(&pvalloc_lock);
674: pmap_free_pv_doit(pv);
675:
676: /*
677: * Can't free the PV page if the PV entries were associated with
678: * the kernel pmap; the pmap is already locked.
679: */
1.30.2.6 nathanw 680: if (pv_nfpvents > PVE_HIWAT && TAILQ_FIRST(&pv_unusedpgs) != NULL &&
1.30.2.2 thorpej 681: pmap != pmap_kernel())
682: pmap_free_pvpage();
683:
684: simple_unlock(&pvalloc_lock);
685: }
686:
687: /*
688: * pmap_free_pvs: free a list of pv_entrys
689: *
690: * => we gain the pvalloc_lock
691: */
692:
693: __inline static void
1.30.2.6 nathanw 694: pmap_free_pvs(struct pmap *pmap, struct pv_entry *pvs)
1.30.2.2 thorpej 695: {
696: struct pv_entry *nextpv;
697:
698: simple_lock(&pvalloc_lock);
699:
700: for ( /* null */ ; pvs != NULL ; pvs = nextpv) {
701: nextpv = pvs->pv_next;
702: pmap_free_pv_doit(pvs);
703: }
704:
705: /*
706: * Can't free the PV page if the PV entries were associated with
707: * the kernel pmap; the pmap is already locked.
708: */
1.30.2.6 nathanw 709: if (pv_nfpvents > PVE_HIWAT && TAILQ_FIRST(&pv_unusedpgs) != NULL &&
1.30.2.2 thorpej 710: pmap != pmap_kernel())
711: pmap_free_pvpage();
712:
713: simple_unlock(&pvalloc_lock);
714: }
715:
716:
717: /*
718: * pmap_free_pvpage: try and free an unused pv_page structure
719: *
720: * => assume caller is holding the pvalloc_lock and that
721: * there is a page on the pv_unusedpgs list
722: * => if we can't get a lock on the kmem_map we try again later
723: */
724:
725: static void
1.30.2.6 nathanw 726: pmap_free_pvpage(void)
1.30.2.2 thorpej 727: {
728: int s;
729: struct vm_map *map;
730: struct vm_map_entry *dead_entries;
731: struct pv_page *pvp;
732:
733: s = splvm(); /* protect kmem_map */
734:
1.30.2.6 nathanw 735: pvp = TAILQ_FIRST(&pv_unusedpgs);
1.30.2.2 thorpej 736:
737: /*
738: * note: watch out for pv_initpage which is allocated out of
739: * kernel_map rather than kmem_map.
740: */
741: if (pvp == pv_initpage)
742: map = kernel_map;
743: else
744: map = kmem_map;
745: if (vm_map_lock_try(map)) {
746:
747: /* remove pvp from pv_unusedpgs */
748: TAILQ_REMOVE(&pv_unusedpgs, pvp, pvinfo.pvpi_list);
749:
750: /* unmap the page */
751: dead_entries = NULL;
752: uvm_unmap_remove(map, (vaddr_t)pvp, ((vaddr_t)pvp) + PAGE_SIZE,
753: &dead_entries);
754: vm_map_unlock(map);
755:
756: if (dead_entries != NULL)
757: uvm_unmap_detach(dead_entries, 0);
758:
759: pv_nfpvents -= PVE_PER_PVPAGE; /* update free count */
760: }
761: if (pvp == pv_initpage)
762: /* no more initpage, we've freed it */
763: pv_initpage = NULL;
764:
765: splx(s);
766: }
767:
768: /*
769: * main pv_entry manipulation functions:
1.30.2.6 nathanw 770: * pmap_enter_pv: enter a mapping onto a vm_page list
 771: * pmap_remove_pv: remove a mapping from a vm_page list
1.30.2.2 thorpej 772: *
773: * NOTE: pmap_enter_pv expects to lock the pvh itself
 774: * pmap_remove_pv expects the caller to lock the pvh before calling
775: */
776:
777: /*
1.30.2.6 nathanw 778: * pmap_enter_pv: enter a mapping onto a vm_page list
1.30.2.2 thorpej 779: *
780: * => caller should hold the proper lock on pmap_main_lock
781: * => caller should have pmap locked
1.30.2.6 nathanw 782: * => we will gain the lock on the vm_page and allocate the new pv_entry
1.30.2.2 thorpej 783: * => caller should adjust ptp's wire_count before calling
784: * => caller should not adjust pmap's wire_count
785: */
786:
787: __inline static void
1.30.2.6 nathanw 788: pmap_enter_pv(struct vm_page *pg, struct pv_entry *pve, struct pmap *pmap,
789: vaddr_t va, struct vm_page *ptp, int flags)
1.30.2.2 thorpej 790: {
791: pve->pv_pmap = pmap;
792: pve->pv_va = va;
793: pve->pv_ptp = ptp; /* NULL for kernel pmap */
794: pve->pv_flags = flags;
1.30.2.6 nathanw 795: simple_lock(&pg->mdpage.pvh_slock); /* lock vm_page */
796: pve->pv_next = pg->mdpage.pvh_list; /* add to ... */
797: pg->mdpage.pvh_list = pve; /* ... locked list */
798: simple_unlock(&pg->mdpage.pvh_slock); /* unlock, done! */
1.30.2.7 nathanw 799: if (pve->pv_flags & PVF_WIRED)
1.30.2.2 thorpej 800: ++pmap->pm_stats.wired_count;
1.30.2.12 nathanw 801: #ifdef PMAP_ALIAS_DEBUG
802: {
803: int s = splhigh();
804: if (pve->pv_flags & PVF_WRITE)
805: pg->mdpage.rw_mappings++;
806: else
807: pg->mdpage.ro_mappings++;
808: if (pg->mdpage.rw_mappings != 0 &&
809: (pg->mdpage.kro_mappings != 0 || pg->mdpage.krw_mappings != 0)) {
810: printf("pmap_enter_pv: rw %u, kro %u, krw %u\n",
811: pg->mdpage.rw_mappings, pg->mdpage.kro_mappings,
812: pg->mdpage.krw_mappings);
813: }
814: splx(s);
815: }
816: #endif /* PMAP_ALIAS_DEBUG */
1.30.2.2 thorpej 817: }
818:
819: /*
820: * pmap_remove_pv: try to remove a mapping from a pv_list
821: *
822: * => caller should hold proper lock on pmap_main_lock
823: * => pmap should be locked
1.30.2.6 nathanw 824: * => caller should hold lock on vm_page [so that attrs can be adjusted]
1.30.2.2 thorpej 825: * => caller should adjust ptp's wire_count and free PTP if needed
826: * => caller should NOT adjust pmap's wire_count
827: * => we return the removed pve
828: */
829:
830: __inline static struct pv_entry *
1.30.2.6 nathanw 831: pmap_remove_pv(struct vm_page *pg, struct pmap *pmap, vaddr_t va)
1.30.2.2 thorpej 832: {
833: struct pv_entry *pve, **prevptr;
834:
1.30.2.6 nathanw 835: prevptr = &pg->mdpage.pvh_list; /* previous pv_entry pointer */
1.30.2.2 thorpej 836: pve = *prevptr;
837: while (pve) {
838: if (pve->pv_pmap == pmap && pve->pv_va == va) { /* match? */
839: *prevptr = pve->pv_next; /* remove it! */
1.30.2.7 nathanw 840: if (pve->pv_flags & PVF_WIRED)
1.30.2.2 thorpej 841: --pmap->pm_stats.wired_count;
1.30.2.12 nathanw 842: #ifdef PMAP_ALIAS_DEBUG
843: {
844: int s = splhigh();
845: if (pve->pv_flags & PVF_WRITE) {
846: KASSERT(pg->mdpage.rw_mappings != 0);
847: pg->mdpage.rw_mappings--;
848: } else {
849: KASSERT(pg->mdpage.ro_mappings != 0);
850: pg->mdpage.ro_mappings--;
851: }
852: splx(s);
853: }
854: #endif /* PMAP_ALIAS_DEBUG */
1.30.2.2 thorpej 855: break;
856: }
857: prevptr = &pve->pv_next; /* previous pointer */
858: pve = pve->pv_next; /* advance */
859: }
860: return(pve); /* return removed pve */
861: }
862:
863: /*
864: *
865: * pmap_modify_pv: Update pv flags
866: *
1.30.2.6 nathanw 867: * => caller should hold lock on vm_page [so that attrs can be adjusted]
1.30.2.2 thorpej 868: * => caller should NOT adjust pmap's wire_count
869: * => caller must call pmap_vac_me_harder() if writable status of a page
870: * may have changed.
871: * => we return the old flags
872: *
873: * Modify a physical-virtual mapping in the pv table
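 *
 * Example (illustrative): the update applied is
 * ((oflags & ~bic_mask) ^ eor_mask), so
 *
 *	oflags = pmap_modify_pv(pmap, va, pg, PVF_WIRED, 0);
 *
 * clears PVF_WIRED and returns the previous flags, while an eor_mask
 * of PVF_WRITE with a zero bic_mask would toggle the write bit.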
874: */
875:
1.30.2.6 nathanw 876: static /* __inline */ u_int
877: pmap_modify_pv(struct pmap *pmap, vaddr_t va, struct vm_page *pg,
878: u_int bic_mask, u_int eor_mask)
1.30.2.2 thorpej 879: {
880: struct pv_entry *npv;
881: u_int flags, oflags;
882:
883: /*
884: * There is at least one VA mapping this page.
885: */
886:
1.30.2.6 nathanw 887: for (npv = pg->mdpage.pvh_list; npv; npv = npv->pv_next) {
1.30.2.2 thorpej 888: if (pmap == npv->pv_pmap && va == npv->pv_va) {
889: oflags = npv->pv_flags;
890: npv->pv_flags = flags =
891: ((oflags & ~bic_mask) ^ eor_mask);
1.30.2.7 nathanw 892: if ((flags ^ oflags) & PVF_WIRED) {
893: if (flags & PVF_WIRED)
1.30.2.2 thorpej 894: ++pmap->pm_stats.wired_count;
895: else
896: --pmap->pm_stats.wired_count;
897: }
1.30.2.12 nathanw 898: #ifdef PMAP_ALIAS_DEBUG
899: {
900: int s = splhigh();
901: if ((flags ^ oflags) & PVF_WRITE) {
902: if (flags & PVF_WRITE) {
903: pg->mdpage.rw_mappings++;
904: pg->mdpage.ro_mappings--;
905: if (pg->mdpage.rw_mappings != 0 &&
906: (pg->mdpage.kro_mappings != 0 ||
907: pg->mdpage.krw_mappings != 0)) {
908: printf("pmap_modify_pv: rw %u, "
909: "kro %u, krw %u\n",
910: pg->mdpage.rw_mappings,
911: pg->mdpage.kro_mappings,
912: pg->mdpage.krw_mappings);
913: }
914: } else {
915: KASSERT(pg->mdpage.rw_mappings != 0);
916: pg->mdpage.rw_mappings--;
917: pg->mdpage.ro_mappings++;
918: }
919: }
920: splx(s);
921: }
922: #endif /* PMAP_ALIAS_DEBUG */
1.30.2.2 thorpej 923: return (oflags);
924: }
925: }
926: return (0);
927: }
928:
929: /*
930: * Map the specified level 2 pagetable into the level 1 page table for
931: * the given pmap to cover a chunk of virtual address space starting from the
932: * address specified.
933: */
1.30.2.14 thorpej 934: #define PMAP_PTP_SELFREF 0x01
935: #define PMAP_PTP_CACHEABLE 0x02
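/*
 * On the ARM MMU an L1 entry maps 1MB and a coarse L2 table is 1KB,
 * so the 4KB page holding the L2 tables is wired into four
 * consecutive L1 slots (l2pa + 0x000/0x400/0x800/0xc00 below).
 */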
936:
1.30.2.6 nathanw 937: static __inline void
1.30.2.14 thorpej 938: pmap_map_in_l1(struct pmap *pmap, vaddr_t va, paddr_t l2pa, int flags)
1.30.2.2 thorpej 939: {
940: vaddr_t ptva;
941:
1.30.2.14 thorpej 942: KASSERT((va & PD_OFFSET) == 0); /* XXX KDASSERT */
943:
1.30.2.2 thorpej 944: /* Calculate the index into the L1 page table. */
1.30.2.14 thorpej 945: ptva = va >> L1_S_SHIFT;
1.30.2.2 thorpej 946:
947: /* Map page table into the L1. */
1.30.2.7 nathanw 948: pmap->pm_pdir[ptva + 0] = L1_C_PROTO | (l2pa + 0x000);
949: pmap->pm_pdir[ptva + 1] = L1_C_PROTO | (l2pa + 0x400);
950: pmap->pm_pdir[ptva + 2] = L1_C_PROTO | (l2pa + 0x800);
951: pmap->pm_pdir[ptva + 3] = L1_C_PROTO | (l2pa + 0xc00);
1.30.2.14 thorpej 952: cpu_dcache_wb_range((vaddr_t) &pmap->pm_pdir[ptva + 0], 16);
1.30.2.2 thorpej 953:
954: /* Map the page table into the page table area. */
1.30.2.14 thorpej 955: if (flags & PMAP_PTP_SELFREF) {
1.30.2.7 nathanw 956: *((pt_entry_t *)(pmap->pm_vptpt + ptva)) = L2_S_PROTO | l2pa |
1.30.2.14 thorpej 957: L2_S_PROT(PTE_KERNEL, VM_PROT_READ|VM_PROT_WRITE) |
958: ((flags & PMAP_PTP_CACHEABLE) ? pte_l2_s_cache_mode : 0);
959: PTE_SYNC_CURRENT(pmap, (pt_entry_t *)(pmap->pm_vptpt + ptva));
960: }
1.30.2.2 thorpej 961: }
962:
963: #if 0
1.30.2.6 nathanw 964: static __inline void
965: pmap_unmap_in_l1(struct pmap *pmap, vaddr_t va)
1.30.2.2 thorpej 966: {
967: vaddr_t ptva;
968:
1.30.2.14 thorpej 969: KASSERT((va & PD_OFFSET) == 0); /* XXX KDASSERT */
970:
1.30.2.2 thorpej 971: /* Calculate the index into the L1 page table. */
1.30.2.14 thorpej 972: ptva = va >> L1_S_SHIFT;
1.30.2.2 thorpej 973:
974: /* Unmap page table from the L1. */
975: pmap->pm_pdir[ptva + 0] = 0;
976: pmap->pm_pdir[ptva + 1] = 0;
977: pmap->pm_pdir[ptva + 2] = 0;
978: pmap->pm_pdir[ptva + 3] = 0;
1.30.2.14 thorpej 979: cpu_dcache_wb_range((vaddr_t) &pmap->pm_pdir[ptva + 0], 16);
1.30.2.2 thorpej 980:
981: /* Unmap the page table from the page table area. */
982: *((pt_entry_t *)(pmap->pm_vptpt + ptva)) = 0;
1.30.2.14 thorpej 983: PTE_SYNC_CURRENT(pmap, (pt_entry_t *)(pmap->pm_vptpt + ptva));
1.30.2.2 thorpej 984: }
985: #endif
986:
987: /*
988: * Used to map a range of physical addresses into kernel
989: * virtual address space.
990: *
991: * For now, VM is already on, we only need to map the
992: * specified memory.
1.30.2.11 nathanw 993: *
994: * XXX This routine should eventually go away; it's only used
995: * XXX by machine-dependent crash dump code.
1.30.2.2 thorpej 996: */
997: vaddr_t
1.30.2.6 nathanw 998: pmap_map(vaddr_t va, paddr_t spa, paddr_t epa, vm_prot_t prot)
1.30.2.2 thorpej 999: {
1.30.2.11 nathanw 1000: pt_entry_t *pte;
1001:
1.30.2.2 thorpej 1002: while (spa < epa) {
1.30.2.11 nathanw 1003: pte = vtopte(va);
1004:
1005: *pte = L2_S_PROTO | spa |
1006: L2_S_PROT(PTE_KERNEL, prot) | pte_l2_s_cache_mode;
1.30.2.14 thorpej 1007: PTE_SYNC(pte);
1.30.2.11 nathanw 1008: cpu_tlb_flushID_SE(va);
1.30.2.2 thorpej 1009: va += NBPG;
1010: spa += NBPG;
1011: }
1012: pmap_update(pmap_kernel());
1013: return(va);
1014: }
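/*
 * Example (illustrative): a dump routine can pack successive frames
 * into KVA with
 *
 *	va = pmap_map(va, pa, pa + NBPG, VM_PROT_READ|VM_PROT_WRITE);
 *
 * since the advanced virtual address is returned for the next call.
 */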
1015:
1016:
1017: /*
1018: * void pmap_bootstrap(pd_entry_t *kernel_l1pt, pv_addr_t kernel_ptpt)
1019: *
1020: * bootstrap the pmap system. This is called from initarm and allows
 1021: * the pmap system to initialise any structures it requires.
1022: *
1023: * Currently this sets up the kernel_pmap that is statically allocated
 1024: * and also allocates virtual addresses for certain page hooks.
 1025: * Currently only one page hook is allocated; it is used
1026: * to zero physical pages of memory.
1027: * It also initialises the start and end address of the kernel data space.
1028: */
1029:
1030: char *boot_head;
1031:
1032: void
1.30.2.6 nathanw 1033: pmap_bootstrap(pd_entry_t *kernel_l1pt, pv_addr_t kernel_ptpt)
1.30.2.2 thorpej 1034: {
1.30.2.6 nathanw 1035: pt_entry_t *pte;
1.30.2.2 thorpej 1036:
1037: pmap_kernel()->pm_pdir = kernel_l1pt;
1038: pmap_kernel()->pm_pptpt = kernel_ptpt.pv_pa;
1039: pmap_kernel()->pm_vptpt = kernel_ptpt.pv_va;
1040: simple_lock_init(&pmap_kernel()->pm_lock);
1041: pmap_kernel()->pm_obj.pgops = NULL;
1042: TAILQ_INIT(&(pmap_kernel()->pm_obj.memq));
1043: pmap_kernel()->pm_obj.uo_npages = 0;
1044: pmap_kernel()->pm_obj.uo_refs = 1;
1045:
1.30.2.6 nathanw 1046: virtual_avail = KERNEL_VM_BASE;
1047: virtual_end = KERNEL_VM_BASE + KERNEL_VM_SIZE;
1.30.2.2 thorpej 1048:
1049: /*
1.30.2.6 nathanw 1050: * now we allocate the "special" VAs which are used for tmp mappings
1051: * by the pmap (and other modules). we allocate the VAs by advancing
1052: * virtual_avail (note that there are no pages mapped at these VAs).
1053: * we find the PTE that maps the allocated VA via the linear PTE
1054: * mapping.
1.30.2.2 thorpej 1055: */
1056:
1.30.2.6 nathanw 1057: pte = ((pt_entry_t *) PTE_BASE) + atop(virtual_avail);
1058:
1059: csrcp = virtual_avail; csrc_pte = pte;
1060: virtual_avail += PAGE_SIZE; pte++;
1061:
1062: cdstp = virtual_avail; cdst_pte = pte;
1063: virtual_avail += PAGE_SIZE; pte++;
1064:
1065: memhook = (char *) virtual_avail; /* don't need pte */
1066: virtual_avail += PAGE_SIZE; pte++;
1067:
1068: msgbufaddr = (caddr_t) virtual_avail; /* don't need pte */
1069: virtual_avail += round_page(MSGBUFSIZE);
1070: pte += atop(round_page(MSGBUFSIZE));
1.30.2.2 thorpej 1071:
1072: /*
1073: * init the static-global locks and global lists.
1074: */
1075: spinlockinit(&pmap_main_lock, "pmaplk", 0);
1076: simple_lock_init(&pvalloc_lock);
1.30.2.6 nathanw 1077: simple_lock_init(&pmaps_lock);
1078: LIST_INIT(&pmaps);
1.30.2.2 thorpej 1079: TAILQ_INIT(&pv_freepages);
1080: TAILQ_INIT(&pv_unusedpgs);
1081:
1082: /*
1083: * initialize the pmap pool.
1084: */
1085:
1086: pool_init(&pmap_pmap_pool, sizeof(struct pmap), 0, 0, 0, "pmappl",
1.30.2.6 nathanw 1087: &pool_allocator_nointr);
1.30.2.14 thorpej 1088:
1089: /*
1090: * initialize the PT-PT pool and cache.
1091: */
1092:
1093: pool_init(&pmap_ptpt_pool, PAGE_SIZE, 0, 0, 0, "ptptpl",
1094: &pmap_ptpt_allocator);
1095: pool_cache_init(&pmap_ptpt_cache, &pmap_ptpt_pool,
1096: pmap_ptpt_ctor, NULL, NULL);
1097:
1.30.2.5 nathanw 1098: cpu_dcache_wbinv_all();
1.30.2.2 thorpej 1099: }
1100:
1101: /*
1102: * void pmap_init(void)
1103: *
1104: * Initialize the pmap module.
1105: * Called by vm_init() in vm/vm_init.c in order to initialise
1106: * any structures that the pmap system needs to map virtual memory.
1107: */
1108:
1109: extern int physmem;
1110:
1111: void
1.30.2.6 nathanw 1112: pmap_init(void)
1.30.2.2 thorpej 1113: {
1114:
1115: /*
1116: * Set the available memory vars - These do not map to real memory
 1117: * addresses, and cannot, as the physical memory is fragmented.
1118: * They are used by ps for %mem calculations.
1119: * One could argue whether this should be the entire memory or just
1120: * the memory that is useable in a user process.
1121: */
1122: avail_start = 0;
1123: avail_end = physmem * NBPG;
1124:
1125: /*
1126: * now we need to free enough pv_entry structures to allow us to get
1127: * the kmem_map/kmem_object allocated and inited (done after this
1128: * function is finished). to do this we allocate one bootstrap page out
1129: * of kernel_map and use it to provide an initial pool of pv_entry
1130: * structures. we never free this page.
1131: */
1132:
1133: pv_initpage = (struct pv_page *) uvm_km_alloc(kernel_map, PAGE_SIZE);
1134: if (pv_initpage == NULL)
1135: panic("pmap_init: pv_initpage");
1136: pv_cachedva = 0; /* a VA we have allocated but not used yet */
1137: pv_nfpvents = 0;
1138: (void) pmap_add_pvpage(pv_initpage, FALSE);
1139:
1140: pmap_initialized = TRUE;
1141:
1142: /* Initialise our L1 page table queues and counters */
1143: SIMPLEQ_INIT(&l1pt_static_queue);
1144: l1pt_static_queue_count = 0;
1145: l1pt_static_create_count = 0;
1146: SIMPLEQ_INIT(&l1pt_queue);
1147: l1pt_queue_count = 0;
1148: l1pt_create_count = 0;
1149: l1pt_reuse_count = 0;
1150: }
1151:
1152: /*
1153: * pmap_postinit()
1154: *
1155: * This routine is called after the vm and kmem subsystems have been
1156: * initialised. This allows the pmap code to perform any initialisation
 1157: * that can only be done once the memory allocation is in place.
1158: */
1159:
1160: void
1.30.2.6 nathanw 1161: pmap_postinit(void)
1.30.2.2 thorpej 1162: {
1163: int loop;
1164: struct l1pt *pt;
1165:
1166: #ifdef PMAP_STATIC_L1S
1167: for (loop = 0; loop < PMAP_STATIC_L1S; ++loop) {
1168: #else /* PMAP_STATIC_L1S */
1169: for (loop = 0; loop < max_processes; ++loop) {
1170: #endif /* PMAP_STATIC_L1S */
1171: /* Allocate a L1 page table */
1172: pt = pmap_alloc_l1pt();
1173: if (!pt)
1.30.2.17 nathanw 1174: panic("Cannot allocate static L1 page tables");
1.30.2.2 thorpej 1175:
1176: /* Clean it */
1.30.2.7 nathanw 1177: bzero((void *)pt->pt_va, L1_TABLE_SIZE);
1.30.2.2 thorpej 1178: pt->pt_flags |= (PTFLAG_STATIC | PTFLAG_CLEAN);
1179: /* Add the page table to the queue */
1180: SIMPLEQ_INSERT_TAIL(&l1pt_static_queue, pt, pt_queue);
1181: ++l1pt_static_queue_count;
1182: ++l1pt_static_create_count;
1183: }
1184: }
1185:
1186:
1187: /*
1188: * Create and return a physical map.
1189: *
1190: * If the size specified for the map is zero, the map is an actual physical
1191: * map, and may be referenced by the hardware.
1192: *
1193: * If the size specified is non-zero, the map will be used in software only,
1194: * and is bounded by that size.
1195: */
1196:
1197: pmap_t
1.30.2.6 nathanw 1198: pmap_create(void)
1.30.2.2 thorpej 1199: {
1200: struct pmap *pmap;
1201:
1202: /*
1203: * Fetch pmap entry from the pool
1204: */
1205:
1206: pmap = pool_get(&pmap_pmap_pool, PR_WAITOK);
1207: /* XXX is this really needed! */
1208: memset(pmap, 0, sizeof(*pmap));
1209:
1210: simple_lock_init(&pmap->pm_obj.vmobjlock);
1211: pmap->pm_obj.pgops = NULL; /* currently not a mappable object */
1212: TAILQ_INIT(&pmap->pm_obj.memq);
1213: pmap->pm_obj.uo_npages = 0;
1214: pmap->pm_obj.uo_refs = 1;
1215: pmap->pm_stats.wired_count = 0;
1216: pmap->pm_stats.resident_count = 1;
1.30.2.6 nathanw 1217: pmap->pm_ptphint = NULL;
1.30.2.2 thorpej 1218:
1219: /* Now init the machine part of the pmap */
1220: pmap_pinit(pmap);
1221: return(pmap);
1222: }
1223:
1224: /*
1225: * pmap_alloc_l1pt()
1226: *
1227: * This routine allocates physical and virtual memory for a L1 page table
1228: * and wires it.
1229: * A l1pt structure is returned to describe the allocated page table.
1230: *
1231: * This routine is allowed to fail if the required memory cannot be allocated.
1232: * In this case NULL is returned.
1233: */
1234:
1235: struct l1pt *
1236: pmap_alloc_l1pt(void)
1237: {
1238: paddr_t pa;
1239: vaddr_t va;
1240: struct l1pt *pt;
1241: int error;
1242: struct vm_page *m;
1243:
1244: /* Allocate virtual address space for the L1 page table */
1.30.2.7 nathanw 1245: va = uvm_km_valloc(kernel_map, L1_TABLE_SIZE);
1.30.2.2 thorpej 1246: if (va == 0) {
1247: #ifdef DIAGNOSTIC
1248: PDEBUG(0,
1249: printf("pmap: Cannot allocate pageable memory for L1\n"));
1250: #endif /* DIAGNOSTIC */
1251: return(NULL);
1252: }
1253:
1254: /* Allocate memory for the l1pt structure */
1255: pt = (struct l1pt *)malloc(sizeof(struct l1pt), M_VMPMAP, M_WAITOK);
1256:
1257: /*
1258: * Allocate pages from the VM system.
1259: */
1.30.2.7 nathanw 1260: error = uvm_pglistalloc(L1_TABLE_SIZE, physical_start, physical_end,
1261: L1_TABLE_SIZE, 0, &pt->pt_plist, 1, M_WAITOK);
1.30.2.2 thorpej 1262: if (error) {
1263: #ifdef DIAGNOSTIC
1264: PDEBUG(0,
1265: printf("pmap: Cannot allocate physical mem for L1 (%d)\n",
1266: error));
1267: #endif /* DIAGNOSTIC */
1268: /* Release the resources we already have claimed */
1269: free(pt, M_VMPMAP);
1.30.2.7 nathanw 1270: uvm_km_free(kernel_map, va, L1_TABLE_SIZE);
1.30.2.2 thorpej 1271: return(NULL);
1272: }
1273:
1274: /* Map our physical pages into our virtual space */
1275: pt->pt_va = va;
1.30.2.6 nathanw 1276: m = TAILQ_FIRST(&pt->pt_plist);
1.30.2.7 nathanw 1277: while (m && va < (pt->pt_va + L1_TABLE_SIZE)) {
1.30.2.2 thorpej 1278: pa = VM_PAGE_TO_PHYS(m);
1279:
1.30.2.14 thorpej 1280: pmap_kenter_pa(va, pa, VM_PROT_READ|VM_PROT_WRITE);
1.30.2.2 thorpej 1281:
1282: va += NBPG;
1283: m = m->pageq.tqe_next;
1284: }
1285:
1286: #ifdef DIAGNOSTIC
1287: if (m)
1.30.2.17 nathanw 1288: panic("pmap_alloc_l1pt: pglist not empty");
1.30.2.2 thorpej 1289: #endif /* DIAGNOSTIC */
1290:
1291: pt->pt_flags = 0;
1292: return(pt);
1293: }
1294:
1295: /*
1296: * Free a L1 page table previously allocated with pmap_alloc_l1pt().
1297: */
1.30.2.4 nathanw 1298: static void
1.30.2.6 nathanw 1299: pmap_free_l1pt(struct l1pt *pt)
1.30.2.2 thorpej 1300: {
 1301: /* Separate the physical memory from the virtual space */
1.30.2.7 nathanw 1302: pmap_kremove(pt->pt_va, L1_TABLE_SIZE);
1.30.2.2 thorpej 1303: pmap_update(pmap_kernel());
1304:
1305: /* Return the physical memory */
1306: uvm_pglistfree(&pt->pt_plist);
1307:
1308: /* Free the virtual space */
1.30.2.7 nathanw 1309: uvm_km_free(kernel_map, pt->pt_va, L1_TABLE_SIZE);
1.30.2.2 thorpej 1310:
1311: /* Free the l1pt structure */
1312: free(pt, M_VMPMAP);
1313: }
1314:
1315: /*
1.30.2.14 thorpej 1316: * pmap_ptpt_page_alloc:
1.30.2.7 nathanw 1317: *
1.30.2.14 thorpej 1318: * Back-end page allocator for the PT-PT pool.
1.30.2.7 nathanw 1319: */
1.30.2.14 thorpej 1320: static void *
1321: pmap_ptpt_page_alloc(struct pool *pp, int flags)
1.30.2.7 nathanw 1322: {
1323: struct vm_page *pg;
1324: pt_entry_t *pte;
1.30.2.14 thorpej 1325: vaddr_t va;
1.30.2.7 nathanw 1326:
1.30.2.14 thorpej 1327: /* XXX PR_WAITOK? */
1328: va = uvm_km_valloc(kernel_map, L2_TABLE_SIZE);
1329: if (va == 0)
1330: return (NULL);
1.30.2.7 nathanw 1331:
1332: for (;;) {
1333: pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO);
1334: if (pg != NULL)
1335: break;
1.30.2.14 thorpej 1336: if ((flags & PR_WAITOK) == 0) {
1337: uvm_km_free(kernel_map, va, L2_TABLE_SIZE);
1338: return (NULL);
1339: }
1.30.2.7 nathanw 1340: uvm_wait("pmap_ptpt");
1341: }
1342:
1.30.2.14 thorpej 1343: pte = vtopte(va);
1.30.2.7 nathanw 1344: KDASSERT(pmap_pte_v(pte) == 0);
1345:
1.30.2.14 thorpej 1346: *pte = L2_S_PROTO | VM_PAGE_TO_PHYS(pg) |
1347: L2_S_PROT(PTE_KERNEL, VM_PROT_READ|VM_PROT_WRITE);
1348: PTE_SYNC(pte);
1.30.2.12 nathanw 1349: #ifdef PMAP_ALIAS_DEBUG
1350: {
1351: int s = splhigh();
1352: pg->mdpage.krw_mappings++;
1353: splx(s);
1354: }
1355: #endif /* PMAP_ALIAS_DEBUG */
1.30.2.7 nathanw 1356:
1.30.2.14 thorpej 1357: return ((void *) va);
1.30.2.7 nathanw 1358: }
1359:
1360: /*
1.30.2.14 thorpej 1361: * pmap_ptpt_page_free:
1.30.2.7 nathanw 1362: *
1.30.2.14 thorpej 1363: * Back-end page free'er for the PT-PT pool.
1.30.2.7 nathanw 1364: */
1365: static void
1.30.2.14 thorpej 1366: pmap_ptpt_page_free(struct pool *pp, void *v)
1.30.2.7 nathanw 1367: {
1.30.2.14 thorpej 1368: vaddr_t va = (vaddr_t) v;
1369: paddr_t pa;
1370:
1371: pa = vtophys(va);
1.30.2.7 nathanw 1372:
1.30.2.14 thorpej 1373: pmap_kremove(va, L2_TABLE_SIZE);
1.30.2.7 nathanw 1374: pmap_update(pmap_kernel());
1375:
1.30.2.14 thorpej 1376: uvm_pagefree(PHYS_TO_VM_PAGE(pa));
1.30.2.7 nathanw 1377:
1.30.2.14 thorpej 1378: uvm_km_free(kernel_map, va, L2_TABLE_SIZE);
1379: }
1380:
1381: /*
1382: * pmap_ptpt_ctor:
1383: *
1384: * Constructor for the PT-PT cache.
1385: */
1386: static int
1387: pmap_ptpt_ctor(void *arg, void *object, int flags)
1388: {
1389: caddr_t vptpt = object;
1390:
1391: /* Page is already zero'd. */
1392:
1393: /*
1394: * Map in kernel PTs.
1395: *
1396: * XXX THIS IS CURRENTLY DONE AS UNCACHED MEMORY ACCESS.
1397: */
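	/*
	 * PTE_BASE + (PTE_BASE >> (PGSHIFT - 2)) is PTE_BASE viewed
	 * through the linear PTE mapping, i.e. the currently active
	 * PT-PT; its kernel portion is what gets copied into the new
	 * object.
	 */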
1398: memcpy(vptpt + ((L1_TABLE_SIZE - KERNEL_PD_SIZE) >> 2),
1399: (char *)(PTE_BASE + (PTE_BASE >> (PGSHIFT - 2)) +
1400: ((L1_TABLE_SIZE - KERNEL_PD_SIZE) >> 2)),
1401: (KERNEL_PD_SIZE >> 2));
1402:
1403: return (0);
1.30.2.7 nathanw 1404: }
1405:
1406: /*
1.30.2.2 thorpej 1407: * Allocate a page directory.
1408: * This routine will either allocate a new page directory from the pool
1409: * of L1 page tables currently held by the kernel or it will allocate
1410: * a new one via pmap_alloc_l1pt().
1411: * It will then initialise the l1 page table for use.
1412: */
1.30.2.4 nathanw 1413: static int
1.30.2.6 nathanw 1414: pmap_allocpagedir(struct pmap *pmap)
1.30.2.2 thorpej 1415: {
1.30.2.14 thorpej 1416: vaddr_t vptpt;
1.30.2.2 thorpej 1417: struct l1pt *pt;
1.30.2.14 thorpej 1418: u_int gen;
1.30.2.2 thorpej 1419:
1420: PDEBUG(0, printf("pmap_allocpagedir(%p)\n", pmap));
1421:
1422: /* Do we have any spare L1's lying around ? */
1423: if (l1pt_static_queue_count) {
1424: --l1pt_static_queue_count;
1.30.2.8 nathanw 1425: pt = SIMPLEQ_FIRST(&l1pt_static_queue);
1426: SIMPLEQ_REMOVE_HEAD(&l1pt_static_queue, pt_queue);
1.30.2.2 thorpej 1427: } else if (l1pt_queue_count) {
1428: --l1pt_queue_count;
1.30.2.8 nathanw 1429: pt = SIMPLEQ_FIRST(&l1pt_queue);
1430: SIMPLEQ_REMOVE_HEAD(&l1pt_queue, pt_queue);
1.30.2.2 thorpej 1431: ++l1pt_reuse_count;
1432: } else {
1433: pt = pmap_alloc_l1pt();
1434: if (!pt)
1435: return(ENOMEM);
1436: ++l1pt_create_count;
1437: }
1438:
1439: /* Store the pointer to the l1 descriptor in the pmap. */
1440: pmap->pm_l1pt = pt;
1441:
1442: /* Store the virtual address of the l1 in the pmap. */
1443: pmap->pm_pdir = (pd_entry_t *)pt->pt_va;
1444:
1445: /* Clean the L1 if it is dirty */
1.30.2.14 thorpej 1446: if (!(pt->pt_flags & PTFLAG_CLEAN)) {
1.30.2.7 nathanw 1447: bzero((void *)pmap->pm_pdir, (L1_TABLE_SIZE - KERNEL_PD_SIZE));
1.30.2.14 thorpej 1448: cpu_dcache_wb_range((vaddr_t) pmap->pm_pdir,
1449: (L1_TABLE_SIZE - KERNEL_PD_SIZE));
1450: }
1.30.2.2 thorpej 1451:
1452: /* Allocate a page table to map all the page tables for this pmap */
1.30.2.14 thorpej 1453: KASSERT(pmap->pm_vptpt == 0);
1454:
1455: try_again:
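	/*
	 * Snapshot the PT-PT cache generation first: if it changes
	 * (e.g. pmap_growkernel() adds kernel mappings) before we take
	 * pmaps_lock, the object we fetched was constructed with stale
	 * kernel PTs and is destructed and re-fetched below.
	 */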
1456: gen = pmap_ptpt_cache_generation;
1457: vptpt = (vaddr_t) pool_cache_get(&pmap_ptpt_cache, PR_WAITOK);
1458: if (vptpt == NULL) {
1459: PDEBUG(0, printf("pmap_alloc_pagedir: no KVA for PTPT\n"));
1.30.2.7 nathanw 1460: pmap_freepagedir(pmap);
1.30.2.14 thorpej 1461: return (ENOMEM);
1.30.2.2 thorpej 1462: }
1463:
1.30.2.7 nathanw 1464: /* need to lock this all up for growkernel */
1.30.2.6 nathanw 1465: simple_lock(&pmaps_lock);
1466:
1.30.2.14 thorpej 1467: if (gen != pmap_ptpt_cache_generation) {
1468: simple_unlock(&pmaps_lock);
1469: pool_cache_destruct_object(&pmap_ptpt_cache, (void *) vptpt);
1470: goto try_again;
1471: }
1472:
1473: pmap->pm_vptpt = vptpt;
1474: pmap->pm_pptpt = vtophys(vptpt);
1475:
1.30.2.6 nathanw 1476: /* Duplicate the kernel mappings. */
1.30.2.7 nathanw 1477: bcopy((char *)pmap_kernel()->pm_pdir + (L1_TABLE_SIZE - KERNEL_PD_SIZE),
1478: (char *)pmap->pm_pdir + (L1_TABLE_SIZE - KERNEL_PD_SIZE),
1.30.2.6 nathanw 1479: KERNEL_PD_SIZE);
1.30.2.14 thorpej 1480: cpu_dcache_wb_range((vaddr_t)pmap->pm_pdir +
1481: (L1_TABLE_SIZE - KERNEL_PD_SIZE), KERNEL_PD_SIZE);
1.30.2.6 nathanw 1482:
1.30.2.2 thorpej 1483: /* Wire in this page table */
1.30.2.14 thorpej 1484: pmap_map_in_l1(pmap, PTE_BASE, pmap->pm_pptpt, PMAP_PTP_SELFREF);
1.30.2.2 thorpej 1485:
1486: pt->pt_flags &= ~PTFLAG_CLEAN; /* L1 is dirty now */
1487:
1.30.2.6 nathanw 1488: LIST_INSERT_HEAD(&pmaps, pmap, pm_list);
1489: simple_unlock(&pmaps_lock);
1490:
1.30.2.2 thorpej 1491: return(0);
1492: }
1493:
1494:
1495: /*
1496: * Initialize a preallocated and zeroed pmap structure,
1497: * such as one in a vmspace structure.
1498: */
1499:
1500: void
1.30.2.6 nathanw 1501: pmap_pinit(struct pmap *pmap)
1.30.2.2 thorpej 1502: {
1503: int backoff = 6;
1504: int retry = 10;
1505:
1506: PDEBUG(0, printf("pmap_pinit(%p)\n", pmap));
1507:
1508: /* Keep looping until we succeed in allocating a page directory */
1509: while (pmap_allocpagedir(pmap) != 0) {
1510: /*
1511: * Ok we failed to allocate a suitable block of memory for an
1512: * L1 page table. This means that either:
1513: * 1. 16KB of virtual address space could not be allocated
1514: * 2. 16KB of physically contiguous memory on a 16KB boundary
1515: * could not be allocated.
1516: *
1517: * Since we cannot fail we will sleep for a while and try
1518: * again.
1519: *
1520: * Searching for a suitable L1 PT is expensive:
1521: * to avoid hogging the system when memory is really
1522: * scarce, use an exponential back-off so that
1523: * eventually we won't retry more than once every 8
1524: * seconds. This should allow other processes to run
1525: * to completion and free up resources.
1526: */
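		/*
		 * Illustrative timing: backoff starts at 6, so the
		 * first sleeps last (hz << 3) >> 6 = hz/8 ticks; each
		 * run of 10 failures decrements backoff, doubling the
		 * interval, until it tops out at (hz << 3) = 8 seconds.
		 */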
1527: (void) ltsleep(&lbolt, PVM, "l1ptwait", (hz << 3) >> backoff,
1528: NULL);
1529: if (--retry == 0) {
1530: retry = 10;
1531: if (backoff)
1532: --backoff;
1533: }
1534: }
1535:
1.30.2.7 nathanw 1536: if (vector_page < KERNEL_BASE) {
1537: /*
1538: * Map the vector page. This will also allocate and map
1539: * an L2 table for it.
1540: */
1541: pmap_enter(pmap, vector_page, systempage.pv_pa,
1542: VM_PROT_READ, VM_PROT_READ | PMAP_WIRED);
1543: pmap_update(pmap);
1544: }
1.30.2.2 thorpej 1545: }
1546:
1547: void
1.30.2.6 nathanw 1548: pmap_freepagedir(struct pmap *pmap)
1.30.2.2 thorpej 1549: {
1550: /* Free the memory used for the page table mapping */
1.30.2.14 thorpej 1551: if (pmap->pm_vptpt != 0) {
1552: /*
1553: * XXX Objects freed to a pool cache must be in constructed
1554: * XXX form when freed, but we don't free page tables as we
1555: * XXX go, so we need to zap the mappings here.
1556: *
1557: * XXX THIS IS CURRENTLY DONE AS UNCACHED MEMORY ACCESS.
1558: */
1559: memset((caddr_t) pmap->pm_vptpt, 0,
1560: ((L1_TABLE_SIZE - KERNEL_PD_SIZE) >> 2));
1561: pool_cache_put(&pmap_ptpt_cache, (void *) pmap->pm_vptpt);
1562: }
1.30.2.2 thorpej 1563:
1564: /* junk the L1 page table */
1565: if (pmap->pm_l1pt->pt_flags & PTFLAG_STATIC) {
1566: /* Add the page table to the queue */
1.30.2.14 thorpej 1567: SIMPLEQ_INSERT_TAIL(&l1pt_static_queue,
1568: pmap->pm_l1pt, pt_queue);
1.30.2.2 thorpej 1569: ++l1pt_static_queue_count;
1570: } else if (l1pt_queue_count < 8) {
1571: /* Add the page table to the queue */
1572: SIMPLEQ_INSERT_TAIL(&l1pt_queue, pmap->pm_l1pt, pt_queue);
1573: ++l1pt_queue_count;
1574: } else
1575: pmap_free_l1pt(pmap->pm_l1pt);
1576: }
1577:
1578: /*
1579: * Retire the given physical map from service.
1580: * Should only be called if the map contains no valid mappings.
1581: */
1582:
1583: void
1.30.2.6 nathanw 1584: pmap_destroy(struct pmap *pmap)
1.30.2.2 thorpej 1585: {
1586: struct vm_page *page;
1587: int count;
1588:
1589: if (pmap == NULL)
1590: return;
1591:
1592: PDEBUG(0, printf("pmap_destroy(%p)\n", pmap));
1593:
1594: /*
1595: * Drop reference count
1596: */
1597: simple_lock(&pmap->pm_obj.vmobjlock);
1598: count = --pmap->pm_obj.uo_refs;
1599: simple_unlock(&pmap->pm_obj.vmobjlock);
1600: if (count > 0) {
1601: return;
1602: }
1603:
1604: /*
1605: * reference count is zero, free pmap resources and then free pmap.
1606: */
1.30.2.6 nathanw 1607:
1608: /*
1609: * remove it from global list of pmaps
1610: */
1611:
1612: simple_lock(&pmaps_lock);
1613: LIST_REMOVE(pmap, pm_list);
1614: simple_unlock(&pmaps_lock);
1.30.2.2 thorpej 1615:
1.30.2.7 nathanw 1616: if (vector_page < KERNEL_BASE) {
1617: /* Remove the vector page mapping */
1618: pmap_remove(pmap, vector_page, vector_page + NBPG);
1619: pmap_update(pmap);
1620: }
1.30.2.2 thorpej 1621:
1622: /*
1623: * Free any page tables still mapped
 1624: * This is only temporary until pmap_enter can count the number
1625: * of mappings made in a page table. Then pmap_remove() can
1626: * reduce the count and free the pagetable when the count
1627: * reaches zero. Note that entries in this list should match the
 1628: * contents of the ptpt; however, this is faster than walking
 1629: * all 1024 entries looking for pt's
1630: * taken from i386 pmap.c
1631: */
1.30.2.8 nathanw 1632: /*
1633: * vmobjlock must be held while freeing pages
1634: */
1635: simple_lock(&pmap->pm_obj.vmobjlock);
1.30.2.6 nathanw 1636: while ((page = TAILQ_FIRST(&pmap->pm_obj.memq)) != NULL) {
1637: KASSERT((page->flags & PG_BUSY) == 0);
1.30.2.14 thorpej 1638:
1639: /* Freeing a PT page? The contents are a throw-away. */
1640: KASSERT((page->offset & PD_OFFSET) == 0);/* XXX KDASSERT */
1641: cpu_dcache_inv_range((vaddr_t)vtopte(page->offset), PAGE_SIZE);
1642:
1.30.2.2 thorpej 1643: page->wire_count = 0;
1644: uvm_pagefree(page);
1645: }
1.30.2.8 nathanw 1646: simple_unlock(&pmap->pm_obj.vmobjlock);
1.30.2.14 thorpej 1647:
1.30.2.2 thorpej 1648: /* Free the page dir */
1649: pmap_freepagedir(pmap);
1.30.2.14 thorpej 1650:
1.30.2.2 thorpej 1651: /* return the pmap to the pool */
1652: pool_put(&pmap_pmap_pool, pmap);
1653: }
1654:
1655:
1656: /*
1657: * void pmap_reference(struct pmap *pmap)
1658: *
1659: * Add a reference to the specified pmap.
1660: */
1661:
1662: void
1.30.2.6 nathanw 1663: pmap_reference(struct pmap *pmap)
1.30.2.2 thorpej 1664: {
1665: if (pmap == NULL)
1666: return;
1667:
1668: simple_lock(&pmap->pm_lock);
1669: pmap->pm_obj.uo_refs++;
1670: simple_unlock(&pmap->pm_lock);
1671: }
1672:
1673: /*
1674: * void pmap_virtual_space(vaddr_t *start, vaddr_t *end)
1675: *
1676: * Return the start and end addresses of the kernel's virtual space.
1677: * These values are set up in pmap_bootstrap and are updated as pages
1678: * are allocated.
1679: */
1680:
1681: void
1.30.2.6 nathanw 1682: pmap_virtual_space(vaddr_t *start, vaddr_t *end)
1.30.2.2 thorpej 1683: {
1.30.2.6 nathanw 1684: *start = virtual_avail;
1.30.2.2 thorpej 1685: *end = virtual_end;
1686: }
1687:
1688: /*
1689: * Activate the address space for the specified process. If the process
1690: * is the current process, load the new MMU context.
1691: */
1692: void
1.30.2.6 nathanw 1693: pmap_activate(struct lwp *l)
1.30.2.2 thorpej 1694: {
1695: struct pmap *pmap = l->l_proc->p_vmspace->vm_map.pmap;
1696: struct pcb *pcb = &l->l_addr->u_pcb;
1697:
1698: (void) pmap_extract(pmap_kernel(), (vaddr_t)pmap->pm_pdir,
1699: (paddr_t *)&pcb->pcb_pagedir);
1700:
1.30.2.15 wrstuden 1701: PDEBUG(0, printf("pmap_activate: l=%p pmap=%p pcb=%p pdir=%p l1=%p\n",
1702: l, pmap, pcb, pmap->pm_pdir, pcb->pcb_pagedir));
1.30.2.2 thorpej 1703:
1.30.2.9 nathanw 1704: if (l == curlwp) {
1.30.2.2 thorpej 1705: PDEBUG(0, printf("pmap_activate: setting TTB\n"));
1706: setttb((u_int)pcb->pcb_pagedir);
1707: }
1708: }
1709:
1710: /*
1711: * Deactivate the address space of the specified process.
1712: */
1713: void
1.30.2.6 nathanw 1714: pmap_deactivate(struct lwp *l)
1.30.2.2 thorpej 1715: {
1716: }
1717:
1.30.2.3 nathanw 1718: /*
1719: * Perform any deferred pmap operations.
1720: */
1721: void
1722: pmap_update(struct pmap *pmap)
1723: {
1724:
1725: /*
1726: * We haven't deferred any pmap operations, but we do need to
1727: * make sure TLB/cache operations have completed.
1728: */
1729: cpu_cpwait();
1730: }
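
/*
 * Editor's note: a minimal usage sketch. Mapping changes are only
 * guaranteed visible to the MMU once pmap_update() has run, which is
 * why calls elsewhere in this file (e.g. the vector page mapping
 * earlier) pair pmap_enter() with pmap_update().
 */
#if 0
	pmap_enter(pmap, va, pa, VM_PROT_READ, VM_PROT_READ | PMAP_WIRED);
	pmap_update(pmap);	/* drain deferred TLB/cache operations */
#endif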
1.30.2.2 thorpej 1731:
1732: /*
1733: * pmap_clean_page()
1734: *
1735: * This is a local function used to work out the best strategy to clean
1736: * a single page referenced by its entry in the PV table. It's used by
1737: * pmap_copy_page, pmap_zero_page and maybe some others later on.
1738: *
1739: * Its policy is effectively:
1740: * o If there are no mappings, we don't bother doing anything with the cache.
1741: * o If there is one mapping, we clean just that page.
1742: * o If there are multiple mappings, we clean the entire cache.
1743: *
1744: * So that some functions can be further optimised, it returns 0 if it didn't
1745: * clean the entire cache, or 1 if it did.
1746: *
1747: * XXX One bug in this routine is that if the pv_entry has a single page
1748: * mapped at 0x00000000, a whole cache clean will be performed rather than
1749: * just that one page. This should not occur in everyday use, and if it
1750: * does, the only cost is a less efficient clean for that page.
1751: */
1752: static int
1.30.2.6 nathanw 1753: pmap_clean_page(struct pv_entry *pv, boolean_t is_src)
1.30.2.2 thorpej 1754: {
1755: struct pmap *pmap;
1756: struct pv_entry *npv;
1757: int cache_needs_cleaning = 0;
1758: vaddr_t page_to_clean = 0;
1759:
1.30.2.12 nathanw 1760: if (pv == NULL) {
1.30.2.2 thorpej 1761: /* nothing mapped in so nothing to flush */
1762: return (0);
1.30.2.12 nathanw 1763: }
1.30.2.2 thorpej 1764:
1.30.2.12 nathanw 1765: /*
1766: * Since we flush the cache each time we change curlwp, we
1.30.2.2 thorpej 1767: * only need to flush the page if it is in the current pmap.
1768: */
1.30.2.10 nathanw 1769: if (curproc)
1.30.2.9 nathanw 1770: pmap = curproc->p_vmspace->vm_map.pmap;
1.30.2.2 thorpej 1771: else
1772: pmap = pmap_kernel();
1773:
1774: for (npv = pv; npv; npv = npv->pv_next) {
1775: if (npv->pv_pmap == pmap) {
1.30.2.12 nathanw 1776: /*
1777: * The page is mapped non-cacheable in
1.30.2.2 thorpej 1778: * this map. No need to flush the cache.
1779: */
1.30.2.7 nathanw 1780: if (npv->pv_flags & PVF_NC) {
1.30.2.2 thorpej 1781: #ifdef DIAGNOSTIC
1782: if (cache_needs_cleaning)
1783: panic("pmap_clean_page: "
1.30.2.12 nathanw 1784: "cache inconsistency");
1.30.2.2 thorpej 1785: #endif
1786: break;
1.30.2.12 nathanw 1787: } else if (is_src && (npv->pv_flags & PVF_WRITE) == 0)
1.30.2.2 thorpej 1788: continue;
1.30.2.12 nathanw 1789: if (cache_needs_cleaning) {
1.30.2.2 thorpej 1790: page_to_clean = 0;
1791: break;
1.30.2.12 nathanw 1792: } else
1.30.2.2 thorpej 1793: page_to_clean = npv->pv_va;
1794: cache_needs_cleaning = 1;
1795: }
1796: }
1797:
1.30.2.12 nathanw 1798: if (page_to_clean) {
1799: /*
1800: * XXX If is_src, we really only need to write-back,
1801: * XXX not invalidate, too. Investigate further.
1802: * XXX --thorpej@netbsd.org
1803: */
1.30.2.5 nathanw 1804: cpu_idcache_wbinv_range(page_to_clean, NBPG);
1.30.2.12 nathanw 1805: } else if (cache_needs_cleaning) {
1.30.2.5 nathanw 1806: cpu_idcache_wbinv_all();
1.30.2.2 thorpej 1807: return (1);
1808: }
1809: return (0);
1810: }
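
/*
 * Editor's sketch (hypothetical helper, not part of this file): the
 * cleaning policy described above, reduced to its decision. The
 * "relevant" count is the number of mappings in the current pmap that
 * pmap_clean_page() actually has to worry about.
 */
#if 0
enum clean_strategy { CLEAN_NONE, CLEAN_PAGE, CLEAN_ALL };

static enum clean_strategy
clean_strategy(int relevant)
{
	if (relevant == 0)
		return (CLEAN_NONE);	/* nothing in the cache for us */
	if (relevant == 1)
		return (CLEAN_PAGE);	/* ranged wbinv of one page */
	return (CLEAN_ALL);		/* cheaper to clean everything */
}
#endif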
1811:
1812: /*
1813: * pmap_zero_page()
1814: *
1815: * Zero a given physical page by mapping it at a page hook point.
1816: * In doing the zero page op, the page we zero is mapped cacheable, since
1817: * on StrongARM accesses to non-cached pages are non-burst, making writing
1818: * _any_ bulk data very slow.
1819: */
1.30.2.7 nathanw 1820: #if ARM_MMU_GENERIC == 1
1.30.2.2 thorpej 1821: void
1.30.2.7 nathanw 1822: pmap_zero_page_generic(paddr_t phys)
1.30.2.2 thorpej 1823: {
1.30.2.6 nathanw 1824: #ifdef DEBUG
1825: struct vm_page *pg = PHYS_TO_VM_PAGE(phys);
1826:
1827: if (pg->mdpage.pvh_list != NULL)
1828: panic("pmap_zero_page: page has mappings");
1829: #endif
1.30.2.2 thorpej 1830:
1.30.2.7 nathanw 1831: KDASSERT((phys & PGOFSET) == 0);
1832:
1.30.2.2 thorpej 1833: /*
1834: * Hook in the page, zero it, and purge the cache for that
1835: * zeroed page. Invalidate the TLB as needed.
1836: */
1.30.2.7 nathanw 1837: *cdst_pte = L2_S_PROTO | phys |
1838: L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode;
1.30.2.14 thorpej 1839: PTE_SYNC(cdst_pte);
1.30.2.6 nathanw 1840: cpu_tlb_flushD_SE(cdstp);
1.30.2.3 nathanw 1841: cpu_cpwait();
1.30.2.6 nathanw 1842: bzero_page(cdstp);
1843: cpu_dcache_wbinv_range(cdstp, NBPG);
1.30.2.2 thorpej 1844: }
1.30.2.7 nathanw 1845: #endif /* ARM_MMU_GENERIC == 1 */
1846:
1847: #if ARM_MMU_XSCALE == 1
1848: void
1849: pmap_zero_page_xscale(paddr_t phys)
1850: {
1851: #ifdef DEBUG
1852: struct vm_page *pg = PHYS_TO_VM_PAGE(phys);
1853:
1854: if (pg->mdpage.pvh_list != NULL)
1855: panic("pmap_zero_page: page has mappings");
1856: #endif
1857:
1858: KDASSERT((phys & PGOFSET) == 0);
1859:
1860: /*
1861: * Hook in the page, zero it, and purge the cache for that
1862: * zeroed page. Invalidate the TLB as needed.
1863: */
1864: *cdst_pte = L2_S_PROTO | phys |
1865: L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) |
1866: L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X); /* mini-data */
1.30.2.14 thorpej 1867: PTE_SYNC(cdst_pte);
1.30.2.7 nathanw 1868: cpu_tlb_flushD_SE(cdstp);
1869: cpu_cpwait();
1870: bzero_page(cdstp);
1871: xscale_cache_clean_minidata();
1872: }
1873: #endif /* ARM_MMU_XSCALE == 1 */
1.30.2.2 thorpej 1874:
1875: /* pmap_pageidlezero()
1876: *
1877: * The same as above, except that we assume that the page is not
1878: * mapped. This means we never have to flush the cache first. Called
1879: * from the idle loop.
1880: */
1881: boolean_t
1.30.2.6 nathanw 1882: pmap_pageidlezero(paddr_t phys)
1.30.2.2 thorpej 1883: {
1884: int i, *ptr;
1885: boolean_t rv = TRUE;
1.30.2.6 nathanw 1886: #ifdef DEBUG
1887: struct vm_page *pg;
1.30.2.2 thorpej 1888:
1.30.2.6 nathanw 1889: pg = PHYS_TO_VM_PAGE(phys);
1890: if (pg->mdpage.pvh_list != NULL)
1891: panic("pmap_pageidlezero: page has mappings");
1.30.2.2 thorpej 1892: #endif
1.30.2.7 nathanw 1893:
1894: KDASSERT((phys & PGOFSET) == 0);
1895:
1.30.2.2 thorpej 1896: /*
1897: * Hook in the page, zero it, and purge the cache for that
1898: * zeroed page. Invalidate the TLB as needed.
1899: */
1.30.2.7 nathanw 1900: *cdst_pte = L2_S_PROTO | phys |
1901: L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode;
1.30.2.14 thorpej 1902: PTE_SYNC(cdst_pte);
1.30.2.6 nathanw 1903: cpu_tlb_flushD_SE(cdstp);
1.30.2.3 nathanw 1904: cpu_cpwait();
1905:
1.30.2.6 nathanw 1906: for (i = 0, ptr = (int *)cdstp;
1.30.2.2 thorpej 1907: i < (NBPG / sizeof(int)); i++) {
1908: if (sched_whichqs != 0) {
1909: /*
1910: * A process has become ready. Abort now,
1911: * so we don't keep it waiting while we
1912: * do slow memory access to finish this
1913: * page.
1914: */
1915: rv = FALSE;
1916: break;
1917: }
1918: *ptr++ = 0;
1919: }
1920:
1921: if (rv)
1922: /*
1923: * if we aborted, we'll rezero this page again later, so don't
1924: * purge it unless we finished it
1925: */
1.30.2.6 nathanw 1926: cpu_dcache_wbinv_range(cdstp, NBPG);
1.30.2.2 thorpej 1927: return (rv);
1928: }
1929:
1930: /*
1931: * pmap_copy_page()
1932: *
1933: * Copy one physical page into another, by mapping the pages into
1934: * hook points. The same comment regarding cacheability as in
1935: * pmap_zero_page also applies here.
1936: */
1.30.2.7 nathanw 1937: #if ARM_MMU_GENERIC == 1
1.30.2.2 thorpej 1938: void
1.30.2.7 nathanw 1939: pmap_copy_page_generic(paddr_t src, paddr_t dst)
1.30.2.6 nathanw 1940: {
1941: struct vm_page *src_pg = PHYS_TO_VM_PAGE(src);
1942: #ifdef DEBUG
1943: struct vm_page *dst_pg = PHYS_TO_VM_PAGE(dst);
1944:
1945: if (dst_pg->mdpage.pvh_list != NULL)
1946: panic("pmap_copy_page: dst page has mappings");
1947: #endif
1948:
1.30.2.7 nathanw 1949: KDASSERT((src & PGOFSET) == 0);
1950: KDASSERT((dst & PGOFSET) == 0);
1951:
1.30.2.6 nathanw 1952: /*
1953: * Clean the source page. Hold the source page's lock for
1954: * the duration of the copy so that no other mappings can
1955: * be created while we have a potentially aliased mapping.
1956: */
1957: simple_lock(&src_pg->mdpage.pvh_slock);
1958: (void) pmap_clean_page(src_pg->mdpage.pvh_list, TRUE);
1959:
1.30.2.2 thorpej 1960: /*
1961: * Map the pages into the page hook points, copy them, and purge
1962: * the cache for the appropriate page. Invalidate the TLB
1963: * as required.
1964: */
1.30.2.7 nathanw 1965: *csrc_pte = L2_S_PROTO | src |
1966: L2_S_PROT(PTE_KERNEL, VM_PROT_READ) | pte_l2_s_cache_mode;
1.30.2.14 thorpej 1967: PTE_SYNC(csrc_pte);
1.30.2.7 nathanw 1968: *cdst_pte = L2_S_PROTO | dst |
1969: L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode;
1.30.2.14 thorpej 1970: PTE_SYNC(cdst_pte);
1.30.2.6 nathanw 1971: cpu_tlb_flushD_SE(csrcp);
1972: cpu_tlb_flushD_SE(cdstp);
1.30.2.3 nathanw 1973: cpu_cpwait();
1.30.2.6 nathanw 1974: bcopy_page(csrcp, cdstp);
1975: cpu_dcache_inv_range(csrcp, NBPG);
1976: simple_unlock(&src_pg->mdpage.pvh_slock); /* cache is safe again */
1977: cpu_dcache_wbinv_range(cdstp, NBPG);
1.30.2.2 thorpej 1978: }
1.30.2.7 nathanw 1979: #endif /* ARM_MMU_GENERIC == 1 */
1980:
1981: #if ARM_MMU_XSCALE == 1
1982: void
1983: pmap_copy_page_xscale(paddr_t src, paddr_t dst)
1984: {
1985: struct vm_page *src_pg = PHYS_TO_VM_PAGE(src);
1986: #ifdef DEBUG
1987: struct vm_page *dst_pg = PHYS_TO_VM_PAGE(dst);
1988:
1989: if (dst_pg->mdpage.pvh_list != NULL)
1990: panic("pmap_copy_page: dst page has mappings");
1991: #endif
1992:
1993: KDASSERT((src & PGOFSET) == 0);
1994: KDASSERT((dst & PGOFSET) == 0);
1995:
1996: /*
1997: * Clean the source page. Hold the source page's lock for
1998: * the duration of the copy so that no other mappings can
1999: * be created while we have a potentially aliased mapping.
2000: */
2001: simple_lock(&src_pg->mdpage.pvh_slock);
2002: (void) pmap_clean_page(src_pg->mdpage.pvh_list, TRUE);
2003:
2004: /*
2005: * Map the pages into the page hook points, copy them, and purge
2006: * the cache for the appropriate page. Invalidate the TLB
2007: * as required.
2008: */
2009: *csrc_pte = L2_S_PROTO | src |
2010: L2_S_PROT(PTE_KERNEL, VM_PROT_READ) |
2011: L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X); /* mini-data */
1.30.2.14 thorpej 2012: PTE_SYNC(csrc_pte);
1.30.2.7 nathanw 2013: *cdst_pte = L2_S_PROTO | dst |
2014: L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) |
2015: L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X); /* mini-data */
1.30.2.14 thorpej 2016: PTE_SYNC(cdst_pte);
1.30.2.7 nathanw 2017: cpu_tlb_flushD_SE(csrcp);
2018: cpu_tlb_flushD_SE(cdstp);
2019: cpu_cpwait();
2020: bcopy_page(csrcp, cdstp);
2021: simple_unlock(&src_pg->mdpage.pvh_slock); /* cache is safe again */
2022: xscale_cache_clean_minidata();
2023: }
2024: #endif /* ARM_MMU_XSCALE == 1 */
1.30.2.2 thorpej 2025:
2026: #if 0
2027: void
1.30.2.6 nathanw 2028: pmap_pte_addref(struct pmap *pmap, vaddr_t va)
1.30.2.2 thorpej 2029: {
2030: pd_entry_t *pde;
2031: paddr_t pa;
2032: struct vm_page *m;
2033:
2034: if (pmap == pmap_kernel())
2035: return;
2036:
1.30.2.14 thorpej 2037: pde = pmap_pde(pmap, va & PD_FRAME);
1.30.2.2 thorpej 2038: pa = pmap_pte_pa(pde);
2039: m = PHYS_TO_VM_PAGE(pa);
1.30.2.14 thorpej 2040: m->wire_count++;
1.30.2.2 thorpej 2041: #ifdef MYCROFT_HACK
2042: printf("addref pmap=%p va=%08lx pde=%p pa=%08lx m=%p wire=%d\n",
2043: pmap, va, pde, pa, m, m->wire_count);
2044: #endif
2045: }
2046:
2047: void
1.30.2.6 nathanw 2048: pmap_pte_delref(struct pmap *pmap, vaddr_t va)
1.30.2.2 thorpej 2049: {
2050: pd_entry_t *pde;
2051: paddr_t pa;
2052: struct vm_page *m;
2053:
2054: if (pmap == pmap_kernel())
2055: return;
2056:
1.30.2.14 thorpej 2057: pde = pmap_pde(pmap, va & PD_FRAME);
1.30.2.2 thorpej 2058: pa = pmap_pte_pa(pde);
2059: m = PHYS_TO_VM_PAGE(pa);
1.30.2.14 thorpej 2060: m->wire_count--;
1.30.2.2 thorpej 2061: #ifdef MYCROFT_HACK
2062: printf("delref pmap=%p va=%08lx pde=%p pa=%08lx m=%p wire=%d\n",
2063: pmap, va, pde, pa, m, m->wire_count);
2064: #endif
2065: if (m->wire_count == 0) {
2066: #ifdef MYCROFT_HACK
2067: printf("delref pmap=%p va=%08lx pde=%p pa=%08lx m=%p\n",
2068: pmap, va, pde, pa, m);
2069: #endif
1.30.2.14 thorpej 2070: pmap_unmap_in_l1(pmap, va & PD_FRAME);
1.30.2.2 thorpej 2071: uvm_pagefree(m);
2072: --pmap->pm_stats.resident_count;
2073: }
2074: }
2075: #else
2076: #define pmap_pte_addref(pmap, va)
2077: #define pmap_pte_delref(pmap, va)
2078: #endif
2079:
2080: /*
2081: * Since we have a virtually indexed cache, we may need to inhibit caching if
2082: * there is more than one mapping and at least one of them is writable.
2083: * Since we purge the cache on every context switch, we only need to check for
2084: * other mappings within the same pmap, or kernel_pmap.
2085: * This function is also called when a page is unmapped, to possibly reenable
2086: * caching on any remaining mappings.
2087: *
2088: * The code implements the following logic, where:
2089: *
2090: * KW = # of kernel read/write pages
2091: * KR = # of kernel read only pages
2092: * UW = # of user read/write pages
2093: * UR = # of user read only pages
2094: * OW = # of user read/write pages in another pmap, then
2095: *
2096: * KC = kernel mapping is cacheable
2097: * UC = user mapping is cacheable
2098: *
2099: * KW=0,KR=0 KW=0,KR>0 KW=1,KR=0 KW>1,KR>=0
2100: * +---------------------------------------------
2101: * UW=0,UR=0,OW=0 | --- KC=1 KC=1 KC=0
2102: * UW=0,UR>0,OW=0 | UC=1 KC=1,UC=1 KC=0,UC=0 KC=0,UC=0
2103: * UW=0,UR>0,OW>0 | UC=1 KC=0,UC=1 KC=0,UC=0 KC=0,UC=0
2104: * UW=1,UR=0,OW=0 | UC=1 KC=0,UC=0 KC=0,UC=0 KC=0,UC=0
2105: * UW>1,UR>=0,OW>=0 | UC=0 KC=0,UC=0 KC=0,UC=0 KC=0,UC=0
2106: *
2107: * Note that the pmap must have its PTEs mapped in, and passed via `ptes'.
2108: */
2109: __inline static void
1.30.2.6 nathanw 2110: pmap_vac_me_harder(struct pmap *pmap, struct vm_page *pg, pt_entry_t *ptes,
1.30.2.2 thorpej 2111: boolean_t clear_cache)
2112: {
2113: if (pmap == pmap_kernel())
1.30.2.6 nathanw 2114: pmap_vac_me_kpmap(pmap, pg, ptes, clear_cache);
1.30.2.2 thorpej 2115: else
1.30.2.6 nathanw 2116: pmap_vac_me_user(pmap, pg, ptes, clear_cache);
1.30.2.2 thorpej 2117: }
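
/*
 * Editor's sketch (hypothetical helpers, not part of this file): the
 * cacheability table above expressed as a predicate. kw/kr/uw/ur/ow
 * are the counts defined in the table; a mapping may stay cacheable
 * only while it cannot alias a writable mapping visible in the same
 * context.
 */
#if 0
static int
cacheable(int mappings, int writers)
{
	/* No writers, or a single mapping that is itself the writer. */
	return (writers == 0 || (writers == 1 && mappings == 1));
}

static void
vac_decide(int kw, int kr, int uw, int ur, int ow, int *kc, int *uc)
{
	/*
	 * User mappings only alias what is visible in their own
	 * context: this pmap's pages plus the kernel's, because the
	 * cache is purged on every context switch.
	 */
	*uc = cacheable(kw + kr + uw + ur, kw + uw);

	/*
	 * Kernel mappings are visible in every context, so writable
	 * pages in other pmaps (ow) alias them as well.
	 */
	*kc = cacheable(kw + kr + uw + ur + ow, kw + uw + ow);
}
#endif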
2118:
2119: static void
1.30.2.6 nathanw 2120: pmap_vac_me_kpmap(struct pmap *pmap, struct vm_page *pg, pt_entry_t *ptes,
1.30.2.2 thorpej 2121: boolean_t clear_cache)
2122: {
2123: int user_entries = 0;
2124: int user_writable = 0;
2125: int user_cacheable = 0;
2126: int kernel_entries = 0;
2127: int kernel_writable = 0;
2128: int kernel_cacheable = 0;
2129: struct pv_entry *pv;
2130: struct pmap *last_pmap = pmap;
2131:
2132: #ifdef DIAGNOSTIC
2133: if (pmap != pmap_kernel())
2134: panic("pmap_vac_me_kpmap: pmap != pmap_kernel()");
2135: #endif
2136:
2137: /*
2138: * Pass one, see if there are both kernel and user pmaps for
2139: * this page. Calculate whether there are user-writable or
2140: * kernel-writable pages.
2141: */
1.30.2.6 nathanw 2142: for (pv = pg->mdpage.pvh_list; pv != NULL; pv = pv->pv_next) {
1.30.2.2 thorpej 2143: if (pv->pv_pmap != pmap) {
2144: user_entries++;
1.30.2.7 nathanw 2145: if (pv->pv_flags & PVF_WRITE)
1.30.2.2 thorpej 2146: user_writable++;
1.30.2.7 nathanw 2147: if ((pv->pv_flags & PVF_NC) == 0)
1.30.2.2 thorpej 2148: user_cacheable++;
2149: } else {
2150: kernel_entries++;
1.30.2.7 nathanw 2151: if (pv->pv_flags & PVF_WRITE)
1.30.2.2 thorpej 2152: kernel_writable++;
1.30.2.7 nathanw 2153: if ((pv->pv_flags & PVF_NC) == 0)
1.30.2.2 thorpej 2154: kernel_cacheable++;
2155: }
2156: }
2157:
2158: /*
2159: * We know we have just been updating a kernel entry, so if
2160: * all user pages are already cacheable, then there is nothing
2161: * further to do.
2162: */
2163: if (kernel_entries == 0 &&
2164: user_cacheable == user_entries)
2165: return;
2166:
2167: if (user_entries) {
2168: /*
2169: * Scan over the list again; for each entry that
2170: * might not be set correctly, call pmap_vac_me_user
2171: * to recalculate the settings.
2172: */
1.30.2.6 nathanw 2173: for (pv = pg->mdpage.pvh_list; pv; pv = pv->pv_next) {
1.30.2.2 thorpej 2174: /*
2175: * We know kernel mappings will get set
2176: * correctly in other calls. We also know
2177: * that if the pmap is the same as last_pmap
2178: * then we've just handled this entry.
2179: */
2180: if (pv->pv_pmap == pmap || pv->pv_pmap == last_pmap)
2181: continue;
2182: /*
2183: * If there are kernel entries and this page
2184: * is writable but non-cacheable, then we can
2185: * skip this entry also.
2186: */
2187: if (kernel_entries > 0 &&
1.30.2.7 nathanw 2188: (pv->pv_flags & (PVF_NC | PVF_WRITE)) ==
2189: (PVF_NC | PVF_WRITE))
1.30.2.2 thorpej 2190: continue;
2191: /*
2192: * Similarly if there are no kernel-writable
2193: * entries and the page is already
2194: * read-only/cacheable.
2195: */
2196: if (kernel_writable == 0 &&
1.30.2.7 nathanw 2197: (pv->pv_flags & (PVF_NC | PVF_WRITE)) == 0)
1.30.2.2 thorpej 2198: continue;
2199: /*
2200: * For some of the remaining cases, we know
2201: * that we must recalculate, but for others we
2202: * can't tell if they are correct or not, so
2203: * we recalculate anyway.
2204: */
2205: pmap_unmap_ptes(last_pmap);
2206: last_pmap = pv->pv_pmap;
2207: ptes = pmap_map_ptes(last_pmap);
1.30.2.6 nathanw 2208: pmap_vac_me_user(last_pmap, pg, ptes,
1.30.2.2 thorpej 2209: pmap_is_curpmap(last_pmap));
2210: }
2211: /* Restore the pte mapping that was passed to us. */
2212: if (last_pmap != pmap) {
2213: pmap_unmap_ptes(last_pmap);
2214: ptes = pmap_map_ptes(pmap);
2215: }
2216: if (kernel_entries == 0)
2217: return;
2218: }
2219:
1.30.2.6 nathanw 2220: pmap_vac_me_user(pmap, pg, ptes, clear_cache);
1.30.2.2 thorpej 2221: return;
2222: }
2223:
2224: static void
1.30.2.6 nathanw 2225: pmap_vac_me_user(struct pmap *pmap, struct vm_page *pg, pt_entry_t *ptes,
1.30.2.2 thorpej 2226: boolean_t clear_cache)
2227: {
2228: struct pmap *kpmap = pmap_kernel();
2229: struct pv_entry *pv, *npv;
2230: int entries = 0;
2231: int writable = 0;
2232: int cacheable_entries = 0;
2233: int kern_cacheable = 0;
2234: int other_writable = 0;
2235:
1.30.2.6 nathanw 2236: pv = pg->mdpage.pvh_list;
1.30.2.2 thorpej 2237: KASSERT(ptes != NULL);
2238:
2239: /*
2240: * Count mappings and writable mappings in this pmap.
2241: * Include kernel mappings as part of our own.
2242: * Keep a pointer to the first one.
2243: */
2244: for (npv = pv; npv; npv = npv->pv_next) {
2245: /* Count mappings in the same pmap */
2246: if (pmap == npv->pv_pmap ||
2247: kpmap == npv->pv_pmap) {
2248: if (entries++ == 0)
2249: pv = npv;
2250: /* Cacheable mappings */
1.30.2.7 nathanw 2251: if ((npv->pv_flags & PVF_NC) == 0) {
1.30.2.2 thorpej 2252: cacheable_entries++;
2253: if (kpmap == npv->pv_pmap)
2254: kern_cacheable++;
2255: }
2256: /* Writable mappings */
1.30.2.7 nathanw 2257: if (npv->pv_flags & PVF_WRITE)
1.30.2.2 thorpej 2258: ++writable;
1.30.2.7 nathanw 2259: } else if (npv->pv_flags & PVF_WRITE)
1.30.2.2 thorpej 2260: other_writable = 1;
2261: }
2262:
2263: PDEBUG(3,printf("pmap_vac_me_harder: pmap %p Entries %d, "
2264: "writable %d cacheable %d %s\n", pmap, entries, writable,
2265: cacheable_entries, clear_cache ? "clean" : "no clean"));
2266:
2267: /*
2268: * Enable or disable caching as necessary.
2269: * Note: the first entry might be part of the kernel pmap,
2270: * so we can't assume this is indicative of the state of the
2271: * other (maybe non-kpmap) entries.
2272: */
2273: if ((entries > 1 && writable) ||
2274: (entries > 0 && pmap == kpmap && other_writable)) {
2275: if (cacheable_entries == 0)
2276: return;
2277: for (npv = pv; npv; npv = npv->pv_next) {
2278: if ((pmap == npv->pv_pmap
2279: || kpmap == npv->pv_pmap) &&
1.30.2.7 nathanw 2280: (npv->pv_flags & PVF_NC) == 0) {
2281: ptes[arm_btop(npv->pv_va)] &= ~L2_S_CACHE_MASK;
1.30.2.14 thorpej 2282: PTE_SYNC_CURRENT(pmap,
2283: &ptes[arm_btop(npv->pv_va)]);
1.30.2.7 nathanw 2284: npv->pv_flags |= PVF_NC;
1.30.2.2 thorpej 2285: /*
2286: * If this page needs flushing from the
2287: * cache, and we aren't going to do it
2288: * below, do it now.
2289: */
2290: if ((cacheable_entries < 4 &&
2291: (clear_cache || npv->pv_pmap == kpmap)) ||
2292: (npv->pv_pmap == kpmap &&
2293: !clear_cache && kern_cacheable < 4)) {
1.30.2.5 nathanw 2294: cpu_idcache_wbinv_range(npv->pv_va,
1.30.2.2 thorpej 2295: NBPG);
2296: cpu_tlb_flushID_SE(npv->pv_va);
2297: }
2298: }
2299: }
2300: if ((clear_cache && cacheable_entries >= 4) ||
2301: kern_cacheable >= 4) {
1.30.2.5 nathanw 2302: cpu_idcache_wbinv_all();
1.30.2.2 thorpej 2303: cpu_tlb_flushID();
2304: }
1.30.2.3 nathanw 2305: cpu_cpwait();
1.30.2.2 thorpej 2306: } else if (entries > 0) {
2307: /*
2308: * Turn caching back on for some pages. If it is a kernel
2309: * page, only do so if there are no other writable pages.
2310: */
2311: for (npv = pv; npv; npv = npv->pv_next) {
2312: if ((pmap == npv->pv_pmap ||
2313: (kpmap == npv->pv_pmap && other_writable == 0)) &&
1.30.2.7 nathanw 2314: (npv->pv_flags & PVF_NC)) {
2315: ptes[arm_btop(npv->pv_va)] |=
2316: pte_l2_s_cache_mode;
1.30.2.14 thorpej 2317: PTE_SYNC_CURRENT(pmap,
2318: &ptes[arm_btop(npv->pv_va)]);
1.30.2.7 nathanw 2319: npv->pv_flags &= ~PVF_NC;
1.30.2.2 thorpej 2320: }
2321: }
2322: }
2323: }
2324:
2325: /*
2326: * pmap_remove()
2327: *
2328: * pmap_remove is responsible for nuking a number of mappings for a range
2329: * of virtual address space in the current pmap. To do this efficiently
2330: * is interesting, because in a number of cases a wide virtual address
2331: * range may be supplied that contains few actual mappings. So, the
2332: * optimisations are:
2333: * 1. Try and skip over hunks of address space for which an L1 entry
2334: * does not exist.
2335: * 2. Build up a list of pages we've hit, up to a maximum, so we can
2336: * maybe do just a partial cache clean. This path of execution is
2337: * complicated by the fact that the cache must be flushed _before_
2338: * the PTE is nuked, being a VAC :-)
2339: * 3. Maybe later fast-case a single page, but I don't think this is
2340: * going to make _that_ much difference overall.
2341: */
2342:
2343: #define PMAP_REMOVE_CLEAN_LIST_SIZE 3
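
/*
 * Editor's sketch (hypothetical helper): the clean-list index in
 * pmap_remove() below doubles as a small state machine. Below the
 * limit it records pages for a ranged clean at the end; at the limit
 * it performs one full cache/TLB flush and steps past the limit so no
 * further cleaning is done.
 */
#if 0
static void
cleanlist_note(u_int *idx, vaddr_t *list, vaddr_t va)
{
	if (*idx < PMAP_REMOVE_CLEAN_LIST_SIZE) {
		/* Still room: defer, clean this page by range later. */
		list[(*idx)++] = va;
	} else if (*idx == PMAP_REMOVE_CLEAN_LIST_SIZE) {
		/* List just overflowed: one full clean pays for all. */
		cpu_idcache_wbinv_all();
		cpu_tlb_flushID();
		(*idx)++;	/* one-way transition past the limit */
	}
	/* *idx > limit: everything already flushed; nothing to do. */
}
#endif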
2344:
2345: void
1.30.2.6 nathanw 2346: pmap_remove(struct pmap *pmap, vaddr_t sva, vaddr_t eva)
1.30.2.2 thorpej 2347: {
2348: int cleanlist_idx = 0;
2349: struct pagelist {
2350: vaddr_t va;
2351: pt_entry_t *pte;
2352: } cleanlist[PMAP_REMOVE_CLEAN_LIST_SIZE];
2353: pt_entry_t *pte = 0, *ptes;
2354: paddr_t pa;
2355: int pmap_active;
1.30.2.6 nathanw 2356: struct vm_page *pg;
1.30.2.18! nathanw 2357: struct pv_entry *pv_tofree = NULL;
1.30.2.2 thorpej 2358:
2359: /* Exit quickly if there is no pmap */
2360: if (!pmap)
2361: return;
2362:
1.30.2.7 nathanw 2363: PDEBUG(0, printf("pmap_remove: pmap=%p sva=%08lx eva=%08lx\n",
2364: pmap, sva, eva));
1.30.2.2 thorpej 2365:
2366: /*
1.30.2.6 nathanw 2367: * we lock in the pmap => vm_page direction
1.30.2.2 thorpej 2368: */
2369: PMAP_MAP_TO_HEAD_LOCK();
2370:
2371: ptes = pmap_map_ptes(pmap);
2372: /* Get a page table pointer */
2373: while (sva < eva) {
2374: if (pmap_pde_page(pmap_pde(pmap, sva)))
2375: break;
1.30.2.7 nathanw 2376: sva = (sva & L1_S_FRAME) + L1_S_SIZE;
1.30.2.2 thorpej 2377: }
2378:
1.30.2.6 nathanw 2379: pte = &ptes[arm_btop(sva)];
1.30.2.2 thorpej 2380: /* Note whether the pmap is active; if so, cache and TLB cleans are required */
1.30.2.6 nathanw 2381: pmap_active = pmap_is_curpmap(pmap);
1.30.2.2 thorpej 2382:
2383: /* Now loop along */
2384: while (sva < eva) {
2385: /* Check if we can move to the next PDE (l1 chunk) */
1.30.2.14 thorpej 2386: if ((sva & L2_ADDR_BITS) == 0) {
1.30.2.2 thorpej 2387: if (!pmap_pde_page(pmap_pde(pmap, sva))) {
1.30.2.7 nathanw 2388: sva += L1_S_SIZE;
2389: pte += arm_btop(L1_S_SIZE);
1.30.2.2 thorpej 2390: continue;
2391: }
1.30.2.14 thorpej 2392: }
1.30.2.2 thorpej 2393:
2394: /* We've found a valid PTE, so this page of PTEs has to go. */
2395: if (pmap_pte_v(pte)) {
2396: /* Update statistics */
2397: --pmap->pm_stats.resident_count;
2398:
2399: /*
2400: * Add this page to our cache remove list, if we can.
2401: * If, however, the cache remove list is totally full,
2402: * then do a complete cache invalidation, taking care
2403: * to backtrack the PTE table beforehand, and ignore
2404: * the lists from then on, because there's no longer any
2405: * point in bothering with them (we've paid the
2406: * penalty, so we carry on unhindered). Otherwise,
2407: * when we fall out, we just clean the list.
2408: */
2409: PDEBUG(10, printf("remove: inv pte at %p(%x) ", pte, *pte));
2410: pa = pmap_pte_pa(pte);
2411:
2412: if (cleanlist_idx < PMAP_REMOVE_CLEAN_LIST_SIZE) {
2413: /* Add to the clean list. */
2414: cleanlist[cleanlist_idx].pte = pte;
2415: cleanlist[cleanlist_idx].va = sva;
2416: cleanlist_idx++;
2417: } else if (cleanlist_idx == PMAP_REMOVE_CLEAN_LIST_SIZE) {
2418: int cnt;
2419:
2420: /* Nuke everything if needed. */
2421: if (pmap_active) {
1.30.2.5 nathanw 2422: cpu_idcache_wbinv_all();
1.30.2.2 thorpej 2423: cpu_tlb_flushID();
2424: }
2425:
2426: /*
2427: * Roll back the previous PTE list,
2428: * and zero out the current PTE.
2429: */
1.30.2.14 thorpej 2430: for (cnt = 0;
2431: cnt < PMAP_REMOVE_CLEAN_LIST_SIZE;
2432: cnt++) {
1.30.2.2 thorpej 2433: *cleanlist[cnt].pte = 0;
1.30.2.14 thorpej 2434: if (pmap_active)
2435: PTE_SYNC(cleanlist[cnt].pte);
2436: else
2437: PTE_FLUSH(cleanlist[cnt].pte);
2438: pmap_pte_delref(pmap,
2439: cleanlist[cnt].va);
1.30.2.2 thorpej 2440: }
2441: *pte = 0;
1.30.2.14 thorpej 2442: if (pmap_active)
2443: PTE_SYNC(pte);
2444: else
2445: PTE_FLUSH(pte);
1.30.2.2 thorpej 2446: pmap_pte_delref(pmap, sva);
2447: cleanlist_idx++;
2448: } else {
2449: /*
2450: * We've already nuked the cache and
2451: * TLB, so just carry on regardless,
2452: * and we won't need to do it again
2453: */
2454: *pte = 0;
1.30.2.14 thorpej 2455: if (pmap_active)
2456: PTE_SYNC(pte);
2457: else
2458: PTE_FLUSH(pte);
1.30.2.2 thorpej 2459: pmap_pte_delref(pmap, sva);
2460: }
2461:
2462: /*
2463: * Update flags. In a number of circumstances,
2464: * we could cluster a lot of these and do a
2465: * number of sequential pages in one go.
2466: */
1.30.2.6 nathanw 2467: if ((pg = PHYS_TO_VM_PAGE(pa)) != NULL) {
1.30.2.2 thorpej 2468: struct pv_entry *pve;
1.30.2.6 nathanw 2469: simple_lock(&pg->mdpage.pvh_slock);
2470: pve = pmap_remove_pv(pg, pmap, sva);
2471: pmap_vac_me_harder(pmap, pg, ptes, FALSE);
2472: simple_unlock(&pg->mdpage.pvh_slock);
1.30.2.18! nathanw 2473: if (pve != NULL) {
! 2474: pve->pv_next = pv_tofree;
! 2475: pv_tofree = pve;
! 2476: }
1.30.2.2 thorpej 2477: }
1.30.2.14 thorpej 2478: } else if (pmap_active == 0)
2479: PTE_FLUSH(pte);
1.30.2.2 thorpej 2480: sva += NBPG;
2481: pte++;
2482: }
2483:
2484: /*
2485: * Now, if we've fallen through to here, chances are that there
2486: * are fewer than PMAP_REMOVE_CLEAN_LIST_SIZE mappings left.
2487: */
2488: if (cleanlist_idx <= PMAP_REMOVE_CLEAN_LIST_SIZE) {
2489: u_int cnt;
2490:
2491: for (cnt = 0; cnt < cleanlist_idx; cnt++) {
2492: if (pmap_active) {
1.30.2.5 nathanw 2493: cpu_idcache_wbinv_range(cleanlist[cnt].va,
2494: NBPG);
1.30.2.2 thorpej 2495: *cleanlist[cnt].pte = 0;
2496: cpu_tlb_flushID_SE(cleanlist[cnt].va);
1.30.2.14 thorpej 2497: PTE_SYNC(cleanlist[cnt].pte);
2498: } else {
1.30.2.2 thorpej 2499: *cleanlist[cnt].pte = 0;
1.30.2.14 thorpej 2500: PTE_FLUSH(cleanlist[cnt].pte);
2501: }
1.30.2.2 thorpej 2502: pmap_pte_delref(pmap, cleanlist[cnt].va);
2503: }
2504: }
1.30.2.12 nathanw 2505:
1.30.2.18! nathanw 2506: /* Delete pv entries */
! 2507: if (pv_tofree != NULL)
! 2508: pmap_free_pvs(pmap, pv_tofree);
! 2509:
1.30.2.12 nathanw 2510: pmap_unmap_ptes(pmap);
2511:
1.30.2.2 thorpej 2512: PMAP_MAP_TO_HEAD_UNLOCK();
2513: }
2514:
2515: /*
1.30.2.17 nathanw 2516: * Routine: pmap_page_remove
1.30.2.2 thorpej 2517: * Function:
2518: * Removes this physical page from
2519: * all physical maps in which it resides.
2520: * Reflects back modify bits to the pager.
2521: */
2522:
1.30.2.4 nathanw 2523: static void
1.30.2.17 nathanw 2524: pmap_page_remove(struct vm_page *pg)
1.30.2.2 thorpej 2525: {
2526: struct pv_entry *pv, *npv;
2527: struct pmap *pmap;
2528: pt_entry_t *pte, *ptes;
2529:
1.30.2.17 nathanw 2530: PDEBUG(0, printf("pmap_page_remove: pa=%lx ", VM_PAGE_TO_PHYS(pg)));
1.30.2.2 thorpej 2531:
1.30.2.6 nathanw 2532: /* set vm_page => pmap locking */
1.30.2.2 thorpej 2533: PMAP_HEAD_TO_MAP_LOCK();
2534:
1.30.2.6 nathanw 2535: simple_lock(&pg->mdpage.pvh_slock);
1.30.2.2 thorpej 2536:
1.30.2.6 nathanw 2537: pv = pg->mdpage.pvh_list;
2538: if (pv == NULL) {
2539: PDEBUG(0, printf("free page\n"));
2540: simple_unlock(&pg->mdpage.pvh_slock);
2541: PMAP_HEAD_TO_MAP_UNLOCK();
2542: return;
1.30.2.2 thorpej 2543: }
2544: pmap_clean_page(pv, FALSE);
2545:
2546: while (pv) {
2547: pmap = pv->pv_pmap;
2548: ptes = pmap_map_ptes(pmap);
1.30.2.6 nathanw 2549: pte = &ptes[arm_btop(pv->pv_va)];
1.30.2.2 thorpej 2550:
2551: PDEBUG(0, printf("[%p,%08x,%08lx,%08x] ", pmap, *pte,
2552: pv->pv_va, pv->pv_flags));
2553: #ifdef DEBUG
1.30.2.7 nathanw 2554: if (pmap_pde_page(pmap_pde(pmap, pv->pv_va)) == 0 ||
2555: pmap_pte_v(pte) == 0 ||
2556: pmap_pte_pa(pte) != VM_PAGE_TO_PHYS(pg))
1.30.2.17 nathanw 2557: panic("pmap_page_remove: bad mapping");
1.30.2.2 thorpej 2558: #endif /* DEBUG */
2559:
2560: /*
2561: * Update statistics
2562: */
2563: --pmap->pm_stats.resident_count;
2564:
2565: /* Wired bit */
1.30.2.7 nathanw 2566: if (pv->pv_flags & PVF_WIRED)
1.30.2.2 thorpej 2567: --pmap->pm_stats.wired_count;
2568:
2569: /*
2570: * Invalidate the PTEs.
2571: * XXX: should cluster them up and invalidate as many
2572: * as possible at once.
2573: */
2574:
2575: #ifdef needednotdone
2576: reduce wiring count on page table pages as references drop
2577: #endif
2578:
2579: *pte = 0;
1.30.2.14 thorpej 2580: PTE_SYNC_CURRENT(pmap, pte);
1.30.2.2 thorpej 2581: pmap_pte_delref(pmap, pv->pv_va);
2582:
2583: npv = pv->pv_next;
2584: pmap_free_pv(pmap, pv);
2585: pv = npv;
2586: pmap_unmap_ptes(pmap);
2587: }
1.30.2.6 nathanw 2588: pg->mdpage.pvh_list = NULL;
2589: simple_unlock(&pg->mdpage.pvh_slock);
1.30.2.2 thorpej 2590: PMAP_HEAD_TO_MAP_UNLOCK();
2591:
2592: PDEBUG(0, printf("done\n"));
2593: cpu_tlb_flushID();
1.30.2.3 nathanw 2594: cpu_cpwait();
1.30.2.2 thorpej 2595: }
2596:
2597:
2598: /*
2599: * Set the physical protection on the specified range of this map as requested.
2600: */
2601:
2602: void
1.30.2.6 nathanw 2603: pmap_protect(struct pmap *pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
1.30.2.2 thorpej 2604: {
2605: pt_entry_t *pte = NULL, *ptes;
1.30.2.6 nathanw 2606: struct vm_page *pg;
1.30.2.2 thorpej 2607: int flush = 0;
2608:
2609: PDEBUG(0, printf("pmap_protect: pmap=%p %08lx->%08lx %x\n",
2610: pmap, sva, eva, prot));
2611:
2612: if (~prot & VM_PROT_READ) {
1.30.2.12 nathanw 2613: /*
2614: * Just remove the mappings. pmap_update() is not required
2615: * here since the caller should do it.
2616: */
1.30.2.2 thorpej 2617: pmap_remove(pmap, sva, eva);
2618: return;
2619: }
2620: if (prot & VM_PROT_WRITE) {
2621: /*
2622: * If this is a read->write transition, just ignore it and let
2623: * uvm_fault() take care of it later.
2624: */
2625: return;
2626: }
2627:
2628: /* Need to lock map->head */
2629: PMAP_MAP_TO_HEAD_LOCK();
2630:
2631: ptes = pmap_map_ptes(pmap);
1.30.2.8 nathanw 2632:
2633: /*
2634: * OK, at this point, we know we're doing a write-protect operation.
2635: * If the pmap is active, write-back the range.
2636: */
2637: if (pmap_is_curpmap(pmap))
2638: cpu_dcache_wb_range(sva, eva - sva);
2639:
1.30.2.2 thorpej 2640: /*
2641: * We need to acquire a pointer to a page table page before entering
2642: * the following loop.
2643: */
2644: while (sva < eva) {
2645: if (pmap_pde_page(pmap_pde(pmap, sva)))
2646: break;
1.30.2.7 nathanw 2647: sva = (sva & L1_S_FRAME) + L1_S_SIZE;
1.30.2.2 thorpej 2648: }
2649:
1.30.2.6 nathanw 2650: pte = &ptes[arm_btop(sva)];
1.30.2.2 thorpej 2651:
2652: while (sva < eva) {
2653: /* only check once in a while */
1.30.2.7 nathanw 2654: if ((sva & L2_ADDR_BITS) == 0) {
1.30.2.2 thorpej 2655: if (!pmap_pde_page(pmap_pde(pmap, sva))) {
2656: /* We can race ahead here, to the next pde. */
1.30.2.7 nathanw 2657: sva += L1_S_SIZE;
2658: pte += arm_btop(L1_S_SIZE);
1.30.2.2 thorpej 2659: continue;
2660: }
2661: }
2662:
1.30.2.14 thorpej 2663: if (!pmap_pte_v(pte)) {
2664: PTE_FLUSH_ALT(pmap, pte);
1.30.2.2 thorpej 2665: goto next;
1.30.2.14 thorpej 2666: }
1.30.2.2 thorpej 2667:
2668: flush = 1;
2669:
1.30.2.14 thorpej 2670: pg = PHYS_TO_VM_PAGE(pmap_pte_pa(pte));
2671:
1.30.2.12 nathanw 2672: *pte &= ~L2_S_PROT_W; /* clear write bit */
1.30.2.14 thorpej 2673: PTE_SYNC_CURRENT(pmap, pte); /* XXXJRT optimize */
1.30.2.2 thorpej 2674:
2675: /* Clear write flag */
1.30.2.14 thorpej 2676: if (pg != NULL) {
1.30.2.6 nathanw 2677: simple_lock(&pg->mdpage.pvh_slock);
1.30.2.7 nathanw 2678: (void) pmap_modify_pv(pmap, sva, pg, PVF_WRITE, 0);
1.30.2.6 nathanw 2679: pmap_vac_me_harder(pmap, pg, ptes, FALSE);
2680: simple_unlock(&pg->mdpage.pvh_slock);
1.30.2.2 thorpej 2681: }
2682:
1.30.2.12 nathanw 2683: next:
1.30.2.2 thorpej 2684: sva += NBPG;
2685: pte++;
2686: }
2687: pmap_unmap_ptes(pmap);
2688: PMAP_MAP_TO_HEAD_UNLOCK();
2689: if (flush)
2690: cpu_tlb_flushID();
2691: }
2692:
2693: /*
2694: * void pmap_enter(struct pmap *pmap, vaddr_t va, paddr_t pa, vm_prot_t prot,
2695: * int flags)
2696: *
2697: * Insert the given physical page (p) at
2698: * the specified virtual address (v) in the
2699: * target physical map with the protection requested.
2700: *
2701: * If specified, the page will be wired down, meaning
2702: * that the related pte cannot be reclaimed.
2703: *
2704: * NB: This is the only routine which MAY NOT lazy-evaluate
2705: * or lose information. That is, this routine must actually
2706: * insert this page into the given map NOW.
2707: */
2708:
2709: int
1.30.2.6 nathanw 2710: pmap_enter(struct pmap *pmap, vaddr_t va, paddr_t pa, vm_prot_t prot,
2711: int flags)
1.30.2.2 thorpej 2712: {
1.30.2.6 nathanw 2713: pt_entry_t *ptes, opte, npte;
1.30.2.2 thorpej 2714: paddr_t opa;
2715: boolean_t wired = (flags & PMAP_WIRED) != 0;
1.30.2.6 nathanw 2716: struct vm_page *pg;
1.30.2.2 thorpej 2717: struct pv_entry *pve;
1.30.2.6 nathanw 2718: int error, nflags;
1.30.2.18! nathanw 2719: struct vm_page *ptp = NULL;
1.30.2.2 thorpej 2720:
2721: PDEBUG(5, printf("pmap_enter: V%08lx P%08lx in pmap %p prot=%08x, wired = %d\n",
2722: va, pa, pmap, prot, wired));
2723:
2724: #ifdef DIAGNOSTIC
2725: /* Valid address ? */
1.30.2.6 nathanw 2726: if (va >= (pmap_curmaxkvaddr))
1.30.2.2 thorpej 2727: panic("pmap_enter: too big");
2728: if (pmap != pmap_kernel() && va != 0) {
2729: if (va < VM_MIN_ADDRESS || va >= VM_MAXUSER_ADDRESS)
2730: panic("pmap_enter: kernel page in user map");
2731: } else {
2732: if (va >= VM_MIN_ADDRESS && va < VM_MAXUSER_ADDRESS)
2733: panic("pmap_enter: user page in kernel map");
2734: if (va >= VM_MAXUSER_ADDRESS && va < VM_MAX_ADDRESS)
2735: panic("pmap_enter: entering PT page");
2736: }
2737: #endif
1.30.2.7 nathanw 2738:
2739: KDASSERT(((va | pa) & PGOFSET) == 0);
2740:
1.30.2.6 nathanw 2741: /*
2742: * Get a pointer to the page. Later on in this function, we
2743: * test for a managed page by checking pg != NULL.
2744: */
2745: pg = pmap_initialized ? PHYS_TO_VM_PAGE(pa) : NULL;
2746:
1.30.2.2 thorpej 2747: /* get lock */
2748: PMAP_MAP_TO_HEAD_LOCK();
1.30.2.6 nathanw 2749:
1.30.2.2 thorpej 2750: /*
1.30.2.6 nathanw 2751: * map the ptes. If there's not already an L2 table for this
2752: * address, allocate one.
1.30.2.2 thorpej 2753: */
1.30.2.6 nathanw 2754: ptes = pmap_map_ptes(pmap); /* locks pmap */
1.30.2.18! nathanw 2755: /* kernel should be pre-grown */
! 2756: if (pmap != pmap_kernel())
! 2757: {
1.30.2.2 thorpej 2758: /* if failure is allowed then don't try too hard */
1.30.2.14 thorpej 2759: ptp = pmap_get_ptp(pmap, va & PD_FRAME);
1.30.2.2 thorpej 2760: if (ptp == NULL) {
2761: if (flags & PMAP_CANFAIL) {
2762: error = ENOMEM;
2763: goto out;
2764: }
2765: panic("pmap_enter: get ptp failed");
2766: }
2767: }
1.30.2.6 nathanw 2768: opte = ptes[arm_btop(va)];
1.30.2.2 thorpej 2769:
2770: nflags = 0;
2771: if (prot & VM_PROT_WRITE)
1.30.2.7 nathanw 2772: nflags |= PVF_WRITE;
1.30.2.2 thorpej 2773: if (wired)
1.30.2.7 nathanw 2774: nflags |= PVF_WIRED;
1.30.2.2 thorpej 2775:
2776: /* Is the pte valid ? If so then this page is already mapped */
1.30.2.6 nathanw 2777: if (l2pte_valid(opte)) {
1.30.2.2 thorpej 2778: /* Get the physical address of the current page mapped */
1.30.2.6 nathanw 2779: opa = l2pte_pa(opte);
1.30.2.2 thorpej 2780:
2781: /* Are we mapping the same page ? */
2782: if (opa == pa) {
1.30.2.12 nathanw 2783: /* Check to see if we're doing rw->ro. */
2784: if ((opte & L2_S_PROT_W) != 0 &&
2785: (prot & VM_PROT_WRITE) == 0) {
2786: /* Yup, flush the cache if current pmap. */
2787: if (pmap_is_curpmap(pmap))
2788: cpu_dcache_wb_range(va, NBPG);
2789: }
2790:
1.30.2.2 thorpej 2791: /* Has the wiring changed ? */
1.30.2.6 nathanw 2792: if (pg != NULL) {
2793: simple_lock(&pg->mdpage.pvh_slock);
2794: (void) pmap_modify_pv(pmap, va, pg,
1.30.2.7 nathanw 2795: PVF_WRITE | PVF_WIRED, nflags);
1.30.2.6 nathanw 2796: simple_unlock(&pg->mdpage.pvh_slock);
2797: }
1.30.2.2 thorpej 2798: } else {
1.30.2.6 nathanw 2799: struct vm_page *opg;
2800:
1.30.2.2 thorpej 2801: /* We are replacing the page with a new one. */
1.30.2.5 nathanw 2802: cpu_idcache_wbinv_range(va, NBPG);
1.30.2.2 thorpej 2803:
2804: /*
2805: * If it is part of our managed memory then we
2806: * must remove it from the PV list
2807: */
1.30.2.6 nathanw 2808: if ((opg = PHYS_TO_VM_PAGE(opa)) != NULL) {
2809: simple_lock(&opg->mdpage.pvh_slock);
2810: pve = pmap_remove_pv(opg, pmap, va);
2811: simple_unlock(&opg->mdpage.pvh_slock);
1.30.2.2 thorpej 2812: } else {
2813: pve = NULL;
2814: }
2815:
2816: goto enter;
2817: }
2818: } else {
2819: opa = 0;
2820: pve = NULL;
1.30.2.18! nathanw 2821:
! 2822: /* bump ptp ref */
! 2823: if (ptp != NULL)
! 2824: ptp->wire_count++;
1.30.2.2 thorpej 2825:
2826: /* pte is not valid so we must be hooking in a new page */
2827: ++pmap->pm_stats.resident_count;
2828:
2829: enter:
2830: /*
2831: * Enter on the PV list if part of our managed memory
2832: */
1.30.2.6 nathanw 2833: if (pg != NULL) {
1.30.2.2 thorpej 2834: if (pve == NULL) {
2835: pve = pmap_alloc_pv(pmap, ALLOCPV_NEED);
2836: if (pve == NULL) {
2837: if (flags & PMAP_CANFAIL) {
1.30.2.14 thorpej 2838: PTE_FLUSH_ALT(pmap,
2839: ptes[arm_btop(va)]);
1.30.2.2 thorpej 2840: error = ENOMEM;
2841: goto out;
2842: }
1.30.2.6 nathanw 2843: panic("pmap_enter: no pv entries "
2844: "available");
1.30.2.2 thorpej 2845: }
2846: }
2847: /* enter_pv locks pvh when adding */
1.30.2.18! nathanw 2848: pmap_enter_pv(pg, pve, pmap, va, ptp, nflags);
1.30.2.2 thorpej 2849: } else {
2850: if (pve != NULL)
2851: pmap_free_pv(pmap, pve);
2852: }
2853: }
2854:
2855: /* Construct the pte, giving the correct access. */
1.30.2.7 nathanw 2856: npte = pa;
1.30.2.2 thorpej 2857:
2858: /* VA 0 is magic. */
1.30.2.7 nathanw 2859: if (pmap != pmap_kernel() && va != vector_page)
2860: npte |= L2_S_PROT_U;
1.30.2.2 thorpej 2861:
1.30.2.6 nathanw 2862: if (pg != NULL) {
1.30.2.2 thorpej 2863: #ifdef DIAGNOSTIC
2864: if ((flags & VM_PROT_ALL) & ~prot)
2865: panic("pmap_enter: access_type exceeds prot");
2866: #endif
1.30.2.7 nathanw 2867: npte |= pte_l2_s_cache_mode;
1.30.2.2 thorpej 2868: if (flags & VM_PROT_WRITE) {
1.30.2.7 nathanw 2869: npte |= L2_S_PROTO | L2_S_PROT_W;
2870: pg->mdpage.pvh_attrs |= PVF_REF | PVF_MOD;
1.30.2.2 thorpej 2871: } else if (flags & VM_PROT_ALL) {
1.30.2.7 nathanw 2872: npte |= L2_S_PROTO;
2873: pg->mdpage.pvh_attrs |= PVF_REF;
1.30.2.2 thorpej 2874: } else
1.30.2.7 nathanw 2875: npte |= L2_TYPE_INV;
1.30.2.2 thorpej 2876: } else {
2877: if (prot & VM_PROT_WRITE)
1.30.2.7 nathanw 2878: npte |= L2_S_PROTO | L2_S_PROT_W;
1.30.2.2 thorpej 2879: else if (prot & VM_PROT_ALL)
1.30.2.7 nathanw 2880: npte |= L2_S_PROTO;
1.30.2.2 thorpej 2881: else
1.30.2.7 nathanw 2882: npte |= L2_TYPE_INV;
1.30.2.2 thorpej 2883: }
2884:
1.30.2.13 thorpej 2885: #if ARM_MMU_XSCALE == 1 && defined(XSCALE_CACHE_READ_WRITE_ALLOCATE)
2886: #if ARM_NMMUS > 1
2887: # error "XXX Unable to use read/write-allocate and configure non-XScale"
2888: #endif
2889: /*
2890: * XXX BRUTAL HACK! This allows us to limp along with
2891: * XXX the read/write-allocate cache mode.
2892: */
2893: if (pmap == pmap_kernel())
2894: npte &= ~L2_XSCALE_T_TEX(TEX_XSCALE_X);
2895: #endif
1.30.2.6 nathanw 2896: ptes[arm_btop(va)] = npte;
1.30.2.14 thorpej 2897: PTE_SYNC_CURRENT(pmap, &ptes[arm_btop(va)]);
1.30.2.2 thorpej 2898:
1.30.2.6 nathanw 2899: if (pg != NULL) {
2900: simple_lock(&pg->mdpage.pvh_slock);
2901: pmap_vac_me_harder(pmap, pg, ptes, pmap_is_curpmap(pmap));
2902: simple_unlock(&pg->mdpage.pvh_slock);
1.30.2.2 thorpej 2903: }
2904:
2905: /* Better flush the TLB ... */
2906: cpu_tlb_flushID_SE(va);
2907: error = 0;
2908: out:
1.30.2.6 nathanw 2909: pmap_unmap_ptes(pmap); /* unlocks pmap */
1.30.2.2 thorpej 2910: PMAP_MAP_TO_HEAD_UNLOCK();
2911:
2912: return error;
2913: }
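
/*
 * Editor's sketch (hypothetical helper): how pmap_enter() above builds
 * the new PTE for a managed page from the physical address, the
 * protection and the fault access type in `flags'. The pvh_attrs
 * reference/modified bookkeeping is omitted here.
 */
#if 0
static pt_entry_t
make_managed_pte(struct pmap *pmap, vaddr_t va, paddr_t pa, int flags)
{
	pt_entry_t npte = pa | pte_l2_s_cache_mode;

	if (pmap != pmap_kernel() && va != vector_page)
		npte |= L2_S_PROT_U;	/* user-accessible */

	if (flags & VM_PROT_WRITE)	/* write access: valid + writable */
		npte |= L2_S_PROTO | L2_S_PROT_W;
	else if (flags & VM_PROT_ALL)	/* read access: valid, referenced */
		npte |= L2_S_PROTO;
	else				/* no access yet: force ref emulation */
		npte |= L2_TYPE_INV;

	return (npte);
}
#endif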
2914:
1.30.2.6 nathanw 2915: /*
2916: * pmap_kenter_pa: enter a kernel mapping
2917: *
2918: * => no need to lock anything; assume va is already allocated
2919: * => should be faster than normal pmap enter function
2920: */
1.30.2.2 thorpej 2921: void
1.30.2.6 nathanw 2922: pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot)
1.30.2.2 thorpej 2923: {
2924: pt_entry_t *pte;
1.30.2.12 nathanw 2925:
1.30.2.2 thorpej 2926: pte = vtopte(va);
2927: KASSERT(!pmap_pte_v(pte));
1.30.2.7 nathanw 2928:
1.30.2.12 nathanw 2929: #ifdef PMAP_ALIAS_DEBUG
2930: {
2931: struct vm_page *pg;
2932: int s;
2933:
2934: pg = PHYS_TO_VM_PAGE(pa);
2935: if (pg != NULL) {
2936: s = splhigh();
2937: if (pg->mdpage.ro_mappings == 0 &&
2938: pg->mdpage.rw_mappings == 0 &&
2939: pg->mdpage.kro_mappings == 0 &&
2940: pg->mdpage.krw_mappings == 0) {
2941: /* This case is okay. */
2942: } else if (pg->mdpage.rw_mappings == 0 &&
2943: pg->mdpage.krw_mappings == 0 &&
2944: (prot & VM_PROT_WRITE) == 0) {
2945: /* This case is okay. */
2946: } else {
2947: /* Something is awry. */
2948: printf("pmap_kenter_pa: ro %u, rw %u, kro %u, krw %u "
2949: "prot 0x%x\n", pg->mdpage.ro_mappings,
2950: pg->mdpage.rw_mappings, pg->mdpage.kro_mappings,
2951: pg->mdpage.krw_mappings, prot);
2952: Debugger();
2953: }
2954: if (prot & VM_PROT_WRITE)
2955: pg->mdpage.krw_mappings++;
2956: else
2957: pg->mdpage.kro_mappings++;
2958: splx(s);
2959: }
2960: }
2961: #endif /* PMAP_ALIAS_DEBUG */
2962:
1.30.2.7 nathanw 2963: *pte = L2_S_PROTO | pa |
2964: L2_S_PROT(PTE_KERNEL, prot) | pte_l2_s_cache_mode;
1.30.2.14 thorpej 2965: PTE_SYNC(pte);
1.30.2.2 thorpej 2966: }
2967:
2968: void
1.30.2.6 nathanw 2969: pmap_kremove(vaddr_t va, vsize_t len)
1.30.2.2 thorpej 2970: {
2971: pt_entry_t *pte;
1.30.2.14 thorpej 2972: vaddr_t ova = va;
2973: vaddr_t olen = len;
1.30.2.2 thorpej 2974:
2975: for (len >>= PAGE_SHIFT; len > 0; len--, va += PAGE_SIZE) {
2976:
2977: /*
2978: * We assume that we will only be called with small
2979: * regions of memory.
2980: */
2981:
2982: KASSERT(pmap_pde_page(pmap_pde(pmap_kernel(), va)));
2983: pte = vtopte(va);
1.30.2.12 nathanw 2984: #ifdef PMAP_ALIAS_DEBUG
2985: {
2986: struct vm_page *pg;
2987: int s;
2988:
2989: if ((*pte & L2_TYPE_MASK) != L2_TYPE_INV &&
2990: (pg = PHYS_TO_VM_PAGE(*pte & L2_S_FRAME)) != NULL) {
2991: s = splhigh();
2992: if (*pte & L2_S_PROT_W) {
2993: KASSERT(pg->mdpage.krw_mappings != 0);
2994: pg->mdpage.krw_mappings--;
2995: } else {
2996: KASSERT(pg->mdpage.kro_mappings != 0);
2997: pg->mdpage.kro_mappings--;
2998: }
2999: splx(s);
3000: }
3001: }
3002: #endif /* PMAP_ALIAS_DEBUG */
1.30.2.5 nathanw 3003: cpu_idcache_wbinv_range(va, PAGE_SIZE);
1.30.2.2 thorpej 3004: *pte = 0;
3005: cpu_tlb_flushID_SE(va);
3006: }
1.30.2.14 thorpej 3007: PTE_SYNC_RANGE(vtopte(ova), olen >> PAGE_SHIFT);
1.30.2.2 thorpej 3008: }
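
/*
 * Editor's sketch: a typical use of the pair above for a temporary
 * kernel mapping. `scratch_va' is a hypothetical piece of reserved
 * kernel virtual address space supplied by the caller.
 */
#if 0
	pmap_kenter_pa(scratch_va, pa, VM_PROT_READ | VM_PROT_WRITE);
	pmap_update(pmap_kernel());
	memset((void *)scratch_va, 0, PAGE_SIZE);
	pmap_kremove(scratch_va, PAGE_SIZE);
	pmap_update(pmap_kernel());
#endif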
3009:
3010: /*
3011: * pmap_page_protect:
3012: *
3013: * Lower the permission for all mappings to a given page.
3014: */
3015:
3016: void
1.30.2.6 nathanw 3017: pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
1.30.2.2 thorpej 3018: {
3019:
1.30.2.6 nathanw 3020: PDEBUG(0, printf("pmap_page_protect(pa=%lx, prot=%d)\n",
3021: VM_PAGE_TO_PHYS(pg), prot));
1.30.2.2 thorpej 3022:
3023: switch(prot) {
3024: case VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE:
3025: case VM_PROT_READ|VM_PROT_WRITE:
3026: return;
3027:
3028: case VM_PROT_READ:
3029: case VM_PROT_READ|VM_PROT_EXECUTE:
1.30.2.7 nathanw 3030: pmap_clearbit(pg, PVF_WRITE);
1.30.2.2 thorpej 3031: break;
3032:
3033: default:
1.30.2.17 nathanw 3034: pmap_page_remove(pg);
1.30.2.2 thorpej 3035: break;
3036: }
3037: }
3038:
3039:
3040: /*
3041: * Routine: pmap_unwire
3042: * Function: Clear the wired attribute for a map/virtual-address
3043: * pair.
3044: * In/out conditions:
3045: * The mapping must already exist in the pmap.
3046: */
3047:
3048: void
1.30.2.6 nathanw 3049: pmap_unwire(struct pmap *pmap, vaddr_t va)
1.30.2.2 thorpej 3050: {
1.30.2.6 nathanw 3051: pt_entry_t *ptes;
3052: struct vm_page *pg;
1.30.2.2 thorpej 3053: paddr_t pa;
3054:
1.30.2.6 nathanw 3055: PMAP_MAP_TO_HEAD_LOCK();
3056: ptes = pmap_map_ptes(pmap); /* locks pmap */
1.30.2.2 thorpej 3057:
1.30.2.6 nathanw 3058: if (pmap_pde_v(pmap_pde(pmap, va))) {
1.30.2.2 thorpej 3059: #ifdef DIAGNOSTIC
1.30.2.6 nathanw 3060: if (l2pte_valid(ptes[arm_btop(va)]) == 0)
3061: panic("pmap_unwire: invalid L2 PTE");
1.30.2.2 thorpej 3062: #endif
1.30.2.6 nathanw 3063: /* Extract the physical address of the page */
3064: pa = l2pte_pa(ptes[arm_btop(va)]);
1.30.2.14 thorpej 3065: PTE_FLUSH_ALT(pmap, &ptes[arm_btop(va)]);
1.30.2.2 thorpej 3066:
1.30.2.6 nathanw 3067: if ((pg = PHYS_TO_VM_PAGE(pa)) == NULL)
3068: goto out;
3069:
3070: /* Update the wired bit in the pv entry for this page. */
3071: simple_lock(&pg->mdpage.pvh_slock);
1.30.2.7 nathanw 3072: (void) pmap_modify_pv(pmap, va, pg, PVF_WIRED, 0);
1.30.2.6 nathanw 3073: simple_unlock(&pg->mdpage.pvh_slock);
3074: }
3075: #ifdef DIAGNOSTIC
3076: else {
3077: panic("pmap_unwire: invalid L1 PTE");
1.30.2.2 thorpej 3078: }
1.30.2.6 nathanw 3079: #endif
3080: out:
3081: pmap_unmap_ptes(pmap); /* unlocks pmap */
3082: PMAP_MAP_TO_HEAD_UNLOCK();
1.30.2.2 thorpej 3083: }
3084:
3085: /*
3086: * Routine: pmap_extract
3087: * Function:
3088: * Extract the physical page address associated
3089: * with the given map/virtual_address pair.
3090: */
3091: boolean_t
1.30.2.6 nathanw 3092: pmap_extract(struct pmap *pmap, vaddr_t va, paddr_t *pap)
1.30.2.2 thorpej 3093: {
1.30.2.5 nathanw 3094: pd_entry_t *pde;
1.30.2.2 thorpej 3095: pt_entry_t *pte, *ptes;
3096: paddr_t pa;
3097:
1.30.2.7 nathanw 3098: PDEBUG(5, printf("pmap_extract: pmap=%p, va=0x%08lx -> ", pmap, va));
3099:
3100: ptes = pmap_map_ptes(pmap); /* locks pmap */
1.30.2.2 thorpej 3101:
1.30.2.5 nathanw 3102: pde = pmap_pde(pmap, va);
1.30.2.6 nathanw 3103: pte = &ptes[arm_btop(va)];
1.30.2.2 thorpej 3104:
1.30.2.5 nathanw 3105: if (pmap_pde_section(pde)) {
1.30.2.7 nathanw 3106: pa = (*pde & L1_S_FRAME) | (va & L1_S_OFFSET);
3107: PDEBUG(5, printf("section pa=0x%08lx\n", pa));
1.30.2.5 nathanw 3108: goto out;
3109: } else if (pmap_pde_page(pde) == 0 || pmap_pte_v(pte) == 0) {
1.30.2.7 nathanw 3110: PDEBUG(5, printf("no mapping\n"));
3111: goto failed;
1.30.2.2 thorpej 3112: }
3113:
1.30.2.7 nathanw 3114: if ((*pte & L2_TYPE_MASK) == L2_TYPE_L) {
3115: pa = (*pte & L2_L_FRAME) | (va & L2_L_OFFSET);
3116: PDEBUG(5, printf("large page pa=0x%08lx\n", pa));
1.30.2.5 nathanw 3117: goto out;
3118: }
1.30.2.2 thorpej 3119:
1.30.2.7 nathanw 3120: pa = (*pte & L2_S_FRAME) | (va & L2_S_OFFSET);
3121: PDEBUG(5, printf("small page pa=0x%08lx\n", pa));
1.30.2.2 thorpej 3122:
1.30.2.5 nathanw 3123: out:
1.30.2.7 nathanw 3124: if (pap != NULL)
3125: *pap = pa;
3126:
1.30.2.14 thorpej 3127: PTE_FLUSH_ALT(pmap, &ptes[arm_btop(va)]);
1.30.2.7 nathanw 3128: pmap_unmap_ptes(pmap); /* unlocks pmap */
3129: return (TRUE);
3130:
3131: failed:
1.30.2.14 thorpej 3132: PTE_FLUSH_ALT(pmap, &ptes[arm_btop(va)]);
1.30.2.7 nathanw 3133: pmap_unmap_ptes(pmap); /* unlocks pmap */
3134: return (FALSE);
1.30.2.2 thorpej 3135: }
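
/*
 * Editor's sketch (hypothetical helper): the three translation cases
 * pmap_extract() above distinguishes, given an already-valid L1/L2
 * entry pair.
 */
#if 0
static paddr_t
decode_pa(pd_entry_t pde, pt_entry_t pte, vaddr_t va)
{
	if (pmap_pde_section(&pde))		/* 1MB section mapping */
		return ((pde & L1_S_FRAME) | (va & L1_S_OFFSET));
	if ((pte & L2_TYPE_MASK) == L2_TYPE_L)	/* 64KB large page */
		return ((pte & L2_L_FRAME) | (va & L2_L_OFFSET));
	/* otherwise a 4KB small page */
	return ((pte & L2_S_FRAME) | (va & L2_S_OFFSET));
}
#endif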
3136:
3137:
3138: /*
1.30.2.6 nathanw 3139: * pmap_copy:
3140: *
3141: * Copy the range specified by src_addr/len from the source map to the
3142: * range dst_addr/len in the destination map.
1.30.2.2 thorpej 3143: *
1.30.2.6 nathanw 3144: * This routine is only advisory and need not do anything.
1.30.2.2 thorpej 3145: */
1.30.2.6 nathanw 3146: /* Call deleted in <arm/arm32/pmap.h> */
1.30.2.2 thorpej 3147:
3148: #if defined(PMAP_DEBUG)
3149: void
3150: pmap_dump_pvlist(phys, m)
3151: vaddr_t phys;
3152: char *m;
3153: {
1.30.2.6 nathanw 3154: struct vm_page *pg;
1.30.2.2 thorpej 3155: struct pv_entry *pv;
3156:
1.30.2.6 nathanw 3157: if ((pg = PHYS_TO_VM_PAGE(phys)) == NULL) {
1.30.2.2 thorpej 3158: printf("INVALID PA\n");
3159: return;
3160: }
1.30.2.6 nathanw 3161: simple_lock(&pg->mdpage.pvh_slock);
1.30.2.2 thorpej 3162: printf("%s %08lx:", m, phys);
1.30.2.6 nathanw 3163: if (pg->mdpage.pvh_list == NULL) {
1.30.2.8 nathanw 3164: simple_unlock(&pg->mdpage.pvh_slock);
1.30.2.2 thorpej 3165: printf(" no mappings\n");
3166: return;
3167: }
3168:
1.30.2.6 nathanw 3169: for (pv = pg->mdpage.pvh_list; pv; pv = pv->pv_next)
1.30.2.2 thorpej 3170: printf(" pmap %p va %08lx flags %08x", pv->pv_pmap,
3171: pv->pv_va, pv->pv_flags);
3172:
3173: printf("\n");
1.30.2.6 nathanw 3174: simple_unlock(&pg->mdpage.pvh_slock);
1.30.2.2 thorpej 3175: }
3176:
3177: #endif /* PMAP_DEBUG */
3178:
3179: static pt_entry_t *
3180: pmap_map_ptes(struct pmap *pmap)
3181: {
1.30.2.6 nathanw 3182: struct proc *p;
1.30.2.2 thorpej 3183:
3184: /* the kernel's pmap is always accessible */
3185: if (pmap == pmap_kernel()) {
1.30.2.6 nathanw 3186: return (pt_entry_t *)PTE_BASE;
1.30.2.2 thorpej 3187: }
3188:
3189: if (pmap_is_curpmap(pmap)) {
3190: simple_lock(&pmap->pm_obj.vmobjlock);
1.30.2.6 nathanw 3191: return (pt_entry_t *)PTE_BASE;
1.30.2.2 thorpej 3192: }
1.30.2.6 nathanw 3193:
1.30.2.10 nathanw 3194: p = curproc;
3195: KDASSERT(p != NULL);
1.30.2.2 thorpej 3196:
3197: /* need to lock both curpmap and pmap: use ordered locking */
1.30.2.6 nathanw 3198: if ((vaddr_t) pmap < (vaddr_t) p->p_vmspace->vm_map.pmap) {
1.30.2.2 thorpej 3199: simple_lock(&pmap->pm_obj.vmobjlock);
3200: simple_lock(&p->p_vmspace->vm_map.pmap->pm_obj.vmobjlock);
3201: } else {
3202: simple_lock(&p->p_vmspace->vm_map.pmap->pm_obj.vmobjlock);
3203: simple_lock(&pmap->pm_obj.vmobjlock);
3204: }
3205:
1.30.2.14 thorpej 3206: pmap_map_in_l1(p->p_vmspace->vm_map.pmap, APTE_BASE,
3207: pmap->pm_pptpt, 0);
1.30.2.2 thorpej 3208: cpu_tlb_flushD();
1.30.2.3 nathanw 3209: cpu_cpwait();
1.30.2.6 nathanw 3210: return (pt_entry_t *)APTE_BASE;
1.30.2.2 thorpej 3211: }
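
/*
 * Editor's sketch (hypothetical helper): the ordered-locking idiom
 * used above. Acquiring the two pmap locks in ascending address order
 * means any two threads locking the same pair always take them in the
 * same order, so they cannot deadlock against each other.
 */
#if 0
static void
lock_two_pmaps(struct pmap *a, struct pmap *b)
{
	if ((vaddr_t)a < (vaddr_t)b) {
		simple_lock(&a->pm_obj.vmobjlock);
		simple_lock(&b->pm_obj.vmobjlock);
	} else {
		simple_lock(&b->pm_obj.vmobjlock);
		simple_lock(&a->pm_obj.vmobjlock);
	}
}
#endif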
3212:
3213: /*
3214: * pmap_unmap_ptes: unlock the PTE mapping of "pmap"
3215: */
3216:
3217: static void
1.30.2.6 nathanw 3218: pmap_unmap_ptes(struct pmap *pmap)
1.30.2.2 thorpej 3219: {
3220:
3221: if (pmap == pmap_kernel()) {
3222: return;
3223: }
3224: if (pmap_is_curpmap(pmap)) {
3225: simple_unlock(&pmap->pm_obj.vmobjlock);
3226: } else {
1.30.2.10 nathanw 3227: KDASSERT(curproc != NULL);
1.30.2.2 thorpej 3228: simple_unlock(&pmap->pm_obj.vmobjlock);
1.30.2.6 nathanw 3229: simple_unlock(
1.30.2.9 nathanw 3230: &curproc->p_vmspace->vm_map.pmap->pm_obj.vmobjlock);
1.30.2.2 thorpej 3231: }
3232: }
3233:
3234: /*
3235: * Modify pte bits for all ptes corresponding to the given physical address.
3236: * We use `maskbits' rather than `clearbits' because we're always passing
3237: * constants and the latter would require an extra inversion at run-time.
3238: */
3239:
3240: static void
1.30.2.6 nathanw 3241: pmap_clearbit(struct vm_page *pg, u_int maskbits)
1.30.2.2 thorpej 3242: {
3243: struct pv_entry *pv;
1.30.2.12 nathanw 3244: pt_entry_t *ptes, npte, opte;
1.30.2.2 thorpej 3245: vaddr_t va;
3246:
3247: PDEBUG(1, printf("pmap_clearbit: pa=%08lx mask=%08x\n",
1.30.2.6 nathanw 3248: VM_PAGE_TO_PHYS(pg), maskbits));
1.30.2.2 thorpej 3249:
3250: PMAP_HEAD_TO_MAP_LOCK();
1.30.2.6 nathanw 3251: simple_lock(&pg->mdpage.pvh_slock);
1.30.2.2 thorpej 3252:
3253: /*
3254: * Clear saved attributes (modify, reference)
3255: */
1.30.2.6 nathanw 3256: pg->mdpage.pvh_attrs &= ~maskbits;
1.30.2.2 thorpej 3257:
1.30.2.6 nathanw 3258: if (pg->mdpage.pvh_list == NULL) {
3259: simple_unlock(&pg->mdpage.pvh_slock);
1.30.2.2 thorpej 3260: PMAP_HEAD_TO_MAP_UNLOCK();
3261: return;
3262: }
3263:
3264: /*
3265: * Loop over all current mappings, setting/clearing as appropriate
3266: */
1.30.2.6 nathanw 3267: for (pv = pg->mdpage.pvh_list; pv; pv = pv->pv_next) {
1.30.2.12 nathanw 3268: #ifdef PMAP_ALIAS_DEBUG
3269: {
3270: int s = splhigh();
3271: if ((maskbits & PVF_WRITE) != 0 &&
3272: (pv->pv_flags & PVF_WRITE) != 0) {
3273: KASSERT(pg->mdpage.rw_mappings != 0);
3274: pg->mdpage.rw_mappings--;
3275: pg->mdpage.ro_mappings++;
3276: }
3277: splx(s);
3278: }
3279: #endif /* PMAP_ALIAS_DEBUG */
1.30.2.2 thorpej 3280: va = pv->pv_va;
3281: pv->pv_flags &= ~maskbits;
1.30.2.6 nathanw 3282: ptes = pmap_map_ptes(pv->pv_pmap); /* locks pmap */
3283: KASSERT(pmap_pde_v(pmap_pde(pv->pv_pmap, va)));
1.30.2.12 nathanw 3284: npte = opte = ptes[arm_btop(va)];
1.30.2.7 nathanw 3285: if (maskbits & (PVF_WRITE|PVF_MOD)) {
3286: if ((pv->pv_flags & PVF_NC)) {
1.30.2.2 thorpej 3287: /*
3288: * Entry is not cacheable: reenable
3289: * the cache, nothing to flush
3290: *
3291: * Don't turn caching on again if this
3292: * is a modified emulation. This
3293: * would be inconsistent with the
3294: * settings created by
3295: * pmap_vac_me_harder().
3296: *
3297: * There's no need to call
3298: * pmap_vac_me_harder() here: all
3299: * pages are losing their write
3300: * permission.
3301: *
3302: */
1.30.2.7 nathanw 3303: if (maskbits & PVF_WRITE) {
1.30.2.12 nathanw 3304: npte |= pte_l2_s_cache_mode;
1.30.2.7 nathanw 3305: pv->pv_flags &= ~PVF_NC;
1.30.2.2 thorpej 3306: }
1.30.2.6 nathanw 3307: } else if (pmap_is_curpmap(pv->pv_pmap)) {
1.30.2.2 thorpej 3308: /*
3309: * Entry is cacheable: check if the pmap is
3310: * current, and if it is, flush the entry;
3311: * otherwise it won't be in the cache.
3312: */
1.30.2.5 nathanw 3313: cpu_idcache_wbinv_range(pv->pv_va, NBPG);
1.30.2.6 nathanw 3314: }
1.30.2.2 thorpej 3315:
3316: /* make the pte read-only */
1.30.2.12 nathanw 3317: npte &= ~L2_S_PROT_W;
1.30.2.2 thorpej 3318: }
3319:
1.30.2.12 nathanw 3320: if (maskbits & PVF_REF) {
3321: if (pmap_is_curpmap(pv->pv_pmap) &&
3322: (pv->pv_flags & PVF_NC) == 0) {
3323: /*
3324: * Check npte here; we may have already
3325: * done the wbinv above, and the validity
3326: * of the PTE is the same for opte and
3327: * npte.
3328: */
3329: if (npte & L2_S_PROT_W) {
3330: cpu_idcache_wbinv_range(pv->pv_va,
3331: NBPG);
3332: } else if ((npte & L2_TYPE_MASK)
3333: != L2_TYPE_INV) {
3334: /* XXXJRT need idcache_inv_range */
3335: cpu_idcache_wbinv_range(pv->pv_va,
3336: NBPG);
3337: }
3338: }
3339:
3340: /* make the pte invalid */
3341: npte = (npte & ~L2_TYPE_MASK) | L2_TYPE_INV;
3342: }
1.30.2.2 thorpej 3343:
1.30.2.12 nathanw 3344: if (npte != opte) {
3345: ptes[arm_btop(va)] = npte;
1.30.2.14 thorpej 3346: PTE_SYNC_CURRENT(pv->pv_pmap, &ptes[arm_btop(va)]);
1.30.2.12 nathanw 3347: /* Flush the TLB entry if a current pmap. */
3348: if (pmap_is_curpmap(pv->pv_pmap))
3349: cpu_tlb_flushID_SE(pv->pv_va);
1.30.2.14 thorpej 3350: } else
3351: PTE_FLUSH_ALT(pv->pv_pmap, &ptes[arm_btop(va)]);
1.30.2.12 nathanw 3352:
1.30.2.6 nathanw 3353: pmap_unmap_ptes(pv->pv_pmap); /* unlocks pmap */
1.30.2.2 thorpej 3354: }
1.30.2.3 nathanw 3355: cpu_cpwait();
1.30.2.2 thorpej 3356:
1.30.2.6 nathanw 3357: simple_unlock(&pg->mdpage.pvh_slock);
1.30.2.2 thorpej 3358: PMAP_HEAD_TO_MAP_UNLOCK();
3359: }
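
/*
 * Illustrative note (hypothetical calls): callers pass the mask of
 * bits to clear as a constant, so the compiler folds the complement
 * in "pv->pv_flags &= ~maskbits" at compile time; an interface taking
 * pre-inverted bits would need the inversion done at run-time.
 */
#if 0	/* example only */
	pmap_clearbit(pg, PVF_MOD);		/* clear "modified" */
	pmap_clearbit(pg, PVF_WRITE);		/* revoke write permission */
#endif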
3360:
1.30.2.6 nathanw 3361: /*
3362: * pmap_clear_modify:
3363: *
3364: * Clear the "modified" attribute for a page.
3365: */
1.30.2.2 thorpej 3366: boolean_t
1.30.2.6 nathanw 3367: pmap_clear_modify(struct vm_page *pg)
1.30.2.2 thorpej 3368: {
3369: boolean_t rv;
3370:
1.30.2.7 nathanw 3371: if (pg->mdpage.pvh_attrs & PVF_MOD) {
1.30.2.6 nathanw 3372: rv = TRUE;
1.30.2.7 nathanw 3373: pmap_clearbit(pg, PVF_MOD);
1.30.2.6 nathanw 3374: } else
3375: rv = FALSE;
1.30.2.2 thorpej 3376:
1.30.2.6 nathanw 3377: PDEBUG(0, printf("pmap_clear_modify pa=%08lx -> %d\n",
3378: VM_PAGE_TO_PHYS(pg), rv));
3379:
3380: return (rv);
3381: }
1.30.2.2 thorpej 3382:
1.30.2.6 nathanw 3383: /*
3384: * pmap_clear_reference:
3385: *
3386: * Clear the "referenced" attribute for a page.
3387: */
1.30.2.2 thorpej 3388: boolean_t
1.30.2.6 nathanw 3389: pmap_clear_reference(struct vm_page *pg)
1.30.2.2 thorpej 3390: {
3391: boolean_t rv;
3392:
1.30.2.7 nathanw 3393: if (pg->mdpage.pvh_attrs & PVF_REF) {
1.30.2.6 nathanw 3394: rv = TRUE;
1.30.2.7 nathanw 3395: pmap_clearbit(pg, PVF_REF);
1.30.2.6 nathanw 3396: } else
3397: rv = FALSE;
1.30.2.2 thorpej 3398:
1.30.2.6 nathanw 3399: PDEBUG(0, printf("pmap_clear_reference pa=%08lx -> %d\n",
3400: VM_PAGE_TO_PHYS(pg), rv));
1.30.2.2 thorpej 3401:
1.30.2.6 nathanw 3402: return (rv);
1.30.2.2 thorpej 3403: }
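
/*
 * Illustrative sketch (hypothetical, simplified): the VM system uses
 * the test/clear pairs above when aging pages, along these lines:
 */
#if 0	/* example only */
	if (pmap_is_referenced(pg)) {
		/* recently used: clear the bit and give it another pass */
		pmap_clear_reference(pg);
	} else if (pmap_is_modified(pg)) {
		/* idle but dirty: clean it before it can be reclaimed */
		pmap_clear_modify(pg);
		/* ... start pageout of pg ... */
	}
#endif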
3404:
1.30.2.6 nathanw 3405: /*
3406: * pmap_is_modified:
3407: *
3408: * Test if a page has the "modified" attribute.
3409: */
3410: /* See <arm/arm32/pmap.h> */
1.30.2.2 thorpej 3411:
1.30.2.6 nathanw 3412: /*
3413: * pmap_is_referenced:
3414: *
3415: * Test if a page has the "referenced" attribute.
3416: */
3417: /* See <arm/arm32/pmap.h> */
1.30.2.2 thorpej 3418:
3419: int
1.30.2.6 nathanw 3420: pmap_modified_emulation(struct pmap *pmap, vaddr_t va)
1.30.2.2 thorpej 3421: {
1.30.2.6 nathanw 3422: pt_entry_t *ptes;
3423: struct vm_page *pg;
1.30.2.2 thorpej 3424: paddr_t pa;
3425: u_int flags;
1.30.2.6 nathanw 3426: int rv = 0;
1.30.2.2 thorpej 3427:
3428: PDEBUG(2, printf("pmap_modified_emulation\n"));
3429:
1.30.2.6 nathanw 3430: PMAP_MAP_TO_HEAD_LOCK();
3431: ptes = pmap_map_ptes(pmap); /* locks pmap */
3432:
3433: if (pmap_pde_v(pmap_pde(pmap, va)) == 0) {
3434: PDEBUG(2, printf("L1 PTE invalid\n"));
3435: goto out;
1.30.2.2 thorpej 3436: }
3437:
1.30.2.6 nathanw 3438: PDEBUG(1, printf("pte=%08x\n", ptes[arm_btop(va)]));
3439:
1.30.2.14 thorpej 3440: /*
3441: * Don't need to PTE_FLUSH_ALT() here; this is always done
3442: * with the current pmap.
3443: */
3444:
1.30.2.6 nathanw 3445: /* Check for an invalid PTE */
3446: if (l2pte_valid(ptes[arm_btop(va)]) == 0)
3447: goto out;
1.30.2.2 thorpej 3448:
3449: /* PTE already writable: nothing to emulate (e.g. user access to kernel memory). */
1.30.2.7 nathanw 3450: if ((ptes[arm_btop(va)] & L2_S_PROT_W) != 0)
1.30.2.6 nathanw 3451: goto out;
1.30.2.2 thorpej 3452:
3453: /* Extract the physical address of the page */
1.30.2.6 nathanw 3454: pa = l2pte_pa(ptes[arm_btop(va)]);
3455: if ((pg = PHYS_TO_VM_PAGE(pa)) == NULL)
3456: goto out;
1.30.2.2 thorpej 3457:
3458: /* Get the current flags for this page. */
1.30.2.6 nathanw 3459: simple_lock(&pg->mdpage.pvh_slock);
1.30.2.2 thorpej 3460:
1.30.2.6 nathanw 3461: flags = pmap_modify_pv(pmap, va, pg, 0, 0);
1.30.2.2 thorpej 3462: PDEBUG(2, printf("pmap_modified_emulation: flags = %08x\n", flags));
3463:
3464: /*
3465: * Do the flags say this page is writable? If not, then it is a
3466: * genuine write fault. If so, then the write fault is our fault,
3467: * as we did not reflect the write access in the PTE. Now that we
3468: * know a write has occurred, we can correct this and also set
3469: * the modified bit.
3470: */
1.30.2.7 nathanw 3471: if (~flags & PVF_WRITE) {
1.30.2.6 nathanw 3472: simple_unlock(&pg->mdpage.pvh_slock);
3473: goto out;
1.30.2.2 thorpej 3474: }
3475:
1.30.2.6 nathanw 3476: PDEBUG(0,
3477: printf("pmap_modified_emulation: Got a hit va=%08lx, pte = %08x\n",
3478: va, ptes[arm_btop(va)]));
1.30.2.7 nathanw 3479: pg->mdpage.pvh_attrs |= PVF_REF | PVF_MOD;
1.30.2.2 thorpej 3480:
3481: /*
3482: * Re-enable write permissions for the page. No need to call
3483: * pmap_vac_me_harder(), since this is just a
1.30.2.7 nathanw 3484: * modified-emulation fault, and the PVF_WRITE bit isn't changing.
3485: * We've already set the cacheable bits based on the assumption
3486: * that we can write to this page.
1.30.2.2 thorpej 3487: */
1.30.2.6 nathanw 3488: ptes[arm_btop(va)] =
1.30.2.7 nathanw 3489: (ptes[arm_btop(va)] & ~L2_TYPE_MASK) | L2_S_PROTO | L2_S_PROT_W;
1.30.2.14 thorpej 3490: PTE_SYNC(&ptes[arm_btop(va)]);
1.30.2.6 nathanw 3491: PDEBUG(0, printf("->(%08x)\n", ptes[arm_btop(va)]));
3492:
3493: simple_unlock(&pg->mdpage.pvh_slock);
1.30.2.2 thorpej 3494:
3495: cpu_tlb_flushID_SE(va);
1.30.2.3 nathanw 3496: cpu_cpwait();
1.30.2.6 nathanw 3497: rv = 1;
3498: out:
3499: pmap_unmap_ptes(pmap); /* unlocks pmap */
3500: PMAP_MAP_TO_HEAD_UNLOCK();
3501: return (rv);
1.30.2.2 thorpej 3502: }
3503:
3504: int
1.30.2.6 nathanw 3505: pmap_handled_emulation(struct pmap *pmap, vaddr_t va)
1.30.2.2 thorpej 3506: {
1.30.2.6 nathanw 3507: pt_entry_t *ptes;
3508: struct vm_page *pg;
1.30.2.2 thorpej 3509: paddr_t pa;
1.30.2.6 nathanw 3510: int rv = 0;
1.30.2.2 thorpej 3511:
3512: PDEBUG(2, printf("pmap_handled_emulation\n"));
3513:
1.30.2.6 nathanw 3514: PMAP_MAP_TO_HEAD_LOCK();
3515: ptes = pmap_map_ptes(pmap); /* locks pmap */
3516:
3517: if (pmap_pde_v(pmap_pde(pmap, va)) == 0) {
3518: PDEBUG(2, printf("L1 PTE invalid\n"));
3519: goto out;
1.30.2.2 thorpej 3520: }
3521:
1.30.2.6 nathanw 3522: PDEBUG(1, printf("pte=%08x\n", ptes[arm_btop(va)]));
3523:
1.30.2.14 thorpej 3524: /*
3525: * Don't need to PTE_FLUSH_ALT() here; this is always done
3526: * with the current pmap.
3527: */
3528:
1.30.2.6 nathanw 3529: /* Check for invalid pte */
3530: if (l2pte_valid(ptes[arm_btop(va)]) == 0)
3531: goto out;
1.30.2.2 thorpej 3532:
3533: /* PTE already valid: nothing to handle (e.g. user access to kernel memory). */
1.30.2.7 nathanw 3534: if ((ptes[arm_btop(va)] & L2_TYPE_MASK) != L2_TYPE_INV)
1.30.2.6 nathanw 3535: goto out;
1.30.2.2 thorpej 3536:
3537: /* Extract the physical address of the page */
1.30.2.6 nathanw 3538: pa = l2pte_pa(ptes[arm_btop(va)]);
3539: if ((pg = PHYS_TO_VM_PAGE(pa)) == NULL)
3540: goto out;
3541:
3542: simple_lock(&pg->mdpage.pvh_slock);
1.30.2.2 thorpej 3543:
3544: /*
3545: * OK, we just enable the PTE and mark the attributes as handled.
1.30.2.6 nathanw 3546: * XXX Should we traverse the PV list and enable all PTEs?
1.30.2.2 thorpej 3547: */
1.30.2.6 nathanw 3548: PDEBUG(0,
3549: printf("pmap_handled_emulation: Got a hit va=%08lx pte = %08x\n",
3550: va, ptes[arm_btop(va)]));
1.30.2.7 nathanw 3551: pg->mdpage.pvh_attrs |= PVF_REF;
1.30.2.6 nathanw 3552:
1.30.2.7 nathanw 3553: ptes[arm_btop(va)] = (ptes[arm_btop(va)] & ~L2_TYPE_MASK) | L2_S_PROTO;
1.30.2.14 thorpej 3554: PTE_SYNC(&ptes[arm_btop(va)]);
1.30.2.6 nathanw 3555: PDEBUG(0, printf("->(%08x)\n", ptes[arm_btop(va)]));
3556:
3557: simple_unlock(&pg->mdpage.pvh_slock);
1.30.2.2 thorpej 3558:
3559: cpu_tlb_flushID_SE(va);
1.30.2.3 nathanw 3560: cpu_cpwait();
1.30.2.6 nathanw 3561: rv = 1;
3562: out:
3563: pmap_unmap_ptes(pmap); /* unlocks pmap */
3564: PMAP_MAP_TO_HEAD_UNLOCK();
3565: return (rv);
1.30.2.2 thorpej 3566: }
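
/*
 * Illustrative sketch (simplified fault path; "map", "ftype" etc. are
 * hypothetical locals): the abort handlers give the pmap a chance to
 * satisfy a fault by emulating the referenced and modified bits before
 * handing the fault to uvm_fault().
 */
#if 0	/* example only */
	/* Write fault on a page deliberately mapped read-only? */
	if (pmap_modified_emulation(pmap, va))
		return;				/* fixed up; retry */
	/* Access to a page deliberately left invalid? */
	if (pmap_handled_emulation(pmap, va))
		return;				/* fixed up; retry */
	/* Otherwise it's a genuine fault. */
	error = uvm_fault(map, va, 0, ftype);
#endif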
3567:
3568: /*
3569: * pmap_collect: free resources held by a pmap
3570: *
3571: * => optional function.
3572: * => called when a process is swapped out to free memory.
3573: */
3574:
3575: void
1.30.2.6 nathanw 3576: pmap_collect(struct pmap *pmap)
1.30.2.2 thorpej 3577: {
3578: }
3579:
3580: /*
3581: * Routine: pmap_procwr
3582: *
3583: * Function:
3584: * Synchronize caches corresponding to [addr, addr+len) in p.
3585: *
3586: */
3587: void
1.30.2.6 nathanw 3588: pmap_procwr(struct proc *p, vaddr_t va, int len)
1.30.2.2 thorpej 3589: {
3590: /* We only need to do anything if it is the current process. */
1.30.2.10 nathanw 3591: if (p == curproc)
1.30.2.5 nathanw 3592: cpu_icache_sync_range(va, len);
1.30.2.2 thorpej 3593: }
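
/*
 * Illustrative sketch (hypothetical caller): pmap_procwr() is intended
 * for code-modification paths, e.g. a debugger that has just written a
 * breakpoint instruction into another process' text and must make the
 * I-cache coherent with the new instruction.
 */
#if 0	/* example only */
	/* after writing "insn" into p's address space at "addr": */
	pmap_procwr(p, addr, sizeof(insn));
#endif
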
3594: /*
3595: * PTP functions
3596: */
3597:
3598: /*
3599: * pmap_get_ptp: get a PTP (if there isn't one, allocate a new one)
3600: *
3601: * => pmap should NOT be pmap_kernel()
3602: * => pmap should be locked
3603: */
3604:
3605: static struct vm_page *
1.30.2.6 nathanw 3606: pmap_get_ptp(struct pmap *pmap, vaddr_t va)
1.30.2.2 thorpej 3607: {
1.30.2.6 nathanw 3608: struct vm_page *ptp;
1.30.2.18! nathanw 3609: pd_entry_t *pde;
1.30.2.2 thorpej 3610:
1.30.2.14 thorpej 3611: KASSERT((va & PD_OFFSET) == 0); /* XXX KDASSERT */
3612:
1.30.2.18! nathanw 3613: pde = pmap_pde(pmap, va);
! 3614: if (pmap_pde_v(pde)) {
1.30.2.6 nathanw 3615: /* valid... check hint (saves us a PA->PG lookup) */
3616: if (pmap->pm_ptphint &&
1.30.2.18! nathanw 3617: ((*pde) & L2_S_FRAME) ==
1.30.2.6 nathanw 3618: VM_PAGE_TO_PHYS(pmap->pm_ptphint))
3619: return (pmap->pm_ptphint);
3620: ptp = uvm_pagelookup(&pmap->pm_obj, va);
1.30.2.2 thorpej 3621: #ifdef DIAGNOSTIC
1.30.2.6 nathanw 3622: if (ptp == NULL)
3623: panic("pmap_get_ptp: unmanaged user PTP");
1.30.2.2 thorpej 3624: #endif
1.30.2.6 nathanw 3625: pmap->pm_ptphint = ptp;
3626: return (ptp);
3627: }
1.30.2.2 thorpej 3628:
1.30.2.6 nathanw 3629: /* allocate a new PTP (updates ptphint) */
1.30.2.14 thorpej 3630: return (pmap_alloc_ptp(pmap, va));
1.30.2.2 thorpej 3631: }
3632:
3633: /*
3634: * pmap_alloc_ptp: allocate a PTP for a PMAP
3635: *
3636: * => pmap should already be locked by caller
3637: * => we use the ptp's wire_count to count the number of active mappings
3638: * in the PTP (we start it at one to prevent any chance this PTP
3639: * will ever leak onto the active/inactive queues)
3640: */
3641:
3642: /*__inline */ static struct vm_page *
1.30.2.6 nathanw 3643: pmap_alloc_ptp(struct pmap *pmap, vaddr_t va)
1.30.2.2 thorpej 3644: {
3645: struct vm_page *ptp;
3646:
1.30.2.14 thorpej 3647: KASSERT((va & PD_OFFSET) == 0); /* XXX KDASSERT */
3648:
1.30.2.2 thorpej 3649: ptp = uvm_pagealloc(&pmap->pm_obj, va, NULL,
3650: UVM_PGA_USERESERVE|UVM_PGA_ZERO);
1.30.2.6 nathanw 3651: if (ptp == NULL)
1.30.2.2 thorpej 3652: return (NULL);
3653:
3654: /* got one! */
3655: ptp->flags &= ~PG_BUSY; /* never busy */
3656: ptp->wire_count = 1; /* no mappings yet */
1.30.2.14 thorpej 3657: pmap_map_in_l1(pmap, va, VM_PAGE_TO_PHYS(ptp),
3658: PMAP_PTP_SELFREF | PMAP_PTP_CACHEABLE);
1.30.2.2 thorpej 3659: pmap->pm_stats.resident_count++; /* count PTP as resident */
1.30.2.6 nathanw 3660: pmap->pm_ptphint = ptp;
1.30.2.2 thorpej 3661: return (ptp);
3662: }
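
/*
 * Illustrative note (hypothetical fragments, following the comment
 * above pmap_alloc_ptp): wire_count starts at 1 for the PTP itself,
 * so a PTP whose last mapping has been removed is back at
 * wire_count == 1 and can be reclaimed.
 */
#if 0	/* example only */
	ptp->wire_count++;			/* enter: new mapping in PTP */
	if (--ptp->wire_count <= 1) {		/* remove: last mapping gone */
		/* ... unlink and free the PTP ... */
	}
#endif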
3663:
1.30.2.6 nathanw 3664: vaddr_t
3665: pmap_growkernel(vaddr_t maxkvaddr)
3666: {
3667: struct pmap *kpm = pmap_kernel(), *pm;
3668: int s;
3669: paddr_t ptaddr;
3670: struct vm_page *ptp;
3671:
3672: if (maxkvaddr <= pmap_curmaxkvaddr)
3673: goto out; /* we are OK */
3674: NPDEBUG(PDB_GROWKERN, printf("pmap_growkernel: growing kernel from %lx to %lx\n",
3675: pmap_curmaxkvaddr, maxkvaddr));
3676:
3677: /*
3678: * whoops! we need to add kernel PTPs
3679: */
3680:
3681: s = splhigh(); /* to be safe */
3682: simple_lock(&kpm->pm_obj.vmobjlock);
3683: /* due to the way the arm pmap works we map 4MB at a time */
3684: for (/*null*/ ; pmap_curmaxkvaddr < maxkvaddr;
1.30.2.7 nathanw 3685: pmap_curmaxkvaddr += 4 * L1_S_SIZE) {
1.30.2.6 nathanw 3686:
3687: if (uvm.page_init_done == FALSE) {
3688:
3689: /*
3690: * we're growing the kernel pmap early (from
3691: * uvm_pageboot_alloc()). this case must be
3692: * handled a little differently.
3693: */
3694:
3695: if (uvm_page_physget(&ptaddr) == FALSE)
3696: panic("pmap_growkernel: out of memory");
3697: pmap_zero_page(ptaddr);
3698:
3699: /* map this page in */
1.30.2.14 thorpej 3700: pmap_map_in_l1(kpm, pmap_curmaxkvaddr, ptaddr,
3701: PMAP_PTP_SELFREF | PMAP_PTP_CACHEABLE);
1.30.2.6 nathanw 3702:
3703: /* count PTP as resident */
3704: kpm->pm_stats.resident_count++;
3705: continue;
3706: }
3707:
3708: /*
3709: * THIS *MUST* BE CODED SO AS TO WORK IN THE
3710: * pmap_initialized == FALSE CASE! WE MAY BE
3711: * INVOKED WHILE pmap_init() IS RUNNING!
3712: */
3713:
3714: if ((ptp = pmap_alloc_ptp(kpm, pmap_curmaxkvaddr)) == NULL)
3715: panic("pmap_growkernel: alloc ptp failed");
3716:
3717: /* distribute new kernel PTP to all active pmaps */
3718: simple_lock(&pmaps_lock);
3719: LIST_FOREACH(pm, &pmaps, pm_list) {
3720: pmap_map_in_l1(pm, pmap_curmaxkvaddr,
1.30.2.14 thorpej 3721: VM_PAGE_TO_PHYS(ptp),
3722: PMAP_PTP_SELFREF | PMAP_PTP_CACHEABLE);
1.30.2.6 nathanw 3723: }
3724:
1.30.2.14 thorpej 3725: /* Invalidate the PTPT cache. */
3726: pool_cache_invalidate(&pmap_ptpt_cache);
3727: pmap_ptpt_cache_generation++;
3728:
1.30.2.6 nathanw 3729: simple_unlock(&pmaps_lock);
3730: }
3731:
3732: /*
3733: * flush out the TLB; expensive, but growkernel will happen so
3734: * rarely
3735: */
3736: cpu_tlb_flushD();
3737: cpu_cpwait();
3738:
3739: simple_unlock(&kpm->pm_obj.vmobjlock);
3740: splx(s);
3741:
3742: out:
3743: return (pmap_curmaxkvaddr);
3744: }
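
/*
 * Illustrative sketch (simplified): the VM system calls
 * pmap_growkernel() when a kernel allocation crosses the current
 * high-water mark, roughly:
 */
#if 0	/* example only */
	if (kva_end > uvm_maxkaddr)
		uvm_maxkaddr = pmap_growkernel(kva_end);
#endif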
3745:
1.30.2.7 nathanw 3746: /************************ Utility routines ****************************/
3747:
3748: /*
3749: * vector_page_setprot:
3750: *
3751: * Manipulate the protection of the vector page.
3752: */
3753: void
3754: vector_page_setprot(int prot)
3755: {
3756: pt_entry_t *pte;
3757:
3758: pte = vtopte(vector_page);
1.30.2.6 nathanw 3759:
1.30.2.7 nathanw 3760: *pte = (*pte & ~L2_S_PROT_MASK) | L2_S_PROT(PTE_KERNEL, prot);
1.30.2.14 thorpej 3761: PTE_SYNC(pte);
1.30.2.7 nathanw 3762: cpu_tlb_flushD_SE(vector_page);
3763: cpu_cpwait();
3764: }
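
/*
 * Illustrative sketch (hypothetical caller): the vector page normally
 * stays read-only; code installing new vectors would bracket the
 * update like this:
 */
#if 0	/* example only */
	vector_page_setprot(VM_PROT_READ|VM_PROT_WRITE);
	/* ... copy the new vector instructions into the vector page ... */
	vector_page_setprot(VM_PROT_READ);
#endif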
1.30.2.6 nathanw 3765:
1.30.2.5 nathanw 3766: /************************ Bootstrapping routines ****************************/
3767:
3768: /*
3769: * This list exists for the benefit of pmap_map_chunk(). It keeps track
3770: * of the kernel L2 tables during bootstrap, so that pmap_map_chunk() can
3771: * find them as necessary.
3772: *
3773: * Note that the data on this list is not valid after initarm() returns.
3774: */
3775: SLIST_HEAD(, pv_addr) kernel_pt_list = SLIST_HEAD_INITIALIZER(kernel_pt_list);
3776:
3777: static vaddr_t
3778: kernel_pt_lookup(paddr_t pa)
3779: {
3780: pv_addr_t *pv;
3781:
3782: SLIST_FOREACH(pv, &kernel_pt_list, pv_list) {
3783: if (pv->pv_pa == pa)
3784: return (pv->pv_va);
3785: }
3786: return (0);
3787: }
3788:
3789: /*
3790: * pmap_map_section:
3791: *
3792: * Create a single section mapping.
3793: */
3794: void
3795: pmap_map_section(vaddr_t l1pt, vaddr_t va, paddr_t pa, int prot, int cache)
3796: {
3797: pd_entry_t *pde = (pd_entry_t *) l1pt;
1.30.2.7 nathanw 3798: pd_entry_t fl = (cache == PTE_CACHE) ? pte_l1_s_cache_mode : 0;
1.30.2.5 nathanw 3799:
1.30.2.7 nathanw 3800: KASSERT(((va | pa) & L1_S_OFFSET) == 0);
1.30.2.5 nathanw 3801:
1.30.2.7 nathanw 3802: pde[va >> L1_S_SHIFT] = L1_S_PROTO | pa |
3803: L1_S_PROT(PTE_KERNEL, prot) | fl;
1.30.2.5 nathanw 3804: }
3805:
3806: /*
3807: * pmap_map_entry:
3808: *
3809: * Create a single page mapping.
3810: */
3811: void
3812: pmap_map_entry(vaddr_t l1pt, vaddr_t va, paddr_t pa, int prot, int cache)
3813: {
3814: pd_entry_t *pde = (pd_entry_t *) l1pt;
1.30.2.7 nathanw 3815: pt_entry_t fl = (cache == PTE_CACHE) ? pte_l2_s_cache_mode : 0;
1.30.2.5 nathanw 3816: pt_entry_t *pte;
3817:
3818: KASSERT(((va | pa) & PGOFSET) == 0);
3819:
1.30.2.7 nathanw 3820: if ((pde[va >> L1_S_SHIFT] & L1_TYPE_MASK) != L1_TYPE_C)
1.30.2.5 nathanw 3821: panic("pmap_map_entry: no L2 table for VA 0x%08lx", va);
3822:
3823: pte = (pt_entry_t *)
1.30.2.7 nathanw 3824: kernel_pt_lookup(pde[va >> L1_S_SHIFT] & L2_S_FRAME);
1.30.2.5 nathanw 3825: if (pte == NULL)
3826: panic("pmap_map_entry: can't find L2 table for VA 0x%08lx", va);
3827:
1.30.2.7 nathanw 3828: pte[(va >> PGSHIFT) & 0x3ff] = L2_S_PROTO | pa |
3829: L2_S_PROT(PTE_KERNEL, prot) | fl;
1.30.2.5 nathanw 3830: }
3831:
3832: /*
3833: * pmap_link_l2pt:
3834: *
3835: * Link the L2 page table specified by "l2pv" into the L1
3836: * page table at the slot for "va".
3837: */
3838: void
3839: pmap_link_l2pt(vaddr_t l1pt, vaddr_t va, pv_addr_t *l2pv)
3840: {
3841: pd_entry_t *pde = (pd_entry_t *) l1pt;
1.30.2.7 nathanw 3842: u_int slot = va >> L1_S_SHIFT;
1.30.2.5 nathanw 3843:
3844: KASSERT((l2pv->pv_pa & PGOFSET) == 0);
3845:
1.30.2.7 nathanw 3846: pde[slot + 0] = L1_C_PROTO | (l2pv->pv_pa + 0x000);
3847: pde[slot + 1] = L1_C_PROTO | (l2pv->pv_pa + 0x400);
3848: pde[slot + 2] = L1_C_PROTO | (l2pv->pv_pa + 0x800);
3849: pde[slot + 3] = L1_C_PROTO | (l2pv->pv_pa + 0xc00);
1.30.2.5 nathanw 3850:
3851: SLIST_INSERT_HEAD(&kernel_pt_list, l2pv, pv_list);
3852: }
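
/*
 * Illustrative sketch (hypothetical initarm() fragment; names are
 * placeholders): linking a kernel L2 table into the L1 also places it
 * on kernel_pt_list, so that pmap_map_entry()/pmap_map_chunk() can
 * find it later.
 */
#if 0	/* example only */
	pv_addr_t kernel_l2pt;	/* pv_va/pv_pa set up by the bootstrap
				   allocator; must be page aligned */

	pmap_link_l2pt(l1pagetable, some_va, &kernel_l2pt);
	pmap_map_entry(l1pagetable, some_va, some_pa,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
#endif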
3853:
3854: /*
3855: * pmap_map_chunk:
3856: *
3857: * Map a chunk of memory using the most efficient mappings
3858: * possible (section, large page, small page) into the
3859: * provided L1 and L2 tables at the specified virtual address.
3860: */
3861: vsize_t
3862: pmap_map_chunk(vaddr_t l1pt, vaddr_t va, paddr_t pa, vsize_t size,
3863: int prot, int cache)
3864: {
3865: pd_entry_t *pde = (pd_entry_t *) l1pt;
1.30.2.7 nathanw 3866: pt_entry_t *pte, fl;
1.30.2.5 nathanw 3867: vsize_t resid;
3868: int i;
3869:
3870: resid = (size + (NBPG - 1)) & ~(NBPG - 1);
3871:
3872: if (l1pt == 0)
3873: panic("pmap_map_chunk: no L1 table provided");
3874:
3875: #ifdef VERBOSE_INIT_ARM
3876: printf("pmap_map_chunk: pa=0x%lx va=0x%lx size=0x%lx resid=0x%lx "
3877: "prot=0x%x cache=%d\n", pa, va, size, resid, prot, cache);
3878: #endif
3879:
3880: size = resid;
3881:
3882: while (resid > 0) {
3883: /* See if we can use a section mapping. */
1.30.2.7 nathanw 3884: if (((pa | va) & L1_S_OFFSET) == 0 &&
3885: resid >= L1_S_SIZE) {
3886: fl = (cache == PTE_CACHE) ? pte_l1_s_cache_mode : 0;
1.30.2.5 nathanw 3887: #ifdef VERBOSE_INIT_ARM
3888: printf("S");
3889: #endif
1.30.2.7 nathanw 3890: pde[va >> L1_S_SHIFT] = L1_S_PROTO | pa |
3891: L1_S_PROT(PTE_KERNEL, prot) | fl;
3892: va += L1_S_SIZE;
3893: pa += L1_S_SIZE;
3894: resid -= L1_S_SIZE;
1.30.2.5 nathanw 3895: continue;
3896: }
3897:
3898: /*
3899: * Ok, we're going to use an L2 table. Make sure
3900: * one is actually in the corresponding L1 slot
3901: * for the current VA.
3902: */
1.30.2.7 nathanw 3903: if ((pde[va >> L1_S_SHIFT] & L1_TYPE_MASK) != L1_TYPE_C)
1.30.2.5 nathanw 3904: panic("pmap_map_chunk: no L2 table for VA 0x%08lx", va);
3905:
3906: pte = (pt_entry_t *)
1.30.2.7 nathanw 3907: kernel_pt_lookup(pde[va >> L1_S_SHIFT] & L2_S_FRAME);
1.30.2.5 nathanw 3908: if (pte == NULL)
3909: panic("pmap_map_chunk: can't find L2 table for VA "
3910: "0x%08lx", va);
3911:
3912: /* See if we can use an L2 large page mapping. */
1.30.2.7 nathanw 3913: if (((pa | va) & L2_L_OFFSET) == 0 &&
3914: resid >= L2_L_SIZE) {
3915: fl = (cache == PTE_CACHE) ? pte_l2_l_cache_mode : 0;
1.30.2.5 nathanw 3916: #ifdef VERBOSE_INIT_ARM
3917: printf("L");
3918: #endif
3919: for (i = 0; i < 16; i++) {
3920: pte[((va >> PGSHIFT) & 0x3f0) + i] =
1.30.2.7 nathanw 3921: L2_L_PROTO | pa |
3922: L2_L_PROT(PTE_KERNEL, prot) | fl;
1.30.2.5 nathanw 3923: }
1.30.2.7 nathanw 3924: va += L2_L_SIZE;
3925: pa += L2_L_SIZE;
3926: resid -= L2_L_SIZE;
1.30.2.5 nathanw 3927: continue;
3928: }
3929:
3930: /* Use a small page mapping. */
1.30.2.7 nathanw 3931: fl = (cache == PTE_CACHE) ? pte_l2_s_cache_mode : 0;
1.30.2.5 nathanw 3932: #ifdef VERBOSE_INIT_ARM
3933: printf("P");
3934: #endif
1.30.2.7 nathanw 3935: pte[(va >> PGSHIFT) & 0x3ff] = L2_S_PROTO | pa |
3936: L2_S_PROT(PTE_KERNEL, prot) | fl;
1.30.2.5 nathanw 3937: va += NBPG;
3938: pa += NBPG;
3939: resid -= NBPG;
3940: }
3941: #ifdef VERBOSE_INIT_ARM
3942: printf("\n");
3943: #endif
3944: return (size);
3945: }
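
/*
 * Illustrative sketch (hypothetical initarm() fragment; names are
 * placeholders): mapping the kernel image. pmap_map_chunk() chooses
 * section, large-page or small-page mappings based on alignment and
 * returns the page-rounded size.
 */
#if 0	/* example only */
	size = pmap_map_chunk(l1pagetable, KERNEL_BASE, physical_start,
	    kernel_size, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
#endif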
1.30.2.7 nathanw 3946:
3947: /********************** PTE initialization routines **************************/
3948:
3949: /*
3950: * These routines are called when the CPU type is identified to set up
3951: * the PTE prototypes, cache modes, etc.
3952: *
3953: * The variables are always here, just in case LKMs need to reference
3954: * them (though they shouldn't).
3955: */
3956:
3957: pt_entry_t pte_l1_s_cache_mode;
3958: pt_entry_t pte_l1_s_cache_mask;
3959:
3960: pt_entry_t pte_l2_l_cache_mode;
3961: pt_entry_t pte_l2_l_cache_mask;
3962:
3963: pt_entry_t pte_l2_s_cache_mode;
3964: pt_entry_t pte_l2_s_cache_mask;
3965:
3966: pt_entry_t pte_l2_s_prot_u;
3967: pt_entry_t pte_l2_s_prot_w;
3968: pt_entry_t pte_l2_s_prot_mask;
3969:
3970: pt_entry_t pte_l1_s_proto;
3971: pt_entry_t pte_l1_c_proto;
3972: pt_entry_t pte_l2_s_proto;
3973:
3974: void (*pmap_copy_page_func)(paddr_t, paddr_t);
3975: void (*pmap_zero_page_func)(paddr_t);
3976:
3977: #if ARM_MMU_GENERIC == 1
3978: void
3979: pmap_pte_init_generic(void)
3980: {
3981:
3982: pte_l1_s_cache_mode = L1_S_B|L1_S_C;
3983: pte_l1_s_cache_mask = L1_S_CACHE_MASK_generic;
3984:
3985: pte_l2_l_cache_mode = L2_B|L2_C;
3986: pte_l2_l_cache_mask = L2_L_CACHE_MASK_generic;
3987:
3988: pte_l2_s_cache_mode = L2_B|L2_C;
3989: pte_l2_s_cache_mask = L2_S_CACHE_MASK_generic;
3990:
3991: pte_l2_s_prot_u = L2_S_PROT_U_generic;
3992: pte_l2_s_prot_w = L2_S_PROT_W_generic;
3993: pte_l2_s_prot_mask = L2_S_PROT_MASK_generic;
3994:
3995: pte_l1_s_proto = L1_S_PROTO_generic;
3996: pte_l1_c_proto = L1_C_PROTO_generic;
3997: pte_l2_s_proto = L2_S_PROTO_generic;
3998:
3999: pmap_copy_page_func = pmap_copy_page_generic;
4000: pmap_zero_page_func = pmap_zero_page_generic;
4001: }
4002:
4003: #if defined(CPU_ARM9)
4004: void
4005: pmap_pte_init_arm9(void)
4006: {
4007:
4008: /*
4009: * ARM9 is compatible with generic, but we want to use
4010: * write-through caching for now.
4011: */
4012: pmap_pte_init_generic();
4013:
4014: pte_l1_s_cache_mode = L1_S_C;
4015: pte_l2_l_cache_mode = L2_C;
4016: pte_l2_s_cache_mode = L2_C;
4017: }
4018: #endif /* CPU_ARM9 */
4019: #endif /* ARM_MMU_GENERIC == 1 */
4020:
4021: #if ARM_MMU_XSCALE == 1
4022: void
4023: pmap_pte_init_xscale(void)
4024: {
1.30.2.8 nathanw 4025: uint32_t auxctl;
1.30.2.7 nathanw 4026:
1.30.2.8 nathanw 4027: pte_l1_s_cache_mode = L1_S_B|L1_S_C;
1.30.2.7 nathanw 4028: pte_l1_s_cache_mask = L1_S_CACHE_MASK_xscale;
4029:
1.30.2.8 nathanw 4030: pte_l2_l_cache_mode = L2_B|L2_C;
1.30.2.7 nathanw 4031: pte_l2_l_cache_mask = L2_L_CACHE_MASK_xscale;
4032:
1.30.2.8 nathanw 4033: pte_l2_s_cache_mode = L2_B|L2_C;
1.30.2.7 nathanw 4034: pte_l2_s_cache_mask = L2_S_CACHE_MASK_xscale;
4035:
1.30.2.12 nathanw 4036: #ifdef XSCALE_CACHE_READ_WRITE_ALLOCATE
4037: /*
4038: * The XScale core has an enhanced mode where writes that
4039: * miss the cache cause a cache line to be allocated. This
4040: * is significantly faster than the traditional write-through
4041: * behavior in this case.
4042: *
4043: * However, there is a bug lurking in this pmap module, or in
4044: * other parts of the VM system, or both, which causes corruption
4045: * of NFS-backed files when this cache mode is used. We have
4046: * an ugly work-around for this problem (disable r/w-allocate
4047: * for managed kernel mappings), but the bug is still evil enough
4048: * to consider this cache mode "experimental".
4049: */
4050: pte_l1_s_cache_mode |= L1_S_XSCALE_TEX(TEX_XSCALE_X);
4051: pte_l2_l_cache_mode |= L2_XSCALE_L_TEX(TEX_XSCALE_X);
4052: pte_l2_s_cache_mode |= L2_XSCALE_T_TEX(TEX_XSCALE_X);
4053: #endif /* XSCALE_CACHE_READ_WRITE_ALLOCATE */
4054:
1.30.2.7 nathanw 4055: #ifdef XSCALE_CACHE_WRITE_THROUGH
4056: /*
4057: * Some versions of the XScale core have various bugs in
4058: * their cache units, the work-around for which is to run
4059: * the cache in write-through mode. Unfortunately, this
4060: * has a major (negative) impact on performance. So, we
4061: * go ahead and run fast-and-loose, in the hopes that we
4062: * don't line up the planets in a way that will trip the
4063: * bugs.
4064: *
4065: * However, we give you the option to be slow-but-correct.
4066: */
4067: pte_l1_s_cache_mode = L1_S_C;
4068: pte_l2_l_cache_mode = L2_C;
4069: pte_l2_s_cache_mode = L2_C;
4070: #endif /* XSCALE_CACHE_WRITE_THROUGH */
4071:
4072: pte_l2_s_prot_u = L2_S_PROT_U_xscale;
4073: pte_l2_s_prot_w = L2_S_PROT_W_xscale;
4074: pte_l2_s_prot_mask = L2_S_PROT_MASK_xscale;
4075:
4076: pte_l1_s_proto = L1_S_PROTO_xscale;
4077: pte_l1_c_proto = L1_C_PROTO_xscale;
4078: pte_l2_s_proto = L2_S_PROTO_xscale;
4079:
4080: pmap_copy_page_func = pmap_copy_page_xscale;
4081: pmap_zero_page_func = pmap_zero_page_xscale;
1.30.2.8 nathanw 4082:
4083: /*
4084: * Disable ECC protection of page table access, for now.
4085: */
4086: __asm __volatile("mrc p15, 0, %0, c1, c0, 1"
4087: : "=r" (auxctl));
4088: auxctl &= ~XSCALE_AUXCTL_P;
4089: __asm __volatile("mcr p15, 0, %0, c1, c0, 1"
4090: :
4091: : "r" (auxctl));
1.30.2.7 nathanw 4092: }
4093:
4094: /*
4095: * xscale_setup_minidata:
4096: *
4097: * Set up the mini-data cache clean area. We require the
4098: * caller to allocate the right amount of physically and
4099: * virtually contiguous space.
4100: */
4101: void
4102: xscale_setup_minidata(vaddr_t l1pt, vaddr_t va, paddr_t pa)
4103: {
4104: extern vaddr_t xscale_minidata_clean_addr;
4105: extern vsize_t xscale_minidata_clean_size; /* already initialized */
4106: pd_entry_t *pde = (pd_entry_t *) l1pt;
4107: pt_entry_t *pte;
4108: vsize_t size;
1.30.2.8 nathanw 4109: uint32_t auxctl;
1.30.2.7 nathanw 4110:
4111: xscale_minidata_clean_addr = va;
4112:
4113: /* Round it to page size. */
4114: size = (xscale_minidata_clean_size + L2_S_OFFSET) & L2_S_FRAME;
4115:
4116: for (; size != 0;
4117: va += L2_S_SIZE, pa += L2_S_SIZE, size -= L2_S_SIZE) {
4118: pte = (pt_entry_t *)
4119: kernel_pt_lookup(pde[va >> L1_S_SHIFT] & L2_S_FRAME);
4120: if (pte == NULL)
4121: panic("xscale_setup_minidata: can't find L2 table for "
4122: "VA 0x%08lx", va);
4123: pte[(va >> PGSHIFT) & 0x3ff] = L2_S_PROTO | pa |
4124: L2_S_PROT(PTE_KERNEL, VM_PROT_READ) |
4125: L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X);
4126: }
1.30.2.8 nathanw 4127:
4128: /*
4129: * Configure the mini-data cache for write-back with
4130: * read/write-allocate.
4131: *
4132: * NOTE: In order to reconfigure the mini-data cache, we must
4133: * make sure it contains no valid data! In order to do that,
4134: * we must issue a global data cache invalidate command!
4135: *
4136: * WE ASSUME WE ARE RUNNING UN-CACHED WHEN THIS ROUTINE IS CALLED!
4137: * THIS IS VERY IMPORTANT!
4138: */
4139:
4140: /* Invalidate data and mini-data (source register should be zero). */
4141: __asm __volatile("mcr p15, 0, %0, c7, c6, 0"
4142: :
4143: : "r" (0));
4144:
4145:
4146: __asm __volatile("mrc p15, 0, %0, c1, c0, 1"
4147: : "=r" (auxctl));
4148: auxctl = (auxctl & ~XSCALE_AUXCTL_MD_MASK) | XSCALE_AUXCTL_MD_WB_RWA;
4149: __asm __volatile("mcr p15, 0, %0, c1, c0, 1"
4150: :
4151: : "r" (auxctl));
1.30.2.7 nathanw 4152: }
4153: #endif /* ARM_MMU_XSCALE == 1 */