src/sys/arch/arm/include/arm32/pmap.h, revision 1.135.2.2.4.1

/*	$NetBSD: pmap.h,v 1.135.2.3 2017/03/11 07:40:21 snj Exp $	*/

/*
 * Copyright (c) 2002, 2003 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe & Steve C. Woodford for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1994,1995 Mark Brinicombe.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Mark Brinicombe
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef	_ARM32_PMAP_H_
#define	_ARM32_PMAP_H_

#ifdef _KERNEL

#include <arm/cpuconf.h>
#include <arm/arm32/pte.h>
#ifndef _LOCORE
#if defined(_KERNEL_OPT)
#include "opt_arm32_pmap.h"
#include "opt_multiprocessor.h"
#endif
#include <arm/cpufunc.h>
#include <arm/locore.h>
#include <uvm/uvm_object.h>
#endif

#ifdef ARM_MMU_EXTENDED
#define	PMAP_TLB_MAX		1
#define	PMAP_TLB_HWPAGEWALKER	1
#if PMAP_TLB_MAX > 1
#define	PMAP_NEED_TLB_SHOOTDOWN	1
#endif
#define	PMAP_TLB_FLUSH_ASID_ON_RESET	(arm_has_tlbiasid_p)
#define	PMAP_TLB_NUM_PIDS		256
#define	cpu_set_tlb_info(ci, ti)	((void)((ci)->ci_tlb_info = (ti)))
#if PMAP_TLB_MAX > 1
#define	cpu_tlb_info(ci)		((ci)->ci_tlb_info)
#else
#define	cpu_tlb_info(ci)		(&pmap_tlb0_info)
#endif
#define	pmap_md_tlb_asid_max()		(PMAP_TLB_NUM_PIDS - 1)
#include <uvm/pmap/tlb.h>
#include <uvm/pmap/pmap_tlb.h>

/*
 * If we have an EXTENDED MMU and the address space is split evenly between
 * user and kernel, we can use the TTBR0/TTBR1 to have separate L1 tables for
 * user and kernel address spaces.
 */
#if (KERNEL_BASE & 0x80000000) == 0
#error ARMv6 or later systems must have a KERNEL_BASE >= 0x80000000
#endif
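
/*
 * Illustrative note: with this minimum 2GB/2GB split, setting TTBCR.N = 1
 * makes the hardware translate VAs with bit 31 clear via TTBR0 (the
 * per-process user L1, which shrinks to 8KB) and VAs with bit 31 set via
 * TTBR1 (the shared kernel L1), so a context switch need only reload
 * TTBR0 and the ASID.
 */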
#endif /* ARM_MMU_EXTENDED */

/*
 * A pmap describes a process's 4GB virtual address space. This
 * virtual address space can be broken up into 4096 1MB regions, each
 * described by an L1 PTE in the L1 table.
 *
 * There is a line drawn at KERNEL_BASE. Everything below that line
 * changes when the VM context is switched. Everything above that line
 * is the same no matter which VM context is running. This is achieved
 * by making the L1 PTEs for those slots above KERNEL_BASE reference
 * kernel L2 tables.
 *
 * The basic layout of the virtual address space thus looks like this:
 *
 *	0xffffffff
 *	.
 *	.
 *	.
 *	KERNEL_BASE
 *	--------------------
 *	.
 *	.
 *	.
 *	0x00000000
 */

/*
 * The number of L2 descriptor tables which can be tracked by an l2_dtable.
 * A bucket size of 16 provides for 16MB of contiguous virtual address
 * space per l2_dtable. Most processes will, therefore, require only two or
 * three of these to map their whole working set.
 */
#define	L2_BUCKET_XLOG2	(L1_S_SHIFT)
#define	L2_BUCKET_XSIZE	(1 << L2_BUCKET_XLOG2)
#define	L2_BUCKET_LOG2	4
#define	L2_BUCKET_SIZE	(1 << L2_BUCKET_LOG2)

/*
 * Given the above "L2-descriptors-per-l2_dtable" constant, the number
 * of l2_dtable structures required to track all possible page descriptors
 * mappable by an L1 translation table is given by the following constants:
 */
#define	L2_LOG2		(32 - (L2_BUCKET_XLOG2 + L2_BUCKET_LOG2))
#define	L2_SIZE		(1 << L2_LOG2)
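
/*
 * Worked example (assuming the usual 1MB section size, L1_S_SHIFT == 20):
 * L2_BUCKET_XSIZE is 1MB, so a bucket of L2_BUCKET_SIZE == 16 L2 tables
 * spans 16MB per l2_dtable, and L2_LOG2 == 32 - (20 + 4) == 8, i.e.
 * L2_SIZE == 256 l2_dtable slots cover the whole 4GB address space.
 */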

/*
 * Tell MI code that the cache is virtually-indexed.
 * ARMv6 and later caches are also physically-tagged (VIPT), while all
 * earlier caches are virtually-tagged (VIVT).
 */
#if (ARM_MMU_V6 + ARM_MMU_V7) > 0
#define	PMAP_CACHE_VIPT
#else
#define	PMAP_CACHE_VIVT
#endif

#ifndef _LOCORE

#ifndef ARM_MMU_EXTENDED
struct l1_ttable;
struct l2_dtable;

/*
 * Track cache/tlb occupancy using the following structure
 */
union pmap_cache_state {
	struct {
		union {
			uint8_t csu_cache_b[2];
			uint16_t csu_cache;
		} cs_cache_u;

		union {
			uint8_t csu_tlb_b[2];
			uint16_t csu_tlb;
		} cs_tlb_u;
	} cs_s;
	uint32_t cs_all;
};
#define	cs_cache_id	cs_s.cs_cache_u.csu_cache_b[0]
#define	cs_cache_d	cs_s.cs_cache_u.csu_cache_b[1]
#define	cs_cache	cs_s.cs_cache_u.csu_cache
#define	cs_tlb_id	cs_s.cs_tlb_u.csu_tlb_b[0]
#define	cs_tlb_d	cs_s.cs_tlb_u.csu_tlb_b[1]
#define	cs_tlb		cs_s.cs_tlb_u.csu_tlb

/*
 * Assigned to cs_all to force cacheops to work for a particular pmap
 */
#define	PMAP_CACHE_STATE_ALL	0xffffffffu
#endif /* !ARM_MMU_EXTENDED */

/*
 * This structure is used by machine-dependent code to describe
 * static mappings of devices, created at bootstrap time.
 */
struct pmap_devmap {
	vaddr_t pd_va;		/* virtual address */
	paddr_t pd_pa;		/* physical address */
	psize_t pd_size;	/* size of region */
	vm_prot_t pd_prot;	/* protection code */
	int pd_cache;		/* cache attributes */
};
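
/*
 * A sketch of typical board code (the MYBOARD_* names are hypothetical):
 * the array is terminated by an all-zero entry and is either registered
 * with pmap_devmap_register() or mapped directly by pmap_devmap_bootstrap()
 * early in the bootstrap.
 *
 *	static const struct pmap_devmap myboard_devmap[] = {
 *		{ MYBOARD_UART_VBASE, MYBOARD_UART_PBASE, L1_S_SIZE,
 *		  VM_PROT_READ | VM_PROT_WRITE, PTE_NOCACHE },
 *		{ 0, 0, 0, 0, 0 }
 *	};
 */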

/*
 * The pmap structure itself
 */
struct pmap {
	struct uvm_object	pm_obj;
	kmutex_t		pm_obj_lock;
#define	pm_lock pm_obj.vmobjlock
#ifndef ARM_HAS_VBAR
	pd_entry_t		*pm_pl1vec;
	pd_entry_t		pm_l1vec;
#endif
	struct l2_dtable	*pm_l2[L2_SIZE];
	struct pmap_statistics	pm_stats;
	LIST_ENTRY(pmap)	pm_list;
#ifdef ARM_MMU_EXTENDED
	pd_entry_t		*pm_l1;
	paddr_t			pm_l1_pa;
	bool			pm_remove_all;
#ifdef MULTIPROCESSOR
	kcpuset_t		*pm_onproc;
	kcpuset_t		*pm_active;
#if PMAP_TLB_MAX > 1
	u_int			pm_shootdown_pending;
#endif
#endif
	struct pmap_asid_info	pm_pai[PMAP_TLB_MAX];
#else
	struct l1_ttable	*pm_l1;
	union pmap_cache_state	pm_cstate;
	uint8_t			pm_domain;
	bool			pm_activated;
	bool			pm_remove_all;
#endif
};

struct pmap_kernel {
	struct pmap		kernel_pmap;
};

/*
 * Physical / virtual address structure. In a number of places (particularly
 * during bootstrapping) we need to keep track of the physical and virtual
 * addresses of various pages.
 */
typedef struct pv_addr {
	SLIST_ENTRY(pv_addr) pv_list;
	paddr_t pv_pa;
	vaddr_t pv_va;
	vsize_t pv_size;
	uint8_t pv_cache;
	uint8_t pv_prot;
} pv_addr_t;
typedef SLIST_HEAD(, pv_addr) pv_addrqh_t;

extern pv_addrqh_t pmap_freeq;
extern pv_addr_t kernelstack;
extern pv_addr_t abtstack;
extern pv_addr_t fiqstack;
extern pv_addr_t irqstack;
extern pv_addr_t undstack;
extern pv_addr_t idlestack;
extern pv_addr_t systempage;
extern pv_addr_t kernel_l1pt;

#ifdef ARM_MMU_EXTENDED
extern bool arm_has_tlbiasid_p;	/* also in <arm/locore.h> */
#endif

/*
 * Determine various modes for PTEs (user vs. kernel, cacheable
 * vs. non-cacheable).
 */
#define	PTE_KERNEL	0
#define	PTE_USER	1
#define	PTE_NOCACHE	0
#define	PTE_CACHE	1
#define	PTE_PAGETABLE	2

/*
 * Flags that indicate attributes of pages or mappings of pages.
 *
 * The PVF_MOD and PVF_REF flags are stored in the mdpage for each
 * page. PVF_WIRED, PVF_WRITE, and PVF_NC are kept in individual
 * pv_entry's for each page. They live in the same "namespace" so
 * that we can clear multiple attributes at a time.
 *
 * Note the "non-cacheable" flag generally means the page has
 * multiple mappings in a given address space.
 */
#define	PVF_MOD		0x01		/* page is modified */
#define	PVF_REF		0x02		/* page is referenced */
#define	PVF_WIRED	0x04		/* mapping is wired */
#define	PVF_WRITE	0x08		/* mapping is writable */
#define	PVF_EXEC	0x10		/* mapping is executable */
#ifdef PMAP_CACHE_VIVT
#define	PVF_UNC		0x20		/* mapping is 'user' non-cacheable */
#define	PVF_KNC		0x40		/* mapping is 'kernel' non-cacheable */
#define	PVF_NC		(PVF_UNC|PVF_KNC)
#endif
#ifdef PMAP_CACHE_VIPT
#define	PVF_NC		0x20		/* mapping is 'kernel' non-cacheable */
#define	PVF_MULTCLR	0x40		/* mapping is multi-colored */
#endif
#define	PVF_COLORED	0x80		/* page has or had a color */
#define	PVF_KENTRY	0x0100		/* page entered via pmap_kenter_pa */
#define	PVF_KMPAGE	0x0200		/* page is used for kmem */
#define	PVF_DIRTY	0x0400		/* page may have dirty cache lines */
#define	PVF_KMOD	0x0800		/* unmanaged page is modified */
#define	PVF_KWRITE	(PVF_KENTRY|PVF_WRITE)
#define	PVF_DMOD	(PVF_MOD|PVF_KMOD|PVF_KMPAGE)

/*
 * Commonly referenced structures
 */
extern int pmap_debug_level;	/* Only exists if PMAP_DEBUG */
extern int arm_poolpage_vmfreelist;

/*
 * Macros that we need to export
 */
#define	pmap_resident_count(pmap)	((pmap)->pm_stats.resident_count)
#define	pmap_wired_count(pmap)		((pmap)->pm_stats.wired_count)

#define	pmap_is_modified(pg)	\
	(((pg)->mdpage.pvh_attrs & PVF_MOD) != 0)
#define	pmap_is_referenced(pg)	\
	(((pg)->mdpage.pvh_attrs & PVF_REF) != 0)
#define	pmap_is_page_colored_p(md)	\
	(((md)->pvh_attrs & PVF_COLORED) != 0)

#define	pmap_copy(dp, sp, da, l, sa)	/* nothing */

#define	pmap_phys_address(ppn)		(arm_ptob((ppn)))
u_int arm32_mmap_flags(paddr_t);
#define	ARM32_MMAP_WRITECOMBINE	0x40000000
#define	ARM32_MMAP_CACHEABLE	0x20000000
#define	pmap_mmap_flags(ppn)	arm32_mmap_flags(ppn)

#define	PMAP_PTE	0x10000000 /* kenter_pa */

/*
 * Functions that we need to export
 */
void	pmap_procwr(struct proc *, vaddr_t, int);
void	pmap_remove_all(pmap_t);
bool	pmap_extract(pmap_t, vaddr_t, paddr_t *);

#define	PMAP_NEED_PROCWR
#define	PMAP_GROWKERNEL		/* turn on pmap_growkernel interface */
#define	PMAP_ENABLE_PMAP_KMPAGE	/* enable the PMAP_KMPAGE flag */

#if (ARM_MMU_V6 + ARM_MMU_V7) > 0
#define	PMAP_PREFER(hint, vap, sz, td)	pmap_prefer((hint), (vap), (td))
void	pmap_prefer(vaddr_t, vaddr_t *, int);
#endif

void	pmap_icache_sync_range(pmap_t, vaddr_t, vaddr_t);

/* Functions we use internally. */
#ifdef PMAP_STEAL_MEMORY
void	pmap_boot_pagealloc(psize_t, psize_t, psize_t, pv_addr_t *);
void	pmap_boot_pageadd(pv_addr_t *);
vaddr_t	pmap_steal_memory(vsize_t, vaddr_t *, vaddr_t *);
#endif
void	pmap_bootstrap(vaddr_t, vaddr_t);

void	pmap_do_remove(pmap_t, vaddr_t, vaddr_t, int);
int	pmap_fault_fixup(pmap_t, vaddr_t, vm_prot_t, int);
int	pmap_prefetchabt_fixup(void *);
bool	pmap_get_pde_pte(pmap_t, vaddr_t, pd_entry_t **, pt_entry_t **);
bool	pmap_get_pde(pmap_t, vaddr_t, pd_entry_t **);
struct pcb;
void	pmap_set_pcb_pagedir(pmap_t, struct pcb *);

void	pmap_debug(int);
void	pmap_postinit(void);

void	vector_page_setprot(int);

const struct pmap_devmap *pmap_devmap_find_pa(paddr_t, psize_t);
const struct pmap_devmap *pmap_devmap_find_va(vaddr_t, vsize_t);

/* Bootstrapping routines. */
void	pmap_map_section(vaddr_t, vaddr_t, paddr_t, int, int);
void	pmap_map_entry(vaddr_t, vaddr_t, paddr_t, int, int);
vsize_t	pmap_map_chunk(vaddr_t, vaddr_t, paddr_t, vsize_t, int, int);
void	pmap_link_l2pt(vaddr_t, vaddr_t, pv_addr_t *);
void	pmap_devmap_bootstrap(vaddr_t, const struct pmap_devmap *);
void	pmap_devmap_register(const struct pmap_devmap *);
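
/*
 * A usage sketch (the l1pt_va value and region are illustrative):
 * pmap_map_chunk() maps a contiguous range with the largest pieces that
 * alignment allows (supersections, sections, large and small pages) and
 * returns the size it mapped.
 *
 *	(void)pmap_map_chunk(l1pt_va, KERNEL_BASE, physical_start,
 *	    0x00400000, VM_PROT_READ | VM_PROT_WRITE, PTE_CACHE);
 */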

/*
 * Special page zero routine for use by the idle loop (no cache cleans).
 */
bool	pmap_pageidlezero(paddr_t);
#define	PMAP_PAGEIDLEZERO(pa)	pmap_pageidlezero((pa))

#ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS
/*
 * For the pmap, this is a more useful way to map a direct mapped page.
 * It returns either the direct-mapped VA or the VA supplied if it can't
 * be direct mapped.
 */
vaddr_t	pmap_direct_mapped_phys(paddr_t, bool *, vaddr_t);
#endif

/*
 * Used by dumpsys to record the PA of the L1 table.
 */
uint32_t pmap_kernel_L1_addr(void);
/*
 * The current top of kernel VM
 */
extern vaddr_t	pmap_curmaxkvaddr;

#if defined(ARM_MMU_EXTENDED) && defined(__HAVE_MM_MD_DIRECT_MAPPED_PHYS)
/*
 * Starting VA of direct mapped memory (usually KERNEL_BASE).
 */
extern vaddr_t pmap_directbase;
#endif

/*
 * Useful macros and constants
 */

/* Virtual address to page table entry */
static inline pt_entry_t *
vtopte(vaddr_t va)
{
	pd_entry_t *pdep;
	pt_entry_t *ptep;

	KASSERT(trunc_page(va) == va);

	if (pmap_get_pde_pte(pmap_kernel(), va, &pdep, &ptep) == false)
		return (NULL);
	return (ptep);
}

/*
 * Virtual address to physical address
 */
static inline paddr_t
vtophys(vaddr_t va)
{
	paddr_t pa;

	if (pmap_extract(pmap_kernel(), va, &pa) == false)
		return (0);	/* XXXSCW: Panic? */

	return (pa);
}

/*
 * The new pmap ensures that page tables are always mapped write-through.
 * Thus, on some platforms we can run fast and loose and avoid syncing PTEs
 * on every change.
 *
 * Unfortunately, not all CPUs have a write-through cache mode. So we
 * define PMAP_NEEDS_PTE_SYNC for C code to conditionally do PTE syncs,
 * and if there is the chance for PTE syncs to be needed, we define
 * PMAP_INCLUDE_PTE_SYNC so e.g. assembly code can include (and run)
 * the code.
 */
extern int pmap_needs_pte_sync;
#if defined(_KERNEL_OPT)
/*
 * Perform compile-time evaluation of PMAP_NEEDS_PTE_SYNC when only a
 * single MMU type is selected.
 *
 * StrongARM SA-1 caches do not have a write-through mode, so on these
 * we need to do PTE syncs. Additionally, V6 MMUs also need PTE syncs.
 * Finally, MEMC, GENERIC and XSCALE MMUs do not need PTE syncs.
 *
 * Use run-time evaluation for all other cases.
 */
#if (ARM_NMMUS == 1)
#if (ARM_MMU_SA1 + ARM_MMU_V6 != 0)
#define	PMAP_INCLUDE_PTE_SYNC
#define	PMAP_NEEDS_PTE_SYNC	1
#elif (ARM_MMU_MEMC + ARM_MMU_GENERIC + ARM_MMU_XSCALE != 0)
#define	PMAP_NEEDS_PTE_SYNC	0
#endif
#endif
#endif /* _KERNEL_OPT */

/*
 * Provide a fallback in case we were not able to determine it at
 * compile-time.
 */
#ifndef PMAP_NEEDS_PTE_SYNC
#define	PMAP_NEEDS_PTE_SYNC	pmap_needs_pte_sync
#define	PMAP_INCLUDE_PTE_SYNC
#endif

static inline void
pmap_ptesync(pt_entry_t *ptep, size_t cnt)
{
	if (PMAP_NEEDS_PTE_SYNC) {
		cpu_dcache_wb_range((vaddr_t)ptep, cnt * sizeof(pt_entry_t));
#ifdef SHEEVA_L2_CACHE
		cpu_sdcache_wb_range((vaddr_t)ptep, -1,
		    cnt * sizeof(pt_entry_t));
#endif
	}
	arm_dsb();
}

#define	PDE_SYNC(pdep)			pmap_ptesync((pdep), 1)
#define	PDE_SYNC_RANGE(pdep, cnt)	pmap_ptesync((pdep), (cnt))
#define	PTE_SYNC(ptep)			pmap_ptesync((ptep), PAGE_SIZE / L2_S_SIZE)
#define	PTE_SYNC_RANGE(ptep, cnt)	pmap_ptesync((ptep), (cnt))
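
/*
 * A typical caller pattern (illustrative): write the new entry via
 * l2pte_set() (defined below), then PTE_SYNC() it so a write-back data
 * cache cannot hide the update from the hardware table walker:
 *
 *	l2pte_set(ptep, npte, opte);
 *	PTE_SYNC(ptep);
 */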

#define	l1pte_valid_p(pde)	((pde) != 0)
#define	l1pte_section_p(pde)	(((pde) & L1_TYPE_MASK) == L1_TYPE_S)
#define	l1pte_supersection_p(pde) (l1pte_section_p(pde)	\
				&& ((pde) & L1_S_V6_SUPER) != 0)
#define	l1pte_page_p(pde)	(((pde) & L1_TYPE_MASK) == L1_TYPE_C)
#define	l1pte_fpage_p(pde)	(((pde) & L1_TYPE_MASK) == L1_TYPE_F)
#define	l1pte_pa(pde)		((pde) & L1_C_ADDR_MASK)
#define	l1pte_index(v)		((vaddr_t)(v) >> L1_S_SHIFT)
#define	l1pte_pgindex(v)	l1pte_index((v) & L1_ADDR_BITS \
		& ~(PAGE_SIZE * PAGE_SIZE / sizeof(pt_entry_t) - 1))

static inline void
l1pte_setone(pt_entry_t *pdep, pt_entry_t pde)
{
	*pdep = pde;
}

static inline void
l1pte_set(pt_entry_t *pdep, pt_entry_t pde)
{
	*pdep = pde;
	if (l1pte_page_p(pde)) {
		KASSERTMSG((((uintptr_t)pdep / sizeof(pde)) & (PAGE_SIZE / L2_T_SIZE - 1)) == 0, "%p", pdep);
		for (size_t k = 1; k < PAGE_SIZE / L2_T_SIZE; k++) {
			pde += L2_T_SIZE;
			pdep[k] = pde;
		}
	} else if (l1pte_supersection_p(pde)) {
		KASSERTMSG((((uintptr_t)pdep / sizeof(pde)) & (L1_SS_SIZE / L1_S_SIZE - 1)) == 0, "%p", pdep);
		for (size_t k = 1; k < L1_SS_SIZE / L1_S_SIZE; k++) {
			pdep[k] = pde;
		}
	}
}

#define	l2pte_index(v)		((((v) & L2_ADDR_BITS) >> PGSHIFT) << (PGSHIFT-L2_S_SHIFT))
#define	l2pte_valid_p(pte)	(((pte) & L2_TYPE_MASK) != L2_TYPE_INV)
#define	l2pte_pa(pte)		((pte) & L2_S_FRAME)
#define	l1pte_lpage_p(pte)	(((pte) & L2_TYPE_MASK) == L2_TYPE_L)
#define	l2pte_minidata_p(pte)	(((pte) & \
				 (L2_B | L2_C | L2_XS_T_TEX(TEX_XSCALE_X)))\
				 == (L2_C | L2_XS_T_TEX(TEX_XSCALE_X)))

static inline void
l2pte_set(pt_entry_t *ptep, pt_entry_t pte, pt_entry_t opte)
{
	if (l1pte_lpage_p(pte)) {
		for (size_t k = 0; k < L2_L_SIZE / L2_S_SIZE; k++) {
			*ptep++ = pte;
		}
	} else {
		for (size_t k = 0; k < PAGE_SIZE / L2_S_SIZE; k++) {
			KASSERTMSG(*ptep == opte, "%#x [*%p] != %#x",
			    *ptep, ptep, opte);
			*ptep++ = pte;
			pte += L2_S_SIZE;
			if (opte)
				opte += L2_S_SIZE;
		}
	}
}

static inline void
l2pte_reset(pt_entry_t *ptep)
{
	*ptep = 0;
	for (vsize_t k = 1; k < PAGE_SIZE / L2_S_SIZE; k++) {
		ptep[k] = 0;
	}
}

/* L1 and L2 page table macros */
#define	pmap_pde_v(pde)		l1pte_valid_p(*(pde))
#define	pmap_pde_section(pde)	l1pte_section_p(*(pde))
#define	pmap_pde_supersection(pde)	l1pte_supersection_p(*(pde))
#define	pmap_pde_page(pde)	l1pte_page_p(*(pde))
#define	pmap_pde_fpage(pde)	l1pte_fpage_p(*(pde))

#define	pmap_pte_v(pte)		l2pte_valid_p(*(pte))
#define	pmap_pte_pa(pte)	l2pte_pa(*(pte))

/* Size of the kernel part of the L1 page table */
#define	KERNEL_PD_SIZE	\
	(L1_TABLE_SIZE - (KERNEL_BASE >> L1_S_SHIFT) * sizeof(pd_entry_t))
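
/*
 * For example, assuming the usual 16KB L1 table (L1_TABLE_SIZE == 16384)
 * and KERNEL_BASE at 0x80000000, the kernel owns the upper 2048 of the
 * 4096 L1 slots, so KERNEL_PD_SIZE is
 * 16384 - 2048 * sizeof(pd_entry_t) = 8192 bytes (8KB).
 */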

void	bzero_page(vaddr_t);
void	bcopy_page(vaddr_t, vaddr_t);

#ifdef FPU_VFP
void	bzero_page_vfp(vaddr_t);
void	bcopy_page_vfp(vaddr_t, vaddr_t);
#endif

/************************* ARM MMU configuration *****************************/

#if (ARM_MMU_GENERIC + ARM_MMU_SA1 + ARM_MMU_V6 + ARM_MMU_V7) != 0
void	pmap_copy_page_generic(paddr_t, paddr_t);
void	pmap_zero_page_generic(paddr_t);

void	pmap_pte_init_generic(void);
#if defined(CPU_ARM8)
void	pmap_pte_init_arm8(void);
#endif
#if defined(CPU_ARM9)
void	pmap_pte_init_arm9(void);
#endif /* CPU_ARM9 */
#if defined(CPU_ARM10)
void	pmap_pte_init_arm10(void);
#endif /* CPU_ARM10 */
#if defined(CPU_ARM11)	/* ARM_MMU_V6 */
void	pmap_pte_init_arm11(void);
#endif /* CPU_ARM11 */
#if defined(CPU_ARM11MPCORE)	/* ARM_MMU_V6 */
void	pmap_pte_init_arm11mpcore(void);
#endif
#if ARM_MMU_V7 == 1
void	pmap_pte_init_armv7(void);
#endif /* ARM_MMU_V7 */
#endif /* (ARM_MMU_GENERIC + ARM_MMU_SA1 + ARM_MMU_V6 + ARM_MMU_V7) != 0 */

#if ARM_MMU_SA1 == 1
void	pmap_pte_init_sa1(void);
#endif /* ARM_MMU_SA1 == 1 */

#if ARM_MMU_XSCALE == 1
void	pmap_copy_page_xscale(paddr_t, paddr_t);
void	pmap_zero_page_xscale(paddr_t);

void	pmap_pte_init_xscale(void);

void	xscale_setup_minidata(vaddr_t, vaddr_t, paddr_t);

#define	PMAP_UAREA(va)		pmap_uarea(va)
void	pmap_uarea(vaddr_t);
#endif /* ARM_MMU_XSCALE == 1 */

extern pt_entry_t		pte_l1_s_cache_mode;
extern pt_entry_t		pte_l1_s_cache_mask;

extern pt_entry_t		pte_l2_l_cache_mode;
extern pt_entry_t		pte_l2_l_cache_mask;

extern pt_entry_t		pte_l2_s_cache_mode;
extern pt_entry_t		pte_l2_s_cache_mask;

extern pt_entry_t		pte_l1_s_cache_mode_pt;
extern pt_entry_t		pte_l2_l_cache_mode_pt;
extern pt_entry_t		pte_l2_s_cache_mode_pt;

extern pt_entry_t		pte_l1_s_wc_mode;
extern pt_entry_t		pte_l2_l_wc_mode;
extern pt_entry_t		pte_l2_s_wc_mode;

extern pt_entry_t		pte_l1_s_prot_u;
extern pt_entry_t		pte_l1_s_prot_w;
extern pt_entry_t		pte_l1_s_prot_ro;
extern pt_entry_t		pte_l1_s_prot_mask;

extern pt_entry_t		pte_l2_s_prot_u;
extern pt_entry_t		pte_l2_s_prot_w;
extern pt_entry_t		pte_l2_s_prot_ro;
extern pt_entry_t		pte_l2_s_prot_mask;

extern pt_entry_t		pte_l2_l_prot_u;
extern pt_entry_t		pte_l2_l_prot_w;
extern pt_entry_t		pte_l2_l_prot_ro;
extern pt_entry_t		pte_l2_l_prot_mask;

extern pt_entry_t		pte_l1_ss_proto;
extern pt_entry_t		pte_l1_s_proto;
extern pt_entry_t		pte_l1_c_proto;
extern pt_entry_t		pte_l2_s_proto;

extern void (*pmap_copy_page_func)(paddr_t, paddr_t);
extern void (*pmap_zero_page_func)(paddr_t);

#endif /* !_LOCORE */

/*****************************************************************************/

#define	KERNEL_PID	0	/* The kernel uses ASID 0 */

/*
 * Definitions for MMU domains
 */
#define	PMAP_DOMAINS		15	/* 15 'user' domains (1-15) */
#define	PMAP_DOMAIN_KERNEL	0	/* The kernel pmap uses domain #0 */
#ifdef ARM_MMU_EXTENDED
#define	PMAP_DOMAIN_USER	1	/* User pmaps use domain #1 */
#endif

/*
 * These macros define the various bit masks in the PTE.
 *
 * We use these macros since we use different bits on different processor
 * models.
 */
#define	L1_S_PROT_U_generic	(L1_S_AP(AP_U))
#define	L1_S_PROT_W_generic	(L1_S_AP(AP_W))
#define	L1_S_PROT_RO_generic	(0)
#define	L1_S_PROT_MASK_generic	(L1_S_PROT_U|L1_S_PROT_W|L1_S_PROT_RO)

#define	L1_S_PROT_U_xscale	(L1_S_AP(AP_U))
#define	L1_S_PROT_W_xscale	(L1_S_AP(AP_W))
#define	L1_S_PROT_RO_xscale	(0)
#define	L1_S_PROT_MASK_xscale	(L1_S_PROT_U|L1_S_PROT_W|L1_S_PROT_RO)

#define	L1_S_PROT_U_armv6	(L1_S_AP(AP_R) | L1_S_AP(AP_U))
#define	L1_S_PROT_W_armv6	(L1_S_AP(AP_W))
#define	L1_S_PROT_RO_armv6	(L1_S_AP(AP_R) | L1_S_AP(AP_RO))
#define	L1_S_PROT_MASK_armv6	(L1_S_PROT_U|L1_S_PROT_W|L1_S_PROT_RO)

#define	L1_S_PROT_U_armv7	(L1_S_AP(AP_R) | L1_S_AP(AP_U))
#define	L1_S_PROT_W_armv7	(L1_S_AP(AP_W))
#define	L1_S_PROT_RO_armv7	(L1_S_AP(AP_R) | L1_S_AP(AP_RO))
#define	L1_S_PROT_MASK_armv7	(L1_S_PROT_U|L1_S_PROT_W|L1_S_PROT_RO)

#define	L1_S_CACHE_MASK_generic	(L1_S_B|L1_S_C)
#define	L1_S_CACHE_MASK_xscale	(L1_S_B|L1_S_C|L1_S_XS_TEX(TEX_XSCALE_X))
#define	L1_S_CACHE_MASK_armv6	(L1_S_B|L1_S_C|L1_S_XS_TEX(TEX_ARMV6_TEX))
#define	L1_S_CACHE_MASK_armv6n	(L1_S_B|L1_S_C|L1_S_XS_TEX(TEX_ARMV6_TEX)|L1_S_V6_S)
#define	L1_S_CACHE_MASK_armv7	(L1_S_B|L1_S_C|L1_S_XS_TEX(TEX_ARMV6_TEX)|L1_S_V6_S)

#define	L2_L_PROT_U_generic	(L2_AP(AP_U))
#define	L2_L_PROT_W_generic	(L2_AP(AP_W))
#define	L2_L_PROT_RO_generic	(0)
#define	L2_L_PROT_MASK_generic	(L2_L_PROT_U|L2_L_PROT_W|L2_L_PROT_RO)

#define	L2_L_PROT_U_xscale	(L2_AP(AP_U))
#define	L2_L_PROT_W_xscale	(L2_AP(AP_W))
#define	L2_L_PROT_RO_xscale	(0)
#define	L2_L_PROT_MASK_xscale	(L2_L_PROT_U|L2_L_PROT_W|L2_L_PROT_RO)

#define	L2_L_PROT_U_armv6n	(L2_AP0(AP_R) | L2_AP0(AP_U))
#define	L2_L_PROT_W_armv6n	(L2_AP0(AP_W))
#define	L2_L_PROT_RO_armv6n	(L2_AP0(AP_R) | L2_AP0(AP_RO))
#define	L2_L_PROT_MASK_armv6n	(L2_L_PROT_U|L2_L_PROT_W|L2_L_PROT_RO)

#define	L2_L_PROT_U_armv7	(L2_AP0(AP_R) | L2_AP0(AP_U))
#define	L2_L_PROT_W_armv7	(L2_AP0(AP_W))
#define	L2_L_PROT_RO_armv7	(L2_AP0(AP_R) | L2_AP0(AP_RO))
#define	L2_L_PROT_MASK_armv7	(L2_L_PROT_U|L2_L_PROT_W|L2_L_PROT_RO)

#define	L2_L_CACHE_MASK_generic	(L2_B|L2_C)
#define	L2_L_CACHE_MASK_xscale	(L2_B|L2_C|L2_XS_L_TEX(TEX_XSCALE_X))
#define	L2_L_CACHE_MASK_armv6	(L2_B|L2_C|L2_V6_L_TEX(TEX_ARMV6_TEX))
#define	L2_L_CACHE_MASK_armv6n	(L2_B|L2_C|L2_V6_L_TEX(TEX_ARMV6_TEX)|L2_XS_S)
#define	L2_L_CACHE_MASK_armv7	(L2_B|L2_C|L2_V6_L_TEX(TEX_ARMV6_TEX)|L2_XS_S)

#define	L2_S_PROT_U_generic	(L2_AP(AP_U))
#define	L2_S_PROT_W_generic	(L2_AP(AP_W))
#define	L2_S_PROT_RO_generic	(0)
#define	L2_S_PROT_MASK_generic	(L2_S_PROT_U|L2_S_PROT_W|L2_S_PROT_RO)

#define	L2_S_PROT_U_xscale	(L2_AP0(AP_U))
#define	L2_S_PROT_W_xscale	(L2_AP0(AP_W))
#define	L2_S_PROT_RO_xscale	(0)
#define	L2_S_PROT_MASK_xscale	(L2_S_PROT_U|L2_S_PROT_W|L2_S_PROT_RO)

#define	L2_S_PROT_U_armv6n	(L2_AP0(AP_R) | L2_AP0(AP_U))
#define	L2_S_PROT_W_armv6n	(L2_AP0(AP_W))
#define	L2_S_PROT_RO_armv6n	(L2_AP0(AP_R) | L2_AP0(AP_RO))
#define	L2_S_PROT_MASK_armv6n	(L2_S_PROT_U|L2_S_PROT_W|L2_S_PROT_RO)

#define	L2_S_PROT_U_armv7	(L2_AP0(AP_R) | L2_AP0(AP_U))
#define	L2_S_PROT_W_armv7	(L2_AP0(AP_W))
#define	L2_S_PROT_RO_armv7	(L2_AP0(AP_R) | L2_AP0(AP_RO))
#define	L2_S_PROT_MASK_armv7	(L2_S_PROT_U|L2_S_PROT_W|L2_S_PROT_RO)

#define	L2_S_CACHE_MASK_generic	(L2_B|L2_C)
#define	L2_S_CACHE_MASK_xscale	(L2_B|L2_C|L2_XS_T_TEX(TEX_XSCALE_X))
#define	L2_XS_CACHE_MASK_armv6	(L2_B|L2_C|L2_V6_XS_TEX(TEX_ARMV6_TEX))
#define	L2_S_CACHE_MASK_armv6n	L2_XS_CACHE_MASK_armv6
#ifdef ARMV6_EXTENDED_SMALL_PAGE
#define	L2_S_CACHE_MASK_armv6c	L2_XS_CACHE_MASK_armv6
#else
#define	L2_S_CACHE_MASK_armv6c	L2_S_CACHE_MASK_generic
#endif
#define	L2_S_CACHE_MASK_armv7	(L2_B|L2_C|L2_V6_XS_TEX(TEX_ARMV6_TEX)|L2_XS_S)

#define	L1_S_PROTO_generic	(L1_TYPE_S | L1_S_IMP)
#define	L1_S_PROTO_xscale	(L1_TYPE_S)
#define	L1_S_PROTO_armv6	(L1_TYPE_S)
#define	L1_S_PROTO_armv7	(L1_TYPE_S)

#define	L1_SS_PROTO_generic	0
#define	L1_SS_PROTO_xscale	0
#define	L1_SS_PROTO_armv6	(L1_TYPE_S | L1_S_V6_SS)
#define	L1_SS_PROTO_armv7	(L1_TYPE_S | L1_S_V6_SS)

#define	L1_C_PROTO_generic	(L1_TYPE_C | L1_C_IMP2)
#define	L1_C_PROTO_xscale	(L1_TYPE_C)
#define	L1_C_PROTO_armv6	(L1_TYPE_C)
#define	L1_C_PROTO_armv7	(L1_TYPE_C)

#define	L2_L_PROTO		(L2_TYPE_L)

#define	L2_S_PROTO_generic	(L2_TYPE_S)
#define	L2_S_PROTO_xscale	(L2_TYPE_XS)
#ifdef ARMV6_EXTENDED_SMALL_PAGE
#define	L2_S_PROTO_armv6c	(L2_TYPE_XS)	/* XP=0, extended small page */
#else
#define	L2_S_PROTO_armv6c	(L2_TYPE_S)	/* XP=0, subpage APs */
#endif
#ifdef ARM_MMU_EXTENDED
#define	L2_S_PROTO_armv6n	(L2_TYPE_S|L2_XS_XN)
#else
#define	L2_S_PROTO_armv6n	(L2_TYPE_S)	/* with XP=1 */
#endif
#ifdef ARM_MMU_EXTENDED
#define	L2_S_PROTO_armv7	(L2_TYPE_S|L2_XS_XN)
#else
#define	L2_S_PROTO_armv7	(L2_TYPE_S)
#endif

/*
 * User-visible names for the ones that vary with MMU class.
 */

#if ARM_NMMUS > 1
/* More than one MMU class configured; use variables. */
#define	L1_S_PROT_U		pte_l1_s_prot_u
#define	L1_S_PROT_W		pte_l1_s_prot_w
#define	L1_S_PROT_RO		pte_l1_s_prot_ro
#define	L1_S_PROT_MASK		pte_l1_s_prot_mask

#define	L2_S_PROT_U		pte_l2_s_prot_u
#define	L2_S_PROT_W		pte_l2_s_prot_w
#define	L2_S_PROT_RO		pte_l2_s_prot_ro
#define	L2_S_PROT_MASK		pte_l2_s_prot_mask

#define	L2_L_PROT_U		pte_l2_l_prot_u
#define	L2_L_PROT_W		pte_l2_l_prot_w
#define	L2_L_PROT_RO		pte_l2_l_prot_ro
#define	L2_L_PROT_MASK		pte_l2_l_prot_mask

#define	L1_S_CACHE_MASK		pte_l1_s_cache_mask
#define	L2_L_CACHE_MASK		pte_l2_l_cache_mask
#define	L2_S_CACHE_MASK		pte_l2_s_cache_mask

#define	L1_SS_PROTO		pte_l1_ss_proto
#define	L1_S_PROTO		pte_l1_s_proto
#define	L1_C_PROTO		pte_l1_c_proto
#define	L2_S_PROTO		pte_l2_s_proto

#define	pmap_copy_page(s, d)	(*pmap_copy_page_func)((s), (d))
#define	pmap_zero_page(d)	(*pmap_zero_page_func)((d))
#elif (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0
#define	L1_S_PROT_U		L1_S_PROT_U_generic
#define	L1_S_PROT_W		L1_S_PROT_W_generic
#define	L1_S_PROT_RO		L1_S_PROT_RO_generic
#define	L1_S_PROT_MASK		L1_S_PROT_MASK_generic

#define	L2_S_PROT_U		L2_S_PROT_U_generic
#define	L2_S_PROT_W		L2_S_PROT_W_generic
#define	L2_S_PROT_RO		L2_S_PROT_RO_generic
#define	L2_S_PROT_MASK		L2_S_PROT_MASK_generic

#define	L2_L_PROT_U		L2_L_PROT_U_generic
#define	L2_L_PROT_W		L2_L_PROT_W_generic
#define	L2_L_PROT_RO		L2_L_PROT_RO_generic
#define	L2_L_PROT_MASK		L2_L_PROT_MASK_generic

#define	L1_S_CACHE_MASK		L1_S_CACHE_MASK_generic
#define	L2_L_CACHE_MASK		L2_L_CACHE_MASK_generic
#define	L2_S_CACHE_MASK		L2_S_CACHE_MASK_generic

#define	L1_SS_PROTO		L1_SS_PROTO_generic
#define	L1_S_PROTO		L1_S_PROTO_generic
#define	L1_C_PROTO		L1_C_PROTO_generic
#define	L2_S_PROTO		L2_S_PROTO_generic

#define	pmap_copy_page(s, d)	pmap_copy_page_generic((s), (d))
#define	pmap_zero_page(d)	pmap_zero_page_generic((d))
#elif ARM_MMU_V6N != 0
#define	L1_S_PROT_U		L1_S_PROT_U_armv6
#define	L1_S_PROT_W		L1_S_PROT_W_armv6
#define	L1_S_PROT_RO		L1_S_PROT_RO_armv6
#define	L1_S_PROT_MASK		L1_S_PROT_MASK_armv6

#define	L2_S_PROT_U		L2_S_PROT_U_armv6n
#define	L2_S_PROT_W		L2_S_PROT_W_armv6n
#define	L2_S_PROT_RO		L2_S_PROT_RO_armv6n
#define	L2_S_PROT_MASK		L2_S_PROT_MASK_armv6n

#define	L2_L_PROT_U		L2_L_PROT_U_armv6n
#define	L2_L_PROT_W		L2_L_PROT_W_armv6n
#define	L2_L_PROT_RO		L2_L_PROT_RO_armv6n
#define	L2_L_PROT_MASK		L2_L_PROT_MASK_armv6n

#define	L1_S_CACHE_MASK		L1_S_CACHE_MASK_armv6n
#define	L2_L_CACHE_MASK		L2_L_CACHE_MASK_armv6n
#define	L2_S_CACHE_MASK		L2_S_CACHE_MASK_armv6n

/*
 * These prototypes make writeable mappings, while the other MMU types
 * make read-only mappings.
 */
#define	L1_SS_PROTO		L1_SS_PROTO_armv6
#define	L1_S_PROTO		L1_S_PROTO_armv6
#define	L1_C_PROTO		L1_C_PROTO_armv6
#define	L2_S_PROTO		L2_S_PROTO_armv6n

#define	pmap_copy_page(s, d)	pmap_copy_page_generic((s), (d))
#define	pmap_zero_page(d)	pmap_zero_page_generic((d))
#elif ARM_MMU_V6C != 0
#define	L1_S_PROT_U		L1_S_PROT_U_generic
#define	L1_S_PROT_W		L1_S_PROT_W_generic
#define	L1_S_PROT_RO		L1_S_PROT_RO_generic
#define	L1_S_PROT_MASK		L1_S_PROT_MASK_generic

#define	L2_S_PROT_U		L2_S_PROT_U_generic
#define	L2_S_PROT_W		L2_S_PROT_W_generic
#define	L2_S_PROT_RO		L2_S_PROT_RO_generic
#define	L2_S_PROT_MASK		L2_S_PROT_MASK_generic

#define	L2_L_PROT_U		L2_L_PROT_U_generic
#define	L2_L_PROT_W		L2_L_PROT_W_generic
#define	L2_L_PROT_RO		L2_L_PROT_RO_generic
#define	L2_L_PROT_MASK		L2_L_PROT_MASK_generic

#define	L1_S_CACHE_MASK		L1_S_CACHE_MASK_generic
#define	L2_L_CACHE_MASK		L2_L_CACHE_MASK_generic
#define	L2_S_CACHE_MASK		L2_S_CACHE_MASK_generic

#define	L1_SS_PROTO		L1_SS_PROTO_armv6
#define	L1_S_PROTO		L1_S_PROTO_generic
#define	L1_C_PROTO		L1_C_PROTO_generic
#define	L2_S_PROTO		L2_S_PROTO_generic

#define	pmap_copy_page(s, d)	pmap_copy_page_generic((s), (d))
#define	pmap_zero_page(d)	pmap_zero_page_generic((d))
#elif ARM_MMU_XSCALE == 1
#define	L1_S_PROT_U		L1_S_PROT_U_generic
#define	L1_S_PROT_W		L1_S_PROT_W_generic
#define	L1_S_PROT_RO		L1_S_PROT_RO_generic
#define	L1_S_PROT_MASK		L1_S_PROT_MASK_generic

#define	L2_S_PROT_U		L2_S_PROT_U_xscale
#define	L2_S_PROT_W		L2_S_PROT_W_xscale
#define	L2_S_PROT_RO		L2_S_PROT_RO_xscale
#define	L2_S_PROT_MASK		L2_S_PROT_MASK_xscale

#define	L2_L_PROT_U		L2_L_PROT_U_generic
#define	L2_L_PROT_W		L2_L_PROT_W_generic
#define	L2_L_PROT_RO		L2_L_PROT_RO_generic
#define	L2_L_PROT_MASK		L2_L_PROT_MASK_generic

#define	L1_S_CACHE_MASK		L1_S_CACHE_MASK_xscale
#define	L2_L_CACHE_MASK		L2_L_CACHE_MASK_xscale
#define	L2_S_CACHE_MASK		L2_S_CACHE_MASK_xscale

#define	L1_SS_PROTO		L1_SS_PROTO_xscale
#define	L1_S_PROTO		L1_S_PROTO_xscale
#define	L1_C_PROTO		L1_C_PROTO_xscale
#define	L2_S_PROTO		L2_S_PROTO_xscale

#define	pmap_copy_page(s, d)	pmap_copy_page_xscale((s), (d))
#define	pmap_zero_page(d)	pmap_zero_page_xscale((d))
#elif ARM_MMU_V7 == 1
#define	L1_S_PROT_U		L1_S_PROT_U_armv7
#define	L1_S_PROT_W		L1_S_PROT_W_armv7
#define	L1_S_PROT_RO		L1_S_PROT_RO_armv7
#define	L1_S_PROT_MASK		L1_S_PROT_MASK_armv7

#define	L2_S_PROT_U		L2_S_PROT_U_armv7
#define	L2_S_PROT_W		L2_S_PROT_W_armv7
#define	L2_S_PROT_RO		L2_S_PROT_RO_armv7
#define	L2_S_PROT_MASK		L2_S_PROT_MASK_armv7

#define	L2_L_PROT_U		L2_L_PROT_U_armv7
#define	L2_L_PROT_W		L2_L_PROT_W_armv7
#define	L2_L_PROT_RO		L2_L_PROT_RO_armv7
#define	L2_L_PROT_MASK		L2_L_PROT_MASK_armv7

#define	L1_S_CACHE_MASK		L1_S_CACHE_MASK_armv7
#define	L2_L_CACHE_MASK		L2_L_CACHE_MASK_armv7
#define	L2_S_CACHE_MASK		L2_S_CACHE_MASK_armv7

/*
 * These prototypes make writeable mappings, while the other MMU types
 * make read-only mappings.
 */
#define	L1_SS_PROTO		L1_SS_PROTO_armv7
#define	L1_S_PROTO		L1_S_PROTO_armv7
#define	L1_C_PROTO		L1_C_PROTO_armv7
#define	L2_S_PROTO		L2_S_PROTO_armv7

#define	pmap_copy_page(s, d)	pmap_copy_page_generic((s), (d))
#define	pmap_zero_page(d)	pmap_zero_page_generic((d))
#endif /* ARM_NMMUS > 1 */

/*
 * Macros to set and query the write permission on page descriptors.
 */
#define	l1pte_set_writable(pte)	(((pte) & ~L1_S_PROT_RO) | L1_S_PROT_W)
#define	l1pte_set_readonly(pte)	(((pte) & ~L1_S_PROT_W) | L1_S_PROT_RO)
#define	l2pte_set_writable(pte)	(((pte) & ~L2_S_PROT_RO) | L2_S_PROT_W)
#define	l2pte_set_readonly(pte)	(((pte) & ~L2_S_PROT_W) | L2_S_PROT_RO)

#define	l2pte_writable_p(pte)	(((pte) & L2_S_PROT_W) == L2_S_PROT_W && \
				 (L2_S_PROT_RO == 0 || \
				 ((pte) & L2_S_PROT_RO) != L2_S_PROT_RO))

/*
 * These macros return various bits based on kernel/user and protection.
 * Note that the compiler will usually fold these at compile time.
 */
#define	L1_S_PROT(ku, pr)	((((ku) == PTE_USER) ? L1_S_PROT_U : 0) | \
				 (((pr) & VM_PROT_WRITE) ? L1_S_PROT_W : L1_S_PROT_RO))

#define	L2_L_PROT(ku, pr)	((((ku) == PTE_USER) ? L2_L_PROT_U : 0) | \
				 (((pr) & VM_PROT_WRITE) ? L2_L_PROT_W : L2_L_PROT_RO))

#define	L2_S_PROT(ku, pr)	((((ku) == PTE_USER) ? L2_S_PROT_U : 0) | \
				 (((pr) & VM_PROT_WRITE) ? L2_S_PROT_W : L2_S_PROT_RO))
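
/*
 * For example, L2_S_PROT(PTE_KERNEL, VM_PROT_READ) folds to the MMU
 * class's kernel read-only small-page bits, while
 * L2_S_PROT(PTE_USER, VM_PROT_READ | VM_PROT_WRITE) yields the user
 * read/write bits; with a single MMU class configured these are
 * compile-time constants.
 */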

/*
 * Macros to test if a mapping is mappable with an L1 SuperSection,
 * L1 Section, or an L2 Large Page mapping.
 */
#define	L1_SS_MAPPABLE_P(va, pa, size)					\
	((((va) | (pa)) & L1_SS_OFFSET) == 0 && (size) >= L1_SS_SIZE)

#define	L1_S_MAPPABLE_P(va, pa, size)					\
	((((va) | (pa)) & L1_S_OFFSET) == 0 && (size) >= L1_S_SIZE)

#define	L2_L_MAPPABLE_P(va, pa, size)					\
	((((va) | (pa)) & L2_L_OFFSET) == 0 && (size) >= L2_L_SIZE)
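
/*
 * For example, a region whose VA and PA are both 16MB-aligned and whose
 * size is at least L1_SS_SIZE (16MB) passes L1_SS_MAPPABLE_P() on MMUs
 * with supersections, and a 1MB-aligned region of at least 1MB passes
 * L1_S_MAPPABLE_P(). Bootstrap code such as pmap_map_chunk() uses these
 * tests to pick the largest mapping size.
 */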

#ifndef _LOCORE
/*
 * Hooks for the pool allocator.
 */
#define	POOL_VTOPHYS(va)	vtophys((vaddr_t) (va))
extern paddr_t physical_start, physical_end;
#ifdef PMAP_NEED_ALLOC_POOLPAGE
struct vm_page *arm_pmap_alloc_poolpage(int);
#define	PMAP_ALLOC_POOLPAGE	arm_pmap_alloc_poolpage
#endif
#if defined(PMAP_NEED_ALLOC_POOLPAGE) || defined(__HAVE_MM_MD_DIRECT_MAPPED_PHYS)
vaddr_t	pmap_map_poolpage(paddr_t);
paddr_t	pmap_unmap_poolpage(vaddr_t);
#define	PMAP_MAP_POOLPAGE(pa)	pmap_map_poolpage(pa)
#define	PMAP_UNMAP_POOLPAGE(va)	pmap_unmap_poolpage(va)
#endif

/*
 * pmap-specific data store in the vm_page structure.
 */
#define	__HAVE_VM_PAGE_MD
struct vm_page_md {
	SLIST_HEAD(,pv_entry) pvh_list;		/* pv_entry list */
	int pvh_attrs;				/* page attributes */
	u_int uro_mappings;
	u_int urw_mappings;
	union {
		u_short s_mappings[2];	/* Assume kernel count <= 65535 */
		u_int i_mappings;
	} k_u;
#define	kro_mappings	k_u.s_mappings[0]
#define	krw_mappings	k_u.s_mappings[1]
#define	k_mappings	k_u.i_mappings
};

/*
 * Set the default color of each page.
 */
#if ARM_MMU_V6 > 0
#define	VM_MDPAGE_PVH_ATTRS_INIT(pg) \
	(pg)->mdpage.pvh_attrs = (pg)->phys_addr & arm_cache_prefer_mask
#else
#define	VM_MDPAGE_PVH_ATTRS_INIT(pg) \
	(pg)->mdpage.pvh_attrs = 0
#endif

#define	VM_MDPAGE_INIT(pg)						\
do {									\
	SLIST_INIT(&(pg)->mdpage.pvh_list);				\
	VM_MDPAGE_PVH_ATTRS_INIT(pg);					\
	(pg)->mdpage.uro_mappings = 0;					\
	(pg)->mdpage.urw_mappings = 0;					\
	(pg)->mdpage.k_mappings = 0;					\
} while (/*CONSTCOND*/0)

#endif /* !_LOCORE */

#endif /* _KERNEL */

#endif /* _ARM32_PMAP_H_ */