/*	$NetBSD: pmap.h,v 1.81.12.1 2007/10/03 19:22:43 garbled Exp $	*/

/*
 * Copyright (c) 2002, 2003 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe & Steve C. Woodford for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1994,1995 Mark Brinicombe.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Mark Brinicombe
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef	_ARM32_PMAP_H_
#define	_ARM32_PMAP_H_

#ifdef _KERNEL

#include <arm/cpuconf.h>
#include <arm/arm32/pte.h>
#ifndef _LOCORE
#include <arm/cpufunc.h>
#include <uvm/uvm_object.h>
#endif

/*
 * A pmap describes a process's 4GB virtual address space.  This
 * virtual address space can be broken up into 4096 1MB regions which
 * are described by L1 PTEs in the L1 table.
 *
 * There is a line drawn at KERNEL_BASE.  Everything below that line
 * changes when the VM context is switched.  Everything above that line
 * is the same no matter which VM context is running.  This is achieved
 * by making the L1 PTEs for those slots above KERNEL_BASE reference
 * kernel L2 tables.
 *
 * The basic layout of the virtual address space thus looks like this:
 *
 *	0xffffffff
 *	.
 *	.
 *	.
 *	KERNEL_BASE
 *	--------------------
 *	.
 *	.
 *	.
 *	0x00000000
 */
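
/*
 * For example (illustrative arithmetic only, assuming the usual arm32
 * value L1_S_SHIFT == 20): the L1 slot for a virtual address is simply
 * va >> L1_S_SHIFT, so va 0xc0123456 selects L1 entry 0xc01 (3073),
 * and the 4GB space yields 1 << (32 - 20) == 4096 such entries.
 */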

/*
 * The number of L2 descriptor tables which can be tracked by an l2_dtable.
 * A bucket size of 16 provides for 16MB of contiguous virtual address
 * space per l2_dtable. Most processes will, therefore, require only two or
 * three of these to map their whole working set.
 */
#define	L2_BUCKET_LOG2	4
#define	L2_BUCKET_SIZE	(1 << L2_BUCKET_LOG2)

/*
 * Given the above "L2-descriptors-per-l2_dtable" constant, the number
 * of l2_dtable structures required to track all possible page descriptors
 * mappable by an L1 translation table is given by the following constants:
 */
#define	L2_LOG2		((32 - L1_S_SHIFT) - L2_BUCKET_LOG2)
#define	L2_SIZE		(1 << L2_LOG2)
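
/*
 * Worked out (illustrative only; assumes the usual arm32 value
 * L1_S_SHIFT == 20, i.e. 1MB sections):
 *
 *	L2_LOG2 = (32 - 20) - 4 = 8	=>  L2_SIZE = 256
 *
 * Each l2_dtable thus spans 16 x 1MB = 16MB of virtual space, and
 * 256 of them cover the full 4GB (256 x 16MB) address space.
 */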

#ifndef _LOCORE

struct l1_ttable;
struct l2_dtable;

/*
 * Track cache/tlb occupancy using the following structure
 */
union pmap_cache_state {
	struct {
		union {
			u_int8_t csu_cache_b[2];
			u_int16_t csu_cache;
		} cs_cache_u;

		union {
			u_int8_t csu_tlb_b[2];
			u_int16_t csu_tlb;
		} cs_tlb_u;
	} cs_s;
	u_int32_t cs_all;
};
#define	cs_cache_id	cs_s.cs_cache_u.csu_cache_b[0]
#define	cs_cache_d	cs_s.cs_cache_u.csu_cache_b[1]
#define	cs_cache	cs_s.cs_cache_u.csu_cache
#define	cs_tlb_id	cs_s.cs_tlb_u.csu_tlb_b[0]
#define	cs_tlb_d	cs_s.cs_tlb_u.csu_tlb_b[1]
#define	cs_tlb		cs_s.cs_tlb_u.csu_tlb

/*
 * Assigned to cs_all to force cacheops to work for a particular pmap
 */
#define	PMAP_CACHE_STATE_ALL	0xffffffffu

/*
 * This structure is used by machine-dependent code to describe
 * static mappings of devices, created at bootstrap time.
 */
struct pmap_devmap {
	vaddr_t		pd_va;		/* virtual address */
	paddr_t		pd_pa;		/* physical address */
	psize_t		pd_size;	/* size of region */
	vm_prot_t	pd_prot;	/* protection code */
	int		pd_cache;	/* cache attributes */
};
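
/*
 * Illustrative sketch only: a port's initarm() would normally hand a
 * table like the following to pmap_devmap_bootstrap().  The MYBOARD_*
 * constants are hypothetical placeholders, not real hardware addresses;
 * the table ends with an entry whose pd_size is zero:
 *
 *	static const struct pmap_devmap myboard_devmap[] = {
 *		{ MYBOARD_UART_VBASE, MYBOARD_UART_PBASE,
 *		  MYBOARD_UART_SIZE, VM_PROT_READ|VM_PROT_WRITE,
 *		  PTE_NOCACHE },
 *		{ 0, 0, 0, 0, 0 }
 *	};
 *
 *	pmap_devmap_bootstrap((vaddr_t)l1pt, myboard_devmap);
 */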

/*
 * The pmap structure itself
 */
struct pmap {
	u_int8_t		pm_domain;
	bool			pm_remove_all;
	bool			pm_activated;
	struct l1_ttable	*pm_l1;
	pd_entry_t		*pm_pl1vec;
	pd_entry_t		pm_l1vec;
	union pmap_cache_state	pm_cstate;
	struct uvm_object	pm_obj;
#define	pm_lock pm_obj.vmobjlock
	struct l2_dtable	*pm_l2[L2_SIZE];
	struct pmap_statistics	pm_stats;
	LIST_ENTRY(pmap)	pm_list;
};

typedef struct pmap *pmap_t;

/*
 * Physical / virtual address structure. In a number of places (particularly
 * during bootstrapping) we need to keep track of the physical and virtual
 * addresses of various pages.
 */
typedef struct pv_addr {
	SLIST_ENTRY(pv_addr) pv_list;
	paddr_t pv_pa;
	vaddr_t pv_va;
} pv_addr_t;

/*
 * Determine various modes for PTEs (user vs. kernel, cacheable
 * vs. non-cacheable).
 */
#define	PTE_KERNEL	0
#define	PTE_USER	1
#define	PTE_NOCACHE	0
#define	PTE_CACHE	1
#define	PTE_PAGETABLE	2

/*
 * Flags that indicate attributes of pages or mappings of pages.
 *
 * The PVF_MOD and PVF_REF flags are stored in the mdpage for each
 * page.  PVF_WIRED, PVF_WRITE, and PVF_NC are kept in individual
 * pv_entry's for each page.  They live in the same "namespace" so
 * that we can clear multiple attributes at a time.
 *
 * Note the "non-cacheable" flag generally means the page has
 * multiple mappings in a given address space.
 */
#define	PVF_MOD		0x01		/* page is modified */
#define	PVF_REF		0x02		/* page is referenced */
#define	PVF_WIRED	0x04		/* mapping is wired */
#define	PVF_WRITE	0x08		/* mapping is writable */
#define	PVF_EXEC	0x10		/* mapping is executable */
#define	PVF_UNC		0x20		/* mapping is 'user' non-cacheable */
#define	PVF_KNC		0x40		/* mapping is 'kernel' non-cacheable */
#define	PVF_NC		(PVF_UNC|PVF_KNC)

/*
 * Commonly referenced structures
 */
extern struct pmap	kernel_pmap_store;
extern int		pmap_debug_level; /* Only exists if PMAP_DEBUG */

/*
 * Macros that we need to export
 */
#define	pmap_kernel()			(&kernel_pmap_store)
#define	pmap_resident_count(pmap)	((pmap)->pm_stats.resident_count)
#define	pmap_wired_count(pmap)		((pmap)->pm_stats.wired_count)

#define	pmap_remove(pmap,sva,eva)	pmap_do_remove((pmap),(sva),(eva),0)

#define	pmap_is_modified(pg)	\
	(((pg)->mdpage.pvh_attrs & PVF_MOD) != 0)
#define	pmap_is_referenced(pg)	\
	(((pg)->mdpage.pvh_attrs & PVF_REF) != 0)

#define	pmap_copy(dp, sp, da, l, sa)	/* nothing */

#define	pmap_phys_address(ppn)		(arm_ptob((ppn)))

/*
 * Functions that we need to export
 */
void	pmap_procwr(struct proc *, vaddr_t, int);
void	pmap_remove_all(pmap_t);
bool	pmap_extract(pmap_t, vaddr_t, paddr_t *);

#define	PMAP_NEED_PROCWR
#define	PMAP_GROWKERNEL		/* turn on pmap_growkernel interface */

/* Functions we use internally. */
void	pmap_bootstrap(pd_entry_t *, vaddr_t, vaddr_t);

void	pmap_do_remove(pmap_t, vaddr_t, vaddr_t, int);
int	pmap_fault_fixup(pmap_t, vaddr_t, vm_prot_t, int);
bool	pmap_get_pde_pte(pmap_t, vaddr_t, pd_entry_t **, pt_entry_t **);
bool	pmap_get_pde(pmap_t, vaddr_t, pd_entry_t **);
void	pmap_set_pcb_pagedir(pmap_t, struct pcb *);

void	pmap_debug(int);
void	pmap_postinit(void);

void	vector_page_setprot(int);

const struct pmap_devmap *pmap_devmap_find_pa(paddr_t, psize_t);
const struct pmap_devmap *pmap_devmap_find_va(vaddr_t, vsize_t);

/* Bootstrapping routines. */
void	pmap_map_section(vaddr_t, vaddr_t, paddr_t, int, int);
void	pmap_map_entry(vaddr_t, vaddr_t, paddr_t, int, int);
vsize_t	pmap_map_chunk(vaddr_t, vaddr_t, paddr_t, vsize_t, int, int);
void	pmap_link_l2pt(vaddr_t, vaddr_t, pv_addr_t *);
void	pmap_devmap_bootstrap(vaddr_t, const struct pmap_devmap *);
void	pmap_devmap_register(const struct pmap_devmap *);

/*
 * Special page zero routine for use by the idle loop (no cache cleans).
 */
bool	pmap_pageidlezero(paddr_t);
#define	PMAP_PAGEIDLEZERO(pa)	pmap_pageidlezero((pa))

/*
 * The current top of kernel VM
 */
extern vaddr_t	pmap_curmaxkvaddr;

/*
 * Useful macros and constants
 */

/* Virtual address to page table entry */
static inline pt_entry_t *
vtopte(vaddr_t va)
{
	pd_entry_t *pdep;
	pt_entry_t *ptep;

	if (pmap_get_pde_pte(pmap_kernel(), va, &pdep, &ptep) == false)
		return (NULL);
	return (ptep);
}

/*
 * Virtual address to physical address
 */
static inline paddr_t
vtophys(vaddr_t va)
{
	paddr_t pa;

	if (pmap_extract(pmap_kernel(), va, &pa) == false)
		return (0);	/* XXXSCW: Panic? */

	return (pa);
}
/*
 * The new pmap ensures that page tables are always mapped write-through.
 * Thus, on some platforms we can run fast and loose and avoid syncing PTEs
 * on every change.
 *
 * Unfortunately, not all CPUs have a write-through cache mode.  So we
 * define PMAP_NEEDS_PTE_SYNC for C code to conditionally do PTE syncs,
 * and if there is a chance that PTE syncs will be needed, we define
 * PMAP_INCLUDE_PTE_SYNC so that e.g. assembly code can include (and run)
 * the sync code.
 */
extern int pmap_needs_pte_sync;
#if defined(_KERNEL_OPT)
/*
 * StrongARM SA-1 caches do not have a write-through mode.  So, on these,
 * we need to do PTE syncs.  If only SA-1 is configured, then evaluate
 * this at compile time.
 */
#if (ARM_MMU_SA1 == 1) && (ARM_NMMUS == 1)
#define	PMAP_NEEDS_PTE_SYNC	1
#define	PMAP_INCLUDE_PTE_SYNC
#elif (ARM_MMU_SA1 == 0)
#define	PMAP_NEEDS_PTE_SYNC	0
#endif
#endif /* _KERNEL_OPT */

/*
 * Provide a fallback in case we were not able to determine it at
 * compile-time.
 */
#ifndef PMAP_NEEDS_PTE_SYNC
#define	PMAP_NEEDS_PTE_SYNC	pmap_needs_pte_sync
#define	PMAP_INCLUDE_PTE_SYNC
#endif

#define	PTE_SYNC(pte)							\
do {									\
	if (PMAP_NEEDS_PTE_SYNC)					\
		cpu_dcache_wb_range((vaddr_t)(pte), sizeof(pt_entry_t));\
} while (/*CONSTCOND*/0)

#define	PTE_SYNC_RANGE(pte, cnt)					\
do {									\
	if (PMAP_NEEDS_PTE_SYNC) {					\
		cpu_dcache_wb_range((vaddr_t)(pte),			\
		    (cnt) << 2); /* * sizeof(pt_entry_t) */		\
	}								\
} while (/*CONSTCOND*/0)
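
/*
 * Typical usage (a minimal sketch; ptep, pa and prot are illustrative):
 * modify the PTE in memory, then write it back so that a CPU whose
 * D-cache lacks a write-through mode cannot hide the update from the
 * MMU's table walker:
 *
 *	*ptep = L2_S_PROTO | pa |
 *	    L2_S_PROT(PTE_KERNEL, prot) | pte_l2_s_cache_mode;
 *	PTE_SYNC(ptep);
 */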

#define	l1pte_valid(pde)	((pde) != 0)
#define	l1pte_section_p(pde)	(((pde) & L1_TYPE_MASK) == L1_TYPE_S)
#define	l1pte_page_p(pde)	(((pde) & L1_TYPE_MASK) == L1_TYPE_C)
#define	l1pte_fpage_p(pde)	(((pde) & L1_TYPE_MASK) == L1_TYPE_F)

#define	l2pte_index(v)		(((v) & L2_ADDR_BITS) >> L2_S_SHIFT)
#define	l2pte_valid(pte)	((pte) != 0)
#define	l2pte_pa(pte)		((pte) & L2_S_FRAME)
#define	l2pte_minidata(pte)	(((pte) & \
				 (L2_B | L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X)))\
				 == (L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X)))

/* L1 and L2 page table macros */
#define	pmap_pde_v(pde)		l1pte_valid(*(pde))
#define	pmap_pde_section(pde)	l1pte_section_p(*(pde))
#define	pmap_pde_page(pde)	l1pte_page_p(*(pde))
#define	pmap_pde_fpage(pde)	l1pte_fpage_p(*(pde))

#define	pmap_pte_v(pte)		l2pte_valid(*(pte))
#define	pmap_pte_pa(pte)	l2pte_pa(*(pte))

/* Size of the kernel part of the L1 page table */
#define	KERNEL_PD_SIZE	\
	(L1_TABLE_SIZE - (KERNEL_BASE >> L1_S_SHIFT) * sizeof(pd_entry_t))
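
/*
 * Worked example (illustrative; assumes the common arm32 values
 * KERNEL_BASE == 0xc0000000, L1_S_SHIFT == 20 and
 * L1_TABLE_SIZE == 0x4000):
 *
 *	KERNEL_PD_SIZE = 0x4000 - (0xc0000000 >> 20) * 4
 *		       = 0x4000 - 3072 * 4 = 0x1000
 *
 * i.e. the kernel owns the top 1024 L1 entries, covering 1GB.
 */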

/************************* ARM MMU configuration *****************************/

#if (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0
void	pmap_copy_page_generic(paddr_t, paddr_t);
void	pmap_zero_page_generic(paddr_t);

void	pmap_pte_init_generic(void);
#if defined(CPU_ARM8)
void	pmap_pte_init_arm8(void);
#endif
#if defined(CPU_ARM9)
void	pmap_pte_init_arm9(void);
#endif /* CPU_ARM9 */
#if defined(CPU_ARM10)
void	pmap_pte_init_arm10(void);
#endif /* CPU_ARM10 */
#endif /* (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0 */

#if ARM_MMU_SA1 == 1
void	pmap_pte_init_sa1(void);
#endif /* ARM_MMU_SA1 == 1 */

#if ARM_MMU_XSCALE == 1
void	pmap_copy_page_xscale(paddr_t, paddr_t);
void	pmap_zero_page_xscale(paddr_t);

void	pmap_pte_init_xscale(void);

void	xscale_setup_minidata(vaddr_t, vaddr_t, paddr_t);

#define	PMAP_UAREA(va)		pmap_uarea(va)
void	pmap_uarea(vaddr_t);
#endif /* ARM_MMU_XSCALE == 1 */

extern pt_entry_t		pte_l1_s_cache_mode;
extern pt_entry_t		pte_l1_s_cache_mask;

extern pt_entry_t		pte_l2_l_cache_mode;
extern pt_entry_t		pte_l2_l_cache_mask;

extern pt_entry_t		pte_l2_s_cache_mode;
extern pt_entry_t		pte_l2_s_cache_mask;

extern pt_entry_t		pte_l1_s_cache_mode_pt;
extern pt_entry_t		pte_l2_l_cache_mode_pt;
extern pt_entry_t		pte_l2_s_cache_mode_pt;

extern pt_entry_t		pte_l2_s_prot_u;
extern pt_entry_t		pte_l2_s_prot_w;
extern pt_entry_t		pte_l2_s_prot_mask;

extern pt_entry_t		pte_l1_s_proto;
extern pt_entry_t		pte_l1_c_proto;
extern pt_entry_t		pte_l2_s_proto;

extern void (*pmap_copy_page_func)(paddr_t, paddr_t);
extern void (*pmap_zero_page_func)(paddr_t);

#endif /* !_LOCORE */

/*****************************************************************************/

/*
 * tell MI code that the cache is virtually-indexed *and* virtually-tagged.
 */
#define	PMAP_CACHE_VIVT

/*
 * Definitions for MMU domains
 */
#define	PMAP_DOMAINS		15	/* 15 'user' domains (0-14) */
#define	PMAP_DOMAIN_KERNEL	15	/* The kernel uses domain #15 */

/*
 * These macros define the various bit masks in the PTE.
 *
 * We use these macros since we use different bits on different processor
 * models.
 */
#define	L1_S_PROT_U		(L1_S_AP(AP_U))
#define	L1_S_PROT_W		(L1_S_AP(AP_W))
#define	L1_S_PROT_MASK		(L1_S_PROT_U|L1_S_PROT_W)

#define	L1_S_CACHE_MASK_generic	(L1_S_B|L1_S_C)
#define	L1_S_CACHE_MASK_xscale	(L1_S_B|L1_S_C|L1_S_XSCALE_TEX(TEX_XSCALE_X))

#define	L2_L_PROT_U		(L2_AP(AP_U))
#define	L2_L_PROT_W		(L2_AP(AP_W))
#define	L2_L_PROT_MASK		(L2_L_PROT_U|L2_L_PROT_W)

#define	L2_L_CACHE_MASK_generic	(L2_B|L2_C)
#define	L2_L_CACHE_MASK_xscale	(L2_B|L2_C|L2_XSCALE_L_TEX(TEX_XSCALE_X))

#define	L2_S_PROT_U_generic	(L2_AP(AP_U))
#define	L2_S_PROT_W_generic	(L2_AP(AP_W))
#define	L2_S_PROT_MASK_generic	(L2_S_PROT_U|L2_S_PROT_W)

#define	L2_S_PROT_U_xscale	(L2_AP0(AP_U))
#define	L2_S_PROT_W_xscale	(L2_AP0(AP_W))
#define	L2_S_PROT_MASK_xscale	(L2_S_PROT_U|L2_S_PROT_W)

#define	L2_S_CACHE_MASK_generic	(L2_B|L2_C)
#define	L2_S_CACHE_MASK_xscale	(L2_B|L2_C|L2_XSCALE_T_TEX(TEX_XSCALE_X))

#define	L1_S_PROTO_generic	(L1_TYPE_S | L1_S_IMP)
#define	L1_S_PROTO_xscale	(L1_TYPE_S)

#define	L1_C_PROTO_generic	(L1_TYPE_C | L1_C_IMP2)
#define	L1_C_PROTO_xscale	(L1_TYPE_C)

#define	L2_L_PROTO		(L2_TYPE_L)

#define	L2_S_PROTO_generic	(L2_TYPE_S)
#define	L2_S_PROTO_xscale	(L2_TYPE_XSCALE_XS)

/*
 * User-visible names for the ones that vary with MMU class.
 */

#if ARM_NMMUS > 1
/* More than one MMU class configured; use variables. */
#define	L2_S_PROT_U		pte_l2_s_prot_u
#define	L2_S_PROT_W		pte_l2_s_prot_w
#define	L2_S_PROT_MASK		pte_l2_s_prot_mask

#define	L1_S_CACHE_MASK		pte_l1_s_cache_mask
#define	L2_L_CACHE_MASK		pte_l2_l_cache_mask
#define	L2_S_CACHE_MASK		pte_l2_s_cache_mask

#define	L1_S_PROTO		pte_l1_s_proto
#define	L1_C_PROTO		pte_l1_c_proto
#define	L2_S_PROTO		pte_l2_s_proto

#define	pmap_copy_page(s, d)	(*pmap_copy_page_func)((s), (d))
#define	pmap_zero_page(d)	(*pmap_zero_page_func)((d))
#elif (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0
#define	L2_S_PROT_U		L2_S_PROT_U_generic
#define	L2_S_PROT_W		L2_S_PROT_W_generic
#define	L2_S_PROT_MASK		L2_S_PROT_MASK_generic

#define	L1_S_CACHE_MASK		L1_S_CACHE_MASK_generic
#define	L2_L_CACHE_MASK		L2_L_CACHE_MASK_generic
#define	L2_S_CACHE_MASK		L2_S_CACHE_MASK_generic

#define	L1_S_PROTO		L1_S_PROTO_generic
#define	L1_C_PROTO		L1_C_PROTO_generic
#define	L2_S_PROTO		L2_S_PROTO_generic

#define	pmap_copy_page(s, d)	pmap_copy_page_generic((s), (d))
#define	pmap_zero_page(d)	pmap_zero_page_generic((d))
#elif ARM_MMU_XSCALE == 1
#define	L2_S_PROT_U		L2_S_PROT_U_xscale
#define	L2_S_PROT_W		L2_S_PROT_W_xscale
#define	L2_S_PROT_MASK		L2_S_PROT_MASK_xscale

#define	L1_S_CACHE_MASK		L1_S_CACHE_MASK_xscale
#define	L2_L_CACHE_MASK		L2_L_CACHE_MASK_xscale
#define	L2_S_CACHE_MASK		L2_S_CACHE_MASK_xscale

#define	L1_S_PROTO		L1_S_PROTO_xscale
#define	L1_C_PROTO		L1_C_PROTO_xscale
#define	L2_S_PROTO		L2_S_PROTO_xscale

#define	pmap_copy_page(s, d)	pmap_copy_page_xscale((s), (d))
#define	pmap_zero_page(d)	pmap_zero_page_xscale((d))
#endif /* ARM_NMMUS > 1 */

/*
 * These macros return various bits based on kernel/user and protection.
 * Note that the compiler will usually fold these at compile time.
 */
#define	L1_S_PROT(ku, pr)	((((ku) == PTE_USER) ? L1_S_PROT_U : 0) | \
				 (((pr) & VM_PROT_WRITE) ? L1_S_PROT_W : 0))

#define	L2_L_PROT(ku, pr)	((((ku) == PTE_USER) ? L2_L_PROT_U : 0) | \
				 (((pr) & VM_PROT_WRITE) ? L2_L_PROT_W : 0))

#define	L2_S_PROT(ku, pr)	((((ku) == PTE_USER) ? L2_S_PROT_U : 0) | \
				 (((pr) & VM_PROT_WRITE) ? L2_S_PROT_W : 0))
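
/*
 * For instance, L2_S_PROT(PTE_USER, VM_PROT_READ|VM_PROT_WRITE)
 * reduces to the constant (L2_S_PROT_U | L2_S_PROT_W) when only one
 * MMU class is configured, so the compiler folds it at compile time.
 */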

/*
 * Macros to test if a mapping is mappable with an L1 Section mapping
 * or an L2 Large Page mapping.
 */
#define	L1_S_MAPPABLE_P(va, pa, size)					\
	((((va) | (pa)) & L1_S_OFFSET) == 0 && (size) >= L1_S_SIZE)

#define	L2_L_MAPPABLE_P(va, pa, size)					\
	((((va) | (pa)) & L2_L_OFFSET) == 0 && (size) >= L2_L_SIZE)
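
/*
 * For example (illustrative values): mapping pa 0x40000000 at va
 * 0xf0000000 with size 0x00400000 satisfies L1_S_MAPPABLE_P, since
 * both addresses are aligned to L1_S_SIZE (0x00100000 with 1MB
 * sections) and the size is at least one section, so pmap_map_chunk()
 * can use section mappings for the whole region.
 */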

/*
 * Hooks for the pool allocator.
 */
#define	POOL_VTOPHYS(va)	vtophys((vaddr_t) (va))

#endif /* _KERNEL */

#endif /* _ARM32_PMAP_H_ */