
Annotation of src/sys/arch/arm/include/arm32/pmap.h, Revision 1.157.2.1

1.157.2.1! ad          1: /*     $NetBSD: pmap.h,v 1.158 2020/01/12 20:06:52 christos Exp $      */
1.46      thorpej     2:
                      3: /*
1.65      scw         4:  * Copyright (c) 2002, 2003 Wasabi Systems, Inc.
1.46      thorpej     5:  * All rights reserved.
                      6:  *
1.65      scw         7:  * Written by Jason R. Thorpe & Steve C. Woodford for Wasabi Systems, Inc.
1.46      thorpej     8:  *
                      9:  * Redistribution and use in source and binary forms, with or without
                     10:  * modification, are permitted provided that the following conditions
                     11:  * are met:
                     12:  * 1. Redistributions of source code must retain the above copyright
                     13:  *    notice, this list of conditions and the following disclaimer.
                     14:  * 2. Redistributions in binary form must reproduce the above copyright
                     15:  *    notice, this list of conditions and the following disclaimer in the
                     16:  *    documentation and/or other materials provided with the distribution.
                     17:  * 3. All advertising materials mentioning features or use of this software
                     18:  *    must display the following acknowledgement:
                     19:  *     This product includes software developed for the NetBSD Project by
                     20:  *     Wasabi Systems, Inc.
                     21:  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
                     22:  *    or promote products derived from this software without specific prior
                     23:  *    written permission.
                     24:  *
                     25:  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
                     26:  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
                     27:  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
                     28:  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
                     29:  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
                     30:  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
                     31:  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
                     32:  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
                     33:  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
                     34:  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
                     35:  * POSSIBILITY OF SUCH DAMAGE.
                     36:  */
1.1       reinoud    37:
                     38: /*
                     39:  * Copyright (c) 1994,1995 Mark Brinicombe.
                     40:  * All rights reserved.
                     41:  *
                     42:  * Redistribution and use in source and binary forms, with or without
                     43:  * modification, are permitted provided that the following conditions
                     44:  * are met:
                     45:  * 1. Redistributions of source code must retain the above copyright
                     46:  *    notice, this list of conditions and the following disclaimer.
                     47:  * 2. Redistributions in binary form must reproduce the above copyright
                     48:  *    notice, this list of conditions and the following disclaimer in the
                     49:  *    documentation and/or other materials provided with the distribution.
                     50:  * 3. All advertising materials mentioning features or use of this software
                     51:  *    must display the following acknowledgement:
                     52:  *     This product includes software developed by Mark Brinicombe
                     53:  * 4. The name of the author may not be used to endorse or promote products
                     54:  *    derived from this software without specific prior written permission.
                     55:  *
                     56:  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
                     57:  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
                     58:  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
                     59:  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
                     60:  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
                     61:  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
                     62:  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
                     63:  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
                     64:  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
                     65:  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
                     66:  */
                     67:
                     68: #ifndef        _ARM32_PMAP_H_
                     69: #define        _ARM32_PMAP_H_
                     70:
1.18      thorpej    71: #ifdef _KERNEL
                     72:
1.52      thorpej    73: #include <arm/cpuconf.h>
1.75      bsh        74: #include <arm/arm32/pte.h>
                     75: #ifndef _LOCORE
1.85      matt       76: #if defined(_KERNEL_OPT)
                     77: #include "opt_arm32_pmap.h"
1.136     skrll      78: #include "opt_multiprocessor.h"
1.85      matt       79: #endif
1.19      thorpej    80: #include <arm/cpufunc.h>
1.138     joerg      81: #include <arm/locore.h>
1.12      chris      82: #include <uvm/uvm_object.h>
1.143     skrll      83: #include <uvm/pmap/pmap_pvt.h>
1.75      bsh        84: #endif
1.1       reinoud    85:
1.124     matt       86: #ifdef ARM_MMU_EXTENDED
1.147     skrll      87: #define PMAP_HWPAGEWALKER              1
1.124     matt       88: #define PMAP_TLB_MAX                   1
1.126     matt       89: #if PMAP_TLB_MAX > 1
1.144     skrll      90: #define PMAP_TLB_NEED_SHOOTDOWN                1
1.126     matt       91: #endif
                     92: #define PMAP_TLB_FLUSH_ASID_ON_RESET   (arm_has_tlbiasid_p)
1.124     matt       93: #define PMAP_TLB_NUM_PIDS              256
                     94: #define cpu_set_tlb_info(ci, ti)        ((void)((ci)->ci_tlb_info = (ti)))
                     95: #if PMAP_TLB_MAX > 1
                     96: #define cpu_tlb_info(ci)               ((ci)->ci_tlb_info)
                     97: #else
                     98: #define cpu_tlb_info(ci)               (&pmap_tlb0_info)
                     99: #endif
                    100: #define pmap_md_tlb_asid_max()         (PMAP_TLB_NUM_PIDS - 1)
                    101: #include <uvm/pmap/tlb.h>
                    102: #include <uvm/pmap/pmap_tlb.h>
                    103:
1.135     skrll     104: /*
1.124     matt      105:  * If we have an EXTENDED MMU and the address space is split evenly between
                    106:  * user and kernel, we can use TTBR0/TTBR1 to have separate L1 tables for
                    107:  * user and kernel address spaces.
1.135     skrll     108:  */
1.128     matt      109: #if (KERNEL_BASE & 0x80000000) == 0
                    110: #error ARMv6 or later systems must have a KERNEL_BASE >= 0x80000000
1.135     skrll     111: #endif
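/*
 * A sketch of how the split works (architectural behaviour, not code
 * from this file): with TTBCR.N = 1, VAs below 0x80000000 are
 * translated via TTBR0 (a per-process 8KB L1 table covering 2GB) and
 * VAs at or above 0x80000000 via TTBR1 (the kernel L1 table), so a
 * context switch only has to rewrite TTBR0.
 */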
1.124     matt      112: #endif  /* ARM_MMU_EXTENDED */
                    113:
1.1       reinoud   114: /*
 1.11      chris     115:  * A pmap describes a process's 4GB virtual address space.  This
                    116:  * virtual address space can be broken up into 4096 1MB regions which
1.38      thorpej   117:  * are described by L1 PTEs in the L1 table.
1.11      chris     118:  *
1.38      thorpej   119:  * There is a line drawn at KERNEL_BASE.  Everything below that line
                    120:  * changes when the VM context is switched.  Everything above that line
                    121:  * is the same no matter which VM context is running.  This is achieved
                    122:  * by making the L1 PTEs for those slots above KERNEL_BASE reference
                    123:  * kernel L2 tables.
1.11      chris     124:  *
1.38      thorpej   125:  * The basic layout of the virtual address space thus looks like this:
                    126:  *
                    127:  *     0xffffffff
                    128:  *     .
                    129:  *     .
                    130:  *     .
                    131:  *     KERNEL_BASE
                    132:  *     --------------------
                    133:  *     .
                    134:  *     .
                    135:  *     .
                    136:  *     0x00000000
1.11      chris     137:  */
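/*
 * For illustration (a sketch using the macros defined further below):
 * with 1MB sections the L1 slot of a VA is simply va >> L1_S_SHIFT,
 * e.g. a KERNEL_BASE of 0xc0000000 starts at L1 slot 0xc00 (3072 of
 * the 4096 slots).
 */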
                    138:
1.65      scw       139: /*
                    140:  * The number of L2 descriptor tables which can be tracked by an l2_dtable.
                    141:  * A bucket size of 16 provides for 16MB of contiguous virtual address
                    142:  * space per l2_dtable. Most processes will, therefore, require only two or
                    143:  * three of these to map their whole working set.
                    144:  */
1.124     matt      145: #define        L2_BUCKET_XLOG2 (L1_S_SHIFT)
                    146: #define L2_BUCKET_XSIZE        (1 << L2_BUCKET_XLOG2)
1.65      scw       147: #define        L2_BUCKET_LOG2  4
                    148: #define        L2_BUCKET_SIZE  (1 << L2_BUCKET_LOG2)
                    149:
                    150: /*
                    151:  * Given the above "L2-descriptors-per-l2_dtable" constant, the number
                    152:  * of l2_dtable structures required to track all possible page descriptors
                    153:  * mappable by an L1 translation table is given by the following constants:
                    154:  */
1.124     matt      155: #define        L2_LOG2         (32 - (L2_BUCKET_XLOG2 + L2_BUCKET_LOG2))
1.65      scw       156: #define        L2_SIZE         (1 << L2_LOG2)
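/*
 * Worked numbers (assuming the usual 1MB section size, i.e.
 * L1_S_SHIFT == 20): L2_BUCKET_XSIZE is 1MB, so one l2_dtable spans
 * 16 x 1MB = 16MB, and L2_LOG2 = 32 - (20 + 4) = 8, giving L2_SIZE =
 * 256 l2_dtable slots (see pm_l2[] in struct pmap below) to cover the
 * full 4GB address space.
 */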
                    157:
1.90      matt      158: /*
                    159:  * Tell MI code that the cache is virtually-indexed.
                    160:  * ARMv6/ARMv7 caches are physically-tagged (VIPT); earlier are virtually-tagged (VIVT).
                    161:  */
1.95      jmcneill  162: #if (ARM_MMU_V6 + ARM_MMU_V7) > 0
1.90      matt      163: #define PMAP_CACHE_VIPT
                    164: #else
                    165: #define PMAP_CACHE_VIVT
                    166: #endif
                    167:
1.75      bsh       168: #ifndef _LOCORE
                    169:
1.146     skrll     170: #ifndef ARM_MMU_EXTENDED
1.65      scw       171: struct l1_ttable;
                    172: struct l2_dtable;
                    173:
                    174: /*
                    175:  * Track cache/tlb occupancy using the following structure
                    176:  */
                    177: union pmap_cache_state {
                    178:        struct {
                    179:                union {
1.115     skrll     180:                        uint8_t csu_cache_b[2];
                    181:                        uint16_t csu_cache;
1.65      scw       182:                } cs_cache_u;
                    183:
                    184:                union {
1.115     skrll     185:                        uint8_t csu_tlb_b[2];
                    186:                        uint16_t csu_tlb;
1.65      scw       187:                } cs_tlb_u;
                    188:        } cs_s;
1.115     skrll     189:        uint32_t cs_all;
1.65      scw       190: };
                    191: #define        cs_cache_id     cs_s.cs_cache_u.csu_cache_b[0]
                    192: #define        cs_cache_d      cs_s.cs_cache_u.csu_cache_b[1]
                    193: #define        cs_cache        cs_s.cs_cache_u.csu_cache
                    194: #define        cs_tlb_id       cs_s.cs_tlb_u.csu_tlb_b[0]
                    195: #define        cs_tlb_d        cs_s.cs_tlb_u.csu_tlb_b[1]
                    196: #define        cs_tlb          cs_s.cs_tlb_u.csu_tlb
                    197:
                    198: /*
                    199:  * Assigned to cs_all to force cacheops to work for a particular pmap
                    200:  */
                    201: #define        PMAP_CACHE_STATE_ALL    0xffffffffu
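/*
 * Typical use (a sketch, not a quote from pmap.c): the state allows
 * cache/TLB maintenance to be skipped when a pmap's mappings cannot
 * be live in the cache, e.g.:
 *
 *	if (pm->pm_cstate.cs_cache_d != 0)
 *		cpu_dcache_wbinv_range(va, PAGE_SIZE);
 */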
1.124     matt      202: #endif /* !ARM_MMU_EXTENDED */
1.65      scw       203:
                    204: /*
1.73      thorpej   205:  * This structure is used by machine-dependent code to describe
                    206:  * static mappings of devices, created at bootstrap time.
                    207:  */
                    208: struct pmap_devmap {
                    209:        vaddr_t         pd_va;          /* virtual address */
                    210:        paddr_t         pd_pa;          /* physical address */
                    211:        psize_t         pd_size;        /* size of region */
                    212:        vm_prot_t       pd_prot;        /* protection code */
                    213:        int             pd_cache;       /* cache attributes */
                    214: };
                    215:
1.153     skrll     216: #define        DEVMAP_ALIGN(a) ((a) & ~L1_S_OFFSET)
                    217: #define        DEVMAP_SIZE(s)  roundup2((s), L1_S_SIZE)
                    218: #define        DEVMAP_ENTRY(va, pa, sz)                        \
                    219:        {                                               \
                    220:                .pd_va = DEVMAP_ALIGN(va),              \
                    221:                .pd_pa = DEVMAP_ALIGN(pa),              \
                    222:                .pd_size = DEVMAP_SIZE(sz),             \
                    223:                .pd_prot = VM_PROT_READ|VM_PROT_WRITE,  \
                    224:                .pd_cache = PTE_NOCACHE                 \
                    225:        }
                    226: #define        DEVMAP_ENTRY_END        { 0 }
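/*
 * Example usage (a sketch; the MYBOARD_* constants are hypothetical):
 *
 *	static const struct pmap_devmap myboard_devmap[] = {
 *		DEVMAP_ENTRY(MYBOARD_UART_VBASE, MYBOARD_UART_PBASE,
 *		    MYBOARD_UART_SIZE),
 *		DEVMAP_ENTRY_END
 *	};
 *
 * A table like this is handed to pmap_devmap_register() or
 * pmap_devmap_bootstrap() (declared below) early in machine startup.
 */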
                    227:
1.73      thorpej   228: /*
1.65      scw       229:  * The pmap structure itself
                    230:  */
                    231: struct pmap {
1.124     matt      232:        struct uvm_object       pm_obj;
                    233:        kmutex_t                pm_obj_lock;
                    234: #define        pm_lock pm_obj.vmobjlock
1.120     matt      235: #ifndef ARM_HAS_VBAR
1.82      scw       236:        pd_entry_t              *pm_pl1vec;
1.124     matt      237:        pd_entry_t              pm_l1vec;
1.120     matt      238: #endif
1.65      scw       239:        struct l2_dtable        *pm_l2[L2_SIZE];
                    240:        struct pmap_statistics  pm_stats;
                    241:        LIST_ENTRY(pmap)        pm_list;
1.124     matt      242: #ifdef ARM_MMU_EXTENDED
                    243:        pd_entry_t              *pm_l1;
                    244:        paddr_t                 pm_l1_pa;
                    245:        bool                    pm_remove_all;
                    246: #ifdef MULTIPROCESSOR
                    247:        kcpuset_t               *pm_onproc;
                    248:        kcpuset_t               *pm_active;
1.126     matt      249: #if PMAP_TLB_MAX > 1
                    250:        u_int                   pm_shootdown_pending;
                    251: #endif
1.124     matt      252: #endif
1.126     matt      253:        struct pmap_asid_info   pm_pai[PMAP_TLB_MAX];
1.124     matt      254: #else
                    255:        struct l1_ttable        *pm_l1;
                    256:        union pmap_cache_state  pm_cstate;
                    257:        uint8_t                 pm_domain;
                    258:        bool                    pm_activated;
                    259:        bool                    pm_remove_all;
                    260: #endif
                    261: };
                    262:
                    263: struct pmap_kernel {
                    264:        struct pmap             kernel_pmap;
1.65      scw       265: };
                    266:
1.106     martin    267: /*
                    268:  * Physical / virtual address structure. In a number of places (particularly
                    269:  * during bootstrapping) we need to keep track of the physical and virtual
                    270:  * addresses of various pages.
                    271:  */
                    272: typedef struct pv_addr {
                    273:        SLIST_ENTRY(pv_addr) pv_list;
                    274:        paddr_t pv_pa;
                    275:        vaddr_t pv_va;
                    276:        vsize_t pv_size;
                    277:        uint8_t pv_cache;
                    278:        uint8_t pv_prot;
                    279: } pv_addr_t;
                    280: typedef SLIST_HEAD(, pv_addr) pv_addrqh_t;
                    281:
1.85      matt      282: extern pv_addrqh_t pmap_freeq;
1.102     matt      283: extern pv_addr_t kernelstack;
                    284: extern pv_addr_t abtstack;
                    285: extern pv_addr_t fiqstack;
                    286: extern pv_addr_t irqstack;
                    287: extern pv_addr_t undstack;
1.103     matt      288: extern pv_addr_t idlestack;
1.85      matt      289: extern pv_addr_t systempage;
                    290: extern pv_addr_t kernel_l1pt;
1.1       reinoud   291:
1.126     matt      292: #ifdef ARM_MMU_EXTENDED
                    293: extern bool arm_has_tlbiasid_p;        /* also in <arm/locore.h> */
                    294: #endif
                    295:
1.1       reinoud   296: /*
1.24      thorpej   297:  * Determine various modes for PTEs (user vs. kernel, cacheable
                    298:  * vs. non-cacheable).
                    299:  */
                    300: #define        PTE_KERNEL      0
                    301: #define        PTE_USER        1
                    302: #define        PTE_NOCACHE     0
                    303: #define        PTE_CACHE       1
1.65      scw       304: #define        PTE_PAGETABLE   2
1.24      thorpej   305:
                    306: /*
1.43      thorpej   307:  * Flags that indicate attributes of pages or mappings of pages.
                    308:  *
                    309:  * The PVF_MOD and PVF_REF flags are stored in the mdpage for each
                    310:  * page.  PVF_WIRED, PVF_WRITE, and PVF_NC are kept in individual
                    311:  * pv_entry's for each page.  They live in the same "namespace" so
                    312:  * that we can clear multiple attributes at a time.
                    313:  *
                    314:  * Note the "non-cacheable" flag generally means the page has
                    315:  * multiple mappings in a given address space.
                    316:  */
                    317: #define        PVF_MOD         0x01            /* page is modified */
                    318: #define        PVF_REF         0x02            /* page is referenced */
                    319: #define        PVF_WIRED       0x04            /* mapping is wired */
                    320: #define        PVF_WRITE       0x08            /* mapping is writable */
1.56      thorpej   321: #define        PVF_EXEC        0x10            /* mapping is executable */
1.90      matt      322: #ifdef PMAP_CACHE_VIVT
1.65      scw       323: #define        PVF_UNC         0x20            /* mapping is 'user' non-cacheable */
                    324: #define        PVF_KNC         0x40            /* mapping is 'kernel' non-cacheable */
1.90      matt      325: #define        PVF_NC          (PVF_UNC|PVF_KNC)
                    326: #endif
                    327: #ifdef PMAP_CACHE_VIPT
                    328: #define        PVF_NC          0x20            /* mapping is 'kernel' non-cacheable */
                    329: #define        PVF_MULTCLR     0x40            /* mapping is multi-colored */
                    330: #endif
1.85      matt      331: #define        PVF_COLORED     0x80            /* page has or had a color */
                    332: #define        PVF_KENTRY      0x0100          /* page entered via pmap_kenter_pa */
1.86      matt      333: #define        PVF_KMPAGE      0x0200          /* page is used for kmem */
1.87      matt      334: #define        PVF_DIRTY       0x0400          /* page may have dirty cache lines */
1.88      matt      335: #define        PVF_KMOD        0x0800          /* unmanaged page is modified  */
                    336: #define        PVF_KWRITE      (PVF_KENTRY|PVF_WRITE)
                    337: #define        PVF_DMOD        (PVF_MOD|PVF_KMOD|PVF_KMPAGE)
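/*
 * Because the flags share one namespace, several attributes can be
 * cleared in a single operation, e.g. (a sketch):
 *
 *	md->pvh_attrs &= ~(PVF_MOD|PVF_REF);
 */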
1.43      thorpej   338:
                    339: /*
1.1       reinoud   340:  * Commonly referenced structures
                    341:  */
1.4       matt      342: extern int             pmap_debug_level; /* Only exists if PMAP_DEBUG */
1.113     matt      343: extern int             arm_poolpage_vmfreelist;
1.1       reinoud   344:
                    345: /*
                    346:  * Macros that we need to export
                    347:  */
                    348: #define        pmap_resident_count(pmap)       ((pmap)->pm_stats.resident_count)
                    349: #define        pmap_wired_count(pmap)          ((pmap)->pm_stats.wired_count)
1.31      thorpej   350:
1.43      thorpej   351: #define        pmap_is_modified(pg)    \
                    352:        (((pg)->mdpage.pvh_attrs & PVF_MOD) != 0)
                    353: #define        pmap_is_referenced(pg)  \
                    354:        (((pg)->mdpage.pvh_attrs & PVF_REF) != 0)
1.96      uebayasi  355: #define        pmap_is_page_colored_p(md)      \
                    356:        (((md)->pvh_attrs & PVF_COLORED) != 0)
1.41      thorpej   357:
                    358: #define        pmap_copy(dp, sp, da, l, sa)    /* nothing */
1.60      chs       359:
1.35      thorpej   360: #define pmap_phys_address(ppn)         (arm_ptob((ppn)))
1.98      macallan  361: u_int arm32_mmap_flags(paddr_t);
1.137     skrll     362: #define ARM32_MMAP_WRITECOMBINE                0x40000000
1.98      macallan  363: #define ARM32_MMAP_CACHEABLE           0x20000000
1.155     ryo       364: #define ARM_MMAP_WRITECOMBINE          ARM32_MMAP_WRITECOMBINE
                    365: #define ARM_MMAP_CACHEABLE             ARM32_MMAP_CACHEABLE
1.137     skrll     366: #define pmap_mmap_flags(ppn)           arm32_mmap_flags(ppn)
1.1       reinoud   367:
1.123     matt      368: #define        PMAP_PTE                        0x10000000 /* kenter_pa */
                    369:
1.1       reinoud   370: /*
                    371:  * Functions that we need to export
                    372:  */
1.39      thorpej   373: void   pmap_procwr(struct proc *, vaddr_t, int);
1.65      scw       374: void   pmap_remove_all(pmap_t);
1.80      thorpej   375: bool   pmap_extract(pmap_t, vaddr_t, paddr_t *);
1.39      thorpej   376:
1.1       reinoud   377: #define        PMAP_NEED_PROCWR
1.29      chris     378: #define PMAP_GROWKERNEL                /* turn on pmap_growkernel interface */
1.92      thorpej   379: #define        PMAP_ENABLE_PMAP_KMPAGE /* enable the PMAP_KMPAGE flag */
1.4       matt      380:
1.95      jmcneill  381: #if (ARM_MMU_V6 + ARM_MMU_V7) > 0
1.85      matt      382: #define        PMAP_PREFER(hint, vap, sz, td)  pmap_prefer((hint), (vap), (td))
                    383: void   pmap_prefer(vaddr_t, vaddr_t *, int);
                    384: #endif
                    385:
                    386: void   pmap_icache_sync_range(pmap_t, vaddr_t, vaddr_t);
                    387:
1.39      thorpej   388: /* Functions we use internally. */
1.85      matt      389: #ifdef PMAP_STEAL_MEMORY
                    390: void   pmap_boot_pagealloc(psize_t, psize_t, psize_t, pv_addr_t *);
                    391: void   pmap_boot_pageadd(pv_addr_t *);
                    392: vaddr_t        pmap_steal_memory(vsize_t, vaddr_t *, vaddr_t *);
                    393: #endif
                    394: void   pmap_bootstrap(vaddr_t, vaddr_t);
1.65      scw       395:
1.78      scw       396: void   pmap_do_remove(pmap_t, vaddr_t, vaddr_t, int);
1.70      scw       397: int    pmap_fault_fixup(pmap_t, vaddr_t, vm_prot_t, int);
1.124     matt      398: int    pmap_prefetchabt_fixup(void *);
1.80      thorpej   399: bool   pmap_get_pde_pte(pmap_t, vaddr_t, pd_entry_t **, pt_entry_t **);
                    400: bool   pmap_get_pde(pmap_t, vaddr_t, pd_entry_t **);
1.155     ryo       401: bool   pmap_extract_coherency(pmap_t, vaddr_t, paddr_t *, bool *);
1.65      scw       402:
                    403: void   pmap_debug(int);
1.39      thorpej   404: void   pmap_postinit(void);
1.42      thorpej   405:
                    406: void   vector_page_setprot(int);
1.24      thorpej   407:
1.73      thorpej   408: const struct pmap_devmap *pmap_devmap_find_pa(paddr_t, psize_t);
                    409: const struct pmap_devmap *pmap_devmap_find_va(vaddr_t, vsize_t);
                    410:
1.24      thorpej   411: /* Bootstrapping routines. */
                    412: void   pmap_map_section(vaddr_t, vaddr_t, paddr_t, int, int);
1.25      thorpej   413: void   pmap_map_entry(vaddr_t, vaddr_t, paddr_t, int, int);
1.28      thorpej   414: vsize_t        pmap_map_chunk(vaddr_t, vaddr_t, paddr_t, vsize_t, int, int);
1.156     skrll     415: void   pmap_unmap_chunk(vaddr_t, vaddr_t, vsize_t);
1.28      thorpej   416: void   pmap_link_l2pt(vaddr_t, vaddr_t, pv_addr_t *);
1.73      thorpej   417: void   pmap_devmap_bootstrap(vaddr_t, const struct pmap_devmap *);
1.74      thorpej   418: void   pmap_devmap_register(const struct pmap_devmap *);
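/*
 * Bootstrap usage sketch (l1pt_va, physical_start and kernel_size are
 * illustrative names): a port typically maps the kernel image with
 *
 *	pmap_map_chunk(l1pt_va, KERNEL_BASE, physical_start,
 *	    kernel_size, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
 *
 * which selects section, large-page or small-page mappings according
 * to the alignment and size of the chunk.
 */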
1.13      chris     419:
                    420: /*
1.135     skrll     421:  * Special page zero routine for use by the idle loop (no cache cleans).
1.13      chris     422:  */
1.80      thorpej   423: bool   pmap_pageidlezero(paddr_t);
1.13      chris     424: #define PMAP_PAGEIDLEZERO(pa)  pmap_pageidlezero((pa))
1.1       reinoud   425:
1.131     matt      426: #ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS
                    427: /*
                    428:  * For the pmap, this is a more useful way to map a direct mapped page.
                    429:  * It returns the direct-mapped VA or, if the page cannot be direct
                    430:  * mapped, the VA supplied.
                    431:  */
                    432: vaddr_t        pmap_direct_mapped_phys(paddr_t, bool *, vaddr_t);
                    433: #endif
                    434:
1.29      chris     435: /*
 1.84      chris     436:  * Used by dumpsys to record the PA of the L1 table.
                    437:  */
                    438: uint32_t pmap_kernel_L1_addr(void);
                    439: /*
1.29      chris     440:  * The current top of kernel VM
                    441:  */
                    442: extern vaddr_t pmap_curmaxkvaddr;
1.1       reinoud   443:
1.131     matt      444: #if defined(ARM_MMU_EXTENDED) && defined(__HAVE_MM_MD_DIRECT_MAPPED_PHYS)
                    445: /*
1.141     matt      446:  * Ending VA of direct mapped memory (usually KERNEL_VM_BASE).
1.131     matt      447:  */
1.140     matt      448: extern vaddr_t pmap_directlimit;
1.131     matt      449: #endif
                    450:
1.1       reinoud   451: /*
1.135     skrll     452:  * Useful macros and constants
1.1       reinoud   453:  */
1.59      thorpej   454:
1.65      scw       455: /* Virtual address to page table entry */
1.79      perry     456: static inline pt_entry_t *
1.65      scw       457: vtopte(vaddr_t va)
                    458: {
                    459:        pd_entry_t *pdep;
                    460:        pt_entry_t *ptep;
                    461:
1.124     matt      462:        KASSERT(trunc_page(va) == va);
                    463:
1.81      thorpej   464:        if (pmap_get_pde_pte(pmap_kernel(), va, &pdep, &ptep) == false)
1.65      scw       465:                return (NULL);
                    466:        return (ptep);
                    467: }
                    468:
                    469: /*
                    470:  * Virtual address to physical address
                    471:  */
1.79      perry     472: static inline paddr_t
1.65      scw       473: vtophys(vaddr_t va)
                    474: {
                    475:        paddr_t pa;
                    476:
1.81      thorpej   477:        if (pmap_extract(pmap_kernel(), va, &pa) == false)
1.65      scw       478:                return (0);     /* XXXSCW: Panic? */
                    479:
                    480:        return (pa);
                    481: }
                    482:
                    483: /*
                    484:  * The new pmap ensures that page tables are always mapped Write-Thru.
                    485:  * Thus, on some platforms we can run fast and loose and avoid syncing PTEs
                    486:  * on every change.
                    487:  *
1.69      thorpej   488:  * Unfortunately, not all CPUs have a write-through cache mode.  So we
                    489:  * define PMAP_NEEDS_PTE_SYNC for C code to conditionally do PTE syncs,
                    490:  * and, if there is any chance that PTE syncs may be needed, we define
                    491:  * PMAP_INCLUDE_PTE_SYNC so e.g. assembly code can include (and run)
                    492:  * the code.
                    493:  */
                    494: extern int pmap_needs_pte_sync;
                    495: #if defined(_KERNEL_OPT)
                    496: /*
1.145     skrll     497:  * Perform compile time evaluation of PMAP_NEEDS_PTE_SYNC when only a
                    498:  * single MMU type is selected.
                    499:  *
1.69      thorpej   500:  * StrongARM SA-1 caches do not have a write-through mode.  So, on these,
 1.145     skrll     501:  * we need to do PTE syncs.  V6 MMUs need PTE syncs as well.
                    502:  * Finally, MEMC, GENERIC and XSCALE MMUs do not need PTE syncs.
                    503:  *
                    504:  * Use run time evaluation for all other cases.
 1.69      thorpej   506:  */
1.145     skrll     507: #if (ARM_NMMUS == 1)
                    508: #if (ARM_MMU_SA1 + ARM_MMU_V6 != 0)
1.104     matt      509: #define        PMAP_INCLUDE_PTE_SYNC
1.109     matt      510: #define        PMAP_NEEDS_PTE_SYNC     1
1.145     skrll     511: #elif (ARM_MMU_MEMC + ARM_MMU_GENERIC + ARM_MMU_XSCALE != 0)
1.69      thorpej   512: #define        PMAP_NEEDS_PTE_SYNC     0
                    513: #endif
1.112     matt      514: #endif
1.69      thorpej   515: #endif /* _KERNEL_OPT */
                    516:
                    517: /*
                    518:  * Provide a fallback in case we were not able to determine it at
                    519:  * compile-time.
1.65      scw       520:  */
1.69      thorpej   521: #ifndef PMAP_NEEDS_PTE_SYNC
                    522: #define        PMAP_NEEDS_PTE_SYNC     pmap_needs_pte_sync
                    523: #define        PMAP_INCLUDE_PTE_SYNC
                    524: #endif
1.65      scw       525:
1.104     matt      526: static inline void
                    527: pmap_ptesync(pt_entry_t *ptep, size_t cnt)
                    528: {
1.132     matt      529:        if (PMAP_NEEDS_PTE_SYNC) {
1.104     matt      530:                cpu_dcache_wb_range((vaddr_t)ptep, cnt * sizeof(pt_entry_t));
1.132     matt      531: #ifdef SHEEVA_L2_CACHE
                    532:                cpu_sdcache_wb_range((vaddr_t)ptep, -1,
                    533:                    cnt * sizeof(pt_entry_t));
                    534: #endif
                    535:        }
1.138     joerg     536:        arm_dsb();
1.104     matt      537: }
1.69      thorpej   538:
1.124     matt      539: #define        PDE_SYNC(pdep)                  pmap_ptesync((pdep), 1)
                    540: #define        PDE_SYNC_RANGE(pdep, cnt)       pmap_ptesync((pdep), (cnt))
                    541: #define        PTE_SYNC(ptep)                  pmap_ptesync((ptep), PAGE_SIZE / L2_S_SIZE)
1.104     matt      542: #define        PTE_SYNC_RANGE(ptep, cnt)       pmap_ptesync((ptep), (cnt))
1.65      scw       543:
1.124     matt      544: #define l1pte_valid_p(pde)     ((pde) != 0)
                    545: #define l1pte_section_p(pde)   (((pde) & L1_TYPE_MASK) == L1_TYPE_S)
                    546: #define l1pte_supersection_p(pde) (l1pte_section_p(pde)        \
1.104     matt      547:                                && ((pde) & L1_S_V6_SUPER) != 0)
1.124     matt      548: #define l1pte_page_p(pde)      (((pde) & L1_TYPE_MASK) == L1_TYPE_C)
                    549: #define l1pte_fpage_p(pde)     (((pde) & L1_TYPE_MASK) == L1_TYPE_F)
                    550: #define l1pte_pa(pde)          ((pde) & L1_C_ADDR_MASK)
                    551: #define l1pte_index(v)         ((vaddr_t)(v) >> L1_S_SHIFT)
                    552: #define l1pte_pgindex(v)       l1pte_index((v) & L1_ADDR_BITS \
                    553:                & ~(PAGE_SIZE * PAGE_SIZE / sizeof(pt_entry_t) - 1))
                    554:
                    555: static inline void
                    556: l1pte_setone(pt_entry_t *pdep, pt_entry_t pde)
                    557: {
                    558:        *pdep = pde;
                    559: }
1.36      thorpej   560:
1.124     matt      561: static inline void
                    562: l1pte_set(pt_entry_t *pdep, pt_entry_t pde)
                    563: {
                    564:        *pdep = pde;
                    565:        if (l1pte_page_p(pde)) {
                    566:                KASSERTMSG((((uintptr_t)pdep / sizeof(pde)) & (PAGE_SIZE / L2_T_SIZE - 1)) == 0, "%p", pdep);
1.157.2.1! ad        567:                for (int k = 1; k < PAGE_SIZE / L2_T_SIZE; k++) {
1.124     matt      568:                        pde += L2_T_SIZE;
                    569:                        pdep[k] = pde;
                    570:                }
                    571:        } else if (l1pte_supersection_p(pde)) {
                    572:                KASSERTMSG((((uintptr_t)pdep / sizeof(pde)) & (L1_SS_SIZE / L1_S_SIZE - 1)) == 0, "%p", pdep);
1.157.2.1! ad        573:                for (int k = 1; k < L1_SS_SIZE / L1_S_SIZE; k++) {
1.124     matt      574:                        pdep[k] = pde;
                    575:                }
                    576:        }
                    577: }
                    578:
                    579: #define l2pte_index(v)         ((((v) & L2_ADDR_BITS) >> PGSHIFT) << (PGSHIFT-L2_S_SHIFT))
                    580: #define l2pte_valid_p(pte)     (((pte) & L2_TYPE_MASK) != L2_TYPE_INV)
                    581: #define l2pte_pa(pte)          ((pte) & L2_S_FRAME)
                    582: #define l1pte_lpage_p(pte)     (((pte) & L2_TYPE_MASK) == L2_TYPE_L) /* XXX: tests an L2 descriptor */
                    583: #define l2pte_minidata_p(pte)  (((pte) & \
1.85      matt      584:                                 (L2_B | L2_C | L2_XS_T_TEX(TEX_XSCALE_X)))\
                    585:                                 == (L2_C | L2_XS_T_TEX(TEX_XSCALE_X)))
1.35      thorpej   586:
1.121     matt      587: static inline void
                    588: l2pte_set(pt_entry_t *ptep, pt_entry_t pte, pt_entry_t opte)
                    589: {
1.129     skrll     590:        if (l1pte_lpage_p(pte)) {
1.139     skrll     591:                KASSERTMSG((((uintptr_t)ptep / sizeof(pte)) & (L2_L_SIZE / L2_S_SIZE - 1)) == 0, "%p", ptep);
1.157.2.1! ad        592:                for (int k = 0; k < L2_L_SIZE / L2_S_SIZE; k++) {
1.129     skrll     593:                        *ptep++ = pte;
                    594:                }
                    595:        } else {
1.139     skrll     596:                KASSERTMSG((((uintptr_t)ptep / sizeof(pte)) & (PAGE_SIZE / L2_S_SIZE - 1)) == 0, "%p", ptep);
1.157.2.1! ad        597:                for (int k = 0; k < PAGE_SIZE / L2_S_SIZE; k++) {
1.129     skrll     598:                        KASSERTMSG(*ptep == opte, "%#x [*%p] != %#x", *ptep, ptep, opte);
                    599:                        *ptep++ = pte;
                    600:                        pte += L2_S_SIZE;
                    601:                        if (opte)
                    602:                                opte += L2_S_SIZE;
                    603:                }
1.121     matt      604:        }
1.129     skrll     605: }
1.121     matt      606:
                    607: static inline void
                    608: l2pte_reset(pt_entry_t *ptep)
                    609: {
1.139     skrll     610:        KASSERTMSG((((uintptr_t)ptep / sizeof(*ptep)) & (PAGE_SIZE / L2_S_SIZE - 1)) == 0, "%p", ptep);
1.121     matt      611:        *ptep = 0;
1.157.2.1! ad        612:        for (int k = 1; k < PAGE_SIZE / L2_S_SIZE; k++) {
1.121     matt      613:                ptep[k] = 0;
                    614:        }
1.135     skrll     615: }
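/*
 * Typical call sequence (a sketch): updates of L2 entries are paired
 * with a PTE_SYNC so that CPUs lacking a write-through page-table
 * mapping see the change, e.g.:
 *
 *	l2pte_set(ptep, npte, opte);
 *	PTE_SYNC(ptep);
 */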
1.121     matt      616:
1.1       reinoud   617: /* L1 and L2 page table macros */
 1.36      thorpej   618: #define pmap_pde_v(pde)                l1pte_valid_p(*(pde))
                    619: #define pmap_pde_section(pde)  l1pte_section_p(*(pde))
1.107     matt      620: #define pmap_pde_supersection(pde)     l1pte_supersection_p(*(pde))
1.36      thorpej   621: #define pmap_pde_page(pde)     l1pte_page_p(*(pde))
                    622: #define pmap_pde_fpage(pde)    l1pte_fpage_p(*(pde))
1.16      rearnsha  623:
1.124     matt      624: #define        pmap_pte_v(pte)         l2pte_valid_p(*(pte))
1.36      thorpej   625: #define        pmap_pte_pa(pte)        l2pte_pa(*(pte))
1.35      thorpej   626:
1.1       reinoud   627: /* Size of the kernel part of the L1 page table */
                    628: #define KERNEL_PD_SIZE \
1.44      thorpej   629:        (L1_TABLE_SIZE - (KERNEL_BASE >> L1_S_SHIFT) * sizeof(pd_entry_t))
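/*
 * Worked example (assuming KERNEL_BASE == 0xc0000000, a 16KB L1 table
 * and 4-byte PDEs): 0xc0000000 >> L1_S_SHIFT = 3072 user slots, so
 * KERNEL_PD_SIZE = 16384 - 3072 * 4 = 4096 bytes of kernel PDEs.
 */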
1.20      chs       630:
1.117     matt      631: void   bzero_page(vaddr_t);
                    632: void   bcopy_page(vaddr_t, vaddr_t);
1.46      thorpej   633:
1.116     matt      634: #ifdef FPU_VFP
1.117     matt      635: void   bzero_page_vfp(vaddr_t);
                    636: void   bcopy_page_vfp(vaddr_t, vaddr_t);
1.116     matt      637: #endif
                    638:
1.117     matt      639: /************************* ARM MMU configuration *****************************/
                    640:
1.95      jmcneill  641: #if (ARM_MMU_GENERIC + ARM_MMU_SA1 + ARM_MMU_V6 + ARM_MMU_V7) != 0
1.51      thorpej   642: void   pmap_copy_page_generic(paddr_t, paddr_t);
                    643: void   pmap_zero_page_generic(paddr_t);
                    644:
1.46      thorpej   645: void   pmap_pte_init_generic(void);
1.69      thorpej   646: #if defined(CPU_ARM8)
                    647: void   pmap_pte_init_arm8(void);
                    648: #endif
1.46      thorpej   649: #if defined(CPU_ARM9)
                    650: void   pmap_pte_init_arm9(void);
                    651: #endif /* CPU_ARM9 */
1.76      rearnsha  652: #if defined(CPU_ARM10)
                    653: void   pmap_pte_init_arm10(void);
                    654: #endif /* CPU_ARM10 */
1.103     matt      655: #if defined(CPU_ARM11) /* ARM_MMU_V6 */
1.94      uebayasi  656: void   pmap_pte_init_arm11(void);
                    657: #endif /* CPU_ARM11 */
1.103     matt      658: #if defined(CPU_ARM11MPCORE)   /* ARM_MMU_V6 */
1.99      bsh       659: void   pmap_pte_init_arm11mpcore(void);
                    660: #endif
1.103     matt      661: #if ARM_MMU_V7 == 1
                    662: void   pmap_pte_init_armv7(void);
                    663: #endif /* ARM_MMU_V7 */
 1.69      thorpej   664: #endif /* (ARM_MMU_GENERIC + ARM_MMU_SA1 + ARM_MMU_V6 + ARM_MMU_V7) != 0 */
                    665:
                    666: #if ARM_MMU_SA1 == 1
                    667: void   pmap_pte_init_sa1(void);
                    668: #endif /* ARM_MMU_SA1 == 1 */
1.46      thorpej   669:
1.52      thorpej   670: #if ARM_MMU_XSCALE == 1
1.51      thorpej   671: void   pmap_copy_page_xscale(paddr_t, paddr_t);
                    672: void   pmap_zero_page_xscale(paddr_t);
                    673:
1.46      thorpej   674: void   pmap_pte_init_xscale(void);
1.50      thorpej   675:
                    676: void   xscale_setup_minidata(vaddr_t, vaddr_t, paddr_t);
1.77      scw       677:
                    678: #define        PMAP_UAREA(va)          pmap_uarea(va)
                    679: void   pmap_uarea(vaddr_t);
1.52      thorpej   680: #endif /* ARM_MMU_XSCALE == 1 */
1.46      thorpej   681:
1.49      thorpej   682: extern pt_entry_t              pte_l1_s_cache_mode;
                    683: extern pt_entry_t              pte_l1_s_cache_mask;
                    684:
                    685: extern pt_entry_t              pte_l2_l_cache_mode;
                    686: extern pt_entry_t              pte_l2_l_cache_mask;
                    687:
                    688: extern pt_entry_t              pte_l2_s_cache_mode;
                    689: extern pt_entry_t              pte_l2_s_cache_mask;
1.46      thorpej   690:
1.65      scw       691: extern pt_entry_t              pte_l1_s_cache_mode_pt;
                    692: extern pt_entry_t              pte_l2_l_cache_mode_pt;
                    693: extern pt_entry_t              pte_l2_s_cache_mode_pt;
                    694:
1.98      macallan  695: extern pt_entry_t              pte_l1_s_wc_mode;
                    696: extern pt_entry_t              pte_l2_l_wc_mode;
                    697: extern pt_entry_t              pte_l2_s_wc_mode;
                    698:
1.95      jmcneill  699: extern pt_entry_t              pte_l1_s_prot_u;
                    700: extern pt_entry_t              pte_l1_s_prot_w;
                    701: extern pt_entry_t              pte_l1_s_prot_ro;
                    702: extern pt_entry_t              pte_l1_s_prot_mask;
                    703:
1.46      thorpej   704: extern pt_entry_t              pte_l2_s_prot_u;
                    705: extern pt_entry_t              pte_l2_s_prot_w;
1.95      jmcneill  706: extern pt_entry_t              pte_l2_s_prot_ro;
1.46      thorpej   707: extern pt_entry_t              pte_l2_s_prot_mask;
1.95      jmcneill  708:
                    709: extern pt_entry_t              pte_l2_l_prot_u;
                    710: extern pt_entry_t              pte_l2_l_prot_w;
                    711: extern pt_entry_t              pte_l2_l_prot_ro;
                    712: extern pt_entry_t              pte_l2_l_prot_mask;
                    713:
1.103     matt      714: extern pt_entry_t              pte_l1_ss_proto;
1.46      thorpej   715: extern pt_entry_t              pte_l1_s_proto;
                    716: extern pt_entry_t              pte_l1_c_proto;
                    717: extern pt_entry_t              pte_l2_s_proto;
                    718:
1.51      thorpej   719: extern void (*pmap_copy_page_func)(paddr_t, paddr_t);
                    720: extern void (*pmap_zero_page_func)(paddr_t);
1.75      bsh       721:
                    722: #endif /* !_LOCORE */
1.51      thorpej   723:
1.46      thorpej   724: /*****************************************************************************/
                    725:
1.124     matt      726: #define        KERNEL_PID              0       /* The kernel uses ASID 0 */
                    727:
1.20      chs       728: /*
1.65      scw       729:  * Definitions for MMU domains
                    730:  */
1.103     matt      731: #define        PMAP_DOMAINS            15      /* 15 'user' domains (1-15) */
1.124     matt      732: #define        PMAP_DOMAIN_KERNEL      0       /* The kernel pmap uses domain #0 */
1.156     skrll     733:
1.124     matt      734: #ifdef ARM_MMU_EXTENDED
                    735: #define        PMAP_DOMAIN_USER        1       /* User pmaps use domain #1 */
1.156     skrll     736: #define        DOMAIN_DEFAULT          ((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | (DOMAIN_CLIENT << (PMAP_DOMAIN_USER*2)))
                    737: #else
                    738: #define        DOMAIN_DEFAULT          ((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)))
1.124     matt      739: #endif
1.45      thorpej   740:
                    741: /*
                    742:  * These macros define the various bit masks in the PTE.
                    743:  *
                    744:  * We use these macros since we use different bits on different processor
                    745:  * models.
                    746:  */
1.95      jmcneill  747: #define        L1_S_PROT_U_generic     (L1_S_AP(AP_U))
                    748: #define        L1_S_PROT_W_generic     (L1_S_AP(AP_W))
1.152     skrll     749: #define        L1_S_PROT_RO_generic    (0)
1.95      jmcneill  750: #define        L1_S_PROT_MASK_generic  (L1_S_PROT_U|L1_S_PROT_W|L1_S_PROT_RO)
                    751:
                    752: #define        L1_S_PROT_U_xscale      (L1_S_AP(AP_U))
                    753: #define        L1_S_PROT_W_xscale      (L1_S_AP(AP_W))
1.152     skrll     754: #define        L1_S_PROT_RO_xscale     (0)
1.95      jmcneill  755: #define        L1_S_PROT_MASK_xscale   (L1_S_PROT_U|L1_S_PROT_W|L1_S_PROT_RO)
                    756:
1.99      bsh       757: #define        L1_S_PROT_U_armv6       (L1_S_AP(AP_R) | L1_S_AP(AP_U))
                    758: #define        L1_S_PROT_W_armv6       (L1_S_AP(AP_W))
                    759: #define        L1_S_PROT_RO_armv6      (L1_S_AP(AP_R) | L1_S_AP(AP_RO))
                    760: #define        L1_S_PROT_MASK_armv6    (L1_S_PROT_U|L1_S_PROT_W|L1_S_PROT_RO)
                    761:
1.95      jmcneill  762: #define        L1_S_PROT_U_armv7       (L1_S_AP(AP_R) | L1_S_AP(AP_U))
                    763: #define        L1_S_PROT_W_armv7       (L1_S_AP(AP_W))
                    764: #define        L1_S_PROT_RO_armv7      (L1_S_AP(AP_R) | L1_S_AP(AP_RO))
                    765: #define        L1_S_PROT_MASK_armv7    (L1_S_PROT_U|L1_S_PROT_W|L1_S_PROT_RO)
1.45      thorpej   766:
1.49      thorpej   767: #define        L1_S_CACHE_MASK_generic (L1_S_B|L1_S_C)
1.85      matt      768: #define        L1_S_CACHE_MASK_xscale  (L1_S_B|L1_S_C|L1_S_XS_TEX(TEX_XSCALE_X))
1.99      bsh       769: #define        L1_S_CACHE_MASK_armv6   (L1_S_B|L1_S_C|L1_S_XS_TEX(TEX_ARMV6_TEX))
1.134     skrll     770: #define        L1_S_CACHE_MASK_armv6n  (L1_S_B|L1_S_C|L1_S_XS_TEX(TEX_ARMV6_TEX)|L1_S_V6_S)
1.111     matt      771: #define        L1_S_CACHE_MASK_armv7   (L1_S_B|L1_S_C|L1_S_XS_TEX(TEX_ARMV6_TEX)|L1_S_V6_S)
1.45      thorpej   772:
1.95      jmcneill  773: #define        L2_L_PROT_U_generic     (L2_AP(AP_U))
                    774: #define        L2_L_PROT_W_generic     (L2_AP(AP_W))
1.152     skrll     775: #define        L2_L_PROT_RO_generic    (0)
1.95      jmcneill  776: #define        L2_L_PROT_MASK_generic  (L2_L_PROT_U|L2_L_PROT_W|L2_L_PROT_RO)
                    777:
                    778: #define        L2_L_PROT_U_xscale      (L2_AP(AP_U))
                    779: #define        L2_L_PROT_W_xscale      (L2_AP(AP_W))
1.152     skrll     780: #define        L2_L_PROT_RO_xscale     (0)
1.95      jmcneill  781: #define        L2_L_PROT_MASK_xscale   (L2_L_PROT_U|L2_L_PROT_W|L2_L_PROT_RO)
                    782:
1.99      bsh       783: #define        L2_L_PROT_U_armv6n      (L2_AP0(AP_R) | L2_AP0(AP_U))
                    784: #define        L2_L_PROT_W_armv6n      (L2_AP0(AP_W))
                    785: #define        L2_L_PROT_RO_armv6n     (L2_AP0(AP_R) | L2_AP0(AP_RO))
                    786: #define        L2_L_PROT_MASK_armv6n   (L2_L_PROT_U|L2_L_PROT_W|L2_L_PROT_RO)
                    787:
1.95      jmcneill  788: #define        L2_L_PROT_U_armv7       (L2_AP0(AP_R) | L2_AP0(AP_U))
                    789: #define        L2_L_PROT_W_armv7       (L2_AP0(AP_W))
                    790: #define        L2_L_PROT_RO_armv7      (L2_AP0(AP_R) | L2_AP0(AP_RO))
                    791: #define        L2_L_PROT_MASK_armv7    (L2_L_PROT_U|L2_L_PROT_W|L2_L_PROT_RO)
1.45      thorpej   792:
1.49      thorpej   793: #define        L2_L_CACHE_MASK_generic (L2_B|L2_C)
1.85      matt      794: #define        L2_L_CACHE_MASK_xscale  (L2_B|L2_C|L2_XS_L_TEX(TEX_XSCALE_X))
1.99      bsh       795: #define        L2_L_CACHE_MASK_armv6   (L2_B|L2_C|L2_V6_L_TEX(TEX_ARMV6_TEX))
1.134     skrll     796: #define        L2_L_CACHE_MASK_armv6n  (L2_B|L2_C|L2_V6_L_TEX(TEX_ARMV6_TEX)|L2_XS_S)
1.111     matt      797: #define        L2_L_CACHE_MASK_armv7   (L2_B|L2_C|L2_V6_L_TEX(TEX_ARMV6_TEX)|L2_XS_S)
1.49      thorpej   798:
1.46      thorpej   799: #define        L2_S_PROT_U_generic     (L2_AP(AP_U))
                    800: #define        L2_S_PROT_W_generic     (L2_AP(AP_W))
1.152     skrll     801: #define        L2_S_PROT_RO_generic    (0)
1.95      jmcneill  802: #define        L2_S_PROT_MASK_generic  (L2_S_PROT_U|L2_S_PROT_W|L2_S_PROT_RO)
1.46      thorpej   803:
1.48      thorpej   804: #define        L2_S_PROT_U_xscale      (L2_AP0(AP_U))
                    805: #define        L2_S_PROT_W_xscale      (L2_AP0(AP_W))
1.152     skrll     806: #define        L2_S_PROT_RO_xscale     (0)
1.95      jmcneill  807: #define        L2_S_PROT_MASK_xscale   (L2_S_PROT_U|L2_S_PROT_W|L2_S_PROT_RO)
                    808:
1.99      bsh       809: #define        L2_S_PROT_U_armv6n      (L2_AP0(AP_R) | L2_AP0(AP_U))
                    810: #define        L2_S_PROT_W_armv6n      (L2_AP0(AP_W))
                    811: #define        L2_S_PROT_RO_armv6n     (L2_AP0(AP_R) | L2_AP0(AP_RO))
                    812: #define        L2_S_PROT_MASK_armv6n   (L2_S_PROT_U|L2_S_PROT_W|L2_S_PROT_RO)
                    813:
1.95      jmcneill  814: #define        L2_S_PROT_U_armv7       (L2_AP0(AP_R) | L2_AP0(AP_U))
                    815: #define        L2_S_PROT_W_armv7       (L2_AP0(AP_W))
                    816: #define        L2_S_PROT_RO_armv7      (L2_AP0(AP_R) | L2_AP0(AP_RO))
                    817: #define        L2_S_PROT_MASK_armv7    (L2_S_PROT_U|L2_S_PROT_W|L2_S_PROT_RO)
1.46      thorpej   818:
1.49      thorpej   819: #define        L2_S_CACHE_MASK_generic (L2_B|L2_C)
1.85      matt      820: #define        L2_S_CACHE_MASK_xscale  (L2_B|L2_C|L2_XS_T_TEX(TEX_XSCALE_X))
1.99      bsh       821: #define        L2_XS_CACHE_MASK_armv6  (L2_B|L2_C|L2_V6_XS_TEX(TEX_ARMV6_TEX))
                    822: #ifdef ARMV6_EXTENDED_SMALL_PAGE
                    823: #define        L2_S_CACHE_MASK_armv6c  L2_XS_CACHE_MASK_armv6
                    824: #else
                    825: #define        L2_S_CACHE_MASK_armv6c  L2_S_CACHE_MASK_generic
                    826: #endif
1.142     skrll     827: #define        L2_S_CACHE_MASK_armv6n  (L2_B|L2_C|L2_V6_XS_TEX(TEX_ARMV6_TEX)|L2_XS_S)
1.111     matt      828: #define        L2_S_CACHE_MASK_armv7   (L2_B|L2_C|L2_V6_XS_TEX(TEX_ARMV6_TEX)|L2_XS_S)
1.46      thorpej   829:
1.99      bsh       830:
1.46      thorpej   831: #define        L1_S_PROTO_generic      (L1_TYPE_S | L1_S_IMP)
1.47      thorpej   832: #define        L1_S_PROTO_xscale       (L1_TYPE_S)
1.99      bsh       833: #define        L1_S_PROTO_armv6        (L1_TYPE_S)
1.95      jmcneill  834: #define        L1_S_PROTO_armv7        (L1_TYPE_S)
1.46      thorpej   835:
1.103     matt      836: #define        L1_SS_PROTO_generic     0
                    837: #define        L1_SS_PROTO_xscale      0
                    838: #define        L1_SS_PROTO_armv6       (L1_TYPE_S | L1_S_V6_SS)
                    839: #define        L1_SS_PROTO_armv7       (L1_TYPE_S | L1_S_V6_SS)
                    840:
1.46      thorpej   841: #define        L1_C_PROTO_generic      (L1_TYPE_C | L1_C_IMP2)
1.47      thorpej   842: #define        L1_C_PROTO_xscale       (L1_TYPE_C)
1.99      bsh       843: #define        L1_C_PROTO_armv6        (L1_TYPE_C)
1.95      jmcneill  844: #define        L1_C_PROTO_armv7        (L1_TYPE_C)
1.46      thorpej   845:
                    846: #define        L2_L_PROTO              (L2_TYPE_L)
                    847:
                    848: #define        L2_S_PROTO_generic      (L2_TYPE_S)
1.85      matt      849: #define        L2_S_PROTO_xscale       (L2_TYPE_XS)
1.99      bsh       850: #ifdef ARMV6_EXTENDED_SMALL_PAGE
                    851: #define        L2_S_PROTO_armv6c       (L2_TYPE_XS)    /* XP=0, extended small page */
                    852: #else
                    853: #define        L2_S_PROTO_armv6c       (L2_TYPE_S)     /* XP=0, subpage APs */
                    854: #endif
1.134     skrll     855: #ifdef ARM_MMU_EXTENDED
                    856: #define        L2_S_PROTO_armv6n       (L2_TYPE_S|L2_XS_XN)
                    857: #else
1.99      bsh       858: #define        L2_S_PROTO_armv6n       (L2_TYPE_S)     /* with XP=1 */
1.134     skrll     859: #endif
1.124     matt      860: #ifdef ARM_MMU_EXTENDED
                    861: #define        L2_S_PROTO_armv7        (L2_TYPE_S|L2_XS_XN)
                    862: #else
1.95      jmcneill  863: #define        L2_S_PROTO_armv7        (L2_TYPE_S)
1.124     matt      864: #endif
1.45      thorpej   865:
1.46      thorpej   866: /*
                    867:  * User-visible names for the ones that vary with MMU class.
                    868:  */
                    869:
                    870: #if ARM_NMMUS > 1
                    871: /* More than one MMU class configured; use variables. */
1.95      jmcneill  872: #define        L1_S_PROT_U             pte_l1_s_prot_u
                    873: #define        L1_S_PROT_W             pte_l1_s_prot_w
                    874: #define        L1_S_PROT_RO            pte_l1_s_prot_ro
                    875: #define        L1_S_PROT_MASK          pte_l1_s_prot_mask
                    876:
1.46      thorpej   877: #define        L2_S_PROT_U             pte_l2_s_prot_u
                    878: #define        L2_S_PROT_W             pte_l2_s_prot_w
1.95      jmcneill  879: #define        L2_S_PROT_RO            pte_l2_s_prot_ro
1.46      thorpej   880: #define        L2_S_PROT_MASK          pte_l2_s_prot_mask
                    881:
1.95      jmcneill  882: #define        L2_L_PROT_U             pte_l2_l_prot_u
                    883: #define        L2_L_PROT_W             pte_l2_l_prot_w
                    884: #define        L2_L_PROT_RO            pte_l2_l_prot_ro
                    885: #define        L2_L_PROT_MASK          pte_l2_l_prot_mask
                    886:
1.49      thorpej   887: #define        L1_S_CACHE_MASK         pte_l1_s_cache_mask
                    888: #define        L2_L_CACHE_MASK         pte_l2_l_cache_mask
                    889: #define        L2_S_CACHE_MASK         pte_l2_s_cache_mask
                    890:
1.103     matt      891: #define        L1_SS_PROTO             pte_l1_ss_proto
1.46      thorpej   892: #define        L1_S_PROTO              pte_l1_s_proto
                    893: #define        L1_C_PROTO              pte_l1_c_proto
                    894: #define        L2_S_PROTO              pte_l2_s_proto
1.51      thorpej   895:
                    896: #define        pmap_copy_page(s, d)    (*pmap_copy_page_func)((s), (d))
                    897: #define        pmap_zero_page(d)       (*pmap_zero_page_func)((d))
1.99      bsh       898: #elif (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0
                    899: #define        L1_S_PROT_U             L1_S_PROT_U_generic
                    900: #define        L1_S_PROT_W             L1_S_PROT_W_generic
                    901: #define        L1_S_PROT_RO            L1_S_PROT_RO_generic
                    902: #define        L1_S_PROT_MASK          L1_S_PROT_MASK_generic
                    903:
                    904: #define        L2_S_PROT_U             L2_S_PROT_U_generic
                    905: #define        L2_S_PROT_W             L2_S_PROT_W_generic
                    906: #define        L2_S_PROT_RO            L2_S_PROT_RO_generic
                    907: #define        L2_S_PROT_MASK          L2_S_PROT_MASK_generic
                    908:
                    909: #define        L2_L_PROT_U             L2_L_PROT_U_generic
                    910: #define        L2_L_PROT_W             L2_L_PROT_W_generic
                    911: #define        L2_L_PROT_RO            L2_L_PROT_RO_generic
                    912: #define        L2_L_PROT_MASK          L2_L_PROT_MASK_generic
                    913:
                    914: #define        L1_S_CACHE_MASK         L1_S_CACHE_MASK_generic
                    915: #define        L2_L_CACHE_MASK         L2_L_CACHE_MASK_generic
                    916: #define        L2_S_CACHE_MASK         L2_S_CACHE_MASK_generic
                    917:
1.103     matt      918: #define        L1_SS_PROTO             L1_SS_PROTO_generic
1.99      bsh       919: #define        L1_S_PROTO              L1_S_PROTO_generic
                    920: #define        L1_C_PROTO              L1_C_PROTO_generic
                    921: #define        L2_S_PROTO              L2_S_PROTO_generic
                    922:
                    923: #define        pmap_copy_page(s, d)    pmap_copy_page_generic((s), (d))
                    924: #define        pmap_zero_page(d)       pmap_zero_page_generic((d))
                    925: #elif ARM_MMU_V6N != 0
                    926: #define        L1_S_PROT_U             L1_S_PROT_U_armv6
                    927: #define        L1_S_PROT_W             L1_S_PROT_W_armv6
                    928: #define        L1_S_PROT_RO            L1_S_PROT_RO_armv6
                    929: #define        L1_S_PROT_MASK          L1_S_PROT_MASK_armv6
                    930:
                    931: #define        L2_S_PROT_U             L2_S_PROT_U_armv6n
                    932: #define        L2_S_PROT_W             L2_S_PROT_W_armv6n
                    933: #define        L2_S_PROT_RO            L2_S_PROT_RO_armv6n
                    934: #define        L2_S_PROT_MASK          L2_S_PROT_MASK_armv6n
                    935:
                    936: #define        L2_L_PROT_U             L2_L_PROT_U_armv6n
                    937: #define        L2_L_PROT_W             L2_L_PROT_W_armv6n
                    938: #define        L2_L_PROT_RO            L2_L_PROT_RO_armv6n
                    939: #define        L2_L_PROT_MASK          L2_L_PROT_MASK_armv6n
                    940:
1.134     skrll     941: #define        L1_S_CACHE_MASK         L1_S_CACHE_MASK_armv6n
                    942: #define        L2_L_CACHE_MASK         L2_L_CACHE_MASK_armv6n
1.99      bsh       943: #define        L2_S_CACHE_MASK         L2_S_CACHE_MASK_armv6n
                    944:
1.150     skrll     945: /*
                    946:  * These prototypes make writeable mappings, while the other MMU types
                    947:  * make read-only mappings.
                    948:  */
1.103     matt      949: #define        L1_SS_PROTO             L1_SS_PROTO_armv6
1.99      bsh       950: #define        L1_S_PROTO              L1_S_PROTO_armv6
                    951: #define        L1_C_PROTO              L1_C_PROTO_armv6
                    952: #define        L2_S_PROTO              L2_S_PROTO_armv6n
                    953:
                    954: #define        pmap_copy_page(s, d)    pmap_copy_page_generic((s), (d))
                    955: #define        pmap_zero_page(d)       pmap_zero_page_generic((d))
                    956: #elif ARM_MMU_V6C != 0
1.95      jmcneill  957: #define        L1_S_PROT_U             L1_S_PROT_U_generic
                    958: #define        L1_S_PROT_W             L1_S_PROT_W_generic
                    959: #define        L1_S_PROT_RO            L1_S_PROT_RO_generic
                    960: #define        L1_S_PROT_MASK          L1_S_PROT_MASK_generic
                    961:
1.46      thorpej   962: #define        L2_S_PROT_U             L2_S_PROT_U_generic
                    963: #define        L2_S_PROT_W             L2_S_PROT_W_generic
1.95      jmcneill  964: #define        L2_S_PROT_RO            L2_S_PROT_RO_generic
1.46      thorpej   965: #define        L2_S_PROT_MASK          L2_S_PROT_MASK_generic
                    966:
1.95      jmcneill  967: #define        L2_L_PROT_U             L2_L_PROT_U_generic
                    968: #define        L2_L_PROT_W             L2_L_PROT_W_generic
                    969: #define        L2_L_PROT_RO            L2_L_PROT_RO_generic
                    970: #define        L2_L_PROT_MASK          L2_L_PROT_MASK_generic
                    971:
1.49      thorpej   972: #define        L1_S_CACHE_MASK         L1_S_CACHE_MASK_generic
                    973: #define        L2_L_CACHE_MASK         L2_L_CACHE_MASK_generic
                    974: #define        L2_S_CACHE_MASK         L2_S_CACHE_MASK_generic
                    975:
1.130     matt      976: #define        L1_SS_PROTO             L1_SS_PROTO_armv6
1.46      thorpej   977: #define        L1_S_PROTO              L1_S_PROTO_generic
                    978: #define        L1_C_PROTO              L1_C_PROTO_generic
                    979: #define        L2_S_PROTO              L2_S_PROTO_generic
1.51      thorpej   980:
                    981: #define        pmap_copy_page(s, d)    pmap_copy_page_generic((s), (d))
                    982: #define        pmap_zero_page(d)       pmap_zero_page_generic((d))
1.46      thorpej   983: #elif ARM_MMU_XSCALE == 1
1.95      jmcneill  984: #define        L1_S_PROT_U             L1_S_PROT_U_generic
                    985: #define        L1_S_PROT_W             L1_S_PROT_W_generic
                    986: #define        L1_S_PROT_RO            L1_S_PROT_RO_generic
                    987: #define        L1_S_PROT_MASK          L1_S_PROT_MASK_generic
                    988:
1.46      thorpej   989: #define        L2_S_PROT_U             L2_S_PROT_U_xscale
                    990: #define        L2_S_PROT_W             L2_S_PROT_W_xscale
1.95      jmcneill  991: #define        L2_S_PROT_RO            L2_S_PROT_RO_xscale
1.46      thorpej   992: #define        L2_S_PROT_MASK          L2_S_PROT_MASK_xscale
1.49      thorpej   993:
1.95      jmcneill  994: #define        L2_L_PROT_U             L2_L_PROT_U_generic
                    995: #define        L2_L_PROT_W             L2_L_PROT_W_generic
                    996: #define        L2_L_PROT_RO            L2_L_PROT_RO_generic
                    997: #define        L2_L_PROT_MASK          L2_L_PROT_MASK_generic
                    998:
1.49      thorpej   999: #define        L1_S_CACHE_MASK         L1_S_CACHE_MASK_xscale
                   1000: #define        L2_L_CACHE_MASK         L2_L_CACHE_MASK_xscale
                   1001: #define        L2_S_CACHE_MASK         L2_S_CACHE_MASK_xscale
1.46      thorpej  1002:
1.103     matt     1003: #define        L1_SS_PROTO             L1_SS_PROTO_xscale
1.46      thorpej  1004: #define        L1_S_PROTO              L1_S_PROTO_xscale
                   1005: #define        L1_C_PROTO              L1_C_PROTO_xscale
                   1006: #define        L2_S_PROTO              L2_S_PROTO_xscale
1.51      thorpej  1007:
                   1008: #define        pmap_copy_page(s, d)    pmap_copy_page_xscale((s), (d))
                   1009: #define        pmap_zero_page(d)       pmap_zero_page_xscale((d))
1.95      jmcneill 1010: #elif ARM_MMU_V7 == 1
                   1011: #define        L1_S_PROT_U             L1_S_PROT_U_armv7
                   1012: #define        L1_S_PROT_W             L1_S_PROT_W_armv7
                   1013: #define        L1_S_PROT_RO            L1_S_PROT_RO_armv7
                   1014: #define        L1_S_PROT_MASK          L1_S_PROT_MASK_armv7
                   1015:
                   1016: #define        L2_S_PROT_U             L2_S_PROT_U_armv7
                   1017: #define        L2_S_PROT_W             L2_S_PROT_W_armv7
                   1018: #define        L2_S_PROT_RO            L2_S_PROT_RO_armv7
                   1019: #define        L2_S_PROT_MASK          L2_S_PROT_MASK_armv7
                   1020:
                   1021: #define        L2_L_PROT_U             L2_L_PROT_U_armv7
                   1022: #define        L2_L_PROT_W             L2_L_PROT_W_armv7
                   1023: #define        L2_L_PROT_RO            L2_L_PROT_RO_armv7
                   1024: #define        L2_L_PROT_MASK          L2_L_PROT_MASK_armv7
                   1025:
                   1026: #define        L1_S_CACHE_MASK         L1_S_CACHE_MASK_armv7
                   1027: #define        L2_L_CACHE_MASK         L2_L_CACHE_MASK_armv7
                   1028: #define        L2_S_CACHE_MASK         L2_S_CACHE_MASK_armv7
                   1029:
1.150     skrll    1030: /*
                   1031:  * These prototypes make writeable mappings, while the other MMU types
                   1032:  * make read-only mappings.
                   1033:  */
1.103     matt     1034: #define        L1_SS_PROTO             L1_SS_PROTO_armv7
1.95      jmcneill 1035: #define        L1_S_PROTO              L1_S_PROTO_armv7
                   1036: #define        L1_C_PROTO              L1_C_PROTO_armv7
                   1037: #define        L2_S_PROTO              L2_S_PROTO_armv7
                   1038:
                   1039: #define        pmap_copy_page(s, d)    pmap_copy_page_generic((s), (d))
                   1040: #define        pmap_zero_page(d)       pmap_zero_page_generic((d))
1.46      thorpej  1041: #endif /* ARM_NMMUS > 1 */
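/*
 * Illustrative sketch (editorial, not part of the original header): with
 * exactly one MMU class configured, the names above are preprocessor
 * constants and expressions using them fold at compile time; with
 * ARM_NMMUS > 1 they become the pte_* variables, which a
 * pmap_pte_init_*()-style bootstrap routine is expected to fill in
 * before any mappings are built.  For example:
 *
 *	pt_entry_t pte = L2_S_PROTO | L2_S_PROT(PTE_KERNEL, VM_PROT_READ);
 *
 * is a compile-time constant in the single-class case, but reads
 * pte_l2_s_proto and pte_l2_s_prot_* at run time when ARM_NMMUS > 1.
 */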
1.20      chs      1042:
1.45      thorpej  1043: /*
1.95      jmcneill 1044:  * Macros to set and query the write permission on page descriptors.
                   1045:  */
                   1046: #define l1pte_set_writable(pte)        (((pte) & ~L1_S_PROT_RO) | L1_S_PROT_W)
                   1047: #define l1pte_set_readonly(pte)        (((pte) & ~L1_S_PROT_W) | L1_S_PROT_RO)
1.149     skrll    1048:
1.152     skrll    1049: #define l2pte_set_writable(pte)        (((pte) & ~L2_S_PROT_RO) | L2_S_PROT_W)
                   1050: #define l2pte_set_readonly(pte)        (((pte) & ~L2_S_PROT_W) | L2_S_PROT_RO)
1.95      jmcneill 1051:
                   1052: #define l2pte_writable_p(pte)  (((pte) & L2_S_PROT_W) == L2_S_PROT_W && \
1.152     skrll    1053:                                 (L2_S_PROT_RO == 0 || \
1.95      jmcneill 1054:                                  ((pte) & L2_S_PROT_RO) != L2_S_PROT_RO))
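/*
 * Illustrative use of the helpers above (a sketch, not code from this
 * file; "ptep" is an assumed pointer into an L2 page table):
 *
 *	pt_entry_t opte = *ptep;
 *	if (l2pte_writable_p(opte)) {
 *		*ptep = l2pte_set_readonly(opte);
 *		PTE_SYNC(ptep);		(assuming the usual PTE_SYNC step)
 *	}
 *
 * Note that the RO and W encodings are not simple complements on every
 * MMU class, which is why l2pte_writable_p() must check both that
 * L2_S_PROT_W is set and that L2_S_PROT_RO (when non-zero) is clear.
 */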
                   1055:
                   1056: /*
1.45      thorpej  1057:  * These macros return various bits based on kernel/user and protection.
                   1058:  * Note that the compiler will usually fold these at compile time.
                   1059:  */
1.152     skrll    1060:
                   1061: #define        L1_S_PROT(ku, pr)       (                                          \
                   1062:        (((ku) == PTE_USER) ?                                              \
                   1063:            L1_S_PROT_U | (((pr) & VM_PROT_WRITE) ? L1_S_PROT_W : 0)       \
                   1064:        :                                                                  \
                   1065:            (((L1_S_PROT_RO &&                                             \
                   1066:                ((pr) & (VM_PROT_READ | VM_PROT_WRITE)) == VM_PROT_READ) ? \
                   1067:                    L1_S_PROT_RO : L1_S_PROT_W)))                          \
                   1068:     )
                   1069:
                   1070: #define        L2_L_PROT(ku, pr)       (                                          \
                   1071:        (((ku) == PTE_USER) ?                                              \
                   1072:            L2_L_PROT_U | (((pr) & VM_PROT_WRITE) ? L2_L_PROT_W : 0)       \
                   1073:        :                                                                  \
                   1074:            (((L2_L_PROT_RO &&                                             \
                   1075:                ((pr) & (VM_PROT_READ | VM_PROT_WRITE)) == VM_PROT_READ) ? \
                   1076:                    L2_L_PROT_RO : L2_L_PROT_W)))                          \
                   1077:     )
                   1078:
                   1079: #define        L2_S_PROT(ku, pr)       (                                          \
                   1080:        (((ku) == PTE_USER) ?                                              \
                   1081:            L2_S_PROT_U | (((pr) & VM_PROT_WRITE) ? L2_S_PROT_W : 0)       \
                   1082:        :                                                                  \
                   1083:            (((L2_S_PROT_RO &&                                             \
                   1084:                ((pr) & (VM_PROT_READ | VM_PROT_WRITE)) == VM_PROT_READ) ? \
                   1085:                    L2_S_PROT_RO : L2_S_PROT_W)))                          \
                   1086:     )
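/*
 * For example, a pmap_enter()-style path might build a small-page PTE
 * roughly as follows (a sketch; "pa" is an assumed page-aligned physical
 * address, "prot" a VM_PROT_* value, and pte_l2_s_cache_mode is declared
 * earlier in this file):
 *
 *	pt_entry_t npte = L2_S_PROTO | pa
 *	    | L2_S_PROT(user ? PTE_USER : PTE_KERNEL, prot)
 *	    | pte_l2_s_cache_mode;
 *
 * Since "user" and "prot" are usually known constants at the call site,
 * the protection term folds away, as the comment above notes.
 */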
1.66      thorpej  1087:
                    1088: /*
1.103     matt     1089:  * Macros to test whether a region can be mapped with an L1 SuperSection,
                    1090:  * an L1 Section, or an L2 Large Page mapping.
1.66      thorpej  1091:  */
1.103     matt     1092: #define        L1_SS_MAPPABLE_P(va, pa, size)                                  \
                   1093:        ((((va) | (pa)) & L1_SS_OFFSET) == 0 && (size) >= L1_SS_SIZE)
                   1094:
1.66      thorpej  1095: #define        L1_S_MAPPABLE_P(va, pa, size)                                   \
                   1096:        ((((va) | (pa)) & L1_S_OFFSET) == 0 && (size) >= L1_S_SIZE)
                   1097:
1.67      thorpej  1098: #define        L2_L_MAPPABLE_P(va, pa, size)                                   \
1.68      thorpej  1099:        ((((va) | (pa)) & L2_L_OFFSET) == 0 && (size) >= L2_L_SIZE)
1.64      thorpej  1100:
1.155     ryo      1101: #define        PMAP_MAPSIZE1   L2_L_SIZE
                   1102: #define        PMAP_MAPSIZE2   L1_S_SIZE
                   1103: #if (ARM_MMU_V6 + ARM_MMU_V7) > 0
                   1104: #define        PMAP_MAPSIZE3   L1_SS_SIZE
                   1105: #endif
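/*
 * A bootstrap mapping loop would typically try these predicates from
 * largest to smallest granularity, much as pmap_map_chunk() does
 * (a sketch; "va", "pa" and "resid" are assumed locals tracking the
 * region still to be mapped):
 *
 *	if (L1_SS_MAPPABLE_P(va, pa, resid))
 *		size = L1_SS_SIZE;	16MB supersection (v6/v7 only)
 *	else if (L1_S_MAPPABLE_P(va, pa, resid))
 *		size = L1_S_SIZE;	1MB section
 *	else if (L2_L_MAPPABLE_P(va, pa, resid))
 *		size = L2_L_SIZE;	64KB large page
 *	else
 *		size = PAGE_SIZE;	4KB small page
 *
 * PMAP_MAPSIZE1..3 advertise the same hierarchy of sizes to MI code.
 */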
                   1106:
1.119     matt     1107: #ifndef _LOCORE
1.64      thorpej  1108: /*
                   1109:  * Hooks for the pool allocator.
                   1110:  */
                   1111: #define        POOL_VTOPHYS(va)        vtophys((vaddr_t) (va))
1.117     matt     1112: extern paddr_t physical_start, physical_end;
1.113     matt     1113: #ifdef PMAP_NEED_ALLOC_POOLPAGE
1.114     matt     1114: struct vm_page *arm_pmap_alloc_poolpage(int);
1.113     matt     1115: #define        PMAP_ALLOC_POOLPAGE     arm_pmap_alloc_poolpage
1.118     matt     1116: #endif
                   1117: #if defined(PMAP_NEED_ALLOC_POOLPAGE) || defined(__HAVE_MM_MD_DIRECT_MAPPED_PHYS)
1.131     matt     1118: vaddr_t        pmap_map_poolpage(paddr_t);
                   1119: paddr_t        pmap_unmap_poolpage(vaddr_t);
                   1120: #define        PMAP_MAP_POOLPAGE(pa)   pmap_map_poolpage(pa)
                   1121: #define PMAP_UNMAP_POOLPAGE(va)        pmap_unmap_poolpage(va)
1.113     matt     1122: #endif
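/*
 * With a direct map (__HAVE_MM_MD_DIRECT_MAPPED_PHYS) the pool and kmem
 * layers can use these hooks to reach a page without touching the kernel
 * page tables at all.  Roughly (a sketch; error handling omitted):
 *
 *	struct vm_page *pg = PMAP_ALLOC_POOLPAGE(UVM_PGA_USERESERVE);
 *	vaddr_t va = PMAP_MAP_POOLPAGE(VM_PAGE_TO_PHYS(pg));
 *	... use the page through va ...
 *	paddr_t pa = PMAP_UNMAP_POOLPAGE(va);
 */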
1.18      thorpej  1123:
1.143     skrll    1124: #define __HAVE_PMAP_PV_TRACK   1
                   1125:
                   1126: void pmap_pv_protect(paddr_t, vm_prot_t);
                   1127:
                   1128: struct pmap_page {
1.97      uebayasi 1129:        SLIST_HEAD(,pv_entry) pvh_list;         /* pv_entry list */
                   1130:        int pvh_attrs;                          /* page attributes */
                   1131:        u_int uro_mappings;
                   1132:        u_int urw_mappings;
                   1133:        union {
                   1134:                u_short s_mappings[2];  /* Assume kernel count <= 65535 */
                   1135:                u_int i_mappings;
                   1136:        } k_u;
                   1137: };
                   1138:
                   1139: /*
1.143     skrll    1140:  * pmap-specific data stored in the vm_page structure.
                   1141:  */
                   1142: #define        __HAVE_VM_PAGE_MD
                   1143: struct vm_page_md {
                   1144:        struct pmap_page pp;
                   1145: #define        pvh_list        pp.pvh_list
                   1146: #define        pvh_attrs       pp.pvh_attrs
                   1147: #define        uro_mappings    pp.uro_mappings
                   1148: #define        urw_mappings    pp.urw_mappings
                   1149: #define        kro_mappings    pp.k_u.s_mappings[0]
                   1150: #define        krw_mappings    pp.k_u.s_mappings[1]
                   1151: #define        k_mappings      pp.k_u.i_mappings
                   1152: };
                   1153:
                   1154: #define PMAP_PAGE_TO_MD(ppage) container_of((ppage), struct vm_page_md, pp)
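/*
 * PMAP_PAGE_TO_MD() recovers the enclosing vm_page_md from a pmap_page
 * pointer, e.g. in code shared with the pv-tracking interface above
 * (a sketch; "pp" is an assumed struct pmap_page pointer):
 *
 *	struct vm_page_md *md = PMAP_PAGE_TO_MD(pp);
 *	KASSERT(&md->pp == pp);
 *
 * This is valid because struct pmap_page is the first member of struct
 * vm_page_md, so container_of() only has to subtract the member offset.
 */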
                   1155:
                   1156: /*
1.97      uebayasi 1157:  * Set the default cache color of each page.
                   1158:  */
                   1159: #if ARM_MMU_V6 > 0
                   1160: #define        VM_MDPAGE_PVH_ATTRS_INIT(pg) \
1.157     ad       1161:        (pg)->mdpage.pvh_attrs = VM_PAGE_TO_PHYS(pg) & arm_cache_prefer_mask
1.97      uebayasi 1162: #else
                   1163: #define        VM_MDPAGE_PVH_ATTRS_INIT(pg) \
                   1164:        (pg)->mdpage.pvh_attrs = 0
                   1165: #endif
1.135     skrll    1166:
1.97      uebayasi 1167: #define        VM_MDPAGE_INIT(pg)                                              \
                   1168: do {                                                                   \
                   1169:        SLIST_INIT(&(pg)->mdpage.pvh_list);                             \
                   1170:        VM_MDPAGE_PVH_ATTRS_INIT(pg);                                   \
                   1171:        (pg)->mdpage.uro_mappings = 0;                                  \
                   1172:        (pg)->mdpage.urw_mappings = 0;                                  \
                   1173:        (pg)->mdpage.k_mappings = 0;                                    \
                   1174: } while (/*CONSTCOND*/0)
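/*
 * On VIPT-cached CPUs (ARM_MMU_V6 > 0) the initial pvh_attrs value
 * records the page's cache color, i.e. the physical-address bits that
 * select a cache alias; the equivalent stand-alone computation would be
 * (a sketch only):
 *
 *	u_int color = VM_PAGE_TO_PHYS(pg) & arm_cache_prefer_mask;
 *
 * VM_MDPAGE_INIT() is expected to run once per page when the vm_page
 * array is set up, so the pmap can later compare a new mapping's virtual
 * color against the color recorded here.
 */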
                   1175:
                   1176: #endif /* !_LOCORE */
                   1177:
1.18      thorpej  1178: #endif /* _KERNEL */
1.1       reinoud  1179:
                   1180: #endif /* _ARM32_PMAP_H_ */
