[BACK] Return to pmap.h CVS log  [TXT] [DIR]  Up to [cvs.NetBSD.org] / src / sys / arch / arm / include / arm32

Annotation of src/sys/arch/arm/include/arm32/pmap.h, Revision 1.78

1.78    ! scw         1: /*     $NetBSD: pmap.h,v 1.77 2003/10/13 20:50:34 scw Exp $    */
1.46      thorpej     2:
                      3: /*
1.65      scw         4:  * Copyright (c) 2002, 2003 Wasabi Systems, Inc.
1.46      thorpej     5:  * All rights reserved.
                      6:  *
1.65      scw         7:  * Written by Jason R. Thorpe & Steve C. Woodford for Wasabi Systems, Inc.
1.46      thorpej     8:  *
                      9:  * Redistribution and use in source and binary forms, with or without
                     10:  * modification, are permitted provided that the following conditions
                     11:  * are met:
                     12:  * 1. Redistributions of source code must retain the above copyright
                     13:  *    notice, this list of conditions and the following disclaimer.
                     14:  * 2. Redistributions in binary form must reproduce the above copyright
                     15:  *    notice, this list of conditions and the following disclaimer in the
                     16:  *    documentation and/or other materials provided with the distribution.
                     17:  * 3. All advertising materials mentioning features or use of this software
                     18:  *    must display the following acknowledgement:
                     19:  *     This product includes software developed for the NetBSD Project by
                     20:  *     Wasabi Systems, Inc.
                     21:  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
                     22:  *    or promote products derived from this software without specific prior
                     23:  *    written permission.
                     24:  *
                     25:  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
                     26:  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
                     27:  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
                     28:  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
                     29:  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
                     30:  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
                     31:  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
                     32:  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
                     33:  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
                     34:  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
                     35:  * POSSIBILITY OF SUCH DAMAGE.
                     36:  */
1.1       reinoud    37:
                     38: /*
                     39:  * Copyright (c) 1994,1995 Mark Brinicombe.
                     40:  * All rights reserved.
                     41:  *
                     42:  * Redistribution and use in source and binary forms, with or without
                     43:  * modification, are permitted provided that the following conditions
                     44:  * are met:
                     45:  * 1. Redistributions of source code must retain the above copyright
                     46:  *    notice, this list of conditions and the following disclaimer.
                     47:  * 2. Redistributions in binary form must reproduce the above copyright
                     48:  *    notice, this list of conditions and the following disclaimer in the
                     49:  *    documentation and/or other materials provided with the distribution.
                     50:  * 3. All advertising materials mentioning features or use of this software
                     51:  *    must display the following acknowledgement:
                     52:  *     This product includes software developed by Mark Brinicombe
                     53:  * 4. The name of the author may not be used to endorse or promote products
                     54:  *    derived from this software without specific prior written permission.
                     55:  *
                     56:  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
                     57:  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
                     58:  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
                     59:  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
                     60:  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
                     61:  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
                     62:  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
                     63:  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
                     64:  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
                     65:  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
                     66:  */
                     67:
                     68: #ifndef        _ARM32_PMAP_H_
                     69: #define        _ARM32_PMAP_H_
                     70:
1.18      thorpej    71: #ifdef _KERNEL
                     72:
1.52      thorpej    73: #include <arm/cpuconf.h>
1.75      bsh        74: #include <arm/arm32/pte.h>
                     75: #ifndef _LOCORE
1.19      thorpej    76: #include <arm/cpufunc.h>
1.12      chris      77: #include <uvm/uvm_object.h>
1.75      bsh        78: #endif
1.1       reinoud    79:
                     80: /*
1.11      chris      81:  * a pmap describes a processes' 4GB virtual address space.  this
                     82:  * virtual address space can be broken up into 4096 1MB regions which
1.38      thorpej    83:  * are described by L1 PTEs in the L1 table.
1.11      chris      84:  *
1.38      thorpej    85:  * There is a line drawn at KERNEL_BASE.  Everything below that line
                     86:  * changes when the VM context is switched.  Everything above that line
                     87:  * is the same no matter which VM context is running.  This is achieved
                     88:  * by making the L1 PTEs for those slots above KERNEL_BASE reference
                     89:  * kernel L2 tables.
1.11      chris      90:  *
1.38      thorpej    91:  * The basic layout of the virtual address space thus looks like this:
                     92:  *
                     93:  *     0xffffffff
                     94:  *     .
                     95:  *     .
                     96:  *     .
                     97:  *     KERNEL_BASE
                     98:  *     --------------------
                     99:  *     .
                    100:  *     .
                    101:  *     .
                    102:  *     0x00000000
1.11      chris     103:  */
                    104:
1.65      scw       105: /*
                    106:  * The number of L2 descriptor tables which can be tracked by an l2_dtable.
                    107:  * A bucket size of 16 provides for 16MB of contiguous virtual address
                    108:  * space per l2_dtable. Most processes will, therefore, require only two or
                    109:  * three of these to map their whole working set.
                    110:  */
                    111: #define        L2_BUCKET_LOG2  4
                    112: #define        L2_BUCKET_SIZE  (1 << L2_BUCKET_LOG2)
                    113:
                    114: /*
                    115:  * Given the above "L2-descriptors-per-l2_dtable" constant, the number
                    116:  * of l2_dtable structures required to track all possible page descriptors
                    117:  * mappable by an L1 translation table is given by the following constants:
                    118:  */
                    119: #define        L2_LOG2         ((32 - L1_S_SHIFT) - L2_BUCKET_LOG2)
                    120: #define        L2_SIZE         (1 << L2_LOG2)
                    121:
1.75      bsh       122: #ifndef _LOCORE
                    123:
1.65      scw       124: struct l1_ttable;
                    125: struct l2_dtable;
                    126:
                    127: /*
                    128:  * Track cache/tlb occupancy using the following structure
                    129:  */
                    130: union pmap_cache_state {
                    131:        struct {
                    132:                union {
                    133:                        u_int8_t csu_cache_b[2];
                    134:                        u_int16_t csu_cache;
                    135:                } cs_cache_u;
                    136:
                    137:                union {
                    138:                        u_int8_t csu_tlb_b[2];
                    139:                        u_int16_t csu_tlb;
                    140:                } cs_tlb_u;
                    141:        } cs_s;
                    142:        u_int32_t cs_all;
                    143: };
                    144: #define        cs_cache_id     cs_s.cs_cache_u.csu_cache_b[0]
                    145: #define        cs_cache_d      cs_s.cs_cache_u.csu_cache_b[1]
                    146: #define        cs_cache        cs_s.cs_cache_u.csu_cache
                    147: #define        cs_tlb_id       cs_s.cs_tlb_u.csu_tlb_b[0]
                    148: #define        cs_tlb_d        cs_s.cs_tlb_u.csu_tlb_b[1]
                    149: #define        cs_tlb          cs_s.cs_tlb_u.csu_tlb
                    150:
                    151: /*
                    152:  * Assigned to cs_all to force cacheops to work for a particular pmap
                    153:  */
                    154: #define        PMAP_CACHE_STATE_ALL    0xffffffffu
                    155:
                    156: /*
1.73      thorpej   157:  * This structure is used by machine-dependent code to describe
                    158:  * static mappings of devices, created at bootstrap time.
                    159:  */
                    160: struct pmap_devmap {
                    161:        vaddr_t         pd_va;          /* virtual address */
                    162:        paddr_t         pd_pa;          /* physical address */
                    163:        psize_t         pd_size;        /* size of region */
                    164:        vm_prot_t       pd_prot;        /* protection code */
                    165:        int             pd_cache;       /* cache attributes */
                    166: };
                    167:
                    168: /*
1.65      scw       169:  * The pmap structure itself
                    170:  */
                    171: struct pmap {
                    172:        u_int8_t                pm_domain;
                    173:        boolean_t               pm_remove_all;
                    174:        struct l1_ttable        *pm_l1;
                    175:        union pmap_cache_state  pm_cstate;
                    176:        struct uvm_object       pm_obj;
                    177: #define        pm_lock pm_obj.vmobjlock
                    178:        struct l2_dtable        *pm_l2[L2_SIZE];
                    179:        struct pmap_statistics  pm_stats;
                    180:        LIST_ENTRY(pmap)        pm_list;
                    181: };
                    182:
1.1       reinoud   183: typedef struct pmap *pmap_t;
                    184:
                    185: /*
                    186:  * Physical / virtual address structure. In a number of places (particularly
                    187:  * during bootstrapping) we need to keep track of the physical and virtual
                    188:  * addresses of various pages
                    189:  */
1.28      thorpej   190: typedef struct pv_addr {
                    191:        SLIST_ENTRY(pv_addr) pv_list;
1.3       matt      192:        paddr_t pv_pa;
1.2       matt      193:        vaddr_t pv_va;
1.1       reinoud   194: } pv_addr_t;
                    195:
                    196: /*
1.24      thorpej   197:  * Determine various modes for PTEs (user vs. kernel, cacheable
                    198:  * vs. non-cacheable).
                    199:  */
                    200: #define        PTE_KERNEL      0
                    201: #define        PTE_USER        1
                    202: #define        PTE_NOCACHE     0
                    203: #define        PTE_CACHE       1
1.65      scw       204: #define        PTE_PAGETABLE   2
1.24      thorpej   205:
                    206: /*
1.43      thorpej   207:  * Flags that indicate attributes of pages or mappings of pages.
                    208:  *
                    209:  * The PVF_MOD and PVF_REF flags are stored in the mdpage for each
                    210:  * page.  PVF_WIRED, PVF_WRITE, and PVF_NC are kept in individual
                    211:  * pv_entry's for each page.  They live in the same "namespace" so
                    212:  * that we can clear multiple attributes at a time.
                    213:  *
                    214:  * Note the "non-cacheable" flag generally means the page has
                    215:  * multiple mappings in a given address space.
                    216:  */
                    217: #define        PVF_MOD         0x01            /* page is modified */
                    218: #define        PVF_REF         0x02            /* page is referenced */
                    219: #define        PVF_WIRED       0x04            /* mapping is wired */
                    220: #define        PVF_WRITE       0x08            /* mapping is writable */
1.56      thorpej   221: #define        PVF_EXEC        0x10            /* mapping is executable */
1.65      scw       222: #define        PVF_UNC         0x20            /* mapping is 'user' non-cacheable */
                    223: #define        PVF_KNC         0x40            /* mapping is 'kernel' non-cacheable */
                    224: #define        PVF_NC          (PVF_UNC|PVF_KNC)
1.43      thorpej   225:
                    226: /*
1.1       reinoud   227:  * Commonly referenced structures
                    228:  */
1.11      chris     229: extern struct pmap     kernel_pmap_store;
1.4       matt      230: extern int             pmap_debug_level; /* Only exists if PMAP_DEBUG */
1.1       reinoud   231:
                    232: /*
                    233:  * Macros that we need to export
                    234:  */
                    235: #define pmap_kernel()                  (&kernel_pmap_store)
                    236: #define        pmap_resident_count(pmap)       ((pmap)->pm_stats.resident_count)
                    237: #define        pmap_wired_count(pmap)          ((pmap)->pm_stats.wired_count)
1.31      thorpej   238:
1.78    ! scw       239: #define        pmap_remove(pmap,sva,eva)       pmap_do_remove((pmap),(sva),(eva),0)
        !           240:
1.43      thorpej   241: #define        pmap_is_modified(pg)    \
                    242:        (((pg)->mdpage.pvh_attrs & PVF_MOD) != 0)
                    243: #define        pmap_is_referenced(pg)  \
                    244:        (((pg)->mdpage.pvh_attrs & PVF_REF) != 0)
1.41      thorpej   245:
                    246: #define        pmap_copy(dp, sp, da, l, sa)    /* nothing */
1.60      chs       247:
1.35      thorpej   248: #define pmap_phys_address(ppn)         (arm_ptob((ppn)))
1.1       reinoud   249:
                    250: /*
                    251:  * Functions that we need to export
                    252:  */
1.39      thorpej   253: void   pmap_procwr(struct proc *, vaddr_t, int);
1.65      scw       254: void   pmap_remove_all(pmap_t);
                    255: boolean_t pmap_extract(pmap_t, vaddr_t, paddr_t *);
1.39      thorpej   256:
1.1       reinoud   257: #define        PMAP_NEED_PROCWR
1.29      chris     258: #define PMAP_GROWKERNEL                /* turn on pmap_growkernel interface */
1.4       matt      259:
1.39      thorpej   260: /* Functions we use internally. */
1.71      thorpej   261: void   pmap_bootstrap(pd_entry_t *, vaddr_t, vaddr_t);
1.65      scw       262:
1.78    ! scw       263: void   pmap_do_remove(pmap_t, vaddr_t, vaddr_t, int);
1.70      scw       264: int    pmap_fault_fixup(pmap_t, vaddr_t, vm_prot_t, int);
1.65      scw       265: boolean_t pmap_get_pde_pte(pmap_t, vaddr_t, pd_entry_t **, pt_entry_t **);
                    266: boolean_t pmap_get_pde(pmap_t, vaddr_t, pd_entry_t **);
                    267: void   pmap_set_pcb_pagedir(pmap_t, struct pcb *);
                    268:
                    269: void   pmap_debug(int);
1.39      thorpej   270: void   pmap_postinit(void);
1.42      thorpej   271:
                    272: void   vector_page_setprot(int);
1.24      thorpej   273:
1.73      thorpej   274: const struct pmap_devmap *pmap_devmap_find_pa(paddr_t, psize_t);
                    275: const struct pmap_devmap *pmap_devmap_find_va(vaddr_t, vsize_t);
                    276:
1.24      thorpej   277: /* Bootstrapping routines. */
                    278: void   pmap_map_section(vaddr_t, vaddr_t, paddr_t, int, int);
1.25      thorpej   279: void   pmap_map_entry(vaddr_t, vaddr_t, paddr_t, int, int);
1.28      thorpej   280: vsize_t        pmap_map_chunk(vaddr_t, vaddr_t, paddr_t, vsize_t, int, int);
                    281: void   pmap_link_l2pt(vaddr_t, vaddr_t, pv_addr_t *);
1.73      thorpej   282: void   pmap_devmap_bootstrap(vaddr_t, const struct pmap_devmap *);
1.74      thorpej   283: void   pmap_devmap_register(const struct pmap_devmap *);
1.13      chris     284:
                    285: /*
                    286:  * Special page zero routine for use by the idle loop (no cache cleans).
                    287:  */
1.65      scw       288: boolean_t      pmap_pageidlezero(paddr_t);
1.13      chris     289: #define PMAP_PAGEIDLEZERO(pa)  pmap_pageidlezero((pa))
1.1       reinoud   290:
1.29      chris     291: /*
                    292:  * The current top of kernel VM
                    293:  */
                    294: extern vaddr_t pmap_curmaxkvaddr;
1.1       reinoud   295:
                    296: /*
                    297:  * Useful macros and constants
                    298:  */
1.59      thorpej   299:
1.65      scw       300: /* Virtual address to page table entry */
                    301: static __inline pt_entry_t *
                    302: vtopte(vaddr_t va)
                    303: {
                    304:        pd_entry_t *pdep;
                    305:        pt_entry_t *ptep;
                    306:
                    307:        if (pmap_get_pde_pte(pmap_kernel(), va, &pdep, &ptep) == FALSE)
                    308:                return (NULL);
                    309:        return (ptep);
                    310: }
                    311:
                    312: /*
                    313:  * Virtual address to physical address
                    314:  */
                    315: static __inline paddr_t
                    316: vtophys(vaddr_t va)
                    317: {
                    318:        paddr_t pa;
                    319:
                    320:        if (pmap_extract(pmap_kernel(), va, &pa) == FALSE)
                    321:                return (0);     /* XXXSCW: Panic? */
                    322:
                    323:        return (pa);
                    324: }
                    325:
                    326: /*
                    327:  * The new pmap ensures that page-tables are always mapping Write-Thru.
                    328:  * Thus, on some platforms we can run fast and loose and avoid syncing PTEs
                    329:  * on every change.
                    330:  *
1.69      thorpej   331:  * Unfortunately, not all CPUs have a write-through cache mode.  So we
                    332:  * define PMAP_NEEDS_PTE_SYNC for C code to conditionally do PTE syncs,
                    333:  * and if there is the chance for PTE syncs to be needed, we define
                    334:  * PMAP_INCLUDE_PTE_SYNC so e.g. assembly code can include (and run)
                    335:  * the code.
                    336:  */
                    337: extern int pmap_needs_pte_sync;
                    338: #if defined(_KERNEL_OPT)
                    339: /*
                    340:  * StrongARM SA-1 caches do not have a write-through mode.  So, on these,
                    341:  * we need to do PTE syncs.  If only SA-1 is configured, then evaluate
                    342:  * this at compile time.
                    343:  */
                    344: #if (ARM_MMU_SA1 == 1) && (ARM_NMMUS == 1)
                    345: #define        PMAP_NEEDS_PTE_SYNC     1
                    346: #define        PMAP_INCLUDE_PTE_SYNC
                    347: #elif (ARM_MMU_SA1 == 0)
                    348: #define        PMAP_NEEDS_PTE_SYNC     0
                    349: #endif
                    350: #endif /* _KERNEL_OPT */
                    351:
                    352: /*
                    353:  * Provide a fallback in case we were not able to determine it at
                    354:  * compile-time.
1.65      scw       355:  */
1.69      thorpej   356: #ifndef PMAP_NEEDS_PTE_SYNC
                    357: #define        PMAP_NEEDS_PTE_SYNC     pmap_needs_pte_sync
                    358: #define        PMAP_INCLUDE_PTE_SYNC
                    359: #endif
1.65      scw       360:
/*
 * PTE_SYNC: write back the cache line covering a single PTE, but only
 * when the configured CPU(s) lack a write-through mode for page tables
 * (see PMAP_NEEDS_PTE_SYNC above).
 */
#define	PTE_SYNC(pte)							\
do {									\
	if (PMAP_NEEDS_PTE_SYNC)					\
		cpu_dcache_wb_range((vaddr_t)(pte), sizeof(pt_entry_t));\
} while (/*CONSTCOND*/0)
                    366:
                    367: #define        PTE_SYNC_RANGE(pte, cnt)                                        \
                    368: do {                                                                   \
                    369:        if (PMAP_NEEDS_PTE_SYNC) {                                      \
                    370:                cpu_dcache_wb_range((vaddr_t)(pte),                     \
                    371:                    (cnt) << 2); /* * sizeof(pt_entry_t) */             \
                    372:        }                                                               \
                    373: } while (/*CONSTCOND*/0)
1.65      scw       374:
1.36      thorpej   375: #define        l1pte_valid(pde)        ((pde) != 0)
1.44      thorpej   376: #define        l1pte_section_p(pde)    (((pde) & L1_TYPE_MASK) == L1_TYPE_S)
                    377: #define        l1pte_page_p(pde)       (((pde) & L1_TYPE_MASK) == L1_TYPE_C)
                    378: #define        l1pte_fpage_p(pde)      (((pde) & L1_TYPE_MASK) == L1_TYPE_F)
1.36      thorpej   379:
1.65      scw       380: #define l2pte_index(v)         (((v) & L2_ADDR_BITS) >> L2_S_SHIFT)
1.36      thorpej   381: #define        l2pte_valid(pte)        ((pte) != 0)
1.44      thorpej   382: #define        l2pte_pa(pte)           ((pte) & L2_S_FRAME)
1.77      scw       383: #define l2pte_minidata(pte)    (((pte) & \
                    384:                                 (L2_B | L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X)))\
                    385:                                 == (L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X)))
1.35      thorpej   386:
1.1       reinoud   387: /* L1 and L2 page table macros */
1.36      thorpej   388: #define pmap_pde_v(pde)                l1pte_valid(*(pde))
                    389: #define pmap_pde_section(pde)  l1pte_section_p(*(pde))
                    390: #define pmap_pde_page(pde)     l1pte_page_p(*(pde))
                    391: #define pmap_pde_fpage(pde)    l1pte_fpage_p(*(pde))
1.16      rearnsha  392:
1.36      thorpej   393: #define        pmap_pte_v(pte)         l2pte_valid(*(pte))
                    394: #define        pmap_pte_pa(pte)        l2pte_pa(*(pte))
1.35      thorpej   395:
1.1       reinoud   396: /* Size of the kernel part of the L1 page table */
                    397: #define KERNEL_PD_SIZE \
1.44      thorpej   398:        (L1_TABLE_SIZE - (KERNEL_BASE >> L1_S_SHIFT) * sizeof(pd_entry_t))
1.20      chs       399:
1.46      thorpej   400: /************************* ARM MMU configuration *****************************/
                    401:
1.69      thorpej   402: #if (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0
1.51      thorpej   403: void   pmap_copy_page_generic(paddr_t, paddr_t);
                    404: void   pmap_zero_page_generic(paddr_t);
                    405:
1.46      thorpej   406: void   pmap_pte_init_generic(void);
1.69      thorpej   407: #if defined(CPU_ARM8)
                    408: void   pmap_pte_init_arm8(void);
                    409: #endif
1.46      thorpej   410: #if defined(CPU_ARM9)
                    411: void   pmap_pte_init_arm9(void);
                    412: #endif /* CPU_ARM9 */
1.76      rearnsha  413: #if defined(CPU_ARM10)
                    414: void   pmap_pte_init_arm10(void);
                    415: #endif /* CPU_ARM10 */
1.69      thorpej   416: #endif /* (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0 */
                    417:
                    418: #if ARM_MMU_SA1 == 1
                    419: void   pmap_pte_init_sa1(void);
                    420: #endif /* ARM_MMU_SA1 == 1 */
1.46      thorpej   421:
1.52      thorpej   422: #if ARM_MMU_XSCALE == 1
1.51      thorpej   423: void   pmap_copy_page_xscale(paddr_t, paddr_t);
                    424: void   pmap_zero_page_xscale(paddr_t);
                    425:
1.46      thorpej   426: void   pmap_pte_init_xscale(void);
1.50      thorpej   427:
                    428: void   xscale_setup_minidata(vaddr_t, vaddr_t, paddr_t);
1.77      scw       429:
                    430: #define        PMAP_UAREA(va)          pmap_uarea(va)
                    431: void   pmap_uarea(vaddr_t);
1.52      thorpej   432: #endif /* ARM_MMU_XSCALE == 1 */
1.46      thorpej   433:
1.49      thorpej   434: extern pt_entry_t              pte_l1_s_cache_mode;
                    435: extern pt_entry_t              pte_l1_s_cache_mask;
                    436:
                    437: extern pt_entry_t              pte_l2_l_cache_mode;
                    438: extern pt_entry_t              pte_l2_l_cache_mask;
                    439:
                    440: extern pt_entry_t              pte_l2_s_cache_mode;
                    441: extern pt_entry_t              pte_l2_s_cache_mask;
1.46      thorpej   442:
1.65      scw       443: extern pt_entry_t              pte_l1_s_cache_mode_pt;
                    444: extern pt_entry_t              pte_l2_l_cache_mode_pt;
                    445: extern pt_entry_t              pte_l2_s_cache_mode_pt;
                    446:
1.46      thorpej   447: extern pt_entry_t              pte_l2_s_prot_u;
                    448: extern pt_entry_t              pte_l2_s_prot_w;
                    449: extern pt_entry_t              pte_l2_s_prot_mask;
                    450:
                    451: extern pt_entry_t              pte_l1_s_proto;
                    452: extern pt_entry_t              pte_l1_c_proto;
                    453: extern pt_entry_t              pte_l2_s_proto;
                    454:
1.51      thorpej   455: extern void (*pmap_copy_page_func)(paddr_t, paddr_t);
                    456: extern void (*pmap_zero_page_func)(paddr_t);
1.75      bsh       457:
                    458: #endif /* !_LOCORE */
1.51      thorpej   459:
1.46      thorpej   460: /*****************************************************************************/
                    461:
1.20      chs       462: /*
                    463:  * tell MI code that the cache is virtually-indexed *and* virtually-tagged.
                    464:  */
1.45      thorpej   465: #define PMAP_CACHE_VIVT
1.65      scw       466:
                    467: /*
                    468:  * Definitions for MMU domains
                    469:  */
                    470: #define        PMAP_DOMAINS            15      /* 15 'user' domains (0-14) */
                    471: #define        PMAP_DOMAIN_KERNEL      15      /* The kernel uses domain #15 */
1.45      thorpej   472:
                    473: /*
                    474:  * These macros define the various bit masks in the PTE.
                    475:  *
                    476:  * We use these macros since we use different bits on different processor
                    477:  * models.
                    478:  */
                    479: #define        L1_S_PROT_U             (L1_S_AP(AP_U))
                    480: #define        L1_S_PROT_W             (L1_S_AP(AP_W))
                    481: #define        L1_S_PROT_MASK          (L1_S_PROT_U|L1_S_PROT_W)
                    482:
1.49      thorpej   483: #define        L1_S_CACHE_MASK_generic (L1_S_B|L1_S_C)
                    484: #define        L1_S_CACHE_MASK_xscale  (L1_S_B|L1_S_C|L1_S_XSCALE_TEX(TEX_XSCALE_X))
1.45      thorpej   485:
                    486: #define        L2_L_PROT_U             (L2_AP(AP_U))
                    487: #define        L2_L_PROT_W             (L2_AP(AP_W))
                    488: #define        L2_L_PROT_MASK          (L2_L_PROT_U|L2_L_PROT_W)
                    489:
1.49      thorpej   490: #define        L2_L_CACHE_MASK_generic (L2_B|L2_C)
                    491: #define        L2_L_CACHE_MASK_xscale  (L2_B|L2_C|L2_XSCALE_L_TEX(TEX_XSCALE_X))
                    492:
1.46      thorpej   493: #define        L2_S_PROT_U_generic     (L2_AP(AP_U))
                    494: #define        L2_S_PROT_W_generic     (L2_AP(AP_W))
                    495: #define        L2_S_PROT_MASK_generic  (L2_S_PROT_U|L2_S_PROT_W)
                    496:
1.48      thorpej   497: #define        L2_S_PROT_U_xscale      (L2_AP0(AP_U))
                    498: #define        L2_S_PROT_W_xscale      (L2_AP0(AP_W))
1.46      thorpej   499: #define        L2_S_PROT_MASK_xscale   (L2_S_PROT_U|L2_S_PROT_W)
                    500:
1.49      thorpej   501: #define        L2_S_CACHE_MASK_generic (L2_B|L2_C)
                    502: #define        L2_S_CACHE_MASK_xscale  (L2_B|L2_C|L2_XSCALE_T_TEX(TEX_XSCALE_X))
1.46      thorpej   503:
                    504: #define        L1_S_PROTO_generic      (L1_TYPE_S | L1_S_IMP)
1.47      thorpej   505: #define        L1_S_PROTO_xscale       (L1_TYPE_S)
1.46      thorpej   506:
                    507: #define        L1_C_PROTO_generic      (L1_TYPE_C | L1_C_IMP2)
1.47      thorpej   508: #define        L1_C_PROTO_xscale       (L1_TYPE_C)
1.46      thorpej   509:
                    510: #define        L2_L_PROTO              (L2_TYPE_L)
                    511:
                    512: #define        L2_S_PROTO_generic      (L2_TYPE_S)
1.48      thorpej   513: #define        L2_S_PROTO_xscale       (L2_TYPE_XSCALE_XS)
1.45      thorpej   514:
1.46      thorpej   515: /*
                    516:  * User-visible names for the ones that vary with MMU class.
                    517:  */
                    518:
                    519: #if ARM_NMMUS > 1
                    520: /* More than one MMU class configured; use variables. */
                    521: #define        L2_S_PROT_U             pte_l2_s_prot_u
                    522: #define        L2_S_PROT_W             pte_l2_s_prot_w
                    523: #define        L2_S_PROT_MASK          pte_l2_s_prot_mask
                    524:
1.49      thorpej   525: #define        L1_S_CACHE_MASK         pte_l1_s_cache_mask
                    526: #define        L2_L_CACHE_MASK         pte_l2_l_cache_mask
                    527: #define        L2_S_CACHE_MASK         pte_l2_s_cache_mask
                    528:
1.46      thorpej   529: #define        L1_S_PROTO              pte_l1_s_proto
                    530: #define        L1_C_PROTO              pte_l1_c_proto
                    531: #define        L2_S_PROTO              pte_l2_s_proto
1.51      thorpej   532:
                    533: #define        pmap_copy_page(s, d)    (*pmap_copy_page_func)((s), (d))
                    534: #define        pmap_zero_page(d)       (*pmap_zero_page_func)((d))
1.69      thorpej   535: #elif (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0
                    536: /*
                    537:  * Exactly one MMU class (generic ARM or SA-1) configured; resolve
                    538:  * everything to compile-time constants so the compiler can fold them.
                    539:  */
1.46      thorpej   536: #define        L2_S_PROT_U             L2_S_PROT_U_generic
                    537: #define        L2_S_PROT_W             L2_S_PROT_W_generic
                    538: #define        L2_S_PROT_MASK          L2_S_PROT_MASK_generic
                    539:
1.49      thorpej   540: #define        L1_S_CACHE_MASK         L1_S_CACHE_MASK_generic
                    541: #define        L2_L_CACHE_MASK         L2_L_CACHE_MASK_generic
                    542: #define        L2_S_CACHE_MASK         L2_S_CACHE_MASK_generic
                    543:
1.46      thorpej   544: #define        L1_S_PROTO              L1_S_PROTO_generic
                    545: #define        L1_C_PROTO              L1_C_PROTO_generic
                    546: #define        L2_S_PROTO              L2_S_PROTO_generic
1.51      thorpej   547:
                    548: #define        pmap_copy_page(s, d)    pmap_copy_page_generic((s), (d))
                    549: #define        pmap_zero_page(d)       pmap_zero_page_generic((d))
1.46      thorpej   550: #elif ARM_MMU_XSCALE == 1
                    551: /* XScale-only kernel; likewise all compile-time constants. */
                    551: #define        L2_S_PROT_U             L2_S_PROT_U_xscale
                    552: #define        L2_S_PROT_W             L2_S_PROT_W_xscale
                    553: #define        L2_S_PROT_MASK          L2_S_PROT_MASK_xscale
1.49      thorpej   554:
                    555: #define        L1_S_CACHE_MASK         L1_S_CACHE_MASK_xscale
                    556: #define        L2_L_CACHE_MASK         L2_L_CACHE_MASK_xscale
                    557: #define        L2_S_CACHE_MASK         L2_S_CACHE_MASK_xscale
1.46      thorpej   558:
                    559: #define        L1_S_PROTO              L1_S_PROTO_xscale
                    560: #define        L1_C_PROTO              L1_C_PROTO_xscale
                    561: #define        L2_S_PROTO              L2_S_PROTO_xscale
1.51      thorpej   562:
                    563: #define        pmap_copy_page(s, d)    pmap_copy_page_xscale((s), (d))
                    564: #define        pmap_zero_page(d)       pmap_zero_page_xscale((d))
1.46      thorpej   565: #endif /* ARM_NMMUS > 1 */
1.20      chs       566:
1.20      chs       566:
1.45      thorpej   567: /*
                    568:  * These macros return various bits based on kernel/user and protection.
                    569:  * Note that the compiler will usually fold these at compile time.
                    570:  *
                    571:  * (ku) is PTE_USER or the kernel value; (pr) is a VM_PROT_* mask.
                    572:  * Only VM_PROT_WRITE is consulted here; read permission is implied
                    573:  * by the mapping existing at all.
                    574:  */
                    571: #define        L1_S_PROT(ku, pr)       ((((ku) == PTE_USER) ? L1_S_PROT_U : 0) | \
                    572:                                 (((pr) & VM_PROT_WRITE) ? L1_S_PROT_W : 0))
                    573:
                    574: #define        L2_L_PROT(ku, pr)       ((((ku) == PTE_USER) ? L2_L_PROT_U : 0) | \
                    575:                                 (((pr) & VM_PROT_WRITE) ? L2_L_PROT_W : 0))
                    576:
                    577: #define        L2_S_PROT(ku, pr)       ((((ku) == PTE_USER) ? L2_S_PROT_U : 0) | \
                    578:                                 (((pr) & VM_PROT_WRITE) ? L2_S_PROT_W : 0))
1.66      thorpej   579:
                    580: /*
                    581:  * Macros to test if a mapping is mappable with an L1 Section mapping
                    582:  * or an L2 Large Page mapping.
                    583:  *
                    584:  * Both VA and PA must be aligned to the mapping size (checked by OR-ing
                    585:  * them together before masking) and the region must be at least that big.
                    586:  */
                    584: #define        L1_S_MAPPABLE_P(va, pa, size)                                   \
                    585:        ((((va) | (pa)) & L1_S_OFFSET) == 0 && (size) >= L1_S_SIZE)
                    586:
1.67      thorpej   587: #define        L2_L_MAPPABLE_P(va, pa, size)                                   \
1.68      thorpej   588:        ((((va) | (pa)) & L2_L_OFFSET) == 0 && (size) >= L2_L_SIZE)
1.64      thorpej   589:
                    590: /*
                    591:  * Hooks for the pool allocator.
                    592:  */
                    593: #define        POOL_VTOPHYS(va)        vtophys((vaddr_t) (va))
1.18      thorpej   594:
1.18      thorpej   594:
                    595: #endif /* _KERNEL */
1.1       reinoud   596:
                    597: #endif /* _ARM32_PMAP_H_ */

CVSweb <webmaster@jp.NetBSD.org>