
Annotation of src/sys/arch/arm/arm32/pmap.c, Revision 1.115

1.115   ! thorpej     1: /*     $NetBSD: pmap.c,v 1.114 2002/08/24 02:50:53 thorpej Exp $       */
1.12      chris       2:
                      3: /*
1.49      thorpej     4:  * Copyright (c) 2002 Wasabi Systems, Inc.
1.12      chris       5:  * Copyright (c) 2001 Richard Earnshaw
                      6:  * Copyright (c) 2001 Christopher Gilbert
                      7:  * All rights reserved.
                      8:  *
                      9:  * 1. Redistributions of source code must retain the above copyright
                     10:  *    notice, this list of conditions and the following disclaimer.
                     11:  * 2. Redistributions in binary form must reproduce the above copyright
                     12:  *    notice, this list of conditions and the following disclaimer in the
                     13:  *    documentation and/or other materials provided with the distribution.
                     14:  * 3. The name of the company nor the name of the author may be used to
                     15:  *    endorse or promote products derived from this software without specific
                     16:  *    prior written permission.
                     17:  *
                     18:  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
                     19:  * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
                     20:  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
                     21:  * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
                     22:  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
                     23:  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
                     24:  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
                     25:  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
                     26:  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
                     27:  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
                     28:  * SUCH DAMAGE.
                     29:  */
1.1       matt       30:
                     31: /*-
                     32:  * Copyright (c) 1999 The NetBSD Foundation, Inc.
                     33:  * All rights reserved.
                     34:  *
                     35:  * This code is derived from software contributed to The NetBSD Foundation
                     36:  * by Charles M. Hannum.
                     37:  *
                     38:  * Redistribution and use in source and binary forms, with or without
                     39:  * modification, are permitted provided that the following conditions
                     40:  * are met:
                     41:  * 1. Redistributions of source code must retain the above copyright
                     42:  *    notice, this list of conditions and the following disclaimer.
                     43:  * 2. Redistributions in binary form must reproduce the above copyright
                     44:  *    notice, this list of conditions and the following disclaimer in the
                     45:  *    documentation and/or other materials provided with the distribution.
                     46:  * 3. All advertising materials mentioning features or use of this software
                     47:  *    must display the following acknowledgement:
                     48:  *        This product includes software developed by the NetBSD
                     49:  *        Foundation, Inc. and its contributors.
                     50:  * 4. Neither the name of The NetBSD Foundation nor the names of its
                     51:  *    contributors may be used to endorse or promote products derived
                     52:  *    from this software without specific prior written permission.
                     53:  *
                     54:  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
                     55:  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
                     56:  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
                     57:  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
                     58:  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
                     59:  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
                     60:  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
                     61:  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
                     62:  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
                     63:  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
                     64:  * POSSIBILITY OF SUCH DAMAGE.
                     65:  */
                     66:
                     67: /*
                     68:  * Copyright (c) 1994-1998 Mark Brinicombe.
                     69:  * Copyright (c) 1994 Brini.
                     70:  * All rights reserved.
                     71:  *
                     72:  * This code is derived from software written for Brini by Mark Brinicombe
                     73:  *
                     74:  * Redistribution and use in source and binary forms, with or without
                     75:  * modification, are permitted provided that the following conditions
                     76:  * are met:
                     77:  * 1. Redistributions of source code must retain the above copyright
                     78:  *    notice, this list of conditions and the following disclaimer.
                     79:  * 2. Redistributions in binary form must reproduce the above copyright
                     80:  *    notice, this list of conditions and the following disclaimer in the
                     81:  *    documentation and/or other materials provided with the distribution.
                     82:  * 3. All advertising materials mentioning features or use of this software
                     83:  *    must display the following acknowledgement:
                     84:  *     This product includes software developed by Mark Brinicombe.
                     85:  * 4. The name of the author may not be used to endorse or promote products
                     86:  *    derived from this software without specific prior written permission.
                     87:  *
                     88:  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
                     89:  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
                     90:  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
                     91:  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
                     92:  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
                     93:  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
                     94:  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
                     95:  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
                     96:  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
                      97:  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
                     98:  * RiscBSD kernel project
                     99:  *
                    100:  * pmap.c
                    101:  *
                     102:  * Machine dependent vm stuff
                    103:  *
                    104:  * Created      : 20/09/94
                    105:  */
                    106:
                    107: /*
                    108:  * Performance improvements, UVM changes, overhauls and part-rewrites
                    109:  * were contributed by Neil A. Carson <neil@causality.com>.
                    110:  */
                    111:
                    112: /*
                    113:  * The dram block info is currently referenced from the bootconfig.
                    114:  * This should be placed in a separate structure.
                    115:  */
                    116:
                    117: /*
                    118:  * Special compilation symbols
                    119:  * PMAP_DEBUG          - Build in pmap_debug_level code
                    120:  */
                    121:
                    122: /* Include header files */
                    123:
                    124: #include "opt_pmap_debug.h"
                    125: #include "opt_ddb.h"
                    126:
                    127: #include <sys/types.h>
                    128: #include <sys/param.h>
                    129: #include <sys/kernel.h>
                    130: #include <sys/systm.h>
                    131: #include <sys/proc.h>
                    132: #include <sys/malloc.h>
                    133: #include <sys/user.h>
1.10      chris     134: #include <sys/pool.h>
1.16      chris     135: #include <sys/cdefs.h>
                    136:
1.1       matt      137: #include <uvm/uvm.h>
                    138:
                    139: #include <machine/bootconfig.h>
                    140: #include <machine/bus.h>
                    141: #include <machine/pmap.h>
                    142: #include <machine/pcb.h>
                    143: #include <machine/param.h>
1.32      thorpej   144: #include <arm/arm32/katelib.h>
1.16      chris     145:
1.115   ! thorpej   146: __KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.114 2002/08/24 02:50:53 thorpej Exp $");
1.1       matt      147: #ifdef PMAP_DEBUG
                    148: #define        PDEBUG(_lev_,_stat_) \
                    149:        if (pmap_debug_level >= (_lev_)) \
                    150:                ((_stat_))
                    151: int pmap_debug_level = -2;
1.48      chris     152: void pmap_dump_pvlist(vaddr_t phys, char *m);
1.17      chris     153:
                    154: /*
                    155:  * for switching to potentially finer grained debugging
                    156:  */
                    157: #define        PDB_FOLLOW      0x0001
                    158: #define        PDB_INIT        0x0002
                    159: #define        PDB_ENTER       0x0004
                    160: #define        PDB_REMOVE      0x0008
                    161: #define        PDB_CREATE      0x0010
                    162: #define        PDB_PTPAGE      0x0020
1.48      chris     163: #define        PDB_GROWKERN    0x0040
1.17      chris     164: #define        PDB_BITS        0x0080
                    165: #define        PDB_COLLECT     0x0100
                    166: #define        PDB_PROTECT     0x0200
1.48      chris     167: #define        PDB_MAP_L1      0x0400
1.17      chris     168: #define        PDB_BOOTSTRAP   0x1000
                    169: #define        PDB_PARANOIA    0x2000
                    170: #define        PDB_WIRING      0x4000
                    171: #define        PDB_PVDUMP      0x8000
                    172:
                    173: int debugmap = 0;
                    174: int pmapdebug = PDB_PARANOIA | PDB_FOLLOW;
                    175: #define        NPDEBUG(_lev_,_stat_) \
                    176:        if (pmapdebug & (_lev_)) \
                    177:                ((_stat_))
                    178:
1.1       matt      179: #else  /* PMAP_DEBUG */
                    180: #define        PDEBUG(_lev_,_stat_) /* Nothing */
1.48      chris     181: #define NPDEBUG(_lev_,_stat_) /* Nothing */
1.1       matt      182: #endif /* PMAP_DEBUG */
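/*
 * Illustrative sketch of how the two debug macros above are meant to be
 * used (hypothetical call sites, not copied from this file): PDEBUG()
 * keys off the single pmap_debug_level threshold, while NPDEBUG() keys
 * off the PDB_* bit mask in pmapdebug, so individual areas of the pmap
 * can be traced on their own.
 *
 *	PDEBUG(5, printf("pmap_enter: pmap=%p va=%08lx\n", pmap, va));
 *	NPDEBUG(PDB_ENTER,
 *	    printf("pmap_enter: pmap=%p va=%08lx\n", pmap, va));
 *
 * Both expand to nothing when PMAP_DEBUG is not defined.
 */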
                    183:
                    184: struct pmap     kernel_pmap_store;
                    185:
1.10      chris     186: /*
1.48      chris     187:  * linked list of all non-kernel pmaps
                    188:  */
                    189:
1.69      thorpej   190: static LIST_HEAD(, pmap) pmaps;
1.48      chris     191:
                    192: /*
1.10      chris     193:  * pool that pmap structures are allocated from
                    194:  */
                    195:
                    196: struct pool pmap_pmap_pool;
                    197:
1.111     thorpej   198: /*
                    199:  * pool/cache that PT-PT's are allocated from
                    200:  */
                    201:
                    202: struct pool pmap_ptpt_pool;
                    203: struct pool_cache pmap_ptpt_cache;
                    204: u_int pmap_ptpt_cache_generation;
                    205:
                    206: static void *pmap_ptpt_page_alloc(struct pool *, int);
                    207: static void pmap_ptpt_page_free(struct pool *, void *);
                    208:
                    209: struct pool_allocator pmap_ptpt_allocator = {
                    210:        pmap_ptpt_page_alloc, pmap_ptpt_page_free,
                    211: };
                    212:
                    213: static int pmap_ptpt_ctor(void *, void *, int);
                    214:
1.54      thorpej   215: static pt_entry_t *csrc_pte, *cdst_pte;
                    216: static vaddr_t csrcp, cdstp;
                    217:
1.1       matt      218: char *memhook;
                    219: extern caddr_t msgbufaddr;
                    220:
                    221: boolean_t pmap_initialized = FALSE;    /* Has pmap_init completed? */
1.17      chris     222: /*
                    223:  * locking data structures
                    224:  */
1.1       matt      225:
1.17      chris     226: static struct lock pmap_main_lock;
                    227: static struct simplelock pvalloc_lock;
1.48      chris     228: static struct simplelock pmaps_lock;
1.17      chris     229: #ifdef LOCKDEBUG
                    230: #define PMAP_MAP_TO_HEAD_LOCK() \
                    231:      (void) spinlockmgr(&pmap_main_lock, LK_SHARED, NULL)
                    232: #define PMAP_MAP_TO_HEAD_UNLOCK() \
                    233:      (void) spinlockmgr(&pmap_main_lock, LK_RELEASE, NULL)
                    234:
                    235: #define PMAP_HEAD_TO_MAP_LOCK() \
                    236:      (void) spinlockmgr(&pmap_main_lock, LK_EXCLUSIVE, NULL)
                    237: #define PMAP_HEAD_TO_MAP_UNLOCK() \
                    238:      (void) spinlockmgr(&pmap_main_lock, LK_RELEASE, NULL)
                    239: #else
                    240: #define        PMAP_MAP_TO_HEAD_LOCK()         /* nothing */
                    241: #define        PMAP_MAP_TO_HEAD_UNLOCK()       /* nothing */
                    242: #define        PMAP_HEAD_TO_MAP_LOCK()         /* nothing */
                    243: #define        PMAP_HEAD_TO_MAP_UNLOCK()       /* nothing */
                    244: #endif /* LOCKDEBUG */
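/*
 * Hedged sketch of the intended lock ordering (a hypothetical call
 * site, not one copied from this file): code that starts from a pmap
 * and walks toward a page's PV list takes the main lock shared and
 * then the per-page pvh_slock; code that starts from a vm_page and
 * walks toward pmaps takes the main lock exclusive, which keeps the
 * two opposite lock orderings from running concurrently.
 *
 *	PMAP_MAP_TO_HEAD_LOCK();
 *	simple_lock(&pg->mdpage.pvh_slock);
 *	... manipulate pg->mdpage.pvh_list ...
 *	simple_unlock(&pg->mdpage.pvh_slock);
 *	PMAP_MAP_TO_HEAD_UNLOCK();
 */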
                    245:
                    246: /*
                    247:  * pv_page management structures: locked by pvalloc_lock
                    248:  */
1.1       matt      249:
1.17      chris     250: TAILQ_HEAD(pv_pagelist, pv_page);
                    251: static struct pv_pagelist pv_freepages;        /* list of pv_pages with free entrys */
                    252: static struct pv_pagelist pv_unusedpgs; /* list of unused pv_pages */
                    253: static int pv_nfpvents;                        /* # of free pv entries */
                    254: static struct pv_page *pv_initpage;    /* bootstrap page from kernel_map */
                    255: static vaddr_t pv_cachedva;            /* cached VA for later use */
                    256:
                    257: #define PVE_LOWAT (PVE_PER_PVPAGE / 2) /* free pv_entry low water mark */
                    258: #define PVE_HIWAT (PVE_LOWAT + (PVE_PER_PVPAGE * 2))
                    259:                                        /* high water mark */
                    260:
                    261: /*
                    262:  * local prototypes
                    263:  */
                    264:
                    265: static struct pv_entry *pmap_add_pvpage __P((struct pv_page *, boolean_t));
                    266: static struct pv_entry *pmap_alloc_pv __P((struct pmap *, int)); /* see codes below */
                    267: #define ALLOCPV_NEED   0       /* need PV now */
                    268: #define ALLOCPV_TRY    1       /* just try to allocate, don't steal */
                    269: #define ALLOCPV_NONEED 2       /* don't need PV, just growing cache */
                    270: static struct pv_entry *pmap_alloc_pvpage __P((struct pmap *, int));
1.49      thorpej   271: static void             pmap_enter_pv __P((struct vm_page *,
1.17      chris     272:                                            struct pv_entry *, struct pmap *,
                    273:                                            vaddr_t, struct vm_page *, int));
                    274: static void             pmap_free_pv __P((struct pmap *, struct pv_entry *));
                    275: static void             pmap_free_pvs __P((struct pmap *, struct pv_entry *));
                    276: static void             pmap_free_pv_doit __P((struct pv_entry *));
                    277: static void             pmap_free_pvpage __P((void));
                    278: static boolean_t        pmap_is_curpmap __P((struct pmap *));
1.49      thorpej   279: static struct pv_entry *pmap_remove_pv __P((struct vm_page *, struct pmap *,
1.17      chris     280:                        vaddr_t));
                    281: #define PMAP_REMOVE_ALL                0       /* remove all mappings */
                    282: #define PMAP_REMOVE_SKIPWIRED  1       /* skip wired mappings */
1.1       matt      283:
1.49      thorpej   284: static u_int pmap_modify_pv __P((struct pmap *, vaddr_t, struct vm_page *,
1.33      chris     285:        u_int, u_int));
                    286:
1.69      thorpej   287: /*
                     288:  * Structure that describes an L1 table.
                    289:  */
                    290: struct l1pt {
                    291:        SIMPLEQ_ENTRY(l1pt)     pt_queue;       /* Queue pointers */
                    292:        struct pglist           pt_plist;       /* Allocated page list */
                    293:        vaddr_t                 pt_va;          /* Allocated virtual address */
                    294:        int                     pt_flags;       /* Flags */
                    295: };
                    296: #define        PTFLAG_STATIC           0x01            /* Statically allocated */
                    297: #define        PTFLAG_KPT              0x02            /* Kernel pt's are mapped */
                    298: #define        PTFLAG_CLEAN            0x04            /* L1 is clean */
                    299:
1.33      chris     300: static void pmap_free_l1pt __P((struct l1pt *));
                    301: static int pmap_allocpagedir __P((struct pmap *));
                    302: static int pmap_clean_page __P((struct pv_entry *, boolean_t));
1.49      thorpej   303: static void pmap_remove_all __P((struct vm_page *));
1.33      chris     304:
1.57      thorpej   305: static struct vm_page  *pmap_alloc_ptp __P((struct pmap *, vaddr_t));
                    306: static struct vm_page  *pmap_get_ptp __P((struct pmap *, vaddr_t));
1.49      thorpej   307: __inline static void pmap_clearbit __P((struct vm_page *, unsigned int));
1.17      chris     308:
1.2       matt      309: extern paddr_t physical_start;
                    310: extern paddr_t physical_end;
1.1       matt      311: extern unsigned int free_pages;
                    312: extern int max_processes;
                    313:
1.54      thorpej   314: vaddr_t virtual_avail;
1.1       matt      315: vaddr_t virtual_end;
1.48      chris     316: vaddr_t pmap_curmaxkvaddr;
1.1       matt      317:
                    318: vaddr_t avail_start;
                    319: vaddr_t avail_end;
                    320:
                    321: extern pv_addr_t systempage;
                    322:
                    323: /* Variables used by the L1 page table queue code */
                    324: SIMPLEQ_HEAD(l1pt_queue, l1pt);
1.73      thorpej   325: static struct l1pt_queue l1pt_static_queue; /* head of our static l1 queue */
                    326: static int l1pt_static_queue_count;        /* items in the static l1 queue */
                    327: static int l1pt_static_create_count;       /* static l1 items created */
                    328: static struct l1pt_queue l1pt_queue;       /* head of our l1 queue */
                    329: static int l1pt_queue_count;               /* items in the l1 queue */
                    330: static int l1pt_create_count;              /* stat - L1's create count */
                    331: static int l1pt_reuse_count;               /* stat - L1's reused count */
1.1       matt      332:
                    333: /* Local function prototypes (not used outside this file) */
1.15      chris     334: void pmap_pinit __P((struct pmap *));
                    335: void pmap_freepagedir __P((struct pmap *));
1.1       matt      336:
                    337: /* Other function prototypes */
                    338: extern void bzero_page __P((vaddr_t));
                    339: extern void bcopy_page __P((vaddr_t, vaddr_t));
                    340:
                    341: struct l1pt *pmap_alloc_l1pt __P((void));
1.15      chris     342: static __inline void pmap_map_in_l1 __P((struct pmap *pmap, vaddr_t va,
1.113     thorpej   343:      vaddr_t l2pa, int));
1.1       matt      344:
1.11      chris     345: static pt_entry_t *pmap_map_ptes __P((struct pmap *));
1.17      chris     346: static void pmap_unmap_ptes __P((struct pmap *));
1.11      chris     347:
1.49      thorpej   348: __inline static void pmap_vac_me_harder __P((struct pmap *, struct vm_page *,
1.25      rearnsha  349:     pt_entry_t *, boolean_t));
1.49      thorpej   350: static void pmap_vac_me_kpmap __P((struct pmap *, struct vm_page *,
1.25      rearnsha  351:     pt_entry_t *, boolean_t));
1.49      thorpej   352: static void pmap_vac_me_user __P((struct pmap *, struct vm_page *,
1.25      rearnsha  353:     pt_entry_t *, boolean_t));
1.11      chris     354:
1.17      chris     355: /*
                    356:  * real definition of pv_entry.
                    357:  */
                    358:
                    359: struct pv_entry {
                    360:        struct pv_entry *pv_next;       /* next pv_entry */
                    361:        struct pmap     *pv_pmap;        /* pmap where mapping lies */
                    362:        vaddr_t         pv_va;          /* virtual address for mapping */
                    363:        int             pv_flags;       /* flags */
                    364:        struct vm_page  *pv_ptp;        /* vm_page for the ptp */
                    365: };
                    366:
                    367: /*
                    368:  * pv_entrys are dynamically allocated in chunks from a single page.
                    369:  * we keep track of how many pv_entrys are in use for each page and
                    370:  * we can free pv_entry pages if needed.  there is one lock for the
                    371:  * entire allocation system.
                    372:  */
                    373:
                    374: struct pv_page_info {
                    375:        TAILQ_ENTRY(pv_page) pvpi_list;
                    376:        struct pv_entry *pvpi_pvfree;
                    377:        int pvpi_nfree;
                    378: };
                    379:
                    380: /*
                    381:  * number of pv_entry's in a pv_page
                     382:  * (note: won't work on systems where NBPG isn't a constant)
                    383:  */
                    384:
                    385: #define PVE_PER_PVPAGE ((NBPG - sizeof(struct pv_page_info)) / \
                    386:                        sizeof(struct pv_entry))
                    387:
                    388: /*
                    389:  * a pv_page: where pv_entrys are allocated from
                    390:  */
                    391:
                    392: struct pv_page {
                    393:        struct pv_page_info pvinfo;
                    394:        struct pv_entry pvents[PVE_PER_PVPAGE];
                    395: };
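/*
 * Worked example for PVE_PER_PVPAGE (assuming ILP32 pointers, no
 * structure padding and NBPG == 4096, the usual arm32 values; these
 * numbers are assumptions, not definitions from this file):
 *
 *	sizeof(struct pv_page_info) == 16	(two TAILQ pointers,
 *						 one pointer, one int)
 *	sizeof(struct pv_entry)     == 20	(five 4-byte members)
 *	PVE_PER_PVPAGE == (4096 - 16) / 20 == 204
 *
 * so each pv_page contributes roughly 204 pv_entry structures.
 */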
                    396:
1.1       matt      397: #ifdef MYCROFT_HACK
                    398: int mycroft_hack = 0;
                    399: #endif
                    400:
                    401: /* Function to set the debug level of the pmap code */
                    402:
                    403: #ifdef PMAP_DEBUG
                    404: void
1.73      thorpej   405: pmap_debug(int level)
1.1       matt      406: {
                    407:        pmap_debug_level = level;
                    408:        printf("pmap_debug: level=%d\n", pmap_debug_level);
                    409: }
                    410: #endif /* PMAP_DEBUG */
                    411:
1.22      chris     412: __inline static boolean_t
1.17      chris     413: pmap_is_curpmap(struct pmap *pmap)
                    414: {
1.58      thorpej   415:
                    416:        if ((curproc && curproc->p_vmspace->vm_map.pmap == pmap) ||
                    417:            pmap == pmap_kernel())
                    418:                return (TRUE);
                    419:
                    420:        return (FALSE);
1.17      chris     421: }
1.1       matt      422:
                    423: /*
1.113     thorpej   424:  * PTE_SYNC_CURRENT:
                    425:  *
                    426:  *     Make sure the pte is flushed to RAM.  If the pmap is
                    427:  *     not the current pmap, then also evict the pte from
                    428:  *     any cache lines.
                    429:  */
                    430: #define        PTE_SYNC_CURRENT(pmap, pte)                                     \
                    431: do {                                                                   \
                    432:        if (pmap_is_curpmap(pmap))                                      \
                    433:                PTE_SYNC(pte);                                          \
                    434:        else                                                            \
                    435:                PTE_FLUSH(pte);                                         \
                    436: } while (/*CONSTCOND*/0)
                    437:
                    438: /*
                    439:  * PTE_FLUSH_ALT:
                    440:  *
                    441:  *     Make sure the pte is not in any cache lines.  We expect
                    442:  *     this to be used only when a pte has not been modified.
                    443:  */
                    444: #define        PTE_FLUSH_ALT(pmap, pte)                                        \
                    445: do {                                                                   \
                    446:        if (pmap_is_curpmap(pmap) == 0)                                 \
                    447:                PTE_FLUSH(pte);                                         \
                    448: } while (/*CONSTCOND*/0)
                    449:
                    450: /*
1.17      chris     451:  * p v _ e n t r y   f u n c t i o n s
                    452:  */
                    453:
                    454: /*
                    455:  * pv_entry allocation functions:
                    456:  *   the main pv_entry allocation functions are:
                    457:  *     pmap_alloc_pv: allocate a pv_entry structure
                    458:  *     pmap_free_pv: free one pv_entry
                    459:  *     pmap_free_pvs: free a list of pv_entrys
                    460:  *
                    461:  * the rest are helper functions
1.1       matt      462:  */
                    463:
                    464: /*
1.17      chris     465:  * pmap_alloc_pv: inline function to allocate a pv_entry structure
                    466:  * => we lock pvalloc_lock
                    467:  * => if we fail, we call out to pmap_alloc_pvpage
                    468:  * => 3 modes:
                    469:  *    ALLOCPV_NEED   = we really need a pv_entry, even if we have to steal it
                    470:  *    ALLOCPV_TRY    = we want a pv_entry, but not enough to steal
                    471:  *    ALLOCPV_NONEED = we are trying to grow our free list, don't really need
                    472:  *                     one now
                    473:  *
                    474:  * "try" is for optional functions like pmap_copy().
1.1       matt      475:  */
1.17      chris     476:
                    477: __inline static struct pv_entry *
1.73      thorpej   478: pmap_alloc_pv(struct pmap *pmap, int mode)
1.1       matt      479: {
1.17      chris     480:        struct pv_page *pvpage;
                    481:        struct pv_entry *pv;
                    482:
                    483:        simple_lock(&pvalloc_lock);
                    484:
1.51      chris     485:        pvpage = TAILQ_FIRST(&pv_freepages);
                    486:
                    487:        if (pvpage != NULL) {
1.17      chris     488:                pvpage->pvinfo.pvpi_nfree--;
                    489:                if (pvpage->pvinfo.pvpi_nfree == 0) {
                    490:                        /* nothing left in this one? */
                    491:                        TAILQ_REMOVE(&pv_freepages, pvpage, pvinfo.pvpi_list);
                    492:                }
                    493:                pv = pvpage->pvinfo.pvpi_pvfree;
1.51      chris     494:                KASSERT(pv);
1.17      chris     495:                pvpage->pvinfo.pvpi_pvfree = pv->pv_next;
                    496:                pv_nfpvents--;  /* took one from pool */
                    497:        } else {
                    498:                pv = NULL;              /* need more of them */
                    499:        }
                    500:
                    501:        /*
                    502:         * if below low water mark or we didn't get a pv_entry we try and
                    503:         * create more pv_entrys ...
                    504:         */
                    505:
                    506:        if (pv_nfpvents < PVE_LOWAT || pv == NULL) {
                    507:                if (pv == NULL)
                    508:                        pv = pmap_alloc_pvpage(pmap, (mode == ALLOCPV_TRY) ?
                    509:                                               mode : ALLOCPV_NEED);
                    510:                else
                    511:                        (void) pmap_alloc_pvpage(pmap, ALLOCPV_NONEED);
                    512:        }
                    513:
                    514:        simple_unlock(&pvalloc_lock);
                    515:        return(pv);
                    516: }
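/*
 * Hypothetical caller sketch for pmap_alloc_pv() (not copied from a
 * real call site in this file): a mandatory allocation passes
 * ALLOCPV_NEED and may steal, an optional one passes ALLOCPV_TRY and
 * must cope with a NULL return.
 *
 *	pve = pmap_alloc_pv(pmap, ALLOCPV_TRY);
 *	if (pve == NULL)
 *		return;		(optional mapping: just give up)
 *	pmap_enter_pv(pg, pve, pmap, va, ptp, flags);
 */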
                    517:
                    518: /*
                    519:  * pmap_alloc_pvpage: maybe allocate a new pvpage
                    520:  *
                    521:  * if need_entry is false: try and allocate a new pv_page
                    522:  * if need_entry is true: try and allocate a new pv_page and return a
                    523:  *     new pv_entry from it.   if we are unable to allocate a pv_page
                    524:  *     we make a last ditch effort to steal a pv_page from some other
                    525:  *     mapping.    if that fails, we panic...
                    526:  *
                    527:  * => we assume that the caller holds pvalloc_lock
                    528:  */
                    529:
                    530: static struct pv_entry *
1.73      thorpej   531: pmap_alloc_pvpage(struct pmap *pmap, int mode)
1.17      chris     532: {
                    533:        struct vm_page *pg;
                    534:        struct pv_page *pvpage;
1.1       matt      535:        struct pv_entry *pv;
1.17      chris     536:        int s;
                    537:
                    538:        /*
                    539:         * if we need_entry and we've got unused pv_pages, allocate from there
                    540:         */
                    541:
1.51      chris     542:        pvpage = TAILQ_FIRST(&pv_unusedpgs);
                    543:        if (mode != ALLOCPV_NONEED && pvpage != NULL) {
1.17      chris     544:
                    545:                /* move it to pv_freepages list */
                    546:                TAILQ_REMOVE(&pv_unusedpgs, pvpage, pvinfo.pvpi_list);
                    547:                TAILQ_INSERT_HEAD(&pv_freepages, pvpage, pvinfo.pvpi_list);
                    548:
                    549:                /* allocate a pv_entry */
                    550:                pvpage->pvinfo.pvpi_nfree--;    /* can't go to zero */
                    551:                pv = pvpage->pvinfo.pvpi_pvfree;
1.51      chris     552:                KASSERT(pv);
1.17      chris     553:                pvpage->pvinfo.pvpi_pvfree = pv->pv_next;
                    554:
                    555:                pv_nfpvents--;  /* took one from pool */
                    556:                return(pv);
                    557:        }
1.1       matt      558:
                    559:        /*
1.17      chris     560:         *  see if we've got a cached unmapped VA that we can map a page in.
                    561:         * if not, try to allocate one.
1.1       matt      562:         */
                    563:
1.23      chs       564:
1.17      chris     565:        if (pv_cachedva == 0) {
1.23      chs       566:                s = splvm();
                    567:                pv_cachedva = uvm_km_kmemalloc(kmem_map, NULL,
1.17      chris     568:                    PAGE_SIZE, UVM_KMF_TRYLOCK|UVM_KMF_VALLOC);
1.23      chs       569:                splx(s);
1.17      chris     570:                if (pv_cachedva == 0) {
                    571:                        return (NULL);
1.1       matt      572:                }
                    573:        }
1.17      chris     574:
1.23      chs       575:        pg = uvm_pagealloc(NULL, pv_cachedva - vm_map_min(kernel_map), NULL,
                    576:            UVM_PGA_USERESERVE);
1.17      chris     577:
                    578:        if (pg == NULL)
                    579:                return (NULL);
1.51      chris     580:        pg->flags &= ~PG_BUSY;  /* never busy */
1.17      chris     581:
                    582:        /*
                    583:         * add a mapping for our new pv_page and free its entrys (save one!)
                    584:         *
                    585:         * NOTE: If we are allocating a PV page for the kernel pmap, the
                    586:         * pmap is already locked!  (...but entering the mapping is safe...)
                    587:         */
                    588:
1.51      chris     589:        pmap_kenter_pa(pv_cachedva, VM_PAGE_TO_PHYS(pg),
                    590:                VM_PROT_READ|VM_PROT_WRITE);
1.19      chris     591:        pmap_update(pmap_kernel());
1.17      chris     592:        pvpage = (struct pv_page *) pv_cachedva;
                    593:        pv_cachedva = 0;
                    594:        return (pmap_add_pvpage(pvpage, mode != ALLOCPV_NONEED));
1.1       matt      595: }
                    596:
                    597: /*
1.17      chris     598:  * pmap_add_pvpage: add a pv_page's pv_entrys to the free list
                    599:  *
                    600:  * => caller must hold pvalloc_lock
                    601:  * => if need_entry is true, we allocate and return one pv_entry
1.1       matt      602:  */
                    603:
1.17      chris     604: static struct pv_entry *
1.73      thorpej   605: pmap_add_pvpage(struct pv_page *pvp, boolean_t need_entry)
1.1       matt      606: {
1.17      chris     607:        int tofree, lcv;
                    608:
                    609:        /* do we need to return one? */
                    610:        tofree = (need_entry) ? PVE_PER_PVPAGE - 1 : PVE_PER_PVPAGE;
1.1       matt      611:
1.17      chris     612:        pvp->pvinfo.pvpi_pvfree = NULL;
                    613:        pvp->pvinfo.pvpi_nfree = tofree;
                    614:        for (lcv = 0 ; lcv < tofree ; lcv++) {
                    615:                pvp->pvents[lcv].pv_next = pvp->pvinfo.pvpi_pvfree;
                    616:                pvp->pvinfo.pvpi_pvfree = &pvp->pvents[lcv];
1.1       matt      617:        }
1.17      chris     618:        if (need_entry)
                    619:                TAILQ_INSERT_TAIL(&pv_freepages, pvp, pvinfo.pvpi_list);
                    620:        else
                    621:                TAILQ_INSERT_TAIL(&pv_unusedpgs, pvp, pvinfo.pvpi_list);
                    622:        pv_nfpvents += tofree;
                    623:        return((need_entry) ? &pvp->pvents[lcv] : NULL);
1.1       matt      624: }
                    625:
1.17      chris     626: /*
                    627:  * pmap_free_pv_doit: actually free a pv_entry
                    628:  *
                    629:  * => do not call this directly!  instead use either
                    630:  *    1. pmap_free_pv ==> free a single pv_entry
                    631:  *    2. pmap_free_pvs => free a list of pv_entrys
                    632:  * => we must be holding pvalloc_lock
                    633:  */
                    634:
                    635: __inline static void
1.73      thorpej   636: pmap_free_pv_doit(struct pv_entry *pv)
1.1       matt      637: {
1.17      chris     638:        struct pv_page *pvp;
1.1       matt      639:
1.17      chris     640:        pvp = (struct pv_page *) arm_trunc_page((vaddr_t)pv);
                    641:        pv_nfpvents++;
                    642:        pvp->pvinfo.pvpi_nfree++;
1.1       matt      643:
1.17      chris     644:        /* nfree == 1 => fully allocated page just became partly allocated */
                    645:        if (pvp->pvinfo.pvpi_nfree == 1) {
                    646:                TAILQ_INSERT_HEAD(&pv_freepages, pvp, pvinfo.pvpi_list);
1.1       matt      647:        }
                    648:
1.17      chris     649:        /* free it */
                    650:        pv->pv_next = pvp->pvinfo.pvpi_pvfree;
                    651:        pvp->pvinfo.pvpi_pvfree = pv;
1.1       matt      652:
1.17      chris     653:        /*
                    654:         * are all pv_page's pv_entry's free?  move it to unused queue.
                    655:         */
1.1       matt      656:
1.17      chris     657:        if (pvp->pvinfo.pvpi_nfree == PVE_PER_PVPAGE) {
                    658:                TAILQ_REMOVE(&pv_freepages, pvp, pvinfo.pvpi_list);
                    659:                TAILQ_INSERT_HEAD(&pv_unusedpgs, pvp, pvinfo.pvpi_list);
1.1       matt      660:        }
                    661: }
                    662:
                    663: /*
1.17      chris     664:  * pmap_free_pv: free a single pv_entry
                    665:  *
                    666:  * => we gain the pvalloc_lock
1.1       matt      667:  */
                    668:
1.17      chris     669: __inline static void
1.73      thorpej   670: pmap_free_pv(struct pmap *pmap, struct pv_entry *pv)
1.1       matt      671: {
1.17      chris     672:        simple_lock(&pvalloc_lock);
                    673:        pmap_free_pv_doit(pv);
                    674:
                    675:        /*
                    676:         * Can't free the PV page if the PV entries were associated with
                    677:         * the kernel pmap; the pmap is already locked.
                    678:         */
1.51      chris     679:        if (pv_nfpvents > PVE_HIWAT && TAILQ_FIRST(&pv_unusedpgs) != NULL &&
1.17      chris     680:            pmap != pmap_kernel())
                    681:                pmap_free_pvpage();
                    682:
                    683:        simple_unlock(&pvalloc_lock);
                    684: }
1.1       matt      685:
1.17      chris     686: /*
                    687:  * pmap_free_pvs: free a list of pv_entrys
                    688:  *
                    689:  * => we gain the pvalloc_lock
                    690:  */
1.1       matt      691:
1.17      chris     692: __inline static void
1.73      thorpej   693: pmap_free_pvs(struct pmap *pmap, struct pv_entry *pvs)
1.17      chris     694: {
                    695:        struct pv_entry *nextpv;
1.1       matt      696:
1.17      chris     697:        simple_lock(&pvalloc_lock);
1.1       matt      698:
1.17      chris     699:        for ( /* null */ ; pvs != NULL ; pvs = nextpv) {
                    700:                nextpv = pvs->pv_next;
                    701:                pmap_free_pv_doit(pvs);
1.1       matt      702:        }
                    703:
1.17      chris     704:        /*
                    705:         * Can't free the PV page if the PV entries were associated with
                    706:         * the kernel pmap; the pmap is already locked.
                    707:         */
1.51      chris     708:        if (pv_nfpvents > PVE_HIWAT && TAILQ_FIRST(&pv_unusedpgs) != NULL &&
1.17      chris     709:            pmap != pmap_kernel())
                    710:                pmap_free_pvpage();
1.1       matt      711:
1.17      chris     712:        simple_unlock(&pvalloc_lock);
1.1       matt      713: }
                    714:
                    715:
                    716: /*
1.17      chris     717:  * pmap_free_pvpage: try and free an unused pv_page structure
                    718:  *
                    719:  * => assume caller is holding the pvalloc_lock and that
                    720:  *     there is a page on the pv_unusedpgs list
                    721:  * => if we can't get a lock on the kmem_map we try again later
1.1       matt      722:  */
                    723:
1.17      chris     724: static void
1.73      thorpej   725: pmap_free_pvpage(void)
1.1       matt      726: {
1.17      chris     727:        int s;
                    728:        struct vm_map *map;
                    729:        struct vm_map_entry *dead_entries;
                    730:        struct pv_page *pvp;
                    731:
                    732:        s = splvm(); /* protect kmem_map */
1.1       matt      733:
1.51      chris     734:        pvp = TAILQ_FIRST(&pv_unusedpgs);
1.1       matt      735:
                    736:        /*
1.17      chris     737:         * note: watch out for pv_initpage which is allocated out of
                    738:         * kernel_map rather than kmem_map.
1.1       matt      739:         */
1.17      chris     740:        if (pvp == pv_initpage)
                    741:                map = kernel_map;
                    742:        else
                    743:                map = kmem_map;
                    744:        if (vm_map_lock_try(map)) {
                    745:
                    746:                /* remove pvp from pv_unusedpgs */
                    747:                TAILQ_REMOVE(&pv_unusedpgs, pvp, pvinfo.pvpi_list);
                    748:
                    749:                /* unmap the page */
                    750:                dead_entries = NULL;
                    751:                uvm_unmap_remove(map, (vaddr_t)pvp, ((vaddr_t)pvp) + PAGE_SIZE,
                    752:                    &dead_entries);
                    753:                vm_map_unlock(map);
                    754:
                    755:                if (dead_entries != NULL)
                    756:                        uvm_unmap_detach(dead_entries, 0);
1.1       matt      757:
1.17      chris     758:                pv_nfpvents -= PVE_PER_PVPAGE;  /* update free count */
1.1       matt      759:        }
1.17      chris     760:        if (pvp == pv_initpage)
                    761:                /* no more initpage, we've freed it */
                    762:                pv_initpage = NULL;
1.1       matt      763:
                    764:        splx(s);
                    765: }
                    766:
                    767: /*
1.17      chris     768:  * main pv_entry manipulation functions:
1.49      thorpej   769:  *   pmap_enter_pv: enter a mapping onto a vm_page list
                     770:  *   pmap_remove_pv: remove a mapping from a vm_page list
1.17      chris     771:  *
                    772:  * NOTE: pmap_enter_pv expects to lock the pvh itself
                     773:  *       pmap_remove_pv expects the caller to lock the pvh before calling
                    774:  */
                    775:
                    776: /*
 1.49      thorpej   777:  * pmap_enter_pv: enter a mapping onto a vm_page list
1.17      chris     778:  *
                    779:  * => caller should hold the proper lock on pmap_main_lock
                    780:  * => caller should have pmap locked
1.49      thorpej   781:  * => we will gain the lock on the vm_page and allocate the new pv_entry
1.17      chris     782:  * => caller should adjust ptp's wire_count before calling
                    783:  * => caller should not adjust pmap's wire_count
                    784:  */
                    785:
                    786: __inline static void
1.73      thorpej   787: pmap_enter_pv(struct vm_page *pg, struct pv_entry *pve, struct pmap *pmap,
                    788:     vaddr_t va, struct vm_page *ptp, int flags)
1.17      chris     789: {
                    790:        pve->pv_pmap = pmap;
                    791:        pve->pv_va = va;
                    792:        pve->pv_ptp = ptp;                      /* NULL for kernel pmap */
                    793:        pve->pv_flags = flags;
1.49      thorpej   794:        simple_lock(&pg->mdpage.pvh_slock);     /* lock vm_page */
                    795:        pve->pv_next = pg->mdpage.pvh_list;     /* add to ... */
                    796:        pg->mdpage.pvh_list = pve;              /* ... locked list */
                    797:        simple_unlock(&pg->mdpage.pvh_slock);   /* unlock, done! */
1.78      thorpej   798:        if (pve->pv_flags & PVF_WIRED)
1.17      chris     799:                ++pmap->pm_stats.wired_count;
1.105     thorpej   800: #ifdef PMAP_ALIAS_DEBUG
                    801:     {
                    802:        int s = splhigh();
                    803:        if (pve->pv_flags & PVF_WRITE)
                    804:                pg->mdpage.rw_mappings++;
                    805:        else
                    806:                pg->mdpage.ro_mappings++;
                    807:        if (pg->mdpage.rw_mappings != 0 &&
                    808:            (pg->mdpage.kro_mappings != 0 || pg->mdpage.krw_mappings != 0)) {
                    809:                printf("pmap_enter_pv: rw %u, kro %u, krw %u\n",
                    810:                    pg->mdpage.rw_mappings, pg->mdpage.kro_mappings,
                    811:                    pg->mdpage.krw_mappings);
                    812:        }
                    813:        splx(s);
                    814:     }
                    815: #endif /* PMAP_ALIAS_DEBUG */
1.17      chris     816: }
                    817:
                    818: /*
                    819:  * pmap_remove_pv: try to remove a mapping from a pv_list
                    820:  *
                    821:  * => caller should hold proper lock on pmap_main_lock
                    822:  * => pmap should be locked
1.49      thorpej   823:  * => caller should hold lock on vm_page [so that attrs can be adjusted]
1.17      chris     824:  * => caller should adjust ptp's wire_count and free PTP if needed
                    825:  * => caller should NOT adjust pmap's wire_count
                    826:  * => we return the removed pve
                    827:  */
                    828:
                    829: __inline static struct pv_entry *
1.73      thorpej   830: pmap_remove_pv(struct vm_page *pg, struct pmap *pmap, vaddr_t va)
1.17      chris     831: {
                    832:        struct pv_entry *pve, **prevptr;
                    833:
1.49      thorpej   834:        prevptr = &pg->mdpage.pvh_list;         /* previous pv_entry pointer */
1.17      chris     835:        pve = *prevptr;
                    836:        while (pve) {
                    837:                if (pve->pv_pmap == pmap && pve->pv_va == va) { /* match? */
                    838:                        *prevptr = pve->pv_next;                /* remove it! */
1.78      thorpej   839:                        if (pve->pv_flags & PVF_WIRED)
1.17      chris     840:                            --pmap->pm_stats.wired_count;
1.105     thorpej   841: #ifdef PMAP_ALIAS_DEBUG
                    842:     {
                    843:                        int s = splhigh();
                    844:                        if (pve->pv_flags & PVF_WRITE) {
                    845:                                KASSERT(pg->mdpage.rw_mappings != 0);
                    846:                                pg->mdpage.rw_mappings--;
                    847:                        } else {
                    848:                                KASSERT(pg->mdpage.ro_mappings != 0);
                    849:                                pg->mdpage.ro_mappings--;
                    850:                        }
                    851:                        splx(s);
                    852:     }
                    853: #endif /* PMAP_ALIAS_DEBUG */
1.17      chris     854:                        break;
                    855:                }
                    856:                prevptr = &pve->pv_next;                /* previous pointer */
                    857:                pve = pve->pv_next;                     /* advance */
                    858:        }
                    859:        return(pve);                            /* return removed pve */
                    860: }
                    861:
                    862: /*
                    863:  *
                    864:  * pmap_modify_pv: Update pv flags
                    865:  *
1.49      thorpej   866:  * => caller should hold lock on vm_page [so that attrs can be adjusted]
1.17      chris     867:  * => caller should NOT adjust pmap's wire_count
1.29      rearnsha  868:  * => caller must call pmap_vac_me_harder() if writable status of a page
                    869:  *    may have changed.
1.17      chris     870:  * => we return the old flags
                    871:  *
1.1       matt      872:  * Modify a physical-virtual mapping in the pv table
                    873:  */
                    874:
1.73      thorpej   875: static /* __inline */ u_int
                    876: pmap_modify_pv(struct pmap *pmap, vaddr_t va, struct vm_page *pg,
                    877:     u_int bic_mask, u_int eor_mask)
1.1       matt      878: {
                    879:        struct pv_entry *npv;
                    880:        u_int flags, oflags;
                    881:
                    882:        /*
                    883:         * There is at least one VA mapping this page.
                    884:         */
                    885:
1.49      thorpej   886:        for (npv = pg->mdpage.pvh_list; npv; npv = npv->pv_next) {
1.1       matt      887:                if (pmap == npv->pv_pmap && va == npv->pv_va) {
                    888:                        oflags = npv->pv_flags;
                    889:                        npv->pv_flags = flags =
                    890:                            ((oflags & ~bic_mask) ^ eor_mask);
1.78      thorpej   891:                        if ((flags ^ oflags) & PVF_WIRED) {
                    892:                                if (flags & PVF_WIRED)
1.1       matt      893:                                        ++pmap->pm_stats.wired_count;
                    894:                                else
                    895:                                        --pmap->pm_stats.wired_count;
                    896:                        }
1.105     thorpej   897: #ifdef PMAP_ALIAS_DEBUG
                    898:     {
                    899:                        int s = splhigh();
                    900:                        if ((flags ^ oflags) & PVF_WRITE) {
                    901:                                if (flags & PVF_WRITE) {
                    902:                                        pg->mdpage.rw_mappings++;
                    903:                                        pg->mdpage.ro_mappings--;
                    904:                                        if (pg->mdpage.rw_mappings != 0 &&
                    905:                                            (pg->mdpage.kro_mappings != 0 ||
                    906:                                             pg->mdpage.krw_mappings != 0)) {
                    907:                                                printf("pmap_modify_pv: rw %u, "
                    908:                                                    "kro %u, krw %u\n",
                    909:                                                    pg->mdpage.rw_mappings,
                    910:                                                    pg->mdpage.kro_mappings,
                    911:                                                    pg->mdpage.krw_mappings);
                    912:                                        }
                    913:                                } else {
                    914:                                        KASSERT(pg->mdpage.rw_mappings != 0);
                    915:                                        pg->mdpage.rw_mappings--;
                    916:                                        pg->mdpage.ro_mappings++;
                    917:                                }
                    918:                        }
                    919:                        splx(s);
                    920:     }
                    921: #endif /* PMAP_ALIAS_DEBUG */
1.1       matt      922:                        return (oflags);
                    923:                }
                    924:        }
                    925:        return (0);
                    926: }
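/*
 * Worked example of the bic/eor interface above (hypothetical calls):
 * the new flag word is computed as (oflags & ~bic_mask) ^ eor_mask, so
 *
 *	pmap_modify_pv(pmap, va, pg, PVF_WIRED, 0)	clears the wired bit,
 *	pmap_modify_pv(pmap, va, pg, 0, PVF_WIRED)	toggles it, and
 *	pmap_modify_pv(pmap, va, pg, PVF_WIRED, PVF_WIRED)	sets it,
 *
 * with the previous flags returned in each case (0 if no matching
 * mapping exists).
 */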
                    927:
                    928: /*
                    929:  * Map the specified level 2 pagetable into the level 1 page table for
                    930:  * the given pmap to cover a chunk of virtual address space starting from the
                    931:  * address specified.
                    932:  */
1.113     thorpej   933: #define        PMAP_PTP_SELFREF        0x01
                    934: #define        PMAP_PTP_CACHEABLE      0x02
                    935:
1.73      thorpej   936: static __inline void
1.113     thorpej   937: pmap_map_in_l1(struct pmap *pmap, vaddr_t va, paddr_t l2pa, int flags)
1.1       matt      938: {
                    939:        vaddr_t ptva;
                    940:
1.115   ! thorpej   941:        KASSERT((va & PD_OFFSET) == 0);         /* XXX KDASSERT */
        !           942:
1.1       matt      943:        /* Calculate the index into the L1 page table. */
1.115   ! thorpej   944:        ptva = va >> L1_S_SHIFT;
1.1       matt      945:
                    946:        /* Map page table into the L1. */
1.83      thorpej   947:        pmap->pm_pdir[ptva + 0] = L1_C_PROTO | (l2pa + 0x000);
                    948:        pmap->pm_pdir[ptva + 1] = L1_C_PROTO | (l2pa + 0x400);
                    949:        pmap->pm_pdir[ptva + 2] = L1_C_PROTO | (l2pa + 0x800);
                    950:        pmap->pm_pdir[ptva + 3] = L1_C_PROTO | (l2pa + 0xc00);
1.110     thorpej   951:        cpu_dcache_wb_range((vaddr_t) &pmap->pm_pdir[ptva + 0], 16);
1.1       matt      952:
                    953:        /* Map the page table into the page table area. */
1.113     thorpej   954:        if (flags & PMAP_PTP_SELFREF) {
1.83      thorpej   955:                *((pt_entry_t *)(pmap->pm_vptpt + ptva)) = L2_S_PROTO | l2pa |
1.113     thorpej   956:                    L2_S_PROT(PTE_KERNEL, VM_PROT_READ|VM_PROT_WRITE) |
                    957:                    ((flags & PMAP_PTP_CACHEABLE) ? pte_l2_s_cache_mode : 0);
                    958:                PTE_SYNC_CURRENT(pmap, (pt_entry_t *)(pmap->pm_vptpt + ptva));
                    959:        }
1.1       matt      960: }
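
/*
 * A minimal, self-contained sketch of the index arithmetic used by
 * pmap_map_in_l1() above.  The constants are the usual ARM32 values
 * assumed for illustration (1MB per L1 entry, 1KB coarse L2 tables),
 * not taken from this file: one 4KB page of L2 tables spans 4MB of VA
 * and therefore fills four consecutive L1 slots, pointing at offsets
 * 0x000, 0x400, 0x800 and 0xc00 within that page.
 */
#include <stdio.h>

#define SKETCH_L1_S_SHIFT	20	/* assumed: each L1 entry maps 1MB */
#define SKETCH_L2_T_SIZE	0x400	/* assumed: 1KB per coarse L2 table */

int
main(void)
{
	unsigned long va = 0x00800000;		/* example 4MB-aligned VA */
	unsigned long l2pa = 0x10000000;	/* example PA of the L2 page */
	unsigned long ptva = va >> SKETCH_L1_S_SHIFT;
	int i;

	for (i = 0; i < 4; i++) {
		/* Each successive L1 slot points 1KB further into l2pa. */
		printf("pm_pdir[%lu] -> coarse L2 table at 0x%08lx\n",
		    ptva + i, l2pa + i * SKETCH_L2_T_SIZE);
	}
	return 0;
}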
                    961:
                    962: #if 0
1.73      thorpej   963: static __inline void
                    964: pmap_unmap_in_l1(struct pmap *pmap, vaddr_t va)
1.1       matt      965: {
                    966:        vaddr_t ptva;
                    967:
1.115   ! thorpej   968:        KASSERT((va & PD_OFFSET) == 0);         /* XXX KDASSERT */
        !           969:
1.1       matt      970:        /* Calculate the index into the L1 page table. */
1.115   ! thorpej   971:        ptva = va >> L1_S_SHIFT;
1.1       matt      972:
                    973:        /* Unmap page table from the L1. */
                    974:        pmap->pm_pdir[ptva + 0] = 0;
                    975:        pmap->pm_pdir[ptva + 1] = 0;
                    976:        pmap->pm_pdir[ptva + 2] = 0;
                    977:        pmap->pm_pdir[ptva + 3] = 0;
1.110     thorpej   978:        cpu_dcache_wb_range((vaddr_t) &pmap->pm_pdir[ptva + 0], 16);
1.1       matt      979:
                    980:        /* Unmap the page table from the page table area. */
                    981:        *((pt_entry_t *)(pmap->pm_vptpt + ptva)) = 0;
1.113     thorpej   982:        PTE_SYNC_CURRENT(pmap, (pt_entry_t *)(pmap->pm_vptpt + ptva));
1.1       matt      983: }
                    984: #endif
                    985:
                    986: /*
                    987:  *     Used to map a range of physical addresses into kernel
                    988:  *     virtual address space.
                    989:  *
                    990:  *     For now, VM is already on, we only need to map the
                    991:  *     specified memory.
1.100     thorpej   992:  *
                    993:  *     XXX This routine should eventually go away; it's only used
                    994:  *     XXX by machine-dependent crash dump code.
1.1       matt      995:  */
                    996: vaddr_t
1.73      thorpej   997: pmap_map(vaddr_t va, paddr_t spa, paddr_t epa, vm_prot_t prot)
1.1       matt      998: {
1.100     thorpej   999:        pt_entry_t *pte;
                   1000:
1.1       matt     1001:        while (spa < epa) {
1.100     thorpej  1002:                pte = vtopte(va);
                   1003:
                   1004:                *pte = L2_S_PROTO | spa |
                   1005:                    L2_S_PROT(PTE_KERNEL, prot) | pte_l2_s_cache_mode;
1.112     thorpej  1006:                PTE_SYNC(pte);
1.100     thorpej  1007:                cpu_tlb_flushID_SE(va);
1.1       matt     1008:                va += NBPG;
                   1009:                spa += NBPG;
                   1010:        }
1.19      chris    1011:        pmap_update(pmap_kernel());
1.1       matt     1012:        return(va);
                   1013: }
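
/*
 * Sketch of what a pmap_map() call works out to (the addresses are
 * made-up examples and a 4KB page size is assumed): the [spa, epa)
 * range is mapped one page at a time starting at va, and the VA just
 * past the last mapped page is returned.
 */
#include <stdio.h>

#define SKETCH_NBPG	4096		/* assumed page size */

int
main(void)
{
	unsigned long va = 0xf0000000UL;	/* example kernel VA */
	unsigned long spa = 0x00100000UL;	/* example start PA */
	unsigned long epa = 0x00104000UL;	/* example end PA */
	unsigned long pages = (epa - spa) / SKETCH_NBPG;

	printf("%lu pages mapped, next free VA = 0x%08lx\n",
	    pages, va + (epa - spa));
	return 0;
}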
                   1014:
                   1015:
                   1016: /*
1.3       matt     1017:  * void pmap_bootstrap(pd_entry_t *kernel_l1pt, pv_addr_t kernel_ptpt)
1.1       matt     1018:  *
                   1019:  * bootstrap the pmap system. This is called from initarm and allows
                    1020:  * the pmap system to initialise any structures it requires.
                   1021:  *
                   1022:  * Currently this sets up the kernel_pmap that is statically allocated
                    1023:  * and also allocates virtual addresses for certain page hooks.
                    1024:  * Currently several page hooks are allocated; they are used, for
                    1025:  * example, to zero and copy physical pages of memory.
                   1026:  * It also initialises the start and end address of the kernel data space.
                   1027:  */
                   1028:
1.17      chris    1029: char *boot_head;
1.1       matt     1030:
                   1031: void
1.73      thorpej  1032: pmap_bootstrap(pd_entry_t *kernel_l1pt, pv_addr_t kernel_ptpt)
1.1       matt     1033: {
1.54      thorpej  1034:        pt_entry_t *pte;
1.1       matt     1035:
1.15      chris    1036:        pmap_kernel()->pm_pdir = kernel_l1pt;
                   1037:        pmap_kernel()->pm_pptpt = kernel_ptpt.pv_pa;
                   1038:        pmap_kernel()->pm_vptpt = kernel_ptpt.pv_va;
                   1039:        simple_lock_init(&pmap_kernel()->pm_lock);
1.16      chris    1040:        pmap_kernel()->pm_obj.pgops = NULL;
                   1041:        TAILQ_INIT(&(pmap_kernel()->pm_obj.memq));
                   1042:        pmap_kernel()->pm_obj.uo_npages = 0;
                   1043:        pmap_kernel()->pm_obj.uo_refs = 1;
1.1       matt     1044:
1.54      thorpej  1045:        virtual_avail = KERNEL_VM_BASE;
1.74      thorpej  1046:        virtual_end = KERNEL_VM_BASE + KERNEL_VM_SIZE;
1.1       matt     1047:
                   1048:        /*
1.54      thorpej  1049:         * now we allocate the "special" VAs which are used for tmp mappings
                   1050:         * by the pmap (and other modules).  we allocate the VAs by advancing
                   1051:         * virtual_avail (note that there are no pages mapped at these VAs).
                   1052:         * we find the PTE that maps the allocated VA via the linear PTE
                   1053:         * mapping.
1.1       matt     1054:         */
                   1055:
1.54      thorpej  1056:        pte = ((pt_entry_t *) PTE_BASE) + atop(virtual_avail);
                   1057:
                   1058:        csrcp = virtual_avail; csrc_pte = pte;
                   1059:        virtual_avail += PAGE_SIZE; pte++;
                   1060:
                   1061:        cdstp = virtual_avail; cdst_pte = pte;
                   1062:        virtual_avail += PAGE_SIZE; pte++;
                   1063:
                   1064:        memhook = (char *) virtual_avail;       /* don't need pte */
                   1065:        virtual_avail += PAGE_SIZE; pte++;
                   1066:
                   1067:        msgbufaddr = (caddr_t) virtual_avail;   /* don't need pte */
                   1068:        virtual_avail += round_page(MSGBUFSIZE);
                   1069:        pte += atop(round_page(MSGBUFSIZE));
1.1       matt     1070:
1.17      chris    1071:        /*
                   1072:         * init the static-global locks and global lists.
                   1073:         */
                   1074:        spinlockinit(&pmap_main_lock, "pmaplk", 0);
                   1075:        simple_lock_init(&pvalloc_lock);
1.48      chris    1076:        simple_lock_init(&pmaps_lock);
                   1077:        LIST_INIT(&pmaps);
1.17      chris    1078:        TAILQ_INIT(&pv_freepages);
                   1079:        TAILQ_INIT(&pv_unusedpgs);
1.1       matt     1080:
1.10      chris    1081:        /*
                   1082:         * initialize the pmap pool.
                   1083:         */
                   1084:
                   1085:        pool_init(&pmap_pmap_pool, sizeof(struct pmap), 0, 0, 0, "pmappl",
1.52      thorpej  1086:                  &pool_allocator_nointr);
1.111     thorpej  1087:
                   1088:        /*
                   1089:         * initialize the PT-PT pool and cache.
                   1090:         */
                   1091:
                   1092:        pool_init(&pmap_ptpt_pool, PAGE_SIZE, 0, 0, 0, "ptptpl",
                   1093:                  &pmap_ptpt_allocator);
                   1094:        pool_cache_init(&pmap_ptpt_cache, &pmap_ptpt_pool,
                   1095:                        pmap_ptpt_ctor, NULL, NULL);
                   1096:
1.36      thorpej  1097:        cpu_dcache_wbinv_all();
1.1       matt     1098: }
                   1099:
                   1100: /*
                   1101:  * void pmap_init(void)
                   1102:  *
                   1103:  * Initialize the pmap module.
                   1104:  * Called by vm_init() in vm/vm_init.c in order to initialise
                   1105:  * any structures that the pmap system needs to map virtual memory.
                   1106:  */
                   1107:
                   1108: extern int physmem;
                   1109:
                   1110: void
1.73      thorpej  1111: pmap_init(void)
1.1       matt     1112: {
                   1113:
                   1114:        /*
                   1115:         * Set the available memory vars - These do not map to real memory
                    1116:         * addresses and cannot, as the physical memory is fragmented.
                   1117:         * They are used by ps for %mem calculations.
                   1118:         * One could argue whether this should be the entire memory or just
                    1119:         * the memory that is usable in a user process.
                   1120:         */
                   1121:        avail_start = 0;
                   1122:        avail_end = physmem * NBPG;
                   1123:
1.17      chris    1124:        /*
                    1125:         * now we need enough free pv_entry structures to allow us to get
                   1126:         * the kmem_map/kmem_object allocated and inited (done after this
                   1127:         * function is finished).  to do this we allocate one bootstrap page out
                   1128:         * of kernel_map and use it to provide an initial pool of pv_entry
                   1129:         * structures.   we never free this page.
                   1130:         */
                   1131:
                   1132:        pv_initpage = (struct pv_page *) uvm_km_alloc(kernel_map, PAGE_SIZE);
                   1133:        if (pv_initpage == NULL)
                   1134:                panic("pmap_init: pv_initpage");
                   1135:        pv_cachedva = 0;   /* a VA we have allocated but not used yet */
                   1136:        pv_nfpvents = 0;
                   1137:        (void) pmap_add_pvpage(pv_initpage, FALSE);
                   1138:
1.1       matt     1139:        pmap_initialized = TRUE;
                   1140:
                   1141:        /* Initialise our L1 page table queues and counters */
                   1142:        SIMPLEQ_INIT(&l1pt_static_queue);
                   1143:        l1pt_static_queue_count = 0;
                   1144:        l1pt_static_create_count = 0;
                   1145:        SIMPLEQ_INIT(&l1pt_queue);
                   1146:        l1pt_queue_count = 0;
                   1147:        l1pt_create_count = 0;
                   1148:        l1pt_reuse_count = 0;
                   1149: }
                   1150:
                   1151: /*
                   1152:  * pmap_postinit()
                   1153:  *
                   1154:  * This routine is called after the vm and kmem subsystems have been
                   1155:  * initialised. This allows the pmap code to perform any initialisation
                    1156:  * that can only be done once the memory allocation is in place.
                   1157:  */
                   1158:
                   1159: void
1.73      thorpej  1160: pmap_postinit(void)
1.1       matt     1161: {
                   1162:        int loop;
                   1163:        struct l1pt *pt;
                   1164:
                   1165: #ifdef PMAP_STATIC_L1S
                   1166:        for (loop = 0; loop < PMAP_STATIC_L1S; ++loop) {
                   1167: #else  /* PMAP_STATIC_L1S */
                   1168:        for (loop = 0; loop < max_processes; ++loop) {
                   1169: #endif /* PMAP_STATIC_L1S */
                   1170:                /* Allocate a L1 page table */
                   1171:                pt = pmap_alloc_l1pt();
                   1172:                if (!pt)
                   1173:                        panic("Cannot allocate static L1 page tables\n");
                   1174:
                   1175:                /* Clean it */
1.81      thorpej  1176:                bzero((void *)pt->pt_va, L1_TABLE_SIZE);
1.1       matt     1177:                pt->pt_flags |= (PTFLAG_STATIC | PTFLAG_CLEAN);
                   1178:                /* Add the page table to the queue */
                   1179:                SIMPLEQ_INSERT_TAIL(&l1pt_static_queue, pt, pt_queue);
                   1180:                ++l1pt_static_queue_count;
                   1181:                ++l1pt_static_create_count;
                   1182:        }
                   1183: }
                   1184:
                   1185:
                   1186: /*
                   1187:  * Create and return a physical map.
                   1188:  *
                   1189:  * If the size specified for the map is zero, the map is an actual physical
                   1190:  * map, and may be referenced by the hardware.
                   1191:  *
                   1192:  * If the size specified is non-zero, the map will be used in software only,
                   1193:  * and is bounded by that size.
                   1194:  */
                   1195:
                   1196: pmap_t
1.73      thorpej  1197: pmap_create(void)
1.1       matt     1198: {
1.15      chris    1199:        struct pmap *pmap;
1.1       matt     1200:
1.10      chris    1201:        /*
                   1202:         * Fetch pmap entry from the pool
                   1203:         */
                   1204:
                   1205:        pmap = pool_get(&pmap_pmap_pool, PR_WAITOK);
1.17      chris    1206:        /* XXX is this really needed! */
                   1207:        memset(pmap, 0, sizeof(*pmap));
1.1       matt     1208:
1.16      chris    1209:        simple_lock_init(&pmap->pm_obj.vmobjlock);
                   1210:        pmap->pm_obj.pgops = NULL;      /* currently not a mappable object */
                   1211:        TAILQ_INIT(&pmap->pm_obj.memq);
                   1212:        pmap->pm_obj.uo_npages = 0;
                   1213:        pmap->pm_obj.uo_refs = 1;
                   1214:        pmap->pm_stats.wired_count = 0;
                   1215:        pmap->pm_stats.resident_count = 1;
1.70      thorpej  1216:        pmap->pm_ptphint = NULL;
1.16      chris    1217:
1.1       matt     1218:        /* Now init the machine part of the pmap */
                   1219:        pmap_pinit(pmap);
                   1220:        return(pmap);
                   1221: }
                   1222:
                   1223: /*
                   1224:  * pmap_alloc_l1pt()
                   1225:  *
                    1226:  * This routine allocates physical and virtual memory for an L1 page table
                    1227:  * and wires it.
                    1228:  * An l1pt structure is returned to describe the allocated page table.
                   1229:  *
                   1230:  * This routine is allowed to fail if the required memory cannot be allocated.
                   1231:  * In this case NULL is returned.
                   1232:  */
                   1233:
                   1234: struct l1pt *
                   1235: pmap_alloc_l1pt(void)
                   1236: {
1.2       matt     1237:        paddr_t pa;
                   1238:        vaddr_t va;
1.1       matt     1239:        struct l1pt *pt;
                   1240:        int error;
1.9       chs      1241:        struct vm_page *m;
1.1       matt     1242:
                   1243:        /* Allocate virtual address space for the L1 page table */
1.81      thorpej  1244:        va = uvm_km_valloc(kernel_map, L1_TABLE_SIZE);
1.1       matt     1245:        if (va == 0) {
                   1246: #ifdef DIAGNOSTIC
1.26      rearnsha 1247:                PDEBUG(0,
                   1248:                    printf("pmap: Cannot allocate pageable memory for L1\n"));
1.1       matt     1249: #endif /* DIAGNOSTIC */
                   1250:                return(NULL);
                   1251:        }
                   1252:
                   1253:        /* Allocate memory for the l1pt structure */
                   1254:        pt = (struct l1pt *)malloc(sizeof(struct l1pt), M_VMPMAP, M_WAITOK);
                   1255:
                   1256:        /*
                   1257:         * Allocate pages from the VM system.
                   1258:         */
1.81      thorpej  1259:        error = uvm_pglistalloc(L1_TABLE_SIZE, physical_start, physical_end,
                   1260:            L1_TABLE_SIZE, 0, &pt->pt_plist, 1, M_WAITOK);
1.1       matt     1261:        if (error) {
                   1262: #ifdef DIAGNOSTIC
1.26      rearnsha 1263:                PDEBUG(0,
                   1264:                    printf("pmap: Cannot allocate physical mem for L1 (%d)\n",
                   1265:                    error));
1.1       matt     1266: #endif /* DIAGNOSTIC */
                   1267:                /* Release the resources we already have claimed */
                   1268:                free(pt, M_VMPMAP);
1.81      thorpej  1269:                uvm_km_free(kernel_map, va, L1_TABLE_SIZE);
1.1       matt     1270:                return(NULL);
                   1271:        }
                   1272:
                   1273:        /* Map our physical pages into our virtual space */
                   1274:        pt->pt_va = va;
1.51      chris    1275:        m = TAILQ_FIRST(&pt->pt_plist);
1.81      thorpej  1276:        while (m && va < (pt->pt_va + L1_TABLE_SIZE)) {
1.1       matt     1277:                pa = VM_PAGE_TO_PHYS(m);
                   1278:
1.110     thorpej  1279:                pmap_kenter_pa(va, pa, VM_PROT_READ|VM_PROT_WRITE);
1.1       matt     1280:
                   1281:                va += NBPG;
                   1282:                m = m->pageq.tqe_next;
                   1283:        }
                   1284:
                   1285: #ifdef DIAGNOSTIC
                   1286:        if (m)
                   1287:                panic("pmap_alloc_l1pt: pglist not empty\n");
                   1288: #endif /* DIAGNOSTIC */
                   1289:
                   1290:        pt->pt_flags = 0;
                   1291:        return(pt);
                   1292: }
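
/*
 * Size/alignment arithmetic behind the L1 allocation above, as a
 * stand-alone sketch (the constants are the usual ARM32 values assumed
 * for illustration): 4096 entries of 4 bytes each give a 16KB table,
 * and it is allocated on a 16KB boundary because the translation table
 * base register only holds the upper 18 bits of the table's address.
 */
#include <stdio.h>

int
main(void)
{
	unsigned int entries = 1u << (32 - 20);	/* one entry per 1MB section */
	unsigned int size = entries * 4;	/* bytes per L1 table */

	printf("L1 entries: %u, L1 table size: %u bytes (0x%x), "
	    "alignment: 0x%x\n", entries, size, size, size);
	return 0;
}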
                   1293:
                   1294: /*
                   1295:  * Free a L1 page table previously allocated with pmap_alloc_l1pt().
                   1296:  */
1.33      chris    1297: static void
1.73      thorpej  1298: pmap_free_l1pt(struct l1pt *pt)
1.1       matt     1299: {
                    1300:        /* Separate the physical memory from the virtual space */
1.81      thorpej  1301:        pmap_kremove(pt->pt_va, L1_TABLE_SIZE);
1.19      chris    1302:        pmap_update(pmap_kernel());
1.1       matt     1303:
                   1304:        /* Return the physical memory */
                   1305:        uvm_pglistfree(&pt->pt_plist);
                   1306:
                   1307:        /* Free the virtual space */
1.81      thorpej  1308:        uvm_km_free(kernel_map, pt->pt_va, L1_TABLE_SIZE);
1.1       matt     1309:
                   1310:        /* Free the l1pt structure */
                   1311:        free(pt, M_VMPMAP);
                   1312: }
                   1313:
                   1314: /*
1.111     thorpej  1315:  * pmap_ptpt_page_alloc:
1.93      thorpej  1316:  *
1.111     thorpej  1317:  *     Back-end page allocator for the PT-PT pool.
1.93      thorpej  1318:  */
1.111     thorpej  1319: static void *
                   1320: pmap_ptpt_page_alloc(struct pool *pp, int flags)
1.93      thorpej  1321: {
                   1322:        struct vm_page *pg;
                   1323:        pt_entry_t *pte;
1.111     thorpej  1324:        vaddr_t va;
1.93      thorpej  1325:
1.111     thorpej  1326:        /* XXX PR_WAITOK? */
                   1327:        va = uvm_km_valloc(kernel_map, L2_TABLE_SIZE);
                   1328:        if (va == 0)
                   1329:                return (NULL);
1.93      thorpej  1330:
                   1331:        for (;;) {
                   1332:                pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO);
                   1333:                if (pg != NULL)
                   1334:                        break;
1.111     thorpej  1335:                if ((flags & PR_WAITOK) == 0) {
                   1336:                        uvm_km_free(kernel_map, va, L2_TABLE_SIZE);
                   1337:                        return (NULL);
                   1338:                }
1.93      thorpej  1339:                uvm_wait("pmap_ptpt");
                   1340:        }
                   1341:
1.111     thorpej  1342:        pte = vtopte(va);
1.93      thorpej  1343:        KDASSERT(pmap_pte_v(pte) == 0);
                   1344:
1.111     thorpej  1345:        *pte = L2_S_PROTO | VM_PAGE_TO_PHYS(pg) |
                   1346:             L2_S_PROT(PTE_KERNEL, VM_PROT_READ|VM_PROT_WRITE);
1.112     thorpej  1347:        PTE_SYNC(pte);
1.105     thorpej  1348: #ifdef PMAP_ALIAS_DEBUG
                   1349:     {
                   1350:        int s = splhigh();
                   1351:        pg->mdpage.krw_mappings++;
                   1352:        splx(s);
                   1353:     }
                   1354: #endif /* PMAP_ALIAS_DEBUG */
1.93      thorpej  1355:
1.111     thorpej  1356:        return ((void *) va);
1.93      thorpej  1357: }
                   1358:
                   1359: /*
1.111     thorpej  1360:  * pmap_ptpt_page_free:
1.93      thorpej  1361:  *
1.111     thorpej  1362:  *     Back-end page free'er for the PT-PT pool.
1.93      thorpej  1363:  */
                   1364: static void
1.111     thorpej  1365: pmap_ptpt_page_free(struct pool *pp, void *v)
1.93      thorpej  1366: {
1.111     thorpej  1367:        vaddr_t va = (vaddr_t) v;
                   1368:        paddr_t pa;
                   1369:
                   1370:        pa = vtophys(va);
1.93      thorpej  1371:
1.111     thorpej  1372:        pmap_kremove(va, L2_TABLE_SIZE);
1.93      thorpej  1373:        pmap_update(pmap_kernel());
                   1374:
1.111     thorpej  1375:        uvm_pagefree(PHYS_TO_VM_PAGE(pa));
                   1376:
                   1377:        uvm_km_free(kernel_map, va, L2_TABLE_SIZE);
                   1378: }
                   1379:
                   1380: /*
                   1381:  * pmap_ptpt_ctor:
                   1382:  *
                   1383:  *     Constructor for the PT-PT cache.
                   1384:  */
                   1385: static int
                   1386: pmap_ptpt_ctor(void *arg, void *object, int flags)
                   1387: {
                   1388:        caddr_t vptpt = object;
                   1389:
                   1390:        /* Page is already zero'd. */
1.93      thorpej  1391:
1.111     thorpej  1392:        /*
                   1393:         * Map in kernel PTs.
                   1394:         *
                   1395:         * XXX THIS IS CURRENTLY DONE AS UNCACHED MEMORY ACCESS.
                   1396:         */
                   1397:        memcpy(vptpt + ((L1_TABLE_SIZE - KERNEL_PD_SIZE) >> 2),
                   1398:               (char *)(PTE_BASE + (PTE_BASE >> (PGSHIFT - 2)) +
                   1399:                        ((L1_TABLE_SIZE - KERNEL_PD_SIZE) >> 2)),
                   1400:               (KERNEL_PD_SIZE >> 2));
                   1401:
                   1402:        return (0);
1.93      thorpej  1403: }
                   1404:
                   1405: /*
1.1       matt     1406:  * Allocate a page directory.
                   1407:  * This routine will either allocate a new page directory from the pool
                   1408:  * of L1 page tables currently held by the kernel or it will allocate
                   1409:  * a new one via pmap_alloc_l1pt().
                   1410:  * It will then initialise the l1 page table for use.
                   1411:  */
1.33      chris    1412: static int
1.73      thorpej  1413: pmap_allocpagedir(struct pmap *pmap)
1.1       matt     1414: {
1.111     thorpej  1415:        vaddr_t vptpt;
1.2       matt     1416:        paddr_t pa;
1.1       matt     1417:        struct l1pt *pt;
1.111     thorpej  1418:        u_int gen;
1.1       matt     1419:
                   1420:        PDEBUG(0, printf("pmap_allocpagedir(%p)\n", pmap));
                   1421:
                   1422:        /* Do we have any spare L1's lying around ? */
                   1423:        if (l1pt_static_queue_count) {
                   1424:                --l1pt_static_queue_count;
1.98      lukem    1425:                pt = SIMPLEQ_FIRST(&l1pt_static_queue);
                   1426:                SIMPLEQ_REMOVE_HEAD(&l1pt_static_queue, pt_queue);
1.1       matt     1427:        } else if (l1pt_queue_count) {
                   1428:                --l1pt_queue_count;
1.98      lukem    1429:                pt = SIMPLEQ_FIRST(&l1pt_queue);
                   1430:                SIMPLEQ_REMOVE_HEAD(&l1pt_queue, pt_queue);
1.1       matt     1431:                ++l1pt_reuse_count;
                   1432:        } else {
                   1433:                pt = pmap_alloc_l1pt();
                   1434:                if (!pt)
                   1435:                        return(ENOMEM);
                   1436:                ++l1pt_create_count;
                   1437:        }
                   1438:
                   1439:        /* Store the pointer to the l1 descriptor in the pmap. */
                   1440:        pmap->pm_l1pt = pt;
                   1441:
                   1442:        /* Get the physical address of the start of the l1 */
1.51      chris    1443:        pa = VM_PAGE_TO_PHYS(TAILQ_FIRST(&pt->pt_plist));
1.1       matt     1444:
                   1445:        /* Store the virtual address of the l1 in the pmap. */
                   1446:        pmap->pm_pdir = (pd_entry_t *)pt->pt_va;
                   1447:
                   1448:        /* Clean the L1 if it is dirty */
1.110     thorpej  1449:        if (!(pt->pt_flags & PTFLAG_CLEAN)) {
1.81      thorpej  1450:                bzero((void *)pmap->pm_pdir, (L1_TABLE_SIZE - KERNEL_PD_SIZE));
1.110     thorpej  1451:                cpu_dcache_wb_range((vaddr_t) pmap->pm_pdir,
                   1452:                    (L1_TABLE_SIZE - KERNEL_PD_SIZE));
                   1453:        }
1.1       matt     1454:
                   1455:        /* Allocate a page table to map all the page tables for this pmap */
1.111     thorpej  1456:        KASSERT(pmap->pm_vptpt == 0);
                   1457:
                   1458:  try_again:
                   1459:        gen = pmap_ptpt_cache_generation;
                   1460:        vptpt = (vaddr_t) pool_cache_get(&pmap_ptpt_cache, PR_WAITOK);
                   1461:        if (vptpt == NULL) {
                   1462:                PDEBUG(0, printf("pmap_alloc_pagedir: no KVA for PTPT\n"));
1.93      thorpej  1463:                pmap_freepagedir(pmap);
1.111     thorpej  1464:                return (ENOMEM);
1.5       toshii   1465:        }
                   1466:
1.93      thorpej  1467:        /* need to lock this all up for growkernel */
1.48      chris    1468:        simple_lock(&pmaps_lock);
                   1469:
1.111     thorpej  1470:        if (gen != pmap_ptpt_cache_generation) {
                   1471:                simple_unlock(&pmaps_lock);
                   1472:                pool_cache_destruct_object(&pmap_ptpt_cache, (void *) vptpt);
                   1473:                goto try_again;
                   1474:        }
                   1475:
                   1476:        pmap->pm_vptpt = vptpt;
                   1477:        pmap->pm_pptpt = vtophys(vptpt);
                   1478:
1.64      thorpej  1479:        /* Duplicate the kernel mappings. */
1.81      thorpej  1480:        bcopy((char *)pmap_kernel()->pm_pdir + (L1_TABLE_SIZE - KERNEL_PD_SIZE),
                   1481:                (char *)pmap->pm_pdir + (L1_TABLE_SIZE - KERNEL_PD_SIZE),
1.48      chris    1482:                KERNEL_PD_SIZE);
1.110     thorpej  1483:        cpu_dcache_wb_range((vaddr_t)pmap->pm_pdir +
                   1484:            (L1_TABLE_SIZE - KERNEL_PD_SIZE), KERNEL_PD_SIZE);
1.48      chris    1485:
1.1       matt     1486:        /* Wire in this page table */
1.113     thorpej  1487:        pmap_map_in_l1(pmap, PTE_BASE, pmap->pm_pptpt, PMAP_PTP_SELFREF);
1.1       matt     1488:
                   1489:        pt->pt_flags &= ~PTFLAG_CLEAN;  /* L1 is dirty now */
1.110     thorpej  1490:
1.48      chris    1491:        LIST_INSERT_HEAD(&pmaps, pmap, pm_list);
                   1492:        simple_unlock(&pmaps_lock);
                   1493:
1.1       matt     1494:        return(0);
                   1495: }
                   1496:
                   1497:
                   1498: /*
                   1499:  * Initialize a preallocated and zeroed pmap structure,
                   1500:  * such as one in a vmspace structure.
                   1501:  */
                   1502:
                   1503: void
1.73      thorpej  1504: pmap_pinit(struct pmap *pmap)
1.1       matt     1505: {
1.26      rearnsha 1506:        int backoff = 6;
                   1507:        int retry = 10;
                   1508:
1.1       matt     1509:        PDEBUG(0, printf("pmap_pinit(%p)\n", pmap));
                   1510:
                   1511:        /* Keep looping until we succeed in allocating a page directory */
                   1512:        while (pmap_allocpagedir(pmap) != 0) {
                   1513:                /*
                   1514:                 * Ok we failed to allocate a suitable block of memory for an
                   1515:                 * L1 page table. This means that either:
                   1516:                 * 1. 16KB of virtual address space could not be allocated
                   1517:                 * 2. 16KB of physically contiguous memory on a 16KB boundary
                   1518:                 *    could not be allocated.
                   1519:                 *
                   1520:                 * Since we cannot fail we will sleep for a while and try
1.17      chris    1521:                 * again.
1.26      rearnsha 1522:                 *
                   1523:                 * Searching for a suitable L1 PT is expensive:
                   1524:                 * to avoid hogging the system when memory is really
                   1525:                 * scarce, use an exponential back-off so that
                   1526:                 * eventually we won't retry more than once every 8
                   1527:                 * seconds.  This should allow other processes to run
                   1528:                 * to completion and free up resources.
1.1       matt     1529:                 */
1.26      rearnsha 1530:                (void) ltsleep(&lbolt, PVM, "l1ptwait", (hz << 3) >> backoff,
                   1531:                    NULL);
                   1532:                if (--retry == 0) {
                   1533:                        retry = 10;
                   1534:                        if (backoff)
                   1535:                                --backoff;
                   1536:                }
1.1       matt     1537:        }
                   1538:
1.76      thorpej  1539:        if (vector_page < KERNEL_BASE) {
                   1540:                /*
                   1541:                 * Map the vector page.  This will also allocate and map
                   1542:                 * an L2 table for it.
                   1543:                 */
                   1544:                pmap_enter(pmap, vector_page, systempage.pv_pa,
                   1545:                    VM_PROT_READ, VM_PROT_READ | PMAP_WIRED);
                   1546:                pmap_update(pmap);
                   1547:        }
1.1       matt     1548: }
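
/*
 * Stand-alone sketch of the exponential back-off used by pmap_pinit()
 * above: the ltsleep() timeout is (hz << 3) >> backoff, with backoff
 * starting at 6 and dropping by one after every 10 failed retries, so
 * the wait grows from hz/8 up to 8*hz ticks (8 seconds).  hz = 100 is
 * an assumption made only for this illustration.
 */
#include <stdio.h>

int
main(void)
{
	int hz = 100;			/* assumed clock tick rate */
	int backoff;

	for (backoff = 6; backoff >= 0; backoff--) {
		int ticks = (hz << 3) >> backoff;

		printf("backoff %d -> timeout %d ticks (%.3f s)\n",
		    backoff, ticks, (double)ticks / hz);
	}
	return 0;
}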
                   1549:
                   1550: void
1.73      thorpej  1551: pmap_freepagedir(struct pmap *pmap)
1.1       matt     1552: {
                   1553:        /* Free the memory used for the page table mapping */
1.111     thorpej  1554:        if (pmap->pm_vptpt != 0) {
                   1555:                /*
                   1556:                 * XXX Objects freed to a pool cache must be in constructed
                   1557:                 * XXX form when freed, but we don't free page tables as we
                   1558:                 * XXX go, so we need to zap the mappings here.
                   1559:                 *
                   1560:                 * XXX THIS IS CURRENTLY DONE AS UNCACHED MEMORY ACCESS.
                   1561:                 */
                   1562:                memset((caddr_t) pmap->pm_vptpt, 0,
                   1563:                       ((L1_TABLE_SIZE - KERNEL_PD_SIZE) >> 2));
                   1564:                pool_cache_put(&pmap_ptpt_cache, (void *) pmap->pm_vptpt);
                   1565:        }
1.1       matt     1566:
                   1567:        /* junk the L1 page table */
                   1568:        if (pmap->pm_l1pt->pt_flags & PTFLAG_STATIC) {
                   1569:                /* Add the page table to the queue */
1.111     thorpej  1570:                SIMPLEQ_INSERT_TAIL(&l1pt_static_queue,
                   1571:                                    pmap->pm_l1pt, pt_queue);
1.1       matt     1572:                ++l1pt_static_queue_count;
                   1573:        } else if (l1pt_queue_count < 8) {
                   1574:                /* Add the page table to the queue */
                   1575:                SIMPLEQ_INSERT_TAIL(&l1pt_queue, pmap->pm_l1pt, pt_queue);
                   1576:                ++l1pt_queue_count;
                   1577:        } else
                   1578:                pmap_free_l1pt(pmap->pm_l1pt);
                   1579: }
                   1580:
                   1581: /*
                   1582:  * Retire the given physical map from service.
                   1583:  * Should only be called if the map contains no valid mappings.
                   1584:  */
                   1585:
                   1586: void
1.73      thorpej  1587: pmap_destroy(struct pmap *pmap)
1.1       matt     1588: {
1.17      chris    1589:        struct vm_page *page;
1.1       matt     1590:        int count;
                   1591:
                   1592:        if (pmap == NULL)
                   1593:                return;
                   1594:
                   1595:        PDEBUG(0, printf("pmap_destroy(%p)\n", pmap));
1.17      chris    1596:
                   1597:        /*
                   1598:         * Drop reference count
                   1599:         */
                   1600:        simple_lock(&pmap->pm_obj.vmobjlock);
1.16      chris    1601:        count = --pmap->pm_obj.uo_refs;
1.17      chris    1602:        simple_unlock(&pmap->pm_obj.vmobjlock);
                   1603:        if (count > 0) {
                   1604:                return;
1.1       matt     1605:        }
                   1606:
1.17      chris    1607:        /*
                   1608:         * reference count is zero, free pmap resources and then free pmap.
                   1609:         */
1.48      chris    1610:
                   1611:        /*
                   1612:         * remove it from global list of pmaps
                   1613:         */
                   1614:
                   1615:        simple_lock(&pmaps_lock);
                   1616:        LIST_REMOVE(pmap, pm_list);
                   1617:        simple_unlock(&pmaps_lock);
1.17      chris    1618:
1.77      thorpej  1619:        if (vector_page < KERNEL_BASE) {
                   1620:                /* Remove the vector page mapping */
                   1621:                pmap_remove(pmap, vector_page, vector_page + NBPG);
                   1622:                pmap_update(pmap);
                   1623:        }
1.1       matt     1624:
                   1625:        /*
                   1626:         * Free any page tables still mapped
                    1627:         * This is only temporary until pmap_enter can count the number
                    1628:         * of mappings made in a page table. Then pmap_remove() can
                    1629:         * reduce the count and free the pagetable when the count
1.16      chris    1630:         * reaches zero.  Note that entries in this list should match the
                    1631:         * contents of the ptpt; however, this is faster than walking
                    1632:         * 1024 entries looking for page tables.
                    1633:         * (Taken from i386 pmap.c.)
1.1       matt     1634:         */
1.97      chris    1635:        /*
                   1636:         * vmobjlock must be held while freeing pages
                   1637:         */
                   1638:        simple_lock(&pmap->pm_obj.vmobjlock);
1.51      chris    1639:        while ((page = TAILQ_FIRST(&pmap->pm_obj.memq)) != NULL) {
                   1640:                KASSERT((page->flags & PG_BUSY) == 0);
1.114     thorpej  1641:
                   1642:                /* Freeing a PT page?  The contents are a throw-away. */
                   1643:                KASSERT((page->offset & PD_OFFSET) == 0);/* XXX KDASSERT */
                   1644:                cpu_dcache_inv_range((vaddr_t)vtopte(page->offset), PAGE_SIZE);
                   1645:
1.16      chris    1646:                page->wire_count = 0;
                   1647:                uvm_pagefree(page);
1.1       matt     1648:        }
1.97      chris    1649:        simple_unlock(&pmap->pm_obj.vmobjlock);
1.111     thorpej  1650:
1.1       matt     1651:        /* Free the page dir */
                   1652:        pmap_freepagedir(pmap);
1.111     thorpej  1653:
1.17      chris    1654:        /* return the pmap to the pool */
                   1655:        pool_put(&pmap_pmap_pool, pmap);
1.1       matt     1656: }
                   1657:
                   1658:
                   1659: /*
1.15      chris    1660:  * void pmap_reference(struct pmap *pmap)
1.1       matt     1661:  *
                   1662:  * Add a reference to the specified pmap.
                   1663:  */
                   1664:
                   1665: void
1.73      thorpej  1666: pmap_reference(struct pmap *pmap)
1.1       matt     1667: {
                   1668:        if (pmap == NULL)
                   1669:                return;
                   1670:
                   1671:        simple_lock(&pmap->pm_lock);
1.16      chris    1672:        pmap->pm_obj.uo_refs++;
1.1       matt     1673:        simple_unlock(&pmap->pm_lock);
                   1674: }
                   1675:
                   1676: /*
                   1677:  * void pmap_virtual_space(vaddr_t *start, vaddr_t *end)
                   1678:  *
                   1679:  * Return the start and end addresses of the kernel's virtual space.
                    1680:  * These values are set up in pmap_bootstrap and are updated as pages
                   1681:  * are allocated.
                   1682:  */
                   1683:
                   1684: void
1.73      thorpej  1685: pmap_virtual_space(vaddr_t *start, vaddr_t *end)
1.1       matt     1686: {
1.54      thorpej  1687:        *start = virtual_avail;
1.1       matt     1688:        *end = virtual_end;
                   1689: }
                   1690:
                   1691: /*
                   1692:  * Activate the address space for the specified process.  If the process
                   1693:  * is the current process, load the new MMU context.
                   1694:  */
                   1695: void
1.73      thorpej  1696: pmap_activate(struct proc *p)
1.1       matt     1697: {
1.15      chris    1698:        struct pmap *pmap = p->p_vmspace->vm_map.pmap;
1.1       matt     1699:        struct pcb *pcb = &p->p_addr->u_pcb;
                   1700:
1.15      chris    1701:        (void) pmap_extract(pmap_kernel(), (vaddr_t)pmap->pm_pdir,
1.1       matt     1702:            (paddr_t *)&pcb->pcb_pagedir);
                   1703:
                   1704:        PDEBUG(0, printf("pmap_activate: p=%p pmap=%p pcb=%p pdir=%p l1=%p\n",
                   1705:            p, pmap, pcb, pmap->pm_pdir, pcb->pcb_pagedir));
                   1706:
                   1707:        if (p == curproc) {
                   1708:                PDEBUG(0, printf("pmap_activate: setting TTB\n"));
                   1709:                setttb((u_int)pcb->pcb_pagedir);
                   1710:        }
                   1711: }
                   1712:
                   1713: /*
                   1714:  * Deactivate the address space of the specified process.
                   1715:  */
                   1716: void
1.73      thorpej  1717: pmap_deactivate(struct proc *p)
1.1       matt     1718: {
                   1719: }
                   1720:
1.31      thorpej  1721: /*
                   1722:  * Perform any deferred pmap operations.
                   1723:  */
                   1724: void
                   1725: pmap_update(struct pmap *pmap)
                   1726: {
                   1727:
                   1728:        /*
                   1729:         * We haven't deferred any pmap operations, but we do need to
                   1730:         * make sure TLB/cache operations have completed.
                   1731:         */
                   1732:        cpu_cpwait();
                   1733: }
1.1       matt     1734:
                   1735: /*
                   1736:  * pmap_clean_page()
                   1737:  *
                   1738:  * This is a local function used to work out the best strategy to clean
                   1739:  * a single page referenced by its entry in the PV table. It's used by
                    1740:  * pmap_copy_page, pmap_zero_page and maybe some others later on.
                   1741:  *
                   1742:  * Its policy is effectively:
                   1743:  *  o If there are no mappings, we don't bother doing anything with the cache.
                   1744:  *  o If there is one mapping, we clean just that page.
                   1745:  *  o If there are multiple mappings, we clean the entire cache.
                   1746:  *
                   1747:  * So that some functions can be further optimised, it returns 0 if it didn't
                   1748:  * clean the entire cache, or 1 if it did.
                   1749:  *
                   1750:  * XXX One bug in this routine is that if the pv_entry has a single page
                   1751:  * mapped at 0x00000000 a whole cache clean will be performed rather than
                    1752:  * just that one page. This should not occur in everyday use, and if it
                    1753:  * does, it merely results in a less efficient clean for the page.
                   1754:  */
                   1755: static int
1.73      thorpej  1756: pmap_clean_page(struct pv_entry *pv, boolean_t is_src)
1.1       matt     1757: {
1.17      chris    1758:        struct pmap *pmap;
                   1759:        struct pv_entry *npv;
1.1       matt     1760:        int cache_needs_cleaning = 0;
                   1761:        vaddr_t page_to_clean = 0;
                   1762:
1.108     thorpej  1763:        if (pv == NULL) {
1.17      chris    1764:                /* nothing mapped in so nothing to flush */
                   1765:                return (0);
1.108     thorpej  1766:        }
1.17      chris    1767:
1.108     thorpej  1768:        /*
                   1769:         * Since we flush the cache each time we change curproc, we
1.17      chris    1770:         * only need to flush the page if it is in the current pmap.
                   1771:         */
                   1772:        if (curproc)
                   1773:                pmap = curproc->p_vmspace->vm_map.pmap;
                   1774:        else
                   1775:                pmap = pmap_kernel();
                   1776:
                   1777:        for (npv = pv; npv; npv = npv->pv_next) {
                   1778:                if (npv->pv_pmap == pmap) {
1.108     thorpej  1779:                        /*
                   1780:                         * The page is mapped non-cacheable in
1.17      chris    1781:                         * this map.  No need to flush the cache.
                   1782:                         */
1.78      thorpej  1783:                        if (npv->pv_flags & PVF_NC) {
1.17      chris    1784: #ifdef DIAGNOSTIC
                   1785:                                if (cache_needs_cleaning)
                   1786:                                        panic("pmap_clean_page: "
1.108     thorpej  1787:                                            "cache inconsistency");
1.17      chris    1788: #endif
                   1789:                                break;
1.108     thorpej  1790:                        } else if (is_src && (npv->pv_flags & PVF_WRITE) == 0)
1.17      chris    1791:                                continue;
1.108     thorpej  1792:                        if (cache_needs_cleaning) {
1.17      chris    1793:                                page_to_clean = 0;
                   1794:                                break;
1.108     thorpej  1795:                        } else
1.17      chris    1796:                                page_to_clean = npv->pv_va;
                   1797:                        cache_needs_cleaning = 1;
                   1798:                }
1.1       matt     1799:        }
                   1800:
1.108     thorpej  1801:        if (page_to_clean) {
                   1802:                /*
                   1803:                 * XXX If is_src, we really only need to write-back,
                   1804:                 * XXX not invalidate, too.  Investigate further.
                   1805:                 * XXX --thorpej@netbsd.org
                   1806:                 */
1.36      thorpej  1807:                cpu_idcache_wbinv_range(page_to_clean, NBPG);
1.108     thorpej  1808:        } else if (cache_needs_cleaning) {
1.36      thorpej  1809:                cpu_idcache_wbinv_all();
1.1       matt     1810:                return (1);
                   1811:        }
                   1812:        return (0);
                   1813: }
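
/*
 * Condensed, stand-alone restatement of the cleaning policy described
 * above (illustration only: the real routine additionally restricts
 * itself to mappings in the current pmap and honours the PVF_NC and
 * PVF_WRITE flags).
 */
#include <stdio.h>

enum clean_action { CLEAN_NOTHING, CLEAN_ONE_PAGE, CLEAN_WHOLE_CACHE };

static enum clean_action
clean_policy(int nmappings)
{
	if (nmappings == 0)
		return CLEAN_NOTHING;		/* nothing to flush */
	if (nmappings == 1)
		return CLEAN_ONE_PAGE;		/* wbinv just that VA */
	return CLEAN_WHOLE_CACHE;		/* wbinv everything */
}

int
main(void)
{
	int n;

	for (n = 0; n <= 2; n++)
		printf("%d mapping(s) -> action %d\n", n, clean_policy(n));
	return 0;
}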
                   1814:
                   1815: /*
                   1816:  * pmap_zero_page()
                   1817:  *
                   1818:  * Zero a given physical page by mapping it at a page hook point.
                    1819:  * In doing the zero page op, the page we zero is mapped cacheable, since
                    1820:  * on the StrongARM accesses to non-cached pages are non-burst, making
                    1821:  * writing _any_ bulk data very slow.
                   1822:  */
1.88      thorpej  1823: #if ARM_MMU_GENERIC == 1
1.1       matt     1824: void
1.88      thorpej  1825: pmap_zero_page_generic(paddr_t phys)
1.1       matt     1826: {
1.71      thorpej  1827: #ifdef DEBUG
                   1828:        struct vm_page *pg = PHYS_TO_VM_PAGE(phys);
                   1829:
                   1830:        if (pg->mdpage.pvh_list != NULL)
                   1831:                panic("pmap_zero_page: page has mappings");
                   1832: #endif
1.1       matt     1833:
1.79      thorpej  1834:        KDASSERT((phys & PGOFSET) == 0);
                   1835:
1.1       matt     1836:        /*
                   1837:         * Hook in the page, zero it, and purge the cache for that
                   1838:         * zeroed page. Invalidate the TLB as needed.
                   1839:         */
1.83      thorpej  1840:        *cdst_pte = L2_S_PROTO | phys |
1.86      thorpej  1841:            L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode;
1.113     thorpej  1842:        PTE_SYNC(cdst_pte);
1.54      thorpej  1843:        cpu_tlb_flushD_SE(cdstp);
1.32      thorpej  1844:        cpu_cpwait();
1.54      thorpej  1845:        bzero_page(cdstp);
                   1846:        cpu_dcache_wbinv_range(cdstp, NBPG);
1.1       matt     1847: }
1.88      thorpej  1848: #endif /* ARM_MMU_GENERIC == 1 */
                   1849:
                   1850: #if ARM_MMU_XSCALE == 1
                   1851: void
                   1852: pmap_zero_page_xscale(paddr_t phys)
                   1853: {
                   1854: #ifdef DEBUG
                   1855:        struct vm_page *pg = PHYS_TO_VM_PAGE(phys);
                   1856:
                   1857:        if (pg->mdpage.pvh_list != NULL)
                   1858:                panic("pmap_zero_page: page has mappings");
                   1859: #endif
                   1860:
                   1861:        KDASSERT((phys & PGOFSET) == 0);
                   1862:
                   1863:        /*
                   1864:         * Hook in the page, zero it, and purge the cache for that
                   1865:         * zeroed page. Invalidate the TLB as needed.
                   1866:         */
                   1867:        *cdst_pte = L2_S_PROTO | phys |
                   1868:            L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) |
                   1869:            L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X);       /* mini-data */
1.113     thorpej  1870:        PTE_SYNC(cdst_pte);
1.88      thorpej  1871:        cpu_tlb_flushD_SE(cdstp);
                   1872:        cpu_cpwait();
                   1873:        bzero_page(cdstp);
                   1874:        xscale_cache_clean_minidata();
                   1875: }
                   1876: #endif /* ARM_MMU_XSCALE == 1 */
1.1       matt     1877:
1.17      chris    1878: /* pmap_pageidlezero()
                   1879:  *
                   1880:  * The same as above, except that we assume that the page is not
                   1881:  * mapped.  This means we never have to flush the cache first.  Called
                   1882:  * from the idle loop.
                   1883:  */
                   1884: boolean_t
1.73      thorpej  1885: pmap_pageidlezero(paddr_t phys)
1.17      chris    1886: {
                   1887:        int i, *ptr;
                   1888:        boolean_t rv = TRUE;
1.71      thorpej  1889: #ifdef DEBUG
1.49      thorpej  1890:        struct vm_page *pg;
1.17      chris    1891:
1.49      thorpej  1892:        pg = PHYS_TO_VM_PAGE(phys);
                   1893:        if (pg->mdpage.pvh_list != NULL)
1.71      thorpej  1894:                panic("pmap_pageidlezero: page has mappings");
1.17      chris    1895: #endif
1.79      thorpej  1896:
                   1897:        KDASSERT((phys & PGOFSET) == 0);
                   1898:
1.17      chris    1899:        /*
                   1900:         * Hook in the page, zero it, and purge the cache for that
                   1901:         * zeroed page. Invalidate the TLB as needed.
                   1902:         */
1.83      thorpej  1903:        *cdst_pte = L2_S_PROTO | phys |
1.86      thorpej  1904:            L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode;
1.113     thorpej  1905:        PTE_SYNC(cdst_pte);
1.54      thorpej  1906:        cpu_tlb_flushD_SE(cdstp);
1.32      thorpej  1907:        cpu_cpwait();
                   1908:
1.54      thorpej  1909:        for (i = 0, ptr = (int *)cdstp;
1.17      chris    1910:                        i < (NBPG / sizeof(int)); i++) {
                   1911:                if (sched_whichqs != 0) {
                   1912:                        /*
                   1913:                         * A process has become ready.  Abort now,
                   1914:                         * so we don't keep it waiting while we
                   1915:                         * do slow memory access to finish this
                   1916:                         * page.
                   1917:                         */
                   1918:                        rv = FALSE;
                   1919:                        break;
                   1920:                }
                   1921:                *ptr++ = 0;
                   1922:        }
                   1923:
                   1924:        if (rv)
                   1925:                /*
                    1926:                 * if we aborted, we'll re-zero this page later, so don't
                    1927:                 * purge it unless we finished it
                   1928:                 */
1.54      thorpej  1929:                cpu_dcache_wbinv_range(cdstp, NBPG);
1.17      chris    1930:        return (rv);
                   1931: }
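
/*
 * Stand-alone sketch of the abort-early zeroing loop in
 * pmap_pageidlezero() above: the page is zeroed one word at a time and
 * the work is abandoned as soon as other work shows up.  The
 * work_pending() callback stands in for the kernel's sched_whichqs
 * check and is an assumption of this sketch.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define SKETCH_NBPG	4096		/* assumed page size */

static bool
sketch_idlezero(int *page, bool (*work_pending)(void))
{
	size_t i;

	for (i = 0; i < SKETCH_NBPG / sizeof(int); i++) {
		if (work_pending())
			return false;	/* abort; page is re-zeroed later */
		page[i] = 0;
	}
	return true;			/* page fully zeroed */
}

static bool
never_busy(void)
{
	return false;
}

int
main(void)
{
	static int page[SKETCH_NBPG / sizeof(int)];

	printf("zeroed completely: %d\n", sketch_idlezero(page, never_busy));
	return 0;
}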
                   1932:
1.1       matt     1933: /*
                   1934:  * pmap_copy_page()
                   1935:  *
                   1936:  * Copy one physical page into another, by mapping the pages into
                   1937:  * hook points. The same comment regarding cachability as in
                   1938:  * pmap_zero_page also applies here.
                   1939:  */
1.88      thorpej  1940: #if ARM_MMU_GENERIC == 1
1.1       matt     1941: void
1.88      thorpej  1942: pmap_copy_page_generic(paddr_t src, paddr_t dst)
1.1       matt     1943: {
1.71      thorpej  1944:        struct vm_page *src_pg = PHYS_TO_VM_PAGE(src);
                   1945: #ifdef DEBUG
                   1946:        struct vm_page *dst_pg = PHYS_TO_VM_PAGE(dst);
                   1947:
                   1948:        if (dst_pg->mdpage.pvh_list != NULL)
                   1949:                panic("pmap_copy_page: dst page has mappings");
                   1950: #endif
                   1951:
1.79      thorpej  1952:        KDASSERT((src & PGOFSET) == 0);
                   1953:        KDASSERT((dst & PGOFSET) == 0);
                   1954:
1.71      thorpej  1955:        /*
                   1956:         * Clean the source page.  Hold the source page's lock for
                   1957:         * the duration of the copy so that no other mappings can
                   1958:         * be created while we have a potentially aliased mapping.
                   1959:         */
1.49      thorpej  1960:        simple_lock(&src_pg->mdpage.pvh_slock);
1.71      thorpej  1961:        (void) pmap_clean_page(src_pg->mdpage.pvh_list, TRUE);
1.1       matt     1962:
                   1963:        /*
                   1964:         * Map the pages into the page hook points, copy them, and purge
                   1965:         * the cache for the appropriate page. Invalidate the TLB
                   1966:         * as required.
                   1967:         */
1.83      thorpej  1968:        *csrc_pte = L2_S_PROTO | src |
1.86      thorpej  1969:            L2_S_PROT(PTE_KERNEL, VM_PROT_READ) | pte_l2_s_cache_mode;
1.113     thorpej  1970:        PTE_SYNC(csrc_pte);
1.83      thorpej  1971:        *cdst_pte = L2_S_PROTO | dst |
1.86      thorpej  1972:            L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode;
1.113     thorpej  1973:        PTE_SYNC(cdst_pte);
1.54      thorpej  1974:        cpu_tlb_flushD_SE(csrcp);
                   1975:        cpu_tlb_flushD_SE(cdstp);
1.32      thorpej  1976:        cpu_cpwait();
1.54      thorpej  1977:        bcopy_page(csrcp, cdstp);
1.65      chris    1978:        cpu_dcache_inv_range(csrcp, NBPG);
1.71      thorpej  1979:        simple_unlock(&src_pg->mdpage.pvh_slock); /* cache is safe again */
1.54      thorpej  1980:        cpu_dcache_wbinv_range(cdstp, NBPG);
1.1       matt     1981: }
1.88      thorpej  1982: #endif /* ARM_MMU_GENERIC == 1 */
                   1983:
                   1984: #if ARM_MMU_XSCALE == 1
                   1985: void
                   1986: pmap_copy_page_xscale(paddr_t src, paddr_t dst)
                   1987: {
                   1988:        struct vm_page *src_pg = PHYS_TO_VM_PAGE(src);
                   1989: #ifdef DEBUG
                   1990:        struct vm_page *dst_pg = PHYS_TO_VM_PAGE(dst);
                   1991:
                   1992:        if (dst_pg->mdpage.pvh_list != NULL)
                   1993:                panic("pmap_copy_page: dst page has mappings");
                   1994: #endif
                   1995:
                   1996:        KDASSERT((src & PGOFSET) == 0);
                   1997:        KDASSERT((dst & PGOFSET) == 0);
                   1998:
                   1999:        /*
                   2000:         * Clean the source page.  Hold the source page's lock for
                   2001:         * the duration of the copy so that no other mappings can
                   2002:         * be created while we have a potentially aliased mapping.
                   2003:         */
                   2004:        simple_lock(&src_pg->mdpage.pvh_slock);
                   2005:        (void) pmap_clean_page(src_pg->mdpage.pvh_list, TRUE);
                   2006:
                   2007:        /*
                   2008:         * Map the pages into the page hook points, copy them, and purge
                   2009:         * the cache for the appropriate page. Invalidate the TLB
                   2010:         * as required.
                   2011:         */
                   2012:        *csrc_pte = L2_S_PROTO | src |
1.89      thorpej  2013:            L2_S_PROT(PTE_KERNEL, VM_PROT_READ) |
                   2014:            L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X);       /* mini-data */
1.113     thorpej  2015:        PTE_SYNC(csrc_pte);
1.88      thorpej  2016:        *cdst_pte = L2_S_PROTO | dst |
                   2017:            L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) |
                   2018:            L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X);       /* mini-data */
1.113     thorpej  2019:        PTE_SYNC(cdst_pte);
1.88      thorpej  2020:        cpu_tlb_flushD_SE(csrcp);
                   2021:        cpu_tlb_flushD_SE(cdstp);
                   2022:        cpu_cpwait();
                   2023:        bcopy_page(csrcp, cdstp);
                   2024:        simple_unlock(&src_pg->mdpage.pvh_slock); /* cache is safe again */
                   2025:        xscale_cache_clean_minidata();
                   2026: }
                   2027: #endif /* ARM_MMU_XSCALE == 1 */
1.1       matt     2028:
                   2029: #if 0
                   2030: void
1.73      thorpej  2031: pmap_pte_addref(struct pmap *pmap, vaddr_t va)
1.1       matt     2032: {
                   2033:        pd_entry_t *pde;
1.2       matt     2034:        paddr_t pa;
1.1       matt     2035:        struct vm_page *m;
                   2036:
                   2037:        if (pmap == pmap_kernel())
                   2038:                return;
                   2039:
1.115   ! thorpej  2040:        pde = pmap_pde(pmap, va & PD_FRAME);
1.1       matt     2041:        pa = pmap_pte_pa(pde);
                   2042:        m = PHYS_TO_VM_PAGE(pa);
1.115   ! thorpej  2043:        m->wire_count++;
1.1       matt     2044: #ifdef MYCROFT_HACK
                   2045:        printf("addref pmap=%p va=%08lx pde=%p pa=%08lx m=%p wire=%d\n",
                   2046:            pmap, va, pde, pa, m, m->wire_count);
                   2047: #endif
                   2048: }
                   2049:
                   2050: void
1.73      thorpej  2051: pmap_pte_delref(struct pmap *pmap, vaddr_t va)
1.1       matt     2052: {
                   2053:        pd_entry_t *pde;
1.2       matt     2054:        paddr_t pa;
1.1       matt     2055:        struct vm_page *m;
                   2056:
                   2057:        if (pmap == pmap_kernel())
                   2058:                return;
                   2059:
1.115   ! thorpej  2060:        pde = pmap_pde(pmap, va & PD_FRAME);
1.1       matt     2061:        pa = pmap_pte_pa(pde);
                   2062:        m = PHYS_TO_VM_PAGE(pa);
1.115   ! thorpej  2063:        m->wire_count--;
1.1       matt     2064: #ifdef MYCROFT_HACK
                   2065:        printf("delref pmap=%p va=%08lx pde=%p pa=%08lx m=%p wire=%d\n",
                   2066:            pmap, va, pde, pa, m, m->wire_count);
                   2067: #endif
                   2068:        if (m->wire_count == 0) {
                   2069: #ifdef MYCROFT_HACK
                   2070:                printf("delref pmap=%p va=%08lx pde=%p pa=%08lx m=%p\n",
                   2071:                    pmap, va, pde, pa, m);
                   2072: #endif
1.115   ! thorpej  2073:                pmap_unmap_in_l1(pmap, va & PD_FRAME);
1.1       matt     2074:                uvm_pagefree(m);
                   2075:                --pmap->pm_stats.resident_count;
                   2076:        }
                   2077: }
                   2078: #else
                   2079: #define        pmap_pte_addref(pmap, va)
                   2080: #define        pmap_pte_delref(pmap, va)
                   2081: #endif
                   2082:
                   2083: /*
                   2084:  * Since we have a virtually indexed cache, we may need to inhibit caching if
                   2085:  * there is more than one mapping and at least one of them is writable.
                   2086:  * Since we purge the cache on every context switch, we only need to check for
                   2087:  * other mappings within the same pmap, or kernel_pmap.
                   2088:  * This function is also called when a page is unmapped, to possibly reenable
                   2089:  * caching on any remaining mappings.
1.28      rearnsha 2090:  *
                   2091:  * The code implements the following logic, where:
                   2092:  *
                   2093:  * KW = # of kernel read/write pages
                   2094:  * KR = # of kernel read only pages
                   2095:  * UW = # of user read/write pages
                   2096:  * UR = # of user read only pages
                   2097:  * OW = # of user read/write pages in another pmap, then
                   2098:  *
                   2099:  * KC = kernel mapping is cacheable
                   2100:  * UC = user mapping is cacheable
                   2101:  *
                   2102:  *                     KW=0,KR=0  KW=0,KR>0  KW=1,KR=0  KW>1,KR>=0
                   2103:  *                   +---------------------------------------------
                   2104:  * UW=0,UR=0,OW=0    | ---        KC=1       KC=1       KC=0
                   2105:  * UW=0,UR>0,OW=0    | UC=1       KC=1,UC=1  KC=0,UC=0  KC=0,UC=0
                   2106:  * UW=0,UR>0,OW>0    | UC=1       KC=0,UC=1  KC=0,UC=0  KC=0,UC=0
                   2107:  * UW=1,UR=0,OW=0    | UC=1       KC=0,UC=0  KC=0,UC=0  KC=0,UC=0
                   2108:  * UW>1,UR>=0,OW>=0  | UC=0       KC=0,UC=0  KC=0,UC=0  KC=0,UC=0
1.11      chris    2109:  *
                    2110:  * Note that the pmap must have its ptes mapped in, and passed in `ptes'.
1.1       matt     2111:  */
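                         /*
                          * Worked example of the table above: a page with one kernel
                          * read/write mapping and one user read-only mapping in this pmap
                          * (KW=1, KR=0, UW=0, UR=1, OW=0) falls in the KC=0,UC=0 cell, so
                          * both mappings must be made non-cacheable.  Once the kernel
                          * mapping is removed (KW=0, KR=0), the lone user mapping may be
                          * cached again (UC=1).
                          */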
1.25      rearnsha 2112: __inline static void
1.49      thorpej  2113: pmap_vac_me_harder(struct pmap *pmap, struct vm_page *pg, pt_entry_t *ptes,
1.12      chris    2114:        boolean_t clear_cache)
1.1       matt     2115: {
1.25      rearnsha 2116:        if (pmap == pmap_kernel())
1.49      thorpej  2117:                pmap_vac_me_kpmap(pmap, pg, ptes, clear_cache);
1.25      rearnsha 2118:        else
1.49      thorpej  2119:                pmap_vac_me_user(pmap, pg, ptes, clear_cache);
1.25      rearnsha 2120: }
                   2121:
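                         /*
                          * pmap_vac_me_kpmap: the kernel-pmap side of the logic above.
                          * Count the user and kernel mappings of the page (and how many of
                          * each are writable/cacheable), then re-run pmap_vac_me_user() on
                          * each distinct user pmap whose mappings may need their
                          * cacheability recalculated, and finally handle the kernel
                          * entries themselves, if any.
                          */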
                   2122: static void
1.49      thorpej  2123: pmap_vac_me_kpmap(struct pmap *pmap, struct vm_page *pg, pt_entry_t *ptes,
1.25      rearnsha 2124:        boolean_t clear_cache)
                   2125: {
                   2126:        int user_entries = 0;
                   2127:        int user_writable = 0;
                   2128:        int user_cacheable = 0;
                   2129:        int kernel_entries = 0;
                   2130:        int kernel_writable = 0;
                   2131:        int kernel_cacheable = 0;
                   2132:        struct pv_entry *pv;
                   2133:        struct pmap *last_pmap = pmap;
                   2134:
                   2135: #ifdef DIAGNOSTIC
                   2136:        if (pmap != pmap_kernel())
                   2137:                panic("pmap_vac_me_kpmap: pmap != pmap_kernel()");
                   2138: #endif
                   2139:
                   2140:        /*
                    2141:         * Pass one: see if there are both kernel and user pmaps for
                   2142:         * this page.  Calculate whether there are user-writable or
                   2143:         * kernel-writable pages.
                   2144:         */
1.49      thorpej  2145:        for (pv = pg->mdpage.pvh_list; pv != NULL; pv = pv->pv_next) {
1.25      rearnsha 2146:                if (pv->pv_pmap != pmap) {
                   2147:                        user_entries++;
1.78      thorpej  2148:                        if (pv->pv_flags & PVF_WRITE)
1.25      rearnsha 2149:                                user_writable++;
1.78      thorpej  2150:                        if ((pv->pv_flags & PVF_NC) == 0)
1.25      rearnsha 2151:                                user_cacheable++;
                   2152:                } else {
                   2153:                        kernel_entries++;
1.78      thorpej  2154:                        if (pv->pv_flags & PVF_WRITE)
1.25      rearnsha 2155:                                kernel_writable++;
1.78      thorpej  2156:                        if ((pv->pv_flags & PVF_NC) == 0)
1.25      rearnsha 2157:                                kernel_cacheable++;
                   2158:                }
                   2159:        }
                   2160:
                   2161:        /*
                   2162:         * We know we have just been updating a kernel entry, so if
                   2163:         * all user pages are already cacheable, then there is nothing
                   2164:         * further to do.
                   2165:         */
                   2166:        if (kernel_entries == 0 &&
                   2167:            user_cacheable == user_entries)
                   2168:                return;
                   2169:
                   2170:        if (user_entries) {
                   2171:                /*
                    2172:                 * Scan over the list again; for each entry that
                   2173:                 * might not be set correctly, call pmap_vac_me_user
                   2174:                 * to recalculate the settings.
                   2175:                 */
1.49      thorpej  2176:                for (pv = pg->mdpage.pvh_list; pv; pv = pv->pv_next) {
1.25      rearnsha 2177:                        /*
                   2178:                         * We know kernel mappings will get set
                   2179:                         * correctly in other calls.  We also know
                   2180:                         * that if the pmap is the same as last_pmap
                   2181:                         * then we've just handled this entry.
                   2182:                         */
                   2183:                        if (pv->pv_pmap == pmap || pv->pv_pmap == last_pmap)
                   2184:                                continue;
                   2185:                        /*
                   2186:                         * If there are kernel entries and this page
                   2187:                         * is writable but non-cacheable, then we can
                   2188:                         * skip this entry also.
                   2189:                         */
                   2190:                        if (kernel_entries > 0 &&
1.78      thorpej  2191:                            (pv->pv_flags & (PVF_NC | PVF_WRITE)) ==
                   2192:                            (PVF_NC | PVF_WRITE))
1.25      rearnsha 2193:                                continue;
                   2194:                        /*
                   2195:                         * Similarly if there are no kernel-writable
                   2196:                         * entries and the page is already
                   2197:                         * read-only/cacheable.
                   2198:                         */
                   2199:                        if (kernel_writable == 0 &&
1.78      thorpej  2200:                            (pv->pv_flags & (PVF_NC | PVF_WRITE)) == 0)
1.25      rearnsha 2201:                                continue;
                   2202:                        /*
                   2203:                         * For some of the remaining cases, we know
                   2204:                         * that we must recalculate, but for others we
                   2205:                         * can't tell if they are correct or not, so
                   2206:                         * we recalculate anyway.
                   2207:                         */
                   2208:                        pmap_unmap_ptes(last_pmap);
                   2209:                        last_pmap = pv->pv_pmap;
                   2210:                        ptes = pmap_map_ptes(last_pmap);
1.49      thorpej  2211:                        pmap_vac_me_user(last_pmap, pg, ptes,
1.25      rearnsha 2212:                            pmap_is_curpmap(last_pmap));
                   2213:                }
                   2214:                /* Restore the pte mapping that was passed to us.  */
                   2215:                if (last_pmap != pmap) {
                   2216:                        pmap_unmap_ptes(last_pmap);
                   2217:                        ptes = pmap_map_ptes(pmap);
                   2218:                }
                   2219:                if (kernel_entries == 0)
                   2220:                        return;
                   2221:        }
                   2222:
1.49      thorpej  2223:        pmap_vac_me_user(pmap, pg, ptes, clear_cache);
1.25      rearnsha 2224:        return;
                   2225: }
                   2226:
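                         /*
                          * pmap_vac_me_user: apply the policy above to the mappings of a
                          * page within a single pmap (kernel mappings are counted together
                          * with this pmap's own).  If several mappings exist and at least
                          * one is writable, the relevant mappings are marked PVF_NC and
                          * made non-cacheable, flushing the cache/TLB as required;
                          * otherwise mappings previously marked PVF_NC may have caching
                          * turned back on.
                          */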
                   2227: static void
1.49      thorpej  2228: pmap_vac_me_user(struct pmap *pmap, struct vm_page *pg, pt_entry_t *ptes,
1.25      rearnsha 2229:        boolean_t clear_cache)
                   2230: {
                   2231:        struct pmap *kpmap = pmap_kernel();
1.17      chris    2232:        struct pv_entry *pv, *npv;
1.1       matt     2233:        int entries = 0;
1.25      rearnsha 2234:        int writable = 0;
1.12      chris    2235:        int cacheable_entries = 0;
1.25      rearnsha 2236:        int kern_cacheable = 0;
                   2237:        int other_writable = 0;
1.1       matt     2238:
1.49      thorpej  2239:        pv = pg->mdpage.pvh_list;
1.11      chris    2240:        KASSERT(ptes != NULL);
1.1       matt     2241:
                   2242:        /*
                   2243:         * Count mappings and writable mappings in this pmap.
1.25      rearnsha 2244:         * Include kernel mappings as part of our own.
1.1       matt     2245:         * Keep a pointer to the first one.
                   2246:         */
                   2247:        for (npv = pv; npv; npv = npv->pv_next) {
                   2248:                /* Count mappings in the same pmap */
1.25      rearnsha 2249:                if (pmap == npv->pv_pmap ||
                   2250:                    kpmap == npv->pv_pmap) {
1.1       matt     2251:                        if (entries++ == 0)
                   2252:                                pv = npv;
1.12      chris    2253:                        /* Cacheable mappings */
1.78      thorpej  2254:                        if ((npv->pv_flags & PVF_NC) == 0) {
1.12      chris    2255:                                cacheable_entries++;
1.25      rearnsha 2256:                                if (kpmap == npv->pv_pmap)
                   2257:                                        kern_cacheable++;
                   2258:                        }
                   2259:                        /* Writable mappings */
1.78      thorpej  2260:                        if (npv->pv_flags & PVF_WRITE)
1.25      rearnsha 2261:                                ++writable;
1.78      thorpej  2262:                } else if (npv->pv_flags & PVF_WRITE)
1.25      rearnsha 2263:                        other_writable = 1;
1.1       matt     2264:        }
                   2265:
1.12      chris    2266:        PDEBUG(3,printf("pmap_vac_me_harder: pmap %p Entries %d, "
1.25      rearnsha 2267:                "writable %d cacheable %d %s\n", pmap, entries, writable,
1.12      chris    2268:                cacheable_entries, clear_cache ? "clean" : "no clean"));
                   2269:
1.1       matt     2270:        /*
                   2271:         * Enable or disable caching as necessary.
1.25      rearnsha 2272:         * Note: the first entry might be part of the kernel pmap,
                   2273:         * so we can't assume this is indicative of the state of the
                   2274:         * other (maybe non-kpmap) entries.
1.1       matt     2275:         */
1.25      rearnsha 2276:        if ((entries > 1 && writable) ||
                   2277:            (entries > 0 && pmap == kpmap && other_writable)) {
1.12      chris    2278:                if (cacheable_entries == 0)
                   2279:                    return;
1.25      rearnsha 2280:                for (npv = pv; npv; npv = npv->pv_next) {
                   2281:                        if ((pmap == npv->pv_pmap
                   2282:                            || kpmap == npv->pv_pmap) &&
1.78      thorpej  2283:                            (npv->pv_flags & PVF_NC) == 0) {
1.91      thorpej  2284:                                ptes[arm_btop(npv->pv_va)] &= ~L2_S_CACHE_MASK;
1.113     thorpej  2285:                                PTE_SYNC_CURRENT(pmap,
                   2286:                                    &ptes[arm_btop(npv->pv_va)]);
1.78      thorpej  2287:                                npv->pv_flags |= PVF_NC;
1.25      rearnsha 2288:                                /*
                   2289:                                 * If this page needs flushing from the
                   2290:                                 * cache, and we aren't going to do it
                   2291:                                 * below, do it now.
                   2292:                                 */
                   2293:                                if ((cacheable_entries < 4 &&
                   2294:                                    (clear_cache || npv->pv_pmap == kpmap)) ||
                   2295:                                    (npv->pv_pmap == kpmap &&
                   2296:                                    !clear_cache && kern_cacheable < 4)) {
1.36      thorpej  2297:                                        cpu_idcache_wbinv_range(npv->pv_va,
1.12      chris    2298:                                            NBPG);
                   2299:                                        cpu_tlb_flushID_SE(npv->pv_va);
                   2300:                                }
1.1       matt     2301:                        }
                   2302:                }
1.25      rearnsha 2303:                if ((clear_cache && cacheable_entries >= 4) ||
                   2304:                    kern_cacheable >= 4) {
1.36      thorpej  2305:                        cpu_idcache_wbinv_all();
1.12      chris    2306:                        cpu_tlb_flushID();
                   2307:                }
1.32      thorpej  2308:                cpu_cpwait();
1.1       matt     2309:        } else if (entries > 0) {
1.25      rearnsha 2310:                /*
                    2311:                 * Turn caching back on for some pages.  If it is a kernel
                   2312:                 * page, only do so if there are no other writable pages.
                   2313:                 */
                   2314:                for (npv = pv; npv; npv = npv->pv_next) {
                   2315:                        if ((pmap == npv->pv_pmap ||
                   2316:                            (kpmap == npv->pv_pmap && other_writable == 0)) &&
1.78      thorpej  2317:                            (npv->pv_flags & PVF_NC)) {
1.86      thorpej  2318:                                ptes[arm_btop(npv->pv_va)] |=
                   2319:                                    pte_l2_s_cache_mode;
1.113     thorpej  2320:                                PTE_SYNC_CURRENT(pmap,
                   2321:                                    &ptes[arm_btop(npv->pv_va)]);
1.78      thorpej  2322:                                npv->pv_flags &= ~PVF_NC;
1.1       matt     2323:                        }
                   2324:                }
                   2325:        }
                   2326: }
                   2327:
                   2328: /*
                   2329:  * pmap_remove()
                   2330:  *
                   2331:  * pmap_remove is responsible for nuking a number of mappings for a range
                   2332:  * of virtual address space in the current pmap. To do this efficiently
                   2333:  * is interesting, because in a number of cases a wide virtual address
                   2334:  * range may be supplied that contains few actual mappings. So, the
                   2335:  * optimisations are:
                   2336:  *  1. Try and skip over hunks of address space for which an L1 entry
                   2337:  *     does not exist.
                   2338:  *  2. Build up a list of pages we've hit, up to a maximum, so we can
                   2339:  *     maybe do just a partial cache clean. This path of execution is
                   2340:  *     complicated by the fact that the cache must be flushed _before_
                   2341:  *     the PTE is nuked, being a VAC :-)
                   2342:  *  3. Maybe later fast-case a single page, but I don't think this is
                   2343:  *     going to make _that_ much difference overall.
                   2344:  */
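                         /*
                          * For example (assuming the pmap is active): removing three mapped
                          * pages stays within PMAP_REMOVE_CLEAN_LIST_SIZE, so each page gets
                          * an individual cpu_idcache_wbinv_range()/cpu_tlb_flushID_SE() pair
                          * at the end; once a fourth mapping is found the list overflows and
                          * we pay for a single cpu_idcache_wbinv_all() + cpu_tlb_flushID(),
                          * after which the remaining PTEs are zapped without any further
                          * per-page cache work.
                          */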
                   2345:
                   2346: #define PMAP_REMOVE_CLEAN_LIST_SIZE    3
                   2347:
                   2348: void
1.73      thorpej  2349: pmap_remove(struct pmap *pmap, vaddr_t sva, vaddr_t eva)
1.1       matt     2350: {
                   2351:        int cleanlist_idx = 0;
                   2352:        struct pagelist {
                   2353:                vaddr_t va;
                   2354:                pt_entry_t *pte;
                   2355:        } cleanlist[PMAP_REMOVE_CLEAN_LIST_SIZE];
1.11      chris    2356:        pt_entry_t *pte = 0, *ptes;
1.2       matt     2357:        paddr_t pa;
1.1       matt     2358:        int pmap_active;
1.49      thorpej  2359:        struct vm_page *pg;
1.1       matt     2360:
                   2361:        /* Exit quick if there is no pmap */
                   2362:        if (!pmap)
                   2363:                return;
                   2364:
1.79      thorpej  2365:        PDEBUG(0, printf("pmap_remove: pmap=%p sva=%08lx eva=%08lx\n",
                   2366:            pmap, sva, eva));
1.1       matt     2367:
1.17      chris    2368:        /*
1.49      thorpej  2369:         * we lock in the pmap => vm_page direction
1.17      chris    2370:         */
                   2371:        PMAP_MAP_TO_HEAD_LOCK();
                   2372:
1.11      chris    2373:        ptes = pmap_map_ptes(pmap);
1.1       matt     2374:        /* Get a page table pointer */
                   2375:        while (sva < eva) {
1.30      rearnsha 2376:                if (pmap_pde_page(pmap_pde(pmap, sva)))
1.1       matt     2377:                        break;
1.81      thorpej  2378:                sva = (sva & L1_S_FRAME) + L1_S_SIZE;
1.1       matt     2379:        }
1.11      chris    2380:
1.56      thorpej  2381:        pte = &ptes[arm_btop(sva)];
1.1       matt     2382:        /* Note whether the pmap is active; if so, cache and TLB cleans are needed */
1.58      thorpej  2383:        pmap_active = pmap_is_curpmap(pmap);
1.1       matt     2384:
                   2385:        /* Now loop along */
                   2386:        while (sva < eva) {
                   2387:                /* Check if we can move to the next PDE (l1 chunk) */
1.113     thorpej  2388:                if ((sva & L2_ADDR_BITS) == 0) {
1.30      rearnsha 2389:                        if (!pmap_pde_page(pmap_pde(pmap, sva))) {
1.81      thorpej  2390:                                sva += L1_S_SIZE;
                   2391:                                pte += arm_btop(L1_S_SIZE);
1.1       matt     2392:                                continue;
                   2393:                        }
1.113     thorpej  2394:                }
1.1       matt     2395:
                    2396:                /* We've found a valid PTE, so this mapping has to go. */
                   2397:                if (pmap_pte_v(pte)) {
                   2398:                        /* Update statistics */
                   2399:                        --pmap->pm_stats.resident_count;
                   2400:
                   2401:                        /*
                   2402:                         * Add this page to our cache remove list, if we can.
                    2403:                         * If, however, the cache remove list is totally full,
                    2404:                         * then do a complete cache invalidation, taking care
                   2405:                         * to backtrack the PTE table beforehand, and ignore
                   2406:                         * the lists in future because there's no longer any
                   2407:                         * point in bothering with them (we've paid the
                   2408:                         * penalty, so will carry on unhindered). Otherwise,
                   2409:                         * when we fall out, we just clean the list.
                   2410:                         */
                   2411:                        PDEBUG(10, printf("remove: inv pte at %p(%x) ", pte, *pte));
                   2412:                        pa = pmap_pte_pa(pte);
                   2413:
                   2414:                        if (cleanlist_idx < PMAP_REMOVE_CLEAN_LIST_SIZE) {
                   2415:                                /* Add to the clean list. */
                   2416:                                cleanlist[cleanlist_idx].pte = pte;
                   2417:                                cleanlist[cleanlist_idx].va = sva;
                   2418:                                cleanlist_idx++;
                   2419:                        } else if (cleanlist_idx == PMAP_REMOVE_CLEAN_LIST_SIZE) {
                   2420:                                int cnt;
                   2421:
                   2422:                                /* Nuke everything if needed. */
                   2423:                                if (pmap_active) {
1.36      thorpej  2424:                                        cpu_idcache_wbinv_all();
1.1       matt     2425:                                        cpu_tlb_flushID();
                   2426:                                }
                   2427:
                   2428:                                /*
                   2429:                                 * Roll back the previous PTE list,
                   2430:                                 * and zero out the current PTE.
                   2431:                                 */
1.113     thorpej  2432:                                for (cnt = 0;
                   2433:                                     cnt < PMAP_REMOVE_CLEAN_LIST_SIZE;
                   2434:                                     cnt++) {
1.1       matt     2435:                                        *cleanlist[cnt].pte = 0;
1.113     thorpej  2436:                                        if (pmap_active)
                   2437:                                                PTE_SYNC(cleanlist[cnt].pte);
                   2438:                                        else
                   2439:                                                PTE_FLUSH(cleanlist[cnt].pte);
                   2440:                                        pmap_pte_delref(pmap,
                   2441:                                            cleanlist[cnt].va);
1.1       matt     2442:                                }
                   2443:                                *pte = 0;
1.113     thorpej  2444:                                if (pmap_active)
                   2445:                                        PTE_SYNC(pte);
                   2446:                                else
                   2447:                                        PTE_FLUSH(pte);
1.1       matt     2448:                                pmap_pte_delref(pmap, sva);
                   2449:                                cleanlist_idx++;
                   2450:                        } else {
                   2451:                                /*
                   2452:                                 * We've already nuked the cache and
                   2453:                                 * TLB, so just carry on regardless,
                   2454:                                 * and we won't need to do it again
                   2455:                                 */
                   2456:                                *pte = 0;
1.113     thorpej  2457:                                if (pmap_active)
                   2458:                                        PTE_SYNC(pte);
                   2459:                                else
                   2460:                                        PTE_FLUSH(pte);
1.1       matt     2461:                                pmap_pte_delref(pmap, sva);
                   2462:                        }
                   2463:
                   2464:                        /*
                   2465:                         * Update flags. In a number of circumstances,
                   2466:                         * we could cluster a lot of these and do a
                   2467:                         * number of sequential pages in one go.
                   2468:                         */
1.49      thorpej  2469:                        if ((pg = PHYS_TO_VM_PAGE(pa)) != NULL) {
1.17      chris    2470:                                struct pv_entry *pve;
1.49      thorpej  2471:                                simple_lock(&pg->mdpage.pvh_slock);
                   2472:                                pve = pmap_remove_pv(pg, pmap, sva);
1.17      chris    2473:                                pmap_free_pv(pmap, pve);
1.49      thorpej  2474:                                pmap_vac_me_harder(pmap, pg, ptes, FALSE);
                   2475:                                simple_unlock(&pg->mdpage.pvh_slock);
1.1       matt     2476:                        }
1.113     thorpej  2477:                } else if (pmap_active == 0)
                   2478:                        PTE_FLUSH(pte);
1.1       matt     2479:                sva += NBPG;
                   2480:                pte++;
                   2481:        }
                   2482:
                   2483:        /*
                    2484:         * Now, if we've fallen through to here, there are at most
                    2485:         * PMAP_REMOVE_CLEAN_LIST_SIZE mappings left on the clean list.
                   2486:         */
                   2487:        if (cleanlist_idx <= PMAP_REMOVE_CLEAN_LIST_SIZE) {
                   2488:                u_int cnt;
                   2489:
                   2490:                for (cnt = 0; cnt < cleanlist_idx; cnt++) {
                   2491:                        if (pmap_active) {
1.36      thorpej  2492:                                cpu_idcache_wbinv_range(cleanlist[cnt].va,
                   2493:                                    NBPG);
1.1       matt     2494:                                *cleanlist[cnt].pte = 0;
                   2495:                                cpu_tlb_flushID_SE(cleanlist[cnt].va);
1.113     thorpej  2496:                                PTE_SYNC(cleanlist[cnt].pte);
                   2497:                        } else {
1.1       matt     2498:                                *cleanlist[cnt].pte = 0;
1.113     thorpej  2499:                                PTE_FLUSH(cleanlist[cnt].pte);
                   2500:                        }
1.1       matt     2501:                        pmap_pte_delref(pmap, cleanlist[cnt].va);
                   2502:                }
                   2503:        }
1.104     thorpej  2504:
                   2505:        pmap_unmap_ptes(pmap);
                   2506:
1.17      chris    2507:        PMAP_MAP_TO_HEAD_UNLOCK();
1.1       matt     2508: }
                   2509:
                   2510: /*
                   2511:  * Routine:    pmap_remove_all
                   2512:  * Function:
                   2513:  *             Removes this physical page from
                   2514:  *             all physical maps in which it resides.
                   2515:  *             Reflects back modify bits to the pager.
                   2516:  */
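                         /*
                          * (Assumed caller, for orientation only: this routine is static
                          * and is normally reached when a page's protection is reduced to
                          * VM_PROT_NONE, e.g. via pmap_page_protect().)
                          */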
                   2517:
1.33      chris    2518: static void
1.73      thorpej  2519: pmap_remove_all(struct vm_page *pg)
1.1       matt     2520: {
1.17      chris    2521:        struct pv_entry *pv, *npv;
1.15      chris    2522:        struct pmap *pmap;
1.11      chris    2523:        pt_entry_t *pte, *ptes;
1.1       matt     2524:
1.49      thorpej  2525:        PDEBUG(0, printf("pmap_remove_all: pa=%lx ", VM_PAGE_TO_PHYS(pg)));
1.1       matt     2526:
1.49      thorpej  2527:        /* set vm_page => pmap locking */
1.17      chris    2528:        PMAP_HEAD_TO_MAP_LOCK();
1.1       matt     2529:
1.49      thorpej  2530:        simple_lock(&pg->mdpage.pvh_slock);
1.17      chris    2531:
1.49      thorpej  2532:        pv = pg->mdpage.pvh_list;
                   2533:        if (pv == NULL) {
                   2534:                PDEBUG(0, printf("free page\n"));
                   2535:                simple_unlock(&pg->mdpage.pvh_slock);
                   2536:                PMAP_HEAD_TO_MAP_UNLOCK();
                   2537:                return;
1.1       matt     2538:        }
1.17      chris    2539:        pmap_clean_page(pv, FALSE);
1.1       matt     2540:
                   2541:        while (pv) {
                   2542:                pmap = pv->pv_pmap;
1.11      chris    2543:                ptes = pmap_map_ptes(pmap);
1.56      thorpej  2544:                pte = &ptes[arm_btop(pv->pv_va)];
1.1       matt     2545:
                   2546:                PDEBUG(0, printf("[%p,%08x,%08lx,%08x] ", pmap, *pte,
                   2547:                    pv->pv_va, pv->pv_flags));
                   2548: #ifdef DEBUG
1.79      thorpej  2549:                if (pmap_pde_page(pmap_pde(pmap, pv->pv_va)) == 0 ||
                   2550:                    pmap_pte_v(pte) == 0 ||
                   2551:                    pmap_pte_pa(pte) != VM_PAGE_TO_PHYS(pg))
1.1       matt     2552:                        panic("pmap_remove_all: bad mapping");
                   2553: #endif /* DEBUG */
                   2554:
                   2555:                /*
                   2556:                 * Update statistics
                   2557:                 */
                   2558:                --pmap->pm_stats.resident_count;
                   2559:
                   2560:                /* Wired bit */
1.78      thorpej  2561:                if (pv->pv_flags & PVF_WIRED)
1.1       matt     2562:                        --pmap->pm_stats.wired_count;
                   2563:
                   2564:                /*
                   2565:                 * Invalidate the PTEs.
                   2566:                 * XXX: should cluster them up and invalidate as many
                   2567:                 * as possible at once.
                   2568:                 */
                   2569:
                   2570: #ifdef needednotdone
                   2571: reduce wiring count on page table pages as references drop
                   2572: #endif
                   2573:
                   2574:                *pte = 0;
1.113     thorpej  2575:                PTE_SYNC_CURRENT(pmap, pte);
1.1       matt     2576:                pmap_pte_delref(pmap, pv->pv_va);
                   2577:
                   2578:                npv = pv->pv_next;
1.17      chris    2579:                pmap_free_pv(pmap, pv);
1.1       matt     2580:                pv = npv;
1.11      chris    2581:                pmap_unmap_ptes(pmap);
1.1       matt     2582:        }
1.49      thorpej  2583:        pg->mdpage.pvh_list = NULL;
                   2584:        simple_unlock(&pg->mdpage.pvh_slock);
1.17      chris    2585:        PMAP_HEAD_TO_MAP_UNLOCK();
1.1       matt     2586:
                   2587:        PDEBUG(0, printf("done\n"));
                   2588:        cpu_tlb_flushID();
1.32      thorpej  2589:        cpu_cpwait();
1.1       matt     2590: }
                   2591:
                   2592:
                   2593: /*
                   2594:  * Set the physical protection on the specified range of this map as requested.
                   2595:  */
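                         /*
                          * For example, pmap_protect(pmap, sva, eva, VM_PROT_READ)
                          * write-protects the range.  Requests that remove read permission
                          * degenerate into pmap_remove(), and read->write "upgrades" are
                          * ignored and left for uvm_fault() to resolve lazily.
                          */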
                   2596:
                   2597: void
1.73      thorpej  2598: pmap_protect(struct pmap *pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
1.1       matt     2599: {
1.11      chris    2600:        pt_entry_t *pte = NULL, *ptes;
1.49      thorpej  2601:        struct vm_page *pg;
1.1       matt     2602:        int flush = 0;
                   2603:
                   2604:        PDEBUG(0, printf("pmap_protect: pmap=%p %08lx->%08lx %x\n",
                   2605:            pmap, sva, eva, prot));
                   2606:
                   2607:        if (~prot & VM_PROT_READ) {
1.107     thorpej  2608:                /*
                   2609:                 * Just remove the mappings.  pmap_update() is not required
                   2610:                 * here since the caller should do it.
                   2611:                 */
1.1       matt     2612:                pmap_remove(pmap, sva, eva);
                   2613:                return;
                   2614:        }
                   2615:        if (prot & VM_PROT_WRITE) {
                   2616:                /*
                   2617:                 * If this is a read->write transition, just ignore it and let
                   2618:                 * uvm_fault() take care of it later.
                   2619:                 */
                   2620:                return;
                   2621:        }
                   2622:
1.17      chris    2623:        /* Need to lock map->head */
                   2624:        PMAP_MAP_TO_HEAD_LOCK();
                   2625:
1.11      chris    2626:        ptes = pmap_map_ptes(pmap);
1.96      thorpej  2627:
                   2628:        /*
                    2629:         * OK, at this point, we know we're doing a write-protect operation.
                   2630:         * If the pmap is active, write-back the range.
                   2631:         */
                   2632:        if (pmap_is_curpmap(pmap))
                   2633:                cpu_dcache_wb_range(sva, eva - sva);
                   2634:
1.1       matt     2635:        /*
                   2636:         * We need to acquire a pointer to a page table page before entering
                   2637:         * the following loop.
                   2638:         */
                   2639:        while (sva < eva) {
1.30      rearnsha 2640:                if (pmap_pde_page(pmap_pde(pmap, sva)))
1.1       matt     2641:                        break;
1.81      thorpej  2642:                sva = (sva & L1_S_FRAME) + L1_S_SIZE;
1.1       matt     2643:        }
1.11      chris    2644:
1.56      thorpej  2645:        pte = &ptes[arm_btop(sva)];
1.17      chris    2646:
1.1       matt     2647:        while (sva < eva) {
                   2648:                /* only check once in a while */
1.81      thorpej  2649:                if ((sva & L2_ADDR_BITS) == 0) {
1.30      rearnsha 2650:                        if (!pmap_pde_page(pmap_pde(pmap, sva))) {
1.1       matt     2651:                                /* We can race ahead here, to the next pde. */
1.81      thorpej  2652:                                sva += L1_S_SIZE;
                   2653:                                pte += arm_btop(L1_S_SIZE);
1.1       matt     2654:                                continue;
                   2655:                        }
                   2656:                }
                   2657:
1.113     thorpej  2658:                if (!pmap_pte_v(pte)) {
                   2659:                        PTE_FLUSH_ALT(pmap, pte);
1.1       matt     2660:                        goto next;
1.113     thorpej  2661:                }
1.1       matt     2662:
                   2663:                flush = 1;
                   2664:
1.113     thorpej  2665:                pg = PHYS_TO_VM_PAGE(pmap_pte_pa(pte));
                   2666:
1.107     thorpej  2667:                *pte &= ~L2_S_PROT_W;           /* clear write bit */
1.113     thorpej  2668:                PTE_SYNC_CURRENT(pmap, pte);    /* XXXJRT optimize */
1.1       matt     2669:
                   2670:                /* Clear write flag */
1.113     thorpej  2671:                if (pg != NULL) {
1.49      thorpej  2672:                        simple_lock(&pg->mdpage.pvh_slock);
1.78      thorpej  2673:                        (void) pmap_modify_pv(pmap, sva, pg, PVF_WRITE, 0);
1.49      thorpej  2674:                        pmap_vac_me_harder(pmap, pg, ptes, FALSE);
                   2675:                        simple_unlock(&pg->mdpage.pvh_slock);
1.1       matt     2676:                }
                   2677:
1.107     thorpej  2678:  next:
1.1       matt     2679:                sva += NBPG;
                   2680:                pte++;
                   2681:        }
1.11      chris    2682:        pmap_unmap_ptes(pmap);
1.17      chris    2683:        PMAP_MAP_TO_HEAD_UNLOCK();
1.1       matt     2684:        if (flush)
                   2685:                cpu_tlb_flushID();
                   2686: }
                   2687:
                   2688: /*
1.15      chris    2689:  * void pmap_enter(struct pmap *pmap, vaddr_t va, paddr_t pa, vm_prot_t prot,
1.1       matt     2690:  * int flags)
                   2691:  *
                   2692:  *      Insert the given physical page (p) at
                   2693:  *      the specified virtual address (v) in the
                   2694:  *      target physical map with the protection requested.
                   2695:  *
                   2696:  *      If specified, the page will be wired down, meaning
                   2697:  *      that the related pte can not be reclaimed.
                   2698:  *
                   2699:  *      NB:  This is the only routine which MAY NOT lazy-evaluate
                   2700:  *      or lose information.  That is, this routine must actually
                   2701:  *      insert this page into the given map NOW.
                   2702:  */
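                         /*
                          * A minimal caller sketch (hypothetical values, for illustration
                          * only):
                          *
                          *      error = pmap_enter(pmap, va, pa,
                          *          VM_PROT_READ | VM_PROT_WRITE,
                          *          VM_PROT_WRITE | PMAP_WIRED | PMAP_CANFAIL);
                          *      if (error == ENOMEM) {
                          *              ... back off and retry later ...
                          *      }
                          *
                          * With PMAP_CANFAIL the routine returns ENOMEM instead of
                          * panicking when a page table page or pv entry cannot be
                          * allocated.
                          */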
                   2703:
                   2704: int
1.73      thorpej  2705: pmap_enter(struct pmap *pmap, vaddr_t va, paddr_t pa, vm_prot_t prot,
                   2706:     int flags)
1.1       matt     2707: {
1.66      thorpej  2708:        pt_entry_t *ptes, opte, npte;
1.2       matt     2709:        paddr_t opa;
1.1       matt     2710:        boolean_t wired = (flags & PMAP_WIRED) != 0;
1.49      thorpej  2711:        struct vm_page *pg;
1.17      chris    2712:        struct pv_entry *pve;
1.66      thorpej  2713:        int error, nflags;
1.1       matt     2714:
                   2715:        PDEBUG(5, printf("pmap_enter: V%08lx P%08lx in pmap %p prot=%08x, wired = %d\n",
                   2716:            va, pa, pmap, prot, wired));
                   2717:
                   2718: #ifdef DIAGNOSTIC
                   2719:        /* Valid address ? */
1.48      chris    2720:        if (va >= (pmap_curmaxkvaddr))
1.1       matt     2721:                panic("pmap_enter: too big");
                   2722:        if (pmap != pmap_kernel() && va != 0) {
                   2723:                if (va < VM_MIN_ADDRESS || va >= VM_MAXUSER_ADDRESS)
                   2724:                        panic("pmap_enter: kernel page in user map");
                   2725:        } else {
                   2726:                if (va >= VM_MIN_ADDRESS && va < VM_MAXUSER_ADDRESS)
                   2727:                        panic("pmap_enter: user page in kernel map");
                   2728:                if (va >= VM_MAXUSER_ADDRESS && va < VM_MAX_ADDRESS)
                   2729:                        panic("pmap_enter: entering PT page");
                   2730:        }
                   2731: #endif
1.79      thorpej  2732:
                   2733:        KDASSERT(((va | pa) & PGOFSET) == 0);
                   2734:
1.49      thorpej  2735:        /*
                   2736:         * Get a pointer to the page.  Later on in this function, we
                   2737:         * test for a managed page by checking pg != NULL.
                   2738:         */
1.55      thorpej  2739:        pg = pmap_initialized ? PHYS_TO_VM_PAGE(pa) : NULL;
1.49      thorpej  2740:
1.17      chris    2741:        /* get lock */
                   2742:        PMAP_MAP_TO_HEAD_LOCK();
1.66      thorpej  2743:
1.1       matt     2744:        /*
1.66      thorpej  2745:         * map the ptes.  If there's not already an L2 table for this
                   2746:         * address, allocate one.
1.1       matt     2747:         */
1.66      thorpej  2748:        ptes = pmap_map_ptes(pmap);             /* locks pmap */
                   2749:        if (pmap_pde_v(pmap_pde(pmap, va)) == 0) {
1.17      chris    2750:                struct vm_page *ptp;
1.57      thorpej  2751:
                   2752:                /* kernel should be pre-grown */
                   2753:                KASSERT(pmap != pmap_kernel());
1.17      chris    2754:
                   2755:                /* if failure is allowed then don't try too hard */
1.114     thorpej  2756:                ptp = pmap_get_ptp(pmap, va & PD_FRAME);
1.17      chris    2757:                if (ptp == NULL) {
                   2758:                        if (flags & PMAP_CANFAIL) {
                   2759:                                error = ENOMEM;
                   2760:                                goto out;
                   2761:                        }
                   2762:                        panic("pmap_enter: get ptp failed");
1.1       matt     2763:                }
                   2764:        }
1.66      thorpej  2765:        opte = ptes[arm_btop(va)];
1.1       matt     2766:
                   2767:        nflags = 0;
                   2768:        if (prot & VM_PROT_WRITE)
1.78      thorpej  2769:                nflags |= PVF_WRITE;
1.1       matt     2770:        if (wired)
1.78      thorpej  2771:                nflags |= PVF_WIRED;
1.1       matt     2772:
                   2773:        /* Is the pte valid ? If so then this page is already mapped */
1.66      thorpej  2774:        if (l2pte_valid(opte)) {
1.1       matt     2775:                /* Get the physical address of the current page mapped */
1.66      thorpej  2776:                opa = l2pte_pa(opte);
1.1       matt     2777:
                   2778:                /* Are we mapping the same page ? */
                   2779:                if (opa == pa) {
1.104     thorpej  2780:                        /* Check to see if we're doing rw->ro. */
                   2781:                        if ((opte & L2_S_PROT_W) != 0 &&
                   2782:                            (prot & VM_PROT_WRITE) == 0) {
                   2783:                                /* Yup, flush the cache if current pmap. */
                   2784:                                if (pmap_is_curpmap(pmap))
                   2785:                                        cpu_dcache_wb_range(va, NBPG);
                   2786:                        }
                   2787:
1.1       matt     2788:                        /* Has the wiring changed ? */
1.49      thorpej  2789:                        if (pg != NULL) {
                   2790:                                simple_lock(&pg->mdpage.pvh_slock);
                   2791:                                (void) pmap_modify_pv(pmap, va, pg,
1.78      thorpej  2792:                                    PVF_WRITE | PVF_WIRED, nflags);
1.49      thorpej  2793:                                simple_unlock(&pg->mdpage.pvh_slock);
                   2794:                        }
1.1       matt     2795:                } else {
1.49      thorpej  2796:                        struct vm_page *opg;
                   2797:
1.1       matt     2798:                        /* We are replacing the page with a new one. */
1.36      thorpej  2799:                        cpu_idcache_wbinv_range(va, NBPG);
1.1       matt     2800:
                   2801:                        /*
                   2802:                         * If it is part of our managed memory then we
                   2803:                         * must remove it from the PV list
                   2804:                         */
1.49      thorpej  2805:                        if ((opg = PHYS_TO_VM_PAGE(opa)) != NULL) {
                   2806:                                simple_lock(&opg->mdpage.pvh_slock);
                   2807:                                pve = pmap_remove_pv(opg, pmap, va);
                   2808:                                simple_unlock(&opg->mdpage.pvh_slock);
1.17      chris    2809:                        } else {
                   2810:                                pve = NULL;
1.1       matt     2811:                        }
                   2812:
                   2813:                        goto enter;
                   2814:                }
                   2815:        } else {
                   2816:                opa = 0;
1.17      chris    2817:                pve = NULL;
1.1       matt     2818:                pmap_pte_addref(pmap, va);
                   2819:
                   2820:                /* pte is not valid so we must be hooking in a new page */
                   2821:                ++pmap->pm_stats.resident_count;
                   2822:
                   2823:        enter:
                   2824:                /*
                   2825:                 * Enter on the PV list if part of our managed memory
                   2826:                 */
1.55      thorpej  2827:                if (pg != NULL) {
1.17      chris    2828:                        if (pve == NULL) {
                   2829:                                pve = pmap_alloc_pv(pmap, ALLOCPV_NEED);
                   2830:                                if (pve == NULL) {
                   2831:                                        if (flags & PMAP_CANFAIL) {
1.113     thorpej  2832:                                                PTE_FLUSH_ALT(pmap,
                   2833:                                                    ptes[arm_btop(va)]);
1.17      chris    2834:                                                error = ENOMEM;
                   2835:                                                goto out;
                   2836:                                        }
1.66      thorpej  2837:                                        panic("pmap_enter: no pv entries "
                   2838:                                            "available");
1.17      chris    2839:                                }
                   2840:                        }
                   2841:                        /* enter_pv locks pvh when adding */
1.49      thorpej  2842:                        pmap_enter_pv(pg, pve, pmap, va, NULL, nflags);
1.17      chris    2843:                } else {
                   2844:                        if (pve != NULL)
                   2845:                                pmap_free_pv(pmap, pve);
1.1       matt     2846:                }
                   2847:        }
                   2848:
                   2849:        /* Construct the pte, giving the correct access. */
1.79      thorpej  2850:        npte = pa;
1.1       matt     2851:
                    2852:        /* The vector page is magic: it never gets user permissions. */
1.77      thorpej  2853:        if (pmap != pmap_kernel() && va != vector_page)
1.83      thorpej  2854:                npte |= L2_S_PROT_U;
1.1       matt     2855:
1.55      thorpej  2856:        if (pg != NULL) {
1.1       matt     2857: #ifdef DIAGNOSTIC
                   2858:                if ((flags & VM_PROT_ALL) & ~prot)
                   2859:                        panic("pmap_enter: access_type exceeds prot");
                   2860: #endif
1.86      thorpej  2861:                npte |= pte_l2_s_cache_mode;
1.1       matt     2862:                if (flags & VM_PROT_WRITE) {
1.84      thorpej  2863:                        npte |= L2_S_PROTO | L2_S_PROT_W;
1.78      thorpej  2864:                        pg->mdpage.pvh_attrs |= PVF_REF | PVF_MOD;
1.1       matt     2865:                } else if (flags & VM_PROT_ALL) {
1.84      thorpej  2866:                        npte |= L2_S_PROTO;
1.78      thorpej  2867:                        pg->mdpage.pvh_attrs |= PVF_REF;
1.1       matt     2868:                } else
1.81      thorpej  2869:                        npte |= L2_TYPE_INV;
1.1       matt     2870:        } else {
                   2871:                if (prot & VM_PROT_WRITE)
1.84      thorpej  2872:                        npte |= L2_S_PROTO | L2_S_PROT_W;
1.1       matt     2873:                else if (prot & VM_PROT_ALL)
1.84      thorpej  2874:                        npte |= L2_S_PROTO;
1.1       matt     2875:                else
1.81      thorpej  2876:                        npte |= L2_TYPE_INV;
1.1       matt     2877:        }
                   2878:
1.109     thorpej  2879: #if ARM_MMU_XSCALE == 1 && defined(XSCALE_CACHE_READ_WRITE_ALLOCATE)
                   2880: #if ARM_NMMUS > 1
                   2881: # error "XXX Unable to use read/write-allocate and configure non-XScale"
                   2882: #endif
                   2883:        /*
                   2884:         * XXX BRUTAL HACK!  This allows us to limp along with
                   2885:         * XXX the read/write-allocate cache mode.
                   2886:         */
                   2887:        if (pmap == pmap_kernel())
                   2888:                npte &= ~L2_XSCALE_T_TEX(TEX_XSCALE_X);
                   2889: #endif
1.66      thorpej  2890:        ptes[arm_btop(va)] = npte;
1.113     thorpej  2891:        PTE_SYNC_CURRENT(pmap, &ptes[arm_btop(va)]);
1.1       matt     2892:
1.55      thorpej  2893:        if (pg != NULL) {
1.49      thorpej  2894:                simple_lock(&pg->mdpage.pvh_slock);
1.59      thorpej  2895:                pmap_vac_me_harder(pmap, pg, ptes, pmap_is_curpmap(pmap));
1.49      thorpej  2896:                simple_unlock(&pg->mdpage.pvh_slock);
1.11      chris    2897:        }
1.1       matt     2898:
                   2899:        /* Better flush the TLB ... */
                   2900:        cpu_tlb_flushID_SE(va);
1.17      chris    2901:        error = 0;
                   2902: out:
1.66      thorpej  2903:        pmap_unmap_ptes(pmap);                  /* unlocks pmap */
1.17      chris    2904:        PMAP_MAP_TO_HEAD_UNLOCK();
1.1       matt     2905:
1.17      chris    2906:        return error;
1.1       matt     2907: }
                   2908:
1.48      chris    2909: /*
                   2910:  * pmap_kenter_pa: enter a kernel mapping
                   2911:  *
                    2912:  * => no need to lock anything; assume va is already allocated
                    2913:  * => should be faster than the normal pmap_enter() function
                   2914:  */
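                         /*
                          * Usage sketch (hypothetical caller, for illustration only; the
                          * kernel va is assumed to have been allocated beforehand, e.g.
                          * with uvm_km_valloc()):
                          *
                          *      pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE);
                          *      ... access the page through va ...
                          *      pmap_kremove(va, PAGE_SIZE);
                          *      pmap_update(pmap_kernel());
                          */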
1.1       matt     2915: void
1.73      thorpej  2916: pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot)
1.1       matt     2917: {
1.13      chris    2918:        pt_entry_t *pte;
1.105     thorpej  2919:
1.13      chris    2920:        pte = vtopte(va);
1.14      chs      2921:        KASSERT(!pmap_pte_v(pte));
1.83      thorpej  2922:
1.105     thorpej  2923: #ifdef PMAP_ALIAS_DEBUG
                   2924:     {
                   2925:        struct vm_page *pg;
                   2926:        int s;
                   2927:
                   2928:        pg = PHYS_TO_VM_PAGE(pa);
                   2929:        if (pg != NULL) {
                   2930:                s = splhigh();
                   2931:                if (pg->mdpage.ro_mappings == 0 &&
                   2932:                    pg->mdpage.rw_mappings == 0 &&
                   2933:                    pg->mdpage.kro_mappings == 0 &&
                   2934:                    pg->mdpage.krw_mappings == 0) {
                   2935:                        /* This case is okay. */
                   2936:                } else if (pg->mdpage.rw_mappings == 0 &&
                   2937:                           pg->mdpage.krw_mappings == 0 &&
                   2938:                           (prot & VM_PROT_WRITE) == 0) {
                   2939:                        /* This case is okay. */
                   2940:                } else {
                   2941:                        /* Something is awry. */
                   2942:                        printf("pmap_kenter_pa: ro %u, rw %u, kro %u, krw %u "
                   2943:                            "prot 0x%x\n", pg->mdpage.ro_mappings,
                   2944:                            pg->mdpage.rw_mappings, pg->mdpage.kro_mappings,
                   2945:                            pg->mdpage.krw_mappings, prot);
                   2946:                        Debugger();
                   2947:                }
                   2948:                if (prot & VM_PROT_WRITE)
                   2949:                        pg->mdpage.krw_mappings++;
                   2950:                else
                   2951:                        pg->mdpage.kro_mappings++;
                   2952:                splx(s);
                   2953:        }
                   2954:     }
                   2955: #endif /* PMAP_ALIAS_DEBUG */
                   2956:
1.83      thorpej  2957:        *pte = L2_S_PROTO | pa |
1.90      thorpej  2958:            L2_S_PROT(PTE_KERNEL, prot) | pte_l2_s_cache_mode;
1.112     thorpej  2959:        PTE_SYNC(pte);
1.1       matt     2960: }
                   2961:
                   2962: void
1.73      thorpej  2963: pmap_kremove(vaddr_t va, vsize_t len)
1.1       matt     2964: {
1.14      chs      2965:        pt_entry_t *pte;
1.112     thorpej  2966:        vaddr_t ova = va;
                   2967:        vaddr_t olen = len;
1.14      chs      2968:
1.1       matt     2969:        for (len >>= PAGE_SHIFT; len > 0; len--, va += PAGE_SIZE) {
1.13      chris    2970:
1.14      chs      2971:                /*
                   2972:                 * We assume that we will only be called with small
                   2973:                 * regions of memory.
                   2974:                 */
                   2975:
1.30      rearnsha 2976:                KASSERT(pmap_pde_page(pmap_pde(pmap_kernel(), va)));
1.13      chris    2977:                pte = vtopte(va);
1.105     thorpej  2978: #ifdef PMAP_ALIAS_DEBUG
                   2979:     {
                   2980:                struct vm_page *pg;
                   2981:                int s;
                   2982:
                   2983:                if ((*pte & L2_TYPE_MASK) != L2_TYPE_INV &&
                   2984:                    (pg = PHYS_TO_VM_PAGE(*pte & L2_S_FRAME)) != NULL) {
                   2985:                        s = splhigh();
                   2986:                        if (*pte & L2_S_PROT_W) {
                   2987:                                KASSERT(pg->mdpage.krw_mappings != 0);
                   2988:                                pg->mdpage.krw_mappings--;
                   2989:                        } else {
                   2990:                                KASSERT(pg->mdpage.kro_mappings != 0);
                   2991:                                pg->mdpage.kro_mappings--;
                   2992:                        }
                   2993:                        splx(s);
                   2994:                }
                   2995:     }
                   2996: #endif /* PMAP_ALIAS_DEBUG */
1.36      thorpej  2997:                cpu_idcache_wbinv_range(va, PAGE_SIZE);
1.13      chris    2998:                *pte = 0;
                   2999:                cpu_tlb_flushID_SE(va);
1.1       matt     3000:        }
1.112     thorpej  3001:        PTE_SYNC_RANGE(vtopte(ova), olen >> PAGE_SHIFT);
1.1       matt     3002: }
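
                         /*
                          * Illustrative sketch (added; not part of the original source):
                          * a typical caller pairs pmap_kenter_pa() with pmap_kremove() to
                          * create a short-lived kernel mapping of a known physical page.
                          * The virtual address "kva" is assumed to have been reserved
                          * elsewhere (e.g. with uvm_km_valloc()); it is not allocated here.
                          *
                          *        pmap_kenter_pa(kva, pa, VM_PROT_READ | VM_PROT_WRITE);
                          *        ... access the page through kva ...
                          *        pmap_kremove(kva, PAGE_SIZE);
                          *        pmap_update(pmap_kernel());
                          */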
                   3003:
                   3004: /*
                   3005:  * pmap_page_protect:
                   3006:  *
                   3007:  * Lower the permission for all mappings to a given page.
                   3008:  */
                   3009:
                   3010: void
1.73      thorpej  3011: pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
1.1       matt     3012: {
                   3013:
1.49      thorpej  3014:        PDEBUG(0, printf("pmap_page_protect(pa=%lx, prot=%d)\n",
                   3015:            VM_PAGE_TO_PHYS(pg), prot));
1.1       matt     3016:
                   3017:        switch(prot) {
1.17      chris    3018:        case VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE:
                   3019:        case VM_PROT_READ|VM_PROT_WRITE:
                   3020:                return;
                   3021:
1.1       matt     3022:        case VM_PROT_READ:
                   3023:        case VM_PROT_READ|VM_PROT_EXECUTE:
1.78      thorpej  3024:                pmap_clearbit(pg, PVF_WRITE);
1.1       matt     3025:                break;
                   3026:
                   3027:        default:
1.49      thorpej  3028:                pmap_remove_all(pg);
1.1       matt     3029:                break;
                   3030:        }
                   3031: }
                   3032:
                   3033:
                   3034: /*
                   3035:  * Routine:    pmap_unwire
                   3036:  * Function:   Clear the wired attribute for a map/virtual-address
                   3037:  *             pair.
                   3038:  * In/out conditions:
                   3039:  *             The mapping must already exist in the pmap.
                   3040:  */
                   3041:
                   3042: void
1.73      thorpej  3043: pmap_unwire(struct pmap *pmap, vaddr_t va)
1.1       matt     3044: {
1.60      thorpej  3045:        pt_entry_t *ptes;
                   3046:        struct vm_page *pg;
1.2       matt     3047:        paddr_t pa;
1.1       matt     3048:
1.60      thorpej  3049:        PMAP_MAP_TO_HEAD_LOCK();
                   3050:        ptes = pmap_map_ptes(pmap);             /* locks pmap */
1.1       matt     3051:
1.60      thorpej  3052:        if (pmap_pde_v(pmap_pde(pmap, va))) {
                   3053: #ifdef DIAGNOSTIC
                   3054:                if (l2pte_valid(ptes[arm_btop(va)]) == 0)
                   3055:                        panic("pmap_unwire: invalid L2 PTE");
                   3056: #endif
                   3057:                /* Extract the physical address of the page */
                   3058:                pa = l2pte_pa(ptes[arm_btop(va)]);
1.113     thorpej  3059:                PTE_FLUSH_ALT(pmap, &ptes[arm_btop(va)]);
1.1       matt     3060:
1.60      thorpej  3061:                if ((pg = PHYS_TO_VM_PAGE(pa)) == NULL)
                   3062:                        goto out;
1.1       matt     3063:
1.60      thorpej  3064:                /* Update the wired bit in the pv entry for this page. */
                   3065:                simple_lock(&pg->mdpage.pvh_slock);
1.78      thorpej  3066:                (void) pmap_modify_pv(pmap, va, pg, PVF_WIRED, 0);
1.60      thorpej  3067:                simple_unlock(&pg->mdpage.pvh_slock);
                   3068:        }
                   3069: #ifdef DIAGNOSTIC
                   3070:        else {
                   3071:                panic("pmap_unwire: invalid L1 PTE");
                   3072:        }
                   3073: #endif
                   3074:  out:
                   3075:        pmap_unmap_ptes(pmap);                  /* unlocks pmap */
                   3076:        PMAP_MAP_TO_HEAD_UNLOCK();
1.1       matt     3077: }
                   3078:
                   3079: /*
                   3080:  * Routine:  pmap_extract
                   3081:  * Function:
                   3082:  *           Extract the physical page address associated
                   3083:  *           with the given map/virtual_address pair.
                   3084:  */
                   3085: boolean_t
1.73      thorpej  3086: pmap_extract(struct pmap *pmap, vaddr_t va, paddr_t *pap)
1.1       matt     3087: {
1.34      thorpej  3088:        pd_entry_t *pde;
1.11      chris    3089:        pt_entry_t *pte, *ptes;
1.1       matt     3090:        paddr_t pa;
                   3091:
1.82      thorpej  3092:        PDEBUG(5, printf("pmap_extract: pmap=%p, va=0x%08lx -> ", pmap, va));
                   3093:
                   3094:        ptes = pmap_map_ptes(pmap);             /* locks pmap */
1.1       matt     3095:
1.34      thorpej  3096:        pde = pmap_pde(pmap, va);
1.56      thorpej  3097:        pte = &ptes[arm_btop(va)];
1.1       matt     3098:
1.82      thorpej  3099:        if (pmap_pde_section(pde)) {
                   3100:                pa = (*pde & L1_S_FRAME) | (va & L1_S_OFFSET);
                   3101:                PDEBUG(5, printf("section pa=0x%08lx\n", pa));
                   3102:                goto out;
                   3103:        } else if (pmap_pde_page(pde) == 0 || pmap_pte_v(pte) == 0) {
                   3104:                PDEBUG(5, printf("no mapping\n"));
                   3105:                goto failed;
                   3106:        }
1.75      reinoud  3107:
1.82      thorpej  3108:        if ((*pte & L2_TYPE_MASK) == L2_TYPE_L) {
                   3109:                pa = (*pte & L2_L_FRAME) | (va & L2_L_OFFSET);
                   3110:                PDEBUG(5, printf("large page pa=0x%08lx\n", pa));
                   3111:                goto out;
                   3112:        }
1.1       matt     3113:
1.82      thorpej  3114:        pa = (*pte & L2_S_FRAME) | (va & L2_S_OFFSET);
                   3115:        PDEBUG(5, printf("small page pa=0x%08lx\n", pa));
1.1       matt     3116:
1.82      thorpej  3117:  out:
                   3118:        if (pap != NULL)
                   3119:                *pap = pa;
1.1       matt     3120:
1.113     thorpej  3121:        PTE_FLUSH_ALT(pmap, &ptes[arm_btop(va)]);
1.82      thorpej  3122:        pmap_unmap_ptes(pmap);                  /* unlocks pmap */
                   3123:        return (TRUE);
1.34      thorpej  3124:
1.82      thorpej  3125:  failed:
1.113     thorpej  3126:        PTE_FLUSH_ALT(pmap, &ptes[arm_btop(va)]);
1.82      thorpej  3127:        pmap_unmap_ptes(pmap);                  /* unlocks pmap */
                   3128:        return (FALSE);
1.1       matt     3129: }
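
                         /*
                          * Illustrative sketch (added; not part of the original source):
                          * callers check the boolean result of pmap_extract() before
                          * trusting the physical address returned through "pap":
                          *
                          *        paddr_t pa;
                          *
                          *        if (pmap_extract(pmap_kernel(), va, &pa) == FALSE)
                          *                panic("no mapping for va 0x%08lx", va);
                          */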
                   3130:
                   3131:
                   3132: /*
1.73      thorpej  3133:  * pmap_copy:
1.1       matt     3134:  *
1.73      thorpej  3135:  *     Copy the range specified by src_addr/len from the source map to the
                   3136:  *     range dst_addr/len in the destination map.
                   3137:  *
                   3138:  *     This routine is only advisory and need not do anything.
1.1       matt     3139:  */
1.73      thorpej  3140: /* Call deleted in <arm/arm32/pmap.h> */
1.1       matt     3141:
                   3142: #if defined(PMAP_DEBUG)
                   3143: void
                   3144: pmap_dump_pvlist(vaddr_t phys, char *m)
                   3147: {
1.49      thorpej  3148:        struct vm_page *pg;
1.1       matt     3149:        struct pv_entry *pv;
                   3150:
1.49      thorpej  3151:        if ((pg = PHYS_TO_VM_PAGE(phys)) == NULL) {
1.1       matt     3152:                printf("INVALID PA\n");
                   3153:                return;
                   3154:        }
1.49      thorpej  3155:        simple_lock(&pg->mdpage.pvh_slock);
1.1       matt     3156:        printf("%s %08lx:", m, phys);
1.49      thorpej  3157:        if (pg->mdpage.pvh_list == NULL) {
1.97      chris    3158:                simple_unlock(&pg->mdpage.pvh_slock);
1.1       matt     3159:                printf(" no mappings\n");
                   3160:                return;
                   3161:        }
                   3162:
1.49      thorpej  3163:        for (pv = pg->mdpage.pvh_list; pv; pv = pv->pv_next)
1.1       matt     3164:                printf(" pmap %p va %08lx flags %08x", pv->pv_pmap,
                   3165:                    pv->pv_va, pv->pv_flags);
                   3166:
                   3167:        printf("\n");
1.49      thorpej  3168:        simple_unlock(&pg->mdpage.pvh_slock);
1.1       matt     3169: }
                   3170:
                   3171: #endif /* PMAP_DEBUG */
                   3172:
1.11      chris    3173: static pt_entry_t *
                   3174: pmap_map_ptes(struct pmap *pmap)
                   3175: {
1.72      thorpej  3176:        struct proc *p;
1.17      chris    3177:
                   3178:        /* the kernel's pmap is always accessible */
                   3179:        if (pmap == pmap_kernel()) {
1.72      thorpej  3180:                return (pt_entry_t *)PTE_BASE;
1.17      chris    3181:        }
                   3182:
                   3183:        if (pmap_is_curpmap(pmap)) {
                   3184:                simple_lock(&pmap->pm_obj.vmobjlock);
1.53      thorpej  3185:                return (pt_entry_t *)PTE_BASE;
1.17      chris    3186:        }
1.72      thorpej  3187:
1.17      chris    3188:        p = curproc;
1.72      thorpej  3189:        KDASSERT(p != NULL);
1.17      chris    3190:
                   3191:        /* need to lock both curpmap and pmap: use ordered locking */
1.72      thorpej  3192:        if ((vaddr_t) pmap < (vaddr_t) p->p_vmspace->vm_map.pmap) {
1.17      chris    3193:                simple_lock(&pmap->pm_obj.vmobjlock);
1.72      thorpej  3194:                simple_lock(&p->p_vmspace->vm_map.pmap->pm_obj.vmobjlock);
1.17      chris    3195:        } else {
1.72      thorpej  3196:                simple_lock(&p->p_vmspace->vm_map.pmap->pm_obj.vmobjlock);
1.17      chris    3197:                simple_lock(&pmap->pm_obj.vmobjlock);
                   3198:        }
1.11      chris    3199:
1.113     thorpej  3200:        pmap_map_in_l1(p->p_vmspace->vm_map.pmap, APTE_BASE,
                   3201:            pmap->pm_pptpt, 0);
1.17      chris    3202:        cpu_tlb_flushD();
1.32      thorpej  3203:        cpu_cpwait();
1.53      thorpej  3204:        return (pt_entry_t *)APTE_BASE;
1.17      chris    3205: }
                   3206:
                   3207: /*
                   3208:  * pmap_unmap_ptes: unlock the PTE mapping of "pmap"
                   3209:  */
                   3210:
                   3211: static void
1.73      thorpej  3212: pmap_unmap_ptes(struct pmap *pmap)
1.17      chris    3213: {
1.72      thorpej  3214:
1.17      chris    3215:        if (pmap == pmap_kernel()) {
                   3216:                return;
                   3217:        }
                   3218:        if (pmap_is_curpmap(pmap)) {
                   3219:                simple_unlock(&pmap->pm_obj.vmobjlock);
                   3220:        } else {
1.72      thorpej  3221:                KDASSERT(curproc != NULL);
1.17      chris    3222:                simple_unlock(&pmap->pm_obj.vmobjlock);
1.72      thorpej  3223:                simple_unlock(
                   3224:                    &curproc->p_vmspace->vm_map.pmap->pm_obj.vmobjlock);
1.17      chris    3225:        }
1.11      chris    3226: }
1.1       matt     3227:
                   3228: /*
                   3229:  * Modify pte bits for all ptes corresponding to the given physical address.
                   3230:  * We use `maskbits' rather than `clearbits' because we're always passing
                   3231:  * constants and the latter would require an extra inversion at run-time.
                   3232:  */
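
                         /*
                          * Example (added; not in the original source): callers always pass
                          * a constant mask, e.g. pmap_clearbit(pg, PVF_WRITE) from
                          * pmap_page_protect(), or pmap_clearbit(pg, PVF_MOD) and
                          * pmap_clearbit(pg, PVF_REF) from pmap_clear_modify() and
                          * pmap_clear_reference() respectively.
                          */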
                   3233:
1.22      chris    3234: static void
1.73      thorpej  3235: pmap_clearbit(struct vm_page *pg, u_int maskbits)
1.1       matt     3236: {
                   3237:        struct pv_entry *pv;
1.104     thorpej  3238:        pt_entry_t *ptes, npte, opte;
1.1       matt     3239:        vaddr_t va;
                   3240:
                   3241:        PDEBUG(1, printf("pmap_clearbit: pa=%08lx mask=%08x\n",
1.49      thorpej  3242:            VM_PAGE_TO_PHYS(pg), maskbits));
1.21      chris    3243:
1.17      chris    3244:        PMAP_HEAD_TO_MAP_LOCK();
1.49      thorpej  3245:        simple_lock(&pg->mdpage.pvh_slock);
1.17      chris    3246:
1.1       matt     3247:        /*
                   3248:         * Clear saved attributes (modify, reference)
                   3249:         */
1.49      thorpej  3250:        pg->mdpage.pvh_attrs &= ~maskbits;
1.1       matt     3251:
1.49      thorpej  3252:        if (pg->mdpage.pvh_list == NULL) {
                   3253:                simple_unlock(&pg->mdpage.pvh_slock);
1.17      chris    3254:                PMAP_HEAD_TO_MAP_UNLOCK();
1.1       matt     3255:                return;
                   3256:        }
                   3257:
                   3258:        /*
                   3259:         * Loop over all current mappings, setting/clearing as appropriate
                   3260:         */
1.49      thorpej  3261:        for (pv = pg->mdpage.pvh_list; pv; pv = pv->pv_next) {
1.105     thorpej  3262: #ifdef PMAP_ALIAS_DEBUG
                   3263:     {
                   3264:                int s = splhigh();
                   3265:                if ((maskbits & PVF_WRITE) != 0 &&
                   3266:                    (pv->pv_flags & PVF_WRITE) != 0) {
                   3267:                        KASSERT(pg->mdpage.rw_mappings != 0);
                   3268:                        pg->mdpage.rw_mappings--;
                   3269:                        pg->mdpage.ro_mappings++;
                   3270:                }
                   3271:                splx(s);
                   3272:     }
                   3273: #endif /* PMAP_ALIAS_DEBUG */
1.1       matt     3274:                va = pv->pv_va;
                   3275:                pv->pv_flags &= ~maskbits;
1.59      thorpej  3276:                ptes = pmap_map_ptes(pv->pv_pmap);      /* locks pmap */
                   3277:                KASSERT(pmap_pde_v(pmap_pde(pv->pv_pmap, va)));
1.104     thorpej  3278:                npte = opte = ptes[arm_btop(va)];
1.78      thorpej  3279:                if (maskbits & (PVF_WRITE|PVF_MOD)) {
                   3280:                        if ((pv->pv_flags & PVF_NC)) {
1.29      rearnsha 3281:                                /*
                   3282:                                 * Entry is not cacheable: reenable
                   3283:                                 * the cache, nothing to flush
                   3284:                                 *
                   3285:                                 * Don't turn caching on again if this
                   3286:                                 * is a modified emulation.  This
                   3287:                                 * would be inconsistent with the
                   3288:                                 * settings created by
                   3289:                                 * pmap_vac_me_harder().
                   3290:                                 *
                   3291:                                 * There's no need to call
                   3292:                                 * pmap_vac_me_harder() here: all
                   3293:                                 * pages are losing their write
                   3294:                                 * permission.
                   3295:                                 *
                   3296:                                 */
1.78      thorpej  3297:                                if (maskbits & PVF_WRITE) {
1.104     thorpej  3298:                                        npte |= pte_l2_s_cache_mode;
1.78      thorpej  3299:                                        pv->pv_flags &= ~PVF_NC;
1.29      rearnsha 3300:                                }
1.59      thorpej  3301:                        } else if (pmap_is_curpmap(pv->pv_pmap)) {
1.29      rearnsha 3302:                                /*
                   3303:                                 * Entry is cacheable: if the pmap is
                   3304:                                 * current, flush the cache entry;
                   3305:                                 * otherwise it won't be in the cache
                   3306:                                 */
1.36      thorpej  3307:                                cpu_idcache_wbinv_range(pv->pv_va, NBPG);
1.59      thorpej  3308:                        }
1.29      rearnsha 3309:
                   3310:                        /* make the pte read only */
1.104     thorpej  3311:                        npte &= ~L2_S_PROT_W;
1.29      rearnsha 3312:                }
                   3313:
1.104     thorpej  3314:                if (maskbits & PVF_REF) {
                   3315:                        if (pmap_is_curpmap(pv->pv_pmap) &&
                   3316:                            (pv->pv_flags & PVF_NC) == 0) {
                   3317:                                /*
                   3318:                                 * Check npte here; we may have already
                   3319:                                 * done the wbinv above, and the validity
                   3320:                                 * of the PTE is the same for opte and
                   3321:                                 * npte.
                   3322:                                 */
                   3323:                                if (npte & L2_S_PROT_W) {
                   3324:                                        cpu_idcache_wbinv_range(pv->pv_va,
                   3325:                                            NBPG);
                   3326:                                } else if ((npte & L2_TYPE_MASK)
                   3327:                                           != L2_TYPE_INV) {
                   3328:                                        /* XXXJRT need idcache_inv_range */
                   3329:                                        cpu_idcache_wbinv_range(pv->pv_va,
                   3330:                                            NBPG);
                   3331:                                }
                   3332:                        }
                   3333:
                   3334:                        /* make the pte invalid */
                   3335:                        npte = (npte & ~L2_TYPE_MASK) | L2_TYPE_INV;
                   3336:                }
1.21      chris    3337:
1.104     thorpej  3338:                if (npte != opte) {
                   3339:                        ptes[arm_btop(va)] = npte;
1.113     thorpej  3340:                        PTE_SYNC_CURRENT(pv->pv_pmap, &ptes[arm_btop(va)]);
1.104     thorpej  3341:                        /* Flush the TLB entry if this is the current pmap. */
                   3342:                        if (pmap_is_curpmap(pv->pv_pmap))
                   3343:                                cpu_tlb_flushID_SE(pv->pv_va);
1.113     thorpej  3344:                } else
                   3345:                        PTE_FLUSH_ALT(pv->pv_pmap, &ptes[arm_btop(va)]);
1.104     thorpej  3346:
1.59      thorpej  3347:                pmap_unmap_ptes(pv->pv_pmap);           /* unlocks pmap */
1.29      rearnsha 3348:        }
1.32      thorpej  3349:        cpu_cpwait();
1.21      chris    3350:
1.49      thorpej  3351:        simple_unlock(&pg->mdpage.pvh_slock);
1.17      chris    3352:        PMAP_HEAD_TO_MAP_UNLOCK();
1.1       matt     3353: }
                   3354:
1.50      thorpej  3355: /*
                   3356:  * pmap_clear_modify:
                   3357:  *
                   3358:  *     Clear the "modified" attribute for a page.
                   3359:  */
1.1       matt     3360: boolean_t
1.73      thorpej  3361: pmap_clear_modify(struct vm_page *pg)
1.1       matt     3362: {
                   3363:        boolean_t rv;
                   3364:
1.78      thorpej  3365:        if (pg->mdpage.pvh_attrs & PVF_MOD) {
1.50      thorpej  3366:                rv = TRUE;
1.78      thorpej  3367:                pmap_clearbit(pg, PVF_MOD);
1.50      thorpej  3368:        } else
                   3369:                rv = FALSE;
                   3370:
                   3371:        PDEBUG(0, printf("pmap_clear_modify pa=%08lx -> %d\n",
                   3372:            VM_PAGE_TO_PHYS(pg), rv));
                   3373:
                   3374:        return (rv);
1.1       matt     3375: }
                   3376:
1.50      thorpej  3377: /*
                   3378:  * pmap_clear_reference:
                   3379:  *
                   3380:  *     Clear the "referenced" attribute for a page.
                   3381:  */
1.1       matt     3382: boolean_t
1.73      thorpej  3383: pmap_clear_reference(struct vm_page *pg)
1.1       matt     3384: {
                   3385:        boolean_t rv;
                   3386:
1.78      thorpej  3387:        if (pg->mdpage.pvh_attrs & PVF_REF) {
1.50      thorpej  3388:                rv = TRUE;
1.78      thorpej  3389:                pmap_clearbit(pg, PVF_REF);
1.50      thorpej  3390:        } else
                   3391:                rv = FALSE;
                   3392:
                   3393:        PDEBUG(0, printf("pmap_clear_reference pa=%08lx -> %d\n",
                   3394:            VM_PAGE_TO_PHYS(pg), rv));
                   3395:
                   3396:        return (rv);
1.1       matt     3397: }
                   3398:
1.50      thorpej  3399: /*
                   3400:  * pmap_is_modified:
                   3401:  *
                   3402:  *     Test if a page has the "modified" attribute.
                   3403:  */
                   3404: /* See <arm/arm32/pmap.h> */
1.39      thorpej  3405:
1.50      thorpej  3406: /*
                   3407:  * pmap_is_referenced:
                   3408:  *
                   3409:  *     Test if a page has the "referenced" attribute.
                   3410:  */
                   3411: /* See <arm/arm32/pmap.h> */
1.1       matt     3412:
                   3413: int
1.73      thorpej  3414: pmap_modified_emulation(struct pmap *pmap, vaddr_t va)
1.1       matt     3415: {
1.61      thorpej  3416:        pt_entry_t *ptes;
                   3417:        struct vm_page *pg;
1.2       matt     3418:        paddr_t pa;
1.1       matt     3419:        u_int flags;
1.61      thorpej  3420:        int rv = 0;
1.1       matt     3421:
                   3422:        PDEBUG(2, printf("pmap_modified_emulation\n"));
                   3423:
1.61      thorpej  3424:        PMAP_MAP_TO_HEAD_LOCK();
1.62      thorpej  3425:        ptes = pmap_map_ptes(pmap);             /* locks pmap */
1.61      thorpej  3426:
                   3427:        if (pmap_pde_v(pmap_pde(pmap, va)) == 0) {
                   3428:                PDEBUG(2, printf("L1 PTE invalid\n"));
                   3429:                goto out;
1.1       matt     3430:        }
                   3431:
1.61      thorpej  3432:        PDEBUG(1, printf("pte=%08x\n", ptes[arm_btop(va)]));
1.1       matt     3433:
1.113     thorpej  3434:        /*
                   3435:         * Don't need to PTE_FLUSH_ALT() here; this is always done
                   3436:         * with the current pmap.
                   3437:         */
                   3438:
1.61      thorpej  3439:        /* Check for an invalid PTE */
                   3440:        if (l2pte_valid(ptes[arm_btop(va)]) == 0)
                   3441:                goto out;
1.1       matt     3442:
                   3443:        /* This can happen if user code tries to access kernel memory. */
1.83      thorpej  3444:        if ((ptes[arm_btop(va)] & L2_S_PROT_W) != 0)
1.61      thorpej  3445:                goto out;
1.1       matt     3446:
                   3447:        /* Extract the physical address of the page */
1.61      thorpej  3448:        pa = l2pte_pa(ptes[arm_btop(va)]);
1.49      thorpej  3449:        if ((pg = PHYS_TO_VM_PAGE(pa)) == NULL)
1.61      thorpej  3450:                goto out;
1.1       matt     3451:
1.49      thorpej  3452:        /* Get the current flags for this page. */
                   3453:        simple_lock(&pg->mdpage.pvh_slock);
1.17      chris    3454:
1.49      thorpej  3455:        flags = pmap_modify_pv(pmap, va, pg, 0, 0);
1.1       matt     3456:        PDEBUG(2, printf("pmap_modified_emulation: flags = %08x\n", flags));
                   3457:
                   3458:        /*
                   3459:         * Do the flags say this page is writable?  If not, then this is a
                   3460:         * genuine write fault.  If so, then the fault is ours: we did not
                   3461:         * reflect the write permission in the PTE.  Now that we know a
                   3462:         * write has occurred, we can correct this and also set the
                   3463:         * modified bit.
                   3464:         */
1.78      thorpej  3465:        if (~flags & PVF_WRITE) {
1.49      thorpej  3466:                simple_unlock(&pg->mdpage.pvh_slock);
1.61      thorpej  3467:                goto out;
1.17      chris    3468:        }
1.1       matt     3469:
1.61      thorpej  3470:        PDEBUG(0,
                   3471:            printf("pmap_modified_emulation: Got a hit va=%08lx, pte = %08x\n",
                   3472:            va, ptes[arm_btop(va)]));
1.78      thorpej  3473:        pg->mdpage.pvh_attrs |= PVF_REF | PVF_MOD;
1.29      rearnsha 3474:
                   3475:        /*
                   3476:         * Re-enable write permissions for the page.  No need to call
                   3477:         * pmap_vac_me_harder(), since this is just a
1.78      thorpej  3478:         * modified-emulation fault, and the PVF_WRITE bit isn't changing.
                   3479:         * We've already set the cacheable bits based on the assumption
                   3480:         * that we can write to this page.
1.29      rearnsha 3481:         */
1.61      thorpej  3482:        ptes[arm_btop(va)] =
1.84      thorpej  3483:            (ptes[arm_btop(va)] & ~L2_TYPE_MASK) | L2_S_PROTO | L2_S_PROT_W;
1.113     thorpej  3484:        PTE_SYNC(&ptes[arm_btop(va)]);
1.61      thorpej  3485:        PDEBUG(0, printf("->(%08x)\n", ptes[arm_btop(va)]));
1.1       matt     3486:
1.49      thorpej  3487:        simple_unlock(&pg->mdpage.pvh_slock);
1.61      thorpej  3488:
1.1       matt     3489:        cpu_tlb_flushID_SE(va);
1.32      thorpej  3490:        cpu_cpwait();
1.61      thorpej  3491:        rv = 1;
                   3492:  out:
                   3493:        pmap_unmap_ptes(pmap);                  /* unlocks pmap */
                   3494:        PMAP_MAP_TO_HEAD_UNLOCK();
                   3495:        return (rv);
1.1       matt     3496: }
                   3497:
                   3498: int
1.73      thorpej  3499: pmap_handled_emulation(struct pmap *pmap, vaddr_t va)
1.1       matt     3500: {
1.62      thorpej  3501:        pt_entry_t *ptes;
                   3502:        struct vm_page *pg;
1.2       matt     3503:        paddr_t pa;
1.62      thorpej  3504:        int rv = 0;
1.1       matt     3505:
                   3506:        PDEBUG(2, printf("pmap_handled_emulation\n"));
                   3507:
1.63      thorpej  3508:        PMAP_MAP_TO_HEAD_LOCK();
1.62      thorpej  3509:        ptes = pmap_map_ptes(pmap);             /* locks pmap */
                   3510:
                   3511:        if (pmap_pde_v(pmap_pde(pmap, va)) == 0) {
                   3512:                PDEBUG(2, printf("L1 PTE invalid\n"));
                   3513:                goto out;
1.1       matt     3514:        }
                   3515:
1.62      thorpej  3516:        PDEBUG(1, printf("pte=%08x\n", ptes[arm_btop(va)]));
1.1       matt     3517:
1.113     thorpej  3518:        /*
                   3519:         * Don't need to PTE_FLUSH_ALT() here; this is always done
                   3520:         * with the current pmap.
                   3521:         */
                   3522:
1.62      thorpej  3523:        /* Check for invalid pte */
                   3524:        if (l2pte_valid(ptes[arm_btop(va)]) == 0)
                   3525:                goto out;
1.1       matt     3526:
                   3527:        /* This can happen if user code tries to access kernel memory. */
1.81      thorpej  3528:        if ((ptes[arm_btop(va)] & L2_TYPE_MASK) != L2_TYPE_INV)
1.62      thorpej  3529:                goto out;
1.1       matt     3530:
                   3531:        /* Extract the physical address of the page */
1.62      thorpej  3532:        pa = l2pte_pa(ptes[arm_btop(va)]);
1.49      thorpej  3533:        if ((pg = PHYS_TO_VM_PAGE(pa)) == NULL)
1.62      thorpej  3534:                goto out;
1.1       matt     3535:
1.63      thorpej  3536:        simple_lock(&pg->mdpage.pvh_slock);
                   3537:
1.1       matt     3538:        /*
                   3539:         * OK, we just enable the PTE and mark the attributes as handled.
1.63      thorpej  3540:         * XXX Should we traverse the PV list and enable all PTEs?
1.1       matt     3541:         */
1.62      thorpej  3542:        PDEBUG(0,
                   3543:            printf("pmap_handled_emulation: Got a hit va=%08lx pte = %08x\n",
                   3544:            va, ptes[arm_btop(va)]));
1.78      thorpej  3545:        pg->mdpage.pvh_attrs |= PVF_REF;
1.1       matt     3546:
1.84      thorpej  3547:        ptes[arm_btop(va)] = (ptes[arm_btop(va)] & ~L2_TYPE_MASK) | L2_S_PROTO;
1.113     thorpej  3548:        PTE_SYNC(&ptes[arm_btop(va)]);
1.62      thorpej  3549:        PDEBUG(0, printf("->(%08x)\n", ptes[arm_btop(va)]));
                   3550:
1.63      thorpej  3551:        simple_unlock(&pg->mdpage.pvh_slock);
                   3552:
1.1       matt     3553:        cpu_tlb_flushID_SE(va);
1.32      thorpej  3554:        cpu_cpwait();
1.62      thorpej  3555:        rv = 1;
                   3556:  out:
                   3557:        pmap_unmap_ptes(pmap);                  /* unlocks pmap */
1.63      thorpej  3558:        PMAP_MAP_TO_HEAD_UNLOCK();
1.62      thorpej  3559:        return (rv);
1.1       matt     3560: }
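
                         /*
                          * Note (added; not part of the original source):
                          * pmap_modified_emulation() and pmap_handled_emulation() together
                          * implement referenced/modified emulation.  pmap_enter() installs
                          * unreferenced mappings with L2_TYPE_INV and not-yet-written
                          * mappings without L2_S_PROT_W (see above), so the first access or
                          * the first write faults.  The fault handlers are then expected to
                          * try these routines before treating the fault as genuine, roughly
                          * (illustrative only):
                          *
                          *        if (pmap_handled_emulation(pmap, va))
                          *                return;                (reference emulation)
                          *        if (pmap_modified_emulation(pmap, va))
                          *                return;                (modified emulation)
                          *        ... otherwise hand the fault to uvm_fault() ...
                          */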
1.17      chris    3561:
1.1       matt     3562: /*
                   3563:  * pmap_collect: free resources held by a pmap
                   3564:  *
                   3565:  * => optional function.
                   3566:  * => called when a process is swapped out to free memory.
                   3567:  */
                   3568:
                   3569: void
1.73      thorpej  3570: pmap_collect(struct pmap *pmap)
1.1       matt     3571: {
                   3572: }
                   3573:
                   3574: /*
                   3575:  * Routine:    pmap_procwr
                   3576:  *
                   3577:  * Function:
                   3578:  *     Synchronize caches corresponding to [addr, addr+len) in p.
                   3579:  *
                   3580:  */
                   3581: void
1.73      thorpej  3582: pmap_procwr(struct proc *p, vaddr_t va, int len)
1.1       matt     3583: {
                   3584:        /* We only need to do anything if it is the current process. */
                   3585:        if (p == curproc)
1.36      thorpej  3586:                cpu_icache_sync_range(va, len);
1.17      chris    3587: }
                   3588: /*
                   3589:  * PTP functions
                   3590:  */
                   3591:
                   3592: /*
                   3593:  * pmap_get_ptp: get a PTP (if there isn't one, allocate a new one)
                   3594:  *
                   3595:  * => pmap should NOT be pmap_kernel()
                   3596:  * => pmap should be locked
                   3597:  */
                   3598:
                   3599: static struct vm_page *
1.57      thorpej  3600: pmap_get_ptp(struct pmap *pmap, vaddr_t va)
1.17      chris    3601: {
1.57      thorpej  3602:        struct vm_page *ptp;
1.17      chris    3603:
1.114     thorpej  3604:        KASSERT((va & PD_OFFSET) == 0);         /* XXX KDASSERT */
                   3605:
1.57      thorpej  3606:        if (pmap_pde_page(pmap_pde(pmap, va))) {
1.17      chris    3607:
1.57      thorpej  3608:                /* valid... check hint (saves us a PA->PG lookup) */
                   3609:                if (pmap->pm_ptphint &&
1.81      thorpej  3610:                    (pmap->pm_pdir[pmap_pdei(va)] & L2_S_FRAME) ==
1.57      thorpej  3611:                    VM_PAGE_TO_PHYS(pmap->pm_ptphint))
                   3612:                        return (pmap->pm_ptphint);
                   3613:                ptp = uvm_pagelookup(&pmap->pm_obj, va);
1.17      chris    3614: #ifdef DIAGNOSTIC
1.57      thorpej  3615:                if (ptp == NULL)
                   3616:                        panic("pmap_get_ptp: unmanaged user PTP");
1.17      chris    3617: #endif
1.70      thorpej  3618:                pmap->pm_ptphint = ptp;
1.57      thorpej  3619:                return(ptp);
                   3620:        }
1.17      chris    3621:
1.57      thorpej  3622:        /* allocate a new PTP (updates ptphint) */
1.114     thorpej  3623:        return (pmap_alloc_ptp(pmap, va));
1.17      chris    3624: }
                   3625:
                   3626: /*
                   3627:  * pmap_alloc_ptp: allocate a PTP for a PMAP
                   3628:  *
                   3629:  * => pmap should already be locked by caller
                   3630:  * => we use the ptp's wire_count to count the number of active mappings
                   3631:  *     in the PTP (we start it at one to prevent any chance this PTP
                   3632:  *     will ever leak onto the active/inactive queues)
                   3633:  */
                   3634:
                   3635: /*__inline */ static struct vm_page *
1.57      thorpej  3636: pmap_alloc_ptp(struct pmap *pmap, vaddr_t va)
1.17      chris    3637: {
                   3638:        struct vm_page *ptp;
1.114     thorpej  3639:
                   3640:        KASSERT((va & PD_OFFSET) == 0);         /* XXX KDASSERT */
1.17      chris    3641:
                   3642:        ptp = uvm_pagealloc(&pmap->pm_obj, va, NULL,
                   3643:                UVM_PGA_USERESERVE|UVM_PGA_ZERO);
1.57      thorpej  3644:        if (ptp == NULL)
1.17      chris    3645:                return (NULL);
                   3646:
                   3647:        /* got one! */
                   3648:        ptp->flags &= ~PG_BUSY; /* never busy */
                   3649:        ptp->wire_count = 1;    /* no mappings yet */
1.113     thorpej  3650:        pmap_map_in_l1(pmap, va, VM_PAGE_TO_PHYS(ptp),
                   3651:            PMAP_PTP_SELFREF | PMAP_PTP_CACHEABLE);
1.17      chris    3652:        pmap->pm_stats.resident_count++;        /* count PTP as resident */
1.70      thorpej  3653:        pmap->pm_ptphint = ptp;
1.17      chris    3654:        return (ptp);
1.1       matt     3655: }
1.48      chris    3656:
                   3657: vaddr_t
1.73      thorpej  3658: pmap_growkernel(vaddr_t maxkvaddr)
1.48      chris    3659: {
                   3660:        struct pmap *kpm = pmap_kernel(), *pm;
                   3661:        int s;
                   3662:        paddr_t ptaddr;
                   3663:        struct vm_page *ptp;
                   3664:
                   3665:        if (maxkvaddr <= pmap_curmaxkvaddr)
                   3666:                goto out;               /* we are OK */
                   3667:        NPDEBUG(PDB_GROWKERN, printf("pmap_growkernel: growing kernel from %lx to %lx\n",
                   3668:                    pmap_curmaxkvaddr, maxkvaddr));
                   3669:
                   3670:        /*
                   3671:         * whoops!   we need to add kernel PTPs
                   3672:         */
                   3673:
                   3674:        s = splhigh();  /* to be safe */
                   3675:        simple_lock(&kpm->pm_obj.vmobjlock);
                   3676:        /* kernel L2 tables are added a page at a time; each covers 4MB of VA */
1.70      thorpej  3677:        for (/*null*/ ; pmap_curmaxkvaddr < maxkvaddr;
1.81      thorpej  3678:             pmap_curmaxkvaddr += 4 * L1_S_SIZE) {
1.48      chris    3679:
                   3680:                if (uvm.page_init_done == FALSE) {
                   3681:
                   3682:                        /*
                   3683:                         * we're growing the kernel pmap early (from
                   3684:                         * uvm_pageboot_alloc()).  this case must be
                   3685:                         * handled a little differently.
                   3686:                         */
                   3687:
                   3688:                        if (uvm_page_physget(&ptaddr) == FALSE)
                   3689:                                panic("pmap_growkernel: out of memory");
                   3690:                        pmap_zero_page(ptaddr);
                   3691:
                   3692:                        /* map this page in */
1.113     thorpej  3693:                        pmap_map_in_l1(kpm, pmap_curmaxkvaddr, ptaddr,
                   3694:                            PMAP_PTP_SELFREF | PMAP_PTP_CACHEABLE);
1.48      chris    3695:
                   3696:                        /* count PTP as resident */
                   3697:                        kpm->pm_stats.resident_count++;
                   3698:                        continue;
                   3699:                }
                   3700:
                   3701:                /*
                   3702:                 * THIS *MUST* BE CODED SO AS TO WORK IN THE
                   3703:                 * pmap_initialized == FALSE CASE!  WE MAY BE
                   3704:                 * INVOKED WHILE pmap_init() IS RUNNING!
                   3705:                 */
                   3706:
1.70      thorpej  3707:                if ((ptp = pmap_alloc_ptp(kpm, pmap_curmaxkvaddr)) == NULL)
1.48      chris    3708:                        panic("pmap_growkernel: alloc ptp failed");
                   3709:
                   3710:                /* distribute new kernel PTP to all active pmaps */
                   3711:                simple_lock(&pmaps_lock);
                   3712:                LIST_FOREACH(pm, &pmaps, pm_list) {
1.70      thorpej  3713:                        pmap_map_in_l1(pm, pmap_curmaxkvaddr,
1.113     thorpej  3714:                            VM_PAGE_TO_PHYS(ptp),
                   3715:                            PMAP_PTP_SELFREF | PMAP_PTP_CACHEABLE);
1.48      chris    3716:                }
1.111     thorpej  3717:
                   3718:                /* Invalidate the PTPT cache. */
                   3719:                pool_cache_invalidate(&pmap_ptpt_cache);
                   3720:                pmap_ptpt_cache_generation++;
1.48      chris    3721:
                   3722:                simple_unlock(&pmaps_lock);
                   3723:        }
                   3724:
                   3725:        /*
                   3726:         * flush out the TLB; expensive, but pmap_growkernel() happens
                   3727:         * rarely
                   3728:         */
                   3729:        cpu_tlb_flushD();
                   3730:        cpu_cpwait();
                   3731:
                   3732:        simple_unlock(&kpm->pm_obj.vmobjlock);
                   3733:        splx(s);
                   3734:
                   3735: out:
                   3736:        return (pmap_curmaxkvaddr);
                   3737: }
                   3738:
1.76      thorpej  3739: /************************ Utility routines ****************************/
                   3740:
                   3741: /*
                   3742:  * vector_page_setprot:
                   3743:  *
                   3744:  *     Manipulate the protection of the vector page.
                   3745:  */
                   3746: void
                   3747: vector_page_setprot(int prot)
                   3748: {
                   3749:        pt_entry_t *pte;
                   3750:
                   3751:        pte = vtopte(vector_page);
1.48      chris    3752:
1.83      thorpej  3753:        *pte = (*pte & ~L2_S_PROT_MASK) | L2_S_PROT(PTE_KERNEL, prot);
1.112     thorpej  3754:        PTE_SYNC(pte);
1.76      thorpej  3755:        cpu_tlb_flushD_SE(vector_page);
                   3756:        cpu_cpwait();
                   3757: }
1.1       matt     3758:
1.40      thorpej  3759: /************************ Bootstrapping routines ****************************/
                   3760:
                   3761: /*
1.46      thorpej  3762:  * This list exists for the benefit of pmap_map_chunk().  It keeps track
                   3763:  * of the kernel L2 tables during bootstrap, so that pmap_map_chunk() can
                   3764:  * find them as necessary.
                   3765:  *
                   3766:  * Note that the data on this list is not valid after initarm() returns.
                   3767:  */
                   3768: SLIST_HEAD(, pv_addr) kernel_pt_list = SLIST_HEAD_INITIALIZER(kernel_pt_list);
                   3769:
                   3770: static vaddr_t
                   3771: kernel_pt_lookup(paddr_t pa)
                   3772: {
                   3773:        pv_addr_t *pv;
                   3774:
                   3775:        SLIST_FOREACH(pv, &kernel_pt_list, pv_list) {
                   3776:                if (pv->pv_pa == pa)
                   3777:                        return (pv->pv_va);
                   3778:        }
                   3779:        return (0);
                   3780: }
                   3781:
                   3782: /*
1.40      thorpej  3783:  * pmap_map_section:
                   3784:  *
                   3785:  *     Create a single section mapping.
                   3786:  */
                   3787: void
                   3788: pmap_map_section(vaddr_t l1pt, vaddr_t va, paddr_t pa, int prot, int cache)
                   3789: {
                   3790:        pd_entry_t *pde = (pd_entry_t *) l1pt;
1.86      thorpej  3791:        pd_entry_t fl = (cache == PTE_CACHE) ? pte_l1_s_cache_mode : 0;
1.40      thorpej  3792:
1.81      thorpej  3793:        KASSERT(((va | pa) & L1_S_OFFSET) == 0);
1.40      thorpej  3794:
1.83      thorpej  3795:        pde[va >> L1_S_SHIFT] = L1_S_PROTO | pa |
                   3796:            L1_S_PROT(PTE_KERNEL, prot) | fl;
1.41      thorpej  3797: }
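
                         /*
                          * Illustrative sketch (added; not part of the original source):
                          * bootstrap code (initarm()) can use pmap_map_section() to map a
                          * 1MB-aligned region; the names and addresses below are
                          * hypothetical:
                          *
                          *        pmap_map_section(l1pagetable, 0xf0000000, 0x40000000,
                          *            VM_PROT_READ | VM_PROT_WRITE, PTE_NOCACHE);
                          */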
                   3798:
                   3799: /*
                   3800:  * pmap_map_entry:
                   3801:  *
                   3802:  *     Create a single page mapping.
                   3803:  */
                   3804: void
1.47      thorpej  3805: pmap_map_entry(vaddr_t l1pt, vaddr_t va, paddr_t pa, int prot, int cache)
1.41      thorpej  3806: {
1.47      thorpej  3807:        pd_entry_t *pde = (pd_entry_t *) l1pt;
1.86      thorpej  3808:        pt_entry_t fl = (cache == PTE_CACHE) ? pte_l2_s_cache_mode : 0;
1.47      thorpej  3809:        pt_entry_t *pte;
1.41      thorpej  3810:
                   3811:        KASSERT(((va | pa) & PGOFSET) == 0);
                   3812:
1.81      thorpej  3813:        if ((pde[va >> L1_S_SHIFT] & L1_TYPE_MASK) != L1_TYPE_C)
1.47      thorpej  3814:                panic("pmap_map_entry: no L2 table for VA 0x%08lx", va);
                   3815:
                   3816:        pte = (pt_entry_t *)
1.81      thorpej  3817:            kernel_pt_lookup(pde[va >> L1_S_SHIFT] & L2_S_FRAME);
1.47      thorpej  3818:        if (pte == NULL)
                   3819:                panic("pmap_map_entry: can't find L2 table for VA 0x%08lx", va);
                   3820:
1.83      thorpej  3821:        pte[(va >> PGSHIFT) & 0x3ff] = L2_S_PROTO | pa |
                   3822:            L2_S_PROT(PTE_KERNEL, prot) | fl;
1.42      thorpej  3823: }
                   3824:
                   3825: /*
                   3826:  * pmap_link_l2pt:
                   3827:  *
                   3828:  *     Link the L2 page table specified by "pa" into the L1
                   3829:  *     page table at the slot for "va".
                   3830:  */
                   3831: void
1.46      thorpej  3832: pmap_link_l2pt(vaddr_t l1pt, vaddr_t va, pv_addr_t *l2pv)
1.42      thorpej  3833: {
                   3834:        pd_entry_t *pde = (pd_entry_t *) l1pt;
1.81      thorpej  3835:        u_int slot = va >> L1_S_SHIFT;
1.42      thorpej  3836:
1.46      thorpej  3837:        KASSERT((l2pv->pv_pa & PGOFSET) == 0);
                   3838:
1.83      thorpej  3839:        pde[slot + 0] = L1_C_PROTO | (l2pv->pv_pa + 0x000);
                   3840:        pde[slot + 1] = L1_C_PROTO | (l2pv->pv_pa + 0x400);
                   3841:        pde[slot + 2] = L1_C_PROTO | (l2pv->pv_pa + 0x800);
                   3842:        pde[slot + 3] = L1_C_PROTO | (l2pv->pv_pa + 0xc00);
1.42      thorpej  3843:
1.46      thorpej  3844:        SLIST_INSERT_HEAD(&kernel_pt_list, l2pv, pv_list);
1.43      thorpej  3845: }
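
                         /*
                          * Note (added; not part of the original source): an ARM "coarse"
                          * L2 page table is 1KB (256 entries x 4KB), so one 4KB page of L2
                          * tables spans four consecutive 1MB L1 slots; hence the four
                          * L1_C_PROTO entries at offsets 0x000, 0x400, 0x800 and 0xc00
                          * above.
                          */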
                   3846:
                   3847: /*
                   3848:  * pmap_map_chunk:
                   3849:  *
                   3850:  *     Map a chunk of memory using the most efficient mappings
                   3851:  *     possible (section, large page, small page) into the
                   3852:  *     provided L1 and L2 tables at the specified virtual address.
                   3853:  */
                   3854: vsize_t
1.46      thorpej  3855: pmap_map_chunk(vaddr_t l1pt, vaddr_t va, paddr_t pa, vsize_t size,
                   3856:     int prot, int cache)
1.43      thorpej  3857: {
                   3858:        pd_entry_t *pde = (pd_entry_t *) l1pt;
1.86      thorpej  3859:        pt_entry_t *pte, fl;
1.43      thorpej  3860:        vsize_t resid;
                   3861:        int i;
                   3862:
                   3863:        resid = (size + (NBPG - 1)) & ~(NBPG - 1);
                   3864:
1.44      thorpej  3865:        if (l1pt == 0)
                   3866:                panic("pmap_map_chunk: no L1 table provided");
                   3867:
1.43      thorpej  3868: #ifdef VERBOSE_INIT_ARM
                   3869:        printf("pmap_map_chunk: pa=0x%lx va=0x%lx size=0x%lx resid=0x%lx "
                   3870:            "prot=0x%x cache=%d\n", pa, va, size, resid, prot, cache);
                   3871: #endif
                   3872:
                   3873:        size = resid;
                   3874:
                   3875:        while (resid > 0) {
                   3876:                /* See if we can use a section mapping. */
1.81      thorpej  3877:                if (((pa | va) & L1_S_OFFSET) == 0 &&
                   3878:                    resid >= L1_S_SIZE) {
1.86      thorpej  3879:                        fl = (cache == PTE_CACHE) ? pte_l1_s_cache_mode : 0;
1.43      thorpej  3880: #ifdef VERBOSE_INIT_ARM
                   3881:                        printf("S");
                   3882: #endif
1.83      thorpej  3883:                        pde[va >> L1_S_SHIFT] = L1_S_PROTO | pa |
                   3884:                            L1_S_PROT(PTE_KERNEL, prot) | fl;
1.81      thorpej  3885:                        va += L1_S_SIZE;
                   3886:                        pa += L1_S_SIZE;
                   3887:                        resid -= L1_S_SIZE;
1.43      thorpej  3888:                        continue;
                   3889:                }
1.45      thorpej  3890:
                   3891:                /*
                   3892:                 * Ok, we're going to use an L2 table.  Make sure
                   3893:                 * one is actually in the corresponding L1 slot
                   3894:                 * for the current VA.
                   3895:                 */
1.81      thorpej  3896:                if ((pde[va >> L1_S_SHIFT] & L1_TYPE_MASK) != L1_TYPE_C)
1.46      thorpej  3897:                        panic("pmap_map_chunk: no L2 table for VA 0x%08lx", va);
                   3898:
                   3899:                pte = (pt_entry_t *)
1.81      thorpej  3900:                    kernel_pt_lookup(pde[va >> L1_S_SHIFT] & L2_S_FRAME);
1.46      thorpej  3901:                if (pte == NULL)
                   3902:                        panic("pmap_map_chunk: can't find L2 table for VA "
                   3903:                            "0x%08lx", va);
1.43      thorpej  3904:
                   3905:                /* See if we can use a L2 large page mapping. */
1.81      thorpej  3906:                if (((pa | va) & L2_L_OFFSET) == 0 &&
                   3907:                    resid >= L2_L_SIZE) {
1.86      thorpej  3908:                        fl = (cache == PTE_CACHE) ? pte_l2_l_cache_mode : 0;
1.43      thorpej  3909: #ifdef VERBOSE_INIT_ARM
                   3910:                        printf("L");
                   3911: #endif
                   3912:                        for (i = 0; i < 16; i++) {
                   3913:                                pte[((va >> PGSHIFT) & 0x3f0) + i] =
1.83      thorpej  3914:                                    L2_L_PROTO | pa |
                   3915:                                    L2_L_PROT(PTE_KERNEL, prot) | fl;
1.43      thorpej  3916:                        }
1.81      thorpej  3917:                        va += L2_L_SIZE;
                   3918:                        pa += L2_L_SIZE;
                   3919:                        resid -= L2_L_SIZE;
1.43      thorpej  3920:                        continue;
                   3921:                }
                   3922:
                   3923:                /* Use a small page mapping. */
1.86      thorpej  3924:                fl = (cache == PTE_CACHE) ? pte_l2_s_cache_mode : 0;
1.43      thorpej  3925: #ifdef VERBOSE_INIT_ARM
                   3926:                printf("P");
                   3927: #endif
1.83      thorpej  3928:                pte[(va >> PGSHIFT) & 0x3ff] = L2_S_PROTO | pa |
                   3929:                    L2_S_PROT(PTE_KERNEL, prot) | fl;
1.43      thorpej  3930:                va += NBPG;
                   3931:                pa += NBPG;
                   3932:                resid -= NBPG;
                   3933:        }
                   3934: #ifdef VERBOSE_INIT_ARM
                   3935:        printf("\n");
                   3936: #endif
                   3937:        return (size);
1.40      thorpej  3938: }
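
                         /*
                          * Illustrative sketch (added; not part of the original source):
                          * bootstrap code first links an L2 table into the L1 with
                          * pmap_link_l2pt() and then maps memory over it with
                          * pmap_map_chunk(); the names and sizes below are hypothetical:
                          *
                          *        pmap_link_l2pt(l1pagetable, KERNEL_BASE, &kernel_pt);
                          *        pmap_map_chunk(l1pagetable, KERNEL_BASE, physical_start,
                          *            kernel_size, VM_PROT_READ | VM_PROT_WRITE, PTE_CACHE);
                          */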
1.85      thorpej  3939:
                   3940: /********************** PTE initialization routines **************************/
                   3941:
                   3942: /*
                   3943:  * These routines are called when the CPU type is identified to set up
                   3944:  * the PTE prototypes, cache modes, etc.
                   3945:  *
                   3946:  * The variables are always here, just in case LKMs need to reference
                   3947:  * them (though, they shouldn't).
                   3948:  */
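                          /*
                           * Illustrative sketch (not from this file):  a port's early MMU
                           * set-up code is expected to call exactly one of these routines,
                           * chosen according to the CPU it has identified, before building
                           * any PTEs:
                           *
                           *	#if ARM_MMU_XSCALE == 1
                           *		if (cpu_is_xscale)	(hypothetical predicate)
                           *			pmap_pte_init_xscale();
                           *		else
                           *	#endif
                           *			pmap_pte_init_generic();
                           *
                           * Afterwards the pte_*_cache_mode, pte_*_prot_* and pte_*_proto
                           * variables below hold the values used to construct PTEs for the
                           * CPU at hand.
                           */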
                   3949:
1.86      thorpej  3950: pt_entry_t     pte_l1_s_cache_mode;
                   3951: pt_entry_t     pte_l1_s_cache_mask;
                   3952:
                   3953: pt_entry_t     pte_l2_l_cache_mode;
                   3954: pt_entry_t     pte_l2_l_cache_mask;
                   3955:
                   3956: pt_entry_t     pte_l2_s_cache_mode;
                   3957: pt_entry_t     pte_l2_s_cache_mask;
1.85      thorpej  3958:
                   3959: pt_entry_t     pte_l2_s_prot_u;
                   3960: pt_entry_t     pte_l2_s_prot_w;
                   3961: pt_entry_t     pte_l2_s_prot_mask;
                   3962:
                   3963: pt_entry_t     pte_l1_s_proto;
                   3964: pt_entry_t     pte_l1_c_proto;
                   3965: pt_entry_t     pte_l2_s_proto;
                   3966:
1.88      thorpej  3967: void           (*pmap_copy_page_func)(paddr_t, paddr_t);
                   3968: void           (*pmap_zero_page_func)(paddr_t);
                   3969:
1.85      thorpej  3970: #if ARM_MMU_GENERIC == 1
                   3971: void
                   3972: pmap_pte_init_generic(void)
                   3973: {
                   3974:
1.86      thorpej  3975:        pte_l1_s_cache_mode = L1_S_B|L1_S_C;
                   3976:        pte_l1_s_cache_mask = L1_S_CACHE_MASK_generic;
                   3977:
                   3978:        pte_l2_l_cache_mode = L2_B|L2_C;
                   3979:        pte_l2_l_cache_mask = L2_L_CACHE_MASK_generic;
                   3980:
                   3981:        pte_l2_s_cache_mode = L2_B|L2_C;
                   3982:        pte_l2_s_cache_mask = L2_S_CACHE_MASK_generic;
1.85      thorpej  3983:
                   3984:        pte_l2_s_prot_u = L2_S_PROT_U_generic;
                   3985:        pte_l2_s_prot_w = L2_S_PROT_W_generic;
                   3986:        pte_l2_s_prot_mask = L2_S_PROT_MASK_generic;
                   3987:
                   3988:        pte_l1_s_proto = L1_S_PROTO_generic;
                   3989:        pte_l1_c_proto = L1_C_PROTO_generic;
                   3990:        pte_l2_s_proto = L2_S_PROTO_generic;
1.88      thorpej  3991:
                   3992:        pmap_copy_page_func = pmap_copy_page_generic;
                   3993:        pmap_zero_page_func = pmap_zero_page_generic;
1.85      thorpej  3994: }
                   3995:
                   3996: #if defined(CPU_ARM9)
                   3997: void
                   3998: pmap_pte_init_arm9(void)
                   3999: {
                   4000:
                   4001:        /*
                   4002:         * ARM9 is compatible with generic, but we want to use
                   4003:         * write-through caching for now.
                   4004:         */
                   4005:        pmap_pte_init_generic();
1.86      thorpej  4006:
                   4007:        pte_l1_s_cache_mode = L1_S_C;
                   4008:        pte_l2_l_cache_mode = L2_C;
                   4009:        pte_l2_s_cache_mode = L2_C;
1.85      thorpej  4010: }
                   4011: #endif /* CPU_ARM9 */
                   4012: #endif /* ARM_MMU_GENERIC == 1 */
                   4013:
                   4014: #if ARM_MMU_XSCALE == 1
                   4015: void
                   4016: pmap_pte_init_xscale(void)
                   4017: {
1.96      thorpej  4018:        uint32_t auxctl;
1.85      thorpej  4019:
1.96      thorpej  4020:        pte_l1_s_cache_mode = L1_S_B|L1_S_C;
1.86      thorpej  4021:        pte_l1_s_cache_mask = L1_S_CACHE_MASK_xscale;
                   4022:
1.96      thorpej  4023:        pte_l2_l_cache_mode = L2_B|L2_C;
1.86      thorpej  4024:        pte_l2_l_cache_mask = L2_L_CACHE_MASK_xscale;
                   4025:
1.96      thorpej  4026:        pte_l2_s_cache_mode = L2_B|L2_C;
1.86      thorpej  4027:        pte_l2_s_cache_mask = L2_S_CACHE_MASK_xscale;
1.106     thorpej  4028:
                   4029: #ifdef XSCALE_CACHE_READ_WRITE_ALLOCATE
                   4030:        /*
                   4031:         * The XScale core has an enhanced mode where writes that
                   4032:         * miss the cache cause a cache line to be allocated.  This
                    4033:                 * is significantly faster than the traditional write-through
                    4034:                 * behavior in this case.
                   4035:         *
                   4036:         * However, there is a bug lurking in this pmap module, or in
                   4037:         * other parts of the VM system, or both, which causes corruption
                   4038:         * of NFS-backed files when this cache mode is used.  We have
                   4039:         * an ugly work-around for this problem (disable r/w-allocate
                   4040:         * for managed kernel mappings), but the bug is still evil enough
                   4041:         * to consider this cache mode "experimental".
                   4042:         */
                   4043:        pte_l1_s_cache_mode |= L1_S_XSCALE_TEX(TEX_XSCALE_X);
                   4044:        pte_l2_l_cache_mode |= L2_XSCALE_L_TEX(TEX_XSCALE_X);
                   4045:        pte_l2_s_cache_mode |= L2_XSCALE_T_TEX(TEX_XSCALE_X);
                   4046: #endif /* XSCALE_CACHE_READ_WRITE_ALLOCATE */
1.85      thorpej  4047:
1.95      thorpej  4048: #ifdef XSCALE_CACHE_WRITE_THROUGH
                   4049:        /*
                   4050:         * Some versions of the XScale core have various bugs in
                   4051:         * their cache units, the work-around for which is to run
                   4052:         * the cache in write-through mode.  Unfortunately, this
                   4053:         * has a major (negative) impact on performance.  So, we
                   4054:         * go ahead and run fast-and-loose, in the hopes that we
                   4055:         * don't line up the planets in a way that will trip the
                   4056:         * bugs.
                   4057:         *
                   4058:         * However, we give you the option to be slow-but-correct.
                   4059:         */
                   4060:        pte_l1_s_cache_mode = L1_S_C;
                   4061:        pte_l2_l_cache_mode = L2_C;
                   4062:        pte_l2_s_cache_mode = L2_C;
                   4063: #endif /* XSCALE_CACHE_WRITE_THROUGH */
                   4064:
1.85      thorpej  4065:        pte_l2_s_prot_u = L2_S_PROT_U_xscale;
                   4066:        pte_l2_s_prot_w = L2_S_PROT_W_xscale;
                   4067:        pte_l2_s_prot_mask = L2_S_PROT_MASK_xscale;
                   4068:
                   4069:        pte_l1_s_proto = L1_S_PROTO_xscale;
                   4070:        pte_l1_c_proto = L1_C_PROTO_xscale;
                   4071:        pte_l2_s_proto = L2_S_PROTO_xscale;
1.88      thorpej  4072:
                   4073:        pmap_copy_page_func = pmap_copy_page_xscale;
                   4074:        pmap_zero_page_func = pmap_zero_page_xscale;
1.96      thorpej  4075:
                   4076:        /*
                   4077:         * Disable ECC protection of page table access, for now.
                   4078:         */
                   4079:        __asm __volatile("mrc p15, 0, %0, c1, c0, 1"
                   4080:                : "=r" (auxctl));
                   4081:        auxctl &= ~XSCALE_AUXCTL_P;
                   4082:        __asm __volatile("mcr p15, 0, %0, c1, c0, 1"
                   4083:                :
                   4084:                : "r" (auxctl));
1.85      thorpej  4085: }
1.87      thorpej  4086:
                   4087: /*
                   4088:  * xscale_setup_minidata:
                   4089:  *
                   4090:  *     Set up the mini-data cache clean area.  We require the
                   4091:  *     caller to allocate the right amount of physically and
                   4092:  *     virtually contiguous space.
                   4093:  */
                   4094: void
                   4095: xscale_setup_minidata(vaddr_t l1pt, vaddr_t va, paddr_t pa)
                   4096: {
                   4097:        extern vaddr_t xscale_minidata_clean_addr;
                   4098:        extern vsize_t xscale_minidata_clean_size; /* already initialized */
                   4099:        pd_entry_t *pde = (pd_entry_t *) l1pt;
                   4100:        pt_entry_t *pte;
                   4101:        vsize_t size;
1.96      thorpej  4102:        uint32_t auxctl;
1.87      thorpej  4103:
                   4104:        xscale_minidata_clean_addr = va;
                   4105:
                    4106:        /* Round the size up to a whole number of pages. */
                   4107:        size = (xscale_minidata_clean_size + L2_S_OFFSET) & L2_S_FRAME;
                   4108:
                   4109:        for (; size != 0;
                   4110:             va += L2_S_SIZE, pa += L2_S_SIZE, size -= L2_S_SIZE) {
                   4111:                pte = (pt_entry_t *)
                   4112:                    kernel_pt_lookup(pde[va >> L1_S_SHIFT] & L2_S_FRAME);
                   4113:                if (pte == NULL)
                   4114:                        panic("xscale_setup_minidata: can't find L2 table for "
                   4115:                            "VA 0x%08lx", va);
                   4116:                pte[(va >> PGSHIFT) & 0x3ff] = L2_S_PROTO | pa |
                   4117:                    L2_S_PROT(PTE_KERNEL, VM_PROT_READ) |
                   4118:                    L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X);
                   4119:        }
1.96      thorpej  4120:
                   4121:        /*
                   4122:         * Configure the mini-data cache for write-back with
                   4123:         * read/write-allocate.
                   4124:         *
                   4125:         * NOTE: In order to reconfigure the mini-data cache, we must
                   4126:         * make sure it contains no valid data!  In order to do that,
                   4127:         * we must issue a global data cache invalidate command!
                   4128:         *
                   4129:         * WE ASSUME WE ARE RUNNING UN-CACHED WHEN THIS ROUTINE IS CALLED!
                   4130:         * THIS IS VERY IMPORTANT!
                   4131:         */
                   4132:
                    4133:        /* Invalidate data and mini-data (source register is ignored). */
                    4134:        __asm __volatile("mcr p15, 0, %0, c7, c6, 0"
                    4135:                :
                    4136:                : "r" (0));
                    4137:
                    4138:
                   4139:        __asm __volatile("mrc p15, 0, %0, c1, c0, 1"
                   4140:                : "=r" (auxctl));
                   4141:        auxctl = (auxctl & ~XSCALE_AUXCTL_MD_MASK) | XSCALE_AUXCTL_MD_WB_RWA;
                   4142:        __asm __volatile("mcr p15, 0, %0, c1, c0, 1"
                   4143:                :
                   4144:                : "r" (auxctl));
1.87      thorpej  4145: }
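                          /*
                           * Illustrative call (a sketch; the names below are the caller's,
                           * not this file's):  board start-up code reserves a page-aligned,
                           * physically and virtually contiguous region of at least
                           * xscale_minidata_clean_size bytes and, while still running
                           * un-cached as required above, does
                           *
                           *	xscale_setup_minidata(l1pt_va, minidata_va, minidata_pa);
                           */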
1.85      thorpej  4146: #endif /* ARM_MMU_XSCALE == 1 */
