
Annotation of src/sys/arch/arm/arm32/pmap.c, Revision 1.30.2.8

1.30.2.8! nathanw     1: /*     $NetBSD: pmap.c,v 1.30.2.7 2002/06/20 03:38:05 nathanw Exp $    */
1.30.2.2  thorpej     2:
                      3: /*
1.30.2.6  nathanw     4:  * Copyright (c) 2002 Wasabi Systems, Inc.
1.30.2.2  thorpej     5:  * Copyright (c) 2001 Richard Earnshaw
                      6:  * Copyright (c) 2001 Christopher Gilbert
                      7:  * All rights reserved.
                      8:  *
                      9:  * 1. Redistributions of source code must retain the above copyright
                     10:  *    notice, this list of conditions and the following disclaimer.
                     11:  * 2. Redistributions in binary form must reproduce the above copyright
                     12:  *    notice, this list of conditions and the following disclaimer in the
                     13:  *    documentation and/or other materials provided with the distribution.
                     14:  * 3. The name of the company nor the name of the author may be used to
                     15:  *    endorse or promote products derived from this software without specific
                     16:  *    prior written permission.
                     17:  *
                     18:  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
                     19:  * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
                     20:  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
                     21:  * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
                     22:  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
                     23:  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
                     24:  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
                     25:  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
                     26:  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
                     27:  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
                     28:  * SUCH DAMAGE.
                     29:  */
                     30:
                     31: /*-
                     32:  * Copyright (c) 1999 The NetBSD Foundation, Inc.
                     33:  * All rights reserved.
                     34:  *
                     35:  * This code is derived from software contributed to The NetBSD Foundation
                     36:  * by Charles M. Hannum.
                     37:  *
                     38:  * Redistribution and use in source and binary forms, with or without
                     39:  * modification, are permitted provided that the following conditions
                     40:  * are met:
                     41:  * 1. Redistributions of source code must retain the above copyright
                     42:  *    notice, this list of conditions and the following disclaimer.
                     43:  * 2. Redistributions in binary form must reproduce the above copyright
                     44:  *    notice, this list of conditions and the following disclaimer in the
                     45:  *    documentation and/or other materials provided with the distribution.
                     46:  * 3. All advertising materials mentioning features or use of this software
                     47:  *    must display the following acknowledgement:
                     48:  *        This product includes software developed by the NetBSD
                     49:  *        Foundation, Inc. and its contributors.
                     50:  * 4. Neither the name of The NetBSD Foundation nor the names of its
                     51:  *    contributors may be used to endorse or promote products derived
                     52:  *    from this software without specific prior written permission.
                     53:  *
                     54:  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
                     55:  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
                     56:  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
                     57:  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
                     58:  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
                     59:  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
                     60:  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
                     61:  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
                     62:  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
                     63:  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
                     64:  * POSSIBILITY OF SUCH DAMAGE.
                     65:  */
                     66:
                     67: /*
                     68:  * Copyright (c) 1994-1998 Mark Brinicombe.
                     69:  * Copyright (c) 1994 Brini.
                     70:  * All rights reserved.
                     71:  *
                     72:  * This code is derived from software written for Brini by Mark Brinicombe
                     73:  *
                     74:  * Redistribution and use in source and binary forms, with or without
                     75:  * modification, are permitted provided that the following conditions
                     76:  * are met:
                     77:  * 1. Redistributions of source code must retain the above copyright
                     78:  *    notice, this list of conditions and the following disclaimer.
                     79:  * 2. Redistributions in binary form must reproduce the above copyright
                     80:  *    notice, this list of conditions and the following disclaimer in the
                     81:  *    documentation and/or other materials provided with the distribution.
                     82:  * 3. All advertising materials mentioning features or use of this software
                     83:  *    must display the following acknowledgement:
                     84:  *     This product includes software developed by Mark Brinicombe.
                     85:  * 4. The name of the author may not be used to endorse or promote products
                     86:  *    derived from this software without specific prior written permission.
                     87:  *
                     88:  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
                     89:  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
                     90:  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
                     91:  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
                     92:  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
                     93:  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
                     94:  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
                     95:  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
                     96:  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
                     97:  *
                     98:  * RiscBSD kernel project
                     99:  *
                    100:  * pmap.c
                    101:  *
                    102:  * Machine dependent vm stuff
                    103:  *
                    104:  * Created      : 20/09/94
                    105:  */
                    106:
                    107: /*
                    108:  * Performance improvements, UVM changes, overhauls and part-rewrites
                    109:  * were contributed by Neil A. Carson <neil@causality.com>.
                    110:  */
                    111:
                    112: /*
                    113:  * The dram block info is currently referenced from the bootconfig.
                    114:  * This should be placed in a separate structure.
                    115:  */
                    116:
                    117: /*
                    118:  * Special compilation symbols
                    119:  * PMAP_DEBUG          - Build in pmap_debug_level code
                    120:  */
                    121:
                    122: /* Include header files */
                    123:
                    124: #include "opt_pmap_debug.h"
                    125: #include "opt_ddb.h"
                    126:
                    127: #include <sys/types.h>
                    128: #include <sys/param.h>
                    129: #include <sys/kernel.h>
                    130: #include <sys/systm.h>
                    131: #include <sys/proc.h>
                    132: #include <sys/malloc.h>
                    133: #include <sys/user.h>
                    134: #include <sys/pool.h>
                    135: #include <sys/cdefs.h>
                    136:
                    137: #include <uvm/uvm.h>
                    138:
                    139: #include <machine/bootconfig.h>
                    140: #include <machine/bus.h>
                    141: #include <machine/pmap.h>
                    142: #include <machine/pcb.h>
                    143: #include <machine/param.h>
1.30.2.3  nathanw   144: #include <arm/arm32/katelib.h>
1.30.2.2  thorpej   145:
1.30.2.8! nathanw   146: __KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.30.2.7 2002/06/20 03:38:05 nathanw Exp $");
1.30.2.2  thorpej   147: #ifdef PMAP_DEBUG
                    148: #define        PDEBUG(_lev_,_stat_) \
                    149:        if (pmap_debug_level >= (_lev_)) \
                    150:                ((_stat_))
                    151: int pmap_debug_level = -2;
1.30.2.6  nathanw   152: void pmap_dump_pvlist(vaddr_t phys, char *m);
1.30.2.2  thorpej   153:
                    154: /*
                    155:  * for switching to potentially finer grained debugging
                    156:  */
                    157: #define        PDB_FOLLOW      0x0001
                    158: #define        PDB_INIT        0x0002
                    159: #define        PDB_ENTER       0x0004
                    160: #define        PDB_REMOVE      0x0008
                    161: #define        PDB_CREATE      0x0010
                    162: #define        PDB_PTPAGE      0x0020
1.30.2.6  nathanw   163: #define        PDB_GROWKERN    0x0040
1.30.2.2  thorpej   164: #define        PDB_BITS        0x0080
                    165: #define        PDB_COLLECT     0x0100
                    166: #define        PDB_PROTECT     0x0200
1.30.2.6  nathanw   167: #define        PDB_MAP_L1      0x0400
1.30.2.2  thorpej   168: #define        PDB_BOOTSTRAP   0x1000
                    169: #define        PDB_PARANOIA    0x2000
                    170: #define        PDB_WIRING      0x4000
                    171: #define        PDB_PVDUMP      0x8000
                    172:
                    173: int debugmap = 0;
                    174: int pmapdebug = PDB_PARANOIA | PDB_FOLLOW;
                    175: #define        NPDEBUG(_lev_,_stat_) \
                    176:        if (pmapdebug & (_lev_)) \
                    177:                ((_stat_))
                    178:
                    179: #else  /* PMAP_DEBUG */
                    180: #define        PDEBUG(_lev_,_stat_) /* Nothing */
1.30.2.6  nathanw   181: #define NPDEBUG(_lev_,_stat_) /* Nothing */
1.30.2.2  thorpej   182: #endif /* PMAP_DEBUG */
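                         /*
                          * Illustrative use of the debug macros above (the message shown is
                          * hypothetical): both wrap a single statement and evaluate it only
                          * when the matching level/bit is enabled, e.g.
                          *
                          *	NPDEBUG(PDB_ENTER,
                          *	    printf("pmap_enter: pmap=%p va=%08lx\n", pmap, va));
                          *
                          * With PMAP_DEBUG undefined both macros expand to nothing.
                          */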
                    183:
                    184: struct pmap     kernel_pmap_store;
                    185:
                    186: /*
1.30.2.6  nathanw   187:  * linked list of all non-kernel pmaps
                    188:  */
                    189:
                    190: static LIST_HEAD(, pmap) pmaps;
                    191:
                    192: /*
1.30.2.2  thorpej   193:  * pool that pmap structures are allocated from
                    194:  */
                    195:
                    196: struct pool pmap_pmap_pool;
                    197:
1.30.2.6  nathanw   198: static pt_entry_t *csrc_pte, *cdst_pte;
                    199: static vaddr_t csrcp, cdstp;
                    200:
1.30.2.2  thorpej   201: char *memhook;
                    202: extern caddr_t msgbufaddr;
                    203:
                    204: boolean_t pmap_initialized = FALSE;    /* Has pmap_init completed? */
                    205: /*
                    206:  * locking data structures
                    207:  */
                    208:
                    209: static struct lock pmap_main_lock;
                    210: static struct simplelock pvalloc_lock;
1.30.2.6  nathanw   211: static struct simplelock pmaps_lock;
1.30.2.2  thorpej   212: #ifdef LOCKDEBUG
                    213: #define PMAP_MAP_TO_HEAD_LOCK() \
                    214:      (void) spinlockmgr(&pmap_main_lock, LK_SHARED, NULL)
                    215: #define PMAP_MAP_TO_HEAD_UNLOCK() \
                    216:      (void) spinlockmgr(&pmap_main_lock, LK_RELEASE, NULL)
                    217:
                    218: #define PMAP_HEAD_TO_MAP_LOCK() \
                    219:      (void) spinlockmgr(&pmap_main_lock, LK_EXCLUSIVE, NULL)
                    220: #define PMAP_HEAD_TO_MAP_UNLOCK() \
                    221:      (void) spinlockmgr(&pmap_main_lock, LK_RELEASE, NULL)
                    222: #else
                    223: #define        PMAP_MAP_TO_HEAD_LOCK()         /* nothing */
                    224: #define        PMAP_MAP_TO_HEAD_UNLOCK()       /* nothing */
                    225: #define        PMAP_HEAD_TO_MAP_LOCK()         /* nothing */
                    226: #define        PMAP_HEAD_TO_MAP_UNLOCK()       /* nothing */
                    227: #endif /* LOCKDEBUG */
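                         /*
                          * Note on the convention encoded above: code that goes from a pmap
                          * to the per-page (head) structures takes pmap_main_lock shared,
                          * while code that goes from a page's pv list back to the owning
                          * pmaps takes it exclusive, so the two traversal directions cannot
                          * deadlock against each other.  Without LOCKDEBUG the macros
                          * compile away entirely.
                          */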
                    228:
                    229: /*
                    230:  * pv_page management structures: locked by pvalloc_lock
                    231:  */
                    232:
                    233: TAILQ_HEAD(pv_pagelist, pv_page);
                    234: static struct pv_pagelist pv_freepages;        /* list of pv_pages with free entrys */
                    235: static struct pv_pagelist pv_unusedpgs; /* list of unused pv_pages */
                    236: static int pv_nfpvents;                        /* # of free pv entries */
                    237: static struct pv_page *pv_initpage;    /* bootstrap page from kernel_map */
                    238: static vaddr_t pv_cachedva;            /* cached VA for later use */
                    239:
                    240: #define PVE_LOWAT (PVE_PER_PVPAGE / 2) /* free pv_entry low water mark */
                    241: #define PVE_HIWAT (PVE_LOWAT + (PVE_PER_PVPAGE * 2))
                    242:                                        /* high water mark */
                    243:
                    244: /*
                    245:  * local prototypes
                    246:  */
                    247:
                    248: static struct pv_entry *pmap_add_pvpage __P((struct pv_page *, boolean_t));
                    249: static struct pv_entry *pmap_alloc_pv __P((struct pmap *, int)); /* see codes below */
                    250: #define ALLOCPV_NEED   0       /* need PV now */
                    251: #define ALLOCPV_TRY    1       /* just try to allocate, don't steal */
                    252: #define ALLOCPV_NONEED 2       /* don't need PV, just growing cache */
                    253: static struct pv_entry *pmap_alloc_pvpage __P((struct pmap *, int));
1.30.2.6  nathanw   254: static void             pmap_enter_pv __P((struct vm_page *,
1.30.2.2  thorpej   255:                                            struct pv_entry *, struct pmap *,
                    256:                                            vaddr_t, struct vm_page *, int));
                    257: static void             pmap_free_pv __P((struct pmap *, struct pv_entry *));
                    258: static void             pmap_free_pvs __P((struct pmap *, struct pv_entry *));
                    259: static void             pmap_free_pv_doit __P((struct pv_entry *));
                    260: static void             pmap_free_pvpage __P((void));
                    261: static boolean_t        pmap_is_curpmap __P((struct pmap *));
1.30.2.6  nathanw   262: static struct pv_entry *pmap_remove_pv __P((struct vm_page *, struct pmap *,
1.30.2.2  thorpej   263:                        vaddr_t));
                    264: #define PMAP_REMOVE_ALL                0       /* remove all mappings */
                    265: #define PMAP_REMOVE_SKIPWIRED  1       /* skip wired mappings */
                    266:
1.30.2.6  nathanw   267: static u_int pmap_modify_pv __P((struct pmap *, vaddr_t, struct vm_page *,
1.30.2.4  nathanw   268:        u_int, u_int));
                    269:
1.30.2.6  nathanw   270: /*
                    271:  * Structure that describes an L1 table.
                    272:  */
                    273: struct l1pt {
                    274:        SIMPLEQ_ENTRY(l1pt)     pt_queue;       /* Queue pointers */
                    275:        struct pglist           pt_plist;       /* Allocated page list */
                    276:        vaddr_t                 pt_va;          /* Allocated virtual address */
                    277:        int                     pt_flags;       /* Flags */
                    278: };
                    279: #define        PTFLAG_STATIC           0x01            /* Statically allocated */
                    280: #define        PTFLAG_KPT              0x02            /* Kernel pt's are mapped */
                    281: #define        PTFLAG_CLEAN            0x04            /* L1 is clean */
                    282:
1.30.2.4  nathanw   283: static void pmap_free_l1pt __P((struct l1pt *));
                    284: static int pmap_allocpagedir __P((struct pmap *));
                    285: static int pmap_clean_page __P((struct pv_entry *, boolean_t));
1.30.2.6  nathanw   286: static void pmap_remove_all __P((struct vm_page *));
1.30.2.4  nathanw   287:
1.30.2.7  nathanw   288: static int pmap_alloc_ptpt(struct pmap *);
                    289: static void pmap_free_ptpt(struct pmap *);
                    290:
1.30.2.6  nathanw   291: static struct vm_page  *pmap_alloc_ptp __P((struct pmap *, vaddr_t));
                    292: static struct vm_page  *pmap_get_ptp __P((struct pmap *, vaddr_t));
                    293: __inline static void pmap_clearbit __P((struct vm_page *, unsigned int));
1.30.2.2  thorpej   294:
                    295: extern paddr_t physical_start;
                    296: extern paddr_t physical_freestart;
                    297: extern paddr_t physical_end;
                    298: extern paddr_t physical_freeend;
                    299: extern unsigned int free_pages;
                    300: extern int max_processes;
                    301:
1.30.2.6  nathanw   302: vaddr_t virtual_avail;
1.30.2.2  thorpej   303: vaddr_t virtual_end;
1.30.2.6  nathanw   304: vaddr_t pmap_curmaxkvaddr;
1.30.2.2  thorpej   305:
                    306: vaddr_t avail_start;
                    307: vaddr_t avail_end;
                    308:
                    309: extern pv_addr_t systempage;
                    310:
                    311: /* Variables used by the L1 page table queue code */
                    312: SIMPLEQ_HEAD(l1pt_queue, l1pt);
1.30.2.6  nathanw   313: static struct l1pt_queue l1pt_static_queue; /* head of our static l1 queue */
                    314: static int l1pt_static_queue_count;        /* items in the static l1 queue */
                    315: static int l1pt_static_create_count;       /* static l1 items created */
                    316: static struct l1pt_queue l1pt_queue;       /* head of our l1 queue */
                    317: static int l1pt_queue_count;               /* items in the l1 queue */
                    318: static int l1pt_create_count;              /* stat - L1's create count */
                    319: static int l1pt_reuse_count;               /* stat - L1's reused count */
1.30.2.2  thorpej   320:
                    321: /* Local function prototypes (not used outside this file) */
                    322: void pmap_pinit __P((struct pmap *));
                    323: void pmap_freepagedir __P((struct pmap *));
                    324:
                    325: /* Other function prototypes */
                    326: extern void bzero_page __P((vaddr_t));
                    327: extern void bcopy_page __P((vaddr_t, vaddr_t));
                    328:
                    329: struct l1pt *pmap_alloc_l1pt __P((void));
                    330: static __inline void pmap_map_in_l1 __P((struct pmap *pmap, vaddr_t va,
                    331:      vaddr_t l2pa, boolean_t));
                    332:
                    333: static pt_entry_t *pmap_map_ptes __P((struct pmap *));
                    334: static void pmap_unmap_ptes __P((struct pmap *));
                    335:
1.30.2.6  nathanw   336: __inline static void pmap_vac_me_harder __P((struct pmap *, struct vm_page *,
1.30.2.2  thorpej   337:     pt_entry_t *, boolean_t));
1.30.2.6  nathanw   338: static void pmap_vac_me_kpmap __P((struct pmap *, struct vm_page *,
1.30.2.2  thorpej   339:     pt_entry_t *, boolean_t));
1.30.2.6  nathanw   340: static void pmap_vac_me_user __P((struct pmap *, struct vm_page *,
1.30.2.2  thorpej   341:     pt_entry_t *, boolean_t));
                    342:
                    343: /*
                    344:  * real definition of pv_entry.
                    345:  */
                    346:
                    347: struct pv_entry {
                    348:        struct pv_entry *pv_next;       /* next pv_entry */
                    349:        struct pmap     *pv_pmap;        /* pmap where mapping lies */
                    350:        vaddr_t         pv_va;          /* virtual address for mapping */
                    351:        int             pv_flags;       /* flags */
                    352:        struct vm_page  *pv_ptp;        /* vm_page for the ptp */
                    353: };
                    354:
                    355: /*
                    356:  * pv_entrys are dynamically allocated in chunks from a single page.
                    357:  * we keep track of how many pv_entrys are in use for each page and
                    358:  * we can free pv_entry pages if needed.  there is one lock for the
                    359:  * entire allocation system.
                    360:  */
                    361:
                    362: struct pv_page_info {
                    363:        TAILQ_ENTRY(pv_page) pvpi_list;
                    364:        struct pv_entry *pvpi_pvfree;
                    365:        int pvpi_nfree;
                    366: };
                    367:
                    368: /*
                    369:  * number of pv_entry's in a pv_page
                    370:  * (note: won't work on systems where NBPG isn't a constant)
                    371:  */
                    372:
                    373: #define PVE_PER_PVPAGE ((NBPG - sizeof(struct pv_page_info)) / \
                    374:                        sizeof(struct pv_entry))
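                         /*
                          * Rough arithmetic, purely illustrative (assumes 4 KB pages and
                          * 32-bit pointers with no padding): sizeof(struct pv_page_info)
                          * is 16 and sizeof(struct pv_entry) is 20, so PVE_PER_PVPAGE is
                          * (4096 - 16) / 20 == 204, making PVE_LOWAT 102 and PVE_HIWAT 510.
                          */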
                    375:
                    376: /*
                    377:  * a pv_page: where pv_entrys are allocated from
                    378:  */
                    379:
                    380: struct pv_page {
                    381:        struct pv_page_info pvinfo;
                    382:        struct pv_entry pvents[PVE_PER_PVPAGE];
                    383: };
                    384:
                    385: #ifdef MYCROFT_HACK
                    386: int mycroft_hack = 0;
                    387: #endif
                    388:
                    389: /* Function to set the debug level of the pmap code */
                    390:
                    391: #ifdef PMAP_DEBUG
                    392: void
1.30.2.6  nathanw   393: pmap_debug(int level)
1.30.2.2  thorpej   394: {
                    395:        pmap_debug_level = level;
                    396:        printf("pmap_debug: level=%d\n", pmap_debug_level);
                    397: }
                    398: #endif /* PMAP_DEBUG */
                    399:
                    400: __inline static boolean_t
                    401: pmap_is_curpmap(struct pmap *pmap)
                    402: {
1.30.2.6  nathanw   403:
                    404:        if ((curproc && curproc->l_proc->p_vmspace->vm_map.pmap == pmap) ||
                    405:            pmap == pmap_kernel())
                    406:                return (TRUE);
                    407:
                    408:        return (FALSE);
1.30.2.2  thorpej   409: }
1.30.2.6  nathanw   410:
1.30.2.2  thorpej   411: #include "isadma.h"
                    412:
                    413: #if NISADMA > 0
                    414: /*
                    415:  * Used to protect memory for ISA DMA bounce buffers.  If, when loading
                    416:  * pages into the system, memory intersects with any of these ranges,
                    417:  * the intersecting memory will be loaded into a lower-priority free list.
                    418:  */
                    419: bus_dma_segment_t *pmap_isa_dma_ranges;
                    420: int pmap_isa_dma_nranges;
                    421:
                    422: /*
                    423:  * Check if a memory range intersects with an ISA DMA range, and
                    424:  * return the page-rounded intersection if it does.  The intersection
                    425:  * will be placed on a lower-priority free list.
                    426:  */
1.30.2.6  nathanw   427: static boolean_t
                    428: pmap_isa_dma_range_intersect(paddr_t pa, psize_t size, paddr_t *pap,
                    429:     psize_t *sizep)
1.30.2.2  thorpej   430: {
                    431:        bus_dma_segment_t *ds;
                    432:        int i;
                    433:
                    434:        if (pmap_isa_dma_ranges == NULL)
                    435:                return (FALSE);
                    436:
                    437:        for (i = 0, ds = pmap_isa_dma_ranges;
                    438:             i < pmap_isa_dma_nranges; i++, ds++) {
                    439:                if (ds->ds_addr <= pa && pa < (ds->ds_addr + ds->ds_len)) {
                    440:                        /*
                    441:                         * Beginning of region intersects with this range.
                    442:                         */
                    443:                        *pap = trunc_page(pa);
                    444:                        *sizep = round_page(min(pa + size,
                    445:                            ds->ds_addr + ds->ds_len) - pa);
                    446:                        return (TRUE);
                    447:                }
                    448:                if (pa < ds->ds_addr && ds->ds_addr < (pa + size)) {
                    449:                        /*
                    450:                         * End of region intersects with this range.
                    451:                         */
                    452:                        *pap = trunc_page(ds->ds_addr);
                    453:                        *sizep = round_page(min((pa + size) - ds->ds_addr,
                    454:                            ds->ds_len));
                    455:                        return (TRUE);
                    456:                }
                    457:        }
                    458:
                    459:        /*
                    460:         * No intersection found.
                    461:         */
                    462:        return (FALSE);
                    463: }
                    464: #endif /* NISADMA > 0 */
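                         /*
                          * Worked example for the intersection test above (addresses are
                          * hypothetical): with a DMA segment ds_addr = 0x00100000,
                          * ds_len = 0x00100000 and a region pa = 0x001f0000,
                          * size = 0x00020000, the first test matches and the function
                          * returns *pap = 0x001f0000 and *sizep = 0x00010000, i.e. the
                          * page-rounded part of the region that overlaps the DMA range.
                          */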
                    465:
                    466: /*
                    467:  * p v _ e n t r y   f u n c t i o n s
                    468:  */
                    469:
                    470: /*
                    471:  * pv_entry allocation functions:
                    472:  *   the main pv_entry allocation functions are:
                    473:  *     pmap_alloc_pv: allocate a pv_entry structure
                    474:  *     pmap_free_pv: free one pv_entry
                    475:  *     pmap_free_pvs: free a list of pv_entrys
                    476:  *
                    477:  * the rest are helper functions
                    478:  */
                    479:
                    480: /*
                    481:  * pmap_alloc_pv: inline function to allocate a pv_entry structure
                    482:  * => we lock pvalloc_lock
                    483:  * => if we fail, we call out to pmap_alloc_pvpage
                    484:  * => 3 modes:
                    485:  *    ALLOCPV_NEED   = we really need a pv_entry, even if we have to steal it
                    486:  *    ALLOCPV_TRY    = we want a pv_entry, but not enough to steal
                    487:  *    ALLOCPV_NONEED = we are trying to grow our free list, don't really need
                    488:  *                     one now
                    489:  *
                    490:  * "try" is for optional functions like pmap_copy().
                    491:  */
                    492:
                    493: __inline static struct pv_entry *
1.30.2.6  nathanw   494: pmap_alloc_pv(struct pmap *pmap, int mode)
1.30.2.2  thorpej   495: {
                    496:        struct pv_page *pvpage;
                    497:        struct pv_entry *pv;
                    498:
                    499:        simple_lock(&pvalloc_lock);
                    500:
1.30.2.6  nathanw   501:        pvpage = TAILQ_FIRST(&pv_freepages);
                    502:
                    503:        if (pvpage != NULL) {
1.30.2.2  thorpej   504:                pvpage->pvinfo.pvpi_nfree--;
                    505:                if (pvpage->pvinfo.pvpi_nfree == 0) {
                    506:                        /* nothing left in this one? */
                    507:                        TAILQ_REMOVE(&pv_freepages, pvpage, pvinfo.pvpi_list);
                    508:                }
                    509:                pv = pvpage->pvinfo.pvpi_pvfree;
1.30.2.6  nathanw   510:                KASSERT(pv);
1.30.2.2  thorpej   511:                pvpage->pvinfo.pvpi_pvfree = pv->pv_next;
                    512:                pv_nfpvents--;  /* took one from pool */
                    513:        } else {
                    514:                pv = NULL;              /* need more of them */
                    515:        }
                    516:
                    517:        /*
                    518:         * if below low water mark or we didn't get a pv_entry we try and
                    519:         * create more pv_entrys ...
                    520:         */
                    521:
                    522:        if (pv_nfpvents < PVE_LOWAT || pv == NULL) {
                    523:                if (pv == NULL)
                    524:                        pv = pmap_alloc_pvpage(pmap, (mode == ALLOCPV_TRY) ?
                    525:                                               mode : ALLOCPV_NEED);
                    526:                else
                    527:                        (void) pmap_alloc_pvpage(pmap, ALLOCPV_NONEED);
                    528:        }
                    529:
                    530:        simple_unlock(&pvalloc_lock);
                    531:        return(pv);
                    532: }
                    533:
                    534: /*
                    535:  * pmap_alloc_pvpage: maybe allocate a new pvpage
                    536:  *
                    537:  * if need_entry is false: try and allocate a new pv_page
                    538:  * if need_entry is true: try and allocate a new pv_page and return a
                    539:  *     new pv_entry from it.   if we are unable to allocate a pv_page
                    540:  *     we make a last ditch effort to steal a pv_page from some other
                    541:  *     mapping.    if that fails, we panic...
                    542:  *
                    543:  * => we assume that the caller holds pvalloc_lock
                    544:  */
                    545:
                    546: static struct pv_entry *
1.30.2.6  nathanw   547: pmap_alloc_pvpage(struct pmap *pmap, int mode)
1.30.2.2  thorpej   548: {
                    549:        struct vm_page *pg;
                    550:        struct pv_page *pvpage;
                    551:        struct pv_entry *pv;
                    552:        int s;
                    553:
                    554:        /*
                    555:         * if we need_entry and we've got unused pv_pages, allocate from there
                    556:         */
                    557:
1.30.2.6  nathanw   558:        pvpage = TAILQ_FIRST(&pv_unusedpgs);
                    559:        if (mode != ALLOCPV_NONEED && pvpage != NULL) {
1.30.2.2  thorpej   560:
                    561:                /* move it to pv_freepages list */
                    562:                TAILQ_REMOVE(&pv_unusedpgs, pvpage, pvinfo.pvpi_list);
                    563:                TAILQ_INSERT_HEAD(&pv_freepages, pvpage, pvinfo.pvpi_list);
                    564:
                    565:                /* allocate a pv_entry */
                    566:                pvpage->pvinfo.pvpi_nfree--;    /* can't go to zero */
                    567:                pv = pvpage->pvinfo.pvpi_pvfree;
1.30.2.6  nathanw   568:                KASSERT(pv);
1.30.2.2  thorpej   569:                pvpage->pvinfo.pvpi_pvfree = pv->pv_next;
                    570:
                    571:                pv_nfpvents--;  /* took one from pool */
                    572:                return(pv);
                    573:        }
                    574:
                    575:        /*
                    576:         *  see if we've got a cached unmapped VA that we can map a page in.
                    577:         * if not, try to allocate one.
                    578:         */
                    579:
                    580:
                    581:        if (pv_cachedva == 0) {
                    582:                s = splvm();
                    583:                pv_cachedva = uvm_km_kmemalloc(kmem_map, NULL,
                    584:                    PAGE_SIZE, UVM_KMF_TRYLOCK|UVM_KMF_VALLOC);
                    585:                splx(s);
                    586:                if (pv_cachedva == 0) {
                    587:                        return (NULL);
                    588:                }
                    589:        }
                    590:
                    591:        pg = uvm_pagealloc(NULL, pv_cachedva - vm_map_min(kernel_map), NULL,
                    592:            UVM_PGA_USERESERVE);
                    593:
                    594:        if (pg == NULL)
                    595:                return (NULL);
1.30.2.6  nathanw   596:        pg->flags &= ~PG_BUSY;  /* never busy */
1.30.2.2  thorpej   597:
                    598:        /*
                    599:         * add a mapping for our new pv_page and free its entrys (save one!)
                    600:         *
                    601:         * NOTE: If we are allocating a PV page for the kernel pmap, the
                    602:         * pmap is already locked!  (...but entering the mapping is safe...)
                    603:         */
                    604:
1.30.2.6  nathanw   605:        pmap_kenter_pa(pv_cachedva, VM_PAGE_TO_PHYS(pg),
                    606:                VM_PROT_READ|VM_PROT_WRITE);
1.30.2.2  thorpej   607:        pmap_update(pmap_kernel());
                    608:        pvpage = (struct pv_page *) pv_cachedva;
                    609:        pv_cachedva = 0;
                    610:        return (pmap_add_pvpage(pvpage, mode != ALLOCPV_NONEED));
                    611: }
                    612:
                    613: /*
                    614:  * pmap_add_pvpage: add a pv_page's pv_entrys to the free list
                    615:  *
                    616:  * => caller must hold pvalloc_lock
                    617:  * => if need_entry is true, we allocate and return one pv_entry
                    618:  */
                    619:
                    620: static struct pv_entry *
1.30.2.6  nathanw   621: pmap_add_pvpage(struct pv_page *pvp, boolean_t need_entry)
1.30.2.2  thorpej   622: {
                    623:        int tofree, lcv;
                    624:
                    625:        /* do we need to return one? */
                    626:        tofree = (need_entry) ? PVE_PER_PVPAGE - 1 : PVE_PER_PVPAGE;
                    627:
                    628:        pvp->pvinfo.pvpi_pvfree = NULL;
                    629:        pvp->pvinfo.pvpi_nfree = tofree;
                    630:        for (lcv = 0 ; lcv < tofree ; lcv++) {
                    631:                pvp->pvents[lcv].pv_next = pvp->pvinfo.pvpi_pvfree;
                    632:                pvp->pvinfo.pvpi_pvfree = &pvp->pvents[lcv];
                    633:        }
                    634:        if (need_entry)
                    635:                TAILQ_INSERT_TAIL(&pv_freepages, pvp, pvinfo.pvpi_list);
                    636:        else
                    637:                TAILQ_INSERT_TAIL(&pv_unusedpgs, pvp, pvinfo.pvpi_list);
                    638:        pv_nfpvents += tofree;
                    639:        return((need_entry) ? &pvp->pvents[lcv] : NULL);
                    640: }
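                         /*
                          * Note: when need_entry is true the loop above leaves
                          * lcv == tofree == PVE_PER_PVPAGE - 1, so the pv_entry handed
                          * back is the one slot that was never threaded onto the page's
                          * free list.
                          */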
                    641:
                    642: /*
                    643:  * pmap_free_pv_doit: actually free a pv_entry
                    644:  *
                    645:  * => do not call this directly!  instead use either
                    646:  *    1. pmap_free_pv ==> free a single pv_entry
                    647:  *    2. pmap_free_pvs => free a list of pv_entrys
                    648:  * => we must be holding pvalloc_lock
                    649:  */
                    650:
                    651: __inline static void
1.30.2.6  nathanw   652: pmap_free_pv_doit(struct pv_entry *pv)
1.30.2.2  thorpej   653: {
                    654:        struct pv_page *pvp;
                    655:
                    656:        pvp = (struct pv_page *) arm_trunc_page((vaddr_t)pv);
                    657:        pv_nfpvents++;
                    658:        pvp->pvinfo.pvpi_nfree++;
                    659:
                    660:        /* nfree == 1 => fully allocated page just became partly allocated */
                    661:        if (pvp->pvinfo.pvpi_nfree == 1) {
                    662:                TAILQ_INSERT_HEAD(&pv_freepages, pvp, pvinfo.pvpi_list);
                    663:        }
                    664:
                    665:        /* free it */
                    666:        pv->pv_next = pvp->pvinfo.pvpi_pvfree;
                    667:        pvp->pvinfo.pvpi_pvfree = pv;
                    668:
                    669:        /*
                    670:         * are all pv_page's pv_entry's free?  move it to unused queue.
                    671:         */
                    672:
                    673:        if (pvp->pvinfo.pvpi_nfree == PVE_PER_PVPAGE) {
                    674:                TAILQ_REMOVE(&pv_freepages, pvp, pvinfo.pvpi_list);
                    675:                TAILQ_INSERT_HEAD(&pv_unusedpgs, pvp, pvinfo.pvpi_list);
                    676:        }
                    677: }
                    678:
                    679: /*
                    680:  * pmap_free_pv: free a single pv_entry
                    681:  *
                    682:  * => we gain the pvalloc_lock
                    683:  */
                    684:
                    685: __inline static void
1.30.2.6  nathanw   686: pmap_free_pv(struct pmap *pmap, struct pv_entry *pv)
1.30.2.2  thorpej   687: {
                    688:        simple_lock(&pvalloc_lock);
                    689:        pmap_free_pv_doit(pv);
                    690:
                    691:        /*
                    692:         * Can't free the PV page if the PV entries were associated with
                    693:         * the kernel pmap; the pmap is already locked.
                    694:         */
1.30.2.6  nathanw   695:        if (pv_nfpvents > PVE_HIWAT && TAILQ_FIRST(&pv_unusedpgs) != NULL &&
1.30.2.2  thorpej   696:            pmap != pmap_kernel())
                    697:                pmap_free_pvpage();
                    698:
                    699:        simple_unlock(&pvalloc_lock);
                    700: }
                    701:
                    702: /*
                    703:  * pmap_free_pvs: free a list of pv_entrys
                    704:  *
                    705:  * => we gain the pvalloc_lock
                    706:  */
                    707:
                    708: __inline static void
1.30.2.6  nathanw   709: pmap_free_pvs(struct pmap *pmap, struct pv_entry *pvs)
1.30.2.2  thorpej   710: {
                    711:        struct pv_entry *nextpv;
                    712:
                    713:        simple_lock(&pvalloc_lock);
                    714:
                    715:        for ( /* null */ ; pvs != NULL ; pvs = nextpv) {
                    716:                nextpv = pvs->pv_next;
                    717:                pmap_free_pv_doit(pvs);
                    718:        }
                    719:
                    720:        /*
                    721:         * Can't free the PV page if the PV entries were associated with
                    722:         * the kernel pmap; the pmap is already locked.
                    723:         */
1.30.2.6  nathanw   724:        if (pv_nfpvents > PVE_HIWAT && TAILQ_FIRST(&pv_unusedpgs) != NULL &&
1.30.2.2  thorpej   725:            pmap != pmap_kernel())
                    726:                pmap_free_pvpage();
                    727:
                    728:        simple_unlock(&pvalloc_lock);
                    729: }
                    730:
                    731:
                    732: /*
                    733:  * pmap_free_pvpage: try and free an unused pv_page structure
                    734:  *
                    735:  * => assume caller is holding the pvalloc_lock and that
                    736:  *     there is a page on the pv_unusedpgs list
                    737:  * => if we can't get a lock on the kmem_map we try again later
                    738:  */
                    739:
                    740: static void
1.30.2.6  nathanw   741: pmap_free_pvpage(void)
1.30.2.2  thorpej   742: {
                    743:        int s;
                    744:        struct vm_map *map;
                    745:        struct vm_map_entry *dead_entries;
                    746:        struct pv_page *pvp;
                    747:
                    748:        s = splvm(); /* protect kmem_map */
                    749:
1.30.2.6  nathanw   750:        pvp = TAILQ_FIRST(&pv_unusedpgs);
1.30.2.2  thorpej   751:
                    752:        /*
                    753:         * note: watch out for pv_initpage which is allocated out of
                    754:         * kernel_map rather than kmem_map.
                    755:         */
                    756:        if (pvp == pv_initpage)
                    757:                map = kernel_map;
                    758:        else
                    759:                map = kmem_map;
                    760:        if (vm_map_lock_try(map)) {
                    761:
                    762:                /* remove pvp from pv_unusedpgs */
                    763:                TAILQ_REMOVE(&pv_unusedpgs, pvp, pvinfo.pvpi_list);
                    764:
                    765:                /* unmap the page */
                    766:                dead_entries = NULL;
                    767:                uvm_unmap_remove(map, (vaddr_t)pvp, ((vaddr_t)pvp) + PAGE_SIZE,
                    768:                    &dead_entries);
                    769:                vm_map_unlock(map);
                    770:
                    771:                if (dead_entries != NULL)
                    772:                        uvm_unmap_detach(dead_entries, 0);
                    773:
                    774:                pv_nfpvents -= PVE_PER_PVPAGE;  /* update free count */
                    775:        }
                    776:        if (pvp == pv_initpage)
                    777:                /* no more initpage, we've freed it */
                    778:                pv_initpage = NULL;
                    779:
                    780:        splx(s);
                    781: }
                    782:
                    783: /*
                    784:  * main pv_entry manipulation functions:
1.30.2.6  nathanw   785:  *   pmap_enter_pv: enter a mapping onto a vm_page list
                    786:  *   pmap_remove_pv: remove a mapping from a vm_page list
1.30.2.2  thorpej   787:  *
                    788:  * NOTE: pmap_enter_pv expects to lock the pvh itself
                    789:  *       pmap_remove_pv expects the caller to lock the pvh before calling
                    790:  */
                    791:
                    792: /*
1.30.2.6  nathanw   793:  * pmap_enter_pv: enter a mapping onto a vm_page list
1.30.2.2  thorpej   794:  *
                    795:  * => caller should hold the proper lock on pmap_main_lock
                    796:  * => caller should have pmap locked
1.30.2.6  nathanw   797:  * => we will gain the lock on the vm_page and allocate the new pv_entry
1.30.2.2  thorpej   798:  * => caller should adjust ptp's wire_count before calling
                    799:  * => caller should not adjust pmap's wire_count
                    800:  */
                    801:
                    802: __inline static void
1.30.2.6  nathanw   803: pmap_enter_pv(struct vm_page *pg, struct pv_entry *pve, struct pmap *pmap,
                    804:     vaddr_t va, struct vm_page *ptp, int flags)
1.30.2.2  thorpej   805: {
                    806:        pve->pv_pmap = pmap;
                    807:        pve->pv_va = va;
                    808:        pve->pv_ptp = ptp;                      /* NULL for kernel pmap */
                    809:        pve->pv_flags = flags;
1.30.2.6  nathanw   810:        simple_lock(&pg->mdpage.pvh_slock);     /* lock vm_page */
                    811:        pve->pv_next = pg->mdpage.pvh_list;     /* add to ... */
                    812:        pg->mdpage.pvh_list = pve;              /* ... locked list */
                    813:        simple_unlock(&pg->mdpage.pvh_slock);   /* unlock, done! */
1.30.2.7  nathanw   814:        if (pve->pv_flags & PVF_WIRED)
1.30.2.2  thorpej   815:                ++pmap->pm_stats.wired_count;
                    816: }
                    817:
                    818: /*
                    819:  * pmap_remove_pv: try to remove a mapping from a pv_list
                    820:  *
                    821:  * => caller should hold proper lock on pmap_main_lock
                    822:  * => pmap should be locked
1.30.2.6  nathanw   823:  * => caller should hold lock on vm_page [so that attrs can be adjusted]
1.30.2.2  thorpej   824:  * => caller should adjust ptp's wire_count and free PTP if needed
                    825:  * => caller should NOT adjust pmap's wire_count
                    826:  * => we return the removed pve
                    827:  */
                    828:
                    829: __inline static struct pv_entry *
1.30.2.6  nathanw   830: pmap_remove_pv(struct vm_page *pg, struct pmap *pmap, vaddr_t va)
1.30.2.2  thorpej   831: {
                    832:        struct pv_entry *pve, **prevptr;
                    833:
1.30.2.6  nathanw   834:        prevptr = &pg->mdpage.pvh_list;         /* previous pv_entry pointer */
1.30.2.2  thorpej   835:        pve = *prevptr;
                    836:        while (pve) {
                    837:                if (pve->pv_pmap == pmap && pve->pv_va == va) { /* match? */
                    838:                        *prevptr = pve->pv_next;                /* remove it! */
1.30.2.7  nathanw   839:                        if (pve->pv_flags & PVF_WIRED)
1.30.2.2  thorpej   840:                            --pmap->pm_stats.wired_count;
                    841:                        break;
                    842:                }
                    843:                prevptr = &pve->pv_next;                /* previous pointer */
                    844:                pve = pve->pv_next;                     /* advance */
                    845:        }
                    846:        return(pve);                            /* return removed pve */
                    847: }
                    848:
                    849: /*
                    850:  *
                    851:  * pmap_modify_pv: Update pv flags
                    852:  *
1.30.2.6  nathanw   853:  * => caller should hold lock on vm_page [so that attrs can be adjusted]
1.30.2.2  thorpej   854:  * => caller should NOT adjust pmap's wire_count
                    855:  * => caller must call pmap_vac_me_harder() if writable status of a page
                    856:  *    may have changed.
                    857:  * => we return the old flags
                    858:  *
                    859:  * Modify a physical-virtual mapping in the pv table
                    860:  */
                    861:
1.30.2.6  nathanw   862: static /* __inline */ u_int
                    863: pmap_modify_pv(struct pmap *pmap, vaddr_t va, struct vm_page *pg,
                    864:     u_int bic_mask, u_int eor_mask)
1.30.2.2  thorpej   865: {
                    866:        struct pv_entry *npv;
                    867:        u_int flags, oflags;
                    868:
                    869:        /*
                    870:         * There is at least one VA mapping this page.
                    871:         */
                    872:
1.30.2.6  nathanw   873:        for (npv = pg->mdpage.pvh_list; npv; npv = npv->pv_next) {
1.30.2.2  thorpej   874:                if (pmap == npv->pv_pmap && va == npv->pv_va) {
                    875:                        oflags = npv->pv_flags;
                    876:                        npv->pv_flags = flags =
                    877:                            ((oflags & ~bic_mask) ^ eor_mask);
1.30.2.7  nathanw   878:                        if ((flags ^ oflags) & PVF_WIRED) {
                    879:                                if (flags & PVF_WIRED)
1.30.2.2  thorpej   880:                                        ++pmap->pm_stats.wired_count;
                    881:                                else
                    882:                                        --pmap->pm_stats.wired_count;
                    883:                        }
                    884:                        return (oflags);
                    885:                }
                    886:        }
                    887:        return (0);
                    888: }
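                         /*
                          * Illustrative calls (not taken verbatim from this file): the
                          * bic/eor convention above clears bic_mask first and then toggles
                          * eor_mask, so
                          *
                          *	pmap_modify_pv(pmap, va, pg, PVF_WIRED, 0)
                          *		clears PVF_WIRED, and
                          *	pmap_modify_pv(pmap, va, pg, PVF_WIRED, PVF_WIRED)
                          *		sets PVF_WIRED,
                          *
                          * with either form returning the previous flags (or 0 if no
                          * matching pv_entry exists).
                          */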
                    889:
                    890: /*
                    891:  * Map the specified level 2 pagetable into the level 1 page table for
                    892:  * the given pmap to cover a chunk of virtual address space starting from the
                    893:  * address specified.
                    894:  */
1.30.2.6  nathanw   895: static __inline void
                    896: pmap_map_in_l1(struct pmap *pmap, vaddr_t va, paddr_t l2pa, boolean_t selfref)
1.30.2.2  thorpej   897: {
                    898:        vaddr_t ptva;
                    899:
                    900:        /* Calculate the index into the L1 page table. */
1.30.2.7  nathanw   901:        ptva = (va >> L1_S_SHIFT) & ~3;
1.30.2.2  thorpej   902:
                    903:        /* Map page table into the L1. */
1.30.2.7  nathanw   904:        pmap->pm_pdir[ptva + 0] = L1_C_PROTO | (l2pa + 0x000);
                    905:        pmap->pm_pdir[ptva + 1] = L1_C_PROTO | (l2pa + 0x400);
                    906:        pmap->pm_pdir[ptva + 2] = L1_C_PROTO | (l2pa + 0x800);
                    907:        pmap->pm_pdir[ptva + 3] = L1_C_PROTO | (l2pa + 0xc00);
1.30.2.2  thorpej   908:
                    909:        /* Map the page table into the page table area. */
1.30.2.6  nathanw   910:        if (selfref)
1.30.2.7  nathanw   911:                *((pt_entry_t *)(pmap->pm_vptpt + ptva)) = L2_S_PROTO | l2pa |
                    912:                    L2_S_PROT(PTE_KERNEL, VM_PROT_READ|VM_PROT_WRITE);
1.30.2.2  thorpej   913: }
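                         /*
                          * Background for the four stores above: an ARM level 1 "coarse"
                          * descriptor maps 1 MB and points at a 1 KB level 2 table, while
                          * this pmap allocates L2 tables a 4 KB page at a time.  One call
                          * therefore fills four consecutive L1 slots (l2pa + 0x000, 0x400,
                          * 0x800, 0xc00), covering 4 MB of virtual space, and with selfref
                          * also enters the L2 page into the pmap's page table area so its
                          * PTEs can be edited through pm_vptpt.
                          */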
                    914:
                    915: #if 0
1.30.2.6  nathanw   916: static __inline void
                    917: pmap_unmap_in_l1(struct pmap *pmap, vaddr_t va)
1.30.2.2  thorpej   918: {
                    919:        vaddr_t ptva;
                    920:
                    921:        /* Calculate the index into the L1 page table. */
1.30.2.7  nathanw   922:        ptva = (va >> L1_S_SHIFT) & ~3;
1.30.2.2  thorpej   923:
                    924:        /* Unmap page table from the L1. */
                    925:        pmap->pm_pdir[ptva + 0] = 0;
                    926:        pmap->pm_pdir[ptva + 1] = 0;
                    927:        pmap->pm_pdir[ptva + 2] = 0;
                    928:        pmap->pm_pdir[ptva + 3] = 0;
                    929:
                    930:        /* Unmap the page table from the page table area. */
                    931:        *((pt_entry_t *)(pmap->pm_vptpt + ptva)) = 0;
                    932: }
                    933: #endif
                    934:
                    935: /*
                    936:  *     Used to map a range of physical addresses into kernel
                    937:  *     virtual address space.
                    938:  *
                    939:  *     For now, VM is already on, we only need to map the
                    940:  *     specified memory.
                    941:  */
                    942: vaddr_t
1.30.2.6  nathanw   943: pmap_map(vaddr_t va, paddr_t spa, paddr_t epa, vm_prot_t prot)
1.30.2.2  thorpej   944: {
                    945:        while (spa < epa) {
                    946:                pmap_kenter_pa(va, spa, prot);
                    947:                va += NBPG;
                    948:                spa += NBPG;
                    949:        }
                    950:        pmap_update(pmap_kernel());
                    951:        return(va);
                    952: }
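                         /*
                          * Illustrative use (hypothetical addresses): map a physically
                          * contiguous buffer into the kernel and get back the VA just past
                          * the new mappings:
                          *
                          *	va = pmap_map(va, pa, pa + 4 * NBPG,
                          *	    VM_PROT_READ|VM_PROT_WRITE);
                          *
                          * The mappings are entered with pmap_kenter_pa(), i.e. they are
                          * unmanaged kernel mappings.
                          */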
                    953:
                    954:
                    955: /*
                    956:  * void pmap_bootstrap(pd_entry_t *kernel_l1pt, pv_addr_t kernel_ptpt)
                    957:  *
                    958:  * bootstrap the pmap system. This is called from initarm and allows
                    959:  * the pmap system to initialise any structures it requires.
                    960:  *
                    961:  * Currently this sets up the kernel_pmap that is statically allocated
                    962:  * and also allocates virtual addresses for certain page hooks.
                    963:  * Currently only one page hook is allocated; it is used
                    964:  * to zero physical pages of memory.
                    965:  * It also initialises the start and end address of the kernel data space.
                    966:  */
                    967: extern paddr_t physical_freestart;
                    968: extern paddr_t physical_freeend;
                    969:
                    970: char *boot_head;
                    971:
                    972: void
1.30.2.6  nathanw   973: pmap_bootstrap(pd_entry_t *kernel_l1pt, pv_addr_t kernel_ptpt)
1.30.2.2  thorpej   974: {
1.30.2.6  nathanw   975:        pt_entry_t *pte;
1.30.2.2  thorpej   976:        int loop;
                    977:        paddr_t start, end;
                    978: #if NISADMA > 0
                    979:        paddr_t istart;
                    980:        psize_t isize;
                    981: #endif
                    982:
                    983:        pmap_kernel()->pm_pdir = kernel_l1pt;
                    984:        pmap_kernel()->pm_pptpt = kernel_ptpt.pv_pa;
                    985:        pmap_kernel()->pm_vptpt = kernel_ptpt.pv_va;
                    986:        simple_lock_init(&pmap_kernel()->pm_lock);
                    987:        pmap_kernel()->pm_obj.pgops = NULL;
                    988:        TAILQ_INIT(&(pmap_kernel()->pm_obj.memq));
                    989:        pmap_kernel()->pm_obj.uo_npages = 0;
                    990:        pmap_kernel()->pm_obj.uo_refs = 1;
                    991:
                    992:        /*
                    993:         * Initialize PAGE_SIZE-dependent variables.
                    994:         */
                    995:        uvm_setpagesize();
                    996:
                    997:        loop = 0;
                    998:        while (loop < bootconfig.dramblocks) {
                    999:                start = (paddr_t)bootconfig.dram[loop].address;
                   1000:                end = start + (bootconfig.dram[loop].pages * NBPG);
                   1001:                if (start < physical_freestart)
                   1002:                        start = physical_freestart;
                   1003:                if (end > physical_freeend)
                   1004:                        end = physical_freeend;
                   1005: #if 0
                   1006:                printf("%d: %lx -> %lx\n", loop, start, end - 1);
                   1007: #endif
                   1008: #if NISADMA > 0
                   1009:                if (pmap_isa_dma_range_intersect(start, end - start,
                   1010:                    &istart, &isize)) {
                   1011:                        /*
                   1012:                         * Place the pages that intersect with the
                   1013:                         * ISA DMA range onto the ISA DMA free list.
                   1014:                         */
                   1015: #if 0
                   1016:                        printf("    ISADMA 0x%lx -> 0x%lx\n", istart,
                   1017:                            istart + isize - 1);
                   1018: #endif
                   1019:                        uvm_page_physload(atop(istart),
                   1020:                            atop(istart + isize), atop(istart),
                   1021:                            atop(istart + isize), VM_FREELIST_ISADMA);
1.30.2.6  nathanw  1022:
1.30.2.2  thorpej  1023:                        /*
                   1024:                         * Load the pieces that come before
                   1025:                         * the intersection into the default
                   1026:                         * free list.
                   1027:                         */
                   1028:                        if (start < istart) {
                   1029: #if 0
                   1030:                                printf("    BEFORE 0x%lx -> 0x%lx\n",
                   1031:                                    start, istart - 1);
                   1032: #endif
                   1033:                                uvm_page_physload(atop(start),
                   1034:                                    atop(istart), atop(start),
                   1035:                                    atop(istart), VM_FREELIST_DEFAULT);
                   1036:                        }
                   1037:
                   1038:                        /*
                   1039:                         * Load the pieces that come after
                   1040:                         * the intersection into the default
                   1041:                         * free list.
                   1042:                         */
                   1043:                        if ((istart + isize) < end) {
                   1044: #if 0
                   1045:                                printf("     AFTER 0x%lx -> 0x%lx\n",
                   1046:                                    (istart + isize), end - 1);
                   1047: #endif
                   1048:                                uvm_page_physload(atop(istart + isize),
                   1049:                                    atop(end), atop(istart + isize),
                   1050:                                    atop(end), VM_FREELIST_DEFAULT);
                   1051:                        }
                   1052:                } else {
                   1053:                        uvm_page_physload(atop(start), atop(end),
                   1054:                            atop(start), atop(end), VM_FREELIST_DEFAULT);
                   1055:                }
                   1056: #else  /* NISADMA > 0 */
                   1057:                uvm_page_physload(atop(start), atop(end),
                   1058:                    atop(start), atop(end), VM_FREELIST_DEFAULT);
                   1059: #endif /* NISADMA > 0 */
                   1060:                ++loop;
                   1061:        }
                   1062:
1.30.2.6  nathanw  1063:        virtual_avail = KERNEL_VM_BASE;
                   1064:        virtual_end = KERNEL_VM_BASE + KERNEL_VM_SIZE;
1.30.2.2  thorpej  1065:
                   1066:        /*
1.30.2.6  nathanw  1067:         * now we allocate the "special" VAs which are used for tmp mappings
                   1068:         * by the pmap (and other modules).  we allocate the VAs by advancing
                   1069:         * virtual_avail (note that there are no pages mapped at these VAs).
                   1070:         * we find the PTE that maps the allocated VA via the linear PTE
                   1071:         * mapping.
1.30.2.2  thorpej  1072:         */
                   1073:
1.30.2.6  nathanw  1074:        pte = ((pt_entry_t *) PTE_BASE) + atop(virtual_avail);
                   1075:
                   1076:        csrcp = virtual_avail; csrc_pte = pte;
                   1077:        virtual_avail += PAGE_SIZE; pte++;
                   1078:
                   1079:        cdstp = virtual_avail; cdst_pte = pte;
                   1080:        virtual_avail += PAGE_SIZE; pte++;
                   1081:
                   1082:        memhook = (char *) virtual_avail;       /* don't need pte */
                   1083:        virtual_avail += PAGE_SIZE; pte++;
                   1084:
                   1085:        msgbufaddr = (caddr_t) virtual_avail;   /* don't need pte */
                   1086:        virtual_avail += round_page(MSGBUFSIZE);
                   1087:        pte += atop(round_page(MSGBUFSIZE));
1.30.2.2  thorpej  1088:
                   1089:        /*
                   1090:         * init the static-global locks and global lists.
                   1091:         */
                   1092:        spinlockinit(&pmap_main_lock, "pmaplk", 0);
                   1093:        simple_lock_init(&pvalloc_lock);
1.30.2.6  nathanw  1094:        simple_lock_init(&pmaps_lock);
                   1095:        LIST_INIT(&pmaps);
1.30.2.2  thorpej  1096:        TAILQ_INIT(&pv_freepages);
                   1097:        TAILQ_INIT(&pv_unusedpgs);
                   1098:
                   1099:        /*
                   1100:         * initialize the pmap pool.
                   1101:         */
                   1102:
                   1103:        pool_init(&pmap_pmap_pool, sizeof(struct pmap), 0, 0, 0, "pmappl",
1.30.2.6  nathanw  1104:                  &pool_allocator_nointr);
1.30.2.2  thorpej  1105:
1.30.2.5  nathanw  1106:        cpu_dcache_wbinv_all();
1.30.2.2  thorpej  1107: }
                   1108:
                   1109: /*
                   1110:  * void pmap_init(void)
                   1111:  *
                   1112:  * Initialize the pmap module.
                   1113:  * Called by vm_init() in vm/vm_init.c in order to initialise
                   1114:  * any structures that the pmap system needs to map virtual memory.
                   1115:  */
                   1116:
                   1117: extern int physmem;
                   1118:
                   1119: void
1.30.2.6  nathanw  1120: pmap_init(void)
1.30.2.2  thorpej  1121: {
                   1122:
                   1123:        /*
                    1124:         * Set the available memory vars.  These do not map to real memory
                    1125:         * addresses, and cannot, as the physical memory is fragmented.
                    1126:         * They are used by ps for %mem calculations.
                    1127:         * One could argue whether this should be the entire memory or just
                    1128:         * the memory that is usable in a user process.
                   1129:         */
                   1130:        avail_start = 0;
                   1131:        avail_end = physmem * NBPG;
                   1132:
                   1133:        /*
                    1134:         * now we need to allocate enough pv_entry structures to allow us to get
                   1135:         * the kmem_map/kmem_object allocated and inited (done after this
                   1136:         * function is finished).  to do this we allocate one bootstrap page out
                   1137:         * of kernel_map and use it to provide an initial pool of pv_entry
                   1138:         * structures.   we never free this page.
                   1139:         */
                   1140:
                   1141:        pv_initpage = (struct pv_page *) uvm_km_alloc(kernel_map, PAGE_SIZE);
                   1142:        if (pv_initpage == NULL)
                   1143:                panic("pmap_init: pv_initpage");
                   1144:        pv_cachedva = 0;   /* a VA we have allocated but not used yet */
                   1145:        pv_nfpvents = 0;
                   1146:        (void) pmap_add_pvpage(pv_initpage, FALSE);
                   1147:
                   1148:        pmap_initialized = TRUE;
                   1149:
                   1150:        /* Initialise our L1 page table queues and counters */
                   1151:        SIMPLEQ_INIT(&l1pt_static_queue);
                   1152:        l1pt_static_queue_count = 0;
                   1153:        l1pt_static_create_count = 0;
                   1154:        SIMPLEQ_INIT(&l1pt_queue);
                   1155:        l1pt_queue_count = 0;
                   1156:        l1pt_create_count = 0;
                   1157:        l1pt_reuse_count = 0;
                   1158: }
                   1159:
                   1160: /*
                   1161:  * pmap_postinit()
                   1162:  *
                   1163:  * This routine is called after the vm and kmem subsystems have been
                   1164:  * initialised. This allows the pmap code to perform any initialisation
                   1165:  * that can only be done one the memory allocation is in place.
                   1166:  */
                   1167:
                   1168: void
1.30.2.6  nathanw  1169: pmap_postinit(void)
1.30.2.2  thorpej  1170: {
                   1171:        int loop;
                   1172:        struct l1pt *pt;
                   1173:
                   1174: #ifdef PMAP_STATIC_L1S
                   1175:        for (loop = 0; loop < PMAP_STATIC_L1S; ++loop) {
                   1176: #else  /* PMAP_STATIC_L1S */
                   1177:        for (loop = 0; loop < max_processes; ++loop) {
                   1178: #endif /* PMAP_STATIC_L1S */
                   1179:                /* Allocate a L1 page table */
                   1180:                pt = pmap_alloc_l1pt();
                   1181:                if (!pt)
                   1182:                        panic("Cannot allocate static L1 page tables\n");
                   1183:
                   1184:                /* Clean it */
1.30.2.7  nathanw  1185:                bzero((void *)pt->pt_va, L1_TABLE_SIZE);
1.30.2.2  thorpej  1186:                pt->pt_flags |= (PTFLAG_STATIC | PTFLAG_CLEAN);
                   1187:                /* Add the page table to the queue */
                   1188:                SIMPLEQ_INSERT_TAIL(&l1pt_static_queue, pt, pt_queue);
                   1189:                ++l1pt_static_queue_count;
                   1190:                ++l1pt_static_create_count;
                   1191:        }
                   1192: }
                   1193:
                   1194:
                   1195: /*
                   1196:  * Create and return a physical map.
                   1197:  *
                   1198:  * If the size specified for the map is zero, the map is an actual physical
                   1199:  * map, and may be referenced by the hardware.
                   1200:  *
                   1201:  * If the size specified is non-zero, the map will be used in software only,
                   1202:  * and is bounded by that size.
                   1203:  */
                   1204:
                   1205: pmap_t
1.30.2.6  nathanw  1206: pmap_create(void)
1.30.2.2  thorpej  1207: {
                   1208:        struct pmap *pmap;
                   1209:
                   1210:        /*
                   1211:         * Fetch pmap entry from the pool
                   1212:         */
                   1213:
                   1214:        pmap = pool_get(&pmap_pmap_pool, PR_WAITOK);
                   1215:        /* XXX is this really needed! */
                   1216:        memset(pmap, 0, sizeof(*pmap));
                   1217:
                   1218:        simple_lock_init(&pmap->pm_obj.vmobjlock);
                   1219:        pmap->pm_obj.pgops = NULL;      /* currently not a mappable object */
                   1220:        TAILQ_INIT(&pmap->pm_obj.memq);
                   1221:        pmap->pm_obj.uo_npages = 0;
                   1222:        pmap->pm_obj.uo_refs = 1;
                   1223:        pmap->pm_stats.wired_count = 0;
                   1224:        pmap->pm_stats.resident_count = 1;
1.30.2.6  nathanw  1225:        pmap->pm_ptphint = NULL;
1.30.2.2  thorpej  1226:
                   1227:        /* Now init the machine part of the pmap */
                   1228:        pmap_pinit(pmap);
                   1229:        return(pmap);
                   1230: }
                   1231:
                   1232: /*
                   1233:  * pmap_alloc_l1pt()
                   1234:  *
                    1235:  * This routine allocates physical and virtual memory for an L1 page table
                    1236:  * and wires it.
                    1237:  * An l1pt structure is returned to describe the allocated page table.
                   1238:  *
                   1239:  * This routine is allowed to fail if the required memory cannot be allocated.
                   1240:  * In this case NULL is returned.
                   1241:  */
                   1242:
                   1243: struct l1pt *
                   1244: pmap_alloc_l1pt(void)
                   1245: {
                   1246:        paddr_t pa;
                   1247:        vaddr_t va;
                   1248:        struct l1pt *pt;
                   1249:        int error;
                   1250:        struct vm_page *m;
1.30.2.7  nathanw  1251:        pt_entry_t *pte;
1.30.2.2  thorpej  1252:
                   1253:        /* Allocate virtual address space for the L1 page table */
1.30.2.7  nathanw  1254:        va = uvm_km_valloc(kernel_map, L1_TABLE_SIZE);
1.30.2.2  thorpej  1255:        if (va == 0) {
                   1256: #ifdef DIAGNOSTIC
                   1257:                PDEBUG(0,
                   1258:                    printf("pmap: Cannot allocate pageable memory for L1\n"));
                   1259: #endif /* DIAGNOSTIC */
                   1260:                return(NULL);
                   1261:        }
                   1262:
                   1263:        /* Allocate memory for the l1pt structure */
                   1264:        pt = (struct l1pt *)malloc(sizeof(struct l1pt), M_VMPMAP, M_WAITOK);
                   1265:
                   1266:        /*
                   1267:         * Allocate pages from the VM system.
                   1268:         */
1.30.2.7  nathanw  1269:        error = uvm_pglistalloc(L1_TABLE_SIZE, physical_start, physical_end,
                   1270:            L1_TABLE_SIZE, 0, &pt->pt_plist, 1, M_WAITOK);
1.30.2.2  thorpej  1271:        if (error) {
                   1272: #ifdef DIAGNOSTIC
                   1273:                PDEBUG(0,
                   1274:                    printf("pmap: Cannot allocate physical mem for L1 (%d)\n",
                   1275:                    error));
                   1276: #endif /* DIAGNOSTIC */
                   1277:                /* Release the resources we already have claimed */
                   1278:                free(pt, M_VMPMAP);
1.30.2.7  nathanw  1279:                uvm_km_free(kernel_map, va, L1_TABLE_SIZE);
1.30.2.2  thorpej  1280:                return(NULL);
                   1281:        }
                   1282:
                   1283:        /* Map our physical pages into our virtual space */
                   1284:        pt->pt_va = va;
1.30.2.6  nathanw  1285:        m = TAILQ_FIRST(&pt->pt_plist);
1.30.2.7  nathanw  1286:        while (m && va < (pt->pt_va + L1_TABLE_SIZE)) {
1.30.2.2  thorpej  1287:                pa = VM_PAGE_TO_PHYS(m);
                   1288:
1.30.2.7  nathanw  1289:                pte = vtopte(va);
                   1290:
                   1291:                /*
                   1292:                 * Assert that the PTE is invalid.  If it's invalid,
                   1293:                 * then we are guaranteed that there won't be an entry
                   1294:                 * for this VA in the TLB.
                   1295:                 */
                   1296:                KDASSERT(pmap_pte_v(pte) == 0);
1.30.2.2  thorpej  1297:
1.30.2.7  nathanw  1298:                *pte = L2_S_PROTO | VM_PAGE_TO_PHYS(m) |
                   1299:                    L2_S_PROT(PTE_KERNEL, VM_PROT_READ|VM_PROT_WRITE);
1.30.2.2  thorpej  1300:
                   1301:                va += NBPG;
                   1302:                m = m->pageq.tqe_next;
                   1303:        }
                   1304:
                   1305: #ifdef DIAGNOSTIC
                   1306:        if (m)
                   1307:                panic("pmap_alloc_l1pt: pglist not empty\n");
                   1308: #endif /* DIAGNOSTIC */
                   1309:
                   1310:        pt->pt_flags = 0;
                   1311:        return(pt);
                   1312: }
                   1313:
                   1314: /*
                    1315:  * Free an L1 page table previously allocated with pmap_alloc_l1pt().
                   1316:  */
1.30.2.4  nathanw  1317: static void
1.30.2.6  nathanw  1318: pmap_free_l1pt(struct l1pt *pt)
1.30.2.2  thorpej  1319: {
                    1320:        /* Separate the physical memory from the virtual space */
1.30.2.7  nathanw  1321:        pmap_kremove(pt->pt_va, L1_TABLE_SIZE);
1.30.2.2  thorpej  1322:        pmap_update(pmap_kernel());
                   1323:
                   1324:        /* Return the physical memory */
                   1325:        uvm_pglistfree(&pt->pt_plist);
                   1326:
                   1327:        /* Free the virtual space */
1.30.2.7  nathanw  1328:        uvm_km_free(kernel_map, pt->pt_va, L1_TABLE_SIZE);
1.30.2.2  thorpej  1329:
                   1330:        /* Free the l1pt structure */
                   1331:        free(pt, M_VMPMAP);
                   1332: }
                   1333:
                   1334: /*
1.30.2.7  nathanw  1335:  * pmap_alloc_ptpt:
                   1336:  *
                   1337:  *     Allocate the page table that maps the PTE array.
                   1338:  */
                   1339: static int
                   1340: pmap_alloc_ptpt(struct pmap *pmap)
                   1341: {
                   1342:        struct vm_page *pg;
                   1343:        pt_entry_t *pte;
                   1344:
                   1345:        KASSERT(pmap->pm_vptpt == 0);
                   1346:
                   1347:        pmap->pm_vptpt = uvm_km_valloc(kernel_map, L2_TABLE_SIZE);
                   1348:        if (pmap->pm_vptpt == 0) {
                   1349:                PDEBUG(0,
                   1350:                    printf("pmap_alloc_ptpt: no KVA for PTPT\n"));
                   1351:                return (ENOMEM);
                   1352:        }
                   1353:
                   1354:        for (;;) {
                   1355:                pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO);
                   1356:                if (pg != NULL)
                   1357:                        break;
                   1358:                uvm_wait("pmap_ptpt");
                   1359:        }
                   1360:
                   1361:        pmap->pm_pptpt = VM_PAGE_TO_PHYS(pg);
                   1362:
                   1363:        pte = vtopte(pmap->pm_vptpt);
                   1364:
                   1365:        KDASSERT(pmap_pte_v(pte) == 0);
                   1366:
                   1367:        *pte = L2_S_PROTO | pmap->pm_pptpt |
                   1368:            L2_S_PROT(PTE_KERNEL, VM_PROT_READ|VM_PROT_WRITE);
                   1369:
                   1370:        return (0);
                   1371: }
                   1372:
                   1373: /*
                   1374:  * pmap_free_ptpt:
                   1375:  *
                   1376:  *     Free the page table that maps the PTE array.
                   1377:  */
                   1378: static void
                   1379: pmap_free_ptpt(struct pmap *pmap)
                   1380: {
                   1381:
                   1382:        pmap_kremove(pmap->pm_vptpt, L2_TABLE_SIZE);
                   1383:        pmap_update(pmap_kernel());
                   1384:
                   1385:        uvm_pagefree(PHYS_TO_VM_PAGE(pmap->pm_pptpt));
                   1386:
                   1387:        uvm_km_free(kernel_map, pmap->pm_vptpt, L2_TABLE_SIZE);
                   1388: }
                   1389:
                   1390: /*
1.30.2.2  thorpej  1391:  * Allocate a page directory.
                   1392:  * This routine will either allocate a new page directory from the pool
                   1393:  * of L1 page tables currently held by the kernel or it will allocate
                   1394:  * a new one via pmap_alloc_l1pt().
                   1395:  * It will then initialise the l1 page table for use.
                   1396:  */
1.30.2.4  nathanw  1397: static int
1.30.2.6  nathanw  1398: pmap_allocpagedir(struct pmap *pmap)
1.30.2.2  thorpej  1399: {
                   1400:        paddr_t pa;
                   1401:        struct l1pt *pt;
1.30.2.7  nathanw  1402:        int error;
1.30.2.2  thorpej  1403:
                   1404:        PDEBUG(0, printf("pmap_allocpagedir(%p)\n", pmap));
                   1405:
                   1406:        /* Do we have any spare L1's lying around ? */
                   1407:        if (l1pt_static_queue_count) {
                   1408:                --l1pt_static_queue_count;
1.30.2.8! nathanw  1409:                pt = SIMPLEQ_FIRST(&l1pt_static_queue);
        !          1410:                SIMPLEQ_REMOVE_HEAD(&l1pt_static_queue, pt_queue);
1.30.2.2  thorpej  1411:        } else if (l1pt_queue_count) {
                   1412:                --l1pt_queue_count;
1.30.2.8! nathanw  1413:                pt = SIMPLEQ_FIRST(&l1pt_queue);
        !          1414:                SIMPLEQ_REMOVE_HEAD(&l1pt_queue, pt_queue);
1.30.2.2  thorpej  1415:                ++l1pt_reuse_count;
                   1416:        } else {
                   1417:                pt = pmap_alloc_l1pt();
                   1418:                if (!pt)
                   1419:                        return(ENOMEM);
                   1420:                ++l1pt_create_count;
                   1421:        }
                   1422:
                   1423:        /* Store the pointer to the l1 descriptor in the pmap. */
                   1424:        pmap->pm_l1pt = pt;
                   1425:
                   1426:        /* Get the physical address of the start of the l1 */
1.30.2.6  nathanw  1427:        pa = VM_PAGE_TO_PHYS(TAILQ_FIRST(&pt->pt_plist));
1.30.2.2  thorpej  1428:
                   1429:        /* Store the virtual address of the l1 in the pmap. */
                   1430:        pmap->pm_pdir = (pd_entry_t *)pt->pt_va;
                   1431:
                   1432:        /* Clean the L1 if it is dirty */
                   1433:        if (!(pt->pt_flags & PTFLAG_CLEAN))
1.30.2.7  nathanw  1434:                bzero((void *)pmap->pm_pdir, (L1_TABLE_SIZE - KERNEL_PD_SIZE));
1.30.2.2  thorpej  1435:
                   1436:        /* Allocate a page table to map all the page tables for this pmap */
1.30.2.7  nathanw  1437:        if ((error = pmap_alloc_ptpt(pmap)) != 0) {
                   1438:                pmap_freepagedir(pmap);
                   1439:                return (error);
1.30.2.2  thorpej  1440:        }
                   1441:
1.30.2.7  nathanw  1442:        /* need to lock this all up for growkernel */
1.30.2.6  nathanw  1443:        simple_lock(&pmaps_lock);
                   1444:
                   1445:        /* Duplicate the kernel mappings. */
1.30.2.7  nathanw  1446:        bcopy((char *)pmap_kernel()->pm_pdir + (L1_TABLE_SIZE - KERNEL_PD_SIZE),
                   1447:                (char *)pmap->pm_pdir + (L1_TABLE_SIZE - KERNEL_PD_SIZE),
1.30.2.6  nathanw  1448:                KERNEL_PD_SIZE);
                   1449:
1.30.2.2  thorpej  1450:        /* Wire in this page table */
1.30.2.6  nathanw  1451:        pmap_map_in_l1(pmap, PTE_BASE, pmap->pm_pptpt, TRUE);
1.30.2.2  thorpej  1452:
                   1453:        pt->pt_flags &= ~PTFLAG_CLEAN;  /* L1 is dirty now */
1.30.2.6  nathanw  1454:
1.30.2.2  thorpej  1455:        /*
1.30.2.6  nathanw  1456:         * Map the kernel page tables into the new PT map.
1.30.2.2  thorpej  1457:         */
1.30.2.6  nathanw  1458:        bcopy((char *)(PTE_BASE
                   1459:            + (PTE_BASE >> (PGSHIFT - 2))
1.30.2.7  nathanw  1460:            + ((L1_TABLE_SIZE - KERNEL_PD_SIZE) >> 2)),
                   1461:            (char *)pmap->pm_vptpt + ((L1_TABLE_SIZE - KERNEL_PD_SIZE) >> 2),
1.30.2.2  thorpej  1462:            (KERNEL_PD_SIZE >> 2));
                   1463:
1.30.2.6  nathanw  1464:        LIST_INSERT_HEAD(&pmaps, pmap, pm_list);
                   1465:        simple_unlock(&pmaps_lock);
                   1466:
1.30.2.2  thorpej  1467:        return(0);
                   1468: }
                   1469:
                   1470:
                   1471: /*
                   1472:  * Initialize a preallocated and zeroed pmap structure,
                   1473:  * such as one in a vmspace structure.
                   1474:  */
                   1475:
                   1476: void
1.30.2.6  nathanw  1477: pmap_pinit(struct pmap *pmap)
1.30.2.2  thorpej  1478: {
                   1479:        int backoff = 6;
                   1480:        int retry = 10;
                   1481:
                   1482:        PDEBUG(0, printf("pmap_pinit(%p)\n", pmap));
                   1483:
                   1484:        /* Keep looping until we succeed in allocating a page directory */
                   1485:        while (pmap_allocpagedir(pmap) != 0) {
                   1486:                /*
                   1487:                 * Ok we failed to allocate a suitable block of memory for an
                   1488:                 * L1 page table. This means that either:
                   1489:                 * 1. 16KB of virtual address space could not be allocated
                   1490:                 * 2. 16KB of physically contiguous memory on a 16KB boundary
                   1491:                 *    could not be allocated.
                   1492:                 *
                   1493:                 * Since we cannot fail we will sleep for a while and try
                   1494:                 * again.
                   1495:                 *
                   1496:                 * Searching for a suitable L1 PT is expensive:
                   1497:                 * to avoid hogging the system when memory is really
                   1498:                 * scarce, use an exponential back-off so that
                   1499:                 * eventually we won't retry more than once every 8
                   1500:                 * seconds.  This should allow other processes to run
                   1501:                 * to completion and free up resources.
                   1502:                 */
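                /*
                 * Worked example, assuming hz == 100: the timeout
                 * (hz << 3) >> backoff starts at 800 >> 6 == 12 ticks
                 * (about 0.12s) and roughly doubles each time backoff is
                 * decremented (every 10 failed attempts), until backoff
                 * reaches 0 and the retry interval settles at 800 ticks,
                 * i.e. 8 seconds.
                 */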
                   1503:                (void) ltsleep(&lbolt, PVM, "l1ptwait", (hz << 3) >> backoff,
                   1504:                    NULL);
                   1505:                if (--retry == 0) {
                   1506:                        retry = 10;
                   1507:                        if (backoff)
                   1508:                                --backoff;
                   1509:                }
                   1510:        }
                   1511:
1.30.2.7  nathanw  1512:        if (vector_page < KERNEL_BASE) {
                   1513:                /*
                   1514:                 * Map the vector page.  This will also allocate and map
                   1515:                 * an L2 table for it.
                   1516:                 */
                   1517:                pmap_enter(pmap, vector_page, systempage.pv_pa,
                   1518:                    VM_PROT_READ, VM_PROT_READ | PMAP_WIRED);
                   1519:                pmap_update(pmap);
                   1520:        }
1.30.2.2  thorpej  1521: }
                   1522:
                   1523:
                   1524: void
1.30.2.6  nathanw  1525: pmap_freepagedir(struct pmap *pmap)
1.30.2.2  thorpej  1526: {
                   1527:        /* Free the memory used for the page table mapping */
                   1528:        if (pmap->pm_vptpt != 0)
1.30.2.7  nathanw  1529:                pmap_free_ptpt(pmap);
1.30.2.2  thorpej  1530:
                   1531:        /* junk the L1 page table */
                   1532:        if (pmap->pm_l1pt->pt_flags & PTFLAG_STATIC) {
                   1533:                /* Add the page table to the queue */
                   1534:                SIMPLEQ_INSERT_TAIL(&l1pt_static_queue, pmap->pm_l1pt, pt_queue);
                   1535:                ++l1pt_static_queue_count;
                   1536:        } else if (l1pt_queue_count < 8) {
                   1537:                /* Add the page table to the queue */
                   1538:                SIMPLEQ_INSERT_TAIL(&l1pt_queue, pmap->pm_l1pt, pt_queue);
                   1539:                ++l1pt_queue_count;
                   1540:        } else
                   1541:                pmap_free_l1pt(pmap->pm_l1pt);
                   1542: }
                   1543:
                   1544:
                   1545: /*
                   1546:  * Retire the given physical map from service.
                   1547:  * Should only be called if the map contains no valid mappings.
                   1548:  */
                   1549:
                   1550: void
1.30.2.6  nathanw  1551: pmap_destroy(struct pmap *pmap)
1.30.2.2  thorpej  1552: {
                   1553:        struct vm_page *page;
                   1554:        int count;
                   1555:
                   1556:        if (pmap == NULL)
                   1557:                return;
                   1558:
                   1559:        PDEBUG(0, printf("pmap_destroy(%p)\n", pmap));
                   1560:
                   1561:        /*
                   1562:         * Drop reference count
                   1563:         */
                   1564:        simple_lock(&pmap->pm_obj.vmobjlock);
                   1565:        count = --pmap->pm_obj.uo_refs;
                   1566:        simple_unlock(&pmap->pm_obj.vmobjlock);
                   1567:        if (count > 0) {
                   1568:                return;
                   1569:        }
                   1570:
                   1571:        /*
                   1572:         * reference count is zero, free pmap resources and then free pmap.
                   1573:         */
1.30.2.6  nathanw  1574:
                   1575:        /*
                   1576:         * remove it from global list of pmaps
                   1577:         */
                   1578:
                   1579:        simple_lock(&pmaps_lock);
                   1580:        LIST_REMOVE(pmap, pm_list);
                   1581:        simple_unlock(&pmaps_lock);
1.30.2.2  thorpej  1582:
1.30.2.7  nathanw  1583:        if (vector_page < KERNEL_BASE) {
                   1584:                /* Remove the vector page mapping */
                   1585:                pmap_remove(pmap, vector_page, vector_page + NBPG);
                   1586:                pmap_update(pmap);
                   1587:        }
1.30.2.2  thorpej  1588:
                   1589:        /*
                    1590:         * Free any page tables still mapped.
                    1591:         * This is only temporary until pmap_enter can count the number
                    1592:         * of mappings made in a page table. Then pmap_remove() can
                    1593:         * reduce the count and free the page table when the count
                    1594:         * reaches zero.  Note that entries in this list should match the
                    1595:         * contents of the ptpt; however, this is faster than walking
                    1596:         * 1024 entries looking for page tables.
                    1597:         * (Taken from i386 pmap.c.)
                   1598:         */
1.30.2.8! nathanw  1599:        /*
        !          1600:         * vmobjlock must be held while freeing pages
        !          1601:         */
        !          1602:        simple_lock(&pmap->pm_obj.vmobjlock);
1.30.2.6  nathanw  1603:        while ((page = TAILQ_FIRST(&pmap->pm_obj.memq)) != NULL) {
                   1604:                KASSERT((page->flags & PG_BUSY) == 0);
1.30.2.2  thorpej  1605:                page->wire_count = 0;
                   1606:                uvm_pagefree(page);
                   1607:        }
1.30.2.8! nathanw  1608:        simple_unlock(&pmap->pm_obj.vmobjlock);
1.30.2.2  thorpej  1609:
                   1610:        /* Free the page dir */
                   1611:        pmap_freepagedir(pmap);
                   1612:
                   1613:        /* return the pmap to the pool */
                   1614:        pool_put(&pmap_pmap_pool, pmap);
                   1615: }
                   1616:
                   1617:
                   1618: /*
                   1619:  * void pmap_reference(struct pmap *pmap)
                   1620:  *
                   1621:  * Add a reference to the specified pmap.
                   1622:  */
                   1623:
                   1624: void
1.30.2.6  nathanw  1625: pmap_reference(struct pmap *pmap)
1.30.2.2  thorpej  1626: {
                   1627:        if (pmap == NULL)
                   1628:                return;
                   1629:
                   1630:        simple_lock(&pmap->pm_lock);
                   1631:        pmap->pm_obj.uo_refs++;
                   1632:        simple_unlock(&pmap->pm_lock);
                   1633: }
                   1634:
                   1635: /*
                   1636:  * void pmap_virtual_space(vaddr_t *start, vaddr_t *end)
                   1637:  *
                   1638:  * Return the start and end addresses of the kernel's virtual space.
                    1639:  * These values are set up in pmap_bootstrap and are updated as pages
                   1640:  * are allocated.
                   1641:  */
                   1642:
                   1643: void
1.30.2.6  nathanw  1644: pmap_virtual_space(vaddr_t *start, vaddr_t *end)
1.30.2.2  thorpej  1645: {
1.30.2.6  nathanw  1646:        *start = virtual_avail;
1.30.2.2  thorpej  1647:        *end = virtual_end;
                   1648: }
                   1649:
                   1650: /*
                   1651:  * Activate the address space for the specified process.  If the process
                   1652:  * is the current process, load the new MMU context.
                   1653:  */
                   1654: void
1.30.2.6  nathanw  1655: pmap_activate(struct lwp *l)
1.30.2.2  thorpej  1656: {
                   1657:        struct pmap *pmap = l->l_proc->p_vmspace->vm_map.pmap;
                   1658:        struct pcb *pcb = &l->l_addr->u_pcb;
                   1659:
                   1660:        (void) pmap_extract(pmap_kernel(), (vaddr_t)pmap->pm_pdir,
                   1661:            (paddr_t *)&pcb->pcb_pagedir);
                   1662:
                   1663:        PDEBUG(0, printf("pmap_activate: p=%p pmap=%p pcb=%p pdir=%p l1=%p\n",
                   1664:            p, pmap, pcb, pmap->pm_pdir, pcb->pcb_pagedir));
                   1665:
                   1666:        if (l == curproc) {
                   1667:                PDEBUG(0, printf("pmap_activate: setting TTB\n"));
                   1668:                setttb((u_int)pcb->pcb_pagedir);
                   1669:        }
                   1670: }
                   1671:
                   1672: /*
                   1673:  * Deactivate the address space of the specified process.
                   1674:  */
                   1675: void
1.30.2.6  nathanw  1676: pmap_deactivate(struct lwp *l)
1.30.2.2  thorpej  1677: {
                   1678: }
                   1679:
1.30.2.3  nathanw  1680: /*
                   1681:  * Perform any deferred pmap operations.
                   1682:  */
                   1683: void
                   1684: pmap_update(struct pmap *pmap)
                   1685: {
                   1686:
                   1687:        /*
                   1688:         * We haven't deferred any pmap operations, but we do need to
                   1689:         * make sure TLB/cache operations have completed.
                   1690:         */
                   1691:        cpu_cpwait();
                   1692: }
1.30.2.2  thorpej  1693:
                   1694: /*
                   1695:  * pmap_clean_page()
                   1696:  *
                   1697:  * This is a local function used to work out the best strategy to clean
                   1698:  * a single page referenced by its entry in the PV table. It's used by
                    1699:  * pmap_copy_page, pmap_zero_page and maybe some others later on.
                   1700:  *
                   1701:  * Its policy is effectively:
                   1702:  *  o If there are no mappings, we don't bother doing anything with the cache.
                   1703:  *  o If there is one mapping, we clean just that page.
                   1704:  *  o If there are multiple mappings, we clean the entire cache.
                   1705:  *
                   1706:  * So that some functions can be further optimised, it returns 0 if it didn't
                   1707:  * clean the entire cache, or 1 if it did.
                   1708:  *
                    1709:  * XXX One bug in this routine is that if the pv_entry has a single page
                    1710:  * mapped at 0x00000000 a whole cache clean will be performed rather than
                    1711:  * just the one page. This should not occur in everyday use, and if it
                    1712:  * does it will just result in a less efficient clean for the page.
                   1713:  */
                   1714: static int
1.30.2.6  nathanw  1715: pmap_clean_page(struct pv_entry *pv, boolean_t is_src)
1.30.2.2  thorpej  1716: {
                   1717:        struct pmap *pmap;
                   1718:        struct pv_entry *npv;
                   1719:        int cache_needs_cleaning = 0;
                   1720:        vaddr_t page_to_clean = 0;
                   1721:
                   1722:        if (pv == NULL)
                   1723:                /* nothing mapped in so nothing to flush */
                   1724:                return (0);
                   1725:
                   1726:        /* Since we flush the cache each time we change curproc, we
                   1727:         * only need to flush the page if it is in the current pmap.
                   1728:         */
                   1729:        if (curproc)
                   1730:                pmap = curproc->l_proc->p_vmspace->vm_map.pmap;
                   1731:        else
                   1732:                pmap = pmap_kernel();
                   1733:
                   1734:        for (npv = pv; npv; npv = npv->pv_next) {
                   1735:                if (npv->pv_pmap == pmap) {
                   1736:                        /* The page is mapped non-cacheable in
                   1737:                         * this map.  No need to flush the cache.
                   1738:                         */
1.30.2.7  nathanw  1739:                        if (npv->pv_flags & PVF_NC) {
1.30.2.2  thorpej  1740: #ifdef DIAGNOSTIC
                   1741:                                if (cache_needs_cleaning)
                   1742:                                        panic("pmap_clean_page: "
                   1743:                                                        "cache inconsistency");
                   1744: #endif
                   1745:                                break;
                   1746:                        }
                   1747: #if 0
1.30.2.8! nathanw  1748:                        /*
        !          1749:                         * XXX Can't do this because pmap_protect doesn't
        !          1750:                         * XXX clean the page when it does a write-protect.
        !          1751:                         */
        !          1752:                        else if (is_src && (npv->pv_flags & PVF_WRITE) == 0)
1.30.2.2  thorpej  1753:                                continue;
                   1754: #endif
                   1755:                        if (cache_needs_cleaning){
                   1756:                                page_to_clean = 0;
                   1757:                                break;
                   1758:                        }
                   1759:                        else
                   1760:                                page_to_clean = npv->pv_va;
                   1761:                        cache_needs_cleaning = 1;
                   1762:                }
                   1763:        }
                   1764:
                   1765:        if (page_to_clean)
1.30.2.5  nathanw  1766:                cpu_idcache_wbinv_range(page_to_clean, NBPG);
1.30.2.2  thorpej  1767:        else if (cache_needs_cleaning) {
1.30.2.5  nathanw  1768:                cpu_idcache_wbinv_all();
1.30.2.2  thorpej  1769:                return (1);
                   1770:        }
                   1771:        return (0);
                   1772: }
                   1773:
                   1774: /*
                   1775:  * pmap_zero_page()
                   1776:  *
                   1777:  * Zero a given physical page by mapping it at a page hook point.
                    1778:  * In doing the zero page op, the page we zero is mapped cacheable, since
                    1779:  * on the StrongARM accesses to non-cached pages are non-burst, making the
                    1780:  * writing of _any_ bulk data very slow.
                   1781:  */
1.30.2.7  nathanw  1782: #if ARM_MMU_GENERIC == 1
1.30.2.2  thorpej  1783: void
1.30.2.7  nathanw  1784: pmap_zero_page_generic(paddr_t phys)
1.30.2.2  thorpej  1785: {
1.30.2.6  nathanw  1786: #ifdef DEBUG
                   1787:        struct vm_page *pg = PHYS_TO_VM_PAGE(phys);
                   1788:
                   1789:        if (pg->mdpage.pvh_list != NULL)
                   1790:                panic("pmap_zero_page: page has mappings");
                   1791: #endif
1.30.2.2  thorpej  1792:
1.30.2.7  nathanw  1793:        KDASSERT((phys & PGOFSET) == 0);
                   1794:
1.30.2.2  thorpej  1795:        /*
                   1796:         * Hook in the page, zero it, and purge the cache for that
                   1797:         * zeroed page. Invalidate the TLB as needed.
                   1798:         */
1.30.2.7  nathanw  1799:        *cdst_pte = L2_S_PROTO | phys |
                   1800:            L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode;
1.30.2.6  nathanw  1801:        cpu_tlb_flushD_SE(cdstp);
1.30.2.3  nathanw  1802:        cpu_cpwait();
1.30.2.6  nathanw  1803:        bzero_page(cdstp);
                   1804:        cpu_dcache_wbinv_range(cdstp, NBPG);
1.30.2.2  thorpej  1805: }
1.30.2.7  nathanw  1806: #endif /* ARM_MMU_GENERIC == 1 */
                   1807:
                   1808: #if ARM_MMU_XSCALE == 1
                   1809: void
                   1810: pmap_zero_page_xscale(paddr_t phys)
                   1811: {
                   1812: #ifdef DEBUG
                   1813:        struct vm_page *pg = PHYS_TO_VM_PAGE(phys);
                   1814:
                   1815:        if (pg->mdpage.pvh_list != NULL)
                   1816:                panic("pmap_zero_page: page has mappings");
                   1817: #endif
                   1818:
                   1819:        KDASSERT((phys & PGOFSET) == 0);
                   1820:
                   1821:        /*
                   1822:         * Hook in the page, zero it, and purge the cache for that
                   1823:         * zeroed page. Invalidate the TLB as needed.
                   1824:         */
                   1825:        *cdst_pte = L2_S_PROTO | phys |
                   1826:            L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) |
                   1827:            L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X);       /* mini-data */
                   1828:        cpu_tlb_flushD_SE(cdstp);
                   1829:        cpu_cpwait();
                   1830:        bzero_page(cdstp);
                   1831:        xscale_cache_clean_minidata();
                   1832: }
                   1833: #endif /* ARM_MMU_XSCALE == 1 */
1.30.2.2  thorpej  1834:
                   1835: /* pmap_pageidlezero()
                   1836:  *
                   1837:  * The same as above, except that we assume that the page is not
                   1838:  * mapped.  This means we never have to flush the cache first.  Called
                   1839:  * from the idle loop.
                   1840:  */
                   1841: boolean_t
1.30.2.6  nathanw  1842: pmap_pageidlezero(paddr_t phys)
1.30.2.2  thorpej  1843: {
                   1844:        int i, *ptr;
                   1845:        boolean_t rv = TRUE;
1.30.2.6  nathanw  1846: #ifdef DEBUG
                   1847:        struct vm_page *pg;
1.30.2.2  thorpej  1848:
1.30.2.6  nathanw  1849:        pg = PHYS_TO_VM_PAGE(phys);
                   1850:        if (pg->mdpage.pvh_list != NULL)
                   1851:                panic("pmap_pageidlezero: page has mappings");
1.30.2.2  thorpej  1852: #endif
1.30.2.7  nathanw  1853:
                   1854:        KDASSERT((phys & PGOFSET) == 0);
                   1855:
1.30.2.2  thorpej  1856:        /*
                   1857:         * Hook in the page, zero it, and purge the cache for that
                   1858:         * zeroed page. Invalidate the TLB as needed.
                   1859:         */
1.30.2.7  nathanw  1860:        *cdst_pte = L2_S_PROTO | phys |
                   1861:            L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode;
1.30.2.6  nathanw  1862:        cpu_tlb_flushD_SE(cdstp);
1.30.2.3  nathanw  1863:        cpu_cpwait();
                   1864:
1.30.2.6  nathanw  1865:        for (i = 0, ptr = (int *)cdstp;
1.30.2.2  thorpej  1866:                        i < (NBPG / sizeof(int)); i++) {
                   1867:                if (sched_whichqs != 0) {
                   1868:                        /*
                   1869:                         * A process has become ready.  Abort now,
                   1870:                         * so we don't keep it waiting while we
                   1871:                         * do slow memory access to finish this
                   1872:                         * page.
                   1873:                         */
                   1874:                        rv = FALSE;
                   1875:                        break;
                   1876:                }
                   1877:                *ptr++ = 0;
                   1878:        }
                   1879:
                   1880:        if (rv)
                   1881:                /*
                   1882:                 * if we aborted we'll rezero this page again later so don't
                   1883:                 * purge it unless we finished it
                   1884:                 */
1.30.2.6  nathanw  1885:                cpu_dcache_wbinv_range(cdstp, NBPG);
1.30.2.2  thorpej  1886:        return (rv);
                   1887: }
                   1888:
                   1889: /*
                   1890:  * pmap_copy_page()
                   1891:  *
                   1892:  * Copy one physical page into another, by mapping the pages into
                    1893:  * hook points. The same comment regarding cacheability as in
                   1894:  * pmap_zero_page also applies here.
                   1895:  */
1.30.2.7  nathanw  1896: #if ARM_MMU_GENERIC == 1
1.30.2.2  thorpej  1897: void
1.30.2.7  nathanw  1898: pmap_copy_page_generic(paddr_t src, paddr_t dst)
1.30.2.6  nathanw  1899: {
                   1900:        struct vm_page *src_pg = PHYS_TO_VM_PAGE(src);
                   1901: #ifdef DEBUG
                   1902:        struct vm_page *dst_pg = PHYS_TO_VM_PAGE(dst);
                   1903:
                   1904:        if (dst_pg->mdpage.pvh_list != NULL)
                   1905:                panic("pmap_copy_page: dst page has mappings");
                   1906: #endif
                   1907:
1.30.2.7  nathanw  1908:        KDASSERT((src & PGOFSET) == 0);
                   1909:        KDASSERT((dst & PGOFSET) == 0);
                   1910:
1.30.2.6  nathanw  1911:        /*
                   1912:         * Clean the source page.  Hold the source page's lock for
                   1913:         * the duration of the copy so that no other mappings can
                   1914:         * be created while we have a potentially aliased mapping.
                   1915:         */
                   1916:        simple_lock(&src_pg->mdpage.pvh_slock);
                   1917:        (void) pmap_clean_page(src_pg->mdpage.pvh_list, TRUE);
                   1918:
1.30.2.2  thorpej  1919:        /*
                   1920:         * Map the pages into the page hook points, copy them, and purge
                   1921:         * the cache for the appropriate page. Invalidate the TLB
                   1922:         * as required.
                   1923:         */
1.30.2.7  nathanw  1924:        *csrc_pte = L2_S_PROTO | src |
                   1925:            L2_S_PROT(PTE_KERNEL, VM_PROT_READ) | pte_l2_s_cache_mode;
                   1926:        *cdst_pte = L2_S_PROTO | dst |
                   1927:            L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode;
1.30.2.6  nathanw  1928:        cpu_tlb_flushD_SE(csrcp);
                   1929:        cpu_tlb_flushD_SE(cdstp);
1.30.2.3  nathanw  1930:        cpu_cpwait();
1.30.2.6  nathanw  1931:        bcopy_page(csrcp, cdstp);
                   1932:        cpu_dcache_inv_range(csrcp, NBPG);
                   1933:        simple_unlock(&src_pg->mdpage.pvh_slock); /* cache is safe again */
                   1934:        cpu_dcache_wbinv_range(cdstp, NBPG);
1.30.2.2  thorpej  1935: }
1.30.2.7  nathanw  1936: #endif /* ARM_MMU_GENERIC == 1 */
                   1937:
                   1938: #if ARM_MMU_XSCALE == 1
                   1939: void
                   1940: pmap_copy_page_xscale(paddr_t src, paddr_t dst)
                   1941: {
                   1942:        struct vm_page *src_pg = PHYS_TO_VM_PAGE(src);
                   1943: #ifdef DEBUG
                   1944:        struct vm_page *dst_pg = PHYS_TO_VM_PAGE(dst);
                   1945:
                   1946:        if (dst_pg->mdpage.pvh_list != NULL)
                   1947:                panic("pmap_copy_page: dst page has mappings");
                   1948: #endif
                   1949:
                   1950:        KDASSERT((src & PGOFSET) == 0);
                   1951:        KDASSERT((dst & PGOFSET) == 0);
                   1952:
                   1953:        /*
                   1954:         * Clean the source page.  Hold the source page's lock for
                   1955:         * the duration of the copy so that no other mappings can
                   1956:         * be created while we have a potentially aliased mapping.
                   1957:         */
                   1958:        simple_lock(&src_pg->mdpage.pvh_slock);
                   1959:        (void) pmap_clean_page(src_pg->mdpage.pvh_list, TRUE);
                   1960:
                   1961:        /*
                   1962:         * Map the pages into the page hook points, copy them, and purge
                   1963:         * the cache for the appropriate page. Invalidate the TLB
                   1964:         * as required.
                   1965:         */
                   1966:        *csrc_pte = L2_S_PROTO | src |
                   1967:            L2_S_PROT(PTE_KERNEL, VM_PROT_READ) |
                   1968:            L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X);       /* mini-data */
                   1969:        *cdst_pte = L2_S_PROTO | dst |
                   1970:            L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) |
                   1971:            L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X);       /* mini-data */
                   1972:        cpu_tlb_flushD_SE(csrcp);
                   1973:        cpu_tlb_flushD_SE(cdstp);
                   1974:        cpu_cpwait();
                   1975:        bcopy_page(csrcp, cdstp);
                   1976:        simple_unlock(&src_pg->mdpage.pvh_slock); /* cache is safe again */
                   1977:        xscale_cache_clean_minidata();
                   1978: }
                   1979: #endif /* ARM_MMU_XSCALE == 1 */
1.30.2.2  thorpej  1980:
                   1981: #if 0
                   1982: void
1.30.2.6  nathanw  1983: pmap_pte_addref(struct pmap *pmap, vaddr_t va)
1.30.2.2  thorpej  1984: {
                   1985:        pd_entry_t *pde;
                   1986:        paddr_t pa;
                   1987:        struct vm_page *m;
                   1988:
                   1989:        if (pmap == pmap_kernel())
                   1990:                return;
                   1991:
1.30.2.7  nathanw  1992:        pde = pmap_pde(pmap, va & ~(3 << L1_S_SHIFT));
1.30.2.2  thorpej  1993:        pa = pmap_pte_pa(pde);
                   1994:        m = PHYS_TO_VM_PAGE(pa);
                   1995:        ++m->wire_count;
                   1996: #ifdef MYCROFT_HACK
                   1997:        printf("addref pmap=%p va=%08lx pde=%p pa=%08lx m=%p wire=%d\n",
                   1998:            pmap, va, pde, pa, m, m->wire_count);
                   1999: #endif
                   2000: }
                   2001:
                   2002: void
1.30.2.6  nathanw  2003: pmap_pte_delref(struct pmap *pmap, vaddr_t va)
1.30.2.2  thorpej  2004: {
                   2005:        pd_entry_t *pde;
                   2006:        paddr_t pa;
                   2007:        struct vm_page *m;
                   2008:
                   2009:        if (pmap == pmap_kernel())
                   2010:                return;
                   2011:
1.30.2.7  nathanw  2012:        pde = pmap_pde(pmap, va & ~(3 << L1_S_SHIFT));
1.30.2.2  thorpej  2013:        pa = pmap_pte_pa(pde);
                   2014:        m = PHYS_TO_VM_PAGE(pa);
                   2015:        --m->wire_count;
                   2016: #ifdef MYCROFT_HACK
                   2017:        printf("delref pmap=%p va=%08lx pde=%p pa=%08lx m=%p wire=%d\n",
                   2018:            pmap, va, pde, pa, m, m->wire_count);
                   2019: #endif
                   2020:        if (m->wire_count == 0) {
                   2021: #ifdef MYCROFT_HACK
                   2022:                printf("delref pmap=%p va=%08lx pde=%p pa=%08lx m=%p\n",
                   2023:                    pmap, va, pde, pa, m);
                   2024: #endif
                   2025:                pmap_unmap_in_l1(pmap, va);
                   2026:                uvm_pagefree(m);
                   2027:                --pmap->pm_stats.resident_count;
                   2028:        }
                   2029: }
                   2030: #else
                   2031: #define        pmap_pte_addref(pmap, va)
                   2032: #define        pmap_pte_delref(pmap, va)
                   2033: #endif
                   2034:
                   2035: /*
                   2036:  * Since we have a virtually indexed cache, we may need to inhibit caching if
                   2037:  * there is more than one mapping and at least one of them is writable.
                   2038:  * Since we purge the cache on every context switch, we only need to check for
                   2039:  * other mappings within the same pmap, or kernel_pmap.
                   2040:  * This function is also called when a page is unmapped, to possibly reenable
                   2041:  * caching on any remaining mappings.
                   2042:  *
                   2043:  * The code implements the following logic, where:
                   2044:  *
                   2045:  * KW = # of kernel read/write pages
                   2046:  * KR = # of kernel read only pages
                   2047:  * UW = # of user read/write pages
                   2048:  * UR = # of user read only pages
                   2049:  * OW = # of user read/write pages in another pmap, then
                   2050:  *
                   2051:  * KC = kernel mapping is cacheable
                   2052:  * UC = user mapping is cacheable
                   2053:  *
                   2054:  *                     KW=0,KR=0  KW=0,KR>0  KW=1,KR=0  KW>1,KR>=0
                   2055:  *                   +---------------------------------------------
                   2056:  * UW=0,UR=0,OW=0    | ---        KC=1       KC=1       KC=0
                   2057:  * UW=0,UR>0,OW=0    | UC=1       KC=1,UC=1  KC=0,UC=0  KC=0,UC=0
                   2058:  * UW=0,UR>0,OW>0    | UC=1       KC=0,UC=1  KC=0,UC=0  KC=0,UC=0
                   2059:  * UW=1,UR=0,OW=0    | UC=1       KC=0,UC=0  KC=0,UC=0  KC=0,UC=0
                   2060:  * UW>1,UR>=0,OW>=0  | UC=0       KC=0,UC=0  KC=0,UC=0  KC=0,UC=0
                   2061:  *
                   2062:  * Note that the pmap must have its PTEs mapped in; they are passed in via "ptes".
                   2063:  */
                   2064: __inline static void
1.30.2.6  nathanw  2065: pmap_vac_me_harder(struct pmap *pmap, struct vm_page *pg, pt_entry_t *ptes,
1.30.2.2  thorpej  2066:        boolean_t clear_cache)
                   2067: {
                   2068:        if (pmap == pmap_kernel())
1.30.2.6  nathanw  2069:                pmap_vac_me_kpmap(pmap, pg, ptes, clear_cache);
1.30.2.2  thorpej  2070:        else
1.30.2.6  nathanw  2071:                pmap_vac_me_user(pmap, pg, ptes, clear_cache);
1.30.2.2  thorpej  2072: }
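                         /*
                          * Editor's sketch (not part of the original source): a worked
                          * reading of the table above.  With one writable kernel mapping
                          * (KW=1) and one read-only user mapping (UR=1) the table gives
                          * KC=0,UC=0, i.e. every mapping must be made non-cacheable.
                          * pmap_vac_me_user() below reaches the same answer with a single
                          * predicate over its counters; the helper name here is
                          * hypothetical and only restates that test.
                          */
                         #if 0
                         static __inline boolean_t
                         pmap_vac_must_uncache(int entries, int writable, int other_writable,
                             boolean_t in_kpmap)
                         {
                         
                                 /* mirrors the test in pmap_vac_me_user() */
                                 return ((entries > 1 && writable) ||
                                     (entries > 0 && in_kpmap && other_writable));
                         }
                         #endif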
                   2073:
                   2074: static void
1.30.2.6  nathanw  2075: pmap_vac_me_kpmap(struct pmap *pmap, struct vm_page *pg, pt_entry_t *ptes,
1.30.2.2  thorpej  2076:        boolean_t clear_cache)
                   2077: {
                   2078:        int user_entries = 0;
                   2079:        int user_writable = 0;
                   2080:        int user_cacheable = 0;
                   2081:        int kernel_entries = 0;
                   2082:        int kernel_writable = 0;
                   2083:        int kernel_cacheable = 0;
                   2084:        struct pv_entry *pv;
                   2085:        struct pmap *last_pmap = pmap;
                   2086:
                   2087: #ifdef DIAGNOSTIC
                   2088:        if (pmap != pmap_kernel())
                   2089:                panic("pmap_vac_me_kpmap: pmap != pmap_kernel()");
                   2090: #endif
                   2091:
                   2092:        /*
                   2093:         * Pass one, see if there are both kernel and user pmaps for
                   2094:         * this page.  Calculate whether there are user-writable or
                   2095:         * kernel-writable pages.
                   2096:         */
1.30.2.6  nathanw  2097:        for (pv = pg->mdpage.pvh_list; pv != NULL; pv = pv->pv_next) {
1.30.2.2  thorpej  2098:                if (pv->pv_pmap != pmap) {
                   2099:                        user_entries++;
1.30.2.7  nathanw  2100:                        if (pv->pv_flags & PVF_WRITE)
1.30.2.2  thorpej  2101:                                user_writable++;
1.30.2.7  nathanw  2102:                        if ((pv->pv_flags & PVF_NC) == 0)
1.30.2.2  thorpej  2103:                                user_cacheable++;
                   2104:                } else {
                   2105:                        kernel_entries++;
1.30.2.7  nathanw  2106:                        if (pv->pv_flags & PVF_WRITE)
1.30.2.2  thorpej  2107:                                kernel_writable++;
1.30.2.7  nathanw  2108:                        if ((pv->pv_flags & PVF_NC) == 0)
1.30.2.2  thorpej  2109:                                kernel_cacheable++;
                   2110:                }
                   2111:        }
                   2112:
                   2113:        /*
                   2114:         * We know we have just been updating a kernel entry, so if
                   2115:         * all user pages are already cacheable, then there is nothing
                   2116:         * further to do.
                   2117:         */
                   2118:        if (kernel_entries == 0 &&
                   2119:            user_cacheable == user_entries)
                   2120:                return;
                   2121:
                   2122:        if (user_entries) {
                   2123:                /*
                   2124:                 * Scan over the list again; for each entry that
                   2125:                 * might not be set correctly, call pmap_vac_me_user
                   2126:                 * to recalculate the settings.
                   2127:                 */
1.30.2.6  nathanw  2128:                for (pv = pg->mdpage.pvh_list; pv; pv = pv->pv_next) {
1.30.2.2  thorpej  2129:                        /*
                   2130:                         * We know kernel mappings will get set
                   2131:                         * correctly in other calls.  We also know
                   2132:                         * that if the pmap is the same as last_pmap
                   2133:                         * then we've just handled this entry.
                   2134:                         */
                   2135:                        if (pv->pv_pmap == pmap || pv->pv_pmap == last_pmap)
                   2136:                                continue;
                   2137:                        /*
                   2138:                         * If there are kernel entries and this page
                   2139:                         * is writable but non-cacheable, then we can
                   2140:                         * skip this entry also.
                   2141:                         */
                   2142:                        if (kernel_entries > 0 &&
1.30.2.7  nathanw  2143:                            (pv->pv_flags & (PVF_NC | PVF_WRITE)) ==
                   2144:                            (PVF_NC | PVF_WRITE))
1.30.2.2  thorpej  2145:                                continue;
                   2146:                        /*
                   2147:                         * Similarly if there are no kernel-writable
                   2148:                         * entries and the page is already
                   2149:                         * read-only/cacheable.
                   2150:                         */
                   2151:                        if (kernel_writable == 0 &&
1.30.2.7  nathanw  2152:                            (pv->pv_flags & (PVF_NC | PVF_WRITE)) == 0)
1.30.2.2  thorpej  2153:                                continue;
                   2154:                        /*
                   2155:                         * For some of the remaining cases, we know
                   2156:                         * that we must recalculate, but for others we
                   2157:                         * can't tell if they are correct or not, so
                   2158:                         * we recalculate anyway.
                   2159:                         */
                   2160:                        pmap_unmap_ptes(last_pmap);
                   2161:                        last_pmap = pv->pv_pmap;
                   2162:                        ptes = pmap_map_ptes(last_pmap);
1.30.2.6  nathanw  2163:                        pmap_vac_me_user(last_pmap, pg, ptes,
1.30.2.2  thorpej  2164:                            pmap_is_curpmap(last_pmap));
                   2165:                }
                   2166:                /* Restore the pte mapping that was passed to us.  */
                   2167:                if (last_pmap != pmap) {
                   2168:                        pmap_unmap_ptes(last_pmap);
                   2169:                        ptes = pmap_map_ptes(pmap);
                   2170:                }
                   2171:                if (kernel_entries == 0)
                   2172:                        return;
                   2173:        }
                   2174:
1.30.2.6  nathanw  2175:        pmap_vac_me_user(pmap, pg, ptes, clear_cache);
1.30.2.2  thorpej  2176:        return;
                   2177: }
                   2178:
                   2179: static void
1.30.2.6  nathanw  2180: pmap_vac_me_user(struct pmap *pmap, struct vm_page *pg, pt_entry_t *ptes,
1.30.2.2  thorpej  2181:        boolean_t clear_cache)
                   2182: {
                   2183:        struct pmap *kpmap = pmap_kernel();
                   2184:        struct pv_entry *pv, *npv;
                   2185:        int entries = 0;
                   2186:        int writable = 0;
                   2187:        int cacheable_entries = 0;
                   2188:        int kern_cacheable = 0;
                   2189:        int other_writable = 0;
                   2190:
1.30.2.6  nathanw  2191:        pv = pg->mdpage.pvh_list;
1.30.2.2  thorpej  2192:        KASSERT(ptes != NULL);
                   2193:
                   2194:        /*
                   2195:         * Count mappings and writable mappings in this pmap.
                   2196:         * Include kernel mappings as part of our own.
                   2197:         * Keep a pointer to the first one.
                   2198:         */
                   2199:        for (npv = pv; npv; npv = npv->pv_next) {
                   2200:                /* Count mappings in the same pmap */
                   2201:                if (pmap == npv->pv_pmap ||
                   2202:                    kpmap == npv->pv_pmap) {
                   2203:                        if (entries++ == 0)
                   2204:                                pv = npv;
                   2205:                        /* Cacheable mappings */
1.30.2.7  nathanw  2206:                        if ((npv->pv_flags & PVF_NC) == 0) {
1.30.2.2  thorpej  2207:                                cacheable_entries++;
                   2208:                                if (kpmap == npv->pv_pmap)
                   2209:                                        kern_cacheable++;
                   2210:                        }
                   2211:                        /* Writable mappings */
1.30.2.7  nathanw  2212:                        if (npv->pv_flags & PVF_WRITE)
1.30.2.2  thorpej  2213:                                ++writable;
1.30.2.7  nathanw  2214:                } else if (npv->pv_flags & PVF_WRITE)
1.30.2.2  thorpej  2215:                        other_writable = 1;
                   2216:        }
                   2217:
                   2218:        PDEBUG(3,printf("pmap_vac_me_harder: pmap %p Entries %d, "
                   2219:                "writable %d cacheable %d %s\n", pmap, entries, writable,
                   2220:                cacheable_entries, clear_cache ? "clean" : "no clean"));
                   2221:
                   2222:        /*
                   2223:         * Enable or disable caching as necessary.
                   2224:         * Note: the first entry might be part of the kernel pmap,
                   2225:         * so we can't assume this is indicative of the state of the
                   2226:         * other (maybe non-kpmap) entries.
                   2227:         */
                   2228:        if ((entries > 1 && writable) ||
                   2229:            (entries > 0 && pmap == kpmap && other_writable)) {
                   2230:                if (cacheable_entries == 0)
                   2231:                    return;
                   2232:                for (npv = pv; npv; npv = npv->pv_next) {
                   2233:                        if ((pmap == npv->pv_pmap
                   2234:                            || kpmap == npv->pv_pmap) &&
1.30.2.7  nathanw  2235:                            (npv->pv_flags & PVF_NC) == 0) {
                   2236:                                ptes[arm_btop(npv->pv_va)] &= ~L2_S_CACHE_MASK;
                   2237:                                npv->pv_flags |= PVF_NC;
1.30.2.2  thorpej  2238:                                /*
                   2239:                                 * If this page needs flushing from the
                   2240:                                 * cache, and we aren't going to do it
                   2241:                                 * below, do it now.
                   2242:                                 */
                   2243:                                if ((cacheable_entries < 4 &&
                   2244:                                    (clear_cache || npv->pv_pmap == kpmap)) ||
                   2245:                                    (npv->pv_pmap == kpmap &&
                   2246:                                    !clear_cache && kern_cacheable < 4)) {
1.30.2.5  nathanw  2247:                                        cpu_idcache_wbinv_range(npv->pv_va,
1.30.2.2  thorpej  2248:                                            NBPG);
                   2249:                                        cpu_tlb_flushID_SE(npv->pv_va);
                   2250:                                }
                   2251:                        }
                   2252:                }
                   2253:                if ((clear_cache && cacheable_entries >= 4) ||
                   2254:                    kern_cacheable >= 4) {
1.30.2.5  nathanw  2255:                        cpu_idcache_wbinv_all();
1.30.2.2  thorpej  2256:                        cpu_tlb_flushID();
                   2257:                }
1.30.2.3  nathanw  2258:                cpu_cpwait();
1.30.2.2  thorpej  2259:        } else if (entries > 0) {
                   2260:                /*
                   2261:                 * Turn caching back on for some pages.  If it is a kernel
                   2262:                 * page, only do so if there are no other writable pages.
                   2263:                 */
                   2264:                for (npv = pv; npv; npv = npv->pv_next) {
                   2265:                        if ((pmap == npv->pv_pmap ||
                   2266:                            (kpmap == npv->pv_pmap && other_writable == 0)) &&
1.30.2.7  nathanw  2267:                            (npv->pv_flags & PVF_NC)) {
                   2268:                                ptes[arm_btop(npv->pv_va)] |=
                   2269:                                    pte_l2_s_cache_mode;
                   2270:                                npv->pv_flags &= ~PVF_NC;
1.30.2.2  thorpej  2271:                        }
                   2272:                }
                   2273:        }
                   2274: }
                   2275:
                   2276: /*
                   2277:  * pmap_remove()
                   2278:  *
                   2279:  * pmap_remove is responsible for nuking a number of mappings for a range
                   2280:  * of virtual address space in the current pmap. To do this efficiently
                   2281:  * is interesting, because in a number of cases a wide virtual address
                   2282:  * range may be supplied that contains few actual mappings. So, the
                   2283:  * optimisations are:
                   2284:  *  1. Try to skip over hunks of address space for which an L1 entry
                   2285:  *     does not exist.
                   2286:  *  2. Build up a list of pages we've hit, up to a maximum, so we can
                   2287:  *     maybe do just a partial cache clean. This path of execution is
                   2288:  *     complicated by the fact that the cache must be flushed _before_
                   2289:  *     the PTE is nuked, being a VAC :-)
                   2290:  *  3. Maybe later fast-case a single page, but I don't think this is
                   2291:  *     going to make _that_ much difference overall.
                   2292:  */
                   2293:
                   2294: #define PMAP_REMOVE_CLEAN_LIST_SIZE    3
                   2295:
                   2296: void
1.30.2.6  nathanw  2297: pmap_remove(struct pmap *pmap, vaddr_t sva, vaddr_t eva)
1.30.2.2  thorpej  2298: {
                   2299:        int cleanlist_idx = 0;
                   2300:        struct pagelist {
                   2301:                vaddr_t va;
                   2302:                pt_entry_t *pte;
                   2303:        } cleanlist[PMAP_REMOVE_CLEAN_LIST_SIZE];
                   2304:        pt_entry_t *pte = 0, *ptes;
                   2305:        paddr_t pa;
                   2306:        int pmap_active;
1.30.2.6  nathanw  2307:        struct vm_page *pg;
1.30.2.2  thorpej  2308:
                   2309:        /* Exit quick if there is no pmap */
                   2310:        if (!pmap)
                   2311:                return;
                   2312:
1.30.2.7  nathanw  2313:        PDEBUG(0, printf("pmap_remove: pmap=%p sva=%08lx eva=%08lx\n",
                   2314:            pmap, sva, eva));
1.30.2.2  thorpej  2315:
                   2316:        /*
1.30.2.6  nathanw  2317:         * we lock in the pmap => vm_page direction
1.30.2.2  thorpej  2318:         */
                   2319:        PMAP_MAP_TO_HEAD_LOCK();
                   2320:
                   2321:        ptes = pmap_map_ptes(pmap);
                   2322:        /* Get a page table pointer */
                   2323:        while (sva < eva) {
                   2324:                if (pmap_pde_page(pmap_pde(pmap, sva)))
                   2325:                        break;
1.30.2.7  nathanw  2326:                sva = (sva & L1_S_FRAME) + L1_S_SIZE;
1.30.2.2  thorpej  2327:        }
                   2328:
1.30.2.6  nathanw  2329:        pte = &ptes[arm_btop(sva)];
1.30.2.2  thorpej  2330:        /* Note if the pmap is active thus require cache and tlb cleans */
1.30.2.6  nathanw  2331:        pmap_active = pmap_is_curpmap(pmap);
1.30.2.2  thorpej  2332:
                   2333:        /* Now loop along */
                   2334:        while (sva < eva) {
                   2335:                /* Check if we can move to the next PDE (l1 chunk) */
1.30.2.7  nathanw  2336:                if (!(sva & L2_ADDR_BITS))
1.30.2.2  thorpej  2337:                        if (!pmap_pde_page(pmap_pde(pmap, sva))) {
1.30.2.7  nathanw  2338:                                sva += L1_S_SIZE;
                   2339:                                pte += arm_btop(L1_S_SIZE);
1.30.2.2  thorpej  2340:                                continue;
                   2341:                        }
                   2342:
                   2343:                /* We've found a valid PTE, so this page of PTEs has to go. */
                   2344:                if (pmap_pte_v(pte)) {
                   2345:                        /* Update statistics */
                   2346:                        --pmap->pm_stats.resident_count;
                   2347:
                   2348:                        /*
                   2349:                         * Add this page to our cache remove list, if we can.
                   2350:                         * If, however, the cache remove list is totally full,
                   2351:                         * then do a complete cache invalidation taking note
                   2352:                         * to backtrack the PTE table beforehand, and ignore
                   2353:                         * the lists in the future because there's no longer any
                   2354:                         * point in bothering with them (we've paid the
                   2355:                         * penalty, so will carry on unhindered). Otherwise,
                   2356:                         * when we fall out, we just clean the list.
                   2357:                         */
                   2358:                        PDEBUG(10, printf("remove: inv pte at %p(%x) ", pte, *pte));
                   2359:                        pa = pmap_pte_pa(pte);
                   2360:
                   2361:                        if (cleanlist_idx < PMAP_REMOVE_CLEAN_LIST_SIZE) {
                   2362:                                /* Add to the clean list. */
                   2363:                                cleanlist[cleanlist_idx].pte = pte;
                   2364:                                cleanlist[cleanlist_idx].va = sva;
                   2365:                                cleanlist_idx++;
                   2366:                        } else if (cleanlist_idx == PMAP_REMOVE_CLEAN_LIST_SIZE) {
                   2367:                                int cnt;
                   2368:
                   2369:                                /* Nuke everything if needed. */
                   2370:                                if (pmap_active) {
1.30.2.5  nathanw  2371:                                        cpu_idcache_wbinv_all();
1.30.2.2  thorpej  2372:                                        cpu_tlb_flushID();
                   2373:                                }
                   2374:
                   2375:                                /*
                   2376:                                 * Roll back the previous PTE list,
                   2377:                                 * and zero out the current PTE.
                   2378:                                 */
                   2379:                                for (cnt = 0; cnt < PMAP_REMOVE_CLEAN_LIST_SIZE; cnt++) {
                   2380:                                        *cleanlist[cnt].pte = 0;
                   2381:                                        pmap_pte_delref(pmap, cleanlist[cnt].va);
                   2382:                                }
                   2383:                                *pte = 0;
                   2384:                                pmap_pte_delref(pmap, sva);
                   2385:                                cleanlist_idx++;
                   2386:                        } else {
                   2387:                                /*
                   2388:                                 * We've already nuked the cache and
                   2389:                                 * TLB, so just carry on regardless,
                   2390:                                 * and we won't need to do it again
                   2391:                                 */
                   2392:                                *pte = 0;
                   2393:                                pmap_pte_delref(pmap, sva);
                   2394:                        }
                   2395:
                   2396:                        /*
                   2397:                         * Update flags.  In many circumstances we could
                   2398:                         * cluster these and handle several sequential
                   2399:                         * pages in one go.
                   2400:                         */
1.30.2.6  nathanw  2401:                        if ((pg = PHYS_TO_VM_PAGE(pa)) != NULL) {
1.30.2.2  thorpej  2402:                                struct pv_entry *pve;
1.30.2.6  nathanw  2403:                                simple_lock(&pg->mdpage.pvh_slock);
                   2404:                                pve = pmap_remove_pv(pg, pmap, sva);
1.30.2.2  thorpej  2405:                                pmap_free_pv(pmap, pve);
1.30.2.6  nathanw  2406:                                pmap_vac_me_harder(pmap, pg, ptes, FALSE);
                   2407:                                simple_unlock(&pg->mdpage.pvh_slock);
1.30.2.2  thorpej  2408:                        }
                   2409:                }
                   2410:                sva += NBPG;
                   2411:                pte++;
                   2412:        }
                   2413:
                   2414:        pmap_unmap_ptes(pmap);
                   2415:        /*
                   2416:         * Now, if we've fallen through to here, chances are that there
                   2417:         * are fewer than PMAP_REMOVE_CLEAN_LIST_SIZE mappings left.
                   2418:         */
                   2419:        if (cleanlist_idx <= PMAP_REMOVE_CLEAN_LIST_SIZE) {
                   2420:                u_int cnt;
                   2421:
                   2422:                for (cnt = 0; cnt < cleanlist_idx; cnt++) {
                   2423:                        if (pmap_active) {
1.30.2.5  nathanw  2424:                                cpu_idcache_wbinv_range(cleanlist[cnt].va,
                   2425:                                    NBPG);
1.30.2.2  thorpej  2426:                                *cleanlist[cnt].pte = 0;
                   2427:                                cpu_tlb_flushID_SE(cleanlist[cnt].va);
                   2428:                        } else
                   2429:                                *cleanlist[cnt].pte = 0;
                   2430:                        pmap_pte_delref(pmap, cleanlist[cnt].va);
                   2431:                }
                   2432:        }
                   2433:        PMAP_MAP_TO_HEAD_UNLOCK();
                   2434: }
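                         /*
                          * Editor's sketch (not part of the original source): typical use of
                          * pmap_remove().  The helper name and the single-page range are
                          * hypothetical; the pmap_update() call reflects the usual pmap(9)
                          * contract that callers commit deferred work themselves (compare
                          * the comment in pmap_protect() below).
                          */
                         #if 0
                         static void
                         example_remove_one_page(vaddr_t va)
                         {
                         
                                 pmap_remove(pmap_kernel(), va, va + NBPG);
                                 pmap_update(pmap_kernel());     /* commit any deferred pmap work */
                         }
                         #endif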
                   2435:
                   2436: /*
                   2437:  * Routine:    pmap_remove_all
                   2438:  * Function:
                   2439:  *             Removes this physical page from
                   2440:  *             all physical maps in which it resides.
                   2441:  *             Reflects back modify bits to the pager.
                   2442:  */
                   2443:
1.30.2.4  nathanw  2444: static void
1.30.2.6  nathanw  2445: pmap_remove_all(struct vm_page *pg)
1.30.2.2  thorpej  2446: {
                   2447:        struct pv_entry *pv, *npv;
                   2448:        struct pmap *pmap;
                   2449:        pt_entry_t *pte, *ptes;
                   2450:
1.30.2.6  nathanw  2451:        PDEBUG(0, printf("pmap_remove_all: pa=%lx ", VM_PAGE_TO_PHYS(pg)));
1.30.2.2  thorpej  2452:
1.30.2.6  nathanw  2453:        /* set vm_page => pmap locking */
1.30.2.2  thorpej  2454:        PMAP_HEAD_TO_MAP_LOCK();
                   2455:
1.30.2.6  nathanw  2456:        simple_lock(&pg->mdpage.pvh_slock);
1.30.2.2  thorpej  2457:
1.30.2.6  nathanw  2458:        pv = pg->mdpage.pvh_list;
                   2459:        if (pv == NULL) {
                   2460:                PDEBUG(0, printf("free page\n"));
                   2461:                simple_unlock(&pg->mdpage.pvh_slock);
                   2462:                PMAP_HEAD_TO_MAP_UNLOCK();
                   2463:                return;
1.30.2.2  thorpej  2464:        }
                   2465:        pmap_clean_page(pv, FALSE);
                   2466:
                   2467:        while (pv) {
                   2468:                pmap = pv->pv_pmap;
                   2469:                ptes = pmap_map_ptes(pmap);
1.30.2.6  nathanw  2470:                pte = &ptes[arm_btop(pv->pv_va)];
1.30.2.2  thorpej  2471:
                   2472:                PDEBUG(0, printf("[%p,%08x,%08lx,%08x] ", pmap, *pte,
                   2473:                    pv->pv_va, pv->pv_flags));
                   2474: #ifdef DEBUG
1.30.2.7  nathanw  2475:                if (pmap_pde_page(pmap_pde(pmap, pv->pv_va)) == 0 ||
                   2476:                    pmap_pte_v(pte) == 0 ||
                   2477:                    pmap_pte_pa(pte) != VM_PAGE_TO_PHYS(pg))
1.30.2.2  thorpej  2478:                        panic("pmap_remove_all: bad mapping");
                   2479: #endif /* DEBUG */
                   2480:
                   2481:                /*
                   2482:                 * Update statistics
                   2483:                 */
                   2484:                --pmap->pm_stats.resident_count;
                   2485:
                   2486:                /* Wired bit */
1.30.2.7  nathanw  2487:                if (pv->pv_flags & PVF_WIRED)
1.30.2.2  thorpej  2488:                        --pmap->pm_stats.wired_count;
                   2489:
                   2490:                /*
                   2491:                 * Invalidate the PTEs.
                   2492:                 * XXX: should cluster them up and invalidate as many
                   2493:                 * as possible at once.
                   2494:                 */
                   2495:
                   2496: #ifdef needednotdone
                   2497: reduce wiring count on page table pages as references drop
                   2498: #endif
                   2499:
                   2500:                *pte = 0;
                   2501:                pmap_pte_delref(pmap, pv->pv_va);
                   2502:
                   2503:                npv = pv->pv_next;
                   2504:                pmap_free_pv(pmap, pv);
                   2505:                pv = npv;
                   2506:                pmap_unmap_ptes(pmap);
                   2507:        }
1.30.2.6  nathanw  2508:        pg->mdpage.pvh_list = NULL;
                   2509:        simple_unlock(&pg->mdpage.pvh_slock);
1.30.2.2  thorpej  2510:        PMAP_HEAD_TO_MAP_UNLOCK();
                   2511:
                   2512:        PDEBUG(0, printf("done\n"));
                   2513:        cpu_tlb_flushID();
1.30.2.3  nathanw  2514:        cpu_cpwait();
1.30.2.2  thorpej  2515: }
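                         /*
                          * Editor's note (not part of the original source): pmap_remove_all()
                          * is static; outside callers reach it through pmap_page_protect()
                          * below when every mapping of a page must go, e.g.:
                          */
                         #if 0
                                 pmap_page_protect(pg, VM_PROT_NONE);    /* ends up in pmap_remove_all(pg) */
                         #endif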
                   2516:
                   2517:
                   2518: /*
                   2519:  * Set the physical protection on the specified range of this map as requested.
                   2520:  */
                   2521:
                   2522: void
1.30.2.6  nathanw  2523: pmap_protect(struct pmap *pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
1.30.2.2  thorpej  2524: {
                   2525:        pt_entry_t *pte = NULL, *ptes;
1.30.2.6  nathanw  2526:        struct vm_page *pg;
1.30.2.2  thorpej  2527:        int armprot;
                   2528:        int flush = 0;
                   2529:        paddr_t pa;
                   2530:
                   2531:        PDEBUG(0, printf("pmap_protect: pmap=%p %08lx->%08lx %x\n",
                   2532:            pmap, sva, eva, prot));
                   2533:
                   2534:        if (~prot & VM_PROT_READ) {
                   2535:                /* Just remove the mappings. */
                   2536:                pmap_remove(pmap, sva, eva);
1.30.2.4  nathanw  2537:                /* pmap_update not needed as it should be called by the caller
                   2538:                 * of pmap_protect */
1.30.2.2  thorpej  2539:                return;
                   2540:        }
                   2541:        if (prot & VM_PROT_WRITE) {
                   2542:                /*
                   2543:                 * If this is a read->write transition, just ignore it and let
                   2544:                 * uvm_fault() take care of it later.
                   2545:                 */
                   2546:                return;
                   2547:        }
                   2548:
                   2549:        /* Need to lock map->head */
                   2550:        PMAP_MAP_TO_HEAD_LOCK();
                   2551:
                   2552:        ptes = pmap_map_ptes(pmap);
1.30.2.8! nathanw  2553:
        !          2554:        /*
        !          2555:         * OK, at this point, we know we're doing a write-protect operation.
        !          2556:         * If the pmap is active, write-back the range.
        !          2557:         */
        !          2558:        if (pmap_is_curpmap(pmap))
        !          2559:                cpu_dcache_wb_range(sva, eva - sva);
        !          2560:
1.30.2.2  thorpej  2561:        /*
                   2562:         * We need to acquire a pointer to a page table page before entering
                   2563:         * the following loop.
                   2564:         */
                   2565:        while (sva < eva) {
                   2566:                if (pmap_pde_page(pmap_pde(pmap, sva)))
                   2567:                        break;
1.30.2.7  nathanw  2568:                sva = (sva & L1_S_FRAME) + L1_S_SIZE;
1.30.2.2  thorpej  2569:        }
                   2570:
1.30.2.6  nathanw  2571:        pte = &ptes[arm_btop(sva)];
1.30.2.2  thorpej  2572:
                   2573:        while (sva < eva) {
                   2574:                /* only check once in a while */
1.30.2.7  nathanw  2575:                if ((sva & L2_ADDR_BITS) == 0) {
1.30.2.2  thorpej  2576:                        if (!pmap_pde_page(pmap_pde(pmap, sva))) {
                   2577:                                /* We can race ahead here, to the next pde. */
1.30.2.7  nathanw  2578:                                sva += L1_S_SIZE;
                   2579:                                pte += arm_btop(L1_S_SIZE);
1.30.2.2  thorpej  2580:                                continue;
                   2581:                        }
                   2582:                }
                   2583:
                   2584:                if (!pmap_pte_v(pte))
                   2585:                        goto next;
                   2586:
                   2587:                flush = 1;
                   2588:
                   2589:                armprot = 0;
                   2590:                if (sva < VM_MAXUSER_ADDRESS)
1.30.2.7  nathanw  2591:                        armprot |= L2_S_PROT_U;
1.30.2.2  thorpej  2592:                else if (sva < VM_MAX_ADDRESS)
1.30.2.7  nathanw  2593:                        armprot |= L2_S_PROT_W;  /* XXX Ekk what is this ? */
1.30.2.2  thorpej  2594:                *pte = (*pte & 0xfffff00f) | armprot;
                   2595:
                   2596:                pa = pmap_pte_pa(pte);
                   2597:
                   2598:                /* Get the physical page index */
                   2599:
                   2600:                /* Clear write flag */
1.30.2.6  nathanw  2601:                if ((pg = PHYS_TO_VM_PAGE(pa)) != NULL) {
                   2602:                        simple_lock(&pg->mdpage.pvh_slock);
1.30.2.7  nathanw  2603:                        (void) pmap_modify_pv(pmap, sva, pg, PVF_WRITE, 0);
1.30.2.6  nathanw  2604:                        pmap_vac_me_harder(pmap, pg, ptes, FALSE);
                   2605:                        simple_unlock(&pg->mdpage.pvh_slock);
1.30.2.2  thorpej  2606:                }
                   2607:
                   2608: next:
                   2609:                sva += NBPG;
                   2610:                pte++;
                   2611:        }
                   2612:        pmap_unmap_ptes(pmap);
                   2613:        PMAP_MAP_TO_HEAD_UNLOCK();
                   2614:        if (flush)
                   2615:                cpu_tlb_flushID();
                   2616: }
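                         /*
                          * Editor's sketch (not part of the original source): write-protecting
                          * a range.  The helper and its arguments are hypothetical; as the
                          * comment above notes, pmap_update() is left to the caller.
                          */
                         #if 0
                         static void
                         example_write_protect(struct pmap *pm, vaddr_t sva, vaddr_t eva)
                         {
                         
                                 pmap_protect(pm, sva, eva, VM_PROT_READ);
                                 pmap_update(pm);        /* caller's responsibility, per above */
                         }
                         #endif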
                   2617:
                   2618: /*
                   2619:  * void pmap_enter(struct pmap *pmap, vaddr_t va, paddr_t pa, vm_prot_t prot,
                   2620:  * int flags)
                   2621:  *
                   2622:  *      Insert the given physical page (p) at
                   2623:  *      the specified virtual address (v) in the
                   2624:  *      target physical map with the protection requested.
                   2625:  *
                   2626:  *      If specified, the page will be wired down, meaning
                   2627:  *      that the related pte can not be reclaimed.
                   2628:  *
                   2629:  *      NB:  This is the only routine which MAY NOT lazy-evaluate
                   2630:  *      or lose information.  That is, this routine must actually
                   2631:  *      insert this page into the given map NOW.
                   2632:  */
                   2633:
                   2634: int
1.30.2.6  nathanw  2635: pmap_enter(struct pmap *pmap, vaddr_t va, paddr_t pa, vm_prot_t prot,
                   2636:     int flags)
1.30.2.2  thorpej  2637: {
1.30.2.6  nathanw  2638:        pt_entry_t *ptes, opte, npte;
1.30.2.2  thorpej  2639:        paddr_t opa;
                   2640:        boolean_t wired = (flags & PMAP_WIRED) != 0;
1.30.2.6  nathanw  2641:        struct vm_page *pg;
1.30.2.2  thorpej  2642:        struct pv_entry *pve;
1.30.2.6  nathanw  2643:        int error, nflags;
1.30.2.2  thorpej  2644:
                   2645:        PDEBUG(5, printf("pmap_enter: V%08lx P%08lx in pmap %p prot=%08x, wired = %d\n",
                   2646:            va, pa, pmap, prot, wired));
                   2647:
                   2648: #ifdef DIAGNOSTIC
                   2649:        /* Valid address ? */
1.30.2.6  nathanw  2650:        if (va >= (pmap_curmaxkvaddr))
1.30.2.2  thorpej  2651:                panic("pmap_enter: too big");
                   2652:        if (pmap != pmap_kernel() && va != 0) {
                   2653:                if (va < VM_MIN_ADDRESS || va >= VM_MAXUSER_ADDRESS)
                   2654:                        panic("pmap_enter: kernel page in user map");
                   2655:        } else {
                   2656:                if (va >= VM_MIN_ADDRESS && va < VM_MAXUSER_ADDRESS)
                   2657:                        panic("pmap_enter: user page in kernel map");
                   2658:                if (va >= VM_MAXUSER_ADDRESS && va < VM_MAX_ADDRESS)
                   2659:                        panic("pmap_enter: entering PT page");
                   2660:        }
                   2661: #endif
1.30.2.7  nathanw  2662:
                   2663:        KDASSERT(((va | pa) & PGOFSET) == 0);
                   2664:
1.30.2.6  nathanw  2665:        /*
                   2666:         * Get a pointer to the page.  Later on in this function, we
                   2667:         * test for a managed page by checking pg != NULL.
                   2668:         */
                   2669:        pg = pmap_initialized ? PHYS_TO_VM_PAGE(pa) : NULL;
                   2670:
1.30.2.2  thorpej  2671:        /* get lock */
                   2672:        PMAP_MAP_TO_HEAD_LOCK();
1.30.2.6  nathanw  2673:
1.30.2.2  thorpej  2674:        /*
1.30.2.6  nathanw  2675:         * map the ptes.  If there's not already an L2 table for this
                   2676:         * address, allocate one.
1.30.2.2  thorpej  2677:         */
1.30.2.6  nathanw  2678:        ptes = pmap_map_ptes(pmap);             /* locks pmap */
                   2679:        if (pmap_pde_v(pmap_pde(pmap, va)) == 0) {
1.30.2.2  thorpej  2680:                struct vm_page *ptp;
                   2681:
1.30.2.6  nathanw  2682:                /* kernel should be pre-grown */
                   2683:                KASSERT(pmap != pmap_kernel());
                   2684:
1.30.2.2  thorpej  2685:                /* if failure is allowed then don't try too hard */
1.30.2.7  nathanw  2686:                ptp = pmap_get_ptp(pmap, va & L1_S_FRAME);
1.30.2.2  thorpej  2687:                if (ptp == NULL) {
                   2688:                        if (flags & PMAP_CANFAIL) {
                   2689:                                error = ENOMEM;
                   2690:                                goto out;
                   2691:                        }
                   2692:                        panic("pmap_enter: get ptp failed");
                   2693:                }
                   2694:        }
1.30.2.6  nathanw  2695:        opte = ptes[arm_btop(va)];
1.30.2.2  thorpej  2696:
                   2697:        nflags = 0;
                   2698:        if (prot & VM_PROT_WRITE)
1.30.2.7  nathanw  2699:                nflags |= PVF_WRITE;
1.30.2.2  thorpej  2700:        if (wired)
1.30.2.7  nathanw  2701:                nflags |= PVF_WIRED;
1.30.2.2  thorpej  2702:
                   2703:        /* Is the pte valid ? If so then this page is already mapped */
1.30.2.6  nathanw  2704:        if (l2pte_valid(opte)) {
1.30.2.2  thorpej  2705:                /* Get the physical address of the current page mapped */
1.30.2.6  nathanw  2706:                opa = l2pte_pa(opte);
1.30.2.2  thorpej  2707:
                   2708:                /* Are we mapping the same page ? */
                   2709:                if (opa == pa) {
                   2710:                        /* Has the wiring changed ? */
1.30.2.6  nathanw  2711:                        if (pg != NULL) {
                   2712:                                simple_lock(&pg->mdpage.pvh_slock);
                   2713:                                (void) pmap_modify_pv(pmap, va, pg,
1.30.2.7  nathanw  2714:                                    PVF_WRITE | PVF_WIRED, nflags);
1.30.2.6  nathanw  2715:                                simple_unlock(&pg->mdpage.pvh_slock);
                   2716:                        }
1.30.2.2  thorpej  2717:                } else {
1.30.2.6  nathanw  2718:                        struct vm_page *opg;
                   2719:
1.30.2.2  thorpej  2720:                        /* We are replacing the page with a new one. */
1.30.2.5  nathanw  2721:                        cpu_idcache_wbinv_range(va, NBPG);
1.30.2.2  thorpej  2722:
                   2723:                        /*
                   2724:                         * If it is part of our managed memory then we
                   2725:                         * must remove it from the PV list
                   2726:                         */
1.30.2.6  nathanw  2727:                        if ((opg = PHYS_TO_VM_PAGE(opa)) != NULL) {
                   2728:                                simple_lock(&opg->mdpage.pvh_slock);
                   2729:                                pve = pmap_remove_pv(opg, pmap, va);
                   2730:                                simple_unlock(&opg->mdpage.pvh_slock);
1.30.2.2  thorpej  2731:                        } else {
                   2732:                                pve = NULL;
                   2733:                        }
                   2734:
                   2735:                        goto enter;
                   2736:                }
                   2737:        } else {
                   2738:                opa = 0;
                   2739:                pve = NULL;
                   2740:                pmap_pte_addref(pmap, va);
                   2741:
                   2742:                /* pte is not valid so we must be hooking in a new page */
                   2743:                ++pmap->pm_stats.resident_count;
                   2744:
                   2745:        enter:
                   2746:                /*
                   2747:                 * Enter on the PV list if part of our managed memory
                   2748:                 */
1.30.2.6  nathanw  2749:                if (pg != NULL) {
1.30.2.2  thorpej  2750:                        if (pve == NULL) {
                   2751:                                pve = pmap_alloc_pv(pmap, ALLOCPV_NEED);
                   2752:                                if (pve == NULL) {
                   2753:                                        if (flags & PMAP_CANFAIL) {
                   2754:                                                error = ENOMEM;
                   2755:                                                goto out;
                   2756:                                        }
1.30.2.6  nathanw  2757:                                        panic("pmap_enter: no pv entries "
                   2758:                                            "available");
1.30.2.2  thorpej  2759:                                }
                   2760:                        }
                   2761:                        /* enter_pv locks pvh when adding */
1.30.2.6  nathanw  2762:                        pmap_enter_pv(pg, pve, pmap, va, NULL, nflags);
1.30.2.2  thorpej  2763:                } else {
                   2764:                        if (pve != NULL)
                   2765:                                pmap_free_pv(pmap, pve);
                   2766:                }
                   2767:        }
                   2768:
                   2769:        /* Construct the pte, giving the correct access. */
1.30.2.7  nathanw  2770:        npte = pa;
1.30.2.2  thorpej  2771:
                   2772:        /* The vector page is magic; never grant user access to it. */
1.30.2.7  nathanw  2773:        if (pmap != pmap_kernel() && va != vector_page)
                   2774:                npte |= L2_S_PROT_U;
1.30.2.2  thorpej  2775:
1.30.2.6  nathanw  2776:        if (pg != NULL) {
1.30.2.2  thorpej  2777: #ifdef DIAGNOSTIC
                   2778:                if ((flags & VM_PROT_ALL) & ~prot)
                   2779:                        panic("pmap_enter: access_type exceeds prot");
                   2780: #endif
1.30.2.7  nathanw  2781:                npte |= pte_l2_s_cache_mode;
1.30.2.2  thorpej  2782:                if (flags & VM_PROT_WRITE) {
1.30.2.7  nathanw  2783:                        npte |= L2_S_PROTO | L2_S_PROT_W;
                   2784:                        pg->mdpage.pvh_attrs |= PVF_REF | PVF_MOD;
1.30.2.2  thorpej  2785:                } else if (flags & VM_PROT_ALL) {
1.30.2.7  nathanw  2786:                        npte |= L2_S_PROTO;
                   2787:                        pg->mdpage.pvh_attrs |= PVF_REF;
1.30.2.2  thorpej  2788:                } else
1.30.2.7  nathanw  2789:                        npte |= L2_TYPE_INV;
1.30.2.2  thorpej  2790:        } else {
                   2791:                if (prot & VM_PROT_WRITE)
1.30.2.7  nathanw  2792:                        npte |= L2_S_PROTO | L2_S_PROT_W;
1.30.2.2  thorpej  2793:                else if (prot & VM_PROT_ALL)
1.30.2.7  nathanw  2794:                        npte |= L2_S_PROTO;
1.30.2.2  thorpej  2795:                else
1.30.2.7  nathanw  2796:                        npte |= L2_TYPE_INV;
1.30.2.2  thorpej  2797:        }
                   2798:
1.30.2.6  nathanw  2799:        ptes[arm_btop(va)] = npte;
1.30.2.2  thorpej  2800:
1.30.2.6  nathanw  2801:        if (pg != NULL) {
                   2802:                simple_lock(&pg->mdpage.pvh_slock);
                   2803:                pmap_vac_me_harder(pmap, pg, ptes, pmap_is_curpmap(pmap));
                   2804:                simple_unlock(&pg->mdpage.pvh_slock);
1.30.2.2  thorpej  2805:        }
                   2806:
                   2807:        /* Better flush the TLB ... */
                   2808:        cpu_tlb_flushID_SE(va);
                   2809:        error = 0;
                   2810: out:
1.30.2.6  nathanw  2811:        pmap_unmap_ptes(pmap);                  /* unlocks pmap */
1.30.2.2  thorpej  2812:        PMAP_MAP_TO_HEAD_UNLOCK();
                   2813:
                   2814:        return error;
                   2815: }
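                         /*
                          * Editor's sketch (not part of the original source): entering a
                          * managed page with failure permitted.  The flags argument carries
                          * the access type plus PMAP_CANFAIL/PMAP_WIRED, exactly as tested
                          * above; "pm", "va" and "pg" are assumed to come from the caller.
                          */
                         #if 0
                                 int error;
                         
                                 error = pmap_enter(pm, va, VM_PAGE_TO_PHYS(pg),
                                     VM_PROT_READ | VM_PROT_WRITE,
                                     VM_PROT_READ | VM_PROT_WRITE | PMAP_CANFAIL);
                                 if (error == ENOMEM) {
                                         /* back off: wait for memory or fail the fault */
                                 }
                                 pmap_update(pm);
                         #endif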
                   2816:
1.30.2.6  nathanw  2817: /*
                   2818:  * pmap_kenter_pa: enter a kernel mapping
                   2819:  *
                   2820:  * => no need to lock anything; assume va is already allocated
                   2821:  * => should be faster than the normal pmap_enter() function
                   2822:  */
1.30.2.2  thorpej  2823: void
1.30.2.6  nathanw  2824: pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot)
1.30.2.2  thorpej  2825: {
                   2826:        pt_entry_t *pte;
                   2827:
                   2828:        pte = vtopte(va);
                   2829:        KASSERT(!pmap_pte_v(pte));
1.30.2.7  nathanw  2830:
                   2831:        *pte = L2_S_PROTO | pa |
                   2832:            L2_S_PROT(PTE_KERNEL, prot) | pte_l2_s_cache_mode;
1.30.2.2  thorpej  2833: }
                   2834:
                   2835: void
1.30.2.6  nathanw  2836: pmap_kremove(vaddr_t va, vsize_t len)
1.30.2.2  thorpej  2837: {
                   2838:        pt_entry_t *pte;
                   2839:
                   2840:        for (len >>= PAGE_SHIFT; len > 0; len--, va += PAGE_SIZE) {
                   2841:
                   2842:                /*
                   2843:                 * We assume that we will only be called with small
                   2844:                 * regions of memory.
                   2845:                 */
                   2846:
                   2847:                KASSERT(pmap_pde_page(pmap_pde(pmap_kernel(), va)));
                   2848:                pte = vtopte(va);
1.30.2.5  nathanw  2849:                cpu_idcache_wbinv_range(va, PAGE_SIZE);
1.30.2.2  thorpej  2850:                *pte = 0;
                   2851:                cpu_tlb_flushID_SE(va);
                   2852:        }
                   2853: }
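                         /*
                          * Editor's sketch (not part of the original source): the unlocked
                          * kernel-only fast path.  "va" is assumed to be kernel VA space
                          * that has already been allocated, as the comment above
                          * pmap_kenter_pa() requires.
                          */
                         #if 0
                                 pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg), VM_PROT_READ | VM_PROT_WRITE);
                                 pmap_update(pmap_kernel());
                                 /* ... use the mapping ... */
                                 pmap_kremove(va, PAGE_SIZE);
                                 pmap_update(pmap_kernel());
                         #endif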
                   2854:
                   2855: /*
                   2856:  * pmap_page_protect:
                   2857:  *
                   2858:  * Lower the permission for all mappings to a given page.
                   2859:  */
                   2860:
                   2861: void
1.30.2.6  nathanw  2862: pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
1.30.2.2  thorpej  2863: {
                   2864:
1.30.2.6  nathanw  2865:        PDEBUG(0, printf("pmap_page_protect(pa=%lx, prot=%d)\n",
                   2866:            VM_PAGE_TO_PHYS(pg), prot));
1.30.2.2  thorpej  2867:
                   2868:        switch(prot) {
                   2869:        case VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE:
                   2870:        case VM_PROT_READ|VM_PROT_WRITE:
                   2871:                return;
                   2872:
                   2873:        case VM_PROT_READ:
                   2874:        case VM_PROT_READ|VM_PROT_EXECUTE:
1.30.2.7  nathanw  2875:                pmap_clearbit(pg, PVF_WRITE);
1.30.2.2  thorpej  2876:                break;
                   2877:
                   2878:        default:
1.30.2.6  nathanw  2879:                pmap_remove_all(pg);
1.30.2.2  thorpej  2880:                break;
                   2881:        }
                   2882: }
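                         /*
                          * Editor's note (not part of the original source): downgrading a
                          * page to read-only takes the pmap_clearbit(pg, PVF_WRITE) path
                          * above, so a copy-on-write style revocation is simply:
                          */
                         #if 0
                                 pmap_page_protect(pg, VM_PROT_READ);    /* revoke write from all mappings */
                         #endif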
                   2883:
                   2884:
                   2885: /*
                   2886:  * Routine:    pmap_unwire
                   2887:  * Function:   Clear the wired attribute for a map/virtual-address
                   2888:  *             pair.
                   2889:  * In/out conditions:
                   2890:  *             The mapping must already exist in the pmap.
                   2891:  */
                   2892:
                   2893: void
1.30.2.6  nathanw  2894: pmap_unwire(struct pmap *pmap, vaddr_t va)
1.30.2.2  thorpej  2895: {
1.30.2.6  nathanw  2896:        pt_entry_t *ptes;
                   2897:        struct vm_page *pg;
1.30.2.2  thorpej  2898:        paddr_t pa;
                   2899:
1.30.2.6  nathanw  2900:        PMAP_MAP_TO_HEAD_LOCK();
                   2901:        ptes = pmap_map_ptes(pmap);             /* locks pmap */
1.30.2.2  thorpej  2902:
1.30.2.6  nathanw  2903:        if (pmap_pde_v(pmap_pde(pmap, va))) {
1.30.2.2  thorpej  2904: #ifdef DIAGNOSTIC
1.30.2.6  nathanw  2905:                if (l2pte_valid(ptes[arm_btop(va)]) == 0)
                   2906:                        panic("pmap_unwire: invalid L2 PTE");
1.30.2.2  thorpej  2907: #endif
1.30.2.6  nathanw  2908:                /* Extract the physical address of the page */
                   2909:                pa = l2pte_pa(ptes[arm_btop(va)]);
1.30.2.2  thorpej  2910:
1.30.2.6  nathanw  2911:                if ((pg = PHYS_TO_VM_PAGE(pa)) == NULL)
                   2912:                        goto out;
                   2913:
                   2914:                /* Update the wired bit in the pv entry for this page. */
                   2915:                simple_lock(&pg->mdpage.pvh_slock);
1.30.2.7  nathanw  2916:                (void) pmap_modify_pv(pmap, va, pg, PVF_WIRED, 0);
1.30.2.6  nathanw  2917:                simple_unlock(&pg->mdpage.pvh_slock);
                   2918:        }
                   2919: #ifdef DIAGNOSTIC
                   2920:        else {
                   2921:                panic("pmap_unwire: invalid L1 PTE");
1.30.2.2  thorpej  2922:        }
1.30.2.6  nathanw  2923: #endif
                   2924:  out:
                   2925:        pmap_unmap_ptes(pmap);                  /* unlocks pmap */
                   2926:        PMAP_MAP_TO_HEAD_UNLOCK();
1.30.2.2  thorpej  2927: }
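                         /*
                          * Editor's note (not part of the original source): pmap_unwire()
                          * pairs with a pmap_enter() that used PMAP_WIRED; the mapping must
                          * already exist, as the comment above states.  "pm" and "va" are
                          * assumed from the caller.
                          */
                         #if 0
                                 pmap_unwire(pm, va);    /* same pm/va as at pmap_enter() time */
                         #endif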
                   2928:
                   2929: /*
                   2930:  * Routine:  pmap_extract
                   2931:  * Function:
                   2932:  *           Extract the physical page address associated
                   2933:  *           with the given map/virtual_address pair.
                   2934:  */
                   2935: boolean_t
1.30.2.6  nathanw  2936: pmap_extract(struct pmap *pmap, vaddr_t va, paddr_t *pap)
1.30.2.2  thorpej  2937: {
1.30.2.5  nathanw  2938:        pd_entry_t *pde;
1.30.2.2  thorpej  2939:        pt_entry_t *pte, *ptes;
                   2940:        paddr_t pa;
                   2941:
1.30.2.7  nathanw  2942:        PDEBUG(5, printf("pmap_extract: pmap=%p, va=0x%08lx -> ", pmap, va));
                   2943:
                   2944:        ptes = pmap_map_ptes(pmap);             /* locks pmap */
1.30.2.2  thorpej  2945:
1.30.2.5  nathanw  2946:        pde = pmap_pde(pmap, va);
1.30.2.6  nathanw  2947:        pte = &ptes[arm_btop(va)];
1.30.2.2  thorpej  2948:
1.30.2.5  nathanw  2949:        if (pmap_pde_section(pde)) {
1.30.2.7  nathanw  2950:                pa = (*pde & L1_S_FRAME) | (va & L1_S_OFFSET);
                   2951:                PDEBUG(5, printf("section pa=0x%08lx\n", pa));
1.30.2.5  nathanw  2952:                goto out;
                   2953:        } else if (pmap_pde_page(pde) == 0 || pmap_pte_v(pte) == 0) {
1.30.2.7  nathanw  2954:                PDEBUG(5, printf("no mapping\n"));
                   2955:                goto failed;
1.30.2.2  thorpej  2956:        }
                   2957:
1.30.2.7  nathanw  2958:        if ((*pte & L2_TYPE_MASK) == L2_TYPE_L) {
                   2959:                pa = (*pte & L2_L_FRAME) | (va & L2_L_OFFSET);
                   2960:                PDEBUG(5, printf("large page pa=0x%08lx\n", pa));
1.30.2.5  nathanw  2961:                goto out;
                   2962:        }
1.30.2.2  thorpej  2963:
1.30.2.7  nathanw  2964:        pa = (*pte & L2_S_FRAME) | (va & L2_S_OFFSET);
                   2965:        PDEBUG(5, printf("small page pa=0x%08lx\n", pa));
1.30.2.2  thorpej  2966:
1.30.2.5  nathanw  2967:  out:
1.30.2.7  nathanw  2968:        if (pap != NULL)
                   2969:                *pap = pa;
                   2970:
                   2971:        pmap_unmap_ptes(pmap);                  /* unlocks pmap */
                   2972:        return (TRUE);
                   2973:
                   2974:  failed:
                   2975:        pmap_unmap_ptes(pmap);                  /* unlocks pmap */
                   2976:        return (FALSE);
1.30.2.2  thorpej  2977: }
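
/*
 * Illustrative sketch only (hypothetical helper): a typical caller checks
 * the boolean result rather than assuming the VA is mapped.
 */
#if 0
static paddr_t
example_extract(struct pmap *pm, vaddr_t va)
{
	paddr_t pa;

	if (pmap_extract(pm, va, &pa) == FALSE)
		panic("example_extract: no mapping for va 0x%08lx", va);
	return (pa);
}
#endif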
                   2978:
                   2979:
                   2980: /*
1.30.2.6  nathanw  2981:  * pmap_copy:
                   2982:  *
                   2983:  *     Copy the range specified by src_addr/len from the source map to the
                   2984:  *     range dst_addr/len in the destination map.
1.30.2.2  thorpej  2985:  *
1.30.2.6  nathanw  2986:  *     This routine is only advisory and need not do anything.
1.30.2.2  thorpej  2987:  */
1.30.2.6  nathanw  2988: /* Call deleted in <arm/arm32/pmap.h> */
1.30.2.2  thorpej  2989:
                   2990: #if defined(PMAP_DEBUG)
                   2991: void
                   2992: pmap_dump_pvlist(vaddr_t phys, char *m)
                   2995: {
1.30.2.6  nathanw  2996:        struct vm_page *pg;
1.30.2.2  thorpej  2997:        struct pv_entry *pv;
                   2998:
1.30.2.6  nathanw  2999:        if ((pg = PHYS_TO_VM_PAGE(phys)) == NULL) {
1.30.2.2  thorpej  3000:                printf("INVALID PA\n");
                   3001:                return;
                   3002:        }
1.30.2.6  nathanw  3003:        simple_lock(&pg->mdpage.pvh_slock);
1.30.2.2  thorpej  3004:        printf("%s %08lx:", m, phys);
1.30.2.6  nathanw  3005:        if (pg->mdpage.pvh_list == NULL) {
1.30.2.8! nathanw  3006:                simple_unlock(&pg->mdpage.pvh_slock);
1.30.2.2  thorpej  3007:                printf(" no mappings\n");
                   3008:                return;
                   3009:        }
                   3010:
1.30.2.6  nathanw  3011:        for (pv = pg->mdpage.pvh_list; pv; pv = pv->pv_next)
1.30.2.2  thorpej  3012:                printf(" pmap %p va %08lx flags %08x", pv->pv_pmap,
                   3013:                    pv->pv_va, pv->pv_flags);
                   3014:
                   3015:        printf("\n");
1.30.2.6  nathanw  3016:        simple_unlock(&pg->mdpage.pvh_slock);
1.30.2.2  thorpej  3017: }
                   3018:
                   3019: #endif /* PMAP_DEBUG */
                   3020:
                   3021: static pt_entry_t *
                   3022: pmap_map_ptes(struct pmap *pmap)
                   3023: {
                   3024:        struct lwp *l;
1.30.2.6  nathanw  3025:        struct proc *p;
1.30.2.2  thorpej  3026:
                   3027:        /* the kernel's pmap is always accessible */
                   3028:        if (pmap == pmap_kernel()) {
1.30.2.6  nathanw  3029:                return (pt_entry_t *)PTE_BASE;
1.30.2.2  thorpej  3030:        }
                   3031:
                   3032:        if (pmap_is_curpmap(pmap)) {
                   3033:                simple_lock(&pmap->pm_obj.vmobjlock);
1.30.2.6  nathanw  3034:                return (pt_entry_t *)PTE_BASE;
1.30.2.2  thorpej  3035:        }
1.30.2.6  nathanw  3036:
1.30.2.2  thorpej  3037:        l = curproc;
1.30.2.6  nathanw  3038:        KDASSERT(l != NULL);
1.30.2.2  thorpej  3039:        p = l->l_proc;
                   3040:
                   3041:        /* need to lock both curpmap and pmap: use ordered locking */
1.30.2.6  nathanw  3042:        if ((vaddr_t) pmap < (vaddr_t) p->p_vmspace->vm_map.pmap) {
1.30.2.2  thorpej  3043:                simple_lock(&pmap->pm_obj.vmobjlock);
                   3044:                simple_lock(&p->p_vmspace->vm_map.pmap->pm_obj.vmobjlock);
                   3045:        } else {
                   3046:                simple_lock(&p->p_vmspace->vm_map.pmap->pm_obj.vmobjlock);
                   3047:                simple_lock(&pmap->pm_obj.vmobjlock);
                   3048:        }
                   3049:
1.30.2.6  nathanw  3050:        pmap_map_in_l1(p->p_vmspace->vm_map.pmap, APTE_BASE, pmap->pm_pptpt,
                   3051:            FALSE);
1.30.2.2  thorpej  3052:        cpu_tlb_flushD();
1.30.2.3  nathanw  3053:        cpu_cpwait();
1.30.2.6  nathanw  3054:        return (pt_entry_t *)APTE_BASE;
1.30.2.2  thorpej  3055: }
                   3056:
                   3057: /*
                   3058:  * pmap_unmap_ptes: unlock the PTE mapping of "pmap"
                   3059:  */
                   3060:
                   3061: static void
1.30.2.6  nathanw  3062: pmap_unmap_ptes(struct pmap *pmap)
1.30.2.2  thorpej  3063: {
                   3064:
                   3065:        if (pmap == pmap_kernel()) {
                   3066:                return;
                   3067:        }
                   3068:        if (pmap_is_curpmap(pmap)) {
                   3069:                simple_unlock(&pmap->pm_obj.vmobjlock);
                   3070:        } else {
1.30.2.6  nathanw  3071:                KDASSERT(curproc != NULL);
1.30.2.2  thorpej  3072:                simple_unlock(&pmap->pm_obj.vmobjlock);
1.30.2.6  nathanw  3073:                simple_unlock(
                   3074:                    &curproc->l_proc->p_vmspace->vm_map.pmap->pm_obj.vmobjlock);
1.30.2.2  thorpej  3075:        }
                   3076: }
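
/*
 * Illustrative sketch only, restating the logic above: the kernel pmap's
 * PTEs are always visible at PTE_BASE, the current pmap's PTEs are also at
 * PTE_BASE (but need the pmap lock), and any other pmap is temporarily
 * mapped at APTE_BASE using ordered locking of the two pmaps.  Callers
 * within this file bracket PTE access like this hypothetical helper:
 */
#if 0
static pt_entry_t
example_read_pte(struct pmap *pm, vaddr_t va)
{
	pt_entry_t *ptes, pte;

	ptes = pmap_map_ptes(pm);		/* locks pm */
	pte = ptes[arm_btop(va)];
	pmap_unmap_ptes(pm);			/* unlocks pm */
	return (pte);
}
#endif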
                   3077:
                   3078: /*
                   3079:  * Modify pte bits for all ptes corresponding to the given physical address.
                   3080:  * We use `maskbits' rather than `clearbits' because we're always passing
                   3081:  * constants and the latter would require an extra inversion at run-time.
                   3082:  */
                   3083:
                   3084: static void
1.30.2.6  nathanw  3085: pmap_clearbit(struct vm_page *pg, u_int maskbits)
1.30.2.2  thorpej  3086: {
                   3087:        struct pv_entry *pv;
1.30.2.6  nathanw  3088:        pt_entry_t *ptes;
1.30.2.2  thorpej  3089:        vaddr_t va;
1.30.2.6  nathanw  3090:        int tlbentry;
1.30.2.2  thorpej  3091:
                   3092:        PDEBUG(1, printf("pmap_clearbit: pa=%08lx mask=%08x\n",
1.30.2.6  nathanw  3093:            VM_PAGE_TO_PHYS(pg), maskbits));
1.30.2.2  thorpej  3094:
                   3095:        tlbentry = 0;
                   3096:
                   3097:        PMAP_HEAD_TO_MAP_LOCK();
1.30.2.6  nathanw  3098:        simple_lock(&pg->mdpage.pvh_slock);
1.30.2.2  thorpej  3099:
                   3100:        /*
                   3101:         * Clear saved attributes (modify, reference)
                   3102:         */
1.30.2.6  nathanw  3103:        pg->mdpage.pvh_attrs &= ~maskbits;
1.30.2.2  thorpej  3104:
1.30.2.6  nathanw  3105:        if (pg->mdpage.pvh_list == NULL) {
                   3106:                simple_unlock(&pg->mdpage.pvh_slock);
1.30.2.2  thorpej  3107:                PMAP_HEAD_TO_MAP_UNLOCK();
                   3108:                return;
                   3109:        }
                   3110:
                   3111:        /*
                   3112:         * Loop over all current mappings, setting/clearing as appropriate.
                   3113:         */
1.30.2.6  nathanw  3114:        for (pv = pg->mdpage.pvh_list; pv; pv = pv->pv_next) {
1.30.2.2  thorpej  3115:                va = pv->pv_va;
                   3116:                pv->pv_flags &= ~maskbits;
1.30.2.6  nathanw  3117:                ptes = pmap_map_ptes(pv->pv_pmap);      /* locks pmap */
                   3118:                KASSERT(pmap_pde_v(pmap_pde(pv->pv_pmap, va)));
1.30.2.7  nathanw  3119:                if (maskbits & (PVF_WRITE|PVF_MOD)) {
                   3120:                        if ((pv->pv_flags & PVF_NC)) {
1.30.2.2  thorpej  3121:                                /*
                   3122:                                 * Entry is not cacheable: reenable
                   3123:                                 * the cache, nothing to flush
                   3124:                                 *
                   3125:                                 * Don't turn caching on again if this
                   3126:                                 * is a modified emulation.  This
                   3127:                                 * would be inconsistent with the
                   3128:                                 * settings created by
                   3129:                                 * pmap_vac_me_harder().
                   3130:                                 *
                   3131:                                 * There's no need to call
                   3132:                                 * pmap_vac_me_harder() here: all
                   3133:                                 * pages are losing their write
                   3134:                                 * permission.
                   3135:                                 *
                   3136:                                 */
1.30.2.7  nathanw  3137:                                if (maskbits & PVF_WRITE) {
                   3138:                                        ptes[arm_btop(va)] |=
                   3139:                                            pte_l2_s_cache_mode;
                   3140:                                        pv->pv_flags &= ~PVF_NC;
1.30.2.2  thorpej  3141:                                }
1.30.2.6  nathanw  3142:                        } else if (pmap_is_curpmap(pv->pv_pmap)) {
1.30.2.2  thorpej  3143:                                /*
                   3144:                                 * Entry is cacheable: if the pmap is
                   3145:                                 * the current one, flush the cache
                   3146:                                 * line; otherwise it can't be in the cache.
                   3147:                                 */
1.30.2.5  nathanw  3148:                                cpu_idcache_wbinv_range(pv->pv_va, NBPG);
1.30.2.6  nathanw  3149:                        }
1.30.2.2  thorpej  3150:
                   3151:                        /* make the pte read only */
1.30.2.7  nathanw  3152:                        ptes[arm_btop(va)] &= ~L2_S_PROT_W;
1.30.2.2  thorpej  3153:                }
                   3154:
1.30.2.7  nathanw  3155:                if (maskbits & PVF_REF)
1.30.2.6  nathanw  3156:                        ptes[arm_btop(va)] =
1.30.2.7  nathanw  3157:                            (ptes[arm_btop(va)] & ~L2_TYPE_MASK) | L2_TYPE_INV;
1.30.2.2  thorpej  3158:
1.30.2.6  nathanw  3159:                if (pmap_is_curpmap(pv->pv_pmap)) {
1.30.2.2  thorpej  3160:                        /*
                   3161:                         * if we had cacheable pte's we'd clean the
                   3162:                         * pte out to memory here
                   3163:                         *
                   3164:                         * flush tlb entry as it's in the current pmap
                   3165:                         */
                   3166:                        cpu_tlb_flushID_SE(pv->pv_va);
1.30.2.6  nathanw  3167:                }
                   3168:                pmap_unmap_ptes(pv->pv_pmap);           /* unlocks pmap */
1.30.2.2  thorpej  3169:        }
1.30.2.3  nathanw  3170:        cpu_cpwait();
1.30.2.2  thorpej  3171:
1.30.2.6  nathanw  3172:        simple_unlock(&pg->mdpage.pvh_slock);
1.30.2.2  thorpej  3173:        PMAP_HEAD_TO_MAP_UNLOCK();
                   3174: }
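
/*
 * Illustrative sketch only (hypothetical helper): revoking write access
 * and the saved "modified" attribute for a page in a single pass over its
 * PV list.
 */
#if 0
static void
example_revoke_write(struct vm_page *pg)
{

	pmap_clearbit(pg, PVF_WRITE|PVF_MOD);
}
#endif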
                   3175:
1.30.2.6  nathanw  3176: /*
                   3177:  * pmap_clear_modify:
                   3178:  *
                   3179:  *     Clear the "modified" attribute for a page.
                   3180:  */
1.30.2.2  thorpej  3181: boolean_t
1.30.2.6  nathanw  3182: pmap_clear_modify(struct vm_page *pg)
1.30.2.2  thorpej  3183: {
                   3184:        boolean_t rv;
                   3185:
1.30.2.7  nathanw  3186:        if (pg->mdpage.pvh_attrs & PVF_MOD) {
1.30.2.6  nathanw  3187:                rv = TRUE;
1.30.2.7  nathanw  3188:                pmap_clearbit(pg, PVF_MOD);
1.30.2.6  nathanw  3189:        } else
                   3190:                rv = FALSE;
1.30.2.2  thorpej  3191:
1.30.2.6  nathanw  3192:        PDEBUG(0, printf("pmap_clear_modify pa=%08lx -> %d\n",
                   3193:            VM_PAGE_TO_PHYS(pg), rv));
                   3194:
                   3195:        return (rv);
                   3196: }
1.30.2.2  thorpej  3197:
1.30.2.6  nathanw  3198: /*
                   3199:  * pmap_clear_reference:
                   3200:  *
                   3201:  *     Clear the "referenced" attribute for a page.
                   3202:  */
1.30.2.2  thorpej  3203: boolean_t
1.30.2.6  nathanw  3204: pmap_clear_reference(struct vm_page *pg)
1.30.2.2  thorpej  3205: {
                   3206:        boolean_t rv;
                   3207:
1.30.2.7  nathanw  3208:        if (pg->mdpage.pvh_attrs & PVF_REF) {
1.30.2.6  nathanw  3209:                rv = TRUE;
1.30.2.7  nathanw  3210:                pmap_clearbit(pg, PVF_REF);
1.30.2.6  nathanw  3211:        } else
                   3212:                rv = FALSE;
1.30.2.2  thorpej  3213:
1.30.2.6  nathanw  3214:        PDEBUG(0, printf("pmap_clear_reference pa=%08lx -> %d\n",
                   3215:            VM_PAGE_TO_PHYS(pg), rv));
1.30.2.2  thorpej  3216:
1.30.2.6  nathanw  3217:        return (rv);
1.30.2.2  thorpej  3218: }
                   3219:
1.30.2.6  nathanw  3220: /*
                   3221:  * pmap_is_modified:
                   3222:  *
                   3223:  *     Test if a page has the "modified" attribute.
                   3224:  */
                   3225: /* See <arm/arm32/pmap.h> */
1.30.2.2  thorpej  3226:
1.30.2.6  nathanw  3227: /*
                   3228:  * pmap_is_referenced:
                   3229:  *
                   3230:  *     Test if a page has the "referenced" attribute.
                   3231:  */
                   3232: /* See <arm/arm32/pmap.h> */
1.30.2.2  thorpej  3233:
                   3234: int
1.30.2.6  nathanw  3235: pmap_modified_emulation(struct pmap *pmap, vaddr_t va)
1.30.2.2  thorpej  3236: {
1.30.2.6  nathanw  3237:        pt_entry_t *ptes;
                   3238:        struct vm_page *pg;
1.30.2.2  thorpej  3239:        paddr_t pa;
                   3240:        u_int flags;
1.30.2.6  nathanw  3241:        int rv = 0;
1.30.2.2  thorpej  3242:
                   3243:        PDEBUG(2, printf("pmap_modified_emulation\n"));
                   3244:
1.30.2.6  nathanw  3245:        PMAP_MAP_TO_HEAD_LOCK();
                   3246:        ptes = pmap_map_ptes(pmap);             /* locks pmap */
                   3247:
                   3248:        if (pmap_pde_v(pmap_pde(pmap, va)) == 0) {
                   3249:                PDEBUG(2, printf("L1 PTE invalid\n"));
                   3250:                goto out;
1.30.2.2  thorpej  3251:        }
                   3252:
1.30.2.6  nathanw  3253:        PDEBUG(1, printf("pte=%08x\n", ptes[arm_btop(va)]));
                   3254:
                   3255:        /* Check for an invalid PTE */
                   3256:        if (l2pte_valid(ptes[arm_btop(va)]) == 0)
                   3257:                goto out;
1.30.2.2  thorpej  3258:
                   3259:        /* This can happen if user code tries to access kernel memory. */
1.30.2.7  nathanw  3260:        if ((ptes[arm_btop(va)] & L2_S_PROT_W) != 0)
1.30.2.6  nathanw  3261:                goto out;
1.30.2.2  thorpej  3262:
                   3263:        /* Extract the physical address of the page */
1.30.2.6  nathanw  3264:        pa = l2pte_pa(ptes[arm_btop(va)]);
                   3265:        if ((pg = PHYS_TO_VM_PAGE(pa)) == NULL)
                   3266:                goto out;
1.30.2.2  thorpej  3267:
                   3268:        /* Get the current flags for this page. */
1.30.2.6  nathanw  3269:        simple_lock(&pg->mdpage.pvh_slock);
1.30.2.2  thorpej  3270:
1.30.2.6  nathanw  3271:        flags = pmap_modify_pv(pmap, va, pg, 0, 0);
1.30.2.2  thorpej  3272:        PDEBUG(2, printf("pmap_modified_emulation: flags = %08x\n", flags));
                   3273:
                   3274:        /*
                   3275:         * Do the flags say this page is writable?  If not, then this is a
                   3276:         * genuine write fault.  If so, then the write fault is our fault,
                   3277:         * because we did not reflect the write access in the PTE.  Now
                   3278:         * that we know a write has occurred, we can correct this and also
                   3279:         * set the modified bit.
                   3280:         */
1.30.2.7  nathanw  3281:        if (~flags & PVF_WRITE) {
1.30.2.6  nathanw  3282:                simple_unlock(&pg->mdpage.pvh_slock);
                   3283:                goto out;
1.30.2.2  thorpej  3284:        }
                   3285:
1.30.2.6  nathanw  3286:        PDEBUG(0,
                   3287:            printf("pmap_modified_emulation: Got a hit va=%08lx, pte = %08x\n",
                   3288:            va, ptes[arm_btop(va)]));
1.30.2.7  nathanw  3289:        pg->mdpage.pvh_attrs |= PVF_REF | PVF_MOD;
1.30.2.2  thorpej  3290:
                   3291:        /*
                   3292:         * Re-enable write permissions for the page.  No need to call
                   3293:         * pmap_vac_me_harder(), since this is just a
1.30.2.7  nathanw  3294:         * modified-emulation fault, and the PVF_WRITE bit isn't changing.
                   3295:         * We've already set the cacheable bits based on the assumption
                   3296:         * that we can write to this page.
1.30.2.2  thorpej  3297:         */
1.30.2.6  nathanw  3298:        ptes[arm_btop(va)] =
1.30.2.7  nathanw  3299:            (ptes[arm_btop(va)] & ~L2_TYPE_MASK) | L2_S_PROTO | L2_S_PROT_W;
1.30.2.6  nathanw  3300:        PDEBUG(0, printf("->(%08x)\n", ptes[arm_btop(va)]));
                   3301:
                   3302:        simple_unlock(&pg->mdpage.pvh_slock);
1.30.2.2  thorpej  3303:
                   3304:        cpu_tlb_flushID_SE(va);
1.30.2.3  nathanw  3305:        cpu_cpwait();
1.30.2.6  nathanw  3306:        rv = 1;
                   3307:  out:
                   3308:        pmap_unmap_ptes(pmap);                  /* unlocks pmap */
                   3309:        PMAP_MAP_TO_HEAD_UNLOCK();
                   3310:        return (rv);
1.30.2.2  thorpej  3311: }
                   3312:
                   3313: int
1.30.2.6  nathanw  3314: pmap_handled_emulation(struct pmap *pmap, vaddr_t va)
1.30.2.2  thorpej  3315: {
1.30.2.6  nathanw  3316:        pt_entry_t *ptes;
                   3317:        struct vm_page *pg;
1.30.2.2  thorpej  3318:        paddr_t pa;
1.30.2.6  nathanw  3319:        int rv = 0;
1.30.2.2  thorpej  3320:
                   3321:        PDEBUG(2, printf("pmap_handled_emulation\n"));
                   3322:
1.30.2.6  nathanw  3323:        PMAP_MAP_TO_HEAD_LOCK();
                   3324:        ptes = pmap_map_ptes(pmap);             /* locks pmap */
                   3325:
                   3326:        if (pmap_pde_v(pmap_pde(pmap, va)) == 0) {
                   3327:                PDEBUG(2, printf("L1 PTE invalid\n"));
                   3328:                goto out;
1.30.2.2  thorpej  3329:        }
                   3330:
1.30.2.6  nathanw  3331:        PDEBUG(1, printf("pte=%08x\n", ptes[arm_btop(va)]));
                   3332:
                   3333:        /* Check for invalid pte */
                   3334:        if (l2pte_valid(ptes[arm_btop(va)]) == 0)
                   3335:                goto out;
1.30.2.2  thorpej  3336:
                   3337:        /* This can happen if user code tries to access kernel memory. */
1.30.2.7  nathanw  3338:        if ((ptes[arm_btop(va)] & L2_TYPE_MASK) != L2_TYPE_INV)
1.30.2.6  nathanw  3339:                goto out;
1.30.2.2  thorpej  3340:
                   3341:        /* Extract the physical address of the page */
1.30.2.6  nathanw  3342:        pa = l2pte_pa(ptes[arm_btop(va)]);
                   3343:        if ((pg = PHYS_TO_VM_PAGE(pa)) == NULL)
                   3344:                goto out;
                   3345:
                   3346:        simple_lock(&pg->mdpage.pvh_slock);
1.30.2.2  thorpej  3347:
                   3348:        /*
                   3349:         * OK, we just enable the PTE and mark the attributes as handled.
1.30.2.6  nathanw  3350:         * XXX Should we traverse the PV list and enable all PTEs?
1.30.2.2  thorpej  3351:         */
1.30.2.6  nathanw  3352:        PDEBUG(0,
                   3353:            printf("pmap_handled_emulation: Got a hit va=%08lx pte = %08x\n",
                   3354:            va, ptes[arm_btop(va)]));
1.30.2.7  nathanw  3355:        pg->mdpage.pvh_attrs |= PVF_REF;
1.30.2.6  nathanw  3356:
1.30.2.7  nathanw  3357:        ptes[arm_btop(va)] = (ptes[arm_btop(va)] & ~L2_TYPE_MASK) | L2_S_PROTO;
1.30.2.6  nathanw  3358:        PDEBUG(0, printf("->(%08x)\n", ptes[arm_btop(va)]));
                   3359:
                   3360:        simple_unlock(&pg->mdpage.pvh_slock);
1.30.2.2  thorpej  3361:
                   3362:        cpu_tlb_flushID_SE(va);
1.30.2.3  nathanw  3363:        cpu_cpwait();
1.30.2.6  nathanw  3364:        rv = 1;
                   3365:  out:
                   3366:        pmap_unmap_ptes(pmap);                  /* unlocks pmap */
                   3367:        PMAP_MAP_TO_HEAD_UNLOCK();
                   3368:        return (rv);
1.30.2.2  thorpej  3369: }
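
/*
 * Illustrative sketch only; this is not the real abort handler.  The
 * machine-dependent fault code is expected to try modified emulation for
 * permission (write) faults and handled/referenced emulation for
 * translation faults, falling back to uvm_fault() only when neither
 * routine claims the fault.
 */
#if 0
static int
example_fixup_fault(struct pmap *pm, vaddr_t va, int is_write)
{

	if (is_write && pmap_modified_emulation(pm, va))
		return (1);		/* write fault fixed up */
	if (pmap_handled_emulation(pm, va))
		return (1);		/* reference fault fixed up */
	return (0);			/* let uvm_fault() deal with it */
}
#endif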
                   3370:
                   3371: /*
                   3372:  * pmap_collect: free resources held by a pmap
                   3373:  *
                   3374:  * => optional function.
                   3375:  * => called when a process is swapped out to free memory.
                   3376:  */
                   3377:
                   3378: void
1.30.2.6  nathanw  3379: pmap_collect(struct pmap *pmap)
1.30.2.2  thorpej  3380: {
                   3381: }
                   3382:
                   3383: /*
                   3384:  * Routine:    pmap_procwr
                   3385:  *
                   3386:  * Function:
                   3387:  *     Synchronize caches corresponding to [addr, addr+len) in p.
                   3388:  *
                   3389:  */
                   3390: void
1.30.2.6  nathanw  3391: pmap_procwr(struct proc *p, vaddr_t va, int len)
1.30.2.2  thorpej  3392: {
                   3393:        /* We only need to do anything if it is the current process. */
                   3394:        if (curproc != NULL && p == curproc->l_proc)
1.30.2.5  nathanw  3395:                cpu_icache_sync_range(va, len);
1.30.2.2  thorpej  3396: }
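
/*
 * Illustrative sketch only (hypothetical caller): code that writes
 * instructions into another process's address space (a debugger-style
 * text patch, for example) must synchronize the instruction cache
 * afterwards.
 */
#if 0
static void
example_patch_text(struct proc *p, vaddr_t va, int len)
{

	/* ... after storing the new instructions at [va, va+len) ... */
	pmap_procwr(p, va, len);
}
#endif
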
                   3397: /*
                   3398:  * PTP functions
                   3399:  */
                   3400:
                   3401: /*
                   3402:  * pmap_get_ptp: get a PTP (if there isn't one, allocate a new one)
                   3403:  *
                   3404:  * => pmap should NOT be pmap_kernel()
                   3405:  * => pmap should be locked
                   3406:  */
                   3407:
                   3408: static struct vm_page *
1.30.2.6  nathanw  3409: pmap_get_ptp(struct pmap *pmap, vaddr_t va)
1.30.2.2  thorpej  3410: {
1.30.2.6  nathanw  3411:        struct vm_page *ptp;
1.30.2.2  thorpej  3412:
1.30.2.6  nathanw  3413:        if (pmap_pde_page(pmap_pde(pmap, va))) {
1.30.2.2  thorpej  3414:
1.30.2.6  nathanw  3415:                /* valid... check hint (saves us a PA->PG lookup) */
                   3416:                if (pmap->pm_ptphint &&
1.30.2.7  nathanw  3417:                    (pmap->pm_pdir[pmap_pdei(va)] & L2_S_FRAME) ==
1.30.2.6  nathanw  3418:                    VM_PAGE_TO_PHYS(pmap->pm_ptphint))
                   3419:                        return (pmap->pm_ptphint);
                   3420:                ptp = uvm_pagelookup(&pmap->pm_obj, va);
1.30.2.2  thorpej  3421: #ifdef DIAGNOSTIC
1.30.2.6  nathanw  3422:                if (ptp == NULL)
                   3423:                        panic("pmap_get_ptp: unmanaged user PTP");
1.30.2.2  thorpej  3424: #endif
1.30.2.6  nathanw  3425:                pmap->pm_ptphint = ptp;
                   3426:                return(ptp);
                   3427:        }
1.30.2.2  thorpej  3428:
1.30.2.6  nathanw  3429:        /* allocate a new PTP (updates ptphint) */
                   3430:        return(pmap_alloc_ptp(pmap, va));
1.30.2.2  thorpej  3431: }
                   3432:
                   3433: /*
                   3434:  * pmap_alloc_ptp: allocate a PTP for a PMAP
                   3435:  *
                   3436:  * => pmap should already be locked by caller
                   3437:  * => we use the ptp's wire_count to count the number of active mappings
                   3438:  *     in the PTP (we start it at one to prevent any chance this PTP
                   3439:  *     will ever leak onto the active/inactive queues)
                   3440:  */
                   3441:
                   3442: /*__inline */ static struct vm_page *
1.30.2.6  nathanw  3443: pmap_alloc_ptp(struct pmap *pmap, vaddr_t va)
1.30.2.2  thorpej  3444: {
                   3445:        struct vm_page *ptp;
                   3446:
                   3447:        ptp = uvm_pagealloc(&pmap->pm_obj, va, NULL,
                   3448:                UVM_PGA_USERESERVE|UVM_PGA_ZERO);
1.30.2.6  nathanw  3449:        if (ptp == NULL)
1.30.2.2  thorpej  3450:                return (NULL);
                   3451:
                   3452:        /* got one! */
                   3453:        ptp->flags &= ~PG_BUSY; /* never busy */
                   3454:        ptp->wire_count = 1;    /* no mappings yet */
                   3455:        pmap_map_in_l1(pmap, va, VM_PAGE_TO_PHYS(ptp), TRUE);
                   3456:        pmap->pm_stats.resident_count++;        /* count PTP as resident */
1.30.2.6  nathanw  3457:        pmap->pm_ptphint = ptp;
1.30.2.2  thorpej  3458:        return (ptp);
                   3459: }
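
/*
 * Illustrative sketch only, restating the wire_count convention from the
 * comment above: a PTP's wire_count is 1 (for the PTP itself) plus one
 * per valid mapping it currently holds, so wire_count == 1 means the PTP
 * contains no mappings.
 */
#if 0
static boolean_t
example_ptp_is_empty(struct vm_page *ptp)
{

	return (ptp->wire_count == 1);
}
#endif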
                   3460:
1.30.2.6  nathanw  3461: vaddr_t
                   3462: pmap_growkernel(vaddr_t maxkvaddr)
                   3463: {
                   3464:        struct pmap *kpm = pmap_kernel(), *pm;
                   3465:        int s;
                   3466:        paddr_t ptaddr;
                   3467:        struct vm_page *ptp;
                   3468:
                   3469:        if (maxkvaddr <= pmap_curmaxkvaddr)
                   3470:                goto out;               /* we are OK */
                   3471:        NPDEBUG(PDB_GROWKERN, printf("pmap_growkernel: growing kernel from %lx to %lx\n",
                   3472:                    pmap_curmaxkvaddr, maxkvaddr));
                   3473:
                   3474:        /*
                   3475:         * whoops!   we need to add kernel PTPs
                   3476:         */
                   3477:
                   3478:        s = splhigh();  /* to be safe */
                   3479:        simple_lock(&kpm->pm_obj.vmobjlock);
                   3480:        /* due to the way the arm pmap works we map 4MB at a time */
                   3481:        for (/*null*/ ; pmap_curmaxkvaddr < maxkvaddr;
1.30.2.7  nathanw  3482:             pmap_curmaxkvaddr += 4 * L1_S_SIZE) {
1.30.2.6  nathanw  3483:
                   3484:                if (uvm.page_init_done == FALSE) {
                   3485:
                   3486:                        /*
                   3487:                         * we're growing the kernel pmap early (from
                   3488:                         * uvm_pageboot_alloc()).  this case must be
                   3489:                         * handled a little differently.
                   3490:                         */
                   3491:
                   3492:                        if (uvm_page_physget(&ptaddr) == FALSE)
                   3493:                                panic("pmap_growkernel: out of memory");
                   3494:                        pmap_zero_page(ptaddr);
                   3495:
                   3496:                        /* map this page in */
                   3497:                        pmap_map_in_l1(kpm, pmap_curmaxkvaddr, ptaddr, TRUE);
                   3498:
                   3499:                        /* count PTP as resident */
                   3500:                        kpm->pm_stats.resident_count++;
                   3501:                        continue;
                   3502:                }
                   3503:
                   3504:                /*
                   3505:                 * THIS *MUST* BE CODED SO AS TO WORK IN THE
                   3506:                 * pmap_initialized == FALSE CASE!  WE MAY BE
                   3507:                 * INVOKED WHILE pmap_init() IS RUNNING!
                   3508:                 */
                   3509:
                   3510:                if ((ptp = pmap_alloc_ptp(kpm, pmap_curmaxkvaddr)) == NULL)
                   3511:                        panic("pmap_growkernel: alloc ptp failed");
                   3512:
                   3513:                /* distribute new kernel PTP to all active pmaps */
                   3514:                simple_lock(&pmaps_lock);
                   3515:                LIST_FOREACH(pm, &pmaps, pm_list) {
                   3516:                        pmap_map_in_l1(pm, pmap_curmaxkvaddr,
                   3517:                            VM_PAGE_TO_PHYS(ptp), TRUE);
                   3518:                }
                   3519:
                   3520:                simple_unlock(&pmaps_lock);
                   3521:        }
                   3522:
                   3523:        /*
                   3524:         * Flush the TLB: expensive, but pmap_growkernel() happens
                   3525:         * rarely.
                   3526:         */
                   3527:        cpu_tlb_flushD();
                   3528:        cpu_cpwait();
                   3529:
                   3530:        simple_unlock(&kpm->pm_obj.vmobjlock);
                   3531:        splx(s);
                   3532:
                   3533: out:
                   3534:        return (pmap_curmaxkvaddr);
                   3535: }
                   3536:
1.30.2.7  nathanw  3537: /************************ Utility routines ****************************/
                   3538:
                   3539: /*
                   3540:  * vector_page_setprot:
                   3541:  *
                   3542:  *     Manipulate the protection of the vector page.
                   3543:  */
                   3544: void
                   3545: vector_page_setprot(int prot)
                   3546: {
                   3547:        pt_entry_t *pte;
                   3548:
                   3549:        pte = vtopte(vector_page);
1.30.2.6  nathanw  3550:
1.30.2.7  nathanw  3551:        *pte = (*pte & ~L2_S_PROT_MASK) | L2_S_PROT(PTE_KERNEL, prot);
                   3552:        cpu_tlb_flushD_SE(vector_page);
                   3553:        cpu_cpwait();
                   3554: }
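
/*
 * Illustrative sketch only (hypothetical caller): once the vectors have
 * been copied into the vector page, the MD code can use this to drop the
 * page back to a read-only kernel mapping.  Note that vtopte() above
 * yields an L2 small-page PTE, which is why the L2 protection macros are
 * used.
 */
#if 0
static void
example_lock_vectors(void)
{

	vector_page_setprot(VM_PROT_READ);
}
#endif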
1.30.2.6  nathanw  3555:
1.30.2.5  nathanw  3556: /************************ Bootstrapping routines ****************************/
                   3557:
                   3558: /*
                   3559:  * This list exists for the benefit of pmap_map_chunk().  It keeps track
                   3560:  * of the kernel L2 tables during bootstrap, so that pmap_map_chunk() can
                   3561:  * find them as necessary.
                   3562:  *
                   3563:  * Note that the data on this list is not valid after initarm() returns.
                   3564:  */
                   3565: SLIST_HEAD(, pv_addr) kernel_pt_list = SLIST_HEAD_INITIALIZER(kernel_pt_list);
                   3566:
                   3567: static vaddr_t
                   3568: kernel_pt_lookup(paddr_t pa)
                   3569: {
                   3570:        pv_addr_t *pv;
                   3571:
                   3572:        SLIST_FOREACH(pv, &kernel_pt_list, pv_list) {
                   3573:                if (pv->pv_pa == pa)
                   3574:                        return (pv->pv_va);
                   3575:        }
                   3576:        return (0);
                   3577: }
                   3578:
                   3579: /*
                   3580:  * pmap_map_section:
                   3581:  *
                   3582:  *     Create a single section mapping.
                   3583:  */
                   3584: void
                   3585: pmap_map_section(vaddr_t l1pt, vaddr_t va, paddr_t pa, int prot, int cache)
                   3586: {
                   3587:        pd_entry_t *pde = (pd_entry_t *) l1pt;
1.30.2.7  nathanw  3588:        pd_entry_t fl = (cache == PTE_CACHE) ? pte_l1_s_cache_mode : 0;
1.30.2.5  nathanw  3589:
1.30.2.7  nathanw  3590:        KASSERT(((va | pa) & L1_S_OFFSET) == 0);
1.30.2.5  nathanw  3591:
1.30.2.7  nathanw  3592:        pde[va >> L1_S_SHIFT] = L1_S_PROTO | pa |
                   3593:            L1_S_PROT(PTE_KERNEL, prot) | fl;
1.30.2.5  nathanw  3594: }
                   3595:
                   3596: /*
                   3597:  * pmap_map_entry:
                   3598:  *
                   3599:  *     Create a single page mapping.
                   3600:  */
                   3601: void
                   3602: pmap_map_entry(vaddr_t l1pt, vaddr_t va, paddr_t pa, int prot, int cache)
                   3603: {
                   3604:        pd_entry_t *pde = (pd_entry_t *) l1pt;
1.30.2.7  nathanw  3605:        pt_entry_t fl = (cache == PTE_CACHE) ? pte_l2_s_cache_mode : 0;
1.30.2.5  nathanw  3606:        pt_entry_t *pte;
                   3607:
                   3608:        KASSERT(((va | pa) & PGOFSET) == 0);
                   3609:
1.30.2.7  nathanw  3610:        if ((pde[va >> L1_S_SHIFT] & L1_TYPE_MASK) != L1_TYPE_C)
1.30.2.5  nathanw  3611:                panic("pmap_map_entry: no L2 table for VA 0x%08lx", va);
                   3612:
                   3613:        pte = (pt_entry_t *)
1.30.2.7  nathanw  3614:            kernel_pt_lookup(pde[va >> L1_S_SHIFT] & L2_S_FRAME);
1.30.2.5  nathanw  3615:        if (pte == NULL)
                   3616:                panic("pmap_map_entry: can't find L2 table for VA 0x%08lx", va);
                   3617:
1.30.2.7  nathanw  3618:        pte[(va >> PGSHIFT) & 0x3ff] = L2_S_PROTO | pa |
                   3619:            L2_S_PROT(PTE_KERNEL, prot) | fl;
1.30.2.5  nathanw  3620: }
                   3621:
                   3622: /*
                   3623:  * pmap_link_l2pt:
                   3624:  *
                   3625:  *     Link the L2 page table specified by "pa" into the L1
                   3626:  *     page table at the slot for "va".
                   3627:  */
                   3628: void
                   3629: pmap_link_l2pt(vaddr_t l1pt, vaddr_t va, pv_addr_t *l2pv)
                   3630: {
                   3631:        pd_entry_t *pde = (pd_entry_t *) l1pt;
1.30.2.7  nathanw  3632:        u_int slot = va >> L1_S_SHIFT;
1.30.2.5  nathanw  3633:
                   3634:        KASSERT((l2pv->pv_pa & PGOFSET) == 0);
                   3635:
1.30.2.7  nathanw  3636:        pde[slot + 0] = L1_C_PROTO | (l2pv->pv_pa + 0x000);
                   3637:        pde[slot + 1] = L1_C_PROTO | (l2pv->pv_pa + 0x400);
                   3638:        pde[slot + 2] = L1_C_PROTO | (l2pv->pv_pa + 0x800);
                   3639:        pde[slot + 3] = L1_C_PROTO | (l2pv->pv_pa + 0xc00);
1.30.2.5  nathanw  3640:
                   3641:        SLIST_INSERT_HEAD(&kernel_pt_list, l2pv, pv_list);
                   3642: }
                   3643:
                   3644: /*
                   3645:  * pmap_map_chunk:
                   3646:  *
                   3647:  *     Map a chunk of memory using the most efficient mappings
                   3648:  *     possible (section, large page, small page) into the
                   3649:  *     provided L1 and L2 tables at the specified virtual address.
                   3650:  */
                   3651: vsize_t
                   3652: pmap_map_chunk(vaddr_t l1pt, vaddr_t va, paddr_t pa, vsize_t size,
                   3653:     int prot, int cache)
                   3654: {
                   3655:        pd_entry_t *pde = (pd_entry_t *) l1pt;
1.30.2.7  nathanw  3656:        pt_entry_t *pte, fl;
1.30.2.5  nathanw  3657:        vsize_t resid;
                   3658:        int i;
                   3659:
                   3660:        resid = (size + (NBPG - 1)) & ~(NBPG - 1);
                   3661:
                   3662:        if (l1pt == 0)
                   3663:                panic("pmap_map_chunk: no L1 table provided");
                   3664:
                   3665: #ifdef VERBOSE_INIT_ARM
                   3666:        printf("pmap_map_chunk: pa=0x%lx va=0x%lx size=0x%lx resid=0x%lx "
                   3667:            "prot=0x%x cache=%d\n", pa, va, size, resid, prot, cache);
                   3668: #endif
                   3669:
                   3670:        size = resid;
                   3671:
                   3672:        while (resid > 0) {
                   3673:                /* See if we can use a section mapping. */
1.30.2.7  nathanw  3674:                if (((pa | va) & L1_S_OFFSET) == 0 &&
                   3675:                    resid >= L1_S_SIZE) {
                   3676:                        fl = (cache == PTE_CACHE) ? pte_l1_s_cache_mode : 0;
1.30.2.5  nathanw  3677: #ifdef VERBOSE_INIT_ARM
                   3678:                        printf("S");
                   3679: #endif
1.30.2.7  nathanw  3680:                        pde[va >> L1_S_SHIFT] = L1_S_PROTO | pa |
                   3681:                            L1_S_PROT(PTE_KERNEL, prot) | fl;
                   3682:                        va += L1_S_SIZE;
                   3683:                        pa += L1_S_SIZE;
                   3684:                        resid -= L1_S_SIZE;
1.30.2.5  nathanw  3685:                        continue;
                   3686:                }
                   3687:
                   3688:                /*
                   3689:                 * Ok, we're going to use an L2 table.  Make sure
                   3690:                 * one is actually in the corresponding L1 slot
                   3691:                 * for the current VA.
                   3692:                 */
1.30.2.7  nathanw  3693:                if ((pde[va >> L1_S_SHIFT] & L1_TYPE_MASK) != L1_TYPE_C)
1.30.2.5  nathanw  3694:                        panic("pmap_map_chunk: no L2 table for VA 0x%08lx", va);
                   3695:
                   3696:                pte = (pt_entry_t *)
1.30.2.7  nathanw  3697:                    kernel_pt_lookup(pde[va >> L1_S_SHIFT] & L2_S_FRAME);
1.30.2.5  nathanw  3698:                if (pte == NULL)
                   3699:                        panic("pmap_map_chunk: can't find L2 table for VA"
                   3700:                            "0x%08lx", va);
                   3701:
                   3702:                /* See if we can use a L2 large page mapping. */
1.30.2.7  nathanw  3703:                if (((pa | va) & L2_L_OFFSET) == 0 &&
                   3704:                    resid >= L2_L_SIZE) {
                   3705:                        fl = (cache == PTE_CACHE) ? pte_l2_l_cache_mode : 0;
1.30.2.5  nathanw  3706: #ifdef VERBOSE_INIT_ARM
                   3707:                        printf("L");
                   3708: #endif
                   3709:                        for (i = 0; i < 16; i++) {
                   3710:                                pte[((va >> PGSHIFT) & 0x3f0) + i] =
1.30.2.7  nathanw  3711:                                    L2_L_PROTO | pa |
                   3712:                                    L2_L_PROT(PTE_KERNEL, prot) | fl;
1.30.2.5  nathanw  3713:                        }
1.30.2.7  nathanw  3714:                        va += L2_L_SIZE;
                   3715:                        pa += L2_L_SIZE;
                   3716:                        resid -= L2_L_SIZE;
1.30.2.5  nathanw  3717:                        continue;
                   3718:                }
                   3719:
                   3720:                /* Use a small page mapping. */
1.30.2.7  nathanw  3721:                fl = (cache == PTE_CACHE) ? pte_l2_s_cache_mode : 0;
1.30.2.5  nathanw  3722: #ifdef VERBOSE_INIT_ARM
                   3723:                printf("P");
                   3724: #endif
1.30.2.7  nathanw  3725:                pte[(va >> PGSHIFT) & 0x3ff] = L2_S_PROTO | pa |
                   3726:                    L2_S_PROT(PTE_KERNEL, prot) | fl;
1.30.2.5  nathanw  3727:                va += NBPG;
                   3728:                pa += NBPG;
                   3729:                resid -= NBPG;
                   3730:        }
                   3731: #ifdef VERBOSE_INIT_ARM
                   3732:        printf("\n");
                   3733: #endif
                   3734:        return (size);
                   3735: }
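
/*
 * Illustrative bootstrap sketch only, in the style of an initarm(); the
 * L2 table, start address and size below are hypothetical, port-specific
 * values.  pmap_link_l2pt() both hooks the L2 table into the L1 and
 * records it on kernel_pt_list so that pmap_map_chunk() can find it for
 * any non-section mappings.
 */
#if 0
static void
example_initarm_mappings(vaddr_t l1pagetable)
{
	extern pv_addr_t example_kernel_l2pt;	/* hypothetical, already-allocated L2 table */
	paddr_t physical_start = 0xa0000000;	/* hypothetical */
	vsize_t kernel_size = 0x00400000;	/* hypothetical */

	/* Hook the L2 table into the L1 slots covering KERNEL_BASE. */
	pmap_link_l2pt(l1pagetable, KERNEL_BASE, &example_kernel_l2pt);

	/* Map the kernel image with the largest page sizes that fit. */
	pmap_map_chunk(l1pagetable, KERNEL_BASE, physical_start,
	    kernel_size, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
}
#endif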
1.30.2.7  nathanw  3736:
                   3737: /********************** PTE initialization routines **************************/
                   3738:
                   3739: /*
                   3740:  * These routines are called when the CPU type is identified to set up
                   3741:  * the PTE prototypes, cache modes, etc.
                   3742:  *
                   3743:  * The variables are always here, just in case LKMs need to reference
                   3744:  * them (though, they shouldn't).
                   3745:  */
                   3746:
                   3747: pt_entry_t     pte_l1_s_cache_mode;
                   3748: pt_entry_t     pte_l1_s_cache_mask;
                   3749:
                   3750: pt_entry_t     pte_l2_l_cache_mode;
                   3751: pt_entry_t     pte_l2_l_cache_mask;
                   3752:
                   3753: pt_entry_t     pte_l2_s_cache_mode;
                   3754: pt_entry_t     pte_l2_s_cache_mask;
                   3755:
                   3756: pt_entry_t     pte_l2_s_prot_u;
                   3757: pt_entry_t     pte_l2_s_prot_w;
                   3758: pt_entry_t     pte_l2_s_prot_mask;
                   3759:
                   3760: pt_entry_t     pte_l1_s_proto;
                   3761: pt_entry_t     pte_l1_c_proto;
                   3762: pt_entry_t     pte_l2_s_proto;
                   3763:
                   3764: void           (*pmap_copy_page_func)(paddr_t, paddr_t);
                   3765: void           (*pmap_zero_page_func)(paddr_t);
                   3766:
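/*
 * Illustrative sketch only (hypothetical, single-flavour kernel): the
 * early CPU-identification code is expected to call exactly one of the
 * pmap_pte_init_*() routines below, before the pmap creates any mappings
 * that depend on these prototypes and cache modes.
 */
#if 0
static void
example_select_pte_flavour(void)
{

	pmap_pte_init_generic();
}
#endif
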
                   3767: #if ARM_MMU_GENERIC == 1
                   3768: void
                   3769: pmap_pte_init_generic(void)
                   3770: {
                   3771:
                   3772:        pte_l1_s_cache_mode = L1_S_B|L1_S_C;
                   3773:        pte_l1_s_cache_mask = L1_S_CACHE_MASK_generic;
                   3774:
                   3775:        pte_l2_l_cache_mode = L2_B|L2_C;
                   3776:        pte_l2_l_cache_mask = L2_L_CACHE_MASK_generic;
                   3777:
                   3778:        pte_l2_s_cache_mode = L2_B|L2_C;
                   3779:        pte_l2_s_cache_mask = L2_S_CACHE_MASK_generic;
                   3780:
                   3781:        pte_l2_s_prot_u = L2_S_PROT_U_generic;
                   3782:        pte_l2_s_prot_w = L2_S_PROT_W_generic;
                   3783:        pte_l2_s_prot_mask = L2_S_PROT_MASK_generic;
                   3784:
                   3785:        pte_l1_s_proto = L1_S_PROTO_generic;
                   3786:        pte_l1_c_proto = L1_C_PROTO_generic;
                   3787:        pte_l2_s_proto = L2_S_PROTO_generic;
                   3788:
                   3789:        pmap_copy_page_func = pmap_copy_page_generic;
                   3790:        pmap_zero_page_func = pmap_zero_page_generic;
                   3791: }
                   3792:
                   3793: #if defined(CPU_ARM9)
                   3794: void
                   3795: pmap_pte_init_arm9(void)
                   3796: {
                   3797:
                   3798:        /*
                   3799:         * ARM9 is compatible with generic, but we want to use
                   3800:         * write-through caching for now.
                   3801:         */
                   3802:        pmap_pte_init_generic();
                   3803:
                   3804:        pte_l1_s_cache_mode = L1_S_C;
                   3805:        pte_l2_l_cache_mode = L2_C;
                   3806:        pte_l2_s_cache_mode = L2_C;
                   3807: }
                   3808: #endif /* CPU_ARM9 */
                   3809: #endif /* ARM_MMU_GENERIC == 1 */
                   3810:
                   3811: #if ARM_MMU_XSCALE == 1
                   3812: void
                   3813: pmap_pte_init_xscale(void)
                   3814: {
1.30.2.8! nathanw  3815:        uint32_t auxctl;
1.30.2.7  nathanw  3816:
1.30.2.8! nathanw  3817:        pte_l1_s_cache_mode = L1_S_B|L1_S_C;
1.30.2.7  nathanw  3818:        pte_l1_s_cache_mask = L1_S_CACHE_MASK_xscale;
                   3819:
1.30.2.8! nathanw  3820:        pte_l2_l_cache_mode = L2_B|L2_C;
1.30.2.7  nathanw  3821:        pte_l2_l_cache_mask = L2_L_CACHE_MASK_xscale;
                   3822:
1.30.2.8! nathanw  3823:        pte_l2_s_cache_mode = L2_B|L2_C;
1.30.2.7  nathanw  3824:        pte_l2_s_cache_mask = L2_S_CACHE_MASK_xscale;
                   3825:
                   3826: #ifdef XSCALE_CACHE_WRITE_THROUGH
                   3827:        /*
                   3828:         * Some versions of the XScale core have various bugs in
                   3829:         * their cache units, the work-around for which is to run
                   3830:         * the cache in write-through mode.  Unfortunately, this
                   3831:         * has a major (negative) impact on performance.  So, we
                   3832:         * go ahead and run fast-and-loose, in the hopes that we
                   3833:         * don't line up the planets in a way that will trip the
                   3834:         * bugs.
                   3835:         *
                   3836:         * However, we give you the option to be slow-but-correct.
                   3837:         */
                   3838:        pte_l1_s_cache_mode = L1_S_C;
                   3839:        pte_l2_l_cache_mode = L2_C;
                   3840:        pte_l2_s_cache_mode = L2_C;
                   3841: #endif /* XSCALE_CACHE_WRITE_THROUGH */
                   3842:
                   3843:        pte_l2_s_prot_u = L2_S_PROT_U_xscale;
                   3844:        pte_l2_s_prot_w = L2_S_PROT_W_xscale;
                   3845:        pte_l2_s_prot_mask = L2_S_PROT_MASK_xscale;
                   3846:
                   3847:        pte_l1_s_proto = L1_S_PROTO_xscale;
                   3848:        pte_l1_c_proto = L1_C_PROTO_xscale;
                   3849:        pte_l2_s_proto = L2_S_PROTO_xscale;
                   3850:
                   3851:        pmap_copy_page_func = pmap_copy_page_xscale;
                   3852:        pmap_zero_page_func = pmap_zero_page_xscale;
1.30.2.8! nathanw  3853:
        !          3854:        /*
        !          3855:         * Disable ECC protection of page table access, for now.
        !          3856:         */
        !          3857:        __asm __volatile("mrc p15, 0, %0, c1, c0, 1"
        !          3858:                : "=r" (auxctl));
        !          3859:        auxctl &= ~XSCALE_AUXCTL_P;
        !          3860:        __asm __volatile("mcr p15, 0, %0, c1, c0, 1"
        !          3861:                :
        !          3862:                : "r" (auxctl));
1.30.2.7  nathanw  3863: }
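
/*
 * The slow-but-correct behaviour described above is selected at build
 * time; for example (assuming the option is wired up through the usual
 * kernel config "options" mechanism):
 *
 *	options 	XSCALE_CACHE_WRITE_THROUGH
 */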
                   3864:
                   3865: /*
                   3866:  * xscale_setup_minidata:
                   3867:  *
                   3868:  *     Set up the mini-data cache clean area.  We require the
                   3869:  *     caller to allocate the right amount of physically and
                   3870:  *     virtually contiguous space.
                   3871:  */
                   3872: void
                   3873: xscale_setup_minidata(vaddr_t l1pt, vaddr_t va, paddr_t pa)
                   3874: {
                   3875:        extern vaddr_t xscale_minidata_clean_addr;
                   3876:        extern vsize_t xscale_minidata_clean_size; /* already initialized */
                   3877:        pd_entry_t *pde = (pd_entry_t *) l1pt;
                   3878:        pt_entry_t *pte;
                   3879:        vsize_t size;
1.30.2.8! nathanw  3880:        uint32_t auxctl;
1.30.2.7  nathanw  3881:
                   3882:        xscale_minidata_clean_addr = va;
                   3883:
                   3884:        /* Round it to page size. */
                   3885:        size = (xscale_minidata_clean_size + L2_S_OFFSET) & L2_S_FRAME;
                   3886:
                   3887:        for (; size != 0;
                   3888:             va += L2_S_SIZE, pa += L2_S_SIZE, size -= L2_S_SIZE) {
                   3889:                pte = (pt_entry_t *)
                   3890:                    kernel_pt_lookup(pde[va >> L1_S_SHIFT] & L2_S_FRAME);
                   3891:                if (pte == NULL)
                   3892:                        panic("xscale_setup_minidata: can't find L2 table for "
                   3893:                            "VA 0x%08lx", va);
                   3894:                pte[(va >> PGSHIFT) & 0x3ff] = L2_S_PROTO | pa |
                   3895:                    L2_S_PROT(PTE_KERNEL, VM_PROT_READ) |
                   3896:                    L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X);
                   3897:        }
1.30.2.8! nathanw  3898:
        !          3899:        /*
        !          3900:         * Configure the mini-data cache for write-back with
        !          3901:         * read/write-allocate.
        !          3902:         *
        !          3903:         * NOTE: In order to reconfigure the mini-data cache, we must
        !          3904:         * make sure it contains no valid data!  In order to do that,
        !          3905:         * we must issue a global data cache invalidate command!
        !          3906:         *
        !          3907:         * WE ASSUME WE ARE RUNNING UN-CACHED WHEN THIS ROUTINE IS CALLED!
        !          3908:         * THIS IS VERY IMPORTANT!
        !          3909:         */
        !          3910:
        !          3911:        /* Invalidate data and mini-data. */
        !          3912:        __asm __volatile("mcr p15, 0, %0, c7, c6, 0"
        !          3913:                :
        !          3914:                : "r" (0));     /* Rd is ignored (should be zero) */
        !          3915:
        !          3917:        __asm __volatile("mrc p15, 0, %0, c1, c0, 1"
        !          3918:                : "=r" (auxctl));
        !          3919:        auxctl = (auxctl & ~XSCALE_AUXCTL_MD_MASK) | XSCALE_AUXCTL_MD_WB_RWA;
        !          3920:        __asm __volatile("mcr p15, 0, %0, c1, c0, 1"
        !          3921:                :
        !          3922:                : "r" (auxctl));
1.30.2.7  nathanw  3923: }
                   3924: #endif /* ARM_MMU_XSCALE == 1 */
