
Annotation of src/sys/arch/arm/arm32/pmap.c, Revision 1.46

1.46    ! thorpej     1: /*     $NetBSD: pmap.c,v 1.45 2002/02/21 06:36:11 thorpej Exp $        */
1.12      chris       2:
                      3: /*
                      4:  * Copyright (c) 2001 Richard Earnshaw
                      5:  * Copyright (c) 2001 Christopher Gilbert
                      6:  * All rights reserved.
                      7:  *
                      8:  * 1. Redistributions of source code must retain the above copyright
                      9:  *    notice, this list of conditions and the following disclaimer.
                     10:  * 2. Redistributions in binary form must reproduce the above copyright
                     11:  *    notice, this list of conditions and the following disclaimer in the
                     12:  *    documentation and/or other materials provided with the distribution.
                     13:  * 3. The name of the company nor the name of the author may be used to
                     14:  *    endorse or promote products derived from this software without specific
                     15:  *    prior written permission.
                     16:  *
                     17:  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
                     18:  * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
                     19:  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
                     20:  * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
                     21:  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
                     22:  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
                     23:  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
                     24:  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
                     25:  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
                     26:  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
                     27:  * SUCH DAMAGE.
                     28:  */
1.1       matt       29:
                     30: /*-
                     31:  * Copyright (c) 1999 The NetBSD Foundation, Inc.
                     32:  * All rights reserved.
                     33:  *
                     34:  * This code is derived from software contributed to The NetBSD Foundation
                     35:  * by Charles M. Hannum.
                     36:  *
                     37:  * Redistribution and use in source and binary forms, with or without
                     38:  * modification, are permitted provided that the following conditions
                     39:  * are met:
                     40:  * 1. Redistributions of source code must retain the above copyright
                     41:  *    notice, this list of conditions and the following disclaimer.
                     42:  * 2. Redistributions in binary form must reproduce the above copyright
                     43:  *    notice, this list of conditions and the following disclaimer in the
                     44:  *    documentation and/or other materials provided with the distribution.
                     45:  * 3. All advertising materials mentioning features or use of this software
                     46:  *    must display the following acknowledgement:
                     47:  *        This product includes software developed by the NetBSD
                     48:  *        Foundation, Inc. and its contributors.
                     49:  * 4. Neither the name of The NetBSD Foundation nor the names of its
                     50:  *    contributors may be used to endorse or promote products derived
                     51:  *    from this software without specific prior written permission.
                     52:  *
                     53:  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
                     54:  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
                     55:  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
                     56:  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
                     57:  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
                     58:  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
                     59:  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
                     60:  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
                     61:  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
                     62:  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
                     63:  * POSSIBILITY OF SUCH DAMAGE.
                     64:  */
                     65:
                     66: /*
                     67:  * Copyright (c) 1994-1998 Mark Brinicombe.
                     68:  * Copyright (c) 1994 Brini.
                     69:  * All rights reserved.
                     70:  *
                     71:  * This code is derived from software written for Brini by Mark Brinicombe
                     72:  *
                     73:  * Redistribution and use in source and binary forms, with or without
                     74:  * modification, are permitted provided that the following conditions
                     75:  * are met:
                     76:  * 1. Redistributions of source code must retain the above copyright
                     77:  *    notice, this list of conditions and the following disclaimer.
                     78:  * 2. Redistributions in binary form must reproduce the above copyright
                     79:  *    notice, this list of conditions and the following disclaimer in the
                     80:  *    documentation and/or other materials provided with the distribution.
                     81:  * 3. All advertising materials mentioning features or use of this software
                     82:  *    must display the following acknowledgement:
                     83:  *     This product includes software developed by Mark Brinicombe.
                     84:  * 4. The name of the author may not be used to endorse or promote products
                     85:  *    derived from this software without specific prior written permission.
                     86:  *
                     87:  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
                     88:  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
                     89:  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
                     90:  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
                     91:  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
                     92:  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
                     93:  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
                     94:  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
                     95:  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
                     96:  *
                     97:  * RiscBSD kernel project
                     98:  *
                     99:  * pmap.c
                    100:  *
                     101:  * Machine dependent vm stuff
                    102:  *
                    103:  * Created      : 20/09/94
                    104:  */
                    105:
                    106: /*
                    107:  * Performance improvements, UVM changes, overhauls and part-rewrites
                    108:  * were contributed by Neil A. Carson <neil@causality.com>.
                    109:  */
                    110:
                    111: /*
                    112:  * The dram block info is currently referenced from the bootconfig.
                    113:  * This should be placed in a separate structure.
                    114:  */
                    115:
                    116: /*
                    117:  * Special compilation symbols
                    118:  * PMAP_DEBUG          - Build in pmap_debug_level code
                    119:  */
                    120:
                    121: /* Include header files */
                    122:
                    123: #include "opt_pmap_debug.h"
                    124: #include "opt_ddb.h"
                    125:
                    126: #include <sys/types.h>
                    127: #include <sys/param.h>
                    128: #include <sys/kernel.h>
                    129: #include <sys/systm.h>
                    130: #include <sys/proc.h>
                    131: #include <sys/malloc.h>
                    132: #include <sys/user.h>
1.10      chris     133: #include <sys/pool.h>
1.16      chris     134: #include <sys/cdefs.h>
                    135:
1.1       matt      136: #include <uvm/uvm.h>
                    137:
                    138: #include <machine/bootconfig.h>
                    139: #include <machine/bus.h>
                    140: #include <machine/pmap.h>
                    141: #include <machine/pcb.h>
                    142: #include <machine/param.h>
1.32      thorpej   143: #include <arm/arm32/katelib.h>
1.16      chris     144:
1.46    ! thorpej   145: __KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.45 2002/02/21 06:36:11 thorpej Exp $");
1.1       matt      146: #ifdef PMAP_DEBUG
                    147: #define        PDEBUG(_lev_,_stat_) \
                    148:        if (pmap_debug_level >= (_lev_)) \
                    149:                ((_stat_))
                    150: int pmap_debug_level = -2;
1.17      chris     151:
                    152: /*
                    153:  * for switching to potentially finer grained debugging
                    154:  */
                    155: #define        PDB_FOLLOW      0x0001
                    156: #define        PDB_INIT        0x0002
                    157: #define        PDB_ENTER       0x0004
                    158: #define        PDB_REMOVE      0x0008
                    159: #define        PDB_CREATE      0x0010
                    160: #define        PDB_PTPAGE      0x0020
                    161: #define        PDB_ASN         0x0040
                    162: #define        PDB_BITS        0x0080
                    163: #define        PDB_COLLECT     0x0100
                    164: #define        PDB_PROTECT     0x0200
                    165: #define        PDB_BOOTSTRAP   0x1000
                    166: #define        PDB_PARANOIA    0x2000
                    167: #define        PDB_WIRING      0x4000
                    168: #define        PDB_PVDUMP      0x8000
                    169:
                    170: int debugmap = 0;
                    171: int pmapdebug = PDB_PARANOIA | PDB_FOLLOW;
                    172: #define        NPDEBUG(_lev_,_stat_) \
                    173:        if (pmapdebug & (_lev_)) \
                    174:                ((_stat_))
                    175:
1.1       matt      176: #else  /* PMAP_DEBUG */
                    177: #define        PDEBUG(_lev_,_stat_) /* Nothing */
1.17      chris     178: #define NPDEBUG(_lev_,_stat_) /* Nothing */
1.1       matt      179: #endif /* PMAP_DEBUG */
                    180:
                    181: struct pmap     kernel_pmap_store;
                    182:
1.10      chris     183: /*
                    184:  * pool that pmap structures are allocated from
                    185:  */
                    186:
                    187: struct pool pmap_pmap_pool;
                    188:
1.1       matt      189: pagehook_t page_hook0;
                    190: pagehook_t page_hook1;
                    191: char *memhook;
                    192: pt_entry_t msgbufpte;
                    193: extern caddr_t msgbufaddr;
                    194:
                    195: boolean_t pmap_initialized = FALSE;    /* Has pmap_init completed? */
1.17      chris     196: /*
                    197:  * locking data structures
                    198:  */
1.1       matt      199:
1.17      chris     200: static struct lock pmap_main_lock;
                    201: static struct simplelock pvalloc_lock;
                    202: #ifdef LOCKDEBUG
                    203: #define PMAP_MAP_TO_HEAD_LOCK() \
                    204:      (void) spinlockmgr(&pmap_main_lock, LK_SHARED, NULL)
                    205: #define PMAP_MAP_TO_HEAD_UNLOCK() \
                    206:      (void) spinlockmgr(&pmap_main_lock, LK_RELEASE, NULL)
                    207:
                    208: #define PMAP_HEAD_TO_MAP_LOCK() \
                    209:      (void) spinlockmgr(&pmap_main_lock, LK_EXCLUSIVE, NULL)
                    210: #define PMAP_HEAD_TO_MAP_UNLOCK() \
                    211:      (void) spinlockmgr(&pmap_main_lock, LK_RELEASE, NULL)
                    212: #else
                    213: #define        PMAP_MAP_TO_HEAD_LOCK()         /* nothing */
                    214: #define        PMAP_MAP_TO_HEAD_UNLOCK()       /* nothing */
                    215: #define        PMAP_HEAD_TO_MAP_LOCK()         /* nothing */
                    216: #define        PMAP_HEAD_TO_MAP_UNLOCK()       /* nothing */
                    217: #endif /* LOCKDEBUG */
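/*
 * In short: map->pmap->pv_head traversals take pmap_main_lock shared,
 * while pv_head->pmap traversals take it exclusive, so the two lock
 * orderings are never in use at the same time and cannot deadlock
 * against each other.  With LOCKDEBUG off the macros expand to nothing.
 */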
                    218:
                    219: /*
                    220:  * pv_page management structures: locked by pvalloc_lock
                    221:  */
1.1       matt      222:
1.17      chris     223: TAILQ_HEAD(pv_pagelist, pv_page);
                    224: static struct pv_pagelist pv_freepages;        /* list of pv_pages with free entrys */
                    225: static struct pv_pagelist pv_unusedpgs; /* list of unused pv_pages */
                    226: static int pv_nfpvents;                        /* # of free pv entries */
                    227: static struct pv_page *pv_initpage;    /* bootstrap page from kernel_map */
                    228: static vaddr_t pv_cachedva;            /* cached VA for later use */
                    229:
                    230: #define PVE_LOWAT (PVE_PER_PVPAGE / 2) /* free pv_entry low water mark */
                    231: #define PVE_HIWAT (PVE_LOWAT + (PVE_PER_PVPAGE * 2))
                    232:                                        /* high water mark */
                    233:
                    234: /*
                    235:  * local prototypes
                    236:  */
                    237:
                    238: static struct pv_entry *pmap_add_pvpage __P((struct pv_page *, boolean_t));
                    239: static struct pv_entry *pmap_alloc_pv __P((struct pmap *, int)); /* see codes below */
                    240: #define ALLOCPV_NEED   0       /* need PV now */
                    241: #define ALLOCPV_TRY    1       /* just try to allocate, don't steal */
                    242: #define ALLOCPV_NONEED 2       /* don't need PV, just growing cache */
                    243: static struct pv_entry *pmap_alloc_pvpage __P((struct pmap *, int));
1.39      thorpej   244: static void             pmap_enter_pv __P((struct pv_head *,
1.17      chris     245:                                            struct pv_entry *, struct pmap *,
                    246:                                            vaddr_t, struct vm_page *, int));
                    247: static void             pmap_free_pv __P((struct pmap *, struct pv_entry *));
                    248: static void             pmap_free_pvs __P((struct pmap *, struct pv_entry *));
                    249: static void             pmap_free_pv_doit __P((struct pv_entry *));
                    250: static void             pmap_free_pvpage __P((void));
                    251: static boolean_t        pmap_is_curpmap __P((struct pmap *));
1.39      thorpej   252: static struct pv_entry *pmap_remove_pv __P((struct pv_head *, struct pmap *,
1.17      chris     253:                        vaddr_t));
                    254: #define PMAP_REMOVE_ALL                0       /* remove all mappings */
                    255: #define PMAP_REMOVE_SKIPWIRED  1       /* skip wired mappings */
1.1       matt      256:
1.39      thorpej   257: static u_int pmap_modify_pv __P((struct pmap *, vaddr_t, struct pv_head *,
1.33      chris     258:        u_int, u_int));
                    259:
                    260: static void pmap_free_l1pt __P((struct l1pt *));
                    261: static int pmap_allocpagedir __P((struct pmap *));
                    262: static int pmap_clean_page __P((struct pv_entry *, boolean_t));
1.39      thorpej   263: static struct pv_head *pmap_find_pvh __P((paddr_t));
                    264: static void pmap_remove_all __P((paddr_t));
1.33      chris     265:
                    266:
1.2       matt      267: vsize_t npages;
1.1       matt      268:
1.17      chris     269: static struct vm_page  *pmap_alloc_ptp __P((struct pmap *, vaddr_t, boolean_t));
                    270: static struct vm_page  *pmap_get_ptp __P((struct pmap *, vaddr_t, boolean_t));
1.39      thorpej   271: __inline static void pmap_clearbit __P((paddr_t, unsigned int));
                    272: __inline static boolean_t pmap_testbit __P((paddr_t, unsigned int));
1.17      chris     273:
1.2       matt      274: extern paddr_t physical_start;
                    275: extern paddr_t physical_freestart;
                    276: extern paddr_t physical_end;
                    277: extern paddr_t physical_freeend;
1.1       matt      278: extern unsigned int free_pages;
                    279: extern int max_processes;
                    280:
                    281: vaddr_t virtual_start;
                    282: vaddr_t virtual_end;
                    283:
                    284: vaddr_t avail_start;
                    285: vaddr_t avail_end;
                    286:
                    287: extern pv_addr_t systempage;
                    288:
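/*
 * Carve one page hook out of the kernel virtual address space: record the
 * current virtual_start as the hook's VA, look up the kernel PTE that maps
 * that VA, then advance virtual_start by s bytes.
 */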
                    289: #define ALLOC_PAGE_HOOK(x, s) \
                    290:        x.va = virtual_start; \
1.15      chris     291:        x.pte = (pt_entry_t *)pmap_pte(pmap_kernel(), virtual_start); \
1.1       matt      292:        virtual_start += s;
                    293:
                    294: /* Variables used by the L1 page table queue code */
                    295: SIMPLEQ_HEAD(l1pt_queue, l1pt);
                    296: struct l1pt_queue l1pt_static_queue;   /* head of our static l1 queue */
                    297: int l1pt_static_queue_count;           /* items in the static l1 queue */
                    298: int l1pt_static_create_count;          /* static l1 items created */
                    299: struct l1pt_queue l1pt_queue;          /* head of our l1 queue */
                    300: int l1pt_queue_count;                  /* items in the l1 queue */
                    301: int l1pt_create_count;                 /* stat - L1's create count */
                    302: int l1pt_reuse_count;                  /* stat - L1's reused count */
                    303:
                    304: /* Local function prototypes (not used outside this file) */
1.15      chris     305: pt_entry_t *pmap_pte __P((struct pmap *pmap, vaddr_t va));
1.39      thorpej   306: void pmap_copy_on_write __P((paddr_t pa));
1.15      chris     307: void pmap_pinit __P((struct pmap *));
                    308: void pmap_freepagedir __P((struct pmap *));
1.1       matt      309:
                    310: /* Other function prototypes */
                    311: extern void bzero_page __P((vaddr_t));
                    312: extern void bcopy_page __P((vaddr_t, vaddr_t));
                    313:
                    314: struct l1pt *pmap_alloc_l1pt __P((void));
1.15      chris     315: static __inline void pmap_map_in_l1 __P((struct pmap *pmap, vaddr_t va,
1.17      chris     316:      vaddr_t l2pa, boolean_t));
1.1       matt      317:
1.11      chris     318: static pt_entry_t *pmap_map_ptes __P((struct pmap *));
1.17      chris     319: static void pmap_unmap_ptes __P((struct pmap *));
1.11      chris     320:
1.39      thorpej   321: __inline static void pmap_vac_me_harder __P((struct pmap *, struct pv_head *,
1.25      rearnsha  322:     pt_entry_t *, boolean_t));
1.39      thorpej   323: static void pmap_vac_me_kpmap __P((struct pmap *, struct pv_head *,
1.25      rearnsha  324:     pt_entry_t *, boolean_t));
1.39      thorpej   325: static void pmap_vac_me_user __P((struct pmap *, struct pv_head *,
1.25      rearnsha  326:     pt_entry_t *, boolean_t));
1.11      chris     327:
1.17      chris     328: /*
1.27      rearnsha  329:  * Cache enable bits in PTE to use on pages that are cacheable.
                     330:  * On most machines this is cacheable/bufferable, but on some, e.g. arm10, we
                     331:  * can choose between write-through and write-back caching.
                    332:  */
                    333: pt_entry_t pte_cache_mode = (PT_C | PT_B);
                    334:
                    335: /*
1.17      chris     336:  * real definition of pv_entry.
                    337:  */
                    338:
                    339: struct pv_entry {
                    340:        struct pv_entry *pv_next;       /* next pv_entry */
                    341:        struct pmap     *pv_pmap;        /* pmap where mapping lies */
                    342:        vaddr_t         pv_va;          /* virtual address for mapping */
                    343:        int             pv_flags;       /* flags */
                    344:        struct vm_page  *pv_ptp;        /* vm_page for the ptp */
                    345: };
                    346:
                    347: /*
                    348:  * pv_entrys are dynamically allocated in chunks from a single page.
                    349:  * we keep track of how many pv_entrys are in use for each page and
                    350:  * we can free pv_entry pages if needed.  there is one lock for the
                    351:  * entire allocation system.
                    352:  */
                    353:
                    354: struct pv_page_info {
                    355:        TAILQ_ENTRY(pv_page) pvpi_list;
                    356:        struct pv_entry *pvpi_pvfree;
                    357:        int pvpi_nfree;
                    358: };
                    359:
                    360: /*
                    361:  * number of pv_entry's in a pv_page
                     362:  * (note: won't work on systems where NBPG isn't a constant)
                    363:  */
                    364:
                    365: #define PVE_PER_PVPAGE ((NBPG - sizeof(struct pv_page_info)) / \
                    366:                        sizeof(struct pv_entry))
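/*
 * For a concrete feel (assuming 4KB pages, i.e. NBPG == 4096, and the
 * 32-bit ARM ABI with 4-byte pointers and no extra padding):
 *
 *	sizeof(struct pv_page_info) == 16, sizeof(struct pv_entry) == 20,
 *	so PVE_PER_PVPAGE == (4096 - 16) / 20 == 204 entries per pv_page.
 */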
                    367:
                    368: /*
                    369:  * a pv_page: where pv_entrys are allocated from
                    370:  */
                    371:
                    372: struct pv_page {
                    373:        struct pv_page_info pvinfo;
                    374:        struct pv_entry pvents[PVE_PER_PVPAGE];
                    375: };
                    376:
1.1       matt      377: #ifdef MYCROFT_HACK
                    378: int mycroft_hack = 0;
                    379: #endif
                    380:
                    381: /* Function to set the debug level of the pmap code */
                    382:
                    383: #ifdef PMAP_DEBUG
                    384: void
                    385: pmap_debug(level)
                    386:        int level;
                    387: {
                    388:        pmap_debug_level = level;
                    389:        printf("pmap_debug: level=%d\n", pmap_debug_level);
                    390: }
                    391: #endif /* PMAP_DEBUG */
                    392:
1.22      chris     393: __inline static boolean_t
1.17      chris     394: pmap_is_curpmap(struct pmap *pmap)
                    395: {
                    396:     if ((curproc && curproc->p_vmspace->vm_map.pmap == pmap)
                    397:            || (pmap == pmap_kernel()))
                    398:        return (TRUE);
                    399:     return (FALSE);
                    400: }
1.1       matt      401: #include "isadma.h"
                    402:
                    403: #if NISADMA > 0
                    404: /*
                    405:  * Used to protect memory for ISA DMA bounce buffers.  If, when loading
                    406:  * pages into the system, memory intersects with any of these ranges,
                    407:  * the intersecting memory will be loaded into a lower-priority free list.
                    408:  */
                    409: bus_dma_segment_t *pmap_isa_dma_ranges;
                    410: int pmap_isa_dma_nranges;
                    411:
1.2       matt      412: boolean_t pmap_isa_dma_range_intersect __P((paddr_t, psize_t,
                    413:            paddr_t *, psize_t *));
1.1       matt      414:
                    415: /*
                    416:  * Check if a memory range intersects with an ISA DMA range, and
                    417:  * return the page-rounded intersection if it does.  The intersection
                    418:  * will be placed on a lower-priority free list.
                    419:  */
                    420: boolean_t
                    421: pmap_isa_dma_range_intersect(pa, size, pap, sizep)
1.2       matt      422:        paddr_t pa;
                    423:        psize_t size;
                    424:        paddr_t *pap;
                    425:        psize_t *sizep;
1.1       matt      426: {
                    427:        bus_dma_segment_t *ds;
                    428:        int i;
                    429:
                    430:        if (pmap_isa_dma_ranges == NULL)
                    431:                return (FALSE);
                    432:
                    433:        for (i = 0, ds = pmap_isa_dma_ranges;
                    434:             i < pmap_isa_dma_nranges; i++, ds++) {
                    435:                if (ds->ds_addr <= pa && pa < (ds->ds_addr + ds->ds_len)) {
                    436:                        /*
                    437:                         * Beginning of region intersects with this range.
                    438:                         */
                    439:                        *pap = trunc_page(pa);
                    440:                        *sizep = round_page(min(pa + size,
                    441:                            ds->ds_addr + ds->ds_len) - pa);
                    442:                        return (TRUE);
                    443:                }
                    444:                if (pa < ds->ds_addr && ds->ds_addr < (pa + size)) {
                    445:                        /*
                    446:                         * End of region intersects with this range.
                    447:                         */
                    448:                        *pap = trunc_page(ds->ds_addr);
                    449:                        *sizep = round_page(min((pa + size) - ds->ds_addr,
                    450:                            ds->ds_len));
                    451:                        return (TRUE);
                    452:                }
                    453:        }
                    454:
                    455:        /*
                    456:         * No intersection found.
                    457:         */
                    458:        return (FALSE);
                    459: }
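/*
 * A worked example with made-up numbers: given one ISA DMA range with
 * ds_addr == 0x00100000 and ds_len == 0x00100000 (1MB), a caller asking
 * about pa == 0x001f0000, size == 0x00040000 hits the first case above:
 *
 *	*pap   = trunc_page(0x001f0000)                        = 0x001f0000
 *	*sizep = round_page(min(0x00230000, 0x00200000) - pa)  = 0x00010000
 *
 * i.e. only the 64KB that actually overlaps the DMA range is reported,
 * and it is that piece which lands on the lower-priority free list.
 */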
                    460: #endif /* NISADMA > 0 */
                    461:
                    462: /*
1.17      chris     463:  * p v _ e n t r y   f u n c t i o n s
                    464:  */
                    465:
                    466: /*
                    467:  * pv_entry allocation functions:
                    468:  *   the main pv_entry allocation functions are:
                    469:  *     pmap_alloc_pv: allocate a pv_entry structure
                    470:  *     pmap_free_pv: free one pv_entry
                    471:  *     pmap_free_pvs: free a list of pv_entrys
                    472:  *
                    473:  * the rest are helper functions
1.1       matt      474:  */
                    475:
                    476: /*
1.17      chris     477:  * pmap_alloc_pv: inline function to allocate a pv_entry structure
                    478:  * => we lock pvalloc_lock
                    479:  * => if we fail, we call out to pmap_alloc_pvpage
                    480:  * => 3 modes:
                    481:  *    ALLOCPV_NEED   = we really need a pv_entry, even if we have to steal it
                    482:  *    ALLOCPV_TRY    = we want a pv_entry, but not enough to steal
                    483:  *    ALLOCPV_NONEED = we are trying to grow our free list, don't really need
                    484:  *                     one now
                    485:  *
                    486:  * "try" is for optional functions like pmap_copy().
1.1       matt      487:  */
1.17      chris     488:
                    489: __inline static struct pv_entry *
                    490: pmap_alloc_pv(pmap, mode)
                    491:        struct pmap *pmap;
                    492:        int mode;
1.1       matt      493: {
1.17      chris     494:        struct pv_page *pvpage;
                    495:        struct pv_entry *pv;
                    496:
                    497:        simple_lock(&pvalloc_lock);
                    498:
                    499:        if (pv_freepages.tqh_first != NULL) {
                    500:                pvpage = pv_freepages.tqh_first;
                    501:                pvpage->pvinfo.pvpi_nfree--;
                    502:                if (pvpage->pvinfo.pvpi_nfree == 0) {
                    503:                        /* nothing left in this one? */
                    504:                        TAILQ_REMOVE(&pv_freepages, pvpage, pvinfo.pvpi_list);
                    505:                }
                    506:                pv = pvpage->pvinfo.pvpi_pvfree;
                    507: #ifdef DIAGNOSTIC
                    508:                if (pv == NULL)
                    509:                        panic("pmap_alloc_pv: pvpi_nfree off");
                    510: #endif
                    511:                pvpage->pvinfo.pvpi_pvfree = pv->pv_next;
                    512:                pv_nfpvents--;  /* took one from pool */
                    513:        } else {
                    514:                pv = NULL;              /* need more of them */
                    515:        }
                    516:
                    517:        /*
                    518:         * if below low water mark or we didn't get a pv_entry we try and
                    519:         * create more pv_entrys ...
                    520:         */
                    521:
                    522:        if (pv_nfpvents < PVE_LOWAT || pv == NULL) {
                    523:                if (pv == NULL)
                    524:                        pv = pmap_alloc_pvpage(pmap, (mode == ALLOCPV_TRY) ?
                    525:                                               mode : ALLOCPV_NEED);
                    526:                else
                    527:                        (void) pmap_alloc_pvpage(pmap, ALLOCPV_NONEED);
                    528:        }
                    529:
                    530:        simple_unlock(&pvalloc_lock);
                    531:        return(pv);
                    532: }
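/*
 * A minimal usage sketch (hypothetical caller, for illustration): code on
 * the pmap_enter() path, which must have a pv_entry, asks with ALLOCPV_NEED
 * and still checks for failure; an optional path would pass ALLOCPV_TRY and
 * simply give up on the mapping when NULL comes back:
 *
 *	struct pv_entry *pve;
 *
 *	pve = pmap_alloc_pv(pmap, ALLOCPV_NEED);
 *	if (pve == NULL)
 *		... caller's out-of-memory recovery ...
 */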
                    533:
                    534: /*
                    535:  * pmap_alloc_pvpage: maybe allocate a new pvpage
                    536:  *
                    537:  * if need_entry is false: try and allocate a new pv_page
                    538:  * if need_entry is true: try and allocate a new pv_page and return a
                    539:  *     new pv_entry from it.   if we are unable to allocate a pv_page
                    540:  *     we make a last ditch effort to steal a pv_page from some other
                    541:  *     mapping.    if that fails, we panic...
                    542:  *
                    543:  * => we assume that the caller holds pvalloc_lock
                    544:  */
                    545:
                    546: static struct pv_entry *
                    547: pmap_alloc_pvpage(pmap, mode)
                    548:        struct pmap *pmap;
                    549:        int mode;
                    550: {
                    551:        struct vm_page *pg;
                    552:        struct pv_page *pvpage;
1.1       matt      553:        struct pv_entry *pv;
1.17      chris     554:        int s;
                    555:
                    556:        /*
                    557:         * if we need_entry and we've got unused pv_pages, allocate from there
                    558:         */
                    559:
                    560:        if (mode != ALLOCPV_NONEED && pv_unusedpgs.tqh_first != NULL) {
                    561:
                    562:                /* move it to pv_freepages list */
                    563:                pvpage = pv_unusedpgs.tqh_first;
                    564:                TAILQ_REMOVE(&pv_unusedpgs, pvpage, pvinfo.pvpi_list);
                    565:                TAILQ_INSERT_HEAD(&pv_freepages, pvpage, pvinfo.pvpi_list);
                    566:
                    567:                /* allocate a pv_entry */
                    568:                pvpage->pvinfo.pvpi_nfree--;    /* can't go to zero */
                    569:                pv = pvpage->pvinfo.pvpi_pvfree;
                    570: #ifdef DIAGNOSTIC
                    571:                if (pv == NULL)
                    572:                        panic("pmap_alloc_pvpage: pvpi_nfree off");
                    573: #endif
                    574:                pvpage->pvinfo.pvpi_pvfree = pv->pv_next;
                    575:
                    576:                pv_nfpvents--;  /* took one from pool */
                    577:                return(pv);
                    578:        }
1.1       matt      579:
                    580:        /*
1.17      chris     581:         *  see if we've got a cached unmapped VA that we can map a page in.
                    582:         * if not, try to allocate one.
1.1       matt      583:         */
                    584:
1.23      chs       585:
1.17      chris     586:        if (pv_cachedva == 0) {
1.23      chs       587:                s = splvm();
                    588:                pv_cachedva = uvm_km_kmemalloc(kmem_map, NULL,
1.17      chris     589:                    PAGE_SIZE, UVM_KMF_TRYLOCK|UVM_KMF_VALLOC);
1.23      chs       590:                splx(s);
1.17      chris     591:                if (pv_cachedva == 0) {
                    592:                        return (NULL);
1.1       matt      593:                }
                    594:        }
1.17      chris     595:
1.23      chs       596:        pg = uvm_pagealloc(NULL, pv_cachedva - vm_map_min(kernel_map), NULL,
                    597:            UVM_PGA_USERESERVE);
1.17      chris     598:        if (pg)
                    599:                pg->flags &= ~PG_BUSY;  /* never busy */
                    600:
                    601:        if (pg == NULL)
                    602:                return (NULL);
                    603:
                    604:        /*
                    605:         * add a mapping for our new pv_page and free its entrys (save one!)
                    606:         *
                    607:         * NOTE: If we are allocating a PV page for the kernel pmap, the
                    608:         * pmap is already locked!  (...but entering the mapping is safe...)
                    609:         */
                    610:
                    611:        pmap_kenter_pa(pv_cachedva, VM_PAGE_TO_PHYS(pg), VM_PROT_ALL);
1.19      chris     612:        pmap_update(pmap_kernel());
1.17      chris     613:        pvpage = (struct pv_page *) pv_cachedva;
                    614:        pv_cachedva = 0;
                    615:        return (pmap_add_pvpage(pvpage, mode != ALLOCPV_NONEED));
1.1       matt      616: }
                    617:
                    618: /*
1.17      chris     619:  * pmap_add_pvpage: add a pv_page's pv_entrys to the free list
                    620:  *
                    621:  * => caller must hold pvalloc_lock
                    622:  * => if need_entry is true, we allocate and return one pv_entry
1.1       matt      623:  */
                    624:
1.17      chris     625: static struct pv_entry *
                    626: pmap_add_pvpage(pvp, need_entry)
                    627:        struct pv_page *pvp;
                    628:        boolean_t need_entry;
1.1       matt      629: {
1.17      chris     630:        int tofree, lcv;
                    631:
                    632:        /* do we need to return one? */
                    633:        tofree = (need_entry) ? PVE_PER_PVPAGE - 1 : PVE_PER_PVPAGE;
1.1       matt      634:
1.17      chris     635:        pvp->pvinfo.pvpi_pvfree = NULL;
                    636:        pvp->pvinfo.pvpi_nfree = tofree;
                    637:        for (lcv = 0 ; lcv < tofree ; lcv++) {
                    638:                pvp->pvents[lcv].pv_next = pvp->pvinfo.pvpi_pvfree;
                    639:                pvp->pvinfo.pvpi_pvfree = &pvp->pvents[lcv];
1.1       matt      640:        }
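	/*
	 * Note that when need_entry is set the loop above stops one short:
	 * pvents[tofree] (the last element) is never linked onto the page's
	 * free list and is handed straight back to the caller below.
	 */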
1.17      chris     641:        if (need_entry)
                    642:                TAILQ_INSERT_TAIL(&pv_freepages, pvp, pvinfo.pvpi_list);
                    643:        else
                    644:                TAILQ_INSERT_TAIL(&pv_unusedpgs, pvp, pvinfo.pvpi_list);
                    645:        pv_nfpvents += tofree;
                    646:        return((need_entry) ? &pvp->pvents[lcv] : NULL);
1.1       matt      647: }
                    648:
1.17      chris     649: /*
                    650:  * pmap_free_pv_doit: actually free a pv_entry
                    651:  *
                    652:  * => do not call this directly!  instead use either
                    653:  *    1. pmap_free_pv ==> free a single pv_entry
                    654:  *    2. pmap_free_pvs => free a list of pv_entrys
                    655:  * => we must be holding pvalloc_lock
                    656:  */
                    657:
                    658: __inline static void
                    659: pmap_free_pv_doit(pv)
                    660:        struct pv_entry *pv;
1.1       matt      661: {
1.17      chris     662:        struct pv_page *pvp;
1.1       matt      663:
1.17      chris     664:        pvp = (struct pv_page *) arm_trunc_page((vaddr_t)pv);
                    665:        pv_nfpvents++;
                    666:        pvp->pvinfo.pvpi_nfree++;
1.1       matt      667:
1.17      chris     668:        /* nfree == 1 => fully allocated page just became partly allocated */
                    669:        if (pvp->pvinfo.pvpi_nfree == 1) {
                    670:                TAILQ_INSERT_HEAD(&pv_freepages, pvp, pvinfo.pvpi_list);
1.1       matt      671:        }
                    672:
1.17      chris     673:        /* free it */
                    674:        pv->pv_next = pvp->pvinfo.pvpi_pvfree;
                    675:        pvp->pvinfo.pvpi_pvfree = pv;
1.1       matt      676:
1.17      chris     677:        /*
                    678:         * are all pv_page's pv_entry's free?  move it to unused queue.
                    679:         */
1.1       matt      680:
1.17      chris     681:        if (pvp->pvinfo.pvpi_nfree == PVE_PER_PVPAGE) {
                    682:                TAILQ_REMOVE(&pv_freepages, pvp, pvinfo.pvpi_list);
                    683:                TAILQ_INSERT_HEAD(&pv_unusedpgs, pvp, pvinfo.pvpi_list);
1.1       matt      684:        }
                    685: }
                    686:
                    687: /*
1.17      chris     688:  * pmap_free_pv: free a single pv_entry
                    689:  *
                    690:  * => we gain the pvalloc_lock
1.1       matt      691:  */
                    692:
1.17      chris     693: __inline static void
                    694: pmap_free_pv(pmap, pv)
1.15      chris     695:        struct pmap *pmap;
1.1       matt      696:        struct pv_entry *pv;
                    697: {
1.17      chris     698:        simple_lock(&pvalloc_lock);
                    699:        pmap_free_pv_doit(pv);
                    700:
                    701:        /*
                    702:         * Can't free the PV page if the PV entries were associated with
                    703:         * the kernel pmap; the pmap is already locked.
                    704:         */
                    705:        if (pv_nfpvents > PVE_HIWAT && pv_unusedpgs.tqh_first != NULL &&
                    706:            pmap != pmap_kernel())
                    707:                pmap_free_pvpage();
                    708:
                    709:        simple_unlock(&pvalloc_lock);
                    710: }
1.1       matt      711:
1.17      chris     712: /*
                    713:  * pmap_free_pvs: free a list of pv_entrys
                    714:  *
                    715:  * => we gain the pvalloc_lock
                    716:  */
1.1       matt      717:
1.17      chris     718: __inline static void
                    719: pmap_free_pvs(pmap, pvs)
                    720:        struct pmap *pmap;
                    721:        struct pv_entry *pvs;
                    722: {
                    723:        struct pv_entry *nextpv;
1.1       matt      724:
1.17      chris     725:        simple_lock(&pvalloc_lock);
1.1       matt      726:
1.17      chris     727:        for ( /* null */ ; pvs != NULL ; pvs = nextpv) {
                    728:                nextpv = pvs->pv_next;
                    729:                pmap_free_pv_doit(pvs);
1.1       matt      730:        }
                    731:
1.17      chris     732:        /*
                    733:         * Can't free the PV page if the PV entries were associated with
                    734:         * the kernel pmap; the pmap is already locked.
                    735:         */
                    736:        if (pv_nfpvents > PVE_HIWAT && pv_unusedpgs.tqh_first != NULL &&
                    737:            pmap != pmap_kernel())
                    738:                pmap_free_pvpage();
1.1       matt      739:
1.17      chris     740:        simple_unlock(&pvalloc_lock);
1.1       matt      741: }
                    742:
                    743:
                    744: /*
1.17      chris     745:  * pmap_free_pvpage: try and free an unused pv_page structure
                    746:  *
                    747:  * => assume caller is holding the pvalloc_lock and that
                    748:  *     there is a page on the pv_unusedpgs list
                    749:  * => if we can't get a lock on the kmem_map we try again later
                    750:  * => note: analysis of MI kmem_map usage [i.e. malloc/free] shows
                    751:  *     that if we can lock the kmem_map then we are not already
                    752:  *     holding kmem_object's lock.
1.1       matt      753:  */
                    754:
1.17      chris     755: static void
                    756: pmap_free_pvpage()
1.1       matt      757: {
1.17      chris     758:        int s;
                    759:        struct vm_map *map;
                    760:        struct vm_map_entry *dead_entries;
                    761:        struct pv_page *pvp;
                    762:
                    763:        s = splvm(); /* protect kmem_map */
1.1       matt      764:
1.17      chris     765:        pvp = pv_unusedpgs.tqh_first;
1.1       matt      766:
                    767:        /*
1.17      chris     768:         * note: watch out for pv_initpage which is allocated out of
                    769:         * kernel_map rather than kmem_map.
1.1       matt      770:         */
1.17      chris     771:        if (pvp == pv_initpage)
                    772:                map = kernel_map;
                    773:        else
                    774:                map = kmem_map;
                    775:
                    776:        if (vm_map_lock_try(map)) {
                    777:
                    778:                /* remove pvp from pv_unusedpgs */
                    779:                TAILQ_REMOVE(&pv_unusedpgs, pvp, pvinfo.pvpi_list);
                    780:
                    781:                /* unmap the page */
                    782:                dead_entries = NULL;
                    783:                uvm_unmap_remove(map, (vaddr_t)pvp, ((vaddr_t)pvp) + PAGE_SIZE,
                    784:                    &dead_entries);
                    785:                vm_map_unlock(map);
                    786:
                    787:                if (dead_entries != NULL)
                    788:                        uvm_unmap_detach(dead_entries, 0);
1.1       matt      789:
1.17      chris     790:                pv_nfpvents -= PVE_PER_PVPAGE;  /* update free count */
1.1       matt      791:        }
                    792:
1.17      chris     793:        if (pvp == pv_initpage)
                    794:                /* no more initpage, we've freed it */
                    795:                pv_initpage = NULL;
1.1       matt      796:
                    797:        splx(s);
                    798: }
                    799:
                    800: /*
1.17      chris     801:  * main pv_entry manipulation functions:
1.39      thorpej   802:  *   pmap_enter_pv: enter a mapping onto a pv_head list
                     803:  *   pmap_remove_pv: remove a mapping from a pv_head list
1.17      chris     804:  *
                    805:  * NOTE: pmap_enter_pv expects to lock the pvh itself
                     806:  *       pmap_remove_pv expects the caller to lock the pvh before calling
                    807:  */
                    808:
                    809: /*
1.39      thorpej   810:  * pmap_enter_pv: enter a mapping onto a pv_head list
1.17      chris     811:  *
                    812:  * => caller should hold the proper lock on pmap_main_lock
                    813:  * => caller should have pmap locked
1.39      thorpej   814:  * => we will gain the lock on the pv_head and allocate the new pv_entry
1.17      chris     815:  * => caller should adjust ptp's wire_count before calling
                    816:  * => caller should not adjust pmap's wire_count
                    817:  */
                    818:
                    819: __inline static void
1.39      thorpej   820: pmap_enter_pv(pvh, pve, pmap, va, ptp, flags)
                    821:        struct pv_head *pvh;
1.17      chris     822:        struct pv_entry *pve;   /* preallocated pve for us to use */
                    823:        struct pmap *pmap;
                    824:        vaddr_t va;
                    825:        struct vm_page *ptp;    /* PTP in pmap that maps this VA */
                    826:        int flags;
                    827: {
                    828:        pve->pv_pmap = pmap;
                    829:        pve->pv_va = va;
                    830:        pve->pv_ptp = ptp;                      /* NULL for kernel pmap */
                    831:        pve->pv_flags = flags;
1.39      thorpej   832:        simple_lock(&pvh->pvh_lock);            /* lock pv_head */
                    833:        pve->pv_next = pvh->pvh_list;           /* add to ... */
                    834:        pvh->pvh_list = pve;                    /* ... locked list */
                    835:        simple_unlock(&pvh->pvh_lock);          /* unlock, done! */
1.17      chris     836:        if (pve->pv_flags & PT_W)
                    837:                ++pmap->pm_stats.wired_count;
                    838: }
                    839:
                    840: /*
                    841:  * pmap_remove_pv: try to remove a mapping from a pv_list
                    842:  *
                    843:  * => caller should hold proper lock on pmap_main_lock
                    844:  * => pmap should be locked
1.39      thorpej   845:  * => caller should hold lock on pv_head [so that attrs can be adjusted]
1.17      chris     846:  * => caller should adjust ptp's wire_count and free PTP if needed
                    847:  * => caller should NOT adjust pmap's wire_count
                    848:  * => we return the removed pve
                    849:  */
                    850:
                    851: __inline static struct pv_entry *
1.39      thorpej   852: pmap_remove_pv(pvh, pmap, va)
                    853:        struct pv_head *pvh;
1.17      chris     854:        struct pmap *pmap;
                    855:        vaddr_t va;
                    856: {
                    857:        struct pv_entry *pve, **prevptr;
                    858:
1.39      thorpej   859:        prevptr = &pvh->pvh_list;               /* previous pv_entry pointer */
1.17      chris     860:        pve = *prevptr;
                    861:        while (pve) {
                    862:                if (pve->pv_pmap == pmap && pve->pv_va == va) { /* match? */
                    863:                        *prevptr = pve->pv_next;                /* remove it! */
                    864:                        if (pve->pv_flags & PT_W)
                    865:                            --pmap->pm_stats.wired_count;
                    866:                        break;
                    867:                }
                    868:                prevptr = &pve->pv_next;                /* previous pointer */
                    869:                pve = pve->pv_next;                     /* advance */
                    870:        }
                    871:        return(pve);                            /* return removed pve */
                    872: }
                    873:
                    874: /*
                    875:  *
                    876:  * pmap_modify_pv: Update pv flags
                    877:  *
1.39      thorpej   878:  * => caller should hold lock on pv_head [so that attrs can be adjusted]
1.17      chris     879:  * => caller should NOT adjust pmap's wire_count
1.29      rearnsha  880:  * => caller must call pmap_vac_me_harder() if writable status of a page
                    881:  *    may have changed.
1.17      chris     882:  * => we return the old flags
                    883:  *
1.1       matt      884:  * Modify a physical-virtual mapping in the pv table
                    885:  */
                    886:
1.33      chris     887: /*__inline */
                    888: static u_int
1.39      thorpej   889: pmap_modify_pv(pmap, va, pvh, bic_mask, eor_mask)
1.15      chris     890:        struct pmap *pmap;
1.1       matt      891:        vaddr_t va;
1.39      thorpej   892:        struct pv_head *pvh;
1.1       matt      893:        u_int bic_mask;
                    894:        u_int eor_mask;
                    895: {
                    896:        struct pv_entry *npv;
                    897:        u_int flags, oflags;
                    898:
                    899:        /*
                    900:         * There is at least one VA mapping this page.
                    901:         */
                    902:
1.39      thorpej   903:        for (npv = pvh->pvh_list; npv; npv = npv->pv_next) {
1.1       matt      904:                if (pmap == npv->pv_pmap && va == npv->pv_va) {
                    905:                        oflags = npv->pv_flags;
                    906:                        npv->pv_flags = flags =
                    907:                            ((oflags & ~bic_mask) ^ eor_mask);
                    908:                        if ((flags ^ oflags) & PT_W) {
                    909:                                if (flags & PT_W)
                    910:                                        ++pmap->pm_stats.wired_count;
                    911:                                else
                    912:                                        --pmap->pm_stats.wired_count;
                    913:                        }
                    914:                        return (oflags);
                    915:                }
                    916:        }
                    917:        return (0);
                    918: }
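/*
 * For illustration (hypothetical call, not taken from a caller shown in
 * this section): clearing the wired attribute of an existing mapping while
 * leaving every other flag alone would be
 *
 *	oflags = pmap_modify_pv(pmap, va, pvh, PT_W, 0);
 *
 * bic_mask names the bits to clear, eor_mask the bits to toggle in
 * whatever survives the clear, and the previous flags are returned.
 */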
                    919:
                    920: /*
                    921:  * Map the specified level 2 pagetable into the level 1 page table for
                    922:  * the given pmap to cover a chunk of virtual address space starting from the
                    923:  * address specified.
                    924:  */
                    925: static /*__inline*/ void
1.17      chris     926: pmap_map_in_l1(pmap, va, l2pa, selfref)
1.15      chris     927:        struct pmap *pmap;
1.1       matt      928:        vaddr_t va, l2pa;
1.17      chris     929:        boolean_t selfref;
1.1       matt      930: {
                    931:        vaddr_t ptva;
                    932:
                    933:        /* Calculate the index into the L1 page table. */
                    934:        ptva = (va >> PDSHIFT) & ~3;
                    935:
                    936:        PDEBUG(0, printf("wiring %08lx in to pd%p pte0x%lx va0x%lx\n", l2pa,
                    937:            pmap->pm_pdir, L1_PTE(l2pa), ptva));
                    938:
                    939:        /* Map page table into the L1. */
                    940:        pmap->pm_pdir[ptva + 0] = L1_PTE(l2pa + 0x000);
                    941:        pmap->pm_pdir[ptva + 1] = L1_PTE(l2pa + 0x400);
                    942:        pmap->pm_pdir[ptva + 2] = L1_PTE(l2pa + 0x800);
                    943:        pmap->pm_pdir[ptva + 3] = L1_PTE(l2pa + 0xc00);
                    944:
                    945:        PDEBUG(0, printf("pt self reference %lx in %lx\n",
                    946:            L2_PTE_NC_NB(l2pa, AP_KRW), pmap->pm_vptpt));
                    947:
                    948:        /* Map the page table into the page table area. */
1.17      chris     949:        if (selfref) {
                    950:                *((pt_entry_t *)(pmap->pm_vptpt + ptva)) =
                    951:                        L2_PTE_NC_NB(l2pa, AP_KRW);
                    952:        }
1.1       matt      953:        /* XXX should be a purge */
                    954: /*     cpu_tlb_flushD();*/
                    955: }
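/*
 * Worked example of the index calculation above, assuming PDSHIFT == 20
 * (one L1 descriptor per 1MB, four 1KB coarse L2 tables per 4KB page):
 *
 *	va   = 0xc0100000
 *	ptva = (0xc0100000 >> 20) & ~3 = 0xc01 & ~3 = 0xc00
 *
 * so pm_pdir[0xc00] through pm_pdir[0xc03] are written, and the single
 * page of PTEs at l2pa ends up covering the 4MB from 0xc0000000 up.
 */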
                    956:
                    957: #if 0
                    958: static /*__inline*/ void
                    959: pmap_unmap_in_l1(pmap, va)
1.15      chris     960:        struct pmap *pmap;
1.1       matt      961:        vaddr_t va;
                    962: {
                    963:        vaddr_t ptva;
                    964:
                    965:        /* Calculate the index into the L1 page table. */
                    966:        ptva = (va >> PDSHIFT) & ~3;
                    967:
                    968:        /* Unmap page table from the L1. */
                    969:        pmap->pm_pdir[ptva + 0] = 0;
                    970:        pmap->pm_pdir[ptva + 1] = 0;
                    971:        pmap->pm_pdir[ptva + 2] = 0;
                    972:        pmap->pm_pdir[ptva + 3] = 0;
                    973:
                    974:        /* Unmap the page table from the page table area. */
                    975:        *((pt_entry_t *)(pmap->pm_vptpt + ptva)) = 0;
                    976:
                    977:        /* XXX should be a purge */
                    978: /*     cpu_tlb_flushD();*/
                    979: }
                    980: #endif
                    981:
                    982: /*
                    983:  *     Used to map a range of physical addresses into kernel
                    984:  *     virtual address space.
                    985:  *
                     986:  *     For now, VM is already on; we only need to map the
                    987:  *     specified memory.
                    988:  */
                    989: vaddr_t
                    990: pmap_map(va, spa, epa, prot)
                    991:        vaddr_t va, spa, epa;
                    992:        int prot;
                    993: {
                    994:        while (spa < epa) {
1.20      chris     995:                pmap_kenter_pa(va, spa, prot);
1.1       matt      996:                va += NBPG;
                    997:                spa += NBPG;
                    998:        }
1.19      chris     999:        pmap_update(pmap_kernel());
1.1       matt     1000:        return(va);
                   1001: }
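                         /*
                          * Usage sketch (illustrative only, not from the original source): a
                          * caller holding a free KVA cursor 'va' and a physical range
                          * [spa, epa) might do
                          *
                          *	va = pmap_map(va, spa, epa, VM_PROT_READ | VM_PROT_WRITE);
                          *
                          * and treat the return value as the first virtual address following
                          * the new mappings, i.e. the next free cursor position.
                          */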
                   1002:
                   1003:
                   1004: /*
1.3       matt     1005:  * void pmap_bootstrap(pd_entry_t *kernel_l1pt, pv_addr_t kernel_ptpt)
1.1       matt     1006:  *
                   1007:  * bootstrap the pmap system. This is called from initarm and allows
                    1008:  * the pmap system to initialise any structures it requires.
                   1009:  *
                   1010:  * Currently this sets up the kernel_pmap that is statically allocated
                    1011:  * and also allocates virtual addresses for certain page hooks.
                    1012:  * Currently two page hooks are allocated; they are used to zero
                    1013:  * and copy physical pages of memory.
                   1014:  * It also initialises the start and end address of the kernel data space.
                   1015:  */
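                         /*
                          * Illustrative note (not from the original source): the
                          * board-specific initarm() is expected to invoke this along the
                          * lines of
                          *
                          *	pmap_bootstrap((pd_entry_t *)kernel_l1pt.pv_va, kernel_ptpt);
                          *
                          * once the kernel L1 table and the page-table page table have been
                          * set aside in physical memory.
                          */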
1.2       matt     1016: extern paddr_t physical_freestart;
                   1017: extern paddr_t physical_freeend;
1.1       matt     1018:
1.17      chris    1019: char *boot_head;
1.1       matt     1020:
                   1021: void
                   1022: pmap_bootstrap(kernel_l1pt, kernel_ptpt)
                   1023:        pd_entry_t *kernel_l1pt;
                   1024:        pv_addr_t kernel_ptpt;
                   1025: {
                   1026:        int loop;
1.2       matt     1027:        paddr_t start, end;
1.1       matt     1028: #if NISADMA > 0
1.2       matt     1029:        paddr_t istart;
                   1030:        psize_t isize;
1.1       matt     1031: #endif
                   1032:
1.15      chris    1033:        pmap_kernel()->pm_pdir = kernel_l1pt;
                   1034:        pmap_kernel()->pm_pptpt = kernel_ptpt.pv_pa;
                   1035:        pmap_kernel()->pm_vptpt = kernel_ptpt.pv_va;
                   1036:        simple_lock_init(&pmap_kernel()->pm_lock);
1.16      chris    1037:        pmap_kernel()->pm_obj.pgops = NULL;
                   1038:        TAILQ_INIT(&(pmap_kernel()->pm_obj.memq));
                   1039:        pmap_kernel()->pm_obj.uo_npages = 0;
                   1040:        pmap_kernel()->pm_obj.uo_refs = 1;
                   1041:
1.1       matt     1042:        /*
                   1043:         * Initialize PAGE_SIZE-dependent variables.
                   1044:         */
                   1045:        uvm_setpagesize();
                   1046:
                   1047:        npages = 0;
                   1048:        loop = 0;
                   1049:        while (loop < bootconfig.dramblocks) {
1.2       matt     1050:                start = (paddr_t)bootconfig.dram[loop].address;
1.1       matt     1051:                end = start + (bootconfig.dram[loop].pages * NBPG);
                   1052:                if (start < physical_freestart)
                   1053:                        start = physical_freestart;
                   1054:                if (end > physical_freeend)
                   1055:                        end = physical_freeend;
                   1056: #if 0
                   1057:                printf("%d: %lx -> %lx\n", loop, start, end - 1);
                   1058: #endif
                   1059: #if NISADMA > 0
                   1060:                if (pmap_isa_dma_range_intersect(start, end - start,
                   1061:                    &istart, &isize)) {
                   1062:                        /*
                   1063:                         * Place the pages that intersect with the
                   1064:                         * ISA DMA range onto the ISA DMA free list.
                   1065:                         */
                   1066: #if 0
                   1067:                        printf("    ISADMA 0x%lx -> 0x%lx\n", istart,
                   1068:                            istart + isize - 1);
                   1069: #endif
                   1070:                        uvm_page_physload(atop(istart),
                   1071:                            atop(istart + isize), atop(istart),
                   1072:                            atop(istart + isize), VM_FREELIST_ISADMA);
                   1073:                        npages += atop(istart + isize) - atop(istart);
                   1074:
                   1075:                        /*
                   1076:                         * Load the pieces that come before
                   1077:                         * the intersection into the default
                   1078:                         * free list.
                   1079:                         */
                   1080:                        if (start < istart) {
                   1081: #if 0
                   1082:                                printf("    BEFORE 0x%lx -> 0x%lx\n",
                   1083:                                    start, istart - 1);
                   1084: #endif
                   1085:                                uvm_page_physload(atop(start),
                   1086:                                    atop(istart), atop(start),
                   1087:                                    atop(istart), VM_FREELIST_DEFAULT);
                   1088:                                npages += atop(istart) - atop(start);
                   1089:                        }
                   1090:
                   1091:                        /*
                   1092:                         * Load the pieces that come after
                   1093:                         * the intersection into the default
                   1094:                         * free list.
                   1095:                         */
                   1096:                        if ((istart + isize) < end) {
                   1097: #if 0
                   1098:                                printf("     AFTER 0x%lx -> 0x%lx\n",
                   1099:                                    (istart + isize), end - 1);
                   1100: #endif
                   1101:                                uvm_page_physload(atop(istart + isize),
                   1102:                                    atop(end), atop(istart + isize),
                   1103:                                    atop(end), VM_FREELIST_DEFAULT);
                   1104:                                npages += atop(end) - atop(istart + isize);
                   1105:                        }
                   1106:                } else {
                   1107:                        uvm_page_physload(atop(start), atop(end),
                   1108:                            atop(start), atop(end), VM_FREELIST_DEFAULT);
                   1109:                        npages += atop(end) - atop(start);
                   1110:                }
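                                         /*
                                          * Worked example (illustrative, not from the original
                                          * source): if this DRAM block spans
                                          * 0x00000000-0x01ffffff and the ISA DMA range covers
                                          * its first 16MB, that 16MB is loaded onto
                                          * VM_FREELIST_ISADMA and the remaining 16MB takes the
                                          * "AFTER" case onto VM_FREELIST_DEFAULT; had the block
                                          * started below the DMA window, its leading portion
                                          * would instead have gone onto the default list via
                                          * the "BEFORE" path.
                                          */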
                   1111: #else  /* NISADMA > 0 */
                   1112:                uvm_page_physload(atop(start), atop(end),
                   1113:                    atop(start), atop(end), VM_FREELIST_DEFAULT);
                   1114:                npages += atop(end) - atop(start);
                   1115: #endif /* NISADMA > 0 */
                   1116:                ++loop;
                   1117:        }
                   1118:
                   1119: #ifdef MYCROFT_HACK
                   1120:        printf("npages = %ld\n", npages);
                   1121: #endif
                   1122:
                   1123:        virtual_start = KERNEL_VM_BASE;
                   1124:        virtual_end = virtual_start + KERNEL_VM_SIZE - 1;
                   1125:
                   1126:        ALLOC_PAGE_HOOK(page_hook0, NBPG);
                   1127:        ALLOC_PAGE_HOOK(page_hook1, NBPG);
                   1128:
                   1129:        /*
                   1130:         * The mem special device needs a virtual hook but we don't
                   1131:         * need a pte
                   1132:         */
                   1133:        memhook = (char *)virtual_start;
                   1134:        virtual_start += NBPG;
                   1135:
                   1136:        msgbufaddr = (caddr_t)virtual_start;
1.15      chris    1137:        msgbufpte = (pt_entry_t)pmap_pte(pmap_kernel(), virtual_start);
1.1       matt     1138:        virtual_start += round_page(MSGBUFSIZE);
                   1139:
1.17      chris    1140:        /*
                   1141:         * init the static-global locks and global lists.
                   1142:         */
                   1143:        spinlockinit(&pmap_main_lock, "pmaplk", 0);
                   1144:        simple_lock_init(&pvalloc_lock);
                   1145:        TAILQ_INIT(&pv_freepages);
                   1146:        TAILQ_INIT(&pv_unusedpgs);
1.1       matt     1147:
1.10      chris    1148:        /*
1.39      thorpej  1149:         * compute the number of pages we have and then allocate RAM
                    1150:         * for each page's pv_head and saved attributes.
                   1151:         */
                   1152:        {
                   1153:                int npages, lcv;
                   1154:                vsize_t s;
                   1155:
                   1156:                npages = 0;
                   1157:                for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
                   1158:                        npages += (vm_physmem[lcv].end - vm_physmem[lcv].start);
                   1159:                s = (vsize_t) (sizeof(struct pv_head) * npages +
                   1160:                                sizeof(char) * npages);
                   1161:                s = round_page(s); /* round up */
                   1162:                boot_head = (char *)uvm_pageboot_alloc(s);
                    1163:                if (boot_head == 0)
                    1164:                        panic("pmap_bootstrap: unable to allocate pv_heads");
                    1165:                bzero((char *)boot_head, s);
                   1166:        }
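                              /*
                               * Descriptive note (not part of the original source): the
                               * boot_head region allocated above is handed out again in
                               * pmap_init() below, which carves it into the per-segment
                               * pvhead arrays and attribute bytes.
                               */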
                   1167:
                   1168:        /*
1.10      chris    1169:         * initialize the pmap pool.
                   1170:         */
                   1171:
                   1172:        pool_init(&pmap_pmap_pool, sizeof(struct pmap), 0, 0, 0, "pmappl",
                   1173:                  0, pool_page_alloc_nointr, pool_page_free_nointr, M_VMPMAP);
                   1174:
1.36      thorpej  1175:        cpu_dcache_wbinv_all();
1.1       matt     1176: }
                   1177:
                   1178: /*
                   1179:  * void pmap_init(void)
                   1180:  *
                   1181:  * Initialize the pmap module.
                   1182:  * Called by vm_init() in vm/vm_init.c in order to initialise
                   1183:  * any structures that the pmap system needs to map virtual memory.
                   1184:  */
                   1185:
                   1186: extern int physmem;
                   1187:
                   1188: void
                   1189: pmap_init()
                   1190: {
1.39      thorpej  1191:        int lcv, i;
                   1192:
                   1193: #ifdef MYCROFT_HACK
                   1194:        printf("physmem = %d\n", physmem);
                   1195: #endif
1.1       matt     1196:
                   1197:        /*
                    1198:         * Set the available memory vars.  These do not map to real memory
                    1199:         * addresses, and cannot, as the physical memory is fragmented.
                   1200:         * They are used by ps for %mem calculations.
                   1201:         * One could argue whether this should be the entire memory or just
                   1202:         * the memory that is useable in a user process.
                   1203:         */
                   1204:        avail_start = 0;
                   1205:        avail_end = physmem * NBPG;
                   1206:
1.39      thorpej  1207:        /* allocate pv_head stuff first */
                   1208:        for (lcv = 0 ; lcv < vm_nphysseg ; lcv++) {
                   1209:                vm_physmem[lcv].pmseg.pvhead = (struct pv_head *)boot_head;
                   1210:                boot_head = (char *)(vaddr_t)(vm_physmem[lcv].pmseg.pvhead +
                   1211:                                 (vm_physmem[lcv].end - vm_physmem[lcv].start));
                   1212:                for (i = 0;
                   1213:                     i < (vm_physmem[lcv].end - vm_physmem[lcv].start); i++) {
                   1214:                        simple_lock_init(
                   1215:                            &vm_physmem[lcv].pmseg.pvhead[i].pvh_lock);
                   1216:                }
                   1217:        }
                   1218:
                   1219:        /* now allocate attrs */
                   1220:        for (lcv = 0 ; lcv < vm_nphysseg ; lcv++) {
                   1221:                vm_physmem[lcv].pmseg.attrs = (char *) boot_head;
                   1222:                boot_head = (char *)(vaddr_t)(vm_physmem[lcv].pmseg.attrs +
                   1223:                                 (vm_physmem[lcv].end - vm_physmem[lcv].start));
                   1224:        }
                   1225:
1.17      chris    1226:        /*
                   1227:         * now we need to free enough pv_entry structures to allow us to get
                   1228:         * the kmem_map/kmem_object allocated and inited (done after this
                   1229:         * function is finished).  to do this we allocate one bootstrap page out
                   1230:         * of kernel_map and use it to provide an initial pool of pv_entry
                   1231:         * structures.   we never free this page.
                   1232:         */
                   1233:
                   1234:        pv_initpage = (struct pv_page *) uvm_km_alloc(kernel_map, PAGE_SIZE);
                   1235:        if (pv_initpage == NULL)
                   1236:                panic("pmap_init: pv_initpage");
                   1237:        pv_cachedva = 0;   /* a VA we have allocated but not used yet */
                   1238:        pv_nfpvents = 0;
                   1239:        (void) pmap_add_pvpage(pv_initpage, FALSE);
                   1240:
1.39      thorpej  1241: #ifdef MYCROFT_HACK
                   1242:        for (lcv = 0 ; lcv < vm_nphysseg ; lcv++) {
                   1243:                printf("physseg[%d] pvent=%p attrs=%p start=%ld end=%ld\n",
                   1244:                    lcv,
                   1245:                    vm_physmem[lcv].pmseg.pvent, vm_physmem[lcv].pmseg.attrs,
                   1246:                    vm_physmem[lcv].start, vm_physmem[lcv].end);
                   1247:        }
                   1248: #endif
1.1       matt     1249:        pmap_initialized = TRUE;
                   1250:
                   1251:        /* Initialise our L1 page table queues and counters */
                   1252:        SIMPLEQ_INIT(&l1pt_static_queue);
                   1253:        l1pt_static_queue_count = 0;
                   1254:        l1pt_static_create_count = 0;
                   1255:        SIMPLEQ_INIT(&l1pt_queue);
                   1256:        l1pt_queue_count = 0;
                   1257:        l1pt_create_count = 0;
                   1258:        l1pt_reuse_count = 0;
                   1259: }
                   1260:
                   1261: /*
                   1262:  * pmap_postinit()
                   1263:  *
                   1264:  * This routine is called after the vm and kmem subsystems have been
                   1265:  * initialised. This allows the pmap code to perform any initialisation
                    1266:  * that can only be done once the memory allocation is in place.
                   1267:  */
                   1268:
                   1269: void
                   1270: pmap_postinit()
                   1271: {
                   1272:        int loop;
                   1273:        struct l1pt *pt;
                   1274:
                   1275: #ifdef PMAP_STATIC_L1S
                   1276:        for (loop = 0; loop < PMAP_STATIC_L1S; ++loop) {
                   1277: #else  /* PMAP_STATIC_L1S */
                   1278:        for (loop = 0; loop < max_processes; ++loop) {
                   1279: #endif /* PMAP_STATIC_L1S */
                   1280:                /* Allocate a L1 page table */
                   1281:                pt = pmap_alloc_l1pt();
                   1282:                if (!pt)
                   1283:                        panic("Cannot allocate static L1 page tables\n");
                   1284:
                   1285:                /* Clean it */
                   1286:                bzero((void *)pt->pt_va, PD_SIZE);
                   1287:                pt->pt_flags |= (PTFLAG_STATIC | PTFLAG_CLEAN);
                   1288:                /* Add the page table to the queue */
                   1289:                SIMPLEQ_INSERT_TAIL(&l1pt_static_queue, pt, pt_queue);
                   1290:                ++l1pt_static_queue_count;
                   1291:                ++l1pt_static_create_count;
                   1292:        }
                   1293: }
                   1294:
                   1295:
                   1296: /*
                   1297:  * Create and return a physical map.
                   1298:  *
                   1299:  * If the size specified for the map is zero, the map is an actual physical
                   1300:  * map, and may be referenced by the hardware.
                   1301:  *
                   1302:  * If the size specified is non-zero, the map will be used in software only,
                   1303:  * and is bounded by that size.
                   1304:  */
                   1305:
                   1306: pmap_t
                   1307: pmap_create()
                   1308: {
1.15      chris    1309:        struct pmap *pmap;
1.1       matt     1310:
1.10      chris    1311:        /*
                   1312:         * Fetch pmap entry from the pool
                   1313:         */
                   1314:
                   1315:        pmap = pool_get(&pmap_pmap_pool, PR_WAITOK);
 1.17      chris    1316:        /* XXX is this really needed? */
                   1317:        memset(pmap, 0, sizeof(*pmap));
1.1       matt     1318:
1.16      chris    1319:        simple_lock_init(&pmap->pm_obj.vmobjlock);
                   1320:        pmap->pm_obj.pgops = NULL;      /* currently not a mappable object */
                   1321:        TAILQ_INIT(&pmap->pm_obj.memq);
                   1322:        pmap->pm_obj.uo_npages = 0;
                   1323:        pmap->pm_obj.uo_refs = 1;
                   1324:        pmap->pm_stats.wired_count = 0;
                   1325:        pmap->pm_stats.resident_count = 1;
                   1326:
1.1       matt     1327:        /* Now init the machine part of the pmap */
                   1328:        pmap_pinit(pmap);
                   1329:        return(pmap);
                   1330: }
                   1331:
                   1332: /*
                   1333:  * pmap_alloc_l1pt()
                   1334:  *
                    1335:  * This routine allocates physical and virtual memory for an L1 page table
                    1336:  * and wires it.
                    1337:  * An l1pt structure is returned to describe the allocated page table.
                   1338:  *
                   1339:  * This routine is allowed to fail if the required memory cannot be allocated.
                   1340:  * In this case NULL is returned.
                   1341:  */
                   1342:
                   1343: struct l1pt *
                   1344: pmap_alloc_l1pt(void)
                   1345: {
1.2       matt     1346:        paddr_t pa;
                   1347:        vaddr_t va;
1.1       matt     1348:        struct l1pt *pt;
                   1349:        int error;
1.9       chs      1350:        struct vm_page *m;
1.11      chris    1351:        pt_entry_t *ptes;
1.1       matt     1352:
                   1353:        /* Allocate virtual address space for the L1 page table */
                   1354:        va = uvm_km_valloc(kernel_map, PD_SIZE);
                   1355:        if (va == 0) {
                   1356: #ifdef DIAGNOSTIC
1.26      rearnsha 1357:                PDEBUG(0,
                   1358:                    printf("pmap: Cannot allocate pageable memory for L1\n"));
1.1       matt     1359: #endif /* DIAGNOSTIC */
                   1360:                return(NULL);
                   1361:        }
                   1362:
                   1363:        /* Allocate memory for the l1pt structure */
                   1364:        pt = (struct l1pt *)malloc(sizeof(struct l1pt), M_VMPMAP, M_WAITOK);
                   1365:
                   1366:        /*
                   1367:         * Allocate pages from the VM system.
                   1368:         */
                   1369:        TAILQ_INIT(&pt->pt_plist);
                   1370:        error = uvm_pglistalloc(PD_SIZE, physical_start, physical_end,
                   1371:            PD_SIZE, 0, &pt->pt_plist, 1, M_WAITOK);
                   1372:        if (error) {
                   1373: #ifdef DIAGNOSTIC
1.26      rearnsha 1374:                PDEBUG(0,
                   1375:                    printf("pmap: Cannot allocate physical mem for L1 (%d)\n",
                   1376:                    error));
1.1       matt     1377: #endif /* DIAGNOSTIC */
                   1378:                /* Release the resources we already have claimed */
                   1379:                free(pt, M_VMPMAP);
                   1380:                uvm_km_free(kernel_map, va, PD_SIZE);
                   1381:                return(NULL);
                   1382:        }
                   1383:
                   1384:        /* Map our physical pages into our virtual space */
                   1385:        pt->pt_va = va;
                   1386:        m = pt->pt_plist.tqh_first;
1.11      chris    1387:        ptes = pmap_map_ptes(pmap_kernel());
1.1       matt     1388:        while (m && va < (pt->pt_va + PD_SIZE)) {
                   1389:                pa = VM_PAGE_TO_PHYS(m);
                   1390:
1.20      chris    1391:                pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE);
1.1       matt     1392:
                   1393:                /* Revoke cacheability and bufferability */
                   1394:                /* XXX should be done better than this */
1.11      chris    1395:                ptes[arm_byte_to_page(va)] &= ~(PT_C | PT_B);
1.1       matt     1396:
                   1397:                va += NBPG;
                   1398:                m = m->pageq.tqe_next;
                   1399:        }
1.11      chris    1400:        pmap_unmap_ptes(pmap_kernel());
1.19      chris    1401:        pmap_update(pmap_kernel());
1.1       matt     1402:
                   1403: #ifdef DIAGNOSTIC
                   1404:        if (m)
                   1405:                panic("pmap_alloc_l1pt: pglist not empty\n");
                   1406: #endif /* DIAGNOSTIC */
                   1407:
                   1408:        pt->pt_flags = 0;
                   1409:        return(pt);
                   1410: }
                   1411:
                   1412: /*
                   1413:  * Free a L1 page table previously allocated with pmap_alloc_l1pt().
                   1414:  */
1.33      chris    1415: static void
1.1       matt     1416: pmap_free_l1pt(pt)
                   1417:        struct l1pt *pt;
                   1418: {
                    1419:        /* Separate the physical memory from the virtual space */
1.20      chris    1420:        pmap_kremove(pt->pt_va, PD_SIZE);
1.19      chris    1421:        pmap_update(pmap_kernel());
1.1       matt     1422:
                   1423:        /* Return the physical memory */
                   1424:        uvm_pglistfree(&pt->pt_plist);
                   1425:
                   1426:        /* Free the virtual space */
                   1427:        uvm_km_free(kernel_map, pt->pt_va, PD_SIZE);
                   1428:
                   1429:        /* Free the l1pt structure */
                   1430:        free(pt, M_VMPMAP);
                   1431: }
                   1432:
                   1433: /*
                   1434:  * Allocate a page directory.
                   1435:  * This routine will either allocate a new page directory from the pool
                   1436:  * of L1 page tables currently held by the kernel or it will allocate
                   1437:  * a new one via pmap_alloc_l1pt().
                   1438:  * It will then initialise the l1 page table for use.
                   1439:  */
1.33      chris    1440: static int
1.1       matt     1441: pmap_allocpagedir(pmap)
                   1442:        struct pmap *pmap;
                   1443: {
1.2       matt     1444:        paddr_t pa;
1.1       matt     1445:        struct l1pt *pt;
                   1446:        pt_entry_t *pte;
                   1447:
                   1448:        PDEBUG(0, printf("pmap_allocpagedir(%p)\n", pmap));
                   1449:
                   1450:        /* Do we have any spare L1's lying around ? */
                   1451:        if (l1pt_static_queue_count) {
                   1452:                --l1pt_static_queue_count;
                   1453:                pt = l1pt_static_queue.sqh_first;
                   1454:                SIMPLEQ_REMOVE_HEAD(&l1pt_static_queue, pt, pt_queue);
                   1455:        } else if (l1pt_queue_count) {
                   1456:                --l1pt_queue_count;
                   1457:                pt = l1pt_queue.sqh_first;
                   1458:                SIMPLEQ_REMOVE_HEAD(&l1pt_queue, pt, pt_queue);
                   1459:                ++l1pt_reuse_count;
                   1460:        } else {
                   1461:                pt = pmap_alloc_l1pt();
                   1462:                if (!pt)
                   1463:                        return(ENOMEM);
                   1464:                ++l1pt_create_count;
                   1465:        }
                   1466:
                   1467:        /* Store the pointer to the l1 descriptor in the pmap. */
                   1468:        pmap->pm_l1pt = pt;
                   1469:
                   1470:        /* Get the physical address of the start of the l1 */
                   1471:        pa = VM_PAGE_TO_PHYS(pt->pt_plist.tqh_first);
                   1472:
                   1473:        /* Store the virtual address of the l1 in the pmap. */
                   1474:        pmap->pm_pdir = (pd_entry_t *)pt->pt_va;
                   1475:
                   1476:        /* Clean the L1 if it is dirty */
                   1477:        if (!(pt->pt_flags & PTFLAG_CLEAN))
                   1478:                bzero((void *)pmap->pm_pdir, (PD_SIZE - KERNEL_PD_SIZE));
                   1479:
                   1480:        /* Do we already have the kernel mappings ? */
                   1481:        if (!(pt->pt_flags & PTFLAG_KPT)) {
                   1482:                /* Duplicate the kernel mapping i.e. all mappings 0xf0000000+ */
                   1483:
1.15      chris    1484:                bcopy((char *)pmap_kernel()->pm_pdir + (PD_SIZE - KERNEL_PD_SIZE),
1.1       matt     1485:                    (char *)pmap->pm_pdir + (PD_SIZE - KERNEL_PD_SIZE),
                   1486:                    KERNEL_PD_SIZE);
                   1487:                pt->pt_flags |= PTFLAG_KPT;
                   1488:        }
                   1489:
                   1490:        /* Allocate a page table to map all the page tables for this pmap */
                   1491:
                   1492: #ifdef DIAGNOSTIC
                   1493:        if (pmap->pm_vptpt) {
                   1494:                /* XXX What if we have one already ? */
                   1495:                panic("pmap_allocpagedir: have pt already\n");
                   1496:        }
                   1497: #endif /* DIAGNOSTIC */
                   1498:        pmap->pm_vptpt = uvm_km_zalloc(kernel_map, NBPG);
1.5       toshii   1499:        if (pmap->pm_vptpt == 0) {
                   1500:                pmap_freepagedir(pmap);
                   1501:                return(ENOMEM);
                   1502:        }
                   1503:
1.15      chris    1504:        (void) pmap_extract(pmap_kernel(), pmap->pm_vptpt, &pmap->pm_pptpt);
1.1       matt     1505:        pmap->pm_pptpt &= PG_FRAME;
                   1506:        /* Revoke cacheability and bufferability */
                   1507:        /* XXX should be done better than this */
1.15      chris    1508:        pte = pmap_pte(pmap_kernel(), pmap->pm_vptpt);
1.1       matt     1509:        *pte = *pte & ~(PT_C | PT_B);
                   1510:
                   1511:        /* Wire in this page table */
1.17      chris    1512:        pmap_map_in_l1(pmap, PROCESS_PAGE_TBLS_BASE, pmap->pm_pptpt, TRUE);
1.1       matt     1513:
                   1514:        pt->pt_flags &= ~PTFLAG_CLEAN;  /* L1 is dirty now */
                   1515:
                   1516:        /*
                   1517:         * Map the kernel page tables for 0xf0000000 +
                   1518:         * into the page table used to map the
                   1519:         * pmap's page tables
                   1520:         */
                   1521:        bcopy((char *)(PROCESS_PAGE_TBLS_BASE
                   1522:            + (PROCESS_PAGE_TBLS_BASE >> (PGSHIFT - 2))
                   1523:            + ((PD_SIZE - KERNEL_PD_SIZE) >> 2)),
                   1524:            (char *)pmap->pm_vptpt + ((PD_SIZE - KERNEL_PD_SIZE) >> 2),
                   1525:            (KERNEL_PD_SIZE >> 2));
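                              /*
                               * Illustrative note on the arithmetic above (not from the
                               * original source; assuming 4KB pages, so PGSHIFT is 12):
                               * within the recursive page-table window, the PTE that maps a
                               * given va lives at
                               * PROCESS_PAGE_TBLS_BASE + (va >> (PGSHIFT - 2)).  Using the
                               * window's own base address as va therefore locates the
                               * currently active ptpt page, and adding
                               * (PD_SIZE - KERNEL_PD_SIZE) >> 2 skips to the entries that
                               * map the kernel's page tables, which are then copied into the
                               * new pmap's vptpt.
                               */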
                   1526:
                   1527:        return(0);
                   1528: }
                   1529:
                   1530:
                   1531: /*
                   1532:  * Initialize a preallocated and zeroed pmap structure,
                   1533:  * such as one in a vmspace structure.
                   1534:  */
                   1535:
                   1536: void
                   1537: pmap_pinit(pmap)
                   1538:        struct pmap *pmap;
                   1539: {
1.26      rearnsha 1540:        int backoff = 6;
                   1541:        int retry = 10;
                   1542:
1.1       matt     1543:        PDEBUG(0, printf("pmap_pinit(%p)\n", pmap));
                   1544:
                   1545:        /* Keep looping until we succeed in allocating a page directory */
                   1546:        while (pmap_allocpagedir(pmap) != 0) {
                   1547:                /*
                   1548:                 * Ok we failed to allocate a suitable block of memory for an
                   1549:                 * L1 page table. This means that either:
                   1550:                 * 1. 16KB of virtual address space could not be allocated
                   1551:                 * 2. 16KB of physically contiguous memory on a 16KB boundary
                   1552:                 *    could not be allocated.
                   1553:                 *
                   1554:                 * Since we cannot fail we will sleep for a while and try
1.17      chris    1555:                 * again.
1.26      rearnsha 1556:                 *
                   1557:                 * Searching for a suitable L1 PT is expensive:
                   1558:                 * to avoid hogging the system when memory is really
                   1559:                 * scarce, use an exponential back-off so that
                   1560:                 * eventually we won't retry more than once every 8
                   1561:                 * seconds.  This should allow other processes to run
                   1562:                 * to completion and free up resources.
1.1       matt     1563:                 */
1.26      rearnsha 1564:                (void) ltsleep(&lbolt, PVM, "l1ptwait", (hz << 3) >> backoff,
                   1565:                    NULL);
                   1566:                if (--retry == 0) {
                   1567:                        retry = 10;
                   1568:                        if (backoff)
                   1569:                                --backoff;
                   1570:                }
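                                      /*
                                       * Illustrative arithmetic (not from the original source):
                                       * with backoff starting at 6 the timeout above is
                                       * (hz << 3) >> 6 == hz / 8, i.e. roughly an eighth of a
                                       * second; every 10 failed attempts backoff drops by one,
                                       * doubling the sleep, until it reaches 0 and the timeout
                                       * caps at hz << 3, i.e. about 8 seconds between retries.
                                       */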
1.1       matt     1571:        }
                   1572:
                   1573:        /* Map zero page for the pmap. This will also map the L2 for it */
                   1574:        pmap_enter(pmap, 0x00000000, systempage.pv_pa,
                   1575:            VM_PROT_READ, VM_PROT_READ | PMAP_WIRED);
1.19      chris    1576:        pmap_update(pmap);
1.1       matt     1577: }
                   1578:
                   1579:
                   1580: void
                   1581: pmap_freepagedir(pmap)
1.15      chris    1582:        struct pmap *pmap;
1.1       matt     1583: {
                   1584:        /* Free the memory used for the page table mapping */
1.5       toshii   1585:        if (pmap->pm_vptpt != 0)
                   1586:                uvm_km_free(kernel_map, (vaddr_t)pmap->pm_vptpt, NBPG);
1.1       matt     1587:
                   1588:        /* junk the L1 page table */
                   1589:        if (pmap->pm_l1pt->pt_flags & PTFLAG_STATIC) {
                   1590:                /* Add the page table to the queue */
                   1591:                SIMPLEQ_INSERT_TAIL(&l1pt_static_queue, pmap->pm_l1pt, pt_queue);
                   1592:                ++l1pt_static_queue_count;
                   1593:        } else if (l1pt_queue_count < 8) {
                   1594:                /* Add the page table to the queue */
                   1595:                SIMPLEQ_INSERT_TAIL(&l1pt_queue, pmap->pm_l1pt, pt_queue);
                   1596:                ++l1pt_queue_count;
                   1597:        } else
                   1598:                pmap_free_l1pt(pmap->pm_l1pt);
                   1599: }
                   1600:
                   1601:
                   1602: /*
                   1603:  * Retire the given physical map from service.
                   1604:  * Should only be called if the map contains no valid mappings.
                   1605:  */
                   1606:
                   1607: void
                   1608: pmap_destroy(pmap)
1.15      chris    1609:        struct pmap *pmap;
1.1       matt     1610: {
1.17      chris    1611:        struct vm_page *page;
1.1       matt     1612:        int count;
                   1613:
                   1614:        if (pmap == NULL)
                   1615:                return;
                   1616:
                   1617:        PDEBUG(0, printf("pmap_destroy(%p)\n", pmap));
1.17      chris    1618:
                   1619:        /*
                   1620:         * Drop reference count
                   1621:         */
                   1622:        simple_lock(&pmap->pm_obj.vmobjlock);
1.16      chris    1623:        count = --pmap->pm_obj.uo_refs;
1.17      chris    1624:        simple_unlock(&pmap->pm_obj.vmobjlock);
                   1625:        if (count > 0) {
                   1626:                return;
1.1       matt     1627:        }
                   1628:
1.17      chris    1629:        /*
                   1630:         * reference count is zero, free pmap resources and then free pmap.
                   1631:         */
                   1632:
1.1       matt     1633:        /* Remove the zero page mapping */
                   1634:        pmap_remove(pmap, 0x00000000, 0x00000000 + NBPG);
1.19      chris    1635:        pmap_update(pmap);
1.1       matt     1636:
                   1637:        /*
                   1638:         * Free any page tables still mapped
                    1639:         * This is only temporary until pmap_enter can count the number
                    1640:         * of mappings made in a page table. Then pmap_remove() can
                    1641:         * reduce the count and free the page table when the count
 1.16      chris    1642:         * reaches zero.  Note that entries in this list should match the
                    1643:         * contents of the ptpt; however, this is faster than walking 1024
                    1644:         * entries looking for page tables
                    1645:         * (idea taken from i386 pmap.c).
1.1       matt     1646:         */
1.16      chris    1647:        while (pmap->pm_obj.memq.tqh_first != NULL) {
                   1648:                page = pmap->pm_obj.memq.tqh_first;
                   1649: #ifdef DIAGNOSTIC
                   1650:                if (page->flags & PG_BUSY)
                   1651:                        panic("pmap_release: busy page table page");
                   1652: #endif
                   1653:                /* pmap_page_protect?  currently no need for it. */
                   1654:
                   1655:                page->wire_count = 0;
                   1656:                uvm_pagefree(page);
1.1       matt     1657:        }
1.16      chris    1658:
1.1       matt     1659:        /* Free the page dir */
                   1660:        pmap_freepagedir(pmap);
1.17      chris    1661:
                   1662:        /* return the pmap to the pool */
                   1663:        pool_put(&pmap_pmap_pool, pmap);
1.1       matt     1664: }
                   1665:
                   1666:
                   1667: /*
1.15      chris    1668:  * void pmap_reference(struct pmap *pmap)
1.1       matt     1669:  *
                   1670:  * Add a reference to the specified pmap.
                   1671:  */
                   1672:
                   1673: void
                   1674: pmap_reference(pmap)
1.15      chris    1675:        struct pmap *pmap;
1.1       matt     1676: {
                   1677:        if (pmap == NULL)
                   1678:                return;
                   1679:
                   1680:        simple_lock(&pmap->pm_lock);
1.16      chris    1681:        pmap->pm_obj.uo_refs++;
1.1       matt     1682:        simple_unlock(&pmap->pm_lock);
                   1683: }
                   1684:
                   1685: /*
                   1686:  * void pmap_virtual_space(vaddr_t *start, vaddr_t *end)
                   1687:  *
                   1688:  * Return the start and end addresses of the kernel's virtual space.
                    1689:  * These values are set up in pmap_bootstrap and are updated as pages
                   1690:  * are allocated.
                   1691:  */
                   1692:
                   1693: void
                   1694: pmap_virtual_space(start, end)
                   1695:        vaddr_t *start;
                   1696:        vaddr_t *end;
                   1697: {
                   1698:        *start = virtual_start;
                   1699:        *end = virtual_end;
                   1700: }
                   1701:
                   1702:
                   1703: /*
                   1704:  * Activate the address space for the specified process.  If the process
                   1705:  * is the current process, load the new MMU context.
                   1706:  */
                   1707: void
                   1708: pmap_activate(p)
                   1709:        struct proc *p;
                   1710: {
1.15      chris    1711:        struct pmap *pmap = p->p_vmspace->vm_map.pmap;
1.1       matt     1712:        struct pcb *pcb = &p->p_addr->u_pcb;
                   1713:
1.15      chris    1714:        (void) pmap_extract(pmap_kernel(), (vaddr_t)pmap->pm_pdir,
1.1       matt     1715:            (paddr_t *)&pcb->pcb_pagedir);
                   1716:
                   1717:        PDEBUG(0, printf("pmap_activate: p=%p pmap=%p pcb=%p pdir=%p l1=%p\n",
                   1718:            p, pmap, pcb, pmap->pm_pdir, pcb->pcb_pagedir));
                   1719:
                   1720:        if (p == curproc) {
                   1721:                PDEBUG(0, printf("pmap_activate: setting TTB\n"));
                   1722:                setttb((u_int)pcb->pcb_pagedir);
                   1723:        }
                   1724: #if 0
                   1725:        pmap->pm_pdchanged = FALSE;
                   1726: #endif
                   1727: }
                   1728:
                   1729:
                   1730: /*
                   1731:  * Deactivate the address space of the specified process.
                   1732:  */
                   1733: void
                   1734: pmap_deactivate(p)
                   1735:        struct proc *p;
                   1736: {
                   1737: }
                   1738:
1.31      thorpej  1739: /*
                   1740:  * Perform any deferred pmap operations.
                   1741:  */
                   1742: void
                   1743: pmap_update(struct pmap *pmap)
                   1744: {
                   1745:
                   1746:        /*
                   1747:         * We haven't deferred any pmap operations, but we do need to
                   1748:         * make sure TLB/cache operations have completed.
                   1749:         */
                   1750:        cpu_cpwait();
                   1751: }
1.1       matt     1752:
                   1753: /*
                   1754:  * pmap_clean_page()
                   1755:  *
                   1756:  * This is a local function used to work out the best strategy to clean
                   1757:  * a single page referenced by its entry in the PV table. It's used by
                    1758:  * pmap_copy_page, pmap_zero_page and maybe some others later on.
                   1759:  *
                   1760:  * Its policy is effectively:
                   1761:  *  o If there are no mappings, we don't bother doing anything with the cache.
                   1762:  *  o If there is one mapping, we clean just that page.
                   1763:  *  o If there are multiple mappings, we clean the entire cache.
                   1764:  *
                   1765:  * So that some functions can be further optimised, it returns 0 if it didn't
                   1766:  * clean the entire cache, or 1 if it did.
                   1767:  *
                   1768:  * XXX One bug in this routine is that if the pv_entry has a single page
                   1769:  * mapped at 0x00000000 a whole cache clean will be performed rather than
                    1770:  * just the 1 page.  This should not occur in everyday use, and if it does
                    1771:  * it merely results in a less efficient clean for that page.
                   1772:  */
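                         /*
                          * Concrete illustration (not part of the original source): if the
                          * current pmap has the page mapped at exactly one (cacheable) VA,
                          * only that page-sized range is written back and invalidated and 0
                          * is returned; if it is mapped at two or more VAs, the whole I/D
                          * cache is cleaned and 1 is returned so the caller can skip any
                          * further cleaning of its own.
                          */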
                   1773: static int
1.17      chris    1774: pmap_clean_page(pv, is_src)
1.1       matt     1775:        struct pv_entry *pv;
1.17      chris    1776:        boolean_t is_src;
1.1       matt     1777: {
1.17      chris    1778:        struct pmap *pmap;
                   1779:        struct pv_entry *npv;
1.1       matt     1780:        int cache_needs_cleaning = 0;
                   1781:        vaddr_t page_to_clean = 0;
                   1782:
1.17      chris    1783:        if (pv == NULL)
                   1784:                /* nothing mapped in so nothing to flush */
                   1785:                return (0);
                   1786:
                   1787:        /* Since we flush the cache each time we change curproc, we
                   1788:         * only need to flush the page if it is in the current pmap.
                   1789:         */
                   1790:        if (curproc)
                   1791:                pmap = curproc->p_vmspace->vm_map.pmap;
                   1792:        else
                   1793:                pmap = pmap_kernel();
                   1794:
                   1795:        for (npv = pv; npv; npv = npv->pv_next) {
                   1796:                if (npv->pv_pmap == pmap) {
                   1797:                        /* The page is mapped non-cacheable in
                   1798:                         * this map.  No need to flush the cache.
                   1799:                         */
                   1800:                        if (npv->pv_flags & PT_NC) {
                   1801: #ifdef DIAGNOSTIC
                   1802:                                if (cache_needs_cleaning)
                   1803:                                        panic("pmap_clean_page: "
                   1804:                                                        "cache inconsistency");
                   1805: #endif
                   1806:                                break;
                   1807:                        }
                   1808: #if 0
                   1809:                        /* This doesn't work, because pmap_protect
                   1810:                           doesn't flush changes on pages that it
                   1811:                           has write-protected.  */
1.21      chris    1812:
1.25      rearnsha 1813:                        /* If the page is not writable and this
1.17      chris    1814:                           is the source, then there is no need
                   1815:                           to flush it from the cache.  */
                   1816:                        else if (is_src && ! (npv->pv_flags & PT_Wr))
                   1817:                                continue;
                   1818: #endif
                   1819:                        if (cache_needs_cleaning){
                   1820:                                page_to_clean = 0;
                   1821:                                break;
                   1822:                        }
                   1823:                        else
                   1824:                                page_to_clean = npv->pv_va;
                   1825:                        cache_needs_cleaning = 1;
                   1826:                }
1.1       matt     1827:        }
                   1828:
                   1829:        if (page_to_clean)
1.36      thorpej  1830:                cpu_idcache_wbinv_range(page_to_clean, NBPG);
1.1       matt     1831:        else if (cache_needs_cleaning) {
1.36      thorpej  1832:                cpu_idcache_wbinv_all();
1.1       matt     1833:                return (1);
                   1834:        }
                   1835:        return (0);
                   1836: }
                   1837:
                   1838: /*
 1.39      thorpej  1839:  * pmap_find_pvh()
                   1840:  *
                   1841:  * This is a local function that finds a PV head for a given physical page.
                   1842:  * This is a common op, and this function removes loads of ifdefs in the code.
                   1843:  */
                   1844: static __inline struct pv_head *
                   1845: pmap_find_pvh(phys)
                   1846:        paddr_t phys;
                   1847: {
                   1848:        int bank, off;
                   1849:        struct pv_head *pvh;
                   1850:
                   1851:        if ((bank = vm_physseg_find(atop(phys), &off)) == -1)
                   1852:                panic("pmap_find_pv: not a real page, phys=%lx\n", phys);
                   1853:        pvh = &vm_physmem[bank].pmseg.pvhead[off];
                   1854:        return (pvh);
                   1855: }
                   1856:
                   1857: /*
1.1       matt     1858:  * pmap_zero_page()
                   1859:  *
                   1860:  * Zero a given physical page by mapping it at a page hook point.
                   1861:  * In doing the zero page op, the page we zero is mapped cachable, as with
                   1862:  * StrongARM accesses to non-cached pages are non-burst making writing
                   1863:  * _any_ bulk data very slow.
                   1864:  */
                   1865: void
                   1866: pmap_zero_page(phys)
1.2       matt     1867:        paddr_t phys;
1.1       matt     1868: {
1.39      thorpej  1869:        struct pv_head *pvh;
1.1       matt     1870:
                    1871:        /* Get the PV head for this page, and clean it. */
1.39      thorpej  1872:        pvh = pmap_find_pvh(phys);
                   1873:        simple_lock(&pvh->pvh_lock);
                   1874:        pmap_clean_page(pvh->pvh_list, FALSE);
                   1875:        simple_unlock(&pvh->pvh_lock);
1.17      chris    1876:
1.1       matt     1877:        /*
                   1878:         * Hook in the page, zero it, and purge the cache for that
                   1879:         * zeroed page. Invalidate the TLB as needed.
                   1880:         */
                   1881:        *page_hook0.pte = L2_PTE(phys & PG_FRAME, AP_KRW);
                   1882:        cpu_tlb_flushD_SE(page_hook0.va);
1.32      thorpej  1883:        cpu_cpwait();
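                              /*
                               * Descriptive note (not part of the original source):
                               * cpu_cpwait() waits for the pending coprocessor operations to
                               * complete, ensuring the TLB entry flushed above is really gone
                               * before the new mapping is used by bzero_page() (compare the
                               * comment in pmap_update()).
                               */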
1.1       matt     1884:        bzero_page(page_hook0.va);
1.36      thorpej  1885:        cpu_dcache_wbinv_range(page_hook0.va, NBPG);
1.1       matt     1886: }
                   1887:
1.17      chris    1888: /* pmap_pageidlezero()
                   1889:  *
                   1890:  * The same as above, except that we assume that the page is not
                   1891:  * mapped.  This means we never have to flush the cache first.  Called
                   1892:  * from the idle loop.
                   1893:  */
                   1894: boolean_t
                   1895: pmap_pageidlezero(phys)
                    1896:        paddr_t phys;
                   1897: {
                   1898:        int i, *ptr;
                   1899:        boolean_t rv = TRUE;
                   1900:
                   1901: #ifdef DIAGNOSTIC
1.39      thorpej  1902:        struct pv_head *pvh;
1.17      chris    1903:
1.39      thorpej  1904:        pvh = pmap_find_pvh(phys);
                   1905:        if (pvh->pvh_list != NULL)
1.17      chris    1906:                panic("pmap_pageidlezero: zeroing mapped page\n");
                   1907: #endif
                   1908:
                   1909:        /*
                   1910:         * Hook in the page, zero it, and purge the cache for that
                   1911:         * zeroed page. Invalidate the TLB as needed.
                   1912:         */
                   1913:        *page_hook0.pte = L2_PTE(phys & PG_FRAME, AP_KRW);
                   1914:        cpu_tlb_flushD_SE(page_hook0.va);
1.32      thorpej  1915:        cpu_cpwait();
                   1916:
1.17      chris    1917:        for (i = 0, ptr = (int *)page_hook0.va;
                   1918:                        i < (NBPG / sizeof(int)); i++) {
                   1919:                if (sched_whichqs != 0) {
                   1920:                        /*
                   1921:                         * A process has become ready.  Abort now,
                   1922:                         * so we don't keep it waiting while we
                   1923:                         * do slow memory access to finish this
                   1924:                         * page.
                   1925:                         */
                   1926:                        rv = FALSE;
                   1927:                        break;
                   1928:                }
                   1929:                *ptr++ = 0;
                   1930:        }
                   1931:
                   1932:        if (rv)
                   1933:                /*
                   1934:                 * if we aborted we'll rezero this page again later so don't
                   1935:                 * purge it unless we finished it
                   1936:                 */
1.36      thorpej  1937:                cpu_dcache_wbinv_range(page_hook0.va, NBPG);
1.17      chris    1938:        return (rv);
                   1939: }
                   1940:
1.1       matt     1941: /*
                   1942:  * pmap_copy_page()
                   1943:  *
                   1944:  * Copy one physical page into another, by mapping the pages into
                    1945:  * hook points. The same comment regarding cacheability as in
                   1946:  * pmap_zero_page also applies here.
                   1947:  */
                   1948: void
                   1949: pmap_copy_page(src, dest)
1.2       matt     1950:        paddr_t src;
                   1951:        paddr_t dest;
1.1       matt     1952: {
1.39      thorpej  1953:        struct pv_head *src_pvh, *dest_pvh;
1.20      chris    1954:        boolean_t cleanedcache;
1.1       matt     1955:
                   1956:        /* Get PV entries for the pages, and clean them if needed. */
1.39      thorpej  1957:        src_pvh = pmap_find_pvh(src);
1.20      chris    1958:
1.39      thorpej  1959:        simple_lock(&src_pvh->pvh_lock);
                   1960:        cleanedcache = pmap_clean_page(src_pvh->pvh_list, TRUE);
                   1961:        simple_unlock(&src_pvh->pvh_lock);
1.1       matt     1962:
1.20      chris    1963:        if (cleanedcache == 0) {
1.39      thorpej  1964:                dest_pvh = pmap_find_pvh(dest);
                   1965:                simple_lock(&dest_pvh->pvh_lock);
                   1966:                pmap_clean_page(dest_pvh->pvh_list, FALSE);
                   1967:                simple_unlock(&dest_pvh->pvh_lock);
1.20      chris    1968:        }
1.1       matt     1969:        /*
                   1970:         * Map the pages into the page hook points, copy them, and purge
                   1971:         * the cache for the appropriate page. Invalidate the TLB
                   1972:         * as required.
                   1973:         */
                   1974:        *page_hook0.pte = L2_PTE(src & PG_FRAME, AP_KRW);
                   1975:        *page_hook1.pte = L2_PTE(dest & PG_FRAME, AP_KRW);
                   1976:        cpu_tlb_flushD_SE(page_hook0.va);
                   1977:        cpu_tlb_flushD_SE(page_hook1.va);
1.32      thorpej  1978:        cpu_cpwait();
1.1       matt     1979:        bcopy_page(page_hook0.va, page_hook1.va);
1.36      thorpej  1980:        cpu_dcache_wbinv_range(page_hook0.va, NBPG);
                   1981:        cpu_dcache_wbinv_range(page_hook1.va, NBPG);
1.1       matt     1982: }
                   1983:
                   1984: #if 0
                   1985: void
                   1986: pmap_pte_addref(pmap, va)
1.15      chris    1987:        struct pmap *pmap;
1.1       matt     1988:        vaddr_t va;
                   1989: {
                   1990:        pd_entry_t *pde;
1.2       matt     1991:        paddr_t pa;
1.1       matt     1992:        struct vm_page *m;
                   1993:
                   1994:        if (pmap == pmap_kernel())
                   1995:                return;
                   1996:
                   1997:        pde = pmap_pde(pmap, va & ~(3 << PDSHIFT));
                   1998:        pa = pmap_pte_pa(pde);
                   1999:        m = PHYS_TO_VM_PAGE(pa);
                   2000:        ++m->wire_count;
                   2001: #ifdef MYCROFT_HACK
                   2002:        printf("addref pmap=%p va=%08lx pde=%p pa=%08lx m=%p wire=%d\n",
                   2003:            pmap, va, pde, pa, m, m->wire_count);
                   2004: #endif
                   2005: }
                   2006:
                   2007: void
                   2008: pmap_pte_delref(pmap, va)
1.15      chris    2009:        struct pmap *pmap;
1.1       matt     2010:        vaddr_t va;
                   2011: {
                   2012:        pd_entry_t *pde;
1.2       matt     2013:        paddr_t pa;
1.1       matt     2014:        struct vm_page *m;
                   2015:
                   2016:        if (pmap == pmap_kernel())
                   2017:                return;
                   2018:
                   2019:        pde = pmap_pde(pmap, va & ~(3 << PDSHIFT));
                   2020:        pa = pmap_pte_pa(pde);
                   2021:        m = PHYS_TO_VM_PAGE(pa);
                   2022:        --m->wire_count;
                   2023: #ifdef MYCROFT_HACK
                   2024:        printf("delref pmap=%p va=%08lx pde=%p pa=%08lx m=%p wire=%d\n",
                   2025:            pmap, va, pde, pa, m, m->wire_count);
                   2026: #endif
                   2027:        if (m->wire_count == 0) {
                   2028: #ifdef MYCROFT_HACK
                   2029:                printf("delref pmap=%p va=%08lx pde=%p pa=%08lx m=%p\n",
                   2030:                    pmap, va, pde, pa, m);
                   2031: #endif
                   2032:                pmap_unmap_in_l1(pmap, va);
                   2033:                uvm_pagefree(m);
                   2034:                --pmap->pm_stats.resident_count;
                   2035:        }
                   2036: }
                   2037: #else
                   2038: #define        pmap_pte_addref(pmap, va)
                   2039: #define        pmap_pte_delref(pmap, va)
                   2040: #endif
                   2041:
                   2042: /*
                   2043:  * Since we have a virtually indexed cache, we may need to inhibit caching if
                   2044:  * there is more than one mapping and at least one of them is writable.
                   2045:  * Since we purge the cache on every context switch, we only need to check for
                   2046:  * other mappings within the same pmap, or kernel_pmap.
                   2047:  * This function is also called when a page is unmapped, to possibly reenable
                   2048:  * caching on any remaining mappings.
1.28      rearnsha 2049:  *
                   2050:  * The code implements the following logic, where:
                   2051:  *
                   2052:  * KW = # of kernel read/write pages
                   2053:  * KR = # of kernel read only pages
                   2054:  * UW = # of user read/write pages
                   2055:  * UR = # of user read only pages
                   2056:  * OW = # of user read/write pages in another pmap, then
                   2057:  *
                   2058:  * KC = kernel mapping is cacheable
                   2059:  * UC = user mapping is cacheable
                   2060:  *
                   2061:  *                     KW=0,KR=0  KW=0,KR>0  KW=1,KR=0  KW>1,KR>=0
                   2062:  *                   +---------------------------------------------
                   2063:  * UW=0,UR=0,OW=0    | ---        KC=1       KC=1       KC=0
                   2064:  * UW=0,UR>0,OW=0    | UC=1       KC=1,UC=1  KC=0,UC=0  KC=0,UC=0
                   2065:  * UW=0,UR>0,OW>0    | UC=1       KC=0,UC=1  KC=0,UC=0  KC=0,UC=0
                   2066:  * UW=1,UR=0,OW=0    | UC=1       KC=0,UC=0  KC=0,UC=0  KC=0,UC=0
                   2067:  * UW>1,UR>=0,OW>=0  | UC=0       KC=0,UC=0  KC=0,UC=0  KC=0,UC=0
1.11      chris    2068:  *
                   2069:  * Note that the pmap must have its ptes mapped in, and passed with ptes.
1.1       matt     2070:  */
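/*
 * A worked reading of the table above (illustrative only): a page with a
 * single kernel read/write mapping (KW=1, KR=0) and one user read-only
 * mapping (UW=0, UR>0, OW=0) falls in the third column, second row, so
 * both mappings are made non-cacheable (KC=0, UC=0).  If the kernel
 * mapping is later removed (KW=0, KR=0), the first column applies and
 * the user mapping becomes cacheable again (UC=1).
 */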
1.25      rearnsha 2071: __inline static void
1.39      thorpej  2072: pmap_vac_me_harder(struct pmap *pmap, struct pv_head *pvh, pt_entry_t *ptes,
1.12      chris    2073:        boolean_t clear_cache)
1.1       matt     2074: {
1.25      rearnsha 2075:        if (pmap == pmap_kernel())
1.39      thorpej  2076:                pmap_vac_me_kpmap(pmap, pvh, ptes, clear_cache);
1.25      rearnsha 2077:        else
1.39      thorpej  2078:                pmap_vac_me_user(pmap, pvh, ptes, clear_cache);
1.25      rearnsha 2079: }
                   2080:
                   2081: static void
1.39      thorpej  2082: pmap_vac_me_kpmap(struct pmap *pmap, struct pv_head *pvh, pt_entry_t *ptes,
1.25      rearnsha 2083:        boolean_t clear_cache)
                   2084: {
                   2085:        int user_entries = 0;
                   2086:        int user_writable = 0;
                   2087:        int user_cacheable = 0;
                   2088:        int kernel_entries = 0;
                   2089:        int kernel_writable = 0;
                   2090:        int kernel_cacheable = 0;
                   2091:        struct pv_entry *pv;
                   2092:        struct pmap *last_pmap = pmap;
                   2093:
                   2094: #ifdef DIAGNOSTIC
                   2095:        if (pmap != pmap_kernel())
                   2096:                panic("pmap_vac_me_kpmap: pmap != pmap_kernel()");
                   2097: #endif
                   2098:
                   2099:        /*
                   2100:         * Pass one, see if there are both kernel and user pmaps for
                   2101:         * this page.  Calculate whether there are user-writable or
                   2102:         * kernel-writable pages.
                   2103:         */
1.39      thorpej  2104:        for (pv = pvh->pvh_list; pv != NULL; pv = pv->pv_next) {
1.25      rearnsha 2105:                if (pv->pv_pmap != pmap) {
                   2106:                        user_entries++;
                   2107:                        if (pv->pv_flags & PT_Wr)
                   2108:                                user_writable++;
                   2109:                        if ((pv->pv_flags & PT_NC) == 0)
                   2110:                                user_cacheable++;
                   2111:                } else {
                   2112:                        kernel_entries++;
                   2113:                        if (pv->pv_flags & PT_Wr)
                   2114:                                kernel_writable++;
                   2115:                        if ((pv->pv_flags & PT_NC) == 0)
                   2116:                                kernel_cacheable++;
                   2117:                }
                   2118:        }
                   2119:
                   2120:        /*
                   2121:         * We know we have just been updating a kernel entry, so if
                   2122:         * no kernel entries remain and all user pages are already
                   2123:         * cacheable, then there is nothing further to do.
                   2124:         */
                   2125:        if (kernel_entries == 0 &&
                   2126:            user_cacheable == user_entries)
                   2127:                return;
                   2128:
                   2129:        if (user_entries) {
                   2130:                /*
                   2131:                 * Scan over the list again; for each entry that
                   2132:                 * might not be set correctly, call pmap_vac_me_user
                   2133:                 * to recalculate the settings.
                   2134:                 */
1.39      thorpej  2135:                for (pv = pvh->pvh_list; pv; pv = pv->pv_next) {
1.25      rearnsha 2136:                        /*
                   2137:                         * We know kernel mappings will get set
                   2138:                         * correctly in other calls.  We also know
                   2139:                         * that if the pmap is the same as last_pmap
                   2140:                         * then we've just handled this entry.
                   2141:                         */
                   2142:                        if (pv->pv_pmap == pmap || pv->pv_pmap == last_pmap)
                   2143:                                continue;
                   2144:                        /*
                   2145:                         * If there are kernel entries and this page
                   2146:                         * is writable but non-cacheable, then we can
                   2147:                         * skip this entry also.
                   2148:                         */
                   2149:                        if (kernel_entries > 0 &&
                   2150:                            (pv->pv_flags & (PT_NC | PT_Wr)) ==
                   2151:                            (PT_NC | PT_Wr))
                   2152:                                continue;
                   2153:                        /*
                   2154:                         * Similarly if there are no kernel-writable
                   2155:                         * entries and the page is already
                   2156:                         * read-only/cacheable.
                   2157:                         */
                   2158:                        if (kernel_writable == 0 &&
                   2159:                            (pv->pv_flags & (PT_NC | PT_Wr)) == 0)
                   2160:                                continue;
                   2161:                        /*
                   2162:                         * For some of the remaining cases, we know
                   2163:                         * that we must recalculate, but for others we
                   2164:                         * can't tell if they are correct or not, so
                   2165:                         * we recalculate anyway.
                   2166:                         */
                   2167:                        pmap_unmap_ptes(last_pmap);
                   2168:                        last_pmap = pv->pv_pmap;
                   2169:                        ptes = pmap_map_ptes(last_pmap);
1.39      thorpej  2170:                        pmap_vac_me_user(last_pmap, pvh, ptes,
1.25      rearnsha 2171:                            pmap_is_curpmap(last_pmap));
                   2172:                }
                   2173:                /* Restore the pte mapping that was passed to us.  */
                   2174:                if (last_pmap != pmap) {
                   2175:                        pmap_unmap_ptes(last_pmap);
                   2176:                        ptes = pmap_map_ptes(pmap);
                   2177:                }
                   2178:                if (kernel_entries == 0)
                   2179:                        return;
                   2180:        }
                   2181:
1.39      thorpej  2182:        pmap_vac_me_user(pmap, pvh, ptes, clear_cache);
1.25      rearnsha 2183:        return;
                   2184: }
                   2185:
                   2186: static void
1.39      thorpej  2187: pmap_vac_me_user(struct pmap *pmap, struct pv_head *pvh, pt_entry_t *ptes,
1.25      rearnsha 2188:        boolean_t clear_cache)
                   2189: {
                   2190:        struct pmap *kpmap = pmap_kernel();
1.17      chris    2191:        struct pv_entry *pv, *npv;
1.1       matt     2192:        int entries = 0;
1.25      rearnsha 2193:        int writable = 0;
1.12      chris    2194:        int cacheable_entries = 0;
1.25      rearnsha 2195:        int kern_cacheable = 0;
                   2196:        int other_writable = 0;
1.1       matt     2197:
1.39      thorpej  2198:        pv = pvh->pvh_list;
1.11      chris    2199:        KASSERT(ptes != NULL);
1.1       matt     2200:
                   2201:        /*
                   2202:         * Count mappings and writable mappings in this pmap.
1.25      rearnsha 2203:         * Include kernel mappings as part of our own.
1.1       matt     2204:         * Keep a pointer to the first one.
                   2205:         */
                   2206:        for (npv = pv; npv; npv = npv->pv_next) {
                   2207:                /* Count mappings in the same pmap */
1.25      rearnsha 2208:                if (pmap == npv->pv_pmap ||
                   2209:                    kpmap == npv->pv_pmap) {
1.1       matt     2210:                        if (entries++ == 0)
                   2211:                                pv = npv;
1.12      chris    2212:                        /* Cacheable mappings */
1.25      rearnsha 2213:                        if ((npv->pv_flags & PT_NC) == 0) {
1.12      chris    2214:                                cacheable_entries++;
1.25      rearnsha 2215:                                if (kpmap == npv->pv_pmap)
                   2216:                                        kern_cacheable++;
                   2217:                        }
                   2218:                        /* Writable mappings */
1.1       matt     2219:                        if (npv->pv_flags & PT_Wr)
1.25      rearnsha 2220:                                ++writable;
                   2221:                } else if (npv->pv_flags & PT_Wr)
                   2222:                        other_writable = 1;
1.1       matt     2223:        }
                   2224:
1.12      chris    2225:        PDEBUG(3,printf("pmap_vac_me_harder: pmap %p Entries %d, "
1.25      rearnsha 2226:                "writable %d cacheable %d %s\n", pmap, entries, writable,
1.12      chris    2227:                cacheable_entries, clear_cache ? "clean" : "no clean"));
                   2228:
1.1       matt     2229:        /*
                   2230:         * Enable or disable caching as necessary.
1.25      rearnsha 2231:         * Note: the first entry might be part of the kernel pmap,
                   2232:         * so we can't assume this is indicative of the state of the
                   2233:         * other (maybe non-kpmap) entries.
1.1       matt     2234:         */
1.25      rearnsha 2235:        if ((entries > 1 && writable) ||
                   2236:            (entries > 0 && pmap == kpmap && other_writable)) {
1.12      chris    2237:                if (cacheable_entries == 0)
                   2238:                    return;
1.25      rearnsha 2239:                for (npv = pv; npv; npv = npv->pv_next) {
                   2240:                        if ((pmap == npv->pv_pmap
                   2241:                            || kpmap == npv->pv_pmap) &&
1.12      chris    2242:                            (npv->pv_flags & PT_NC) == 0) {
                   2243:                                ptes[arm_byte_to_page(npv->pv_va)] &=
1.11      chris    2244:                                    ~(PT_C | PT_B);
1.12      chris    2245:                                npv->pv_flags |= PT_NC;
1.25      rearnsha 2246:                                /*
                   2247:                                 * If this page needs flushing from the
                   2248:                                 * cache, and we aren't going to do it
                   2249:                                 * below, do it now.
                   2250:                                 */
                   2251:                                if ((cacheable_entries < 4 &&
                   2252:                                    (clear_cache || npv->pv_pmap == kpmap)) ||
                   2253:                                    (npv->pv_pmap == kpmap &&
                   2254:                                    !clear_cache && kern_cacheable < 4)) {
1.36      thorpej  2255:                                        cpu_idcache_wbinv_range(npv->pv_va,
1.12      chris    2256:                                            NBPG);
                   2257:                                        cpu_tlb_flushID_SE(npv->pv_va);
                   2258:                                }
1.1       matt     2259:                        }
                   2260:                }
1.25      rearnsha 2261:                if ((clear_cache && cacheable_entries >= 4) ||
                   2262:                    kern_cacheable >= 4) {
1.36      thorpej  2263:                        cpu_idcache_wbinv_all();
1.12      chris    2264:                        cpu_tlb_flushID();
                   2265:                }
1.32      thorpej  2266:                cpu_cpwait();
1.1       matt     2267:        } else if (entries > 0) {
1.25      rearnsha 2268:                /*
                   2269:                 * Turn caching back on for some pages.  If it is a kernel
                   2270:                 * page, only do so if there are no other writable pages.
                   2271:                 */
                   2272:                for (npv = pv; npv; npv = npv->pv_next) {
                   2273:                        if ((pmap == npv->pv_pmap ||
                   2274:                            (kpmap == npv->pv_pmap && other_writable == 0)) &&
                   2275:                            (npv->pv_flags & PT_NC)) {
1.11      chris    2276:                                ptes[arm_byte_to_page(npv->pv_va)] |=
1.27      rearnsha 2277:                                    pte_cache_mode;
1.12      chris    2278:                                npv->pv_flags &= ~PT_NC;
1.1       matt     2279:                        }
                   2280:                }
                   2281:        }
                   2282: }
                   2283:
                   2284: /*
                   2285:  * pmap_remove()
                   2286:  *
                   2287:  * pmap_remove is responsible for nuking a number of mappings for a range
                   2288:  * of virtual address space in the current pmap. To do this efficiently
                   2289:  * is interesting, because in a number of cases a wide virtual address
                   2290:  * range may be supplied that contains few actual mappings. So, the
                   2291:  * optimisations are:
                   2292:  *  1. Try to skip over hunks of address space for which an L1 entry
                   2293:  *     does not exist.
                   2294:  *  2. Build up a list of pages we've hit, up to a maximum, so we can
                   2295:  *     maybe do just a partial cache clean (see the worked example
                   2296:  *     below). This path of execution is complicated by the fact that
                   2297:  *     the cache must be flushed _before_ the PTE is nuked, being a VAC :-)
                   2298:  *  3. Maybe later fast-case a single page, but I don't think this is
                   2299:  *     going to make _that_ much difference overall.
                   2300:  */
                   2301:
                   2302: #define PMAP_REMOVE_CLEAN_LIST_SIZE    3
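
/*
 * Worked example of the clean-list handling described above (illustrative
 * only): with a list size of 3, removing a range containing five valid
 * PTEs queues the first three on cleanlist[].  Hitting the fourth
 * overflows the list: if the pmap is active the whole cache and TLB are
 * invalidated once, the queued PTEs and the fourth are zeroed, and
 * cleanlist_idx is bumped past the limit so the fifth (and any later)
 * PTE is simply zeroed with no further cache work.  Only if the loop
 * finishes without overflowing is the per-page clean at the bottom of
 * pmap_remove() used.
 */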
                   2303:
                   2304: void
                   2305: pmap_remove(pmap, sva, eva)
1.15      chris    2306:        struct pmap *pmap;
1.1       matt     2307:        vaddr_t sva;
                   2308:        vaddr_t eva;
                   2309: {
                   2310:        int cleanlist_idx = 0;
                   2311:        struct pagelist {
                   2312:                vaddr_t va;
                   2313:                pt_entry_t *pte;
                   2314:        } cleanlist[PMAP_REMOVE_CLEAN_LIST_SIZE];
1.11      chris    2315:        pt_entry_t *pte = 0, *ptes;
1.2       matt     2316:        paddr_t pa;
1.1       matt     2317:        int pmap_active;
1.39      thorpej  2318:        struct pv_head *pvh;
1.1       matt     2319:
                   2320:        /* Exit quick if there is no pmap */
                   2321:        if (!pmap)
                   2322:                return;
                   2323:
                   2324:        PDEBUG(0, printf("pmap_remove: pmap=%p sva=%08lx eva=%08lx\n", pmap, sva, eva));
                   2325:
                   2326:        sva &= PG_FRAME;
                   2327:        eva &= PG_FRAME;
                   2328:
1.17      chris    2329:        /*
1.39      thorpej  2330:         * we lock in the pmap => pv_head direction
1.17      chris    2331:         */
                   2332:        PMAP_MAP_TO_HEAD_LOCK();
                   2333:
1.11      chris    2334:        ptes = pmap_map_ptes(pmap);
1.1       matt     2335:        /* Get a page table pointer */
                   2336:        while (sva < eva) {
1.30      rearnsha 2337:                if (pmap_pde_page(pmap_pde(pmap, sva)))
1.1       matt     2338:                        break;
                   2339:                sva = (sva & PD_MASK) + NBPD;
                   2340:        }
1.11      chris    2341:
                   2342:        pte = &ptes[arm_byte_to_page(sva)];
1.1       matt     2343:        /* Note if the pmap is active, and thus requires cache and TLB cleans */
                   2344:        if ((curproc && curproc->p_vmspace->vm_map.pmap == pmap)
1.15      chris    2345:            || (pmap == pmap_kernel()))
1.1       matt     2346:                pmap_active = 1;
                   2347:        else
                   2348:                pmap_active = 0;
                   2349:
                   2350:        /* Now loop along */
                   2351:        while (sva < eva) {
                   2352:                /* Check if we can move to the next PDE (l1 chunk) */
                   2353:                if (!(sva & PT_MASK))
1.30      rearnsha 2354:                        if (!pmap_pde_page(pmap_pde(pmap, sva))) {
1.1       matt     2355:                                sva += NBPD;
                   2356:                                pte += arm_byte_to_page(NBPD);
                   2357:                                continue;
                   2358:                        }
                   2359:
                   2360:                /* We've found a valid PTE, so this page of PTEs has to go. */
                   2361:                if (pmap_pte_v(pte)) {
1.39      thorpej  2362:                        int bank, off;
                   2363:
1.1       matt     2364:                        /* Update statistics */
                   2365:                        --pmap->pm_stats.resident_count;
                   2366:
                   2367:                        /*
                   2368:                         * Add this page to our cache remove list, if we can.
                   2369:                         * If, however, the cache remove list is totally full,
                   2370:                         * then do a complete cache invalidation, taking care
                   2371:                         * to backtrack the PTE table beforehand, and ignore
                   2372:                         * the lists in future because there's no longer any
                   2373:                         * point in bothering with them (we've paid the
                   2374:                         * penalty, so will carry on unhindered). Otherwise,
                   2375:                         * when we fall out, we just clean the list.
                   2376:                         */
                   2377:                        PDEBUG(10, printf("remove: inv pte at %p(%x) ", pte, *pte));
                   2378:                        pa = pmap_pte_pa(pte);
                   2379:
                   2380:                        if (cleanlist_idx < PMAP_REMOVE_CLEAN_LIST_SIZE) {
                   2381:                                /* Add to the clean list. */
                   2382:                                cleanlist[cleanlist_idx].pte = pte;
                   2383:                                cleanlist[cleanlist_idx].va = sva;
                   2384:                                cleanlist_idx++;
                   2385:                        } else if (cleanlist_idx == PMAP_REMOVE_CLEAN_LIST_SIZE) {
                   2386:                                int cnt;
                   2387:
                   2388:                                /* Nuke everything if needed. */
                   2389:                                if (pmap_active) {
1.36      thorpej  2390:                                        cpu_idcache_wbinv_all();
1.1       matt     2391:                                        cpu_tlb_flushID();
                   2392:                                }
                   2393:
                   2394:                                /*
                   2395:                                 * Roll back the previous PTE list,
                   2396:                                 * and zero out the current PTE.
                   2397:                                 */
                   2398:                                for (cnt = 0; cnt < PMAP_REMOVE_CLEAN_LIST_SIZE; cnt++) {
                   2399:                                        *cleanlist[cnt].pte = 0;
                   2400:                                        pmap_pte_delref(pmap, cleanlist[cnt].va);
                   2401:                                }
                   2402:                                *pte = 0;
                   2403:                                pmap_pte_delref(pmap, sva);
                   2404:                                cleanlist_idx++;
                   2405:                        } else {
                   2406:                                /*
                   2407:                                 * We've already nuked the cache and
                   2408:                                 * TLB, so just carry on regardless,
                   2409:                                 * and we won't need to do it again
                   2410:                                 */
                   2411:                                *pte = 0;
                   2412:                                pmap_pte_delref(pmap, sva);
                   2413:                        }
                   2414:
                   2415:                        /*
                   2416:                         * Update flags. In a number of circumstances,
                   2417:                         * we could cluster a lot of these and do a
                   2418:                         * number of sequential pages in one go.
                   2419:                         */
1.39      thorpej  2420:                        if ((bank = vm_physseg_find(atop(pa), &off)) != -1) {
1.17      chris    2421:                                struct pv_entry *pve;
1.39      thorpej  2422:                                pvh = &vm_physmem[bank].pmseg.pvhead[off];
                   2423:                                simple_lock(&pvh->pvh_lock);
                   2424:                                pve = pmap_remove_pv(pvh, pmap, sva);
1.17      chris    2425:                                pmap_free_pv(pmap, pve);
1.39      thorpej  2426:                                pmap_vac_me_harder(pmap, pvh, ptes, FALSE);
                   2427:                                simple_unlock(&pvh->pvh_lock);
1.1       matt     2428:                        }
                   2429:                }
                   2430:                sva += NBPG;
                   2431:                pte++;
                   2432:        }
                   2433:
1.11      chris    2434:        pmap_unmap_ptes(pmap);
1.1       matt     2435:        /*
                   2436:         * Now, if we've fallen through to here, chances are that there
                   2437:         * are fewer than PMAP_REMOVE_CLEAN_LIST_SIZE mappings left.
                   2438:         */
                   2439:        if (cleanlist_idx <= PMAP_REMOVE_CLEAN_LIST_SIZE) {
                   2440:                u_int cnt;
                   2441:
                   2442:                for (cnt = 0; cnt < cleanlist_idx; cnt++) {
                   2443:                        if (pmap_active) {
1.36      thorpej  2444:                                cpu_idcache_wbinv_range(cleanlist[cnt].va,
                   2445:                                    NBPG);
1.1       matt     2446:                                *cleanlist[cnt].pte = 0;
                   2447:                                cpu_tlb_flushID_SE(cleanlist[cnt].va);
                   2448:                        } else
                   2449:                                *cleanlist[cnt].pte = 0;
                   2450:                        pmap_pte_delref(pmap, cleanlist[cnt].va);
                   2451:                }
                   2452:        }
1.17      chris    2453:        PMAP_MAP_TO_HEAD_UNLOCK();
1.1       matt     2454: }
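
/*
 * Usage sketch (assumed caller, not taken from this file): tearing down a
 * single-page mapping previously established with pmap_enter().  Cache
 * and TLB maintenance for the removed mapping is handled inside
 * pmap_remove() itself.
 */
#if 0
static void
example_remove_one_page(vaddr_t tmp_va)
{

	/* Remove any mapping of the single page at tmp_va. */
	pmap_remove(pmap_kernel(), tmp_va, tmp_va + NBPG);
}
#endif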
                   2455:
                   2456: /*
                   2457:  * Routine:    pmap_remove_all
                   2458:  * Function:
                   2459:  *             Removes this physical page from
                   2460:  *             all physical maps in which it resides.
                   2461:  *             Reflects back modify bits to the pager.
                   2462:  */
                   2463:
1.33      chris    2464: static void
1.39      thorpej  2465: pmap_remove_all(pa)
                   2466:        paddr_t pa;
1.1       matt     2467: {
1.17      chris    2468:        struct pv_entry *pv, *npv;
1.39      thorpej  2469:        struct pv_head *pvh;
1.15      chris    2470:        struct pmap *pmap;
1.11      chris    2471:        pt_entry_t *pte, *ptes;
1.1       matt     2472:
1.39      thorpej  2473:        PDEBUG(0, printf("pmap_remove_all: pa=%lx ", pa));
1.1       matt     2474:
1.39      thorpej  2475:        /* set pv_head => pmap locking */
1.17      chris    2476:        PMAP_HEAD_TO_MAP_LOCK();
1.1       matt     2477:
1.39      thorpej  2478:        pvh = pmap_find_pvh(pa);
                   2479:        simple_lock(&pvh->pvh_lock);
1.17      chris    2480:
1.39      thorpej  2481:        pv = pvh->pvh_list;
                   2482:        if (pv == NULL)
                   2483:        {
                   2484:            PDEBUG(0, printf("free page\n"));
                   2485:            simple_unlock(&pvh->pvh_lock);
                   2486:            PMAP_HEAD_TO_MAP_UNLOCK();
                   2487:            return;
1.1       matt     2488:        }
1.17      chris    2489:        pmap_clean_page(pv, FALSE);
1.1       matt     2490:
                   2491:        while (pv) {
                   2492:                pmap = pv->pv_pmap;
1.11      chris    2493:                ptes = pmap_map_ptes(pmap);
                   2494:                pte = &ptes[arm_byte_to_page(pv->pv_va)];
1.1       matt     2495:
                   2496:                PDEBUG(0, printf("[%p,%08x,%08lx,%08x] ", pmap, *pte,
                   2497:                    pv->pv_va, pv->pv_flags));
                   2498: #ifdef DEBUG
1.32      thorpej  2499:                if (!pmap_pde_page(pmap_pde(pmap, pv->pv_va)) ||
1.30      rearnsha 2500:                    !pmap_pte_v(pte) || pmap_pte_pa(pte) != pa)
1.1       matt     2501:                        panic("pmap_remove_all: bad mapping");
                   2502: #endif /* DEBUG */
                   2503:
                   2504:                /*
                   2505:                 * Update statistics
                   2506:                 */
                   2507:                --pmap->pm_stats.resident_count;
                   2508:
                   2509:                /* Wired bit */
                   2510:                if (pv->pv_flags & PT_W)
                   2511:                        --pmap->pm_stats.wired_count;
                   2512:
                   2513:                /*
                   2514:                 * Invalidate the PTEs.
                   2515:                 * XXX: should cluster them up and invalidate as many
                   2516:                 * as possible at once.
                   2517:                 */
                   2518:
                   2519: #ifdef needednotdone
                   2520: reduce wiring count on page table pages as references drop
                   2521: #endif
                   2522:
                   2523:                *pte = 0;
                   2524:                pmap_pte_delref(pmap, pv->pv_va);
                   2525:
                   2526:                npv = pv->pv_next;
1.17      chris    2527:                pmap_free_pv(pmap, pv);
1.1       matt     2528:                pv = npv;
1.11      chris    2529:                pmap_unmap_ptes(pmap);
1.1       matt     2530:        }
1.39      thorpej  2531:        pvh->pvh_list = NULL;
                   2532:        simple_unlock(&pvh->pvh_lock);
1.17      chris    2533:        PMAP_HEAD_TO_MAP_UNLOCK();
1.1       matt     2534:
                   2535:        PDEBUG(0, printf("done\n"));
                   2536:        cpu_tlb_flushID();
1.32      thorpej  2537:        cpu_cpwait();
1.1       matt     2538: }
                   2539:
                   2540:
                   2541: /*
                   2542:  * Set the physical protection on the specified range of this map as requested.
                   2543:  */
                   2544:
                   2545: void
                   2546: pmap_protect(pmap, sva, eva, prot)
1.15      chris    2547:        struct pmap *pmap;
1.1       matt     2548:        vaddr_t sva;
                   2549:        vaddr_t eva;
                   2550:        vm_prot_t prot;
                   2551: {
1.11      chris    2552:        pt_entry_t *pte = NULL, *ptes;
1.1       matt     2553:        int armprot;
                   2554:        int flush = 0;
1.2       matt     2555:        paddr_t pa;
1.39      thorpej  2556:        int bank, off;
                   2557:        struct pv_head *pvh;
1.1       matt     2558:
                   2559:        PDEBUG(0, printf("pmap_protect: pmap=%p %08lx->%08lx %x\n",
                   2560:            pmap, sva, eva, prot));
                   2561:
                   2562:        if (~prot & VM_PROT_READ) {
                   2563:                /* Just remove the mappings. */
                   2564:                pmap_remove(pmap, sva, eva);
1.33      chris    2565:                /* pmap_update not needed as it should be called by the caller
                   2566:                 * of pmap_protect */
1.1       matt     2567:                return;
                   2568:        }
                   2569:        if (prot & VM_PROT_WRITE) {
                   2570:                /*
                   2571:                 * If this is a read->write transition, just ignore it and let
                   2572:                 * uvm_fault() take care of it later.
                   2573:                 */
                   2574:                return;
                   2575:        }
                   2576:
                   2577:        sva &= PG_FRAME;
                   2578:        eva &= PG_FRAME;
                   2579:
1.17      chris    2580:        /* Need to lock map->head */
                   2581:        PMAP_MAP_TO_HEAD_LOCK();
                   2582:
1.11      chris    2583:        ptes = pmap_map_ptes(pmap);
1.1       matt     2584:        /*
                   2585:         * We need to acquire a pointer to a page table page before entering
                   2586:         * the following loop.
                   2587:         */
                   2588:        while (sva < eva) {
1.30      rearnsha 2589:                if (pmap_pde_page(pmap_pde(pmap, sva)))
1.1       matt     2590:                        break;
                   2591:                sva = (sva & PD_MASK) + NBPD;
                   2592:        }
1.11      chris    2593:
                   2594:        pte = &ptes[arm_byte_to_page(sva)];
1.17      chris    2595:
1.1       matt     2596:        while (sva < eva) {
                   2597:                /* only check once in a while */
                   2598:                if ((sva & PT_MASK) == 0) {
1.30      rearnsha 2599:                        if (!pmap_pde_page(pmap_pde(pmap, sva))) {
1.1       matt     2600:                                /* We can race ahead here, to the next pde. */
                   2601:                                sva += NBPD;
                   2602:                                pte += arm_byte_to_page(NBPD);
                   2603:                                continue;
                   2604:                        }
                   2605:                }
                   2606:
                   2607:                if (!pmap_pte_v(pte))
                   2608:                        goto next;
                   2609:
                   2610:                flush = 1;
                   2611:
                   2612:                armprot = 0;
                   2613:                if (sva < VM_MAXUSER_ADDRESS)
                   2614:                        armprot |= PT_AP(AP_U);
                   2615:                else if (sva < VM_MAX_ADDRESS)
                   2616:                        armprot |= PT_AP(AP_W);  /* XXX Ekk what is this ? */
                   2617:                *pte = (*pte & 0xfffff00f) | armprot;
                   2618:
                   2619:                pa = pmap_pte_pa(pte);
                   2620:
                   2621:                /* Get the physical page index */
                   2622:
                   2623:                /* Clear write flag */
1.39      thorpej  2624:                if ((bank = vm_physseg_find(atop(pa), &off)) != -1) {
                   2625:                        pvh = &vm_physmem[bank].pmseg.pvhead[off];
                   2626:                        simple_lock(&pvh->pvh_lock);
                   2627:                        (void) pmap_modify_pv(pmap, sva, pvh, PT_Wr, 0);
                   2628:                        pmap_vac_me_harder(pmap, pvh, ptes, FALSE);
                   2629:                        simple_unlock(&pvh->pvh_lock);
1.1       matt     2630:                }
                   2631:
                   2632: next:
                   2633:                sva += NBPG;
                   2634:                pte++;
                   2635:        }
1.11      chris    2636:        pmap_unmap_ptes(pmap);
1.17      chris    2637:        PMAP_MAP_TO_HEAD_UNLOCK();
1.1       matt     2638:        if (flush)
                   2639:                cpu_tlb_flushID();
                   2640: }
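
/*
 * Usage sketch (assumed caller, not taken from this file): write-protecting
 * a range, e.g. when a region is marked copy-on-write.  Adding permissions
 * through pmap_protect() is a no-op; that is left to uvm_fault().
 */
#if 0
static void
example_write_protect(struct pmap *pm, vaddr_t sva, vaddr_t eva)
{

	/* Strip write permission from every mapping in [sva, eva). */
	pmap_protect(pm, sva, eva, VM_PROT_READ);
}
#endif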
                   2641:
                   2642: /*
1.15      chris    2643:  * void pmap_enter(struct pmap *pmap, vaddr_t va, paddr_t pa, vm_prot_t prot,
1.1       matt     2644:  * int flags)
                   2645:  *
                   2646:  *      Insert the given physical page (p) at
                   2647:  *      the specified virtual address (v) in the
                   2648:  *      target physical map with the protection requested.
                   2649:  *
                   2650:  *      If specified, the page will be wired down, meaning
                   2651:  *      that the related pte cannot be reclaimed.
                   2652:  *
                   2653:  *      NB:  This is the only routine which MAY NOT lazy-evaluate
                   2654:  *      or lose information.  That is, this routine must actually
                   2655:  *      insert this page into the given map NOW.
                   2656:  */
                   2657:
                   2658: int
                   2659: pmap_enter(pmap, va, pa, prot, flags)
1.15      chris    2660:        struct pmap *pmap;
1.1       matt     2661:        vaddr_t va;
1.2       matt     2662:        paddr_t pa;
1.1       matt     2663:        vm_prot_t prot;
                   2664:        int flags;
                   2665: {
1.11      chris    2666:        pt_entry_t *pte, *ptes;
1.1       matt     2667:        u_int npte;
1.39      thorpej  2668:        int bank, off;
1.2       matt     2669:        paddr_t opa;
1.1       matt     2670:        int nflags;
                   2671:        boolean_t wired = (flags & PMAP_WIRED) != 0;
1.17      chris    2672:        struct pv_entry *pve;
1.39      thorpej  2673:        struct pv_head  *pvh;
1.17      chris    2674:        int error;
1.1       matt     2675:
                   2676:        PDEBUG(5, printf("pmap_enter: V%08lx P%08lx in pmap %p prot=%08x, wired = %d\n",
                   2677:            va, pa, pmap, prot, wired));
                   2678:
                   2679: #ifdef DIAGNOSTIC
                   2680:        /* Valid address ? */
                   2681:        if (va >= (KERNEL_VM_BASE + KERNEL_VM_SIZE))
                   2682:                panic("pmap_enter: too big");
                   2683:        if (pmap != pmap_kernel() && va != 0) {
                   2684:                if (va < VM_MIN_ADDRESS || va >= VM_MAXUSER_ADDRESS)
                   2685:                        panic("pmap_enter: kernel page in user map");
                   2686:        } else {
                   2687:                if (va >= VM_MIN_ADDRESS && va < VM_MAXUSER_ADDRESS)
                   2688:                        panic("pmap_enter: user page in kernel map");
                   2689:                if (va >= VM_MAXUSER_ADDRESS && va < VM_MAX_ADDRESS)
                   2690:                        panic("pmap_enter: entering PT page");
                   2691:        }
                   2692: #endif
1.17      chris    2693:        /* get lock */
                   2694:        PMAP_MAP_TO_HEAD_LOCK();
1.1       matt     2695:        /*
                   2696:         * Get a pointer to the pte for this virtual address. If the
                   2697:         * pte pointer is NULL then we are missing the L2 page table
                   2698:         * so we need to create one.
                   2699:         */
1.24      chris    2700:        /* XXX horrible hack to get us working with lockdebug */
                   2701:        simple_lock(&pmap->pm_obj.vmobjlock);
1.1       matt     2702:        pte = pmap_pte(pmap, va);
                   2703:        if (!pte) {
1.17      chris    2704:                struct vm_page *ptp;
                   2705:
                   2706:                /* if failure is allowed then don't try too hard */
                   2707:                ptp = pmap_get_ptp(pmap, va, flags & PMAP_CANFAIL);
                   2708:                if (ptp == NULL) {
                   2709:                        if (flags & PMAP_CANFAIL) {
                   2710:                                error = ENOMEM;
                   2711:                                goto out;
                   2712:                        }
                   2713:                        panic("pmap_enter: get ptp failed");
1.1       matt     2714:                }
1.16      chris    2715:
1.1       matt     2716:                pte = pmap_pte(pmap, va);
                   2717: #ifdef DIAGNOSTIC
                   2718:                if (!pte)
                   2719:                        panic("pmap_enter: no pte");
                   2720: #endif
                   2721:        }
                   2722:
                   2723:        nflags = 0;
                   2724:        if (prot & VM_PROT_WRITE)
                   2725:                nflags |= PT_Wr;
                   2726:        if (wired)
                   2727:                nflags |= PT_W;
                   2728:
                   2729:        /* More debugging info */
                   2730:        PDEBUG(5, printf("pmap_enter: pte for V%08lx = V%p (%08x)\n", va, pte,
                   2731:            *pte));
                   2732:
                   2733:        /* Is the pte valid ? If so then this page is already mapped */
                   2734:        if (pmap_pte_v(pte)) {
                   2735:                /* Get the physical address of the current page mapped */
                   2736:                opa = pmap_pte_pa(pte);
                   2737:
                   2738: #ifdef MYCROFT_HACK
                   2739:                printf("pmap_enter: pmap=%p va=%lx pa=%lx opa=%lx\n", pmap, va, pa, opa);
                   2740: #endif
                   2741:
                   2742:                /* Are we mapping the same page ? */
                   2743:                if (opa == pa) {
                   2744:                        /* All we must be doing is changing the protection */
                   2745:                        PDEBUG(0, printf("Case 02 in pmap_enter (V%08lx P%08lx)\n",
                   2746:                            va, pa));
                   2747:
                   2748:                        /* Has the wiring changed ? */
1.39      thorpej  2749:                        if ((bank = vm_physseg_find(atop(pa), &off)) != -1) {
                   2750:                                pvh = &vm_physmem[bank].pmseg.pvhead[off];
                   2751:                                simple_lock(&pvh->pvh_lock);
                   2752:                                (void) pmap_modify_pv(pmap, va, pvh,
1.1       matt     2753:                                    PT_Wr | PT_W, nflags);
1.39      thorpej  2754:                                simple_unlock(&pvh->pvh_lock);
                   2755:                        } else {
                   2756:                                pvh = NULL;
                   2757:                        }
1.1       matt     2758:                } else {
                   2759:                        /* We are replacing the page with a new one. */
1.36      thorpej  2760:                        cpu_idcache_wbinv_range(va, NBPG);
1.1       matt     2761:
                   2762:                        PDEBUG(0, printf("Case 03 in pmap_enter (V%08lx P%08lx P%08lx)\n",
                   2763:                            va, pa, opa));
                   2764:
                   2765:                        /*
                   2766:                         * If it is part of our managed memory then we
                   2767:                         * must remove it from the PV list
                   2768:                         */
1.39      thorpej  2769:                        if ((bank = vm_physseg_find(atop(opa), &off)) != -1) {
                   2770:                                pvh = &vm_physmem[bank].pmseg.pvhead[off];
                   2771:                                simple_lock(&pvh->pvh_lock);
                   2772:                                pve = pmap_remove_pv(pvh, pmap, va);
                   2773:                                simple_unlock(&pvh->pvh_lock);
1.17      chris    2774:                        } else {
                   2775:                                pve = NULL;
1.1       matt     2776:                        }
                   2777:
                   2778:                        goto enter;
                   2779:                }
                   2780:        } else {
                   2781:                opa = 0;
1.17      chris    2782:                pve = NULL;
1.1       matt     2783:                pmap_pte_addref(pmap, va);
                   2784:
                   2785:                /* pte is not valid so we must be hooking in a new page */
                   2786:                ++pmap->pm_stats.resident_count;
                   2787:
                   2788:        enter:
                   2789:                /*
                   2790:                 * Enter on the PV list if part of our managed memory
                   2791:                 */
1.39      thorpej  2792:                bank = vm_physseg_find(atop(pa), &off);
                   2793:
                   2794:                if (pmap_initialized && (bank != -1)) {
                   2795:                        pvh = &vm_physmem[bank].pmseg.pvhead[off];
1.17      chris    2796:                        if (pve == NULL) {
                   2797:                                pve = pmap_alloc_pv(pmap, ALLOCPV_NEED);
                   2798:                                if (pve == NULL) {
                   2799:                                        if (flags & PMAP_CANFAIL) {
                   2800:                                                error = ENOMEM;
                   2801:                                                goto out;
                   2802:                                        }
                   2803:                                        panic("pmap_enter: no pv entries available");
                   2804:                                }
                   2805:                        }
                   2806:                        /* enter_pv locks pvh when adding */
1.39      thorpej  2807:                        pmap_enter_pv(pvh, pve, pmap, va, NULL, nflags);
1.17      chris    2808:                } else {
1.39      thorpej  2809:                        pvh = NULL;
1.17      chris    2810:                        if (pve != NULL)
                   2811:                                pmap_free_pv(pmap, pve);
1.1       matt     2812:                }
                   2813:        }
                   2814:
                   2815: #ifdef MYCROFT_HACK
                   2816:        if (mycroft_hack)
                   2817:                printf("pmap_enter: pmap=%p va=%lx pa=%lx opa=%lx bank=%d off=%d pv=%p\n", pmap, va, pa, opa, bank, off, pv);
                   2818: #endif
                   2819:
                   2820:        /* Construct the pte, giving the correct access. */
                   2821:        npte = (pa & PG_FRAME);
                   2822:
                   2823:        /* VA 0 is magic. */
                   2824:        if (pmap != pmap_kernel() && va != 0)
                   2825:                npte |= PT_AP(AP_U);
                   2826:
1.39      thorpej  2827:        if (pmap_initialized && bank != -1) {
1.1       matt     2828: #ifdef DIAGNOSTIC
                   2829:                if ((flags & VM_PROT_ALL) & ~prot)
                   2830:                        panic("pmap_enter: access_type exceeds prot");
                   2831: #endif
1.27      rearnsha 2832:                npte |= pte_cache_mode;
1.1       matt     2833:                if (flags & VM_PROT_WRITE) {
                   2834:                        npte |= L2_SPAGE | PT_AP(AP_W);
1.39      thorpej  2835:                        vm_physmem[bank].pmseg.attrs[off] |= PT_H | PT_M;
1.1       matt     2836:                } else if (flags & VM_PROT_ALL) {
                   2837:                        npte |= L2_SPAGE;
1.39      thorpej  2838:                        vm_physmem[bank].pmseg.attrs[off] |= PT_H;
1.1       matt     2839:                } else
                   2840:                        npte |= L2_INVAL;
                   2841:        } else {
                   2842:                if (prot & VM_PROT_WRITE)
                   2843:                        npte |= L2_SPAGE | PT_AP(AP_W);
                   2844:                else if (prot & VM_PROT_ALL)
                   2845:                        npte |= L2_SPAGE;
                   2846:                else
                   2847:                        npte |= L2_INVAL;
                   2848:        }
                   2849:
                   2850: #ifdef MYCROFT_HACK
                   2851:        if (mycroft_hack)
                   2852:                printf("pmap_enter: pmap=%p va=%lx pa=%lx prot=%x wired=%d access_type=%x npte=%08x\n", pmap, va, pa, prot, wired, flags & VM_PROT_ALL, npte);
                   2853: #endif
                   2854:
                   2855:        *pte = npte;
                   2856:
1.39      thorpej  2857:        if (pmap_initialized && bank != -1)
                   2858:        {
1.12      chris    2859:                boolean_t pmap_active = FALSE;
1.11      chris    2860:                /* XXX this will change once the whole of pmap_enter uses
                   2861:                 * map_ptes
                   2862:                 */
                   2863:                ptes = pmap_map_ptes(pmap);
1.12      chris    2864:                if ((curproc && curproc->p_vmspace->vm_map.pmap == pmap)
1.15      chris    2865:                    || (pmap == pmap_kernel()))
1.12      chris    2866:                        pmap_active = TRUE;
1.39      thorpej  2867:                simple_lock(&pvh->pvh_lock);
                   2868:                pmap_vac_me_harder(pmap, pvh, ptes, pmap_active);
                   2869:                simple_unlock(&pvh->pvh_lock);
1.11      chris    2870:                pmap_unmap_ptes(pmap);
                   2871:        }
1.1       matt     2872:
                   2873:        /* Better flush the TLB ... */
                   2874:        cpu_tlb_flushID_SE(va);
1.17      chris    2875:        error = 0;
                   2876: out:
1.24      chris    2877:        simple_unlock(&pmap->pm_obj.vmobjlock);
1.17      chris    2878:        PMAP_MAP_TO_HEAD_UNLOCK();
1.1       matt     2879:        PDEBUG(5, printf("pmap_enter: pte = V%p %08x\n", pte, *pte));
                   2880:
1.17      chris    2881:        return error;
1.1       matt     2882: }
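
/*
 * Usage sketch (assumed caller, not taken from this file): entering a
 * wired, writable kernel mapping and handling failure.  The flags
 * argument carries the access type plus PMAP_WIRED, and PMAP_CANFAIL
 * asks pmap_enter() to return ENOMEM rather than panic when a page
 * table or pv entry cannot be allocated.
 */
#if 0
static int
example_enter_page(vaddr_t va, paddr_t pa)
{
	int error;

	error = pmap_enter(pmap_kernel(), va, pa,
	    VM_PROT_READ | VM_PROT_WRITE,
	    VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED | PMAP_CANFAIL);
	if (error)
		return (error);		/* ENOMEM when PMAP_CANFAIL is set */
	return (0);
}
#endif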
                   2883:
                   2884: void
                   2885: pmap_kenter_pa(va, pa, prot)
                   2886:        vaddr_t va;
                   2887:        paddr_t pa;
                   2888:        vm_prot_t prot;
                   2889: {
1.14      chs      2890:        struct pmap *pmap = pmap_kernel();
1.13      chris    2891:        pt_entry_t *pte;
1.14      chs      2892:        struct vm_page *pg;
1.13      chris    2893:
1.30      rearnsha 2894:        if (!pmap_pde_page(pmap_pde(pmap, va))) {
1.14      chs      2895:
1.30      rearnsha 2896: #ifdef DIAGNOSTIC
                   2897:                if (pmap_pde_v(pmap_pde(pmap, va)))
                   2898:                        panic("Trying to map kernel page into section mapping"
                   2899:                            " VA=%lx PA=%lx", va, pa);
                   2900: #endif
1.13      chris    2901:                /*
                   2902:                 * For the kernel pmaps it would be better to ensure
                   2903:                 * that they are always present, and to grow the
                   2904:                 * kernel as required.
                   2905:                 */
                   2906:
1.24      chris    2907:                /* must lock the pmap */
                   2908:                simple_lock(&(pmap_kernel()->pm_obj.vmobjlock));
1.13      chris    2909:                /* Allocate a page table */
1.16      chris    2910:                pg = uvm_pagealloc(&(pmap_kernel()->pm_obj), 0, NULL,
1.14      chs      2911:                    UVM_PGA_USERESERVE | UVM_PGA_ZERO);
                   2912:                if (pg == NULL) {
1.13      chris    2913:                        panic("pmap_kenter_pa: no free pages");
                   2914:                }
1.16      chris    2915:                pg->flags &= ~PG_BUSY;  /* never busy */
1.13      chris    2916:
                   2917:                /* Wire this page table into the L1. */
1.17      chris    2918:                pmap_map_in_l1(pmap, va, VM_PAGE_TO_PHYS(pg), TRUE);
1.24      chris    2919:                simple_unlock(&(pmap_kernel()->pm_obj.vmobjlock));
1.13      chris    2920:        }
                   2921:        pte = vtopte(va);
1.14      chs      2922:        KASSERT(!pmap_pte_v(pte));
1.13      chris    2923:        *pte = L2_PTE(pa, AP_KRW);
1.1       matt     2924: }
                   2925:
                   2926: void
                   2927: pmap_kremove(va, len)
                   2928:        vaddr_t va;
                   2929:        vsize_t len;
                   2930: {
1.14      chs      2931:        pt_entry_t *pte;
                   2932:
1.1       matt     2933:        for (len >>= PAGE_SHIFT; len > 0; len--, va += PAGE_SIZE) {
1.13      chris    2934:
1.14      chs      2935:                /*
                   2936:                 * We assume that we will only be called with small
                   2937:                 * regions of memory.
                   2938:                 */
                   2939:
1.30      rearnsha 2940:                KASSERT(pmap_pde_page(pmap_pde(pmap_kernel(), va)));
1.13      chris    2941:                pte = vtopte(va);
1.36      thorpej  2942:                cpu_idcache_wbinv_range(va, PAGE_SIZE);
1.13      chris    2943:                *pte = 0;
                   2944:                cpu_tlb_flushID_SE(va);
1.1       matt     2945:        }
                   2946: }
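/*
 * Illustrative sketch of how pmap_kenter_pa()/pmap_kremove() are used
 * as a pair for temporary, unmanaged kernel mappings.  The addresses
 * are hypothetical and would come from the caller.
 */
#if 0
	vaddr_t kva;	/* a reserved kernel virtual address (hypothetical) */
	paddr_t pa;	/* the physical page to map (hypothetical) */

	pmap_kenter_pa(kva, pa, VM_PROT_READ | VM_PROT_WRITE);
	/* ... access the page through kva ... */
	pmap_kremove(kva, NBPG);	/* also flushes the TLB entry */
#endif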
                   2947:
                   2948: /*
                   2949:  * pmap_page_protect:
                   2950:  *
                   2951:  * Lower the permission for all mappings to a given page.
                   2952:  */
                   2953:
                   2954: void
                   2955: pmap_page_protect(pg, prot)
                   2956:        struct vm_page *pg;
                   2957:        vm_prot_t prot;
                   2958: {
1.39      thorpej  2959:        paddr_t pa = VM_PAGE_TO_PHYS(pg);
1.1       matt     2960:
1.39      thorpej  2961:        PDEBUG(0, printf("pmap_page_protect(pa=%lx, prot=%d)\n", pa, prot));
1.1       matt     2962:
                   2963:        switch(prot) {
1.17      chris    2964:        case VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE:
                   2965:        case VM_PROT_READ|VM_PROT_WRITE:
                   2966:                return;
                   2967:
1.1       matt     2968:        case VM_PROT_READ:
                   2969:        case VM_PROT_READ|VM_PROT_EXECUTE:
1.39      thorpej  2970:                pmap_copy_on_write(pa);
1.1       matt     2971:                break;
                   2972:
                   2973:        default:
1.39      thorpej  2974:                pmap_remove_all(pa);
1.1       matt     2975:                break;
                   2976:        }
                   2977: }
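/*
 * Illustrative sketch of the two common pmap_page_protect() uses by
 * the VM system ("pg" is a hypothetical managed page).
 */
#if 0
	/* Downgrade all mappings of pg to read-only (copy-on-write). */
	pmap_page_protect(pg, VM_PROT_READ);

	/* Remove every mapping of pg, e.g. before the page is freed. */
	pmap_page_protect(pg, VM_PROT_NONE);
#endif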
                   2978:
                   2979:
                   2980: /*
                   2981:  * Routine:    pmap_unwire
                   2982:  * Function:   Clear the wired attribute for a map/virtual-address
                   2983:  *             pair.
                   2984:  * In/out conditions:
                   2985:  *             The mapping must already exist in the pmap.
                   2986:  */
                   2987:
                   2988: void
                   2989: pmap_unwire(pmap, va)
1.15      chris    2990:        struct pmap *pmap;
1.1       matt     2991:        vaddr_t va;
                   2992: {
                   2993:        pt_entry_t *pte;
1.2       matt     2994:        paddr_t pa;
1.39      thorpej  2995:        int bank, off;
                   2996:        struct pv_head *pvh;
1.1       matt     2997:
                   2998:        /*
                   2999:         * Make sure pmap is valid. -dct
                   3000:         */
                   3001:        if (pmap == NULL)
                   3002:                return;
                   3003:
                   3004:        /* Get the pte */
                   3005:        pte = pmap_pte(pmap, va);
                   3006:        if (!pte)
                   3007:                return;
                   3008:
                   3009:        /* Extract the physical address of the page */
                   3010:        pa = pmap_pte_pa(pte);
                   3011:
1.39      thorpej  3012:        if ((bank = vm_physseg_find(atop(pa), &off)) == -1)
1.1       matt     3013:                return;
1.39      thorpej  3014:        pvh = &vm_physmem[bank].pmseg.pvhead[off];
                   3015:        simple_lock(&pvh->pvh_lock);
1.1       matt     3016:        /* Update the wired bit in the pv entry for this page. */
1.39      thorpej  3017:        (void) pmap_modify_pv(pmap, va, pvh, PT_W, 0);
                   3018:        simple_unlock(&pvh->pvh_lock);
1.1       matt     3019: }
                   3020:
                   3021: /*
1.15      chris    3022:  * pt_entry_t *pmap_pte(struct pmap *pmap, vaddr_t va)
1.1       matt     3023:  *
                   3024:  * Return the pointer to a page table entry corresponding to the supplied
                   3025:  * virtual address.
                   3026:  *
                   3027:  * The page directory is first checked to make sure that a page table
                   3028:  * for the address in question exists and if it does a pointer to the
                   3029:  * entry is returned.
                   3030:  *
                    3031:  * The way this works is that the kernel page tables are mapped
                   3032:  * into the memory map at ALT_PAGE_TBLS_BASE to ALT_PAGE_TBLS_BASE+4MB.
                   3033:  * This allows page tables to be located quickly.
                   3034:  */
                   3035: pt_entry_t *
                   3036: pmap_pte(pmap, va)
1.15      chris    3037:        struct pmap *pmap;
1.1       matt     3038:        vaddr_t va;
                   3039: {
                   3040:        pt_entry_t *ptp;
                   3041:        pt_entry_t *result;
                   3042:
                   3043:        /* The pmap must be valid */
                   3044:        if (!pmap)
                   3045:                return(NULL);
                   3046:
                   3047:        /* Return the address of the pte */
                   3048:        PDEBUG(10, printf("pmap_pte: pmap=%p va=V%08lx pde = V%p (%08X)\n",
                   3049:            pmap, va, pmap_pde(pmap, va), *(pmap_pde(pmap, va))));
                   3050:
                   3051:        /* Do we have a valid pde ? If not we don't have a page table */
1.30      rearnsha 3052:        if (!pmap_pde_page(pmap_pde(pmap, va))) {
1.39      thorpej  3053:                PDEBUG(0, printf("pmap_pte: failed - pde = %p\n",
                   3054:                    pmap_pde(pmap, va)));
1.1       matt     3055:                return(NULL);
                   3056:        }
                   3057:
                   3058:        PDEBUG(10, printf("pmap pagetable = P%08lx current = P%08x\n",
                   3059:            pmap->pm_pptpt, (*((pt_entry_t *)(PROCESS_PAGE_TBLS_BASE
                   3060:            + (PROCESS_PAGE_TBLS_BASE >> (PGSHIFT - 2)) +
                   3061:            (PROCESS_PAGE_TBLS_BASE >> PDSHIFT))) & PG_FRAME)));
                   3062:
                   3063:        /*
                   3064:         * If the pmap is the kernel pmap or the pmap is the active one
                   3065:         * then we can just return a pointer to entry relative to
                   3066:         * PROCESS_PAGE_TBLS_BASE.
                   3067:         * Otherwise we need to map the page tables to an alternative
                   3068:         * address and reference them there.
                   3069:         */
1.15      chris    3070:        if (pmap == pmap_kernel() || pmap->pm_pptpt
1.1       matt     3071:            == (*((pt_entry_t *)(PROCESS_PAGE_TBLS_BASE
                   3072:            + ((PROCESS_PAGE_TBLS_BASE >> (PGSHIFT - 2)) &
                   3073:            ~3) + (PROCESS_PAGE_TBLS_BASE >> PDSHIFT))) & PG_FRAME)) {
                   3074:                ptp = (pt_entry_t *)PROCESS_PAGE_TBLS_BASE;
                   3075:        } else {
                   3076:                struct proc *p = curproc;
                   3077:
                   3078:                /* If we don't have a valid curproc use proc0 */
                   3079:                /* Perhaps we should just use kernel_pmap instead */
                   3080:                if (p == NULL)
                   3081:                        p = &proc0;
                   3082: #ifdef DIAGNOSTIC
                   3083:                /*
                   3084:                 * The pmap should always be valid for the process so
                   3085:                 * panic if it is not.
                   3086:                 */
                   3087:                if (!p->p_vmspace || !p->p_vmspace->vm_map.pmap) {
                   3088:                        printf("pmap_pte: va=%08lx p=%p vm=%p\n",
                   3089:                            va, p, p->p_vmspace);
                   3090:                        console_debugger();
                   3091:                }
                   3092:                /*
                   3093:                 * The pmap for the current process should be mapped. If it
                   3094:                 * is not then we have a problem.
                   3095:                 */
                   3096:                if (p->p_vmspace->vm_map.pmap->pm_pptpt !=
                   3097:                    (*((pt_entry_t *)(PROCESS_PAGE_TBLS_BASE
                   3098:                    + (PROCESS_PAGE_TBLS_BASE >> (PGSHIFT - 2)) +
                   3099:                    (PROCESS_PAGE_TBLS_BASE >> PDSHIFT))) & PG_FRAME)) {
                   3100:                        printf("pmap pagetable = P%08lx current = P%08x ",
                   3101:                            pmap->pm_pptpt, (*((pt_entry_t *)(PROCESS_PAGE_TBLS_BASE
                   3102:                            + (PROCESS_PAGE_TBLS_BASE >> (PGSHIFT - 2)) +
                   3103:                            (PROCESS_PAGE_TBLS_BASE >> PDSHIFT))) &
                   3104:                            PG_FRAME));
                   3105:                        printf("pptpt=%lx\n", p->p_vmspace->vm_map.pmap->pm_pptpt);
                   3106:                        panic("pmap_pte: current and pmap mismatch\n");
                   3107:                }
                   3108: #endif
                   3109:
                   3110:                ptp = (pt_entry_t *)ALT_PAGE_TBLS_BASE;
                   3111:                pmap_map_in_l1(p->p_vmspace->vm_map.pmap, ALT_PAGE_TBLS_BASE,
1.17      chris    3112:                    pmap->pm_pptpt, FALSE);
1.1       matt     3113:                cpu_tlb_flushD();
1.32      thorpej  3114:                cpu_cpwait();
1.1       matt     3115:        }
                   3116:        PDEBUG(10, printf("page tables base = %p offset=%lx\n", ptp,
                   3117:            ((va >> (PGSHIFT-2)) & ~3)));
                   3118:        result = (pt_entry_t *)((char *)ptp + ((va >> (PGSHIFT-2)) & ~3));
                   3119:        return(result);
                   3120: }
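/*
 * Worked example of the offset calculation above, assuming 4KB pages
 * (PGSHIFT == 12) and 4-byte PTEs: for va = 0x00123456,
 *
 *	(va >> (PGSHIFT - 2)) & ~3  ==  (0x00123456 >> 10) & ~3
 *	                            ==  0x48d & ~3
 *	                            ==  0x48c
 *
 * which is 4 * (va >> PGSHIFT) == 4 * 0x123, i.e. the byte offset of
 * PTE number 0x123 in the linearly mapped page tables at "ptp".
 */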
                   3121:
                   3122: /*
                   3123:  * Routine:  pmap_extract
                   3124:  * Function:
                   3125:  *           Extract the physical page address associated
                   3126:  *           with the given map/virtual_address pair.
                   3127:  */
                   3128: boolean_t
                   3129: pmap_extract(pmap, va, pap)
1.15      chris    3130:        struct pmap *pmap;
1.1       matt     3131:        vaddr_t va;
                   3132:        paddr_t *pap;
                   3133: {
1.34      thorpej  3134:        pd_entry_t *pde;
1.11      chris    3135:        pt_entry_t *pte, *ptes;
1.1       matt     3136:        paddr_t pa;
1.34      thorpej  3137:        boolean_t rv = TRUE;
1.1       matt     3138:
                   3139:        PDEBUG(5, printf("pmap_extract: pmap=%p, va=V%08lx\n", pmap, va));
                   3140:
                   3141:        /*
1.11      chris    3142:         * Get the pte for this virtual address.
1.1       matt     3143:         */
1.34      thorpej  3144:        pde = pmap_pde(pmap, va);
1.11      chris    3145:        ptes = pmap_map_ptes(pmap);
                   3146:        pte = &ptes[arm_byte_to_page(va)];
1.1       matt     3147:
1.34      thorpej  3148:        if (pmap_pde_section(pde)) {
                   3149:                pa = (*pde & PD_MASK) | (va & (L1_SEC_SIZE - 1));
                   3150:                goto out;
                   3151:        } else if (pmap_pde_page(pde) == 0 || pmap_pte_v(pte) == 0) {
                   3152:                rv = FALSE;
                   3153:                goto out;
1.11      chris    3154:        }
1.1       matt     3155:
1.34      thorpej  3156:        if ((*pte & L2_MASK) == L2_LPAGE) {
1.1       matt     3157:                /* Extract the physical address from the pte */
1.34      thorpej  3158:                pa = *pte & ~(L2_LPAGE_SIZE - 1);
1.1       matt     3159:
                   3160:                PDEBUG(5, printf("pmap_extract: LPAGE pa = P%08lx\n",
                   3161:                    (pa | (va & (L2_LPAGE_SIZE - 1)))));
                   3162:
                   3163:                if (pap != NULL)
                   3164:                        *pap = pa | (va & (L2_LPAGE_SIZE - 1));
1.34      thorpej  3165:                goto out;
                   3166:        }
                   3167:
                   3168:        /* Extract the physical address from the pte */
                   3169:        pa = pmap_pte_pa(pte);
1.1       matt     3170:
1.34      thorpej  3171:        PDEBUG(5, printf("pmap_extract: SPAGE pa = P%08lx\n",
                   3172:            (pa | (va & ~PG_FRAME))));
1.1       matt     3173:
1.34      thorpej  3174:        if (pap != NULL)
                   3175:                *pap = pa | (va & ~PG_FRAME);
                   3176:  out:
1.11      chris    3177:        pmap_unmap_ptes(pmap);
1.34      thorpej  3178:        return (rv);
1.1       matt     3179: }
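/*
 * Illustrative sketch of a typical pmap_extract() call, translating a
 * mapped kernel virtual address into its physical address.  The value
 * of "va" is hypothetical.
 */
#if 0
	vaddr_t va = 0xf0123000;	/* hypothetical mapped address */
	paddr_t pa;

	if (pmap_extract(pmap_kernel(), va, &pa) == FALSE)
		panic("no mapping for va 0x%08lx", va);
	/* pa now holds the physical address backing va */
#endif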
                   3180:
                   3181:
                   3182: /*
1.39      thorpej  3183:  * Copy the range specified by src_addr/len from the source map to the
                   3184:  * range dst_addr/len in the destination map.
1.1       matt     3185:  *
1.39      thorpej  3186:  * This routine is only advisory and need not do anything.
1.1       matt     3187:  */
                   3188:
1.39      thorpej  3189: void
                   3190: pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
                   3191:        struct pmap *dst_pmap;
                   3192:        struct pmap *src_pmap;
                   3193:        vaddr_t dst_addr;
                   3194:        vsize_t len;
                   3195:        vaddr_t src_addr;
                   3196: {
                   3197:        PDEBUG(0, printf("pmap_copy(%p, %p, %lx, %lx, %lx)\n",
                   3198:            dst_pmap, src_pmap, dst_addr, len, src_addr));
                   3199: }
1.1       matt     3200:
                   3201: #if defined(PMAP_DEBUG)
                   3202: void
                   3203: pmap_dump_pvlist(phys, m)
                   3204:        vaddr_t phys;
                   3205:        char *m;
                   3206: {
1.39      thorpej  3207:        struct pv_head *pvh;
1.1       matt     3208:        struct pv_entry *pv;
1.39      thorpej  3209:        int bank, off;
1.1       matt     3210:
1.39      thorpej  3211:        if ((bank = vm_physseg_find(atop(phys), &off)) == -1) {
1.1       matt     3212:                printf("INVALID PA\n");
                   3213:                return;
                   3214:        }
1.39      thorpej  3215:        pvh = &vm_physmem[bank].pmseg.pvhead[off];
                   3216:        simple_lock(&pvh->pvh_lock);
1.1       matt     3217:        printf("%s %08lx:", m, phys);
1.39      thorpej  3218:        if (pvh->pvh_list == NULL) {
1.1       matt     3219:                printf(" no mappings\n");
                                        simple_unlock(&pvh->pvh_lock);
                    3220:                return;
                   3221:        }
                   3222:
1.39      thorpej  3223:        for (pv = pvh->pvh_list; pv; pv = pv->pv_next)
1.1       matt     3224:                printf(" pmap %p va %08lx flags %08x", pv->pv_pmap,
                   3225:                    pv->pv_va, pv->pv_flags);
                   3226:
                   3227:        printf("\n");
1.39      thorpej  3228:        simple_unlock(&pvh->pvh_lock);
1.1       matt     3229: }
                   3230:
                   3231: #endif /* PMAP_DEBUG */
                   3232:
1.22      chris    3233: __inline static boolean_t
1.39      thorpej  3234: pmap_testbit(pa, setbits)
                   3235:        paddr_t pa;
1.22      chris    3236:        unsigned int setbits;
1.1       matt     3237: {
1.39      thorpej  3238:        int bank, off;
                   3239:
                   3240:        PDEBUG(1, printf("pmap_testbit: pa=%08lx set=%08x\n", pa, setbits));
1.1       matt     3241:
1.39      thorpej  3242:        if ((bank = vm_physseg_find(atop(pa), &off)) == -1)
                   3243:                return(FALSE);
1.1       matt     3244:
                   3245:        /*
                   3246:         * Check saved info only
                   3247:         */
1.39      thorpej  3248:        if (vm_physmem[bank].pmseg.attrs[off] & setbits) {
1.1       matt     3249:                PDEBUG(0, printf("pmap_attributes = %02x\n",
1.39      thorpej  3250:                    vm_physmem[bank].pmseg.attrs[off]));
1.1       matt     3251:                return(TRUE);
                   3252:        }
                   3253:
                   3254:        return(FALSE);
                   3255: }
                   3256:
1.11      chris    3257: static pt_entry_t *
                   3258: pmap_map_ptes(struct pmap *pmap)
                   3259: {
1.17      chris    3260:        struct proc *p;
                   3261:
                   3262:        /* the kernel's pmap is always accessible */
                   3263:        if (pmap == pmap_kernel()) {
                    3264:                return (pt_entry_t *)PROCESS_PAGE_TBLS_BASE;
                   3265:        }
                   3266:
                   3267:        if (pmap_is_curpmap(pmap)) {
                   3268:                simple_lock(&pmap->pm_obj.vmobjlock);
                   3269:                return (pt_entry_t *)PROCESS_PAGE_TBLS_BASE;
                   3270:        }
                   3271:
                   3272:        p = curproc;
                   3273:
                   3274:        if (p == NULL)
                   3275:                p = &proc0;
                   3276:
                   3277:        /* need to lock both curpmap and pmap: use ordered locking */
                   3278:        if ((unsigned) pmap < (unsigned) curproc->p_vmspace->vm_map.pmap) {
                   3279:                simple_lock(&pmap->pm_obj.vmobjlock);
                   3280:                simple_lock(&curproc->p_vmspace->vm_map.pmap->pm_obj.vmobjlock);
                   3281:        } else {
                   3282:                simple_lock(&curproc->p_vmspace->vm_map.pmap->pm_obj.vmobjlock);
                   3283:                simple_lock(&pmap->pm_obj.vmobjlock);
                   3284:        }
1.11      chris    3285:
1.17      chris    3286:        pmap_map_in_l1(p->p_vmspace->vm_map.pmap, ALT_PAGE_TBLS_BASE,
                   3287:                        pmap->pm_pptpt, FALSE);
                   3288:        cpu_tlb_flushD();
1.32      thorpej  3289:        cpu_cpwait();
1.17      chris    3290:        return (pt_entry_t *)ALT_PAGE_TBLS_BASE;
                   3291: }
                   3292:
                   3293: /*
                   3294:  * pmap_unmap_ptes: unlock the PTE mapping of "pmap"
                   3295:  */
                   3296:
                   3297: static void
                   3298: pmap_unmap_ptes(pmap)
                   3299:        struct pmap *pmap;
                   3300: {
                   3301:        if (pmap == pmap_kernel()) {
                   3302:                return;
                   3303:        }
                   3304:        if (pmap_is_curpmap(pmap)) {
                   3305:                simple_unlock(&pmap->pm_obj.vmobjlock);
                   3306:        } else {
                   3307:                simple_unlock(&pmap->pm_obj.vmobjlock);
                   3308:                simple_unlock(&curproc->p_vmspace->vm_map.pmap->pm_obj.vmobjlock);
                   3309:        }
1.11      chris    3310: }
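/*
 * Illustrative sketch of the pmap_map_ptes()/pmap_unmap_ptes() pattern
 * used by pmap_extract() and pmap_enter(): the PTEs of "pmap" are only
 * valid (and the pmap only locked) between the two calls.  "pmap" and
 * "va" are assumed to be supplied by the caller.
 */
#if 0
	pt_entry_t *ptes, pte;

	ptes = pmap_map_ptes(pmap);		/* locks pmap */
	pte = ptes[arm_byte_to_page(va)];	/* inspect the PTE for va */
	pmap_unmap_ptes(pmap);			/* unlocks pmap */
#endif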
1.1       matt     3311:
                   3312: /*
                   3313:  * Modify pte bits for all ptes corresponding to the given physical address.
                   3314:  * We use `maskbits' rather than `clearbits' because we're always passing
                   3315:  * constants and the latter would require an extra inversion at run-time.
                   3316:  */
                   3317:
1.22      chris    3318: static void
1.39      thorpej  3319: pmap_clearbit(pa, maskbits)
                   3320:        paddr_t pa;
1.22      chris    3321:        unsigned int maskbits;
1.1       matt     3322: {
                   3323:        struct pv_entry *pv;
1.39      thorpej  3324:        struct pv_head *pvh;
1.1       matt     3325:        pt_entry_t *pte;
                   3326:        vaddr_t va;
1.39      thorpej  3327:        int bank, off, tlbentry;
1.1       matt     3328:
                   3329:        PDEBUG(1, printf("pmap_clearbit: pa=%08lx mask=%08x\n",
1.39      thorpej  3330:            pa, maskbits));
1.21      chris    3331:
                   3332:        tlbentry = 0;
                   3333:
1.39      thorpej  3334:        if ((bank = vm_physseg_find(atop(pa), &off)) == -1)
                   3335:                return;
1.17      chris    3336:        PMAP_HEAD_TO_MAP_LOCK();
1.39      thorpej  3337:        pvh = &vm_physmem[bank].pmseg.pvhead[off];
                   3338:        simple_lock(&pvh->pvh_lock);
1.17      chris    3339:
1.1       matt     3340:        /*
                   3341:         * Clear saved attributes (modify, reference)
                   3342:         */
1.39      thorpej  3343:        vm_physmem[bank].pmseg.attrs[off] &= ~maskbits;
1.1       matt     3344:
1.39      thorpej  3345:        if (pvh->pvh_list == NULL) {
                   3346:                simple_unlock(&pvh->pvh_lock);
1.17      chris    3347:                PMAP_HEAD_TO_MAP_UNLOCK();
1.1       matt     3348:                return;
                   3349:        }
                   3350:
                   3351:        /*
                    3352:         * Loop over all current mappings setting/clearing as appropriate
                   3353:         */
1.39      thorpej  3354:        for (pv = pvh->pvh_list; pv; pv = pv->pv_next) {
1.1       matt     3355:                va = pv->pv_va;
                   3356:                pv->pv_flags &= ~maskbits;
                   3357:                pte = pmap_pte(pv->pv_pmap, va);
1.17      chris    3358:                KASSERT(pte != NULL);
1.29      rearnsha 3359:                if (maskbits & (PT_Wr|PT_M)) {
                   3360:                        if ((pv->pv_flags & PT_NC)) {
                   3361:                                /*
                   3362:                                 * Entry is not cacheable: reenable
                   3363:                                 * the cache, nothing to flush
                   3364:                                 *
                   3365:                                 * Don't turn caching on again if this
                   3366:                                 * is a modified emulation.  This
                    3367:                                 * would be inconsistent with the
                   3368:                                 * settings created by
                   3369:                                 * pmap_vac_me_harder().
                   3370:                                 *
                   3371:                                 * There's no need to call
                   3372:                                 * pmap_vac_me_harder() here: all
                    3373:                                 * pages are losing their write
                   3374:                                 * permission.
                   3375:                                 *
                   3376:                                 */
                   3377:                                if (maskbits & PT_Wr) {
                   3378:                                        *pte |= pte_cache_mode;
                   3379:                                        pv->pv_flags &= ~PT_NC;
                   3380:                                }
                   3381:                        } else if (pmap_is_curpmap(pv->pv_pmap))
                   3382:                                /*
                    3383:                                 * Entry is cacheable: if the pmap is the
                    3384:                                 * current one, flush this page from the
                    3385:                                 * cache; otherwise it can't be in the cache
                   3386:                                 */
1.36      thorpej  3387:                                cpu_idcache_wbinv_range(pv->pv_va, NBPG);
1.29      rearnsha 3388:
                   3389:                        /* make the pte read only */
                   3390:                        *pte &= ~PT_AP(AP_W);
                   3391:                }
                   3392:
                   3393:                if (maskbits & PT_H)
                   3394:                        *pte = (*pte & ~L2_MASK) | L2_INVAL;
1.21      chris    3395:
1.29      rearnsha 3396:                if (pmap_is_curpmap(pv->pv_pmap))
1.21      chris    3397:                        /*
1.29      rearnsha 3398:                         * if we had cacheable pte's we'd clean the
                   3399:                         * pte out to memory here
                   3400:                         *
1.21      chris    3401:                         * flush tlb entry as it's in the current pmap
                   3402:                         */
                   3403:                        cpu_tlb_flushID_SE(pv->pv_va);
1.29      rearnsha 3404:        }
1.32      thorpej  3405:        cpu_cpwait();
1.21      chris    3406:
1.39      thorpej  3407:        simple_unlock(&pvh->pvh_lock);
1.17      chris    3408:        PMAP_HEAD_TO_MAP_UNLOCK();
1.1       matt     3409: }
                   3410:
                   3411:
                   3412: boolean_t
                   3413: pmap_clear_modify(pg)
                   3414:        struct vm_page *pg;
                   3415: {
1.39      thorpej  3416:        paddr_t pa = VM_PAGE_TO_PHYS(pg);
1.1       matt     3417:        boolean_t rv;
                   3418:
1.39      thorpej  3419:        PDEBUG(0, printf("pmap_clear_modify pa=%08lx\n", pa));
                   3420:        rv = pmap_testbit(pa, PT_M);
                   3421:        pmap_clearbit(pa, PT_M);
1.1       matt     3422:        return rv;
                   3423: }
                   3424:
                   3425:
                   3426: boolean_t
                   3427: pmap_clear_reference(pg)
                   3428:        struct vm_page *pg;
                   3429: {
1.39      thorpej  3430:        paddr_t pa = VM_PAGE_TO_PHYS(pg);
1.1       matt     3431:        boolean_t rv;
                   3432:
1.39      thorpej  3433:        PDEBUG(0, printf("pmap_clear_reference pa=%08lx\n", pa));
                   3434:        rv = pmap_testbit(pa, PT_H);
                   3435:        pmap_clearbit(pa, PT_H);
1.1       matt     3436:        return rv;
                   3437: }
                   3438:
                   3439:
                   3440: void
1.39      thorpej  3441: pmap_copy_on_write(pa)
                   3442:        paddr_t pa;
                   3443: {
                   3444:        PDEBUG(0, printf("pmap_copy_on_write pa=%08lx\n", pa));
                   3445:        pmap_clearbit(pa, PT_Wr);
                   3446: }
                   3447:
                   3448:
                   3449: boolean_t
                   3450: pmap_is_modified(pg)
1.37      thorpej  3451:        struct vm_page *pg;
1.1       matt     3452: {
1.39      thorpej  3453:        paddr_t pa = VM_PAGE_TO_PHYS(pg);
                   3454:        boolean_t result;
                   3455:
                   3456:        result = pmap_testbit(pa, PT_M);
                   3457:        PDEBUG(1, printf("pmap_is_modified pa=%08lx %x\n", pa, result));
                   3458:        return (result);
1.1       matt     3459: }
                   3460:
                   3461:
1.39      thorpej  3462: boolean_t
                   3463: pmap_is_referenced(pg)
                   3464:        struct vm_page *pg;
                   3465: {
                   3466:        paddr_t pa = VM_PAGE_TO_PHYS(pg);
                   3467:        boolean_t result;
                   3468:
                   3469:        result = pmap_testbit(pa, PT_H);
                   3470:        PDEBUG(0, printf("pmap_is_referenced pa=%08lx %x\n", pa, result));
                   3471:        return (result);
                   3472: }
                   3473:
1.1       matt     3474:
                   3475: int
                   3476: pmap_modified_emulation(pmap, va)
1.15      chris    3477:        struct pmap *pmap;
1.1       matt     3478:        vaddr_t va;
                   3479: {
                   3480:        pt_entry_t *pte;
1.2       matt     3481:        paddr_t pa;
1.39      thorpej  3482:        int bank, off;
                   3483:        struct pv_head *pvh;
1.1       matt     3484:        u_int flags;
                   3485:
                   3486:        PDEBUG(2, printf("pmap_modified_emulation\n"));
                   3487:
                   3488:        /* Get the pte */
                   3489:        pte = pmap_pte(pmap, va);
                   3490:        if (!pte) {
                   3491:                PDEBUG(2, printf("no pte\n"));
                   3492:                return(0);
                   3493:        }
                   3494:
                   3495:        PDEBUG(1, printf("*pte=%08x\n", *pte));
                   3496:
                   3497:        /* Check for a zero pte */
                   3498:        if (*pte == 0)
                   3499:                return(0);
                   3500:
                   3501:        /* This can happen if user code tries to access kernel memory. */
                   3502:        if ((*pte & PT_AP(AP_W)) != 0)
                   3503:                return (0);
                   3504:
                   3505:        /* Extract the physical address of the page */
                   3506:        pa = pmap_pte_pa(pte);
1.39      thorpej  3507:        if ((bank = vm_physseg_find(atop(pa), &off)) == -1)
1.1       matt     3508:                return(0);
                   3509:
1.39      thorpej  3510:        PMAP_HEAD_TO_MAP_LOCK();
1.37      thorpej  3511:        /* Get the current flags for this page. */
1.39      thorpej  3512:        pvh = &vm_physmem[bank].pmseg.pvhead[off];
                   3513:        /* XXX: needed if we hold head->map lock? */
                   3514:        simple_lock(&pvh->pvh_lock);
1.17      chris    3515:
1.39      thorpej  3516:        flags = pmap_modify_pv(pmap, va, pvh, 0, 0);
1.1       matt     3517:        PDEBUG(2, printf("pmap_modified_emulation: flags = %08x\n", flags));
                   3518:
                   3519:        /*
                    3520:         * Do the flags say this page is writable? If not then it is a
                    3521:         * genuine write fault. If yes then the write fault is our fault
                    3522:         * as we did not reflect the write access in the PTE. Now that we
                    3523:         * know a write has occurred, we can correct this and also set
                    3524:         * the modified bit.
                   3525:         */
1.17      chris    3526:        if (~flags & PT_Wr) {
1.39      thorpej  3527:                simple_unlock(&pvh->pvh_lock);
1.17      chris    3528:                PMAP_HEAD_TO_MAP_UNLOCK();
1.1       matt     3529:                return(0);
1.17      chris    3530:        }
1.1       matt     3531:
                   3532:        PDEBUG(0, printf("pmap_modified_emulation: Got a hit va=%08lx, pte = %p (%08x)\n",
                   3533:            va, pte, *pte));
1.39      thorpej  3534:        vm_physmem[bank].pmseg.attrs[off] |= PT_H | PT_M;
1.29      rearnsha 3535:
                   3536:        /*
                   3537:         * Re-enable write permissions for the page.  No need to call
                   3538:         * pmap_vac_me_harder(), since this is just a
                   3539:         * modified-emulation fault, and the PT_Wr bit isn't changing.  We've
                   3540:         * already set the cacheable bits based on the assumption that we
                   3541:         * can write to this page.
                   3542:         */
1.1       matt     3543:        *pte = (*pte & ~L2_MASK) | L2_SPAGE | PT_AP(AP_W);
                   3544:        PDEBUG(0, printf("->(%08x)\n", *pte));
                   3545:
1.39      thorpej  3546:        simple_unlock(&pvh->pvh_lock);
1.17      chris    3547:        PMAP_HEAD_TO_MAP_UNLOCK();
1.1       matt     3548:        /* Return, indicating the problem has been dealt with */
                   3549:        cpu_tlb_flushID_SE(va);
1.32      thorpej  3550:        cpu_cpwait();
1.1       matt     3551:        return(1);
                   3552: }
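/*
 * Illustrative sketch of how a data abort handler might use
 * pmap_modified_emulation(); the surrounding fault-handling code is
 * omitted.  A non-zero return means the fault was only the pmap's
 * write-tracking at work and has been fixed up, so the faulting
 * access can simply be retried.
 */
#if 0
	if (pmap_modified_emulation(pmap, va))
		return;		/* fault handled, restart the instruction */
	/* otherwise fall through to the normal uvm_fault() path */
#endif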
                   3553:
                   3554:
                   3555: int
                   3556: pmap_handled_emulation(pmap, va)
1.15      chris    3557:        struct pmap *pmap;
1.1       matt     3558:        vaddr_t va;
                   3559: {
                   3560:        pt_entry_t *pte;
1.2       matt     3561:        paddr_t pa;
1.39      thorpej  3562:        int bank, off;
1.1       matt     3563:
                   3564:        PDEBUG(2, printf("pmap_handled_emulation\n"));
                   3565:
                   3566:        /* Get the pte */
                   3567:        pte = pmap_pte(pmap, va);
                   3568:        if (!pte) {
                   3569:                PDEBUG(2, printf("no pte\n"));
                   3570:                return(0);
                   3571:        }
                   3572:
                   3573:        PDEBUG(1, printf("*pte=%08x\n", *pte));
                   3574:
                   3575:        /* Check for a zero pte */
                   3576:        if (*pte == 0)
                   3577:                return(0);
                   3578:
                   3579:        /* This can happen if user code tries to access kernel memory. */
                   3580:        if ((*pte & L2_MASK) != L2_INVAL)
                   3581:                return (0);
                   3582:
                   3583:        /* Extract the physical address of the page */
                   3584:        pa = pmap_pte_pa(pte);
1.39      thorpej  3585:        if ((bank = vm_physseg_find(atop(pa), &off)) == -1)
                   3586:                return(0);
1.1       matt     3587:
                   3588:        /*
                    3589:         * OK, we just enable the pte and mark the attributes as handled
                   3590:         */
                   3591:        PDEBUG(0, printf("pmap_handled_emulation: Got a hit va=%08lx pte = %p (%08x)\n",
                   3592:            va, pte, *pte));
1.39      thorpej  3593:        vm_physmem[bank].pmseg.attrs[off] |= PT_H;
1.1       matt     3594:        *pte = (*pte & ~L2_MASK) | L2_SPAGE;
                   3595:        PDEBUG(0, printf("->(%08x)\n", *pte));
                   3596:
                   3597:        /* Return, indicating the problem has been dealt with */
                   3598:        cpu_tlb_flushID_SE(va);
1.32      thorpej  3599:        cpu_cpwait();
1.1       matt     3600:        return(1);
                   3601: }
                   3602:
1.17      chris    3603:
                   3604:
                   3605:
1.1       matt     3606: /*
                   3607:  * pmap_collect: free resources held by a pmap
                   3608:  *
                   3609:  * => optional function.
                   3610:  * => called when a process is swapped out to free memory.
                   3611:  */
                   3612:
                   3613: void
                   3614: pmap_collect(pmap)
1.15      chris    3615:        struct pmap *pmap;
1.1       matt     3616: {
                   3617: }
                   3618:
                   3619: /*
                   3620:  * Routine:    pmap_procwr
                   3621:  *
                   3622:  * Function:
                   3623:  *     Synchronize caches corresponding to [addr, addr+len) in p.
                   3624:  *
                   3625:  */
                   3626: void
                   3627: pmap_procwr(p, va, len)
                   3628:        struct proc     *p;
                   3629:        vaddr_t         va;
1.3       matt     3630:        int             len;
1.1       matt     3631: {
                   3632:        /* We only need to do anything if it is the current process. */
                   3633:        if (p == curproc)
1.36      thorpej  3634:                cpu_icache_sync_range(va, len);
1.17      chris    3635: }
                   3636: /*
                   3637:  * PTP functions
                   3638:  */
                   3639:
                   3640: /*
                   3641:  * pmap_steal_ptp: Steal a PTP from somewhere else.
                   3642:  *
                   3643:  * This is just a placeholder, for now we never steal.
                   3644:  */
                   3645:
                   3646: static struct vm_page *
                   3647: pmap_steal_ptp(struct pmap *pmap, vaddr_t va)
                   3648: {
                   3649:     return (NULL);
                   3650: }
                   3651:
                   3652: /*
                   3653:  * pmap_get_ptp: get a PTP (if there isn't one, allocate a new one)
                   3654:  *
                   3655:  * => pmap should NOT be pmap_kernel()
                   3656:  * => pmap should be locked
                   3657:  */
                   3658:
                   3659: static struct vm_page *
                   3660: pmap_get_ptp(struct pmap *pmap, vaddr_t va, boolean_t just_try)
                   3661: {
                   3662:     struct vm_page *ptp;
                   3663:
1.30      rearnsha 3664:     if (pmap_pde_page(pmap_pde(pmap, va))) {
1.17      chris    3665:
                   3666:        /* valid... check hint (saves us a PA->PG lookup) */
                   3667: #if 0
                   3668:        if (pmap->pm_ptphint &&
                   3669:                ((unsigned)pmap_pde(pmap, va) & PG_FRAME) ==
                   3670:                VM_PAGE_TO_PHYS(pmap->pm_ptphint))
                   3671:            return (pmap->pm_ptphint);
                   3672: #endif
                   3673:        ptp = uvm_pagelookup(&pmap->pm_obj, va);
                   3674: #ifdef DIAGNOSTIC
                   3675:        if (ptp == NULL)
                   3676:            panic("pmap_get_ptp: unmanaged user PTP");
                   3677: #endif
                   3678: //     pmap->pm_ptphint = ptp;
                   3679:        return(ptp);
                   3680:     }
                   3681:
                   3682:     /* allocate a new PTP (updates ptphint) */
                   3683:     return(pmap_alloc_ptp(pmap, va, just_try));
                   3684: }
                   3685:
                   3686: /*
                   3687:  * pmap_alloc_ptp: allocate a PTP for a PMAP
                   3688:  *
                   3689:  * => pmap should already be locked by caller
                   3690:  * => we use the ptp's wire_count to count the number of active mappings
                   3691:  *     in the PTP (we start it at one to prevent any chance this PTP
                   3692:  *     will ever leak onto the active/inactive queues)
                   3693:  */
                   3694:
                   3695: /*__inline */ static struct vm_page *
                   3696: pmap_alloc_ptp(struct pmap *pmap, vaddr_t va, boolean_t just_try)
                   3697: {
                   3698:        struct vm_page *ptp;
                   3699:
                   3700:        ptp = uvm_pagealloc(&pmap->pm_obj, va, NULL,
                   3701:                UVM_PGA_USERESERVE|UVM_PGA_ZERO);
                   3702:        if (ptp == NULL) {
                   3703:            if (just_try)
                   3704:                return (NULL);
                   3705:
                   3706:            ptp = pmap_steal_ptp(pmap, va);
                   3707:
                   3708:            if (ptp == NULL)
                   3709:                return (NULL);
                   3710:            /* Stole a page, zero it.  */
                   3711:            pmap_zero_page(VM_PAGE_TO_PHYS(ptp));
                   3712:        }
                   3713:
                   3714:        /* got one! */
                   3715:        ptp->flags &= ~PG_BUSY; /* never busy */
                   3716:        ptp->wire_count = 1;    /* no mappings yet */
                   3717:        pmap_map_in_l1(pmap, va, VM_PAGE_TO_PHYS(ptp), TRUE);
                   3718:        pmap->pm_stats.resident_count++;        /* count PTP as resident */
                   3719: //     pmap->pm_ptphint = ptp;
                   3720:        return (ptp);
1.1       matt     3721: }
                   3722:
1.40      thorpej  3723: /************************ Bootstrapping routines ****************************/
                   3724:
                   3725: /*
1.46    ! thorpej  3726:  * This list exists for the benefit of pmap_map_chunk().  It keeps track
        !          3727:  * of the kernel L2 tables during bootstrap, so that pmap_map_chunk() can
        !          3728:  * find them as necessary.
        !          3729:  *
        !          3730:  * Note that the data on this list is not valid after initarm() returns.
        !          3731:  */
        !          3732: SLIST_HEAD(, pv_addr) kernel_pt_list = SLIST_HEAD_INITIALIZER(kernel_pt_list);
        !          3733:
        !          3734: static vaddr_t
        !          3735: kernel_pt_lookup(paddr_t pa)
        !          3736: {
        !          3737:        pv_addr_t *pv;
        !          3738:
        !          3739:        SLIST_FOREACH(pv, &kernel_pt_list, pv_list) {
        !          3740:                if (pv->pv_pa == pa)
        !          3741:                        return (pv->pv_va);
        !          3742:        }
        !          3743:        return (0);
        !          3744: }
        !          3745:
        !          3746: /*
1.40      thorpej  3747:  * pmap_map_section:
                   3748:  *
                   3749:  *     Create a single section mapping.
                   3750:  */
                   3751: void
                   3752: pmap_map_section(vaddr_t l1pt, vaddr_t va, paddr_t pa, int prot, int cache)
                   3753: {
                   3754:        pd_entry_t *pde = (pd_entry_t *) l1pt;
1.43      thorpej  3755:        pd_entry_t ap = (prot & VM_PROT_WRITE) ? AP_KRW : AP_KR;
                   3756:        pd_entry_t fl = (cache == PTE_CACHE) ? pte_cache_mode : 0;
1.40      thorpej  3757:
                   3758:        KASSERT(((va | pa) & (L1_SEC_SIZE - 1)) == 0);
                   3759:
1.43      thorpej  3760:        pde[va >> PDSHIFT] = L1_SECPTE(pa & PD_MASK, ap, fl);
1.41      thorpej  3761: }
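/*
 * Illustrative sketch: mapping a single 1MB section during bootstrap.
 * "l1pagetable" (the virtual address of the L1 table) and the
 * physical/virtual addresses are hypothetical; both addresses must be
 * aligned to L1_SEC_SIZE.
 */
#if 0
	pmap_map_section(l1pagetable, 0xf0000000, 0x40000000,
	    VM_PROT_READ | VM_PROT_WRITE, PTE_CACHE);
#endif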
                   3762:
                   3763: /*
                   3764:  * pmap_map_entry:
                   3765:  *
                   3766:  *     Create a single page mapping.
                   3767:  */
                   3768: void
                   3769: pmap_map_entry(vaddr_t l2pt, vaddr_t va, paddr_t pa, int prot, int cache)
                   3770: {
                   3771:        pt_entry_t *pte = (pt_entry_t *) l2pt;
                   3772:        pt_entry_t ap = (prot & VM_PROT_WRITE) ? AP_KRW : AP_KR;
                   3773:        pt_entry_t fl = (cache == PTE_CACHE) ? pte_cache_mode : 0;
                   3774:
                   3775:        KASSERT(((va | pa) & PGOFSET) == 0);
                   3776:
                   3777: #ifdef cats    /* XXXJRT */
                   3778:        pte[(va >> PGSHIFT) & 0x7ff] = L2_SPTE(pa & PG_FRAME, ap, fl);
                   3779: #else
                   3780:        pte[(va >> PGSHIFT) & 0x3ff] = L2_SPTE(pa & PG_FRAME, ap, fl);
                   3781: #endif
1.42      thorpej  3782: }
                   3783:
                   3784: /*
                   3785:  * pmap_link_l2pt:
                   3786:  *
                   3787:  *     Link the L2 page table specified by "pa" into the L1
                   3788:  *     page table at the slot for "va".
                   3789:  */
                   3790: void
1.46    ! thorpej  3791: pmap_link_l2pt(vaddr_t l1pt, vaddr_t va, pv_addr_t *l2pv)
1.42      thorpej  3792: {
                   3793:        pd_entry_t *pde = (pd_entry_t *) l1pt;
                   3794:        u_int slot = va >> PDSHIFT;
                   3795:
1.46    ! thorpej  3796:        KASSERT((l2pv->pv_pa & PGOFSET) == 0);
        !          3797:
        !          3798:        pde[slot + 0] = L1_PTE(l2pv->pv_pa + 0x000);
        !          3799:        pde[slot + 1] = L1_PTE(l2pv->pv_pa + 0x400);
        !          3800:        pde[slot + 2] = L1_PTE(l2pv->pv_pa + 0x800);
        !          3801:        pde[slot + 3] = L1_PTE(l2pv->pv_pa + 0xc00);
1.42      thorpej  3802:
1.46    ! thorpej  3803:        SLIST_INSERT_HEAD(&kernel_pt_list, l2pv, pv_list);
1.43      thorpej  3804: }
                   3805:
                   3806: /*
                   3807:  * pmap_map_chunk:
                   3808:  *
                   3809:  *     Map a chunk of memory using the most efficient mappings
                   3810:  *     possible (section, large page, small page) into the
                   3811:  *     provided L1 and L2 tables at the specified virtual address.
                   3812:  */
                   3813: vsize_t
1.46    ! thorpej  3814: pmap_map_chunk(vaddr_t l1pt, vaddr_t va, paddr_t pa, vsize_t size,
        !          3815:     int prot, int cache)
1.43      thorpej  3816: {
                   3817:        pd_entry_t *pde = (pd_entry_t *) l1pt;
                   3818:        pt_entry_t ap = (prot & VM_PROT_WRITE) ? AP_KRW : AP_KR;
                   3819:        pt_entry_t fl = (cache == PTE_CACHE) ? pte_cache_mode : 0;
1.46    ! thorpej  3820:        pt_entry_t *pte;
1.43      thorpej  3821:        vsize_t resid;
                   3822:        int i;
                   3823:
                   3824:        resid = (size + (NBPG - 1)) & ~(NBPG - 1);
                   3825:
1.44      thorpej  3826:        if (l1pt == 0)
                   3827:                panic("pmap_map_chunk: no L1 table provided");
                   3828:
1.43      thorpej  3829: #ifdef VERBOSE_INIT_ARM
                   3830:        printf("pmap_map_chunk: pa=0x%lx va=0x%lx size=0x%lx resid=0x%lx "
                   3831:            "prot=0x%x cache=%d\n", pa, va, size, resid, prot, cache);
                   3832: #endif
                   3833:
                   3834:        size = resid;
                   3835:
                   3836:        while (resid > 0) {
                   3837:                /* See if we can use a section mapping. */
1.44      thorpej  3838:                if (((pa | va) & (L1_SEC_SIZE - 1)) == 0 &&
1.43      thorpej  3839:                    resid >= L1_SEC_SIZE) {
                   3840: #ifdef VERBOSE_INIT_ARM
                   3841:                        printf("S");
                   3842: #endif
                   3843:                        pde[va >> PDSHIFT] = L1_SECPTE(pa, ap, fl);
                   3844:                        va += L1_SEC_SIZE;
                   3845:                        pa += L1_SEC_SIZE;
                   3846:                        resid -= L1_SEC_SIZE;
                   3847:                        continue;
                   3848:                }
1.45      thorpej  3849:
                   3850:                /*
                   3851:                 * Ok, we're going to use an L2 table.  Make sure
                   3852:                 * one is actually in the corresponding L1 slot
                   3853:                 * for the current VA.
                   3854:                 */
                   3855:                if ((pde[va >> PDSHIFT] & L1_MASK) != L1_PAGE)
1.46    ! thorpej  3856:                        panic("pmap_map_chunk: no L2 table for VA 0x%08lx", va);
        !          3857:
        !          3858:                pte = (pt_entry_t *)
        !          3859:                    kernel_pt_lookup(pde[va >> PDSHIFT] & PG_FRAME);
        !          3860:                if (pte == NULL)
        !          3861:                        panic("pmap_map_chunk: can't find L2 table for VA "
        !          3862:                            "0x%08lx", va);
1.43      thorpej  3863:
                   3864:                /* See if we can use a L2 large page mapping. */
                   3865:                if (((pa | va) & (L2_LPAGE_SIZE - 1)) == 0 &&
                   3866:                    resid >= L2_LPAGE_SIZE) {
                   3867: #ifdef VERBOSE_INIT_ARM
                   3868:                        printf("L");
                   3869: #endif
                   3870:                        for (i = 0; i < 16; i++) {
                   3871:                                pte[((va >> PGSHIFT) & 0x3f0) + i] =
                   3872:                                    L2_LPTE(pa, ap, fl);
                   3873:                        }
                   3874:                        va += L2_LPAGE_SIZE;
                   3875:                        pa += L2_LPAGE_SIZE;
                   3876:                        resid -= L2_LPAGE_SIZE;
                   3877:                        continue;
                   3878:                }
                   3879:
                   3880:                /* Use a small page mapping. */
                   3881: #ifdef VERBOSE_INIT_ARM
                   3882:                printf("P");
                   3883: #endif
                   3884:                pte[(va >> PGSHIFT) & 0x3ff] = L2_SPTE(pa, ap, fl);
                   3885:                va += NBPG;
                   3886:                pa += NBPG;
                   3887:                resid -= NBPG;
                   3888:        }
                   3889: #ifdef VERBOSE_INIT_ARM
                   3890:        printf("\n");
                   3891: #endif
                   3892:        return (size);
1.40      thorpej  3893: }
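/*
 * Illustrative sketch of how a port's initarm() might combine the
 * bootstrap helpers above.  All names (kernel_l1pt, kernel_pt,
 * KERNEL_BASE, physical_start, kernel_size, device_va, device_pa)
 * and the use of PTE_NOCACHE as the "uncached" selector are
 * hypothetical placeholders.
 */
#if 0
	/* Hook an L2 table into the L1 slots covering KERNEL_BASE. */
	pmap_link_l2pt(kernel_l1pt.pv_va, KERNEL_BASE, &kernel_pt);

	/* Map the kernel image with the most efficient mappings. */
	pmap_map_chunk(kernel_l1pt.pv_va, KERNEL_BASE, physical_start,
	    kernel_size, VM_PROT_READ | VM_PROT_WRITE, PTE_CACHE);

	/* Map a single uncached page, e.g. for a device register. */
	pmap_map_entry(kernel_pt.pv_va, device_va, device_pa,
	    VM_PROT_READ | VM_PROT_WRITE, PTE_NOCACHE);
#endif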
