
Annotation of src/sys/arch/hppa/hppa/pmap.c, Revision 1.78

1.77      uebayasi    1: /*     $NetBSD$        */
1.1       fredette    2:
                      3: /*-
                      4:  * Copyright (c) 2001, 2002 The NetBSD Foundation, Inc.
                      5:  * All rights reserved.
                      6:  *
                      7:  * This code is derived from software contributed to The NetBSD Foundation
                      8:  * by Matthew Fredette.
                      9:  *
                     10:  * Redistribution and use in source and binary forms, with or without
                     11:  * modification, are permitted provided that the following conditions
                     12:  * are met:
                     13:  * 1. Redistributions of source code must retain the above copyright
                     14:  *    notice, this list of conditions and the following disclaimer.
                     15:  * 2. Redistributions in binary form must reproduce the above copyright
                     16:  *    notice, this list of conditions and the following disclaimer in the
                     17:  *    documentation and/or other materials provided with the distribution.
                     18:  *
                     19:  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
                     20:  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
                     21:  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
                     22:  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
                     23:  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
                     24:  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
                     25:  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
                     26:  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
                     27:  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
                     28:  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
                     29:  * POSSIBILITY OF SUCH DAMAGE.
                     30:  */
                     31:
1.49      skrll      32: /*     $OpenBSD: pmap.c,v 1.132 2008/04/18 06:42:21 djm Exp $  */
1.1       fredette   33:
                     34: /*
1.49      skrll      35:  * Copyright (c) 1998-2004 Michael Shalayeff
1.1       fredette   36:  * All rights reserved.
                     37:  *
                     38:  * Redistribution and use in source and binary forms, with or without
                     39:  * modification, are permitted provided that the following conditions
                     40:  * are met:
                     41:  * 1. Redistributions of source code must retain the above copyright
                     42:  *    notice, this list of conditions and the following disclaimer.
                     43:  * 2. Redistributions in binary form must reproduce the above copyright
                     44:  *    notice, this list of conditions and the following disclaimer in the
                     45:  *    documentation and/or other materials provided with the distribution.
                     46:  *
                     47:  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
                     48:  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
                     49:  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
1.49      skrll      50:  * IN NO EVENT SHALL THE AUTHOR OR HIS RELATIVES BE LIABLE FOR ANY DIRECT,
                     51:  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
                     52:  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
                     53:  * SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
                     54:  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
                     55:  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
                     56:  * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
                     57:  * THE POSSIBILITY OF SUCH DAMAGE.
1.1       fredette   58:  */
                     59: /*
                     60:  * References:
                     61:  * 1. PA7100LC ERS, Hewlett-Packard, March 30 1999, Public version 1.0
                     62:  * 2. PA7300LC ERS, Hewlett-Packard, March 18 1996, Version 1.0
1.49      skrll      63:  * 3. PA-RISC 1.1 Architecture and Instruction Set Reference Manual,
                     64:  *    Hewlett-Packard, February 1994, Third Edition
1.1       fredette   65:  */
1.9       lukem      66:
                     67: #include <sys/cdefs.h>
1.77      uebayasi   68: __KERNEL_RCSID(0, "$NetBSD$");
1.70      skrll      69:
                     70: #include "opt_cputype.h"
1.1       fredette   71:
                     72: #include <sys/param.h>
                     73: #include <sys/systm.h>
                     74: #include <sys/malloc.h>
                     75: #include <sys/proc.h>
                     76:
                     77: #include <uvm/uvm.h>
                     78:
                     79: #include <machine/reg.h>
                     80: #include <machine/psl.h>
                     81: #include <machine/cpu.h>
                     82: #include <machine/pmap.h>
                     83: #include <machine/pte.h>
                     84: #include <machine/cpufunc.h>
1.49      skrll      85: #include <machine/iomod.h>
1.1       fredette   86:
                     87: #include <hppa/hppa/hpt.h>
                     88: #include <hppa/hppa/machdep.h>
                     89:
1.49      skrll      90: #if defined(DDB)
                     91: #include <ddb/db_output.h>
                     92: #endif
1.1       fredette   93:
                     94: #ifdef PMAPDEBUG
1.49      skrll      95:
                     96: #define        static  /**/
                     97: #define        inline  /**/
                     98:
                     99: #define        DPRINTF(l,s)    do {            \
                    100:        if ((pmapdebug & (l)) == (l))   \
                    101:                printf s;               \
                     102: } while (/*CONSTCOND*/0)
                    103:
                    104: #define        PDB_FOLLOW      0x00000001
1.1       fredette  105: #define        PDB_INIT        0x00000002
                    106: #define        PDB_ENTER       0x00000004
                    107: #define        PDB_REMOVE      0x00000008
1.49      skrll     108: #define        PDB_CREATE      0x00000010
                    109: #define        PDB_PTPAGE      0x00000020
1.1       fredette  110: #define        PDB_CACHE       0x00000040
                    111: #define        PDB_BITS        0x00000080
1.49      skrll     112: #define        PDB_COLLECT     0x00000100
1.1       fredette  113: #define        PDB_PROTECT     0x00000200
1.49      skrll     114: #define        PDB_EXTRACT     0x00000400
                    115: #define        PDB_VP          0x00000800
                    116: #define        PDB_PV          0x00001000
                    117: #define        PDB_PARANOIA    0x00002000
1.1       fredette  118: #define        PDB_WIRING      0x00004000
1.49      skrll     119: #define        PDB_PMAP        0x00008000
1.1       fredette  120: #define        PDB_STEAL       0x00010000
1.49      skrll     121: #define        PDB_PHYS        0x00020000
                    122: #define        PDB_POOL        0x00040000
                    123: #define        PDB_ALIAS       0x00080000
1.1       fredette  124: int pmapdebug = 0
1.49      skrll     125:        | PDB_INIT
                    126:        | PDB_FOLLOW
                    127:        | PDB_VP
                    128:        | PDB_PV
1.1       fredette  129:        | PDB_ENTER
                    130:        | PDB_REMOVE
1.49      skrll     131:        | PDB_STEAL
1.1       fredette  132:        | PDB_PROTECT
1.49      skrll     133:        | PDB_PHYS
                    134:        | PDB_ALIAS
1.1       fredette  135:        ;
                    136: #else
1.49      skrll     137: #define        DPRINTF(l,s)    /* */
1.1       fredette  138: #endif
                    139:
1.49      skrll     140: int            pmap_hptsize = 16 * PAGE_SIZE;  /* patchable */
                    141: vaddr_t                pmap_hpt;
1.1       fredette  142:
1.44      pooka     143: static struct pmap     kernel_pmap_store;
1.45      pooka     144: struct pmap            *const kernel_pmap_ptr = &kernel_pmap_store;
1.49      skrll     145:
                    146: int            hppa_sid_max = HPPA_SID_MAX;
                    147: struct pool    pmap_pool;
                    148: struct pool    pmap_pv_pool;
                    149: int            pmap_pvlowat = 252;
1.33      thorpej   150: bool           pmap_initialized = false;
1.1       fredette  151:
1.49      skrll     152: static kmutex_t        pmaps_lock;
1.1       fredette  153:
1.49      skrll     154: u_int  hppa_prot[8];
                    155: u_int  sid_counter;
1.1       fredette  156:
                    157: /*
1.3       fredette  158:  * Page 3-6 of the "PA-RISC 1.1 Architecture and Instruction Set
                    159:  * Reference Manual" (HP part number 09740-90039) defines equivalent
                    160:  * and non-equivalent virtual addresses in the cache.
                    161:  *
1.33      thorpej   162:  * This macro evaluates to true iff the two space/virtual address
1.3       fredette  163:  * combinations are non-equivalent aliases, and therefore will find
                    164:  * two different locations in the cache.
                    165:  *
                    166:  * NB: currently, the CPU-specific desidhash() functions disable the
                    167:  * use of the space in all cache hashing functions.  This means that
                    168:  * this macro definition is stricter than it has to be (because it
1.49      skrll     169:  * takes space into account), but one day cache space hashing should
                     170:  * be re-enabled, since it should yield better performance through
                     171:  * better utilization of the cache, assuming that most aliasing
1.3       fredette  172:  * is the read-only kind, which we do allow in the cache.
                    173:  */
                    174: #define NON_EQUIVALENT_ALIAS(sp1, va1, sp2, va2) \
                    175:   (((((va1) ^ (va2)) & ~HPPA_PGAMASK) != 0) || \
                    176:    ((((sp1) ^ (sp2)) & ~HPPA_SPAMASK) != 0))
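                          /*
                           * Editorial sketch, not part of the original file: one way the
                           * macro above could be used is to test whether an existing pv
                           * entry is a non-equivalent alias of a proposed mapping.  The
                           * helper name is hypothetical; pv_pmap, pv_va and PV_VAMASK are
                           * as used elsewhere in this file.
                           */
                          static inline bool
                          pmap_would_alias(const struct pv_entry *pve, pa_space_t sp, vaddr_t va)
                          {
                          
                                  return NON_EQUIVALENT_ALIAS(pve->pv_pmap->pm_space,
                                      pve->pv_va & PV_VAMASK, sp, va);
                          }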
1.1       fredette  177:
1.2       fredette  178: /* Prototypes. */
1.49      skrll     179: struct vm_page *pmap_pagealloc(struct uvm_object *, voff_t);
                    180: void pmap_pagefree(struct vm_page *);
                    181:
                    182: static inline void pmap_sdir_set(pa_space_t, volatile uint32_t *);
                    183: static inline uint32_t *pmap_sdir_get(pa_space_t);
                    184:
                    185: static inline volatile pt_entry_t *pmap_pde_get(volatile uint32_t *, vaddr_t);
                    186: static inline void pmap_pde_set(pmap_t, vaddr_t, paddr_t);
                    187: static inline pt_entry_t *pmap_pde_alloc(pmap_t, vaddr_t, struct vm_page **);
                    188: static inline struct vm_page *pmap_pde_ptp(pmap_t, volatile pt_entry_t *);
                    189: static inline void pmap_pde_release(pmap_t, vaddr_t, struct vm_page *);
                    190:
                    194: void pmap_pte_flush(pmap_t, vaddr_t, pt_entry_t);
                    195:
                    196: static inline pt_entry_t pmap_pte_get(volatile pt_entry_t *, vaddr_t);
                    197: static inline void pmap_pte_set(volatile pt_entry_t *, vaddr_t, pt_entry_t);
                    198:
                    199: static inline pt_entry_t pmap_vp_find(pmap_t, vaddr_t);
                    200:
1.25      skrll     201: static inline struct pv_entry *pmap_pv_alloc(void);
                    202: static inline void pmap_pv_free(struct pv_entry *);
1.49      skrll     203: static inline void pmap_pv_enter(struct vm_page *, struct pv_entry *, pmap_t,
                     204:     vaddr_t, struct vm_page *, u_int);
                    205: static inline struct pv_entry *pmap_pv_remove(struct vm_page *, pmap_t,
                    206:     vaddr_t);
                    207:
                    208: static inline void pmap_flush_page(struct vm_page *, bool);
                    209:
                    210: void pmap_copy_page(paddr_t, paddr_t);
1.2       fredette  211:
1.66      skrll     212: static void pmap_page_physload(paddr_t, paddr_t);
1.65      skrll     213:
1.49      skrll     214: #ifdef USE_HPT
                    215: static inline struct hpt_entry *pmap_hash(pmap_t, vaddr_t);
                    216: static inline uint32_t pmap_vtag(pmap_t, vaddr_t);
                    217:
                    218: #ifdef DDB
                    219: void pmap_hptdump(void);
                    220: #endif
                    221: #endif
                    222:
                    223: #ifdef DDB
                    224: void pmap_dump_table(pa_space_t, vaddr_t);
                    225: void pmap_dump_pv(paddr_t);
                    226: #endif
                    227:
1.73      skrll     228: void pmap_page_remove_locked(struct vm_page *);
                    229: int pmap_check_alias(struct vm_page *, vaddr_t, pt_entry_t);
1.49      skrll     230:
1.71      skrll     231: /* un-invert PVF_REF */
1.49      skrll     232: #define pmap_pvh_attrs(a) \
1.73      skrll     233:        (((a) & (PVF_MOD|PVF_REF)) ^ PVF_REF)
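                          /*
                           * Editorial example: PVF_REF is kept inverted in pvh_attrs, so the
                           * XOR above restores its usual sense.  E.g. stored attributes of
                           * just PVF_MOD give pmap_pvh_attrs(PVF_MOD) == (PVF_MOD | PVF_REF),
                           * i.e. the page is both modified and referenced.
                           */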
1.49      skrll     234:
                    235: #define PMAP_LOCK(pm)                                  \
                    236:        do {                                            \
                    237:                if ((pm) != pmap_kernel())              \
                    238:                        mutex_enter(&(pm)->pm_lock);    \
                    239:        } while (/*CONSTCOND*/0)
                    240:
                    241: #define PMAP_UNLOCK(pm)                                        \
                    242:        do {                                            \
                    243:                if ((pm) != pmap_kernel())              \
                    244:                        mutex_exit(&(pm)->pm_lock);     \
                    245:        } while (/*CONSTCOND*/0)
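                          /*
                           * Editorial usage sketch: callers bracket page-table updates with
                           * these macros; pmap_kernel() is deliberately skipped, as the
                           * kernel pmap is not protected by this per-pmap lock.
                           *
                           *        PMAP_LOCK(pm);
                           *        pte = pmap_vp_find(pm, va);
                           *        ...
                           *        PMAP_UNLOCK(pm);
                           */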
                    246:
                    247: struct vm_page *
                    248: pmap_pagealloc(struct uvm_object *obj, voff_t off)
                    249: {
                    250:        struct vm_page *pg;
                    251:
                    252:        if ((pg = uvm_pagealloc(obj, off, NULL,
                    253:            UVM_PGA_USERESERVE | UVM_PGA_ZERO)) == NULL)
                    254:                printf("pmap_pagealloc fail\n");
                    255:
                    256:        return (pg);
                    257: }
                    258:
                    259: void
                    260: pmap_pagefree(struct vm_page *pg)
                    261: {
1.68      skrll     262:        paddr_t pa = VM_PAGE_TO_PHYS(pg);
                    263:        pdcache(HPPA_SID_KERNEL, pa, PAGE_SIZE);
                    264:
                    265: #if defined(HP8000_CPU) || defined(HP8200_CPU) || \
                    266:     defined(HP8500_CPU) || defined(HP8600_CPU)
                    267:        pdtlb(HPPA_SID_KERNEL, pa);
                    268:        pitlb(HPPA_SID_KERNEL, pa);
                    269: #endif
1.49      skrll     270:        uvm_pagefree(pg);
                    271: }
                    272:
                    273: #ifdef USE_HPT
1.1       fredette  274: /*
1.49      skrll     275:  * This hash function is the one used by the hardware TLB walker on the 7100LC.
1.1       fredette  276:  */
1.49      skrll     277: static inline struct hpt_entry *
                    278: pmap_hash(pmap_t pmap, vaddr_t va)
                    279: {
                    280:
                    281:        return (struct hpt_entry *)(pmap_hpt +
                    282:            (((va >> 8) ^ (pmap->pm_space << 9)) & (pmap_hptsize - 1)));
                    283: }
                    284:
                    285: static inline uint32_t
                    286: pmap_vtag(pmap_t pmap, vaddr_t va)
                    287: {
                    288:
                    289:        return (0x80000000 | (pmap->pm_space & 0xffff) |
                    290:            ((va >> 1) & 0x7fff0000));
                    291: }
                    292: #endif
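                          /*
                           * Editorial worked example, assuming the default pmap_hptsize of
                           * 16 * PAGE_SIZE = 0x10000 (mask 0xffff): for space 0 and
                           * va 0x12345000, (va >> 8) == 0x123450, the xor term is 0, and
                           * masking gives byte offset 0x3450, i.e. the entry at
                           * pmap_hpt + 0x3450.  The matching tag from pmap_vtag() is
                           * 0x80000000 | 0 | ((0x12345000 >> 1) & 0x7fff0000) == 0x891a0000.
                           */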
                    293:
1.18      perry     294: static inline void
1.49      skrll     295: pmap_sdir_set(pa_space_t space, volatile uint32_t *pd)
1.1       fredette  296: {
1.49      skrll     297:        volatile uint32_t *vtop;
                    298:
                    299:        mfctl(CR_VTOP, vtop);
                    300:
                    301:        KASSERT(vtop != NULL);
1.1       fredette  302:
1.49      skrll     303:        vtop[space] = (uint32_t)pd;
1.1       fredette  304: }
                    305:
1.49      skrll     306: static inline uint32_t *
                    307: pmap_sdir_get(pa_space_t space)
1.1       fredette  308: {
1.49      skrll     309:        uint32_t *vtop;
1.1       fredette  310:
1.49      skrll     311:        mfctl(CR_VTOP, vtop);
                    312:        return ((uint32_t *)vtop[space]);
                    313: }
1.2       fredette  314:
1.49      skrll     315: static inline volatile pt_entry_t *
                    316: pmap_pde_get(volatile uint32_t *pd, vaddr_t va)
                    317: {
1.1       fredette  318:
1.49      skrll     319:        return ((pt_entry_t *)pd[va >> 22]);
1.1       fredette  320: }
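                          /*
                           * Editorial note on the two-level walk: with 4 KB pages, va bits
                           * 31..22 index the 1024-entry page directory (pmap_pde_get, above)
                           * and bits 21..12 index the 1024-entry page table (pmap_pte_get,
                           * below); bits 11..0 are the offset within the page.  pmap_vp_find()
                           * composes the two lookups.
                           */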
                    321:
1.18      perry     322: static inline void
1.49      skrll     323: pmap_pde_set(pmap_t pm, vaddr_t va, paddr_t ptp)
                    324: {
                    325:
                    326:        DPRINTF(PDB_FOLLOW|PDB_VP,
1.62      skrll     327:            ("%s(%p, 0x%lx, 0x%lx)\n", __func__, pm, va, ptp));
1.49      skrll     328:
                    329:        KASSERT((ptp & PGOFSET) == 0);
                    330:
                    331:        pm->pm_pdir[va >> 22] = ptp;
                    332: }
                    333:
                    334: static inline pt_entry_t *
                    335: pmap_pde_alloc(pmap_t pm, vaddr_t va, struct vm_page **pdep)
1.1       fredette  336: {
1.49      skrll     337:        struct vm_page *pg;
                    338:        paddr_t pa;
                    339:
                    340:        DPRINTF(PDB_FOLLOW|PDB_VP,
1.62      skrll     341:            ("%s(%p, 0x%lx, %p)\n", __func__, pm, va, pdep));
1.49      skrll     342:
                    343:        KASSERT(pm != pmap_kernel());
                    344:        KASSERT(mutex_owned(&pm->pm_lock));
                    345:
                    346:        pg = pmap_pagealloc(&pm->pm_obj, va);
                    347:
                    348:        if (pg == NULL)
                    349:                return NULL;
1.1       fredette  350:
1.49      skrll     351:        pa = VM_PAGE_TO_PHYS(pg);
                    352:
1.62      skrll     353:        DPRINTF(PDB_FOLLOW|PDB_VP, ("%s: pde %lx\n", __func__, pa));
1.49      skrll     354:
                    355:        pg->flags &= ~PG_BUSY;          /* never busy */
                    356:        pg->wire_count = 1;             /* no mappings yet */
                    357:        pmap_pde_set(pm, va, pa);
                    358:        pm->pm_stats.resident_count++;  /* count PTP as resident */
                    359:        pm->pm_ptphint = pg;
                    360:        if (pdep)
                    361:                *pdep = pg;
                    362:        return ((pt_entry_t *)pa);
1.1       fredette  363: }
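                          /*
                           * Editorial note: the PTP's physical address is returned (and stored
                           * by pmap_pde_set) as a directly usable pointer because kernel
                           * memory is direct-mapped VA == PA, as pmap_bootstrap() below
                           * arranges.
                           */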
                    364:
1.49      skrll     365: static inline struct vm_page *
                    366: pmap_pde_ptp(pmap_t pm, volatile pt_entry_t *pde)
1.1       fredette  367: {
1.49      skrll     368:        paddr_t pa = (paddr_t)pde;
                    369:
                    370:        DPRINTF(PDB_FOLLOW|PDB_PV, ("%s(%p, %p)\n", __func__, pm, pde));
                    371:
                    372:        if (pm->pm_ptphint && VM_PAGE_TO_PHYS(pm->pm_ptphint) == pa)
                    373:                return (pm->pm_ptphint);
                    374:
1.62      skrll     375:        DPRINTF(PDB_FOLLOW|PDB_PV, ("%s: lookup 0x%lx\n", __func__, pa));
1.49      skrll     376:
                    377:        return (PHYS_TO_VM_PAGE(pa));
1.1       fredette  378: }
                    379:
1.49      skrll     380: static inline void
                    381: pmap_pde_release(pmap_t pmap, vaddr_t va, struct vm_page *ptp)
1.1       fredette  382: {
                    383:
1.49      skrll     384:        DPRINTF(PDB_FOLLOW|PDB_PV,
1.62      skrll     385:            ("%s(%p, 0x%lx, %p)\n", __func__, pmap, va, ptp));
1.49      skrll     386:
                    387:        KASSERT(pmap != pmap_kernel());
1.63      skrll     388:        if (--ptp->wire_count <= 1) {
1.49      skrll     389:                DPRINTF(PDB_FOLLOW|PDB_PV,
                    390:                    ("%s: disposing ptp %p\n", __func__, ptp));
                    391:                pmap_pde_set(pmap, va, 0);
                    392:                pmap->pm_stats.resident_count--;
                    393:                if (pmap->pm_ptphint == ptp)
                    394:                        pmap->pm_ptphint = TAILQ_FIRST(&pmap->pm_obj.memq);
                    395:                ptp->wire_count = 0;
                    396:
                    397:                KASSERT((ptp->flags & PG_BUSY) == 0);
                    398:
                    399:                pmap_pagefree(ptp);
                    400:        }
1.1       fredette  401: }
                    402:
1.49      skrll     403: static inline pt_entry_t
                    404: pmap_pte_get(volatile pt_entry_t *pde, vaddr_t va)
1.1       fredette  405: {
                    406:
1.49      skrll     407:        return (pde[(va >> 12) & 0x3ff]);
1.1       fredette  408: }
                    409:
1.49      skrll     410: static inline void
                    411: pmap_pte_set(volatile pt_entry_t *pde, vaddr_t va, pt_entry_t pte)
1.1       fredette  412: {
                    413:
1.62      skrll     414:        DPRINTF(PDB_FOLLOW|PDB_VP, ("%s(%p, 0x%lx, 0x%x)\n",
                    415:            __func__, pde, va, pte));
1.49      skrll     416:
                    417:        KASSERT(pde != NULL);
                    418:        KASSERT(((paddr_t)pde & PGOFSET) == 0);
                    419:
                    420:        pde[(va >> 12) & 0x3ff] = pte;
                    421: }
1.1       fredette  422:
1.49      skrll     423: void
                    424: pmap_pte_flush(pmap_t pmap, vaddr_t va, pt_entry_t pte)
                    425: {
                    426:
1.68      skrll     427:        fdcache(pmap->pm_space, va, PAGE_SIZE);
1.49      skrll     428:        if (pte & PTE_PROT(TLB_EXECUTE)) {
                    429:                ficache(pmap->pm_space, va, PAGE_SIZE);
                    430:                pitlb(pmap->pm_space, va);
                    431:        }
1.73      skrll     432:        pdtlb(pmap->pm_space, va);
1.49      skrll     433: #ifdef USE_HPT
                    434:        if (pmap_hpt) {
                    435:                struct hpt_entry *hpt;
                    436:                hpt = pmap_hash(pmap, va);
                    437:                if (hpt->hpt_valid &&
                    438:                    hpt->hpt_space == pmap->pm_space &&
                    439:                    hpt->hpt_vpn == ((va >> 1) & 0x7fff0000))
                    440:                        hpt->hpt_space = 0xffff;
                    441:        }
                    442: #endif
1.1       fredette  443: }
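                          /*
                           * Editorial note: 0xffff is the same "no space" sentinel that
                           * pmap_bootstrap() stores in empty HPT entries, so overwriting
                           * hpt_space above forces the hardware walker's tag compare to
                           * miss and fall back to the page tables.
                           */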
                    444:
1.49      skrll     445: static inline pt_entry_t
                    446: pmap_vp_find(pmap_t pm, vaddr_t va)
1.3       fredette  447: {
1.49      skrll     448:        volatile pt_entry_t *pde;
1.3       fredette  449:
1.49      skrll     450:        if (!(pde = pmap_pde_get(pm->pm_pdir, va)))
                    451:                return (0);
1.3       fredette  452:
1.49      skrll     453:        return (pmap_pte_get(pde, va));
                    454: }
                    455:
                    456: #ifdef DDB
                    457: void
                    458: pmap_dump_table(pa_space_t space, vaddr_t sva)
                    459: {
                    460:        char buf[64];
1.61      skrll     461:        volatile pt_entry_t *pde = NULL;
                    462:        vaddr_t va = sva;
                    463:        vaddr_t pdemask = 1;
                    464:        pt_entry_t pte;
                    465:        uint32_t *pd;
1.3       fredette  466:
1.61      skrll     467:        if (space > hppa_sid_max)
                    468:                return;
1.49      skrll     469:
1.61      skrll     470:        pd = pmap_sdir_get(space);
                    471:        if (!pd)
                    472:                return;
1.49      skrll     473:
1.61      skrll     474:        do {
                    475:                if (pdemask != (va & PDE_MASK)) {
                    476:                        pdemask = va & PDE_MASK;
                    477:                        pde = pmap_pde_get(pd, va);
                    478:                        if (!pde) {
                    479:                                va = pdemask + PDE_SIZE;
                    480:                                continue;
1.3       fredette  481:                        }
1.69      skrll     482:                        db_printf("%x:%8p:\n", space, pde);
1.61      skrll     483:                }
1.49      skrll     484:
1.61      skrll     485:                pte = pmap_pte_get(pde, va);
                    486:                if (pte) {
1.49      skrll     487:                        snprintb(buf, sizeof(buf), TLB_BITS,
                    488:                           TLB_PROT(pte & PAGE_MASK));
1.69      skrll     489:                        db_printf("0x%08lx-0x%08x:%s\n", va, pte & ~PAGE_MASK,
1.49      skrll     490:                            buf);
1.3       fredette  491:                }
1.61      skrll     492:                va += PAGE_SIZE;
                    493:        } while (va != 0);
1.49      skrll     494: }
1.3       fredette  495:
1.49      skrll     496: void
                    497: pmap_dump_pv(paddr_t pa)
                    498: {
                    499:        struct vm_page *pg;
1.77      uebayasi  500:        struct vm_page_md *md;
1.49      skrll     501:        struct pv_entry *pve;
1.3       fredette  502:
1.49      skrll     503:        pg = PHYS_TO_VM_PAGE(pa);
1.77      uebayasi  504:        md = VM_PAGE_TO_MD(pg);
                    505:        mutex_enter(&md->pvh_lock);
                    506:        db_printf("pg %p attr 0x%08x aliases %d\n", pg, md->pvh_attrs,
                    507:            md->pvh_aliases);
                    508:        for (pve = md->pvh_list; pve; pve = pve->pv_next)
1.69      skrll     509:                db_printf("%x:%lx\n", pve->pv_pmap->pm_space,
1.49      skrll     510:                    pve->pv_va & PV_VAMASK);
1.77      uebayasi  511:        mutex_exit(&md->pvh_lock);
1.3       fredette  512: }
1.49      skrll     513: #endif
1.3       fredette  514:
1.73      skrll     515: int
                    516: pmap_check_alias(struct vm_page *pg, vaddr_t va, pt_entry_t pte)
1.3       fredette  517: {
1.77      uebayasi  518:        struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
1.73      skrll     519:        struct pv_entry *pve;
                    520:        int ret = 0;
1.49      skrll     521:
1.73      skrll     522:        /* check for non-equ aliased mappings */
1.77      uebayasi  523:        for (pve = md->pvh_list; pve; pve = pve->pv_next) {
1.73      skrll     524:                vaddr_t pva = pve->pv_va & PV_VAMASK;
1.49      skrll     525:
1.73      skrll     526:                pte |= pmap_vp_find(pve->pv_pmap, pva);
                    527:                if ((va & HPPA_PGAOFF) != (pva & HPPA_PGAOFF) &&
                    528:                    (pte & PTE_PROT(TLB_WRITE))) {
1.49      skrll     529:
                    530:                        DPRINTF(PDB_FOLLOW|PDB_ALIAS,
1.74      skrll     531:                             ("%s: aliased writable mapping 0x%x:0x%lx\n",
1.73      skrll     532:                             __func__, pve->pv_pmap->pm_space, pve->pv_va));
                    533:                        ret++;
1.3       fredette  534:                }
1.73      skrll     535:        }
1.3       fredette  536:
1.73      skrll     537:         return (ret);
1.3       fredette  538: }
                    539:
                    540: /*
1.49      skrll     541:  * This allocates and returns a new struct pv_entry.
1.1       fredette  542:  */
1.18      perry     543: static inline struct pv_entry *
1.49      skrll     544: pmap_pv_alloc(void)
1.1       fredette  545: {
1.49      skrll     546:        struct pv_entry *pv;
1.1       fredette  547:
1.49      skrll     548:        DPRINTF(PDB_FOLLOW|PDB_PV, ("%s()\n", __func__));
1.1       fredette  549:
1.49      skrll     550:        pv = pool_get(&pmap_pv_pool, PR_NOWAIT);
1.1       fredette  551:
1.49      skrll     552:        DPRINTF(PDB_FOLLOW|PDB_PV, ("%s: %p\n", __func__, pv));
1.5       fredette  553:
1.49      skrll     554:        return (pv);
                    555: }
1.5       fredette  556:
1.49      skrll     557: static inline void
                    558: pmap_pv_free(struct pv_entry *pv)
                    559: {
1.5       fredette  560:
1.49      skrll     561:        if (pv->pv_ptp)
                    562:                pmap_pde_release(pv->pv_pmap, pv->pv_va & PV_VAMASK,
                    563:                    pv->pv_ptp);
1.5       fredette  564:
1.49      skrll     565:        pool_put(&pmap_pv_pool, pv);
1.1       fredette  566: }
                    567:
1.18      perry     568: static inline void
1.49      skrll     569: pmap_pv_enter(struct vm_page *pg, struct pv_entry *pve, pmap_t pm,
                    570:     vaddr_t va, struct vm_page *pdep, u_int flags)
1.1       fredette  571: {
1.77      uebayasi  572:        struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
                    573:
1.62      skrll     574:        DPRINTF(PDB_FOLLOW|PDB_PV, ("%s(%p, %p, %p, 0x%lx, %p, 0x%x)\n",
                    575:            __func__, pg, pve, pm, va, pdep, flags));
1.49      skrll     576:
1.77      uebayasi  577:        KASSERT(mutex_owned(&md->pvh_lock));
1.5       fredette  578:
1.49      skrll     579:        pve->pv_pmap = pm;
                    580:        pve->pv_va = va | flags;
                    581:        pve->pv_ptp = pdep;
1.77      uebayasi  582:        pve->pv_next = md->pvh_list;
                    583:        md->pvh_list = pve;
1.49      skrll     584: }
1.5       fredette  585:
1.49      skrll     586: static inline struct pv_entry *
                    587: pmap_pv_remove(struct vm_page *pg, pmap_t pmap, vaddr_t va)
                    588: {
1.77      uebayasi  589:        struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
1.49      skrll     590:        struct pv_entry **pve, *pv;
1.1       fredette  591:
1.77      uebayasi  592:        KASSERT(mutex_owned(&md->pvh_lock));
1.1       fredette  593:
1.77      uebayasi  594:        for (pv = *(pve = &md->pvh_list);
1.49      skrll     595:            pv; pv = *(pve = &(*pve)->pv_next))
                    596:                if (pv->pv_pmap == pmap && (pv->pv_va & PV_VAMASK) == va) {
                    597:                        *pve = pv->pv_next;
                    598:                        break;
                    599:                }
                    600:        return (pv);
1.1       fredette  601: }
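                          /*
                           * Editorial note: the walk above carries a pointer to the previous
                           * link (pve), so the matching entry is unlinked with a single store
                           * and the list head needs no special case.
                           */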
                    602:
1.64      skrll     603: #define        FIRST_16M atop(16 * 1024 * 1024)
                    604:
                    605: static void
                    606: pmap_page_physload(paddr_t spa, paddr_t epa)
                    607: {
                    608:
                    609:        if (spa < FIRST_16M && epa <= FIRST_16M) {
                    610:                DPRINTF(PDB_INIT, ("%s: phys segment 0x%05lx 0x%05lx\n",
                    611:                    __func__, spa, epa));
                    612:
                    613:                uvm_page_physload(spa, epa, spa, epa, VM_FREELIST_ISADMA);
                    614:        } else if (spa < FIRST_16M && epa > FIRST_16M) {
                    615:                DPRINTF(PDB_INIT, ("%s: phys segment 0x%05lx 0x%05lx\n",
                    616:                    __func__, spa, FIRST_16M));
                    617:
                    618:                uvm_page_physload(spa, FIRST_16M, spa, FIRST_16M,
                    619:                    VM_FREELIST_ISADMA);
                    620:
                    621:                DPRINTF(PDB_INIT, ("%s: phys segment 0x%05lx 0x%05lx\n",
                    622:                    __func__, FIRST_16M, epa));
                    623:
                    624:                uvm_page_physload(FIRST_16M, epa, FIRST_16M, epa,
                    625:                    VM_FREELIST_DEFAULT);
                    626:        } else {
                    627:                DPRINTF(PDB_INIT, ("%s: phys segment 0x%05lx 0x%05lx\n",
                    628:                    __func__, spa, epa));
                    629:
                    630:                uvm_page_physload(spa, epa, spa, epa, VM_FREELIST_DEFAULT);
                    631:        }
                    632:
                    633:        availphysmem += epa - spa;
                    634: }
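                          /*
                           * Editorial worked example (hypothetical addresses): a segment from
                           * 8 MB to 32 MB, pmap_page_physload(atop(0x800000), atop(0x2000000)),
                           * straddles FIRST_16M and is split: the pages below 16 MB are loaded
                           * onto VM_FREELIST_ISADMA for ISA DMA allocations, the remainder
                           * onto VM_FREELIST_DEFAULT.
                           */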
                    635:
1.1       fredette  636: /*
1.22      skrll     637:  * Bootstrap the system enough to run with virtual memory.
1.25      skrll     638:  * Map the kernel's code, data and bss, and allocate the system page table.
1.22      skrll     639:  * Called with mapping OFF.
                    640:  *
                    641:  * Parameters:
                    642:  * vstart      PA of first available physical page
1.1       fredette  643:  */
                    644: void
1.49      skrll     645: pmap_bootstrap(vaddr_t vstart)
1.1       fredette  646: {
1.49      skrll     647:        vaddr_t va, addr;
1.1       fredette  648:        vsize_t size;
1.49      skrll     649:        extern paddr_t hppa_vtop;
                    650:        pmap_t kpm;
                    651:        int npdes, nkpdes;
                    652:        extern int resvphysmem;
1.2       fredette  653:        vsize_t btlb_entry_min, btlb_entry_max, btlb_entry_got;
1.49      skrll     654:        paddr_t ksrx, kerx, ksro, kero, ksrw, kerw;
                    655:        extern int usebtlb;
1.1       fredette  656:
1.29      skrll     657:        /* Provided by the linker script */
1.49      skrll     658:        extern int kernel_text, etext;
                    659:        extern int __rodata_start, __rodata_end;
                    660:        extern int __data_start;
1.29      skrll     661:
1.62      skrll     662:        DPRINTF(PDB_FOLLOW|PDB_INIT, ("%s(0x%lx)\n", __func__, vstart));
1.21      skrll     663:
1.1       fredette  664:        uvm_setpagesize();
                    665:
1.49      skrll     666:        hppa_prot[UVM_PROT_NONE]  = TLB_AR_NA;
                    667:        hppa_prot[UVM_PROT_READ]  = TLB_AR_R;
                    668:        hppa_prot[UVM_PROT_WRITE] = TLB_AR_RW;
                    669:        hppa_prot[UVM_PROT_RW]    = TLB_AR_RW;
                    670:        hppa_prot[UVM_PROT_EXEC]  = TLB_AR_RX;
                    671:        hppa_prot[UVM_PROT_RX]    = TLB_AR_RX;
                    672:        hppa_prot[UVM_PROT_WX]    = TLB_AR_RWX;
                    673:        hppa_prot[UVM_PROT_RWX]   = TLB_AR_RWX;
1.1       fredette  674:
                    675:        /*
                    676:         * Initialize kernel pmap
                    677:         */
1.49      skrll     678:        addr = round_page(vstart);
                    679:        kpm = pmap_kernel();
                    680:        memset(kpm, 0, sizeof(*kpm));
                    681:
                    682:        UVM_OBJ_INIT(&kpm->pm_obj, NULL, 1);
                    683:        kpm->pm_space = HPPA_SID_KERNEL;
                    684:        kpm->pm_pid = HPPA_PID_KERNEL;
                    685:        kpm->pm_pdir_pg = NULL;
                    686:        kpm->pm_pdir = (uint32_t *)addr;
                    687:
                    688:        memset((void *)addr, 0, PAGE_SIZE);
                    689:        fdcache(HPPA_SID_KERNEL, addr, PAGE_SIZE);
                    690:        addr += PAGE_SIZE;
1.29      skrll     691:
1.1       fredette  692:        /*
                    693:         * Allocate various tables and structures.
                    694:         */
1.49      skrll     695:        mtctl(addr, CR_VTOP);
                    696:        hppa_vtop = addr;
                    697:        size = round_page((hppa_sid_max + 1) * 4);
                    698:        memset((void *)addr, 0, size);
                    699:        fdcache(HPPA_SID_KERNEL, addr, size);
1.62      skrll     700:        DPRINTF(PDB_INIT, ("%s: vtop 0x%lx @ 0x%lx\n", __func__, size,
                    701:            addr));
1.49      skrll     702:
                    703:        addr += size;
                    704:        pmap_sdir_set(HPPA_SID_KERNEL, kpm->pm_pdir);
                    705:
                    706:        /*
                    707:         * cpuid() found out how big the HPT should be, so align addr to
                    708:         * what will be its beginning.  We don't waste the pages skipped
                    709:         * for the alignment.
                    710:         */
                    711: #ifdef USE_HPT
                    712:        if (pmap_hptsize) {
                    713:                struct hpt_entry *hptp;
                    714:                int i, error;
                    715:
                    716:                if (addr & (pmap_hptsize - 1))
                    717:                        addr += pmap_hptsize;
                    718:                addr &= ~(pmap_hptsize - 1);
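                                          /*
                                           * Editorial example: with pmap_hptsize 0x10000 and
                                           * addr 0x123400, the test above fires, addr becomes
                                           * 0x133400, and the mask rounds it down to 0x130000,
                                           * the next 64 KB boundary.
                                           */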
                    719:
                    720:                memset((void *)addr, 0, pmap_hptsize);
                    721:                hptp = (struct hpt_entry *)addr;
                    722:                for (i = pmap_hptsize / sizeof(struct hpt_entry); i--; ) {
                    723:                        hptp[i].hpt_valid = 0;
                    724:                        hptp[i].hpt_space = 0xffff;
                    725:                        hptp[i].hpt_vpn = 0;
                    726:                }
                    727:                pmap_hpt = addr;
                    728:                addr += pmap_hptsize;
                    729:
1.62      skrll     730:                DPRINTF(PDB_INIT, ("%s: hpt_table 0x%x @ 0x%lx\n", __func__,
                    731:                    pmap_hptsize, addr));
1.1       fredette  732:
1.49      skrll     733:                if ((error = (cpu_hpt_init)(pmap_hpt, pmap_hptsize)) < 0) {
                    734:                        printf("WARNING: HPT init error %d -- DISABLED\n",
                    735:                            error);
                    736:                        pmap_hpt = 0;
                    737:                } else
                    738:                        DPRINTF(PDB_INIT,
1.62      skrll     739:                            ("%s: HPT installed for %ld entries @ 0x%lx\n",
1.49      skrll     740:                            __func__, pmap_hptsize / sizeof(struct hpt_entry),
1.62      skrll     741:                            addr));
1.1       fredette  742:        }
1.49      skrll     743: #endif
1.2       fredette  744:
1.49      skrll     745:        /* Setup vtop in lwp0 trapframe. */
                    746:        lwp0.l_md.md_regs->tf_vtop = hppa_vtop;
1.28      skrll     747:
1.49      skrll     748:        /* Pre-allocate PDEs for kernel virtual */
                    749:        nkpdes = (VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) / PDE_SIZE;
                    750:        /* ... and io space too */
                    751:        nkpdes += HPPA_IOLEN / PDE_SIZE;
                    752:        /* ... and all physmem (VA == PA) */
                    753:        npdes = nkpdes + (physmem + atop(PDE_SIZE) - 1) / atop(PDE_SIZE);
                    754:
                    755:        DPRINTF(PDB_INIT, ("%s: npdes %d\n", __func__, npdes));
                    756:
                    757:        /* map the pdes */
                    758:        for (va = 0; npdes--; va += PDE_SIZE, addr += PAGE_SIZE) {
                    759:                /* last nkpdes are for the kernel virtual */
                    760:                if (npdes == nkpdes - 1)
                    761:                        va = SYSCALLGATE;
                    762:                if (npdes == HPPA_IOLEN / PDE_SIZE - 1)
                    763:                        va = HPPA_IOBEGIN;
                    764:                /* now map the pde for the physmem */
                    765:                memset((void *)addr, 0, PAGE_SIZE);
                    766:                DPRINTF(PDB_INIT|PDB_VP,
1.62      skrll     767:                    ("%s: pde premap 0x%08lx 0x%08lx\n", __func__, va,
                    768:                    addr));
1.49      skrll     769:                pmap_pde_set(kpm, va, addr);
                    770:                kpm->pm_stats.resident_count++; /* count PTP as resident */
                    771:        }
1.2       fredette  772:
                    773:        /*
1.49      skrll     774:         * At this point we've finished reserving memory for the kernel.
                    775:         */
                    776:        /* XXXNH */
                    777:        resvphysmem = atop(addr);
                    778:
                    779:        ksrx = (paddr_t) &kernel_text;
                    780:        kerx = (paddr_t) &etext;
                    781:        ksro = (paddr_t) &__rodata_start;
                    782:        kero = (paddr_t) &__rodata_end;
                    783:        ksrw = (paddr_t) &__data_start;
                    784:        kerw = addr;
1.1       fredette  785:
                    786:        /*
1.2       fredette  787:         * The kernel text, data, and bss must be direct-mapped,
                    788:         * because the kernel often runs in physical mode, and
                     789:         * anyway, the loader loaded the kernel into physical
                    790:         * memory exactly where it was linked.
                    791:         *
                    792:         * All memory already allocated after bss, either by
                    793:         * our caller or by this function itself, must also be
                    794:         * direct-mapped, because it's completely unmanaged
                    795:         * and was allocated in physical mode.
1.1       fredette  796:         *
1.2       fredette  797:         * BTLB entries are used to do this direct mapping.
                    798:         * BTLB entries have a minimum and maximum possible size,
                    799:         * and MD code gives us these sizes in units of pages.
1.1       fredette  800:         */
1.49      skrll     801:
1.2       fredette  802:        btlb_entry_min = (vsize_t) hppa_btlb_size_min * PAGE_SIZE;
                    803:        btlb_entry_max = (vsize_t) hppa_btlb_size_max * PAGE_SIZE;
1.1       fredette  804:
1.2       fredette  805:        /*
1.49      skrll     806:         * To try to conserve BTLB entries, take a hint from how
                    807:         * the kernel was linked: take the kernel text start as
1.2       fredette  808:         * our effective minimum BTLB entry size, assuming that
                    809:         * the data segment was also aligned to that size.
                    810:         *
                    811:         * In practice, linking the kernel at 2MB, and aligning
                    812:         * the data segment to a 2MB boundary, should control well
                    813:         * how much of the BTLB the pmap uses.  However, this code
                    814:         * should not rely on this 2MB magic number, nor should
                    815:         * it rely on the data segment being aligned at all.  This
                    816:         * is to allow (smaller) kernels (linked lower) to work fine.
1.1       fredette  817:         */
1.2       fredette  818:        btlb_entry_min = (vaddr_t) &kernel_text;
1.1       fredette  819:
1.49      skrll     820:        if (usebtlb) {
                    821: #define BTLB_SET_SIZE 16
                    822:                vaddr_t btlb_entry_start[BTLB_SET_SIZE];
                    823:                vsize_t btlb_entry_size[BTLB_SET_SIZE];
                    824:                int btlb_entry_vm_prot[BTLB_SET_SIZE];
                    825:                int btlb_i;
                    826:                int btlb_j;
1.23      skrll     827:
1.49      skrll     828:                /*
                    829:                 * Now make BTLB entries to direct-map the kernel text
                    830:                 * read- and execute-only as much as possible.  Note that
                    831:                 * if the data segment isn't nicely aligned, the last
                    832:                 * BTLB entry for the kernel text may also cover some of
                    833:                 * the data segment, meaning it will have to allow writing.
                    834:                 */
                    835:                addr = ksrx;
1.28      skrll     836:
1.49      skrll     837:                DPRINTF(PDB_INIT,
                    838:                    ("%s: BTLB mapping text and rodata @ %p - %p\n", __func__,
                    839:                    (void *)addr, (void *)kero));
                    840:
                    841:                btlb_j = 0;
                    842:                while (addr < (vaddr_t) kero) {
                    843:
                    844:                        /* Set up the next BTLB entry. */
                    845:                        KASSERT(btlb_j < BTLB_SET_SIZE);
                    846:                        btlb_entry_start[btlb_j] = addr;
                    847:                        btlb_entry_size[btlb_j] = btlb_entry_min;
                    848:                        btlb_entry_vm_prot[btlb_j] =
                    849:                            VM_PROT_READ | VM_PROT_EXECUTE;
                    850:                        if (addr + btlb_entry_min > kero)
                    851:                                btlb_entry_vm_prot[btlb_j] |= VM_PROT_WRITE;
                    852:
                    853:                        /* Coalesce BTLB entries whenever possible. */
                    854:                        while (btlb_j > 0 &&
                    855:                            btlb_entry_vm_prot[btlb_j] ==
                    856:                                btlb_entry_vm_prot[btlb_j - 1] &&
                    857:                            btlb_entry_size[btlb_j] ==
                    858:                                btlb_entry_size[btlb_j - 1] &&
                    859:                            !(btlb_entry_start[btlb_j - 1] &
                    860:                                ((btlb_entry_size[btlb_j - 1] << 1) - 1)) &&
                    861:                            (btlb_entry_size[btlb_j - 1] << 1) <=
                    862:                                btlb_entry_max)
                    863:                                btlb_entry_size[--btlb_j] <<= 1;
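                                                  /*
                                                   * Editorial example: two adjacent 2 MB entries
                                                   * with identical protections, the first starting
                                                   * on a 4 MB boundary, collapse into one 4 MB
                                                   * entry, provided 4 MB does not exceed
                                                   * btlb_entry_max.
                                                   */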
                    864:
                    865:                        /* Move on. */
                    866:                        addr =
                    867:                            btlb_entry_start[btlb_j] + btlb_entry_size[btlb_j];
                    868:                        btlb_j++;
                    869:                }
1.1       fredette  870:
1.49      skrll     871:                /*
                    872:                 * Now make BTLB entries to direct-map the kernel data,
                    873:                 * bss, and all of the preallocated space read-write.
                    874:                 *
                    875:                 * Note that, unlike above, we're not concerned with
                    876:                 * making these BTLB entries such that they finish as
                    877:                 * close as possible to the end of the space we need
                    878:                 * them to map.  Instead, to minimize the number of BTLB
                    879:                 * entries we need, we make them as large as possible.
                    880:                 * The only thing this wastes is kernel virtual space,
                    881:                 * which is plentiful.
                    882:                 */
1.2       fredette  883:
1.49      skrll     884:                DPRINTF(PDB_INIT, ("%s: mapping data, bss, etc @ %p - %p\n",
                    885:                    __func__, (void *)addr, (void *)kerw));
1.28      skrll     886:
1.49      skrll     887:                while (addr < kerw) {
1.28      skrll     888:
1.49      skrll     889:                        /* Make the next BTLB entry. */
                    890:                        KASSERT(btlb_j < BTLB_SET_SIZE);
                    891:                        size = btlb_entry_min;
                    892:                        while ((addr + size) < kerw &&
                    893:                                (size << 1) < btlb_entry_max &&
                    894:                            !(addr & ((size << 1) - 1)))
                    895:                                size <<= 1;
                    896:                        btlb_entry_start[btlb_j] = addr;
                    897:                        btlb_entry_size[btlb_j] = size;
                    898:                        btlb_entry_vm_prot[btlb_j] =
                    899:                            VM_PROT_READ | VM_PROT_WRITE;
                    900:
                    901:                        /* Move on. */
                    902:                        addr =
                    903:                            btlb_entry_start[btlb_j] + btlb_entry_size[btlb_j];
                    904:                        btlb_j++;
                    905:                }
                    906:
                    907:                /* Now insert all of the BTLB entries. */
                    908:                for (btlb_i = 0; btlb_i < btlb_j; btlb_i++) {
                    909:                        int error;
                    910:                        int prot;
                    911:
                    912:                        btlb_entry_got = btlb_entry_size[btlb_i];
                    913:                        prot = btlb_entry_vm_prot[btlb_i];
                    914:
                    915:                        error = hppa_btlb_insert(kpm->pm_space,
                    916:                            btlb_entry_start[btlb_i], btlb_entry_start[btlb_i],
                    917:                            &btlb_entry_got,
                    918:                            kpm->pm_pid | pmap_prot(kpm, prot));
                    919:
                    920:                        if (error)
                    921:                                panic("%s: cannot insert BTLB entry",
                    922:                                    __func__);
                    923:                        if (btlb_entry_got != btlb_entry_size[btlb_i])
                    924:                                panic("%s: BTLB entry mapped wrong amount",
                    925:                                    __func__);
                    926:                }
1.2       fredette  927:
1.49      skrll     928:                kerw =
                    929:                    btlb_entry_start[btlb_j - 1] + btlb_entry_size[btlb_j - 1];
1.1       fredette  930:        }
                    931:
1.2       fredette  932:        /*
1.49      skrll     933:         * We now know the exact beginning of managed kernel virtual space.
                    934:         *
                    935:         * Finally, load physical pages into UVM.  There are three segments of
                    936:         * pages.
1.2       fredette  937:         */
1.1       fredette  938:
1.49      skrll     939:        availphysmem = 0;
                    940:
1.67      skrll     941:        pmap_page_physload(resvmem, atop(ksrx));
1.64      skrll     942:        pmap_page_physload(atop(kero), atop(ksrw));
                    943:        pmap_page_physload(atop(kerw), physmem);
1.29      skrll     944:
1.49      skrll     945:        mutex_init(&pmaps_lock, MUTEX_DEFAULT, IPL_NONE);
1.29      skrll     946:
1.49      skrll     947:        /* TODO optimize/inline the kenter */
                    948:        for (va = PAGE_SIZE; va < ptoa(physmem); va += PAGE_SIZE) {
                    949:                vm_prot_t prot = UVM_PROT_RW;
1.1       fredette  950:
1.49      skrll     951:                if (va < resvmem)
                    952:                        prot = UVM_PROT_RX;
                    953:                else if (va >= ksrx && va < kerx)
                    954:                        prot = UVM_PROT_RX;
                    955:                else if (va >= ksro && va < kero)
                    956:                        prot = UVM_PROT_R;
                    957: #ifdef DIAGNOSTIC
1.59      rmind     958:                else if (va == uvm_lwp_getuarea(&lwp0) + USPACE - PAGE_SIZE)
1.49      skrll     959:                        prot = UVM_PROT_NONE;
                    960: #endif
1.54      cegger    961:                pmap_kenter_pa(va, va, prot, 0);
1.49      skrll     962:        }
1.1       fredette  963:
1.49      skrll     964:        /* XXXNH update */
1.62      skrll     965:        DPRINTF(PDB_INIT, ("%s: mapped 0x%lx - 0x%lx\n", __func__, ksro,
                    966:            kero));
                    967:        DPRINTF(PDB_INIT, ("%s: mapped 0x%lx - 0x%lx\n", __func__, ksrw,
                    968:            kerw));
1.8       thorpej   969:
1.1       fredette  970: }
                    971:
                    972: /*
                    973:  * Finishes the initialization of the pmap module.
1.56      skrll     974:  * This procedure is called from uvm_init() in uvm/uvm_init.c
1.1       fredette  975:  * to initialize any remaining data structures that the pmap module
                    976:  * needs to map virtual memory (VM is already ON).
                    977:  */
                    978: void
1.11      chs       979: pmap_init(void)
1.1       fredette  980: {
1.11      chs       981:        extern void gateway_page(void);
1.49      skrll     982:        volatile pt_entry_t *pde;
                    983:
                    984:        DPRINTF(PDB_FOLLOW|PDB_INIT, ("%s()\n", __func__));
                    985:
                    986:        sid_counter = HPPA_SID_KERNEL;
1.1       fredette  987:
1.49      skrll     988:        pool_init(&pmap_pool, sizeof(struct pmap), 0, 0, 0, "pmappl",
                    989:            &pool_allocator_nointr, IPL_NONE);
                    990:        pool_init(&pmap_pv_pool, sizeof(struct pv_entry), 0, 0, 0, "pmappv",
                    991:            &pool_allocator_nointr, IPL_NONE);
                    992:
                    993:        pool_setlowat(&pmap_pv_pool, pmap_pvlowat);
                    994:        pool_sethiwat(&pmap_pv_pool, pmap_pvlowat * 32);
1.1       fredette  995:
                    996:        /*
 1.56      skrll     997:         * Map the syscall gateway page once for everybody.
 1.1       fredette  998:         * NB: we'll have to remap the physical memory if we
                     999:         *     have any at the SYSCALLGATE address.
                    1000:         *
                    1001:         * No spl protection is needed: interrupts are not running yet.
                   1002:         */
1.49      skrll    1003:        if (!(pde = pmap_pde_get(pmap_kernel()->pm_pdir, SYSCALLGATE)) &&
                   1004:            !(pde = pmap_pde_alloc(pmap_kernel(), SYSCALLGATE, NULL)))
                   1005:                panic("pmap_init: cannot allocate pde");
                   1006:
                   1007:        pmap_pte_set(pde, SYSCALLGATE, (paddr_t)&gateway_page |
                   1008:            PTE_PROT(TLB_GATE_PROT));
1.1       fredette 1009:
1.33      thorpej  1010:        pmap_initialized = true;
1.49      skrll    1011:
                   1012:        DPRINTF(PDB_FOLLOW|PDB_INIT, ("%s(): done\n", __func__));
1.1       fredette 1013: }
                   1014:
1.49      skrll    1015: /*
                   1016:  * How much virtual space does this kernel have?
1.1       fredette 1017:  */
1.49      skrll    1018: void
                   1019: pmap_virtual_space(vaddr_t *startp, vaddr_t *endp)
1.1       fredette 1020: {
                   1021:
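                          	/*
                          	 * Managed kernel VA begins just above the gateway page,
                          	 * which must remain fixed at SYSCALLGATE.
                          	 */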
1.49      skrll    1022:        *startp = SYSCALLGATE + PAGE_SIZE;
                   1023:        *endp = VM_MAX_KERNEL_ADDRESS;
1.1       fredette 1024: }
                   1025:
                   1026: /*
                   1027:  * pmap_create()
                   1028:  *
                   1029:  * Create and return a physical map.
1.49      skrll    1030:  * The map is an actual physical map, and may be referenced by the hardware.
1.1       fredette 1031:  */
                   1032: pmap_t
1.11      chs      1033: pmap_create(void)
1.1       fredette 1034: {
1.10      chs      1035:        pmap_t pmap;
1.49      skrll    1036:        pa_space_t space;
1.1       fredette 1037:
1.49      skrll    1038:        pmap = pool_get(&pmap_pool, PR_WAITOK);
                   1039:
                   1040:        DPRINTF(PDB_FOLLOW|PDB_PMAP, ("%s: pmap = %p\n", __func__, pmap));
                   1041:
                   1042:        UVM_OBJ_INIT(&pmap->pm_obj, NULL, 1);
                   1043:
                   1044:        mutex_enter(&pmaps_lock);
1.1       fredette 1045:
                   1046:        /*
 1.49      skrll    1047:         * Allocate a space ID for the pmap; the protection ID is derived from it.
                   1048:         * If all are allocated, there is nothing we can do.
1.1       fredette 1049:         */
1.49      skrll    1050:        /* XXXNH can't this loop forever??? */
                   1051:        for (space = sid_counter; pmap_sdir_get(space);
                   1052:            space = (space + 1) % hppa_sid_max)
                   1053:                ;
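                          	/*
                          	 * XXXNH sketch (an illustration only, not the committed
                          	 * fix): the search above could detect wrap-around instead
                          	 * of spinning forever once every space id is taken:
                          	 *
                          	 *	pa_space_t start = sid_counter;
                          	 *	for (space = start; pmap_sdir_get(space);) {
                          	 *		space = (space + 1) % hppa_sid_max;
                          	 *		if (space == start)
                          	 *			panic("%s: out of space ids",
                          	 *			    __func__);
                          	 *	}
                          	 */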
                   1054:
                   1055:        if ((pmap->pm_pdir_pg = pmap_pagealloc(NULL, 0)) == NULL)
                   1056:                panic("pmap_create: no pages");
                   1057:        pmap->pm_ptphint = NULL;
                   1058:        pmap->pm_pdir = (uint32_t *)VM_PAGE_TO_PHYS(pmap->pm_pdir_pg);
                   1059:        pmap_sdir_set(space, pmap->pm_pdir);
                   1060:
                   1061:        pmap->pm_space = space;
                   1062:        pmap->pm_pid = (space + 1) << 1;
                   1063:
                   1064:        pmap->pm_stats.resident_count = 1;
                   1065:        pmap->pm_stats.wired_count = 0;
                   1066:
                   1067:        mutex_exit(&pmaps_lock);
1.1       fredette 1068:
1.49      skrll    1069:        DPRINTF(PDB_FOLLOW|PDB_PMAP, ("%s: pm = %p, space = %d, pid = %d\n",
                   1070:            __func__, pmap, space, pmap->pm_pid));
1.1       fredette 1071:
1.49      skrll    1072:        return (pmap);
1.1       fredette 1073: }
                   1074:
                   1075: /*
                   1076:  * pmap_destroy(pmap)
                   1077:  *     Gives up a reference to the specified pmap.  When the reference count
                   1078:  *     reaches zero the pmap structure is added to the pmap free list.
                   1079:  *     Should only be called if the map contains no valid mappings.
                   1080:  */
                   1081: void
1.11      chs      1082: pmap_destroy(pmap_t pmap)
1.1       fredette 1083: {
1.49      skrll    1084: #ifdef DIAGNOSTIC
                   1085:        struct vm_page *pg;
                   1086: #endif
                   1087:        int refs;
1.1       fredette 1088:
1.49      skrll    1089:        DPRINTF(PDB_FOLLOW|PDB_PMAP, ("%s(%p)\n", __func__, pmap));
1.1       fredette 1090:
1.49      skrll    1091:        mutex_enter(&pmap->pm_lock);
                   1092:        refs = --pmap->pm_obj.uo_refs;
                   1093:        mutex_exit(&pmap->pm_lock);
1.1       fredette 1094:
1.49      skrll    1095:        if (refs > 0)
                   1096:                return;
1.1       fredette 1097:
1.49      skrll    1098: #ifdef DIAGNOSTIC
                   1099:        while ((pg = TAILQ_FIRST(&pmap->pm_obj.memq))) {
                   1100:                pt_entry_t *pde, *epde;
                   1101:                struct vm_page *sheep;
                   1102:                struct pv_entry *haggis;
                   1103:
                   1104:                if (pg == pmap->pm_pdir_pg)
                   1105:                        continue;
                   1106:
                   1107:                DPRINTF(PDB_FOLLOW, ("%s(%p): stray ptp "
                   1108:                    "0x%lx w/ %d ents:", __func__, pmap, VM_PAGE_TO_PHYS(pg),
                   1109:                    pg->wire_count - 1));
                   1110:
                   1111:                pde = (pt_entry_t *)VM_PAGE_TO_PHYS(pg);
                   1112:                epde = (pt_entry_t *)(VM_PAGE_TO_PHYS(pg) + PAGE_SIZE);
                   1113:                for (; pde < epde; pde++) {
                   1114:                        if (*pde == 0)
                   1115:                                continue;
1.1       fredette 1116:
1.49      skrll    1117:                        sheep = PHYS_TO_VM_PAGE(PTE_PAGE(*pde));
1.77      uebayasi 1118:                        struct vm_page_md * const md = VM_PAGE_TO_MD(sheep);
                   1119:                        for (haggis = md->pvh_list; haggis != NULL; )
1.49      skrll    1120:                                if (haggis->pv_pmap == pmap) {
                   1121:
1.62      skrll    1122:                                        DPRINTF(PDB_FOLLOW, (" 0x%lx",
                   1123:                                            haggis->pv_va));
1.49      skrll    1124:
                   1125:                                        pmap_remove(pmap,
                   1126:                                            haggis->pv_va & PV_VAMASK,
                   1127:                                            haggis->pv_va + PAGE_SIZE);
                   1128:
                   1129:                                        /*
                    1130:                                         * pmap_remove() may have freed pv
                    1131:                                         * entries; restart from the list head.
                   1132:                                         */
1.77      uebayasi 1133:                                        haggis = md->pvh_list;
1.49      skrll    1134:                                } else
                   1135:                                        haggis = haggis->pv_next;
                   1136:                }
                   1137:                DPRINTF(PDB_FOLLOW, ("\n"));
1.1       fredette 1138:        }
1.49      skrll    1139: #endif
                   1140:        pmap_sdir_set(pmap->pm_space, 0);
                   1141:        mutex_enter(&pmap->pm_lock);
                   1142:        pmap_pagefree(pmap->pm_pdir_pg);
                   1143:        mutex_exit(&pmap->pm_lock);
                   1144:        mutex_destroy(&pmap->pm_lock);
                   1145:        pmap->pm_pdir_pg = NULL;
                   1146:        pool_put(&pmap_pool, pmap);
1.1       fredette 1147: }
                   1148:
                   1149: /*
1.49      skrll    1150:  * Add a reference to the specified pmap.
1.1       fredette 1151:  */
                   1152: void
1.49      skrll    1153: pmap_reference(pmap_t pmap)
1.1       fredette 1154: {
1.36      skrll    1155:
1.49      skrll    1156:        DPRINTF(PDB_FOLLOW|PDB_PMAP, ("%s(%p)\n", __func__, pmap));
                   1157:
                   1158:        mutex_enter(&pmap->pm_lock);
                   1159:        pmap->pm_obj.uo_refs++;
                   1160:        mutex_exit(&pmap->pm_lock);
                   1161: }
1.1       fredette 1162:
                   1163: /*
                   1164:  * pmap_enter(pmap, va, pa, prot, flags)
                   1165:  *     Create a translation for the virtual address (va) to the physical
                   1166:  *     address (pa) in the pmap with the protection requested. If the
                   1167:  *     translation is wired then we can not allow a page fault to occur
                   1168:  *     for this mapping.
                   1169:  */
                   1170: int
1.48      cegger   1171: pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
1.1       fredette 1172: {
1.49      skrll    1173:        volatile pt_entry_t *pde;
                   1174:        pt_entry_t pte;
                   1175:        struct vm_page *pg, *ptp = NULL;
                   1176:        struct pv_entry *pve;
1.32      thorpej  1177:        bool wired = (flags & PMAP_WIRED) != 0;
1.1       fredette 1178:
1.49      skrll    1179:        DPRINTF(PDB_FOLLOW|PDB_ENTER,
1.62      skrll    1180:            ("%s(%p, 0x%lx, 0x%lx, 0x%x, 0x%x)\n", __func__, pmap, va, pa,
                   1181:            prot, flags));
1.49      skrll    1182:
                   1183:        PMAP_LOCK(pmap);
                   1184:
                   1185:        if (!(pde = pmap_pde_get(pmap->pm_pdir, va)) &&
                   1186:            !(pde = pmap_pde_alloc(pmap, va, &ptp))) {
                   1187:                if (flags & PMAP_CANFAIL) {
                   1188:                        PMAP_UNLOCK(pmap);
                   1189:                        return (ENOMEM);
                   1190:                }
                   1191:
                   1192:                panic("pmap_enter: cannot allocate pde");
1.16      chs      1193:        }
1.1       fredette 1194:
1.49      skrll    1195:        if (!ptp)
                   1196:                ptp = pmap_pde_ptp(pmap, pde);
                   1197:
                   1198:        if ((pte = pmap_pte_get(pde, va))) {
                   1199:
                   1200:                DPRINTF(PDB_ENTER,
1.62      skrll    1201:                    ("%s: remapping 0x%x -> 0x%lx\n", __func__, pte, pa));
1.49      skrll    1202:
                   1203:                pmap_pte_flush(pmap, va, pte);
                   1204:                if (wired && !(pte & PTE_PROT(TLB_WIRED)))
                   1205:                        pmap->pm_stats.wired_count++;
                   1206:                else if (!wired && (pte & PTE_PROT(TLB_WIRED)))
                   1207:                        pmap->pm_stats.wired_count--;
1.1       fredette 1208:
1.49      skrll    1209:                if (PTE_PAGE(pte) == pa) {
                   1210:                        DPRINTF(PDB_FOLLOW|PDB_ENTER,
                   1211:                            ("%s: same page\n", __func__));
                   1212:                        goto enter;
                   1213:                }
1.1       fredette 1214:
1.49      skrll    1215:                pg = PHYS_TO_VM_PAGE(PTE_PAGE(pte));
1.77      uebayasi 1216:                struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
                   1217:                mutex_enter(&md->pvh_lock);
1.49      skrll    1218:                pve = pmap_pv_remove(pg, pmap, va);
1.77      uebayasi 1219:                md->pvh_attrs |= pmap_pvh_attrs(pte);
                   1220:                mutex_exit(&md->pvh_lock);
1.1       fredette 1221:        } else {
1.62      skrll    1222:                DPRINTF(PDB_ENTER, ("%s: new mapping 0x%lx -> 0x%lx\n",
                   1223:                    __func__, va, pa));
1.49      skrll    1224:                pte = PTE_PROT(TLB_REFTRAP);
                   1225:                pve = NULL;
                   1226:                pmap->pm_stats.resident_count++;
                   1227:                if (wired)
                   1228:                        pmap->pm_stats.wired_count++;
                   1229:                if (ptp)
                   1230:                        ptp->wire_count++;
                   1231:        }
                   1232:
                   1233:        if (pmap_initialized && (pg = PHYS_TO_VM_PAGE(pa))) {
1.77      uebayasi 1234:                struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
1.49      skrll    1235:
                   1236:                if (!pve && !(pve = pmap_pv_alloc())) {
                   1237:                        if (flags & PMAP_CANFAIL) {
                    1238:                                /* note: md->pvh_lock is not held here */
1.49      skrll    1239:                                PMAP_UNLOCK(pmap);
                   1240:                                return (ENOMEM);
1.1       fredette 1241:                        }
1.49      skrll    1242:                        panic("%s: no pv entries available", __func__);
1.1       fredette 1243:                }
 1.73      skrll    1244:                pte |= PTE_PROT(pmap_prot(pmap, prot));
1.77      uebayasi 1245:                mutex_enter(&md->pvh_lock);
1.73      skrll    1246:                if (pmap_check_alias(pg, va, pte))
                   1247:                        pmap_page_remove_locked(pg);
1.49      skrll    1248:                pmap_pv_enter(pg, pve, pmap, va, ptp, 0);
1.77      uebayasi 1249:                mutex_exit(&md->pvh_lock);
1.49      skrll    1250:        } else if (pve) {
                   1251:                pmap_pv_free(pve);
1.1       fredette 1252:        }
                   1253:
1.49      skrll    1254: enter:
                   1255:        /* preserve old ref & mod */
                   1256:        pte = pa | PTE_PROT(pmap_prot(pmap, prot)) |
                   1257:            (pte & PTE_PROT(TLB_UNCACHEABLE|TLB_DIRTY|TLB_REFTRAP));
                   1258:        if (wired)
                   1259:                pte |= PTE_PROT(TLB_WIRED);
                   1260:        pmap_pte_set(pde, va, pte);
                   1261:
                   1262:        PMAP_UNLOCK(pmap);
                   1263:
                   1264:        DPRINTF(PDB_FOLLOW|PDB_ENTER, ("%s: leaving\n", __func__));
1.1       fredette 1265:
                   1266:        return (0);
                   1267: }
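
                          /*
                           * Hypothetical caller sketch (not part of this file): a caller
                           * passing PMAP_CANFAIL sees resource shortage as ENOMEM rather
                           * than a panic, and can wait for the pagedaemon:
                           *
                           *	if (pmap_enter(pm, va, pa, VM_PROT_READ | VM_PROT_WRITE,
                           *	    VM_PROT_READ | VM_PROT_WRITE | PMAP_CANFAIL) == ENOMEM)
                           *		uvm_wait("penter");
                           */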
                   1268:
                   1269: /*
                   1270:  * pmap_remove(pmap, sva, eva)
1.49      skrll    1271:  *     unmaps all virtual addresses in the virtual address
1.1       fredette 1272:  *     range determined by [sva, eva) and pmap.
                   1273:  *     sva and eva must be on machine independent page boundaries and
                   1274:  *     sva must be less than or equal to eva.
                   1275:  */
                   1276: void
1.11      chs      1277: pmap_remove(pmap_t pmap, vaddr_t sva, vaddr_t eva)
1.1       fredette 1278: {
1.49      skrll    1279:        struct pv_entry *pve;
                   1280:        volatile pt_entry_t *pde = NULL;
                   1281:        pt_entry_t pte;
                   1282:        struct vm_page *pg;
                   1283:        vaddr_t pdemask;
                   1284:        int batch;
                   1285:
                   1286:        DPRINTF(PDB_FOLLOW|PDB_REMOVE,
1.62      skrll    1287:            ("%s(%p, 0x%lx, 0x%lx)\n", __func__, pmap, sva, eva));
1.49      skrll    1288:
                   1289:        PMAP_LOCK(pmap);
                   1290:
1.75      skrll    1291:        for (batch = 0; sva < eva; sva += PAGE_SIZE) {
                   1292:                pdemask = sva & PDE_MASK;
                   1293:                if (!(pde = pmap_pde_get(pmap->pm_pdir, sva))) {
                   1294:                        sva = pdemask + PDE_SIZE - PAGE_SIZE;
                   1295:                        continue;
1.49      skrll    1296:                }
1.75      skrll    1297:                batch = pdemask == sva && sva + PDE_SIZE <= eva;
1.49      skrll    1298:
                   1299:                if ((pte = pmap_pte_get(pde, sva))) {
                   1300:
                    1301:                        /*
                    1302:                         * TODO: measure the speed tradeoff of flushing the
                    1303:                         * whole 4MB pde vs. per-page for a partial pde fill.
                    1304:                         */
                   1305:                        pmap_pte_flush(pmap, sva, pte);
                   1306:                        if (pte & PTE_PROT(TLB_WIRED))
                   1307:                                pmap->pm_stats.wired_count--;
                   1308:                        pmap->pm_stats.resident_count--;
                   1309:
                    1310:                        /* if properly accounted, the pde will be dropped anyway */
                   1311:                        if (!batch)
                   1312:                                pmap_pte_set(pde, sva, 0);
                   1313:
                   1314:                        if (pmap_initialized &&
                   1315:                            (pg = PHYS_TO_VM_PAGE(PTE_PAGE(pte)))) {
1.77      uebayasi 1316:                                struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
1.49      skrll    1317:
1.77      uebayasi 1318:                                mutex_enter(&md->pvh_lock);
1.49      skrll    1319:
                   1320:                                pve = pmap_pv_remove(pg, pmap, sva);
1.77      uebayasi 1321:                                md->pvh_attrs |= pmap_pvh_attrs(pte);
1.1       fredette 1322:
1.77      uebayasi 1323:                                mutex_exit(&md->pvh_lock);
1.1       fredette 1324:
1.49      skrll    1325:                                if (pve != NULL)
                   1326:                                        pmap_pv_free(pve);
1.1       fredette 1327:                        }
                   1328:                }
                   1329:        }
                   1330:
1.49      skrll    1331:        PMAP_UNLOCK(pmap);
                   1332:
                   1333:        DPRINTF(PDB_FOLLOW|PDB_REMOVE, ("%s: leaving\n", __func__));
1.1       fredette 1334: }
                   1335:
1.49      skrll    1336:
1.1       fredette 1337: void
1.49      skrll    1338: pmap_write_protect(pmap_t pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
1.1       fredette 1339: {
1.49      skrll    1340:        struct vm_page *pg;
                   1341:        volatile pt_entry_t *pde = NULL;
                   1342:        pt_entry_t pte;
                   1343:        u_int pteprot, pdemask;
                   1344:
                   1345:        DPRINTF(PDB_FOLLOW|PDB_PMAP,
1.62      skrll    1346:            ("%s(%p, %lx, %lx, %x)\n", __func__, pmap, sva, eva, prot));
1.49      skrll    1347:
                   1348:        sva = trunc_page(sva);
                   1349:        pteprot = PTE_PROT(pmap_prot(pmap, prot));
1.1       fredette 1350:
1.49      skrll    1351:        PMAP_LOCK(pmap);
1.1       fredette 1352:
1.49      skrll    1353:        for (pdemask = 1; sva < eva; sva += PAGE_SIZE) {
                   1354:                if (pdemask != (sva & PDE_MASK)) {
                   1355:                        pdemask = sva & PDE_MASK;
                   1356:                        if (!(pde = pmap_pde_get(pmap->pm_pdir, sva))) {
1.50      skrll    1357:                                sva = pdemask + PDE_SIZE - PAGE_SIZE;
1.1       fredette 1358:                                continue;
1.49      skrll    1359:                        }
                   1360:                }
                   1361:                if ((pte = pmap_pte_get(pde, sva))) {
                   1362:
                   1363:                        DPRINTF(PDB_PMAP,
1.62      skrll    1364:                            ("%s: va=0x%lx pte=0x%x\n", __func__, sva,  pte));
1.1       fredette 1365:                        /*
1.49      skrll    1366:                         * Determine if mapping is changing.
                   1367:                         * If not, nothing to do.
1.1       fredette 1368:                         */
1.49      skrll    1369:                        if ((pte & PTE_PROT(TLB_AR_MASK)) == pteprot)
1.1       fredette 1370:                                continue;
1.49      skrll    1371:
                   1372:                        pg = PHYS_TO_VM_PAGE(PTE_PAGE(pte));
1.77      uebayasi 1373:                        struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
                   1374:                        mutex_enter(&md->pvh_lock);
                   1375:                        md->pvh_attrs |= pmap_pvh_attrs(pte);
                   1376:                        mutex_exit(&md->pvh_lock);
1.49      skrll    1377:
                   1378:                        pmap_pte_flush(pmap, sva, pte);
                   1379:                        pte &= ~PTE_PROT(TLB_AR_MASK);
                   1380:                        pte |= pteprot;
                   1381:                        pmap_pte_set(pde, sva, pte);
1.1       fredette 1382:                }
                   1383:        }
1.49      skrll    1384:
                   1385:        PMAP_UNLOCK(pmap);
1.1       fredette 1386: }
                   1387:
                   1388: void
1.49      skrll    1389: pmap_page_remove(struct vm_page *pg)
1.1       fredette 1390: {
1.77      uebayasi 1391:        struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
1.73      skrll    1392:
1.77      uebayasi 1393:        mutex_enter(&md->pvh_lock);
1.73      skrll    1394:        pmap_page_remove_locked(pg);
1.77      uebayasi 1395:        mutex_exit(&md->pvh_lock);
1.73      skrll    1396: }
                   1397:
                   1398: void
                   1399: pmap_page_remove_locked(struct vm_page *pg)
                   1400: {
1.77      uebayasi 1401:        struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
1.49      skrll    1402:        struct pv_entry *pve, *npve, **pvp;
1.1       fredette 1403:
1.49      skrll    1404:        DPRINTF(PDB_FOLLOW|PDB_PV, ("%s(%p)\n", __func__, pg));
1.1       fredette 1405:
1.77      uebayasi 1406:        if (md->pvh_list == NULL)
1.1       fredette 1407:                return;
                   1408:
1.77      uebayasi 1409:        pvp = &md->pvh_list;
                   1410:        for (pve = md->pvh_list; pve; pve = npve) {
1.49      skrll    1411:                pmap_t pmap = pve->pv_pmap;
                   1412:                vaddr_t va = pve->pv_va & PV_VAMASK;
                   1413:                volatile pt_entry_t *pde;
                   1414:                pt_entry_t pte;
                   1415:
                   1416:                PMAP_LOCK(pmap);
1.1       fredette 1417:
1.49      skrll    1418:                pde = pmap_pde_get(pmap->pm_pdir, va);
                   1419:                pte = pmap_pte_get(pde, va);
1.15      chs      1420:
1.49      skrll    1421:                npve = pve->pv_next;
                   1422:                /*
                   1423:                 * If this was an unmanaged mapping, it must be preserved. Move
                   1424:                 * it back on the list and advance the end-of-list pointer.
                   1425:                 */
                   1426:                if (pve->pv_va & PV_KENTER) {
                   1427:                        *pvp = pve;
                   1428:                        pvp = &pve->pv_next;
1.73      skrll    1429:                } else
1.77      uebayasi 1430:                        md->pvh_attrs |= pmap_pvh_attrs(pte);
1.49      skrll    1431:
                   1432:                pmap_pte_flush(pmap, va, pte);
                   1433:                if (pte & PTE_PROT(TLB_WIRED))
                   1434:                        pmap->pm_stats.wired_count--;
                   1435:                pmap->pm_stats.resident_count--;
                   1436:
1.73      skrll    1437:                if (!(pve->pv_va & PV_KENTER)) {
                   1438:                        pmap_pte_set(pde, va, 0);
                   1439:                        pmap_pv_free(pve);
                   1440:                }
1.49      skrll    1441:                PMAP_UNLOCK(pmap);
1.1       fredette 1442:        }
1.49      skrll    1443:        *pvp = NULL;
                   1444:
                   1445:        DPRINTF(PDB_FOLLOW|PDB_PV, ("%s: leaving\n", __func__));
1.1       fredette 1446: }
                   1447:
                   1448: /*
                   1449:  *     Routine:        pmap_unwire
                   1450:  *     Function:       Change the wiring attribute for a map/virtual-address
                   1451:  *                     pair.
                   1452:  *     In/out conditions:
                   1453:  *                     The mapping must already exist in the pmap.
                   1454:  *
                    1455:  * Change the wiring for a given virtual page.  This routine is currently
                    1456:  * only used to unwire pages, so the mapping entry will always exist.
                   1457:  */
                   1458: void
1.11      chs      1459: pmap_unwire(pmap_t pmap, vaddr_t va)
1.1       fredette 1460: {
1.49      skrll    1461:        volatile pt_entry_t *pde;
                   1462:        pt_entry_t pte = 0;
1.1       fredette 1463:
1.62      skrll    1464:        DPRINTF(PDB_FOLLOW|PDB_PMAP, ("%s(%p, 0x%lx)\n", __func__, pmap, va));
1.1       fredette 1465:
1.49      skrll    1466:        PMAP_LOCK(pmap);
                   1467:        if ((pde = pmap_pde_get(pmap->pm_pdir, va))) {
                   1468:                pte = pmap_pte_get(pde, va);
                   1469:
                   1470:                KASSERT(pte);
                   1471:
                   1472:                if (pte & PTE_PROT(TLB_WIRED)) {
                   1473:                        pte &= ~PTE_PROT(TLB_WIRED);
                   1474:                        pmap->pm_stats.wired_count--;
                   1475:                        pmap_pte_set(pde, va, pte);
                   1476:                }
                   1477:        }
                   1478:        PMAP_UNLOCK(pmap);
1.1       fredette 1479:
1.49      skrll    1480:        DPRINTF(PDB_FOLLOW|PDB_PMAP, ("%s: leaving\n", __func__));
1.1       fredette 1481: }
                   1482:
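                          /*
                           * Set and/or clear the given attribute bits on every mapping of
                           * the page.  Returns true if any of the named bits was set
                           * beforehand, either in the page attributes or in one of the ptes.
                           */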
1.32      thorpej  1483: bool
1.49      skrll    1484: pmap_changebit(struct vm_page *pg, u_int set, u_int clear)
1.1       fredette 1485: {
1.77      uebayasi 1486:        struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
1.73      skrll    1487:        struct pv_entry *pve;
                   1488:        int res;
1.49      skrll    1489:
1.62      skrll    1490:        DPRINTF(PDB_FOLLOW|PDB_BITS,
1.49      skrll    1491:            ("%s(%p, %x, %x)\n", __func__, pg, set, clear));
1.1       fredette 1492:
1.76      skrll    1493:        KASSERT((set & ~(PVF_REF|PVF_UNCACHEABLE)) == 0);
                   1494:        KASSERT((clear & ~(PVF_MOD|PVF_WRITE|PVF_UNCACHEABLE)) == 0);
1.73      skrll    1495:
1.77      uebayasi 1496:        mutex_enter(&md->pvh_lock);
1.1       fredette 1497:
1.49      skrll    1498:        /* preserve other bits */
1.77      uebayasi 1499:        res = md->pvh_attrs & (set | clear);
                   1500:        md->pvh_attrs ^= res;
1.1       fredette 1501:
1.77      uebayasi 1502:        for (pve = md->pvh_list; pve; pve = pve->pv_next) {
1.49      skrll    1503:                pmap_t pmap = pve->pv_pmap;
                   1504:                vaddr_t va = pve->pv_va & PV_VAMASK;
                   1505:                volatile pt_entry_t *pde;
                   1506:                pt_entry_t opte, pte;
1.1       fredette 1507:
1.49      skrll    1508:                if ((pde = pmap_pde_get(pmap->pm_pdir, va))) {
                   1509:                        opte = pte = pmap_pte_get(pde, va);
                   1510: #ifdef PMAPDEBUG
                   1511:                        if (!pte) {
                   1512:                                DPRINTF(PDB_FOLLOW|PDB_BITS,
1.62      skrll    1513:                                    ("%s: zero pte for 0x%lx\n", __func__,
                   1514:                                    va));
1.49      skrll    1515:                                continue;
                   1516:                        }
                   1517: #endif
                   1518:                        pte &= ~clear;
                   1519:                        pte |= set;
1.1       fredette 1520:
1.49      skrll    1521:                        if (!(pve->pv_va & PV_KENTER)) {
1.77      uebayasi 1522:                                md->pvh_attrs |= pmap_pvh_attrs(pte);
1.49      skrll    1523:                                res |= pmap_pvh_attrs(opte);
                   1524:                        }
1.1       fredette 1525:
1.49      skrll    1526:                        if (opte != pte) {
                   1527:                                pmap_pte_flush(pmap, va, opte);
                   1528:                                pmap_pte_set(pde, va, pte);
                   1529:                        }
                   1530:                }
                   1531:        }
1.77      uebayasi 1532:        mutex_exit(&md->pvh_lock);
1.1       fredette 1533:
1.49      skrll    1534:        return ((res & (clear | set)) != 0);
1.1       fredette 1535: }
                   1536:
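                          /*
                           * Check whether any mapping of the page has the given attribute
                           * bit set, accumulating pte attributes into pvh_attrs along the
                           * way.
                           */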
1.49      skrll    1537: bool
                   1538: pmap_testbit(struct vm_page *pg, u_int bit)
1.1       fredette 1539: {
1.77      uebayasi 1540:        struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
1.49      skrll    1541:        struct pv_entry *pve;
                   1542:        pt_entry_t pte;
                   1543:        int ret;
1.1       fredette 1544:
1.49      skrll    1545:        DPRINTF(PDB_FOLLOW|PDB_BITS, ("%s(%p, %x)\n", __func__, pg, bit));
1.1       fredette 1546:
1.77      uebayasi 1547:        mutex_enter(&md->pvh_lock);
1.1       fredette 1548:
1.77      uebayasi 1549:        for (pve = md->pvh_list; !(md->pvh_attrs & bit) && pve;
1.49      skrll    1550:            pve = pve->pv_next) {
                   1551:                pmap_t pm = pve->pv_pmap;
1.1       fredette 1552:
1.49      skrll    1553:                pte = pmap_vp_find(pm, pve->pv_va & PV_VAMASK);
                   1554:                if (pve->pv_va & PV_KENTER)
                   1555:                        continue;
1.1       fredette 1556:
1.77      uebayasi 1557:                md->pvh_attrs |= pmap_pvh_attrs(pte);
1.49      skrll    1558:        }
1.77      uebayasi 1559:        ret = ((md->pvh_attrs & bit) != 0);
                   1560:        mutex_exit(&md->pvh_lock);
1.1       fredette 1561:
1.49      skrll    1562:        return ret;
1.1       fredette 1563: }
                   1564:
                   1565: /*
1.49      skrll    1566:  * pmap_extract(pmap, va, pap)
                   1567:  *     fills in the physical address corresponding to the
                   1568:  *     virtual address specified by pmap and va into the
                   1569:  *     storage pointed to by pap and returns true if the
                    1570:  *     virtual address is mapped; returns false if it is not mapped.
1.1       fredette 1571:  */
1.49      skrll    1572: bool
                   1573: pmap_extract(pmap_t pmap, vaddr_t va, paddr_t *pap)
1.1       fredette 1574: {
1.49      skrll    1575:        pt_entry_t pte;
                   1576:
1.62      skrll    1577:        DPRINTF(PDB_FOLLOW|PDB_EXTRACT, ("%s(%p, %lx)\n", __func__, pmap, va));
1.1       fredette 1578:
1.49      skrll    1579:        PMAP_LOCK(pmap);
                   1580:        pte = pmap_vp_find(pmap, va);
                   1581:        PMAP_UNLOCK(pmap);
1.1       fredette 1582:
1.49      skrll    1583:        if (pte) {
                   1584:                if (pap)
                   1585:                        *pap = (pte & ~PGOFSET) | (va & PGOFSET);
                   1586:                return true;
1.1       fredette 1587:        }
1.49      skrll    1588:
                   1589:        return false;
1.1       fredette 1590: }
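
                          /*
                           * Hypothetical usage sketch: translate a managed virtual address,
                           * handling the unmapped case explicitly.
                           *
                           *	paddr_t pa;
                           *
                           *	if (!pmap_extract(pmap_kernel(), va, &pa))
                           *		panic("%s: va 0x%lx not mapped", __func__, va);
                           */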
                   1591:
1.49      skrll    1592:
1.1       fredette 1593: /*
1.49      skrll    1594:  * pmap_activate(lwp)
1.59      rmind    1595:  *
                   1596:  *     Activates the vmspace for the given LWP.
                   1597:  *     This is not necessarily the current LWP.
1.1       fredette 1598:  */
1.49      skrll    1599: void
                   1600: pmap_activate(struct lwp *l)
1.1       fredette 1601: {
1.49      skrll    1602:        struct proc *p = l->l_proc;
                   1603:        pmap_t pmap = p->p_vmspace->vm_map.pmap;
                   1604:        pa_space_t space = pmap->pm_space;
1.57      rmind    1605:        struct pcb *pcb = lwp_getpcb(l);
1.49      skrll    1606:
                   1607:        /* space is cached for the copy{in,out}'s pleasure */
                   1608:        pcb->pcb_space = space;
1.60      skrll    1609:        fdcache(HPPA_SID_KERNEL, (vaddr_t)pcb, sizeof(struct pcb));
1.49      skrll    1610:
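                          	/*
                          	 * If this process is the one running now, make the new
                          	 * protection id take effect immediately via PIDR2.
                          	 */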
                   1611:        if (p == curproc)
                   1612:                mtctl(pmap->pm_pid, CR_PIDR2);
1.1       fredette 1613: }
                   1614:
1.49      skrll    1615:
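                          /*
                           * Flush (or, when purging, just invalidate) the data cache for
                           * every mapping of the page; on the PA8x00 CPUs the instruction
                           * cache and TLBs are flushed too.  The caches are virtually
                           * indexed, so other mappings may hold stale aliases of this
                           * physical page.
                           */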
                   1616: static inline void
                   1617: pmap_flush_page(struct vm_page *pg, bool purge)
1.1       fredette 1618: {
1.77      uebayasi 1619:        struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
1.49      skrll    1620:        struct pv_entry *pve;
                   1621:
                   1622:        DPRINTF(PDB_FOLLOW|PDB_CACHE, ("%s(%p, %d)\n", __func__, pg, purge));
                   1623:
                   1624:        /* purge cache for all possible mappings for the pa */
1.77      uebayasi 1625:        for (pve = md->pvh_list; pve; pve = pve->pv_next) {
1.49      skrll    1626:                vaddr_t va = pve->pv_va & PV_VAMASK;
1.68      skrll    1627:                pa_space_t sp = pve->pv_pmap->pm_space;
1.49      skrll    1628:
                   1629:                if (purge)
1.68      skrll    1630:                        pdcache(sp, va, PAGE_SIZE);
1.49      skrll    1631:                else
1.68      skrll    1632:                        fdcache(sp, va, PAGE_SIZE);
                   1633: #if defined(HP8000_CPU) || defined(HP8200_CPU) || \
                   1634:     defined(HP8500_CPU) || defined(HP8600_CPU)
1.73      skrll    1635:                ficache(sp, va, PAGE_SIZE);
1.68      skrll    1636:                pdtlb(sp, va);
                   1637:                pitlb(sp, va);
                   1638: #endif
1.49      skrll    1639:        }
1.1       fredette 1640: }
                   1641:
                   1642: /*
1.49      skrll    1643:  * pmap_zero_page(pa)
1.1       fredette 1644:  *
1.49      skrll    1645:  * Zeros the specified page.
1.1       fredette 1646:  */
1.49      skrll    1647: void
                   1648: pmap_zero_page(paddr_t pa)
1.1       fredette 1649: {
1.49      skrll    1650:
1.62      skrll    1651:        DPRINTF(PDB_FOLLOW|PDB_PHYS, ("%s(%lx)\n", __func__, pa));
1.49      skrll    1652:
1.77      uebayasi 1653:        KASSERT(VM_PAGE_TO_MD(PHYS_TO_VM_PAGE(pa))->pvh_list == NULL);
1.49      skrll    1654:
                   1655:        memset((void *)pa, 0, PAGE_SIZE);
                   1656:        fdcache(HPPA_SID_KERNEL, pa, PAGE_SIZE);
1.73      skrll    1657:
1.68      skrll    1658: #if defined(HP8000_CPU) || defined(HP8200_CPU) || \
                   1659:     defined(HP8500_CPU) || defined(HP8600_CPU)
1.73      skrll    1660:        ficache(HPPA_SID_KERNEL, pa, PAGE_SIZE);
1.68      skrll    1661:        pdtlb(HPPA_SID_KERNEL, pa);
                   1662:        pitlb(HPPA_SID_KERNEL, pa);
                   1663: #endif
1.1       fredette 1664: }
                   1665:
                   1666: /*
1.49      skrll    1667:  * pmap_copy_page(src, dst)
                   1668:  *
                   1669:  * pmap_copy_page copies the source page to the destination page.
1.1       fredette 1670:  */
1.49      skrll    1671: void
                   1672: pmap_copy_page(paddr_t spa, paddr_t dpa)
1.1       fredette 1673: {
1.49      skrll    1674:        struct vm_page *srcpg = PHYS_TO_VM_PAGE(spa);
                   1675:
1.62      skrll    1676:        DPRINTF(PDB_FOLLOW|PDB_PHYS, ("%s(%lx, %lx)\n", __func__, spa, dpa));
1.49      skrll    1677:
1.77      uebayasi 1678:        KASSERT(VM_PAGE_TO_MD(PHYS_TO_VM_PAGE(dpa))->pvh_list == NULL);
1.49      skrll    1679:
                   1680:        pmap_flush_page(srcpg, false);
                   1681:
                   1682:        memcpy((void *)dpa, (void *)spa, PAGE_SIZE);
                   1683:
                   1684:        pdcache(HPPA_SID_KERNEL, spa, PAGE_SIZE);
                   1685:        fdcache(HPPA_SID_KERNEL, dpa, PAGE_SIZE);
1.68      skrll    1686: #if defined(HP8000_CPU) || defined(HP8200_CPU) || \
                   1687:     defined(HP8500_CPU) || defined(HP8600_CPU)
1.73      skrll    1688:        ficache(HPPA_SID_KERNEL, spa, PAGE_SIZE);
                   1689:        ficache(HPPA_SID_KERNEL, dpa, PAGE_SIZE);
1.68      skrll    1690:        pdtlb(HPPA_SID_KERNEL, spa);
                   1691:        pdtlb(HPPA_SID_KERNEL, dpa);
                   1692:        pitlb(HPPA_SID_KERNEL, spa);
                   1693:        pitlb(HPPA_SID_KERNEL, dpa);
                   1694: #endif
1.1       fredette 1695: }
                   1696:
                   1697: void
1.54      cegger   1698: pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
1.1       fredette 1699: {
1.49      skrll    1700:        volatile pt_entry_t *pde;
                   1701:        pt_entry_t pte, opte;
                   1702:
1.1       fredette 1703: #ifdef PMAPDEBUG
                   1704:        int opmapdebug = pmapdebug;
                   1705:
                   1706:        /*
1.49      skrll    1707:         * If we're being told to map page zero, we can't call printf() at all,
                   1708:         * because doing so would lead to an infinite recursion on this call.
1.1       fredette 1709:         * (printf requires page zero to be mapped).
                   1710:         */
                   1711:        if (va == 0)
                   1712:                pmapdebug = 0;
                   1713: #endif /* PMAPDEBUG */
                   1714:
1.49      skrll    1715:        DPRINTF(PDB_FOLLOW|PDB_ENTER,
1.62      skrll    1716:            ("%s(%lx, %lx, %x)\n", __func__, va, pa, prot));
1.49      skrll    1717:
                   1718:        if (!(pde = pmap_pde_get(pmap_kernel()->pm_pdir, va)) &&
                   1719:            !(pde = pmap_pde_alloc(pmap_kernel(), va, NULL)))
                   1720:                panic("pmap_kenter_pa: cannot allocate pde for va=0x%lx", va);
                   1721:        opte = pmap_pte_get(pde, va);
                   1722:        pte = pa | PTE_PROT(TLB_WIRED | TLB_REFTRAP |
                   1723:            pmap_prot(pmap_kernel(), prot & VM_PROT_ALL));
1.55      skrll    1724:        if (pa >= HPPA_IOBEGIN || (flags & PMAP_NOCACHE))
1.49      skrll    1725:                pte |= PTE_PROT(TLB_UNCACHEABLE);
                   1726:        pmap_kernel()->pm_stats.wired_count++;
                   1727:        pmap_kernel()->pm_stats.resident_count++;
                   1728:        if (opte)
                   1729:                pmap_pte_flush(pmap_kernel(), va, opte);
                   1730:
                   1731:        if (pmap_initialized) {
                   1732:                struct vm_page *pg;
                   1733:
                   1734:                pg = PHYS_TO_VM_PAGE(PTE_PAGE(pte));
                   1735:                if (pg != NULL) {
1.77      uebayasi 1736:                        struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
                   1737:
1.49      skrll    1738:                        KASSERT(pa < HPPA_IOBEGIN);
                   1739:
1.73      skrll    1740:                        struct pv_entry *pve;
1.49      skrll    1741:
1.73      skrll    1742:                        pve = pmap_pv_alloc();
                   1743:                        if (!pve)
                   1744:                                panic("%s: no pv entries available",
                   1745:                                    __func__);
                   1746:                        DPRINTF(PDB_FOLLOW|PDB_ENTER,
                   1747:                            ("%s(%lx, %lx, %x) TLB_KENTER\n", __func__,
                   1748:                            va, pa, pte));
1.49      skrll    1749:
1.77      uebayasi 1750:                        mutex_enter(&md->pvh_lock);
1.73      skrll    1751:                        if (pmap_check_alias(pg, va, pte))
                   1752:                                pmap_page_remove_locked(pg);
                   1753:                        pmap_pv_enter(pg, pve, pmap_kernel(), va, NULL,
                   1754:                            PV_KENTER);
1.77      uebayasi 1755:                        mutex_exit(&md->pvh_lock);
1.49      skrll    1756:                }
                   1757:        }
                   1758:        pmap_pte_set(pde, va, pte);
                   1759:
                   1760:        DPRINTF(PDB_FOLLOW|PDB_ENTER, ("%s: leaving\n", __func__));
1.12      chs      1761:
1.1       fredette 1762: #ifdef PMAPDEBUG
                   1763:        pmapdebug = opmapdebug;
                   1764: #endif /* PMAPDEBUG */
                   1765: }
                   1766:
                   1767: void
1.11      chs      1768: pmap_kremove(vaddr_t va, vsize_t size)
1.1       fredette 1769: {
1.49      skrll    1770:        struct pv_entry *pve;
                   1771:        vaddr_t eva, pdemask;
                   1772:        volatile pt_entry_t *pde = NULL;
                   1773:        pt_entry_t pte;
                   1774:        struct vm_page *pg;
                   1775:        pmap_t pmap = pmap_kernel();
1.1       fredette 1776: #ifdef PMAPDEBUG
                   1777:        int opmapdebug = pmapdebug;
                   1778:
                   1779:        /*
1.49      skrll    1780:         * If we're being told to unmap page zero, we can't call printf() at
                   1781:         * all as printf requires page zero to be mapped.
1.1       fredette 1782:         */
                   1783:        if (va == 0)
                   1784:                pmapdebug = 0;
                   1785: #endif /* PMAPDEBUG */
                   1786:
1.49      skrll    1787:        DPRINTF(PDB_FOLLOW|PDB_REMOVE,
1.62      skrll    1788:            ("%s(%lx, %lx)\n", __func__, va, size));
1.49      skrll    1789: #ifdef PMAPDEBUG
                   1790:
                   1791:        /*
                   1792:         * Don't allow the VA == PA mappings, apart from page zero, to be
                   1793:         * removed. Page zero is given special treatment so that we get TLB
                   1794:         * faults when the kernel tries to de-reference NULL or anything else
                   1795:         * in the first page when it shouldn't.
                   1796:         */
                   1797:        if (va != 0 && va < ptoa(physmem)) {
                   1798:                DPRINTF(PDB_FOLLOW|PDB_REMOVE,
1.62      skrll    1799:                    ("%s(%lx, %lx): unmapping physmem\n", __func__, va,
                   1800:                    size));
1.49      skrll    1801:                pmapdebug = opmapdebug;
                   1802:                return;
                   1803:        }
                   1804: #endif
                   1805:
                   1806:        for (pdemask = 1, eva = va + size; va < eva; va += PAGE_SIZE) {
                   1807:                if (pdemask != (va & PDE_MASK)) {
                   1808:                        pdemask = va & PDE_MASK;
                   1809:                        if (!(pde = pmap_pde_get(pmap->pm_pdir, va))) {
1.50      skrll    1810:                                va = pdemask + PDE_SIZE - PAGE_SIZE;
1.49      skrll    1811:                                continue;
                   1812:                        }
                   1813:                }
                   1814:                if (!(pte = pmap_pte_get(pde, va))) {
                   1815:                        DPRINTF(PDB_FOLLOW|PDB_REMOVE,
1.62      skrll    1816:                            ("%s: unmapping unmapped 0x%lx\n", __func__,
                   1817:                            va));
1.49      skrll    1818:                        continue;
                   1819:                }
                   1820:
                   1821:                pmap_pte_flush(pmap, va, pte);
                   1822:                pmap_pte_set(pde, va, 0);
                   1823:                if (pmap_initialized && (pg = PHYS_TO_VM_PAGE(PTE_PAGE(pte)))) {
1.77      uebayasi 1824:                        struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
1.49      skrll    1825:
1.77      uebayasi 1826:                        mutex_enter(&md->pvh_lock);
1.49      skrll    1827:
                   1828:                        pve = pmap_pv_remove(pg, pmap, va);
1.1       fredette 1829:
1.77      uebayasi 1830:                        mutex_exit(&md->pvh_lock);
1.49      skrll    1831:                        if (pve != NULL)
                   1832:                                pmap_pv_free(pve);
1.1       fredette 1833:                }
                   1834:        }
1.49      skrll    1835:        DPRINTF(PDB_FOLLOW|PDB_REMOVE, ("%s: leaving\n", __func__));
                   1836:
1.1       fredette 1837: #ifdef PMAPDEBUG
                   1838:        pmapdebug = opmapdebug;
                   1839: #endif /* PMAPDEBUG */
                   1840: }
                   1841:
1.49      skrll    1842: #if defined(USE_HPT)
                   1843: #if defined(DDB)
1.1       fredette 1844: /*
                    1845:  * Print the whole VA->PA translation table (aka the HPT or HVT).
                   1846:  */
                   1847: void
1.11      chs      1848: pmap_hptdump(void)
1.1       fredette 1849: {
1.10      chs      1850:        struct hpt_entry *hpt, *ehpt;
1.1       fredette 1851:
1.49      skrll    1852:        hpt = (struct hpt_entry *)pmap_hpt;
                   1853:        ehpt = (struct hpt_entry *)((int)hpt + pmap_hptsize);
1.1       fredette 1854:        db_printf("HPT dump %p-%p:\n", hpt, ehpt);
                   1855:        for (; hpt < ehpt; hpt++)
1.49      skrll    1856:                if (hpt->hpt_valid) {
1.19      skrll    1857:                        char buf[128];
                   1858:
1.46      christos 1859:                        snprintb(buf, sizeof(buf), TLB_BITS, hpt->hpt_tlbprot);
                   1860:
1.19      skrll    1861:                        db_printf("hpt@%p: %x{%sv=%x:%x},%s,%x\n",
1.1       fredette 1862:                            hpt, *(int *)hpt, (hpt->hpt_valid?"ok,":""),
                   1863:                            hpt->hpt_space, hpt->hpt_vpn << 9,
1.19      skrll    1864:                            buf, tlbptob(hpt->hpt_tlbpage));
1.1       fredette 1865:                }
                   1866: }
                   1867: #endif
1.49      skrll    1868: #endif
