
Annotation of src/sys/arch/hppa/hppa/pmap.c, Revision 1.10

1.10    ! chs         1: /*     $NetBSD: pmap.c,v 1.9 2003/07/15 02:29:39 lukem Exp $   */
1.1       fredette    2:
                      3: /*-
                      4:  * Copyright (c) 2001, 2002 The NetBSD Foundation, Inc.
                      5:  * All rights reserved.
                      6:  *
                      7:  * This code is derived from software contributed to The NetBSD Foundation
                      8:  * by Matthew Fredette.
                      9:  *
                     10:  * Redistribution and use in source and binary forms, with or without
                     11:  * modification, are permitted provided that the following conditions
                     12:  * are met:
                     13:  * 1. Redistributions of source code must retain the above copyright
                     14:  *    notice, this list of conditions and the following disclaimer.
                     15:  * 2. Redistributions in binary form must reproduce the above copyright
                     16:  *    notice, this list of conditions and the following disclaimer in the
                     17:  *    documentation and/or other materials provided with the distribution.
                     18:  * 3. All advertising materials mentioning features or use of this software
                     19:  *    must display the following acknowledgement:
                     20:  *      This product includes software developed by the NetBSD
                     21:  *      Foundation, Inc. and its contributors.
                     22:  * 4. Neither the name of The NetBSD Foundation nor the names of its
                     23:  *    contributors may be used to endorse or promote products derived
                     24:  *    from this software without specific prior written permission.
                     25:  *
                     26:  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
                     27:  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
                     28:  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
                     29:  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
                     30:  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
                     31:  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
                     32:  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
                     33:  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
                     34:  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
                     35:  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
                     36:  * POSSIBILITY OF SUCH DAMAGE.
                     37:  */
                     38:
                     39: /*     $OpenBSD: pmap.c,v 1.46 2001/07/25 13:25:31 art Exp $   */
                     40:
                     41: /*
                     42:  * Copyright (c) 1998-2001 Michael Shalayeff
                     43:  * All rights reserved.
                     44:  *
                     45:  * Redistribution and use in source and binary forms, with or without
                     46:  * modification, are permitted provided that the following conditions
                     47:  * are met:
                     48:  * 1. Redistributions of source code must retain the above copyright
                     49:  *    notice, this list of conditions and the following disclaimer.
                     50:  * 2. Redistributions in binary form must reproduce the above copyright
                     51:  *    notice, this list of conditions and the following disclaimer in the
                     52:  *    documentation and/or other materials provided with the distribution.
                     53:  * 3. All advertising materials mentioning features or use of this software
                     54:  *    must display the following acknowledgement:
                     55:  *     This product includes software developed by Michael Shalayeff.
                     56:  * 4. The name of the author may not be used to endorse or promote products
                     57:  *    derived from this software without specific prior written permission.
                     58:  *
                     59:  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
                     60:  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
                     61:  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
                     62:  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
                     63:  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
                     64:  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
                     65:  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
                     66:  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
                     67:  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
                     68:  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
                     69:  */
                     70: /*
                     71:  * Copyright 1996 1995 by Open Software Foundation, Inc.
                     72:  *              All Rights Reserved
                     73:  *
                     74:  * Permission to use, copy, modify, and distribute this software and
                     75:  * its documentation for any purpose and without fee is hereby granted,
                     76:  * provided that the above copyright notice appears in all copies and
                     77:  * that both the copyright notice and this permission notice appear in
                     78:  * supporting documentation.
                     79:  *
                     80:  * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
                     81:  * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
                     82:  * FOR A PARTICULAR PURPOSE.
                     83:  *
                     84:  * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
                     85:  * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
                     86:  * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
                     87:  * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
                     88:  * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
                     89:  */
                     90: /*
                     91:  * Mach Operating System
                     92:  * Copyright (c) 1990,1991,1992,1993,1994 The University of Utah and
                     93:  * the Computer Systems Laboratory (CSL).
                     94:  * Copyright (c) 1991,1987 Carnegie Mellon University.
                     95:  * All rights reserved.
                     96:  *
                     97:  * Permission to use, copy, modify and distribute this software and its
                     98:  * documentation is hereby granted, provided that both the copyright
                     99:  * notice and this permission notice appear in all copies of the
                    100:  * software, derivative works or modified versions, and any portions
                    101:  * thereof, and that both notices appear in supporting documentation,
                    102:  * and that all advertising materials mentioning features or use of
                    103:  * this software display the following acknowledgement: ``This product
                    104:  * includes software developed by the Computer Systems Laboratory at
                    105:  * the University of Utah.''
                    106:  *
                    107:  * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
                    108:  * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
                    109:  * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
                    110:  * THIS SOFTWARE.
                    111:  *
                    112:  * CSL requests users of this software to return to csl-dist@cs.utah.edu any
                    113:  * improvements that they make and grant CSL redistribution rights.
                    114:  *
                    115:  * Carnegie Mellon requests users of this software to return to
                    116:  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
                    117:  *  School of Computer Science
                    118:  *  Carnegie Mellon University
                    119:  *  Pittsburgh PA 15213-3890
                    120:  * any improvements or extensions that they make and grant Carnegie Mellon
                    121:  * the rights to redistribute these changes.
                    122:  *
                    123:  *     Utah $Hdr: pmap.c 1.49 94/12/15$
                    124:  *     Author: Mike Hibler, Bob Wheeler, University of Utah CSL, 10/90
                    125:  */
                    126: /*
                    127:  *     Manages physical address maps for hppa.
                    128:  *
                    129:  *     In addition to hardware address maps, this
                    130:  *     module is called upon to provide software-use-only
                    131:  *     maps which may or may not be stored in the same
                    132:  *     form as hardware maps.  These pseudo-maps are
                    133:  *     used to store intermediate results from copy
                    134:  *     operations to and from address spaces.
                    135:  *
                    136:  *     Since the information managed by this module is
                    137:  *     also stored by the logical address mapping module,
                    138:  *     this module may throw away valid virtual-to-physical
                    139:  *     mappings at almost any time.  However, invalidations
                    140:  *     of virtual-to-physical mappings must be done as
                    141:  *     requested.
                    142:  *
                    143:  *     In order to cope with hardware architectures which
                    144:  *     make virtual-to-physical map invalidates expensive,
                     145:  *     this module may delay invalidate or reduce-protection
                     146:  *     operations until such time as they are actually
                     147:  *     necessary.  This module is given full information as
                     148:  *     to when physical maps must be made correct.
                    149:  *
                    150:  */
                    151: /*
                    152:  * CAVEATS:
                    153:  *
                    154:  *     Needs more work for MP support
                    155:  *     page maps are stored as linear linked lists, some
                     156:  *             improvement may be achieved should we use something else
                    157:  *     protection id (pid) allocation should be done in a pid_t fashion
                    158:  *             (maybe just use the pid itself)
                     159:  *     some people say block TLB entries should be maintained somewhere in uvm
                    160:  *             and be ready for reloads in the fault handler.
                    161:  *     usage of __inline grows the code size by 100%, but hopefully
                    162:  *             makes it faster as well, since the functions are actually
                    163:  *             very small.
                    164:  *             retail:  8.1k -> 15.1K
                    165:  *             debug:  12.2k -> 22.1K
                    166:  *
                    167:  * References:
                    168:  * 1. PA7100LC ERS, Hewlett-Packard, March 30 1999, Public version 1.0
                    169:  * 2. PA7300LC ERS, Hewlett-Packard, March 18 1996, Version 1.0
                    170:  *
                    171:  */
1.9       lukem     172:
                    173: #include <sys/cdefs.h>
1.10    ! chs       174: __KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.9 2003/07/15 02:29:39 lukem Exp $");
1.1       fredette  175:
                    176: #include <sys/param.h>
                    177: #include <sys/systm.h>
                    178: #include <sys/lock.h>
                    179: #include <sys/malloc.h>
                    180: #include <sys/user.h>
                    181: #include <sys/proc.h>
                    182:
                    183: #include <uvm/uvm.h>
                    184:
                    185: #include <machine/reg.h>
                    186: #include <machine/psl.h>
                    187: #include <machine/cpu.h>
                    188: #include <machine/pmap.h>
                    189: #include <machine/pte.h>
                    190: #include <machine/cpufunc.h>
                    191:
                    192: #include <hppa/hppa/hpt.h>
                    193: #include <hppa/hppa/machdep.h>
                    194:
                    195: #define static /**/
                    196: #define        __inline /* */
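                          /*
                           * The two defines above expand `static' and `__inline' to
                           * nothing, giving every helper external linkage and an
                           * out-of-line body, presumably so that the symbols remain
                           * visible to the debugger.  This is also the source of the
                           * code-size growth noted in the CAVEATS block above.
                           */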
                    197:
                    198: #ifdef PMAPDEBUG
                    199: #define        PDB_INIT        0x00000002
                    200: #define        PDB_ENTER       0x00000004
                    201: #define        PDB_REMOVE      0x00000008
                    202: #define        PDB_KENTER      0x00000010
                    203: #define        PDB_PMAP        0x00000020
                    204: #define        PDB_CACHE       0x00000040
                    205: #define        PDB_BITS        0x00000080
                    206: #define        PDB_EXTRACT     0x00000100
                    207: #define        PDB_PROTECT     0x00000200
                    208: #define        PDB_PV_ALLOC    0x00000400
                    209: #define        PDB_PV_ENTER    0x00000800
                    210: #define        PDB_PV_REMOVE   0x00001000
                    211: #define        PDB_PV_FIND_VA  0x00002000
                    212: #define        PDB_WIRING      0x00004000
                    213: #define        PDB_ZERO        0x00008000
                    214: #define        PDB_STEAL       0x00010000
                    215: #define        PDB_COPY        0x00020000
                    216: int pmapdebug = 0
                    217: #if 1
                    218:        | PDB_ENTER
                    219:        | PDB_REMOVE
                    220:        | PDB_KENTER
                    221:        | PDB_BITS
                    222:        | PDB_PROTECT
                    223:        | PDB_EXTRACT
                    224:        | PDB_WIRING
                    225:        | PDB_ZERO
                    226:        | PDB_STEAL
                    227:        | PDB_COPY
                    228: #endif
                    229:        ;
                    230: #define PMAP_PRINTF_MASK(m,v,x) do {   \
                    231:   if ((pmapdebug & (m)) == (v)) {      \
                    232:     printf("%s", __FUNCTION__);                \
                    233:     printf x;                          \
                    234:   }                                    \
                    235: } while(/* CONSTCOND */ 0)
                    236: #else
                    237: #define PMAP_PRINTF_MASK(m,v,x) do { } while(/* CONSTCOND */ 0)
                    238: #endif
                    239: #define PMAP_PRINTF(v,x) PMAP_PRINTF_MASK(v,v,x)
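                          /*
                           * Typical use, as seen later in this file: the caller passes a
                           * PDB_* bit and a parenthesized printf-style argument list, e.g.
                           *
                           *	PMAP_PRINTF(PDB_PV_ALLOC, ("() = %p\n", pv));
                           *
                           * which prints the function name followed by the formatted
                           * arguments whenever that bit is set in pmapdebug.
                           */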
                    240:
1.8       thorpej   241: vaddr_t        virtual_steal, virtual_start, virtual_end;
1.1       fredette  242:
                    243: /* These two virtual pages are available for copying and zeroing. */
                    244: static vaddr_t tmp_vpages[2];
                    245:
                    246: /* Free list of PV entries. */
                    247: static struct pv_entry *pv_free_list;
                    248:
1.3       fredette  249: /* This is an array of struct pv_head, one per physical page. */
                    250: static struct pv_head *pv_head_tbl;
1.1       fredette  251:
1.3       fredette  252: /*
                    253:  * This is a bitmap of page-is-aliased bits.
                    254:  * The magic 5 is log2(sizeof(u_int) * 8), and the magic 31 is 2^5 - 1.
1.1       fredette  255:  */
1.3       fredette  256: static u_int *page_aliased_bitmap;
                    257: #define _PAGE_ALIASED_WORD(pa) page_aliased_bitmap[((pa) >> PGSHIFT) >> 5]
                    258: #define _PAGE_ALIASED_BIT(pa) (1 << (((pa) >> PGSHIFT) & 31))
                    259: #define PAGE_IS_ALIASED(pa) (_PAGE_ALIASED_WORD(pa) & _PAGE_ALIASED_BIT(pa))
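                          /*
                           * Worked example, assuming PGSHIFT == 12 (4KB pages): physical
                           * address 0x00403000 is page number 0x403 == 1027, so its
                           * aliased flag lives in bit (1027 & 31) == 3 of word
                           * (1027 >> 5) == 32 of page_aliased_bitmap.
                           */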
1.1       fredette  260:
                    261: struct pmap    kernel_pmap_store;
                    262: pmap_t         kernel_pmap;
                    263: boolean_t      pmap_initialized = FALSE;
                    264:
                    265: TAILQ_HEAD(, pmap)     pmap_freelist;  /* list of free pmaps */
                    266: u_int pmap_nfree;
                    267: struct simplelock pmap_freelock;       /* and lock */
                    268:
                    269: struct simplelock pmap_lock;   /* XXX this is all broken */
                    270: struct simplelock sid_pid_lock;        /* pids */
                    271:
                    272: u_int  pages_per_vm_page;
                    273: u_int  pid_counter;
                    274:
                    275: #ifdef PMAPDEBUG
                    276: void pmap_hptdump __P((void));
                    277: #endif
                    278:
                    279: u_int  kern_prot[8], user_prot[8];
                    280:
                    281: vaddr_t        hpt_base;
                    282: vsize_t        hpt_mask;
                    283:
                    284: #define        pmap_sid(pmap, va) \
                    285:        (((va & 0xc0000000) != 0xc0000000)? pmap->pmap_space : HPPA_SID_KERNEL)
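                          /*
                           * That is, a VA in the top quarter of the 32-bit address space
                           * (at or above 0xc0000000) is always translated with the
                           * kernel's space ID; anything below that uses the space ID of
                           * the given pmap.
                           */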
                    286:
                    287: /*
1.3       fredette  288:  * Page 3-6 of the "PA-RISC 1.1 Architecture and Instruction Set
                    289:  * Reference Manual" (HP part number 09740-90039) defines equivalent
                    290:  * and non-equivalent virtual addresses in the cache.
                    291:  *
                    292:  * This macro evaluates to TRUE iff the two space/virtual address
                    293:  * combinations are non-equivalent aliases, and therefore will find
                    294:  * two different locations in the cache.
                    295:  *
                    296:  * NB: currently, the CPU-specific desidhash() functions disable the
                    297:  * use of the space in all cache hashing functions.  This means that
                    298:  * this macro definition is stricter than it has to be (because it
                    299:  * takes space into account), but one day cache space hashing should
                    300:  * be re-enabled.  Cache space hashing should yield better performance
                    301:  * through better utilization of the cache, assuming that most aliasing
                    302:  * is the read-only kind, which we do allow in the cache.
                    303:  */
                    304: #define NON_EQUIVALENT_ALIAS(sp1, va1, sp2, va2) \
                    305:   (((((va1) ^ (va2)) & ~HPPA_PGAMASK) != 0) || \
                    306:    ((((sp1) ^ (sp2)) & ~HPPA_SPAMASK) != 0))
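                          /*
                           * Reading the macro: the two mappings are non-equivalent
                           * aliases iff their virtual addresses differ in any bit
                           * outside HPPA_PGAMASK, or their space IDs differ in any bit
                           * outside HPPA_SPAMASK.
                           */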
1.1       fredette  307:
1.2       fredette  308: /* Prototypes. */
1.3       fredette  309: void __pmap_pv_update __P((paddr_t, struct pv_entry *, u_int, u_int));
1.2       fredette  310: static __inline void pmap_pv_remove __P((struct pv_entry *));
                    311:
1.1       fredette  312: /*
                    313:  * Given a directly-mapped region, this makes pv_entries out of it and
                    314:  * adds them to the free list.
                    315:  */
                    316: static __inline void pmap_pv_add __P((vaddr_t, vaddr_t));
                    317: static __inline void
                    318: pmap_pv_add(vaddr_t pv_start, vaddr_t pv_end)
                    319: {
                    320:        struct pv_entry *pv;
                    321:        int s;
                    322:
                    323:        /* Align pv_start, then add the new pv_entries. */
                    324:        pv_start = (pv_start + sizeof(*pv) - 1) & ~(sizeof(*pv) - 1);
                    325:        pv = (struct pv_entry *) pv_start;
                    326:        s = splvm();
                    327:        while ((vaddr_t)(pv + 1) <= pv_end) {
                    328:                pv->pv_next = pv_free_list;
                    329:                pv_free_list = pv;
                    330:                pv++;
                    331:        }
                    332:        splx(s);
                    333:
                    334:        PMAP_PRINTF(PDB_INIT, (": %d pv_entries @ %x allocated\n",
                    335:                    (pv - (struct pv_entry *) pv_start), (u_int)pv_start));
                    336: }
                    337:
                    338: /*
                    339:  * This allocates and returns a new struct pv_entry.
                    340:  *
1.2       fredette  341:  * If we run out of preallocated struct pv_entries, we have to forcibly
                    342:  * free one.   malloc() isn't an option, because a) we'll probably end
                    343:  * up back here anyways when malloc() maps what it's trying to return to
                    344:  * us, and b) even if malloc() did succeed, the TLB fault handlers run
                    345:  * in physical mode and thus require that all pv_entries be directly
1.1       fredette  346:  * mapped, a quality unlikely for malloc()-returned memory.
                    347:  */
                    348: static __inline struct pv_entry *pmap_pv_alloc __P((void));
                    349: static __inline struct pv_entry *
                    350: pmap_pv_alloc(void)
                    351: {
1.2       fredette  352:        struct pv_entry *pv, *pv_fallback;
                    353:        u_int hpt_index_first, hpt_index, hpt_size;
                    354:        struct hpt_entry *hpt;
1.1       fredette  355:
                    356:        pv = pv_free_list;
1.2       fredette  357:        if (pv == NULL) {
                    358:                /*
                    359:                 * We need to find a struct pv_entry to forcibly
1.3       fredette  360:                 * free.  It cannot be wired.  We prefer to free
                    361:                 * mappings that aren't marked as referenced.  We
                    362:                 * search the HPT for an entry to free, starting
                    363:                 * at a semirandom HPT index determined by the
                    364:                 * current value of the interval timer.
1.2       fredette  365:                 */
                    366:                hpt_size = hpt_mask / sizeof(*hpt);
                    367:                mfctl(CR_ITMR, hpt_index_first);
                    368:                hpt_index = hpt_index_first = hpt_index_first & hpt_size;
                    369:                pv_fallback = NULL;
                    370:                do {
                    371:                        hpt = ((struct hpt_entry *) hpt_base) + hpt_index;
                    372:                        for (pv = hpt->hpt_entry;
                    373:                             pv != NULL;
                    374:                             pv = pv->pv_hash) {
1.3       fredette  375:                                if (!(pv->pv_tlbprot & TLB_WIRED)) {
1.2       fredette  376:                                        if (!(pv->pv_tlbprot & TLB_REF))
                    377:                                                break;
                    378:                                        pv_fallback = pv;
                    379:                                }
                    380:                        }
                    381:                        if (pv != NULL)
                    382:                                break;
                    383:                        if (pv_fallback != NULL) {
                    384:                                pv = pv_fallback;
                    385:                                break;
                    386:                        }
                    387:                        hpt_index = (hpt_index + 1) & hpt_size;
                    388:                } while (hpt_index != hpt_index_first);
                    389:
                    390:                /* Remove the mapping. */
                    391:                if (pv != NULL) {
                    392:                        KASSERT(pv->pv_pmap->pmap_stats.resident_count > 0);
                    393:                        pv->pv_pmap->pmap_stats.resident_count--;
                    394:                        pmap_pv_remove(pv);
                    395:                        pv = pv_free_list;
                    396:                }
                    397:
                    398:                if (pv == NULL)
                    399:                        panic("out of pv_entries");
                    400:
                    401:        }
1.1       fredette  402:        pv_free_list = pv->pv_next;
                    403:        pv->pv_next = NULL;
                    404:
                    405:        PMAP_PRINTF(PDB_PV_ALLOC, ("() = %p\n", pv));
                    406:        return pv;
                    407: }
                    408:
                    409: /*
                    410:  * Given a struct pv_entry allocated by pmap_pv_alloc, this frees it.
                    411:  */
                    412: static __inline void pmap_pv_free __P((struct pv_entry *));
                    413: static __inline void
                    414: pmap_pv_free(struct pv_entry *pv)
                    415: {
                    416:        PMAP_PRINTF(PDB_PV_ALLOC, ("(%p)\n", pv));
                    417:
                    418:        pv->pv_next = pv_free_list;
                    419:        pv_free_list = pv;
                    420: }
                    421:
                    422: /*
                    423:  * Given a VA, this hashes it into an HPT index.
                    424:  *
                    425:  * This hash function is the one used by the hardware TLB filler on
                    426:  * the 7100LC, to index the hardware page table (HPT), which is sort
                    427:  * of a cache of TLB entries.
                    428:  *
                    429:  * On other CPUs, locore.S has a software TLB filler that does exactly
                    430:  * the same thing, right down to using this same hash function.
                    431:  *
                    432:  * This HPT is also used as a general VA->PA mapping store, with
                    433:  * struct pv_entry chains hanging off of the HPT entries.
                    434:  */
                    435: static __inline struct hpt_entry *pmap_hpt_hash __P((pa_space_t, vaddr_t));
                    436: static __inline struct hpt_entry *
                    437: pmap_hpt_hash(pa_space_t sp, vaddr_t va)
                    438: {
                    439:        struct hpt_entry *hpt;
                    440:        __asm __volatile (
                    441:                "extru  %2, 23, 20, %%r22\n\t"  /* r22 = (va >> 8) */
                    442:                "zdep   %1, 22, 16, %%r23\n\t"  /* r23 = (sp << 9) */
                    443:                "dep    %%r0, 31, 4, %%r22\n\t" /* r22 &= ~0xf */
                    444:                "xor    %%r22,%%r23, %%r23\n\t" /* r23 ^= r22 */
                    445:                "mfctl  %%cr24, %%r22\n\t"      /* r22 = sizeof(HPT)-1 */
                    446:                "and    %%r22,%%r23, %%r23\n\t" /* r23 &= r22 */
                    447:                "mfctl  %%cr25, %%r22\n\t"      /* r22 = addr of HPT table */
                    448:                "or     %%r23, %%r22, %0"       /* %0 = HPT entry */
                    449:                : "=r" (hpt) : "r" (sp), "r" (va) : "r22", "r23");
                    450:        return hpt;
                    451: }
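                          /*
                           * A rough C sketch of the hash computed above, assuming
                           * hpt_base is aligned so that the final `or' acts as an add:
                           *
                           *	off = ((((va >> 8) & 0xffff0) ^ ((sp & 0xffff) << 9))
                           *	    & hpt_mask);
                           *	hpt = (struct hpt_entry *)(hpt_base + off);
                           */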
                    452:
                    453: /*
                    454:  * Given a PA, returns the table offset for it.
                    455:  */
                    456: static __inline int pmap_table_find_pa __P((paddr_t));
                    457: static __inline int
                    458: pmap_table_find_pa(paddr_t pa)
                    459: {
1.2       fredette  460:        int off;
1.1       fredette  461:
1.2       fredette  462:        off = atop(pa);
                    463:        return (off < totalphysmem) ? off : -1;
1.1       fredette  464: }
                    465:
                    466: /*
                    467:  * Given a PA, returns the first mapping for it.
                    468:  */
                    469: static __inline struct pv_entry *pmap_pv_find_pa __P((paddr_t));
                    470: static __inline struct pv_entry *
                    471: pmap_pv_find_pa(paddr_t pa)
                    472: {
                    473:        int table_off;
                    474:
                    475:        table_off = pmap_table_find_pa(pa);
                    476:        KASSERT(table_off >= 0);
1.3       fredette  477:        return pv_head_tbl[table_off].pv_head_pvs;
1.1       fredette  478: }
                    479:
                    480: /*
                    481:  * Given a VA, this finds any mapping for it.
                    482:  */
                    483: static __inline struct pv_entry *pmap_pv_find_va __P((pa_space_t, vaddr_t));
                    484: static __inline struct pv_entry *
                    485: pmap_pv_find_va(pa_space_t space, vaddr_t va)
                    486: {
                    487:        struct pv_entry *pv = pmap_hpt_hash(space, va)->hpt_entry;
                    488:
                    489:        while(pv && (pv->pv_va != va || pv->pv_space != space))
                    490:                pv = pv->pv_hash;
                    491:
                    492:        PMAP_PRINTF(PDB_PV_FIND_VA, ("(0x%x:%p) = %p\n",
                    493:                                          space, (caddr_t)va, pv));
                    494:        return pv;
                    495: }
                    496:
                    497: /*
1.3       fredette  498:  * Given a page's PA, checks for non-equivalent aliasing,
                    499:  * and stores and returns the result.
                    500:  */
                    501: static int pmap_pv_check_alias __P((paddr_t));
                    502: static int
                    503: pmap_pv_check_alias(paddr_t pa)
                    504: {
                    505:        struct pv_entry *pv_outer, *pv;
                    506:        pa_space_t space;
                    507:        vaddr_t va;
                    508:        int aliased;
                    509:        u_int *aliased_word, aliased_bit;
                    510:
                    511:        /* By default we find no aliasing. */
                    512:        aliased = FALSE;
                    513:
                    514:        /*
1.5       fredette  515:         * We should never get called on I/O pages.
1.3       fredette  516:         */
1.5       fredette  517:        KASSERT(pa < HPPA_IOSPACE);
1.3       fredette  518:
                    519:        /*
                    520:         * Make an outer loop over the mappings, checking
                    521:         * each following inner mapping for non-equivalent
                    522:         * aliasing.  If the non-equivalent alias relation
                    523:         * is deemed transitive, this outer loop only needs
                    524:         * one iteration.
                    525:         */
1.5       fredette  526:        for (pv_outer = pmap_pv_find_pa(pa);
1.3       fredette  527:             pv_outer != NULL;
                    528:             pv_outer = pv_outer->pv_next) {
                    529:
                    530:                /* Load this outer mapping's space and address. */
                    531:                space = pv_outer->pv_space;
                    532:                va = pv_outer->pv_va;
                    533:
                    534:                /* Do the inner loop. */
                    535:                for (pv = pv_outer->pv_next;
                    536:                     pv != NULL;
                    537:                     pv = pv->pv_next) {
                    538:                        if (NON_EQUIVALENT_ALIAS(space, va,
                    539:                                pv->pv_space, pv->pv_va)) {
                    540:                                aliased = TRUE;
                    541:                                break;
                    542:                        }
                    543:                }
                    544:
                    545: #ifndef NON_EQUIVALENT_ALIAS_TRANSITIVE
                    546:                if (aliased)
                    547: #endif /* !NON_EQUIVALENT_ALIAS_TRANSITIVE */
                    548:                        break;
                    549:        }
                    550:
                    551:        /* Store and return the result. */
                    552:        aliased_word = &_PAGE_ALIASED_WORD(pa);
                    553:        aliased_bit = _PAGE_ALIASED_BIT(pa);
                    554:        *aliased_word = (*aliased_word & ~aliased_bit) |
                    555:                (aliased ? aliased_bit : 0);
                    556:        return aliased;
                    557: }
                    558:
                    559: /*
                    560:  * Given a VA->PA mapping and tlbprot bits to clear and set,
                    561:  * this flushes the mapping from the TLB and cache, and changes
                    562:  * the protection accordingly.  This is used when a mapping is
                    563:  * changing.
                    564:  */
                    565: static __inline void _pmap_pv_update __P((paddr_t, struct pv_entry *,
                    566:                                          u_int, u_int));
                    567: static __inline void
                    568: _pmap_pv_update(paddr_t pa, struct pv_entry *pv,
                    569:                u_int tlbprot_clear, u_int tlbprot_set)
                    570: {
                    571:        struct pv_entry *ppv;
                    572:        int no_rw_alias;
                    573:
                    574:        /*
1.5       fredette  575:         * We should never get called on I/O pages.
                    576:         */
                    577:        KASSERT(pa < HPPA_IOSPACE);
                    578:
                    579:        /*
1.3       fredette  580:         * If the TLB protection of this mapping is changing,
                    581:         * check for a change in the no read-write alias state
                    582:         * of the page.
                    583:         */
                    584:        KASSERT((tlbprot_clear & TLB_AR_MASK) == 0 ||
                    585:                (tlbprot_clear & TLB_AR_MASK) == TLB_AR_MASK);
                    586:        if (tlbprot_clear & TLB_AR_MASK) {
                    587:
                    588:                /*
                    589:                 * Assume that no read-write aliasing
                    590:                 * exists.  It does exist if this page is
                    591:                 * aliased and any mapping is writable.
                    592:                 */
                    593:                no_rw_alias = TLB_NO_RW_ALIAS;
                    594:                if (PAGE_IS_ALIASED(pa)) {
                    595:                        for (ppv = pmap_pv_find_pa(pa);
                    596:                             ppv != NULL;
                    597:                             ppv = ppv->pv_next) {
                    598:                                if (TLB_AR_WRITABLE(ppv == pv ?
                    599:                                                    tlbprot_set :
                    600:                                                    ppv->pv_tlbprot)) {
                    601:                                        no_rw_alias = 0;
                    602:                                        break;
                    603:                                }
                    604:                        }
                    605:                }
                    606:
                    607:                /* Note if the no read-write alias state has changed. */
                    608:                if ((pv->pv_tlbprot & TLB_NO_RW_ALIAS) ^ no_rw_alias) {
                    609:                        tlbprot_clear |= TLB_NO_RW_ALIAS;
                    610:                        tlbprot_set |= no_rw_alias;
                    611:                }
                    612:        }
                    613:
                    614:        /*
                    615:         * Now call our asm helper function.  At the very least,
                    616:         * this will flush out the requested mapping and change
                    617:         * its protection.  If the changes touch any of TLB_REF,
                    618:         * TLB_DIRTY, and TLB_NO_RW_ALIAS, all mappings of the
                    619:         * page will be flushed and changed.
                    620:         */
                    621:        __pmap_pv_update(pa, pv, tlbprot_clear, tlbprot_set);
                    622: }
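                          /*
                           * pmap_pv_update() below is a convenience wrapper that
                           * recovers the physical address from the pv_entry's own
                           * pv_tlbpage field and then calls _pmap_pv_update().
                           */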
                    623: #define pmap_pv_update(pv, tc, ts) \
                    624:        _pmap_pv_update(tlbptob((pv)->pv_tlbpage), pv, tc, ts)
                    625:
                    626: /*
1.1       fredette  627:  * Given a pmap, a VA, a PA, and a TLB protection, this enters
                    628:  * a new mapping and returns the new struct pv_entry.
                    629:  */
                    630: static __inline struct pv_entry *pmap_pv_enter __P((pmap_t, pa_space_t, vaddr_t, paddr_t, u_int));
                    631: static __inline struct pv_entry *
                    632: pmap_pv_enter(pmap_t pmap, pa_space_t space, vaddr_t va,
                    633:              paddr_t pa, u_int tlbprot)
                    634: {
                    635:        struct hpt_entry *hpt = pmap_hpt_hash(space, va);
                    636:        int table_off;
1.3       fredette  637:        struct pv_head *hpv;
1.5       fredette  638:        struct pv_entry *pv, *pv_other;
1.1       fredette  639:
                    640: #ifdef DIAGNOSTIC
                    641:        /* Make sure this VA isn't already entered. */
                    642:        for (pv = hpt->hpt_entry; pv != NULL; pv = pv->pv_hash)
                    643:                if (pv->pv_va == va && pv->pv_space == space)
                    644:                        panic("pmap_pv_enter: VA already entered");
                    645: #endif /* DIAGNOSTIC */
                    646:
                    647:        /*
1.5       fredette  648:         * Allocate a new pv_entry, fill it, and link it into the HPT.
1.3       fredette  649:         */
1.1       fredette  650:        pv = pmap_pv_alloc();
                    651:        pv->pv_va = va;
                    652:        pv->pv_pmap = pmap;
                    653:        pv->pv_space = space;
                    654:        pv->pv_tlbprot = tlbprot;
                    655:        pv->pv_tlbpage = tlbbtop(pa);
1.3       fredette  656:        pv->pv_hpt = hpt;
1.1       fredette  657:        pv->pv_hash = hpt->hpt_entry;
                    658:        hpt->hpt_entry = pv;
                    659:
1.5       fredette  660:        /*
                    661:         * If this mapping is for I/O space, mark the mapping
                    662:         * uncacheable.  (This is fine even on CPUs that don't
                    663:         * support the U-bit; these CPUs don't cache references
                    664:         * to I/O space.)  Also mark this mapping as having
                    665:         * no read/write aliasing, and we're done - we don't
                    666:         * keep PA->VA lists for I/O space.
                    667:         */
                    668:        if (pa >= HPPA_IOSPACE) {
                    669:                KASSERT(tlbprot & TLB_UNMANAGED);
                    670:                pv->pv_tlbprot |= TLB_UNCACHEABLE | TLB_NO_RW_ALIAS;
                    671:                return pv;
                    672:        }
                    673:
                    674:        /* Get the head of the PA->VA translation list. */
                    675:        table_off = pmap_table_find_pa(pa);
                    676:        KASSERT(table_off >= 0);
                    677:        hpv = pv_head_tbl + table_off;
                    678:
                    679: #ifdef DIAGNOSTIC
                    680:        /* Make sure this VA isn't already entered. */
                    681:        for (pv_other = hpv->pv_head_pvs;
                    682:             pv_other != NULL;
                    683:             pv_other = pv_other->pv_next)
                    684:                if (pmap == pv_other->pv_pmap && va == pv_other->pv_va)
                    685:                        panic("pmap_pv_enter: VA already in pv_tab");
                    686: #endif /* DIAGNOSTIC */
                    687:
                    688:        /*
                    689:         * Link this mapping into the PA->VA list.
                    690:         */
                    691:        pv_other = hpv->pv_head_pvs;
                    692:        pv->pv_next = pv_other;
                    693:        hpv->pv_head_pvs = pv;
                    694:
                    695:        /*
                    696:         * If there are no other mappings of this page, this
                    697:         * mapping has no read/write aliasing.  Otherwise, give
                    698:         * this mapping the same TLB_NO_RW_ALIAS status as the
                    699:         * other mapping (all mappings of the same page must
                    700:         * always be marked the same).
                    701:         */
                    702:        pv->pv_tlbprot |= (pv_other == NULL ?
                    703:                           TLB_NO_RW_ALIAS :
                    704:                           (pv_other->pv_tlbprot & TLB_NO_RW_ALIAS));
                    705:
1.3       fredette  706:        /* Check for read-write aliasing. */
                    707:        if (!PAGE_IS_ALIASED(pa))
                    708:                pmap_pv_check_alias(pa);
                    709:        _pmap_pv_update(pa, pv, TLB_AR_MASK, tlbprot & TLB_AR_MASK);
                    710:
1.1       fredette  711:        return pv;
                    712: }
                    713:
                    714: /*
                    715:  * Given a particular VA->PA mapping, this removes it.
                    716:  */
                    717: static __inline void
                    718: pmap_pv_remove(struct pv_entry *pv)
                    719: {
                    720:        paddr_t pa = tlbptob(pv->pv_tlbpage);
                    721:        int table_off;
1.3       fredette  722:        struct pv_head *hpv;
                    723:        struct pv_entry **_pv;
1.1       fredette  724:
                    725:        PMAP_PRINTF(PDB_PV_REMOVE, ("(%p)\n", pv));
                    726:
1.5       fredette  727:        /* Unlink this pv_entry from the HPT. */
1.3       fredette  728:        _pv = &pv->pv_hpt->hpt_entry;
                    729:        while (*_pv != pv) {
                    730:                KASSERT(*_pv != NULL);
1.1       fredette  731:                _pv = &(*_pv)->pv_hash;
1.3       fredette  732:        }
1.1       fredette  733:        *_pv = pv->pv_hash;
1.5       fredette  734:
                    735:        /*
                    736:         * If this mapping is for I/O space, simply flush the
                    737:         * old mapping, free it, and we're done.
                    738:         */
                    739:        if (pa >= HPPA_IOSPACE) {
                    740:                __pmap_pv_update(pa, pv, 0, 0);
                    741:                pmap_pv_free(pv);
                    742:                return;
                    743:        }
                    744:
                    745:        /* Get the head of the PA->VA translation list. */
                    746:        table_off = pmap_table_find_pa(pa);
                    747:        KASSERT(table_off >= 0);
                    748:        hpv = pv_head_tbl + table_off;
                    749:
                    750:        /* Unlink this pv_entry from the PA->VA translation list. */
1.3       fredette  751:        _pv = &hpv->pv_head_pvs;
                    752:        while (*_pv != pv) {
                    753:                KASSERT(*_pv != NULL);
                    754:                _pv = &(*_pv)->pv_next;
                    755:        }
                    756:        *_pv = pv->pv_next;
1.1       fredette  757:
                    758:        /*
1.3       fredette  759:         * Check for read-write aliasing.  This will also flush
                    760:         * the old mapping.
1.1       fredette  761:         */
1.3       fredette  762:        if (PAGE_IS_ALIASED(pa))
                    763:                pmap_pv_check_alias(pa);
                    764:        _pmap_pv_update(pa, pv, TLB_AR_MASK, TLB_AR_KR);
1.1       fredette  765:
1.3       fredette  766:        /* Free this mapping. */
                    767:        pmap_pv_free(pv);
1.1       fredette  768: }
                    769:
                    770: /*
                    771:  *     Bootstrap the system enough to run with virtual memory.
                    772:  *     Map the kernel's code and data, and allocate the system page table.
                    773:  *     Called with mapping OFF.
                    774:  *
                    775:  *     Parameters:
                    776:  *     vstart  PA of first available physical page
                    777:  *     vend    PA of last available physical page
                    778:  */
                    779: void
                    780: pmap_bootstrap(vstart, vend)
                    781:        vaddr_t *vstart;
                    782:        vaddr_t *vend;
                    783: {
                    784:        vaddr_t addr;
                    785:        vsize_t size;
                    786:        vaddr_t pv_region;
                    787:        struct hpt_entry *hptp;
                    788: #define BTLB_SET_SIZE 16
                    789:        vaddr_t btlb_entry_start[BTLB_SET_SIZE];
                    790:        vsize_t btlb_entry_size[BTLB_SET_SIZE];
                    791:        int btlb_entry_vm_prot[BTLB_SET_SIZE];
                    792:        int btlb_i, btlb_j;
1.2       fredette  793:        vsize_t btlb_entry_min, btlb_entry_max, btlb_entry_got;
1.1       fredette  794:        extern int kernel_text, etext;
1.2       fredette  795:        vaddr_t kernel_data;
                    796:        paddr_t phys_start, phys_end;
1.1       fredette  797:
                    798:        uvm_setpagesize();
                    799:
1.6       thorpej   800:        pages_per_vm_page = 1;  /* XXX This should die */
1.1       fredette  801:
                    802:        kern_prot[VM_PROT_NONE | VM_PROT_NONE  | VM_PROT_NONE]    =TLB_AR_NA;
                    803:        kern_prot[VM_PROT_READ | VM_PROT_NONE  | VM_PROT_NONE]    =TLB_AR_KR;
                    804:        kern_prot[VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_NONE]    =TLB_AR_KRW;
                    805:        kern_prot[VM_PROT_READ | VM_PROT_WRITE | VM_PROT_NONE]    =TLB_AR_KRW;
                    806:        kern_prot[VM_PROT_NONE | VM_PROT_NONE  | VM_PROT_EXECUTE] =TLB_AR_KRX;
                    807:        kern_prot[VM_PROT_READ | VM_PROT_NONE  | VM_PROT_EXECUTE] =TLB_AR_KRX;
                    808:        kern_prot[VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_EXECUTE] =TLB_AR_KRWX;
                    809:        kern_prot[VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE] =TLB_AR_KRWX;
                    810:
                    811:        user_prot[VM_PROT_NONE | VM_PROT_NONE  | VM_PROT_NONE]    =TLB_AR_NA;
                    812:        user_prot[VM_PROT_READ | VM_PROT_NONE  | VM_PROT_NONE]    =TLB_AR_UR;
                    813:        user_prot[VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_NONE]    =TLB_AR_URW;
                    814:        user_prot[VM_PROT_READ | VM_PROT_WRITE | VM_PROT_NONE]    =TLB_AR_URW;
                    815:        user_prot[VM_PROT_NONE | VM_PROT_NONE  | VM_PROT_EXECUTE] =TLB_AR_URX;
                    816:        user_prot[VM_PROT_READ | VM_PROT_NONE  | VM_PROT_EXECUTE] =TLB_AR_URX;
                    817:        user_prot[VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_EXECUTE] =TLB_AR_URWX;
                    818:        user_prot[VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE] =TLB_AR_URWX;
                    819:
                    820:        /*
                    821:         * Initialize kernel pmap
                    822:         */
                    823:        kernel_pmap = &kernel_pmap_store;
                    824: #if    NCPUS > 1
                    825:        lock_init(&pmap_lock, FALSE, ETAP_VM_PMAP_SYS, ETAP_VM_PMAP_SYS_I);
                    826: #endif /* NCPUS > 1 */
                    827:        simple_lock_init(&kernel_pmap->pmap_lock);
                    828:        simple_lock_init(&pmap_freelock);
                    829:        simple_lock_init(&sid_pid_lock);
                    830:
                    831:        kernel_pmap->pmap_refcnt = 1;
                    832:        kernel_pmap->pmap_space = HPPA_SID_KERNEL;
                    833:        kernel_pmap->pmap_pid = HPPA_PID_KERNEL;
                    834:
                    835:        /*
                    836:         * Allocate various tables and structures.
                    837:         */
                    838:        addr = hppa_round_page(*vstart);
                    839:        virtual_end = *vend;
                    840:
                    841:        /*
                    842:         * Figure out how big the HPT must be, and align
                    843:         * addr to what will be its beginning.  We don't
                    844:         * waste the pages skipped for the alignment;
                    845:         * they become struct pv_entry pages.
                    846:         */
1.2       fredette  847:        pv_region = addr;
1.1       fredette  848:        mfctl(CR_HPTMASK, size);
                    849:        addr = (addr + size) & ~(size);
                    850:        pv_free_list = NULL;
                    851:        pmap_pv_add(pv_region, addr);
                    852:
                    853:        /* Allocate the HPT */
                    854:        for (hptp = (struct hpt_entry *)addr;
                    855:             ((u_int)hptp - addr) <= size; hptp++) {
                    856:                hptp->hpt_valid   = 0;
                    857:                hptp->hpt_vpn     = 0;
                    858:                hptp->hpt_space   = -1;
                    859:                hptp->hpt_tlbpage = 0;
                    860:                hptp->hpt_tlbprot = 0;
                    861:                hptp->hpt_entry   = NULL;
                    862:        }
                    863: #ifdef PMAPDEBUG
                    864:        if (pmapdebug & PDB_INIT)
                    865:                printf("hpt_table: 0x%lx @ %p\n", size + 1, (caddr_t)addr);
                    866: #endif
                     867:        /* Load cr25 with the address of the HPT table.
                     868:           NB: it says CR_VTOP, but we (and the TLB handlers) know better ... */
                    869:        mtctl(addr, CR_VTOP);
                    870:        hpt_base = addr;
                    871:        hpt_mask = size;
1.10    ! chs       872:        lwp0.l_md.md_regs->tf_hptm = size;
        !           873:        lwp0.l_md.md_regs->tf_vtop = addr;
1.1       fredette  874:        addr += size + 1;
                    875:
1.3       fredette  876:        /* Allocate the struct pv_head array. */
1.2       fredette  877:        addr = ALIGN(addr);
1.3       fredette  878:        pv_head_tbl = (struct pv_head *) addr;
1.2       fredette  879:        memset(pv_head_tbl, 0, sizeof(*pv_head_tbl) * totalphysmem);
                    880:        addr = (vaddr_t) (pv_head_tbl + totalphysmem);
                    881:
1.3       fredette  882:        /* Allocate the page aliased bitmap. */
1.2       fredette  883:        addr = ALIGN(addr);
1.3       fredette  884:        page_aliased_bitmap = (u_int *) addr;
                    885:        addr = (vaddr_t) (&_PAGE_ALIASED_WORD(totalphysmem) + 1);
                    886:        memset(page_aliased_bitmap, 0, addr - (vaddr_t) page_aliased_bitmap);
1.2       fredette  887:
                    888:        /*
                    889:         * Allocate the largest struct pv_entry region.   The
                    890:         * 6 is a magic constant, chosen to allow on average
                    891:         * all physical pages to have 6 simultaneous mappings
                    892:         * without having to reclaim any struct pv_entry.
                    893:         */
                    894:        pv_region = addr;
                    895:        addr += sizeof(struct pv_entry) * totalphysmem * 6;
                    896:        pmap_pv_add(pv_region, addr);
                    897:
                    898:        /*
                    899:         * Allocate the steal region.  Because pmap_steal_memory
                    900:         * must panic whenever an allocation cannot be fulfilled,
                    901:         * we have to guess at the maximum amount of space that
                    902:         * might be stolen.  Overestimating is not really a problem,
                    903:         * as it only leads to lost virtual space, not lost physical
                    904:         * pages.
                    905:         */
                    906:        addr = hppa_round_page(addr);
                    907:        virtual_steal = addr;
                    908:        addr += totalphysmem * sizeof(struct vm_page);
                    909:        memset((caddr_t) virtual_steal, 0, addr - virtual_steal);
                    910:
                    911:        /*
                    912:         * We now have a rough idea of where managed kernel virtual
                    913:         * space will begin, and we can start mapping everything
                    914:         * before that.
                    915:         */
                    916:        addr = hppa_round_page(addr);
                    917:        *vstart = addr;
                    918:
1.1       fredette  919:        /*
1.2       fredette  920:         * In general, the virtual space below the kernel text is
                    921:         * left unmapped, to allow detection of NULL dereferences.
                    922:         * However, these tmp_vpages are two virtual pages right
                    923:         * before the kernel text that can be mapped for page copying
                    924:         * and zeroing.
1.1       fredette  925:         */
1.2       fredette  926:        tmp_vpages[1] = hppa_trunc_page((vaddr_t) &kernel_text) - PAGE_SIZE;
                    927:        tmp_vpages[0] = tmp_vpages[1] - PAGE_SIZE;
1.1       fredette  928:
                    929:        /*
1.2       fredette  930:         * The kernel text, data, and bss must be direct-mapped,
                    931:         * because the kernel often runs in physical mode, and
                     932:         * anyway the loader loaded the kernel into physical
                    933:         * memory exactly where it was linked.
                    934:         *
                    935:         * All memory already allocated after bss, either by
                    936:         * our caller or by this function itself, must also be
                    937:         * direct-mapped, because it's completely unmanaged
                    938:         * and was allocated in physical mode.
1.1       fredette  939:         *
1.2       fredette  940:         * BTLB entries are used to do this direct mapping.
                    941:         * BTLB entries have a minimum and maximum possible size,
                    942:         * and MD code gives us these sizes in units of pages.
1.1       fredette  943:         */
1.2       fredette  944:        btlb_entry_min = (vsize_t) hppa_btlb_size_min * PAGE_SIZE;
                    945:        btlb_entry_max = (vsize_t) hppa_btlb_size_max * PAGE_SIZE;
1.1       fredette  946:
1.2       fredette  947:        /*
                    948:         * We begin by making BTLB entries for the kernel text.
                    949:         * To keep things simple, we insist that the kernel text
                    950:         * be aligned to the minimum BTLB entry size.
                    951:         */
                    952:        if (((vaddr_t) &kernel_text) & (btlb_entry_min - 1))
                    953:                panic("kernel text not aligned to BTLB minimum size");
1.1       fredette  954:
                    955:        /*
1.2       fredette  956:         * To try to conserve BTLB entries, take a hint from how
                    957:         * the kernel was linked: take the kernel text start as
                    958:         * our effective minimum BTLB entry size, assuming that
                    959:         * the data segment was also aligned to that size.
                    960:         *
                    961:         * In practice, linking the kernel at 2MB, and aligning
                    962:         * the data segment to a 2MB boundary, should control well
                    963:         * how much of the BTLB the pmap uses.  However, this code
                    964:         * should not rely on this 2MB magic number, nor should
                    965:         * it rely on the data segment being aligned at all.  This
                    966:         * is to allow (smaller) kernels (linked lower) to work fine.
1.1       fredette  967:         */
1.2       fredette  968:        btlb_entry_min = (vaddr_t) &kernel_text;
                    969:        __asm __volatile (
                    970:                "       ldil L%%$global$, %0    \n"
                    971:                "       ldo R%%$global$(%0), %0 \n"
                    972:                : "=r" (kernel_data));
1.1       fredette  973:
                    974:        /*
1.2       fredette  975:         * Now make BTLB entries to direct-map the kernel text
                    976:         * read- and execute-only as much as possible.  Note that
                    977:         * if the data segment isn't nicely aligned, the last
                    978:         * BTLB entry for the kernel text may also cover some of
                    979:         * the data segment, meaning it will have to allow writing.
                    980:         */
                    981:        addr = (vaddr_t) &kernel_text;
                    982:        btlb_j = 0;
                    983:        while (addr < (vaddr_t) &etext) {
1.1       fredette  984:
                    985:                /* Set up the next BTLB entry. */
                    986:                KASSERT(btlb_j < BTLB_SET_SIZE);
1.2       fredette  987:                btlb_entry_start[btlb_j] = addr;
1.1       fredette  988:                btlb_entry_size[btlb_j] = btlb_entry_min;
1.2       fredette  989:                btlb_entry_vm_prot[btlb_j] = VM_PROT_READ | VM_PROT_EXECUTE;
                    990:                if (addr + btlb_entry_min > kernel_data)
1.1       fredette  991:                        btlb_entry_vm_prot[btlb_j] |= VM_PROT_WRITE;
                    992:
1.2       fredette  993:                /* Coalesce BTLB entries whenever possible. */
1.1       fredette  994:                while (btlb_j > 0 &&
                    995:                        btlb_entry_vm_prot[btlb_j] ==
                    996:                        btlb_entry_vm_prot[btlb_j - 1] &&
                    997:                        btlb_entry_size[btlb_j] ==
                    998:                        btlb_entry_size[btlb_j - 1] &&
                    999:                        !(btlb_entry_start[btlb_j - 1] &
1.2       fredette 1000:                          ((btlb_entry_size[btlb_j - 1] << 1) - 1)) &&
                   1001:                        (btlb_entry_size[btlb_j - 1] << 1) <=
                   1002:                        btlb_entry_max)
1.1       fredette 1003:                        btlb_entry_size[--btlb_j] <<= 1;
                   1004:
                   1005:                /* Move on. */
1.2       fredette 1006:                addr = btlb_entry_start[btlb_j] + btlb_entry_size[btlb_j];
1.1       fredette 1007:                btlb_j++;
1.2       fredette 1008:        }
                   1009:
                   1010:        /*
                   1011:         * Now make BTLB entries to direct-map the kernel data,
                   1012:         * bss, and all of the preallocated space read-write.
                   1013:         *
                   1014:         * Note that, unlike above, we're not concerned with
                   1015:         * making these BTLB entries such that they finish as
                   1016:         * close as possible to the end of the space we need
                   1017:         * them to map.  Instead, to minimize the number of BTLB
                   1018:         * entries we need, we make them as large as possible.
                   1019:         * The only thing this wastes is kernel virtual space,
                   1020:         * which is plentiful.
                   1021:         */
                   1022:        while (addr < *vstart) {
                   1023:
                   1024:                /* Make the next BTLB entry. */
                   1025:                KASSERT(btlb_j < BTLB_SET_SIZE);
                   1026:                size = btlb_entry_min;
                   1027:                while ((addr + size) < *vstart &&
                   1028:                        (size << 1) < btlb_entry_max &&
                   1029:                        !(addr & ((size << 1) - 1)))
                   1030:                        size <<= 1;
                   1031:                btlb_entry_start[btlb_j] = addr;
                   1032:                btlb_entry_size[btlb_j] = size;
                   1033:                btlb_entry_vm_prot[btlb_j] = VM_PROT_READ | VM_PROT_WRITE;
                   1034:
                   1035:                /* Move on. */
                   1036:                addr = btlb_entry_start[btlb_j] + btlb_entry_size[btlb_j];
                   1037:                btlb_j++;
                   1038:        }
                   1039:
1.1       fredette 1040:        /* Now insert all of the BTLB entries. */
                   1041:        for (btlb_i = 0; btlb_i < btlb_j; btlb_i++) {
                   1042:                btlb_entry_got = btlb_entry_size[btlb_i];
1.4       fredette 1043:                if (hppa_btlb_insert(kernel_pmap->pmap_space,
1.1       fredette 1044:                                btlb_entry_start[btlb_i],
                   1045:                                btlb_entry_start[btlb_i],
                   1046:                                &btlb_entry_got,
                   1047:                                kernel_pmap->pmap_pid |
                   1048:                                pmap_prot(kernel_pmap,
                   1049:                                        btlb_entry_vm_prot[btlb_i])) < 0)
                   1050:                        panic("pmap_bootstrap: cannot insert BTLB entry");
                   1051:                if (btlb_entry_got != btlb_entry_size[btlb_i])
                   1052:                        panic("pmap_bootstrap: BTLB entry mapped wrong amount");
                   1053:        }
                   1054:
1.2       fredette 1055:        /*
                   1056:         * We now know the exact beginning of managed kernel
                   1057:         * virtual space.
                   1058:         */
1.1       fredette 1059:        *vstart = btlb_entry_start[btlb_j - 1] + btlb_entry_size[btlb_j - 1];
1.8       thorpej  1060:        virtual_start = *vstart;
1.1       fredette 1061:
                   1062:        /*
1.2       fredette 1063:         * Finally, load physical pages into UVM.  There are
                   1064:         * three segments of pages.
1.1       fredette 1065:         */
1.2       fredette 1066:        physmem = 0;
                   1067:
                   1068:        /* The first segment runs from [resvmem..kernel_text). */
                   1069:        phys_start = resvmem;
                   1070:        phys_end = atop(hppa_trunc_page(&kernel_text));
                   1071: #ifdef DIAGNOSTIC
                   1072:        printf("phys segment: 0x%x 0x%x\n", (u_int)phys_start, (u_int)phys_end);
                   1073: #endif
                   1074:        if (phys_end > phys_start) {
                   1075:                uvm_page_physload(phys_start, phys_end,
                   1076:                        phys_start, phys_end, VM_FREELIST_DEFAULT);
                   1077:                physmem += phys_end - phys_start;
                   1078:        }
1.1       fredette 1079:
1.2       fredette 1080:        /* The second segment runs from [etext..kernel_data). */
                   1081:        phys_start = atop(hppa_round_page((vaddr_t) &etext));
                   1082:        phys_end = atop(hppa_trunc_page(kernel_data));
                   1083: #ifdef DIAGNOSTIC
                   1084:        printf("phys segment: 0x%x 0x%x\n", (u_int)phys_start, (u_int)phys_end);
1.1       fredette 1085: #endif
1.2       fredette 1086:        if (phys_end > phys_start) {
                   1087:                uvm_page_physload(phys_start, phys_end,
                   1088:                        phys_start, phys_end, VM_FREELIST_DEFAULT);
                   1089:                physmem += phys_end - phys_start;
                   1090:        }
1.1       fredette 1091:
1.2       fredette 1092:        /* The third segment runs from [virtual_steal..totalphysmem). */
                   1093:        phys_start = atop(virtual_steal);
                   1094:        phys_end = totalphysmem;
                   1095: #ifdef DIAGNOSTIC
                   1096:        printf("phys segment: 0x%x 0x%x\n", (u_int)phys_start, (u_int)phys_end);
                   1097: #endif
                   1098:        if (phys_end > phys_start) {
                   1099:                uvm_page_physload(phys_start, phys_end,
                   1100:                        phys_start, phys_end, VM_FREELIST_DEFAULT);
                   1101:                physmem += phys_end - phys_start;
                   1102:        }
1.1       fredette 1103: }
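
/*
 * Illustrative, standalone sketch (not part of pmap.c): the power-of-two
 * sizing rule used by the BTLB loops above.  An entry starts at the minimum
 * size and is doubled only while more of the range remains, the doubled
 * size stays within the maximum, and the entry's start address stays
 * aligned to the doubled size; as in pmap_bootstrap(), the final entry may
 * extend past the limit, which only wastes kernel virtual space.  The
 * names (entry_min, entry_max, limit) and the addresses below are invented
 * for illustration.
 */
#include <stdio.h>

static unsigned long
btlb_pick_size(unsigned long addr, unsigned long limit,
    unsigned long entry_min, unsigned long entry_max)
{
	unsigned long size = entry_min;

	while (addr + size < limit &&			/* more left to map */
	    (size << 1) < entry_max &&			/* below the maximum */
	    (addr & ((size << 1) - 1)) == 0)		/* start stays aligned */
		size <<= 1;
	return size;
}

int
main(void)
{
	unsigned long addr = 0x200000, limit = 0x8d0000;	/* made up */
	unsigned long entry_min = 0x80000, entry_max = 0x1000000;

	while (addr < limit) {
		unsigned long size = btlb_pick_size(addr, limit,
		    entry_min, entry_max);
		printf("entry at 0x%lx size 0x%lx\n", addr, size);
		addr += size;
	}
	return 0;
}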
                   1104:
                   1105: /*
                   1106:  * pmap_steal_memory(size, startp, endp)
                    1107:  *     steals a memory block of size `size' from the directly
                    1108:  *     mapped segment (mapped behind the scenes).  The directly
                    1109:  *     mapped segment cannot be grown dynamically once allocated.
                   1110:  */
                   1111: vaddr_t
1.8       thorpej  1112: pmap_steal_memory(size, startp, endp)
1.1       fredette 1113:        vsize_t size;
1.8       thorpej  1114:        vaddr_t *startp;
                   1115:        vaddr_t *endp;
1.1       fredette 1116: {
                   1117:        vaddr_t va;
1.2       fredette 1118:        int lcv;
1.1       fredette 1119:
                   1120:        PMAP_PRINTF(PDB_STEAL, ("(%lx, %p, %p)\n", size, startp, endp));
                   1121:
1.8       thorpej  1122:        /* Remind the caller of the start and end of virtual space. */
                   1123:        if (startp)
                   1124:                *startp = virtual_start;
                   1125:        if (endp)
                   1126:                *endp = virtual_end;
                   1127:
1.1       fredette 1128:        /* Round the allocation up to a page. */
                   1129:        size = hppa_round_page(size);
                   1130:
                   1131:        /* We must panic if we cannot steal the memory. */
1.8       thorpej  1132:        if (size > virtual_start - virtual_steal)
1.1       fredette 1133:                panic("pmap_steal_memory: out of memory");
                   1134:
                   1135:        /* Steal the memory. */
                   1136:        va = virtual_steal;
                   1137:        virtual_steal += size;
1.2       fredette 1138:        PMAP_PRINTF(PDB_STEAL, (": steal %ld bytes @%x\n", size, (u_int)va));
                   1139:        for (lcv = 0; lcv < vm_nphysseg ; lcv++)
                   1140:                if (vm_physmem[lcv].start == atop(va)) {
                   1141:                        vm_physmem[lcv].start = atop(virtual_steal);
                   1142:                        vm_physmem[lcv].avail_start = atop(virtual_steal);
                   1143:                        break;
                   1144:                }
                   1145:        if (lcv == vm_nphysseg)
                   1146:                panic("pmap_steal_memory inconsistency");
1.1       fredette 1147:
                   1148:        return va;
1.8       thorpej  1149: }
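
/*
 * Illustrative, standalone sketch (not part of pmap.c): the bookkeeping
 * done by pmap_steal_memory() above.  Pages are handed out from the front
 * of the steal region and the matching physical segment's start and
 * avail_start are advanced, so UVM never sees the stolen pages.  struct
 * physseg, atop_() and the addresses below are simplified stand-ins
 * invented for this example.
 */
#include <assert.h>
#include <stdio.h>

#define PAGE_SIZE_	4096UL
#define atop_(x)	((x) / PAGE_SIZE_)

struct physseg {
	unsigned long start, avail_start, end;	/* page frame numbers */
};

static unsigned long
steal(struct physseg *seg, unsigned long *steal_va, unsigned long size)
{
	unsigned long va = *steal_va;

	/* Round the request up to whole pages. */
	size = (size + PAGE_SIZE_ - 1) & ~(PAGE_SIZE_ - 1);
	*steal_va += size;

	/* The segment must begin exactly at the first stolen page. */
	assert(seg->start == atop_(va));
	seg->start = seg->avail_start = atop_(*steal_va);
	return va;
}

int
main(void)
{
	unsigned long steal_va = 0x800000;		/* made up */
	struct physseg seg = { atop_(0x800000), atop_(0x800000),
			       atop_(0x2000000) };

	unsigned long va = steal(&seg, &steal_va, 3 * PAGE_SIZE_);
	printf("stole 3 pages at 0x%lx; segment now starts at pfn 0x%lx\n",
	    va, seg.start);
	return 0;
}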
                   1150:
                   1151: /*
                   1152:  * How much virtual space does this kernel have?
                   1153:  * (After mapping kernel text, data, etc.)
                   1154:  */
                   1155: void
                   1156: pmap_virtual_space(vaddr_t *vstartp, vaddr_t *vendp)
                   1157: {
                   1158:        *vstartp = virtual_start;
                   1159:        *vendp = virtual_end;
1.1       fredette 1160: }
                   1161:
                   1162: /*
                   1163:  * Finishes the initialization of the pmap module.
                    1164:  * This procedure is called from uvm_init()
                   1165:  * to initialize any remaining data structures that the pmap module
                   1166:  * needs to map virtual memory (VM is already ON).
                   1167:  */
                   1168: void
                   1169: pmap_init()
                   1170: {
                   1171:        extern void gateway_page __P((void));
                   1172:
                   1173:        TAILQ_INIT(&pmap_freelist);
                   1174:        pid_counter = HPPA_PID_KERNEL + 2;
                   1175:
                   1176:        /*
                    1177:         * Map the syscall gateway page once for everybody.
                    1178:         * NB: we would have to remap any physical memory that
                    1179:         *     happens to lie at the SYSCALLGATE address.
                    1180:         *
                    1181:         * No spls are needed, since interrupts are not enabled yet.
                   1182:         */
1.3       fredette 1183:        pmap_pv_enter(pmap_kernel(), HPPA_SID_KERNEL, SYSCALLGATE,
                   1184:                      (paddr_t)&gateway_page,
                   1185:                      TLB_GATE_PROT | TLB_UNMANAGED | TLB_WIRED);
1.1       fredette 1186:
                   1187:        pmap_initialized = TRUE;
                   1188: }
                   1189:
                   1190: /*
                   1191:  * Initialize a preallocated and zeroed pmap structure,
                   1192:  * such as one in a vmspace structure.
                   1193:  */
                   1194: static void pmap_pinit __P((pmap_t));
                   1195: static void
                   1196: pmap_pinit(pmap)
                   1197:        pmap_t pmap;
                   1198: {
1.10    ! chs      1199:        u_int pid;
1.1       fredette 1200:        int s;
                   1201:
                   1202:        PMAP_PRINTF(PDB_PMAP, ("(%p), pid=%x\n", pmap, pmap->pmap_pid));
                   1203:
                   1204:        if (!(pid = pmap->pmap_pid)) {
                   1205:
                   1206:                /*
                   1207:                 * Allocate space and protection IDs for the pmap.
                   1208:                 * If all are allocated, there is nothing we can do.
                   1209:                 */
                   1210:                s = splvm();
                   1211:                if (pid_counter < HPPA_MAX_PID) {
                   1212:                        pid = pid_counter;
                   1213:                        pid_counter += 2;
                   1214:                } else
                   1215:                        pid = 0;
                   1216:                splx(s);
                   1217:
                   1218:                if (pid == 0)
                    1219:                        panic("no more pmap ids");
                   1220:
                   1221:                simple_lock_init(&pmap->pmap_lock);
                   1222:        }
                   1223:
                   1224:        s = splvm();
                   1225:        pmap->pmap_pid = pid;
                   1226:        pmap->pmap_space = (pmap->pmap_pid >> 1) - 1;
                   1227:        pmap->pmap_refcnt = 1;
                   1228:        pmap->pmap_stats.resident_count = 0;
                   1229:        pmap->pmap_stats.wired_count = 0;
                   1230:        splx(s);
                   1231: }
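
/*
 * Illustrative, standalone sketch (not part of pmap.c): how pmap_pinit()
 * relates protection IDs and space identifiers.  PIDs are handed out two
 * at a time, and a pmap's space is (pid >> 1) - 1, so consecutive pmaps
 * get consecutive spaces.  The kernel PID and the PID ceiling used below
 * are invented for illustration; the real values come from HPPA_PID_KERNEL
 * and HPPA_MAX_PID.
 */
#include <stdio.h>

int
main(void)
{
	unsigned int pid_kernel = 2;		/* illustrative only */
	unsigned int pid_max = 1U << 16;	/* illustrative ceiling */
	unsigned int pid_counter = pid_kernel + 2;
	int i;

	for (i = 0; i < 4 && pid_counter < pid_max; i++) {
		unsigned int pid = pid_counter;

		pid_counter += 2;
		printf("pmap %d: pid %u, space %u\n", i, pid, (pid >> 1) - 1);
	}
	return 0;
}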
                   1232:
                   1233: /*
                   1234:  * pmap_create()
                   1235:  *
                   1236:  * Create and return a physical map.
                    1237:  * The map is an actual physical map, and may be referenced by the hardware.
                   1238:  */
                   1239: pmap_t
                   1240: pmap_create()
                   1241: {
1.10    ! chs      1242:        pmap_t pmap;
1.1       fredette 1243:        int s;
                   1244:
                   1245:        PMAP_PRINTF(PDB_PMAP, ("()"));
                   1246:
                   1247:        /*
                   1248:         * If there is a pmap in the pmap free list, reuse it.
                   1249:         */
                   1250:        s = splvm();
                   1251:        if (pmap_nfree) {
                   1252:                pmap = pmap_freelist.tqh_first;
                   1253:                TAILQ_REMOVE(&pmap_freelist, pmap, pmap_list);
                   1254:                pmap_nfree--;
                   1255:                splx(s);
                   1256:        } else {
                   1257:                splx(s);
                   1258:                MALLOC(pmap, struct pmap *, sizeof(*pmap), M_VMMAP, M_NOWAIT);
                   1259:                if (pmap == NULL)
                   1260:                        return NULL;
                   1261:                bzero(pmap, sizeof(*pmap));
                   1262:        }
                   1263:
                   1264:        pmap_pinit(pmap);
                   1265:
                   1266:        return(pmap);
                   1267: }
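
/*
 * Illustrative, standalone sketch (not part of pmap.c): the "reuse from a
 * free list, otherwise allocate" pattern that pmap_create() above and
 * pmap_destroy() below implement with the pmap_freelist.  struct obj and
 * the helper names below are invented for this example; only the
 * <sys/queue.h> TAILQ macros are real.
 */
#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
	TAILQ_ENTRY(obj) o_list;
	int o_id;
};

TAILQ_HEAD(objlist, obj);
static struct objlist obj_freelist;
static int obj_nfree;

static struct obj *
obj_create(int id)
{
	struct obj *o;

	if (obj_nfree > 0) {
		/* Reuse a retired object instead of allocating a new one. */
		o = TAILQ_FIRST(&obj_freelist);
		TAILQ_REMOVE(&obj_freelist, o, o_list);
		obj_nfree--;
	} else {
		o = calloc(1, sizeof(*o));
		if (o == NULL)
			return NULL;
	}
	o->o_id = id;
	return o;
}

static void
obj_destroy(struct obj *o)
{
	/* Never free(): keep the object for reuse, as pmap_destroy() keeps pmaps. */
	TAILQ_INSERT_HEAD(&obj_freelist, o, o_list);
	obj_nfree++;
}

int
main(void)
{
	TAILQ_INIT(&obj_freelist);

	struct obj *a = obj_create(1);
	obj_destroy(a);
	struct obj *b = obj_create(2);	/* reuses a's storage */
	printf("reused: %s\n", a == b ? "yes" : "no");
	return 0;
}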
                   1268:
                   1269: /*
                   1270:  * pmap_destroy(pmap)
                   1271:  *     Gives up a reference to the specified pmap.  When the reference count
                   1272:  *     reaches zero the pmap structure is added to the pmap free list.
                   1273:  *     Should only be called if the map contains no valid mappings.
                   1274:  */
                   1275: void
                   1276: pmap_destroy(pmap)
                   1277:        pmap_t pmap;
                   1278: {
                   1279:        int ref_count;
                   1280:        int s;
                   1281:
                   1282:        PMAP_PRINTF(PDB_PMAP, ("(%p)\n", pmap));
                   1283:
                   1284:        s = splvm();
                   1285:
                   1286:        ref_count = --pmap->pmap_refcnt;
                   1287:
                   1288:        if (ref_count < 0)
                   1289:                panic("pmap_destroy(): ref_count < 0");
                   1290:        if (!ref_count) {
                   1291:                KASSERT(pmap->pmap_stats.resident_count == 0);
                   1292:                KASSERT(pmap->pmap_stats.wired_count == 0);
                   1293:
                   1294:                /*
                    1295:                 * Add the pmap to the pmap free list.
                    1296:                 * We cannot free() disposed pmaps, because the
                    1297:                 * 2^16 PIDs are scarce and must be reused.
                   1298:                 */
                   1299:                TAILQ_INSERT_HEAD(&pmap_freelist, pmap, pmap_list);
                   1300:                pmap_nfree++;
                   1301:        }
                   1302:        splx(s);
                   1303: }
                   1304:
                   1305: /*
1.10    ! chs      1306:  * pmap_activate(lwp)
        !          1307:  *     Activates the vmspace for the given LWP.  This
 1.1       fredette 1308:  *     isn't necessarily the current LWP.
                   1309:  */
                   1310: void
1.10    ! chs      1311: pmap_activate(struct lwp *l)
1.1       fredette 1312: {
1.10    ! chs      1313:        struct proc *p = l->l_proc;
1.1       fredette 1314:        pmap_t pmap = p->p_vmspace->vm_map.pmap;
                   1315:        pa_space_t space = pmap->pmap_space;
1.10    ! chs      1316:        struct trapframe *tf = l->l_md.md_regs;
1.1       fredette 1317:
                   1318:        /* space is cached for the copy{in,out}'s pleasure */
1.10    ! chs      1319:        l->l_addr->u_pcb.pcb_space = space;
1.1       fredette 1320:
                   1321:        /* Load all of the user's space registers. */
                   1322:        tf->tf_sr0 = tf->tf_sr1 = tf->tf_sr2 = tf->tf_sr3 =
                   1323:        tf->tf_sr4 = tf->tf_sr5 = tf->tf_sr6 = space;
                   1324:        tf->tf_iisq_head = tf->tf_iisq_tail = space;
                   1325:
                   1326:        /*
                   1327:         * Load the protection registers.  NB that
                   1328:         * if p *is* the current process, we set pidr2
                   1329:         * to the new space immediately, so any copyins
                   1330:         * or copyouts that happen before we return to
                   1331:         * userspace work.
                   1332:         */
                   1333:        tf->tf_pidr1 = tf->tf_pidr2 = pmap->pmap_pid;
                   1334:        if (p == curproc)
                   1335:                mtctl(pmap->pmap_pid, CR_PIDR2);
                   1336: }
                   1337:
                   1338: /*
                   1339:  * pmap_enter(pmap, va, pa, prot, flags)
                   1340:  *     Create a translation for the virtual address (va) to the physical
                   1341:  *     address (pa) in the pmap with the protection requested. If the
                   1342:  *     translation is wired then we can not allow a page fault to occur
                   1343:  *     for this mapping.
                   1344:  */
                   1345: int
                   1346: pmap_enter(pmap, va, pa, prot, flags)
                   1347:        pmap_t pmap;
                   1348:        vaddr_t va;
                   1349:        paddr_t pa;
                   1350:        vm_prot_t prot;
                   1351:        int flags;
                   1352: {
1.10    ! chs      1353:        struct pv_entry *pv;
1.1       fredette 1354:        u_int tlbpage, tlbprot;
                   1355:        pa_space_t space;
                   1356:        boolean_t waswired;
                   1357:        boolean_t wired = (flags & PMAP_WIRED) != 0;
                   1358:        int s;
                   1359:
                   1360:        /* Get a handle on the mapping we want to enter. */
                   1361:        space = pmap_sid(pmap, va);
                   1362:        va = hppa_trunc_page(va);
                   1363:        pa = hppa_trunc_page(pa);
                   1364:        tlbpage = tlbbtop(pa);
                   1365:        tlbprot = pmap_prot(pmap, prot) | pmap->pmap_pid;
                   1366:        if (wired)
                   1367:                tlbprot |= TLB_WIRED;
                   1368:
                   1369: #ifdef PMAPDEBUG
                   1370:        if (!pmap_initialized || (pmapdebug & PDB_ENTER))
                   1371:                PMAP_PRINTF(0, ("(%p, %p, %p, %x, %swired)\n",
                   1372:                                pmap, (caddr_t)va, (caddr_t)pa,
                   1373:                                prot, wired? "" : "un"));
                   1374: #endif
                   1375:
                   1376:        s = splvm();
                   1377:
                   1378:        if (!(pv = pmap_pv_find_va(space, va))) {
                   1379:                /*
                   1380:                 * Mapping for this virtual address doesn't exist.
                   1381:                 * Enter a new mapping.
                   1382:                 */
                   1383:                pv = pmap_pv_enter(pmap, space, va, pa, tlbprot);
                   1384:                pmap->pmap_stats.resident_count++;
                   1385:                waswired = FALSE;
                   1386:        } else {
1.3       fredette 1387:                KASSERT((pv->pv_tlbprot & TLB_UNMANAGED) == 0);
1.1       fredette 1388:                waswired = pv->pv_tlbprot & TLB_WIRED;
                   1389:
                   1390:                /* see if we are remapping the page to another PA */
                   1391:                if (pv->pv_tlbpage != tlbpage) {
                   1392:                        PMAP_PRINTF(PDB_ENTER, (": moving pa %x -> %x\n",
                   1393:                                                pv->pv_tlbpage, tlbpage));
                   1394:                        /* update tlbprot to avoid extra subsequent fault */
                   1395:                        pmap_pv_remove(pv);
                   1396:                        pv = pmap_pv_enter(pmap, space, va, pa, tlbprot);
                   1397:                } else {
                   1398:                        /* We are just changing the protection.  */
                   1399: #ifdef PMAPDEBUG
                   1400:                        if (pmapdebug & PDB_ENTER) {
                   1401:                                char buffer1[64];
                   1402:                                char buffer2[64];
                   1403:                                bitmask_snprintf(pv->pv_tlbprot, TLB_BITS,
                   1404:                                                 buffer1, sizeof(buffer1));
                   1405:                                bitmask_snprintf(tlbprot, TLB_BITS,
                   1406:                                                 buffer2, sizeof(buffer2));
                   1407:                                printf("pmap_enter: changing %s->%s\n",
                   1408:                                    buffer1, buffer2);
                   1409:                        }
                   1410: #endif
                   1411:                        pmap_pv_update(pv, TLB_AR_MASK|TLB_PID_MASK|TLB_WIRED,
                   1412:                                       tlbprot);
                   1413:                }
                   1414:        }
                   1415:
                   1416:        /*
                   1417:         * Adjust statistics
                   1418:         */
                   1419:        if (wired && !waswired) {
                   1420:                pmap->pmap_stats.wired_count++;
                   1421:        } else if (!wired && waswired) {
                   1422:                pmap->pmap_stats.wired_count--;
                   1423:        }
                   1424:        splx(s);
                   1425:
                   1426:        return (0);
                   1427: }
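
/*
 * Illustrative, standalone sketch (not part of pmap.c): wired_count is only
 * adjusted on a wiring transition, exactly as at the end of pmap_enter()
 * above.  Re-entering a mapping with its wiring unchanged leaves the count
 * alone.  The names below are invented for this example.
 */
#include <stdio.h>

static int wired_count;

static void
account_wiring(int waswired, int wired)
{
	if (wired && !waswired)
		wired_count++;
	else if (!wired && waswired)
		wired_count--;
}

int
main(void)
{
	account_wiring(0, 1);	/* new wired mapping:       1 */
	account_wiring(1, 1);	/* re-entered, still wired: 1 */
	account_wiring(1, 0);	/* unwired:                 0 */
	printf("wired_count = %d\n", wired_count);
	return 0;
}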
                   1428:
                   1429: /*
                   1430:  * pmap_remove(pmap, sva, eva)
                   1431:  *     unmaps all virtual addresses v in the virtual address
                   1432:  *     range determined by [sva, eva) and pmap.
                   1433:  *     sva and eva must be on machine independent page boundaries and
                   1434:  *     sva must be less than or equal to eva.
                   1435:  */
                   1436: void
                   1437: pmap_remove(pmap, sva, eva)
1.10    ! chs      1438:        pmap_t pmap;
        !          1439:        vaddr_t sva;
        !          1440:        vaddr_t eva;
1.1       fredette 1441: {
1.10    ! chs      1442:        struct pv_entry *pv;
        !          1443:        pa_space_t space;
1.1       fredette 1444:        int s;
                   1445:
                   1446:        PMAP_PRINTF(PDB_REMOVE, ("(%p, %p, %p)\n",
                   1447:                                 pmap, (caddr_t)sva, (caddr_t)eva));
                   1448:
                   1449:        sva = hppa_trunc_page(sva);
                   1450:        space = pmap_sid(pmap, sva);
                   1451:
                   1452:        s = splvm();
                   1453:
                   1454:        while (sva < eva) {
                   1455:                pv = pmap_pv_find_va(space, sva);
                   1456:                if (pv) {
1.3       fredette 1457:                        KASSERT((pv->pv_tlbprot & TLB_UNMANAGED) == 0);
1.1       fredette 1458:                        KASSERT(pmap->pmap_stats.resident_count > 0);
                   1459:                        pmap->pmap_stats.resident_count--;
                   1460:                        if (pv->pv_tlbprot & TLB_WIRED) {
                   1461:                                KASSERT(pmap->pmap_stats.wired_count > 0);
                   1462:                                pmap->pmap_stats.wired_count--;
                   1463:                        }
                   1464:                        pmap_pv_remove(pv);
                   1465:                        PMAP_PRINTF(PDB_REMOVE, (": removed %p for 0x%x:%p\n",
                   1466:                                                 pv, space, (caddr_t)sva));
                   1467:                }
                   1468:                sva += PAGE_SIZE;
                   1469:        }
                   1470:
                   1471:        splx(s);
                   1472: }
                   1473:
                   1474: /*
                    1475:  *     pmap_page_protect(pg, prot)
                   1476:  *
                   1477:  *     Lower the permission for all mappings to a given page.
                   1478:  */
                   1479: void
                   1480: pmap_page_protect(pg, prot)
                   1481:        struct vm_page *pg;
                   1482:        vm_prot_t prot;
                   1483: {
1.10    ! chs      1484:        struct pv_entry *pv, *pv_next;
        !          1485:        pmap_t pmap;
        !          1486:        u_int tlbprot;
1.1       fredette 1487:        paddr_t pa = VM_PAGE_TO_PHYS(pg);
                   1488:        int s;
                   1489:
                   1490:        PMAP_PRINTF(PDB_PROTECT, ("(%p, %x)\n", (caddr_t)pa, prot));
                   1491:
                   1492:        switch (prot) {
                   1493:        case VM_PROT_ALL:
                   1494:                return;
                   1495:        case VM_PROT_READ:
                   1496:        case VM_PROT_READ|VM_PROT_EXECUTE:
                   1497:                s = splvm();
                   1498:                for (pv = pmap_pv_find_pa(pa); pv; pv = pv->pv_next) {
                   1499:                        /* Ignore unmanaged mappings. */
1.3       fredette 1500:                        if (pv->pv_tlbprot & TLB_UNMANAGED)
1.1       fredette 1501:                                continue;
                   1502:                        /*
                   1503:                         * Compare new protection with old to see if
                   1504:                         * anything needs to be changed.
                   1505:                         */
                   1506:                        tlbprot = pmap_prot(pv->pv_pmap, prot);
                   1507:                        if ((pv->pv_tlbprot & TLB_AR_MASK) != tlbprot) {
                   1508:                                pmap_pv_update(pv, TLB_AR_MASK, tlbprot);
                   1509:                        }
                   1510:                }
                   1511:                splx(s);
                   1512:                break;
                   1513:        default:
                   1514:                s = splvm();
                   1515:                for (pv = pmap_pv_find_pa(pa); pv != NULL; pv = pv_next) {
                   1516:                        pv_next = pv->pv_next;
                   1517:                        /* Ignore unmanaged mappings. */
1.3       fredette 1518:                        if (pv->pv_tlbprot & TLB_UNMANAGED)
1.1       fredette 1519:                                continue;
                   1520: #ifdef PMAPDEBUG
                   1521:                        if (pmapdebug & PDB_PROTECT) {
                   1522:                                char buffer[64];
                   1523:                                bitmask_snprintf(pv->pv_tlbprot, TLB_BITS,
                   1524:                                                 buffer, sizeof(buffer));
                   1525:                                printf("pv={%p,%x:%x,%s,%x}->%p\n",
                   1526:                                    pv->pv_pmap, pv->pv_space, pv->pv_va,
                   1527:                                    buffer,
                   1528:                                    tlbptob(pv->pv_tlbpage), pv->pv_hash);
                   1529:                        }
                   1530: #endif
                   1531:                        pmap = pv->pv_pmap;
                   1532:                        if (pv->pv_tlbprot & TLB_WIRED) {
                   1533:                                KASSERT(pmap->pmap_stats.wired_count > 0);
                   1534:                                pmap->pmap_stats.wired_count--;
                   1535:                        }
                   1536:                        pmap_pv_remove(pv);
                   1537:                        KASSERT(pmap->pmap_stats.resident_count > 0);
                   1538:                        pmap->pmap_stats.resident_count--;
                   1539:                }
                   1540:                splx(s);
                   1541:                break;
                   1542:        }
                   1543: }
                   1544:
                   1545: /*
                   1546:  * pmap_protect(pmap, s, e, prot)
                   1547:  *     changes the protection on all virtual addresses v in the
                   1548:  *     virtual address range determined by [s, e) and pmap to prot.
                   1549:  *     s and e must be on machine independent page boundaries and
                   1550:  *     s must be less than or equal to e.
                   1551:  */
                   1552: void
                   1553: pmap_protect(pmap, sva, eva, prot)
                   1554:        pmap_t pmap;
                   1555:        vaddr_t sva;
                   1556:        vaddr_t eva;
                   1557:        vm_prot_t prot;
                   1558: {
1.10    ! chs      1559:        struct pv_entry *pv;
1.1       fredette 1560:        u_int tlbprot;
                   1561:        pa_space_t space;
                   1562:        int s;
                   1563:
                   1564:        PMAP_PRINTF(PDB_PROTECT, ("(%p, %p, %p, %x)\n",
                   1565:                                 pmap, (caddr_t)sva, (caddr_t)eva, prot));
                   1566:
                   1567:        if (prot == VM_PROT_NONE) {
                   1568:                pmap_remove(pmap, sva, eva);
                   1569:                return;
                   1570:        }
                   1571:        if (prot & VM_PROT_WRITE)
                   1572:                return;
                   1573:
                   1574:        sva = hppa_trunc_page(sva);
                   1575:        space = pmap_sid(pmap, sva);
                   1576:        tlbprot = pmap_prot(pmap, prot);
                   1577:
                   1578:        s = splvm();
                   1579:        for(; sva < eva; sva += PAGE_SIZE) {
                   1580:                if((pv = pmap_pv_find_va(space, sva))) {
1.3       fredette 1581:                        KASSERT((pv->pv_tlbprot & TLB_UNMANAGED) == 0);
1.1       fredette 1582:                        /*
                   1583:                         * Compare new protection with old to see if
                   1584:                         * anything needs to be changed.
                   1585:                         */
                   1586:                        if ((pv->pv_tlbprot & TLB_AR_MASK) != tlbprot) {
                   1587:                                pmap_pv_update(pv, TLB_AR_MASK, tlbprot);
                   1588:                        }
                   1589:                }
                   1590:        }
                   1591:        splx(s);
                   1592: }
                   1593:
                   1594: /*
                   1595:  *     Routine:        pmap_unwire
                   1596:  *     Function:       Change the wiring attribute for a map/virtual-address
                   1597:  *                     pair.
                   1598:  *     In/out conditions:
                   1599:  *                     The mapping must already exist in the pmap.
                   1600:  *
                   1601:  * Change the wiring for a given virtual page. This routine currently is
                   1602:  * only used to unwire pages and hence the mapping entry will exist.
                   1603:  */
                   1604: void
                   1605: pmap_unwire(pmap, va)
                   1606:        pmap_t  pmap;
                   1607:        vaddr_t va;
                   1608: {
                   1609:        struct pv_entry *pv;
                   1610:        int s;
                   1611:
                   1612:        va = hppa_trunc_page(va);
                   1613:        PMAP_PRINTF(PDB_WIRING, ("(%p, %p)\n", pmap, (caddr_t)va));
                   1614:
                   1615:        simple_lock(&pmap->pmap_lock);
                   1616:
                   1617:        s = splvm();
                   1618:        if ((pv = pmap_pv_find_va(pmap_sid(pmap, va), va)) == NULL)
                   1619:                panic("pmap_unwire: can't find mapping entry");
                   1620:
1.3       fredette 1621:        KASSERT((pv->pv_tlbprot & TLB_UNMANAGED) == 0);
1.1       fredette 1622:        if (pv->pv_tlbprot & TLB_WIRED) {
                   1623:                KASSERT(pmap->pmap_stats.wired_count > 0);
                   1624:                pv->pv_tlbprot &= ~TLB_WIRED;
                   1625:                pmap->pmap_stats.wired_count--;
                   1626:        }
                   1627:        splx(s);
                   1628:        simple_unlock(&pmap->pmap_lock);
                   1629: }
                   1630:
                   1631: /*
                   1632:  * pmap_extract(pmap, va, pap)
                    1633:  *     fills in the physical address corresponding to the
                    1634:  *     virtual address specified by pmap and va into the
                    1635:  *     storage pointed to by pap and returns TRUE if the
                    1636:  *     virtual address is mapped.  Returns FALSE if not mapped.
                   1637:  */
                   1638: boolean_t
                   1639: pmap_extract(pmap, va, pap)
                   1640:        pmap_t pmap;
                   1641:        vaddr_t va;
                   1642:        paddr_t *pap;
                   1643: {
                   1644:        struct pv_entry *pv;
                   1645:        vaddr_t off;
                   1646:        int s;
                   1647:
                   1648:        off = va;
                   1649:        off -= (va = hppa_trunc_page(va));
                   1650:
                   1651:        s = splvm();
                   1652:        if ((pv = pmap_pv_find_va(pmap_sid(pmap, va), va))) {
                   1653:                if (pap != NULL)
                   1654:                        *pap = tlbptob(pv->pv_tlbpage) + off;
                   1655:                PMAP_PRINTF(PDB_EXTRACT, ("(%p, %p) = %p\n",
                   1656:                                pmap, (caddr_t)va,
                   1657:                                (caddr_t)(tlbptob(pv->pv_tlbpage) + off)));
                   1658:        } else {
                   1659:                PMAP_PRINTF(PDB_EXTRACT, ("(%p, %p) unmapped\n",
                   1660:                                         pmap, (caddr_t)va));
                   1661:        }
                   1662:        splx(s);
                   1663:        return (pv != NULL);
                   1664: }
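
/*
 * Illustrative, standalone sketch (not part of pmap.c): the page-offset
 * trick used at the top of pmap_extract() above.  The virtual address is
 * truncated to a page boundary for the pv lookup, and the offset within
 * the page is carried over to the returned physical address.  PAGE_SIZE_
 * and the addresses below are invented for this example.
 */
#include <stdio.h>

#define PAGE_SIZE_	4096UL

int
main(void)
{
	unsigned long va = 0x12345, off;
	unsigned long page_pa = 0x7654000;	/* made-up page frame address */

	off = va;
	off -= (va = va & ~(PAGE_SIZE_ - 1));	/* va is now page aligned */

	printf("lookup va 0x%lx, offset 0x%lx, pa 0x%lx\n",
	    va, off, page_pa + off);
	return 0;
}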
                   1665:
                   1666: /*
                   1667:  * pmap_zero_page(pa)
                   1668:  *
                   1669:  * Zeros the specified page.
                   1670:  */
                   1671: void
                   1672: pmap_zero_page(pa)
1.10    ! chs      1673:        paddr_t pa;
1.1       fredette 1674: {
                   1675:        struct pv_entry *pv;
                   1676:        int s;
                   1677:
                   1678:        PMAP_PRINTF(PDB_ZERO, ("(%p)\n", (caddr_t)pa));
                   1679:
                   1680:        s = splvm(); /* XXX are we already that high? */
                   1681:
                   1682:        /* Map the physical page. */
1.3       fredette 1683:        pv = pmap_pv_enter(pmap_kernel(), HPPA_SID_KERNEL, tmp_vpages[1], pa,
                   1684:                        TLB_AR_KRW | TLB_UNMANAGED | TLB_WIRED);
1.1       fredette 1685:
                   1686:        /* Zero it. */
                   1687:        memset((caddr_t)tmp_vpages[1], 0, PAGE_SIZE);
                   1688:
                   1689:        /* Unmap the physical page. */
                   1690:        pmap_pv_remove(pv);
                   1691:
                   1692:        splx(s);
                   1693: }
                   1694:
                   1695: /*
                   1696:  * pmap_copy_page(src, dst)
                   1697:  *
                    1698:  * pmap_copy_page copies the source page to the destination page.
                    1699:  * Both pages are given temporary mappings through the tmp_vpages,
                    1700:  * since there is usually no existing kernel mapping for either
                    1701:  * physical address.
                   1702:  */
                   1703: void
                   1704: pmap_copy_page(spa, dpa)
                   1705:        paddr_t spa;
                   1706:        paddr_t dpa;
                   1707: {
                   1708:        struct pv_entry *spv, *dpv;
                   1709:        int s;
                   1710:
                   1711:        PMAP_PRINTF(PDB_COPY, ("(%p, %p)\n", (caddr_t)spa, (caddr_t)dpa));
                   1712:
                   1713:        s = splvm(); /* XXX are we already that high? */
                   1714:
                   1715:        /* Map the two pages. */
1.3       fredette 1716:        spv = pmap_pv_enter(pmap_kernel(), HPPA_SID_KERNEL, tmp_vpages[0], spa,
                   1717:                        TLB_AR_KR | TLB_UNMANAGED | TLB_WIRED);
                   1718:        dpv = pmap_pv_enter(pmap_kernel(), HPPA_SID_KERNEL, tmp_vpages[1], dpa,
                   1719:                        TLB_AR_KRW | TLB_UNMANAGED | TLB_WIRED);
1.1       fredette 1720:
                   1721:        /* Do the copy. */
                   1722:        memcpy((caddr_t)tmp_vpages[1], (const caddr_t)tmp_vpages[0], PAGE_SIZE);
                   1723:
                   1724:        /* Unmap the pages. */
                   1725:        pmap_pv_remove(spv);
                   1726:        pmap_pv_remove(dpv);
                   1727:
                   1728:        splx(s);
                   1729: }
                   1730:
                   1731: /*
                   1732:  * Given a PA and a bit, this tests and clears that bit in
                   1733:  * the modref information for the PA.
                   1734:  */
                   1735: static __inline boolean_t pmap_clear_bit __P((paddr_t, u_int));
                   1736: static __inline boolean_t
1.3       fredette 1737: pmap_clear_bit(paddr_t pa, u_int tlbprot_bit)
1.1       fredette 1738: {
                   1739:        int table_off;
1.3       fredette 1740:        struct pv_head *hpv;
                   1741:        u_int pv_head_bit;
1.1       fredette 1742:        boolean_t ret;
                   1743:        int s;
                   1744:
                   1745:        table_off = pmap_table_find_pa(pa);
                   1746:        KASSERT(table_off >= 0);
1.3       fredette 1747:        hpv = pv_head_tbl + table_off;
                   1748:        pv_head_bit = (tlbprot_bit == TLB_REF ? PV_HEAD_REF : PV_HEAD_DIRTY);
1.1       fredette 1749:        s = splvm();
1.3       fredette 1750:        _pmap_pv_update(pa, NULL, tlbprot_bit, 0);
                   1751:        ret = hpv->pv_head_writable_dirty_ref & pv_head_bit;
                   1752:        hpv->pv_head_writable_dirty_ref &= ~pv_head_bit;
1.1       fredette 1753:        splx(s);
                   1754:        return ret;
                   1755: }
                   1756:
                   1757: /*
                   1758:  * Given a PA and a bit, this tests that bit in the modref
                   1759:  * information for the PA.
                   1760:  */
                   1761: static __inline boolean_t pmap_test_bit __P((paddr_t, u_int));
                   1762: static __inline boolean_t
1.3       fredette 1763: pmap_test_bit(paddr_t pa, u_int tlbprot_bit)
1.1       fredette 1764: {
                   1765:        int table_off;
1.3       fredette 1766:        struct pv_head *hpv;
                   1767:        u_int pv_head_bit;
1.1       fredette 1768:        struct pv_entry *pv;
                   1769:        boolean_t ret;
                   1770:        int s;
                   1771:
                   1772:        table_off = pmap_table_find_pa(pa);
                   1773:        KASSERT(table_off >= 0);
1.3       fredette 1774:        hpv = pv_head_tbl + table_off;
                   1775:        pv_head_bit = (tlbprot_bit == TLB_REF ? PV_HEAD_REF : PV_HEAD_DIRTY);
1.1       fredette 1776:        s = splvm();
1.3       fredette 1777:        ret = (hpv->pv_head_writable_dirty_ref & pv_head_bit) != 0;
1.1       fredette 1778:        if (!ret) {
1.3       fredette 1779:                for (pv = hpv->pv_head_pvs;
1.1       fredette 1780:                     pv != NULL;
                   1781:                     pv = pv->pv_next) {
1.3       fredette 1782:                        if ((pv->pv_tlbprot & (TLB_UNMANAGED | tlbprot_bit)) ==
                   1783:                            tlbprot_bit) {
                   1784:                                hpv->pv_head_writable_dirty_ref |= pv_head_bit;
1.1       fredette 1785:                                ret = TRUE;
                   1786:                                break;
                   1787:                        }
                   1788:                }
                   1789:        }
                   1790:        splx(s);
                   1791:        return ret;
                   1792: }
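
/*
 * Illustrative, standalone sketch (not part of pmap.c): the lazy mod/ref
 * caching done by pmap_clear_bit() and pmap_test_bit() above.  The per-page
 * head keeps a cached summary bit; a query checks the cache first and only
 * scans the mappings when it is clear, caching a positive answer for next
 * time.  Clearing wipes both the cache and the per-mapping bits.  The
 * structures and names below are invented stand-ins for struct pv_head and
 * struct pv_entry.
 */
#include <stdio.h>

struct mapping {
	struct mapping *next;
	unsigned int bits;		/* per-mapping referenced/dirty bits */
};

struct page_head {
	unsigned int cached;		/* cached summary of the bits */
	struct mapping *mappings;
};

static int
test_bit(struct page_head *h, unsigned int bit)
{
	struct mapping *m;

	if (h->cached & bit)			/* fast path: cached summary */
		return 1;
	for (m = h->mappings; m != NULL; m = m->next) {
		if (m->bits & bit) {
			h->cached |= bit;	/* cache for next time */
			return 1;
		}
	}
	return 0;
}

static int
clear_bit(struct page_head *h, unsigned int bit)
{
	struct mapping *m;
	int was_set = (h->cached & bit) != 0;

	for (m = h->mappings; m != NULL; m = m->next) {
		if (m->bits & bit)
			was_set = 1;
		m->bits &= ~bit;
	}
	h->cached &= ~bit;
	return was_set;
}

int
main(void)
{
	struct mapping m = { NULL, 0x1 };	/* bit set by "hardware" */
	struct page_head h = { 0, &m };

	printf("test: %d\n", test_bit(&h, 0x1));	/* 1, now cached */
	printf("clear: %d\n", clear_bit(&h, 0x1));	/* 1, clears both */
	printf("test: %d\n", test_bit(&h, 0x1));	/* 0 */
	return 0;
}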
                   1793:
                   1794: /*
                    1795:  * pmap_clear_modify(pg)
                    1796:  *     clears the hardware modified ("dirty") bit for one
                    1797:  *     machine independent page starting at the given
                    1798:  *     page's physical address, which must be aligned on a
                    1799:  *     machine independent page boundary.
                   1800:  */
                   1801: boolean_t
                   1802: pmap_clear_modify(pg)
                   1803:        struct vm_page *pg;
                   1804: {
1.10    ! chs      1805:        paddr_t pa = VM_PAGE_TO_PHYS(pg);
1.1       fredette 1806:        boolean_t ret = pmap_clear_bit(pa, TLB_DIRTY);
                   1807:        PMAP_PRINTF(PDB_BITS, ("(%p) = %d\n", (caddr_t)pa, ret));
                   1808:        return ret;
                   1809: }
                   1810:
                   1811: /*
                    1812:  * pmap_is_modified(pg)
                   1813:  *     returns TRUE if the given physical page has been modified
                   1814:  *     since the last call to pmap_clear_modify().
                   1815:  */
                   1816: boolean_t
                   1817: pmap_is_modified(pg)
                   1818:        struct vm_page *pg;
                   1819: {
1.10    ! chs      1820:        paddr_t pa = VM_PAGE_TO_PHYS(pg);
1.1       fredette 1821:        boolean_t ret = pmap_test_bit(pa, TLB_DIRTY);
                   1822:        PMAP_PRINTF(PDB_BITS, ("(%p) = %d\n", (caddr_t)pa, ret));
                   1823:        return ret;
                   1824: }
                   1825:
                   1826: /*
                    1827:  * pmap_clear_reference(pg)
                    1828:  *     clears the hardware referenced bit in the given machine
                    1829:  *     independent physical page.
                   1830:  *
                   1831:  *     Currently, we treat a TLB miss as a reference; i.e. to clear
                   1832:  *     the reference bit we flush all mappings for pa from the TLBs.
                   1833:  */
                   1834: boolean_t
                   1835: pmap_clear_reference(pg)
                   1836:        struct vm_page *pg;
                   1837: {
1.10    ! chs      1838:        paddr_t pa = VM_PAGE_TO_PHYS(pg);
1.1       fredette 1839:        boolean_t ret = pmap_clear_bit(pa, TLB_REF);
                   1840:        PMAP_PRINTF(PDB_BITS, ("(%p) = %d\n", (caddr_t)pa, ret));
                   1841:        return ret;
                   1842: }
                   1843:
                   1844: /*
                    1845:  * pmap_is_referenced(pg)
                   1846:  *     returns TRUE if the given physical page has been referenced
                   1847:  *     since the last call to pmap_clear_reference().
                   1848:  */
                   1849: boolean_t
                   1850: pmap_is_referenced(pg)
                   1851:        struct vm_page *pg;
                   1852: {
1.10    ! chs      1853:        paddr_t pa = VM_PAGE_TO_PHYS(pg);
1.1       fredette 1854:        boolean_t ret = pmap_test_bit(pa, TLB_REF);
                   1855:        PMAP_PRINTF(PDB_BITS, ("(%p) = %d\n", (caddr_t)pa, ret));
                   1856:        return ret;
                   1857: }
                   1858:
                   1859: void
                   1860: pmap_kenter_pa(va, pa, prot)
                   1861:        vaddr_t va;
                   1862:        paddr_t pa;
                   1863:        vm_prot_t prot;
                   1864: {
                   1865:        int s;
                   1866: #ifdef PMAPDEBUG
                   1867:        int opmapdebug = pmapdebug;
                   1868:
                   1869:        /*
                   1870:         * If we're being told to map page zero, we can't
                   1871:         * call printf() at all, because doing so would
                   1872:         * lead to an infinite recursion on this call.
                   1873:         * (printf requires page zero to be mapped).
                   1874:         */
                   1875:        if (va == 0)
                   1876:                pmapdebug = 0;
                   1877: #endif /* PMAPDEBUG */
                   1878:
                   1879:        PMAP_PRINTF(PDB_KENTER, ("(%p, %p, %x)\n",
                   1880:                                 (caddr_t)va, (caddr_t)pa, prot));
                   1881:        va = hppa_trunc_page(va);
                   1882:        s = splvm();
                   1883:        KASSERT(pmap_pv_find_va(HPPA_SID_KERNEL, va) == NULL);
1.3       fredette 1884:        pmap_pv_enter(pmap_kernel(), HPPA_SID_KERNEL, va, pa,
1.1       fredette 1885:                      pmap_prot(pmap_kernel(), prot) |
1.3       fredette 1886:                      TLB_WIRED | TLB_UNMANAGED);
1.1       fredette 1887:        splx(s);
                   1888: #ifdef PMAPDEBUG
                   1889:        pmapdebug = opmapdebug;
                   1890: #endif /* PMAPDEBUG */
                   1891: }
                   1892:
                   1893: void
                   1894: pmap_kremove(va, size)
                   1895:        vaddr_t va;
                   1896:        vsize_t size;
                   1897: {
1.10    ! chs      1898:        struct pv_entry *pv;
1.1       fredette 1899:        int s;
                   1900: #ifdef PMAPDEBUG
                   1901:        int opmapdebug = pmapdebug;
                   1902:
                   1903:        /*
                   1904:         * If we're being told to unmap page zero, we can't
                   1905:         * call printf() at all, because doing so would
                   1906:         * lead to an infinite recursion on this call.
                   1907:         * (printf requires page zero to be mapped).
                   1908:         */
                   1909:        if (va == 0)
                   1910:                pmapdebug = 0;
                   1911: #endif /* PMAPDEBUG */
                   1912:
                   1913:        PMAP_PRINTF(PDB_KENTER, ("(%p, %x)\n",
                   1914:                                 (caddr_t)va, (u_int)size));
                   1915:
                   1916:        size += va;
                   1917:        va = hppa_trunc_page(va);
                   1918:        size -= va;
                   1919:        s = splvm();
                   1920:        for (size = hppa_round_page(size); size;
                   1921:            size -= PAGE_SIZE, va += PAGE_SIZE) {
                   1922:                pv = pmap_pv_find_va(HPPA_SID_KERNEL, va);
                   1923:                if (pv) {
1.3       fredette 1924:                        KASSERT((pv->pv_tlbprot & TLB_UNMANAGED) != 0);
1.1       fredette 1925:                        pmap_pv_remove(pv);
                   1926:                } else {
                   1927:                        PMAP_PRINTF(PDB_REMOVE, (": no pv for %p\n",
                   1928:                                                 (caddr_t)va));
                   1929:                }
                   1930:        }
                   1931:        splx(s);
                   1932: #ifdef PMAPDEBUG
                   1933:        pmapdebug = opmapdebug;
                   1934: #endif /* PMAPDEBUG */
                   1935: }
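/*
 * Note: pmap_kremove() is only expected to tear down mappings that were
 * created with pmap_kenter_pa(); the KASSERT above checks that every pv
 * entry found in the range is indeed flagged TLB_UNMANAGED before it is
 * removed.
 */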
                   1936:
                   1937: /*
                   1938:  * pmap_redzone(sva, eva, create)
                   1939:  *     creates or removes a red zone in already mapped and wired memory,
                   1940:  *     from [sva, eva) in the kernel map.
                   1941:  */
                   1942: void
                   1943: pmap_redzone(vaddr_t sva, vaddr_t eva, int create)
                   1944: {
                   1945:        vaddr_t va;
                   1946:        struct pv_entry *pv;
                   1947:        u_int tlbprot;
                   1948:        int s;
                   1949:
                   1950:        sva = hppa_trunc_page(sva);
                   1951:        tlbprot = (create ? TLB_AR_NA : TLB_AR_KRW);
                   1952:        s = splvm();
                   1953:        for(va = sva; va < eva; va += PAGE_SIZE) {
                   1954:                pv = pmap_pv_find_va(HPPA_SID_KERNEL, va);
                   1955:                KASSERT(pv != NULL);
                   1956:                /*
                   1957:                 * Compare new protection with old to see if
                   1958:                 * anything needs to be changed.
                   1959:                 */
                   1960:                if ((pv->pv_tlbprot & TLB_AR_MASK) != tlbprot)
                   1961:                        pmap_pv_update(pv, TLB_AR_MASK, tlbprot);
                   1962:        }
                   1963:        splx(s);
                   1964: }
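/*
 * Example (illustrative sketch; stack_base is hypothetical): a red zone
 * can be used to catch overruns of already wired kernel memory, such as
 * the guard page at the end of a kernel stack:
 *
 *	pmap_redzone(stack_base, stack_base + PAGE_SIZE, 1);	create
 *	pmap_redzone(stack_base, stack_base + PAGE_SIZE, 0);	remove
 *
 * With create != 0 the pages are switched to TLB_AR_NA (no access), so
 * any reference faults; with create == 0 they revert to TLB_AR_KRW.
 */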
                   1965:
                   1966: #if defined(PMAPDEBUG) && defined(DDB)
                   1967: #include <ddb/db_output.h>
                   1968: /*
                   1969:  * dumps the whole va->pa translation table (aka the HPT or HVT)
                   1970:  */
                   1971: void
                   1972: pmap_hptdump()
                   1973: {
1.10    ! chs      1974:        struct hpt_entry *hpt, *ehpt;
        !          1975:        struct pv_entry *pv;
1.1       fredette 1976:
                   1977:        mfctl(CR_HPTMASK, ehpt);
                   1978:        mfctl(CR_VTOP, hpt);
                   1979:        ehpt = (struct hpt_entry *)((int)hpt + (int)ehpt + 1);
                   1980:        db_printf("HPT dump %p-%p:\n", hpt, ehpt);
                   1981:        for (; hpt < ehpt; hpt++)
                   1982:                if (hpt->hpt_valid || hpt->hpt_entry) {
                   1983:                        db_printf("hpt@%p: %x{%sv=%x:%x},%b,%x\n",
                   1984:                            hpt, *(int *)hpt, (hpt->hpt_valid?"ok,":""),
                   1985:                            hpt->hpt_space, hpt->hpt_vpn << 9,
                   1986:                            hpt->hpt_tlbprot, TLB_BITS,
                   1987:                            tlbptob(hpt->hpt_tlbpage));
                   1988:                        for (pv = hpt->hpt_entry; pv; pv = pv->pv_hash)
                   1989:                                db_printf("    pv={%p,%x:%x,%b,%x}->%p\n",
                   1990:                                    pv->pv_pmap, pv->pv_space, pv->pv_va,
                   1991:                                    pv->pv_tlbprot, TLB_BITS,
                   1992:                                    tlbptob(pv->pv_tlbpage), pv->pv_hash);
                   1993:                }
                   1994: }
                   1995: #endif
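/*
 * pmap_hptdump() above is a debugging aid: with both PMAPDEBUG and DDB
 * configured, it can be invoked from the in-kernel debugger prompt,
 * typically via DDB's "call" command (e.g. "call pmap_hptdump"), to walk
 * the hashed page table and print every valid entry together with the
 * pv entries chained off it.
 */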
