
Annotation of src/sys/arch/sparc/sparc/pmap.c, Revision 1.165

1.165   ! pk          1: /*     $NetBSD: pmap.c,v 1.164 2000/05/31 05:28:30 thorpej Exp $ */
1.22      deraadt     2:
1.1       deraadt     3: /*
1.55      pk          4:  * Copyright (c) 1996
1.57      abrown      5:  *     The President and Fellows of Harvard College. All rights reserved.
1.1       deraadt     6:  * Copyright (c) 1992, 1993
                      7:  *     The Regents of the University of California.  All rights reserved.
                      8:  *
                      9:  * This software was developed by the Computer Systems Engineering group
                     10:  * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
                     11:  * contributed to Berkeley.
                     12:  *
                     13:  * All advertising materials mentioning features or use of this software
                     14:  * must display the following acknowledgement:
1.55      pk         15:  *     This product includes software developed by Harvard University.
1.1       deraadt    16:  *     This product includes software developed by the University of
                     17:  *     California, Lawrence Berkeley Laboratory.
                     18:  *
                     19:  * Redistribution and use in source and binary forms, with or without
                     20:  * modification, are permitted provided that the following conditions
                     21:  * are met:
1.55      pk         22:  *
1.1       deraadt    23:  * 1. Redistributions of source code must retain the above copyright
                     24:  *    notice, this list of conditions and the following disclaimer.
                     25:  * 2. Redistributions in binary form must reproduce the above copyright
                     26:  *    notice, this list of conditions and the following disclaimer in the
                     27:  *    documentation and/or other materials provided with the distribution.
                     28:  * 3. All advertising materials mentioning features or use of this software
                     29:  *    must display the following acknowledgement:
1.55      pk         30:  *     This product includes software developed by Aaron Brown and
                     31:  *     Harvard University.
                     32:  *      This product includes software developed by the University of
                     33:  *      California, Berkeley and its contributors.
1.1       deraadt    34:  * 4. Neither the name of the University nor the names of its contributors
                     35:  *    may be used to endorse or promote products derived from this software
                     36:  *    without specific prior written permission.
                     37:  *
                     38:  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
                     39:  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
                     40:  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
                     41:  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
                     42:  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
                     43:  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
                     44:  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
                     45:  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
                     46:  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
                     47:  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
                     48:  * SUCH DAMAGE.
                     49:  *
1.22      deraadt    50:  *     @(#)pmap.c      8.4 (Berkeley) 2/5/94
1.55      pk         51:  *
1.1       deraadt    52:  */
                     53:
                     54: /*
                     55:  * SPARC physical map management code.
                     56:  * Does not function on multiprocessors (yet).
                     57:  */
1.112     mrg        58:
1.119     jonathan   59: #include "opt_ddb.h"
1.152     pk         60: #include "opt_multiprocessor.h"
1.1       deraadt    61:
                     62: #include <sys/param.h>
                     63: #include <sys/systm.h>
                     64: #include <sys/device.h>
                     65: #include <sys/proc.h>
1.43      pk         66: #include <sys/queue.h>
1.1       deraadt    67: #include <sys/malloc.h>
1.87      pk         68: #include <sys/lock.h>
1.121     pk         69: #include <sys/pool.h>
1.67      pk         70: #include <sys/exec.h>
                     71: #include <sys/core.h>
                     72: #include <sys/kcore.h>
1.1       deraadt    73:
                     74: #include <vm/vm.h>
                     75: #include <vm/vm_kern.h>
                     76: #include <vm/vm_prot.h>
                     77: #include <vm/vm_page.h>
                     78:
1.110     mrg        79: #include <uvm/uvm.h>
                     80:
1.1       deraadt    81: #include <machine/autoconf.h>
                     82: #include <machine/bsd_openprom.h>
1.19      deraadt    83: #include <machine/oldmon.h>
1.1       deraadt    84: #include <machine/cpu.h>
                     85: #include <machine/ctlreg.h>
1.67      pk         86: #include <machine/kcore.h>
1.1       deraadt    87:
                     88: #include <sparc/sparc/asm.h>
                     89: #include <sparc/sparc/cache.h>
1.3       deraadt    90: #include <sparc/sparc/vaddrs.h>
1.69      pk         91: #include <sparc/sparc/cpuvar.h>
1.1       deraadt    92:
                     93: /*
                     94:  * The SPARCstation offers us the following challenges:
                     95:  *
                     96:  *   1. A virtual address cache.  This is, strictly speaking, not
                     97:  *     part of the architecture, but the code below assumes one.
                     98:  *     This is a write-through cache on the 4c and a write-back cache
                     99:  *     on others.
                    100:  *
1.55      pk        101:  *   2. (4/4c only) An MMU that acts like a cache.  There is not enough
                    102:  *     space in the MMU to map everything all the time.  Instead, we need
1.1       deraadt   103:  *     to load the MMU with the `working set' of translations for each
1.55      pk        104:  *     process. The sun4m does not act like a cache; tables are maintained
                    105:  *     in physical memory.
1.1       deraadt   106:  *
                    107:  *   3.        Segmented virtual and physical spaces.  The upper 12 bits of
                    108:  *     a virtual address (the virtual segment) index a segment table,
                    109:  *     giving a physical segment.  The physical segment selects a
                    110:  *     `Page Map Entry Group' (PMEG) and the virtual page number---the
                    111:  *     next 5 or 6 bits of the virtual address---select the particular
                    112:  *     `Page Map Entry' for the page.  We call the latter a PTE and
                    113:  *     call each Page Map Entry Group a pmeg (for want of a better name).
1.55      pk        114:  *     Note that the sun4m has an unsegmented 36-bit physical space.
1.1       deraadt   115:  *
                    116:  *     Since there are no valid bits in the segment table, the only way
                    117:  *     to have an invalid segment is to make one full pmeg of invalid PTEs.
1.55      pk        118:  *     We use the last one (since the ROM does as well) (sun4/4c only).
1.1       deraadt   119:  *
                    120:  *   4. Discontiguous physical pages.  The Mach VM expects physical pages
                    121:  *     to be in one sequential lump.
                    122:  *
                    123:  *   5. The MMU is always on: it is not possible to disable it.  This is
                    124:  *     mainly a startup hassle.
                    125:  */
                    126:
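/*
 * The segmented lookup described in (3) above can be pictured with a
 * small, self-contained sketch.  The shift values and names below are
 * illustrative assumptions only (the real constants are RGSHIFT, SGSHIFT
 * and PGSHIFT from pmap.h, and the kernel uses the VA_VREG/VA_VSEG/VA_VPG
 * macros); the point is merely how a virtual address decomposes into
 * region, segment and page indices before the PMEG lookup.
 */
#if 0  /* illustrative sketch, not compiled */
#define EX_PGSHIFT      12      /* assumed 4KB pages */
#define EX_SGSHIFT      18      /* assumed 256KB segments */
#define EX_RGSHIFT      24      /* assumed 16MB regions (3-level MMU) */

struct ex_vadecomp {
        unsigned int vreg;      /* virtual region number */
        unsigned int vseg;      /* virtual segment within that region */
        unsigned int vpg;       /* page within that segment */
        unsigned int off;       /* byte offset within the page */
};

static void
ex_va_split(unsigned int va, struct ex_vadecomp *d)
{
        d->vreg = va >> EX_RGSHIFT;
        d->vseg = (va >> EX_SGSHIFT) & ((1 << (EX_RGSHIFT - EX_SGSHIFT)) - 1);
        d->vpg  = (va >> EX_PGSHIFT) & ((1 << (EX_SGSHIFT - EX_PGSHIFT)) - 1);
        d->off  = va & ((1 << EX_PGSHIFT) - 1);
}
#endif
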
                    127: struct pmap_stats {
                    128:        int     ps_unlink_pvfirst;      /* # of pv_unlinks on head */
                    129:        int     ps_unlink_pvsearch;     /* # of pv_unlink searches */
                    130:        int     ps_changeprots;         /* # of calls to changeprot */
                    131:        int     ps_useless_changeprots; /* # of changeprots for wiring */
                    132:        int     ps_enter_firstpv;       /* pv heads entered */
                    133:        int     ps_enter_secondpv;      /* pv nonheads entered */
                    134:        int     ps_useless_changewire;  /* useless wiring changes */
                    135:        int     ps_npg_prot_all;        /* # of active pages protected */
                    136:        int     ps_npg_prot_actual;     /* # pages actually affected */
1.70      pk        137:        int     ps_npmeg_free;          /* # of free pmegs */
                    138:        int     ps_npmeg_locked;        /* # of pmegs on locked list */
                    139:        int     ps_npmeg_lru;           /* # of pmegs on lru list */
1.1       deraadt   140: } pmap_stats;
                    141:
                    142: #ifdef DEBUG
                    143: #define        PDB_CREATE      0x0001
                    144: #define        PDB_DESTROY     0x0002
                    145: #define        PDB_REMOVE      0x0004
                    146: #define        PDB_CHANGEPROT  0x0008
                    147: #define        PDB_ENTER       0x0010
1.90      pk        148: #define        PDB_FOLLOW      0x0020
1.1       deraadt   149:
                    150: #define        PDB_MMU_ALLOC   0x0100
                    151: #define        PDB_MMU_STEAL   0x0200
                    152: #define        PDB_CTX_ALLOC   0x0400
                    153: #define        PDB_CTX_STEAL   0x0800
1.43      pk        154: #define        PDB_MMUREG_ALLOC        0x1000
                    155: #define        PDB_MMUREG_STEAL        0x2000
1.55      pk        156: #define        PDB_CACHESTUFF  0x4000
1.72      pk        157: #define        PDB_SWITCHMAP   0x8000
                    158: #define        PDB_SANITYCHK   0x10000
1.55      pk        159: int    pmapdebug = 0;
1.1       deraadt   160: #endif
                    161:
1.55      pk        162: #if 0
1.10      deraadt   163: #define        splpmap() splimp()
1.55      pk        164: #endif
1.1       deraadt   165:
                    166: /*
                    167:  * First and last managed physical addresses.
                    168:  */
1.124     pk        169: paddr_t        vm_first_phys, vm_num_phys;
1.1       deraadt   170:
                    171: /*
                    172:  * For each managed physical page, there is a list of all currently
                    173:  * valid virtual mappings of that page.  Since there is usually one
                    174:  * (or zero) mapping per page, the table begins with an initial entry,
                    175:  * rather than a pointer; this head entry is empty iff its pv_pmap
                    176:  * field is NULL.
                    177:  *
                    178:  * Note that these are per machine-independent page (so there may be,
                    179:  * e.g., only one for every two hardware pages).  Since the virtual
                    180:  * address is aligned on a page boundary, the low order bits are free
                    181:  * for storing flags.  Only the head of each list has flags.
                    182:  *
                    183:  * THIS SHOULD BE PART OF THE CORE MAP
                    184:  */
                    185: struct pvlist {
1.84      pk        186:        struct          pvlist *pv_next;        /* next pvlist, if any */
                    187:        struct          pmap *pv_pmap;          /* pmap of this va */
1.124     pk        188:        vaddr_t         pv_va;                  /* virtual address */
1.84      pk        189:        int             pv_flags;               /* flags (below) */
1.1       deraadt   190: };
                    191:
                    192: /*
                    193:  * Flags in pv_flags.  Note that PV_MOD must be 1 and PV_REF must be 2
                    194:  * since they must line up with the bits in the hardware PTEs (see pte.h).
1.115     pk        195:  * SUN4M bits are at a slightly different location in the PTE.
                    196:  * Note: the REF, MOD and ANC flag bits occur only in the head of a pvlist.
                    197:  * The cacheable bit (either PV_NC or PV_C4M) is meaningful in each
                    198:  * individual pv entry.
                    199:  */
                    200: #define PV_MOD         1       /* page modified */
                    201: #define PV_REF         2       /* page referenced */
                    202: #define PV_NC          4       /* page cannot be cached */
                    203: #define PV_REF4M       1       /* page referenced (SRMMU) */
                    204: #define PV_MOD4M       2       /* page modified (SRMMU) */
                    205: #define PV_C4M         4       /* page _can_ be cached (SRMMU) */
                    206: #define PV_ANC         0x10    /* page has incongruent aliases */
1.1       deraadt   207:
                    208: struct pvlist *pv_table;       /* array of entries, one per physical page */
                    209:
1.124     pk        210: #define pvhead(pa)     (&pv_table[((pa) - vm_first_phys) >> PGSHIFT])
1.1       deraadt   211:
1.124     pk        212: static vsize_t pv_table_map __P((paddr_t, int));
                    213: static paddr_t pv_physmem;
1.122     pk        214: static struct pool pv_pool;
                    215:
                    216:
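/*
 * A minimal sketch of how the pv_table/pvhead() structures above are
 * walked.  The head entry lives in pv_table itself and is empty iff its
 * pv_pmap field is NULL; any further mappings of the page hang off
 * pv_next.  ex_count_mappings() is a hypothetical helper, not a function
 * in this file.
 */
#if 0  /* illustrative sketch, not compiled */
static int
ex_count_mappings(paddr_t pa)
{
        struct pvlist *pv;
        int n = 0;

        for (pv = pvhead(pa); pv != NULL; pv = pv->pv_next) {
                if (pv->pv_pmap == NULL)
                        break;          /* empty head: page has no mappings */
                n++;
        }
        return (n);
}
#endif
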
1.1       deraadt   217: /*
                    218:  * Each virtual segment within each pmap is either valid or invalid.
                    219:  * It is valid if pm_npte[VA_VSEG(va)] is not 0.  This does not mean
                    220:  * it is in the MMU, however; that is true iff pm_segmap[VA_VSEG(va)]
                    221:  * does not point to the invalid PMEG.
                    222:  *
1.55      pk        223:  * In the older SPARC architectures (pre-4m), page tables are cached in the
                    224:  * MMU. The following discussion applies to these architectures:
                    225:  *
1.1       deraadt   226:  * If a virtual segment is valid and loaded, the correct PTEs appear
                    227:  * in the MMU only.  If it is valid and unloaded, the correct PTEs appear
                    228:  * in the pm_pte[VA_VSEG(va)] only.  However, some effort is made to keep
                    229:  * the software copies consistent enough with the MMU so that libkvm can
                    230:  * do user address translations.  In particular, pv_changepte() and
                    231:  * pmap_enu() maintain consistency, while less critical changes are
                    232:  * not maintained.  pm_pte[VA_VSEG(va)] always points to space for those
                    233:  * PTEs, unless this is the kernel pmap, in which case pm_pte[x] is not
                    234:  * used (sigh).
                    235:  *
                    236:  * Each PMEG in the MMU is either free or contains PTEs corresponding to
                    237:  * some pmap and virtual segment.  If it contains some PTEs, it also contains
                    238:  * reference and modify bits that belong in the pv_table.  If we need
                    239:  * to steal a PMEG from some process (if we need one and none are free)
                    240:  * we must copy the ref and mod bits, and update pm_segmap in the other
                    241:  * pmap to show that its virtual segment is no longer in the MMU.
                    242:  *
                    243:  * There are 128 PMEGs in a small Sun-4, of which only a few dozen are
                    244:  * tied down permanently, leaving `about' 100 to be spread among
                    245:  * running processes.  These are managed as an LRU cache.  Before
                    246:  * calling the VM paging code for a user page fault, the fault handler
                    247:  * calls mmu_load(pmap, va) to try to get a set of PTEs put into the
                    248:  * MMU.  mmu_load will check the validity of the segment and tell whether
                    249:  * it did something.
                    250:  *
                    251:  * Since I hate the name PMEG I call this data structure an `mmu entry'.
                    252:  * Each mmuentry is on exactly one of three `usage' lists: free, LRU,
                    253:  * or locked.  The LRU list is for user processes; the locked list is
                    254:  * for kernel entries; both are doubly linked queues headed by `mmuhd's.
                    255:  * The free list is a simple list, headed by a free list pointer.
1.55      pk        256:  *
                    257:  * In the sun4m architecture using the SPARC Reference MMU (SRMMU), three
                    258:  * levels of page tables are maintained in physical memory. We use the same
                    259:  * structures as with the 3-level old-style MMU (pm_regmap, pm_segmap,
                    260:  * rg_segmap, sg_pte, etc) to maintain kernel-edible page tables; we also
                    261:  * build a parallel set of physical tables that can be used by the MMU.
                    262:  * (XXX: This seems redundant, but is it necessary for the unified kernel?)
                    263:  *
                    264:  * If a virtual segment is valid, its entries will be in both parallel lists.
                    265:  * If it is not valid, then its entry in the kernel tables will be zero, and
                    266:  * its entry in the MMU tables will either be nonexistent or zero as well.
1.72      pk        267:  *
                    268:  * The Reference MMU generally uses a Translation Look-aside Buffer (TLB)
                    269:  * to cache the result of recently executed page table walks. When
                    270:  * manipulating page tables, we need to ensure consistency of the
                    271:  * in-memory and TLB copies of the page table entries. This is handled
                    272:  * by flushing (and invalidating) a TLB entry when appropriate before
                    273:  * altering an in-memory page table entry.
1.1       deraadt   274:  */
                    275: struct mmuentry {
1.43      pk        276:        TAILQ_ENTRY(mmuentry)   me_list;        /* usage list link */
                    277:        TAILQ_ENTRY(mmuentry)   me_pmchain;     /* pmap owner link */
1.1       deraadt   278:        struct  pmap *me_pmap;          /* pmap, if in use */
1.43      pk        279:        u_short me_vreg;                /* associated virtual region/segment */
                    280:        u_short me_vseg;                /* associated virtual region/segment */
1.45      pk        281:        u_short me_cookie;              /* hardware SMEG/PMEG number */
1.1       deraadt   282: };
1.43      pk        283: struct mmuentry *mmusegments;  /* allocated in pmap_bootstrap */
                    284: struct mmuentry *mmuregions;   /* allocated in pmap_bootstrap */
1.1       deraadt   285:
1.43      pk        286: struct mmuhd segm_freelist, segm_lru, segm_locked;
                    287: struct mmuhd region_freelist, region_lru, region_locked;
1.1       deraadt   288:
1.69      pk        289: int    seginval;               /* [4/4c] the invalid segment number */
                    290: int    reginval;               /* [4/3mmu] the invalid region number */
1.1       deraadt   291:
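/*
 * A minimal sketch of the PMEG allocation discipline described above:
 * take an entry from the free list if one exists, otherwise steal the
 * least recently used entry from the head of the LRU list.  The
 * bookkeeping a real steal must do (saving the ref/mod bits into the
 * pv_table, invalidating the victim pmap's pm_segmap slot) is omitted,
 * the LRU list is assumed non-empty, and ex_get_pmeg() is a hypothetical
 * helper rather than a function in this file.
 */
#if 0  /* illustrative sketch, not compiled */
static struct mmuentry *
ex_get_pmeg(void)
{
        struct mmuentry *me;

        if ((me = TAILQ_FIRST(&segm_freelist)) != NULL) {
                TAILQ_REMOVE(&segm_freelist, me, me_list);
        } else {
                /* no free PMEGs: evict the least recently used one */
                me = TAILQ_FIRST(&segm_lru);
                TAILQ_REMOVE(&segm_lru, me, me_list);
                /* ...save ref/mod bits, mark owner's segment unloaded... */
        }
        /* freshly loaded entries go to the tail, keeping LRU order */
        TAILQ_INSERT_TAIL(&segm_lru, me, me_list);
        return (me);
}
#endif
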
                    292: /*
1.55      pk        293:  * (sun4/4c)
1.1       deraadt   294:  * A context is simply a small number that dictates which set of 4096
                    295:  * segment map entries the MMU uses.  The Sun 4c has eight such sets.
                    296:  * These are allotted in an `almost MRU' fashion.
1.55      pk        297:  * (sun4m)
                    298:  * A context is simply a small number that indexes the context table, the
                    299:  * root-level page table mapping 4G areas. Each entry in this table points
                    300:  * to a 1st-level region table. A SPARC reference MMU will usually use 16
                    301:  * such contexts, but some offer as many as 64k contexts; the theoretical
                    302:  * maximum is 2^32 - 1, but this would create overlarge context tables.
1.1       deraadt   303:  *
                    304:  * Each context is either free or attached to a pmap.
                    305:  *
                    306:  * Since the virtual address cache is tagged by context, when we steal
                    307:  * a context we have to flush (that part of) the cache.
                    308:  */
                    309: union ctxinfo {
                    310:        union   ctxinfo *c_nextfree;    /* free list (if free) */
                    311:        struct  pmap *c_pmap;           /* pmap (if busy) */
                    312: };
1.69      pk        313:
                    314: #define ncontext       (cpuinfo.mmu_ncontext)
                    315: #define ctx_kick       (cpuinfo.ctx_kick)
                    316: #define ctx_kickdir    (cpuinfo.ctx_kickdir)
                    317: #define ctx_freelist   (cpuinfo.ctx_freelist)
                    318:
1.122     pk        319: void   ctx_alloc __P((struct pmap *));
                    320: void   ctx_free __P((struct pmap *));
                    321:
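/*
 * A minimal sketch of context allocation as described above: hand out a
 * free context if one is available, otherwise steal one from another
 * pmap, which on a virtually-tagged cache also means purging that
 * context from the cache.  ex_ctxinfo[], ex_cache_flush_context() and
 * ex_ctx_get() are assumptions made for the sketch only; the kernel's
 * real allocator is ctx_alloc(), declared above.
 */
#if 0  /* illustrative sketch, not compiled */
extern union ctxinfo ex_ctxinfo[];             /* assumed context array */
extern void ex_cache_flush_context(void);      /* assumed flush primitive */

static union ctxinfo *
ex_ctx_get(struct pmap *pm)
{
        union ctxinfo *c;

        if ((c = ctx_freelist) != NULL) {
                ctx_freelist = c->c_nextfree;   /* pop the free list */
        } else {
                c = &ex_ctxinfo[ctx_kick];      /* pick a victim */
                ctx_kick = (ctx_kick + 1) % ncontext;
                ex_cache_flush_context();       /* flush victim's VAC lines */
                c->c_pmap->pm_ctx = NULL;       /* detach the old owner */
        }
        c->c_pmap = pm;
        return (c);
}
#endif
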
1.1       deraadt   322: caddr_t        vpage[2];               /* two reserved MD virtual pages */
1.101     pk        323: #if defined(SUN4M)
                    324: int    *vpage_pte[2];          /* pte location of vpage[] */
                    325: #endif
1.41      mycroft   326: caddr_t        vmmap;                  /* one reserved MI vpage for /dev/mem */
1.55      pk        327: caddr_t        vdumppages;             /* 32KB worth of reserved dump pages */
1.1       deraadt   328:
1.69      pk        329: smeg_t         tregion;        /* [4/3mmu] Region for temporary mappings */
                    330:
1.43      pk        331: struct pmap    kernel_pmap_store;              /* the kernel's pmap */
                    332: struct regmap  kernel_regmap_store[NKREG];     /* the kernel's regmap */
                    333: struct segmap  kernel_segmap_store[NKREG*NSEGRG];/* the kernel's segmaps */
1.1       deraadt   334:
1.69      pk        335: #if defined(SUN4M)
1.55      pk        336: u_int  *kernel_regtable_store;         /* 1k of storage to map the kernel */
                    337: u_int  *kernel_segtable_store;         /* 2k of storage to map the kernel */
                    338: u_int  *kernel_pagtable_store;         /* 128k of storage to map the kernel */
                    339:
1.121     pk        340: /*
                    341:  * Memory pools and back-end supplier for SRMMU page tables.
                    342:  * Share a pool between the level 2 and level 3 page tables,
                    343:  * since these are equal in size.
                    344:  */
                    345: static struct pool L1_pool;
                    346: static struct pool L23_pool;
                    347:
                    348: static void *pgt_page_alloc __P((unsigned long, int, int));
                    349: static void  pgt_page_free __P((void *, unsigned long, int));
                    350:
1.55      pk        351: #endif
                    352:
1.30      pk        353: #define        MA_SIZE 32              /* size of memory descriptor arrays */
1.1       deraadt   354: struct memarr pmemarr[MA_SIZE];/* physical memory regions */
                    355: int    npmemarr;               /* number of entries in pmemarr */
1.124     pk        356: /*static*/ paddr_t     avail_start;    /* first free physical page */
                    357: /*static*/ paddr_t     avail_end;      /* last free physical page */
                    358: /*static*/ vaddr_t     virtual_avail;  /* first free virtual page number */
                    359: /*static*/ vaddr_t     virtual_end;    /* last free virtual page number */
1.29      pk        360:
1.107     pk        361: static void pmap_page_upload __P((void));
1.118     thorpej   362: void pmap_release __P((pmap_t));
1.107     pk        363:
1.45      pk        364: int mmu_has_hole;
                    365:
1.124     pk        366: vaddr_t prom_vstart;   /* For /dev/kmem */
                    367: vaddr_t prom_vend;
1.1       deraadt   368:
1.134     thorpej   369: /*
                    370:  * Memory pool for pmap structures.
                    371:  */
                    372: static struct pool pmap_pmap_pool;
                    373:
1.55      pk        374: #if defined(SUN4)
1.31      pk        375: /*
1.55      pk        376:  * [sun4]: segfixmask: on some systems (4/110) "getsegmap()" returns a
                    377:  * partly invalid value. getsegmap returns a 16-bit value on the sun4,
                    378:  * but only the first 8 or so bits are valid (the rest are *supposed* to
                    379:  * be zero). On the 4/110 the bits that are supposed to be zero are
                    380:  * all one instead. e.g. KERNBASE is usually mapped by pmeg number zero.
                    381:  * On a 4/300 getsegmap(KERNBASE) == 0x0000, but
1.31      pk        382:  * on a 4/100 getsegmap(KERNBASE) == 0xff00
                    383:  *
1.55      pk        384:  * This confuses mmu_reservemon() and causes it to not reserve the PROM's
                    385:  * pmegs. Then the PROM's pmegs get used during autoconfig and everything
1.31      pk        386:  * falls apart!  (not very fun to debug, BTW.)
                    387:  *
1.43      pk        388:  * solution: mask the invalid bits in the getsegmap macro.
1.31      pk        389:  */
                    390:
                    391: static u_long segfixmask = 0xffffffff; /* all bits valid to start */
1.55      pk        392: #else
                    393: #define segfixmask 0xffffffff  /* It's in getsegmap's scope */
1.31      pk        394: #endif
                    395:
1.1       deraadt   396: /*
                    397:  * pseudo-functions for mnemonic value
                    398:  */
1.55      pk        399: #define        getsegmap(va)           (CPU_ISSUN4C \
                    400:                                        ? lduba(va, ASI_SEGMAP) \
                    401:                                        : (lduha(va, ASI_SEGMAP) & segfixmask))
                    402: #define        setsegmap(va, pmeg)     (CPU_ISSUN4C \
                    403:                                        ? stba(va, ASI_SEGMAP, pmeg) \
                    404:                                        : stha(va, ASI_SEGMAP, pmeg))
                    405:
                    406: /* 3-level sun4 MMU only: */
                    407: #define        getregmap(va)           ((unsigned)lduha((va)+2, ASI_REGMAP) >> 8)
                    408: #define        setregmap(va, smeg)     stha((va)+2, ASI_REGMAP, (smeg << 8))
                    409:
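/*
 * A small sketch of what the getsegmap() wrapper above buys us on a sun4
 * whose segment map reads back with junk in the high bits: the raw 16-bit
 * probe is masked with segfixmask (which starts out as all ones and is
 * assumed to be narrowed during bootstrap on the affected machines).
 * ex_probe_pmeg() is a hypothetical helper; the kernel just uses
 * getsegmap() directly.
 */
#if 0  /* illustrative sketch, not compiled */
static int
ex_probe_pmeg(vaddr_t va)
{
        int raw, pmeg;

        raw = lduha(va, ASI_SEGMAP);    /* raw 16-bit segment map read */
        pmeg = raw & segfixmask;        /* strip the bits known to be junk */
        return (pmeg);
}
#endif
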
                    410: #if defined(SUN4M)
1.132     pk        411: void           setpgt4m __P((int *ptep, int pte));
                    412: void           setpte4m __P((vaddr_t va, int pte));
1.55      pk        413: #endif
                    414:
                    415: /* Function pointer messiness for supporting multiple sparc architectures
                    416:  * within a single kernel: notice that there are two versions of many of the
                    417:  * functions within this file/module, one for the sun4/sun4c and the other
                    418:  * for the sun4m. For performance reasons (since things like pte bits don't
                    419:  * map nicely between the two architectures), there are separate functions
                    420:  * rather than unified functions which test the cputyp variable. If only
                    421:  * one architecture is being used, then the non-suffixed function calls
                    422:  * are macro-translated into the appropriate xxx4_4c or xxx4m call. If
                    423:  * multiple architectures are defined, the calls translate to (*xxx_p),
                    424:  * i.e. they indirect through function pointers initialized as appropriate
                    425:  * to the run-time architecture in pmap_bootstrap. See also pmap.h.
                    426:  */
                    427:
                    428: #if defined(SUN4M)
1.71      pk        429: static void mmu_setup4m_L1 __P((int, struct pmap *));
                    430: static void mmu_setup4m_L2 __P((int, struct regmap *));
                    431: static void  mmu_setup4m_L3 __P((int, struct segmap *));
1.77      pk        432: /*static*/ void        mmu_reservemon4m __P((struct pmap *));
1.58      pk        433:
1.124     pk        434: /*static*/ void pmap_rmk4m __P((struct pmap *, vaddr_t, vaddr_t, int, int));
                    435: /*static*/ void pmap_rmu4m __P((struct pmap *, vaddr_t, vaddr_t, int, int));
                    436: /*static*/ void pmap_enk4m __P((struct pmap *, vaddr_t, vm_prot_t,
                    437:                                int, struct pvlist *, int));
                    438: /*static*/ void pmap_enu4m __P((struct pmap *, vaddr_t, vm_prot_t,
                    439:                                int, struct pvlist *, int));
1.55      pk        440: /*static*/ void pv_changepte4m __P((struct pvlist *, int, int));
                    441: /*static*/ int  pv_syncflags4m __P((struct pvlist *));
1.124     pk        442: /*static*/ int  pv_link4m __P((struct pvlist *, struct pmap *, vaddr_t, int));
                    443: /*static*/ void pv_unlink4m __P((struct pvlist *, struct pmap *, vaddr_t));
1.55      pk        444: #endif
                    445:
                    446: #if defined(SUN4) || defined(SUN4C)
1.58      pk        447: /*static*/ void        mmu_reservemon4_4c __P((int *, int *));
1.124     pk        448: /*static*/ void pmap_rmk4_4c __P((struct pmap *, vaddr_t, vaddr_t, int, int));
                    449: /*static*/ void pmap_rmu4_4c __P((struct pmap *, vaddr_t, vaddr_t, int, int));
                    450: /*static*/ void pmap_enk4_4c __P((struct pmap *, vaddr_t, vm_prot_t,
                    451:                                  int, struct pvlist *, int));
                    452: /*static*/ void pmap_enu4_4c __P((struct pmap *, vaddr_t, vm_prot_t,
                    453:                                  int, struct pvlist *, int));
1.55      pk        454: /*static*/ void pv_changepte4_4c __P((struct pvlist *, int, int));
                    455: /*static*/ int  pv_syncflags4_4c __P((struct pvlist *));
1.124     pk        456: /*static*/ int  pv_link4_4c __P((struct pvlist *, struct pmap *, vaddr_t, int));
                    457: /*static*/ void pv_unlink4_4c __P((struct pvlist *, struct pmap *, vaddr_t));
1.55      pk        458: #endif
                    459:
                    460: #if !defined(SUN4M) && (defined(SUN4) || defined(SUN4C))
                    461: #define                pmap_rmk        pmap_rmk4_4c
                    462: #define                pmap_rmu        pmap_rmu4_4c
                    463:
                    464: #elif defined(SUN4M) && !(defined(SUN4) || defined(SUN4C))
                    465: #define                pmap_rmk        pmap_rmk4m
                    466: #define                pmap_rmu        pmap_rmu4m
                    467:
                    468: #else  /* must use function pointers */
                    469:
                    470: /* function pointer declarations */
                    471: /* from pmap.h: */
1.151     chs       472: boolean_t      (*pmap_clear_modify_p) __P((struct vm_page *));
                    473: boolean_t      (*pmap_clear_reference_p) __P((struct vm_page *));
1.154     chs       474: int            (*pmap_enter_p) __P((pmap_t, vaddr_t, paddr_t, vm_prot_t, int));
1.149     thorpej   475: boolean_t      (*pmap_extract_p) __P((pmap_t, vaddr_t, paddr_t *));
1.151     chs       476: boolean_t      (*pmap_is_modified_p) __P((struct vm_page *));
                    477: boolean_t      (*pmap_is_referenced_p) __P((struct vm_page *));
                    478: void           (*pmap_kenter_pa_p) __P((vaddr_t, paddr_t, vm_prot_t));
                    479: void           (*pmap_kenter_pgs_p) __P((vaddr_t, struct vm_page **, int));
                    480: void           (*pmap_kremove_p) __P((vaddr_t, vsize_t));
                    481: void           (*pmap_page_protect_p) __P((struct vm_page *, vm_prot_t));
1.124     pk        482: void           (*pmap_protect_p) __P((pmap_t, vaddr_t, vaddr_t, vm_prot_t));
                    483: void           (*pmap_changeprot_p) __P((pmap_t, vaddr_t, vm_prot_t, int));
1.55      pk        484: /* local: */
1.124     pk        485: void           (*pmap_rmk_p) __P((struct pmap *, vaddr_t, vaddr_t, int, int));
                    486: void           (*pmap_rmu_p) __P((struct pmap *, vaddr_t, vaddr_t, int, int));
1.55      pk        487:
                    488: #define                pmap_rmk        (*pmap_rmk_p)
                    489: #define                pmap_rmu        (*pmap_rmu_p)
                    490:
                    491: #endif
                    492:
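/*
 * A minimal sketch of the boot-time selection the comment above
 * describes.  With only one MMU family configured, pmap_rmk expands
 * directly to pmap_rmk4_4c or pmap_rmk4m; with both configured it
 * expands to (*pmap_rmk_p), and code along the lines of the hypothetical
 * ex_select_pmap_fns() below (in reality part of pmap_bootstrap())
 * fills in the pointers for the architecture found at run time.
 */
#if 0  /* illustrative sketch, not compiled */
static void
ex_select_pmap_fns(void)
{
        if (CPU_ISSUN4M) {
                pmap_rmk_p = pmap_rmk4m;
                pmap_rmu_p = pmap_rmu4m;
        } else {
                pmap_rmk_p = pmap_rmk4_4c;
                pmap_rmu_p = pmap_rmu4_4c;
        }
}
#endif
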
                    493: /* --------------------------------------------------------------*/
                    494:
                    495: /*
                    496:  * Next we have some Sun4m-specific routines which have no 4/4c
                    497:  * counterparts, or which are 4/4c macros.
                    498:  */
                    499:
                    500: #if defined(SUN4M)
1.143     pk        501: /*
                    502:  * Macros which implement SRMMU TLB flushing/invalidation
                    503:  */
                    504: #define tlb_flush_page(va)    \
1.145     pk        505:        sta(((vaddr_t)(va) & ~0xfff) | ASI_SRMMUFP_L3, ASI_SRMMUFP, 0)
1.143     pk        506:
                    507: #define tlb_flush_segment(vr, vs) \
                    508:        sta(((vr)<<RGSHIFT) | ((vs)<<SGSHIFT) | ASI_SRMMUFP_L2, ASI_SRMMUFP,0)
1.55      pk        509:
1.143     pk        510: #define tlb_flush_region(vr) \
                    511:        sta(((vr) << RGSHIFT) | ASI_SRMMUFP_L1, ASI_SRMMUFP, 0)
1.55      pk        512:
1.143     pk        513: #define tlb_flush_context()    sta(ASI_SRMMUFP_L0, ASI_SRMMUFP, 0)
                    514: #define tlb_flush_all()                sta(ASI_SRMMUFP_LN, ASI_SRMMUFP, 0)
1.55      pk        515:
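/*
 * A minimal sketch of the SRMMU consistency rule explained earlier in
 * this file: invalidate the TLB entry covering a virtual address before
 * changing the in-memory PTE it may have cached, so the MMU cannot keep
 * using a stale translation.  ex_update_pte() is a hypothetical helper;
 * setpte4m() below is the real thing.
 */
#if 0  /* illustrative sketch, not compiled */
static void
ex_update_pte(vaddr_t va, int *ptep, int newpte)
{
        tlb_flush_page(va);             /* drop any cached translation */
        setpgt4m(ptep, newpte);         /* then install the new PTE */
}
#endif
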
                    516: static u_int   VA2PA __P((caddr_t));
1.97      pk        517: static u_long  srmmu_bypass_read __P((u_long));
1.55      pk        518:
                    519: /*
                    520:  * VA2PA(addr) -- converts a virtual address to a physical address using
                    521:  * the MMU's currently-installed page tables. As a side effect, the address
                    522:  * translation used may cause the associated pte to be encached. The correct
                    523:  * context for VA must be set before this is called.
                    524:  *
                    525:  * This routine should work with any level of mapping, as it is used
                    526:  * during bootup to interact with the ROM's initial L1 mapping of the kernel.
                    527:  */
1.160     pk        528: static u_int
1.55      pk        529: VA2PA(addr)
1.124     pk        530:        caddr_t addr;
1.55      pk        531: {
1.124     pk        532:        u_int pte;
1.55      pk        533:
                    534:        /* we'll use that handy SRMMU flush/probe! %%%: make consts below! */
                    535:        /* Try each level in turn until we find a valid pte. Otherwise panic */
                    536:
                    537:        pte = lda(((u_int)addr & ~0xfff) | ASI_SRMMUFP_L3, ASI_SRMMUFP);
1.143     pk        538:        /* Unlock fault status; required on Hypersparc modules */
                    539:        (void)lda(SRMMU_SFSR, ASI_SRMMU);
1.55      pk        540:        if ((pte & SRMMU_TETYPE) == SRMMU_TEPTE)
                    541:            return (((pte & SRMMU_PPNMASK) << SRMMU_PPNPASHIFT) |
                    542:                    ((u_int)addr & 0xfff));
1.60      pk        543:
                    544:        /* A `TLB Flush Entire' is required before any L0, L1 or L2 probe */
                    545:        tlb_flush_all();
                    546:
1.55      pk        547:        pte = lda(((u_int)addr & ~0xfff) | ASI_SRMMUFP_L2, ASI_SRMMUFP);
                    548:        if ((pte & SRMMU_TETYPE) == SRMMU_TEPTE)
                    549:            return (((pte & SRMMU_PPNMASK) << SRMMU_PPNPASHIFT) |
                    550:                    ((u_int)addr & 0x3ffff));
                    551:        pte = lda(((u_int)addr & ~0xfff) | ASI_SRMMUFP_L1, ASI_SRMMUFP);
                    552:        if ((pte & SRMMU_TETYPE) == SRMMU_TEPTE)
                    553:            return (((pte & SRMMU_PPNMASK) << SRMMU_PPNPASHIFT) |
                    554:                    ((u_int)addr & 0xffffff));
                    555:        pte = lda(((u_int)addr & ~0xfff) | ASI_SRMMUFP_L0, ASI_SRMMUFP);
                    556:        if ((pte & SRMMU_TETYPE) == SRMMU_TEPTE)
                    557:            return (((pte & SRMMU_PPNMASK) << SRMMU_PPNPASHIFT) |
                    558:                    ((u_int)addr & 0xffffffff));
                    559:
1.160     pk        560: #ifdef DIAGNOSTIC
1.55      pk        561:        panic("VA2PA: Asked to translate unmapped VA %p", addr);
1.160     pk        562: #else
                    563:        return (0);
                    564: #endif
1.55      pk        565: }
                    566:
1.85      pk        567: __inline void
                    568: setpgt4m(ptep, pte)
                    569:        int *ptep;
                    570:        int pte;
                    571: {
1.130     pk        572:        swap(ptep, pte);
1.160     pk        573: #if 0
1.121     pk        574:        /* XXX - uncaching in pgt_page_alloc() below is not yet quite Okay */
1.103     pk        575:        if (cpuinfo.cpu_type == CPUTYP_SS1_MBUS_NOMXCC)
1.85      pk        576:                cpuinfo.pcache_flush_line((int)ptep, VA2PA((caddr_t)ptep));
1.100     pk        577: #endif
1.85      pk        578: }
                    579:
1.92      pk        580: /* Set the page table entry for va to pte. */
1.55      pk        581: __inline void
                    582: setpte4m(va, pte)
1.124     pk        583:        vaddr_t va;
1.115     pk        584:        int pte;
1.55      pk        585: {
1.115     pk        586:        struct pmap *pm;
                    587:        struct regmap *rm;
                    588:        struct segmap *sm;
1.55      pk        589:
1.100     pk        590:        if (getcontext4m() != 0)
                    591:                panic("setpte4m: user context");
                    592:
                    593:        pm = pmap_kernel();
1.55      pk        594:
                    595:        /* Note: inline version of setptesw4m() */
                    596: #ifdef DEBUG
                    597:        if (pm->pm_regmap == NULL)
                    598:                panic("setpte4m: no regmap entry");
1.43      pk        599: #endif
1.55      pk        600:        rm = &pm->pm_regmap[VA_VREG(va)];
                    601:        sm = &rm->rg_segmap[VA_VSEG(va)];
1.1       deraadt   602:
1.55      pk        603: #ifdef DEBUG
1.100     pk        604:        if (rm->rg_segmap == NULL)
                    605:                panic("setpte4m: no segmap for va %p (rp=%p)",
                    606:                        (caddr_t)va, (caddr_t)rm);
                    607:
                    608:        if (sm->sg_pte == NULL)
                    609:                panic("setpte4m: no pte for va %p (rp=%p, sp=%p)",
                    610:                      (caddr_t)va, rm, sm);
1.55      pk        611: #endif
                    612:        tlb_flush_page(va);
1.72      pk        613:        setpgt4m(sm->sg_pte + VA_SUN4M_VPG(va), pte);
1.55      pk        614: }
1.72      pk        615:
1.100     pk        616: void   pcache_flush __P((caddr_t, caddr_t, int));
                    617: void
                    618: pcache_flush(va, pa, n)
                    619:        caddr_t va, pa;
                    620:        int     n;
                    621: {
1.109     pk        622:        void (*f)__P((int,int)) = cpuinfo.pcache_flush_line;
                    623:
1.100     pk        624:        while ((n -= 4) >= 0)
1.109     pk        625:                (*f)((u_int)va+n, (u_int)pa+n);
1.100     pk        626: }
                    627:
                    628: /*
1.121     pk        629:  * Page table pool back-end.
                    630:  */
1.100     pk        631: void *
1.121     pk        632: pgt_page_alloc(sz, flags, mtype)
                    633:        unsigned long sz;
                    634:        int flags;
                    635:        int mtype;
1.100     pk        636: {
1.121     pk        637:        caddr_t p;
1.100     pk        638:
1.121     pk        639:        p = (caddr_t)uvm_km_kmemalloc(kernel_map, uvm.kernel_object,
1.124     pk        640:                                      (vsize_t)sz, UVM_KMF_NOWAIT);
1.100     pk        641:
1.121     pk        642:        if ((cpuinfo.flags & CPUFLG_CACHEPAGETABLES) == 0) {
                    643:                pcache_flush(p, (caddr_t)VA2PA(p), sz);
                    644:                kvm_uncache(p, sz/NBPG);
1.100     pk        645:        }
                    646:        return (p);
1.121     pk        647: }
                    648:
1.100     pk        649: void
1.121     pk        650: pgt_page_free(v, sz, mtype)
                    651:        void *v;
                    652:        unsigned long sz;
                    653:        int mtype;
1.100     pk        654: {
1.124     pk        655:        uvm_km_free(kernel_map, (vaddr_t)v, sz);
1.100     pk        656: }
1.55      pk        657: #endif /* 4m only */
1.1       deraadt   658:
                    659: /*----------------------------------------------------------------*/
                    660:
1.72      pk        661: /*
                    662:  * The following three macros are to be used in sun4/sun4c code only.
                    663:  */
1.69      pk        664: #if defined(SUN4_MMU3L)
                    665: #define CTX_USABLE(pm,rp) (                                    \
1.72      pk        666:                ((pm)->pm_ctx != NULL &&                        \
                    667:                 (!HASSUN4_MMU3L || (rp)->rg_smeg != reginval)) \
1.69      pk        668: )
1.43      pk        669: #else
1.55      pk        670: #define CTX_USABLE(pm,rp)      ((pm)->pm_ctx != NULL )
1.43      pk        671: #endif
                    672:
1.55      pk        673: #define GAP_WIDEN(pm,vr) do if (CPU_ISSUN4OR4C) {      \
                    674:        if (vr + 1 == pm->pm_gap_start)                 \
                    675:                pm->pm_gap_start = vr;                  \
                    676:        if (vr == pm->pm_gap_end)                       \
                    677:                pm->pm_gap_end = vr + 1;                \
1.43      pk        678: } while (0)
                    679:
1.55      pk        680: #define GAP_SHRINK(pm,vr) do if (CPU_ISSUN4OR4C) {                     \
1.124     pk        681:        int x;                                                          \
1.43      pk        682:        x = pm->pm_gap_start + (pm->pm_gap_end - pm->pm_gap_start) / 2; \
                    683:        if (vr > x) {                                                   \
                    684:                if (vr < pm->pm_gap_end)                                \
                    685:                        pm->pm_gap_end = vr;                            \
                    686:        } else {                                                        \
                    687:                if (vr >= pm->pm_gap_start && x != pm->pm_gap_start)    \
                    688:                        pm->pm_gap_start = vr + 1;                      \
                    689:        }                                                               \
                    690: } while (0)
                    691:
1.72      pk        692:
1.122     pk        693: static void get_phys_mem __P((void));
1.53      christos  694: static void sortm __P((struct memarr *, int));
                    695: void   pv_flushcache __P((struct pvlist *));
                    696: void   kvm_iocache __P((caddr_t, int));
1.122     pk        697:
1.53      christos  698: #ifdef DEBUG
                    699: void   pm_check __P((char *, struct pmap *));
                    700: void   pm_check_k __P((char *, struct pmap *));
                    701: void   pm_check_u __P((char *, struct pmap *));
                    702: #endif
                    703:
                    704:
1.2       deraadt   705: /*
1.122     pk        706:  * Grab physical memory list and use it to compute `physmem' and
1.136     pk        707:  * `avail_end'. The latter is used in conjunction with
1.122     pk        708:  * `avail_start' to dispatch left-over physical pages to the
                    709:  * VM system.
                    710:  */
                    711: void
                    712: get_phys_mem()
                    713: {
                    714:        struct memarr *mp;
                    715:        int i;
                    716:
                    717:        npmemarr = makememarr(pmemarr, MA_SIZE, MEMARR_AVAILPHYS);
                    718:        sortm(pmemarr, npmemarr);
1.136     pk        719:        if (pmemarr[0].addr != 0)
                    720:                panic("pmap_bootstrap: no memory?!");
                    721:
1.122     pk        722:        avail_end = pmemarr[npmemarr-1].addr + pmemarr[npmemarr-1].len;
                    723:        for (physmem = 0, mp = pmemarr, i = npmemarr; --i >= 0; mp++)
                    724:                physmem += btoc(mp->len);
                    725: }
                    726:
                    727: /*
1.2       deraadt   728:  * Sort a memory array by address.
                    729:  */
                    730: static void
                    731: sortm(mp, n)
1.124     pk        732:        struct memarr *mp;
                    733:        int n;
1.2       deraadt   734: {
1.124     pk        735:        struct memarr *mpj;
                    736:        int i, j;
                    737:        paddr_t addr;
                    738:        psize_t len;
1.2       deraadt   739:
                    740:        /* Insertion sort.  This is O(n^2), but so what? */
                    741:        for (i = 1; i < n; i++) {
                    742:                /* save i'th entry */
                    743:                addr = mp[i].addr;
                    744:                len = mp[i].len;
                    745:                /* find j such that i'th entry goes before j'th */
                    746:                for (j = 0, mpj = mp; j < i; j++, mpj++)
                    747:                        if (addr < mpj->addr)
                    748:                                break;
                    749:                /* slide up any additional entries */
1.138     perry     750:                memmove(mpj + 1, mpj, (i - j) * sizeof(*mp));
1.2       deraadt   751:                mpj->addr = addr;
                    752:                mpj->len = len;
                    753:        }
                    754: }
                    755:
1.29      pk        756: /*
1.106     thorpej   757:  * Support functions for vm_page_bootstrap().
1.29      pk        758:  */
                    759:
                    760: /*
                    761:  * How much virtual space does this kernel have?
                    762:  * (After mapping kernel text, data, etc.)
                    763:  */
                    764: void
                    765: pmap_virtual_space(v_start, v_end)
1.124     pk        766:         vaddr_t *v_start;
                    767:         vaddr_t *v_end;
1.29      pk        768: {
                    769:         *v_start = virtual_avail;
                    770:         *v_end   = virtual_end;
                    771: }
                    772:
                    773: /*
1.107     pk        774:  * Helper routine that hands off available physical pages to the VM system.
1.29      pk        775:  */
1.107     pk        776: static void
                    777: pmap_page_upload()
1.29      pk        778: {
1.124     pk        779:        int     n = 0;
1.163     pk        780:        paddr_t start, end;
1.29      pk        781:
1.107     pk        782:        for (n = 0; n < npmemarr; n++) {
                    783:                /*
1.163     pk        784:                 * Assume `avail_start' is always in the first segment; we
1.107     pk        785:                 * already made that assumption in pmap_bootstrap().
                    786:                 */
1.163     pk        787:                start = (n == 0) ? avail_start : pmemarr[n].addr;
1.107     pk        788:                end = pmemarr[n].addr + pmemarr[n].len;
                    789:                if (start == end)
                    790:                        continue;
1.29      pk        791:
1.110     mrg       792:                uvm_page_physload(
                    793:                        atop(start),
                    794:                        atop(end),
                    795:                        atop(start),
1.120     thorpej   796:                        atop(end), VM_FREELIST_DEFAULT);
1.29      pk        797:        }
                    798: }
                    799:
1.39      pk        800: int
                    801: pmap_pa_exists(pa)
1.124     pk        802:        paddr_t pa;
1.39      pk        803: {
1.124     pk        804:        int nmem;
                    805:        struct memarr *mp;
1.39      pk        806:
                    807:        for (mp = pmemarr, nmem = npmemarr; --nmem >= 0; mp++) {
                    808:                if (pa >= mp->addr && pa < mp->addr + mp->len)
                    809:                        return 1;
                    810:        }
                    811:
                    812:        return 0;
                    813: }
1.29      pk        814:
1.1       deraadt   815: /* update pv_flags given a valid pte */
1.55      pk        816: #define        MR4_4C(pte) (((pte) >> PG_M_SHIFT) & (PV_MOD | PV_REF))
                    817: #define MR4M(pte) (((pte) >> PG_M_SHIFT4M) & (PV_MOD4M | PV_REF4M))
1.1       deraadt   818:
                    819: /*----------------------------------------------------------------*/
                    820:
                    821: /*
                    822:  * Agree with the monitor ROM as to how many MMU entries are
                    823:  * to be reserved, and map all of its segments into all contexts.
                    824:  *
                    825:  * Unfortunately, while the Version 0 PROM had a nice linked list of
                    826:  * taken virtual memory, the Version 2 PROM provides instead a convoluted
                    827:  * description of *free* virtual memory.  Rather than invert this, we
                    828:  * resort to two magic constants from the PROM vector description file.
                    829:  */
1.55      pk        830: #if defined(SUN4) || defined(SUN4C)
1.43      pk        831: void
1.58      pk        832: mmu_reservemon4_4c(nrp, nsp)
1.124     pk        833:        int *nrp, *nsp;
1.1       deraadt   834: {
1.124     pk        835:        u_int va = 0, eva = 0;
                    836:        int mmuseg, i, nr, ns, vr, lastvr;
1.69      pk        837: #if defined(SUN4_MMU3L)
1.124     pk        838:        int mmureg;
1.53      christos  839: #endif
1.124     pk        840:        struct regmap *rp;
1.1       deraadt   841:
1.55      pk        842: #if defined(SUN4M)
                    843:        if (CPU_ISSUN4M) {
1.81      pk        844:                panic("mmu_reservemon4_4c called on Sun4M machine");
1.55      pk        845:                return;
                    846:        }
                    847: #endif
                    848:
1.20      deraadt   849: #if defined(SUN4)
1.55      pk        850:        if (CPU_ISSUN4) {
1.29      pk        851:                prom_vstart = va = OLDMON_STARTVADDR;
                    852:                prom_vend = eva = OLDMON_ENDVADDR;
1.20      deraadt   853:        }
                    854: #endif
                    855: #if defined(SUN4C)
1.55      pk        856:        if (CPU_ISSUN4C) {
1.29      pk        857:                prom_vstart = va = OPENPROM_STARTVADDR;
                    858:                prom_vend = eva = OPENPROM_ENDVADDR;
1.19      deraadt   859:        }
1.20      deraadt   860: #endif
1.43      pk        861:        ns = *nsp;
                    862:        nr = *nrp;
                    863:        lastvr = 0;
1.1       deraadt   864:        while (va < eva) {
1.43      pk        865:                vr = VA_VREG(va);
                    866:                rp = &pmap_kernel()->pm_regmap[vr];
                    867:
1.69      pk        868: #if defined(SUN4_MMU3L)
                    869:                if (HASSUN4_MMU3L && vr != lastvr) {
1.43      pk        870:                        lastvr = vr;
                    871:                        mmureg = getregmap(va);
                    872:                        if (mmureg < nr)
                    873:                                rp->rg_smeg = nr = mmureg;
                    874:                        /*
                    875:                         * On 3-level MMU machines, we distribute regions,
                    876:                         * rather than segments, amongst the contexts.
                    877:                         */
                    878:                        for (i = ncontext; --i > 0;)
1.137     pk        879:                                prom_setcontext(i, (caddr_t)va, mmureg);
1.43      pk        880:                }
                    881: #endif
1.1       deraadt   882:                mmuseg = getsegmap(va);
1.43      pk        883:                if (mmuseg < ns)
                    884:                        ns = mmuseg;
1.69      pk        885:
                    886:                if (!HASSUN4_MMU3L)
1.43      pk        887:                        for (i = ncontext; --i > 0;)
1.137     pk        888:                                prom_setcontext(i, (caddr_t)va, mmuseg);
1.43      pk        889:
1.1       deraadt   890:                if (mmuseg == seginval) {
                    891:                        va += NBPSG;
                    892:                        continue;
                    893:                }
1.43      pk        894:                /*
                    895:                 * Another PROM segment. Enter into region map.
                    896:                 * Assume the entire segment is valid.
                    897:                 */
                    898:                rp->rg_nsegmap += 1;
                    899:                rp->rg_segmap[VA_VSEG(va)].sg_pmeg = mmuseg;
                    900:                rp->rg_segmap[VA_VSEG(va)].sg_npte = NPTESG;
                    901:
1.1       deraadt   902:                /* PROM maps its memory user-accessible: fix it. */
                    903:                for (i = NPTESG; --i >= 0; va += NBPG)
1.55      pk        904:                        setpte4(va, getpte4(va) | PG_S);
1.1       deraadt   905:        }
1.43      pk        906:        *nsp = ns;
                    907:        *nrp = nr;
                    908:        return;
1.1       deraadt   909: }
1.55      pk        910: #endif
                    911:
                    912: #if defined(SUN4M) /* Sun4M versions of above */
                    913:
1.97      pk        914: u_long
                    915: srmmu_bypass_read(paddr)
                    916:        u_long  paddr;
                    917: {
                    918:        unsigned long v;
                    919:
1.158     pk        920:        if (cpuinfo.mxcc) {
1.97      pk        921:                /*
                    922:                 * We're going to have to use MMU passthrough. If we're on
1.158     pk        923:                 * a Viking SuperSPARC with a MultiCache Controller, we
                    924:                 * need to set the AC (Alternate Cacheable) bit in the MMU's
                     925:  * control register in order not to bypass the cache.
1.97      pk        926:                 */
                    927:
1.158     pk        928:                unsigned long s = lda(SRMMU_PCR, ASI_SRMMU);
1.97      pk        929:
                    930:                /* set MMU AC bit */
                    931:                sta(SRMMU_PCR, ASI_SRMMU, s | VIKING_PCR_AC);
                    932:                v = lda(paddr, ASI_BYPASS);
                    933:                sta(SRMMU_PCR, ASI_SRMMU, s);
                    934:        } else
                    935:                v = lda(paddr, ASI_BYPASS);
                    936:
                    937:        return (v);
                    938: }
                    939:
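                         /*
                          * Illustrative use (a sketch only; `ptd' and `i' are hypothetical
                          * names): given a page table descriptor `ptd' read from a parent
                          * table, the table it points to can be walked physically, as the
                          * routines below do:
                          *
                          *	pa = (ptd & ~SRMMU_TETYPE) << SRMMU_PPNPASHIFT;
                          *	te = srmmu_bypass_read(pa + i * sizeof(long));
                          */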
                    940:
1.55      pk        941: /*
                    942:  * Take the monitor's initial page table layout, convert it to 3rd-level pte's
                     943:  * (it starts out as an L1 mapping), and install it along with a set of kernel
                    944:  * mapping tables as the kernel's initial page table setup. Also create and
                    945:  * enable a context table. I suppose we also want to block user-mode access
                    946:  * to the new kernel/ROM mappings.
                    947:  */
                    948:
1.58      pk        949: /*
                    950:  * mmu_reservemon4m(): Copies the existing (ROM) page tables to kernel space,
1.55      pk        951:  * converting any L1/L2 PTEs to L3 PTEs. Does *not* copy the L1 entry mapping
1.96      pk        952:  * the kernel at KERNBASE since we don't want to map 16M of physical
                    953:  * memory for the kernel. Thus the kernel must be installed later!
1.55      pk        954:  * Also installs ROM mappings into the kernel pmap.
                    955:  * NOTE: This also revokes all user-mode access to the mapped regions.
                    956:  */
                    957: void
1.77      pk        958: mmu_reservemon4m(kpmap)
1.55      pk        959:        struct pmap *kpmap;
                    960: {
1.71      pk        961:        unsigned int rom_ctxtbl;
1.124     pk        962:        int te;
1.55      pk        963:
1.97      pk        964:        prom_vstart = OPENPROM_STARTVADDR;
                    965:        prom_vend = OPENPROM_ENDVADDR;
1.55      pk        966:
                    967:        /*
                    968:         * XXX: although the Sun4M can handle 36 bits of physical
                     969:         * address space, we assume that all these page tables, etc.
                     970:         * are in the lower 4GB (32 bits) of address space, i.e. outside I/O
                    971:         * space. Eventually this should be changed to support the 36 bit
                    972:         * physical addressing, in case some crazed ROM designer decides to
                    973:         * stick the pagetables up there. In that case, we should use MMU
                    974:         * transparent mode, (i.e. ASI 0x20 to 0x2f) to access
                    975:         * physical memory.
                    976:         */
                    977:
1.71      pk        978:        rom_ctxtbl = (lda(SRMMU_CXTPTR,ASI_SRMMU) << SRMMU_PPNPASHIFT);
1.55      pk        979:
1.97      pk        980:        te = srmmu_bypass_read(rom_ctxtbl);     /* i.e. context 0 */
1.69      pk        981:
1.55      pk        982:        switch (te & SRMMU_TETYPE) {
1.62      pk        983:        case SRMMU_TEINVALID:
1.69      pk        984:                cpuinfo.ctx_tbl[0] = SRMMU_TEINVALID;
1.77      pk        985:                panic("mmu_reservemon4m: no existing L0 mapping! "
                     986:                      "(How are we running?)");
1.55      pk        987:                break;
1.62      pk        988:        case SRMMU_TEPTE:
1.55      pk        989: #ifdef DEBUG
1.66      christos  990:                printf("mmu_reservemon4m: trying to remap 4G segment!\n");
1.55      pk        991: #endif
                    992:                panic("mmu_reservemon4m: can't handle ROM 4G page size");
                    993:                /* XXX: Should make this work, however stupid it is */
                    994:                break;
1.62      pk        995:        case SRMMU_TEPTD:
1.71      pk        996:                mmu_setup4m_L1(te, kpmap);
1.55      pk        997:                break;
1.62      pk        998:        default:
1.55      pk        999:                panic("mmu_reservemon4m: unknown pagetable entry type");
                   1000:        }
                   1001: }
                   1002:
                   1003: void
1.71      pk       1004: mmu_setup4m_L1(regtblptd, kpmap)
1.55      pk       1005:        int regtblptd;          /* PTD for region table to be remapped */
                   1006:        struct pmap *kpmap;
                   1007: {
1.124     pk       1008:        unsigned int regtblrover;
                   1009:        int i;
1.55      pk       1010:        unsigned int te;
1.71      pk       1011:        struct regmap *rp;
1.55      pk       1012:        int j, k;
                   1013:
1.69      pk       1014:        /*
                   1015:         * Here we scan the region table to copy any entries which appear.
1.55      pk       1016:         * We are only concerned with regions in kernel space and above
1.96      pk       1017:         * (i.e. regions VA_VREG(KERNBASE)+1 to 0xff). We ignore the first
                   1018:         * region (at VA_VREG(KERNBASE)), since that is the 16MB L1 mapping
                   1019:         * that the ROM used to map the kernel in initially. Later, we will
                   1020:         * rebuild a new L3 mapping for the kernel and install it before
                   1021:         * switching to the new pagetables.
1.55      pk       1022:         */
1.71      pk       1023:        regtblrover =
                   1024:                ((regtblptd & ~SRMMU_TETYPE) << SRMMU_PPNPASHIFT) +
                   1025:                (VA_VREG(KERNBASE)+1) * sizeof(long);   /* kernel only */
1.55      pk       1026:
                   1027:        for (i = VA_VREG(KERNBASE) + 1; i < SRMMU_L1SIZE;
                   1028:             i++, regtblrover += sizeof(long)) {
1.71      pk       1029:
                   1030:                /* The region we're dealing with */
                   1031:                rp = &kpmap->pm_regmap[i];
                   1032:
1.97      pk       1033:                te = srmmu_bypass_read(regtblrover);
1.55      pk       1034:                switch(te & SRMMU_TETYPE) {
1.62      pk       1035:                case SRMMU_TEINVALID:
1.55      pk       1036:                        break;
1.71      pk       1037:
1.62      pk       1038:                case SRMMU_TEPTE:
1.55      pk       1039: #ifdef DEBUG
1.81      pk       1040:                        printf("mmu_setup4m_L1: "
1.77      pk       1041:                               "converting region 0x%x from L1->L3\n", i);
1.55      pk       1042: #endif
1.71      pk       1043:                        /*
                   1044:                         * This region entry covers 64MB of memory -- or
                   1045:                         * (NSEGRG * NPTESG) pages -- which we must convert
                   1046:                         * into a 3-level description.
1.55      pk       1047:                         */
1.71      pk       1048:
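                                                 /*
                                                  * Each resulting L3 PTE keeps the L1 entry's physical
                                                  * page number and page/protection bits, adds the
                                                  * segment (j) and page (k) indices, and forces
                                                  * supervisor-only access via PPROT_U2S_OMASK.
                                                  */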
1.55      pk       1049:                        for (j = 0; j < SRMMU_L2SIZE; j++) {
1.71      pk       1050:                                struct segmap *sp = &rp->rg_segmap[j];
1.55      pk       1051:
                   1052:                                for (k = 0; k < SRMMU_L3SIZE; k++) {
1.71      pk       1053:                                        sp->sg_npte++;
1.97      pk       1054:                                        setpgt4m(&sp->sg_pte[k],
                   1055:                                                (te & SRMMU_L1PPNMASK) |
                   1056:                                                (j << SRMMU_L2PPNSHFT) |
                   1057:                                                (k << SRMMU_L3PPNSHFT) |
                   1058:                                                (te & SRMMU_PGBITSMSK) |
                   1059:                                                ((te & SRMMU_PROT_MASK) |
                   1060:                                                 PPROT_U2S_OMASK) |
                   1061:                                                SRMMU_TEPTE);
1.55      pk       1062:                                }
                   1063:                        }
                   1064:                        break;
1.71      pk       1065:
1.62      pk       1066:                case SRMMU_TEPTD:
1.71      pk       1067:                        mmu_setup4m_L2(te, rp);
1.55      pk       1068:                        break;
1.71      pk       1069:
1.62      pk       1070:                default:
1.55      pk       1071:                        panic("mmu_setup4m_L1: unknown pagetable entry type");
                   1072:                }
                   1073:        }
                   1074: }
                   1075:
                   1076: void
1.71      pk       1077: mmu_setup4m_L2(segtblptd, rp)
1.55      pk       1078:        int segtblptd;
1.71      pk       1079:        struct regmap *rp;
1.55      pk       1080: {
1.124     pk       1081:        unsigned int segtblrover;
                   1082:        int i, k;
1.55      pk       1083:        unsigned int te;
1.71      pk       1084:        struct segmap *sp;
1.55      pk       1085:
                   1086:        segtblrover = (segtblptd & ~SRMMU_TETYPE) << SRMMU_PPNPASHIFT;
                   1087:        for (i = 0; i < SRMMU_L2SIZE; i++, segtblrover += sizeof(long)) {
1.71      pk       1088:
                   1089:                sp = &rp->rg_segmap[i];
                   1090:
1.97      pk       1091:                te = srmmu_bypass_read(segtblrover);
1.55      pk       1092:                switch(te & SRMMU_TETYPE) {
1.62      pk       1093:                case SRMMU_TEINVALID:
1.55      pk       1094:                        break;
1.71      pk       1095:
1.62      pk       1096:                case SRMMU_TEPTE:
1.55      pk       1097: #ifdef DEBUG
1.81      pk       1098:                        printf("mmu_setup4m_L2: converting L2 entry at segment 0x%x to L3\n",i);
1.55      pk       1099: #endif
1.71      pk       1100:                        /*
                   1101:                         * This segment entry covers 256KB of memory -- or
                   1102:                         * (NPTESG) pages -- which we must convert
                   1103:                         * into a 3-level description.
                   1104:                         */
1.55      pk       1105:                        for (k = 0; k < SRMMU_L3SIZE; k++) {
1.71      pk       1106:                                sp->sg_npte++;
1.97      pk       1107:                                setpgt4m(&sp->sg_pte[k],
                   1108:                                        (te & SRMMU_L1PPNMASK) |
                   1109:                                        (te & SRMMU_L2PPNMASK) |
                   1110:                                        (k << SRMMU_L3PPNSHFT) |
                   1111:                                        (te & SRMMU_PGBITSMSK) |
                   1112:                                        ((te & SRMMU_PROT_MASK) |
                   1113:                                         PPROT_U2S_OMASK) |
                   1114:                                        SRMMU_TEPTE);
1.55      pk       1115:                        }
                   1116:                        break;
1.71      pk       1117:
1.62      pk       1118:                case SRMMU_TEPTD:
1.71      pk       1119:                        mmu_setup4m_L3(te, sp);
1.55      pk       1120:                        break;
1.71      pk       1121:
1.62      pk       1122:                default:
1.55      pk       1123:                        panic("mmu_setup4m_L2: unknown pagetable entry type");
                   1124:                }
                   1125:        }
                   1126: }
                   1127:
1.71      pk       1128: void
                   1129: mmu_setup4m_L3(pagtblptd, sp)
1.124     pk       1130:        int pagtblptd;
1.71      pk       1131:        struct segmap *sp;
1.55      pk       1132: {
1.124     pk       1133:        unsigned int pagtblrover;
                   1134:        int i;
                   1135:        unsigned int te;
1.55      pk       1136:
                   1137:        pagtblrover = (pagtblptd & ~SRMMU_TETYPE) << SRMMU_PPNPASHIFT;
                   1138:        for (i = 0; i < SRMMU_L3SIZE; i++, pagtblrover += sizeof(long)) {
1.97      pk       1139:                te = srmmu_bypass_read(pagtblrover);
1.55      pk       1140:                switch(te & SRMMU_TETYPE) {
1.62      pk       1141:                case SRMMU_TEINVALID:
1.55      pk       1142:                        break;
1.62      pk       1143:                case SRMMU_TEPTE:
1.71      pk       1144:                        sp->sg_npte++;
1.97      pk       1145:                        setpgt4m(&sp->sg_pte[i], te | PPROT_U2S_OMASK);
1.55      pk       1146:                        break;
1.62      pk       1147:                case SRMMU_TEPTD:
1.55      pk       1148:                        panic("mmu_setup4m_L3: PTD found in L3 page table");
1.62      pk       1149:                default:
1.55      pk       1150:                        panic("mmu_setup4m_L3: unknown pagetable entry type");
                   1151:                }
                   1152:        }
                   1153: }
                   1154: #endif /* defined SUN4M */
1.1       deraadt  1155:
                   1156: /*----------------------------------------------------------------*/
                   1157:
                   1158: /*
                   1159:  * MMU management.
                   1160:  */
1.43      pk       1161: struct mmuentry *me_alloc __P((struct mmuhd *, struct pmap *, int, int));
                   1162: void           me_free __P((struct pmap *, u_int));
                   1163: struct mmuentry        *region_alloc __P((struct mmuhd *, struct pmap *, int));
                   1164: void           region_free __P((struct pmap *, u_int));
1.1       deraadt  1165:
                   1166: /*
                   1167:  * Change contexts.  We need the old context number as well as the new
                   1168:  * one.  If the context is changing, we must write all user windows
                   1169:  * first, lest an interrupt cause them to be written to the (other)
                   1170:  * user whose context we set here.
                   1171:  */
                   1172: #define        CHANGE_CONTEXTS(old, new) \
                   1173:        if ((old) != (new)) { \
                   1174:                write_user_windows(); \
                   1175:                setcontext(new); \
                   1176:        }
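                         /*
                          * Typical use, as in me_alloc() below (a sketch, not a new interface):
                          *
                          *	ctx = getcontext4();
                          *	CHANGE_CONTEXTS(ctx, pm->pm_ctxnum);
                          *	... operate on the pmap's mappings ...
                          *	setcontext4(ctx);
                          */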
                   1177:
1.55      pk       1178: #if defined(SUN4) || defined(SUN4C) /* This is old sun MMU stuff */
1.1       deraadt  1179: /*
                   1180:  * Allocate an MMU entry (i.e., a PMEG).
                   1181:  * If necessary, steal one from someone else.
                   1182:  * Put it on the tail of the given queue
                   1183:  * (which is either the LRU list or the locked list).
                   1184:  * The locked list is not actually ordered, but this is easiest.
                   1185:  * Also put it on the given (new) pmap's chain,
                   1186:  * enter its pmeg number into that pmap's segmap,
                   1187:  * and store the pmeg's new virtual segment number (me->me_vseg).
                   1188:  *
                   1189:  * This routine is large and complicated, but it must be fast
                   1190:  * since it implements the dynamic allocation of MMU entries.
                   1191:  */
                   1192: struct mmuentry *
1.43      pk       1193: me_alloc(mh, newpm, newvreg, newvseg)
1.124     pk       1194:        struct mmuhd *mh;
                   1195:        struct pmap *newpm;
                   1196:        int newvreg, newvseg;
                   1197: {
                   1198:        struct mmuentry *me;
                   1199:        struct pmap *pm;
                   1200:        int i, va, pa, *pte, tpte;
1.1       deraadt  1201:        int ctx;
1.43      pk       1202:        struct regmap *rp;
                   1203:        struct segmap *sp;
1.1       deraadt  1204:
                   1205:        /* try free list first */
1.43      pk       1206:        if ((me = segm_freelist.tqh_first) != NULL) {
                   1207:                TAILQ_REMOVE(&segm_freelist, me, me_list);
1.1       deraadt  1208: #ifdef DEBUG
                   1209:                if (me->me_pmap != NULL)
                   1210:                        panic("me_alloc: freelist entry has pmap");
                   1211:                if (pmapdebug & PDB_MMU_ALLOC)
1.66      christos 1212:                        printf("me_alloc: got pmeg %d\n", me->me_cookie);
1.1       deraadt  1213: #endif
1.43      pk       1214:                TAILQ_INSERT_TAIL(mh, me, me_list);
1.1       deraadt  1215:
                    1216:                /* onto new pmap chain; pmap is already locked, if needed */
1.43      pk       1217:                TAILQ_INSERT_TAIL(&newpm->pm_seglist, me, me_pmchain);
1.70      pk       1218: #ifdef DIAGNOSTIC
                   1219:                pmap_stats.ps_npmeg_free--;
                   1220:                if (mh == &segm_locked)
                   1221:                        pmap_stats.ps_npmeg_locked++;
                   1222:                else
                   1223:                        pmap_stats.ps_npmeg_lru++;
                   1224: #endif
1.1       deraadt  1225:
                   1226:                /* into pmap segment table, with backpointers */
1.43      pk       1227:                newpm->pm_regmap[newvreg].rg_segmap[newvseg].sg_pmeg = me->me_cookie;
1.1       deraadt  1228:                me->me_pmap = newpm;
                   1229:                me->me_vseg = newvseg;
1.43      pk       1230:                me->me_vreg = newvreg;
1.1       deraadt  1231:
                   1232:                return (me);
                   1233:        }
                   1234:
                   1235:        /* no luck, take head of LRU list */
1.43      pk       1236:        if ((me = segm_lru.tqh_first) == NULL)
1.1       deraadt  1237:                panic("me_alloc: all pmegs gone");
1.43      pk       1238:
1.1       deraadt  1239:        pm = me->me_pmap;
                   1240:        if (pm == NULL)
                   1241:                panic("me_alloc: LRU entry has no pmap");
1.42      mycroft  1242:        if (pm == pmap_kernel())
1.1       deraadt  1243:                panic("me_alloc: stealing from kernel");
1.12      pk       1244: #ifdef DEBUG
1.1       deraadt  1245:        if (pmapdebug & (PDB_MMU_ALLOC | PDB_MMU_STEAL))
1.91      fair     1246:                printf("me_alloc: stealing pmeg 0x%x from pmap %p\n",
1.43      pk       1247:                    me->me_cookie, pm);
1.1       deraadt  1248: #endif
                   1249:        /*
                   1250:         * Remove from LRU list, and insert at end of new list
                   1251:         * (probably the LRU list again, but so what?).
                   1252:         */
1.43      pk       1253:        TAILQ_REMOVE(&segm_lru, me, me_list);
                   1254:        TAILQ_INSERT_TAIL(mh, me, me_list);
                   1255:
1.70      pk       1256: #ifdef DIAGNOSTIC
                   1257:        if (mh == &segm_locked) {
                   1258:                pmap_stats.ps_npmeg_lru--;
                   1259:                pmap_stats.ps_npmeg_locked++;
                   1260:        }
                   1261: #endif
                   1262:
1.43      pk       1263:        rp = &pm->pm_regmap[me->me_vreg];
                   1264:        if (rp->rg_segmap == NULL)
                   1265:                panic("me_alloc: LRU entry's pmap has no segments");
                   1266:        sp = &rp->rg_segmap[me->me_vseg];
                   1267:        pte = sp->sg_pte;
                   1268:        if (pte == NULL)
                   1269:                panic("me_alloc: LRU entry's pmap has no ptes");
1.1       deraadt  1270:
                   1271:        /*
                   1272:         * The PMEG must be mapped into some context so that we can
                   1273:         * read its PTEs.  Use its current context if it has one;
                   1274:         * if not, and since context 0 is reserved for the kernel,
                   1275:         * the simplest method is to switch to 0 and map the PMEG
                   1276:         * to virtual address 0---which, being a user space address,
                   1277:         * is by definition not in use.
                   1278:         *
                   1279:         * XXX for ncpus>1 must use per-cpu VA?
                   1280:         * XXX do not have to flush cache immediately
                   1281:         */
1.71      pk       1282:        ctx = getcontext4();
1.43      pk       1283:        if (CTX_USABLE(pm,rp)) {
1.1       deraadt  1284:                CHANGE_CONTEXTS(ctx, pm->pm_ctxnum);
1.69      pk       1285:                cache_flush_segment(me->me_vreg, me->me_vseg);
1.43      pk       1286:                va = VSTOVA(me->me_vreg,me->me_vseg);
1.1       deraadt  1287:        } else {
                   1288:                CHANGE_CONTEXTS(ctx, 0);
1.69      pk       1289:                if (HASSUN4_MMU3L)
1.43      pk       1290:                        setregmap(0, tregion);
                   1291:                setsegmap(0, me->me_cookie);
1.1       deraadt  1292:                /*
                   1293:                 * No cache flush needed: it happened earlier when
                   1294:                 * the old context was taken.
                   1295:                 */
                   1296:                va = 0;
                   1297:        }
                   1298:
                   1299:        /*
                   1300:         * Record reference and modify bits for each page,
                   1301:         * and copy PTEs into kernel memory so that they can
                   1302:         * be reloaded later.
                   1303:         */
                   1304:        i = NPTESG;
                   1305:        do {
1.55      pk       1306:                tpte = getpte4(va);
1.33      pk       1307:                if ((tpte & (PG_V | PG_TYPE)) == (PG_V | PG_OBMEM)) {
1.60      pk       1308:                        pa = ptoa(tpte & PG_PFNUM);
1.1       deraadt  1309:                        if (managed(pa))
1.55      pk       1310:                                pvhead(pa)->pv_flags |= MR4_4C(tpte);
1.1       deraadt  1311:                }
                   1312:                *pte++ = tpte & ~(PG_U|PG_M);
                   1313:                va += NBPG;
                   1314:        } while (--i > 0);
                   1315:
                   1316:        /* update segment tables */
                   1317:        simple_lock(&pm->pm_lock); /* what if other cpu takes mmuentry ?? */
1.43      pk       1318:        if (CTX_USABLE(pm,rp))
                   1319:                setsegmap(VSTOVA(me->me_vreg,me->me_vseg), seginval);
                   1320:        sp->sg_pmeg = seginval;
1.1       deraadt  1321:
                   1322:        /* off old pmap chain */
1.43      pk       1323:        TAILQ_REMOVE(&pm->pm_seglist, me, me_pmchain);
1.1       deraadt  1324:        simple_unlock(&pm->pm_lock);
1.71      pk       1325:        setcontext4(ctx);       /* done with old context */
1.1       deraadt  1326:
                   1327:        /* onto new pmap chain; new pmap is already locked, if needed */
1.43      pk       1328:        TAILQ_INSERT_TAIL(&newpm->pm_seglist, me, me_pmchain);
1.1       deraadt  1329:
                   1330:        /* into new segment table, with backpointers */
1.43      pk       1331:        newpm->pm_regmap[newvreg].rg_segmap[newvseg].sg_pmeg = me->me_cookie;
1.1       deraadt  1332:        me->me_pmap = newpm;
                   1333:        me->me_vseg = newvseg;
1.43      pk       1334:        me->me_vreg = newvreg;
1.1       deraadt  1335:
                   1336:        return (me);
                   1337: }
                   1338:
                   1339: /*
                   1340:  * Free an MMU entry.
                   1341:  *
                   1342:  * Assumes the corresponding pmap is already locked.
                   1343:  * Does NOT flush cache, but does record ref and mod bits.
                   1344:  * The rest of each PTE is discarded.
                   1345:  * CALLER MUST SET CONTEXT to pm->pm_ctxnum (if pmap has
                   1346:  * a context) or to 0 (if not).  Caller must also update
                   1347:  * pm->pm_segmap and (possibly) the hardware.
                   1348:  */
                   1349: void
                   1350: me_free(pm, pmeg)
1.124     pk       1351:        struct pmap *pm;
                   1352:        u_int pmeg;
1.1       deraadt  1353: {
1.124     pk       1354:        struct mmuentry *me = &mmusegments[pmeg];
                   1355:        int i, va, pa, tpte;
                   1356:        int vr;
                   1357:        struct regmap *rp;
1.43      pk       1358:
                   1359:        vr = me->me_vreg;
1.1       deraadt  1360:
                   1361: #ifdef DEBUG
                   1362:        if (pmapdebug & PDB_MMU_ALLOC)
1.66      christos 1363:                printf("me_free: freeing pmeg %d from pmap %p\n",
1.43      pk       1364:                    me->me_cookie, pm);
                   1365:        if (me->me_cookie != pmeg)
1.1       deraadt  1366:                panic("me_free: wrong mmuentry");
                   1367:        if (pm != me->me_pmap)
                   1368:                panic("me_free: pm != me_pmap");
                   1369: #endif
                   1370:
1.43      pk       1371:        rp = &pm->pm_regmap[vr];
                   1372:
1.1       deraadt  1373:        /* just like me_alloc, but no cache flush, and context already set */
1.43      pk       1374:        if (CTX_USABLE(pm,rp)) {
                   1375:                va = VSTOVA(vr,me->me_vseg);
                   1376:        } else {
                   1377: #ifdef DEBUG
1.71      pk       1378:                if (getcontext4() != 0) panic("me_free: ctx != 0");
1.43      pk       1379: #endif
1.69      pk       1380:                if (HASSUN4_MMU3L)
1.43      pk       1381:                        setregmap(0, tregion);
                   1382:                setsegmap(0, me->me_cookie);
1.1       deraadt  1383:                va = 0;
                   1384:        }
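                                 /*
                                  * As in me_alloc(): record the ref/mod bits of each page
                                  * in this PMEG before its PTEs are discarded.
                                  */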
                   1385:        i = NPTESG;
                   1386:        do {
1.55      pk       1387:                tpte = getpte4(va);
1.33      pk       1388:                if ((tpte & (PG_V | PG_TYPE)) == (PG_V | PG_OBMEM)) {
1.60      pk       1389:                        pa = ptoa(tpte & PG_PFNUM);
1.1       deraadt  1390:                        if (managed(pa))
1.55      pk       1391:                                pvhead(pa)->pv_flags |= MR4_4C(tpte);
1.1       deraadt  1392:                }
                   1393:                va += NBPG;
                   1394:        } while (--i > 0);
                   1395:
                   1396:        /* take mmu entry off pmap chain */
1.43      pk       1397:        TAILQ_REMOVE(&pm->pm_seglist, me, me_pmchain);
                   1398:        /* ... and remove from segment map */
                   1399:        if (rp->rg_segmap == NULL)
                   1400:                panic("me_free: no segments in pmap");
                   1401:        rp->rg_segmap[me->me_vseg].sg_pmeg = seginval;
                   1402:
                   1403:        /* off LRU or lock chain */
                   1404:        if (pm == pmap_kernel()) {
                   1405:                TAILQ_REMOVE(&segm_locked, me, me_list);
1.70      pk       1406: #ifdef DIAGNOSTIC
                   1407:                pmap_stats.ps_npmeg_locked--;
                   1408: #endif
1.43      pk       1409:        } else {
                   1410:                TAILQ_REMOVE(&segm_lru, me, me_list);
1.70      pk       1411: #ifdef DIAGNOSTIC
                   1412:                pmap_stats.ps_npmeg_lru--;
                   1413: #endif
1.43      pk       1414:        }
                   1415:
                   1416:        /* no associated pmap; on free list */
                   1417:        me->me_pmap = NULL;
                   1418:        TAILQ_INSERT_TAIL(&segm_freelist, me, me_list);
1.70      pk       1419: #ifdef DIAGNOSTIC
                   1420:        pmap_stats.ps_npmeg_free++;
                   1421: #endif
1.43      pk       1422: }
                   1423:
1.69      pk       1424: #if defined(SUN4_MMU3L)
1.43      pk       1425:
                   1426: /* XXX - Merge with segm_alloc/segm_free ? */
                   1427:
                   1428: struct mmuentry *
                   1429: region_alloc(mh, newpm, newvr)
1.124     pk       1430:        struct mmuhd *mh;
                   1431:        struct pmap *newpm;
                   1432:        int newvr;
1.43      pk       1433: {
1.124     pk       1434:        struct mmuentry *me;
                   1435:        struct pmap *pm;
1.43      pk       1436:        int ctx;
                   1437:        struct regmap *rp;
                   1438:
                   1439:        /* try free list first */
                   1440:        if ((me = region_freelist.tqh_first) != NULL) {
                   1441:                TAILQ_REMOVE(&region_freelist, me, me_list);
                   1442: #ifdef DEBUG
                   1443:                if (me->me_pmap != NULL)
                   1444:                        panic("region_alloc: freelist entry has pmap");
                   1445:                if (pmapdebug & PDB_MMUREG_ALLOC)
1.91      fair     1446:                        printf("region_alloc: got smeg 0x%x\n", me->me_cookie);
1.43      pk       1447: #endif
                   1448:                TAILQ_INSERT_TAIL(mh, me, me_list);
                   1449:
                    1450:                /* onto new pmap chain; pmap is already locked, if needed */
                   1451:                TAILQ_INSERT_TAIL(&newpm->pm_reglist, me, me_pmchain);
                   1452:
                   1453:                /* into pmap segment table, with backpointers */
                   1454:                newpm->pm_regmap[newvr].rg_smeg = me->me_cookie;
                   1455:                me->me_pmap = newpm;
                   1456:                me->me_vreg = newvr;
                   1457:
                   1458:                return (me);
                   1459:        }
                   1460:
                   1461:        /* no luck, take head of LRU list */
                   1462:        if ((me = region_lru.tqh_first) == NULL)
                   1463:                panic("region_alloc: all smegs gone");
                   1464:
                   1465:        pm = me->me_pmap;
                   1466:        if (pm == NULL)
                   1467:                panic("region_alloc: LRU entry has no pmap");
                   1468:        if (pm == pmap_kernel())
                   1469:                panic("region_alloc: stealing from kernel");
                   1470: #ifdef DEBUG
                   1471:        if (pmapdebug & (PDB_MMUREG_ALLOC | PDB_MMUREG_STEAL))
1.91      fair     1472:                printf("region_alloc: stealing smeg 0x%x from pmap %p\n",
1.43      pk       1473:                    me->me_cookie, pm);
                   1474: #endif
                   1475:        /*
                   1476:         * Remove from LRU list, and insert at end of new list
                   1477:         * (probably the LRU list again, but so what?).
                   1478:         */
                   1479:        TAILQ_REMOVE(&region_lru, me, me_list);
                   1480:        TAILQ_INSERT_TAIL(mh, me, me_list);
                   1481:
                   1482:        rp = &pm->pm_regmap[me->me_vreg];
1.71      pk       1483:        ctx = getcontext4();
1.43      pk       1484:        if (pm->pm_ctx) {
                   1485:                CHANGE_CONTEXTS(ctx, pm->pm_ctxnum);
1.69      pk       1486:                cache_flush_region(me->me_vreg);
1.43      pk       1487:        }
                   1488:
                   1489:        /* update region tables */
                   1490:        simple_lock(&pm->pm_lock); /* what if other cpu takes mmuentry ?? */
                   1491:        if (pm->pm_ctx)
                   1492:                setregmap(VRTOVA(me->me_vreg), reginval);
                   1493:        rp->rg_smeg = reginval;
                   1494:
                   1495:        /* off old pmap chain */
                   1496:        TAILQ_REMOVE(&pm->pm_reglist, me, me_pmchain);
                   1497:        simple_unlock(&pm->pm_lock);
1.71      pk       1498:        setcontext4(ctx);       /* done with old context */
1.43      pk       1499:
                   1500:        /* onto new pmap chain; new pmap is already locked, if needed */
                   1501:        TAILQ_INSERT_TAIL(&newpm->pm_reglist, me, me_pmchain);
                   1502:
                   1503:        /* into new segment table, with backpointers */
                   1504:        newpm->pm_regmap[newvr].rg_smeg = me->me_cookie;
                   1505:        me->me_pmap = newpm;
                   1506:        me->me_vreg = newvr;
                   1507:
                   1508:        return (me);
                   1509: }
                   1510:
                   1511: /*
                   1512:  * Free an MMU entry.
                   1513:  *
                   1514:  * Assumes the corresponding pmap is already locked.
                    1515:  * Flushes the cache if the pmap still has a context.
                   1516:  * CALLER MUST SET CONTEXT to pm->pm_ctxnum (if pmap has
                   1517:  * a context) or to 0 (if not).  Caller must also update
                   1518:  * pm->pm_regmap and (possibly) the hardware.
                   1519:  */
                   1520: void
                   1521: region_free(pm, smeg)
1.124     pk       1522:        struct pmap *pm;
                   1523:        u_int smeg;
1.43      pk       1524: {
1.124     pk       1525:        struct mmuentry *me = &mmuregions[smeg];
1.43      pk       1526:
                   1527: #ifdef DEBUG
                   1528:        if (pmapdebug & PDB_MMUREG_ALLOC)
1.91      fair     1529:                printf("region_free: freeing smeg 0x%x from pmap %p\n",
1.43      pk       1530:                    me->me_cookie, pm);
                   1531:        if (me->me_cookie != smeg)
                   1532:                panic("region_free: wrong mmuentry");
                   1533:        if (pm != me->me_pmap)
                   1534:                panic("region_free: pm != me_pmap");
                   1535: #endif
                   1536:
                   1537:        if (pm->pm_ctx)
1.69      pk       1538:                cache_flush_region(me->me_vreg);
1.43      pk       1539:
                   1540:        /* take mmu entry off pmap chain */
                   1541:        TAILQ_REMOVE(&pm->pm_reglist, me, me_pmchain);
1.1       deraadt  1542:        /* ... and remove from segment map */
1.43      pk       1543:        pm->pm_regmap[smeg].rg_smeg = reginval;
1.1       deraadt  1544:
                   1545:        /* off LRU or lock chain */
1.43      pk       1546:        if (pm == pmap_kernel()) {
                   1547:                TAILQ_REMOVE(&region_locked, me, me_list);
                   1548:        } else {
                   1549:                TAILQ_REMOVE(&region_lru, me, me_list);
                   1550:        }
1.1       deraadt  1551:
                   1552:        /* no associated pmap; on free list */
                   1553:        me->me_pmap = NULL;
1.43      pk       1554:        TAILQ_INSERT_TAIL(&region_freelist, me, me_list);
1.1       deraadt  1555: }
1.43      pk       1556: #endif
1.1       deraadt  1557:
                   1558: /*
                   1559:  * `Page in' (load or inspect) an MMU entry; called on page faults.
                   1560:  * Returns 1 if we reloaded the segment, -1 if the segment was
                   1561:  * already loaded and the page was marked valid (in which case the
                   1562:  * fault must be a bus error or something), or 0 (segment loaded but
                   1563:  * PTE not valid, or segment not loaded at all).
                   1564:  */
                   1565: int
1.61      pk       1566: mmu_pagein(pm, va, prot)
1.124     pk       1567:        struct pmap *pm;
1.148     pk       1568:        vaddr_t va;
                   1569:        int prot;
1.1       deraadt  1570: {
1.124     pk       1571:        int *pte;
                   1572:        int vr, vs, pmeg, i, s, bits;
1.43      pk       1573:        struct regmap *rp;
                   1574:        struct segmap *sp;
                   1575:
1.148     pk       1576:        if (va >= (unsigned long)KERNBASE)
                   1577:                return (0);
                   1578:
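                                 /*
                                  * Translate the required access into PTE bits; if the resident
                                  * PTE already has all of them set, the fault is `hard' (see the
                                  * check further below).
                                  */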
1.45      pk       1579:        if (prot != VM_PROT_NONE)
                   1580:                bits = PG_V | ((prot & VM_PROT_WRITE) ? PG_W : 0);
                   1581:        else
                   1582:                bits = 0;
                   1583:
1.43      pk       1584:        vr = VA_VREG(va);
                   1585:        vs = VA_VSEG(va);
                   1586:        rp = &pm->pm_regmap[vr];
                   1587: #ifdef DEBUG
1.150     pk       1588:        if (pm == pmap_kernel())
                   1589:        printf("mmu_pagein: kernel wants map at va 0x%lx, vr %d, vs %d\n",
                   1590:                (u_long)va, vr, vs);
1.43      pk       1591: #endif
1.145     pk       1592: #if 0
                   1593: #if defined(SUN4_MMU3L)
                   1594: printf("mmu_pagein: pm=%p, va 0x%x, vr %d, vs %d, rp=%p, segmap=%p\n", pm, va, vr, vs, rp, rp->rg_segmap);
                   1595: #endif
                   1596: #endif
1.43      pk       1597:
                   1598:        /* return 0 if we have no PMEGs to load */
                   1599:        if (rp->rg_segmap == NULL)
                   1600:                return (0);
1.145     pk       1601:
1.69      pk       1602: #if defined(SUN4_MMU3L)
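                                 /*
                                  * On 3-level MMUs, first make sure the region itself is present:
                                  * allocate a region MMU entry and (re)load the segment maps of
                                  * all NSEGRG segments within it.
                                  */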
                   1603:        if (HASSUN4_MMU3L && rp->rg_smeg == reginval) {
1.43      pk       1604:                smeg_t smeg;
                   1605:                unsigned int tva = VA_ROUNDDOWNTOREG(va);
                   1606:                struct segmap *sp = rp->rg_segmap;
                   1607:
                   1608:                s = splpmap();          /* paranoid */
                   1609:                smeg = region_alloc(&region_lru, pm, vr)->me_cookie;
                   1610:                setregmap(tva, smeg);
                   1611:                i = NSEGRG;
                   1612:                do {
                   1613:                        setsegmap(tva, sp++->sg_pmeg);
                   1614:                        tva += NBPSG;
                   1615:                } while (--i > 0);
                   1616:                splx(s);
                   1617:        }
                   1618: #endif
                   1619:        sp = &rp->rg_segmap[vs];
1.1       deraadt  1620:
                   1621:        /* return 0 if we have no PTEs to load */
1.43      pk       1622:        if ((pte = sp->sg_pte) == NULL)
1.1       deraadt  1623:                return (0);
1.43      pk       1624:
1.1       deraadt  1625:        /* return -1 if the fault is `hard', 0 if not */
1.43      pk       1626:        if (sp->sg_pmeg != seginval)
1.55      pk       1627:                return (bits && (getpte4(va) & bits) == bits ? -1 : 0);
1.1       deraadt  1628:
                   1629:        /* reload segment: write PTEs into a new LRU entry */
                   1630:        va = VA_ROUNDDOWNTOSEG(va);
                   1631:        s = splpmap();          /* paranoid */
1.43      pk       1632:        pmeg = me_alloc(&segm_lru, pm, vr, vs)->me_cookie;
1.1       deraadt  1633:        setsegmap(va, pmeg);
                   1634:        i = NPTESG;
                   1635:        do {
1.55      pk       1636:                setpte4(va, *pte++);
1.1       deraadt  1637:                va += NBPG;
                   1638:        } while (--i > 0);
                   1639:        splx(s);
                   1640:        return (1);
                   1641: }
1.55      pk       1642: #endif /* defined SUN4 or SUN4C */
                   1643:
1.1       deraadt  1644: /*
                   1645:  * Allocate a context.  If necessary, steal one from someone else.
                   1646:  * Changes hardware context number and loads segment map.
                   1647:  *
                   1648:  * This routine is only ever called from locore.s just after it has
                   1649:  * saved away the previous process, so there are no active user windows.
                   1650:  */
                   1651: void
                   1652: ctx_alloc(pm)
1.124     pk       1653:        struct pmap *pm;
1.1       deraadt  1654: {
1.124     pk       1655:        union ctxinfo *c;
                   1656:        int s, cnum, i, doflush;
                   1657:        struct regmap *rp;
                   1658:        int gap_start, gap_end;
                   1659:        unsigned long va;
1.1       deraadt  1660:
1.55      pk       1661: /*XXX-GCC!*/gap_start=gap_end=0;
1.1       deraadt  1662: #ifdef DEBUG
                   1663:        if (pm->pm_ctx)
                   1664:                panic("ctx_alloc pm_ctx");
                   1665:        if (pmapdebug & PDB_CTX_ALLOC)
1.66      christos 1666:                printf("ctx_alloc(%p)\n", pm);
1.1       deraadt  1667: #endif
1.55      pk       1668:        if (CPU_ISSUN4OR4C) {
                   1669:                gap_start = pm->pm_gap_start;
                   1670:                gap_end = pm->pm_gap_end;
                   1671:        }
1.13      pk       1672:
1.49      pk       1673:        s = splpmap();
1.1       deraadt  1674:        if ((c = ctx_freelist) != NULL) {
                   1675:                ctx_freelist = c->c_nextfree;
1.69      pk       1676:                cnum = c - cpuinfo.ctxinfo;
1.49      pk       1677:                doflush = 0;
1.1       deraadt  1678:        } else {
                   1679:                if ((ctx_kick += ctx_kickdir) >= ncontext) {
                   1680:                        ctx_kick = ncontext - 1;
                   1681:                        ctx_kickdir = -1;
                   1682:                } else if (ctx_kick < 1) {
                   1683:                        ctx_kick = 1;
                   1684:                        ctx_kickdir = 1;
                   1685:                }
1.69      pk       1686:                c = &cpuinfo.ctxinfo[cnum = ctx_kick];
1.1       deraadt  1687: #ifdef DEBUG
                   1688:                if (c->c_pmap == NULL)
                   1689:                        panic("ctx_alloc cu_pmap");
                   1690:                if (pmapdebug & (PDB_CTX_ALLOC | PDB_CTX_STEAL))
1.66      christos 1691:                        printf("ctx_alloc: steal context %d from %p\n",
1.1       deraadt  1692:                            cnum, c->c_pmap);
                   1693: #endif
                   1694:                c->c_pmap->pm_ctx = NULL;
1.69      pk       1695:                doflush = (CACHEINFO.c_vactype != VAC_NONE);
1.55      pk       1696:                if (CPU_ISSUN4OR4C) {
                   1697:                        if (gap_start < c->c_pmap->pm_gap_start)
                   1698:                                gap_start = c->c_pmap->pm_gap_start;
                   1699:                        if (gap_end > c->c_pmap->pm_gap_end)
                   1700:                                gap_end = c->c_pmap->pm_gap_end;
                   1701:                }
1.1       deraadt  1702:        }
1.49      pk       1703:
1.1       deraadt  1704:        c->c_pmap = pm;
                   1705:        pm->pm_ctx = c;
                   1706:        pm->pm_ctxnum = cnum;
                   1707:
1.55      pk       1708:        if (CPU_ISSUN4OR4C) {
                   1709:                /*
                   1710:                 * Write pmap's region (3-level MMU) or segment table into
                   1711:                 * the MMU.
                   1712:                 *
                   1713:                 * Only write those entries that actually map something in
                   1714:                 * this context by maintaining a pair of region numbers in
                   1715:                 * between which the pmap has no valid mappings.
                   1716:                 *
                   1717:                 * If a context was just allocated from the free list, trust
                   1718:                 * that all its pmeg numbers are `seginval'. We make sure this
                   1719:                 * is the case initially in pmap_bootstrap(). Otherwise, the
                   1720:                 * context was freed by calling ctx_free() in pmap_release(),
                   1721:                 * which in turn is supposedly called only when all mappings
                   1722:                 * have been removed.
                   1723:                 *
                   1724:                 * On the other hand, if the context had to be stolen from
                   1725:                 * another pmap, we possibly shrink the gap to be the
                    1726:                 * disjunction of the new and the previous map.
                   1727:                 */
1.43      pk       1728:
1.80      pk       1729:                setcontext4(cnum);
1.55      pk       1730:                splx(s);
                   1731:                if (doflush)
                   1732:                        cache_flush_context();
1.43      pk       1733:
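                                         /*
                                          * Load the MMU: one region map entry per region on 3-level
                                          * MMUs, otherwise one segment map entry per segment of each
                                          * region; the empty range [gap_start, gap_end) is skipped in
                                          * a single step.
                                          */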
1.55      pk       1734:                rp = pm->pm_regmap;
                   1735:                for (va = 0, i = NUREG; --i >= 0; ) {
                   1736:                        if (VA_VREG(va) >= gap_start) {
                   1737:                                va = VRTOVA(gap_end);
                   1738:                                i -= gap_end - gap_start;
                   1739:                                rp += gap_end - gap_start;
                   1740:                                if (i < 0)
                   1741:                                        break;
                   1742:                                /* mustn't re-enter this branch */
                   1743:                                gap_start = NUREG;
                   1744:                        }
1.69      pk       1745:                        if (HASSUN4_MMU3L) {
1.55      pk       1746:                                setregmap(va, rp++->rg_smeg);
                   1747:                                va += NBPRG;
1.69      pk       1748:                        } else {
1.124     pk       1749:                                int j;
                   1750:                                struct segmap *sp = rp->rg_segmap;
1.55      pk       1751:                                for (j = NSEGRG; --j >= 0; va += NBPSG)
                   1752:                                        setsegmap(va,
                   1753:                                                  sp?sp++->sg_pmeg:seginval);
                   1754:                                rp++;
                   1755:                        }
1.43      pk       1756:                }
1.55      pk       1757:
                   1758:        } else if (CPU_ISSUN4M) {
                   1759:
1.80      pk       1760: #if defined(SUN4M)
1.55      pk       1761:                /*
                   1762:                 * Reload page and context tables to activate the page tables
                   1763:                 * for this context.
                   1764:                 *
                   1765:                 * The gap stuff isn't really needed in the Sun4m architecture,
                   1766:                 * since we don't have to worry about excessive mappings (all
                   1767:                 * mappings exist since the page tables must be complete for
                   1768:                 * the mmu to be happy).
                   1769:                 *
                   1770:                 * If a context was just allocated from the free list, trust
                   1771:                 * that all of its mmu-edible page tables are zeroed out
                   1772:                 * (except for those associated with the kernel). We make
                   1773:                 * sure this is the case initially in pmap_bootstrap() and
                   1774:                 * pmap_init() (?).
                   1775:                 * Otherwise, the context was freed by calling ctx_free() in
                   1776:                 * pmap_release(), which in turn is supposedly called only
                   1777:                 * when all mappings have been removed.
                   1778:                 *
                   1779:                 * XXX: Do we have to flush cache after reloading ctx tbl?
                   1780:                 */
                   1781:
1.123     pk       1782:                /* Do any cache flush needed on context switch */
                   1783:                (*cpuinfo.pure_vcache_flush)();
1.157     pk       1784:
                   1785:                /*
                   1786:                 * We need to flush the cache only when stealing a context
                   1787:                 * from another pmap. In that case it's Ok to switch the
                    1788:                 * context and leave it set, since the context table
                   1789:                 * will have a valid region table entry for this context
                   1790:                 * number.
                   1791:                 *
                   1792:                 * Otherwise, we switch to the new context after loading
                   1793:                 * the context table entry with the new pmap's region.
                   1794:                 */
                   1795:                if (doflush) {
                   1796:                        setcontext4m(cnum);
                   1797:                        cache_flush_context();
                   1798:                }
1.152     pk       1799:
                   1800:                /*
                   1801:                 * The context allocated to a process is the same on all CPUs.
                   1802:                 * Here we install the per-CPU region table in each CPU's
                   1803:                 * context table slot.
                   1804:                 *
                   1805:                 * Note on multi-threaded processes: a context must remain
                   1806:                 * valid as long as any thread is still running on a cpu.
                   1807:                 */
1.133     pk       1808: #if defined(MULTIPROCESSOR)
1.152     pk       1809:                for (i = 0; i < ncpu; i++)
                   1810: #else
                   1811:                i = 0;
                   1812: #endif
                   1813:                {
1.133     pk       1814:                        struct cpu_info *cpi = cpus[i];
1.152     pk       1815: #if defined(MULTIPROCESSOR)
1.133     pk       1816:                        if (cpi == NULL)
                   1817:                                continue;
1.152     pk       1818: #endif
1.133     pk       1819:                        setpgt4m(&cpi->ctx_tbl[cnum],
1.152     pk       1820:                                 (pm->pm_reg_ptps_pa[i] >> SRMMU_PPNPASHIFT) |
1.133     pk       1821:                                        SRMMU_TEPTD);
                   1822:                }
1.55      pk       1823:
1.157     pk       1824:                /* Set the context now if it wasn't set above (for the cache flush) */
                   1825:                if (!doflush)
                   1826:                        setcontext4m(cnum);
                   1827:
1.55      pk       1828:                tlb_flush_context(); /* remove any remnant garbage from tlb */
1.43      pk       1829: #endif
1.55      pk       1830:                splx(s);
1.13      pk       1831:        }
1.1       deraadt  1832: }
                   1833:
                   1834: /*
                    1835:  * Give away a context.  Flushes the cache and restores the previous context.
                   1836:  */
                   1837: void
                   1838: ctx_free(pm)
                   1839:        struct pmap *pm;
                   1840: {
1.124     pk       1841:        union ctxinfo *c;
                   1842:        int newc, oldc;
1.1       deraadt  1843:
                   1844:        if ((c = pm->pm_ctx) == NULL)
                   1845:                panic("ctx_free");
                   1846:        pm->pm_ctx = NULL;
                   1847:        oldc = getcontext();
1.55      pk       1848:
1.69      pk       1849:        if (CACHEINFO.c_vactype != VAC_NONE) {
1.123     pk       1850:                /* Do any cache flush needed on context switch */
                   1851:                (*cpuinfo.pure_vcache_flush)();
                   1852:
1.1       deraadt  1853:                newc = pm->pm_ctxnum;
                   1854:                CHANGE_CONTEXTS(oldc, newc);
                   1855:                cache_flush_context();
1.55      pk       1856: #if defined(SUN4M)
                   1857:                if (CPU_ISSUN4M)
                   1858:                        tlb_flush_context();
                   1859: #endif
1.1       deraadt  1860:        } else {
1.55      pk       1861: #if defined(SUN4M)
1.88      pk       1862:                if (CPU_ISSUN4M) {
1.123     pk       1863:                        /* Do any cache flush needed on context switch */
                   1864:                        (*cpuinfo.pure_vcache_flush)();
1.88      pk       1865:                        newc = pm->pm_ctxnum;
                   1866:                        CHANGE_CONTEXTS(oldc, newc);
1.55      pk       1867:                        tlb_flush_context();
1.88      pk       1868:                }
1.55      pk       1869: #endif
1.1       deraadt  1870:        }
1.156     pk       1871:        setcontext(oldc);
                   1872:
1.1       deraadt  1873:        c->c_nextfree = ctx_freelist;
                   1874:        ctx_freelist = c;
                   1875: }
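
/*
 * The context free list is a simple LIFO: ctx_free() above pushes the
 * ctxinfo record back onto ctx_freelist, and the allocation side pops
 * it off again.  A rough sketch of the pop, assuming only the
 * `union ctxinfo' layout used above (the real allocator, ctx_alloc(),
 * appears earlier in this file and also handles the case where the
 * list is empty by stealing a context from another pmap):
 *
 *	union ctxinfo *c;
 *
 *	if ((c = ctx_freelist) != NULL)
 *		ctx_freelist = c->c_nextfree;
 */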
                   1876:
                   1877:
                   1878: /*----------------------------------------------------------------*/
                   1879:
                   1880: /*
                   1881:  * pvlist functions.
                   1882:  */
                   1883:
                   1884: /*
                   1885:  * Walk the given pv list, and for each PTE, set or clear some bits
                   1886:  * (e.g., PG_W or PG_NC).
                   1887:  *
                   1888:  * As a special case, this never clears PG_W on `pager' pages.
                   1889:  * These, being kernel addresses, are always in hardware and have
                   1890:  * a context.
                   1891:  *
                   1892:  * This routine flushes the cache for any page whose PTE changes,
                   1893:  * as long as the process has a context; this is overly conservative.
                   1894:  * It also copies ref and mod bits to the pvlist, on the theory that
                   1895:  * this might save work later.  (XXX should test this theory)
1.115     pk       1896:  *
                   1897:  * In addition, if the cacheable bit (PG_NC) is updated in the PTE
                   1898:  * the corresponding PV_NC flag is also updated in each pv entry. This
                   1899:  * is done so kvm_uncache() can use this routine and have the uncached
                   1900:  * status stick.
1.1       deraadt  1901:  */
1.55      pk       1902:
                   1903: #if defined(SUN4) || defined(SUN4C)
                   1904:
1.1       deraadt  1905: void
1.55      pk       1906: pv_changepte4_4c(pv0, bis, bic)
1.115     pk       1907:        struct pvlist *pv0;
                   1908:        int bis, bic;
1.1       deraadt  1909: {
1.115     pk       1910:        int *pte;
                   1911:        struct pvlist *pv;
                   1912:        struct pmap *pm;
                   1913:        int va, vr, vs;
1.1       deraadt  1914:        int ctx, s;
1.43      pk       1915:        struct regmap *rp;
                   1916:        struct segmap *sp;
1.1       deraadt  1917:
                   1918:        write_user_windows();           /* paranoid? */
                   1919:
                   1920:        s = splpmap();                  /* paranoid? */
                   1921:        if (pv0->pv_pmap == NULL) {
                   1922:                splx(s);
                   1923:                return;
                   1924:        }
1.71      pk       1925:        ctx = getcontext4();
1.1       deraadt  1926:        for (pv = pv0; pv != NULL; pv = pv->pv_next) {
                   1927:                pm = pv->pv_pmap;
1.81      pk       1928: #ifdef DIAGNOSTIC
                   1929:                if (pm == NULL)
                   1930:                        panic("pv_changepte: pm == NULL");
                   1931: #endif
1.1       deraadt  1932:                va = pv->pv_va;
1.43      pk       1933:                vr = VA_VREG(va);
                   1934:                vs = VA_VSEG(va);
                   1935:                rp = &pm->pm_regmap[vr];
                   1936:                if (rp->rg_segmap == NULL)
                   1937:                        panic("pv_changepte: no segments");
                   1938:
                   1939:                sp = &rp->rg_segmap[vs];
                   1940:                pte = sp->sg_pte;
                   1941:
                   1942:                if (sp->sg_pmeg == seginval) {
                   1943:                        /* not in hardware: just fix software copy */
                   1944:                        if (pte == NULL)
1.81      pk       1945:                                panic("pv_changepte: pte == NULL");
1.43      pk       1946:                        pte += VA_VPG(va);
                   1947:                        *pte = (*pte | bis) & ~bic;
                   1948:                } else {
1.124     pk       1949:                        int tpte;
1.1       deraadt  1950:
                   1951:                        /* in hardware: fix hardware copy */
1.43      pk       1952:                        if (CTX_USABLE(pm,rp)) {
1.110     mrg      1953:                                /*
                   1954:                                 * Bizarreness:  we never clear PG_W on
1.125     pk       1955:                                 * pager pages.
1.110     mrg      1956:                                 */
                   1957:                                if (bic == PG_W &&
                   1958:                                    va >= uvm.pager_sva && va < uvm.pager_eva)
                   1959:                                        continue;
1.125     pk       1960:
1.71      pk       1961:                                setcontext4(pm->pm_ctxnum);
1.1       deraadt  1962:                                /* XXX should flush only when necessary */
1.55      pk       1963:                                tpte = getpte4(va);
1.88      pk       1964:                                /*
                   1965:                                 * XXX: always flush cache; conservative, but
                   1966:                                 * needed to invalidate cache tag protection
                   1967:                                 * bits and when disabling caching.
                   1968:                                 */
                   1969:                                cache_flush_page(va);
1.1       deraadt  1970:                        } else {
                   1971:                                /* XXX per-cpu va? */
1.71      pk       1972:                                setcontext4(0);
1.69      pk       1973:                                if (HASSUN4_MMU3L)
1.43      pk       1974:                                        setregmap(0, tregion);
                   1975:                                setsegmap(0, sp->sg_pmeg);
1.18      deraadt  1976:                                va = VA_VPG(va) << PGSHIFT;
1.55      pk       1977:                                tpte = getpte4(va);
1.1       deraadt  1978:                        }
                   1979:                        if (tpte & PG_V)
1.115     pk       1980:                                pv0->pv_flags |= MR4_4C(tpte);
1.1       deraadt  1981:                        tpte = (tpte | bis) & ~bic;
1.55      pk       1982:                        setpte4(va, tpte);
1.1       deraadt  1983:                        if (pte != NULL)        /* update software copy */
                   1984:                                pte[VA_VPG(va)] = tpte;
1.115     pk       1985:
                   1986:                        /* Update PV_NC flag if required */
                   1987:                        if (bis & PG_NC)
                   1988:                                pv->pv_flags |= PV_NC;
                   1989:                        if (bic & PG_NC)
                   1990:                                pv->pv_flags &= ~PV_NC;
1.1       deraadt  1991:                }
                   1992:        }
1.71      pk       1993:        setcontext4(ctx);
1.1       deraadt  1994:        splx(s);
                   1995: }
                   1996:
                   1997: /*
                   1998:  * Sync ref and mod bits in pvlist (turns off same in hardware PTEs).
                   1999:  * Returns the new flags.
                   2000:  *
                   2001:  * This is just like pv_changepte, but we never add or remove bits,
                   2002:  * hence never need to adjust software copies.
                   2003:  */
                   2004: int
1.55      pk       2005: pv_syncflags4_4c(pv0)
1.124     pk       2006:        struct pvlist *pv0;
1.1       deraadt  2007: {
1.124     pk       2008:        struct pvlist *pv;
                   2009:        struct pmap *pm;
                   2010:        int tpte, va, vr, vs, pmeg, flags;
1.1       deraadt  2011:        int ctx, s;
1.43      pk       2012:        struct regmap *rp;
                   2013:        struct segmap *sp;
1.1       deraadt  2014:
                   2015:        write_user_windows();           /* paranoid? */
                   2016:
                   2017:        s = splpmap();                  /* paranoid? */
                   2018:        if (pv0->pv_pmap == NULL) {     /* paranoid */
                   2019:                splx(s);
                   2020:                return (0);
                   2021:        }
1.71      pk       2022:        ctx = getcontext4();
1.1       deraadt  2023:        flags = pv0->pv_flags;
                   2024:        for (pv = pv0; pv != NULL; pv = pv->pv_next) {
                   2025:                pm = pv->pv_pmap;
                   2026:                va = pv->pv_va;
1.43      pk       2027:                vr = VA_VREG(va);
                   2028:                vs = VA_VSEG(va);
                   2029:                rp = &pm->pm_regmap[vr];
                   2030:                if (rp->rg_segmap == NULL)
                   2031:                        panic("pv_syncflags: no segments");
                   2032:                sp = &rp->rg_segmap[vs];
                   2033:
                   2034:                if ((pmeg = sp->sg_pmeg) == seginval)
1.1       deraadt  2035:                        continue;
1.43      pk       2036:
                   2037:                if (CTX_USABLE(pm,rp)) {
1.71      pk       2038:                        setcontext4(pm->pm_ctxnum);
1.1       deraadt  2039:                        /* XXX should flush only when necessary */
1.55      pk       2040:                        tpte = getpte4(va);
1.69      pk       2041:                        if (tpte & PG_M)
1.34      pk       2042:                                cache_flush_page(va);
1.1       deraadt  2043:                } else {
                   2044:                        /* XXX per-cpu va? */
1.71      pk       2045:                        setcontext4(0);
1.69      pk       2046:                        if (HASSUN4_MMU3L)
1.43      pk       2047:                                setregmap(0, tregion);
1.1       deraadt  2048:                        setsegmap(0, pmeg);
1.18      deraadt  2049:                        va = VA_VPG(va) << PGSHIFT;
1.55      pk       2050:                        tpte = getpte4(va);
1.1       deraadt  2051:                }
                   2052:                if (tpte & (PG_M|PG_U) && tpte & PG_V) {
1.86      pk       2053:                        flags |= MR4_4C(tpte);
1.1       deraadt  2054:                        tpte &= ~(PG_M|PG_U);
1.55      pk       2055:                        setpte4(va, tpte);
1.1       deraadt  2056:                }
                   2057:        }
                   2058:        pv0->pv_flags = flags;
1.71      pk       2059:        setcontext4(ctx);
1.1       deraadt  2060:        splx(s);
                   2061:        return (flags);
                   2062: }
                   2063:
                   2064: /*
                   2065:  * pv_unlink is a helper function for pmap_remove.
                   2066:  * It takes a pointer to the pv_table head for some physical address
                   2067:  * and removes the appropriate (pmap, va) entry.
                   2068:  *
                   2069:  * Once the entry is removed, if the pv_table head has the cache
                   2070:  * inhibit bit set, see if we can turn that off; if so, walk the
                   2071:  * pvlist and turn off PG_NC in each PTE.  (The pvlist is by
                   2072:  * definition nonempty, since it must have at least two elements
                   2073:  * in it to have PV_NC set, and we only remove one here.)
                   2074:  */
1.43      pk       2075: /*static*/ void
1.55      pk       2076: pv_unlink4_4c(pv, pm, va)
1.124     pk       2077:        struct pvlist *pv;
                   2078:        struct pmap *pm;
                   2079:        vaddr_t va;
1.1       deraadt  2080: {
1.124     pk       2081:        struct pvlist *npv;
1.1       deraadt  2082:
1.11      pk       2083: #ifdef DIAGNOSTIC
                   2084:        if (pv->pv_pmap == NULL)
                   2085:                panic("pv_unlink0");
                   2086: #endif
1.1       deraadt  2087:        /*
                   2088:         * First entry is special (sigh).
                   2089:         */
                   2090:        npv = pv->pv_next;
                   2091:        if (pv->pv_pmap == pm && pv->pv_va == va) {
                   2092:                pmap_stats.ps_unlink_pvfirst++;
                   2093:                if (npv != NULL) {
1.115     pk       2094:                        /*
                   2095:                         * Shift next entry into the head.
                   2096:                         * Make sure to retain the REF, MOD and ANC flags.
                   2097:                         */
1.1       deraadt  2098:                        pv->pv_next = npv->pv_next;
                   2099:                        pv->pv_pmap = npv->pv_pmap;
                   2100:                        pv->pv_va = npv->pv_va;
1.115     pk       2101:                        pv->pv_flags &= ~PV_NC;
                   2102:                        pv->pv_flags |= npv->pv_flags & PV_NC;
1.122     pk       2103:                        pool_put(&pv_pool, npv);
1.86      pk       2104:                } else {
1.115     pk       2105:                        /*
                   2106:                         * No mappings left; we still need to maintain
                   2107:                         * the REF and MOD flags, since pmap_is_modified()
                   2108:                         * can still be called for this page.
                   2109:                         */
1.1       deraadt  2110:                        pv->pv_pmap = NULL;
1.115     pk       2111:                        pv->pv_flags &= ~(PV_NC|PV_ANC);
1.86      pk       2112:                        return;
                   2113:                }
1.1       deraadt  2114:        } else {
1.124     pk       2115:                struct pvlist *prev;
1.1       deraadt  2116:
                   2117:                for (prev = pv;; prev = npv, npv = npv->pv_next) {
                   2118:                        pmap_stats.ps_unlink_pvsearch++;
                   2119:                        if (npv == NULL)
                   2120:                                panic("pv_unlink");
                   2121:                        if (npv->pv_pmap == pm && npv->pv_va == va)
                   2122:                                break;
                   2123:                }
                   2124:                prev->pv_next = npv->pv_next;
1.122     pk       2125:                pool_put(&pv_pool, npv);
1.1       deraadt  2126:        }
1.115     pk       2127:        if (pv->pv_flags & PV_ANC && (pv->pv_flags & PV_NC) == 0) {
1.1       deraadt  2128:                /*
                   2129:                 * Not cached: check to see if we can fix that now.
                   2130:                 */
                   2131:                va = pv->pv_va;
                   2132:                for (npv = pv->pv_next; npv != NULL; npv = npv->pv_next)
1.115     pk       2133:                        if (BADALIAS(va, npv->pv_va) || (npv->pv_flags & PV_NC))
1.1       deraadt  2134:                                return;
1.115     pk       2135:                pv->pv_flags &= ~PV_ANC;
1.58      pk       2136:                pv_changepte4_4c(pv, 0, PG_NC);
1.1       deraadt  2137:        }
                   2138: }
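
/*
 * A note on the BADALIAS() test used above and in pv_link below: it
 * asks whether two virtual mappings of the same physical page could
 * land on different lines of the virtually-indexed cache, i.e. whether
 * they differ in the address bits that index the cache above the page
 * offset.  The real macro is defined elsewhere and depends on the
 * cache geometry; its general shape, shown purely as an illustration
 * with a made-up `vac_size' parameter, is:
 *
 *	#define EXAMPLE_BADALIAS(a, b, vac_size) \
 *		((((a) ^ (b)) & ((vac_size) - 1) & ~PGOFSET) != 0)
 *
 * Once no remaining pair of mappings aliases (and none is individually
 * marked PV_NC), the page may be cached again, which is what the
 * pv_changepte4_4c(pv, 0, PG_NC) call above arranges.
 */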
                   2139:
                   2140: /*
                   2141:  * pv_link is the inverse of pv_unlink, and is used in pmap_enter.
                   2142:  * It returns PG_NC if the (new) pvlist says that the address cannot
                   2143:  * be cached.
                   2144:  */
1.43      pk       2145: /*static*/ int
1.115     pk       2146: pv_link4_4c(pv, pm, va, nc)
                   2147:        struct pvlist *pv;
                   2148:        struct pmap *pm;
1.124     pk       2149:        vaddr_t va;
1.115     pk       2150:        int nc;
1.1       deraadt  2151: {
1.115     pk       2152:        struct pvlist *npv;
                   2153:        int ret;
                   2154:
                   2155:        ret = nc ? PG_NC : 0;
1.1       deraadt  2156:
                   2157:        if (pv->pv_pmap == NULL) {
                   2158:                /* no pvlist entries yet */
                   2159:                pmap_stats.ps_enter_firstpv++;
                   2160:                pv->pv_next = NULL;
                   2161:                pv->pv_pmap = pm;
                   2162:                pv->pv_va = va;
1.115     pk       2163:                pv->pv_flags |= nc ? PV_NC : 0;
                   2164:                return (ret);
1.1       deraadt  2165:        }
                   2166:        /*
                   2167:         * Before entering the new mapping, see if
                   2168:         * it will cause old mappings to become aliased
                   2169:         * and thus need to be `discached'.
                   2170:         */
                   2171:        pmap_stats.ps_enter_secondpv++;
1.115     pk       2172:        if (pv->pv_flags & (PV_NC|PV_ANC)) {
1.1       deraadt  2173:                /* already uncached, just stay that way */
                   2174:                ret = PG_NC;
                   2175:        } else {
                   2176:                for (npv = pv; npv != NULL; npv = npv->pv_next) {
1.115     pk       2177:                        if (npv->pv_flags & PV_NC) {
                   2178:                                ret = PG_NC;
                   2179:                                break;
                   2180:                        }
1.1       deraadt  2181:                        if (BADALIAS(va, npv->pv_va)) {
1.43      pk       2182: #ifdef DEBUG
1.84      pk       2183:                                if (pmapdebug & PDB_CACHESTUFF)
                   2184:                                        printf(
1.91      fair     2185:                        "pv_link: badalias: pid %d, 0x%lx<=>0x%lx, pa 0x%lx\n",
1.84      pk       2186:                                        curproc ? curproc->p_pid : -1,
                   2187:                                        va, npv->pv_va,
                   2188:                                        vm_first_phys + (pv-pv_table)*NBPG);
1.43      pk       2189: #endif
1.115     pk       2190:                                /* Mark list head `uncached due to aliases' */
                   2191:                                pv->pv_flags |= PV_ANC;
1.58      pk       2192:                                pv_changepte4_4c(pv, ret = PG_NC, 0);
1.1       deraadt  2193:                                break;
                   2194:                        }
                   2195:                }
                   2196:        }
1.122     pk       2197:        npv = pool_get(&pv_pool, PR_WAITOK);
1.1       deraadt  2198:        npv->pv_next = pv->pv_next;
                   2199:        npv->pv_pmap = pm;
                   2200:        npv->pv_va = va;
1.115     pk       2201:        npv->pv_flags = nc ? PV_NC : 0;
1.1       deraadt  2202:        pv->pv_next = npv;
                   2203:        return (ret);
                   2204: }
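
/*
 * On the return value: a set PG_NC bit means `not cacheable' on the
 * sun4/sun4c MMU, so a caller building a prototype PTE can simply OR
 * the result of pv_link4_4c() into it.  Illustrative call pattern only;
 * the actual caller, pmap_enter4_4c(), appears later in this file and
 * may differ in detail:
 *
 *	pteproto |= pv_link4_4c(pv, pm, va, (pteproto & PG_NC) != 0);
 */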
                   2205:
1.55      pk       2206: #endif /* sun4, sun4c code */
                   2207:
                   2208: #if defined(SUN4M)             /* Sun4M versions of above */
1.1       deraadt  2209: /*
1.55      pk       2210:  * Walk the given pv list, and for each PTE, set or clear some bits
                   2211:  * (e.g., PG_W or PG_NC).
                   2212:  *
                   2213:  * As a special case, this never clears PG_W on `pager' pages.
                   2214:  * These, being kernel addresses, are always in hardware and have
                   2215:  * a context.
                   2216:  *
                   2217:  * This routine flushes the cache for any page whose PTE changes,
                   2218:  * as long as the process has a context; this is overly conservative.
                   2219:  * It also copies ref and mod bits to the pvlist, on the theory that
                   2220:  * this might save work later.  (XXX should test this theory)
1.115     pk       2221:  *
                   2222:  * In addition, if the cacheable bit (SRMMU_PG_C) is updated in the PTE
                   2223:  * the corresponding PV_C4M flag is also updated in each pv entry. This
                   2224:  * is done so kvm_uncache() can use this routine and have the uncached
                   2225:  * status stick.
1.1       deraadt  2226:  */
1.53      christos 2227: void
1.55      pk       2228: pv_changepte4m(pv0, bis, bic)
1.115     pk       2229:        struct pvlist *pv0;
                   2230:        int bis, bic;
1.55      pk       2231: {
1.115     pk       2232:        struct pvlist *pv;
                   2233:        struct pmap *pm;
                   2234:        int va, vr;
1.55      pk       2235:        int ctx, s;
                   2236:        struct regmap *rp;
1.72      pk       2237:        struct segmap *sp;
1.1       deraadt  2238:
1.55      pk       2239:        write_user_windows();           /* paranoid? */
1.1       deraadt  2240:
1.55      pk       2241:        s = splpmap();                  /* paranoid? */
                   2242:        if (pv0->pv_pmap == NULL) {
                   2243:                splx(s);
                   2244:                return;
1.1       deraadt  2245:        }
1.71      pk       2246:        ctx = getcontext4m();
1.55      pk       2247:        for (pv = pv0; pv != NULL; pv = pv->pv_next) {
1.115     pk       2248:                int tpte;
1.55      pk       2249:                pm = pv->pv_pmap;
1.81      pk       2250: #ifdef DIAGNOSTIC
1.61      pk       2251:                if (pm == NULL)
1.81      pk       2252:                        panic("pv_changepte: pm == NULL");
                   2253: #endif
1.55      pk       2254:                va = pv->pv_va;
                   2255:                vr = VA_VREG(va);
                   2256:                rp = &pm->pm_regmap[vr];
                   2257:                if (rp->rg_segmap == NULL)
                   2258:                        panic("pv_changepte: no segments");
                   2259:
1.72      pk       2260:                sp = &rp->rg_segmap[VA_VSEG(va)];
                   2261:
                   2262:                if (pm->pm_ctx) {
1.110     mrg      2263:                        /*
                   2264:                         * Bizarreness:  we never clear PG_W on
1.125     pk       2265:                         * pager pages.
1.110     mrg      2266:                         */
                   2267:                        if ((bic & PPROT_WRITE) &&
                   2268:                            va >= uvm.pager_sva && va < uvm.pager_eva)
                   2269:                                continue;
1.72      pk       2270:
1.88      pk       2271:                        setcontext4m(pm->pm_ctxnum);
                   2272:
                   2273:                        /*
                   2274:                         * XXX: always flush cache; conservative, but
                   2275:                         * needed to invalidate cache tag protection
                   2276:                         * bits and when disabling caching.
                   2277:                         */
                   2278:                        cache_flush_page(va);
                   2279:
1.72      pk       2280:                        /* Flush TLB so memory copy is up-to-date */
                   2281:                        tlb_flush_page(va);
1.88      pk       2282:
1.72      pk       2283:                }
                   2284:
                   2285:                tpte = sp->sg_pte[VA_SUN4M_VPG(va)];
                   2286:                if ((tpte & SRMMU_TETYPE) != SRMMU_TEPTE) {
                   2287:                        printf("pv_changepte: invalid PTE for 0x%x\n", va);
                   2288:                        continue;
1.55      pk       2289:                }
                   2290:
1.115     pk       2291:                pv0->pv_flags |= MR4M(tpte);
1.55      pk       2292:                tpte = (tpte | bis) & ~bic;
1.115     pk       2293:                setpgt4m(&sp->sg_pte[VA_SUN4M_VPG(va)], tpte);
                   2294:
                   2295:                /* Update PV_C4M flag if required */
                   2296:                if (bis & SRMMU_PG_C)
                   2297:                        pv->pv_flags |= PV_C4M;
                   2298:                if (bic & SRMMU_PG_C)
                   2299:                        pv->pv_flags &= ~PV_C4M;
1.55      pk       2300:
                   2301:        }
1.71      pk       2302:        setcontext4m(ctx);
1.55      pk       2303:        splx(s);
                   2304: }
                   2305:
                   2306: /*
                   2307:  * Sync ref and mod bits in pvlist. If page has been ref'd or modified,
                   2308:  * update ref/mod bits in pvlist, and clear the hardware bits.
                   2309:  *
                   2310:  * Return the new flags.
                   2311:  */
                   2312: int
                   2313: pv_syncflags4m(pv0)
1.124     pk       2314:        struct pvlist *pv0;
1.55      pk       2315: {
1.124     pk       2316:        struct pvlist *pv;
                   2317:        struct pmap *pm;
                   2318:        int tpte, va, vr, vs, flags;
1.55      pk       2319:        int ctx, s;
                   2320:        struct regmap *rp;
                   2321:        struct segmap *sp;
                   2322:
                   2323:        write_user_windows();           /* paranoid? */
                   2324:
                   2325:        s = splpmap();                  /* paranoid? */
                   2326:        if (pv0->pv_pmap == NULL) {     /* paranoid */
                   2327:                splx(s);
                   2328:                return (0);
                   2329:        }
1.71      pk       2330:        ctx = getcontext4m();
1.55      pk       2331:        flags = pv0->pv_flags;
                   2332:        for (pv = pv0; pv != NULL; pv = pv->pv_next) {
                   2333:                pm = pv->pv_pmap;
                   2334:                va = pv->pv_va;
                   2335:                vr = VA_VREG(va);
                   2336:                vs = VA_VSEG(va);
                   2337:                rp = &pm->pm_regmap[vr];
                   2338:                if (rp->rg_segmap == NULL)
                   2339:                        panic("pv_syncflags: no segments");
                   2340:                sp = &rp->rg_segmap[vs];
                   2341:
                   2342:                if (sp->sg_pte == NULL) /* invalid */
1.60      pk       2343:                        continue;
1.55      pk       2344:
1.62      pk       2345:                /*
                   2346:                 * We need the PTE from memory as the TLB version will
                   2347:                 * always have the SRMMU_PG_R bit on.
                   2348:                 */
1.72      pk       2349:                if (pm->pm_ctx) {
1.71      pk       2350:                        setcontext4m(pm->pm_ctxnum);
1.55      pk       2351:                        tlb_flush_page(va);
                   2352:                }
1.72      pk       2353:                tpte = sp->sg_pte[VA_SUN4M_VPG(va)];
1.62      pk       2354:
1.55      pk       2355:                if ((tpte & SRMMU_TETYPE) == SRMMU_TEPTE && /* if valid pte */
                   2356:                    (tpte & (SRMMU_PG_M|SRMMU_PG_R))) {   /* and mod/refd */
1.72      pk       2357:
1.115     pk       2358:                        flags |= MR4M(tpte);
1.72      pk       2359:
                   2360:                        if (pm->pm_ctx && (tpte & SRMMU_PG_M)) {
1.145     pk       2361:                                /* Only do this for write-back caches? */
                   2362:                                cache_flush_page(va);
                   2363:                                /*
                   2364:                                 * VIPT caches might use the TLB when
                   2365:                                 * flushing, so we flush the TLB again.
                   2366:                                 */
                   2367:                                tlb_flush_page(va);
1.72      pk       2368:                        }
                   2369:
                   2370:                        /* Clear mod/ref bits from PTE and write it back */
1.55      pk       2371:                        tpte &= ~(SRMMU_PG_M | SRMMU_PG_R);
1.72      pk       2372:                        setpgt4m(&sp->sg_pte[VA_SUN4M_VPG(va)], tpte);
1.55      pk       2373:                }
                   2374:        }
                   2375:        pv0->pv_flags = flags;
1.71      pk       2376:        setcontext4m(ctx);
1.55      pk       2377:        splx(s);
                   2378:        return (flags);
                   2379: }
                   2380:
                   2381: void
                   2382: pv_unlink4m(pv, pm, va)
1.124     pk       2383:        struct pvlist *pv;
                   2384:        struct pmap *pm;
                   2385:        vaddr_t va;
1.55      pk       2386: {
1.124     pk       2387:        struct pvlist *npv;
1.55      pk       2388:
                   2389: #ifdef DIAGNOSTIC
                   2390:        if (pv->pv_pmap == NULL)
                   2391:                panic("pv_unlink0");
                   2392: #endif
                   2393:        /*
                   2394:         * First entry is special (sigh).
                   2395:         */
                   2396:        npv = pv->pv_next;
                   2397:        if (pv->pv_pmap == pm && pv->pv_va == va) {
                   2398:                pmap_stats.ps_unlink_pvfirst++;
                   2399:                if (npv != NULL) {
1.115     pk       2400:                        /*
                   2401:                         * Shift next entry into the head.
1.155     pk       2402:                         * Make sure to retain the REF, MOD and ANC flags
                   2403:                         * on the list head.
1.115     pk       2404:                         */
1.55      pk       2405:                        pv->pv_next = npv->pv_next;
                   2406:                        pv->pv_pmap = npv->pv_pmap;
                   2407:                        pv->pv_va = npv->pv_va;
1.115     pk       2408:                        pv->pv_flags &= ~PV_C4M;
                   2409:                        pv->pv_flags |= (npv->pv_flags & PV_C4M);
1.122     pk       2410:                        pool_put(&pv_pool, npv);
1.86      pk       2411:                } else {
1.115     pk       2412:                        /*
1.155     pk       2413:                         * No mappings left; we need to maintain
                   2414:                         * the REF and MOD flags, since pmap_is_modified()
1.115     pk       2415:                         * can still be called for this page.
                   2416:                         */
1.55      pk       2417:                        pv->pv_pmap = NULL;
1.115     pk       2418:                        pv->pv_flags &= ~(PV_C4M|PV_ANC);
1.86      pk       2419:                        return;
                   2420:                }
1.55      pk       2421:        } else {
1.124     pk       2422:                struct pvlist *prev;
1.55      pk       2423:
                   2424:                for (prev = pv;; prev = npv, npv = npv->pv_next) {
                   2425:                        pmap_stats.ps_unlink_pvsearch++;
                   2426:                        if (npv == NULL)
                   2427:                                panic("pv_unlink");
                   2428:                        if (npv->pv_pmap == pm && npv->pv_va == va)
                   2429:                                break;
                   2430:                }
                   2431:                prev->pv_next = npv->pv_next;
1.122     pk       2432:                pool_put(&pv_pool, npv);
1.55      pk       2433:        }
1.115     pk       2434:        if ((pv->pv_flags & (PV_C4M|PV_ANC)) == (PV_C4M|PV_ANC)) {
1.55      pk       2435:                /*
                   2436:                 * Not cached: check to see if we can fix that now.
                   2437:                 */
                   2438:                va = pv->pv_va;
                   2439:                for (npv = pv->pv_next; npv != NULL; npv = npv->pv_next)
1.115     pk       2440:                        if (BADALIAS(va, npv->pv_va) ||
                   2441:                            (npv->pv_flags & PV_C4M) == 0)
1.55      pk       2442:                                return;
1.155     pk       2443:                pv->pv_flags &= ~PV_ANC;
1.55      pk       2444:                pv_changepte4m(pv, SRMMU_PG_C, 0);
                   2445:        }
                   2446: }
                   2447:
                   2448: /*
                   2449:  * pv_link is the inverse of pv_unlink, and is used in pmap_enter.
                   2450:  * It returns SRMMU_PG_C if the (new) pvlist says that the address cannot
                   2451:  * be cached (i.e. its results must be (& ~)'d in).
                   2452:  */
                   2453: /*static*/ int
1.115     pk       2454: pv_link4m(pv, pm, va, nc)
                   2455:        struct pvlist *pv;
                   2456:        struct pmap *pm;
1.124     pk       2457:        vaddr_t va;
1.115     pk       2458:        int nc;
1.55      pk       2459: {
1.115     pk       2460:        struct pvlist *npv;
                   2461:        int ret;
                   2462:
                   2463:        ret = nc ? SRMMU_PG_C : 0;
1.55      pk       2464:
                   2465:        if (pv->pv_pmap == NULL) {
                   2466:                /* no pvlist entries yet */
                   2467:                pmap_stats.ps_enter_firstpv++;
                   2468:                pv->pv_next = NULL;
                   2469:                pv->pv_pmap = pm;
                   2470:                pv->pv_va = va;
1.115     pk       2471:                pv->pv_flags |= nc ? 0 : PV_C4M;
                   2472:                return (ret);
1.55      pk       2473:        }
                   2474:        /*
                   2475:         * Before entering the new mapping, see if
                   2476:         * it will cause old mappings to become aliased
                   2477:         * and thus need to be `discached'.
                   2478:         */
                   2479:        pmap_stats.ps_enter_secondpv++;
1.115     pk       2480:        if ((pv->pv_flags & PV_ANC) != 0 || (pv->pv_flags & PV_C4M) == 0) {
1.55      pk       2481:                /* already uncached, just stay that way */
                   2482:                ret = SRMMU_PG_C;
                   2483:        } else {
                   2484:                for (npv = pv; npv != NULL; npv = npv->pv_next) {
1.115     pk       2485:                        if ((npv->pv_flags & PV_C4M) == 0) {
                   2486:                                ret = SRMMU_PG_C;
                   2487:                                break;
                   2488:                        }
1.55      pk       2489:                        if (BADALIAS(va, npv->pv_va)) {
                   2490: #ifdef DEBUG
1.84      pk       2491:                                if (pmapdebug & PDB_CACHESTUFF)
                   2492:                                        printf(
1.91      fair     2493:                        "pv_link: badalias: pid %d, 0x%lx<=>0x%lx, pa 0x%lx\n",
1.84      pk       2494:                                        curproc ? curproc->p_pid : -1,
                   2495:                                        va, npv->pv_va,
                   2496:                                        vm_first_phys + (pv-pv_table)*NBPG);
1.55      pk       2497: #endif
1.115     pk       2498:                                /* Mark list head `uncached due to aliases' */
                   2499:                                pv->pv_flags |= PV_ANC;
1.58      pk       2500:                                pv_changepte4m(pv, 0, ret = SRMMU_PG_C);
1.55      pk       2501:                                /* cache_flush_page(va); XXX: needed? */
                   2502:                                break;
                   2503:                        }
                   2504:                }
                   2505:        }
1.122     pk       2506:        npv = pool_get(&pv_pool, PR_WAITOK);
1.55      pk       2507:        npv->pv_next = pv->pv_next;
                   2508:        npv->pv_pmap = pm;
                   2509:        npv->pv_va = va;
1.115     pk       2510:        npv->pv_flags = nc ? 0 : PV_C4M;
1.55      pk       2511:        pv->pv_next = npv;
                   2512:        return (ret);
                   2513: }
                   2514: #endif
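
/*
 * The sun4m convention is the mirror image of the sun4/sun4c one above:
 * SRMMU_PG_C set means `cacheable', so the value returned by pv_link4m()
 * must be cleared from the prototype PTE, as its comment notes.
 * Illustrative call pattern only; the actual caller, pmap_enter4m(),
 * appears later in this file and may differ in detail:
 *
 *	pteproto &= ~pv_link4m(pv, pm, va, (pteproto & SRMMU_PG_C) == 0);
 */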
                   2515:
                   2516: /*
                   2517:  * Walk the given list and flush the cache for each (MI) page that is
                   2518:  * potentially in the cache. Called only if vactype != VAC_NONE.
                   2519:  */
                   2520: void
                   2521: pv_flushcache(pv)
1.124     pk       2522:        struct pvlist *pv;
1.55      pk       2523: {
1.124     pk       2524:        struct pmap *pm;
                   2525:        int s, ctx;
1.55      pk       2526:
                   2527:        write_user_windows();   /* paranoia? */
                   2528:
                   2529:        s = splpmap();          /* XXX extreme paranoia */
                   2530:        if ((pm = pv->pv_pmap) != NULL) {
                   2531:                ctx = getcontext();
                   2532:                for (;;) {
                   2533:                        if (pm->pm_ctx) {
                   2534:                                setcontext(pm->pm_ctxnum);
                   2535:                                cache_flush_page(pv->pv_va);
1.145     pk       2536: #if defined(SUN4M)
                   2537:                                if (CPU_ISSUN4M)
                   2538:                                        tlb_flush_page(pv->pv_va);
                   2539: #endif
1.55      pk       2540:                        }
                   2541:                        pv = pv->pv_next;
                   2542:                        if (pv == NULL)
                   2543:                                break;
                   2544:                        pm = pv->pv_pmap;
                   2545:                }
                   2546:                setcontext(ctx);
                   2547:        }
                   2548:        splx(s);
                   2549: }
                   2550:
1.124     pk       2551: vsize_t
1.122     pk       2552: pv_table_map(base, mapit)
1.124     pk       2553:        paddr_t base;
1.122     pk       2554:        int mapit;
                   2555: {
                   2556:        int nmem;
                   2557:        struct memarr *mp;
1.124     pk       2558:        vsize_t s;
                   2559:        vaddr_t sva, va, eva;
                   2560:        paddr_t pa;
1.122     pk       2561:
                   2562:        /*
                   2563:         * Map pv_table[] as a `sparse' array. pv_table_map() is called
                   2564:         * twice: the first time `mapit' is 0, and the number of
                   2565:         * physical pages needed to map the used pieces of pv_table[]
                   2566:         * is computed;  the second time those pages are used to
                   2567:         * actually map pv_table[].
                   2568:         * In both cases, this function returns the amount of physical
                   2569:         * memory needed.
                   2570:         */
                   2571:
                   2572:        if (!mapit)
                   2573:                /* Mark physical pages for pv_table[] */
                   2574:                pv_physmem = base;
                   2575:
                   2576:        pa = pv_physmem; /* XXX - always init `pa' to appease gcc */
                   2577:
                   2578:        s = 0;
                   2579:        sva = eva = 0;
                   2580:        for (mp = pmemarr, nmem = npmemarr; --nmem >= 0; mp++) {
                   2581:                int len;
1.124     pk       2582:                paddr_t addr;
1.122     pk       2583:
                   2584:                len = mp->len;
                   2585:                if ((addr = mp->addr) < base) {
                   2586:                        /*
                   2587:                         * pv_table[] covers everything above `avail_start'.
                   2588:                         */
                   2589:                        addr = base;
                   2590:                        len -= base;
                   2591:                }
                   2592:
                   2593:                /* Calculate stretch of pv_table */
                   2594:                len = sizeof(struct pvlist) * btoc(len);
1.124     pk       2595:                va = (vaddr_t)&pv_table[btoc(addr - base)];
1.122     pk       2596:                sva = trunc_page(va);
                   2597:
                   2598:                if (sva < eva) {
                   2599:                        /* This chunk overlaps the previous in pv_table[] */
                   2600:                        sva += NBPG;
                   2601:                        if (sva < eva)
                   2602:                                panic("pv_table_map: sva(0x%lx)<eva(0x%lx)",
                   2603:                                      sva, eva);
                   2604:                }
                   2605:                eva = roundup(va + len, NBPG);
                   2606:
                   2607:                /* Add this range to the total */
                   2608:                s += eva - sva;
                   2609:
                   2610:                if (mapit) {
                   2611:                        /* Map this piece of pv_table[] */
                   2612:                        for (va = sva; va < eva; va += PAGE_SIZE) {
                   2613:                                pmap_enter(pmap_kernel(), va, pa,
1.153     thorpej  2614:                                    VM_PROT_READ|VM_PROT_WRITE,
                   2615:                                    VM_PROT_READ|VM_PROT_WRITE|PMAP_WIRED);
1.122     pk       2616:                                pa += PAGE_SIZE;
                   2617:                        }
                   2618:                        bzero((caddr_t)sva, eva - sva);
                   2619:                }
                   2620:        }
                   2621:        return (s);
                   2622: }
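
/*
 * From the caller's side, the two-pass protocol described above looks
 * roughly like this (sketch only; the first pass is visible in
 * pmap_bootstrap4_4c() below, the second is made later once pv_table[]
 * has been assigned its virtual address):
 *
 *	sz = pv_table_map(base, 0);
 *	... reserve `sz' bytes of physical memory starting at `base' ...
 *	(void)pv_table_map(base, 1);
 *
 * where the first call only computes how much physical memory is
 * needed and the second actually enters (and zeroes) the mappings.
 */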
                   2623:
1.55      pk       2624: /*----------------------------------------------------------------*/
                   2625:
                   2626: /*
                   2627:  * At last, pmap code.
                   2628:  */
1.1       deraadt  2629:
1.99      fair     2630: #if defined(SUN4) && (defined(SUN4C) || defined(SUN4M))
1.18      deraadt  2631: int nptesg;
                   2632: #endif
                   2633:
1.55      pk       2634: #if defined(SUN4M)
                   2635: static void pmap_bootstrap4m __P((void));
                   2636: #endif
                   2637: #if defined(SUN4) || defined(SUN4C)
                   2638: static void pmap_bootstrap4_4c __P((int, int, int));
                   2639: #endif
                   2640:
1.1       deraadt  2641: /*
                   2642:  * Bootstrap the system enough to run with VM enabled.
                   2643:  *
1.43      pk       2644:  * nsegment is the number of mmu segment entries (``PMEGs'');
                   2645:  * nregion is the number of mmu region entries (``SMEGs'');
1.1       deraadt  2646:  * nctx is the number of contexts.
                   2647:  */
                   2648: void
1.43      pk       2649: pmap_bootstrap(nctx, nregion, nsegment)
                   2650:        int nsegment, nctx, nregion;
1.1       deraadt  2651: {
1.55      pk       2652:
1.110     mrg      2653:        uvmexp.pagesize = NBPG;
                   2654:        uvm_setpagesize();
1.55      pk       2655:
                   2656: #if defined(SUN4) && (defined(SUN4C) || defined(SUN4M))
                   2657:        /* In this case NPTESG is not a #define */
                   2658:        nptesg = (NBPSG >> pgshift);
                   2659: #endif
                   2660:
1.69      pk       2661: #if 0
1.55      pk       2662:        ncontext = nctx;
1.69      pk       2663: #endif
1.55      pk       2664:
                   2665: #if defined(SUN4M)
                   2666:        if (CPU_ISSUN4M) {
                   2667:                pmap_bootstrap4m();
                   2668:                return;
                   2669:        }
                   2670: #endif
                   2671: #if defined(SUN4) || defined(SUN4C)
                   2672:        if (CPU_ISSUN4OR4C) {
                   2673:                pmap_bootstrap4_4c(nctx, nregion, nsegment);
                   2674:                return;
                   2675:        }
                   2676: #endif
                   2677: }
                   2678:
                   2679: #if defined(SUN4) || defined(SUN4C)
                   2680: void
                   2681: pmap_bootstrap4_4c(nctx, nregion, nsegment)
                   2682:        int nsegment, nctx, nregion;
                   2683: {
1.122     pk       2684:        union ctxinfo *ci;
                   2685:        struct mmuentry *mmuseg;
1.77      pk       2686: #if defined(SUN4_MMU3L)
1.122     pk       2687:        struct mmuentry *mmureg;
1.53      christos 2688: #endif
1.43      pk       2689:        struct   regmap *rp;
1.122     pk       2690:        int i, j;
                   2691:        int npte, zseg, vr, vs;
                   2692:        int rcookie, scookie;
                   2693:        caddr_t p;
1.1       deraadt  2694:        int lastpage;
1.139     chs      2695:        vaddr_t va;
1.1       deraadt  2696:        extern char end[];
1.7       pk       2697: #ifdef DDB
                   2698:        extern char *esym;
                   2699: #endif
1.1       deraadt  2700:
1.45      pk       2701:        switch (cputyp) {
                   2702:        case CPU_SUN4C:
                   2703:                mmu_has_hole = 1;
                   2704:                break;
                   2705:        case CPU_SUN4:
1.69      pk       2706:                if (cpuinfo.cpu_type != CPUTYP_4_400) {
1.45      pk       2707:                        mmu_has_hole = 1;
                   2708:                        break;
                   2709:                }
                   2710:        }
                   2711:
1.110     mrg      2712:        uvmexp.pagesize = NBPG;
                   2713:        uvm_setpagesize();
1.19      deraadt  2714:
1.31      pk       2715: #if defined(SUN4)
                   2716:        /*
                   2717:         * set up the segfixmask to mask off invalid bits
                   2718:         */
1.43      pk       2719:        segfixmask =  nsegment - 1; /* assume nsegment is a power of 2 */
                   2720: #ifdef DIAGNOSTIC
                   2721:        if (((nsegment & segfixmask) | (nsegment & ~segfixmask)) != nsegment) {
1.66      christos 2722:                printf("pmap_bootstrap: unsuitable number of segments (%d)\n",
1.43      pk       2723:                        nsegment);
                   2724:                callrom();
                   2725:        }
                   2726: #endif
1.31      pk       2727: #endif
                   2728:
1.55      pk       2729: #if defined(SUN4M) /* We're in a dual-arch kernel. Setup 4/4c fn. ptrs */
                   2730:        pmap_clear_modify_p     =       pmap_clear_modify4_4c;
                   2731:        pmap_clear_reference_p  =       pmap_clear_reference4_4c;
                   2732:        pmap_enter_p            =       pmap_enter4_4c;
                   2733:        pmap_extract_p          =       pmap_extract4_4c;
                   2734:        pmap_is_modified_p      =       pmap_is_modified4_4c;
                   2735:        pmap_is_referenced_p    =       pmap_is_referenced4_4c;
1.151     chs      2736:        pmap_kenter_pa_p        =       pmap_kenter_pa4_4c;
                   2737:        pmap_kenter_pgs_p       =       pmap_kenter_pgs4_4c;
                   2738:        pmap_kremove_p          =       pmap_kremove4_4c;
1.55      pk       2739:        pmap_page_protect_p     =       pmap_page_protect4_4c;
                   2740:        pmap_protect_p          =       pmap_protect4_4c;
                   2741:        pmap_changeprot_p       =       pmap_changeprot4_4c;
                   2742:        pmap_rmk_p              =       pmap_rmk4_4c;
                   2743:        pmap_rmu_p              =       pmap_rmu4_4c;
                   2744: #endif /* defined SUN4M */
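
	/*
	 * In a kernel configured for both MMU families, the pmap entry
	 * points are reached through the function pointers assigned
	 * above; the sun4m bootstrap installs the 4m variants instead.
	 * The dispatch itself is done by macros in the machine headers,
	 * along these general lines (illustration only, not the literal
	 * definitions):
	 *
	 *	#define pmap_enter	(*pmap_enter_p)
	 *	#define pmap_extract	(*pmap_extract_p)
	 */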
1.43      pk       2745:
1.1       deraadt  2746:        /*
                   2747:         * Last segment is the `invalid' one (one PMEG of pte's with !pg_v).
                   2748:         * It will never be used for anything else.
                   2749:         */
1.43      pk       2750:        seginval = --nsegment;
                   2751:
1.69      pk       2752: #if defined(SUN4_MMU3L)
                   2753:        if (HASSUN4_MMU3L)
1.43      pk       2754:                reginval = --nregion;
                   2755: #endif
                   2756:
                   2757:        /*
                   2758:         * Initialize the kernel pmap.
                   2759:         */
                   2760:        /* kernel_pmap_store.pm_ctxnum = 0; */
1.111     chs      2761:        simple_lock_init(&kernel_pmap_store.pm_lock);
1.43      pk       2762:        kernel_pmap_store.pm_refcount = 1;
1.69      pk       2763: #if defined(SUN4_MMU3L)
1.43      pk       2764:        TAILQ_INIT(&kernel_pmap_store.pm_reglist);
                   2765: #endif
                   2766:        TAILQ_INIT(&kernel_pmap_store.pm_seglist);
                   2767:
                   2768:        kernel_pmap_store.pm_regmap = &kernel_regmap_store[-NUREG];
                   2769:        for (i = NKREG; --i >= 0;) {
1.69      pk       2770: #if defined(SUN4_MMU3L)
1.43      pk       2771:                kernel_regmap_store[i].rg_smeg = reginval;
                   2772: #endif
                   2773:                kernel_regmap_store[i].rg_segmap =
                   2774:                        &kernel_segmap_store[i * NSEGRG];
                   2775:                for (j = NSEGRG; --j >= 0;)
                   2776:                        kernel_segmap_store[i * NSEGRG + j].sg_pmeg = seginval;
                   2777:        }
1.1       deraadt  2778:
                   2779:        /*
                   2780:         * Preserve the monitor ROM's reserved VM region, so that
                   2781:         * we can use L1-A or the monitor's debugger.  As a side
                   2782:         * effect we map the ROM's reserved VM into all contexts
                   2783:         * (otherwise L1-A crashes the machine!).
                   2784:         */
1.43      pk       2785:
1.58      pk       2786:        mmu_reservemon4_4c(&nregion, &nsegment);
1.43      pk       2787:
1.69      pk       2788: #if defined(SUN4_MMU3L)
1.43      pk       2789:        /* Reserve one region for temporary mappings */
1.143     pk       2790:        if (HASSUN4_MMU3L)
                   2791:                tregion = --nregion;
1.43      pk       2792: #endif
1.1       deraadt  2793:
                   2794:        /*
1.43      pk       2795:         * Allocate and clear mmu entries and context structures.
1.1       deraadt  2796:         */
                   2797:        p = end;
1.7       pk       2798: #ifdef DDB
                   2799:        if (esym != 0)
1.78      pk       2800:                p = esym;
1.7       pk       2801: #endif
1.69      pk       2802: #if defined(SUN4_MMU3L)
1.43      pk       2803:        mmuregions = mmureg = (struct mmuentry *)p;
                   2804:        p += nregion * sizeof(struct mmuentry);
1.78      pk       2805:        bzero(mmuregions, nregion * sizeof(struct mmuentry));
1.43      pk       2806: #endif
                   2807:        mmusegments = mmuseg = (struct mmuentry *)p;
                   2808:        p += nsegment * sizeof(struct mmuentry);
1.78      pk       2809:        bzero(mmusegments, nsegment * sizeof(struct mmuentry));
                   2810:
1.69      pk       2811:        pmap_kernel()->pm_ctx = cpuinfo.ctxinfo = ci = (union ctxinfo *)p;
1.1       deraadt  2812:        p += nctx * sizeof *ci;
                   2813:
1.43      pk       2814:        /* Initialize MMU resource queues */
1.69      pk       2815: #if defined(SUN4_MMU3L)
1.43      pk       2816:        TAILQ_INIT(&region_freelist);
                   2817:        TAILQ_INIT(&region_lru);
                   2818:        TAILQ_INIT(&region_locked);
                   2819: #endif
                   2820:        TAILQ_INIT(&segm_freelist);
                   2821:        TAILQ_INIT(&segm_lru);
                   2822:        TAILQ_INIT(&segm_locked);
                   2823:
1.1       deraadt  2824:        /*
                   2825:         * Set up the `constants' for the call to vm_init()
                   2826:         * in main().  All pages beginning at p (rounded up to
                   2827:         * the next whole page) and continuing through the number
                   2828:         * of available pages are free, but they start at a higher
                   2829:         * virtual address.  This gives us two mappable MD pages
                   2830:         * for pmap_zero_page and pmap_copy_page, and one MI page
                   2831:         * for /dev/mem, all with no associated physical memory.
                   2832:         */
                   2833:        p = (caddr_t)(((u_int)p + NBPG - 1) & ~PGOFSET);
1.36      pk       2834:
                   2835:        /*
1.122     pk       2836:         * Grab physical memory list.
1.36      pk       2837:         */
1.122     pk       2838:        get_phys_mem();
                   2839:
                   2840:        /* Allocate physical memory for pv_table[] */
1.124     pk       2841:        p += pv_table_map((paddr_t)p - KERNBASE, 0);
                   2842:        avail_start = (paddr_t)p - KERNBASE;
1.38      pk       2843:
                   2844:        i = (int)p;
                   2845:        vpage[0] = p, p += NBPG;
                   2846:        vpage[1] = p, p += NBPG;
1.41      mycroft  2847:        vmmap = p, p += NBPG;
1.38      pk       2848:        p = reserve_dumppages(p);
1.39      pk       2849:
1.122     pk       2850:        /* Allocate virtual memory for pv_table[]. */
1.37      pk       2851:        pv_table = (struct pvlist *)p;
                   2852:        p += round_page(sizeof(struct pvlist) * atop(avail_end - avail_start));
1.36      pk       2853:
1.124     pk       2854:        virtual_avail = (vaddr_t)p;
1.1       deraadt  2855:        virtual_end = VM_MAX_KERNEL_ADDRESS;
                   2856:
                   2857:        p = (caddr_t)i;                 /* retract to first free phys */
                   2858:
                   2859:        /*
                   2860:         * All contexts are free except the kernel's.
                   2861:         *
                   2862:         * XXX sun4c could use context 0 for users?
                   2863:         */
1.42      mycroft  2864:        ci->c_pmap = pmap_kernel();
1.1       deraadt  2865:        ctx_freelist = ci + 1;
                   2866:        for (i = 1; i < ncontext; i++) {
                   2867:                ci++;
                   2868:                ci->c_nextfree = ci + 1;
                   2869:        }
                   2870:        ci->c_nextfree = NULL;
                   2871:        ctx_kick = 0;
                   2872:        ctx_kickdir = -1;
                   2873:
                   2874:        /*
                   2875:         * Init mmu entries that map the kernel physical addresses.
                   2876:         *
                   2877:         * All the other MMU entries are free.
                   2878:         *
                   2879:         * THIS ASSUMES SEGMENT i IS MAPPED BY MMU ENTRY i DURING THE
                   2880:         * BOOT PROCESS
                   2881:         */
1.43      pk       2882:
                   2883:        zseg = ((((u_int)p + NBPSG - 1) & ~SGOFSET) - KERNBASE) >> SGSHIFT;
1.1       deraadt  2884:        lastpage = VA_VPG(p);
                   2885:        if (lastpage == 0)
1.43      pk       2886:                /*
                   2887:                 * If the page bits in p are 0, we filled the last segment
                   2888:                 * exactly (now how did that happen?); if not, it is
                   2889:                 * the last page filled in the last segment.
                   2890:                 */
1.1       deraadt  2891:                lastpage = NPTESG;
1.43      pk       2892:
1.1       deraadt  2893:        p = (caddr_t)KERNBASE;          /* first va */
                   2894:        vs = VA_VSEG(KERNBASE);         /* first virtual segment */
1.43      pk       2895:        vr = VA_VREG(KERNBASE);         /* first virtual region */
                   2896:        rp = &pmap_kernel()->pm_regmap[vr];
                   2897:
                   2898:        for (rcookie = 0, scookie = 0;;) {
                   2899:
1.1       deraadt  2900:                /*
1.43      pk       2901:                 * Distribute each kernel region/segment into all contexts.
1.1       deraadt  2902:                 * This is done through the monitor ROM, rather than
                   2903:                 * directly here: if we do a setcontext we will fault,
                   2904:                 * as we are not (yet) mapped in any other context.
                   2905:                 */
1.43      pk       2906:
                   2907:                if ((vs % NSEGRG) == 0) {
                   2908:                        /* Entering a new region */
                   2909:                        if (VA_VREG(p) > vr) {
                   2910: #ifdef DEBUG
1.66      christos 2911:                                printf("note: giant kernel!\n");
1.43      pk       2912: #endif
                   2913:                                vr++, rp++;
                   2914:                        }
1.69      pk       2915: #if defined(SUN4_MMU3L)
                   2916:                        if (HASSUN4_MMU3L) {
1.43      pk       2917:                                for (i = 1; i < nctx; i++)
1.137     pk       2918:                                        prom_setcontext(i, p, rcookie);
1.43      pk       2919:
                   2920:                                TAILQ_INSERT_TAIL(&region_locked,
                   2921:                                                  mmureg, me_list);
                   2922:                                TAILQ_INSERT_TAIL(&pmap_kernel()->pm_reglist,
                   2923:                                                  mmureg, me_pmchain);
                   2924:                                mmureg->me_cookie = rcookie;
                   2925:                                mmureg->me_pmap = pmap_kernel();
                   2926:                                mmureg->me_vreg = vr;
                   2927:                                rp->rg_smeg = rcookie;
                   2928:                                mmureg++;
                   2929:                                rcookie++;
                   2930:                        }
                   2931: #endif
                   2932:                }
                   2933:
1.69      pk       2934: #if defined(SUN4_MMU3L)
                   2935:                if (!HASSUN4_MMU3L)
1.43      pk       2936: #endif
                   2937:                        for (i = 1; i < nctx; i++)
1.137     pk       2938:                                prom_setcontext(i, p, scookie);
1.1       deraadt  2939:
                   2940:                /* set up the mmu entry */
1.43      pk       2941:                TAILQ_INSERT_TAIL(&segm_locked, mmuseg, me_list);
                   2942:                TAILQ_INSERT_TAIL(&pmap_kernel()->pm_seglist, mmuseg, me_pmchain);
1.70      pk       2943:                pmap_stats.ps_npmeg_locked++;
1.43      pk       2944:                mmuseg->me_cookie = scookie;
                   2945:                mmuseg->me_pmap = pmap_kernel();
                   2946:                mmuseg->me_vreg = vr;
                   2947:                mmuseg->me_vseg = vs % NSEGRG;
                   2948:                rp->rg_segmap[vs % NSEGRG].sg_pmeg = scookie;
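                                        /*
                                         * (The last kernel segment, reached when ++scookie hits
                                         * zseg, holds only `lastpage' PTEs; every earlier
                                         * segment holds a full NPTESG.)
                                         */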
                   2949:                npte = ++scookie < zseg ? NPTESG : lastpage;
                   2950:                rp->rg_segmap[vs % NSEGRG].sg_npte = npte;
                   2951:                rp->rg_nsegmap += 1;
                   2952:                mmuseg++;
1.1       deraadt  2953:                vs++;
1.43      pk       2954:                if (scookie < zseg) {
1.1       deraadt  2955:                        p += NBPSG;
                   2956:                        continue;
                   2957:                }
1.43      pk       2958:
1.1       deraadt  2959:                /*
                   2960:                 * Unmap the pages, if any, that are not part of
                   2961:                 * the final segment.
                   2962:                 */
1.43      pk       2963:                for (p += npte << PGSHIFT; npte < NPTESG; npte++, p += NBPG)
1.55      pk       2964:                        setpte4(p, 0);
1.43      pk       2965:
1.69      pk       2966: #if defined(SUN4_MMU3L)
                   2967:                if (HASSUN4_MMU3L) {
1.43      pk       2968:                        /*
                   2969:                         * Unmap the segments, if any, that are not part of
                   2970:                         * the final region.
                   2971:                         */
                   2972:                        for (i = rp->rg_nsegmap; i < NSEGRG; i++, p += NBPSG)
                   2973:                                setsegmap(p, seginval);
1.139     chs      2974:
                   2975:                        /*
                   2976:                         * Unmap any kernel regions that we aren't using.
                   2977:                         */
                   2978:                        for (i = 0; i < nctx; i++) {
                   2979:                                setcontext4(i);
                   2980:                                for (va = (vaddr_t)p;
                   2981:                                     va < (OPENPROM_STARTVADDR & ~(NBPRG - 1));
                   2982:                                     va += NBPRG)
                   2983:                                        setregmap(va, reginval);
                   2984:                        }
                   2985:
                   2986:                } else
                   2987: #endif
                   2988:                {
                   2989:                        /*
                   2990:                         * Unmap any kernel segments that we aren't using.
                   2991:                         */
                   2992:                        for (i = 0; i < nctx; i++) {
                   2993:                                setcontext4(i);
                   2994:                                for (va = (vaddr_t)p;
                   2995:                                     va < (OPENPROM_STARTVADDR & ~(NBPSG - 1));
                   2996:                                     va += NBPSG)
                   2997:                                        setsegmap(va, seginval);
                   2998:                        }
1.43      pk       2999:                }
1.1       deraadt  3000:                break;
                   3001:        }
1.43      pk       3002:
1.69      pk       3003: #if defined(SUN4_MMU3L)
                   3004:        if (HASSUN4_MMU3L)
1.43      pk       3005:                for (; rcookie < nregion; rcookie++, mmureg++) {
                   3006:                        mmureg->me_cookie = rcookie;
                   3007:                        TAILQ_INSERT_TAIL(&region_freelist, mmureg, me_list);
                   3008:                }
                   3009: #endif
                   3010:
                   3011:        for (; scookie < nsegment; scookie++, mmuseg++) {
                   3012:                mmuseg->me_cookie = scookie;
                   3013:                TAILQ_INSERT_TAIL(&segm_freelist, mmuseg, me_list);
1.70      pk       3014:                pmap_stats.ps_npmeg_free++;
1.1       deraadt  3015:        }
                   3016:
1.13      pk       3017:        /* Erase all spurious user-space segmaps */
                   3018:        for (i = 1; i < ncontext; i++) {
1.71      pk       3019:                setcontext4(i);
1.69      pk       3020:                if (HASSUN4_MMU3L)
1.43      pk       3021:                        for (p = 0, j = NUREG; --j >= 0; p += NBPRG)
                   3022:                                setregmap(p, reginval);
                   3023:                else
                   3024:                        for (p = 0, vr = 0; vr < NUREG; vr++) {
1.45      pk       3025:                                if (VA_INHOLE(p)) {
                   3026:                                        p = (caddr_t)MMU_HOLE_END;
                   3027:                                        vr = VA_VREG(p);
1.43      pk       3028:                                }
                   3029:                                for (j = NSEGRG; --j >= 0; p += NBPSG)
                   3030:                                        setsegmap(p, seginval);
                   3031:                        }
1.13      pk       3032:        }
1.71      pk       3033:        setcontext4(0);
1.13      pk       3034:
1.1       deraadt  3035:        /*
                   3036:         * write protect & encache kernel text;
                   3037:         * set red zone at kernel base; enable cache on message buffer.
                   3038:         */
                   3039:        {
1.23      deraadt  3040:                extern char etext[];
1.1       deraadt  3041: #ifdef KGDB
1.124     pk       3042:                int mask = ~PG_NC;      /* XXX chgkprot is busted */
1.1       deraadt  3043: #else
1.124     pk       3044:                int mask = ~(PG_W | PG_NC);
1.1       deraadt  3045: #endif
1.2       deraadt  3046:
1.23      deraadt  3047:                for (p = (caddr_t)trapbase; p < etext; p += NBPG)
1.55      pk       3048:                        setpte4(p, getpte4(p) & mask);
1.1       deraadt  3049:        }
1.107     pk       3050:        pmap_page_upload();
1.1       deraadt  3051: }
1.55      pk       3052: #endif
1.1       deraadt  3053:
1.55      pk       3054: #if defined(SUN4M)             /* Sun4M version of pmap_bootstrap */
                   3055: /*
                   3056:  * Bootstrap the system enough to run with VM enabled on a Sun4M machine.
                   3057:  *
                   3058:  * Switches from ROM to kernel page tables, and sets up initial mappings.
                   3059:  */
                   3060: static void
                   3061: pmap_bootstrap4m(void)
1.36      pk       3062: {
1.124     pk       3063:        int i, j;
1.122     pk       3064:        caddr_t p, q;
                   3065:        union ctxinfo *ci;
                   3066:        int reg, seg;
1.71      pk       3067:        unsigned int ctxtblsize;
1.79      pk       3068:        caddr_t pagetables_start, pagetables_end;
1.55      pk       3069:        extern char end[];
                   3070:        extern char etext[];
1.78      pk       3071:        extern caddr_t reserve_dumppages(caddr_t);
1.55      pk       3072: #ifdef DDB
                   3073:        extern char *esym;
                   3074: #endif
1.36      pk       3075:
1.55      pk       3076: #if defined(SUN4) || defined(SUN4C) /* setup 4M fn. ptrs for dual-arch kernel */
                   3077:        pmap_clear_modify_p     =       pmap_clear_modify4m;
                   3078:        pmap_clear_reference_p  =       pmap_clear_reference4m;
                   3079:        pmap_enter_p            =       pmap_enter4m;
                   3080:        pmap_extract_p          =       pmap_extract4m;
                   3081:        pmap_is_modified_p      =       pmap_is_modified4m;
                   3082:        pmap_is_referenced_p    =       pmap_is_referenced4m;
1.151     chs      3083:        pmap_kenter_pa_p        =       pmap_kenter_pa4m;
                   3084:        pmap_kenter_pgs_p       =       pmap_kenter_pgs4m;
                   3085:        pmap_kremove_p          =       pmap_kremove4m;
1.55      pk       3086:        pmap_page_protect_p     =       pmap_page_protect4m;
                   3087:        pmap_protect_p          =       pmap_protect4m;
                   3088:        pmap_changeprot_p       =       pmap_changeprot4m;
                   3089:        pmap_rmk_p              =       pmap_rmk4m;
                   3090:        pmap_rmu_p              =       pmap_rmu4m;
                   3091: #endif /* defined Sun4/Sun4c */
1.37      pk       3092:
1.36      pk       3093:        /*
1.152     pk       3094:         * p points to top of kernel mem
                   3095:         */
                   3096:        p = end;
                   3097: #ifdef DDB
                   3098:        /* Skip over DDB symbols */
                   3099:        if (esym != 0)
                   3100:                p = esym;
                   3101: #endif
                   3102:
                   3103:        /*
1.55      pk       3104:         * Initialize the kernel pmap.
                   3105:         */
                   3106:        /* kernel_pmap_store.pm_ctxnum = 0; */
1.87      pk       3107:        simple_lock_init(&kernel_pmap_store.pm_lock);
1.55      pk       3108:        kernel_pmap_store.pm_refcount = 1;
1.71      pk       3109:
                   3110:        /*
                   3111:         * Set up pm_regmap for kernel to point NUREG *below* the beginning
1.55      pk       3112:         * of kernel regmap storage. Since the kernel only uses regions
                   3113:         * above NUREG, we save storage space and can index kernel and
                   3114:         * user regions in the same way
1.36      pk       3115:         */
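                                /*
                                 * (Illustration: a kernel VA whose VA_VREG() is NUREG + n
                                 * indexes pm_regmap[NUREG + n], which resolves to
                                 * kernel_regmap_store[n]; user region numbers, all below
                                 * NUREG, are never used through the kernel pmap.)
                                 */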
1.55      pk       3116:        kernel_pmap_store.pm_regmap = &kernel_regmap_store[-NUREG];
                   3117:        bzero(kernel_regmap_store, NKREG * sizeof(struct regmap));
                   3118:        bzero(kernel_segmap_store, NKREG * NSEGRG * sizeof(struct segmap));
                   3119:        for (i = NKREG; --i >= 0;) {
                   3120:                kernel_regmap_store[i].rg_segmap =
                   3121:                        &kernel_segmap_store[i * NSEGRG];
                   3122:                kernel_regmap_store[i].rg_seg_ptps = NULL;
                   3123:                for (j = NSEGRG; --j >= 0;)
                   3124:                        kernel_segmap_store[i * NSEGRG + j].sg_pte = NULL;
                   3125:        }
1.38      pk       3126:
1.152     pk       3127:        /* Allocate kernel region pointer tables */
                   3128:        pmap_kernel()->pm_reg_ptps = (int **)(q = p);
                   3129:        p += ncpu * sizeof(int **);
                   3130:        bzero(q, (u_int)p - (u_int)q);
                   3131:
                   3132:        pmap_kernel()->pm_reg_ptps_pa = (int *)(q = p);
                   3133:        p += ncpu * sizeof(int *);
                   3134:        bzero(q, (u_int)p - (u_int)q);
1.77      pk       3135:
1.71      pk       3136:        /* Allocate context administration */
1.69      pk       3137:        pmap_kernel()->pm_ctx = cpuinfo.ctxinfo = ci = (union ctxinfo *)p;
1.55      pk       3138:        p += ncontext * sizeof *ci;
1.69      pk       3139:        bzero((caddr_t)ci, (u_int)p - (u_int)ci);
1.55      pk       3140:
1.77      pk       3141:
                   3142:        /*
                   3143:         * Set up the `constants' for the call to vm_init()
                   3144:         * in main().  All pages beginning at p (rounded up to
                   3145:         * the next whole page) and continuing through the number
                   3146:         * of available pages are free.
                   3147:         */
                   3148:        p = (caddr_t)(((u_int)p + NBPG - 1) & ~PGOFSET);
1.122     pk       3149:
1.77      pk       3150:        /*
1.122     pk       3151:         * Grab physical memory list.
1.77      pk       3152:         */
1.122     pk       3153:        get_phys_mem();
                   3154:
                   3155:        /* Allocate physical memory for pv_table[] */
1.124     pk       3156:        p += pv_table_map((paddr_t)p - KERNBASE, 0);
1.77      pk       3157:
                   3158:        /*
                   3159:         * Reserve memory for MMU pagetables. Some of these have severe
                   3160:         * alignment restrictions. We allocate in a sequence that
                   3161:         * minimizes alignment gaps.
                   3162:         */
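                                /*
                                 * (Each SRMMU table must start on a boundary equal to its own
                                 * size in bytes, which the roundup() calls below arrange; the
                                 * context table additionally gets at least 4K alignment, as
                                 * noted next.)
                                 */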
                   3163:
1.163     pk       3164:        pagetables_start = p;
1.55      pk       3165:
                   3166:        /*
1.77      pk       3167:         * Allocate context table.
1.71      pk       3168:         * To keep supersparc happy, minimum alignment is on a 4K boundary.
                   3169:         */
                   3170:        ctxtblsize = max(ncontext,1024) * sizeof(int);
                   3171:        cpuinfo.ctx_tbl = (int *)roundup((u_int)p, ctxtblsize);
1.128     pk       3172:        cpuinfo.ctx_tbl_pa = (paddr_t)cpuinfo.ctx_tbl - KERNBASE;
1.71      pk       3173:        p = (caddr_t)((u_int)cpuinfo.ctx_tbl + ctxtblsize);
                   3174:
                   3175:        /*
                   3176:         * Reserve memory for segment and page tables needed to map the entire
1.163     pk       3177:         * kernel. This takes (2K + NKREG * 16K) of space, but unfortunately
                   3178:         * is necessary since pmap_enter() *must* be able to enter a kernel
                   3179:         * mapping without delay.
1.55      pk       3180:         */
1.122     pk       3181:        p = (caddr_t) roundup((u_int)p, SRMMU_L1SIZE * sizeof(u_int));
                   3182:        qzero(p, SRMMU_L1SIZE * sizeof(u_int));
1.77      pk       3183:        kernel_regtable_store = (u_int *)p;
1.122     pk       3184:        p += SRMMU_L1SIZE * sizeof(u_int);
1.77      pk       3185:
1.122     pk       3186:        p = (caddr_t) roundup((u_int)p, SRMMU_L2SIZE * sizeof(u_int));
                   3187:        qzero(p, (SRMMU_L2SIZE * sizeof(u_int)) * NKREG);
1.77      pk       3188:        kernel_segtable_store = (u_int *)p;
1.122     pk       3189:        p += (SRMMU_L2SIZE * sizeof(u_int)) * NKREG;
1.77      pk       3190:
1.122     pk       3191:        p = (caddr_t) roundup((u_int)p, SRMMU_L3SIZE * sizeof(u_int));
                   3192:        /* zero it: all will be SRMMU_TEINVALID */
                   3193:        qzero(p, ((SRMMU_L3SIZE * sizeof(u_int)) * NKREG) * NSEGRG);
1.77      pk       3194:        kernel_pagtable_store = (u_int *)p;
1.122     pk       3195:        p += ((SRMMU_L3SIZE * sizeof(u_int)) * NKREG) * NSEGRG;
1.77      pk       3196:
1.163     pk       3197:        /* Round to next page and mark end of pre-wired kernel space */
1.77      pk       3198:        p = (caddr_t)(((u_int)p + NBPG - 1) & ~PGOFSET);
1.79      pk       3199:        pagetables_end = p;
1.163     pk       3200:        avail_start = (paddr_t)p - KERNBASE;
1.71      pk       3201:
                   3202:        /*
1.163     pk       3203:         * Now wire the region and segment tables of the kernel map.
1.71      pk       3204:         */
1.152     pk       3205:        pmap_kernel()->pm_reg_ptps[0] = (int *) kernel_regtable_store;
                   3206:        pmap_kernel()->pm_reg_ptps_pa[0] =
                   3207:                VA2PA((caddr_t)pmap_kernel()->pm_reg_ptps[0]);
1.71      pk       3208:
                   3209:        /* Install L1 table in context 0 */
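                                /*
                                 * (A table descriptor holds the physical address of the
                                 * next-level table, shifted by SRMMU_PPNPASHIFT, with its type
                                 * bits set to SRMMU_TEPTD; the kernel region and segment
                                 * descriptors installed below use the same encoding.)
                                 */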
1.79      pk       3210:        setpgt4m(&cpuinfo.ctx_tbl[0],
1.152     pk       3211:            (pmap_kernel()->pm_reg_ptps_pa[0] >> SRMMU_PPNPASHIFT) | SRMMU_TEPTD);
1.71      pk       3212:
1.96      pk       3213:        for (reg = 0; reg < NKREG; reg++) {
1.77      pk       3214:                struct regmap *rp;
1.71      pk       3215:                caddr_t kphyssegtbl;
                   3216:
                   3217:                /*
1.77      pk       3218:                 * Entering new region; install & build segtbl
1.71      pk       3219:                 */
                   3220:
1.96      pk       3221:                rp = &pmap_kernel()->pm_regmap[reg + VA_VREG(KERNBASE)];
1.71      pk       3222:
                   3223:                kphyssegtbl = (caddr_t)
1.96      pk       3224:                    &kernel_segtable_store[reg * SRMMU_L2SIZE];
1.71      pk       3225:
1.152     pk       3226:                setpgt4m(&pmap_kernel()->pm_reg_ptps[0][reg + VA_VREG(KERNBASE)],
                   3227:                         (VA2PA(kphyssegtbl) >> SRMMU_PPNPASHIFT) | SRMMU_TEPTD);
1.71      pk       3228:
                   3229:                rp->rg_seg_ptps = (int *)kphyssegtbl;
                   3230:
                   3231:                for (seg = 0; seg < NSEGRG; seg++) {
1.77      pk       3232:                        struct segmap *sp;
1.71      pk       3233:                        caddr_t kphyspagtbl;
                   3234:
                   3235:                        rp->rg_nsegmap++;
                   3236:
                   3237:                        sp = &rp->rg_segmap[seg];
                   3238:                        kphyspagtbl = (caddr_t)
                   3239:                            &kernel_pagtable_store
1.96      pk       3240:                                [((reg * NSEGRG) + seg) * SRMMU_L3SIZE];
1.71      pk       3241:
1.77      pk       3242:                        setpgt4m(&rp->rg_seg_ptps[seg],
                   3243:                                 (VA2PA(kphyspagtbl) >> SRMMU_PPNPASHIFT) |
                   3244:                                 SRMMU_TEPTD);
1.71      pk       3245:                        sp->sg_pte = (int *) kphyspagtbl;
                   3246:                }
                   3247:        }
                   3248:
                   3249:        /*
                   3250:         * Preserve the monitor ROM's reserved VM region, so that
                   3251:         * we can use L1-A or the monitor's debugger.
1.55      pk       3252:         */
1.77      pk       3253:        mmu_reservemon4m(&kernel_pmap_store);
1.55      pk       3254:
                   3255:        /*
1.77      pk       3256:         * Reserve virtual address space for two mappable MD pages
                   3257:         * for pmap_zero_page and pmap_copy_page, one MI page
                   3258:         * for /dev/mem, and some more for dumpsys().
1.55      pk       3259:         */
1.77      pk       3260:        q = p;
1.55      pk       3261:        vpage[0] = p, p += NBPG;
                   3262:        vpage[1] = p, p += NBPG;
                   3263:        vmmap = p, p += NBPG;
                   3264:        p = reserve_dumppages(p);
                   3265:
1.101     pk       3266:        /* Find PTE locations of vpage[] to optimize zero_fill() et al. */
                   3267:        for (i = 0; i < 2; i++) {
                   3268:                struct regmap *rp;
                   3269:                struct segmap *sp;
                   3270:                rp = &pmap_kernel()->pm_regmap[VA_VREG(vpage[i])];
                   3271:                sp = &rp->rg_segmap[VA_VSEG(vpage[i])];
                   3272:                vpage_pte[i] = &sp->sg_pte[VA_SUN4M_VPG(vpage[i])];
                   3273:        }
                   3274:
1.122     pk       3275:        /* Allocate virtual memory for pv_table[]. */
1.55      pk       3276:        pv_table = (struct pvlist *)p;
                   3277:        p += round_page(sizeof(struct pvlist) * atop(avail_end - avail_start));
                   3278:
1.124     pk       3279:        virtual_avail = (vaddr_t)p;
1.55      pk       3280:        virtual_end = VM_MAX_KERNEL_ADDRESS;
                   3281:
1.77      pk       3282:        p = q;                  /* retract to first free phys */
1.55      pk       3283:
1.69      pk       3284:        /*
                   3285:         * Set up the ctxinfo structures (freelist of contexts)
1.55      pk       3286:         */
                   3287:        ci->c_pmap = pmap_kernel();
                   3288:        ctx_freelist = ci + 1;
                   3289:        for (i = 1; i < ncontext; i++) {
                   3290:                ci++;
                   3291:                ci->c_nextfree = ci + 1;
                   3292:        }
                   3293:        ci->c_nextfree = NULL;
                   3294:        ctx_kick = 0;
                   3295:        ctx_kickdir = -1;
                   3296:
1.69      pk       3297:        /*
                   3298:         * Now map the kernel into our new set of page tables, then
1.55      pk       3299:         * (finally) switch over to our running page tables.
                   3300:         * We map from KERNBASE to p into context 0's page tables (and
                   3301:         * the kernel pmap).
                   3302:         */
                   3303: #ifdef DEBUG                   /* Sanity checks */
                   3304:        if ((u_int)p % NBPG != 0)
1.69      pk       3305:                panic("pmap_bootstrap4m: p misaligned?!?");
1.55      pk       3306:        if (KERNBASE % NBPRG != 0)
1.69      pk       3307:                panic("pmap_bootstrap4m: KERNBASE not region-aligned");
1.55      pk       3308: #endif
1.69      pk       3309:
                   3310:        for (q = (caddr_t) KERNBASE; q < p; q += NBPG) {
1.77      pk       3311:                struct regmap *rp;
                   3312:                struct segmap *sp;
                   3313:                int pte;
                   3314:
1.69      pk       3315:                /*
1.71      pk       3316:                 * Now install entry for current page.
1.69      pk       3317:                 */
1.77      pk       3318:                rp = &pmap_kernel()->pm_regmap[VA_VREG(q)];
                   3319:                sp = &rp->rg_segmap[VA_VSEG(q)];
                   3320:                sp->sg_npte++;
                   3321:
                   3322:                pte = ((int)q - KERNBASE) >> SRMMU_PPNPASHIFT;
1.122     pk       3323:                pte |= PPROT_N_RX | SRMMU_TEPTE;
                   3324:
                   3325:                /* Deal with the cacheable bit for pagetable memory */
                   3326:                if ((cpuinfo.flags & CPUFLG_CACHEPAGETABLES) != 0 ||
                   3327:                    q < pagetables_start || q >= pagetables_end)
                   3328:                        pte |= SRMMU_PG_C;
                   3329:
1.77      pk       3330:                /* write-protect kernel text (only non-text pages get PPROT_WRITE) */
                   3331:                if (q < (caddr_t) trapbase || q >= etext)
                   3332:                        pte |= PPROT_WRITE;
                   3333:
                   3334:                setpgt4m(&sp->sg_pte[VA_VPG(q)], pte);
1.69      pk       3335:        }
                   3336:
1.160     pk       3337:        if ((cpuinfo.flags & CPUFLG_CACHEPAGETABLES) == 0) {
                   3338:                /*
                   3339:                 * The page tables have been set up. Since we're still
                   3340:                 * running on the PROM's memory map, the memory we
                   3341:                 * allocated for our page tables might still be cached.
                   3342:                 * Flush it now, and don't touch it again until we
                   3343:                 * switch to our own tables (will be done immediately below).
                   3344:                 */
1.100     pk       3345:                pcache_flush(pagetables_start, (caddr_t)VA2PA(pagetables_start),
                   3346:                             pagetables_end - pagetables_start);
1.160     pk       3347:        }
1.100     pk       3348:
1.55      pk       3349:        /*
                   3350:         * Now switch to kernel pagetables (finally!)
                   3351:         */
1.69      pk       3352:        mmu_install_tables(&cpuinfo);
1.79      pk       3353:
1.107     pk       3354:        pmap_page_upload();
1.69      pk       3355: }
                   3356:
1.97      pk       3357: static u_long prom_ctxreg;
                   3358:
1.69      pk       3359: void
                   3360: mmu_install_tables(sc)
1.127     pk       3361:        struct cpu_info *sc;
1.69      pk       3362: {
                   3363:
                   3364: #ifdef DEBUG
                   3365:        printf("pmap_bootstrap: installing kernel page tables...");
                   3366: #endif
1.71      pk       3367:        setcontext4m(0);        /* paranoia? %%%: Make 0x3 a define! below */
1.69      pk       3368:
                   3369:        /* Enable MMU tablewalk caching, flush TLB */
                   3370:        if (sc->mmu_enable != 0)
                   3371:                sc->mmu_enable();
                   3372:
                   3373:        tlb_flush_all();
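                                /*
                                 * Remember the PROM's context table pointer so that
                                 * srmmu_restore_prom_ctx() below can put it back if needed.
                                 */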
1.97      pk       3374:        prom_ctxreg = lda(SRMMU_CXTPTR, ASI_SRMMU);
1.69      pk       3375:
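                                /*
                                 * Point the MMU at our new context table: its physical
                                 * address shifted by SRMMU_PPNPASHIFT, with the low two bits
                                 * (the `0x3' noted above) cleared.
                                 */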
                   3376:        sta(SRMMU_CXTPTR, ASI_SRMMU,
1.128     pk       3377:                (sc->ctx_tbl_pa >> SRMMU_PPNPASHIFT) & ~0x3);
1.69      pk       3378:
                   3379:        tlb_flush_all();
                   3380:
                   3381: #ifdef DEBUG
                   3382:        printf("done.\n");
                   3383: #endif
                   3384: }
1.55      pk       3385:
1.97      pk       3386: void srmmu_restore_prom_ctx __P((void));
                   3387:
                   3388: void
                   3389: srmmu_restore_prom_ctx()
                   3390: {
                   3391:        tlb_flush_all();
                   3392:        sta(SRMMU_CXTPTR, ASI_SRMMU, prom_ctxreg);
                   3393:        tlb_flush_all();
1.164     thorpej  3394: }
                   3395:
                   3396: /*
                   3397:  * Globalize the boot cpu's cpu_info structure.
                   3398:  */
                   3399: void
                   3400: pmap_globalize_boot_cpuinfo(cpi)
                   3401:        struct cpu_info *cpi;
                   3402: {
                   3403:        vaddr_t va;
                   3404:        vsize_t off;
                   3405:
1.165   ! pk       3406:        off = 0;
        !          3407:        for (va = (vaddr_t)cpi; off < sizeof(*cpi); va += NBPG, off += NBPG) {
1.164     thorpej  3408:                paddr_t pa = VA2PA((caddr_t)CPUINFO_VA + off);
                   3409:                pmap_enter(pmap_kernel(), va, pa,
                   3410:                    VM_PROT_READ|VM_PROT_WRITE,
                   3411:                    VM_PROT_READ|VM_PROT_WRITE|PMAP_WIRED);
                   3412:        }
1.97      pk       3413: }
                   3414:
1.69      pk       3415: /*
1.128     pk       3416:  * Allocate per-CPU page tables. One region, segment and page table
                   3417:  * is needed to map CPUINFO_VA to different physical addresses on
                   3418:  * each CPU. Since the kernel region and segment tables are all
                   3419:  * pre-wired (in bootstrap() above) and we also assume that the
                   3420:  * first segment (256K) of kernel space is fully populated with
                   3421:  * pages from the start, these per-CPU tables will never need
                   3422:  * to be updated when mapping kernel virtual memory.
                   3423:  *
1.69      pk       3424:  * Note: this routine is called in the context of the boot CPU
                   3425:  * during autoconfig.
                   3426:  */
                   3427: void
                   3428: pmap_alloc_cpu(sc)
1.127     pk       3429:        struct cpu_info *sc;
1.69      pk       3430: {
1.128     pk       3431:        vaddr_t va;
                   3432:        u_int *ctxtable, *regtable, *segtable, *pagtable;
1.72      pk       3433:        int vr, vs, vpg;
                   3434:        struct regmap *rp;
                   3435:        struct segmap *sp;
1.128     pk       3436:        int ctxsize;
                   3437:        struct pglist mlist;
                   3438:        vm_page_t m;
                   3439:        int cachebit;
                   3440:
1.129     pk       3441:        cachebit = (sc->flags & CPUFLG_CACHEPAGETABLES) != 0;
1.128     pk       3442:
                   3443:        /*
                   3444:         * Allocate properly aligned and contiguous physical memory
                   3445:         * for the context table.
                   3446:         */
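                                /*
                                 * (The hardware context table must be aligned on a boundary
                                 * equal to its size, hence the `ctxsize' alignment argument
                                 * passed to uvm_pglistalloc() below.)
                                 */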
                   3447:        TAILQ_INIT(&mlist);
                   3448:        ctxsize = sc->mmu_ncontext * sizeof(int);
                   3449:        if (uvm_pglistalloc(ctxsize, vm_first_phys, vm_first_phys+vm_num_phys,
                   3450:                            ctxsize, 0, &mlist, 1, 0) != 0)
                   3451:                panic("pmap_alloc_cpu: no memory");
                   3452:
                   3453:        va = uvm_km_valloc(kernel_map, ctxsize);
                   3454:        if (va == 0)
                   3455:                panic("pmap_alloc_cpu: no memory");
                   3456:
                   3457:        ctxtable = (int *)va;
                   3458:
                   3459:        m = TAILQ_FIRST(&mlist);
                   3460:        sc->ctx_tbl_pa = VM_PAGE_TO_PHYS(m);
                   3461:
                   3462:        /* Map the pages */
                   3463:        for (; m != NULL; m = TAILQ_NEXT(m,pageq)) {
                   3464:                paddr_t pa = VM_PAGE_TO_PHYS(m);
1.129     pk       3465:                pmap_enter(pmap_kernel(), va, pa | (cachebit ? 0 : PMAP_NC),
1.153     thorpej  3466:                    VM_PROT_READ|VM_PROT_WRITE,
                   3467:                    VM_PROT_READ|VM_PROT_WRITE|PMAP_WIRED);
1.128     pk       3468:                va += NBPG;
                   3469:        }
1.72      pk       3470:
1.128     pk       3471:        /*
                   3472:         * Get memory for a region, segment and page table.
                   3473:         */
                   3474:        va = uvm_km_alloc(kernel_map, NBPG);
                   3475:        if (va == 0)
                   3476:                panic("pmap_alloc_cpu: no memory");
                   3477:        if (cachebit == 0)
                   3478:                kvm_uncache((caddr_t)va, 1);
                   3479:
                   3480:        regtable = (u_int *)va;
                   3481:        segtable = regtable + SRMMU_L1SIZE;
                   3482:        pagtable = segtable + SRMMU_L2SIZE;
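                                /*
                                 * (Region, segment and page table together take
                                 * (SRMMU_L1SIZE + SRMMU_L2SIZE + SRMMU_L3SIZE) * sizeof(int)
                                 * bytes and retain their alignments, so all three fit in the
                                 * single page allocated above.)
                                 */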
1.72      pk       3483:
1.133     pk       3484:        /*
1.152     pk       3485:         * Store the region table pointer (and its corresponding physical
                   3486:         * address) in the CPU's slot in the kernel pmap region table
                   3487:         * pointer table.
1.133     pk       3488:         */
1.152     pk       3489:        pmap_kernel()->pm_reg_ptps[sc->cpu_no] = regtable;
                   3490:        pmap_kernel()->pm_reg_ptps_pa[sc->cpu_no] = VA2PA((caddr_t)regtable);
1.133     pk       3491:
1.72      pk       3492:        vr = VA_VREG(CPUINFO_VA);
                   3493:        vs = VA_VSEG(CPUINFO_VA);
                   3494:        vpg = VA_VPG(CPUINFO_VA);
                   3495:        rp = &pmap_kernel()->pm_regmap[vr];
                   3496:        sp = &rp->rg_segmap[vs];
                   3497:
                   3498:        /*
                   3499:         * Copy page tables, then modify entry for CPUINFO_VA so that
                   3500:         * it points at the per-CPU pages.
                   3501:         */
1.128     pk       3502:        setpgt4m(&ctxtable[0],
1.152     pk       3503:                (VA2PA((caddr_t)regtable) >> SRMMU_PPNPASHIFT) | SRMMU_TEPTD);
1.128     pk       3504:
1.152     pk       3505:        qcopy(pmap_kernel()->pm_reg_ptps[0], regtable, SRMMU_L1SIZE * sizeof(int));
1.128     pk       3506:        setpgt4m(&regtable[vr],
                   3507:                (VA2PA((caddr_t)segtable) >> SRMMU_PPNPASHIFT) | SRMMU_TEPTD);
                   3508:
                   3509:        qcopy(rp->rg_seg_ptps, segtable, SRMMU_L2SIZE * sizeof(int));
                   3510:        setpgt4m(&segtable[vs],
                   3511:                (VA2PA((caddr_t)pagtable) >> SRMMU_PPNPASHIFT) | SRMMU_TEPTD);
                   3512:
                   3513:        qcopy(sp->sg_pte, pagtable, SRMMU_L3SIZE * sizeof(int));
                   3514:        setpgt4m(&pagtable[vpg],
                   3515:                (VA2PA((caddr_t)sc) >> SRMMU_PPNPASHIFT) |
1.129     pk       3516:                (SRMMU_TEPTE | PPROT_N_RWX | SRMMU_PG_C));
1.72      pk       3517:
                   3518:        sc->ctx_tbl = ctxtable;
1.55      pk       3519: }
1.97      pk       3520: #endif /* SUN4M */
1.55      pk       3521:
1.69      pk       3522:
1.55      pk       3523: void
                   3524: pmap_init()
                   3525: {
1.152     pk       3526:        u_int sizeof_pmap;
1.55      pk       3527:
                   3528:        if (PAGE_SIZE != NBPG)
                   3529:                panic("pmap_init: CLSIZE!=1");
                   3530:
1.122     pk       3531:        /* Map pv_table[] */
                   3532:        (void)pv_table_map(avail_start, 1);
1.55      pk       3533:
1.38      pk       3534:        vm_first_phys = avail_start;
                   3535:        vm_num_phys = avail_end - avail_start;
1.121     pk       3536:
1.122     pk       3537:        /* Set up a pool for additional pvlist structures */
                   3538:        pool_init(&pv_pool, sizeof(struct pvlist), 0, 0, 0, "pvtable", 0,
1.121     pk       3539:                  NULL, NULL, 0);
                   3540:
1.152     pk       3541:        /*
                   3542:         * Set up a pool for pmap structures.
                   3543:         * The pool size includes space for an array of per-cpu
                   3544:         * region table pointers & physical addresses
                   3545:         */
                   3546:        sizeof_pmap = ALIGN(sizeof(struct pmap)) +
                   3547:                      ncpu * sizeof(int *) +            /* pm_reg_ptps */
                   3548:                      ncpu * sizeof(int);               /* pm_reg_ptps_pa */
                   3549:        pool_init(&pmap_pmap_pool, sizeof_pmap, 0, 0, 0, "pmappl",
1.134     thorpej  3550:                  0, pool_page_alloc_nointr, pool_page_free_nointr, M_VMPMAP);
                   3551:
1.121     pk       3552: #if defined(SUN4M)
                   3553:        if (CPU_ISSUN4M) {
                   3554:                /*
                   3555:                 * The SRMMU only ever needs chunks in one of two sizes:
                   3556:                 * 1024 (for region level tables) and 256 (for segment
                   3557:                 * and page level tables).
                   3558:                 */
                   3559:                int n;
                   3560:
                   3561:                n = SRMMU_L1SIZE * sizeof(int);
                   3562:                pool_init(&L1_pool, n, n, 0, 0, "L1 pagetable", 0,
                   3563:                          pgt_page_alloc, pgt_page_free, 0);
                   3564:
                   3565:                n = SRMMU_L2SIZE * sizeof(int);
                   3566:                pool_init(&L23_pool, n, n, 0, 0, "L2/L3 pagetable", 0,
                   3567:                          pgt_page_alloc, pgt_page_free, 0);
                   3568:        }
                   3569: #endif
1.36      pk       3570: }
                   3571:
1.1       deraadt  3572:
                   3573: /*
                   3574:  * Map physical addresses into kernel VM.
                   3575:  */
1.124     pk       3576: vaddr_t
1.1       deraadt  3577: pmap_map(va, pa, endpa, prot)
1.124     pk       3578:        vaddr_t va;
                   3579:        paddr_t pa, endpa;
                   3580:        int prot;
1.1       deraadt  3581: {
1.124     pk       3582:        int pgsize = PAGE_SIZE;
1.1       deraadt  3583:
                   3584:        while (pa < endpa) {
1.153     thorpej  3585:                pmap_enter(pmap_kernel(), va, pa, prot, PMAP_WIRED);
1.1       deraadt  3586:                va += pgsize;
                   3587:                pa += pgsize;
                   3588:        }
                   3589:        return (va);
                   3590: }
                   3591:
                   3592: /*
                   3593:  * Create and return a physical map.
                   3594:  *
                   3595:  * If size is nonzero, the map is useless. (ick)
                   3596:  */
                   3597: struct pmap *
1.151     chs      3598: pmap_create()
1.1       deraadt  3599: {
1.124     pk       3600:        struct pmap *pm;
1.152     pk       3601:        u_long addr;
                   3602:        void *urp;
1.1       deraadt  3603:
1.134     thorpej  3604:        pm = pool_get(&pmap_pmap_pool, PR_WAITOK);
1.1       deraadt  3605: #ifdef DEBUG
                   3606:        if (pmapdebug & PDB_CREATE)
1.66      christos 3607:                printf("pmap_create: created %p\n", pm);
1.1       deraadt  3608: #endif
1.134     thorpej  3609:        bzero(pm, sizeof *pm);
1.1       deraadt  3610:
1.152     pk       3611:        /*
                   3612:         * `pmap_pmap_pool' entries include space for the per-CPU
                   3613:         * region table pointer arrays.
                   3614:         */
                   3615:        addr = (u_long)pm + ALIGN(sizeof(struct pmap));
                   3616:        pm->pm_reg_ptps = (int **)addr;
                   3617:        addr += ncpu * sizeof(int *);
                   3618:        pm->pm_reg_ptps_pa = (int *)addr;
                   3619:
                   3620:        pm->pm_regstore = urp = malloc(NUREG * sizeof(struct regmap),
                   3621:                                        M_VMPMAP, M_WAITOK);
                   3622:        qzero((caddr_t)urp, NUREG * sizeof(struct regmap));
1.13      pk       3623:
1.1       deraadt  3624:        /* pm->pm_ctx = NULL; */
                   3625:        simple_lock_init(&pm->pm_lock);
                   3626:        pm->pm_refcount = 1;
1.43      pk       3627:        pm->pm_regmap = urp;
1.55      pk       3628:
                   3629:        if (CPU_ISSUN4OR4C) {
                   3630:                TAILQ_INIT(&pm->pm_seglist);
1.69      pk       3631: #if defined(SUN4_MMU3L)
1.55      pk       3632:                TAILQ_INIT(&pm->pm_reglist);
1.69      pk       3633:                if (HASSUN4_MMU3L) {
                   3634:                        int i;
                   3635:                        for (i = NUREG; --i >= 0;)
                   3636:                                pm->pm_regmap[i].rg_smeg = reginval;
                   3637:                }
1.43      pk       3638: #endif
1.100     pk       3639:                pm->pm_gap_end = VA_VREG(VM_MAXUSER_ADDRESS);
1.55      pk       3640:        }
                   3641: #if defined(SUN4M)
                   3642:        else {
1.152     pk       3643:                int i, n;
1.79      pk       3644:
1.55      pk       3645:                /*
                   3646:                 * We must allocate and initialize hardware-readable (MMU)
                   3647:                 * pagetables. We must also map the kernel regions into this
                   3648:                 * pmap's pagetables, so that we can access the kernel from
1.89      pk       3649:                 * this user context.
1.55      pk       3650:                 *
                   3651:                 */
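                                        /*
                                         * (One pass per CPU; without MULTIPROCESSOR the block
                                         * below runs exactly once with n == 0.)
                                         */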
1.152     pk       3652: #if defined(MULTIPROCESSOR)
                   3653:                for (n = 0; n < ncpu; n++)
                   3654: #else
                   3655:                n = 0;
                   3656: #endif
                   3657:                {
                   3658:                        int *upt, *kpt;
                   3659:
                   3660:                        upt = pool_get(&L1_pool, PR_WAITOK);
                   3661:                        pm->pm_reg_ptps[n] = upt;
                   3662:                        pm->pm_reg_ptps_pa[n] = VA2PA((char *)upt);
                   3663:
                   3664:                        /* Invalidate user space regions */
                   3665:                        for (i = 0; i < NUREG; i++)
                   3666:                                setpgt4m(upt++, SRMMU_TEINVALID);
                   3667:
                   3668:                        /* Copy kernel regions */
                   3669:                        kpt = &pmap_kernel()->pm_reg_ptps[n][VA_VREG(KERNBASE)];
                   3670:                        for (i = 0; i < NKREG; i++) {
                   3671:                                setpgt4m(upt++, kpt[i]);
                   3672:                        }
1.79      pk       3673:                }
1.55      pk       3674:        }
                   3675: #endif
                   3676:
1.152     pk       3677:        return (pm);
1.1       deraadt  3678: }
                   3679:
                   3680: /*
                   3681:  * Retire the given pmap from service.
                   3682:  * Should only be called if the map contains no valid mappings.
                   3683:  */
                   3684: void
                   3685: pmap_destroy(pm)
1.124     pk       3686:        struct pmap *pm;
1.1       deraadt  3687: {
                   3688:        int count;
                   3689:
                   3690:        if (pm == NULL)
                   3691:                return;
                   3692: #ifdef DEBUG
                   3693:        if (pmapdebug & PDB_DESTROY)
1.66      christos 3694:                printf("pmap_destroy(%p)\n", pm);
1.1       deraadt  3695: #endif
                   3696:        simple_lock(&pm->pm_lock);
                   3697:        count = --pm->pm_refcount;
                   3698:        simple_unlock(&pm->pm_lock);
                   3699:        if (count == 0) {
                   3700:                pmap_release(pm);
1.134     thorpej  3701:                pool_put(&pmap_pmap_pool, pm);
1.1       deraadt  3702:        }
                   3703: }
                   3704:
                   3705: /*
                   3706:  * Release any resources held by the given physical map.
                   3707:  * Called when a pmap initialized by pmap_pinit is being released.
                   3708:  */
                   3709: void
                   3710: pmap_release(pm)
1.124     pk       3711:        struct pmap *pm;
1.1       deraadt  3712: {
1.124     pk       3713:        union ctxinfo *c;
                   3714:        int s = splpmap();      /* paranoia */
1.1       deraadt  3715:
                   3716: #ifdef DEBUG
                   3717:        if (pmapdebug & PDB_DESTROY)
1.66      christos 3718:                printf("pmap_release(%p)\n", pm);
1.1       deraadt  3719: #endif
1.55      pk       3720:
                   3721:        if (CPU_ISSUN4OR4C) {
1.69      pk       3722: #if defined(SUN4_MMU3L)
1.55      pk       3723:                if (pm->pm_reglist.tqh_first)
                   3724:                        panic("pmap_release: region list not empty");
1.43      pk       3725: #endif
1.55      pk       3726:                if (pm->pm_seglist.tqh_first)
                   3727:                        panic("pmap_release: segment list not empty");
                   3728:
                   3729:                if ((c = pm->pm_ctx) != NULL) {
                   3730:                        if (pm->pm_ctxnum == 0)
                   3731:                                panic("pmap_release: releasing kernel");
                   3732:                        ctx_free(pm);
                   3733:                }
1.1       deraadt  3734:        }
1.102     pk       3735:
                   3736: #if defined(SUN4M)
                   3737:        if (CPU_ISSUN4M) {
1.152     pk       3738:                int n;
1.102     pk       3739:                if ((c = pm->pm_ctx) != NULL) {
                   3740:                        if (pm->pm_ctxnum == 0)
                   3741:                                panic("pmap_release: releasing kernel");
                   3742:                        ctx_free(pm);
                   3743:                }
1.152     pk       3744: #if defined(MULTIPROCESSOR)
                   3745:                for (n = 0; n < ncpu; n++)
                   3746: #else
                   3747:                n = 0;
                   3748: #endif
                   3749:                {
1.157     pk       3750:                        int *pt = pm->pm_reg_ptps[n];
1.152     pk       3751:                        pm->pm_reg_ptps[n] = NULL;
                   3752:                        pm->pm_reg_ptps_pa[n] = 0;
1.157     pk       3753:                        pool_put(&L1_pool, pt);
1.152     pk       3754:                }
1.102     pk       3755:        }
                   3756: #endif
1.1       deraadt  3757:        splx(s);
1.55      pk       3758:
1.43      pk       3759: #ifdef DEBUG
1.55      pk       3760: if (pmapdebug) {
1.43      pk       3761:        int vs, vr;
                   3762:        for (vr = 0; vr < NUREG; vr++) {
                   3763:                struct regmap *rp = &pm->pm_regmap[vr];
                   3764:                if (rp->rg_nsegmap != 0)
1.66      christos 3765:                        printf("pmap_release: %d segments remain in "
1.43      pk       3766:                                "region %d\n", rp->rg_nsegmap, vr);
                   3767:                if (rp->rg_segmap != NULL) {
1.66      christos 3768:                        printf("pmap_release: segments still "
1.43      pk       3769:                                "allocated in region %d\n", vr);
                   3770:                        for (vs = 0; vs < NSEGRG; vs++) {
                   3771:                                struct segmap *sp = &rp->rg_segmap[vs];
                   3772:                                if (sp->sg_npte != 0)
1.66      christos 3773:                                        printf("pmap_release: %d ptes "
1.43      pk       3774:                                             "remain in segment %d\n",
                   3775:                                                sp->sg_npte, vs);
                   3776:                                if (sp->sg_pte != NULL) {
1.66      christos 3777:                                        printf("pmap_release: ptes still "
1.43      pk       3778:                                             "allocated in segment %d\n", vs);
                   3779:                                }
                   3780:                        }
                   3781:                }
                   3782:        }
                   3783: }
                   3784: #endif
1.102     pk       3785:
1.43      pk       3786:        if (pm->pm_regstore)
1.49      pk       3787:                free(pm->pm_regstore, M_VMPMAP);
1.1       deraadt  3788: }
                   3789:
                   3790: /*
                   3791:  * Add a reference to the given pmap.
                   3792:  */
                   3793: void
                   3794: pmap_reference(pm)
                   3795:        struct pmap *pm;
                   3796: {
                   3797:
                   3798:        if (pm != NULL) {
                   3799:                simple_lock(&pm->pm_lock);
                   3800:                pm->pm_refcount++;
                   3801:                simple_unlock(&pm->pm_lock);
                   3802:        }
                   3803: }
                   3804:
                   3805: /*
                   3806:  * Remove the given range of mapping entries.
                   3807:  * The starting and ending addresses are already rounded to pages.
                   3808:  * Sheer lunacy: pmap_remove is often asked to remove nonexistent
                   3809:  * mappings.
                   3810:  */
                   3811: void
                   3812: pmap_remove(pm, va, endva)
1.124     pk       3813:        struct pmap *pm;
                   3814:        vaddr_t va, endva;
1.1       deraadt  3815: {
1.124     pk       3816:        vaddr_t nva;
                   3817:        int vr, vs, s, ctx;
                   3818:        void (*rm)(struct pmap *, vaddr_t, vaddr_t, int, int);
1.1       deraadt  3819:
                   3820:        if (pm == NULL)
                   3821:                return;
1.13      pk       3822:
1.1       deraadt  3823: #ifdef DEBUG
                   3824:        if (pmapdebug & PDB_REMOVE)
1.91      fair     3825:                printf("pmap_remove(%p, 0x%lx, 0x%lx)\n", pm, va, endva);
1.1       deraadt  3826: #endif
                   3827:
1.42      mycroft  3828:        if (pm == pmap_kernel()) {
1.1       deraadt  3829:                /*
                   3830:                 * Removing from kernel address space.
                   3831:                 */
                   3832:                rm = pmap_rmk;
                   3833:        } else {
                   3834:                /*
                   3835:                 * Removing from user address space.
                   3836:                 */
                   3837:                write_user_windows();
                   3838:                rm = pmap_rmu;
                   3839:        }
                   3840:
                   3841:        ctx = getcontext();
                   3842:        s = splpmap();          /* XXX conservative */
                   3843:        simple_lock(&pm->pm_lock);
                   3844:        for (; va < endva; va = nva) {
                   3845:                /* do one virtual segment at a time */
1.43      pk       3846:                vr = VA_VREG(va);
                   3847:                vs = VA_VSEG(va);
                   3848:                nva = VSTOVA(vr, vs + 1);
1.1       deraadt  3849:                if (nva == 0 || nva > endva)
                   3850:                        nva = endva;
1.76      pk       3851:                if (pm->pm_regmap[vr].rg_nsegmap != 0)
                   3852:                        (*rm)(pm, va, nva, vr, vs);
1.1       deraadt  3853:        }
                   3854:        simple_unlock(&pm->pm_lock);
                   3855:        splx(s);
                   3856:        setcontext(ctx);
                   3857: }
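/*
 * Illustrative sketch (not compiled): callers of pmap_remove() above are
 * expected to pass a page-aligned [va, endva) range; the segment loop then
 * simply skips segments with no mappings, which is why removing ranges that
 * were never mapped is harmless.  The helper and address below are
 * hypothetical.
 */
#if 0
void
example_remove_one_page(va)
	vaddr_t va;
{
	va &= ~(NBPG - 1);	/* round to a page, as pmap_remove() assumes */
	pmap_remove(pmap_kernel(), va, va + NBPG);
}
#endif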
                   3858:
                   3859: /*
                   3860:  * The following magic number was chosen because:
                   3861:  *     1. It is the same amount of work to cache_flush_page 4 pages
                   3862:  *        as to cache_flush_segment 1 segment (so at 4 the cost of
                   3863:  *        flush is the same).
                    3864:  *     2. Flushing extra pages is bad (it discards cache lines that are still useful).
                   3865:  *     3. The current code, which malloc()s 5 pages for each process
                   3866:  *        for a user vmspace/pmap, almost never touches all 5 of those
                   3867:  *        pages.
                   3868:  */
1.13      pk       3869: #if 0
                   3870: #define        PMAP_RMK_MAGIC  (cacheinfo.c_hwflush?5:64)      /* if > magic, use cache_flush_segment */
                   3871: #else
1.1       deraadt  3872: #define        PMAP_RMK_MAGIC  5       /* if > magic, use cache_flush_segment */
1.13      pk       3873: #endif
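/*
 * Illustrative sketch (not compiled) of how the magic number is applied in
 * pmap_rmk()/pmap_rmu() below: once the range spans more pages than the
 * threshold, a single segment flush is assumed to be cheaper than flushing
 * each page in turn.  The helper name is made up.
 */
#if 0
static int
example_use_segment_flush(npg)
	int npg;
{
	/* npg = 2,  PMAP_RMK_MAGIC = 5  -> 0: flush 2 pages individually */
	/* npg = 20, PMAP_RMK_MAGIC = 5  -> 1: one cache_flush_segment()  */
	return (npg > PMAP_RMK_MAGIC);
}
#endif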
1.1       deraadt  3874:
                   3875: /*
                   3876:  * Remove a range contained within a single segment.
                   3877:  * These are egregiously complicated routines.
                   3878:  */
                   3879:
1.55      pk       3880: #if defined(SUN4) || defined(SUN4C)
                   3881:
1.43      pk       3882: /* remove from kernel */
1.55      pk       3883: /*static*/ void
                   3884: pmap_rmk4_4c(pm, va, endva, vr, vs)
1.124     pk       3885:        struct pmap *pm;
                   3886:        vaddr_t va, endva;
                   3887:        int vr, vs;
                   3888: {
                   3889:        int i, tpte, perpage, npg;
                   3890:        struct pvlist *pv;
                   3891:        int nleft, pmeg;
1.43      pk       3892:        struct regmap *rp;
                   3893:        struct segmap *sp;
                   3894:
                   3895:        rp = &pm->pm_regmap[vr];
                   3896:        sp = &rp->rg_segmap[vs];
                   3897:
                   3898:        if (rp->rg_nsegmap == 0)
                   3899:                return;
                   3900:
                   3901: #ifdef DEBUG
                   3902:        if (rp->rg_segmap == NULL)
                   3903:                panic("pmap_rmk: no segments");
                   3904: #endif
                   3905:
                   3906:        if ((nleft = sp->sg_npte) == 0)
                   3907:                return;
                   3908:
                   3909:        pmeg = sp->sg_pmeg;
1.1       deraadt  3910:
                   3911: #ifdef DEBUG
                   3912:        if (pmeg == seginval)
                   3913:                panic("pmap_rmk: not loaded");
                   3914:        if (pm->pm_ctx == NULL)
                   3915:                panic("pmap_rmk: lost context");
                   3916: #endif
                   3917:
1.71      pk       3918:        setcontext4(0);
1.1       deraadt  3919:        /* decide how to flush cache */
                   3920:        npg = (endva - va) >> PGSHIFT;
                   3921:        if (npg > PMAP_RMK_MAGIC) {
                   3922:                /* flush the whole segment */
                   3923:                perpage = 0;
1.69      pk       3924:                cache_flush_segment(vr, vs);
1.1       deraadt  3925:        } else {
                   3926:                /* flush each page individually; some never need flushing */
1.69      pk       3927:                perpage = (CACHEINFO.c_vactype != VAC_NONE);
1.1       deraadt  3928:        }
                   3929:        while (va < endva) {
1.55      pk       3930:                tpte = getpte4(va);
1.1       deraadt  3931:                if ((tpte & PG_V) == 0) {
1.63      pk       3932:                        va += NBPG;
1.1       deraadt  3933:                        continue;
                   3934:                }
1.35      pk       3935:                if ((tpte & PG_TYPE) == PG_OBMEM) {
                   3936:                        /* if cacheable, flush page as needed */
                   3937:                        if (perpage && (tpte & PG_NC) == 0)
1.1       deraadt  3938:                                cache_flush_page(va);
1.60      pk       3939:                        i = ptoa(tpte & PG_PFNUM);
1.1       deraadt  3940:                        if (managed(i)) {
                   3941:                                pv = pvhead(i);
1.55      pk       3942:                                pv->pv_flags |= MR4_4C(tpte);
1.58      pk       3943:                                pv_unlink4_4c(pv, pm, va);
1.1       deraadt  3944:                        }
                   3945:                }
                   3946:                nleft--;
1.131     pk       3947: #ifdef DIAGNOSTIC
                   3948:                if (nleft < 0)
                   3949:                        panic("pmap_rmk: too many PTEs in segment; "
                   3950:                              "va 0x%lx; endva 0x%lx", va, endva);
                   3951: #endif
1.55      pk       3952:                setpte4(va, 0);
1.1       deraadt  3953:                va += NBPG;
                   3954:        }
                   3955:
                   3956:        /*
                   3957:         * If the segment is all gone, remove it from everyone and
                   3958:         * free the MMU entry.
                   3959:         */
1.43      pk       3960:        if ((sp->sg_npte = nleft) == 0) {
                   3961:                va = VSTOVA(vr,vs);             /* retract */
1.69      pk       3962: #if defined(SUN4_MMU3L)
                   3963:                if (HASSUN4_MMU3L)
1.1       deraadt  3964:                        setsegmap(va, seginval);
1.43      pk       3965:                else
                   3966: #endif
                   3967:                        for (i = ncontext; --i >= 0;) {
1.71      pk       3968:                                setcontext4(i);
1.43      pk       3969:                                setsegmap(va, seginval);
                   3970:                        }
                   3971:                me_free(pm, pmeg);
                   3972:                if (--rp->rg_nsegmap == 0) {
1.69      pk       3973: #if defined(SUN4_MMU3L)
                   3974:                        if (HASSUN4_MMU3L) {
1.43      pk       3975:                                for (i = ncontext; --i >= 0;) {
1.71      pk       3976:                                        setcontext4(i);
1.43      pk       3977:                                        setregmap(va, reginval);
                   3978:                                }
                   3979:                                /* note: context is 0 */
                   3980:                                region_free(pm, rp->rg_smeg);
                   3981:                        }
                   3982: #endif
1.1       deraadt  3983:                }
                   3984:        }
                   3985: }
                   3986:
1.55      pk       3987: #endif /* sun4, sun4c */
1.1       deraadt  3988:
1.55      pk       3989: #if defined(SUN4M)             /* 4M version of pmap_rmk */
                    3990: /* remove from kernel (4m) */
                   3991: /*static*/ void
                   3992: pmap_rmk4m(pm, va, endva, vr, vs)
1.124     pk       3993:        struct pmap *pm;
                   3994:        vaddr_t va, endva;
                   3995:        int vr, vs;
                   3996: {
                   3997:        int i, tpte, perpage, npg;
                   3998:        struct pvlist *pv;
                   3999:        int nleft;
1.43      pk       4000:        struct regmap *rp;
                   4001:        struct segmap *sp;
                   4002:
                   4003:        rp = &pm->pm_regmap[vr];
1.55      pk       4004:        sp = &rp->rg_segmap[vs];
                   4005:
1.43      pk       4006:        if (rp->rg_nsegmap == 0)
                   4007:                return;
1.55      pk       4008:
                   4009: #ifdef DEBUG
1.43      pk       4010:        if (rp->rg_segmap == NULL)
1.55      pk       4011:                panic("pmap_rmk: no segments");
                   4012: #endif
1.43      pk       4013:
                   4014:        if ((nleft = sp->sg_npte) == 0)
                   4015:                return;
                   4016:
1.161     pk       4017: #ifdef DIAGNOSTIC
1.55      pk       4018:        if (sp->sg_pte == NULL || rp->rg_seg_ptps == NULL)
                   4019:                panic("pmap_rmk: segment/region does not exist");
                   4020:        if (pm->pm_ctx == NULL)
                   4021:                panic("pmap_rmk: lost context");
                   4022: #endif
1.43      pk       4023:
1.71      pk       4024:        setcontext4m(0);
1.55      pk       4025:        /* decide how to flush cache */
                   4026:        npg = (endva - va) >> PGSHIFT;
                   4027:        if (npg > PMAP_RMK_MAGIC) {
                   4028:                /* flush the whole segment */
                   4029:                perpage = 0;
1.69      pk       4030:                if (CACHEINFO.c_vactype != VAC_NONE)
1.55      pk       4031:                        cache_flush_segment(vr, vs);
                   4032:        } else {
                   4033:                /* flush each page individually; some never need flushing */
1.69      pk       4034:                perpage = (CACHEINFO.c_vactype != VAC_NONE);
1.55      pk       4035:        }
                   4036:        while (va < endva) {
1.72      pk       4037:                tpte = sp->sg_pte[VA_SUN4M_VPG(va)];
1.55      pk       4038:                if ((tpte & SRMMU_TETYPE) != SRMMU_TEPTE) {
1.72      pk       4039: #ifdef DEBUG
                   4040:                        if ((pmapdebug & PDB_SANITYCHK) &&
                   4041:                            (getpte4m(va) & SRMMU_TETYPE) == SRMMU_TEPTE)
1.91      fair     4042:                                panic("pmap_rmk: Spurious kTLB entry for 0x%lx",
1.81      pk       4043:                                      va);
1.72      pk       4044: #endif
1.61      pk       4045:                        va += NBPG;
1.55      pk       4046:                        continue;
                   4047:                }
                   4048:                if ((tpte & SRMMU_PGTYPE) == PG_SUN4M_OBMEM) {
                   4049:                        /* if cacheable, flush page as needed */
                   4050:                        if (perpage && (tpte & SRMMU_PG_C))
1.69      pk       4051:                                cache_flush_page(va);
1.60      pk       4052:                        i = ptoa((tpte & SRMMU_PPNMASK) >> SRMMU_PPNSHIFT);
1.55      pk       4053:                        if (managed(i)) {
                   4054:                                pv = pvhead(i);
                   4055:                                pv->pv_flags |= MR4M(tpte);
1.58      pk       4056:                                pv_unlink4m(pv, pm, va);
1.55      pk       4057:                        }
                   4058:                }
1.72      pk       4059:                tlb_flush_page(va);
                   4060:                setpgt4m(&sp->sg_pte[VA_SUN4M_VPG(va)], SRMMU_TEINVALID);
1.131     pk       4061:                nleft--;
                   4062: #ifdef DIAGNOSTIC
                   4063:                if (nleft < 0)
                   4064:                        panic("pmap_rmk: too many PTEs in segment; "
                   4065:                              "va 0x%lx; endva 0x%lx", va, endva);
                   4066: #endif
1.55      pk       4067:                va += NBPG;
                   4068:        }
                   4069:
1.161     pk       4070:        sp->sg_npte = nleft;
1.55      pk       4071: }
1.145     pk       4072: #endif /* SUN4M */
1.161     pk       4073:
1.55      pk       4074: /*
                   4075:  * Just like pmap_rmk_magic, but we have a different threshold.
                   4076:  * Note that this may well deserve further tuning work.
                   4077:  */
                   4078: #if 0
                   4079: #define        PMAP_RMU_MAGIC  (cacheinfo.c_hwflush?4:64)      /* if > magic, use cache_flush_segment */
                   4080: #else
                   4081: #define        PMAP_RMU_MAGIC  4       /* if > magic, use cache_flush_segment */
                   4082: #endif
                   4083:
                   4084: #if defined(SUN4) || defined(SUN4C)
                   4085:
                   4086: /* remove from user */
                   4087: /*static*/ void
                   4088: pmap_rmu4_4c(pm, va, endva, vr, vs)
1.124     pk       4089:        struct pmap *pm;
                   4090:        vaddr_t va, endva;
                   4091:        int vr, vs;
                   4092: {
                   4093:        int *pte0, i, pteva, tpte, perpage, npg;
                   4094:        struct pvlist *pv;
                   4095:        int nleft, pmeg;
1.55      pk       4096:        struct regmap *rp;
                   4097:        struct segmap *sp;
                   4098:
                   4099:        rp = &pm->pm_regmap[vr];
                   4100:        if (rp->rg_nsegmap == 0)
                   4101:                return;
                   4102:        if (rp->rg_segmap == NULL)
                   4103:                panic("pmap_rmu: no segments");
                   4104:
                   4105:        sp = &rp->rg_segmap[vs];
                   4106:        if ((nleft = sp->sg_npte) == 0)
                   4107:                return;
                   4108:        if (sp->sg_pte == NULL)
                   4109:                panic("pmap_rmu: no pages");
                   4110:
                   4111:
                   4112:        pmeg = sp->sg_pmeg;
                   4113:        pte0 = sp->sg_pte;
1.1       deraadt  4114:
                   4115:        if (pmeg == seginval) {
1.124     pk       4116:                int *pte = pte0 + VA_VPG(va);
1.1       deraadt  4117:
                   4118:                /*
                   4119:                 * PTEs are not in MMU.  Just invalidate software copies.
                   4120:                 */
1.63      pk       4121:                for (; va < endva; pte++, va += NBPG) {
1.1       deraadt  4122:                        tpte = *pte;
                   4123:                        if ((tpte & PG_V) == 0) {
                   4124:                                /* nothing to remove (braindead VM layer) */
                   4125:                                continue;
                   4126:                        }
                   4127:                        if ((tpte & PG_TYPE) == PG_OBMEM) {
1.60      pk       4128:                                i = ptoa(tpte & PG_PFNUM);
1.21      deraadt  4129:                                if (managed(i))
1.58      pk       4130:                                        pv_unlink4_4c(pvhead(i), pm, va);
1.1       deraadt  4131:                        }
                   4132:                        nleft--;
1.131     pk       4133: #ifdef DIAGNOSTIC
                   4134:                        if (nleft < 0)
                   4135:                                panic("pmap_rmu: too many PTEs in segment; "
                   4136:                                      "va 0x%lx; endva 0x%lx", va, endva);
                   4137: #endif
1.1       deraadt  4138:                        *pte = 0;
                   4139:                }
1.43      pk       4140:                if ((sp->sg_npte = nleft) == 0) {
1.49      pk       4141:                        free(pte0, M_VMPMAP);
1.43      pk       4142:                        sp->sg_pte = NULL;
                   4143:                        if (--rp->rg_nsegmap == 0) {
1.49      pk       4144:                                free(rp->rg_segmap, M_VMPMAP);
1.43      pk       4145:                                rp->rg_segmap = NULL;
1.69      pk       4146: #if defined(SUN4_MMU3L)
                   4147:                                if (HASSUN4_MMU3L && rp->rg_smeg != reginval) {
1.43      pk       4148:                                        if (pm->pm_ctx) {
1.71      pk       4149:                                                setcontext4(pm->pm_ctxnum);
1.43      pk       4150:                                                setregmap(va, reginval);
                   4151:                                        } else
1.71      pk       4152:                                                setcontext4(0);
1.43      pk       4153:                                        region_free(pm, rp->rg_smeg);
                   4154:                                }
                   4155: #endif
                   4156:                        }
1.1       deraadt  4157:                }
1.43      pk       4158:                return;
1.1       deraadt  4159:        }
                   4160:
                   4161:        /*
                   4162:         * PTEs are in MMU.  Invalidate in hardware, update ref &
                   4163:         * mod bits, and flush cache if required.
                   4164:         */
1.43      pk       4165:        if (CTX_USABLE(pm,rp)) {
1.1       deraadt  4166:                /* process has a context, must flush cache */
                   4167:                npg = (endva - va) >> PGSHIFT;
1.71      pk       4168:                setcontext4(pm->pm_ctxnum);
1.1       deraadt  4169:                if (npg > PMAP_RMU_MAGIC) {
                   4170:                        perpage = 0; /* flush the whole segment */
1.69      pk       4171:                        cache_flush_segment(vr, vs);
1.1       deraadt  4172:                } else
1.69      pk       4173:                        perpage = (CACHEINFO.c_vactype != VAC_NONE);
1.1       deraadt  4174:                pteva = va;
                   4175:        } else {
                   4176:                /* no context, use context 0; cache flush unnecessary */
1.71      pk       4177:                setcontext4(0);
1.69      pk       4178:                if (HASSUN4_MMU3L)
1.43      pk       4179:                        setregmap(0, tregion);
1.1       deraadt  4180:                /* XXX use per-cpu pteva? */
                   4181:                setsegmap(0, pmeg);
1.18      deraadt  4182:                pteva = VA_VPG(va) << PGSHIFT;
1.1       deraadt  4183:                perpage = 0;
                   4184:        }
1.63      pk       4185:        for (; va < endva; pteva += NBPG, va += NBPG) {
1.55      pk       4186:                tpte = getpte4(pteva);
1.1       deraadt  4187:                if ((tpte & PG_V) == 0)
                   4188:                        continue;
1.35      pk       4189:                if ((tpte & PG_TYPE) == PG_OBMEM) {
                   4190:                        /* if cacheable, flush page as needed */
                   4191:                        if (perpage && (tpte & PG_NC) == 0)
1.1       deraadt  4192:                                cache_flush_page(va);
1.60      pk       4193:                        i = ptoa(tpte & PG_PFNUM);
1.1       deraadt  4194:                        if (managed(i)) {
                   4195:                                pv = pvhead(i);
1.55      pk       4196:                                pv->pv_flags |= MR4_4C(tpte);
1.58      pk       4197:                                pv_unlink4_4c(pv, pm, va);
1.1       deraadt  4198:                        }
                   4199:                }
                   4200:                nleft--;
1.131     pk       4201: #ifdef DIAGNOSTIC
                   4202:                if (nleft < 0)
                   4203:                        panic("pmap_rmu: too many PTEs in segment; "
                   4204:                             "va 0x%lx; endva 0x%lx; pmeg %d", va, endva, pmeg);
                   4205: #endif
1.55      pk       4206:                setpte4(pteva, 0);
1.43      pk       4207:                pte0[VA_VPG(pteva)] = 0;
1.1       deraadt  4208:        }
                   4209:
                   4210:        /*
                   4211:         * If the segment is all gone, and the context is loaded, give
                   4212:         * the segment back.
                   4213:         */
1.43      pk       4214:        if ((sp->sg_npte = nleft) == 0 /* ??? && pm->pm_ctx != NULL*/) {
                   4215: #ifdef DEBUG
                    4216:                if (pm->pm_ctx == NULL) {
1.66      christos 4217:                        printf("pmap_rmu: no context here...");
1.43      pk       4218:                }
                   4219: #endif
                   4220:                va = VSTOVA(vr,vs);             /* retract */
                   4221:                if (CTX_USABLE(pm,rp))
                   4222:                        setsegmap(va, seginval);
1.69      pk       4223:                else if (HASSUN4_MMU3L && rp->rg_smeg != reginval) {
1.43      pk       4224:                        /* note: context already set earlier */
                   4225:                        setregmap(0, rp->rg_smeg);
                   4226:                        setsegmap(vs << SGSHIFT, seginval);
                   4227:                }
1.49      pk       4228:                free(pte0, M_VMPMAP);
1.43      pk       4229:                sp->sg_pte = NULL;
1.1       deraadt  4230:                me_free(pm, pmeg);
1.13      pk       4231:
1.43      pk       4232:                if (--rp->rg_nsegmap == 0) {
1.49      pk       4233:                        free(rp->rg_segmap, M_VMPMAP);
1.43      pk       4234:                        rp->rg_segmap = NULL;
                   4235:                        GAP_WIDEN(pm,vr);
                   4236:
1.69      pk       4237: #if defined(SUN4_MMU3L)
                   4238:                        if (HASSUN4_MMU3L && rp->rg_smeg != reginval) {
1.43      pk       4239:                                /* note: context already set */
                   4240:                                if (pm->pm_ctx)
                   4241:                                        setregmap(va, reginval);
                   4242:                                region_free(pm, rp->rg_smeg);
                   4243:                        }
                   4244: #endif
                   4245:                }
1.13      pk       4246:
1.1       deraadt  4247:        }
                   4248: }
                   4249:
1.55      pk       4250: #endif /* sun4,4c */
                   4251:
                   4252: #if defined(SUN4M)             /* 4M version of pmap_rmu */
                   4253: /* remove from user */
                   4254: /*static*/ void
                   4255: pmap_rmu4m(pm, va, endva, vr, vs)
1.124     pk       4256:        struct pmap *pm;
                   4257:        vaddr_t va, endva;
                   4258:        int vr, vs;
                   4259: {
                   4260:        int *pte0, i, perpage, npg;
                   4261:        struct pvlist *pv;
                   4262:        int nleft;
1.55      pk       4263:        struct regmap *rp;
                   4264:        struct segmap *sp;
                   4265:
                   4266:        rp = &pm->pm_regmap[vr];
                   4267:        if (rp->rg_nsegmap == 0)
                   4268:                return;
                   4269:        if (rp->rg_segmap == NULL)
                   4270:                panic("pmap_rmu: no segments");
                   4271:
                   4272:        sp = &rp->rg_segmap[vs];
                   4273:        if ((nleft = sp->sg_npte) == 0)
                   4274:                return;
1.76      pk       4275:
1.55      pk       4276:        if (sp->sg_pte == NULL)
                   4277:                panic("pmap_rmu: no pages");
                   4278:
                   4279:        pte0 = sp->sg_pte;
                   4280:
                   4281:        /*
                   4282:         * Invalidate PTE in MMU pagetables. Flush cache if necessary.
                   4283:         */
1.72      pk       4284:        if (pm->pm_ctx) {
1.55      pk       4285:                /* process has a context, must flush cache */
1.71      pk       4286:                setcontext4m(pm->pm_ctxnum);
1.69      pk       4287:                if (CACHEINFO.c_vactype != VAC_NONE) {
1.63      pk       4288:                        npg = (endva - va) >> PGSHIFT;
                   4289:                        if (npg > PMAP_RMU_MAGIC) {
                   4290:                                perpage = 0; /* flush the whole segment */
1.55      pk       4291:                                cache_flush_segment(vr, vs);
1.63      pk       4292:                        } else
                   4293:                                perpage = 1;
1.55      pk       4294:                } else
1.63      pk       4295:                        perpage = 0;
1.55      pk       4296:        } else {
                   4297:                /* no context; cache flush unnecessary */
                   4298:                perpage = 0;
                   4299:        }
1.63      pk       4300:        for (; va < endva; va += NBPG) {
1.100     pk       4301:                int tpte;
                   4302:
                   4303:                tpte = pte0[VA_SUN4M_VPG(va)];
1.72      pk       4304:
                   4305:                if ((tpte & SRMMU_TETYPE) != SRMMU_TEPTE) {
                   4306: #ifdef DEBUG
                   4307:                        if ((pmapdebug & PDB_SANITYCHK) &&
                   4308:                            pm->pm_ctx &&
                   4309:                            (getpte4m(va) & SRMMU_TEPTE) == SRMMU_TEPTE)
1.91      fair     4310:                                panic("pmap_rmu: Spurious uTLB entry for 0x%lx",
1.81      pk       4311:                                      va);
1.72      pk       4312: #endif
1.55      pk       4313:                        continue;
1.72      pk       4314:                }
                   4315:
1.55      pk       4316:                if ((tpte & SRMMU_PGTYPE) == PG_SUN4M_OBMEM) {
                   4317:                        /* if cacheable, flush page as needed */
                   4318:                        if (perpage && (tpte & SRMMU_PG_C))
1.60      pk       4319:                                cache_flush_page(va);
                   4320:                        i = ptoa((tpte & SRMMU_PPNMASK) >> SRMMU_PPNSHIFT);
1.55      pk       4321:                        if (managed(i)) {
                   4322:                                pv = pvhead(i);
                   4323:                                pv->pv_flags |= MR4M(tpte);
1.58      pk       4324:                                pv_unlink4m(pv, pm, va);
1.55      pk       4325:                        }
                   4326:                }
                   4327:                nleft--;
1.131     pk       4328: #ifdef DIAGNOSTIC
                   4329:                if (nleft < 0)
                   4330:                        panic("pmap_rmu: too many PTEs in segment; "
                   4331:                              "va 0x%lx; endva 0x%lx", va, endva);
                   4332: #endif
1.145     pk       4333:                if (pm->pm_ctx)
                   4334:                        tlb_flush_page(va);
                   4335:
1.72      pk       4336:                setpgt4m(&pte0[VA_SUN4M_VPG(va)], SRMMU_TEINVALID);
1.55      pk       4337:        }
                   4338:
                   4339:        /*
                   4340:         * If the segment is all gone, and the context is loaded, give
                   4341:         * the segment back.
                   4342:         */
1.72      pk       4343:        if ((sp->sg_npte = nleft) == 0) {
1.55      pk       4344: #ifdef DEBUG
                   4345:                if (pm->pm_ctx == NULL) {
1.66      christos 4346:                        printf("pmap_rmu: no context here...");
1.55      pk       4347:                }
                   4348: #endif
                   4349:                va = VSTOVA(vr,vs);             /* retract */
                   4350:
1.88      pk       4351:                if (pm->pm_ctx)
                   4352:                        tlb_flush_segment(vr, vs);      /* Paranoia? */
1.73      pk       4353:                setpgt4m(&rp->rg_seg_ptps[vs], SRMMU_TEINVALID);
1.157     pk       4354:                sp->sg_pte = NULL;
1.121     pk       4355:                pool_put(&L23_pool, pte0);
1.55      pk       4356:
                   4357:                if (--rp->rg_nsegmap == 0) {
1.152     pk       4358:                        int n;
                   4359:
1.88      pk       4360:                        if (pm->pm_ctx)
1.143     pk       4361:                                tlb_flush_region(vr);   /* Paranoia? */
1.152     pk       4362: #ifdef MULTIPROCESSOR
                   4363:                        for (n = 0; n < ncpu; n++)
                   4364: #else
                   4365:                        n = 0;
                   4366: #endif
                   4367:                        {
                   4368:                                setpgt4m(&pm->pm_reg_ptps[n][vr],
                   4369:                                         SRMMU_TEINVALID);
                   4370:                        }
1.55      pk       4371:                        free(rp->rg_segmap, M_VMPMAP);
                   4372:                        rp->rg_segmap = NULL;
1.121     pk       4373:                        pool_put(&L23_pool, rp->rg_seg_ptps);
1.55      pk       4374:                }
                   4375:        }
                   4376: }
1.143     pk       4377: #endif /* SUN4M */
1.55      pk       4378:
1.1       deraadt  4379: /*
                   4380:  * Lower (make more strict) the protection on the specified
                   4381:  * physical page.
                   4382:  *
                   4383:  * There are only two cases: either the protection is going to 0
                   4384:  * (in which case we do the dirty work here), or it is going from
                    4385:  * read/write to read-only (in which case pv_changepte does the trick).
                   4386:  */
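/*
 * Illustrative sketch (not compiled): the two cases described above, as a
 * hypothetical MI caller would exercise them through the pmap_page_protect()
 * entry point.
 */
#if 0
void
example_page_protect(pg)
	struct vm_page *pg;
{
	/* revoke write access only: pv_changepte() clears the write bits */
	pmap_page_protect(pg, VM_PROT_READ);

	/* revoke all access: every mapping of the page is torn down here */
	pmap_page_protect(pg, VM_PROT_NONE);
}
#endif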
1.55      pk       4387:
                   4388: #if defined(SUN4) || defined(SUN4C)
1.1       deraadt  4389: void
1.151     chs      4390: pmap_page_protect4_4c(pg, prot)
                   4391:        struct vm_page *pg;
1.1       deraadt  4392:        vm_prot_t prot;
                   4393: {
1.124     pk       4394:        struct pvlist *pv, *pv0, *npv;
                   4395:        struct pmap *pm;
                   4396:        int va, vr, vs, pteva, tpte;
                   4397:        int flags, nleft, i, s, ctx;
1.43      pk       4398:        struct regmap *rp;
                   4399:        struct segmap *sp;
1.151     chs      4400:        paddr_t pa = VM_PAGE_TO_PHYS(pg);
1.1       deraadt  4401:
                   4402: #ifdef DEBUG
                   4403:        if ((pmapdebug & PDB_CHANGEPROT) ||
                   4404:            (pmapdebug & PDB_REMOVE && prot == VM_PROT_NONE))
1.91      fair     4405:                printf("pmap_page_protect(0x%lx, 0x%x)\n", pa, prot);
1.1       deraadt  4406: #endif
                   4407:        /*
                   4408:         * Skip unmanaged pages, or operations that do not take
                   4409:         * away write permission.
                   4410:         */
1.82      pk       4411:        if ((pa & (PMAP_TNC_4 & ~PMAP_NC)) ||
1.34      pk       4412:             !managed(pa) || prot & VM_PROT_WRITE)
1.1       deraadt  4413:                return;
1.162     pk       4414:
1.1       deraadt  4415:        write_user_windows();   /* paranoia */
1.162     pk       4416:        pv = pvhead(pa);
1.1       deraadt  4417:        if (prot & VM_PROT_READ) {
1.162     pk       4418:                pv_changepte4_4c(pv, 0, PG_W);
1.1       deraadt  4419:                return;
                   4420:        }
                   4421:
                   4422:        /*
                   4423:         * Remove all access to all people talking to this page.
                   4424:         * Walk down PV list, removing all mappings.
                   4425:         * The logic is much like that for pmap_remove,
                   4426:         * but we know we are removing exactly one page.
                   4427:         */
                   4428:        s = splpmap();
1.162     pk       4429:        if (pv->pv_pmap == NULL) {
1.1       deraadt  4430:                splx(s);
                   4431:                return;
                   4432:        }
1.71      pk       4433:        ctx = getcontext4();
1.1       deraadt  4434:        pv0 = pv;
1.162     pk       4435:
                   4436:        /* This pv head will become empty, so clear caching state flags */
                   4437:        flags = pv->pv_flags & ~(PV_NC|PV_ANC);
                   4438:
                   4439:        while (pv != NULL) {
                   4440:                pm = pv->pv_pmap;
1.1       deraadt  4441:                va = pv->pv_va;
1.43      pk       4442:                vr = VA_VREG(va);
                   4443:                vs = VA_VSEG(va);
                   4444:                rp = &pm->pm_regmap[vr];
                   4445:                if (rp->rg_nsegmap == 0)
                   4446:                        panic("pmap_remove_all: empty vreg");
                   4447:                sp = &rp->rg_segmap[vs];
                   4448:                if ((nleft = sp->sg_npte) == 0)
1.1       deraadt  4449:                        panic("pmap_remove_all: empty vseg");
1.162     pk       4450:
                   4451:                sp->sg_npte = --nleft;
1.43      pk       4452:
                   4453:                if (sp->sg_pmeg == seginval) {
                   4454:                        /* Definitely not a kernel map */
1.1       deraadt  4455:                        if (nleft) {
1.43      pk       4456:                                sp->sg_pte[VA_VPG(va)] = 0;
1.1       deraadt  4457:                        } else {
1.49      pk       4458:                                free(sp->sg_pte, M_VMPMAP);
1.43      pk       4459:                                sp->sg_pte = NULL;
                   4460:                                if (--rp->rg_nsegmap == 0) {
1.49      pk       4461:                                        free(rp->rg_segmap, M_VMPMAP);
1.43      pk       4462:                                        rp->rg_segmap = NULL;
                   4463:                                        GAP_WIDEN(pm,vr);
1.69      pk       4464: #if defined(SUN4_MMU3L)
                   4465:                                        if (HASSUN4_MMU3L && rp->rg_smeg != reginval) {
1.43      pk       4466:                                                if (pm->pm_ctx) {
1.71      pk       4467:                                                        setcontext4(pm->pm_ctxnum);
1.43      pk       4468:                                                        setregmap(va, reginval);
                   4469:                                                } else
1.71      pk       4470:                                                        setcontext4(0);
1.43      pk       4471:                                                region_free(pm, rp->rg_smeg);
                   4472:                                        }
                   4473: #endif
                   4474:                                }
1.1       deraadt  4475:                        }
                   4476:                        goto nextpv;
                   4477:                }
1.84      pk       4478:
1.43      pk       4479:                if (CTX_USABLE(pm,rp)) {
1.71      pk       4480:                        setcontext4(pm->pm_ctxnum);
1.1       deraadt  4481:                        pteva = va;
1.69      pk       4482:                        cache_flush_page(va);
1.1       deraadt  4483:                } else {
1.71      pk       4484:                        setcontext4(0);
1.1       deraadt  4485:                        /* XXX use per-cpu pteva? */
1.69      pk       4486:                        if (HASSUN4_MMU3L)
1.43      pk       4487:                                setregmap(0, tregion);
                   4488:                        setsegmap(0, sp->sg_pmeg);
1.18      deraadt  4489:                        pteva = VA_VPG(va) << PGSHIFT;
1.1       deraadt  4490:                }
1.43      pk       4491:
1.55      pk       4492:                tpte = getpte4(pteva);
1.43      pk       4493:                if ((tpte & PG_V) == 0)
1.91      fair     4494:                        panic("pmap_page_protect !PG_V: ctx %d, va 0x%x, pte 0x%x",
1.84      pk       4495:                              pm->pm_ctxnum, va, tpte);
1.55      pk       4496:                flags |= MR4_4C(tpte);
1.43      pk       4497:
1.1       deraadt  4498:                if (nleft) {
1.55      pk       4499:                        setpte4(pteva, 0);
1.44      pk       4500:                        if (sp->sg_pte != NULL)
                   4501:                                sp->sg_pte[VA_VPG(pteva)] = 0;
1.84      pk       4502:                        goto nextpv;
                   4503:                }
                   4504:
                   4505:                /* Entire segment is gone */
                   4506:                if (pm == pmap_kernel()) {
                   4507: #if defined(SUN4_MMU3L)
                   4508:                        if (!HASSUN4_MMU3L)
1.43      pk       4509: #endif
1.84      pk       4510:                                for (i = ncontext; --i >= 0;) {
                   4511:                                        setcontext4(i);
                   4512:                                        setsegmap(va, seginval);
                   4513:                                }
                   4514:                        me_free(pm, sp->sg_pmeg);
                   4515:                        if (--rp->rg_nsegmap == 0) {
1.69      pk       4516: #if defined(SUN4_MMU3L)
1.84      pk       4517:                                if (HASSUN4_MMU3L) {
1.43      pk       4518:                                        for (i = ncontext; --i >= 0;) {
1.71      pk       4519:                                                setcontext4(i);
1.84      pk       4520:                                                setregmap(va, reginval);
1.43      pk       4521:                                        }
1.84      pk       4522:                                        region_free(pm, rp->rg_smeg);
                   4523:                                }
1.43      pk       4524: #endif
1.84      pk       4525:                        }
                   4526:                } else {
                   4527:                        if (CTX_USABLE(pm,rp))
                   4528:                                /* `pteva'; we might be using tregion */
                   4529:                                setsegmap(pteva, seginval);
1.69      pk       4530: #if defined(SUN4_MMU3L)
1.84      pk       4531:                        else if (HASSUN4_MMU3L &&
                   4532:                                 rp->rg_smeg != reginval) {
                   4533:                                /* note: context already set earlier */
                   4534:                                setregmap(0, rp->rg_smeg);
                   4535:                                setsegmap(vs << SGSHIFT, seginval);
                   4536:                        }
1.43      pk       4537: #endif
1.84      pk       4538:                        free(sp->sg_pte, M_VMPMAP);
                   4539:                        sp->sg_pte = NULL;
                   4540:                        me_free(pm, sp->sg_pmeg);
1.43      pk       4541:
1.84      pk       4542:                        if (--rp->rg_nsegmap == 0) {
1.69      pk       4543: #if defined(SUN4_MMU3L)
1.84      pk       4544:                                if (HASSUN4_MMU3L &&
                   4545:                                    rp->rg_smeg != reginval) {
                   4546:                                        if (pm->pm_ctx)
                   4547:                                                setregmap(va, reginval);
                   4548:                                        region_free(pm, rp->rg_smeg);
                   4549:                                }
1.43      pk       4550: #endif
1.84      pk       4551:                                free(rp->rg_segmap, M_VMPMAP);
                   4552:                                rp->rg_segmap = NULL;
                   4553:                                GAP_WIDEN(pm,vr);
1.1       deraadt  4554:                        }
                   4555:                }
1.84      pk       4556:
1.1       deraadt  4557:        nextpv:
                   4558:                npv = pv->pv_next;
                   4559:                if (pv != pv0)
1.122     pk       4560:                        pool_put(&pv_pool, pv);
1.162     pk       4561:                pv = npv;
1.1       deraadt  4562:        }
1.162     pk       4563:
                   4564:        /* Finally, update pv head */
1.1       deraadt  4565:        pv0->pv_pmap = NULL;
1.162     pk       4566:        pv0->pv_next = NULL;
1.1       deraadt  4567:        pv0->pv_flags = flags;
1.71      pk       4568:        setcontext4(ctx);
1.1       deraadt  4569:        splx(s);
                   4570: }
                   4571:
                   4572: /*
                   4573:  * Lower (make more strict) the protection on the specified
                   4574:  * range of this pmap.
                   4575:  *
                   4576:  * There are only two cases: either the protection is going to 0
                   4577:  * (in which case we call pmap_remove to do the dirty work), or
                   4578:  * it is going from read/write to read-only.  The latter is
                   4579:  * fairly easy.
                   4580:  */
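/*
 * Illustrative sketch (not compiled): the two cases described above, as a
 * hypothetical caller would exercise them through the pmap_protect() entry
 * point; the range is made up.
 */
#if 0
void
example_protect_range(pm, va)
	struct pmap *pm;
	vaddr_t va;
{
	/* dropping all access degenerates into pmap_remove() */
	pmap_protect(pm, va, va + NBPG, VM_PROT_NONE);

	/* read/write -> read-only just clears the write bit in each PTE */
	pmap_protect(pm, va, va + NBPG, VM_PROT_READ);
}
#endif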
                   4581: void
1.55      pk       4582: pmap_protect4_4c(pm, sva, eva, prot)
1.124     pk       4583:        struct pmap *pm;
                   4584:        vaddr_t sva, eva;
1.1       deraadt  4585:        vm_prot_t prot;
                   4586: {
1.124     pk       4587:        int va, nva, vr, vs;
                   4588:        int s, ctx;
1.43      pk       4589:        struct regmap *rp;
                   4590:        struct segmap *sp;
1.1       deraadt  4591:
                   4592:        if (pm == NULL || prot & VM_PROT_WRITE)
                   4593:                return;
1.43      pk       4594:
1.1       deraadt  4595:        if ((prot & VM_PROT_READ) == 0) {
                   4596:                pmap_remove(pm, sva, eva);
                   4597:                return;
                   4598:        }
                   4599:
                   4600:        write_user_windows();
1.71      pk       4601:        ctx = getcontext4();
1.1       deraadt  4602:        s = splpmap();
                   4603:        simple_lock(&pm->pm_lock);
                   4604:
                   4605:        for (va = sva; va < eva;) {
1.43      pk       4606:                vr = VA_VREG(va);
                   4607:                vs = VA_VSEG(va);
                   4608:                rp = &pm->pm_regmap[vr];
                   4609:                nva = VSTOVA(vr,vs + 1);
1.1       deraadt  4610: if (nva == 0) panic("pmap_protect: last segment");     /* cannot happen */
                   4611:                if (nva > eva)
                   4612:                        nva = eva;
1.43      pk       4613:                if (rp->rg_nsegmap == 0) {
1.1       deraadt  4614:                        va = nva;
                   4615:                        continue;
                   4616:                }
1.43      pk       4617: #ifdef DEBUG
                   4618:                if (rp->rg_segmap == NULL)
                   4619:                        panic("pmap_protect: no segments");
                   4620: #endif
                   4621:                sp = &rp->rg_segmap[vs];
                   4622:                if (sp->sg_npte == 0) {
                   4623:                        va = nva;
                   4624:                        continue;
                   4625:                }
                   4626: #ifdef DEBUG
                   4627:                if (pm != pmap_kernel() && sp->sg_pte == NULL)
                   4628:                        panic("pmap_protect: no pages");
                   4629: #endif
                   4630:                if (sp->sg_pmeg == seginval) {
1.124     pk       4631:                        int *pte = &sp->sg_pte[VA_VPG(va)];
1.1       deraadt  4632:
                   4633:                        /* not in MMU; just clear PG_W from core copies */
                   4634:                        for (; va < nva; va += NBPG)
                   4635:                                *pte++ &= ~PG_W;
                   4636:                } else {
                   4637:                        /* in MMU: take away write bits from MMU PTEs */
1.43      pk       4638:                        if (CTX_USABLE(pm,rp)) {
1.124     pk       4639:                                int tpte;
1.1       deraadt  4640:
                   4641:                                /*
                   4642:                                 * Flush cache so that any existing cache
                   4643:                                 * tags are updated.  This is really only
                   4644:                                 * needed for PTEs that lose PG_W.
                   4645:                                 */
1.71      pk       4646:                                setcontext4(pm->pm_ctxnum);
1.1       deraadt  4647:                                for (; va < nva; va += NBPG) {
1.55      pk       4648:                                        tpte = getpte4(va);
1.1       deraadt  4649:                                        pmap_stats.ps_npg_prot_all++;
1.35      pk       4650:                                        if ((tpte & (PG_W|PG_TYPE)) ==
                   4651:                                            (PG_W|PG_OBMEM)) {
1.1       deraadt  4652:                                                pmap_stats.ps_npg_prot_actual++;
1.69      pk       4653:                                                cache_flush_page(va);
1.55      pk       4654:                                                setpte4(va, tpte & ~PG_W);
1.1       deraadt  4655:                                        }
                   4656:                                }
                   4657:                        } else {
1.124     pk       4658:                                int pteva;
1.1       deraadt  4659:
                   4660:                                /*
                   4661:                                 * No context, hence not cached;
                   4662:                                 * just update PTEs.
                   4663:                                 */
1.71      pk       4664:                                setcontext4(0);
1.1       deraadt  4665:                                /* XXX use per-cpu pteva? */
1.69      pk       4666:                                if (HASSUN4_MMU3L)
1.43      pk       4667:                                        setregmap(0, tregion);
                   4668:                                setsegmap(0, sp->sg_pmeg);
1.18      deraadt  4669:                                pteva = VA_VPG(va) << PGSHIFT;
1.1       deraadt  4670:                                for (; va < nva; pteva += NBPG, va += NBPG)
1.55      pk       4671:                                        setpte4(pteva, getpte4(pteva) & ~PG_W);
1.1       deraadt  4672:                        }
                   4673:                }
                   4674:        }
                   4675:        simple_unlock(&pm->pm_lock);
1.12      pk       4676:        splx(s);
1.71      pk       4677:        setcontext4(ctx);
1.1       deraadt  4678: }
                   4679:
                   4680: /*
                   4681:  * Change the protection and/or wired status of the given (MI) virtual page.
                   4682:  * XXX: should have separate function (or flag) telling whether only wiring
                   4683:  * is changing.
                   4684:  */
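/*
 * Illustrative sketch (not compiled) of the protection translation done at
 * the top of pmap_changeprot4_4c() below: kernel mappings always keep PG_S
 * and gain PG_W only when write access is granted; user mappings get PG_W
 * or nothing.  The helper name is made up.
 */
#if 0
static int
example_newprot(pm, prot)
	struct pmap *pm;
	vm_prot_t prot;
{
	if (pm == pmap_kernel())
		return ((prot & VM_PROT_WRITE) ? PG_S|PG_W : PG_S);
	return ((prot & VM_PROT_WRITE) ? PG_W : 0);
}
#endif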
                   4685: void
1.55      pk       4686: pmap_changeprot4_4c(pm, va, prot, wired)
1.124     pk       4687:        struct pmap *pm;
                   4688:        vaddr_t va;
1.1       deraadt  4689:        vm_prot_t prot;
                   4690:        int wired;
                   4691: {
1.124     pk       4692:        int vr, vs, tpte, newprot, ctx, s;
1.43      pk       4693:        struct regmap *rp;
                   4694:        struct segmap *sp;
1.1       deraadt  4695:
                   4696: #ifdef DEBUG
                   4697:        if (pmapdebug & PDB_CHANGEPROT)
1.91      fair     4698:                printf("pmap_changeprot(%p, 0x%lx, 0x%x, 0x%x)\n",
1.1       deraadt  4699:                    pm, va, prot, wired);
                   4700: #endif
                   4701:
                   4702:        write_user_windows();   /* paranoia */
                   4703:
1.64      pk       4704:        va &= ~(NBPG-1);
1.42      mycroft  4705:        if (pm == pmap_kernel())
1.1       deraadt  4706:                newprot = prot & VM_PROT_WRITE ? PG_S|PG_W : PG_S;
                   4707:        else
                   4708:                newprot = prot & VM_PROT_WRITE ? PG_W : 0;
1.43      pk       4709:        vr = VA_VREG(va);
                   4710:        vs = VA_VSEG(va);
1.1       deraadt  4711:        s = splpmap();          /* conservative */
1.43      pk       4712:        rp = &pm->pm_regmap[vr];
                    4713:        if (rp->rg_nsegmap == 0) {
1.66      christos 4714:                printf("pmap_changeprot: no segments in %d\n", vr);
                                         splx(s);
1.43      pk       4715:                return;
                    4716:        }
                    4717:        if (rp->rg_segmap == NULL) {
1.66      christos 4718:                printf("pmap_changeprot: no segment table in %d!\n", vr);
                                         splx(s);
1.43      pk       4719:                return;
                    4720:        }
                   4721:        sp = &rp->rg_segmap[vs];
                   4722:
1.1       deraadt  4723:        pmap_stats.ps_changeprots++;
                   4724:
1.43      pk       4725: #ifdef DEBUG
                   4726:        if (pm != pmap_kernel() && sp->sg_pte == NULL)
                   4727:                panic("pmap_changeprot: no pages");
                   4728: #endif
                   4729:
1.1       deraadt  4730:        /* update PTEs in software or hardware */
1.43      pk       4731:        if (sp->sg_pmeg == seginval) {
1.124     pk       4732:                int *pte = &sp->sg_pte[VA_VPG(va)];
1.1       deraadt  4733:
                   4734:                /* update in software */
                   4735:                if ((*pte & PG_PROT) == newprot)
                   4736:                        goto useless;
                   4737:                *pte = (*pte & ~PG_PROT) | newprot;
                   4738:        } else {
                   4739:                /* update in hardware */
1.71      pk       4740:                ctx = getcontext4();
1.43      pk       4741:                if (CTX_USABLE(pm,rp)) {
1.88      pk       4742:                        /*
                   4743:                         * Use current context.
                   4744:                         * Flush cache if page has been referenced to
                   4745:                         * avoid stale protection bits in the cache tags.
                   4746:                         */
1.71      pk       4747:                        setcontext4(pm->pm_ctxnum);
1.55      pk       4748:                        tpte = getpte4(va);
1.11      pk       4749:                        if ((tpte & PG_PROT) == newprot) {
1.71      pk       4750:                                setcontext4(ctx);
1.1       deraadt  4751:                                goto useless;
1.11      pk       4752:                        }
1.88      pk       4753:                        if ((tpte & (PG_U|PG_NC|PG_TYPE)) == (PG_U|PG_OBMEM))
1.1       deraadt  4754:                                cache_flush_page((int)va);
                   4755:                } else {
1.71      pk       4756:                        setcontext4(0);
1.1       deraadt  4757:                        /* XXX use per-cpu va? */
1.69      pk       4758:                        if (HASSUN4_MMU3L)
1.43      pk       4759:                                setregmap(0, tregion);
                   4760:                        setsegmap(0, sp->sg_pmeg);
1.18      deraadt  4761:                        va = VA_VPG(va) << PGSHIFT;
1.55      pk       4762:                        tpte = getpte4(va);
1.11      pk       4763:                        if ((tpte & PG_PROT) == newprot) {
1.71      pk       4764:                                setcontext4(ctx);
1.1       deraadt  4765:                                goto useless;
1.11      pk       4766:                        }
1.1       deraadt  4767:                }
                   4768:                tpte = (tpte & ~PG_PROT) | newprot;
1.55      pk       4769:                setpte4(va, tpte);
1.71      pk       4770:                setcontext4(ctx);
1.1       deraadt  4771:        }
                   4772:        splx(s);
                   4773:        return;
                   4774:
                   4775: useless:
                   4776:        /* only wiring changed, and we ignore wiring */
                   4777:        pmap_stats.ps_useless_changeprots++;
                   4778:        splx(s);
                   4779: }
                   4780:
1.55      pk       4781: #endif /* sun4, 4c */
                   4782:
                   4783: #if defined(SUN4M)             /* 4M version of protection routines above */
1.1       deraadt  4784: /*
1.55      pk       4785:  * Lower (make more strict) the protection on the specified
                   4786:  * physical page.
1.1       deraadt  4787:  *
1.55      pk       4788:  * There are only two cases: either the protection is going to 0
                   4789:  * (in which case we do the dirty work here), or it is going
                   4790:  * to read-only (in which case pv_changepte does the trick).
1.1       deraadt  4791:  */
                   4792: void
1.151     chs      4793: pmap_page_protect4m(pg, prot)
                   4794:        struct vm_page *pg;
1.1       deraadt  4795:        vm_prot_t prot;
                   4796: {
1.124     pk       4797:        struct pvlist *pv, *pv0, *npv;
                   4798:        struct pmap *pm;
                   4799:        int va, vr, vs, tpte;
                   4800:        int flags, nleft, s, ctx;
1.55      pk       4801:        struct regmap *rp;
                   4802:        struct segmap *sp;
1.151     chs      4803:        paddr_t pa = VM_PAGE_TO_PHYS(pg);
1.45      pk       4804:
                   4805: #ifdef DEBUG
1.55      pk       4806:        if ((pmapdebug & PDB_CHANGEPROT) ||
                   4807:            (pmapdebug & PDB_REMOVE && prot == VM_PROT_NONE))
1.91      fair     4808:                printf("pmap_page_protect(0x%lx, 0x%x)\n", pa, prot);
1.45      pk       4809: #endif
1.55      pk       4810:        /*
                   4811:         * Skip unmanaged pages, or operations that do not take
                   4812:         * away write permission.
                   4813:         */
                   4814:        if (!managed(pa) || prot & VM_PROT_WRITE)
                   4815:                return;
1.162     pk       4816:
1.55      pk       4817:        write_user_windows();   /* paranoia */
1.162     pk       4818:        pv = pvhead(pa);
1.55      pk       4819:        if (prot & VM_PROT_READ) {
1.162     pk       4820:                pv_changepte4m(pv, 0, PPROT_WRITE);
1.45      pk       4821:                return;
                   4822:        }
1.39      pk       4823:
1.1       deraadt  4824:        /*
1.55      pk       4825:         * Remove all access to this page from every pmap that maps it.
                   4826:         * Walk down PV list, removing all mappings.
                   4827:         * The logic is much like that for pmap_remove,
                   4828:         * but we know we are removing exactly one page.
1.1       deraadt  4829:         */
1.55      pk       4830:        s = splpmap();
1.162     pk       4831:        if (pv->pv_pmap == NULL) {
1.55      pk       4832:                splx(s);
                   4833:                return;
1.1       deraadt  4834:        }
1.71      pk       4835:        ctx = getcontext4m();
1.55      pk       4836:        pv0 = pv;
1.162     pk       4837:
                   4838:        /* This pv head will become empty, so clear caching state flags */
                   4839:        flags = pv->pv_flags & ~(PV_C4M|PV_ANC);
                   4840:
                   4841:        while (pv != NULL) {
                   4842:                pm = pv->pv_pmap;
1.55      pk       4843:                va = pv->pv_va;
                   4844:                vr = VA_VREG(va);
                   4845:                vs = VA_VSEG(va);
                   4846:                rp = &pm->pm_regmap[vr];
                   4847:                if (rp->rg_nsegmap == 0)
                   4848:                        panic("pmap_page_protect: empty vreg");
                   4849:                sp = &rp->rg_segmap[vs];
                   4850:                if ((nleft = sp->sg_npte) == 0)
                   4851:                        panic("pmap_page_protect: empty vseg");
1.162     pk       4852:                sp->sg_npte = --nleft;
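                                         /*
                                          * One mapping fewer in this segment; if the count drops to
                                          * zero, the user segment's page table is torn down below.
                                          */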
1.1       deraadt  4853:
1.55      pk       4854:                /* Invalidate PTE in MMU pagetables. Flush cache if necessary */
1.72      pk       4855:                if (pm->pm_ctx) {
1.71      pk       4856:                        setcontext4m(pm->pm_ctxnum);
1.69      pk       4857:                        cache_flush_page(va);
1.55      pk       4858:                        tlb_flush_page(va);
1.72      pk       4859:                }
                   4860:
                   4861:                tpte = sp->sg_pte[VA_SUN4M_VPG(va)];
1.162     pk       4862:                setpgt4m(&sp->sg_pte[VA_SUN4M_VPG(va)], SRMMU_TEINVALID);
1.1       deraadt  4863:
1.55      pk       4864:                if ((tpte & SRMMU_TETYPE) != SRMMU_TEPTE)
                   4865:                        panic("pmap_page_protect !PG_V");
1.72      pk       4866:
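                                         /*
                                          * Fold the referenced/modified bits of the dying mapping
                                          * into the pv head's flags.
                                          */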
1.55      pk       4867:                flags |= MR4M(tpte);
1.43      pk       4868:
1.162     pk       4869:                if (nleft == 0 && pm != pmap_kernel()) {
                   4870:                        /*
                   4871:                         * Entire user mode segment is gone
                   4872:                         */
1.83      pk       4873:                        if (pm->pm_ctx)
                   4874:                                tlb_flush_segment(vr, vs);
                   4875:                        setpgt4m(&rp->rg_seg_ptps[vs], SRMMU_TEINVALID);
1.121     pk       4876:                        pool_put(&L23_pool, sp->sg_pte);
1.83      pk       4877:                        sp->sg_pte = NULL;
1.55      pk       4878:
1.83      pk       4879:                        if (--rp->rg_nsegmap == 0) {
1.152     pk       4880:                                int n;
1.88      pk       4881:                                if (pm->pm_ctx)
1.143     pk       4882:                                        tlb_flush_region(vr);
1.152     pk       4883:
                   4884:                                /*
                   4885:                                 * Replicate segment de-allocation in each
                   4886:                                 * CPU's region table.
                   4887:                                 */
                   4888: #ifdef MULTIPROCESSOR
                   4889:                                for (n = 0; n < ncpu; n++)
                   4890: #else
                   4891:                                n = 0;
                   4892: #endif
                   4893:                                {
                   4894:                                        setpgt4m(&pm->pm_reg_ptps[n][vr],
                   4895:                                                 SRMMU_TEINVALID);
                   4896:                                }
1.83      pk       4897:                                free(rp->rg_segmap, M_VMPMAP);
                   4898:                                rp->rg_segmap = NULL;
1.121     pk       4899:                                pool_put(&L23_pool, rp->rg_seg_ptps);
1.55      pk       4900:                        }
                   4901:                }
1.83      pk       4902:
1.55      pk       4903:                npv = pv->pv_next;
                   4904:                if (pv != pv0)
1.122     pk       4905:                        pool_put(&pv_pool, pv);
1.162     pk       4906:                pv = npv;
1.55      pk       4907:        }
1.162     pk       4908:
                   4909:        /* Finally, update pv head */
1.55      pk       4910:        pv0->pv_pmap = NULL;
1.162     pk       4911:        pv0->pv_next = NULL;
1.55      pk       4912:        pv0->pv_flags = flags;
1.71      pk       4913:        setcontext4m(ctx);
1.55      pk       4914:        splx(s);
                   4915: }
                   4916:
                   4917: /*
                   4918:  * Lower (make more strict) the protection on the specified
                   4919:  * range of this pmap.
                   4920:  *
                   4921:  * There are only two cases: either the protection is going to 0
                   4922:  * (in which case we call pmap_remove to do the dirty work), or
                   4923:  * it is going from read/write to read-only.  The latter is
                   4924:  * fairly easy.
                   4925:  */
                   4926: void
                   4927: pmap_protect4m(pm, sva, eva, prot)
1.124     pk       4928:        struct pmap *pm;
                   4929:        vaddr_t sva, eva;
1.55      pk       4930:        vm_prot_t prot;
                   4931: {
1.145     pk       4932:        vaddr_t va, nva;
                   4933:        int s, ctx, vr, vs;
1.55      pk       4934:        struct regmap *rp;
                   4935:        struct segmap *sp;
                   4936:
                   4937:        if (pm == NULL || prot & VM_PROT_WRITE)
                   4938:                return;
                   4939:
                   4940:        if ((prot & VM_PROT_READ) == 0) {
                   4941:                pmap_remove(pm, sva, eva);
                   4942:                return;
                   4943:        }
                   4944:
1.145     pk       4945: #ifdef DEBUG
                   4946:        if (pmapdebug & PDB_CHANGEPROT)
                   4947:                printf("pmap_protect[curpid %d, ctx %d](%lx, %lx, %x)\n",
                   4948:                        curproc==NULL ? -1 : curproc->p_pid,
                   4949:                        pm->pm_ctx ? pm->pm_ctxnum : -1, sva, eva, prot);
                   4950: #endif
                   4951:
1.55      pk       4952:        write_user_windows();
1.71      pk       4953:        ctx = getcontext4m();
1.55      pk       4954:        s = splpmap();
                   4955:        simple_lock(&pm->pm_lock);
                   4956:
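                                 /*
                                  * Walk the range one virtual segment at a time; `nva' is the
                                  * start of the next segment, clamped to `eva' below.
                                  */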
                   4957:        for (va = sva; va < eva;) {
                   4958:                vr = VA_VREG(va);
                   4959:                vs = VA_VSEG(va);
                   4960:                rp = &pm->pm_regmap[vr];
                   4961:                nva = VSTOVA(vr,vs + 1);
                   4962:                if (nva == 0)   /* XXX */
                   4963:                        panic("pmap_protect: last segment"); /* cannot happen (why?) */
                   4964:                if (nva > eva)
                   4965:                        nva = eva;
                   4966:                if (rp->rg_nsegmap == 0) {
                   4967:                        va = nva;
                   4968:                        continue;
                   4969:                }
                   4970: #ifdef DEBUG
                   4971:                if (rp->rg_segmap == NULL)
                   4972:                        panic("pmap_protect: no segments");
                   4973: #endif
                   4974:                sp = &rp->rg_segmap[vs];
                   4975:                if (sp->sg_npte == 0) {
                   4976:                        va = nva;
                   4977:                        continue;
                   4978:                }
                   4979: #ifdef DEBUG
                   4980:                if (sp->sg_pte == NULL)
                   4981:                        panic("pmap_protect: no pages");
                   4982: #endif
1.145     pk       4983:                /*
                   4984:                 * pages loaded: take away write bits from MMU PTEs
                   4985:                 */
1.72      pk       4986:                if (pm->pm_ctx)
                   4987:                        setcontext4m(pm->pm_ctxnum);
                   4988:
                   4989:                pmap_stats.ps_npg_prot_all = (nva - va) >> PGSHIFT;
                   4990:                for (; va < nva; va += NBPG) {
                   4991:                        int tpte;
1.100     pk       4992:
1.72      pk       4993:                        tpte = sp->sg_pte[VA_SUN4M_VPG(va)];
1.55      pk       4994:                        /*
                   4995:                         * Flush cache so that any existing cache
                   4996:                         * tags are updated.  This is really only
                   4997:                         * needed for PTEs that lose PG_W.
                   4998:                         */
1.72      pk       4999:                        if ((tpte & (PPROT_WRITE|SRMMU_PGTYPE)) ==
                   5000:                            (PPROT_WRITE|PG_SUN4M_OBMEM)) {
                   5001:                                pmap_stats.ps_npg_prot_actual++;
                   5002:                                if (pm->pm_ctx) {
1.69      pk       5003:                                        cache_flush_page(va);
1.145     pk       5004:                                        /* Flush TLB entry */
                   5005:                                        tlb_flush_page(va);
1.55      pk       5006:                                }
1.72      pk       5007:                                setpgt4m(&sp->sg_pte[VA_SUN4M_VPG(va)],
                   5008:                                         tpte & ~PPROT_WRITE);
1.55      pk       5009:                        }
                   5010:                }
                   5011:        }
                   5012:        simple_unlock(&pm->pm_lock);
                   5013:        splx(s);
1.71      pk       5014:        setcontext4m(ctx);
1.55      pk       5015: }
                   5016:
                   5017: /*
                   5018:  * Change the protection and/or wired status of the given (MI) virtual page.
                   5019:  * XXX: should have separate function (or flag) telling whether only wiring
                   5020:  * is changing.
                   5021:  */
                   5022: void
                   5023: pmap_changeprot4m(pm, va, prot, wired)
1.124     pk       5024:        struct pmap *pm;
                   5025:        vaddr_t va;
1.55      pk       5026:        vm_prot_t prot;
                   5027:        int wired;
                   5028: {
1.124     pk       5029:        int pte, newprot, ctx, s;
1.100     pk       5030:        struct regmap *rp;
                   5031:        struct segmap *sp;
1.55      pk       5032:
                   5033: #ifdef DEBUG
                   5034:        if (pmapdebug & PDB_CHANGEPROT)
1.91      fair     5035:                printf("pmap_changeprot(%p, 0x%lx, 0x%x, 0x%x)\n",
1.55      pk       5036:                    pm, va, prot, wired);
                   5037: #endif
                   5038:
                   5039:        write_user_windows();   /* paranoia */
                   5040:
1.64      pk       5041:        va &= ~(NBPG-1);
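                                 /*
                                  * Pick the SRMMU protection code: supervisor-only access for
                                  * kernel mappings, user+supervisor access otherwise; write
                                  * permission only if requested.
                                  */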
1.55      pk       5042:        if (pm == pmap_kernel())
                   5043:                newprot = prot & VM_PROT_WRITE ? PPROT_N_RWX : PPROT_N_RX;
                   5044:        else
                   5045:                newprot = prot & VM_PROT_WRITE ? PPROT_RWX_RWX : PPROT_RX_RX;
                   5046:
                   5047:        pmap_stats.ps_changeprots++;
                   5048:
                   5049:        s = splpmap();          /* conservative */
1.100     pk       5050:
                   5051:        rp = &pm->pm_regmap[VA_VREG(va)];
                   5052:        sp = &rp->rg_segmap[VA_VSEG(va)];
                   5053:
                   5054:        pte = sp->sg_pte[VA_SUN4M_VPG(va)];
                   5055:        if ((pte & SRMMU_PROT_MASK) == newprot) {
                   5056:                /* only wiring changed, and we ignore wiring */
                   5057:                pmap_stats.ps_useless_changeprots++;
                   5058:                goto out;
                   5059:        }
                   5060:
                   5061:        if (pm->pm_ctx) {
1.88      pk       5062:                /*
                   5063:                 * Use current context.
                   5064:                 * Flush cache if page has been referenced to
                   5065:                 * avoid stale protection bits in the cache tags.
                   5066:                 */
1.145     pk       5067:
                   5068:                ctx = getcontext4m();
                   5069:                setcontext4m(pm->pm_ctxnum);
1.100     pk       5070:                if ((pte & (SRMMU_PG_C|SRMMU_PGTYPE)) ==
1.88      pk       5071:                    (SRMMU_PG_C|PG_SUN4M_OBMEM))
                   5072:                        cache_flush_page(va);
1.145     pk       5073:
                   5074:                tlb_flush_page(va);
                   5075:                setcontext4m(ctx);
1.55      pk       5076:        }
1.100     pk       5077:
                   5078:        setpgt4m(&sp->sg_pte[VA_SUN4M_VPG(va)],
                   5079:                 (pte & ~SRMMU_PROT_MASK) | newprot);
1.145     pk       5080:
1.72      pk       5081: out:
1.55      pk       5082:        splx(s);
                   5083: }
1.145     pk       5084: #endif /* SUN4M */
1.55      pk       5085:
                   5086: /*
                   5087:  * Insert (MI) physical page pa at virtual address va in the given pmap.
                   5088:  * NB: the pa parameter includes type bits PMAP_OBIO, PMAP_NC as necessary.
                   5089:  *
                   5090:  * If pa is not in the `managed' range it will not be `bank mapped'.
                   5091:  * This works during bootstrap only because the first 4MB happens to
                   5092:  * map one-to-one.
                   5093:  *
                   5094:  * There may already be something else there, or we might just be
                   5095:  * changing protections and/or wiring on an existing mapping.
                   5096:  *     XXX     should have different entry points for changing!
                   5097:  */
                   5098:
                   5099: #if defined(SUN4) || defined(SUN4C)
                   5100:
1.153     thorpej  5101: int
                   5102: pmap_enter4_4c(pm, va, pa, prot, flags)
1.124     pk       5103:        struct pmap *pm;
                   5104:        vaddr_t va;
                   5105:        paddr_t pa;
1.55      pk       5106:        vm_prot_t prot;
1.153     thorpej  5107:        int flags;
1.55      pk       5108: {
1.124     pk       5109:        struct pvlist *pv;
                   5110:        int pteproto, ctx;
1.153     thorpej  5111:        boolean_t wired = (flags & PMAP_WIRED) != 0;
1.55      pk       5112:
                   5113:        if (pm == NULL)
1.153     thorpej  5114:                return (KERN_SUCCESS);
1.55      pk       5115:
                   5116:        if (VA_INHOLE(va)) {
                   5117: #ifdef DEBUG
1.91      fair     5118:                printf("pmap_enter: pm %p, va 0x%lx, pa 0x%lx: in MMU hole\n",
1.55      pk       5119:                        pm, va, pa);
                   5120: #endif
1.153     thorpej  5121:                return (KERN_SUCCESS);
1.55      pk       5122:        }
                   5123:
                   5124: #ifdef DEBUG
                   5125:        if (pmapdebug & PDB_ENTER)
1.91      fair     5126:                printf("pmap_enter(%p, 0x%lx, 0x%lx, 0x%x, 0x%x)\n",
1.55      pk       5127:                    pm, va, pa, prot, wired);
                   5128: #endif
                   5129:
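                                 /*
                                  * Fold the space/type bits (PMAP_OBIO, PMAP_NC) encoded in `pa'
                                  * into the PTE prototype, then strip them from the address.
                                  */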
1.82      pk       5130:        pteproto = PG_V | PMAP_T2PTE_4(pa);
                   5131:        pa &= ~PMAP_TNC_4;
1.55      pk       5132:        /*
                   5133:         * Set up prototype for new PTE.  Cannot set PG_NC from PV_NC yet
                   5134:         * since the pvlist no-cache bit might change as a result of the
                   5135:         * new mapping.
                   5136:         */
                   5137:        if ((pteproto & PG_TYPE) == PG_OBMEM && managed(pa)) {
                   5138: #ifdef DIAGNOSTIC
                   5139:                if (!pmap_pa_exists(pa))
1.91      fair     5140:                        panic("pmap_enter: no such address: 0x%lx", pa);
1.55      pk       5141: #endif
                   5142:                pv = pvhead(pa);
                   5143:        } else {
                   5144:                pv = NULL;
                   5145:        }
1.60      pk       5146:        pteproto |= atop(pa) & PG_PFNUM;
1.55      pk       5147:        if (prot & VM_PROT_WRITE)
                   5148:                pteproto |= PG_W;
                   5149:
1.71      pk       5150:        ctx = getcontext4();
1.55      pk       5151:        if (pm == pmap_kernel())
                   5152:                pmap_enk4_4c(pm, va, prot, wired, pv, pteproto | PG_S);
                   5153:        else
                   5154:                pmap_enu4_4c(pm, va, prot, wired, pv, pteproto);
1.71      pk       5155:        setcontext4(ctx);
1.153     thorpej  5156:        return (KERN_SUCCESS);
1.55      pk       5157: }
                   5158:
                   5159: /* enter new (or change existing) kernel mapping */
                   5160: void
                   5161: pmap_enk4_4c(pm, va, prot, wired, pv, pteproto)
1.124     pk       5162:        struct pmap *pm;
                   5163:        vaddr_t va;
1.55      pk       5164:        vm_prot_t prot;
                   5165:        int wired;
1.124     pk       5166:        struct pvlist *pv;
                   5167:        int pteproto;
1.55      pk       5168: {
1.124     pk       5169:        int vr, vs, tpte, i, s;
1.55      pk       5170:        struct regmap *rp;
                   5171:        struct segmap *sp;
                   5172:
                   5173:        vr = VA_VREG(va);
                   5174:        vs = VA_VSEG(va);
                   5175:        rp = &pm->pm_regmap[vr];
                   5176:        sp = &rp->rg_segmap[vs];
                   5177:        s = splpmap();          /* XXX way too conservative */
                   5178:
1.69      pk       5179: #if defined(SUN4_MMU3L)
                   5180:        if (HASSUN4_MMU3L && rp->rg_smeg == reginval) {
1.124     pk       5181:                vaddr_t tva;
1.55      pk       5182:                rp->rg_smeg = region_alloc(&region_locked, pm, vr)->me_cookie;
                   5183:                i = ncontext - 1;
                   5184:                do {
1.71      pk       5185:                        setcontext4(i);
1.55      pk       5186:                        setregmap(va, rp->rg_smeg);
                   5187:                } while (--i >= 0);
1.1       deraadt  5188:
1.43      pk       5189:                /* load the new region's segment maps; the PTE itself is set below */
                   5190:                tva = VA_ROUNDDOWNTOREG(va);
                   5191:                for (i = 0; i < NSEGRG; i++) {
                   5192:                        setsegmap(tva, rp->rg_segmap[i].sg_pmeg);
                   5193:                        tva += NBPSG;
                   5194:                }
                   5195:        }
                   5196: #endif
1.55      pk       5197:        if (sp->sg_pmeg != seginval && (tpte = getpte4(va)) & PG_V) {
1.124     pk       5198:                int addr;
1.1       deraadt  5199:
1.34      pk       5200:                /* old mapping exists, and is of the same pa type */
                   5201:                if ((tpte & (PG_PFNUM|PG_TYPE)) ==
                   5202:                    (pteproto & (PG_PFNUM|PG_TYPE))) {
1.1       deraadt  5203:                        /* just changing protection and/or wiring */
                   5204:                        splx(s);
1.81      pk       5205:                        pmap_changeprot4_4c(pm, va, prot, wired);
1.1       deraadt  5206:                        return;
                   5207:                }
                   5208:
1.34      pk       5209:                if ((tpte & PG_TYPE) == PG_OBMEM) {
1.43      pk       5210: #ifdef DEBUG
1.91      fair     5211: printf("pmap_enk: changing existing va=>pa entry: va 0x%lx, pteproto 0x%x\n",
1.43      pk       5212:        va, pteproto);
                   5213: #endif
1.34      pk       5214:                        /*
                   5215:                         * Switcheroo: changing pa for this va.
                   5216:                         * If old pa was managed, remove from pvlist.
                   5217:                         * If old page was cached, flush cache.
                   5218:                         */
1.60      pk       5219:                        addr = ptoa(tpte & PG_PFNUM);
1.31      pk       5220:                        if (managed(addr))
1.58      pk       5221:                                pv_unlink4_4c(pvhead(addr), pm, va);
1.34      pk       5222:                        if ((tpte & PG_NC) == 0) {
1.71      pk       5223:                                setcontext4(0); /* ??? */
1.69      pk       5224:                                cache_flush_page((int)va);
1.34      pk       5225:                        }
1.1       deraadt  5226:                }
                   5227:        } else {
                   5228:                /* adding new entry */
1.43      pk       5229:                sp->sg_npte++;
1.1       deraadt  5230:        }
                   5231:
                   5232:        /*
                   5233:         * If the new mapping is for a managed PA, enter into pvlist.
                   5234:         * Note that the mapping for a malloc page will always be
                   5235:         * unique (hence will never cause a second call to malloc).
                   5236:         */
                   5237:        if (pv != NULL)
1.115     pk       5238:                pteproto |= pv_link4_4c(pv, pm, va, pteproto & PG_NC);
1.1       deraadt  5239:
1.43      pk       5240:        if (sp->sg_pmeg == seginval) {
1.124     pk       5241:                int tva;
1.1       deraadt  5242:
                   5243:                /*
                   5244:                 * Allocate an MMU entry now (on locked list),
                   5245:                 * and map it into every context.  Set all its
                   5246:                 * PTEs invalid (we will then overwrite one, but
                   5247:                 * this is more efficient than looping twice).
                   5248:                 */
                   5249: #ifdef DEBUG
                   5250:                if (pm->pm_ctx == NULL || pm->pm_ctxnum != 0)
                   5251:                        panic("pmap_enk: kern seg but no kern ctx");
                   5252: #endif
1.43      pk       5253:                sp->sg_pmeg = me_alloc(&segm_locked, pm, vr, vs)->me_cookie;
                   5254:                rp->rg_nsegmap++;
                   5255:
1.69      pk       5256: #if defined(SUN4_MMU3L)
                   5257:                if (HASSUN4_MMU3L)
1.43      pk       5258:                        setsegmap(va, sp->sg_pmeg);
                   5259:                else
                   5260: #endif
                   5261:                {
                   5262:                        i = ncontext - 1;
                   5263:                        do {
1.71      pk       5264:                                setcontext4(i);
1.43      pk       5265:                                setsegmap(va, sp->sg_pmeg);
                   5266:                        } while (--i >= 0);
                   5267:                }
1.1       deraadt  5268:
                   5269:                /* set all PTEs to invalid, then overwrite one PTE below */
                   5270:                tva = VA_ROUNDDOWNTOSEG(va);
                   5271:                i = NPTESG;
                   5272:                do {
1.55      pk       5273:                        setpte4(tva, 0);
1.1       deraadt  5274:                        tva += NBPG;
                   5275:                } while (--i > 0);
                   5276:        }
                   5277:
                   5278:        /* ptes kept in hardware only */
1.55      pk       5279:        setpte4(va, pteproto);
1.1       deraadt  5280:        splx(s);
                   5281: }
                   5282:
                   5283: /* enter new (or change existing) user mapping */
1.53      christos 5284: void
1.55      pk       5285: pmap_enu4_4c(pm, va, prot, wired, pv, pteproto)
1.124     pk       5286:        struct pmap *pm;
                   5287:        vaddr_t va;
1.1       deraadt  5288:        vm_prot_t prot;
                   5289:        int wired;
1.124     pk       5290:        struct pvlist *pv;
                   5291:        int pteproto;
1.1       deraadt  5292: {
1.124     pk       5293:        int vr, vs, *pte, tpte, pmeg, s, doflush;
1.43      pk       5294:        struct regmap *rp;
                   5295:        struct segmap *sp;
1.1       deraadt  5296:
                   5297:        write_user_windows();           /* XXX conservative */
1.43      pk       5298:        vr = VA_VREG(va);
                   5299:        vs = VA_VSEG(va);
                   5300:        rp = &pm->pm_regmap[vr];
1.1       deraadt  5301:        s = splpmap();                  /* XXX conservative */
                   5302:
                   5303:        /*
                   5304:         * If there is no space in which the PTEs can be written
                   5305:         * while they are not in the hardware, this must be a new
                   5306:         * virtual segment.  Get PTE space and count the segment.
                   5307:         *
                   5308:         * TO SPEED UP CTX ALLOC, PUT SEGMENT BOUNDS STUFF HERE
                   5309:         * AND IN pmap_rmu()
                   5310:         */
1.13      pk       5311:
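                                 /*
                                  * Shrink the pmap's record of its empty-region gap so that
                                  * region `vr', which is about to gain a mapping, falls outside it.
                                  */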
1.43      pk       5312:        GAP_SHRINK(pm,vr);
1.13      pk       5313:
                   5314: #ifdef DEBUG
                   5315:        if (pm->pm_gap_end < pm->pm_gap_start) {
1.91      fair     5316:                printf("pmap_enu: gap_start 0x%x, gap_end 0x%x",
1.13      pk       5317:                        pm->pm_gap_start, pm->pm_gap_end);
                   5318:                panic("pmap_enu: gap botch");
                   5319:        }
                   5320: #endif
                   5321:
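                                 /*
                                  * The allocation below may sleep; if the segment map shows up
                                  * while we slept, the fresh copy is freed and we retry from here.
                                  */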
1.43      pk       5322: rretry:
                   5323:        if (rp->rg_segmap == NULL) {
                   5324:                /* definitely a new mapping */
1.124     pk       5325:                int i;
                   5326:                int size = NSEGRG * sizeof (struct segmap);
1.43      pk       5327:
                   5328:                sp = (struct segmap *)malloc((u_long)size, M_VMPMAP, M_WAITOK);
                   5329:                if (rp->rg_segmap != NULL) {
1.66      christos 5330: printf("pmap_enter: segment filled during sleep\n");   /* can this happen? */
1.49      pk       5331:                        free(sp, M_VMPMAP);
1.43      pk       5332:                        goto rretry;
                   5333:                }
1.55      pk       5334:                qzero((caddr_t)sp, size);
1.43      pk       5335:                rp->rg_segmap = sp;
                   5336:                rp->rg_nsegmap = 0;
                   5337:                for (i = NSEGRG; --i >= 0;)
                   5338:                        sp++->sg_pmeg = seginval;
1.145     pk       5339:
1.144     pk       5340: #if defined(SUN4_MMU3L)
                   5341: /*
                   5342:  * XXX - preallocate the region MMU cookies.
                   5343:  * XXX - Doing this keeps the machine running for a while
                   5344:  * XXX - Remove or alter this after dealing with the bugs...
                   5345:  */
                   5346:                if (HASSUN4_MMU3L) {
                   5347:                        vaddr_t tva;
                   5348:                        rp->rg_smeg = region_alloc(&region_lru, pm, vr)->me_cookie;
                   5349:                        setregmap(va, rp->rg_smeg);
                   5350:
                   5351:                        tva = VA_ROUNDDOWNTOREG(va);
                   5352:                        for (i = 0; i < NSEGRG; i++) {
                   5353:                                setsegmap(tva, seginval);
                   5354:                                tva += NBPSG;
                   5355:                        }
                   5356:                }
                   5357: /* XXX  - end of work-around */
                   5358: #endif
1.43      pk       5359:        }
                   5360:
                   5361:        sp = &rp->rg_segmap[vs];
                   5362:
                   5363: sretry:
                   5364:        if ((pte = sp->sg_pte) == NULL) {
1.1       deraadt  5365:                /* definitely a new mapping */
1.124     pk       5366:                int size = NPTESG * sizeof *pte;
1.1       deraadt  5367:
                   5368:                pte = (int *)malloc((u_long)size, M_VMPMAP, M_WAITOK);
1.43      pk       5369:                if (sp->sg_pte != NULL) {
1.66      christos 5370: printf("pmap_enter: pte filled during sleep\n");       /* can this happen? */
1.49      pk       5371:                        free(pte, M_VMPMAP);
1.43      pk       5372:                        goto sretry;
1.1       deraadt  5373:                }
                   5374: #ifdef DEBUG
1.43      pk       5375:                if (sp->sg_pmeg != seginval)
1.1       deraadt  5376:                        panic("pmap_enter: new ptes, but not seginval");
                   5377: #endif
1.55      pk       5378:                qzero((caddr_t)pte, size);
1.43      pk       5379:                sp->sg_pte = pte;
                   5380:                sp->sg_npte = 1;
                   5381:                rp->rg_nsegmap++;
1.1       deraadt  5382:        } else {
                   5383:                /* might be a change: fetch old pte */
                   5384:                doflush = 0;
1.55      pk       5385:                if ((pmeg = sp->sg_pmeg) == seginval) {
                   5386:                        /* software pte */
                   5387:                        tpte = pte[VA_VPG(va)];
                   5388:                } else {
                   5389:                        /* hardware pte */
                   5390:                        if (CTX_USABLE(pm,rp)) {
1.71      pk       5391:                                setcontext4(pm->pm_ctxnum);
1.55      pk       5392:                                tpte = getpte4(va);
1.69      pk       5393:                                doflush = CACHEINFO.c_vactype != VAC_NONE;
1.55      pk       5394:                        } else {
1.71      pk       5395:                                setcontext4(0);
1.55      pk       5396:                                /* XXX use per-cpu pteva? */
1.69      pk       5397:                                if (HASSUN4_MMU3L)
1.55      pk       5398:                                        setregmap(0, tregion);
                   5399:                                setsegmap(0, pmeg);
                   5400:                                tpte = getpte4(VA_VPG(va) << PGSHIFT);
                   5401:                        }
                   5402:                }
                   5403:                if (tpte & PG_V) {
1.124     pk       5404:                        int addr;
1.55      pk       5405:
                   5406:                        /* old mapping exists, and is of the same pa type */
                   5407:                        if ((tpte & (PG_PFNUM|PG_TYPE)) ==
                   5408:                            (pteproto & (PG_PFNUM|PG_TYPE))) {
                   5409:                                /* just changing prot and/or wiring */
                   5410:                                splx(s);
                   5411:                                /* caller should call this directly: */
1.60      pk       5412:                                pmap_changeprot4_4c(pm, va, prot, wired);
1.55      pk       5413:                                if (wired)
                   5414:                                        pm->pm_stats.wired_count++;
                   5415:                                else
                   5416:                                        pm->pm_stats.wired_count--;
                   5417:                                return;
                   5418:                        }
                   5419:                        /*
                   5420:                         * Switcheroo: changing pa for this va.
                   5421:                         * If old pa was managed, remove from pvlist.
                   5422:                         * If old page was cached, flush cache.
                   5423:                         */
1.65      christos 5424: #if 0
1.91      fair     5425: printf("%s[%d]: pmap_enu: changing existing va(0x%x)=>pa entry\n",
1.65      christos 5426:        curproc->p_comm, curproc->p_pid, va);
                   5427: #endif
1.55      pk       5428:                        if ((tpte & PG_TYPE) == PG_OBMEM) {
1.60      pk       5429:                                addr = ptoa(tpte & PG_PFNUM);
1.55      pk       5430:                                if (managed(addr))
1.58      pk       5431:                                        pv_unlink4_4c(pvhead(addr), pm, va);
1.69      pk       5432:                                if (doflush && (tpte & PG_NC) == 0)
1.55      pk       5433:                                        cache_flush_page((int)va);
                   5434:                        }
                   5435:                } else {
                   5436:                        /* adding new entry */
                   5437:                        sp->sg_npte++;
                   5438:
                   5439:                        /*
                   5440:                         * Increment counters
                   5441:                         */
                   5442:                        if (wired)
                   5443:                                pm->pm_stats.wired_count++;
                   5444:                }
                   5445:        }
                   5446:
                   5447:        if (pv != NULL)
1.115     pk       5448:                pteproto |= pv_link4_4c(pv, pm, va, pteproto & PG_NC);
1.55      pk       5449:
                   5450:        /*
                   5451:         * Update hardware & software PTEs.
                   5452:         */
                   5453:        if ((pmeg = sp->sg_pmeg) != seginval) {
1.81      pk       5454:                /* ptes are in hardware */
1.55      pk       5455:                if (CTX_USABLE(pm,rp))
1.71      pk       5456:                        setcontext4(pm->pm_ctxnum);
1.55      pk       5457:                else {
1.71      pk       5458:                        setcontext4(0);
1.55      pk       5459:                        /* XXX use per-cpu pteva? */
1.69      pk       5460:                        if (HASSUN4_MMU3L)
1.55      pk       5461:                                setregmap(0, tregion);
                   5462:                        setsegmap(0, pmeg);
                   5463:                        va = VA_VPG(va) << PGSHIFT;
                   5464:                }
                   5465:                setpte4(va, pteproto);
                   5466:        }
                   5467:        /* update software copy */
                   5468:        pte += VA_VPG(va);
                   5469:        *pte = pteproto;
                   5470:
                   5471:        splx(s);
                   5472: }
                   5473:
1.151     chs      5474: void
                   5475: pmap_kenter_pa4_4c(va, pa, prot)
                   5476:        vaddr_t va;
                   5477:        paddr_t pa;
                   5478:        vm_prot_t prot;
                   5479: {
1.153     thorpej  5480:        pmap_enter4_4c(pmap_kernel(), va, pa, prot, PMAP_WIRED);
1.151     chs      5481: }
                   5482:
                   5483: void
                   5484: pmap_kenter_pgs4_4c(va, pgs, npgs)
                   5485:        vaddr_t va;
                   5486:        struct vm_page **pgs;
                   5487:        int npgs;
                   5488: {
                   5489:        int i;
                   5490:
                   5491:        for (i = 0; i < npgs; i++, va += PAGE_SIZE) {
                   5492:                pmap_enter4_4c(pmap_kernel(), va, VM_PAGE_TO_PHYS(pgs[i]),
1.153     thorpej  5493:                                VM_PROT_READ|VM_PROT_WRITE, PMAP_WIRED);
1.151     chs      5494:        }
                   5495: }
                   5496:
                   5497: void
                   5498: pmap_kremove4_4c(va, len)
                   5499:        vaddr_t va;
                   5500:        vsize_t len;
                   5501: {
                   5502:        for (len >>= PAGE_SHIFT; len > 0; len--, va += PAGE_SIZE) {
                   5503:                pmap_remove(pmap_kernel(), va, va + PAGE_SIZE);
                   5504:        }
                   5505: }
                   5506:
1.55      pk       5507: #endif /*sun4,4c*/
                   5508:
                   5509: #if defined(SUN4M)             /* Sun4M versions of enter routines */
                   5510: /*
                   5511:  * Insert (MI) physical page pa at virtual address va in the given pmap.
                   5512:  * NB: the pa parameter includes type bits PMAP_OBIO, PMAP_NC as necessary.
                   5513:  *
                   5514:  * If pa is not in the `managed' range it will not be `bank mapped'.
                   5515:  * This works during bootstrap only because the first 4MB happens to
                   5516:  * map one-to-one.
                   5517:  *
                   5518:  * There may already be something else there, or we might just be
                   5519:  * changing protections and/or wiring on an existing mapping.
                   5520:  *     XXX     should have different entry points for changing!
                   5521:  */
                   5522:
1.153     thorpej  5523: int
                   5524: pmap_enter4m(pm, va, pa, prot, flags)
1.124     pk       5525:        struct pmap *pm;
                   5526:        vaddr_t va;
                   5527:        paddr_t pa;
1.55      pk       5528:        vm_prot_t prot;
1.153     thorpej  5529:        int flags;
1.55      pk       5530: {
1.124     pk       5531:        struct pvlist *pv;
                   5532:        int pteproto, ctx;
1.153     thorpej  5533:        boolean_t wired = (flags & PMAP_WIRED) != 0;
1.55      pk       5534:
                   5535:        if (pm == NULL)
1.153     thorpej  5536:                return (KERN_SUCCESS);
1.55      pk       5537:
                   5538: #ifdef DEBUG
                   5539:        if (pmapdebug & PDB_ENTER)
1.145     pk       5540:                printf("pmap_enter[curpid %d, ctx %d]"
                   5541:                        "(%p, 0x%lx, 0x%lx, 0x%x, 0x%x)\n",
                   5542:                        curproc==NULL ? -1 : curproc->p_pid,
                   5543:                        pm->pm_ctx==NULL ? -1 : pm->pm_ctxnum,
                   5544:                        pm, va, pa, prot, wired);
1.55      pk       5545: #endif
1.60      pk       5546:
                   5547:        /* Initialise pteproto with cache bit */
                   5548:        pteproto = (pa & PMAP_NC) == 0 ? SRMMU_PG_C : 0;
1.55      pk       5549:
1.82      pk       5550: #ifdef DEBUG
                   5551:        if (pa & PMAP_TYPE_SRMMU) {     /* this page goes in an iospace */
1.69      pk       5552:                if (cpuinfo.cpu_type == CPUTYP_MS1)
1.58      pk       5553:                        panic("pmap_enter4m: attempt to use 36-bit iospace on"
                   5554:                              " MicroSPARC");
1.55      pk       5555:        }
1.82      pk       5556: #endif
                   5557:        pteproto |= PMAP_T2PTE_SRMMU(pa);
1.55      pk       5558:
                   5559:        /* Make sure we get a pte with appropriate perms! */
                   5560:        pteproto |= SRMMU_TEPTE | PPROT_RX_RX;
                   5561:
1.82      pk       5562:        pa &= ~PMAP_TNC_SRMMU;
1.55      pk       5563:        /*
                   5564:         * Set up prototype for new PTE.  Cannot set PG_NC from PV_NC yet
                   5565:         * since the pvlist no-cache bit might change as a result of the
                   5566:         * new mapping.
                   5567:         */
                   5568:        if ((pteproto & SRMMU_PGTYPE) == PG_SUN4M_OBMEM && managed(pa)) {
                   5569: #ifdef DIAGNOSTIC
                   5570:                if (!pmap_pa_exists(pa))
1.91      fair     5571:                        panic("pmap_enter: no such address: 0x%lx", pa);
1.55      pk       5572: #endif
                   5573:                pv = pvhead(pa);
                   5574:        } else {
                   5575:                pv = NULL;
                   5576:        }
1.60      pk       5577:        pteproto |= (atop(pa) << SRMMU_PPNSHIFT);
1.55      pk       5578:
                   5579:        if (prot & VM_PROT_WRITE)
                   5580:                pteproto |= PPROT_WRITE;
                   5581:
1.71      pk       5582:        ctx = getcontext4m();
1.55      pk       5583:
                   5584:        if (pm == pmap_kernel())
1.58      pk       5585:                pmap_enk4m(pm, va, prot, wired, pv, pteproto | PPROT_S);
1.55      pk       5586:        else
1.58      pk       5587:                pmap_enu4m(pm, va, prot, wired, pv, pteproto);
1.55      pk       5588:
1.71      pk       5589:        setcontext4m(ctx);
1.153     thorpej  5590:        return (KERN_SUCCESS);
1.55      pk       5591: }
                   5592:
                   5593: /* enter new (or change existing) kernel mapping */
                   5594: void
                   5595: pmap_enk4m(pm, va, prot, wired, pv, pteproto)
1.124     pk       5596:        struct pmap *pm;
                   5597:        vaddr_t va;
1.55      pk       5598:        vm_prot_t prot;
                   5599:        int wired;
1.124     pk       5600:        struct pvlist *pv;
                   5601:        int pteproto;
1.55      pk       5602: {
1.124     pk       5603:        int vr, vs, tpte, s;
1.55      pk       5604:        struct regmap *rp;
                   5605:        struct segmap *sp;
                   5606:
                   5607: #ifdef DEBUG
                   5608:        if (va < KERNBASE)
1.72      pk       5609:                panic("pmap_enk4m: can't enter va 0x%lx below KERNBASE", va);
1.55      pk       5610: #endif
                   5611:        vr = VA_VREG(va);
                   5612:        vs = VA_VSEG(va);
                   5613:        rp = &pm->pm_regmap[vr];
                   5614:        sp = &rp->rg_segmap[vs];
                   5615:
                   5616:        s = splpmap();          /* XXX way too conservative */
                   5617:
                   5618:        if (rp->rg_seg_ptps == NULL) /* enter new region */
1.91      fair     5619:                panic("pmap_enk4m: missing kernel region table for va 0x%lx",va);
1.55      pk       5620:
1.72      pk       5621:        tpte = sp->sg_pte[VA_SUN4M_VPG(va)];
                   5622:        if ((tpte & SRMMU_TETYPE) == SRMMU_TEPTE) {
1.124     pk       5623:                int addr;
1.55      pk       5624:
                   5625:                /* old mapping exists, and is of the same pa type */
                   5626:
                   5627:                if ((tpte & SRMMU_PPNMASK) == (pteproto & SRMMU_PPNMASK)) {
                   5628:                        /* just changing protection and/or wiring */
                   5629:                        splx(s);
1.81      pk       5630:                        pmap_changeprot4m(pm, va, prot, wired);
1.55      pk       5631:                        return;
                   5632:                }
                   5633:
                   5634:                if ((tpte & SRMMU_PGTYPE) == PG_SUN4M_OBMEM) {
                   5635: #ifdef DEBUG
1.91      fair     5636: printf("pmap_enk4m: changing existing va=>pa entry: va 0x%lx, pteproto 0x%x, "
                   5637:        "oldpte 0x%x\n", va, pteproto, tpte);
1.55      pk       5638: #endif
                   5639:                        /*
                   5640:                         * Switcheroo: changing pa for this va.
                   5641:                         * If old pa was managed, remove from pvlist.
                   5642:                         * If old page was cached, flush cache.
                   5643:                         */
1.60      pk       5644:                        addr = ptoa((tpte & SRMMU_PPNMASK) >> SRMMU_PPNSHIFT);
1.55      pk       5645:                        if (managed(addr))
1.58      pk       5646:                                pv_unlink4m(pvhead(addr), pm, va);
1.55      pk       5647:                        if (tpte & SRMMU_PG_C) {
1.71      pk       5648:                                setcontext4m(0);        /* ??? */
1.69      pk       5649:                                cache_flush_page((int)va);
1.55      pk       5650:                        }
                   5651:                }
                   5652:        } else {
                   5653:                /* adding new entry */
                   5654:                sp->sg_npte++;
                   5655:        }
                   5656:
                   5657:        /*
                   5658:         * If the new mapping is for a managed PA, enter into pvlist.
                   5659:         * Note that the mapping for a malloc page will always be
                   5660:         * unique (hence will never cause a second call to malloc).
                   5661:         */
                   5662:        if (pv != NULL)
1.115     pk       5663:                pteproto &= ~(pv_link4m(pv, pm, va, (pteproto & SRMMU_PG_C) == 0));
1.55      pk       5664:
1.72      pk       5665: #ifdef DEBUG
1.55      pk       5666:        if (sp->sg_pte == NULL) /* If no existing pagetable */
1.60      pk       5667:                panic("pmap_enk4m: missing segment table for va 0x%lx",va);
1.72      pk       5668: #endif
1.55      pk       5669:
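                                 /*
                                  * Flush any stale TLB entry for `va', then install the new PTE
                                  * in the in-memory page table that the MMU walks.
                                  */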
1.72      pk       5670:        tlb_flush_page(va);
                   5671:        setpgt4m(&sp->sg_pte[VA_SUN4M_VPG(va)], pteproto);
1.55      pk       5672:
                   5673:        splx(s);
                   5674: }
                   5675:
                   5676: /* enter new (or change existing) user mapping */
                   5677: void
                   5678: pmap_enu4m(pm, va, prot, wired, pv, pteproto)
1.124     pk       5679:        struct pmap *pm;
                   5680:        vaddr_t va;
1.55      pk       5681:        vm_prot_t prot;
                   5682:        int wired;
1.124     pk       5683:        struct pvlist *pv;
                   5684:        int pteproto;
1.55      pk       5685: {
1.124     pk       5686:        int vr, vs, *pte, tpte, s;
1.55      pk       5687:        struct regmap *rp;
                   5688:        struct segmap *sp;
                   5689:
1.72      pk       5690: #ifdef DEBUG
                   5691:        if (KERNBASE < va)
                   5692:                panic("pmap_enu4m: can't enter va 0x%lx above KERNBASE", va);
                   5693: #endif
                   5694:
1.55      pk       5695:        write_user_windows();           /* XXX conservative */
                   5696:        vr = VA_VREG(va);
                   5697:        vs = VA_VSEG(va);
                   5698:        rp = &pm->pm_regmap[vr];
                   5699:        s = splpmap();                  /* XXX conservative */
                   5700:
                   5701: rretry:
                   5702:        if (rp->rg_segmap == NULL) {
                   5703:                /* definitely a new mapping */
1.124     pk       5704:                int size = NSEGRG * sizeof (struct segmap);
1.55      pk       5705:
                   5706:                sp = (struct segmap *)malloc((u_long)size, M_VMPMAP, M_WAITOK);
                   5707:                if (rp->rg_segmap != NULL) {
                   5708: #ifdef DEBUG
1.66      christos 5709: printf("pmap_enu4m: segment filled during sleep\n");   /* can this happen? */
1.55      pk       5710: #endif
                   5711:                        free(sp, M_VMPMAP);
                   5712:                        goto rretry;
                   5713:                }
                   5714:                qzero((caddr_t)sp, size);
                   5715:                rp->rg_segmap = sp;
                   5716:                rp->rg_nsegmap = 0;
                   5717:                rp->rg_seg_ptps = NULL;
                   5718:        }
                   5719: rgretry:
                   5720:        if (rp->rg_seg_ptps == NULL) {
                   5721:                /* Need a segment table */
1.100     pk       5722:                int i, *ptd;
1.73      pk       5723:
1.121     pk       5724:                ptd = pool_get(&L23_pool, PR_WAITOK);
1.55      pk       5725:                if (rp->rg_seg_ptps != NULL) {
                   5726: #ifdef DEBUG
1.66      christos 5727: printf("pmap_enu4m: bizarre segment table fill during sleep\n");
1.55      pk       5728: #endif
1.121     pk       5729:                        pool_put(&L23_pool, ptd);
1.55      pk       5730:                        goto rgretry;
                   5731:                }
                   5732:
1.73      pk       5733:                rp->rg_seg_ptps = ptd;
                   5734:                for (i = 0; i < SRMMU_L2SIZE; i++)
1.74      pk       5735:                        setpgt4m(&ptd[i], SRMMU_TEINVALID);
1.152     pk       5736:
                   5737:                /* Replicate segment allocation in each CPU's region table */
                   5738: #ifdef MULTIPROCESSOR
                   5739:                for (i = 0; i < ncpu; i++)
                   5740: #else
                   5741:                i = 0;
                   5742: #endif
                   5743:                {
                   5744:                        setpgt4m(&pm->pm_reg_ptps[i][vr],
                   5745:                                 (VA2PA((caddr_t)ptd) >> SRMMU_PPNPASHIFT) |
                   5746:                                        SRMMU_TEPTD);
                   5747:                }
1.55      pk       5748:        }
                   5749:
                   5750:        sp = &rp->rg_segmap[vs];
                   5751:
                   5752: sretry:
                   5753:        if ((pte = sp->sg_pte) == NULL) {
                   5754:                /* definitely a new mapping */
1.100     pk       5755:                int i;
1.55      pk       5756:
1.121     pk       5757:                pte = pool_get(&L23_pool, PR_WAITOK);
1.55      pk       5758:                if (sp->sg_pte != NULL) {
1.66      christos 5759: printf("pmap_enter: pte filled during sleep\n");       /* can this happen? */
1.121     pk       5760:                        pool_put(&L23_pool, pte);
1.55      pk       5761:                        goto sretry;
                   5762:                }
                   5763:
                   5764:                sp->sg_pte = pte;
                   5765:                sp->sg_npte = 1;
                   5766:                rp->rg_nsegmap++;
1.74      pk       5767:                for (i = 0; i < SRMMU_L3SIZE; i++)
                   5768:                        setpgt4m(&pte[i], SRMMU_TEINVALID);
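                                         /*
                                          * Link the new level-3 page table into the region's level-2
                                          * table as a page table descriptor (SRMMU_TEPTD).
                                          */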
1.72      pk       5769:                setpgt4m(&rp->rg_seg_ptps[vs],
                   5770:                        (VA2PA((caddr_t)pte) >> SRMMU_PPNPASHIFT) | SRMMU_TEPTD);
1.55      pk       5771:        } else {
1.72      pk       5772:                /*
                   5773:                 * Might be a change: fetch old pte
                   5774:                 */
1.143     pk       5775:                if (pm->pm_ctx) {
                   5776:                        setcontext4m(pm->pm_ctxnum);
                   5777:                        tlb_flush_page(va);
                   5778:                }
1.72      pk       5779:                tpte = pte[VA_SUN4M_VPG(va)];
1.55      pk       5780:
                   5781:                if ((tpte & SRMMU_TETYPE) == SRMMU_TEPTE) {
1.124     pk       5782:                        int addr;
1.1       deraadt  5783:
1.34      pk       5784:                        /* old mapping exists, and is of the same pa type */
1.55      pk       5785:                        if ((tpte & SRMMU_PPNMASK) ==
                   5786:                            (pteproto & SRMMU_PPNMASK)) {
1.1       deraadt  5787:                                /* just changing prot and/or wiring */
                   5788:                                splx(s);
                   5789:                                /* caller should call this directly: */
1.60      pk       5790:                                pmap_changeprot4m(pm, va, prot, wired);
1.15      deraadt  5791:                                if (wired)
                   5792:                                        pm->pm_stats.wired_count++;
                   5793:                                else
                   5794:                                        pm->pm_stats.wired_count--;
1.1       deraadt  5795:                                return;
                   5796:                        }
                   5797:                        /*
                   5798:                         * Switcheroo: changing pa for this va.
                   5799:                         * If old pa was managed, remove from pvlist.
                   5800:                         * If old page was cached, flush cache.
                   5801:                         */
1.60      pk       5802: #ifdef DEBUG
1.72      pk       5803: if (pmapdebug & PDB_SWITCHMAP)
1.143     pk       5804: printf("%s[%d]: pmap_enu: changing existing va 0x%x: pte 0x%x=>0x%x\n",
                   5805:        curproc->p_comm, curproc->p_pid, (int)va, tpte, pteproto);
1.60      pk       5806: #endif
1.55      pk       5807:                        if ((tpte & SRMMU_PGTYPE) == PG_SUN4M_OBMEM) {
1.60      pk       5808:                                addr = ptoa( (tpte & SRMMU_PPNMASK) >>
                   5809:                                             SRMMU_PPNSHIFT);
1.100     pk       5810:                                if (managed(addr)) {
                   5811:                                        pvhead(addr)->pv_flags |= MR4M(tpte);
1.58      pk       5812:                                        pv_unlink4m(pvhead(addr), pm, va);
1.100     pk       5813:                                }
1.72      pk       5814:                                if (pm->pm_ctx && (tpte & SRMMU_PG_C))
1.34      pk       5815:                                        cache_flush_page((int)va);
1.31      pk       5816:                        }
1.1       deraadt  5817:                } else {
                   5818:                        /* adding new entry */
1.43      pk       5819:                        sp->sg_npte++;
1.15      deraadt  5820:
                   5821:                        /*
                   5822:                         * Increment counters
                   5823:                         */
                   5824:                        if (wired)
                   5825:                                pm->pm_stats.wired_count++;
1.1       deraadt  5826:                }
                   5827:        }
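                                  /*
                                   * Enter the mapping on the page's pv list; pv_link4m() returns any
                                   * PTE bits (e.g. the SRMMU_PG_C cacheable bit) that must be cleared
                                   * to keep aliased mappings of this page consistent.
                                   */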
                   5828:        if (pv != NULL)
1.115     pk       5829:                pteproto &= ~(pv_link4m(pv, pm, va, (pteproto & SRMMU_PG_C) == 0));
1.1       deraadt  5830:
                   5831:        /*
1.72      pk       5832:         * Update PTEs, flush TLB as necessary.
1.1       deraadt  5833:         */
1.72      pk       5834:        if (pm->pm_ctx) {
1.71      pk       5835:                setcontext4m(pm->pm_ctxnum);
1.72      pk       5836:                tlb_flush_page(va);
                   5837:        }
                   5838:        setpgt4m(&sp->sg_pte[VA_SUN4M_VPG(va)], pteproto);
1.1       deraadt  5839:
                   5840:        splx(s);
                   5841: }
1.151     chs      5842:
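                          /*
                           * The pmap_kenter*() and pmap_kremove() entry points below are thin
                           * wrappers that establish wired kernel mappings via pmap_enter4m()
                           * and tear them down via pmap_remove().
                           */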
                   5843: void
                   5844: pmap_kenter_pa4m(va, pa, prot)
                   5845:        vaddr_t va;
                   5846:        paddr_t pa;
                   5847:        vm_prot_t prot;
                   5848: {
1.153     thorpej  5849:        pmap_enter4m(pmap_kernel(), va, pa, prot, PMAP_WIRED);
1.151     chs      5850: }
                   5851:
                   5852: void
                   5853: pmap_kenter_pgs4m(va, pgs, npgs)
                   5854:        vaddr_t va;
                   5855:        struct vm_page **pgs;
                   5856:        int npgs;
                   5857: {
                   5858:        int i;
                   5859:
                   5860:        for (i = 0; i < npgs; i++, va += PAGE_SIZE) {
                   5861:                pmap_enter4m(pmap_kernel(), va, VM_PAGE_TO_PHYS(pgs[i]),
1.153     thorpej  5862:                             VM_PROT_READ|VM_PROT_WRITE, PMAP_WIRED);
1.151     chs      5863:        }
                   5864: }
                   5865:
                   5866: void
                   5867: pmap_kremove4m(va, len)
                   5868:        vaddr_t va;
                   5869:        vsize_t len;
                   5870: {
                   5871:        for (len >>= PAGE_SHIFT; len > 0; len--, va += PAGE_SIZE) {
                   5872:                pmap_remove(pmap_kernel(), va, va + PAGE_SIZE);
                   5873:        }
                   5874: }
                   5875:
1.145     pk       5876: #endif /* SUN4M */
1.1       deraadt  5877:
                   5878: /*
1.147     thorpej  5879:  * Clear the wiring attribute for a map/virtual-address pair.
1.1       deraadt  5880:  */
                   5881: /* ARGSUSED */
                   5882: void
1.147     thorpej  5883: pmap_unwire(pm, va)
1.1       deraadt  5884:        struct pmap *pm;
1.124     pk       5885:        vaddr_t va;
1.1       deraadt  5886: {
                   5887:
                   5888:        pmap_stats.ps_useless_changewire++;
                   5889: }
                   5890:
                   5891: /*
                   5892:  * Extract the physical page address associated
                   5893:  * with the given map/virtual_address pair.
                   5894:  * GRR, the vm code knows; we should not have to do this!
                   5895:  */
1.55      pk       5896:
                   5897: #if defined(SUN4) || defined(SUN4C)
1.149     thorpej  5898: boolean_t
                   5899: pmap_extract4_4c(pm, va, pap)
1.124     pk       5900:        struct pmap *pm;
                   5901:        vaddr_t va;
1.149     thorpej  5902:        paddr_t *pap;
1.1       deraadt  5903: {
1.124     pk       5904:        int tpte;
                   5905:        int vr, vs;
1.43      pk       5906:        struct regmap *rp;
                   5907:        struct segmap *sp;
1.1       deraadt  5908:
                   5909:        if (pm == NULL) {
1.90      pk       5910: #ifdef DEBUG
                   5911:                if (pmapdebug & PDB_FOLLOW)
                   5912:                        printf("pmap_extract: null pmap\n");
                   5913: #endif
1.149     thorpej  5914:                return (FALSE);
1.1       deraadt  5915:        }
1.43      pk       5916:        vr = VA_VREG(va);
                   5917:        vs = VA_VSEG(va);
                   5918:        rp = &pm->pm_regmap[vr];
                   5919:        if (rp->rg_segmap == NULL) {
1.90      pk       5920: #ifdef DEBUG
                   5921:                if (pmapdebug & PDB_FOLLOW)
                   5922:                        printf("pmap_extract: invalid segment (%d)\n", vr);
                   5923: #endif
1.149     thorpej  5924:                return (FALSE);
1.43      pk       5925:        }
                   5926:        sp = &rp->rg_segmap[vs];
                   5927:
                   5928:        if (sp->sg_pmeg != seginval) {
1.124     pk       5929:                int ctx = getcontext4();
1.1       deraadt  5930:
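                                          /*
                                           * The segment is resident in the MMU: read the PTE from
                                           * hardware, either in the pmap's own context or, if that is
                                           * not usable, by temporarily loading the PMEG into segment 0
                                           * of context 0.
                                           */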
1.43      pk       5931:                if (CTX_USABLE(pm,rp)) {
1.61      pk       5932:                        CHANGE_CONTEXTS(ctx, pm->pm_ctxnum);
1.55      pk       5933:                        tpte = getpte4(va);
1.1       deraadt  5934:                } else {
1.61      pk       5935:                        CHANGE_CONTEXTS(ctx, 0);
1.69      pk       5936:                        if (HASSUN4_MMU3L)
1.43      pk       5937:                                setregmap(0, tregion);
                   5938:                        setsegmap(0, sp->sg_pmeg);
1.55      pk       5939:                        tpte = getpte4(VA_VPG(va) << PGSHIFT);
1.1       deraadt  5940:                }
1.71      pk       5941:                setcontext4(ctx);
1.1       deraadt  5942:        } else {
1.124     pk       5943:                int *pte = sp->sg_pte;
1.1       deraadt  5944:
                   5945:                if (pte == NULL) {
1.90      pk       5946: #ifdef DEBUG
                   5947:                        if (pmapdebug & PDB_FOLLOW)
                   5948:                                printf("pmap_extract: invalid segment\n");
                   5949: #endif
1.149     thorpej  5950:                        return (FALSE);
1.1       deraadt  5951:                }
                   5952:                tpte = pte[VA_VPG(va)];
                   5953:        }
                   5954:        if ((tpte & PG_V) == 0) {
1.90      pk       5955: #ifdef DEBUG
                   5956:                if (pmapdebug & PDB_FOLLOW)
                   5957:                        printf("pmap_extract: invalid pte\n");
                   5958: #endif
1.149     thorpej  5959:                return (FALSE);
1.1       deraadt  5960:        }
                    5961:        tpte &= PG_PFNUM;
1.149     thorpej  5963:        if (pap != NULL)
                   5964:                *pap = (tpte << PGSHIFT) | (va & PGOFSET);
                   5965:        return (TRUE);
1.1       deraadt  5966: }
1.55      pk       5967: #endif /*4,4c*/
                   5968:
                   5969: #if defined(SUN4M)             /* 4m version of pmap_extract */
                   5970: /*
                   5971:  * Extract the physical page address associated
                   5972:  * with the given map/virtual_address pair.
                   5973:  * GRR, the vm code knows; we should not have to do this!
                   5974:  */
1.149     thorpej  5975: boolean_t
                   5976: pmap_extract4m(pm, va, pap)
1.124     pk       5977:        struct pmap *pm;
                   5978:        vaddr_t va;
1.149     thorpej  5979:        paddr_t *pap;
1.55      pk       5980: {
1.90      pk       5981:        struct regmap *rm;
                   5982:        struct segmap *sm;
                   5983:        int pte;
1.55      pk       5984:
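                                  /*
                                   * On the SRMMU we keep complete software page tables, so the
                                   * translation is found by walking the region/segment/page tables
                                   * directly; no MMU probe is needed.
                                   */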
                   5985:        if (pm == NULL) {
1.90      pk       5986: #ifdef DEBUG
                   5987:                if (pmapdebug & PDB_FOLLOW)
                   5988:                        printf("pmap_extract: null pmap\n");
                   5989: #endif
1.149     thorpej  5990:                return (FALSE);
1.55      pk       5991:        }
                   5992:
1.113     pk       5993:        if ((rm = pm->pm_regmap) == NULL) {
1.90      pk       5994: #ifdef DEBUG
                   5995:                if (pmapdebug & PDB_FOLLOW)
1.145     pk       5996:                        printf("pmap_extract: no regmap entry\n");
1.90      pk       5997: #endif
1.149     thorpej  5998:                return (FALSE);
1.90      pk       5999:        }
1.113     pk       6000:
                   6001:        rm += VA_VREG(va);
                   6002:        if ((sm = rm->rg_segmap) == NULL) {
1.90      pk       6003: #ifdef DEBUG
                   6004:                if (pmapdebug & PDB_FOLLOW)
1.145     pk       6005:                        printf("pmap_extract: no segmap\n");
1.90      pk       6006: #endif
1.149     thorpej  6007:                return (FALSE);
1.90      pk       6008:        }
1.113     pk       6009:
                   6010:        sm += VA_VSEG(va);
                   6011:        if (sm->sg_pte == NULL) {
                   6012: #ifdef DEBUG
                   6013:                if (pmapdebug & PDB_FOLLOW)
1.145     pk       6014:                        printf("pmap_extract: no ptes\n");
1.113     pk       6015: #endif
1.149     thorpej  6016:                return (FALSE);
1.113     pk       6017:        }
                   6018:
1.90      pk       6019:        pte = sm->sg_pte[VA_SUN4M_VPG(va)];
                   6020:        if ((pte & SRMMU_TETYPE) != SRMMU_TEPTE) {
1.72      pk       6021: #ifdef DEBUG
1.90      pk       6022:                if (pmapdebug & PDB_FOLLOW)
                   6023:                        printf("pmap_extract: invalid pte of type %d\n",
                   6024:                               pte & SRMMU_TETYPE);
                   6025: #endif
1.149     thorpej  6026:                return (FALSE);
1.72      pk       6027:        }
1.55      pk       6028:
1.149     thorpej  6029:        if (pap != NULL)
                   6030:                *pap = ptoa((pte & SRMMU_PPNMASK) >> SRMMU_PPNSHIFT) |
                   6031:                    VA_OFF(va);
                   6032:        return (TRUE);
1.55      pk       6033: }
                   6034: #endif /* sun4m */
1.1       deraadt  6035:
                   6036: /*
                   6037:  * Copy the range specified by src_addr/len
                   6038:  * from the source map to the range dst_addr/len
                   6039:  * in the destination map.
                   6040:  *
                   6041:  * This routine is only advisory and need not do anything.
                   6042:  */
                   6043: /* ARGSUSED */
1.94      pk       6044: int pmap_copy_disabled=0;
1.1       deraadt  6045: void
                   6046: pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
                   6047:        struct pmap *dst_pmap, *src_pmap;
1.124     pk       6048:        vaddr_t dst_addr;
                   6049:        vsize_t len;
                   6050:        vaddr_t src_addr;
1.1       deraadt  6051: {
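                                  /*
                                   * The body below is compiled out (`notyet' is never defined, so the
                                   * #if evaluates to 0); pmap_copy is currently a no-op on this port.
                                   */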
1.94      pk       6052: #if notyet
1.92      pk       6053:        struct regmap *rm;
                   6054:        struct segmap *sm;
                   6055:
1.94      pk       6056:        if (pmap_copy_disabled)
                   6057:                return;
1.92      pk       6058: #ifdef DIAGNOSTIC
                   6059:        if (VA_OFF(src_addr) != 0)
                   6060:                printf("pmap_copy: addr not page aligned: 0x%lx\n", src_addr);
                   6061:        if ((len & (NBPG-1)) != 0)
                   6062:                printf("pmap_copy: length not page aligned: 0x%lx\n", len);
                   6063: #endif
                   6064:
                   6065:        if (src_pmap == NULL)
                   6066:                return;
                   6067:
1.55      pk       6068:        if (CPU_ISSUN4M) {
1.92      pk       6069:                int i, npg, pte;
1.124     pk       6070:                paddr_t pa;
1.92      pk       6071:
                   6072:                npg = len >> PGSHIFT;
                   6073:                for (i = 0; i < npg; i++) {
                   6074:                        tlb_flush_page(src_addr);
1.115     pk       6075:                        if ((rm = src_pmap->pm_regmap) == NULL)
                   6076:                                continue;
                   6077:                        rm += VA_VREG(src_addr);
                   6078:
                   6079:                        if ((sm = rm->rg_segmap) == NULL)
1.92      pk       6080:                                continue;
1.115     pk       6081:                        sm += VA_VSEG(src_addr);
                   6082:                        if (sm->sg_npte == 0)
1.92      pk       6083:                                continue;
1.115     pk       6084:
1.92      pk       6085:                        pte = sm->sg_pte[VA_SUN4M_VPG(src_addr)];
                   6086:                        if ((pte & SRMMU_TETYPE) != SRMMU_TEPTE)
                   6087:                                continue;
                   6088:
                   6089:                        pa = ptoa((pte & SRMMU_PPNMASK) >> SRMMU_PPNSHIFT);
1.55      pk       6090:                        pmap_enter(dst_pmap, dst_addr,
1.92      pk       6091:                                   pa,
1.60      pk       6092:                                   (pte & PPROT_WRITE)
1.92      pk       6093:                                        ? (VM_PROT_WRITE | VM_PROT_READ)
1.60      pk       6094:                                        : VM_PROT_READ,
1.153     thorpej  6095:                                   0);
1.55      pk       6096:                        src_addr += NBPG;
                   6097:                        dst_addr += NBPG;
                   6098:                }
                   6099:        }
                   6100: #endif
1.1       deraadt  6101: }
                   6102:
                   6103: /*
                   6104:  * Require that all active physical maps contain no
                   6105:  * incorrect entries NOW.  [This update includes
                   6106:  * forcing updates of any address map caching.]
                   6107:  */
                   6108: void
                   6109: pmap_update()
                   6110: {
1.55      pk       6111: #if defined(SUN4M)
                   6112:        if (CPU_ISSUN4M)
                   6113:                tlb_flush_all();        /* %%%: Extreme Paranoia?  */
                   6114: #endif
1.1       deraadt  6115: }
                   6116:
                   6117: /*
                   6118:  * Garbage collects the physical map system for
                   6119:  * pages which are no longer used.
                   6120:  * Success need not be guaranteed -- that is, there
                   6121:  * may well be pages which are not referenced, but
                   6122:  * others may be collected.
                   6123:  * Called by the pageout daemon when pages are scarce.
                   6124:  */
                   6125: /* ARGSUSED */
                   6126: void
                   6127: pmap_collect(pm)
                   6128:        struct pmap *pm;
                   6129: {
                   6130: }
                   6131:
1.55      pk       6132: #if defined(SUN4) || defined(SUN4C)
                   6133:
1.1       deraadt  6134: /*
                   6135:  * Clear the modify bit for the given physical page.
                   6136:  */
1.151     chs      6137: boolean_t
                   6138: pmap_clear_modify4_4c(pg)
                   6139:        struct vm_page *pg;
1.1       deraadt  6140: {
1.124     pk       6141:        struct pvlist *pv;
1.151     chs      6142:        paddr_t pa = VM_PAGE_TO_PHYS(pg);
                   6143:        boolean_t rv;
1.1       deraadt  6144:
1.82      pk       6145:        if ((pa & (PMAP_TNC_4 & ~PMAP_NC)) == 0 && managed(pa)) {
1.1       deraadt  6146:                pv = pvhead(pa);
1.58      pk       6147:                (void) pv_syncflags4_4c(pv);
1.151     chs      6148:                rv = pv->pv_flags & PV_MOD;
1.1       deraadt  6149:                pv->pv_flags &= ~PV_MOD;
1.151     chs      6150:                return rv;
1.1       deraadt  6151:        }
1.151     chs      6152:        return (0);
1.1       deraadt  6153: }
                   6154:
                   6155: /*
                   6156:  * Tell whether the given physical page has been modified.
                   6157:  */
1.151     chs      6158: boolean_t
                   6159: pmap_is_modified4_4c(pg)
                   6160:        struct vm_page *pg;
1.1       deraadt  6161: {
1.124     pk       6162:        struct pvlist *pv;
1.151     chs      6163:        paddr_t pa = VM_PAGE_TO_PHYS(pg);
1.1       deraadt  6164:
1.82      pk       6165:        if ((pa & (PMAP_TNC_4 & ~PMAP_NC)) == 0 && managed(pa)) {
1.1       deraadt  6166:                pv = pvhead(pa);
1.58      pk       6167:                if (pv->pv_flags & PV_MOD || pv_syncflags4_4c(pv) & PV_MOD)
1.1       deraadt  6168:                        return (1);
                   6169:        }
                   6170:        return (0);
                   6171: }
                   6172:
                   6173: /*
                   6174:  * Clear the reference bit for the given physical page.
                   6175:  */
1.151     chs      6176: boolean_t
                   6177: pmap_clear_reference4_4c(pg)
                   6178:        struct vm_page *pg;
1.1       deraadt  6179: {
1.124     pk       6180:        struct pvlist *pv;
1.151     chs      6181:        paddr_t pa = VM_PAGE_TO_PHYS(pg);
                   6182:        boolean_t rv;
1.1       deraadt  6183:
1.82      pk       6184:        if ((pa & (PMAP_TNC_4 & ~PMAP_NC)) == 0 && managed(pa)) {
1.1       deraadt  6185:                pv = pvhead(pa);
1.58      pk       6186:                (void) pv_syncflags4_4c(pv);
1.151     chs      6187:                rv = pv->pv_flags & PV_REF;
1.1       deraadt  6188:                pv->pv_flags &= ~PV_REF;
1.151     chs      6189:                return rv;
1.1       deraadt  6190:        }
1.151     chs      6191:        return (0);
1.1       deraadt  6192: }
                   6193:
                   6194: /*
                   6195:  * Tell whether the given physical page has been referenced.
                   6196:  */
1.151     chs      6197: boolean_t
                   6198: pmap_is_referenced4_4c(pg)
                   6199:        struct vm_page *pg;
1.1       deraadt  6200: {
1.124     pk       6201:        struct pvlist *pv;
1.151     chs      6202:        paddr_t pa = VM_PAGE_TO_PHYS(pg);
1.1       deraadt  6203:
1.82      pk       6204:        if ((pa & (PMAP_TNC_4 & ~PMAP_NC)) == 0 && managed(pa)) {
1.1       deraadt  6205:                pv = pvhead(pa);
1.58      pk       6206:                if (pv->pv_flags & PV_REF || pv_syncflags4_4c(pv) & PV_REF)
1.1       deraadt  6207:                        return (1);
                   6208:        }
                   6209:        return (0);
                   6210: }
1.55      pk       6211: #endif /*4,4c*/
                   6212:
1.58      pk       6213: #if defined(SUN4M)
                   6214:
                   6215: /*
                   6216:  * 4m versions of bit test/set routines
                   6217:  *
                   6218:  * Note that the 4m-specific routines should eventually service these
                   6219:  * requests from their page tables, and the whole pvlist bit mess should
                   6220:  * be dropped for the 4m (unless this causes a performance hit from
                   6221:  * tracing down pagetables/regmap/segmaps).
                   6222:  */
                   6223:
1.55      pk       6224: /*
                   6225:  * Clear the modify bit for the given physical page.
                   6226:  */
1.151     chs      6227: boolean_t
                   6228: pmap_clear_modify4m(pg)           /* XXX %%%: Should service from swpagetbl for 4m */
                   6229:        struct vm_page *pg;
1.55      pk       6230: {
1.124     pk       6231:        struct pvlist *pv;
1.151     chs      6232:        paddr_t pa = VM_PAGE_TO_PHYS(pg);
                   6233:        boolean_t rv;
1.55      pk       6234:
1.82      pk       6235:        if ((pa & (PMAP_TNC_SRMMU & ~PMAP_NC)) == 0 && managed(pa)) {
1.55      pk       6236:                pv = pvhead(pa);
1.58      pk       6237:                (void) pv_syncflags4m(pv);
1.151     chs      6238:                rv = pv->pv_flags & PV_MOD4M;
1.55      pk       6239:                pv->pv_flags &= ~PV_MOD4M;
1.151     chs      6240:                return rv;
1.55      pk       6241:        }
1.151     chs      6242:        return (0);
1.55      pk       6243: }
                   6244:
                   6245: /*
                   6246:  * Tell whether the given physical page has been modified.
                   6247:  */
1.151     chs      6248: boolean_t
                   6249: pmap_is_modified4m(pg) /* Test performance with SUN4M && SUN4/4C. XXX */
                   6250:        struct vm_page *pg;
1.55      pk       6251: {
1.124     pk       6252:        struct pvlist *pv;
1.151     chs      6253:        paddr_t pa = VM_PAGE_TO_PHYS(pg);
1.55      pk       6254:
1.82      pk       6255:        if ((pa & (PMAP_TNC_SRMMU & ~PMAP_NC)) == 0 && managed(pa)) {
1.55      pk       6256:                pv = pvhead(pa);
                   6257:                if (pv->pv_flags & PV_MOD4M || pv_syncflags4m(pv) & PV_MOD4M)
                   6258:                        return(1);
                   6259:        }
                   6260:        return (0);
                   6261: }
                   6262:
                   6263: /*
                   6264:  * Clear the reference bit for the given physical page.
                   6265:  */
1.151     chs      6266: boolean_t
                   6267: pmap_clear_reference4m(pg)
                   6268:        struct vm_page *pg;
1.55      pk       6269: {
1.124     pk       6270:        struct pvlist *pv;
1.151     chs      6271:        paddr_t pa = VM_PAGE_TO_PHYS(pg);
                   6272:        boolean_t rv;
1.55      pk       6273:
1.82      pk       6274:        if ((pa & (PMAP_TNC_SRMMU & ~PMAP_NC)) == 0 && managed(pa)) {
1.55      pk       6275:                pv = pvhead(pa);
1.58      pk       6276:                (void) pv_syncflags4m(pv);
1.151     chs      6277:                rv = pv->pv_flags & PV_REF4M;
1.55      pk       6278:                pv->pv_flags &= ~PV_REF4M;
1.151     chs      6279:                return rv;
1.55      pk       6280:        }
1.151     chs      6281:        return (0);
1.55      pk       6282: }
                   6283:
                   6284: /*
                   6285:  * Tell whether the given physical page has been referenced.
                   6286:  */
                    6287: boolean_t
1.151     chs      6288: pmap_is_referenced4m(pg)
                   6289:        struct vm_page *pg;
1.55      pk       6290: {
1.124     pk       6291:        struct pvlist *pv;
1.151     chs      6292:        paddr_t pa = VM_PAGE_TO_PHYS(pg);
1.55      pk       6293:
1.82      pk       6294:        if ((pa & (PMAP_TNC_SRMMU & ~PMAP_NC)) == 0 && managed(pa)) {
1.55      pk       6295:                pv = pvhead(pa);
                   6296:                if (pv->pv_flags & PV_REF4M || pv_syncflags4m(pv) & PV_REF4M)
                   6297:                        return(1);
                   6298:        }
                   6299:        return (0);
                   6300: }
                   6301: #endif /* 4m */
1.2       deraadt  6302:
                   6303: /*
1.1       deraadt  6304:  * Fill the given MI physical page with zero bytes.
                   6305:  *
                   6306:  * We avoid stomping on the cache.
                   6307:  * XXX might be faster to use destination's context and allow cache to fill?
                   6308:  */
1.55      pk       6309:
                   6310: #if defined(SUN4) || defined(SUN4C)
                   6311:
1.1       deraadt  6312: void
1.55      pk       6313: pmap_zero_page4_4c(pa)
1.124     pk       6314:        paddr_t pa;
1.1       deraadt  6315: {
1.124     pk       6316:        caddr_t va;
                   6317:        int pte;
1.1       deraadt  6318:
1.82      pk       6319:        if (((pa & (PMAP_TNC_4 & ~PMAP_NC)) == 0) && managed(pa)) {
1.1       deraadt  6320:                /*
                   6321:                 * The following might not be necessary since the page
                   6322:                 * is being cleared because it is about to be allocated,
                   6323:                 * i.e., is in use by no one.
                   6324:                 */
1.69      pk       6325:                pv_flushcache(pvhead(pa));
1.60      pk       6326:        }
                   6327:        pte = PG_V | PG_S | PG_W | PG_NC | (atop(pa) & PG_PFNUM);
1.1       deraadt  6328:
                   6329:        va = vpage[0];
1.55      pk       6330:        setpte4(va, pte);
1.1       deraadt  6331:        qzero(va, NBPG);
1.55      pk       6332:        setpte4(va, 0);
1.1       deraadt  6333: }
                   6334:
                   6335: /*
                   6336:  * Copy the given MI physical source page to its destination.
                   6337:  *
                   6338:  * We avoid stomping on the cache as above (with same `XXX' note).
                   6339:  * We must first flush any write-back cache for the source page.
                   6340:  * We go ahead and stomp on the kernel's virtual cache for the
                   6341:  * source page, since the cache can read memory MUCH faster than
                   6342:  * the processor.
                   6343:  */
                   6344: void
1.55      pk       6345: pmap_copy_page4_4c(src, dst)
1.124     pk       6346:        paddr_t src, dst;
1.1       deraadt  6347: {
1.124     pk       6348:        caddr_t sva, dva;
                   6349:        int spte, dpte;
1.1       deraadt  6350:
                   6351:        if (managed(src)) {
1.69      pk       6352:                if (CACHEINFO.c_vactype == VAC_WRITEBACK)
1.1       deraadt  6353:                        pv_flushcache(pvhead(src));
1.60      pk       6354:        }
                   6355:        spte = PG_V | PG_S | (atop(src) & PG_PFNUM);
1.1       deraadt  6356:
                   6357:        if (managed(dst)) {
                   6358:                /* similar `might not be necessary' comment applies */
1.69      pk       6359:                if (CACHEINFO.c_vactype != VAC_NONE)
1.1       deraadt  6360:                        pv_flushcache(pvhead(dst));
1.60      pk       6361:        }
                   6362:        dpte = PG_V | PG_S | PG_W | PG_NC | (atop(dst) & PG_PFNUM);
1.1       deraadt  6363:
                   6364:        sva = vpage[0];
                   6365:        dva = vpage[1];
1.55      pk       6366:        setpte4(sva, spte);
                   6367:        setpte4(dva, dpte);
1.1       deraadt  6368:        qcopy(sva, dva, NBPG);  /* loads cache, so we must ... */
1.145     pk       6369:        cache_flush_page((vaddr_t)sva);
1.55      pk       6370:        setpte4(sva, 0);
                   6371:        setpte4(dva, 0);
                   6372: }
                   6373: #endif /* 4, 4c */
                   6374:
                   6375: #if defined(SUN4M)             /* Sun4M version of copy/zero routines */
                   6376: /*
                   6377:  * Fill the given MI physical page with zero bytes.
                   6378:  *
                   6379:  * We avoid stomping on the cache.
                   6380:  * XXX might be faster to use destination's context and allow cache to fill?
                   6381:  */
                   6382: void
                   6383: pmap_zero_page4m(pa)
1.124     pk       6384:        paddr_t pa;
1.55      pk       6385: {
1.124     pk       6386:        caddr_t va;
                   6387:        int pte;
1.55      pk       6388:
1.82      pk       6389:        if (((pa & (PMAP_TNC_SRMMU & ~PMAP_NC)) == 0) && managed(pa)) {
1.55      pk       6390:                /*
                   6391:                 * The following might not be necessary since the page
                   6392:                 * is being cleared because it is about to be allocated,
                   6393:                 * i.e., is in use by no one.
                   6394:                 */
1.69      pk       6395:                if (CACHEINFO.c_vactype != VAC_NONE)
1.55      pk       6396:                        pv_flushcache(pvhead(pa));
1.60      pk       6397:        }
1.145     pk       6398:        pte = SRMMU_TEPTE | PPROT_N_RWX | (atop(pa) << SRMMU_PPNSHIFT);
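                                  /*
                                   * On CPU modules where caching is mandatory (CPUFLG_CACHE_MANDATORY),
                                   * the temporary mapping must itself be cacheable.
                                   */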
1.69      pk       6399:        if (cpuinfo.flags & CPUFLG_CACHE_MANDATORY)
1.68      abrown   6400:                pte |= SRMMU_PG_C;
                   6401:
1.55      pk       6402:        va = vpage[0];
1.142     pk       6403:        setpgt4m(vpage_pte[0], pte);
1.55      pk       6404:        qzero(va, NBPG);
1.101     pk       6405:        /* Remove temporary mapping */
1.145     pk       6406:        tlb_flush_page(va);
1.142     pk       6407:        setpgt4m(vpage_pte[0], SRMMU_TEINVALID);
1.55      pk       6408: }
                   6409:
1.159     pk       6410: /*
                   6411:  * Viking/MXCC specific version of pmap_zero_page
                   6412:  */
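                          /*
                           * The MXCC block-stream unit operates on physical addresses, so the
                           * page can be cleared without setting up a temporary mapping; the
                           * MXCC_STREAM_C bit keeps the stream operation cache-coherent.
                           */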
1.158     pk       6413: void
                   6414: pmap_zero_page_viking_mxcc(pa)
                   6415:        paddr_t pa;
                   6416: {
                   6417:        u_int offset;
                   6418:        u_int stream_data_addr = MXCC_STREAM_DATA;
                   6419:        u_int64_t v = (u_int64_t)pa;
                   6420:
                   6421:        /* Load MXCC stream data register with 0 (bottom 32 bytes only) */
                   6422:        stda(stream_data_addr+0, ASI_CONTROL, 0);
                   6423:        stda(stream_data_addr+8, ASI_CONTROL, 0);
                   6424:        stda(stream_data_addr+16, ASI_CONTROL, 0);
                   6425:        stda(stream_data_addr+24, ASI_CONTROL, 0);
                   6426:
                   6427:        /* Then write the stream data register to each block in the page */
                   6428:        v |= MXCC_STREAM_C;
                   6429:        for (offset = 0; offset < NBPG; offset += MXCC_STREAM_BLKSZ) {
                   6430:                stda(MXCC_STREAM_DST, ASI_CONTROL, v | offset);
                   6431:        }
                   6432: }
                   6433:
1.55      pk       6434: /*
1.159     pk       6435:  * HyperSPARC/RT625 specific version of pmap_zero_page
                   6436:  */
                   6437: void
                   6438: pmap_zero_page_hypersparc(pa)
                   6439:        paddr_t pa;
                   6440: {
                   6441:        caddr_t va;
                   6442:        int pte;
                   6443:        int offset;
                   6444:
                   6445:        /*
                   6446:         * We still have to map the page, since ASI_BLOCKFILL
                   6447:         * takes virtual addresses. This also means we have to
                   6448:         * consider cache aliasing; therefore we still need
                   6449:         * to flush the cache here. All we gain is the speed-up
                    6450:         * in the zero-fill loop itself.
                   6451:         */
                   6452:        if (((pa & (PMAP_TNC_SRMMU & ~PMAP_NC)) == 0) && managed(pa)) {
                   6453:                /*
                   6454:                 * The following might not be necessary since the page
                   6455:                 * is being cleared because it is about to be allocated,
                   6456:                 * i.e., is in use by no one.
                   6457:                 */
                   6458:                if (CACHEINFO.c_vactype != VAC_NONE)
                   6459:                        pv_flushcache(pvhead(pa));
                   6460:        }
                   6461:        pte = SRMMU_TEPTE | SRMMU_PG_C | PPROT_N_RWX |
                   6462:                (atop(pa) << SRMMU_PPNSHIFT);
                   6463:
                   6464:        va = vpage[0];
                   6465:        setpgt4m(vpage_pte[0], pte);
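                                  /* Each store to ASI_BLOCKFILL zero-fills one 32-byte block. */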
                   6466:        for (offset = 0; offset < NBPG; offset += 32) {
                   6467:                sta(va + offset, ASI_BLOCKFILL, 0);
                   6468:        }
                   6469:        /* Remove temporary mapping */
                   6470:        tlb_flush_page(va);
                   6471:        setpgt4m(vpage_pte[0], SRMMU_TEINVALID);
                   6472: }
                   6473:
                   6474: /*
1.55      pk       6475:  * Copy the given MI physical source page to its destination.
                   6476:  *
                   6477:  * We avoid stomping on the cache as above (with same `XXX' note).
                   6478:  * We must first flush any write-back cache for the source page.
                   6479:  * We go ahead and stomp on the kernel's virtual cache for the
                   6480:  * source page, since the cache can read memory MUCH faster than
                   6481:  * the processor.
                   6482:  */
                   6483: void
                   6484: pmap_copy_page4m(src, dst)
1.124     pk       6485:        paddr_t src, dst;
1.55      pk       6486: {
1.124     pk       6487:        caddr_t sva, dva;
                   6488:        int spte, dpte;
1.55      pk       6489:
                   6490:        if (managed(src)) {
1.69      pk       6491:                if (CACHEINFO.c_vactype == VAC_WRITEBACK)
1.55      pk       6492:                        pv_flushcache(pvhead(src));
1.60      pk       6493:        }
1.145     pk       6494:
                   6495:        spte = SRMMU_TEPTE | SRMMU_PG_C | PPROT_N_RX |
1.60      pk       6496:                (atop(src) << SRMMU_PPNSHIFT);
1.55      pk       6497:
                   6498:        if (managed(dst)) {
                   6499:                /* similar `might not be necessary' comment applies */
1.69      pk       6500:                if (CACHEINFO.c_vactype != VAC_NONE)
1.55      pk       6501:                        pv_flushcache(pvhead(dst));
1.60      pk       6502:        }
1.145     pk       6503:
                   6504:        dpte = SRMMU_TEPTE | PPROT_N_RWX | (atop(dst) << SRMMU_PPNSHIFT);
1.69      pk       6505:        if (cpuinfo.flags & CPUFLG_CACHE_MANDATORY)
1.68      abrown   6506:                dpte |= SRMMU_PG_C;
1.60      pk       6507:
1.55      pk       6508:        sva = vpage[0];
                   6509:        dva = vpage[1];
1.142     pk       6510:        setpgt4m(vpage_pte[0], spte);
                   6511:        setpgt4m(vpage_pte[1], dpte);
1.55      pk       6512:        qcopy(sva, dva, NBPG);  /* loads cache, so we must ... */
1.145     pk       6513:        cache_flush_page((vaddr_t)sva);
                   6514:        tlb_flush_page(sva);
1.142     pk       6515:        setpgt4m(vpage_pte[0], SRMMU_TEINVALID);
1.145     pk       6516:        tlb_flush_page(dva);
1.142     pk       6517:        setpgt4m(vpage_pte[1], SRMMU_TEINVALID);
1.158     pk       6518: }
                   6519:
1.159     pk       6520: /*
                   6521:  * Viking/MXCC specific version of pmap_copy_page
                   6522:  */
1.158     pk       6523: void
                   6524: pmap_copy_page_viking_mxcc(src, dst)
                   6525:        paddr_t src, dst;
                   6526: {
                   6527:        u_int offset;
                   6528:        u_int64_t v1 = (u_int64_t)src;
                   6529:        u_int64_t v2 = (u_int64_t)dst;
                   6530:
                   6531:        /* Enable cache-coherency */
                   6532:        v1 |= MXCC_STREAM_C;
                   6533:        v2 |= MXCC_STREAM_C;
                   6534:
                   6535:        /* Copy through stream data register */
                   6536:        for (offset = 0; offset < NBPG; offset += MXCC_STREAM_BLKSZ) {
                   6537:                stda(MXCC_STREAM_SRC, ASI_CONTROL, v1 | offset);
                   6538:                stda(MXCC_STREAM_DST, ASI_CONTROL, v2 | offset);
                   6539:        }
1.159     pk       6540: }
                   6541:
                   6542: /*
                   6543:  * HyperSPARC/RT625 specific version of pmap_copy_page
                   6544:  */
                   6545: void
                   6546: pmap_copy_page_hypersparc(src, dst)
                   6547:        paddr_t src, dst;
                   6548: {
                   6549:        caddr_t sva, dva;
                   6550:        int spte, dpte;
                   6551:        int offset;
                   6552:
                   6553:        /*
                   6554:         * We still have to map the pages, since ASI_BLOCKCOPY
                   6555:         * takes virtual addresses. This also means we have to
                   6556:         * consider cache aliasing; therefore we still need
                   6557:         * to flush the cache here. All we gain is the speed-up
                    6558:         * in the copy loop itself.
                   6559:         */
                   6560:
                   6561:        if (managed(src)) {
                   6562:                if (CACHEINFO.c_vactype == VAC_WRITEBACK)
                   6563:                        pv_flushcache(pvhead(src));
                   6564:        }
                   6565:
                   6566:        spte = SRMMU_TEPTE | SRMMU_PG_C | PPROT_N_RX |
                   6567:                (atop(src) << SRMMU_PPNSHIFT);
                   6568:
                   6569:        if (managed(dst)) {
                   6570:                /* similar `might not be necessary' comment applies */
                   6571:                if (CACHEINFO.c_vactype != VAC_NONE)
                   6572:                        pv_flushcache(pvhead(dst));
                   6573:        }
                   6574:
                   6575:        dpte = SRMMU_TEPTE | SRMMU_PG_C | PPROT_N_RWX |
                   6576:                (atop(dst) << SRMMU_PPNSHIFT);
                   6577:
                   6578:        sva = vpage[0];
                   6579:        dva = vpage[1];
                   6580:        setpgt4m(vpage_pte[0], spte);
                   6581:        setpgt4m(vpage_pte[1], dpte);
                   6582:
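                                  /*
                                   * Each store to ASI_BLOCKCOPY copies one 32-byte block; the source
                                   * virtual address is passed as the store data.
                                   */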
                   6583:        for (offset = 0; offset < NBPG; offset += 32) {
                   6584:                sta(dva + offset, ASI_BLOCKCOPY, sva + offset);
                   6585:        }
                   6586:
                   6587:        tlb_flush_page(sva);
                   6588:        setpgt4m(vpage_pte[0], SRMMU_TEINVALID);
                   6589:        tlb_flush_page(dva);
                   6590:        setpgt4m(vpage_pte[1], SRMMU_TEINVALID);
1.1       deraadt  6591: }
1.143     pk       6592: #endif /* SUN4M */
1.1       deraadt  6593:
                   6594: /*
                   6595:  * Turn a cdevsw d_mmap value into a byte address for pmap_enter.
                   6596:  * XXX this should almost certainly be done differently, and
                   6597:  *     elsewhere, or even not at all
                   6598:  */
1.124     pk       6599: paddr_t
1.1       deraadt  6600: pmap_phys_address(x)
                   6601:        int x;
                   6602: {
                   6603:
1.124     pk       6604:        return ((paddr_t)x);
1.1       deraadt  6605: }
                   6606:
                   6607: /*
                   6608:  * Turn off cache for a given (va, number of pages).
                   6609:  *
                    6610:  * We just mark each PTE non-cacheable (PG_NC on sun4/4c, ~SRMMU_PG_C on 4m);
                    6611:  * the addresses must reside in locked kernel space.  A cache flush is also done.
                   6612:  */
1.53      christos 6613: void
1.1       deraadt  6614: kvm_uncache(va, npages)
1.115     pk       6615:        caddr_t va;
                   6616:        int npages;
1.1       deraadt  6617: {
1.115     pk       6618:        int pte;
1.124     pk       6619:        paddr_t pa;
1.88      pk       6620:
1.55      pk       6621:        if (CPU_ISSUN4M) {
                   6622: #if defined(SUN4M)
1.100     pk       6623:                int ctx = getcontext4m();
                   6624:
                   6625:                setcontext4m(0);
1.55      pk       6626:                for (; --npages >= 0; va += NBPG) {
1.124     pk       6627:                        pte = getpte4m((vaddr_t) va);
1.55      pk       6628:                        if ((pte & SRMMU_TETYPE) != SRMMU_TEPTE)
                   6629:                                panic("kvm_uncache: table entry not pte");
1.115     pk       6630:
1.128     pk       6631:                        if ((pte & SRMMU_PGTYPE) == PG_SUN4M_OBMEM)
                   6632:                                cache_flush_page((int)va);
1.143     pk       6633:
1.115     pk       6634:                        pa = ptoa((pte & SRMMU_PPNMASK) >> SRMMU_PPNSHIFT);
1.143     pk       6635:
1.115     pk       6636:                        if ((pte & SRMMU_PGTYPE) == PG_SUN4M_OBMEM &&
                   6637:                            managed(pa)) {
                   6638:                                pv_changepte4m(pvhead(pa), 0, SRMMU_PG_C);
                   6639:                        }
1.116     pk       6640:                        pte &= ~SRMMU_PG_C;
1.124     pk       6641:                        setpte4m((vaddr_t) va, pte);
1.116     pk       6642:
1.55      pk       6643:                }
1.100     pk       6644:                setcontext4m(ctx);
1.55      pk       6645: #endif
                   6646:        } else {
                   6647: #if defined(SUN4) || defined(SUN4C)
                   6648:                for (; --npages >= 0; va += NBPG) {
                   6649:                        pte = getpte4(va);
                   6650:                        if ((pte & PG_V) == 0)
                   6651:                                panic("kvm_uncache !pg_v");
1.115     pk       6652:
                   6653:                        pa = ptoa(pte & PG_PFNUM);
                   6654:                        if ((pte & PG_TYPE) == PG_OBMEM &&
                   6655:                            managed(pa)) {
                   6656:                                pv_changepte4_4c(pvhead(pa), PG_NC, 0);
                   6657:                        }
1.116     pk       6658:                        pte |= PG_NC;
                   6659:                        setpte4(va, pte);
                   6660:                        if ((pte & PG_TYPE) == PG_OBMEM)
                   6661:                                cache_flush_page((int)va);
1.55      pk       6662:                }
                   6663: #endif
1.1       deraadt  6664:        }
1.21      deraadt  6665: }
                   6666:
1.46      pk       6667: /*
                   6668:  * Turn on IO cache for a given (va, number of pages).
                   6669:  *
                    6670:  * We just assert PG_IOC for each PTE; the addresses must reside
                    6671:  * in locked kernel space.  No cache flush is needed.
                   6672:  */
1.53      christos 6673: void
1.46      pk       6674: kvm_iocache(va, npages)
1.124     pk       6675:        caddr_t va;
                   6676:        int npages;
1.46      pk       6677: {
                   6678:
1.55      pk       6679: #ifdef SUN4M
                   6680:        if (CPU_ISSUN4M) /* %%%: Implement! */
                   6681:                panic("kvm_iocache: 4m iocache not implemented");
                   6682: #endif
                   6683: #if defined(SUN4) || defined(SUN4C)
1.46      pk       6684:        for (; --npages >= 0; va += NBPG) {
1.124     pk       6685:                int pte = getpte4(va);
1.46      pk       6686:                if ((pte & PG_V) == 0)
                   6687:                        panic("kvm_iocache !pg_v");
                   6688:                pte |= PG_IOC;
1.55      pk       6689:                setpte4(va, pte);
1.46      pk       6690:        }
1.55      pk       6691: #endif
1.46      pk       6692: }
                   6693:
1.21      deraadt  6694: int
                   6695: pmap_count_ptes(pm)
1.124     pk       6696:        struct pmap *pm;
1.21      deraadt  6697: {
1.124     pk       6698:        int idx, total;
                   6699:        struct regmap *rp;
                   6700:        struct segmap *sp;
1.21      deraadt  6701:
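                                  /*
                                   * The kernel pmap keeps its mappings in the NKREG kernel regions
                                   * starting at pm_regmap[NUREG]; user pmaps use the NUREG user regions.
                                   */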
1.43      pk       6702:        if (pm == pmap_kernel()) {
                   6703:                rp = &pm->pm_regmap[NUREG];
                   6704:                idx = NKREG;
                   6705:        } else {
                   6706:                rp = pm->pm_regmap;
                   6707:                idx = NUREG;
                   6708:        }
1.21      deraadt  6709:        for (total = 0; idx;)
1.43      pk       6710:                if ((sp = rp[--idx].rg_segmap) != NULL)
                   6711:                        total += sp->sg_npte;
1.21      deraadt  6712:        pm->pm_stats.resident_count = total;
                   6713:        return (total);
1.24      pk       6714: }
                   6715:
                   6716: /*
1.51      gwr      6717:  * Find first virtual address >= *va that is
                   6718:  * least likely to cause cache aliases.
                   6719:  * (This will just seg-align mappings.)
1.24      pk       6720:  */
1.51      gwr      6721: void
1.52      pk       6722: pmap_prefer(foff, vap)
1.124     pk       6723:        vaddr_t foff;
                   6724:        vaddr_t *vap;
1.24      pk       6725: {
1.124     pk       6726:        vaddr_t va = *vap;
                   6727:        long d, m;
1.52      pk       6728:
                   6729:        if (VA_INHOLE(va))
                   6730:                va = MMU_HOLE_END;
1.24      pk       6731:
1.48      pk       6732:        m = CACHE_ALIAS_DIST;
                   6733:        if (m == 0)             /* m=0 => no cache aliasing */
1.51      gwr      6734:                return;
1.24      pk       6735:
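                                  /*
                                   * Advance va to the next address congruent to foff modulo the cache
                                   * alias distance m (a power of two), so mappings of the same object
                                   * offset fall on the same cache alias and cannot conflict.
                                   */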
1.52      pk       6736:        d = foff - va;
                   6737:        d &= (m - 1);
                   6738:        *vap = va + d;
1.23      deraadt  6739: }
                   6740:
1.53      christos 6741: void
1.23      deraadt  6742: pmap_redzone()
                   6743: {
1.100     pk       6744:        pmap_remove(pmap_kernel(), KERNBASE, KERNBASE+NBPG);
1.104     thorpej  6745: }
                   6746:
                   6747: /*
                   6748:  * Activate the address space for the specified process.  If the
                   6749:  * process is the current process, load the new MMU context.
                   6750:  */
                   6751: void
                   6752: pmap_activate(p)
                   6753:        struct proc *p;
                   6754: {
                   6755:        pmap_t pmap = p->p_vmspace->vm_map.pmap;
                   6756:        int s;
                   6757:
                   6758:        /*
                   6759:         * This is essentially the same thing that happens in cpu_switch()
                   6760:         * when the newly selected process is about to run, except that we
                   6761:         * have to make sure to clean the register windows before we set
                   6762:         * the new context.
                   6763:         */
                   6764:
                   6765:        s = splpmap();
                   6766:        if (p == curproc) {
                   6767:                write_user_windows();
1.123     pk       6768:                if (pmap->pm_ctx == NULL) {
1.104     thorpej  6769:                        ctx_alloc(pmap);        /* performs setcontext() */
1.123     pk       6770:                } else {
                   6771:                        /* Do any cache flush needed on context switch */
                   6772:                        (*cpuinfo.pure_vcache_flush)();
1.104     thorpej  6773:                        setcontext(pmap->pm_ctxnum);
1.123     pk       6774:                }
1.104     thorpej  6775:        }
                   6776:        splx(s);
                   6777: }
                   6778:
                   6779: /*
                   6780:  * Deactivate the address space of the specified process.
                   6781:  */
                   6782: void
                   6783: pmap_deactivate(p)
                   6784:        struct proc *p;
                   6785: {
1.1       deraadt  6786: }
1.43      pk       6787:
                   6788: #ifdef DEBUG
                   6789: /*
                   6790:  * Check consistency of a pmap (time consuming!).
                   6791:  */
1.53      christos 6792: void
1.43      pk       6793: pm_check(s, pm)
                   6794:        char *s;
                   6795:        struct pmap *pm;
                   6796: {
                   6797:        if (pm == pmap_kernel())
                   6798:                pm_check_k(s, pm);
                   6799:        else
                   6800:                pm_check_u(s, pm);
                   6801: }
                   6802:
1.53      christos 6803: void
1.43      pk       6804: pm_check_u(s, pm)
                   6805:        char *s;
                   6806:        struct pmap *pm;
                   6807: {
                   6808:        struct regmap *rp;
                   6809:        struct segmap *sp;
                   6810:        int n, vs, vr, j, m, *pte;
                   6811:
1.55      pk       6812:        if (pm->pm_regmap == NULL)
1.72      pk       6813:                panic("%s: CHK(pmap %p): no region mapping", s, pm);
1.55      pk       6814:
                   6815: #if defined(SUN4M)
                   6816:        if (CPU_ISSUN4M &&
1.152     pk       6817:            (pm->pm_reg_ptps[0] == NULL ||
                   6818:             pm->pm_reg_ptps_pa[0] != VA2PA((caddr_t)pm->pm_reg_ptps[0])))
1.72      pk       6819:                panic("%s: CHK(pmap %p): no SRMMU region table or bad pa: "
                   6820:                      "tblva=%p, tblpa=0x%x",
1.152     pk       6821:                        s, pm, pm->pm_reg_ptps[0], pm->pm_reg_ptps_pa[0]);
1.55      pk       6822:
                   6823:        if (CPU_ISSUN4M && pm->pm_ctx != NULL &&
1.152     pk       6824:            (cpuinfo.ctx_tbl[pm->pm_ctxnum] != ((VA2PA((caddr_t)pm->pm_reg_ptps[0])
1.55      pk       6825:                                              >> SRMMU_PPNPASHIFT) |
                   6826:                                             SRMMU_TEPTD)))
1.91      fair     6827:            panic("%s: CHK(pmap %p): SRMMU region table at 0x%x not installed "
1.152     pk       6828:                  "for context %d", s, pm, pm->pm_reg_ptps_pa[0], pm->pm_ctxnum);
1.55      pk       6829: #endif
                   6830:
1.43      pk       6831:        for (vr = 0; vr < NUREG; vr++) {
                   6832:                rp = &pm->pm_regmap[vr];
                   6833:                if (rp->rg_nsegmap == 0)
                   6834:                        continue;
                   6835:                if (rp->rg_segmap == NULL)
                   6836:                        panic("%s: CHK(vr %d): nsegmap = %d; sp==NULL",
                   6837:                                s, vr, rp->rg_nsegmap);
1.55      pk       6838: #if defined(SUN4M)
                   6839:                if (CPU_ISSUN4M && rp->rg_seg_ptps == NULL)
                   6840:                    panic("%s: CHK(vr %d): nsegmap=%d; no SRMMU segment table",
                   6841:                          s, vr, rp->rg_nsegmap);
                   6842:                if (CPU_ISSUN4M &&
1.152     pk       6843:                    pm->pm_reg_ptps[0][vr] != ((VA2PA((caddr_t)rp->rg_seg_ptps) >>
1.55      pk       6844:                                            SRMMU_PPNPASHIFT) | SRMMU_TEPTD))
                   6845:                    panic("%s: CHK(vr %d): SRMMU segtbl not installed",s,vr);
                   6846: #endif
1.43      pk       6847:                if ((unsigned int)rp < KERNBASE)
1.54      christos 6848:                        panic("%s: rp=%p", s, rp);
1.43      pk       6849:                n = 0;
                   6850:                for (vs = 0; vs < NSEGRG; vs++) {
                   6851:                        sp = &rp->rg_segmap[vs];
                   6852:                        if ((unsigned int)sp < KERNBASE)
1.54      christos 6853:                                panic("%s: sp=%p", s, sp);
1.43      pk       6854:                        if (sp->sg_npte != 0) {
                   6855:                                n++;
                   6856:                                if (sp->sg_pte == NULL)
                   6857:                                        panic("%s: CHK(vr %d, vs %d): npte=%d, "
                   6858:                                           "pte=NULL", s, vr, vs, sp->sg_npte);
1.55      pk       6859: #if defined(SUN4M)
                   6860:                                if (CPU_ISSUN4M &&
                   6861:                                    rp->rg_seg_ptps[vs] !=
                   6862:                                     ((VA2PA((caddr_t)sp->sg_pte)
                   6863:                                        >> SRMMU_PPNPASHIFT) |
                   6864:                                       SRMMU_TEPTD))
                   6865:                                    panic("%s: CHK(vr %d, vs %d): SRMMU page "
                   6866:                                          "table not installed correctly",s,vr,
                   6867:                                          vs);
                   6868: #endif
1.43      pk       6869:                                pte=sp->sg_pte;
                   6870:                                m = 0;
                   6871:                                for (j=0; j<NPTESG; j++,pte++)
1.55      pk       6872:                                    if ((CPU_ISSUN4M
                   6873:                                         ?((*pte & SRMMU_TETYPE) == SRMMU_TEPTE)
                   6874:                                         :(*pte & PG_V)))
                   6875:                                        m++;
1.43      pk       6876:                                if (m != sp->sg_npte)
                   6877:                                    /*if (pmapdebug & 0x10000)*/
1.66      christos 6878:                                        printf("%s: user CHK(vr %d, vs %d): "
1.43      pk       6879:                                            "npte(%d) != # valid(%d)\n",
                   6880:                                                s, vr, vs, sp->sg_npte, m);
                   6881:                        }
                   6882:                }
                   6883:                if (n != rp->rg_nsegmap)
                   6884:                        panic("%s: CHK(vr %d): inconsistent "
                   6885:                                "# of segments: %d, should be %d",
                   6886:                                s, vr, rp->rg_nsegmap, n);
                   6887:        }
1.53      christos 6888:        return;
1.43      pk       6889: }
                   6890:
1.53      christos 6891: void
1.55      pk       6892: pm_check_k(s, pm)              /* Note: not as extensive as pm_check_u. */
1.43      pk       6893:        char *s;
                   6894:        struct pmap *pm;
                   6895: {
                   6896:        struct regmap *rp;
                   6897:        int vr, vs, n;
                   6898:
1.55      pk       6899:        if (pm->pm_regmap == NULL)
1.122     pk       6900:                panic("%s: CHK(pmap %p): no region mapping", s, pm);
1.55      pk       6901:
                   6902: #if defined(SUN4M)
                   6903:        if (CPU_ISSUN4M &&
1.152     pk       6904:            (pm->pm_reg_ptps[0] == NULL ||
                   6905:             pm->pm_reg_ptps_pa[0] != VA2PA((caddr_t)pm->pm_reg_ptps[0])))
1.91      fair     6906:            panic("%s: CHK(pmap %p): no SRMMU region table or bad pa: tblva=%p, tblpa=0x%x",
1.152     pk       6907:                  s, pm, pm->pm_reg_ptps[0], pm->pm_reg_ptps_pa[0]);
1.55      pk       6908:
                   6909:        if (CPU_ISSUN4M &&
1.152     pk       6910:            (cpuinfo.ctx_tbl[0] != ((VA2PA((caddr_t)pm->pm_reg_ptps[0]) >>
1.55      pk       6911:                                             SRMMU_PPNPASHIFT) | SRMMU_TEPTD)))
1.91      fair     6912:            panic("%s: CHK(pmap %p): SRMMU region table at 0x%x not installed "
1.152     pk       6913:                  "for context %d", s, pm, pm->pm_reg_ptps_pa[0], 0);
1.55      pk       6914: #endif
1.43      pk       6915:        for (vr = NUREG; vr < NUREG+NKREG; vr++) {
                   6916:                rp = &pm->pm_regmap[vr];
                   6917:                if (rp->rg_segmap == NULL)
                   6918:                        panic("%s: CHK(vr %d): nsegmap = %d; sp==NULL",
                   6919:                                s, vr, rp->rg_nsegmap);
                   6920:                if (rp->rg_nsegmap == 0)
                   6921:                        continue;
1.55      pk       6922: #if defined(SUN4M)
                   6923:                if (CPU_ISSUN4M && rp->rg_seg_ptps == NULL)
                   6924:                    panic("%s: CHK(vr %d): nsegmap=%d; no SRMMU segment table",
                   6925:                          s, vr, rp->rg_nsegmap);
                   6926:                if (CPU_ISSUN4M &&
1.152     pk       6927:                    pm->pm_reg_ptps[0][vr] != ((VA2PA((caddr_t)rp->rg_seg_ptps) >>
1.55      pk       6928:                                            SRMMU_PPNPASHIFT) | SRMMU_TEPTD))
                   6929:                    panic("%s: CHK(vr %d): SRMMU segtbl not installed",s,vr);
                   6930: #endif
1.72      pk       6931:                if (CPU_ISSUN4M) {
                   6932:                        n = NSEGRG;
                   6933:                } else {
                   6934:                        for (n = 0, vs = 0; vs < NSEGRG; vs++) {
                   6935:                                if (rp->rg_segmap[vs].sg_npte)
                   6936:                                        n++;
                   6937:                        }
1.43      pk       6938:                }
                   6939:                if (n != rp->rg_nsegmap)
1.66      christos 6940:                        printf("%s: kernel CHK(vr %d): inconsistent "
1.43      pk       6941:                                "# of segments: %d, should be %d\n",
                   6942:                                s, vr, rp->rg_nsegmap, n);
                   6943:        }
1.53      christos 6944:        return;
1.43      pk       6945: }
                   6946: #endif
1.46      pk       6947:
                   6948: /*
1.98      pk       6949:  * Return the number of disk blocks that pmap_dumpmmu() will dump.
1.46      pk       6950:  */
                   6951: int
                   6952: pmap_dumpsize()
                   6953: {
1.98      pk       6954:        int     sz;
1.67      pk       6955:
                   6956:        sz = ALIGN(sizeof(kcore_seg_t)) + ALIGN(sizeof(cpu_kcore_hdr_t));
                   6957:        sz += npmemarr * sizeof(phys_ram_seg_t);
1.98      pk       6958:        sz += sizeof(kernel_segmap_store);
1.55      pk       6959:
                   6960:        if (CPU_ISSUN4OR4C)
1.98      pk       6961:                /* For each pmeg in the MMU, we'll write NPTESG PTEs. */
1.67      pk       6962:                sz += (seginval + 1) * NPTESG * sizeof(int);
                   6963:
1.98      pk       6964:        return btodb(sz + DEV_BSIZE - 1);
1.46      pk       6965: }
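                         /*
                          * Worked example (illustrative figures only): on a sun4c with,
                          * say, three physical memory ranges and 128 pmegs (seginval ==
                          * 127), the byte count computed above is
                          *
                          *     ALIGN(sizeof(kcore_seg_t)) + ALIGN(sizeof(cpu_kcore_hdr_t))
                          *       + 3 * sizeof(phys_ram_seg_t)
                          *       + sizeof(kernel_segmap_store)
                          *       + 128 * NPTESG * sizeof(int)
                          *
                          * and btodb(sz + DEV_BSIZE - 1) rounds that up to whole DEV_BSIZE
                          * disk blocks, since pmap_dumpmmu() writes the image one block at
                          * a time.
                          */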
                   6966:
                   6967: /*
1.98      pk       6968:  * Write the core dump headers and MD data to the dump device.
                   6969:  * We dump the following items:
                   6970:  *
                   6971:  *     kcore_seg_t              MI header (defined in <sys/kcore.h>)
                   6972:  *     cpu_kcore_hdr_t          MD header (defined in <machine/kcore.h>)
                   6973:  *     phys_ram_seg_t[npmemarr] physical memory segments
                   6974:  *     segmap_t[NKREG*NSEGRG]   the kernel's segment map
                   6975:  *     the MMU pmegs on sun4/sun4c
1.46      pk       6976:  */
                   6977: int
                   6978: pmap_dumpmmu(dump, blkno)
1.124     pk       6979:        daddr_t blkno;
                   6980:        int (*dump)     __P((dev_t, daddr_t, caddr_t, size_t));
1.46      pk       6981: {
1.67      pk       6982:        kcore_seg_t     *ksegp;
                   6983:        cpu_kcore_hdr_t *kcpup;
                   6984:        phys_ram_seg_t  memseg;
1.124     pk       6985:        int             error = 0;
                   6986:        int             i, memsegoffset, segmapoffset, pmegoffset;
1.67      pk       6987:        int             buffer[dbtob(1) / sizeof(int)];
                   6988:        int             *bp, *ep;
1.55      pk       6989: #if defined(SUN4C) || defined(SUN4)
1.124     pk       6990:        int     pmeg;
1.55      pk       6991: #endif
1.46      pk       6992:
1.67      pk       6993: #define EXPEDITE(p,n) do {                                             \
                   6994:        int *sp = (int *)(p);                                           \
                   6995:        int sz = (n);                                                   \
                   6996:        while (sz > 0) {                                                \
                   6997:                *bp++ = *sp++;                                          \
                   6998:                if (bp >= ep) {                                         \
                   6999:                        error = (*dump)(dumpdev, blkno,                 \
                   7000:                                        (caddr_t)buffer, dbtob(1));     \
                   7001:                        if (error != 0)                                 \
                   7002:                                return (error);                         \
                   7003:                        ++blkno;                                        \
                   7004:                        bp = buffer;                                    \
                   7005:                }                                                       \
                   7006:                sz -= 4;                                                \
                   7007:        }                                                               \
                   7008: } while (0)
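                         /*
                          * EXPEDITE streams 32-bit words into buffer[] and flushes a full
                          * DEV_BSIZE block to the dump device whenever the buffer fills,
                          * so the code below can emit headers, memory segment descriptors,
                          * the kernel segment map and PTEs without tracking block
                          * boundaries itself; any partially filled final block is written
                          * out at the `out:' label.
                          */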
                   7009:
                   7010:        setcontext(0);
                   7011:
                   7012:        /* Setup bookkeeping pointers */
                   7013:        bp = buffer;
                   7014:        ep = &buffer[sizeof(buffer) / sizeof(buffer[0])];
                   7015:
                   7016:        /* Fill in MI segment header */
                   7017:        ksegp = (kcore_seg_t *)bp;
                   7018:        CORE_SETMAGIC(*ksegp, KCORE_MAGIC, MID_MACHINE, CORE_CPU);
1.98      pk       7019:        ksegp->c_size = dbtob(pmap_dumpsize()) - ALIGN(sizeof(kcore_seg_t));
1.67      pk       7020:
                   7021:        /* Fill in MD segment header (interpreted by MD part of libkvm) */
                   7022:        kcpup = (cpu_kcore_hdr_t *)((int)bp + ALIGN(sizeof(kcore_seg_t)));
                   7023:        kcpup->cputype = cputyp;
1.98      pk       7024:        kcpup->kernbase = KERNBASE;
1.67      pk       7025:        kcpup->nmemseg = npmemarr;
                   7026:        kcpup->memsegoffset = memsegoffset = ALIGN(sizeof(cpu_kcore_hdr_t));
1.98      pk       7027:        kcpup->nsegmap = NKREG*NSEGRG;
                   7028:        kcpup->segmapoffset = segmapoffset =
                   7029:                memsegoffset + npmemarr * sizeof(phys_ram_seg_t);
                   7030:
1.67      pk       7031:        kcpup->npmeg = (CPU_ISSUN4OR4C) ? seginval + 1 : 0;
                   7032:        kcpup->pmegoffset = pmegoffset =
1.98      pk       7033:                segmapoffset + kcpup->nsegmap * sizeof(struct segmap);
1.67      pk       7034:
                   7035:        /* Note: we have assumed everything fits in buffer[] so far... */
1.98      pk       7036:        bp = (int *)((int)kcpup + ALIGN(sizeof(cpu_kcore_hdr_t)));
1.67      pk       7037:
1.98      pk       7038: #if 0
1.67      pk       7039:        /* Align storage for upcoming quad-aligned segment array */
                   7040:        while (bp != (int *)ALIGN(bp)) {
                   7041:                int dummy = 0;
                   7042:                EXPEDITE(&dummy, 4);
                   7043:        }
1.98      pk       7044: #endif
                   7045:
1.67      pk       7046:        for (i = 0; i < npmemarr; i++) {
                   7047:                memseg.start = pmemarr[i].addr;
                   7048:                memseg.size = pmemarr[i].len;
                   7049:                EXPEDITE(&memseg, sizeof(phys_ram_seg_t));
                   7050:        }
1.98      pk       7051:
                   7052:        EXPEDITE(&kernel_segmap_store, sizeof(kernel_segmap_store));
1.67      pk       7053:
                   7054:        if (CPU_ISSUN4M)
                   7055:                goto out;
1.55      pk       7056:
                   7057: #if defined(SUN4C) || defined(SUN4)
1.46      pk       7058:        /*
                   7059:         * dump page table entries
                   7060:         *
                   7061:         * We dump each pmeg in order (by pmeg number).  Since the MMU
                   7062:         * translates a virtual segment to whatever pmeg the segment map
                   7063:         * names, we install each pmeg in turn in an otherwise unused
                   7064:         * segment map slot (the one for virtual address 0) and read its
                   7065:         * PTEs through that fixed virtual address with getpte4().
                   7066:         */
1.55      pk       7067:
1.46      pk       7068:        /*
                   7069:         * Go through the pmegs and dump each one.
                   7070:         */
                   7071:        for (pmeg = 0; pmeg <= seginval; ++pmeg) {
1.124     pk       7072:                int va = 0;
1.46      pk       7073:
                   7074:                setsegmap(va, pmeg);
                   7075:                i = NPTESG;
                   7076:                do {
1.67      pk       7077:                        int pte = getpte4(va);
                   7078:                        EXPEDITE(&pte, sizeof(pte));
1.46      pk       7079:                        va += NBPG;
                   7080:                } while (--i > 0);
                   7081:        }
                   7082:        setsegmap(0, seginval);
1.67      pk       7083: #endif
1.46      pk       7084:
1.67      pk       7085: out:
                   7086:        if (bp != buffer)
1.46      pk       7087:                error = (*dump)(dumpdev, blkno++, (caddr_t)buffer, dbtob(1));
                   7088:
                   7089:        return (error);
1.92      pk       7090: }
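                         #ifdef notdef
                         /*
                          * A minimal sketch (illustration only, not part of the original
                          * file) of how post-mortem code might locate the pieces that
                          * pmap_dumpmmu() lays out above.  `hdr' is assumed to point at
                          * the cpu_kcore_hdr_t in the dump image; the *offset fields are
                          * relative to the start of that header.
                          */
                         static void
                         locate_dump_pieces(hdr)
                                 cpu_kcore_hdr_t *hdr;
                         {
                                 char *base = (char *)hdr;
                                 phys_ram_seg_t *memsegs;
                                 struct segmap *segmaps;
                                 int *pmeg_ptes;
                         
                                 memsegs = (phys_ram_seg_t *)(base + hdr->memsegoffset);
                                 segmaps = (struct segmap *)(base + hdr->segmapoffset);
                                 pmeg_ptes = (int *)(base + hdr->pmegoffset);
                         
                                 /*
                                  * memsegs[0 .. hdr->nmemseg-1] describe physical memory,
                                  * segmaps[0 .. hdr->nsegmap-1] is the kernel segment map,
                                  * and pmeg_ptes[] holds hdr->npmeg * NPTESG PTEs
                                  * (sun4/sun4c only; hdr->npmeg is 0 on sun4m).
                                  */
                                 (void)memsegs; (void)segmaps; (void)pmeg_ptes;
                         }
                         #endif /* notdef */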
                   7091:
                   7092: /*
                   7093:  * Helper function for debuggers.
                   7094:  */
                   7095: void
                   7096: pmap_writetext(dst, ch)
                   7097:        unsigned char *dst;
                   7098:        int ch;
                   7099: {
1.95      pk       7100:        int s, pte0, pte, ctx;
1.124     pk       7101:        vaddr_t va;
1.92      pk       7102:
                   7103:        s = splpmap();
                   7104:        va = (unsigned long)dst & (~PGOFSET);
                   7105:        cpuinfo.cache_flush(dst, 1);
                   7106:
1.95      pk       7107:        ctx = getcontext();
                   7108:        setcontext(0);
                   7109:
1.92      pk       7110: #if defined(SUN4M)
                   7111:        if (CPU_ISSUN4M) {
                   7112:                pte0 = getpte4m(va);
                   7113:                if ((pte0 & SRMMU_TETYPE) != SRMMU_TEPTE) {
                                                /* restore context before bailing out */
                                                setcontext(ctx);
                   7114:                        splx(s);
                   7115:                        return;
                   7116:                }
                   7117:                pte = pte0 | PPROT_WRITE;
                   7118:                setpte4m(va, pte);
                   7119:                *dst = (unsigned char)ch;
                   7120:                setpte4m(va, pte0);
                   7121:
                   7122:        }
                   7123: #endif
                   7124: #if defined(SUN4) || defined(SUN4C)
                   7125:        if (CPU_ISSUN4C || CPU_ISSUN4) {
                   7126:                pte0 = getpte4(va);
                   7127:                if ((pte0 & PG_V) == 0) {
                                                /* restore context before bailing out */
                                                setcontext(ctx);
                   7128:                        splx(s);
                   7129:                        return;
                   7130:                }
                   7131:                pte = pte0 | PG_W;
                   7132:                setpte4(va, pte);
                   7133:                *dst = (unsigned char)ch;
                   7134:                setpte4(va, pte0);
                   7135:        }
                   7136: #endif
                   7137:        cpuinfo.cache_flush(dst, 1);
1.95      pk       7138:        setcontext(ctx);
1.92      pk       7139:        splx(s);
1.55      pk       7140: }
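                         #ifdef notdef
                         /*
                          * Illustration only: a debugger-style caller might use
                          * pmap_writetext() to patch otherwise read-only kernel text a
                          * byte at a time, e.g. to plant a breakpoint instruction.
                          * `db_patch_text' is a hypothetical helper, not part of this
                          * file's interface.
                          */
                         static void
                         db_patch_text(dst, src, len)
                                 unsigned char *dst;
                                 const unsigned char *src;
                                 int len;
                         {
                                 int i;
                         
                                 /*
                                  * pmap_writetext() temporarily grants write permission on
                                  * the page containing each byte, writes it, then restores
                                  * the original protection and flushes the cache.
                                  */
                                 for (i = 0; i < len; i++)
                                         pmap_writetext(dst + i, src[i]);
                         }
                         #endif /* notdef */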
                   7141:
                   7142: #ifdef EXTREME_DEBUG
                   7143:
                   7144: static void test_region __P((int, int, int));
                   7145:
                   7146: void
                   7147: debug_pagetables()
                   7148: {
1.124     pk       7149:        int i;
                   7150:        int *regtbl;
                   7151:        int te;
1.55      pk       7152:
1.66      christos 7153:        printf("\nncontext=%d. ",ncontext);
                   7154:        printf("Context table is at va 0x%x. Level 0 PTP: 0x%x\n",
1.69      pk       7155:               cpuinfo.ctx_tbl, cpuinfo.ctx_tbl[0]);
1.66      christos 7156:        printf("Context 0 region table is at va 0x%x, pa 0x%x. Contents:\n",
1.152     pk       7157:               pmap_kernel()->pm_reg_ptps[0], pmap_kernel()->pm_reg_ptps_pa[0]);
1.55      pk       7158:
1.152     pk       7159:        regtbl = pmap_kernel()->pm_reg_ptps[0];
1.55      pk       7160:
1.66      christos 7161:        printf("PROM vector is at 0x%x\n",promvec);
                   7162:        printf("PROM reboot routine is at 0x%x\n",promvec->pv_reboot);
                   7163:        printf("PROM abort routine is at 0x%x\n",promvec->pv_abort);
                   7164:        printf("PROM halt routine is at 0x%x\n",promvec->pv_halt);
1.55      pk       7165:
1.66      christos 7166:        printf("Testing region 0xfe: ");
1.55      pk       7167:        test_region(0xfe,0,16*1024*1024);
1.66      christos 7168:        printf("Testing region 0xff: ");
1.55      pk       7169:        test_region(0xff,0,16*1024*1024);
1.96      pk       7170:        printf("Testing kernel region 0x%x: ", VA_VREG(KERNBASE));
                   7171:        test_region(VA_VREG(KERNBASE), 4096, avail_start);
1.55      pk       7172:        cngetc();
                   7173:
                   7174:        for (i = 0; i < SRMMU_L1SIZE; i++) {
                   7175:                te = regtbl[i];
                   7176:                if ((te & SRMMU_TETYPE) == SRMMU_TEINVALID)
                   7177:                    continue;
1.66      christos 7178:                printf("Region 0x%x: PTE=0x%x <%s> L2PA=0x%x kernL2VA=0x%x\n",
1.55      pk       7179:                       i, te, ((te & SRMMU_TETYPE) == SRMMU_TEPTE ? "pte" :
                   7180:                               ((te & SRMMU_TETYPE) == SRMMU_TEPTD ? "ptd" :
                   7181:                                ((te & SRMMU_TETYPE) == SRMMU_TEINVALID ?
                   7182:                                 "invalid" : "reserved"))),
                   7183:                       (te & ~0x3) << SRMMU_PPNPASHIFT,
                   7184:                       pmap_kernel()->pm_regmap[i].rg_seg_ptps);
                   7185:        }
1.66      christos 7186:        printf("Press q to halt...\n");
1.55      pk       7187:        if (cngetc()=='q')
                   7188:            callrom();
                   7189: }
                   7190:
                   7191: static u_int
                   7192: VA2PAsw(ctx, addr, pte)
1.124     pk       7193:        int ctx;
                   7194:        caddr_t addr;
1.55      pk       7195:        int *pte;
                   7196: {
1.124     pk       7197:        int *curtbl;
                   7198:        int curpte;
1.55      pk       7199:
                   7200: #ifdef EXTREME_EXTREME_DEBUG
1.66      christos 7201:        printf("Looking up addr 0x%x in context 0x%x\n",addr,ctx);
1.55      pk       7202: #endif
                   7203:        /* L0 */
1.69      pk       7204:        *pte = curpte = cpuinfo.ctx_tbl[ctx];
1.55      pk       7205: #ifdef EXTREME_EXTREME_DEBUG
1.66      christos 7206:        printf("Got L0 pte 0x%x\n",curpte);
1.55      pk       7207: #endif
                   7208:        if ((curpte & SRMMU_TETYPE) == SRMMU_TEPTE) {
                   7209:                return (((curpte & SRMMU_PPNMASK) << SRMMU_PPNPASHIFT) |
                   7210:                        ((u_int)addr & 0xffffffff));
                   7211:        }
                   7212:        if ((curpte & SRMMU_TETYPE) != SRMMU_TEPTD) {
1.66      christos 7213:                printf("Bad context table entry 0x%x for context 0x%x\n",
1.55      pk       7214:                       curpte, ctx);
                   7215:                return 0;
                   7216:        }
                   7217:        /* L1 */
1.96      pk       7218:        curtbl = (int *)(((curpte & ~0x3) << 4) | KERNBASE); /* correct for krn */
1.55      pk       7219:        *pte = curpte = curtbl[VA_VREG(addr)];
                   7220: #ifdef EXTREME_EXTREME_DEBUG
1.66      christos 7221:        printf("L1 table at 0x%x.\nGot L1 pte 0x%x\n",curtbl,curpte);
1.55      pk       7222: #endif
                   7223:        if ((curpte & SRMMU_TETYPE) == SRMMU_TEPTE)
                   7224:            return (((curpte & SRMMU_PPNMASK) << SRMMU_PPNPASHIFT) |
                   7225:                    ((u_int)addr & 0xffffff));
                   7226:        if ((curpte & SRMMU_TETYPE) != SRMMU_TEPTD) {
1.66      christos 7227:                printf("Bad region table entry 0x%x for region 0x%x\n",
1.55      pk       7228:                       curpte, VA_VREG(addr));
                   7229:                return 0;
                   7230:        }
                   7231:        /* L2 */
1.96      pk       7232:        curtbl = (int *)(((curpte & ~0x3) << 4) | KERNBASE); /* correct for krn */
1.55      pk       7233:        *pte = curpte = curtbl[VA_VSEG(addr)];
                   7234: #ifdef EXTREME_EXTREME_DEBUG
1.66      christos 7235:        printf("L2 table at 0x%x.\nGot L2 pte 0x%x\n",curtbl,curpte);
1.55      pk       7236: #endif
                   7237:        if ((curpte & SRMMU_TETYPE) == SRMMU_TEPTE)
                   7238:            return (((curpte & SRMMU_PPNMASK) << SRMMU_PPNPASHIFT) |
                   7239:                    ((u_int)addr & 0x3ffff));
                   7240:        if ((curpte & SRMMU_TETYPE) != SRMMU_TEPTD) {
1.66      christos 7241:                printf("Bad segment table entry 0x%x for reg 0x%x, seg 0x%x\n",
1.55      pk       7242:                       curpte, VA_VREG(addr), VA_VSEG(addr));
                   7243:                return 0;
                   7244:        }
                   7245:        /* L3 */
1.96      pk       7246:        curtbl = (int *)(((curpte & ~0x3) << 4) | KERNBASE); /* correct for krn */
1.55      pk       7247:        *pte = curpte = curtbl[VA_VPG(addr)];
                   7248: #ifdef EXTREME_EXTREME_DEBUG
1.66      christos 7249:        printf("L3 table at 0x%x.\nGot L3 pte 0x%x\n",curtbl,curpte);
1.55      pk       7250: #endif
                   7251:        if ((curpte & SRMMU_TETYPE) == SRMMU_TEPTE)
                   7252:            return (((curpte & SRMMU_PPNMASK) << SRMMU_PPNPASHIFT) |
                   7253:                    ((u_int)addr & 0xfff));
                   7254:        else {
1.66      christos 7255:                printf("Bad L3 pte 0x%x for reg 0x%x, seg 0x%x, pg 0x%x\n",
1.55      pk       7256:                       curpte, VA_VREG(addr), VA_VSEG(addr), VA_VPG(addr));
                   7257:                return 0;
                   7258:        }
1.55      pk       7260: }
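                         /*
                          * Note on the walk above: an SRMMU page table descriptor (PTD)
                          * carries the physical address of the next-level table in its
                          * upper bits, so "(curpte & ~0x3) << 4" recovers that physical
                          * address (the same shift spelled SRMMU_PPNPASHIFT elsewhere in
                          * this file).  OR'ing in KERNBASE then yields a kernel virtual
                          * address for the table, which works here only because, as the
                          * "correct for krn" comments hint, these tables sit in low
                          * physical memory that the kernel maps at KERNBASE.
                          */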
                   7261:
                   7262: void test_region(reg, start, stop)
1.124     pk       7263:        int reg;
                   7264:        int start, stop;
1.55      pk       7265: {
1.124     pk       7266:        int i;
                   7267:        int addr;
                   7268:        int pte;
1.55      pk       7269:        int ptesw;
                   7270: /*     int cnt=0;
                   7271: */
                   7272:
                   7273:        for (i = start; i < stop; i+= NBPG) {
                   7274:                addr = (reg << RGSHIFT) | i;
                   7275:                pte=lda(((u_int)(addr)) | ASI_SRMMUFP_LN, ASI_SRMMUFP);
                   7276:                if (pte) {
1.66      christos 7277: /*                     printf("Valid address 0x%x\n",addr);
1.55      pk       7278:                        if (++cnt == 20) {
                   7279:                                cngetc();
                   7280:                                cnt=0;
                   7281:                        }
                   7282: */
                   7283:                        if (VA2PA(addr) != VA2PAsw(0,addr,&ptesw)) {
1.66      christos 7284:                                printf("Mismatch at address 0x%x.\n",addr);
1.55      pk       7285:                                if (cngetc()=='q') break;
                   7286:                        }
1.96      pk       7287:                        if (reg == VA_VREG(KERNBASE))
                   7288:                                /* kernel permissions are different */
                   7289:                                continue;
1.55      pk       7290:                        if ((pte&SRMMU_PROT_MASK)!=(ptesw&SRMMU_PROT_MASK)) {
1.66      christos 7291:                                printf("Mismatched protections at address "
1.55      pk       7292:                                       "0x%x; pte=0x%x, ptesw=0x%x\n",
                   7293:                                       addr,pte,ptesw);
                   7294:                                if (cngetc()=='q') break;
                   7295:                        }
                   7296:                }
                   7297:        }
1.66      christos 7298:        printf("done.\n");
1.46      pk       7299: }
1.55      pk       7300:
                   7301:
                   7302: void print_fe_map(void)
                   7303: {
                   7304:        u_int i, pte;
                   7305:
1.66      christos 7306:        printf("map of region 0xfe:\n");
1.55      pk       7307:        for (i = 0xfe000000; i < 0xff000000; i+=4096) {
                   7308:                if (((pte = getpte4m(i)) & SRMMU_TETYPE) != SRMMU_TEPTE)
                   7309:                    continue;
1.91      fair     7310:                printf("0x%x -> 0x%x%x (pte 0x%x)\n", i, pte >> 28,
1.55      pk       7311:                       (pte & ~0xff) << 4, pte);
                   7312:        }
1.66      christos 7313:        printf("done\n");
1.55      pk       7314: }
                   7315:
                   7316: #endif
