[BACK]Return to pmap.c CVS log [TXT][DIR] Up to [cvs.NetBSD.org] / src / sys / arch / sparc / sparc

Annotation of src/sys/arch/sparc/sparc/pmap.c, Revision 1.124

1.124   ! pk          1: /*     $NetBSD: pmap.c,v 1.123 1998/07/26 23:35:33 pk Exp $ */
1.22      deraadt     2:
1.1       deraadt     3: /*
1.55      pk          4:  * Copyright (c) 1996
1.57      abrown      5:  *     The President and Fellows of Harvard College. All rights reserved.
1.1       deraadt     6:  * Copyright (c) 1992, 1993
                      7:  *     The Regents of the University of California.  All rights reserved.
                      8:  *
                      9:  * This software was developed by the Computer Systems Engineering group
                     10:  * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
                     11:  * contributed to Berkeley.
                     12:  *
                     13:  * All advertising materials mentioning features or use of this software
                     14:  * must display the following acknowledgement:
1.55      pk         15:  *     This product includes software developed by Harvard University.
1.1       deraadt    16:  *     This product includes software developed by the University of
                     17:  *     California, Lawrence Berkeley Laboratory.
                     18:  *
                     19:  * Redistribution and use in source and binary forms, with or without
                     20:  * modification, are permitted provided that the following conditions
                     21:  * are met:
1.55      pk         22:  *
1.1       deraadt    23:  * 1. Redistributions of source code must retain the above copyright
                     24:  *    notice, this list of conditions and the following disclaimer.
                     25:  * 2. Redistributions in binary form must reproduce the above copyright
                     26:  *    notice, this list of conditions and the following disclaimer in the
                     27:  *    documentation and/or other materials provided with the distribution.
                     28:  * 3. All advertising materials mentioning features or use of this software
                     29:  *    must display the following acknowledgement:
1.55      pk         30:  *     This product includes software developed by Aaron Brown and
                     31:  *     Harvard University.
                     32:  *      This product includes software developed by the University of
                     33:  *      California, Berkeley and its contributors.
1.1       deraadt    34:  * 4. Neither the name of the University nor the names of its contributors
                     35:  *    may be used to endorse or promote products derived from this software
                     36:  *    without specific prior written permission.
                     37:  *
                     38:  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
                     39:  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
                     40:  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
                     41:  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
                     42:  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
                     43:  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
                     44:  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
                     45:  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
                     46:  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
                     47:  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
                     48:  * SUCH DAMAGE.
                     49:  *
1.22      deraadt    50:  *     @(#)pmap.c      8.4 (Berkeley) 2/5/94
1.55      pk         51:  *
1.1       deraadt    52:  */
                     53:
                     54: /*
                     55:  * SPARC physical map management code.
                     56:  * Does not function on multiprocessors (yet).
                     57:  */
1.112     mrg        58:
1.119     jonathan   59: #include "opt_ddb.h"
1.112     mrg        60: #include "opt_uvm.h"
1.1       deraadt    61:
                     62: #include <sys/param.h>
                     63: #include <sys/systm.h>
                     64: #include <sys/device.h>
                     65: #include <sys/proc.h>
1.43      pk         66: #include <sys/queue.h>
1.1       deraadt    67: #include <sys/malloc.h>
1.87      pk         68: #include <sys/lock.h>
1.121     pk         69: #include <sys/pool.h>
1.67      pk         70: #include <sys/exec.h>
                     71: #include <sys/core.h>
                     72: #include <sys/kcore.h>
1.1       deraadt    73:
                     74: #include <vm/vm.h>
                     75: #include <vm/vm_kern.h>
                     76: #include <vm/vm_prot.h>
                     77: #include <vm/vm_page.h>
                     78:
1.110     mrg        79: #if defined(UVM)
                     80: #include <uvm/uvm.h>
                     81: #endif
                     82:
1.1       deraadt    83: #include <machine/autoconf.h>
                     84: #include <machine/bsd_openprom.h>
1.19      deraadt    85: #include <machine/oldmon.h>
1.1       deraadt    86: #include <machine/cpu.h>
                     87: #include <machine/ctlreg.h>
1.67      pk         88: #include <machine/kcore.h>
1.1       deraadt    89:
                     90: #include <sparc/sparc/asm.h>
                     91: #include <sparc/sparc/cache.h>
1.3       deraadt    92: #include <sparc/sparc/vaddrs.h>
1.69      pk         93: #include <sparc/sparc/cpuvar.h>
1.1       deraadt    94:
                     95: #ifdef DEBUG
                     96: #define PTE_BITS "\20\40V\37W\36S\35NC\33IO\32U\31M"
1.55      pk         97: #define PTE_BITS4M "\20\10C\7M\6R\5ACC3\4ACC2\3ACC1\2TYP2\1TYP1"
1.1       deraadt    98: #endif
                     99:
                    100: /*
                    101:  * The SPARCstation offers us the following challenges:
                    102:  *
                    103:  *   1. A virtual address cache.  This is, strictly speaking, not
                    104:  *     part of the architecture, but the code below assumes one.
                    105:  *     This is a write-through cache on the 4c and a write-back cache
                    106:  *     on others.
                    107:  *
1.55      pk        108:  *   2. (4/4c only) An MMU that acts like a cache.  There is not enough
                    109:  *     space in the MMU to map everything all the time.  Instead, we need
1.1       deraadt   110:  *     to load MMU with the `working set' of translations for each
1.55      pk        111:  *     process. The sun4m does not act like a cache; tables are maintained
                    112:  *     in physical memory.
1.1       deraadt   113:  *
                    114:  *   3.        Segmented virtual and physical spaces.  The upper 12 bits of
                    115:  *     a virtual address (the virtual segment) index a segment table,
                    116:  *     giving a physical segment.  The physical segment selects a
                    117:  *     `Page Map Entry Group' (PMEG) and the virtual page number---the
                    118:  *     next 5 or 6 bits of the virtual address---select the particular
                    119:  *     `Page Map Entry' for the page.  We call the latter a PTE and
                    120:  *     call each Page Map Entry Group a pmeg (for want of a better name).
1.55      pk        121:  *     Note that the sun4m has an unsegmented 36-bit physical space.
1.1       deraadt   122:  *
                    123:  *     Since there are no valid bits in the segment table, the only way
                    124:  *     to have an invalid segment is to make one full pmeg of invalid PTEs.
1.55      pk        125:  *     We use the last one (since the ROM does as well) (sun4/4c only)
1.1       deraadt   126:  *
                    127:  *   4. Discontiguous physical pages.  The Mach VM expects physical pages
                    128:  *     to be in one sequential lump.
                    129:  *
                    130:  *   5. The MMU is always on: it is not possible to disable it.  This is
                    131:  *     mainly a startup hassle.
                    132:  */
                    133:
                    134: struct pmap_stats {
                    135:        int     ps_unlink_pvfirst;      /* # of pv_unlinks on head */
                    136:        int     ps_unlink_pvsearch;     /* # of pv_unlink searches */
                    137:        int     ps_changeprots;         /* # of calls to changeprot */
                    138:        int     ps_useless_changeprots; /* # of changeprots for wiring */
                    139:        int     ps_enter_firstpv;       /* pv heads entered */
                    140:        int     ps_enter_secondpv;      /* pv nonheads entered */
                    141:        int     ps_useless_changewire;  /* useless wiring changes */
                    142:        int     ps_npg_prot_all;        /* # of active pages protected */
                    143:        int     ps_npg_prot_actual;     /* # pages actually affected */
1.70      pk        144:        int     ps_npmeg_free;          /* # of free pmegs */
                    145:        int     ps_npmeg_locked;        /* # of pmegs on locked list */
                    146:        int     ps_npmeg_lru;           /* # of pmegs on lru list */
1.1       deraadt   147: } pmap_stats;
                    148:
                    149: #ifdef DEBUG
                    150: #define        PDB_CREATE      0x0001
                    151: #define        PDB_DESTROY     0x0002
                    152: #define        PDB_REMOVE      0x0004
                    153: #define        PDB_CHANGEPROT  0x0008
                    154: #define        PDB_ENTER       0x0010
1.90      pk        155: #define        PDB_FOLLOW      0x0020
1.1       deraadt   156:
                    157: #define        PDB_MMU_ALLOC   0x0100
                    158: #define        PDB_MMU_STEAL   0x0200
                    159: #define        PDB_CTX_ALLOC   0x0400
                    160: #define        PDB_CTX_STEAL   0x0800
1.43      pk        161: #define        PDB_MMUREG_ALLOC        0x1000
                    162: #define        PDB_MMUREG_STEAL        0x2000
1.55      pk        163: #define        PDB_CACHESTUFF  0x4000
1.72      pk        164: #define        PDB_SWITCHMAP   0x8000
                    165: #define        PDB_SANITYCHK   0x10000
1.55      pk        166: int    pmapdebug = 0;
1.1       deraadt   167: #endif
                    168:
1.55      pk        169: #if 0
1.10      deraadt   170: #define        splpmap() splimp()
1.55      pk        171: #endif
1.1       deraadt   172:
                    173: /*
                    174:  * First and last managed physical addresses.
                    175:  */
1.124   ! pk        176: paddr_t        vm_first_phys, vm_num_phys;
1.1       deraadt   177:
                    178: /*
                    179:  * For each managed physical page, there is a list of all currently
                    180:  * valid virtual mappings of that page.  Since there is usually one
                    181:  * (or zero) mapping per page, the table begins with an initial entry,
                    182:  * rather than a pointer; this head entry is empty iff its pv_pmap
                    183:  * field is NULL.
                    184:  *
                    185:  * Note that these are per machine independent page (so there may be
                    186:  * only one for every two hardware pages, e.g.).  Since the virtual
                    187:  * address is aligned on a page boundary, the low order bits are free
                    188:  * for storing flags.  Only the head of each list has flags.
                    189:  *
                    190:  * THIS SHOULD BE PART OF THE CORE MAP
                    191:  */
                    192: struct pvlist {
1.84      pk        193:        struct          pvlist *pv_next;        /* next pvlist, if any */
                    194:        struct          pmap *pv_pmap;          /* pmap of this va */
1.124   ! pk        195:        vaddr_t         pv_va;                  /* virtual address */
1.84      pk        196:        int             pv_flags;               /* flags (below) */
1.1       deraadt   197: };
                    198:
                    199: /*
                    200:  * Flags in pv_flags.  Note that PV_MOD must be 1 and PV_REF must be 2
                    201:  * since they must line up with the bits in the hardware PTEs (see pte.h).
1.115     pk        202:  * SUN4M bits are at a slightly different location in the PTE.
                    203:  * Note: the REF, MOD and ANC flag bits occur only in the head of a pvlist.
                    204:  * The cacheable bit (either PV_NC or PV_C4M) is meaningful in each
                    205:  * individual pv entry.
                    206:  */
                    207: #define PV_MOD         1       /* page modified */
                    208: #define PV_REF         2       /* page referenced */
                    209: #define PV_NC          4       /* page cannot be cached */
                    210: #define PV_REF4M       1       /* page referenced (SRMMU) */
                    211: #define PV_MOD4M       2       /* page modified (SRMMU) */
                    212: #define PV_C4M         4       /* page _can_ be cached (SRMMU) */
                    213: #define PV_ANC         0x10    /* page has incongruent aliases */
1.1       deraadt   214:
                    215: struct pvlist *pv_table;       /* array of entries, one per physical page */
                    216:
1.124   ! pk        217: #define pvhead(pa)     (&pv_table[((pa) - vm_first_phys) >> PGSHIFT])
1.1       deraadt   218:
1.124   ! pk        219: static vsize_t pv_table_map __P((paddr_t, int));
        !           220: static paddr_t pv_physmem;
1.122     pk        221: static struct pool pv_pool;
                    222:
                    223:
1.1       deraadt   224: /*
                    225:  * Each virtual segment within each pmap is either valid or invalid.
                    226:  * It is valid if pm_npte[VA_VSEG(va)] is not 0.  This does not mean
                    227:  * it is in the MMU, however; that is true iff pm_segmap[VA_VSEG(va)]
                    228:  * does not point to the invalid PMEG.
                    229:  *
1.55      pk        230:  * In the older SPARC architectures (pre-4m), page tables are cached in the
                    231:  * MMU. The following discussion applies to these architectures:
                    232:  *
1.1       deraadt   233:  * If a virtual segment is valid and loaded, the correct PTEs appear
                    234:  * in the MMU only.  If it is valid and unloaded, the correct PTEs appear
                    235:  * in the pm_pte[VA_VSEG(va)] only.  However, some effort is made to keep
                    236:  * the software copies consistent enough with the MMU so that libkvm can
                    237:  * do user address translations.  In particular, pv_changepte() and
                    238:  * pmap_enu() maintain consistency, while less critical changes are
                    239:  * not maintained.  pm_pte[VA_VSEG(va)] always points to space for those
                    240:  * PTEs, unless this is the kernel pmap, in which case pm_pte[x] is not
                    241:  * used (sigh).
                    242:  *
                    243:  * Each PMEG in the MMU is either free or contains PTEs corresponding to
                    244:  * some pmap and virtual segment.  If it contains some PTEs, it also contains
                    245:  * reference and modify bits that belong in the pv_table.  If we need
                    246:  * to steal a PMEG from some process (if we need one and none are free)
                    247:  * we must copy the ref and mod bits, and update pm_segmap in the other
                    248:  * pmap to show that its virtual segment is no longer in the MMU.
                    249:  *
                    250:  * There are 128 PMEGs in a small Sun-4, of which only a few dozen are
                    251:  * tied down permanently, leaving `about' 100 to be spread among
                    252:  * running processes.  These are managed as an LRU cache.  Before
                    253:  * calling the VM paging code for a user page fault, the fault handler
                    254:  * calls mmu_load(pmap, va) to try to get a set of PTEs put into the
                    255:  * MMU.  mmu_load will check the validity of the segment and tell whether
                    256:  * it did something.
                    257:  *
                    258:  * Since I hate the name PMEG I call this data structure an `mmu entry'.
                    259:  * Each mmuentry is on exactly one of three `usage' lists: free, LRU,
                    260:  * or locked.  The LRU list is for user processes; the locked list is
                    261:  * for kernel entries; both are doubly linked queues headed by `mmuhd's.
                    262:  * The free list is a simple list, headed by a free list pointer.
1.55      pk        263:  *
                    264:  * In the sun4m architecture using the SPARC Reference MMU (SRMMU), three
                    265:  * levels of page tables are maintained in physical memory. We use the same
                    266:  * structures as with the 3-level old-style MMU (pm_regmap, pm_segmap,
                    267:  * rg_segmap, sg_pte, etc) to maintain kernel-edible page tables; we also
                    268:  * build a parallel set of physical tables that can be used by the MMU.
                    269:  * (XXX: This seems redundant, but is it necessary for the unified kernel?)
                    270:  *
                    271:  * If a virtual segment is valid, its entries will be in both parallel lists.
                    272:  * If it is not valid, then its entry in the kernel tables will be zero, and
                    273:  * its entry in the MMU tables will either be nonexistent or zero as well.
1.72      pk        274:  *
                    275:  * The Reference MMU generally uses a Translation Look-aside Buffer (TLB)
                    276:  * to cache the result of recently executed page table walks. When
                    277:  * manipulating page tables, we need to ensure consistency of the
                    278:  * in-memory and TLB copies of the page table entries. This is handled
                    279:  * by flushing (and invalidating) a TLB entry when appropriate before
                    280:  * altering an in-memory page table entry.
1.1       deraadt   281:  */
                    282: struct mmuentry {
1.43      pk        283:        TAILQ_ENTRY(mmuentry)   me_list;        /* usage list link */
                    284:        TAILQ_ENTRY(mmuentry)   me_pmchain;     /* pmap owner link */
1.1       deraadt   285:        struct  pmap *me_pmap;          /* pmap, if in use */
1.43      pk        286:        u_short me_vreg;                /* associated virtual region/segment */
                    287:        u_short me_vseg;                /* associated virtual region/segment */
1.45      pk        288:        u_short me_cookie;              /* hardware SMEG/PMEG number */
1.1       deraadt   289: };
1.43      pk        290: struct mmuentry *mmusegments;  /* allocated in pmap_bootstrap */
                    291: struct mmuentry *mmuregions;   /* allocated in pmap_bootstrap */
1.1       deraadt   292:
1.43      pk        293: struct mmuhd segm_freelist, segm_lru, segm_locked;
                    294: struct mmuhd region_freelist, region_lru, region_locked;
1.1       deraadt   295:
1.69      pk        296: int    seginval;               /* [4/4c] the invalid segment number */
                    297: int    reginval;               /* [4/3mmu] the invalid region number */
1.1       deraadt   298:
                    299: /*
1.55      pk        300:  * (sun4/4c)
1.1       deraadt   301:  * A context is simply a small number that dictates which set of 4096
                    302:  * segment map entries the MMU uses.  The Sun 4c has eight such sets.
                    303:  * These are alloted in an `almost MRU' fashion.
1.55      pk        304:  * (sun4m)
                    305:  * A context is simply a small number that indexes the context table, the
                    306:  * root-level page table mapping 4G areas. Each entry in this table points
                    307:  * to a 1st-level region table. A SPARC reference MMU will usually use 16
                    308:  * such contexts, but some offer as many as 64k contexts; the theoretical
                    309:  * maximum is 2^32 - 1, but this would create overlarge context tables.
1.1       deraadt   310:  *
                    311:  * Each context is either free or attached to a pmap.
                    312:  *
                    313:  * Since the virtual address cache is tagged by context, when we steal
                    314:  * a context we have to flush (that part of) the cache.
                    315:  */
                    316: union ctxinfo {
                    317:        union   ctxinfo *c_nextfree;    /* free list (if free) */
                    318:        struct  pmap *c_pmap;           /* pmap (if busy) */
                    319: };
1.69      pk        320:
                    321: #define ncontext       (cpuinfo.mmu_ncontext)
                    322: #define ctx_kick       (cpuinfo.ctx_kick)
                    323: #define ctx_kickdir    (cpuinfo.ctx_kickdir)
                    324: #define ctx_freelist   (cpuinfo.ctx_freelist)
                    325:
1.122     pk        326: void   ctx_alloc __P((struct pmap *));
                    327: void   ctx_free __P((struct pmap *));
                    328:
1.69      pk        329: #if 0
1.1       deraadt   330: union ctxinfo *ctxinfo;                /* allocated at in pmap_bootstrap */
                    331:
                    332: union  ctxinfo *ctx_freelist;  /* context free list */
                    333: int    ctx_kick;               /* allocation rover when none free */
                    334: int    ctx_kickdir;            /* ctx_kick roves both directions */
                    335:
1.69      pk        336: char   *ctxbusyvector;         /* [4m] tells what contexts are busy (XXX)*/
                    337: #endif
1.55      pk        338:
1.1       deraadt   339: caddr_t        vpage[2];               /* two reserved MD virtual pages */
1.101     pk        340: #if defined(SUN4M)
                    341: int    *vpage_pte[2];          /* pte location of vpage[] */
                    342: #endif
1.41      mycroft   343: caddr_t        vmmap;                  /* one reserved MI vpage for /dev/mem */
1.55      pk        344: caddr_t        vdumppages;             /* 32KB worth of reserved dump pages */
1.1       deraadt   345:
1.69      pk        346: smeg_t         tregion;        /* [4/3mmu] Region for temporary mappings */
                    347:
1.43      pk        348: struct pmap    kernel_pmap_store;              /* the kernel's pmap */
                    349: struct regmap  kernel_regmap_store[NKREG];     /* the kernel's regmap */
                    350: struct segmap  kernel_segmap_store[NKREG*NSEGRG];/* the kernel's segmaps */
1.1       deraadt   351:
1.69      pk        352: #if defined(SUN4M)
1.55      pk        353: u_int  *kernel_regtable_store;         /* 1k of storage to map the kernel */
                    354: u_int  *kernel_segtable_store;         /* 2k of storage to map the kernel */
                    355: u_int  *kernel_pagtable_store;         /* 128k of storage to map the kernel */
                    356:
                    357: u_int  *kernel_iopte_table;            /* 64k of storage for iommu */
                    358: u_int  kernel_iopte_table_pa;
1.121     pk        359:
                    360: /*
                    361:  * Memory pools and back-end supplier for SRMMU page tables.
                    362:  * Share a pool between the level 2 and level 3 page tables,
                    363:  * since these are equal in size.
                    364:  */
                    365: static struct pool L1_pool;
                    366: static struct pool L23_pool;
                    367:
                    368: static void *pgt_page_alloc __P((unsigned long, int, int));
                    369: static void  pgt_page_free __P((void *, unsigned long, int));
                    370:
1.55      pk        371: #endif
                    372:
1.30      pk        373: #define        MA_SIZE 32              /* size of memory descriptor arrays */
1.1       deraadt   374: struct memarr pmemarr[MA_SIZE];/* physical memory regions */
                    375: int    npmemarr;               /* number of entries in pmemarr */
1.124   ! pk        376: /*static*/ paddr_t     avail_start;    /* first free physical page */
        !           377: /*static*/ paddr_t     avail_end;      /* last free physical page */
        !           378: /*static*/ paddr_t     unavail_gap_start;/* first stolen free phys page */
        !           379: /*static*/ paddr_t     unavail_gap_end;/* last stolen free physical page */
        !           380: /*static*/ vaddr_t     virtual_avail;  /* first free virtual page number */
        !           381: /*static*/ vaddr_t     virtual_end;    /* last free virtual page number */
1.29      pk        382:
1.107     pk        383: static void pmap_page_upload __P((void));
1.118     thorpej   384: void pmap_pinit __P((pmap_t));
                    385: void pmap_release __P((pmap_t));
1.107     pk        386:
1.45      pk        387: int mmu_has_hole;
                    388:
1.124   ! pk        389: vaddr_t prom_vstart;   /* For /dev/kmem */
        !           390: vaddr_t prom_vend;
1.1       deraadt   391:
1.55      pk        392: #if defined(SUN4)
1.31      pk        393: /*
1.55      pk        394:  * [sun4]: segfixmask: on some systems (4/110) "getsegmap()" returns a
                    395:  * partly invalid value. getsegmap returns a 16 bit value on the sun4,
                    396:  * but only the first 8 or so bits are valid (the rest are *supposed* to
                    397:  * be zero. On the 4/110 the bits that are supposed to be zero are
                    398:  * all one instead. e.g. KERNBASE is usually mapped by pmeg number zero.
                    399:  * On a 4/300 getsegmap(KERNBASE) == 0x0000, but
1.31      pk        400:  * on a 4/100 getsegmap(KERNBASE) == 0xff00
                    401:  *
1.55      pk        402:  * This confuses mmu_reservemon() and causes it to not reserve the PROM's
                    403:  * pmegs. Then the PROM's pmegs get used during autoconfig and everything
1.31      pk        404:  * falls apart!  (not very fun to debug, BTW.)
                    405:  *
1.43      pk        406:  * solution: mask the invalid bits in the getsetmap macro.
1.31      pk        407:  */
                    408:
                    409: static u_long segfixmask = 0xffffffff; /* all bits valid to start */
1.55      pk        410: #else
                    411: #define segfixmask 0xffffffff  /* It's in getsegmap's scope */
1.31      pk        412: #endif
                    413:
1.1       deraadt   414: /*
                    415:  * pseudo-functions for mnemonic value
                    416:  */
1.71      pk        417: #define getcontext4()          lduba(AC_CONTEXT, ASI_CONTROL)
                    418: #define getcontext4m()         lda(SRMMU_CXR, ASI_SRMMU)
1.55      pk        419: #define getcontext()           (CPU_ISSUN4M \
1.71      pk        420:                                        ? getcontext4m() \
                    421:                                        : getcontext4()  )
                    422:
                    423: #define setcontext4(c)         stba(AC_CONTEXT, ASI_CONTROL, c)
                    424: #define setcontext4m(c)                sta(SRMMU_CXR, ASI_SRMMU, c)
1.55      pk        425: #define setcontext(c)          (CPU_ISSUN4M \
1.71      pk        426:                                        ? setcontext4m(c) \
                    427:                                        : setcontext4(c)  )
1.55      pk        428:
                    429: #define        getsegmap(va)           (CPU_ISSUN4C \
                    430:                                        ? lduba(va, ASI_SEGMAP) \
                    431:                                        : (lduha(va, ASI_SEGMAP) & segfixmask))
                    432: #define        setsegmap(va, pmeg)     (CPU_ISSUN4C \
                    433:                                        ? stba(va, ASI_SEGMAP, pmeg) \
                    434:                                        : stha(va, ASI_SEGMAP, pmeg))
                    435:
                    436: /* 3-level sun4 MMU only: */
                    437: #define        getregmap(va)           ((unsigned)lduha((va)+2, ASI_REGMAP) >> 8)
                    438: #define        setregmap(va, smeg)     stha((va)+2, ASI_REGMAP, (smeg << 8))
                    439:
                    440: #if defined(SUN4M)
                    441: #define getpte4m(va)           lda((va & 0xFFFFF000) | ASI_SRMMUFP_L3, \
                    442:                                    ASI_SRMMUFP)
1.72      pk        443: void   setpgt4m __P((int *ptep, int pte));
1.124   ! pk        444: void   setpte4m __P((vaddr_t va, int pte));
        !           445: void   setptesw4m __P((struct pmap *pm, vaddr_t va, int pte));
        !           446: static u_int   getptesw4m __P((struct pmap *pm, vaddr_t va));
1.55      pk        447: #endif
                    448:
                    449: #if defined(SUN4) || defined(SUN4C)
                    450: #define        getpte4(va)             lda(va, ASI_PTE)
                    451: #define        setpte4(va, pte)        sta(va, ASI_PTE, pte)
                    452: #endif
                    453:
                    454: /* Function pointer messiness for supporting multiple sparc architectures
                    455:  * within a single kernel: notice that there are two versions of many of the
                    456:  * functions within this file/module, one for the sun4/sun4c and the other
                    457:  * for the sun4m. For performance reasons (since things like pte bits don't
                    458:  * map nicely between the two architectures), there are separate functions
                    459:  * rather than unified functions which test the cputyp variable. If only
                    460:  * one architecture is being used, then the non-suffixed function calls
                    461:  * are macro-translated into the appropriate xxx4_4c or xxx4m call. If
                    462:  * multiple architectures are defined, the calls translate to (*xxx_p),
                    463:  * i.e. they indirect through function pointers initialized as appropriate
                    464:  * to the run-time architecture in pmap_bootstrap. See also pmap.h.
                    465:  */
                    466:
                    467: #if defined(SUN4M)
1.71      pk        468: static void mmu_setup4m_L1 __P((int, struct pmap *));
                    469: static void mmu_setup4m_L2 __P((int, struct regmap *));
                    470: static void  mmu_setup4m_L3 __P((int, struct segmap *));
1.77      pk        471: /*static*/ void        mmu_reservemon4m __P((struct pmap *));
1.58      pk        472:
1.124   ! pk        473: /*static*/ void pmap_rmk4m __P((struct pmap *, vaddr_t, vaddr_t, int, int));
        !           474: /*static*/ void pmap_rmu4m __P((struct pmap *, vaddr_t, vaddr_t, int, int));
        !           475: /*static*/ void pmap_enk4m __P((struct pmap *, vaddr_t, vm_prot_t,
        !           476:                                int, struct pvlist *, int));
        !           477: /*static*/ void pmap_enu4m __P((struct pmap *, vaddr_t, vm_prot_t,
        !           478:                                int, struct pvlist *, int));
1.55      pk        479: /*static*/ void pv_changepte4m __P((struct pvlist *, int, int));
                    480: /*static*/ int  pv_syncflags4m __P((struct pvlist *));
1.124   ! pk        481: /*static*/ int  pv_link4m __P((struct pvlist *, struct pmap *, vaddr_t, int));
        !           482: /*static*/ void pv_unlink4m __P((struct pvlist *, struct pmap *, vaddr_t));
1.55      pk        483: #endif
                    484:
                    485: #if defined(SUN4) || defined(SUN4C)
1.58      pk        486: /*static*/ void        mmu_reservemon4_4c __P((int *, int *));
1.124   ! pk        487: /*static*/ void pmap_rmk4_4c __P((struct pmap *, vaddr_t, vaddr_t, int, int));
        !           488: /*static*/ void pmap_rmu4_4c __P((struct pmap *, vaddr_t, vaddr_t, int, int));
        !           489: /*static*/ void pmap_enk4_4c __P((struct pmap *, vaddr_t, vm_prot_t,
        !           490:                                  int, struct pvlist *, int));
        !           491: /*static*/ void pmap_enu4_4c __P((struct pmap *, vaddr_t, vm_prot_t,
        !           492:                                  int, struct pvlist *, int));
1.55      pk        493: /*static*/ void pv_changepte4_4c __P((struct pvlist *, int, int));
                    494: /*static*/ int  pv_syncflags4_4c __P((struct pvlist *));
1.124   ! pk        495: /*static*/ int  pv_link4_4c __P((struct pvlist *, struct pmap *, vaddr_t, int));
        !           496: /*static*/ void pv_unlink4_4c __P((struct pvlist *, struct pmap *, vaddr_t));
1.55      pk        497: #endif
                    498:
                    499: #if !defined(SUN4M) && (defined(SUN4) || defined(SUN4C))
                    500: #define                pmap_rmk        pmap_rmk4_4c
                    501: #define                pmap_rmu        pmap_rmu4_4c
                    502:
                    503: #elif defined(SUN4M) && !(defined(SUN4) || defined(SUN4C))
                    504: #define                pmap_rmk        pmap_rmk4m
                    505: #define                pmap_rmu        pmap_rmu4m
                    506:
                    507: #else  /* must use function pointers */
                    508:
                    509: /* function pointer declarations */
                    510: /* from pmap.h: */
1.124   ! pk        511: void           (*pmap_clear_modify_p) __P((paddr_t pa));
        !           512: void           (*pmap_clear_reference_p) __P((paddr_t pa));
        !           513: void           (*pmap_copy_page_p) __P((paddr_t, paddr_t));
        !           514: void           (*pmap_enter_p) __P((pmap_t, vaddr_t, paddr_t,
        !           515:                                     vm_prot_t, boolean_t));
        !           516: paddr_t                (*pmap_extract_p) __P((pmap_t, vaddr_t));
        !           517: boolean_t      (*pmap_is_modified_p) __P((paddr_t pa));
        !           518: boolean_t      (*pmap_is_referenced_p) __P((paddr_t pa));
        !           519: void           (*pmap_page_protect_p) __P((paddr_t, vm_prot_t));
        !           520: void           (*pmap_protect_p) __P((pmap_t, vaddr_t, vaddr_t, vm_prot_t));
        !           521: void           (*pmap_zero_page_p) __P((paddr_t));
        !           522: void           (*pmap_changeprot_p) __P((pmap_t, vaddr_t, vm_prot_t, int));
1.55      pk        523: /* local: */
1.124   ! pk        524: void           (*pmap_rmk_p) __P((struct pmap *, vaddr_t, vaddr_t, int, int));
        !           525: void           (*pmap_rmu_p) __P((struct pmap *, vaddr_t, vaddr_t, int, int));
1.55      pk        526:
                    527: #define                pmap_rmk        (*pmap_rmk_p)
                    528: #define                pmap_rmu        (*pmap_rmu_p)
                    529:
                    530: #endif
                    531:
                    532: /* --------------------------------------------------------------*/
                    533:
                    534: /*
                    535:  * Next we have some Sun4m-specific routines which have no 4/4c
                    536:  * counterparts, or which are 4/4c macros.
                    537:  */
                    538:
                    539: #if defined(SUN4M)
                    540:
                    541: /* Macros which implement SRMMU TLB flushing/invalidation */
                    542:
                    543: #define tlb_flush_page(va)    sta((va & ~0xfff) | ASI_SRMMUFP_L3, ASI_SRMMUFP,0)
                    544: #define tlb_flush_segment(vreg, vseg) sta((vreg << RGSHIFT) | (vseg << SGSHIFT)\
                    545:                                          | ASI_SRMMUFP_L2, ASI_SRMMUFP,0)
                    546: #define tlb_flush_context()   sta(ASI_SRMMUFP_L1, ASI_SRMMUFP, 0)
                    547: #define tlb_flush_all()              sta(ASI_SRMMUFP_LN, ASI_SRMMUFP, 0)
                    548:
                    549: static u_int   VA2PA __P((caddr_t));
1.97      pk        550: static u_long  srmmu_bypass_read __P((u_long));
1.55      pk        551:
                    552: /*
                    553:  * VA2PA(addr) -- converts a virtual address to a physical address using
                    554:  * the MMU's currently-installed page tables. As a side effect, the address
                    555:  * translation used may cause the associated pte to be encached. The correct
                    556:  * context for VA must be set before this is called.
                    557:  *
                    558:  * This routine should work with any level of mapping, as it is used
                    559:  * during bootup to interact with the ROM's initial L1 mapping of the kernel.
                    560:  */
                    561: static __inline u_int
                    562: VA2PA(addr)
1.124   ! pk        563:        caddr_t addr;
1.55      pk        564: {
1.124   ! pk        565:        u_int pte;
1.55      pk        566:
                    567:        /* we'll use that handy SRMMU flush/probe! %%%: make consts below! */
                    568:        /* Try each level in turn until we find a valid pte. Otherwise panic */
                    569:
                    570:        pte = lda(((u_int)addr & ~0xfff) | ASI_SRMMUFP_L3, ASI_SRMMUFP);
                    571:        if ((pte & SRMMU_TETYPE) == SRMMU_TEPTE)
                    572:            return (((pte & SRMMU_PPNMASK) << SRMMU_PPNPASHIFT) |
                    573:                    ((u_int)addr & 0xfff));
1.60      pk        574:
                    575:        /* A `TLB Flush Entire' is required before any L0, L1 or L2 probe */
                    576:        tlb_flush_all();
                    577:
1.55      pk        578:        pte = lda(((u_int)addr & ~0xfff) | ASI_SRMMUFP_L2, ASI_SRMMUFP);
                    579:        if ((pte & SRMMU_TETYPE) == SRMMU_TEPTE)
                    580:            return (((pte & SRMMU_PPNMASK) << SRMMU_PPNPASHIFT) |
                    581:                    ((u_int)addr & 0x3ffff));
                    582:        pte = lda(((u_int)addr & ~0xfff) | ASI_SRMMUFP_L1, ASI_SRMMUFP);
                    583:        if ((pte & SRMMU_TETYPE) == SRMMU_TEPTE)
                    584:            return (((pte & SRMMU_PPNMASK) << SRMMU_PPNPASHIFT) |
                    585:                    ((u_int)addr & 0xffffff));
                    586:        pte = lda(((u_int)addr & ~0xfff) | ASI_SRMMUFP_L0, ASI_SRMMUFP);
                    587:        if ((pte & SRMMU_TETYPE) == SRMMU_TEPTE)
                    588:            return (((pte & SRMMU_PPNMASK) << SRMMU_PPNPASHIFT) |
                    589:                    ((u_int)addr & 0xffffffff));
                    590:
                    591:        panic("VA2PA: Asked to translate unmapped VA %p", addr);
                    592: }
                    593:
                    594: /*
                    595:  * Get the page table entry (PTE) for va by looking it up in the software
                    596:  * page tables. These are the same tables that are used by the MMU; this
                    597:  * routine allows easy access to the page tables even if the context
                    598:  * corresponding to the table is not loaded or selected.
                    599:  * This routine should NOT be used if there is any chance that the desired
                    600:  * pte is in the TLB cache, since it will return stale data in that case.
                    601:  * For that case, and for general use, use getpte4m, which is much faster
                    602:  * and avoids walking in-memory page tables if the page is in the cache.
                    603:  * Note also that this routine only works if a kernel mapping has been
                    604:  * installed for the given page!
                    605:  */
                    606: __inline u_int
                    607: getptesw4m(pm, va)             /* Assumes L3 mapping! */
1.124   ! pk        608:        struct pmap *pm;
        !           609:        vaddr_t va;
1.55      pk        610: {
1.124   ! pk        611:        struct regmap *rm;
        !           612:        struct segmap *sm;
1.55      pk        613:
                    614:        rm = &pm->pm_regmap[VA_VREG(va)];
                    615: #ifdef DEBUG
                    616:        if (rm == NULL)
1.58      pk        617:                panic("getptesw4m: no regmap entry");
1.55      pk        618: #endif
                    619:        sm = &rm->rg_segmap[VA_VSEG(va)];
                    620: #ifdef DEBUG
                    621:        if (sm == NULL)
1.58      pk        622:                panic("getptesw4m: no segmap");
1.55      pk        623: #endif
                    624:        return (sm->sg_pte[VA_SUN4M_VPG(va)]);  /* return pte */
                    625: }
                    626:
1.85      pk        627: __inline void
                    628: setpgt4m(ptep, pte)
                    629:        int *ptep;
                    630:        int pte;
                    631: {
                    632:        *ptep = pte;
1.103     pk        633: #if 1
1.121     pk        634:        /* XXX - uncaching in pgt_page_alloc() below is not yet quite Okay */
1.103     pk        635:        if (cpuinfo.cpu_type == CPUTYP_SS1_MBUS_NOMXCC)
1.85      pk        636:                cpuinfo.pcache_flush_line((int)ptep, VA2PA((caddr_t)ptep));
1.100     pk        637: #endif
1.85      pk        638: }
                    639:
1.55      pk        640: /*
                    641:  * Set the page table entry for va to pte. Only affects software MMU page-
                    642:  * tables (the in-core pagetables read by the MMU). Ignores TLB, and
                    643:  * thus should _not_ be called if the pte translation could be in the TLB.
                    644:  * In this case, use setpte4m().
                    645:  */
                    646: __inline void
                    647: setptesw4m(pm, va, pte)
1.124   ! pk        648:        struct pmap *pm;
        !           649:        vaddr_t va;
        !           650:        int pte;
1.55      pk        651: {
1.124   ! pk        652:        struct regmap *rm;
        !           653:        struct segmap *sm;
1.55      pk        654:
                    655:        rm = &pm->pm_regmap[VA_VREG(va)];
                    656:
                    657: #ifdef DEBUG
                    658:        if (pm->pm_regmap == NULL || rm == NULL)
1.82      pk        659:                panic("setptesw4m: no regmap entry");
1.55      pk        660: #endif
                    661:        sm = &rm->rg_segmap[VA_VSEG(va)];
                    662:
                    663: #ifdef DEBUG
                    664:        if (rm->rg_segmap == NULL || sm == NULL || sm->sg_pte == NULL)
1.82      pk        665:                panic("setptesw4m: no segmap for va %p", (caddr_t)va);
1.55      pk        666: #endif
1.85      pk        667:        setpgt4m(sm->sg_pte + VA_SUN4M_VPG(va), pte);
1.72      pk        668: }
                    669:
1.92      pk        670: /* Set the page table entry for va to pte. */
1.55      pk        671: __inline void
                    672: setpte4m(va, pte)
1.124   ! pk        673:        vaddr_t va;
1.115     pk        674:        int pte;
1.55      pk        675: {
1.115     pk        676:        struct pmap *pm;
                    677:        struct regmap *rm;
                    678:        struct segmap *sm;
1.55      pk        679:
1.100     pk        680:        if (getcontext4m() != 0)
                    681:                panic("setpte4m: user context");
                    682:
                    683:        pm = pmap_kernel();
1.55      pk        684:
                    685:        /* Note: inline version of setptesw4m() */
                    686: #ifdef DEBUG
                    687:        if (pm->pm_regmap == NULL)
                    688:                panic("setpte4m: no regmap entry");
1.43      pk        689: #endif
1.55      pk        690:        rm = &pm->pm_regmap[VA_VREG(va)];
                    691:        sm = &rm->rg_segmap[VA_VSEG(va)];
1.1       deraadt   692:
1.55      pk        693: #ifdef DEBUG
1.100     pk        694:        if (rm->rg_segmap == NULL)
                    695:                panic("setpte4m: no segmap for va %p (rp=%p)",
                    696:                        (caddr_t)va, (caddr_t)rm);
                    697:
                    698:        if (sm->sg_pte == NULL)
                    699:                panic("setpte4m: no pte for va %p (rp=%p, sp=%p)",
                    700:                      (caddr_t)va, rm, sm);
1.55      pk        701: #endif
                    702:        tlb_flush_page(va);
1.72      pk        703:        setpgt4m(sm->sg_pte + VA_SUN4M_VPG(va), pte);
1.55      pk        704: }
1.72      pk        705:
1.100     pk        706: void   pcache_flush __P((caddr_t, caddr_t, int));
                    707: void
                    708: pcache_flush(va, pa, n)
                    709:        caddr_t va, pa;
                    710:        int     n;
                    711: {
1.109     pk        712:        void (*f)__P((int,int)) = cpuinfo.pcache_flush_line;
                    713:
1.100     pk        714:        while ((n -= 4) >= 0)
1.109     pk        715:                (*f)((u_int)va+n, (u_int)pa+n);
1.100     pk        716: }
                    717:
                    718: /*
1.121     pk        719:  * Page table pool back-end.
                    720:  */
1.100     pk        721: void *
1.121     pk        722: pgt_page_alloc(sz, flags, mtype)
                    723:        unsigned long sz;
                    724:        int flags;
                    725:        int mtype;
1.100     pk        726: {
1.121     pk        727:        caddr_t p;
1.100     pk        728:
1.121     pk        729:        p = (caddr_t)uvm_km_kmemalloc(kernel_map, uvm.kernel_object,
1.124   ! pk        730:                                      (vsize_t)sz, UVM_KMF_NOWAIT);
1.100     pk        731:
1.121     pk        732:        if ((cpuinfo.flags & CPUFLG_CACHEPAGETABLES) == 0) {
                    733:                pcache_flush(p, (caddr_t)VA2PA(p), sz);
                    734:                kvm_uncache(p, sz/NBPG);
1.100     pk        735:        }
                    736:        return (p);
1.121     pk        737: }
                    738:
1.100     pk        739: void
1.121     pk        740: pgt_page_free(v, sz, mtype)
                    741:        void *v;
                    742:        unsigned long sz;
                    743:        int mtype;
1.100     pk        744: {
1.124   ! pk        745:        uvm_km_free(kernel_map, (vaddr_t)v, sz);
1.100     pk        746: }
1.55      pk        747: #endif /* 4m only */
1.1       deraadt   748:
                    749: /*----------------------------------------------------------------*/
                    750:
1.72      pk        751: /*
                    752:  * The following three macros are to be used in sun4/sun4c code only.
                    753:  */
1.69      pk        754: #if defined(SUN4_MMU3L)
                    755: #define CTX_USABLE(pm,rp) (                                    \
1.72      pk        756:                ((pm)->pm_ctx != NULL &&                        \
                    757:                 (!HASSUN4_MMU3L || (rp)->rg_smeg != reginval)) \
1.69      pk        758: )
1.43      pk        759: #else
1.55      pk        760: #define CTX_USABLE(pm,rp)      ((pm)->pm_ctx != NULL )
1.43      pk        761: #endif
                    762:
1.55      pk        763: #define GAP_WIDEN(pm,vr) do if (CPU_ISSUN4OR4C) {      \
                    764:        if (vr + 1 == pm->pm_gap_start)                 \
                    765:                pm->pm_gap_start = vr;                  \
                    766:        if (vr == pm->pm_gap_end)                       \
                    767:                pm->pm_gap_end = vr + 1;                \
1.43      pk        768: } while (0)
                    769:
1.55      pk        770: #define GAP_SHRINK(pm,vr) do if (CPU_ISSUN4OR4C) {                     \
1.124   ! pk        771:        int x;                                                          \
1.43      pk        772:        x = pm->pm_gap_start + (pm->pm_gap_end - pm->pm_gap_start) / 2; \
                    773:        if (vr > x) {                                                   \
                    774:                if (vr < pm->pm_gap_end)                                \
                    775:                        pm->pm_gap_end = vr;                            \
                    776:        } else {                                                        \
                    777:                if (vr >= pm->pm_gap_start && x != pm->pm_gap_start)    \
                    778:                        pm->pm_gap_start = vr + 1;                      \
                    779:        }                                                               \
                    780: } while (0)
                    781:
1.72      pk        782:
1.122     pk        783: static void get_phys_mem __P((void));
1.53      christos  784: static void sortm __P((struct memarr *, int));
                    785: void   pv_flushcache __P((struct pvlist *));
                    786: void   kvm_iocache __P((caddr_t, int));
1.122     pk        787:
1.53      christos  788: #ifdef DEBUG
                    789: void   pm_check __P((char *, struct pmap *));
                    790: void   pm_check_k __P((char *, struct pmap *));
                    791: void   pm_check_u __P((char *, struct pmap *));
                    792: #endif
                    793:
                    794:
1.2       deraadt   795: /*
1.122     pk        796:  * Grab physical memory list and use it to compute `physmem' and
                    797:  * `avail_end'. The latter is used in conjuction with
                    798:  * `avail_start' to dispatch left-over physical pages to the
                    799:  * VM system.
                    800:  */
                    801: void
                    802: get_phys_mem()
                    803: {
                    804:        struct memarr *mp;
                    805:        int i;
                    806:
                    807:        npmemarr = makememarr(pmemarr, MA_SIZE, MEMARR_AVAILPHYS);
                    808:        sortm(pmemarr, npmemarr);
                    809:        if (pmemarr[0].addr != 0) {
                    810:                printf("pmap_bootstrap: no kernel memory?!\n");
                    811:                callrom();
                    812:        }
                    813:        avail_end = pmemarr[npmemarr-1].addr + pmemarr[npmemarr-1].len;
                    814:        for (physmem = 0, mp = pmemarr, i = npmemarr; --i >= 0; mp++)
                    815:                physmem += btoc(mp->len);
                    816: }
                    817:
                    818: /*
1.2       deraadt   819:  * Sort a memory array by address.
                    820:  */
                    821: static void
                    822: sortm(mp, n)
1.124   ! pk        823:        struct memarr *mp;
        !           824:        int n;
1.2       deraadt   825: {
1.124   ! pk        826:        struct memarr *mpj;
        !           827:        int i, j;
        !           828:        paddr_t addr;
        !           829:        psize_t len;
1.2       deraadt   830:
                    831:        /* Insertion sort.  This is O(n^2), but so what? */
                    832:        for (i = 1; i < n; i++) {
                    833:                /* save i'th entry */
                    834:                addr = mp[i].addr;
                    835:                len = mp[i].len;
                    836:                /* find j such that i'th entry goes before j'th */
                    837:                for (j = 0, mpj = mp; j < i; j++, mpj++)
                    838:                        if (addr < mpj->addr)
                    839:                                break;
                    840:                /* slide up any additional entries */
                    841:                ovbcopy(mpj, mpj + 1, (i - j) * sizeof(*mp));
                    842:                mpj->addr = addr;
                    843:                mpj->len = len;
                    844:        }
                    845: }
                    846:
1.29      pk        847: /*
1.106     thorpej   848:  * Support functions for vm_page_bootstrap().
1.29      pk        849:  */
                    850:
                    851: /*
                    852:  * How much virtual space does this kernel have?
                    853:  * (After mapping kernel text, data, etc.)
                    854:  */
                    855: void
                    856: pmap_virtual_space(v_start, v_end)
1.124   ! pk        857:         vaddr_t *v_start;
        !           858:         vaddr_t *v_end;
1.29      pk        859: {
                    860:         *v_start = virtual_avail;
                    861:         *v_end   = virtual_end;
                    862: }
                    863:
                    864: /*
1.107     pk        865:  * Helper routine that hands off available physical pages to the VM system.
1.29      pk        866:  */
1.107     pk        867: static void
                    868: pmap_page_upload()
1.29      pk        869: {
1.124   ! pk        870:        int     n = 0;
        !           871:        paddr_t start, end, avail_next;
1.29      pk        872:
1.108     pk        873:        avail_next = avail_start;
                    874:        if (unavail_gap_start != 0) {
1.107     pk        875:                /* First, the gap we created in pmap_bootstrap() */
1.108     pk        876:                if (avail_next != unavail_gap_start)
                    877:                        /* Avoid empty ranges */
1.110     mrg       878: #if defined(UVM)
                    879:                        uvm_page_physload(
                    880:                                atop(avail_next),
                    881:                                atop(unavail_gap_start),
                    882:                                atop(avail_next),
1.120     thorpej   883:                                atop(unavail_gap_start),
                    884:                                VM_FREELIST_DEFAULT);
1.110     mrg       885: #else
1.108     pk        886:                        vm_page_physload(
                    887:                                atop(avail_next),
                    888:                                atop(unavail_gap_start),
                    889:                                atop(avail_next),
                    890:                                atop(unavail_gap_start));
1.110     mrg       891: #endif
1.108     pk        892:                avail_next = unavail_gap_end;
1.107     pk        893:        }
1.29      pk        894:
1.107     pk        895:        for (n = 0; n < npmemarr; n++) {
                    896:                /*
                    897:                 * Assume `avail_next' is always in the first segment; we
                    898:                 * already made that assumption in pmap_bootstrap()..
                    899:                 */
                    900:                start = (n == 0) ? avail_next : pmemarr[n].addr;
                    901:                end = pmemarr[n].addr + pmemarr[n].len;
                    902:                if (start == end)
                    903:                        continue;
1.29      pk        904:
1.110     mrg       905: #if defined(UVM)
                    906:                uvm_page_physload(
                    907:                        atop(start),
                    908:                        atop(end),
                    909:                        atop(start),
1.120     thorpej   910:                        atop(end), VM_FREELIST_DEFAULT);
1.110     mrg       911: #else
1.107     pk        912:                vm_page_physload(
                    913:                        atop(start),
                    914:                        atop(end),
                    915:                        atop(start),
                    916:                        atop(end));
1.110     mrg       917: #endif
1.29      pk        918:        }
                    919:
                    920: }
                    921:
1.124   ! pk        922: #if 0
1.29      pk        923: /*
                    924:  * pmap_page_index()
                    925:  *
                    926:  * Given a physical address, return a page index.
                    927:  *
                    928:  * There can be some values that we never return (i.e. a hole)
                    929:  * as long as the range of indices returned by this function
                    930:  * is smaller than the value returned by pmap_free_pages().
                    931:  * The returned index does NOT need to start at zero.
                    932:  *
                    933:  */
1.50      christos  934: int
1.29      pk        935: pmap_page_index(pa)
1.124   ! pk        936:        paddr_t pa;
1.29      pk        937: {
1.124   ! pk        938:        paddr_t idx;
1.29      pk        939:        int nmem;
1.124   ! pk        940:        struct memarr *mp;
1.29      pk        941:
                    942: #ifdef  DIAGNOSTIC
                    943:        if (pa < avail_start || pa >= avail_end)
1.54      christos  944:                panic("pmap_page_index: pa=0x%lx", pa);
1.29      pk        945: #endif
                    946:
                    947:        for (idx = 0, mp = pmemarr, nmem = npmemarr; --nmem >= 0; mp++) {
                    948:                if (pa >= mp->addr && pa < mp->addr + mp->len)
                    949:                        break;
                    950:                idx += atop(mp->len);
                    951:        }
                    952:
1.124   ! pk        953:        return (int)(idx + atop(pa - mp->addr));
1.29      pk        954: }
1.124   ! pk        955: #endif
1.39      pk        956:
                    957: int
                    958: pmap_pa_exists(pa)
1.124   ! pk        959:        paddr_t pa;
1.39      pk        960: {
1.124   ! pk        961:        int nmem;
        !           962:        struct memarr *mp;
1.39      pk        963:
                    964:        for (mp = pmemarr, nmem = npmemarr; --nmem >= 0; mp++) {
                    965:                if (pa >= mp->addr && pa < mp->addr + mp->len)
                    966:                        return 1;
                    967:        }
                    968:
                    969:        return 0;
                    970: }
1.29      pk        971:
1.1       deraadt   972: /* update pv_flags given a valid pte */
1.55      pk        973: #define        MR4_4C(pte) (((pte) >> PG_M_SHIFT) & (PV_MOD | PV_REF))
                    974: #define MR4M(pte) (((pte) >> PG_M_SHIFT4M) & (PV_MOD4M | PV_REF4M))
1.1       deraadt   975:
                    976: /*----------------------------------------------------------------*/
                    977:
                    978: /*
                    979:  * Agree with the monitor ROM as to how many MMU entries are
                    980:  * to be reserved, and map all of its segments into all contexts.
                    981:  *
                    982:  * Unfortunately, while the Version 0 PROM had a nice linked list of
                    983:  * taken virtual memory, the Version 2 PROM provides instead a convoluted
                    984:  * description of *free* virtual memory.  Rather than invert this, we
                    985:  * resort to two magic constants from the PROM vector description file.
                    986:  */
1.55      pk        987: #if defined(SUN4) || defined(SUN4C)
1.43      pk        988: void
1.58      pk        989: mmu_reservemon4_4c(nrp, nsp)
1.124   ! pk        990:        int *nrp, *nsp;
1.1       deraadt   991: {
1.124   ! pk        992:        u_int va = 0, eva = 0;
        !           993:        int mmuseg, i, nr, ns, vr, lastvr;
1.69      pk        994: #if defined(SUN4_MMU3L)
1.124   ! pk        995:        int mmureg;
1.53      christos  996: #endif
1.124   ! pk        997:        struct regmap *rp;
1.1       deraadt   998:
1.55      pk        999: #if defined(SUN4M)
                   1000:        if (CPU_ISSUN4M) {
1.81      pk       1001:                panic("mmu_reservemon4_4c called on Sun4M machine");
1.55      pk       1002:                return;
                   1003:        }
                   1004: #endif
                   1005:
1.20      deraadt  1006: #if defined(SUN4)
1.55      pk       1007:        if (CPU_ISSUN4) {
1.29      pk       1008:                prom_vstart = va = OLDMON_STARTVADDR;
                   1009:                prom_vend = eva = OLDMON_ENDVADDR;
1.20      deraadt  1010:        }
                   1011: #endif
                   1012: #if defined(SUN4C)
1.55      pk       1013:        if (CPU_ISSUN4C) {
1.29      pk       1014:                prom_vstart = va = OPENPROM_STARTVADDR;
                   1015:                prom_vend = eva = OPENPROM_ENDVADDR;
1.19      deraadt  1016:        }
1.20      deraadt  1017: #endif
1.43      pk       1018:        ns = *nsp;
                   1019:        nr = *nrp;
                   1020:        lastvr = 0;
1.1       deraadt  1021:        while (va < eva) {
1.43      pk       1022:                vr = VA_VREG(va);
                   1023:                rp = &pmap_kernel()->pm_regmap[vr];
                   1024:
1.69      pk       1025: #if defined(SUN4_MMU3L)
                   1026:                if (HASSUN4_MMU3L && vr != lastvr) {
1.43      pk       1027:                        lastvr = vr;
                   1028:                        mmureg = getregmap(va);
                   1029:                        if (mmureg < nr)
                   1030:                                rp->rg_smeg = nr = mmureg;
                   1031:                        /*
                   1032:                         * On 3-level MMU machines, we distribute regions,
                   1033:                         * rather than segments, amongst the contexts.
                   1034:                         */
                   1035:                        for (i = ncontext; --i > 0;)
                   1036:                                (*promvec->pv_setctxt)(i, (caddr_t)va, mmureg);
                   1037:                }
                   1038: #endif
1.1       deraadt  1039:                mmuseg = getsegmap(va);
1.43      pk       1040:                if (mmuseg < ns)
                   1041:                        ns = mmuseg;
1.69      pk       1042:
                   1043:                if (!HASSUN4_MMU3L)
1.43      pk       1044:                        for (i = ncontext; --i > 0;)
                   1045:                                (*promvec->pv_setctxt)(i, (caddr_t)va, mmuseg);
                   1046:
1.1       deraadt  1047:                if (mmuseg == seginval) {
                   1048:                        va += NBPSG;
                   1049:                        continue;
                   1050:                }
1.43      pk       1051:                /*
                   1052:                 * Another PROM segment. Enter into region map.
                   1053:                 * Assume the entire segment is valid.
                   1054:                 */
                   1055:                rp->rg_nsegmap += 1;
                   1056:                rp->rg_segmap[VA_VSEG(va)].sg_pmeg = mmuseg;
                   1057:                rp->rg_segmap[VA_VSEG(va)].sg_npte = NPTESG;
                   1058:
1.1       deraadt  1059:                /* PROM maps its memory user-accessible: fix it. */
                   1060:                for (i = NPTESG; --i >= 0; va += NBPG)
1.55      pk       1061:                        setpte4(va, getpte4(va) | PG_S);
1.1       deraadt  1062:        }
1.43      pk       1063:        *nsp = ns;
                   1064:        *nrp = nr;
                   1065:        return;
1.1       deraadt  1066: }
1.55      pk       1067: #endif
                   1068:
                   1069: #if defined(SUN4M) /* Sun4M versions of above */
                   1070:
1.97      pk       1071: u_long
                   1072: srmmu_bypass_read(paddr)
                   1073:        u_long  paddr;
                   1074: {
                   1075:        unsigned long v;
                   1076:
                   1077:        if (/*cpuinfo.cpu_impl == 4 && */cpuinfo.mxcc) {
                   1078:                /*
                   1079:                 * We're going to have to use MMU passthrough. If we're on
                   1080:                 * a Viking MicroSparc without an mbus, we need to turn
                   1081:                 * off traps and set the AC bit at 0x8000 in the MMU's
                   1082:                 * control register.  Ugh.
                   1083:                 */
                   1084:
                   1085:                unsigned long s = lda(SRMMU_PCR,ASI_SRMMU);
                   1086:
                   1087:                /* set MMU AC bit */
                   1088:                sta(SRMMU_PCR, ASI_SRMMU, s | VIKING_PCR_AC);
                   1089:                v = lda(paddr, ASI_BYPASS);
                   1090:                sta(SRMMU_PCR, ASI_SRMMU, s);
                   1091:        } else
                   1092:                v = lda(paddr, ASI_BYPASS);
                   1093:
                   1094:        return (v);
                   1095: }
                   1096:
                   1097:
1.55      pk       1098: /*
                   1099:  * Take the monitor's initial page table layout, convert it to 3rd-level pte's
                   1100:  * (it starts out as a L1 mapping), and install it along with a set of kernel
                   1101:  * mapping tables as the kernel's initial page table setup. Also create and
                   1102:  * enable a context table. I suppose we also want to block user-mode access
                   1103:  * to the new kernel/ROM mappings.
                   1104:  */
                   1105:
1.58      pk       1106: /*
                   1107:  * mmu_reservemon4m(): Copies the existing (ROM) page tables to kernel space,
1.55      pk       1108:  * converting any L1/L2 PTEs to L3 PTEs. Does *not* copy the L1 entry mapping
1.96      pk       1109:  * the kernel at KERNBASE since we don't want to map 16M of physical
                   1110:  * memory for the kernel. Thus the kernel must be installed later!
1.55      pk       1111:  * Also installs ROM mappings into the kernel pmap.
                   1112:  * NOTE: This also revokes all user-mode access to the mapped regions.
                   1113:  */
                   1114: void
1.77      pk       1115: mmu_reservemon4m(kpmap)
1.55      pk       1116:        struct pmap *kpmap;
                   1117: {
1.71      pk       1118:        unsigned int rom_ctxtbl;
1.124   ! pk       1119:        int te;
1.55      pk       1120:
1.97      pk       1121:        prom_vstart = OPENPROM_STARTVADDR;
                   1122:        prom_vend = OPENPROM_ENDVADDR;
1.55      pk       1123:
                   1124:        /*
                   1125:         * XXX: although the Sun4M can handle 36 bits of physical
                   1126:         * address space, we assume that all these page tables, etc
                   1127:         * are in the lower 4G (32-bits) of address space, i.e. out of I/O
                   1128:         * space. Eventually this should be changed to support the 36 bit
                   1129:         * physical addressing, in case some crazed ROM designer decides to
                   1130:         * stick the pagetables up there. In that case, we should use MMU
                   1131:         * transparent mode, (i.e. ASI 0x20 to 0x2f) to access
                   1132:         * physical memory.
                   1133:         */
                   1134:
1.71      pk       1135:        rom_ctxtbl = (lda(SRMMU_CXTPTR,ASI_SRMMU) << SRMMU_PPNPASHIFT);
1.55      pk       1136:
1.97      pk       1137:        te = srmmu_bypass_read(rom_ctxtbl);     /* i.e. context 0 */
1.69      pk       1138:
1.55      pk       1139:        switch (te & SRMMU_TETYPE) {
1.62      pk       1140:        case SRMMU_TEINVALID:
1.69      pk       1141:                cpuinfo.ctx_tbl[0] = SRMMU_TEINVALID;
1.77      pk       1142:                panic("mmu_reservemon4m: no existing L0 mapping! "
                   1143:                      "(How are we running?");
1.55      pk       1144:                break;
1.62      pk       1145:        case SRMMU_TEPTE:
1.55      pk       1146: #ifdef DEBUG
1.66      christos 1147:                printf("mmu_reservemon4m: trying to remap 4G segment!\n");
1.55      pk       1148: #endif
                   1149:                panic("mmu_reservemon4m: can't handle ROM 4G page size");
                   1150:                /* XXX: Should make this work, however stupid it is */
                   1151:                break;
1.62      pk       1152:        case SRMMU_TEPTD:
1.71      pk       1153:                mmu_setup4m_L1(te, kpmap);
1.55      pk       1154:                break;
1.62      pk       1155:        default:
1.55      pk       1156:                panic("mmu_reservemon4m: unknown pagetable entry type");
                   1157:        }
                   1158: }
                   1159:
                   1160: void
1.71      pk       1161: mmu_setup4m_L1(regtblptd, kpmap)
1.55      pk       1162:        int regtblptd;          /* PTD for region table to be remapped */
                   1163:        struct pmap *kpmap;
                   1164: {
1.124   ! pk       1165:        unsigned int regtblrover;
        !          1166:        int i;
1.55      pk       1167:        unsigned int te;
1.71      pk       1168:        struct regmap *rp;
1.55      pk       1169:        int j, k;
                   1170:
1.69      pk       1171:        /*
                   1172:         * Here we scan the region table to copy any entries which appear.
1.55      pk       1173:         * We are only concerned with regions in kernel space and above
1.96      pk       1174:         * (i.e. regions VA_VREG(KERNBASE)+1 to 0xff). We ignore the first
                   1175:         * region (at VA_VREG(KERNBASE)), since that is the 16MB L1 mapping
                   1176:         * that the ROM used to map the kernel in initially. Later, we will
                   1177:         * rebuild a new L3 mapping for the kernel and install it before
                   1178:         * switching to the new pagetables.
1.55      pk       1179:         */
1.71      pk       1180:        regtblrover =
                   1181:                ((regtblptd & ~SRMMU_TETYPE) << SRMMU_PPNPASHIFT) +
                   1182:                (VA_VREG(KERNBASE)+1) * sizeof(long);   /* kernel only */
1.55      pk       1183:
                   1184:        for (i = VA_VREG(KERNBASE) + 1; i < SRMMU_L1SIZE;
                   1185:             i++, regtblrover += sizeof(long)) {
1.71      pk       1186:
                   1187:                /* The region we're dealing with */
                   1188:                rp = &kpmap->pm_regmap[i];
                   1189:
1.97      pk       1190:                te = srmmu_bypass_read(regtblrover);
1.55      pk       1191:                switch(te & SRMMU_TETYPE) {
1.62      pk       1192:                case SRMMU_TEINVALID:
1.55      pk       1193:                        break;
1.71      pk       1194:
1.62      pk       1195:                case SRMMU_TEPTE:
1.55      pk       1196: #ifdef DEBUG
1.81      pk       1197:                        printf("mmu_setup4m_L1: "
1.77      pk       1198:                               "converting region 0x%x from L1->L3\n", i);
1.55      pk       1199: #endif
1.71      pk       1200:                        /*
                   1201:                         * This region entry covers 64MB of memory -- or
                   1202:                         * (NSEGRG * NPTESG) pages -- which we must convert
                   1203:                         * into a 3-level description.
1.55      pk       1204:                         */
1.71      pk       1205:
1.55      pk       1206:                        for (j = 0; j < SRMMU_L2SIZE; j++) {
1.71      pk       1207:                                struct segmap *sp = &rp->rg_segmap[j];
1.55      pk       1208:
                   1209:                                for (k = 0; k < SRMMU_L3SIZE; k++) {
1.71      pk       1210:                                        sp->sg_npte++;
1.97      pk       1211:                                        setpgt4m(&sp->sg_pte[k],
                   1212:                                                (te & SRMMU_L1PPNMASK) |
                   1213:                                                (j << SRMMU_L2PPNSHFT) |
                   1214:                                                (k << SRMMU_L3PPNSHFT) |
                   1215:                                                (te & SRMMU_PGBITSMSK) |
                   1216:                                                ((te & SRMMU_PROT_MASK) |
                   1217:                                                 PPROT_U2S_OMASK) |
                   1218:                                                SRMMU_TEPTE);
1.55      pk       1219:                                }
                   1220:                        }
                   1221:                        break;
1.71      pk       1222:
1.62      pk       1223:                case SRMMU_TEPTD:
1.71      pk       1224:                        mmu_setup4m_L2(te, rp);
1.55      pk       1225:                        break;
1.71      pk       1226:
1.62      pk       1227:                default:
1.55      pk       1228:                        panic("mmu_setup4m_L1: unknown pagetable entry type");
                   1229:                }
                   1230:        }
                   1231: }
                   1232:
                   1233: void
1.71      pk       1234: mmu_setup4m_L2(segtblptd, rp)
1.55      pk       1235:        int segtblptd;
1.71      pk       1236:        struct regmap *rp;
1.55      pk       1237: {
1.124   ! pk       1238:        unsigned int segtblrover;
        !          1239:        int i, k;
1.55      pk       1240:        unsigned int te;
1.71      pk       1241:        struct segmap *sp;
1.55      pk       1242:
                   1243:        segtblrover = (segtblptd & ~SRMMU_TETYPE) << SRMMU_PPNPASHIFT;
                   1244:        for (i = 0; i < SRMMU_L2SIZE; i++, segtblrover += sizeof(long)) {
1.71      pk       1245:
                   1246:                sp = &rp->rg_segmap[i];
                   1247:
1.97      pk       1248:                te = srmmu_bypass_read(segtblrover);
1.55      pk       1249:                switch(te & SRMMU_TETYPE) {
1.62      pk       1250:                case SRMMU_TEINVALID:
1.55      pk       1251:                        break;
1.71      pk       1252:
1.62      pk       1253:                case SRMMU_TEPTE:
1.55      pk       1254: #ifdef DEBUG
1.81      pk       1255:                        printf("mmu_setup4m_L2: converting L2 entry at segment 0x%x to L3\n",i);
1.55      pk       1256: #endif
1.71      pk       1257:                        /*
                   1258:                         * This segment entry covers 256KB of memory -- or
                   1259:                         * (NPTESG) pages -- which we must convert
                   1260:                         * into a 3-level description.
                   1261:                         */
1.55      pk       1262:                        for (k = 0; k < SRMMU_L3SIZE; k++) {
1.71      pk       1263:                                sp->sg_npte++;
1.97      pk       1264:                                setpgt4m(&sp->sg_pte[k],
                   1265:                                        (te & SRMMU_L1PPNMASK) |
                   1266:                                        (te & SRMMU_L2PPNMASK) |
                   1267:                                        (k << SRMMU_L3PPNSHFT) |
                   1268:                                        (te & SRMMU_PGBITSMSK) |
                   1269:                                        ((te & SRMMU_PROT_MASK) |
                   1270:                                         PPROT_U2S_OMASK) |
                   1271:                                        SRMMU_TEPTE);
1.55      pk       1272:                        }
                   1273:                        break;
1.71      pk       1274:
1.62      pk       1275:                case SRMMU_TEPTD:
1.71      pk       1276:                        mmu_setup4m_L3(te, sp);
1.55      pk       1277:                        break;
1.71      pk       1278:
1.62      pk       1279:                default:
1.55      pk       1280:                        panic("mmu_setup4m_L2: unknown pagetable entry type");
                   1281:                }
                   1282:        }
                   1283: }
                   1284:
1.71      pk       1285: void
                   1286: mmu_setup4m_L3(pagtblptd, sp)
1.124   ! pk       1287:        int pagtblptd;
1.71      pk       1288:        struct segmap *sp;
1.55      pk       1289: {
1.124   ! pk       1290:        unsigned int pagtblrover;
        !          1291:        int i;
        !          1292:        unsigned int te;
1.55      pk       1293:
                   1294:        pagtblrover = (pagtblptd & ~SRMMU_TETYPE) << SRMMU_PPNPASHIFT;
                   1295:        for (i = 0; i < SRMMU_L3SIZE; i++, pagtblrover += sizeof(long)) {
1.97      pk       1296:                te = srmmu_bypass_read(pagtblrover);
1.55      pk       1297:                switch(te & SRMMU_TETYPE) {
1.62      pk       1298:                case SRMMU_TEINVALID:
1.55      pk       1299:                        break;
1.62      pk       1300:                case SRMMU_TEPTE:
1.71      pk       1301:                        sp->sg_npte++;
1.97      pk       1302:                        setpgt4m(&sp->sg_pte[i], te | PPROT_U2S_OMASK);
1.55      pk       1303:                        break;
1.62      pk       1304:                case SRMMU_TEPTD:
1.55      pk       1305:                        panic("mmu_setup4m_L3: PTD found in L3 page table");
1.62      pk       1306:                default:
1.55      pk       1307:                        panic("mmu_setup4m_L3: unknown pagetable entry type");
                   1308:                }
                   1309:        }
                   1310: }
                   1311: #endif /* defined SUN4M */
1.1       deraadt  1312:
                   1313: /*----------------------------------------------------------------*/
                   1314:
                   1315: /*
                   1316:  * MMU management.
                   1317:  */
1.43      pk       1318: struct mmuentry *me_alloc __P((struct mmuhd *, struct pmap *, int, int));
                   1319: void           me_free __P((struct pmap *, u_int));
                   1320: struct mmuentry        *region_alloc __P((struct mmuhd *, struct pmap *, int));
                   1321: void           region_free __P((struct pmap *, u_int));
1.1       deraadt  1322:
                   1323: /*
                   1324:  * Change contexts.  We need the old context number as well as the new
                   1325:  * one.  If the context is changing, we must write all user windows
                   1326:  * first, lest an interrupt cause them to be written to the (other)
                   1327:  * user whose context we set here.
                   1328:  */
                   1329: #define        CHANGE_CONTEXTS(old, new) \
                   1330:        if ((old) != (new)) { \
                   1331:                write_user_windows(); \
                   1332:                setcontext(new); \
                   1333:        }
                   1334:
1.55      pk       1335: #if defined(SUN4) || defined(SUN4C) /* This is old sun MMU stuff */
1.1       deraadt  1336: /*
                   1337:  * Allocate an MMU entry (i.e., a PMEG).
                   1338:  * If necessary, steal one from someone else.
                   1339:  * Put it on the tail of the given queue
                   1340:  * (which is either the LRU list or the locked list).
                   1341:  * The locked list is not actually ordered, but this is easiest.
                   1342:  * Also put it on the given (new) pmap's chain,
                   1343:  * enter its pmeg number into that pmap's segmap,
                   1344:  * and store the pmeg's new virtual segment number (me->me_vseg).
                   1345:  *
                   1346:  * This routine is large and complicated, but it must be fast
                   1347:  * since it implements the dynamic allocation of MMU entries.
                   1348:  */
                   1349: struct mmuentry *
1.43      pk       1350: me_alloc(mh, newpm, newvreg, newvseg)
1.124   ! pk       1351:        struct mmuhd *mh;
        !          1352:        struct pmap *newpm;
        !          1353:        int newvreg, newvseg;
        !          1354: {
        !          1355:        struct mmuentry *me;
        !          1356:        struct pmap *pm;
        !          1357:        int i, va, pa, *pte, tpte;
1.1       deraadt  1358:        int ctx;
1.43      pk       1359:        struct regmap *rp;
                   1360:        struct segmap *sp;
1.1       deraadt  1361:
                   1362:        /* try free list first */
1.43      pk       1363:        if ((me = segm_freelist.tqh_first) != NULL) {
                   1364:                TAILQ_REMOVE(&segm_freelist, me, me_list);
1.1       deraadt  1365: #ifdef DEBUG
                   1366:                if (me->me_pmap != NULL)
                   1367:                        panic("me_alloc: freelist entry has pmap");
                   1368:                if (pmapdebug & PDB_MMU_ALLOC)
1.66      christos 1369:                        printf("me_alloc: got pmeg %d\n", me->me_cookie);
1.1       deraadt  1370: #endif
1.43      pk       1371:                TAILQ_INSERT_TAIL(mh, me, me_list);
1.1       deraadt  1372:
                   1373:                /* onto on pmap chain; pmap is already locked, if needed */
1.43      pk       1374:                TAILQ_INSERT_TAIL(&newpm->pm_seglist, me, me_pmchain);
1.70      pk       1375: #ifdef DIAGNOSTIC
                   1376:                pmap_stats.ps_npmeg_free--;
                   1377:                if (mh == &segm_locked)
                   1378:                        pmap_stats.ps_npmeg_locked++;
                   1379:                else
                   1380:                        pmap_stats.ps_npmeg_lru++;
                   1381: #endif
1.1       deraadt  1382:
                   1383:                /* into pmap segment table, with backpointers */
1.43      pk       1384:                newpm->pm_regmap[newvreg].rg_segmap[newvseg].sg_pmeg = me->me_cookie;
1.1       deraadt  1385:                me->me_pmap = newpm;
                   1386:                me->me_vseg = newvseg;
1.43      pk       1387:                me->me_vreg = newvreg;
1.1       deraadt  1388:
                   1389:                return (me);
                   1390:        }
                   1391:
                   1392:        /* no luck, take head of LRU list */
1.43      pk       1393:        if ((me = segm_lru.tqh_first) == NULL)
1.1       deraadt  1394:                panic("me_alloc: all pmegs gone");
1.43      pk       1395:
1.1       deraadt  1396:        pm = me->me_pmap;
                   1397:        if (pm == NULL)
                   1398:                panic("me_alloc: LRU entry has no pmap");
1.42      mycroft  1399:        if (pm == pmap_kernel())
1.1       deraadt  1400:                panic("me_alloc: stealing from kernel");
1.12      pk       1401: #ifdef DEBUG
1.1       deraadt  1402:        if (pmapdebug & (PDB_MMU_ALLOC | PDB_MMU_STEAL))
1.91      fair     1403:                printf("me_alloc: stealing pmeg 0x%x from pmap %p\n",
1.43      pk       1404:                    me->me_cookie, pm);
1.1       deraadt  1405: #endif
                   1406:        /*
                   1407:         * Remove from LRU list, and insert at end of new list
                   1408:         * (probably the LRU list again, but so what?).
                   1409:         */
1.43      pk       1410:        TAILQ_REMOVE(&segm_lru, me, me_list);
                   1411:        TAILQ_INSERT_TAIL(mh, me, me_list);
                   1412:
1.70      pk       1413: #ifdef DIAGNOSTIC
                   1414:        if (mh == &segm_locked) {
                   1415:                pmap_stats.ps_npmeg_lru--;
                   1416:                pmap_stats.ps_npmeg_locked++;
                   1417:        }
                   1418: #endif
                   1419:
1.43      pk       1420:        rp = &pm->pm_regmap[me->me_vreg];
                   1421:        if (rp->rg_segmap == NULL)
                   1422:                panic("me_alloc: LRU entry's pmap has no segments");
                   1423:        sp = &rp->rg_segmap[me->me_vseg];
                   1424:        pte = sp->sg_pte;
                   1425:        if (pte == NULL)
                   1426:                panic("me_alloc: LRU entry's pmap has no ptes");
1.1       deraadt  1427:
                   1428:        /*
                   1429:         * The PMEG must be mapped into some context so that we can
                   1430:         * read its PTEs.  Use its current context if it has one;
                   1431:         * if not, and since context 0 is reserved for the kernel,
                   1432:         * the simplest method is to switch to 0 and map the PMEG
                   1433:         * to virtual address 0---which, being a user space address,
                   1434:         * is by definition not in use.
                   1435:         *
                   1436:         * XXX for ncpus>1 must use per-cpu VA?
                   1437:         * XXX do not have to flush cache immediately
                   1438:         */
1.71      pk       1439:        ctx = getcontext4();
1.43      pk       1440:        if (CTX_USABLE(pm,rp)) {
1.1       deraadt  1441:                CHANGE_CONTEXTS(ctx, pm->pm_ctxnum);
1.69      pk       1442:                cache_flush_segment(me->me_vreg, me->me_vseg);
1.43      pk       1443:                va = VSTOVA(me->me_vreg,me->me_vseg);
1.1       deraadt  1444:        } else {
                   1445:                CHANGE_CONTEXTS(ctx, 0);
1.69      pk       1446:                if (HASSUN4_MMU3L)
1.43      pk       1447:                        setregmap(0, tregion);
                   1448:                setsegmap(0, me->me_cookie);
1.1       deraadt  1449:                /*
                   1450:                 * No cache flush needed: it happened earlier when
                   1451:                 * the old context was taken.
                   1452:                 */
                   1453:                va = 0;
                   1454:        }
                   1455:
                   1456:        /*
                   1457:         * Record reference and modify bits for each page,
                   1458:         * and copy PTEs into kernel memory so that they can
                   1459:         * be reloaded later.
                   1460:         */
                   1461:        i = NPTESG;
                   1462:        do {
1.55      pk       1463:                tpte = getpte4(va);
1.33      pk       1464:                if ((tpte & (PG_V | PG_TYPE)) == (PG_V | PG_OBMEM)) {
1.60      pk       1465:                        pa = ptoa(tpte & PG_PFNUM);
1.1       deraadt  1466:                        if (managed(pa))
1.55      pk       1467:                                pvhead(pa)->pv_flags |= MR4_4C(tpte);
1.1       deraadt  1468:                }
                   1469:                *pte++ = tpte & ~(PG_U|PG_M);
                   1470:                va += NBPG;
                   1471:        } while (--i > 0);
                   1472:
                   1473:        /* update segment tables */
                   1474:        simple_lock(&pm->pm_lock); /* what if other cpu takes mmuentry ?? */
1.43      pk       1475:        if (CTX_USABLE(pm,rp))
                   1476:                setsegmap(VSTOVA(me->me_vreg,me->me_vseg), seginval);
                   1477:        sp->sg_pmeg = seginval;
1.1       deraadt  1478:
                   1479:        /* off old pmap chain */
1.43      pk       1480:        TAILQ_REMOVE(&pm->pm_seglist, me, me_pmchain);
1.1       deraadt  1481:        simple_unlock(&pm->pm_lock);
1.71      pk       1482:        setcontext4(ctx);       /* done with old context */
1.1       deraadt  1483:
                   1484:        /* onto new pmap chain; new pmap is already locked, if needed */
1.43      pk       1485:        TAILQ_INSERT_TAIL(&newpm->pm_seglist, me, me_pmchain);
1.1       deraadt  1486:
                   1487:        /* into new segment table, with backpointers */
1.43      pk       1488:        newpm->pm_regmap[newvreg].rg_segmap[newvseg].sg_pmeg = me->me_cookie;
1.1       deraadt  1489:        me->me_pmap = newpm;
                   1490:        me->me_vseg = newvseg;
1.43      pk       1491:        me->me_vreg = newvreg;
1.1       deraadt  1492:
                   1493:        return (me);
                   1494: }
                   1495:
                   1496: /*
                   1497:  * Free an MMU entry.
                   1498:  *
                   1499:  * Assumes the corresponding pmap is already locked.
                   1500:  * Does NOT flush cache, but does record ref and mod bits.
                   1501:  * The rest of each PTE is discarded.
                   1502:  * CALLER MUST SET CONTEXT to pm->pm_ctxnum (if pmap has
                   1503:  * a context) or to 0 (if not).  Caller must also update
                   1504:  * pm->pm_segmap and (possibly) the hardware.
                   1505:  */
                   1506: void
                   1507: me_free(pm, pmeg)
1.124   ! pk       1508:        struct pmap *pm;
        !          1509:        u_int pmeg;
1.1       deraadt  1510: {
1.124   ! pk       1511:        struct mmuentry *me = &mmusegments[pmeg];
        !          1512:        int i, va, pa, tpte;
        !          1513:        int vr;
        !          1514:        struct regmap *rp;
1.43      pk       1515:
                   1516:        vr = me->me_vreg;
1.1       deraadt  1517:
                   1518: #ifdef DEBUG
                   1519:        if (pmapdebug & PDB_MMU_ALLOC)
1.66      christos 1520:                printf("me_free: freeing pmeg %d from pmap %p\n",
1.43      pk       1521:                    me->me_cookie, pm);
                   1522:        if (me->me_cookie != pmeg)
1.1       deraadt  1523:                panic("me_free: wrong mmuentry");
                   1524:        if (pm != me->me_pmap)
                   1525:                panic("me_free: pm != me_pmap");
                   1526: #endif
                   1527:
1.43      pk       1528:        rp = &pm->pm_regmap[vr];
                   1529:
1.1       deraadt  1530:        /* just like me_alloc, but no cache flush, and context already set */
1.43      pk       1531:        if (CTX_USABLE(pm,rp)) {
                   1532:                va = VSTOVA(vr,me->me_vseg);
                   1533:        } else {
                   1534: #ifdef DEBUG
1.71      pk       1535: if (getcontext4() != 0) panic("me_free: ctx != 0");
1.43      pk       1536: #endif
1.69      pk       1537:                if (HASSUN4_MMU3L)
1.43      pk       1538:                        setregmap(0, tregion);
                   1539:                setsegmap(0, me->me_cookie);
1.1       deraadt  1540:                va = 0;
                   1541:        }
                   1542:        i = NPTESG;
                   1543:        do {
1.55      pk       1544:                tpte = getpte4(va);
1.33      pk       1545:                if ((tpte & (PG_V | PG_TYPE)) == (PG_V | PG_OBMEM)) {
1.60      pk       1546:                        pa = ptoa(tpte & PG_PFNUM);
1.1       deraadt  1547:                        if (managed(pa))
1.55      pk       1548:                                pvhead(pa)->pv_flags |= MR4_4C(tpte);
1.1       deraadt  1549:                }
                   1550:                va += NBPG;
                   1551:        } while (--i > 0);
                   1552:
                   1553:        /* take mmu entry off pmap chain */
1.43      pk       1554:        TAILQ_REMOVE(&pm->pm_seglist, me, me_pmchain);
                   1555:        /* ... and remove from segment map */
                   1556:        if (rp->rg_segmap == NULL)
                   1557:                panic("me_free: no segments in pmap");
                   1558:        rp->rg_segmap[me->me_vseg].sg_pmeg = seginval;
                   1559:
                   1560:        /* off LRU or lock chain */
                   1561:        if (pm == pmap_kernel()) {
                   1562:                TAILQ_REMOVE(&segm_locked, me, me_list);
1.70      pk       1563: #ifdef DIAGNOSTIC
                   1564:                pmap_stats.ps_npmeg_locked--;
                   1565: #endif
1.43      pk       1566:        } else {
                   1567:                TAILQ_REMOVE(&segm_lru, me, me_list);
1.70      pk       1568: #ifdef DIAGNOSTIC
                   1569:                pmap_stats.ps_npmeg_lru--;
                   1570: #endif
1.43      pk       1571:        }
                   1572:
                   1573:        /* no associated pmap; on free list */
                   1574:        me->me_pmap = NULL;
                   1575:        TAILQ_INSERT_TAIL(&segm_freelist, me, me_list);
1.70      pk       1576: #ifdef DIAGNOSTIC
                   1577:        pmap_stats.ps_npmeg_free++;
                   1578: #endif
1.43      pk       1579: }
                   1580:
1.69      pk       1581: #if defined(SUN4_MMU3L)
1.43      pk       1582:
                   1583: /* XXX - Merge with segm_alloc/segm_free ? */
                   1584:
                   1585: struct mmuentry *
                   1586: region_alloc(mh, newpm, newvr)
1.124   ! pk       1587:        struct mmuhd *mh;
        !          1588:        struct pmap *newpm;
        !          1589:        int newvr;
1.43      pk       1590: {
1.124   ! pk       1591:        struct mmuentry *me;
        !          1592:        struct pmap *pm;
1.43      pk       1593:        int ctx;
                   1594:        struct regmap *rp;
                   1595:
                   1596:        /* try free list first */
                   1597:        if ((me = region_freelist.tqh_first) != NULL) {
                   1598:                TAILQ_REMOVE(&region_freelist, me, me_list);
                   1599: #ifdef DEBUG
                   1600:                if (me->me_pmap != NULL)
                   1601:                        panic("region_alloc: freelist entry has pmap");
                   1602:                if (pmapdebug & PDB_MMUREG_ALLOC)
1.91      fair     1603:                        printf("region_alloc: got smeg 0x%x\n", me->me_cookie);
1.43      pk       1604: #endif
                   1605:                TAILQ_INSERT_TAIL(mh, me, me_list);
                   1606:
                   1607:                /* onto on pmap chain; pmap is already locked, if needed */
                   1608:                TAILQ_INSERT_TAIL(&newpm->pm_reglist, me, me_pmchain);
                   1609:
                   1610:                /* into pmap segment table, with backpointers */
                   1611:                newpm->pm_regmap[newvr].rg_smeg = me->me_cookie;
                   1612:                me->me_pmap = newpm;
                   1613:                me->me_vreg = newvr;
                   1614:
                   1615:                return (me);
                   1616:        }
                   1617:
                   1618:        /* no luck, take head of LRU list */
                   1619:        if ((me = region_lru.tqh_first) == NULL)
                   1620:                panic("region_alloc: all smegs gone");
                   1621:
                   1622:        pm = me->me_pmap;
                   1623:        if (pm == NULL)
                   1624:                panic("region_alloc: LRU entry has no pmap");
                   1625:        if (pm == pmap_kernel())
                   1626:                panic("region_alloc: stealing from kernel");
                   1627: #ifdef DEBUG
                   1628:        if (pmapdebug & (PDB_MMUREG_ALLOC | PDB_MMUREG_STEAL))
1.91      fair     1629:                printf("region_alloc: stealing smeg 0x%x from pmap %p\n",
1.43      pk       1630:                    me->me_cookie, pm);
                   1631: #endif
                   1632:        /*
                   1633:         * Remove from LRU list, and insert at end of new list
                   1634:         * (probably the LRU list again, but so what?).
                   1635:         */
                   1636:        TAILQ_REMOVE(&region_lru, me, me_list);
                   1637:        TAILQ_INSERT_TAIL(mh, me, me_list);
                   1638:
                   1639:        rp = &pm->pm_regmap[me->me_vreg];
1.71      pk       1640:        ctx = getcontext4();
1.43      pk       1641:        if (pm->pm_ctx) {
                   1642:                CHANGE_CONTEXTS(ctx, pm->pm_ctxnum);
1.69      pk       1643:                cache_flush_region(me->me_vreg);
1.43      pk       1644:        }
                   1645:
                   1646:        /* update region tables */
                   1647:        simple_lock(&pm->pm_lock); /* what if other cpu takes mmuentry ?? */
                   1648:        if (pm->pm_ctx)
                   1649:                setregmap(VRTOVA(me->me_vreg), reginval);
                   1650:        rp->rg_smeg = reginval;
                   1651:
                   1652:        /* off old pmap chain */
                   1653:        TAILQ_REMOVE(&pm->pm_reglist, me, me_pmchain);
                   1654:        simple_unlock(&pm->pm_lock);
1.71      pk       1655:        setcontext4(ctx);       /* done with old context */
1.43      pk       1656:
                   1657:        /* onto new pmap chain; new pmap is already locked, if needed */
                   1658:        TAILQ_INSERT_TAIL(&newpm->pm_reglist, me, me_pmchain);
                   1659:
                   1660:        /* into new segment table, with backpointers */
                   1661:        newpm->pm_regmap[newvr].rg_smeg = me->me_cookie;
                   1662:        me->me_pmap = newpm;
                   1663:        me->me_vreg = newvr;
                   1664:
                   1665:        return (me);
                   1666: }
                   1667:
                   1668: /*
                   1669:  * Free an MMU entry.
                   1670:  *
                   1671:  * Assumes the corresponding pmap is already locked.
                   1672:  * Does NOT flush cache. ???
                   1673:  * CALLER MUST SET CONTEXT to pm->pm_ctxnum (if pmap has
                   1674:  * a context) or to 0 (if not).  Caller must also update
                   1675:  * pm->pm_regmap and (possibly) the hardware.
                   1676:  */
                   1677: void
                   1678: region_free(pm, smeg)
1.124   ! pk       1679:        struct pmap *pm;
        !          1680:        u_int smeg;
1.43      pk       1681: {
1.124   ! pk       1682:        struct mmuentry *me = &mmuregions[smeg];
1.43      pk       1683:
                   1684: #ifdef DEBUG
                   1685:        if (pmapdebug & PDB_MMUREG_ALLOC)
1.91      fair     1686:                printf("region_free: freeing smeg 0x%x from pmap %p\n",
1.43      pk       1687:                    me->me_cookie, pm);
                   1688:        if (me->me_cookie != smeg)
                   1689:                panic("region_free: wrong mmuentry");
                   1690:        if (pm != me->me_pmap)
                   1691:                panic("region_free: pm != me_pmap");
                   1692: #endif
                   1693:
                   1694:        if (pm->pm_ctx)
1.69      pk       1695:                cache_flush_region(me->me_vreg);
1.43      pk       1696:
                   1697:        /* take mmu entry off pmap chain */
                   1698:        TAILQ_REMOVE(&pm->pm_reglist, me, me_pmchain);
1.1       deraadt  1699:        /* ... and remove from segment map */
1.43      pk       1700:        pm->pm_regmap[smeg].rg_smeg = reginval;
1.1       deraadt  1701:
                   1702:        /* off LRU or lock chain */
1.43      pk       1703:        if (pm == pmap_kernel()) {
                   1704:                TAILQ_REMOVE(&region_locked, me, me_list);
                   1705:        } else {
                   1706:                TAILQ_REMOVE(&region_lru, me, me_list);
                   1707:        }
1.1       deraadt  1708:
                   1709:        /* no associated pmap; on free list */
                   1710:        me->me_pmap = NULL;
1.43      pk       1711:        TAILQ_INSERT_TAIL(&region_freelist, me, me_list);
1.1       deraadt  1712: }
1.43      pk       1713: #endif
1.1       deraadt  1714:
                   1715: /*
                   1716:  * `Page in' (load or inspect) an MMU entry; called on page faults.
                   1717:  * Returns 1 if we reloaded the segment, -1 if the segment was
                   1718:  * already loaded and the page was marked valid (in which case the
                   1719:  * fault must be a bus error or something), or 0 (segment loaded but
                   1720:  * PTE not valid, or segment not loaded at all).
                   1721:  */
                   1722: int
1.61      pk       1723: mmu_pagein(pm, va, prot)
1.124   ! pk       1724:        struct pmap *pm;
        !          1725:        int va, prot;
1.1       deraadt  1726: {
1.124   ! pk       1727:        int *pte;
        !          1728:        int vr, vs, pmeg, i, s, bits;
1.43      pk       1729:        struct regmap *rp;
                   1730:        struct segmap *sp;
                   1731:
1.45      pk       1732:        if (prot != VM_PROT_NONE)
                   1733:                bits = PG_V | ((prot & VM_PROT_WRITE) ? PG_W : 0);
                   1734:        else
                   1735:                bits = 0;
                   1736:
1.43      pk       1737:        vr = VA_VREG(va);
                   1738:        vs = VA_VSEG(va);
                   1739:        rp = &pm->pm_regmap[vr];
                   1740: #ifdef DEBUG
                   1741: if (pm == pmap_kernel())
1.91      fair     1742: printf("mmu_pagein: kernel wants map at va 0x%x, vr %d, vs %d\n", va, vr, vs);
1.43      pk       1743: #endif
                   1744:
                   1745:        /* return 0 if we have no PMEGs to load */
                   1746:        if (rp->rg_segmap == NULL)
                   1747:                return (0);
1.69      pk       1748: #if defined(SUN4_MMU3L)
                   1749:        if (HASSUN4_MMU3L && rp->rg_smeg == reginval) {
1.43      pk       1750:                smeg_t smeg;
                   1751:                unsigned int tva = VA_ROUNDDOWNTOREG(va);
                   1752:                struct segmap *sp = rp->rg_segmap;
                   1753:
                   1754:                s = splpmap();          /* paranoid */
                   1755:                smeg = region_alloc(&region_lru, pm, vr)->me_cookie;
                   1756:                setregmap(tva, smeg);
                   1757:                i = NSEGRG;
                   1758:                do {
                   1759:                        setsegmap(tva, sp++->sg_pmeg);
                   1760:                        tva += NBPSG;
                   1761:                } while (--i > 0);
                   1762:                splx(s);
                   1763:        }
                   1764: #endif
                   1765:        sp = &rp->rg_segmap[vs];
1.1       deraadt  1766:
                   1767:        /* return 0 if we have no PTEs to load */
1.43      pk       1768:        if ((pte = sp->sg_pte) == NULL)
1.1       deraadt  1769:                return (0);
1.43      pk       1770:
1.1       deraadt  1771:        /* return -1 if the fault is `hard', 0 if not */
1.43      pk       1772:        if (sp->sg_pmeg != seginval)
1.55      pk       1773:                return (bits && (getpte4(va) & bits) == bits ? -1 : 0);
1.1       deraadt  1774:
                   1775:        /* reload segment: write PTEs into a new LRU entry */
                   1776:        va = VA_ROUNDDOWNTOSEG(va);
                   1777:        s = splpmap();          /* paranoid */
1.43      pk       1778:        pmeg = me_alloc(&segm_lru, pm, vr, vs)->me_cookie;
1.1       deraadt  1779:        setsegmap(va, pmeg);
                   1780:        i = NPTESG;
                   1781:        do {
1.55      pk       1782:                setpte4(va, *pte++);
1.1       deraadt  1783:                va += NBPG;
                   1784:        } while (--i > 0);
                   1785:        splx(s);
                   1786:        return (1);
                   1787: }
1.55      pk       1788: #endif /* defined SUN4 or SUN4C */
                   1789:
1.1       deraadt  1790: /*
                   1791:  * Allocate a context.  If necessary, steal one from someone else.
                   1792:  * Changes hardware context number and loads segment map.
                   1793:  *
                   1794:  * This routine is only ever called from locore.s just after it has
                   1795:  * saved away the previous process, so there are no active user windows.
                   1796:  */
                   1797: void
                   1798: ctx_alloc(pm)
1.124   ! pk       1799:        struct pmap *pm;
1.1       deraadt  1800: {
1.124   ! pk       1801:        union ctxinfo *c;
        !          1802:        int s, cnum, i, doflush;
        !          1803:        struct regmap *rp;
        !          1804:        int gap_start, gap_end;
        !          1805:        unsigned long va;
1.1       deraadt  1806:
1.55      pk       1807: /*XXX-GCC!*/gap_start=gap_end=0;
1.1       deraadt  1808: #ifdef DEBUG
                   1809:        if (pm->pm_ctx)
                   1810:                panic("ctx_alloc pm_ctx");
                   1811:        if (pmapdebug & PDB_CTX_ALLOC)
1.66      christos 1812:                printf("ctx_alloc(%p)\n", pm);
1.1       deraadt  1813: #endif
1.55      pk       1814:        if (CPU_ISSUN4OR4C) {
                   1815:                gap_start = pm->pm_gap_start;
                   1816:                gap_end = pm->pm_gap_end;
                   1817:        }
1.13      pk       1818:
1.49      pk       1819:        s = splpmap();
1.1       deraadt  1820:        if ((c = ctx_freelist) != NULL) {
                   1821:                ctx_freelist = c->c_nextfree;
1.69      pk       1822:                cnum = c - cpuinfo.ctxinfo;
1.49      pk       1823:                doflush = 0;
1.1       deraadt  1824:        } else {
                   1825:                if ((ctx_kick += ctx_kickdir) >= ncontext) {
                   1826:                        ctx_kick = ncontext - 1;
                   1827:                        ctx_kickdir = -1;
                   1828:                } else if (ctx_kick < 1) {
                   1829:                        ctx_kick = 1;
                   1830:                        ctx_kickdir = 1;
                   1831:                }
1.69      pk       1832:                c = &cpuinfo.ctxinfo[cnum = ctx_kick];
1.1       deraadt  1833: #ifdef DEBUG
                   1834:                if (c->c_pmap == NULL)
                   1835:                        panic("ctx_alloc cu_pmap");
                   1836:                if (pmapdebug & (PDB_CTX_ALLOC | PDB_CTX_STEAL))
1.66      christos 1837:                        printf("ctx_alloc: steal context %d from %p\n",
1.1       deraadt  1838:                            cnum, c->c_pmap);
                   1839: #endif
                   1840:                c->c_pmap->pm_ctx = NULL;
1.69      pk       1841:                doflush = (CACHEINFO.c_vactype != VAC_NONE);
1.55      pk       1842:                if (CPU_ISSUN4OR4C) {
                   1843:                        if (gap_start < c->c_pmap->pm_gap_start)
                   1844:                                gap_start = c->c_pmap->pm_gap_start;
                   1845:                        if (gap_end > c->c_pmap->pm_gap_end)
                   1846:                                gap_end = c->c_pmap->pm_gap_end;
                   1847:                }
1.1       deraadt  1848:        }
1.49      pk       1849:
1.1       deraadt  1850:        c->c_pmap = pm;
                   1851:        pm->pm_ctx = c;
                   1852:        pm->pm_ctxnum = cnum;
                   1853:
1.55      pk       1854:        if (CPU_ISSUN4OR4C) {
                   1855:                /*
                   1856:                 * Write pmap's region (3-level MMU) or segment table into
                   1857:                 * the MMU.
                   1858:                 *
                   1859:                 * Only write those entries that actually map something in
                   1860:                 * this context by maintaining a pair of region numbers in
                   1861:                 * between which the pmap has no valid mappings.
                   1862:                 *
                   1863:                 * If a context was just allocated from the free list, trust
                   1864:                 * that all its pmeg numbers are `seginval'. We make sure this
                   1865:                 * is the case initially in pmap_bootstrap(). Otherwise, the
                   1866:                 * context was freed by calling ctx_free() in pmap_release(),
                   1867:                 * which in turn is supposedly called only when all mappings
                   1868:                 * have been removed.
                   1869:                 *
                   1870:                 * On the other hand, if the context had to be stolen from
                   1871:                 * another pmap, we possibly shrink the gap to be the
                   1872:                 * disjuction of the new and the previous map.
                   1873:                 */
1.43      pk       1874:
1.80      pk       1875:                setcontext4(cnum);
1.55      pk       1876:                splx(s);
                   1877:                if (doflush)
                   1878:                        cache_flush_context();
1.43      pk       1879:
1.55      pk       1880:                rp = pm->pm_regmap;
                   1881:                for (va = 0, i = NUREG; --i >= 0; ) {
                   1882:                        if (VA_VREG(va) >= gap_start) {
                   1883:                                va = VRTOVA(gap_end);
                   1884:                                i -= gap_end - gap_start;
                   1885:                                rp += gap_end - gap_start;
                   1886:                                if (i < 0)
                   1887:                                        break;
                   1888:                                /* mustn't re-enter this branch */
                   1889:                                gap_start = NUREG;
                   1890:                        }
1.69      pk       1891:                        if (HASSUN4_MMU3L) {
1.55      pk       1892:                                setregmap(va, rp++->rg_smeg);
                   1893:                                va += NBPRG;
1.69      pk       1894:                        } else {
1.124   ! pk       1895:                                int j;
        !          1896:                                struct segmap *sp = rp->rg_segmap;
1.55      pk       1897:                                for (j = NSEGRG; --j >= 0; va += NBPSG)
                   1898:                                        setsegmap(va,
                   1899:                                                  sp?sp++->sg_pmeg:seginval);
                   1900:                                rp++;
                   1901:                        }
1.43      pk       1902:                }
1.55      pk       1903:
                   1904:        } else if (CPU_ISSUN4M) {
                   1905:
1.80      pk       1906: #if defined(SUN4M)
1.55      pk       1907:                /*
                   1908:                 * Reload page and context tables to activate the page tables
                   1909:                 * for this context.
                   1910:                 *
                   1911:                 * The gap stuff isn't really needed in the Sun4m architecture,
                   1912:                 * since we don't have to worry about excessive mappings (all
                   1913:                 * mappings exist since the page tables must be complete for
                   1914:                 * the mmu to be happy).
                   1915:                 *
                   1916:                 * If a context was just allocated from the free list, trust
                   1917:                 * that all of its mmu-edible page tables are zeroed out
                   1918:                 * (except for those associated with the kernel). We make
                   1919:                 * sure this is the case initially in pmap_bootstrap() and
                   1920:                 * pmap_init() (?).
                   1921:                 * Otherwise, the context was freed by calling ctx_free() in
                   1922:                 * pmap_release(), which in turn is supposedly called only
                   1923:                 * when all mappings have been removed.
                   1924:                 *
                   1925:                 * XXX: Do we have to flush cache after reloading ctx tbl?
                   1926:                 */
                   1927:
1.123     pk       1928:                /* Do any cache flush needed on context switch */
                   1929:                (*cpuinfo.pure_vcache_flush)();
1.79      pk       1930: #ifdef DEBUG
1.69      pk       1931: #if 0
1.61      pk       1932:                ctxbusyvector[cnum] = 1; /* mark context as busy */
1.69      pk       1933: #endif
1.55      pk       1934:                if (pm->pm_reg_ptps_pa == 0)
                   1935:                        panic("ctx_alloc: no region table in current pmap");
                   1936: #endif
                   1937:                /*setcontext(0); * paranoia? can we modify curr. ctx? */
1.79      pk       1938:                setpgt4m(&cpuinfo.ctx_tbl[cnum],
                   1939:                        (pm->pm_reg_ptps_pa >> SRMMU_PPNPASHIFT) | SRMMU_TEPTD);
1.55      pk       1940:
1.80      pk       1941:                setcontext4m(cnum);
1.55      pk       1942:                if (doflush)
                   1943:                        cache_flush_context();
                   1944:                tlb_flush_context(); /* remove any remnant garbage from tlb */
1.43      pk       1945: #endif
1.55      pk       1946:                splx(s);
1.13      pk       1947:        }
1.1       deraadt  1948: }
                   1949:
                   1950: /*
                   1951:  * Give away a context.  Flushes cache and sets current context to 0.
                   1952:  */
                   1953: void
                   1954: ctx_free(pm)
                   1955:        struct pmap *pm;
                   1956: {
1.124   ! pk       1957:        union ctxinfo *c;
        !          1958:        int newc, oldc;
1.1       deraadt  1959:
                   1960:        if ((c = pm->pm_ctx) == NULL)
                   1961:                panic("ctx_free");
                   1962:        pm->pm_ctx = NULL;
                   1963:        oldc = getcontext();
1.55      pk       1964:
1.69      pk       1965:        if (CACHEINFO.c_vactype != VAC_NONE) {
1.123     pk       1966:                /* Do any cache flush needed on context switch */
                   1967:                (*cpuinfo.pure_vcache_flush)();
                   1968:
1.1       deraadt  1969:                newc = pm->pm_ctxnum;
                   1970:                CHANGE_CONTEXTS(oldc, newc);
                   1971:                cache_flush_context();
1.55      pk       1972: #if defined(SUN4M)
                   1973:                if (CPU_ISSUN4M)
                   1974:                        tlb_flush_context();
                   1975: #endif
1.1       deraadt  1976:                setcontext(0);
                   1977:        } else {
1.55      pk       1978: #if defined(SUN4M)
1.88      pk       1979:                if (CPU_ISSUN4M) {
1.123     pk       1980:                        /* Do any cache flush needed on context switch */
                   1981:                        (*cpuinfo.pure_vcache_flush)();
1.88      pk       1982:                        newc = pm->pm_ctxnum;
                   1983:                        CHANGE_CONTEXTS(oldc, newc);
1.55      pk       1984:                        tlb_flush_context();
1.88      pk       1985:                }
1.55      pk       1986: #endif
1.1       deraadt  1987:                CHANGE_CONTEXTS(oldc, 0);
                   1988:        }
                   1989:        c->c_nextfree = ctx_freelist;
                   1990:        ctx_freelist = c;
1.55      pk       1991:
1.69      pk       1992: #if 0
1.55      pk       1993: #if defined(SUN4M)
                   1994:        if (CPU_ISSUN4M) {
                   1995:                /* Map kernel back into unused context */
                   1996:                newc = pm->pm_ctxnum;
1.69      pk       1997:                cpuinfo.ctx_tbl[newc] = cpuinfo.ctx_tbl[0];
1.55      pk       1998:                if (newc)
                   1999:                        ctxbusyvector[newc] = 0; /* mark as free */
                   2000:        }
                   2001: #endif
1.69      pk       2002: #endif
1.1       deraadt  2003: }
                   2004:
                   2005:
                   2006: /*----------------------------------------------------------------*/
                   2007:
                   2008: /*
                   2009:  * pvlist functions.
                   2010:  */
                   2011:
                   2012: /*
                   2013:  * Walk the given pv list, and for each PTE, set or clear some bits
                   2014:  * (e.g., PG_W or PG_NC).
                   2015:  *
                   2016:  * As a special case, this never clears PG_W on `pager' pages.
                   2017:  * These, being kernel addresses, are always in hardware and have
                   2018:  * a context.
                   2019:  *
                   2020:  * This routine flushes the cache for any page whose PTE changes,
                   2021:  * as long as the process has a context; this is overly conservative.
                   2022:  * It also copies ref and mod bits to the pvlist, on the theory that
                   2023:  * this might save work later.  (XXX should test this theory)
1.115     pk       2024:  *
                   2025:  * In addition, if the cacheable bit (PG_NC) is updated in the PTE
                   2026:  * the corresponding PV_NC flag is also updated in each pv entry. This
                   2027:  * is done so kvm_uncache() can use this routine and have the uncached
                   2028:  * status stick.
1.1       deraadt  2029:  */
1.55      pk       2030:
                   2031: #if defined(SUN4) || defined(SUN4C)
                   2032:
1.1       deraadt  2033: void
1.55      pk       2034: pv_changepte4_4c(pv0, bis, bic)
1.115     pk       2035:        struct pvlist *pv0;
                   2036:        int bis, bic;
1.1       deraadt  2037: {
1.115     pk       2038:        int *pte;
                   2039:        struct pvlist *pv;
                   2040:        struct pmap *pm;
                   2041:        int va, vr, vs;
1.1       deraadt  2042:        int ctx, s;
1.43      pk       2043:        struct regmap *rp;
                   2044:        struct segmap *sp;
1.1       deraadt  2045:
                   2046:        write_user_windows();           /* paranoid? */
                   2047:
                   2048:        s = splpmap();                  /* paranoid? */
                   2049:        if (pv0->pv_pmap == NULL) {
                   2050:                splx(s);
                   2051:                return;
                   2052:        }
1.71      pk       2053:        ctx = getcontext4();
1.1       deraadt  2054:        for (pv = pv0; pv != NULL; pv = pv->pv_next) {
                   2055:                pm = pv->pv_pmap;
1.81      pk       2056: #ifdef DIAGNOSTIC
                   2057:                if(pm == NULL)
                   2058:                        panic("pv_changepte: pm == NULL");
                   2059: #endif
1.1       deraadt  2060:                va = pv->pv_va;
1.43      pk       2061:                vr = VA_VREG(va);
                   2062:                vs = VA_VSEG(va);
                   2063:                rp = &pm->pm_regmap[vr];
                   2064:                if (rp->rg_segmap == NULL)
                   2065:                        panic("pv_changepte: no segments");
                   2066:
                   2067:                sp = &rp->rg_segmap[vs];
                   2068:                pte = sp->sg_pte;
                   2069:
                   2070:                if (sp->sg_pmeg == seginval) {
                   2071:                        /* not in hardware: just fix software copy */
                   2072:                        if (pte == NULL)
1.81      pk       2073:                                panic("pv_changepte: pte == NULL");
1.43      pk       2074:                        pte += VA_VPG(va);
                   2075:                        *pte = (*pte | bis) & ~bic;
                   2076:                } else {
1.124   ! pk       2077:                        int tpte;
1.1       deraadt  2078:
                   2079:                        /* in hardware: fix hardware copy */
1.43      pk       2080:                        if (CTX_USABLE(pm,rp)) {
1.110     mrg      2081: #if defined(UVM)
                   2082:                                /*
                   2083:                                 * Bizarreness:  we never clear PG_W on
                   2084:                                 * pager pages, nor PG_NC on DVMA pages.
                   2085:                                 */
                   2086:                                if (bic == PG_W &&
                   2087:                                    va >= uvm.pager_sva && va < uvm.pager_eva)
                   2088:                                        continue;
                   2089: #else
1.124   ! pk       2090:                                extern vaddr_t pager_sva, pager_eva;
1.1       deraadt  2091:
1.8       pk       2092:                                /*
                   2093:                                 * Bizarreness:  we never clear PG_W on
                   2094:                                 * pager pages, nor PG_NC on DVMA pages.
                   2095:                                 */
1.1       deraadt  2096:                                if (bic == PG_W &&
                   2097:                                    va >= pager_sva && va < pager_eva)
1.3       deraadt  2098:                                        continue;
1.110     mrg      2099: #endif
1.3       deraadt  2100:                                if (bic == PG_NC &&
                   2101:                                    va >= DVMA_BASE && va < DVMA_END)
1.1       deraadt  2102:                                        continue;
1.71      pk       2103:                                setcontext4(pm->pm_ctxnum);
1.1       deraadt  2104:                                /* XXX should flush only when necessary */
1.55      pk       2105:                                tpte = getpte4(va);
1.88      pk       2106:                                /*
                   2107:                                 * XXX: always flush cache; conservative, but
                   2108:                                 * needed to invalidate cache tag protection
                   2109:                                 * bits and when disabling caching.
                   2110:                                 */
                   2111:                                cache_flush_page(va);
1.1       deraadt  2112:                        } else {
                   2113:                                /* XXX per-cpu va? */
1.71      pk       2114:                                setcontext4(0);
1.69      pk       2115:                                if (HASSUN4_MMU3L)
1.43      pk       2116:                                        setregmap(0, tregion);
                   2117:                                setsegmap(0, sp->sg_pmeg);
1.18      deraadt  2118:                                va = VA_VPG(va) << PGSHIFT;
1.55      pk       2119:                                tpte = getpte4(va);
1.1       deraadt  2120:                        }
                   2121:                        if (tpte & PG_V)
1.115     pk       2122:                                pv0->pv_flags |= MR4_4C(tpte);
1.1       deraadt  2123:                        tpte = (tpte | bis) & ~bic;
1.55      pk       2124:                        setpte4(va, tpte);
1.1       deraadt  2125:                        if (pte != NULL)        /* update software copy */
                   2126:                                pte[VA_VPG(va)] = tpte;
1.115     pk       2127:
                   2128:                        /* Update PV_NC flag if required */
                   2129:                        if (bis & PG_NC)
                   2130:                                pv->pv_flags |= PV_NC;
                   2131:                        if (bic & PG_NC)
                   2132:                                pv->pv_flags &= ~PV_NC;
1.1       deraadt  2133:                }
                   2134:        }
1.71      pk       2135:        setcontext4(ctx);
1.1       deraadt  2136:        splx(s);
                   2137: }
                   2138:
                   2139: /*
                   2140:  * Sync ref and mod bits in pvlist (turns off same in hardware PTEs).
                   2141:  * Returns the new flags.
                   2142:  *
                   2143:  * This is just like pv_changepte, but we never add or remove bits,
                   2144:  * hence never need to adjust software copies.
                   2145:  */
                   2146: int
1.55      pk       2147: pv_syncflags4_4c(pv0)
1.124   ! pk       2148:        struct pvlist *pv0;
1.1       deraadt  2149: {
1.124   ! pk       2150:        struct pvlist *pv;
        !          2151:        struct pmap *pm;
        !          2152:        int tpte, va, vr, vs, pmeg, flags;
1.1       deraadt  2153:        int ctx, s;
1.43      pk       2154:        struct regmap *rp;
                   2155:        struct segmap *sp;
1.1       deraadt  2156:
                   2157:        write_user_windows();           /* paranoid? */
                   2158:
                   2159:        s = splpmap();                  /* paranoid? */
                   2160:        if (pv0->pv_pmap == NULL) {     /* paranoid */
                   2161:                splx(s);
                   2162:                return (0);
                   2163:        }
1.71      pk       2164:        ctx = getcontext4();
1.1       deraadt  2165:        flags = pv0->pv_flags;
                   2166:        for (pv = pv0; pv != NULL; pv = pv->pv_next) {
                   2167:                pm = pv->pv_pmap;
                   2168:                va = pv->pv_va;
1.43      pk       2169:                vr = VA_VREG(va);
                   2170:                vs = VA_VSEG(va);
                   2171:                rp = &pm->pm_regmap[vr];
                   2172:                if (rp->rg_segmap == NULL)
                   2173:                        panic("pv_syncflags: no segments");
                   2174:                sp = &rp->rg_segmap[vs];
                   2175:
                   2176:                if ((pmeg = sp->sg_pmeg) == seginval)
1.1       deraadt  2177:                        continue;
1.43      pk       2178:
                   2179:                if (CTX_USABLE(pm,rp)) {
1.71      pk       2180:                        setcontext4(pm->pm_ctxnum);
1.1       deraadt  2181:                        /* XXX should flush only when necessary */
1.55      pk       2182:                        tpte = getpte4(va);
1.69      pk       2183:                        if (tpte & PG_M)
1.34      pk       2184:                                cache_flush_page(va);
1.1       deraadt  2185:                } else {
                   2186:                        /* XXX per-cpu va? */
1.71      pk       2187:                        setcontext4(0);
1.69      pk       2188:                        if (HASSUN4_MMU3L)
1.43      pk       2189:                                setregmap(0, tregion);
1.1       deraadt  2190:                        setsegmap(0, pmeg);
1.18      deraadt  2191:                        va = VA_VPG(va) << PGSHIFT;
1.55      pk       2192:                        tpte = getpte4(va);
1.1       deraadt  2193:                }
                   2194:                if (tpte & (PG_M|PG_U) && tpte & PG_V) {
1.86      pk       2195:                        flags |= MR4_4C(tpte);
1.1       deraadt  2196:                        tpte &= ~(PG_M|PG_U);
1.55      pk       2197:                        setpte4(va, tpte);
1.1       deraadt  2198:                }
                   2199:        }
                   2200:        pv0->pv_flags = flags;
1.71      pk       2201:        setcontext4(ctx);
1.1       deraadt  2202:        splx(s);
                   2203:        return (flags);
                   2204: }
                   2205:
                   2206: /*
                   2207:  * pv_unlink is a helper function for pmap_remove.
                   2208:  * It takes a pointer to the pv_table head for some physical address
                   2209:  * and removes the appropriate (pmap, va) entry.
                   2210:  *
                   2211:  * Once the entry is removed, if the pv_table head has the cache
                   2212:  * inhibit bit set, see if we can turn that off; if so, walk the
                   2213:  * pvlist and turn off PG_NC in each PTE.  (The pvlist is by
                   2214:  * definition nonempty, since it must have at least two elements
                   2215:  * in it to have PV_NC set, and we only remove one here.)
                   2216:  */
1.43      pk       2217: /*static*/ void
1.55      pk       2218: pv_unlink4_4c(pv, pm, va)
1.124   ! pk       2219:        struct pvlist *pv;
        !          2220:        struct pmap *pm;
        !          2221:        vaddr_t va;
1.1       deraadt  2222: {
1.124   ! pk       2223:        struct pvlist *npv;
1.1       deraadt  2224:
1.11      pk       2225: #ifdef DIAGNOSTIC
                   2226:        if (pv->pv_pmap == NULL)
                   2227:                panic("pv_unlink0");
                   2228: #endif
1.1       deraadt  2229:        /*
                   2230:         * First entry is special (sigh).
                   2231:         */
                   2232:        npv = pv->pv_next;
                   2233:        if (pv->pv_pmap == pm && pv->pv_va == va) {
                   2234:                pmap_stats.ps_unlink_pvfirst++;
                   2235:                if (npv != NULL) {
1.115     pk       2236:                        /*
                   2237:                         * Shift next entry into the head.
                   2238:                         * Make sure to retain the REF, MOD and ANC flags.
                   2239:                         */
1.1       deraadt  2240:                        pv->pv_next = npv->pv_next;
                   2241:                        pv->pv_pmap = npv->pv_pmap;
                   2242:                        pv->pv_va = npv->pv_va;
1.115     pk       2243:                        pv->pv_flags &= ~PV_NC;
                   2244:                        pv->pv_flags |= npv->pv_flags & PV_NC;
1.122     pk       2245:                        pool_put(&pv_pool, npv);
1.86      pk       2246:                } else {
1.115     pk       2247:                        /*
                   2248:                         * No mappings left; we still need to maintain
                   2249:                         * the REF and MOD flags. since pmap_is_modified()
                   2250:                         * can still be called for this page.
                   2251:                         */
1.1       deraadt  2252:                        pv->pv_pmap = NULL;
1.115     pk       2253:                        pv->pv_flags &= ~(PV_NC|PV_ANC);
1.86      pk       2254:                        return;
                   2255:                }
1.1       deraadt  2256:        } else {
1.124   ! pk       2257:                struct pvlist *prev;
1.1       deraadt  2258:
                   2259:                for (prev = pv;; prev = npv, npv = npv->pv_next) {
                   2260:                        pmap_stats.ps_unlink_pvsearch++;
                   2261:                        if (npv == NULL)
                   2262:                                panic("pv_unlink");
                   2263:                        if (npv->pv_pmap == pm && npv->pv_va == va)
                   2264:                                break;
                   2265:                }
                   2266:                prev->pv_next = npv->pv_next;
1.122     pk       2267:                pool_put(&pv_pool, npv);
1.1       deraadt  2268:        }
1.115     pk       2269:        if (pv->pv_flags & PV_ANC && (pv->pv_flags & PV_NC) == 0) {
1.1       deraadt  2270:                /*
                   2271:                 * Not cached: check to see if we can fix that now.
                   2272:                 */
                   2273:                va = pv->pv_va;
                   2274:                for (npv = pv->pv_next; npv != NULL; npv = npv->pv_next)
1.115     pk       2275:                        if (BADALIAS(va, npv->pv_va) || (npv->pv_flags & PV_NC))
1.1       deraadt  2276:                                return;
1.115     pk       2277:                pv->pv_flags &= ~PV_ANC;
1.58      pk       2278:                pv_changepte4_4c(pv, 0, PG_NC);
1.1       deraadt  2279:        }
                   2280: }
                   2281:
                   2282: /*
                   2283:  * pv_link is the inverse of pv_unlink, and is used in pmap_enter.
                   2284:  * It returns PG_NC if the (new) pvlist says that the address cannot
                   2285:  * be cached.
                   2286:  */
1.43      pk       2287: /*static*/ int
1.115     pk       2288: pv_link4_4c(pv, pm, va, nc)
                   2289:        struct pvlist *pv;
                   2290:        struct pmap *pm;
1.124   ! pk       2291:        vaddr_t va;
1.115     pk       2292:        int nc;
1.1       deraadt  2293: {
1.115     pk       2294:        struct pvlist *npv;
                   2295:        int ret;
                   2296:
                   2297:        ret = nc ? PG_NC : 0;
1.1       deraadt  2298:
                   2299:        if (pv->pv_pmap == NULL) {
                   2300:                /* no pvlist entries yet */
                   2301:                pmap_stats.ps_enter_firstpv++;
                   2302:                pv->pv_next = NULL;
                   2303:                pv->pv_pmap = pm;
                   2304:                pv->pv_va = va;
1.115     pk       2305:                pv->pv_flags |= nc ? PV_NC : 0;
                   2306:                return (ret);
1.1       deraadt  2307:        }
                   2308:        /*
                   2309:         * Before entering the new mapping, see if
                   2310:         * it will cause old mappings to become aliased
                   2311:         * and thus need to be `discached'.
                   2312:         */
                   2313:        pmap_stats.ps_enter_secondpv++;
1.115     pk       2314:        if (pv->pv_flags & (PV_NC|PV_ANC)) {
1.1       deraadt  2315:                /* already uncached, just stay that way */
                   2316:                ret = PG_NC;
                   2317:        } else {
                   2318:                for (npv = pv; npv != NULL; npv = npv->pv_next) {
1.115     pk       2319:                        if (npv->pv_flags & PV_NC) {
                   2320:                                ret = PG_NC;
                   2321:                                break;
                   2322:                        }
1.1       deraadt  2323:                        if (BADALIAS(va, npv->pv_va)) {
1.43      pk       2324: #ifdef DEBUG
1.84      pk       2325:                                if (pmapdebug & PDB_CACHESTUFF)
                   2326:                                        printf(
1.91      fair     2327:                        "pv_link: badalias: pid %d, 0x%lx<=>0x%lx, pa 0x%lx\n",
1.84      pk       2328:                                        curproc ? curproc->p_pid : -1,
                   2329:                                        va, npv->pv_va,
                   2330:                                        vm_first_phys + (pv-pv_table)*NBPG);
1.43      pk       2331: #endif
1.115     pk       2332:                                /* Mark list head `uncached due to aliases' */
                   2333:                                pv->pv_flags |= PV_ANC;
1.58      pk       2334:                                pv_changepte4_4c(pv, ret = PG_NC, 0);
1.1       deraadt  2335:                                break;
                   2336:                        }
                   2337:                }
                   2338:        }
1.122     pk       2339:        npv = pool_get(&pv_pool, PR_WAITOK);
1.1       deraadt  2340:        npv->pv_next = pv->pv_next;
                   2341:        npv->pv_pmap = pm;
                   2342:        npv->pv_va = va;
1.115     pk       2343:        npv->pv_flags = nc ? PV_NC : 0;
1.1       deraadt  2344:        pv->pv_next = npv;
                   2345:        return (ret);
                   2346: }
                   2347:
1.55      pk       2348: #endif /* sun4, sun4c code */
                   2349:
                   2350: #if defined(SUN4M)             /* Sun4M versions of above */
1.1       deraadt  2351: /*
1.55      pk       2352:  * Walk the given pv list, and for each PTE, set or clear some bits
                   2353:  * (e.g., PG_W or PG_NC).
                   2354:  *
                   2355:  * As a special case, this never clears PG_W on `pager' pages.
                   2356:  * These, being kernel addresses, are always in hardware and have
                   2357:  * a context.
                   2358:  *
                   2359:  * This routine flushes the cache for any page whose PTE changes,
                   2360:  * as long as the process has a context; this is overly conservative.
                   2361:  * It also copies ref and mod bits to the pvlist, on the theory that
                   2362:  * this might save work later.  (XXX should test this theory)
1.115     pk       2363:  *
                   2364:  * In addition, if the cacheable bit (SRMMU_PG_C) is updated in the PTE
                   2365:  * the corresponding PV_C4M flag is also updated in each pv entry. This
                   2366:  * is done so kvm_uncache() can use this routine and have the uncached
                   2367:  * status stick.
1.1       deraadt  2368:  */
1.53      christos 2369: void
1.55      pk       2370: pv_changepte4m(pv0, bis, bic)
1.115     pk       2371:        struct pvlist *pv0;
                   2372:        int bis, bic;
1.55      pk       2373: {
1.115     pk       2374:        struct pvlist *pv;
                   2375:        struct pmap *pm;
                   2376:        int va, vr;
1.55      pk       2377:        int ctx, s;
                   2378:        struct regmap *rp;
1.72      pk       2379:        struct segmap *sp;
1.1       deraadt  2380:
1.55      pk       2381:        write_user_windows();           /* paranoid? */
1.1       deraadt  2382:
1.55      pk       2383:        s = splpmap();                  /* paranoid? */
                   2384:        if (pv0->pv_pmap == NULL) {
                   2385:                splx(s);
                   2386:                return;
1.1       deraadt  2387:        }
1.71      pk       2388:        ctx = getcontext4m();
1.55      pk       2389:        for (pv = pv0; pv != NULL; pv = pv->pv_next) {
1.115     pk       2390:                int tpte;
1.55      pk       2391:                pm = pv->pv_pmap;
1.81      pk       2392: #ifdef DIAGNOSTIC
1.61      pk       2393:                if (pm == NULL)
1.81      pk       2394:                        panic("pv_changepte: pm == NULL");
                   2395: #endif
1.55      pk       2396:                va = pv->pv_va;
                   2397:                vr = VA_VREG(va);
                   2398:                rp = &pm->pm_regmap[vr];
                   2399:                if (rp->rg_segmap == NULL)
                   2400:                        panic("pv_changepte: no segments");
                   2401:
1.72      pk       2402:                sp = &rp->rg_segmap[VA_VSEG(va)];
                   2403:
                   2404:                if (pm->pm_ctx) {
1.110     mrg      2405: #if defined(UVM)
                   2406:                        /*
                   2407:                         * Bizarreness:  we never clear PG_W on
                   2408:                         * pager pages, nor set PG_C on DVMA pages.
                   2409:                         */
                   2410:                        if ((bic & PPROT_WRITE) &&
                   2411:                            va >= uvm.pager_sva && va < uvm.pager_eva)
                   2412:                                continue;
                   2413: #else
                   2414:
1.124   ! pk       2415:                        extern vaddr_t pager_sva, pager_eva;
1.1       deraadt  2416:
1.55      pk       2417:                        /*
                   2418:                         * Bizarreness:  we never clear PG_W on
                   2419:                         * pager pages, nor set PG_C on DVMA pages.
                   2420:                         */
                   2421:                        if ((bic & PPROT_WRITE) &&
                   2422:                            va >= pager_sva && va < pager_eva)
1.60      pk       2423:                                continue;
1.110     mrg      2424: #endif
1.55      pk       2425:                        if ((bis & SRMMU_PG_C) &&
                   2426:                            va >= DVMA_BASE && va < DVMA_END)
1.60      pk       2427:                                continue;
1.72      pk       2428:
1.88      pk       2429:                        setcontext4m(pm->pm_ctxnum);
                   2430:
                   2431:                        /*
                   2432:                         * XXX: always flush cache; conservative, but
                   2433:                         * needed to invalidate cache tag protection
                   2434:                         * bits and when disabling caching.
                   2435:                         */
                   2436:                        cache_flush_page(va);
                   2437:
1.72      pk       2438:                        /* Flush TLB so memory copy is up-to-date */
                   2439:                        tlb_flush_page(va);
1.88      pk       2440:
1.72      pk       2441:                }
                   2442:
                   2443:                tpte = sp->sg_pte[VA_SUN4M_VPG(va)];
                   2444:                if ((tpte & SRMMU_TETYPE) != SRMMU_TEPTE) {
                   2445:                        printf("pv_changepte: invalid PTE for 0x%x\n", va);
                   2446:                        continue;
1.55      pk       2447:                }
                   2448:
1.115     pk       2449:                pv0->pv_flags |= MR4M(tpte);
1.55      pk       2450:                tpte = (tpte | bis) & ~bic;
1.115     pk       2451:                setpgt4m(&sp->sg_pte[VA_SUN4M_VPG(va)], tpte);
                   2452:
                   2453:                /* Update PV_C4M flag if required */
                   2454:                if (bis & SRMMU_PG_C)
                   2455:                        pv->pv_flags |= PV_C4M;
                   2456:                if (bic & SRMMU_PG_C)
                   2457:                        pv->pv_flags &= ~PV_C4M;
1.55      pk       2458:
                   2459:        }
1.71      pk       2460:        setcontext4m(ctx);
1.55      pk       2461:        splx(s);
                   2462: }
                   2463:
                   2464: /*
                   2465:  * Sync ref and mod bits in pvlist. If page has been ref'd or modified,
                   2466:  * update ref/mod bits in pvlist, and clear the hardware bits.
                   2467:  *
                   2468:  * Return the new flags.
                   2469:  */
                   2470: int
                   2471: pv_syncflags4m(pv0)
1.124   ! pk       2472:        struct pvlist *pv0;
1.55      pk       2473: {
1.124   ! pk       2474:        struct pvlist *pv;
        !          2475:        struct pmap *pm;
        !          2476:        int tpte, va, vr, vs, flags;
1.55      pk       2477:        int ctx, s;
                   2478:        struct regmap *rp;
                   2479:        struct segmap *sp;
                   2480:
                   2481:        write_user_windows();           /* paranoid? */
                   2482:
                   2483:        s = splpmap();                  /* paranoid? */
                   2484:        if (pv0->pv_pmap == NULL) {     /* paranoid */
                   2485:                splx(s);
                   2486:                return (0);
                   2487:        }
1.71      pk       2488:        ctx = getcontext4m();
1.55      pk       2489:        flags = pv0->pv_flags;
                   2490:        for (pv = pv0; pv != NULL; pv = pv->pv_next) {
                   2491:                pm = pv->pv_pmap;
                   2492:                va = pv->pv_va;
                   2493:                vr = VA_VREG(va);
                   2494:                vs = VA_VSEG(va);
                   2495:                rp = &pm->pm_regmap[vr];
                   2496:                if (rp->rg_segmap == NULL)
                   2497:                        panic("pv_syncflags: no segments");
                   2498:                sp = &rp->rg_segmap[vs];
                   2499:
                   2500:                if (sp->sg_pte == NULL) /* invalid */
1.60      pk       2501:                        continue;
1.55      pk       2502:
1.62      pk       2503:                /*
                   2504:                 * We need the PTE from memory as the TLB version will
                   2505:                 * always have the SRMMU_PG_R bit on.
                   2506:                 */
1.72      pk       2507:                if (pm->pm_ctx) {
1.71      pk       2508:                        setcontext4m(pm->pm_ctxnum);
1.55      pk       2509:                        tlb_flush_page(va);
                   2510:                }
1.72      pk       2511:                tpte = sp->sg_pte[VA_SUN4M_VPG(va)];
1.62      pk       2512:
1.55      pk       2513:                if ((tpte & SRMMU_TETYPE) == SRMMU_TEPTE && /* if valid pte */
                   2514:                    (tpte & (SRMMU_PG_M|SRMMU_PG_R))) {   /* and mod/refd */
1.72      pk       2515:
1.115     pk       2516:                        flags |= MR4M(tpte);
1.72      pk       2517:
                   2518:                        if (pm->pm_ctx && (tpte & SRMMU_PG_M)) {
                   2519:                                cache_flush_page(va); /* XXX: do we need this?*/
                   2520:                                tlb_flush_page(va); /* paranoid? */
                   2521:                        }
                   2522:
                   2523:                        /* Clear mod/ref bits from PTE and write it back */
1.55      pk       2524:                        tpte &= ~(SRMMU_PG_M | SRMMU_PG_R);
1.72      pk       2525:                        setpgt4m(&sp->sg_pte[VA_SUN4M_VPG(va)], tpte);
1.55      pk       2526:                }
                   2527:        }
                   2528:        pv0->pv_flags = flags;
1.71      pk       2529:        setcontext4m(ctx);
1.55      pk       2530:        splx(s);
                   2531:        return (flags);
                   2532: }
                   2533:
                   2534: void
                   2535: pv_unlink4m(pv, pm, va)
1.124   ! pk       2536:        struct pvlist *pv;
        !          2537:        struct pmap *pm;
        !          2538:        vaddr_t va;
1.55      pk       2539: {
1.124   ! pk       2540:        struct pvlist *npv;
1.55      pk       2541:
                   2542: #ifdef DIAGNOSTIC
                   2543:        if (pv->pv_pmap == NULL)
                   2544:                panic("pv_unlink0");
                   2545: #endif
                   2546:        /*
                   2547:         * First entry is special (sigh).
                   2548:         */
                   2549:        npv = pv->pv_next;
                   2550:        if (pv->pv_pmap == pm && pv->pv_va == va) {
                   2551:                pmap_stats.ps_unlink_pvfirst++;
                   2552:                if (npv != NULL) {
1.115     pk       2553:                        /*
                   2554:                         * Shift next entry into the head.
                   2555:                         * Make sure to retain the REF, MOD and ANC flags.
                   2556:                         */
1.55      pk       2557:                        pv->pv_next = npv->pv_next;
                   2558:                        pv->pv_pmap = npv->pv_pmap;
                   2559:                        pv->pv_va = npv->pv_va;
1.115     pk       2560:                        pv->pv_flags &= ~PV_C4M;
                   2561:                        pv->pv_flags |= (npv->pv_flags & PV_C4M);
1.122     pk       2562:                        pool_put(&pv_pool, npv);
1.86      pk       2563:                } else {
1.115     pk       2564:                        /*
                   2565:                         * No mappings left; we still need to maintain
                   2566:                         * the REF and MOD flags. since pmap_is_modified()
                   2567:                         * can still be called for this page.
                   2568:                         */
1.55      pk       2569:                        pv->pv_pmap = NULL;
1.115     pk       2570:                        pv->pv_flags &= ~(PV_C4M|PV_ANC);
1.86      pk       2571:                        return;
                   2572:                }
1.55      pk       2573:        } else {
1.124   ! pk       2574:                struct pvlist *prev;
1.55      pk       2575:
                   2576:                for (prev = pv;; prev = npv, npv = npv->pv_next) {
                   2577:                        pmap_stats.ps_unlink_pvsearch++;
                   2578:                        if (npv == NULL)
                   2579:                                panic("pv_unlink");
                   2580:                        if (npv->pv_pmap == pm && npv->pv_va == va)
                   2581:                                break;
                   2582:                }
                   2583:                prev->pv_next = npv->pv_next;
1.122     pk       2584:                pool_put(&pv_pool, npv);
1.55      pk       2585:        }
1.115     pk       2586:        if ((pv->pv_flags & (PV_C4M|PV_ANC)) == (PV_C4M|PV_ANC)) {
1.55      pk       2587:                /*
                   2588:                 * Not cached: check to see if we can fix that now.
                   2589:                 */
                   2590:                va = pv->pv_va;
                   2591:                for (npv = pv->pv_next; npv != NULL; npv = npv->pv_next)
1.115     pk       2592:                        if (BADALIAS(va, npv->pv_va) ||
                   2593:                            (npv->pv_flags & PV_C4M) == 0)
1.55      pk       2594:                                return;
1.115     pk       2595:                pv->pv_flags &= PV_ANC;
1.55      pk       2596:                pv_changepte4m(pv, SRMMU_PG_C, 0);
                   2597:        }
                   2598: }
                   2599:
                   2600: /*
                   2601:  * pv_link is the inverse of pv_unlink, and is used in pmap_enter.
                   2602:  * It returns SRMMU_PG_C if the (new) pvlist says that the address cannot
                   2603:  * be cached (i.e. its results must be (& ~)'d in.
                   2604:  */
                   2605: /*static*/ int
1.115     pk       2606: pv_link4m(pv, pm, va, nc)
                   2607:        struct pvlist *pv;
                   2608:        struct pmap *pm;
1.124   ! pk       2609:        vaddr_t va;
1.115     pk       2610:        int nc;
1.55      pk       2611: {
1.115     pk       2612:        struct pvlist *npv;
                   2613:        int ret;
                   2614:
                   2615:        ret = nc ? SRMMU_PG_C : 0;
1.55      pk       2616:
                   2617:        if (pv->pv_pmap == NULL) {
                   2618:                /* no pvlist entries yet */
                   2619:                pmap_stats.ps_enter_firstpv++;
                   2620:                pv->pv_next = NULL;
                   2621:                pv->pv_pmap = pm;
                   2622:                pv->pv_va = va;
1.115     pk       2623:                pv->pv_flags |= nc ? 0 : PV_C4M;
                   2624:                return (ret);
1.55      pk       2625:        }
                   2626:        /*
                   2627:         * Before entering the new mapping, see if
                   2628:         * it will cause old mappings to become aliased
                   2629:         * and thus need to be `discached'.
                   2630:         */
                   2631:        pmap_stats.ps_enter_secondpv++;
1.115     pk       2632:        if ((pv->pv_flags & PV_ANC) != 0 || (pv->pv_flags & PV_C4M) == 0) {
1.55      pk       2633:                /* already uncached, just stay that way */
                   2634:                ret = SRMMU_PG_C;
                   2635:        } else {
                   2636:                for (npv = pv; npv != NULL; npv = npv->pv_next) {
1.115     pk       2637:                        if ((npv->pv_flags & PV_C4M) == 0) {
                   2638:                                ret = SRMMU_PG_C;
                   2639:                                break;
                   2640:                        }
1.55      pk       2641:                        if (BADALIAS(va, npv->pv_va)) {
                   2642: #ifdef DEBUG
1.84      pk       2643:                                if (pmapdebug & PDB_CACHESTUFF)
                   2644:                                        printf(
1.91      fair     2645:                        "pv_link: badalias: pid %d, 0x%lx<=>0x%lx, pa 0x%lx\n",
1.84      pk       2646:                                        curproc ? curproc->p_pid : -1,
                   2647:                                        va, npv->pv_va,
                   2648:                                        vm_first_phys + (pv-pv_table)*NBPG);
1.55      pk       2649: #endif
1.115     pk       2650:                                /* Mark list head `uncached due to aliases' */
                   2651:                                pv->pv_flags |= PV_ANC;
1.58      pk       2652:                                pv_changepte4m(pv, 0, ret = SRMMU_PG_C);
1.55      pk       2653:                                /* cache_flush_page(va); XXX: needed? */
                   2654:                                break;
                   2655:                        }
                   2656:                }
                   2657:        }
1.122     pk       2658:        npv = pool_get(&pv_pool, PR_WAITOK);
1.55      pk       2659:        npv->pv_next = pv->pv_next;
                   2660:        npv->pv_pmap = pm;
                   2661:        npv->pv_va = va;
1.115     pk       2662:        npv->pv_flags = nc ? 0 : PV_C4M;
1.55      pk       2663:        pv->pv_next = npv;
                   2664:        return (ret);
                   2665: }
                   2666: #endif
                   2667:
                   2668: /*
                   2669:  * Walk the given list and flush the cache for each (MI) page that is
                   2670:  * potentially in the cache. Called only if vactype != VAC_NONE.
                   2671:  */
                   2672: void
                   2673: pv_flushcache(pv)
1.124   ! pk       2674:        struct pvlist *pv;
1.55      pk       2675: {
1.124   ! pk       2676:        struct pmap *pm;
        !          2677:        int s, ctx;
1.55      pk       2678:
                   2679:        write_user_windows();   /* paranoia? */
                   2680:
                   2681:        s = splpmap();          /* XXX extreme paranoia */
                   2682:        if ((pm = pv->pv_pmap) != NULL) {
                   2683:                ctx = getcontext();
                   2684:                for (;;) {
                   2685:                        if (pm->pm_ctx) {
                   2686:                                setcontext(pm->pm_ctxnum);
                   2687:                                cache_flush_page(pv->pv_va);
                   2688:                        }
                   2689:                        pv = pv->pv_next;
                   2690:                        if (pv == NULL)
                   2691:                                break;
                   2692:                        pm = pv->pv_pmap;
                   2693:                }
                   2694:                setcontext(ctx);
                   2695:        }
                   2696:        splx(s);
                   2697: }
                   2698:
1.124   ! pk       2699: vsize_t
1.122     pk       2700: pv_table_map(base, mapit)
1.124   ! pk       2701:        paddr_t base;
1.122     pk       2702:        int mapit;
                   2703: {
                   2704:        int nmem;
                   2705:        struct memarr *mp;
1.124   ! pk       2706:        vsize_t s;
        !          2707:        vaddr_t sva, va, eva;
        !          2708:        paddr_t pa;
1.122     pk       2709:
                   2710:        /*
                   2711:         * Map pv_table[] as a `sparse' array. pv_table_map() is called
                   2712:         * twice: the first time `mapit' is 0, and the number of
                   2713:         * physical pages needed to map the used pieces of pv_table[]
                   2714:         * is computed;  the second time those pages are used to
                   2715:         * actually map pv_table[].
                   2716:         * In both cases, this function returns the amount of physical
                   2717:         * memory needed.
                   2718:         */
                   2719:
                   2720:        if (!mapit)
                   2721:                /* Mark physical pages for pv_table[] */
                   2722:                pv_physmem = base;
                   2723:
                   2724:        pa = pv_physmem; /* XXX - always init `pa' to appease gcc */
                   2725:
                   2726:        s = 0;
                   2727:        sva = eva = 0;
                   2728:        for (mp = pmemarr, nmem = npmemarr; --nmem >= 0; mp++) {
                   2729:                int len;
1.124   ! pk       2730:                paddr_t addr;
1.122     pk       2731:
                   2732:                len = mp->len;
                   2733:                if ((addr = mp->addr) < base) {
                   2734:                        /*
                   2735:                         * pv_table[] covers everything above `avail_start'.
                   2736:                         */
                   2737:                        addr = base;
                   2738:                        len -= base;
                   2739:                }
                   2740:
                   2741:                /* Calculate stretch of pv_table */
                   2742:                len = sizeof(struct pvlist) * btoc(len);
1.124   ! pk       2743:                va = (vaddr_t)&pv_table[btoc(addr - base)];
1.122     pk       2744:                sva = trunc_page(va);
                   2745:
                   2746:                if (sva < eva) {
                   2747:                        /* This chunk overlaps the previous in pv_table[] */
                   2748:                        sva += NBPG;
                   2749:                        if (sva < eva)
                   2750:                                panic("pv_table_map: sva(0x%lx)<eva(0x%lx)",
                   2751:                                      sva, eva);
                   2752:                }
                   2753:                eva = roundup(va + len, NBPG);
                   2754:
                   2755:                /* Add this range to the total */
                   2756:                s += eva - sva;
                   2757:
                   2758:                if (mapit) {
                   2759:                        /* Map this piece of pv_table[] */
                   2760:                        for (va = sva; va < eva; va += PAGE_SIZE) {
                   2761:                                pmap_enter(pmap_kernel(), va, pa,
                   2762:                                           VM_PROT_READ|VM_PROT_WRITE, 1);
                   2763:                                pa += PAGE_SIZE;
                   2764:                        }
                   2765:                        bzero((caddr_t)sva, eva - sva);
                   2766:                }
                   2767:        }
                   2768:        return (s);
                   2769: }
                   2770:
1.55      pk       2771: /*----------------------------------------------------------------*/
                   2772:
                   2773: /*
                   2774:  * At last, pmap code.
                   2775:  */
1.1       deraadt  2776:
1.99      fair     2777: #if defined(SUN4) && (defined(SUN4C) || defined(SUN4M))
1.18      deraadt  2778: int nptesg;
                   2779: #endif
                   2780:
1.55      pk       2781: #if defined(SUN4M)
                   2782: static void pmap_bootstrap4m __P((void));
                   2783: #endif
                   2784: #if defined(SUN4) || defined(SUN4C)
                   2785: static void pmap_bootstrap4_4c __P((int, int, int));
                   2786: #endif
                   2787:
1.1       deraadt  2788: /*
                   2789:  * Bootstrap the system enough to run with VM enabled.
                   2790:  *
1.43      pk       2791:  * nsegment is the number of mmu segment entries (``PMEGs'');
                   2792:  * nregion is the number of mmu region entries (``SMEGs'');
1.1       deraadt  2793:  * nctx is the number of contexts.
                   2794:  */
                   2795: void
1.43      pk       2796: pmap_bootstrap(nctx, nregion, nsegment)
                   2797:        int nsegment, nctx, nregion;
1.1       deraadt  2798: {
1.55      pk       2799:
1.110     mrg      2800: #if defined(UVM)
                   2801:        uvmexp.pagesize = NBPG;
                   2802:        uvm_setpagesize();
                   2803: #else
1.55      pk       2804:        cnt.v_page_size = NBPG;
                   2805:        vm_set_page_size();
1.110     mrg      2806: #endif
1.55      pk       2807:
                   2808: #if defined(SUN4) && (defined(SUN4C) || defined(SUN4M))
                   2809:        /* In this case NPTESG is not a #define */
                   2810:        nptesg = (NBPSG >> pgshift);
                   2811: #endif
                   2812:
1.69      pk       2813: #if 0
1.55      pk       2814:        ncontext = nctx;
1.69      pk       2815: #endif
1.55      pk       2816:
                   2817: #if defined(SUN4M)
                   2818:        if (CPU_ISSUN4M) {
                   2819:                pmap_bootstrap4m();
                   2820:                return;
                   2821:        }
                   2822: #endif
                   2823: #if defined(SUN4) || defined(SUN4C)
                   2824:        if (CPU_ISSUN4OR4C) {
                   2825:                pmap_bootstrap4_4c(nctx, nregion, nsegment);
                   2826:                return;
                   2827:        }
                   2828: #endif
                   2829: }
                   2830:
                   2831: #if defined(SUN4) || defined(SUN4C)
                   2832: void
                   2833: pmap_bootstrap4_4c(nctx, nregion, nsegment)
                   2834:        int nsegment, nctx, nregion;
                   2835: {
1.122     pk       2836:        union ctxinfo *ci;
                   2837:        struct mmuentry *mmuseg;
1.77      pk       2838: #if defined(SUN4_MMU3L)
1.122     pk       2839:        struct mmuentry *mmureg;
1.53      christos 2840: #endif
1.43      pk       2841:        struct   regmap *rp;
1.122     pk       2842:        int i, j;
                   2843:        int npte, zseg, vr, vs;
                   2844:        int rcookie, scookie;
                   2845:        caddr_t p;
                   2846:        void (*rom_setmap)(int ctx, caddr_t va, int pmeg);
1.1       deraadt  2847:        int lastpage;
                   2848:        extern char end[];
1.7       pk       2849: #ifdef DDB
                   2850:        extern char *esym;
                   2851: #endif
1.1       deraadt  2852:
1.45      pk       2853:        switch (cputyp) {
                   2854:        case CPU_SUN4C:
                   2855:                mmu_has_hole = 1;
                   2856:                break;
                   2857:        case CPU_SUN4:
1.69      pk       2858:                if (cpuinfo.cpu_type != CPUTYP_4_400) {
1.45      pk       2859:                        mmu_has_hole = 1;
                   2860:                        break;
                   2861:                }
                   2862:        }
                   2863:
1.110     mrg      2864: #if defined(UVM)
                   2865:        uvmexp.pagesize = NBPG;
                   2866:        uvm_setpagesize();
                   2867: #else
1.19      deraadt  2868:        cnt.v_page_size = NBPG;
                   2869:        vm_set_page_size();
1.110     mrg      2870: #endif
1.19      deraadt  2871:
1.31      pk       2872: #if defined(SUN4)
                   2873:        /*
                   2874:         * set up the segfixmask to mask off invalid bits
                   2875:         */
1.43      pk       2876:        segfixmask =  nsegment - 1; /* assume nsegment is a power of 2 */
                   2877: #ifdef DIAGNOSTIC
                   2878:        if (((nsegment & segfixmask) | (nsegment & ~segfixmask)) != nsegment) {
1.66      christos 2879:                printf("pmap_bootstrap: unsuitable number of segments (%d)\n",
1.43      pk       2880:                        nsegment);
                   2881:                callrom();
                   2882:        }
                   2883: #endif
1.31      pk       2884: #endif
                   2885:
1.55      pk       2886: #if defined(SUN4M) /* We're in a dual-arch kernel. Setup 4/4c fn. ptrs */
                   2887:        pmap_clear_modify_p     =       pmap_clear_modify4_4c;
                   2888:        pmap_clear_reference_p  =       pmap_clear_reference4_4c;
                   2889:        pmap_copy_page_p        =       pmap_copy_page4_4c;
                   2890:        pmap_enter_p            =       pmap_enter4_4c;
                   2891:        pmap_extract_p          =       pmap_extract4_4c;
                   2892:        pmap_is_modified_p      =       pmap_is_modified4_4c;
                   2893:        pmap_is_referenced_p    =       pmap_is_referenced4_4c;
                   2894:        pmap_page_protect_p     =       pmap_page_protect4_4c;
                   2895:        pmap_protect_p          =       pmap_protect4_4c;
                   2896:        pmap_zero_page_p        =       pmap_zero_page4_4c;
                   2897:        pmap_changeprot_p       =       pmap_changeprot4_4c;
                   2898:        pmap_rmk_p              =       pmap_rmk4_4c;
                   2899:        pmap_rmu_p              =       pmap_rmu4_4c;
                   2900: #endif /* defined SUN4M */
1.43      pk       2901:
1.1       deraadt  2902:        /*
                   2903:         * Last segment is the `invalid' one (one PMEG of pte's with !pg_v).
                   2904:         * It will never be used for anything else.
                   2905:         */
1.43      pk       2906:        seginval = --nsegment;
                   2907:
1.69      pk       2908: #if defined(SUN4_MMU3L)
                   2909:        if (HASSUN4_MMU3L)
1.43      pk       2910:                reginval = --nregion;
                   2911: #endif
                   2912:
                   2913:        /*
                   2914:         * Intialize the kernel pmap.
                   2915:         */
                   2916:        /* kernel_pmap_store.pm_ctxnum = 0; */
1.111     chs      2917:        simple_lock_init(&kernel_pmap_store.pm_lock);
1.43      pk       2918:        kernel_pmap_store.pm_refcount = 1;
1.69      pk       2919: #if defined(SUN4_MMU3L)
1.43      pk       2920:        TAILQ_INIT(&kernel_pmap_store.pm_reglist);
                   2921: #endif
                   2922:        TAILQ_INIT(&kernel_pmap_store.pm_seglist);
                   2923:
                   2924:        kernel_pmap_store.pm_regmap = &kernel_regmap_store[-NUREG];
                   2925:        for (i = NKREG; --i >= 0;) {
1.69      pk       2926: #if defined(SUN4_MMU3L)
1.43      pk       2927:                kernel_regmap_store[i].rg_smeg = reginval;
                   2928: #endif
                   2929:                kernel_regmap_store[i].rg_segmap =
                   2930:                        &kernel_segmap_store[i * NSEGRG];
                   2931:                for (j = NSEGRG; --j >= 0;)
                   2932:                        kernel_segmap_store[i * NSEGRG + j].sg_pmeg = seginval;
                   2933:        }
1.1       deraadt  2934:
                   2935:        /*
                   2936:         * Preserve the monitor ROM's reserved VM region, so that
                   2937:         * we can use L1-A or the monitor's debugger.  As a side
                   2938:         * effect we map the ROM's reserved VM into all contexts
                   2939:         * (otherwise L1-A crashes the machine!).
                   2940:         */
1.43      pk       2941:
1.58      pk       2942:        mmu_reservemon4_4c(&nregion, &nsegment);
1.43      pk       2943:
1.69      pk       2944: #if defined(SUN4_MMU3L)
1.43      pk       2945:        /* Reserve one region for temporary mappings */
                   2946:        tregion = --nregion;
                   2947: #endif
1.1       deraadt  2948:
                   2949:        /*
1.43      pk       2950:         * Allocate and clear mmu entries and context structures.
1.1       deraadt  2951:         */
                   2952:        p = end;
1.7       pk       2953: #ifdef DDB
                   2954:        if (esym != 0)
1.78      pk       2955:                p = esym;
1.7       pk       2956: #endif
1.69      pk       2957: #if defined(SUN4_MMU3L)
1.43      pk       2958:        mmuregions = mmureg = (struct mmuentry *)p;
                   2959:        p += nregion * sizeof(struct mmuentry);
1.78      pk       2960:        bzero(mmuregions, nregion * sizeof(struct mmuentry));
1.43      pk       2961: #endif
                   2962:        mmusegments = mmuseg = (struct mmuentry *)p;
                   2963:        p += nsegment * sizeof(struct mmuentry);
1.78      pk       2964:        bzero(mmusegments, nsegment * sizeof(struct mmuentry));
                   2965:
1.69      pk       2966:        pmap_kernel()->pm_ctx = cpuinfo.ctxinfo = ci = (union ctxinfo *)p;
1.1       deraadt  2967:        p += nctx * sizeof *ci;
                   2968:
1.43      pk       2969:        /* Initialize MMU resource queues */
1.69      pk       2970: #if defined(SUN4_MMU3L)
1.43      pk       2971:        TAILQ_INIT(&region_freelist);
                   2972:        TAILQ_INIT(&region_lru);
                   2973:        TAILQ_INIT(&region_locked);
                   2974: #endif
                   2975:        TAILQ_INIT(&segm_freelist);
                   2976:        TAILQ_INIT(&segm_lru);
                   2977:        TAILQ_INIT(&segm_locked);
                   2978:
1.1       deraadt  2979:        /*
                   2980:         * Set up the `constants' for the call to vm_init()
                   2981:         * in main().  All pages beginning at p (rounded up to
                   2982:         * the next whole page) and continuing through the number
                   2983:         * of available pages are free, but they start at a higher
                   2984:         * virtual address.  This gives us two mappable MD pages
                   2985:         * for pmap_zero_page and pmap_copy_page, and one MI page
                   2986:         * for /dev/mem, all with no associated physical memory.
                   2987:         */
                   2988:        p = (caddr_t)(((u_int)p + NBPG - 1) & ~PGOFSET);
1.36      pk       2989:
                   2990:        /*
1.122     pk       2991:         * Grab physical memory list.
1.36      pk       2992:         */
1.122     pk       2993:        get_phys_mem();
                   2994:
                   2995:        /* Allocate physical memory for pv_table[] */
1.124   ! pk       2996:        p += pv_table_map((paddr_t)p - KERNBASE, 0);
        !          2997:        avail_start = (paddr_t)p - KERNBASE;
1.38      pk       2998:
                   2999:        i = (int)p;
                   3000:        vpage[0] = p, p += NBPG;
                   3001:        vpage[1] = p, p += NBPG;
1.41      mycroft  3002:        vmmap = p, p += NBPG;
1.38      pk       3003:        p = reserve_dumppages(p);
1.39      pk       3004:
1.122     pk       3005:        /* Allocate virtual memory for pv_table[]. */
1.37      pk       3006:        pv_table = (struct pvlist *)p;
                   3007:        p += round_page(sizeof(struct pvlist) * atop(avail_end - avail_start));
1.36      pk       3008:
1.124   ! pk       3009:        virtual_avail = (vaddr_t)p;
1.1       deraadt  3010:        virtual_end = VM_MAX_KERNEL_ADDRESS;
                   3011:
                   3012:        p = (caddr_t)i;                 /* retract to first free phys */
                   3013:
                   3014:        /*
                   3015:         * All contexts are free except the kernel's.
                   3016:         *
                   3017:         * XXX sun4c could use context 0 for users?
                   3018:         */
1.42      mycroft  3019:        ci->c_pmap = pmap_kernel();
1.1       deraadt  3020:        ctx_freelist = ci + 1;
                   3021:        for (i = 1; i < ncontext; i++) {
                   3022:                ci++;
                   3023:                ci->c_nextfree = ci + 1;
                   3024:        }
                   3025:        ci->c_nextfree = NULL;
                   3026:        ctx_kick = 0;
                   3027:        ctx_kickdir = -1;
                   3028:
                   3029:        /*
                   3030:         * Init mmu entries that map the kernel physical addresses.
                   3031:         *
                   3032:         * All the other MMU entries are free.
                   3033:         *
                   3034:         * THIS ASSUMES SEGMENT i IS MAPPED BY MMU ENTRY i DURING THE
                   3035:         * BOOT PROCESS
                   3036:         */
1.43      pk       3037:
                   3038:        rom_setmap = promvec->pv_setctxt;
                   3039:        zseg = ((((u_int)p + NBPSG - 1) & ~SGOFSET) - KERNBASE) >> SGSHIFT;
1.1       deraadt  3040:        lastpage = VA_VPG(p);
                   3041:        if (lastpage == 0)
1.43      pk       3042:                /*
                   3043:                 * If the page bits in p are 0, we filled the last segment
                   3044:                 * exactly (now how did that happen?); if not, it is
                   3045:                 * the last page filled in the last segment.
                   3046:                 */
1.1       deraadt  3047:                lastpage = NPTESG;
1.43      pk       3048:
1.1       deraadt  3049:        p = (caddr_t)KERNBASE;          /* first va */
                   3050:        vs = VA_VSEG(KERNBASE);         /* first virtual segment */
1.43      pk       3051:        vr = VA_VREG(KERNBASE);         /* first virtual region */
                   3052:        rp = &pmap_kernel()->pm_regmap[vr];
                   3053:
                   3054:        for (rcookie = 0, scookie = 0;;) {
                   3055:
1.1       deraadt  3056:                /*
1.43      pk       3057:                 * Distribute each kernel region/segment into all contexts.
1.1       deraadt  3058:                 * This is done through the monitor ROM, rather than
                   3059:                 * directly here: if we do a setcontext we will fault,
                   3060:                 * as we are not (yet) mapped in any other context.
                   3061:                 */
1.43      pk       3062:
                   3063:                if ((vs % NSEGRG) == 0) {
                   3064:                        /* Entering a new region */
                   3065:                        if (VA_VREG(p) > vr) {
                   3066: #ifdef DEBUG
1.66      christos 3067:                                printf("note: giant kernel!\n");
1.43      pk       3068: #endif
                   3069:                                vr++, rp++;
                   3070:                        }
1.69      pk       3071: #if defined(SUN4_MMU3L)
                   3072:                        if (HASSUN4_MMU3L) {
1.43      pk       3073:                                for (i = 1; i < nctx; i++)
                   3074:                                        rom_setmap(i, p, rcookie);
                   3075:
                   3076:                                TAILQ_INSERT_TAIL(&region_locked,
                   3077:                                                  mmureg, me_list);
                   3078:                                TAILQ_INSERT_TAIL(&pmap_kernel()->pm_reglist,
                   3079:                                                  mmureg, me_pmchain);
                   3080:                                mmureg->me_cookie = rcookie;
                   3081:                                mmureg->me_pmap = pmap_kernel();
                   3082:                                mmureg->me_vreg = vr;
                   3083:                                rp->rg_smeg = rcookie;
                   3084:                                mmureg++;
                   3085:                                rcookie++;
                   3086:                        }
                   3087: #endif
                   3088:                }
                   3089:
1.69      pk       3090: #if defined(SUN4_MMU3L)
                   3091:                if (!HASSUN4_MMU3L)
1.43      pk       3092: #endif
                   3093:                        for (i = 1; i < nctx; i++)
                   3094:                                rom_setmap(i, p, scookie);
1.1       deraadt  3095:
                   3096:                /* set up the mmu entry */
1.43      pk       3097:                TAILQ_INSERT_TAIL(&segm_locked, mmuseg, me_list);
                   3098:                TAILQ_INSERT_TAIL(&pmap_kernel()->pm_seglist, mmuseg, me_pmchain);
1.70      pk       3099:                pmap_stats.ps_npmeg_locked++;
1.43      pk       3100:                mmuseg->me_cookie = scookie;
                   3101:                mmuseg->me_pmap = pmap_kernel();
                   3102:                mmuseg->me_vreg = vr;
                   3103:                mmuseg->me_vseg = vs % NSEGRG;
                   3104:                rp->rg_segmap[vs % NSEGRG].sg_pmeg = scookie;
                   3105:                npte = ++scookie < zseg ? NPTESG : lastpage;
                   3106:                rp->rg_segmap[vs % NSEGRG].sg_npte = npte;
                   3107:                rp->rg_nsegmap += 1;
                   3108:                mmuseg++;
1.1       deraadt  3109:                vs++;
1.43      pk       3110:                if (scookie < zseg) {
1.1       deraadt  3111:                        p += NBPSG;
                   3112:                        continue;
                   3113:                }
1.43      pk       3114:
1.1       deraadt  3115:                /*
                   3116:                 * Unmap the pages, if any, that are not part of
                   3117:                 * the final segment.
                   3118:                 */
1.43      pk       3119:                for (p += npte << PGSHIFT; npte < NPTESG; npte++, p += NBPG)
1.55      pk       3120:                        setpte4(p, 0);
1.43      pk       3121:
1.69      pk       3122: #if defined(SUN4_MMU3L)
                   3123:                if (HASSUN4_MMU3L) {
1.43      pk       3124:                        /*
                   3125:                         * Unmap the segments, if any, that are not part of
                   3126:                         * the final region.
                   3127:                         */
                   3128:                        for (i = rp->rg_nsegmap; i < NSEGRG; i++, p += NBPSG)
                   3129:                                setsegmap(p, seginval);
                   3130:                }
                   3131: #endif
1.1       deraadt  3132:                break;
                   3133:        }
1.43      pk       3134:
1.69      pk       3135: #if defined(SUN4_MMU3L)
                   3136:        if (HASSUN4_MMU3L)
1.43      pk       3137:                for (; rcookie < nregion; rcookie++, mmureg++) {
                   3138:                        mmureg->me_cookie = rcookie;
                   3139:                        TAILQ_INSERT_TAIL(&region_freelist, mmureg, me_list);
                   3140:                }
                   3141: #endif
                   3142:
                   3143:        for (; scookie < nsegment; scookie++, mmuseg++) {
                   3144:                mmuseg->me_cookie = scookie;
                   3145:                TAILQ_INSERT_TAIL(&segm_freelist, mmuseg, me_list);
1.70      pk       3146:                pmap_stats.ps_npmeg_free++;
1.1       deraadt  3147:        }
                   3148:
1.13      pk       3149:        /* Erase all spurious user-space segmaps */
                   3150:        for (i = 1; i < ncontext; i++) {
1.71      pk       3151:                setcontext4(i);
1.69      pk       3152:                if (HASSUN4_MMU3L)
1.43      pk       3153:                        for (p = 0, j = NUREG; --j >= 0; p += NBPRG)
                   3154:                                setregmap(p, reginval);
                   3155:                else
                   3156:                        for (p = 0, vr = 0; vr < NUREG; vr++) {
1.45      pk       3157:                                if (VA_INHOLE(p)) {
                   3158:                                        p = (caddr_t)MMU_HOLE_END;
                   3159:                                        vr = VA_VREG(p);
1.43      pk       3160:                                }
                   3161:                                for (j = NSEGRG; --j >= 0; p += NBPSG)
                   3162:                                        setsegmap(p, seginval);
                   3163:                        }
1.13      pk       3164:        }
1.71      pk       3165:        setcontext4(0);
1.13      pk       3166:
1.1       deraadt  3167:        /*
                   3168:         * write protect & encache kernel text;
                   3169:         * set red zone at kernel base; enable cache on message buffer.
                   3170:         */
                   3171:        {
1.23      deraadt  3172:                extern char etext[];
1.1       deraadt  3173: #ifdef KGDB
1.124   ! pk       3174:                int mask = ~PG_NC;      /* XXX chgkprot is busted */
1.1       deraadt  3175: #else
1.124   ! pk       3176:                int mask = ~(PG_W | PG_NC);
1.1       deraadt  3177: #endif
1.2       deraadt  3178:
1.23      deraadt  3179:                for (p = (caddr_t)trapbase; p < etext; p += NBPG)
1.55      pk       3180:                        setpte4(p, getpte4(p) & mask);
1.1       deraadt  3181:        }
1.105     mrg      3182: #if defined(MACHINE_NEW_NONCONTIG)
1.107     pk       3183:        pmap_page_upload();
1.105     mrg      3184: #endif
1.1       deraadt  3185: }
1.55      pk       3186: #endif
1.1       deraadt  3187:
1.55      pk       3188: #if defined(SUN4M)             /* Sun4M version of pmap_bootstrap */
                   3189: /*
                   3190:  * Bootstrap the system enough to run with VM enabled on a Sun4M machine.
                   3191:  *
                   3192:  * Switches from ROM to kernel page tables, and sets up initial mappings.
                   3193:  */
                   3194: static void
                   3195: pmap_bootstrap4m(void)
1.36      pk       3196: {
1.124   ! pk       3197:        int i, j;
1.122     pk       3198:        caddr_t p, q;
                   3199:        union ctxinfo *ci;
                   3200:        int reg, seg;
1.71      pk       3201:        unsigned int ctxtblsize;
1.79      pk       3202:        caddr_t pagetables_start, pagetables_end;
1.55      pk       3203:        extern char end[];
                   3204:        extern char etext[];
1.78      pk       3205:        extern caddr_t reserve_dumppages(caddr_t);
1.55      pk       3206: #ifdef DDB
                   3207:        extern char *esym;
                   3208: #endif
1.36      pk       3209:
1.55      pk       3210: #if defined(SUN4) || defined(SUN4C) /* setup 4M fn. ptrs for dual-arch kernel */
                   3211:        pmap_clear_modify_p     =       pmap_clear_modify4m;
                   3212:        pmap_clear_reference_p  =       pmap_clear_reference4m;
                   3213:        pmap_copy_page_p        =       pmap_copy_page4m;
                   3214:        pmap_enter_p            =       pmap_enter4m;
                   3215:        pmap_extract_p          =       pmap_extract4m;
                   3216:        pmap_is_modified_p      =       pmap_is_modified4m;
                   3217:        pmap_is_referenced_p    =       pmap_is_referenced4m;
                   3218:        pmap_page_protect_p     =       pmap_page_protect4m;
                   3219:        pmap_protect_p          =       pmap_protect4m;
                   3220:        pmap_zero_page_p        =       pmap_zero_page4m;
                   3221:        pmap_changeprot_p       =       pmap_changeprot4m;
                   3222:        pmap_rmk_p              =       pmap_rmk4m;
                   3223:        pmap_rmu_p              =       pmap_rmu4m;
                   3224: #endif /* defined Sun4/Sun4c */
1.37      pk       3225:
1.36      pk       3226:        /*
1.55      pk       3227:         * Intialize the kernel pmap.
                   3228:         */
                   3229:        /* kernel_pmap_store.pm_ctxnum = 0; */
1.87      pk       3230:        simple_lock_init(&kernel_pmap_store.pm_lock);
1.55      pk       3231:        kernel_pmap_store.pm_refcount = 1;
1.71      pk       3232:
                   3233:        /*
                   3234:         * Set up pm_regmap for kernel to point NUREG *below* the beginning
1.55      pk       3235:         * of kernel regmap storage. Since the kernel only uses regions
                   3236:         * above NUREG, we save storage space and can index kernel and
                   3237:         * user regions in the same way
1.36      pk       3238:         */
1.55      pk       3239:        kernel_pmap_store.pm_regmap = &kernel_regmap_store[-NUREG];
                   3240:        kernel_pmap_store.pm_reg_ptps = NULL;
                   3241:        kernel_pmap_store.pm_reg_ptps_pa = 0;
                   3242:        bzero(kernel_regmap_store, NKREG * sizeof(struct regmap));
                   3243:        bzero(kernel_segmap_store, NKREG * NSEGRG * sizeof(struct segmap));
                   3244:        for (i = NKREG; --i >= 0;) {
                   3245:                kernel_regmap_store[i].rg_segmap =
                   3246:                        &kernel_segmap_store[i * NSEGRG];
                   3247:                kernel_regmap_store[i].rg_seg_ptps = NULL;
                   3248:                for (j = NSEGRG; --j >= 0;)
                   3249:                        kernel_segmap_store[i * NSEGRG + j].sg_pte = NULL;
                   3250:        }
1.38      pk       3251:
1.55      pk       3252:        p = end;                /* p points to top of kernel mem */
                   3253: #ifdef DDB
                   3254:        if (esym != 0)
1.78      pk       3255:                p = esym;
1.55      pk       3256: #endif
                   3257:
1.77      pk       3258:
1.71      pk       3259:        /* Allocate context administration */
1.69      pk       3260:        pmap_kernel()->pm_ctx = cpuinfo.ctxinfo = ci = (union ctxinfo *)p;
1.55      pk       3261:        p += ncontext * sizeof *ci;
1.69      pk       3262:        bzero((caddr_t)ci, (u_int)p - (u_int)ci);
1.77      pk       3263: #if 0
1.55      pk       3264:        ctxbusyvector = p;
                   3265:        p += ncontext;
                   3266:        bzero(ctxbusyvector, ncontext);
                   3267:        ctxbusyvector[0] = 1;   /* context 0 is always in use */
1.69      pk       3268: #endif
1.55      pk       3269:
1.77      pk       3270:
                   3271:        /*
                   3272:         * Set up the `constants' for the call to vm_init()
                   3273:         * in main().  All pages beginning at p (rounded up to
                   3274:         * the next whole page) and continuing through the number
                   3275:         * of available pages are free.
                   3276:         */
                   3277:        p = (caddr_t)(((u_int)p + NBPG - 1) & ~PGOFSET);
1.122     pk       3278:
1.77      pk       3279:        /*
1.122     pk       3280:         * Grab physical memory list.
1.77      pk       3281:         */
1.122     pk       3282:        get_phys_mem();
                   3283:
                   3284:        /* Allocate physical memory for pv_table[] */
1.124   ! pk       3285:        p += pv_table_map((paddr_t)p - KERNBASE, 0);
        !          3286:        avail_start = (paddr_t)p - KERNBASE;
1.77      pk       3287:
                   3288:        /*
                   3289:         * Reserve memory for MMU pagetables. Some of these have severe
                   3290:         * alignment restrictions. We allocate in a sequence that
                   3291:         * minimizes alignment gaps.
                   3292:         * The amount of physical memory that becomes unavailable for
1.108     pk       3293:         * general VM use is marked by [unavail_gap_start, unavail_gap_end>.
1.77      pk       3294:         */
                   3295:
1.55      pk       3296:        /*
1.71      pk       3297:         * Reserve memory for I/O pagetables. This takes 64k of memory
1.55      pk       3298:         * since we want to have 64M of dvma space (this actually depends
1.77      pk       3299:         * on the definition of DVMA4M_BASE...we may drop it back to 32M).
                   3300:         * The table must be aligned on a (-DVMA4M_BASE/NBPG) boundary
                   3301:         * (i.e. 64K for 64M of dvma space).
1.55      pk       3302:         */
                   3303: #ifdef DEBUG
                   3304:        if ((0 - DVMA4M_BASE) % (16*1024*1024))
1.71      pk       3305:            panic("pmap_bootstrap4m: invalid DVMA4M_BASE of 0x%x", DVMA4M_BASE);
1.55      pk       3306: #endif
                   3307:
1.77      pk       3308:        p = (caddr_t) roundup((u_int)p, (0 - DVMA4M_BASE) / 1024);
1.124   ! pk       3309:        unavail_gap_start = (paddr_t)p - KERNBASE;
1.55      pk       3310:
                   3311:        kernel_iopte_table = (u_int *)p;
                   3312:        kernel_iopte_table_pa = VA2PA((caddr_t)kernel_iopte_table);
                   3313:        p += (0 - DVMA4M_BASE) / 1024;
                   3314:
1.79      pk       3315:        pagetables_start = p;
1.55      pk       3316:        /*
1.77      pk       3317:         * Allocate context table.
1.71      pk       3318:         * To keep supersparc happy, minimum aligment is on a 4K boundary.
                   3319:         */
                   3320:        ctxtblsize = max(ncontext,1024) * sizeof(int);
                   3321:        cpuinfo.ctx_tbl = (int *)roundup((u_int)p, ctxtblsize);
                   3322:        p = (caddr_t)((u_int)cpuinfo.ctx_tbl + ctxtblsize);
                   3323:
                   3324:        /*
                   3325:         * Reserve memory for segment and page tables needed to map the entire
1.96      pk       3326:         * kernel. This takes (2k + NKREG * 16k) of space, but
1.55      pk       3327:         * unfortunately is necessary since pmap_enk *must* be able to enter
                   3328:         * a kernel mapping without resorting to malloc, or else the
                   3329:         * possibility of deadlock arises (pmap_enk4m is called to enter a
                   3330:         * mapping; it needs to malloc a page table; malloc then calls
                   3331:         * pmap_enk4m to enter the new malloc'd page; pmap_enk4m needs to
                   3332:         * malloc a page table to enter _that_ mapping; malloc deadlocks since
                   3333:         * it is already allocating that object).
                   3334:         */
1.122     pk       3335:        p = (caddr_t) roundup((u_int)p, SRMMU_L1SIZE * sizeof(u_int));
                   3336:        qzero(p, SRMMU_L1SIZE * sizeof(u_int));
1.77      pk       3337:        kernel_regtable_store = (u_int *)p;
1.122     pk       3338:        p += SRMMU_L1SIZE * sizeof(u_int);
1.77      pk       3339:
1.122     pk       3340:        p = (caddr_t) roundup((u_int)p, SRMMU_L2SIZE * sizeof(u_int));
                   3341:        qzero(p, (SRMMU_L2SIZE * sizeof(u_int)) * NKREG);
1.77      pk       3342:        kernel_segtable_store = (u_int *)p;
1.122     pk       3343:        p += (SRMMU_L2SIZE * sizeof(u_int)) * NKREG;
1.77      pk       3344:
1.122     pk       3345:        p = (caddr_t) roundup((u_int)p, SRMMU_L3SIZE * sizeof(u_int));
                   3346:        /* zero it: all will be SRMMU_TEINVALID */
                   3347:        qzero(p, ((SRMMU_L3SIZE * sizeof(u_int)) * NKREG) * NSEGRG);
1.77      pk       3348:        kernel_pagtable_store = (u_int *)p;
1.122     pk       3349:        p += ((SRMMU_L3SIZE * sizeof(u_int)) * NKREG) * NSEGRG;
1.77      pk       3350:
                   3351:        /* Round to next page and mark end of stolen pages */
                   3352:        p = (caddr_t)(((u_int)p + NBPG - 1) & ~PGOFSET);
1.79      pk       3353:        pagetables_end = p;
1.124   ! pk       3354:        unavail_gap_end = (paddr_t)p - KERNBASE;
1.71      pk       3355:
                   3356:        /*
                   3357:         * Since we've statically allocated space to map the entire kernel,
                   3358:         * we might as well pre-wire the mappings to save time in pmap_enter.
                   3359:         * This also gets around nasty problems with caching of L1/L2 ptp's.
                   3360:         *
                   3361:         * XXX WHY DO WE HAVE THIS CACHING PROBLEM WITH L1/L2 PTPS????? %%%
                   3362:         */
                   3363:
                   3364:        pmap_kernel()->pm_reg_ptps = (int *) kernel_regtable_store;
                   3365:        pmap_kernel()->pm_reg_ptps_pa =
                   3366:                VA2PA((caddr_t)pmap_kernel()->pm_reg_ptps);
                   3367:
                   3368:        /* Install L1 table in context 0 */
1.79      pk       3369:        setpgt4m(&cpuinfo.ctx_tbl[0],
                   3370:            (pmap_kernel()->pm_reg_ptps_pa >> SRMMU_PPNPASHIFT) | SRMMU_TEPTD);
1.71      pk       3371:
                   3372:        /* XXX:rethink - Store pointer to region table address */
                   3373:        cpuinfo.L1_ptps = pmap_kernel()->pm_reg_ptps;
1.55      pk       3374:
1.96      pk       3375:        for (reg = 0; reg < NKREG; reg++) {
1.77      pk       3376:                struct regmap *rp;
1.71      pk       3377:                caddr_t kphyssegtbl;
                   3378:
                   3379:                /*
1.77      pk       3380:                 * Entering new region; install & build segtbl
1.71      pk       3381:                 */
                   3382:
1.96      pk       3383:                rp = &pmap_kernel()->pm_regmap[reg + VA_VREG(KERNBASE)];
1.71      pk       3384:
                   3385:                kphyssegtbl = (caddr_t)
1.96      pk       3386:                    &kernel_segtable_store[reg * SRMMU_L2SIZE];
1.71      pk       3387:
1.96      pk       3388:                setpgt4m(&pmap_kernel()->pm_reg_ptps[reg + VA_VREG(KERNBASE)],
1.77      pk       3389:                    (VA2PA(kphyssegtbl) >> SRMMU_PPNPASHIFT) | SRMMU_TEPTD);
1.71      pk       3390:
                   3391:                rp->rg_seg_ptps = (int *)kphyssegtbl;
                   3392:
                   3393:                if (rp->rg_segmap == NULL) {
                   3394:                        printf("rp->rg_segmap == NULL!\n");
1.96      pk       3395:                        rp->rg_segmap = &kernel_segmap_store[reg * NSEGRG];
1.71      pk       3396:                }
                   3397:
                   3398:                for (seg = 0; seg < NSEGRG; seg++) {
1.77      pk       3399:                        struct segmap *sp;
1.71      pk       3400:                        caddr_t kphyspagtbl;
                   3401:
                   3402:                        rp->rg_nsegmap++;
                   3403:
                   3404:                        sp = &rp->rg_segmap[seg];
                   3405:                        kphyspagtbl = (caddr_t)
                   3406:                            &kernel_pagtable_store
1.96      pk       3407:                                [((reg * NSEGRG) + seg) * SRMMU_L3SIZE];
1.71      pk       3408:
1.77      pk       3409:                        setpgt4m(&rp->rg_seg_ptps[seg],
                   3410:                                 (VA2PA(kphyspagtbl) >> SRMMU_PPNPASHIFT) |
                   3411:                                 SRMMU_TEPTD);
1.71      pk       3412:                        sp->sg_pte = (int *) kphyspagtbl;
                   3413:                }
                   3414:        }
                   3415:
                   3416:        /*
                   3417:         * Preserve the monitor ROM's reserved VM region, so that
                   3418:         * we can use L1-A or the monitor's debugger.
1.55      pk       3419:         */
1.77      pk       3420:        mmu_reservemon4m(&kernel_pmap_store);
1.55      pk       3421:
                   3422:        /*
1.77      pk       3423:         * Reserve virtual address space for two mappable MD pages
                   3424:         * for pmap_zero_page and pmap_copy_page, one MI page
                   3425:         * for /dev/mem, and some more for dumpsys().
1.55      pk       3426:         */
1.77      pk       3427:        q = p;
1.55      pk       3428:        vpage[0] = p, p += NBPG;
                   3429:        vpage[1] = p, p += NBPG;
                   3430:        vmmap = p, p += NBPG;
                   3431:        p = reserve_dumppages(p);
                   3432:
1.101     pk       3433:        /* Find PTE locations of vpage[] to optimize zero_fill() et.al. */
                   3434:        for (i = 0; i < 2; i++) {
                   3435:                struct regmap *rp;
                   3436:                struct segmap *sp;
                   3437:                rp = &pmap_kernel()->pm_regmap[VA_VREG(vpage[i])];
                   3438:                sp = &rp->rg_segmap[VA_VSEG(vpage[i])];
                   3439:                vpage_pte[i] = &sp->sg_pte[VA_SUN4M_VPG(vpage[i])];
                   3440:        }
                   3441:
1.122     pk       3442:        /* Allocate virtual memory for pv_table[]. */
1.55      pk       3443:        pv_table = (struct pvlist *)p;
                   3444:        p += round_page(sizeof(struct pvlist) * atop(avail_end - avail_start));
                   3445:
1.124   ! pk       3446:        virtual_avail = (vaddr_t)p;
1.55      pk       3447:        virtual_end = VM_MAX_KERNEL_ADDRESS;
                   3448:
1.77      pk       3449:        p = q;                  /* retract to first free phys */
1.55      pk       3450:
1.69      pk       3451:        /*
                   3452:         * Set up the ctxinfo structures (freelist of contexts)
1.55      pk       3453:         */
                   3454:        ci->c_pmap = pmap_kernel();
                   3455:        ctx_freelist = ci + 1;
                   3456:        for (i = 1; i < ncontext; i++) {
                   3457:                ci++;
                   3458:                ci->c_nextfree = ci + 1;
                   3459:        }
                   3460:        ci->c_nextfree = NULL;
                   3461:        ctx_kick = 0;
                   3462:        ctx_kickdir = -1;
                   3463:
1.69      pk       3464:        /*
                   3465:         * Now map the kernel into our new set of page tables, then
1.55      pk       3466:         * (finally) switch over to our running page tables.
                   3467:         * We map from KERNBASE to p into context 0's page tables (and
                   3468:         * the kernel pmap).
                   3469:         */
                   3470: #ifdef DEBUG                   /* Sanity checks */
                   3471:        if ((u_int)p % NBPG != 0)
1.69      pk       3472:                panic("pmap_bootstrap4m: p misaligned?!?");
1.55      pk       3473:        if (KERNBASE % NBPRG != 0)
1.69      pk       3474:                panic("pmap_bootstrap4m: KERNBASE not region-aligned");
1.55      pk       3475: #endif
1.69      pk       3476:
                   3477:        for (q = (caddr_t) KERNBASE; q < p; q += NBPG) {
1.77      pk       3478:                struct regmap *rp;
                   3479:                struct segmap *sp;
                   3480:                int pte;
                   3481:
1.79      pk       3482:                if ((int)q >= KERNBASE + avail_start &&
1.108     pk       3483:                    (int)q < KERNBASE + unavail_gap_start)
1.77      pk       3484:                        /* This gap is part of VM-managed pages */
                   3485:                        continue;
                   3486:
1.69      pk       3487:                /*
1.71      pk       3488:                 * Now install entry for current page.
1.69      pk       3489:                 */
1.77      pk       3490:                rp = &pmap_kernel()->pm_regmap[VA_VREG(q)];
                   3491:                sp = &rp->rg_segmap[VA_VSEG(q)];
                   3492:                sp->sg_npte++;
                   3493:
                   3494:                pte = ((int)q - KERNBASE) >> SRMMU_PPNPASHIFT;
1.122     pk       3495:                pte |= PPROT_N_RX | SRMMU_TEPTE;
                   3496:
                   3497:                /* Deal with the cacheable bit for pagetable memory */
                   3498:                if ((cpuinfo.flags & CPUFLG_CACHEPAGETABLES) != 0 ||
                   3499:                    q < pagetables_start || q >= pagetables_end)
                   3500:                        pte |= SRMMU_PG_C;
                   3501:
1.77      pk       3502:                /* write-protect kernel text */
                   3503:                if (q < (caddr_t) trapbase || q >= etext)
                   3504:                        pte |= PPROT_WRITE;
                   3505:
                   3506:                setpgt4m(&sp->sg_pte[VA_VPG(q)], pte);
1.69      pk       3507:        }
                   3508:
1.77      pk       3509: #if 0
1.55      pk       3510:        /*
                   3511:         * We also install the kernel mapping into all other contexts by
1.69      pk       3512:         * copying the context 0 L1 PTP from cpuinfo.ctx_tbl[0] into the
1.55      pk       3513:         * remainder of the context table (i.e. we share the kernel page-
                   3514:         * tables). Each user pmap automatically gets the kernel mapped
                   3515:         * into it when it is created, but we do this extra step early on
                   3516:         * in case some twit decides to switch to a context with no user
                   3517:         * pmap associated with it.
                   3518:         */
                   3519:        for (i = 1; i < ncontext; i++)
1.69      pk       3520:                cpuinfo.ctx_tbl[i] = cpuinfo.ctx_tbl[0];
                   3521: #endif
1.55      pk       3522:
1.100     pk       3523:        if ((cpuinfo.flags & CPUFLG_CACHEPAGETABLES) == 0)
                   3524:                /* Flush page tables from cache */
                   3525:                pcache_flush(pagetables_start, (caddr_t)VA2PA(pagetables_start),
                   3526:                             pagetables_end - pagetables_start);
                   3527:
1.55      pk       3528:        /*
                   3529:         * Now switch to kernel pagetables (finally!)
                   3530:         */
1.69      pk       3531:        mmu_install_tables(&cpuinfo);
1.79      pk       3532:
1.107     pk       3533: #if defined(MACHINE_NEW_NONCONTIG)
                   3534:        pmap_page_upload();
                   3535: #endif
1.69      pk       3536: }
                   3537:
1.97      pk       3538: static u_long prom_ctxreg;
                   3539:
1.69      pk       3540: void
                   3541: mmu_install_tables(sc)
                   3542:        struct cpu_softc *sc;
                   3543: {
                   3544:
                   3545: #ifdef DEBUG
                   3546:        printf("pmap_bootstrap: installing kernel page tables...");
                   3547: #endif
1.71      pk       3548:        setcontext4m(0);        /* paranoia? %%%: Make 0x3 a define! below */
1.69      pk       3549:
                   3550:        /* Enable MMU tablewalk caching, flush TLB */
                   3551:        if (sc->mmu_enable != 0)
                   3552:                sc->mmu_enable();
                   3553:
                   3554:        tlb_flush_all();
1.97      pk       3555:        prom_ctxreg = lda(SRMMU_CXTPTR, ASI_SRMMU);
1.69      pk       3556:
                   3557:        sta(SRMMU_CXTPTR, ASI_SRMMU,
                   3558:            (VA2PA((caddr_t)sc->ctx_tbl) >> SRMMU_PPNPASHIFT) & ~0x3);
                   3559:
                   3560:        tlb_flush_all();
                   3561:
                   3562: #ifdef DEBUG
                   3563:        printf("done.\n");
                   3564: #endif
                   3565: }
1.55      pk       3566:
1.97      pk       3567: void srmmu_restore_prom_ctx __P((void));
                   3568:
                   3569: void
                   3570: srmmu_restore_prom_ctx()
                   3571: {
                   3572:        tlb_flush_all();
                   3573:        sta(SRMMU_CXTPTR, ASI_SRMMU, prom_ctxreg);
                   3574:        tlb_flush_all();
                   3575: }
                   3576:
1.69      pk       3577: /*
                   3578:  * Allocate per-CPU page tables.
                   3579:  * Note: this routine is called in the context of the boot CPU
                   3580:  * during autoconfig.
                   3581:  */
                   3582: void
                   3583: pmap_alloc_cpu(sc)
                   3584:        struct cpu_softc *sc;
                   3585: {
1.72      pk       3586:        caddr_t cpustore;
                   3587:        int *ctxtable;
                   3588:        int *regtable;
                   3589:        int *segtable;
                   3590:        int *pagtable;
                   3591:        int vr, vs, vpg;
                   3592:        struct regmap *rp;
                   3593:        struct segmap *sp;
                   3594:
                   3595:        /* Allocate properly aligned and physically contiguous memory here */
                   3596:        cpustore = 0;
                   3597:        ctxtable = 0;
                   3598:        regtable = 0;
                   3599:        segtable = 0;
                   3600:        pagtable = 0;
                   3601:
                   3602:        vr = VA_VREG(CPUINFO_VA);
                   3603:        vs = VA_VSEG(CPUINFO_VA);
                   3604:        vpg = VA_VPG(CPUINFO_VA);
                   3605:        rp = &pmap_kernel()->pm_regmap[vr];
                   3606:        sp = &rp->rg_segmap[vs];
                   3607:
                   3608:        /*
                   3609:         * Copy page tables, then modify entry for CPUINFO_VA so that
                   3610:         * it points at the per-CPU pages.
                   3611:         */
                   3612:        bcopy(cpuinfo.L1_ptps, regtable, SRMMU_L1SIZE * sizeof(int));
                   3613:        regtable[vr] =
                   3614:                (VA2PA((caddr_t)segtable) >> SRMMU_PPNPASHIFT) | SRMMU_TEPTD;
                   3615:
                   3616:        bcopy(rp->rg_seg_ptps, segtable, SRMMU_L2SIZE * sizeof(int));
                   3617:        segtable[vs] =
                   3618:                (VA2PA((caddr_t)pagtable) >> SRMMU_PPNPASHIFT) | SRMMU_TEPTD;
                   3619:
                   3620:        bcopy(sp->sg_pte, pagtable, SRMMU_L3SIZE * sizeof(int));
                   3621:        pagtable[vpg] =
                   3622:                (VA2PA((caddr_t)cpustore) >> SRMMU_PPNPASHIFT) |
                   3623:                (SRMMU_TEPTE | PPROT_RWX_RWX | SRMMU_PG_C);
1.69      pk       3624:
1.72      pk       3625:        /* Install L1 table in context 0 */
                   3626:        ctxtable[0] = ((u_int)regtable >> SRMMU_PPNPASHIFT) | SRMMU_TEPTD;
                   3627:
                   3628:        sc->ctx_tbl = ctxtable;
                   3629:        sc->L1_ptps = regtable;
1.69      pk       3630:
1.72      pk       3631: #if 0
1.69      pk       3632:        if ((sc->flags & CPUFLG_CACHEPAGETABLES) == 0) {
1.72      pk       3633:                kvm_uncache((caddr_t)0, 1);
1.69      pk       3634:        }
1.72      pk       3635: #endif
1.55      pk       3636: }
1.97      pk       3637: #endif /* SUN4M */
1.55      pk       3638:
1.69      pk       3639:
1.55      pk       3640: void
                   3641: pmap_init()
                   3642: {
                   3643:
                   3644:        if (PAGE_SIZE != NBPG)
                   3645:                panic("pmap_init: CLSIZE!=1");
                   3646:
1.122     pk       3647:        /* Map pv_table[] */
                   3648:        (void)pv_table_map(avail_start, 1);
1.55      pk       3649:
1.38      pk       3650:        vm_first_phys = avail_start;
                   3651:        vm_num_phys = avail_end - avail_start;
1.121     pk       3652:
1.122     pk       3653:        /* Setup a pool for additional pvlist structures */
                   3654:        pool_init(&pv_pool, sizeof(struct pvlist), 0, 0, 0, "pvtable", 0,
1.121     pk       3655:                  NULL, NULL, 0);
                   3656:
                   3657: #if defined(SUN4M)
                   3658:        if (CPU_ISSUN4M) {
                   3659:                /*
                   3660:                 * The SRMMU only ever needs chunks in one of two sizes:
                   3661:                 * 1024 (for region level tables) and 256 (for segment
                   3662:                 * and page level tables).
                   3663:                 */
                   3664:                int n;
                   3665:
                   3666:                n = SRMMU_L1SIZE * sizeof(int);
                   3667:                pool_init(&L1_pool, n, n, 0, 0, "L1 pagetable", 0,
                   3668:                          pgt_page_alloc, pgt_page_free, 0);
                   3669:
                   3670:                n = SRMMU_L2SIZE * sizeof(int);
                   3671:                pool_init(&L23_pool, n, n, 0, 0, "L2/L3 pagetable", 0,
                   3672:                          pgt_page_alloc, pgt_page_free, 0);
                   3673:        }
                   3674: #endif
1.36      pk       3675: }
                   3676:
1.1       deraadt  3677:
                   3678: /*
                   3679:  * Map physical addresses into kernel VM.
                   3680:  */
1.124   ! pk       3681: vaddr_t
1.1       deraadt  3682: pmap_map(va, pa, endpa, prot)
1.124   ! pk       3683:        vaddr_t va;
        !          3684:        paddr_t pa, endpa;
        !          3685:        int prot;
1.1       deraadt  3686: {
1.124   ! pk       3687:        int pgsize = PAGE_SIZE;
1.1       deraadt  3688:
                   3689:        while (pa < endpa) {
1.42      mycroft  3690:                pmap_enter(pmap_kernel(), va, pa, prot, 1);
1.1       deraadt  3691:                va += pgsize;
                   3692:                pa += pgsize;
                   3693:        }
                   3694:        return (va);
                   3695: }
                   3696:
                   3697: /*
                   3698:  * Create and return a physical map.
                   3699:  *
                   3700:  * If size is nonzero, the map is useless. (ick)
                   3701:  */
                   3702: struct pmap *
                   3703: pmap_create(size)
1.124   ! pk       3704:        vsize_t size;
1.1       deraadt  3705: {
1.124   ! pk       3706:        struct pmap *pm;
1.1       deraadt  3707:
                   3708:        if (size)
                   3709:                return (NULL);
                   3710:        pm = (struct pmap *)malloc(sizeof *pm, M_VMPMAP, M_WAITOK);
                   3711: #ifdef DEBUG
                   3712:        if (pmapdebug & PDB_CREATE)
1.66      christos 3713:                printf("pmap_create: created %p\n", pm);
1.1       deraadt  3714: #endif
                   3715:        bzero((caddr_t)pm, sizeof *pm);
                   3716:        pmap_pinit(pm);
                   3717:        return (pm);
                   3718: }
                   3719:
                   3720: /*
                   3721:  * Initialize a preallocated and zeroed pmap structure,
                   3722:  * such as one in a vmspace structure.
                   3723:  */
                   3724: void
                   3725: pmap_pinit(pm)
1.124   ! pk       3726:        struct pmap *pm;
1.1       deraadt  3727: {
1.124   ! pk       3728:        int size;
1.43      pk       3729:        void *urp;
1.1       deraadt  3730:
                   3731: #ifdef DEBUG
                   3732:        if (pmapdebug & PDB_CREATE)
1.66      christos 3733:                printf("pmap_pinit(%p)\n", pm);
1.1       deraadt  3734: #endif
1.13      pk       3735:
1.43      pk       3736:        size = NUREG * sizeof(struct regmap);
1.55      pk       3737:
1.43      pk       3738:        pm->pm_regstore = urp = malloc(size, M_VMPMAP, M_WAITOK);
1.55      pk       3739:        qzero((caddr_t)urp, size);
1.1       deraadt  3740:        /* pm->pm_ctx = NULL; */
                   3741:        simple_lock_init(&pm->pm_lock);
                   3742:        pm->pm_refcount = 1;
1.43      pk       3743:        pm->pm_regmap = urp;
1.55      pk       3744:
                   3745:        if (CPU_ISSUN4OR4C) {
                   3746:                TAILQ_INIT(&pm->pm_seglist);
1.69      pk       3747: #if defined(SUN4_MMU3L)
1.55      pk       3748:                TAILQ_INIT(&pm->pm_reglist);
1.69      pk       3749:                if (HASSUN4_MMU3L) {
                   3750:                        int i;
                   3751:                        for (i = NUREG; --i >= 0;)
                   3752:                                pm->pm_regmap[i].rg_smeg = reginval;
                   3753:                }
1.43      pk       3754: #endif
1.100     pk       3755:                pm->pm_gap_end = VA_VREG(VM_MAXUSER_ADDRESS);
1.55      pk       3756:        }
                   3757: #if defined(SUN4M)
                   3758:        else {
1.79      pk       3759:                int i;
                   3760:
1.55      pk       3761:                /*
                   3762:                 * We must allocate and initialize hardware-readable (MMU)
                   3763:                 * pagetables. We must also map the kernel regions into this
                   3764:                 * pmap's pagetables, so that we can access the kernel from
1.89      pk       3765:                 * this user context.
1.55      pk       3766:                 *
                   3767:                 * Note: pm->pm_regmap's have been zeroed already, so we don't
                   3768:                 * need to explicitly mark them as invalid (a null
                   3769:                 * rg_seg_ptps pointer indicates invalid for the 4m)
                   3770:                 */
1.121     pk       3771:                urp = pool_get(&L1_pool, PR_WAITOK);
1.55      pk       3772:                pm->pm_reg_ptps = urp;
                   3773:                pm->pm_reg_ptps_pa = VA2PA(urp);
1.89      pk       3774:                for (i = 0; i < NUREG; i++)
                   3775:                        setpgt4m(&pm->pm_reg_ptps[i], SRMMU_TEINVALID);
1.55      pk       3776:
1.79      pk       3777:                /* Copy kernel regions */
                   3778:                for (i = 0; i < NKREG; i++) {
                   3779:                        setpgt4m(&pm->pm_reg_ptps[VA_VREG(KERNBASE) + i],
                   3780:                                 cpuinfo.L1_ptps[VA_VREG(KERNBASE) + i]);
                   3781:                }
1.55      pk       3782:        }
                   3783: #endif
                   3784:
1.43      pk       3785:        return;
1.1       deraadt  3786: }
                   3787:
                   3788: /*
                   3789:  * Retire the given pmap from service.
                   3790:  * Should only be called if the map contains no valid mappings.
                   3791:  */
                   3792: void
                   3793: pmap_destroy(pm)
1.124   ! pk       3794:        struct pmap *pm;
1.1       deraadt  3795: {
                   3796:        int count;
                   3797:
                   3798:        if (pm == NULL)
                   3799:                return;
                   3800: #ifdef DEBUG
                   3801:        if (pmapdebug & PDB_DESTROY)
1.66      christos 3802:                printf("pmap_destroy(%p)\n", pm);
1.1       deraadt  3803: #endif
                   3804:        simple_lock(&pm->pm_lock);
                   3805:        count = --pm->pm_refcount;
                   3806:        simple_unlock(&pm->pm_lock);
                   3807:        if (count == 0) {
                   3808:                pmap_release(pm);
1.49      pk       3809:                free(pm, M_VMPMAP);
1.1       deraadt  3810:        }
                   3811: }
                   3812:
                   3813: /*
                   3814:  * Release any resources held by the given physical map.
                   3815:  * Called when a pmap initialized by pmap_pinit is being released.
                   3816:  */
                   3817: void
                   3818: pmap_release(pm)
1.124   ! pk       3819:        struct pmap *pm;
1.1       deraadt  3820: {
1.124   ! pk       3821:        union ctxinfo *c;
        !          3822:        int s = splpmap();      /* paranoia */
1.1       deraadt  3823:
                   3824: #ifdef DEBUG
                   3825:        if (pmapdebug & PDB_DESTROY)
1.66      christos 3826:                printf("pmap_release(%p)\n", pm);
1.1       deraadt  3827: #endif
1.55      pk       3828:
                   3829:        if (CPU_ISSUN4OR4C) {
1.69      pk       3830: #if defined(SUN4_MMU3L)
1.55      pk       3831:                if (pm->pm_reglist.tqh_first)
                   3832:                        panic("pmap_release: region list not empty");
1.43      pk       3833: #endif
1.55      pk       3834:                if (pm->pm_seglist.tqh_first)
                   3835:                        panic("pmap_release: segment list not empty");
                   3836:
                   3837:                if ((c = pm->pm_ctx) != NULL) {
                   3838:                        if (pm->pm_ctxnum == 0)
                   3839:                                panic("pmap_release: releasing kernel");
                   3840:                        ctx_free(pm);
                   3841:                }
1.1       deraadt  3842:        }
1.102     pk       3843:
                   3844: #if defined(SUN4M)
                   3845:        if (CPU_ISSUN4M) {
                   3846:                if ((c = pm->pm_ctx) != NULL) {
                   3847:                        if (pm->pm_ctxnum == 0)
                   3848:                                panic("pmap_release: releasing kernel");
                   3849:                        ctx_free(pm);
                   3850:                }
1.121     pk       3851:                pool_put(&L1_pool, pm->pm_reg_ptps);
1.102     pk       3852:                pm->pm_reg_ptps = NULL;
                   3853:                pm->pm_reg_ptps_pa = 0;
                   3854:        }
                   3855: #endif
1.1       deraadt  3856:        splx(s);
1.55      pk       3857:
1.43      pk       3858: #ifdef DEBUG
1.55      pk       3859: if (pmapdebug) {
1.43      pk       3860:        int vs, vr;
                   3861:        for (vr = 0; vr < NUREG; vr++) {
                   3862:                struct regmap *rp = &pm->pm_regmap[vr];
                   3863:                if (rp->rg_nsegmap != 0)
1.66      christos 3864:                        printf("pmap_release: %d segments remain in "
1.43      pk       3865:                                "region %d\n", rp->rg_nsegmap, vr);
                   3866:                if (rp->rg_segmap != NULL) {
1.66      christos 3867:                        printf("pmap_release: segments still "
1.43      pk       3868:                                "allocated in region %d\n", vr);
                   3869:                        for (vs = 0; vs < NSEGRG; vs++) {
                   3870:                                struct segmap *sp = &rp->rg_segmap[vs];
                   3871:                                if (sp->sg_npte != 0)
1.66      christos 3872:                                        printf("pmap_release: %d ptes "
1.43      pk       3873:                                             "remain in segment %d\n",
                   3874:                                                sp->sg_npte, vs);
                   3875:                                if (sp->sg_pte != NULL) {
1.66      christos 3876:                                        printf("pmap_release: ptes still "
1.43      pk       3877:                                             "allocated in segment %d\n", vs);
                   3878:                                }
                   3879:                        }
                   3880:                }
                   3881:        }
                   3882: }
                   3883: #endif
1.102     pk       3884:
1.43      pk       3885:        if (pm->pm_regstore)
1.49      pk       3886:                free(pm->pm_regstore, M_VMPMAP);
1.1       deraadt  3887: }
                   3888:
                   3889: /*
                   3890:  * Add a reference to the given pmap.
                   3891:  */
                   3892: void
                   3893: pmap_reference(pm)
                   3894:        struct pmap *pm;
                   3895: {
                   3896:
                   3897:        if (pm != NULL) {
                   3898:                simple_lock(&pm->pm_lock);
                   3899:                pm->pm_refcount++;
                   3900:                simple_unlock(&pm->pm_lock);
                   3901:        }
                   3902: }
                   3903:
                   3904: /*
                   3905:  * Remove the given range of mapping entries.
                   3906:  * The starting and ending addresses are already rounded to pages.
                   3907:  * Sheer lunacy: pmap_remove is often asked to remove nonexistent
                   3908:  * mappings.
                   3909:  */
                   3910: void
                   3911: pmap_remove(pm, va, endva)
1.124   ! pk       3912:        struct pmap *pm;
        !          3913:        vaddr_t va, endva;
1.1       deraadt  3914: {
1.124   ! pk       3915:        vaddr_t nva;
        !          3916:        int vr, vs, s, ctx;
        !          3917:        void (*rm)(struct pmap *, vaddr_t, vaddr_t, int, int);
1.1       deraadt  3918:
                   3919:        if (pm == NULL)
                   3920:                return;
1.13      pk       3921:
1.1       deraadt  3922: #ifdef DEBUG
                   3923:        if (pmapdebug & PDB_REMOVE)
1.91      fair     3924:                printf("pmap_remove(%p, 0x%lx, 0x%lx)\n", pm, va, endva);
1.1       deraadt  3925: #endif
                   3926:
1.42      mycroft  3927:        if (pm == pmap_kernel()) {
1.1       deraadt  3928:                /*
                   3929:                 * Removing from kernel address space.
                   3930:                 */
                   3931:                rm = pmap_rmk;
                   3932:        } else {
                   3933:                /*
                   3934:                 * Removing from user address space.
                   3935:                 */
                   3936:                write_user_windows();
                   3937:                rm = pmap_rmu;
                   3938:        }
                   3939:
                   3940:        ctx = getcontext();
                   3941:        s = splpmap();          /* XXX conservative */
                   3942:        simple_lock(&pm->pm_lock);
                   3943:        for (; va < endva; va = nva) {
                   3944:                /* do one virtual segment at a time */
1.43      pk       3945:                vr = VA_VREG(va);
                   3946:                vs = VA_VSEG(va);
                   3947:                nva = VSTOVA(vr, vs + 1);
1.1       deraadt  3948:                if (nva == 0 || nva > endva)
                   3949:                        nva = endva;
1.76      pk       3950:                if (pm->pm_regmap[vr].rg_nsegmap != 0)
                   3951:                        (*rm)(pm, va, nva, vr, vs);
1.1       deraadt  3952:        }
                   3953:        simple_unlock(&pm->pm_lock);
                   3954:        splx(s);
                   3955:        setcontext(ctx);
                   3956: }
                   3957:
                   3958: /*
                   3959:  * The following magic number was chosen because:
                   3960:  *     1. It is the same amount of work to cache_flush_page 4 pages
                   3961:  *        as to cache_flush_segment 1 segment (so at 4 the cost of
                   3962:  *        flush is the same).
                   3963:  *     2. Flushing extra pages is bad (causes cache not to work).
                   3964:  *     3. The current code, which malloc()s 5 pages for each process
                   3965:  *        for a user vmspace/pmap, almost never touches all 5 of those
                   3966:  *        pages.
                   3967:  */
1.13      pk       3968: #if 0
                   3969: #define        PMAP_RMK_MAGIC  (cacheinfo.c_hwflush?5:64)      /* if > magic, use cache_flush_segment */
                   3970: #else
1.1       deraadt  3971: #define        PMAP_RMK_MAGIC  5       /* if > magic, use cache_flush_segment */
1.13      pk       3972: #endif
1.1       deraadt  3973:
                   3974: /*
                   3975:  * Remove a range contained within a single segment.
                   3976:  * These are egregiously complicated routines.
                   3977:  */
                   3978:
1.55      pk       3979: #if defined(SUN4) || defined(SUN4C)
                   3980:
1.43      pk       3981: /* remove from kernel */
1.55      pk       3982: /*static*/ void
                   3983: pmap_rmk4_4c(pm, va, endva, vr, vs)
1.124   ! pk       3984:        struct pmap *pm;
        !          3985:        vaddr_t va, endva;
        !          3986:        int vr, vs;
        !          3987: {
        !          3988:        int i, tpte, perpage, npg;
        !          3989:        struct pvlist *pv;
        !          3990:        int nleft, pmeg;
1.43      pk       3991:        struct regmap *rp;
                   3992:        struct segmap *sp;
                   3993:
                   3994:        rp = &pm->pm_regmap[vr];
                   3995:        sp = &rp->rg_segmap[vs];
                   3996:
                   3997:        if (rp->rg_nsegmap == 0)
                   3998:                return;
                   3999:
                   4000: #ifdef DEBUG
                   4001:        if (rp->rg_segmap == NULL)
                   4002:                panic("pmap_rmk: no segments");
                   4003: #endif
                   4004:
                   4005:        if ((nleft = sp->sg_npte) == 0)
                   4006:                return;
                   4007:
                   4008:        pmeg = sp->sg_pmeg;
1.1       deraadt  4009:
                   4010: #ifdef DEBUG
                   4011:        if (pmeg == seginval)
                   4012:                panic("pmap_rmk: not loaded");
                   4013:        if (pm->pm_ctx == NULL)
                   4014:                panic("pmap_rmk: lost context");
                   4015: #endif
                   4016:
1.71      pk       4017:        setcontext4(0);
1.1       deraadt  4018:        /* decide how to flush cache */
                   4019:        npg = (endva - va) >> PGSHIFT;
                   4020:        if (npg > PMAP_RMK_MAGIC) {
                   4021:                /* flush the whole segment */
                   4022:                perpage = 0;
1.69      pk       4023:                cache_flush_segment(vr, vs);
1.1       deraadt  4024:        } else {
                   4025:                /* flush each page individually; some never need flushing */
1.69      pk       4026:                perpage = (CACHEINFO.c_vactype != VAC_NONE);
1.1       deraadt  4027:        }
                   4028:        while (va < endva) {
1.55      pk       4029:                tpte = getpte4(va);
1.1       deraadt  4030:                if ((tpte & PG_V) == 0) {
1.63      pk       4031:                        va += NBPG;
1.1       deraadt  4032:                        continue;
                   4033:                }
1.35      pk       4034:                if ((tpte & PG_TYPE) == PG_OBMEM) {
                   4035:                        /* if cacheable, flush page as needed */
                   4036:                        if (perpage && (tpte & PG_NC) == 0)
1.1       deraadt  4037:                                cache_flush_page(va);
1.60      pk       4038:                        i = ptoa(tpte & PG_PFNUM);
1.1       deraadt  4039:                        if (managed(i)) {
                   4040:                                pv = pvhead(i);
1.55      pk       4041:                                pv->pv_flags |= MR4_4C(tpte);
1.58      pk       4042:                                pv_unlink4_4c(pv, pm, va);
1.1       deraadt  4043:                        }
                   4044:                }
                   4045:                nleft--;
1.55      pk       4046:                setpte4(va, 0);
1.1       deraadt  4047:                va += NBPG;
                   4048:        }
                   4049:
                   4050:        /*
                   4051:         * If the segment is all gone, remove it from everyone and
                   4052:         * free the MMU entry.
                   4053:         */
1.43      pk       4054:        if ((sp->sg_npte = nleft) == 0) {
                   4055:                va = VSTOVA(vr,vs);             /* retract */
1.69      pk       4056: #if defined(SUN4_MMU3L)
                   4057:                if (HASSUN4_MMU3L)
1.1       deraadt  4058:                        setsegmap(va, seginval);
1.43      pk       4059:                else
                   4060: #endif
                   4061:                        for (i = ncontext; --i >= 0;) {
1.71      pk       4062:                                setcontext4(i);
1.43      pk       4063:                                setsegmap(va, seginval);
                   4064:                        }
                   4065:                me_free(pm, pmeg);
                   4066:                if (--rp->rg_nsegmap == 0) {
1.69      pk       4067: #if defined(SUN4_MMU3L)
                   4068:                        if (HASSUN4_MMU3L) {
1.43      pk       4069:                                for (i = ncontext; --i >= 0;) {
1.71      pk       4070:                                        setcontext4(i);
1.43      pk       4071:                                        setregmap(va, reginval);
                   4072:                                }
                   4073:                                /* note: context is 0 */
                   4074:                                region_free(pm, rp->rg_smeg);
                   4075:                        }
                   4076: #endif
1.1       deraadt  4077:                }
                   4078:        }
                   4079: }
                   4080:
1.55      pk       4081: #endif /* sun4, sun4c */
1.1       deraadt  4082:
1.55      pk       4083: #if defined(SUN4M)             /* 4M version of pmap_rmk */
                   4084: /* remove from kernel (4m)*/
                   4085: /*static*/ void
                   4086: pmap_rmk4m(pm, va, endva, vr, vs)
1.124   ! pk       4087:        struct pmap *pm;
        !          4088:        vaddr_t va, endva;
        !          4089:        int vr, vs;
        !          4090: {
        !          4091:        int i, tpte, perpage, npg;
        !          4092:        struct pvlist *pv;
        !          4093:        int nleft;
1.43      pk       4094:        struct regmap *rp;
                   4095:        struct segmap *sp;
                   4096:
                   4097:        rp = &pm->pm_regmap[vr];
1.55      pk       4098:        sp = &rp->rg_segmap[vs];
                   4099:
1.43      pk       4100:        if (rp->rg_nsegmap == 0)
                   4101:                return;
1.55      pk       4102:
                   4103: #ifdef DEBUG
1.43      pk       4104:        if (rp->rg_segmap == NULL)
1.55      pk       4105:                panic("pmap_rmk: no segments");
                   4106: #endif
1.43      pk       4107:
                   4108:        if ((nleft = sp->sg_npte) == 0)
                   4109:                return;
                   4110:
1.55      pk       4111: #ifdef DEBUG
                   4112:        if (sp->sg_pte == NULL || rp->rg_seg_ptps == NULL)
                   4113:                panic("pmap_rmk: segment/region does not exist");
                   4114:        if (pm->pm_ctx == NULL)
                   4115:                panic("pmap_rmk: lost context");
                   4116: #endif
1.43      pk       4117:
1.71      pk       4118:        setcontext4m(0);
1.55      pk       4119:        /* decide how to flush cache */
                   4120:        npg = (endva - va) >> PGSHIFT;
                   4121:        if (npg > PMAP_RMK_MAGIC) {
                   4122:                /* flush the whole segment */
                   4123:                perpage = 0;
1.69      pk       4124:                if (CACHEINFO.c_vactype != VAC_NONE)
1.55      pk       4125:                        cache_flush_segment(vr, vs);
                   4126:        } else {
                   4127:                /* flush each page individually; some never need flushing */
1.69      pk       4128:                perpage = (CACHEINFO.c_vactype != VAC_NONE);
1.55      pk       4129:        }
                   4130:        while (va < endva) {
1.72      pk       4131:                tpte = sp->sg_pte[VA_SUN4M_VPG(va)];
1.55      pk       4132:                if ((tpte & SRMMU_TETYPE) != SRMMU_TEPTE) {
1.72      pk       4133: #ifdef DEBUG
                   4134:                        if ((pmapdebug & PDB_SANITYCHK) &&
                   4135:                            (getpte4m(va) & SRMMU_TETYPE) == SRMMU_TEPTE)
1.91      fair     4136:                                panic("pmap_rmk: Spurious kTLB entry for 0x%lx",
1.81      pk       4137:                                      va);
1.72      pk       4138: #endif
1.61      pk       4139:                        va += NBPG;
1.55      pk       4140:                        continue;
                   4141:                }
                   4142:                if ((tpte & SRMMU_PGTYPE) == PG_SUN4M_OBMEM) {
                   4143:                        /* if cacheable, flush page as needed */
                   4144:                        if (perpage && (tpte & SRMMU_PG_C))
1.69      pk       4145:                                cache_flush_page(va);
1.60      pk       4146:                        i = ptoa((tpte & SRMMU_PPNMASK) >> SRMMU_PPNSHIFT);
1.55      pk       4147:                        if (managed(i)) {
                   4148:                                pv = pvhead(i);
                   4149:                                pv->pv_flags |= MR4M(tpte);
1.58      pk       4150:                                pv_unlink4m(pv, pm, va);
1.55      pk       4151:                        }
                   4152:                }
                   4153:                nleft--;
1.72      pk       4154:                tlb_flush_page(va);
                   4155:                setpgt4m(&sp->sg_pte[VA_SUN4M_VPG(va)], SRMMU_TEINVALID);
1.55      pk       4156:                va += NBPG;
                   4157:        }
                   4158:
                   4159:        /*
                   4160:         * If the segment is all gone, remove it from everyone and
                   4161:         * flush the TLB.
                   4162:         */
                   4163:        if ((sp->sg_npte = nleft) == 0) {
                   4164:                va = VSTOVA(vr,vs);             /* retract */
                   4165:
                   4166:                tlb_flush_segment(vr, vs);      /* Paranoia? */
                   4167:
1.58      pk       4168:                /*
                   4169:                 * We need to free the segment table. The problem is that
1.55      pk       4170:                 * we can't free the initial (bootstrap) mapping, so
                   4171:                 * we have to explicitly check for this case (ugh).
                   4172:                 */
                   4173:                if (va < virtual_avail) {
                   4174: #ifdef DEBUG
1.66      christos 4175:                        printf("pmap_rmk4m: attempt to free base kernel alloc\n");
1.55      pk       4176: #endif
                   4177:                        /* sp->sg_pte = NULL; */
                   4178:                        sp->sg_npte = 0;
                   4179:                        return;
                   4180:                }
                   4181:                /* no need to free the table; it is statically allocated */
                   4182:                qzero(sp->sg_pte, SRMMU_L3SIZE * sizeof(long));
                   4183:        }
                   4184:        /* if we're done with a region, leave it wired */
                   4185: }
                   4186: #endif /* sun4m */
                   4187: /*
                   4188:  * Just like pmap_rmk_magic, but we have a different threshold.
                   4189:  * Note that this may well deserve further tuning work.
                   4190:  */
                   4191: #if 0
                   4192: #define        PMAP_RMU_MAGIC  (cacheinfo.c_hwflush?4:64)      /* if > magic, use cache_flush_segment */
                   4193: #else
                   4194: #define        PMAP_RMU_MAGIC  4       /* if > magic, use cache_flush_segment */
                   4195: #endif
                   4196:
                   4197: #if defined(SUN4) || defined(SUN4C)
                   4198:
                   4199: /* remove from user */
                   4200: /*static*/ void
                   4201: pmap_rmu4_4c(pm, va, endva, vr, vs)
1.124   ! pk       4202:        struct pmap *pm;
        !          4203:        vaddr_t va, endva;
        !          4204:        int vr, vs;
        !          4205: {
        !          4206:        int *pte0, i, pteva, tpte, perpage, npg;
        !          4207:        struct pvlist *pv;
        !          4208:        int nleft, pmeg;
1.55      pk       4209:        struct regmap *rp;
                   4210:        struct segmap *sp;
                   4211:
                   4212:        rp = &pm->pm_regmap[vr];
                   4213:        if (rp->rg_nsegmap == 0)
                   4214:                return;
                   4215:        if (rp->rg_segmap == NULL)
                   4216:                panic("pmap_rmu: no segments");
                   4217:
                   4218:        sp = &rp->rg_segmap[vs];
                   4219:        if ((nleft = sp->sg_npte) == 0)
                   4220:                return;
                   4221:        if (sp->sg_pte == NULL)
                   4222:                panic("pmap_rmu: no pages");
                   4223:
                   4224:
                   4225:        pmeg = sp->sg_pmeg;
                   4226:        pte0 = sp->sg_pte;
1.1       deraadt  4227:
                   4228:        if (pmeg == seginval) {
1.124   ! pk       4229:                int *pte = pte0 + VA_VPG(va);
1.1       deraadt  4230:
                   4231:                /*
                   4232:                 * PTEs are not in MMU.  Just invalidate software copies.
                   4233:                 */
1.63      pk       4234:                for (; va < endva; pte++, va += NBPG) {
1.1       deraadt  4235:                        tpte = *pte;
                   4236:                        if ((tpte & PG_V) == 0) {
                   4237:                                /* nothing to remove (braindead VM layer) */
                   4238:                                continue;
                   4239:                        }
                   4240:                        if ((tpte & PG_TYPE) == PG_OBMEM) {
1.60      pk       4241:                                i = ptoa(tpte & PG_PFNUM);
1.21      deraadt  4242:                                if (managed(i))
1.58      pk       4243:                                        pv_unlink4_4c(pvhead(i), pm, va);
1.1       deraadt  4244:                        }
                   4245:                        nleft--;
                   4246:                        *pte = 0;
                   4247:                }
1.43      pk       4248:                if ((sp->sg_npte = nleft) == 0) {
1.49      pk       4249:                        free(pte0, M_VMPMAP);
1.43      pk       4250:                        sp->sg_pte = NULL;
                   4251:                        if (--rp->rg_nsegmap == 0) {
1.49      pk       4252:                                free(rp->rg_segmap, M_VMPMAP);
1.43      pk       4253:                                rp->rg_segmap = NULL;
1.69      pk       4254: #if defined(SUN4_MMU3L)
                   4255:                                if (HASSUN4_MMU3L && rp->rg_smeg != reginval) {
1.43      pk       4256:                                        if (pm->pm_ctx) {
1.71      pk       4257:                                                setcontext4(pm->pm_ctxnum);
1.43      pk       4258:                                                setregmap(va, reginval);
                   4259:                                        } else
1.71      pk       4260:                                                setcontext4(0);
1.43      pk       4261:                                        region_free(pm, rp->rg_smeg);
                   4262:                                }
                   4263: #endif
                   4264:                        }
1.1       deraadt  4265:                }
1.43      pk       4266:                return;
1.1       deraadt  4267:        }
                   4268:
                   4269:        /*
                   4270:         * PTEs are in MMU.  Invalidate in hardware, update ref &
                   4271:         * mod bits, and flush cache if required.
                   4272:         */
1.43      pk       4273:        if (CTX_USABLE(pm,rp)) {
1.1       deraadt  4274:                /* process has a context, must flush cache */
                   4275:                npg = (endva - va) >> PGSHIFT;
1.71      pk       4276:                setcontext4(pm->pm_ctxnum);
1.1       deraadt  4277:                if (npg > PMAP_RMU_MAGIC) {
                   4278:                        perpage = 0; /* flush the whole segment */
1.69      pk       4279:                        cache_flush_segment(vr, vs);
1.1       deraadt  4280:                } else
1.69      pk       4281:                        perpage = (CACHEINFO.c_vactype != VAC_NONE);
1.1       deraadt  4282:                pteva = va;
                   4283:        } else {
                   4284:                /* no context, use context 0; cache flush unnecessary */
1.71      pk       4285:                setcontext4(0);
1.69      pk       4286:                if (HASSUN4_MMU3L)
1.43      pk       4287:                        setregmap(0, tregion);
1.1       deraadt  4288:                /* XXX use per-cpu pteva? */
                   4289:                setsegmap(0, pmeg);
1.18      deraadt  4290:                pteva = VA_VPG(va) << PGSHIFT;
1.1       deraadt  4291:                perpage = 0;
                   4292:        }
1.63      pk       4293:        for (; va < endva; pteva += NBPG, va += NBPG) {
1.55      pk       4294:                tpte = getpte4(pteva);
1.1       deraadt  4295:                if ((tpte & PG_V) == 0)
                   4296:                        continue;
1.35      pk       4297:                if ((tpte & PG_TYPE) == PG_OBMEM) {
                   4298:                        /* if cacheable, flush page as needed */
                   4299:                        if (perpage && (tpte & PG_NC) == 0)
1.1       deraadt  4300:                                cache_flush_page(va);
1.60      pk       4301:                        i = ptoa(tpte & PG_PFNUM);
1.1       deraadt  4302:                        if (managed(i)) {
                   4303:                                pv = pvhead(i);
1.55      pk       4304:                                pv->pv_flags |= MR4_4C(tpte);
1.58      pk       4305:                                pv_unlink4_4c(pv, pm, va);
1.1       deraadt  4306:                        }
                   4307:                }
                   4308:                nleft--;
1.55      pk       4309:                setpte4(pteva, 0);
1.43      pk       4310:                pte0[VA_VPG(pteva)] = 0;
1.1       deraadt  4311:        }
                   4312:
                   4313:        /*
                   4314:         * If the segment is all gone, and the context is loaded, give
                   4315:         * the segment back.
                   4316:         */
1.43      pk       4317:        if ((sp->sg_npte = nleft) == 0 /* ??? && pm->pm_ctx != NULL*/) {
                   4318: #ifdef DEBUG
                   4319: if (pm->pm_ctx == NULL) {
1.66      christos 4320:        printf("pmap_rmu: no context here...");
1.43      pk       4321: }
                   4322: #endif
                   4323:                va = VSTOVA(vr,vs);             /* retract */
                   4324:                if (CTX_USABLE(pm,rp))
                   4325:                        setsegmap(va, seginval);
1.69      pk       4326:                else if (HASSUN4_MMU3L && rp->rg_smeg != reginval) {
1.43      pk       4327:                        /* note: context already set earlier */
                   4328:                        setregmap(0, rp->rg_smeg);
                   4329:                        setsegmap(vs << SGSHIFT, seginval);
                   4330:                }
1.49      pk       4331:                free(pte0, M_VMPMAP);
1.43      pk       4332:                sp->sg_pte = NULL;
1.1       deraadt  4333:                me_free(pm, pmeg);
1.13      pk       4334:
1.43      pk       4335:                if (--rp->rg_nsegmap == 0) {
1.49      pk       4336:                        free(rp->rg_segmap, M_VMPMAP);
1.43      pk       4337:                        rp->rg_segmap = NULL;
                   4338:                        GAP_WIDEN(pm,vr);
                   4339:
1.69      pk       4340: #if defined(SUN4_MMU3L)
                   4341:                        if (HASSUN4_MMU3L && rp->rg_smeg != reginval) {
1.43      pk       4342:                                /* note: context already set */
                   4343:                                if (pm->pm_ctx)
                   4344:                                        setregmap(va, reginval);
                   4345:                                region_free(pm, rp->rg_smeg);
                   4346:                        }
                   4347: #endif
                   4348:                }
1.13      pk       4349:
1.1       deraadt  4350:        }
                   4351: }
                   4352:
1.55      pk       4353: #endif /* sun4,4c */
                   4354:
                   4355: #if defined(SUN4M)             /* 4M version of pmap_rmu */
                   4356: /* remove from user */
                   4357: /*static*/ void
                   4358: pmap_rmu4m(pm, va, endva, vr, vs)
1.124   ! pk       4359:        struct pmap *pm;
        !          4360:        vaddr_t va, endva;
        !          4361:        int vr, vs;
        !          4362: {
        !          4363:        int *pte0, i, perpage, npg;
        !          4364:        struct pvlist *pv;
        !          4365:        int nleft;
1.55      pk       4366:        struct regmap *rp;
                   4367:        struct segmap *sp;
                   4368:
                   4369:        rp = &pm->pm_regmap[vr];
                   4370:        if (rp->rg_nsegmap == 0)
                   4371:                return;
                   4372:        if (rp->rg_segmap == NULL)
                   4373:                panic("pmap_rmu: no segments");
                   4374:
                   4375:        sp = &rp->rg_segmap[vs];
                   4376:        if ((nleft = sp->sg_npte) == 0)
                   4377:                return;
1.76      pk       4378:
1.55      pk       4379:        if (sp->sg_pte == NULL)
                   4380:                panic("pmap_rmu: no pages");
                   4381:
                   4382:        pte0 = sp->sg_pte;
                   4383:
                   4384:        /*
                   4385:         * Invalidate PTE in MMU pagetables. Flush cache if necessary.
                   4386:         */
1.72      pk       4387:        if (pm->pm_ctx) {
1.55      pk       4388:                /* process has a context, must flush cache */
1.71      pk       4389:                setcontext4m(pm->pm_ctxnum);
1.69      pk       4390:                if (CACHEINFO.c_vactype != VAC_NONE) {
1.63      pk       4391:                        npg = (endva - va) >> PGSHIFT;
                   4392:                        if (npg > PMAP_RMU_MAGIC) {
                   4393:                                perpage = 0; /* flush the whole segment */
1.55      pk       4394:                                cache_flush_segment(vr, vs);
1.63      pk       4395:                        } else
                   4396:                                perpage = 1;
1.55      pk       4397:                } else
1.63      pk       4398:                        perpage = 0;
1.55      pk       4399:        } else {
                   4400:                /* no context; cache flush unnecessary */
                   4401:                perpage = 0;
                   4402:        }
1.63      pk       4403:        for (; va < endva; va += NBPG) {
1.100     pk       4404:                int tpte;
                   4405:
                   4406:                if (pm->pm_ctx)
                   4407:                        tlb_flush_page(va);
1.72      pk       4408:
1.100     pk       4409:                tpte = pte0[VA_SUN4M_VPG(va)];
1.72      pk       4410:
                   4411:                if ((tpte & SRMMU_TETYPE) != SRMMU_TEPTE) {
                   4412: #ifdef DEBUG
                   4413:                        if ((pmapdebug & PDB_SANITYCHK) &&
                   4414:                            pm->pm_ctx &&
                   4415:                            (getpte4m(va) & SRMMU_TEPTE) == SRMMU_TEPTE)
1.91      fair     4416:                                panic("pmap_rmu: Spurious uTLB entry for 0x%lx",
1.81      pk       4417:                                      va);
1.72      pk       4418: #endif
1.55      pk       4419:                        continue;
1.72      pk       4420:                }
                   4421:
1.55      pk       4422:                if ((tpte & SRMMU_PGTYPE) == PG_SUN4M_OBMEM) {
                   4423:                        /* if cacheable, flush page as needed */
                   4424:                        if (perpage && (tpte & SRMMU_PG_C))
1.60      pk       4425:                                cache_flush_page(va);
                   4426:                        i = ptoa((tpte & SRMMU_PPNMASK) >> SRMMU_PPNSHIFT);
1.55      pk       4427:                        if (managed(i)) {
                   4428:                                pv = pvhead(i);
                   4429:                                pv->pv_flags |= MR4M(tpte);
1.58      pk       4430:                                pv_unlink4m(pv, pm, va);
1.55      pk       4431:                        }
                   4432:                }
                   4433:                nleft--;
1.72      pk       4434:                setpgt4m(&pte0[VA_SUN4M_VPG(va)], SRMMU_TEINVALID);
1.55      pk       4435:        }
                   4436:
                   4437:        /*
                   4438:         * If the segment is all gone, and the context is loaded, give
                   4439:         * the segment back.
                   4440:         */
1.72      pk       4441:        if ((sp->sg_npte = nleft) == 0) {
1.55      pk       4442: #ifdef DEBUG
                   4443:                if (pm->pm_ctx == NULL) {
1.66      christos 4444:                        printf("pmap_rmu: no context here...");
1.55      pk       4445:                }
                   4446: #endif
                   4447:                va = VSTOVA(vr,vs);             /* retract */
                   4448:
1.88      pk       4449:                if (pm->pm_ctx)
                   4450:                        tlb_flush_segment(vr, vs);      /* Paranoia? */
1.73      pk       4451:                setpgt4m(&rp->rg_seg_ptps[vs], SRMMU_TEINVALID);
1.121     pk       4452:                pool_put(&L23_pool, pte0);
1.55      pk       4453:                sp->sg_pte = NULL;
                   4454:
                   4455:                if (--rp->rg_nsegmap == 0) {
1.88      pk       4456:                        if (pm->pm_ctx)
                   4457:                                tlb_flush_context();    /* Paranoia? */
                   4458:                        setpgt4m(&pm->pm_reg_ptps[vr], SRMMU_TEINVALID);
1.55      pk       4459:                        free(rp->rg_segmap, M_VMPMAP);
                   4460:                        rp->rg_segmap = NULL;
1.121     pk       4461:                        pool_put(&L23_pool, rp->rg_seg_ptps);
1.55      pk       4462:                }
                   4463:        }
                   4464: }
                   4465: #endif /* sun4m */
                   4466:
1.1       deraadt  4467: /*
                   4468:  * Lower (make more strict) the protection on the specified
                   4469:  * physical page.
                   4470:  *
                   4471:  * There are only two cases: either the protection is going to 0
                   4472:  * (in which case we do the dirty work here), or it is going from
                   4473:  * to read-only (in which case pv_changepte does the trick).
                   4474:  */
1.55      pk       4475:
                   4476: #if defined(SUN4) || defined(SUN4C)
1.1       deraadt  4477: void
1.55      pk       4478: pmap_page_protect4_4c(pa, prot)
1.124   ! pk       4479:        paddr_t pa;
1.1       deraadt  4480:        vm_prot_t prot;
                   4481: {
1.124   ! pk       4482:        struct pvlist *pv, *pv0, *npv;
        !          4483:        struct pmap *pm;
        !          4484:        int va, vr, vs, pteva, tpte;
        !          4485:        int flags, nleft, i, s, ctx;
1.43      pk       4486:        struct regmap *rp;
                   4487:        struct segmap *sp;
1.1       deraadt  4488:
                   4489: #ifdef DEBUG
                   4490:        if ((pmapdebug & PDB_CHANGEPROT) ||
                   4491:            (pmapdebug & PDB_REMOVE && prot == VM_PROT_NONE))
1.91      fair     4492:                printf("pmap_page_protect(0x%lx, 0x%x)\n", pa, prot);
1.1       deraadt  4493: #endif
                   4494:        /*
                   4495:         * Skip unmanaged pages, or operations that do not take
                   4496:         * away write permission.
                   4497:         */
1.82      pk       4498:        if ((pa & (PMAP_TNC_4 & ~PMAP_NC)) ||
1.34      pk       4499:             !managed(pa) || prot & VM_PROT_WRITE)
1.1       deraadt  4500:                return;
                   4501:        write_user_windows();   /* paranoia */
                   4502:        if (prot & VM_PROT_READ) {
1.58      pk       4503:                pv_changepte4_4c(pvhead(pa), 0, PG_W);
1.1       deraadt  4504:                return;
                   4505:        }
                   4506:
                   4507:        /*
                   4508:         * Remove all access to all people talking to this page.
                   4509:         * Walk down PV list, removing all mappings.
                   4510:         * The logic is much like that for pmap_remove,
                   4511:         * but we know we are removing exactly one page.
                   4512:         */
                   4513:        pv = pvhead(pa);
                   4514:        s = splpmap();
                   4515:        if ((pm = pv->pv_pmap) == NULL) {
                   4516:                splx(s);
                   4517:                return;
                   4518:        }
1.71      pk       4519:        ctx = getcontext4();
1.1       deraadt  4520:        pv0 = pv;
                   4521:        flags = pv->pv_flags & ~PV_NC;
                   4522:        for (;; pm = pv->pv_pmap) {
                   4523:                va = pv->pv_va;
1.43      pk       4524:                vr = VA_VREG(va);
                   4525:                vs = VA_VSEG(va);
                   4526:                rp = &pm->pm_regmap[vr];
                   4527:                if (rp->rg_nsegmap == 0)
                   4528:                        panic("pmap_remove_all: empty vreg");
                   4529:                sp = &rp->rg_segmap[vs];
                   4530:                if ((nleft = sp->sg_npte) == 0)
1.1       deraadt  4531:                        panic("pmap_remove_all: empty vseg");
                   4532:                nleft--;
1.43      pk       4533:                sp->sg_npte = nleft;
                   4534:
                   4535:                if (sp->sg_pmeg == seginval) {
                   4536:                        /* Definitely not a kernel map */
1.1       deraadt  4537:                        if (nleft) {
1.43      pk       4538:                                sp->sg_pte[VA_VPG(va)] = 0;
1.1       deraadt  4539:                        } else {
1.49      pk       4540:                                free(sp->sg_pte, M_VMPMAP);
1.43      pk       4541:                                sp->sg_pte = NULL;
                   4542:                                if (--rp->rg_nsegmap == 0) {
1.49      pk       4543:                                        free(rp->rg_segmap, M_VMPMAP);
1.43      pk       4544:                                        rp->rg_segmap = NULL;
                   4545:                                        GAP_WIDEN(pm,vr);
1.69      pk       4546: #if defined(SUN4_MMU3L)
                   4547:                                        if (HASSUN4_MMU3L && rp->rg_smeg != reginval) {
1.43      pk       4548:                                                if (pm->pm_ctx) {
1.71      pk       4549:                                                        setcontext4(pm->pm_ctxnum);
1.43      pk       4550:                                                        setregmap(va, reginval);
                   4551:                                                } else
1.71      pk       4552:                                                        setcontext4(0);
1.43      pk       4553:                                                region_free(pm, rp->rg_smeg);
                   4554:                                        }
                   4555: #endif
                   4556:                                }
1.1       deraadt  4557:                        }
                   4558:                        goto nextpv;
                   4559:                }
1.84      pk       4560:
1.43      pk       4561:                if (CTX_USABLE(pm,rp)) {
1.71      pk       4562:                        setcontext4(pm->pm_ctxnum);
1.1       deraadt  4563:                        pteva = va;
1.69      pk       4564:                        cache_flush_page(va);
1.1       deraadt  4565:                } else {
1.71      pk       4566:                        setcontext4(0);
1.1       deraadt  4567:                        /* XXX use per-cpu pteva? */
1.69      pk       4568:                        if (HASSUN4_MMU3L)
1.43      pk       4569:                                setregmap(0, tregion);
                   4570:                        setsegmap(0, sp->sg_pmeg);
1.18      deraadt  4571:                        pteva = VA_VPG(va) << PGSHIFT;
1.1       deraadt  4572:                }
1.43      pk       4573:
1.55      pk       4574:                tpte = getpte4(pteva);
1.43      pk       4575:                if ((tpte & PG_V) == 0)
1.91      fair     4576:                        panic("pmap_page_protect !PG_V: ctx %d, va 0x%x, pte 0x%x",
1.84      pk       4577:                              pm->pm_ctxnum, va, tpte);
1.55      pk       4578:                flags |= MR4_4C(tpte);
1.43      pk       4579:
1.1       deraadt  4580:                if (nleft) {
1.55      pk       4581:                        setpte4(pteva, 0);
1.44      pk       4582:                        if (sp->sg_pte != NULL)
                   4583:                                sp->sg_pte[VA_VPG(pteva)] = 0;
1.84      pk       4584:                        goto nextpv;
                   4585:                }
                   4586:
                   4587:                /* Entire segment is gone */
                   4588:                if (pm == pmap_kernel()) {
                   4589: #if defined(SUN4_MMU3L)
                   4590:                        if (!HASSUN4_MMU3L)
1.43      pk       4591: #endif
1.84      pk       4592:                                for (i = ncontext; --i >= 0;) {
                   4593:                                        setcontext4(i);
                   4594:                                        setsegmap(va, seginval);
                   4595:                                }
                   4596:                        me_free(pm, sp->sg_pmeg);
                   4597:                        if (--rp->rg_nsegmap == 0) {
1.69      pk       4598: #if defined(SUN4_MMU3L)
1.84      pk       4599:                                if (HASSUN4_MMU3L) {
1.43      pk       4600:                                        for (i = ncontext; --i >= 0;) {
1.71      pk       4601:                                                setcontext4(i);
1.84      pk       4602:                                                setregmap(va, reginval);
1.43      pk       4603:                                        }
1.84      pk       4604:                                        region_free(pm, rp->rg_smeg);
                   4605:                                }
1.43      pk       4606: #endif
1.84      pk       4607:                        }
                   4608:                } else {
                   4609:                        if (CTX_USABLE(pm,rp))
                   4610:                                /* `pteva'; we might be using tregion */
                   4611:                                setsegmap(pteva, seginval);
1.69      pk       4612: #if defined(SUN4_MMU3L)
1.84      pk       4613:                        else if (HASSUN4_MMU3L &&
                   4614:                                 rp->rg_smeg != reginval) {
                   4615:                                /* note: context already set earlier */
                   4616:                                setregmap(0, rp->rg_smeg);
                   4617:                                setsegmap(vs << SGSHIFT, seginval);
                   4618:                        }
1.43      pk       4619: #endif
1.84      pk       4620:                        free(sp->sg_pte, M_VMPMAP);
                   4621:                        sp->sg_pte = NULL;
                   4622:                        me_free(pm, sp->sg_pmeg);
1.43      pk       4623:
1.84      pk       4624:                        if (--rp->rg_nsegmap == 0) {
1.69      pk       4625: #if defined(SUN4_MMU3L)
1.84      pk       4626:                                if (HASSUN4_MMU3L &&
                   4627:                                    rp->rg_smeg != reginval) {
                   4628:                                        if (pm->pm_ctx)
                   4629:                                                setregmap(va, reginval);
                   4630:                                        region_free(pm, rp->rg_smeg);
                   4631:                                }
1.43      pk       4632: #endif
1.84      pk       4633:                                free(rp->rg_segmap, M_VMPMAP);
                   4634:                                rp->rg_segmap = NULL;
                   4635:                                GAP_WIDEN(pm,vr);
1.1       deraadt  4636:                        }
                   4637:                }
1.84      pk       4638:
1.1       deraadt  4639:        nextpv:
                   4640:                npv = pv->pv_next;
                   4641:                if (pv != pv0)
1.122     pk       4642:                        pool_put(&pv_pool, pv);
1.1       deraadt  4643:                if ((pv = npv) == NULL)
                   4644:                        break;
                   4645:        }
                   4646:        pv0->pv_pmap = NULL;
1.11      pk       4647:        pv0->pv_next = NULL; /* ? */
1.1       deraadt  4648:        pv0->pv_flags = flags;
1.71      pk       4649:        setcontext4(ctx);
1.1       deraadt  4650:        splx(s);
                   4651: }
                   4652:
                   4653: /*
                   4654:  * Lower (make more strict) the protection on the specified
                   4655:  * range of this pmap.
                   4656:  *
                   4657:  * There are only two cases: either the protection is going to 0
                   4658:  * (in which case we call pmap_remove to do the dirty work), or
                   4659:  * it is going from read/write to read-only.  The latter is
                   4660:  * fairly easy.
                   4661:  */
                   4662: void
1.55      pk       4663: pmap_protect4_4c(pm, sva, eva, prot)
1.124   ! pk       4664:        struct pmap *pm;
        !          4665:        vaddr_t sva, eva;
1.1       deraadt  4666:        vm_prot_t prot;
                   4667: {
1.124   ! pk       4668:        int va, nva, vr, vs;
        !          4669:        int s, ctx;
1.43      pk       4670:        struct regmap *rp;
                   4671:        struct segmap *sp;
1.1       deraadt  4672:
                   4673:        if (pm == NULL || prot & VM_PROT_WRITE)
                   4674:                return;
1.43      pk       4675:
1.1       deraadt  4676:        if ((prot & VM_PROT_READ) == 0) {
                   4677:                pmap_remove(pm, sva, eva);
                   4678:                return;
                   4679:        }
                   4680:
                   4681:        write_user_windows();
1.71      pk       4682:        ctx = getcontext4();
1.1       deraadt  4683:        s = splpmap();
                   4684:        simple_lock(&pm->pm_lock);
                   4685:
                   4686:        for (va = sva; va < eva;) {
1.43      pk       4687:                vr = VA_VREG(va);
                   4688:                vs = VA_VSEG(va);
                   4689:                rp = &pm->pm_regmap[vr];
                   4690:                nva = VSTOVA(vr,vs + 1);
1.1       deraadt  4691: if (nva == 0) panic("pmap_protect: last segment");     /* cannot happen */
                   4692:                if (nva > eva)
                   4693:                        nva = eva;
1.43      pk       4694:                if (rp->rg_nsegmap == 0) {
1.1       deraadt  4695:                        va = nva;
                   4696:                        continue;
                   4697:                }
1.43      pk       4698: #ifdef DEBUG
                   4699:                if (rp->rg_segmap == NULL)
                   4700:                        panic("pmap_protect: no segments");
                   4701: #endif
                   4702:                sp = &rp->rg_segmap[vs];
                   4703:                if (sp->sg_npte == 0) {
                   4704:                        va = nva;
                   4705:                        continue;
                   4706:                }
                   4707: #ifdef DEBUG
                   4708:                if (pm != pmap_kernel() && sp->sg_pte == NULL)
                   4709:                        panic("pmap_protect: no pages");
                   4710: #endif
                   4711:                if (sp->sg_pmeg == seginval) {
1.124   ! pk       4712:                        int *pte = &sp->sg_pte[VA_VPG(va)];
1.1       deraadt  4713:
                   4714:                        /* not in MMU; just clear PG_W from core copies */
                   4715:                        for (; va < nva; va += NBPG)
                   4716:                                *pte++ &= ~PG_W;
                   4717:                } else {
                   4718:                        /* in MMU: take away write bits from MMU PTEs */
1.43      pk       4719:                        if (CTX_USABLE(pm,rp)) {
1.124   ! pk       4720:                                int tpte;
1.1       deraadt  4721:
                   4722:                                /*
                   4723:                                 * Flush cache so that any existing cache
                   4724:                                 * tags are updated.  This is really only
                   4725:                                 * needed for PTEs that lose PG_W.
                   4726:                                 */
1.71      pk       4727:                                setcontext4(pm->pm_ctxnum);
1.1       deraadt  4728:                                for (; va < nva; va += NBPG) {
1.55      pk       4729:                                        tpte = getpte4(va);
1.1       deraadt  4730:                                        pmap_stats.ps_npg_prot_all++;
1.35      pk       4731:                                        if ((tpte & (PG_W|PG_TYPE)) ==
                   4732:                                            (PG_W|PG_OBMEM)) {
1.1       deraadt  4733:                                                pmap_stats.ps_npg_prot_actual++;
1.69      pk       4734:                                                cache_flush_page(va);
1.55      pk       4735:                                                setpte4(va, tpte & ~PG_W);
1.1       deraadt  4736:                                        }
                   4737:                                }
                   4738:                        } else {
1.124   ! pk       4739:                                int pteva;
1.1       deraadt  4740:
                   4741:                                /*
                   4742:                                 * No context, hence not cached;
                   4743:                                 * just update PTEs.
                   4744:                                 */
1.71      pk       4745:                                setcontext4(0);
1.1       deraadt  4746:                                /* XXX use per-cpu pteva? */
1.69      pk       4747:                                if (HASSUN4_MMU3L)
1.43      pk       4748:                                        setregmap(0, tregion);
                   4749:                                setsegmap(0, sp->sg_pmeg);
1.18      deraadt  4750:                                pteva = VA_VPG(va) << PGSHIFT;
1.1       deraadt  4751:                                for (; va < nva; pteva += NBPG, va += NBPG)
1.55      pk       4752:                                        setpte4(pteva, getpte4(pteva) & ~PG_W);
1.1       deraadt  4753:                        }
                   4754:                }
                   4755:        }
                   4756:        simple_unlock(&pm->pm_lock);
1.12      pk       4757:        splx(s);
1.71      pk       4758:        setcontext4(ctx);
1.1       deraadt  4759: }
                   4760:
                   4761: /*
                   4762:  * Change the protection and/or wired status of the given (MI) virtual page.
                   4763:  * XXX: should have separate function (or flag) telling whether only wiring
                   4764:  * is changing.
                   4765:  */
                   4766: void
1.55      pk       4767: pmap_changeprot4_4c(pm, va, prot, wired)
1.124   ! pk       4768:        struct pmap *pm;
        !          4769:        vaddr_t va;
1.1       deraadt  4770:        vm_prot_t prot;
                   4771:        int wired;
                   4772: {
1.124   ! pk       4773:        int vr, vs, tpte, newprot, ctx, s;
1.43      pk       4774:        struct regmap *rp;
                   4775:        struct segmap *sp;
1.1       deraadt  4776:
                   4777: #ifdef DEBUG
                   4778:        if (pmapdebug & PDB_CHANGEPROT)
1.91      fair     4779:                printf("pmap_changeprot(%p, 0x%lx, 0x%x, 0x%x)\n",
1.1       deraadt  4780:                    pm, va, prot, wired);
                   4781: #endif
                   4782:
                   4783:        write_user_windows();   /* paranoia */
                   4784:
1.64      pk       4785:        va &= ~(NBPG-1);
1.42      mycroft  4786:        if (pm == pmap_kernel())
1.1       deraadt  4787:                newprot = prot & VM_PROT_WRITE ? PG_S|PG_W : PG_S;
                   4788:        else
                   4789:                newprot = prot & VM_PROT_WRITE ? PG_W : 0;
1.43      pk       4790:        vr = VA_VREG(va);
                   4791:        vs = VA_VSEG(va);
1.1       deraadt  4792:        s = splpmap();          /* conservative */
1.43      pk       4793:        rp = &pm->pm_regmap[vr];
                   4794:        if (rp->rg_nsegmap == 0) {
1.66      christos 4795:                printf("pmap_changeprot: no segments in %d\n", vr);
1.43      pk       4796:                return;
                   4797:        }
                   4798:        if (rp->rg_segmap == NULL) {
1.66      christos 4799:                printf("pmap_changeprot: no segments in %d!\n", vr);
1.43      pk       4800:                return;
                   4801:        }
                   4802:        sp = &rp->rg_segmap[vs];
                   4803:
1.1       deraadt  4804:        pmap_stats.ps_changeprots++;
                   4805:
1.43      pk       4806: #ifdef DEBUG
                   4807:        if (pm != pmap_kernel() && sp->sg_pte == NULL)
                   4808:                panic("pmap_changeprot: no pages");
                   4809: #endif
                   4810:
1.1       deraadt  4811:        /* update PTEs in software or hardware */
1.43      pk       4812:        if (sp->sg_pmeg == seginval) {
1.124   ! pk       4813:                int *pte = &sp->sg_pte[VA_VPG(va)];
1.1       deraadt  4814:
                   4815:                /* update in software */
                   4816:                if ((*pte & PG_PROT) == newprot)
                   4817:                        goto useless;
                   4818:                *pte = (*pte & ~PG_PROT) | newprot;
                   4819:        } else {
                   4820:                /* update in hardware */
1.71      pk       4821:                ctx = getcontext4();
1.43      pk       4822:                if (CTX_USABLE(pm,rp)) {
1.88      pk       4823:                        /*
                   4824:                         * Use current context.
                   4825:                         * Flush cache if page has been referenced to
                   4826:                         * avoid stale protection bits in the cache tags.
                   4827:                         */
1.71      pk       4828:                        setcontext4(pm->pm_ctxnum);
1.55      pk       4829:                        tpte = getpte4(va);
1.11      pk       4830:                        if ((tpte & PG_PROT) == newprot) {
1.71      pk       4831:                                setcontext4(ctx);
1.1       deraadt  4832:                                goto useless;
1.11      pk       4833:                        }
1.88      pk       4834:                        if ((tpte & (PG_U|PG_NC|PG_TYPE)) == (PG_U|PG_OBMEM))
1.1       deraadt  4835:                                cache_flush_page((int)va);
                   4836:                } else {
1.71      pk       4837:                        setcontext4(0);
1.1       deraadt  4838:                        /* XXX use per-cpu va? */
1.69      pk       4839:                        if (HASSUN4_MMU3L)
1.43      pk       4840:                                setregmap(0, tregion);
                   4841:                        setsegmap(0, sp->sg_pmeg);
1.18      deraadt  4842:                        va = VA_VPG(va) << PGSHIFT;
1.55      pk       4843:                        tpte = getpte4(va);
1.11      pk       4844:                        if ((tpte & PG_PROT) == newprot) {
1.71      pk       4845:                                setcontext4(ctx);
1.1       deraadt  4846:                                goto useless;
1.11      pk       4847:                        }
1.1       deraadt  4848:                }
                   4849:                tpte = (tpte & ~PG_PROT) | newprot;
1.55      pk       4850:                setpte4(va, tpte);
1.71      pk       4851:                setcontext4(ctx);
1.1       deraadt  4852:        }
                   4853:        splx(s);
                   4854:        return;
                   4855:
                   4856: useless:
                   4857:        /* only wiring changed, and we ignore wiring */
                   4858:        pmap_stats.ps_useless_changeprots++;
                   4859:        splx(s);
                   4860: }
                   4861:
1.55      pk       4862: #endif /* sun4, 4c */
                   4863:
                   4864: #if defined(SUN4M)             /* 4M version of protection routines above */
1.1       deraadt  4865: /*
1.55      pk       4866:  * Lower (make more strict) the protection on the specified
                   4867:  * physical page.
1.1       deraadt  4868:  *
1.55      pk       4869:  * There are only two cases: either the protection is going to 0
                   4870:  * (in which case we do the dirty work here), or it is going
                   4871:  * to read-only (in which case pv_changepte does the trick).
1.1       deraadt  4872:  */
                   4873: void
1.55      pk       4874: pmap_page_protect4m(pa, prot)
1.124   ! pk       4875:        paddr_t pa;
1.1       deraadt  4876:        vm_prot_t prot;
                   4877: {
1.124   ! pk       4878:        struct pvlist *pv, *pv0, *npv;
        !          4879:        struct pmap *pm;
        !          4880:        int va, vr, vs, tpte;
        !          4881:        int flags, nleft, s, ctx;
1.55      pk       4882:        struct regmap *rp;
                   4883:        struct segmap *sp;
1.45      pk       4884:
                   4885: #ifdef DEBUG
1.55      pk       4886:        if ((pmapdebug & PDB_CHANGEPROT) ||
                   4887:            (pmapdebug & PDB_REMOVE && prot == VM_PROT_NONE))
1.91      fair     4888:                printf("pmap_page_protect(0x%lx, 0x%x)\n", pa, prot);
1.45      pk       4889: #endif
1.55      pk       4890:        /*
                   4891:         * Skip unmanaged pages, or operations that do not take
                   4892:         * away write permission.
                   4893:         */
                   4894:        if (!managed(pa) || prot & VM_PROT_WRITE)
                   4895:                return;
                   4896:        write_user_windows();   /* paranoia */
                   4897:        if (prot & VM_PROT_READ) {
                   4898:                pv_changepte4m(pvhead(pa), 0, PPROT_WRITE);
1.45      pk       4899:                return;
                   4900:        }
1.39      pk       4901:
1.1       deraadt  4902:        /*
1.55      pk       4903:         * Remove all access to all people talking to this page.
                   4904:         * Walk down PV list, removing all mappings.
                   4905:         * The logic is much like that for pmap_remove,
                   4906:         * but we know we are removing exactly one page.
1.1       deraadt  4907:         */
1.55      pk       4908:        pv = pvhead(pa);
                   4909:        s = splpmap();
                   4910:        if ((pm = pv->pv_pmap) == NULL) {
                   4911:                splx(s);
                   4912:                return;
1.1       deraadt  4913:        }
1.71      pk       4914:        ctx = getcontext4m();
1.55      pk       4915:        pv0 = pv;
                   4916:        flags = pv->pv_flags /*| PV_C4M*/;      /* %%%: ???? */
                   4917:        for (;; pm = pv->pv_pmap) {
                   4918:                va = pv->pv_va;
                   4919:                vr = VA_VREG(va);
                   4920:                vs = VA_VSEG(va);
                   4921:                rp = &pm->pm_regmap[vr];
                   4922:                if (rp->rg_nsegmap == 0)
                   4923:                        panic("pmap_remove_all: empty vreg");
                   4924:                sp = &rp->rg_segmap[vs];
                   4925:                if ((nleft = sp->sg_npte) == 0)
                   4926:                        panic("pmap_remove_all: empty vseg");
                   4927:                nleft--;
                   4928:                sp->sg_npte = nleft;
1.1       deraadt  4929:
1.55      pk       4930:                /* Invalidate PTE in MMU pagetables. Flush cache if necessary */
1.72      pk       4931:                if (pm->pm_ctx) {
1.71      pk       4932:                        setcontext4m(pm->pm_ctxnum);
1.69      pk       4933:                        cache_flush_page(va);
1.55      pk       4934:                        tlb_flush_page(va);
1.72      pk       4935:                }
                   4936:
                   4937:                tpte = sp->sg_pte[VA_SUN4M_VPG(va)];
1.1       deraadt  4938:
1.55      pk       4939:                if ((tpte & SRMMU_TETYPE) != SRMMU_TEPTE)
                   4940:                        panic("pmap_page_protect !PG_V");
1.72      pk       4941:
1.55      pk       4942:                flags |= MR4M(tpte);
1.43      pk       4943:
1.83      pk       4944:                if (nleft) {
                   4945:                        setpgt4m(&sp->sg_pte[VA_SUN4M_VPG(va)], SRMMU_TEINVALID);
                   4946:                        goto nextpv;
                   4947:                }
                   4948:
                   4949:                /* Entire segment is gone */
                   4950:                if (pm == pmap_kernel()) {
                   4951:                        tlb_flush_segment(vr, vs); /* Paranoid? */
1.72      pk       4952:                        setpgt4m(&sp->sg_pte[VA_SUN4M_VPG(va)], SRMMU_TEINVALID);
1.83      pk       4953:                        if (va < virtual_avail) {
1.55      pk       4954: #ifdef DEBUG
1.83      pk       4955:                                printf(
                   4956:                                 "pmap_page_protect: attempt to free"
                   4957:                                 " base kernel allocation\n");
1.55      pk       4958: #endif
1.83      pk       4959:                                goto nextpv;
                   4960:                        }
1.72      pk       4961: #if 0 /* no need for this */
1.83      pk       4962:                        /* no need to free the table; it is static */
                   4963:                        qzero(sp->sg_pte, SRMMU_L3SIZE * sizeof(int));
1.72      pk       4964: #endif
1.43      pk       4965:
1.83      pk       4966:                        /* if we're done with a region, leave it */
1.55      pk       4967:
1.83      pk       4968:                } else {        /* User mode mapping */
                   4969:                        if (pm->pm_ctx)
                   4970:                                tlb_flush_segment(vr, vs);
                   4971:                        setpgt4m(&rp->rg_seg_ptps[vs], SRMMU_TEINVALID);
1.121     pk       4972:                        pool_put(&L23_pool, sp->sg_pte);
1.83      pk       4973:                        sp->sg_pte = NULL;
1.55      pk       4974:
1.83      pk       4975:                        if (--rp->rg_nsegmap == 0) {
1.88      pk       4976:                                if (pm->pm_ctx)
                   4977:                                        tlb_flush_context();
                   4978:                                setpgt4m(&pm->pm_reg_ptps[vr], SRMMU_TEINVALID);
1.83      pk       4979:                                free(rp->rg_segmap, M_VMPMAP);
                   4980:                                rp->rg_segmap = NULL;
1.121     pk       4981:                                pool_put(&L23_pool, rp->rg_seg_ptps);
1.55      pk       4982:                        }
                   4983:                }
1.83      pk       4984:
1.55      pk       4985:        nextpv:
                   4986:                npv = pv->pv_next;
                   4987:                if (pv != pv0)
1.122     pk       4988:                        pool_put(&pv_pool, pv);
1.55      pk       4989:                if ((pv = npv) == NULL)
                   4990:                        break;
                   4991:        }
                   4992:        pv0->pv_pmap = NULL;
                   4993:        pv0->pv_next = NULL; /* ? */
                   4994:        pv0->pv_flags = flags;
1.71      pk       4995:        setcontext4m(ctx);
1.55      pk       4996:        splx(s);
                   4997: }
                   4998:
                   4999: /*
                   5000:  * Lower (make more strict) the protection on the specified
                   5001:  * range of this pmap.
                   5002:  *
                   5003:  * There are only two cases: either the protection is going to 0
                   5004:  * (in which case we call pmap_remove to do the dirty work), or
                   5005:  * it is going from read/write to read-only.  The latter is
                   5006:  * fairly easy.
                   5007:  */
                   5008: void
                   5009: pmap_protect4m(pm, sva, eva, prot)
1.124   ! pk       5010:        struct pmap *pm;
        !          5011:        vaddr_t sva, eva;
1.55      pk       5012:        vm_prot_t prot;
                   5013: {
1.124   ! pk       5014:        int va, nva, vr, vs;
        !          5015:        int s, ctx;
1.55      pk       5016:        struct regmap *rp;
                   5017:        struct segmap *sp;
                   5018:
                   5019:        if (pm == NULL || prot & VM_PROT_WRITE)
                   5020:                return;
                   5021:
                   5022:        if ((prot & VM_PROT_READ) == 0) {
                   5023:                pmap_remove(pm, sva, eva);
                   5024:                return;
                   5025:        }
                   5026:
                   5027:        write_user_windows();
1.71      pk       5028:        ctx = getcontext4m();
1.55      pk       5029:        s = splpmap();
                   5030:        simple_lock(&pm->pm_lock);
                   5031:
                   5032:        for (va = sva; va < eva;) {
                   5033:                vr = VA_VREG(va);
                   5034:                vs = VA_VSEG(va);
                   5035:                rp = &pm->pm_regmap[vr];
                   5036:                nva = VSTOVA(vr,vs + 1);
                   5037:                if (nva == 0)   /* XXX */
                   5038:                        panic("pmap_protect: last segment"); /* cannot happen(why?)*/
                   5039:                if (nva > eva)
                   5040:                        nva = eva;
                   5041:                if (rp->rg_nsegmap == 0) {
                   5042:                        va = nva;
                   5043:                        continue;
                   5044:                }
                   5045: #ifdef DEBUG
                   5046:                if (rp->rg_segmap == NULL)
                   5047:                        panic("pmap_protect: no segments");
                   5048: #endif
                   5049:                sp = &rp->rg_segmap[vs];
                   5050:                if (sp->sg_npte == 0) {
                   5051:                        va = nva;
                   5052:                        continue;
                   5053:                }
                   5054: #ifdef DEBUG
                   5055:                if (sp->sg_pte == NULL)
                   5056:                        panic("pmap_protect: no pages");
                   5057: #endif
1.72      pk       5058:                /* pages loaded: take away write bits from MMU PTEs */
                   5059:                if (pm->pm_ctx)
                   5060:                        setcontext4m(pm->pm_ctxnum);
                   5061:
                   5062:                pmap_stats.ps_npg_prot_all = (nva - va) >> PGSHIFT;
                   5063:                for (; va < nva; va += NBPG) {
                   5064:                        int tpte;
1.100     pk       5065:
                   5066:                        if (pm->pm_ctx) {
                   5067:                                /* Flush TLB entry */
                   5068:                                tlb_flush_page(va);
                   5069:                        }
                   5070:
1.72      pk       5071:                        tpte = sp->sg_pte[VA_SUN4M_VPG(va)];
1.55      pk       5072:                        /*
                   5073:                         * Flush cache so that any existing cache
                   5074:                         * tags are updated.  This is really only
                   5075:                         * needed for PTEs that lose PG_W.
                   5076:                         */
1.72      pk       5077:                        if ((tpte & (PPROT_WRITE|SRMMU_PGTYPE)) ==
                   5078:                            (PPROT_WRITE|PG_SUN4M_OBMEM)) {
                   5079:                                pmap_stats.ps_npg_prot_actual++;
                   5080:                                if (pm->pm_ctx) {
1.69      pk       5081:                                        cache_flush_page(va);
1.55      pk       5082:                                }
1.72      pk       5083:                                setpgt4m(&sp->sg_pte[VA_SUN4M_VPG(va)],
                   5084:                                         tpte & ~PPROT_WRITE);
1.55      pk       5085:                        }
                   5086:                }
                   5087:        }
                   5088:        simple_unlock(&pm->pm_lock);
                   5089:        splx(s);
1.71      pk       5090:        setcontext4m(ctx);
1.55      pk       5091: }
                   5092:
                   5093: /*
                   5094:  * Change the protection and/or wired status of the given (MI) virtual page.
                   5095:  * XXX: should have separate function (or flag) telling whether only wiring
                   5096:  * is changing.
                   5097:  */
                   5098: void
                   5099: pmap_changeprot4m(pm, va, prot, wired)
1.124   ! pk       5100:        struct pmap *pm;
        !          5101:        vaddr_t va;
1.55      pk       5102:        vm_prot_t prot;
                   5103:        int wired;
                   5104: {
1.124   ! pk       5105:        int pte, newprot, ctx, s;
1.100     pk       5106:        struct regmap *rp;
                   5107:        struct segmap *sp;
1.55      pk       5108:
                   5109: #ifdef DEBUG
                   5110:        if (pmapdebug & PDB_CHANGEPROT)
1.91      fair     5111:                printf("pmap_changeprot(%p, 0x%lx, 0x%x, 0x%x)\n",
1.55      pk       5112:                    pm, va, prot, wired);
                   5113: #endif
                   5114:
                   5115:        write_user_windows();   /* paranoia */
                   5116:
1.64      pk       5117:        va &= ~(NBPG-1);
1.55      pk       5118:        if (pm == pmap_kernel())
                   5119:                newprot = prot & VM_PROT_WRITE ? PPROT_N_RWX : PPROT_N_RX;
                   5120:        else
                   5121:                newprot = prot & VM_PROT_WRITE ? PPROT_RWX_RWX : PPROT_RX_RX;
                   5122:
                   5123:        pmap_stats.ps_changeprots++;
                   5124:
                   5125:        s = splpmap();          /* conservative */
1.100     pk       5126:
                   5127:        rp = &pm->pm_regmap[VA_VREG(va)];
                   5128:        sp = &rp->rg_segmap[VA_VSEG(va)];
                   5129:
1.71      pk       5130:        ctx = getcontext4m();
1.55      pk       5131:        if (pm->pm_ctx) {
1.100     pk       5132:                /* Flush TLB entry */
                   5133:                setcontext4m(pm->pm_ctxnum);
                   5134:                tlb_flush_page(va);
                   5135:        }
                   5136:        pte = sp->sg_pte[VA_SUN4M_VPG(va)];
                   5137:
                   5138:        if ((pte & SRMMU_PROT_MASK) == newprot) {
                   5139:                /* only wiring changed, and we ignore wiring */
                   5140:                pmap_stats.ps_useless_changeprots++;
                   5141:                goto out;
                   5142:        }
                   5143:
                   5144:        if (pm->pm_ctx) {
1.88      pk       5145:                /*
                   5146:                 * Use current context.
                   5147:                 * Flush cache if page has been referenced to
                   5148:                 * avoid stale protection bits in the cache tags.
                   5149:                 */
1.100     pk       5150:                if ((pte & (SRMMU_PG_C|SRMMU_PGTYPE)) ==
1.88      pk       5151:                    (SRMMU_PG_C|PG_SUN4M_OBMEM))
                   5152:                        cache_flush_page(va);
1.55      pk       5153:        }
1.100     pk       5154:
                   5155:        setpgt4m(&sp->sg_pte[VA_SUN4M_VPG(va)],
                   5156:                 (pte & ~SRMMU_PROT_MASK) | newprot);
1.72      pk       5157: out:
1.71      pk       5158:        setcontext4m(ctx);
1.55      pk       5159:        splx(s);
                   5160: }
                   5161: #endif /* 4m */
                   5162:
                   5163: /*
                   5164:  * Insert (MI) physical page pa at virtual address va in the given pmap.
                   5165:  * NB: the pa parameter includes type bits PMAP_OBIO, PMAP_NC as necessary.
                   5166:  *
                   5167:  * If pa is not in the `managed' range it will not be `bank mapped'.
                   5168:  * This works during bootstrap only because the first 4MB happens to
                   5169:  * map one-to-one.
                   5170:  *
                   5171:  * There may already be something else there, or we might just be
                   5172:  * changing protections and/or wiring on an existing mapping.
                   5173:  *     XXX     should have different entry points for changing!
                   5174:  */
                   5175:
                   5176: #if defined(SUN4) || defined(SUN4C)
                   5177:
                   5178: void
                   5179: pmap_enter4_4c(pm, va, pa, prot, wired)
1.124   ! pk       5180:        struct pmap *pm;
        !          5181:        vaddr_t va;
        !          5182:        paddr_t pa;
1.55      pk       5183:        vm_prot_t prot;
                   5184:        int wired;
                   5185: {
1.124   ! pk       5186:        struct pvlist *pv;
        !          5187:        int pteproto, ctx;
1.55      pk       5188:
                   5189:        if (pm == NULL)
                   5190:                return;
                   5191:
                   5192:        if (VA_INHOLE(va)) {
                   5193: #ifdef DEBUG
1.91      fair     5194:                printf("pmap_enter: pm %p, va 0x%lx, pa 0x%lx: in MMU hole\n",
1.55      pk       5195:                        pm, va, pa);
                   5196: #endif
                   5197:                return;
                   5198:        }
                   5199:
                   5200: #ifdef DEBUG
                   5201:        if (pmapdebug & PDB_ENTER)
1.91      fair     5202:                printf("pmap_enter(%p, 0x%lx, 0x%lx, 0x%x, 0x%x)\n",
1.55      pk       5203:                    pm, va, pa, prot, wired);
                   5204: #endif
                   5205:
1.82      pk       5206:        pteproto = PG_V | PMAP_T2PTE_4(pa);
                   5207:        pa &= ~PMAP_TNC_4;
1.55      pk       5208:        /*
                   5209:         * Set up prototype for new PTE.  Cannot set PG_NC from PV_NC yet
                   5210:         * since the pvlist no-cache bit might change as a result of the
                   5211:         * new mapping.
                   5212:         */
                   5213:        if ((pteproto & PG_TYPE) == PG_OBMEM && managed(pa)) {
                   5214: #ifdef DIAGNOSTIC
                   5215:                if (!pmap_pa_exists(pa))
1.91      fair     5216:                        panic("pmap_enter: no such address: 0x%lx", pa);
1.55      pk       5217: #endif
                   5218:                pv = pvhead(pa);
                   5219:        } else {
                   5220:                pv = NULL;
                   5221:        }
1.60      pk       5222:        pteproto |= atop(pa) & PG_PFNUM;
1.55      pk       5223:        if (prot & VM_PROT_WRITE)
                   5224:                pteproto |= PG_W;
                   5225:
1.71      pk       5226:        ctx = getcontext4();
1.55      pk       5227:        if (pm == pmap_kernel())
                   5228:                pmap_enk4_4c(pm, va, prot, wired, pv, pteproto | PG_S);
                   5229:        else
                   5230:                pmap_enu4_4c(pm, va, prot, wired, pv, pteproto);
1.71      pk       5231:        setcontext4(ctx);
1.55      pk       5232: }
                   5233:
                   5234: /* enter new (or change existing) kernel mapping */
                   5235: void
                   5236: pmap_enk4_4c(pm, va, prot, wired, pv, pteproto)
1.124   ! pk       5237:        struct pmap *pm;
        !          5238:        vaddr_t va;
1.55      pk       5239:        vm_prot_t prot;
                   5240:        int wired;
1.124   ! pk       5241:        struct pvlist *pv;
        !          5242:        int pteproto;
1.55      pk       5243: {
1.124   ! pk       5244:        int vr, vs, tpte, i, s;
1.55      pk       5245:        struct regmap *rp;
                   5246:        struct segmap *sp;
                   5247:
                   5248:        vr = VA_VREG(va);
                   5249:        vs = VA_VSEG(va);
                   5250:        rp = &pm->pm_regmap[vr];
                   5251:        sp = &rp->rg_segmap[vs];
                   5252:        s = splpmap();          /* XXX way too conservative */
                   5253:
1.69      pk       5254: #if defined(SUN4_MMU3L)
                   5255:        if (HASSUN4_MMU3L && rp->rg_smeg == reginval) {
1.124   ! pk       5256:                vaddr_t tva;
1.55      pk       5257:                rp->rg_smeg = region_alloc(&region_locked, pm, vr)->me_cookie;
                   5258:                i = ncontext - 1;
                   5259:                do {
1.71      pk       5260:                        setcontext4(i);
1.55      pk       5261:                        setregmap(va, rp->rg_smeg);
                   5262:                } while (--i >= 0);
1.1       deraadt  5263:
1.43      pk       5264:                /* set all PTEs to invalid, then overwrite one PTE below */
                   5265:                tva = VA_ROUNDDOWNTOREG(va);
                   5266:                for (i = 0; i < NSEGRG; i++) {
                   5267:                        setsegmap(tva, rp->rg_segmap[i].sg_pmeg);
                   5268:                        tva += NBPSG;
                   5269:                };
                   5270:        }
                   5271: #endif
1.55      pk       5272:        if (sp->sg_pmeg != seginval && (tpte = getpte4(va)) & PG_V) {
1.124   ! pk       5273:                int addr;
1.1       deraadt  5274:
1.34      pk       5275:                /* old mapping exists, and is of the same pa type */
                   5276:                if ((tpte & (PG_PFNUM|PG_TYPE)) ==
                   5277:                    (pteproto & (PG_PFNUM|PG_TYPE))) {
1.1       deraadt  5278:                        /* just changing protection and/or wiring */
                   5279:                        splx(s);
1.81      pk       5280:                        pmap_changeprot4_4c(pm, va, prot, wired);
1.1       deraadt  5281:                        return;
                   5282:                }
                   5283:
1.34      pk       5284:                if ((tpte & PG_TYPE) == PG_OBMEM) {
1.43      pk       5285: #ifdef DEBUG
1.91      fair     5286: printf("pmap_enk: changing existing va=>pa entry: va 0x%lx, pteproto 0x%x\n",
1.43      pk       5287:        va, pteproto);
                   5288: #endif
1.34      pk       5289:                        /*
                   5290:                         * Switcheroo: changing pa for this va.
                   5291:                         * If old pa was managed, remove from pvlist.
                   5292:                         * If old page was cached, flush cache.
                   5293:                         */
1.60      pk       5294:                        addr = ptoa(tpte & PG_PFNUM);
1.31      pk       5295:                        if (managed(addr))
1.58      pk       5296:                                pv_unlink4_4c(pvhead(addr), pm, va);
1.34      pk       5297:                        if ((tpte & PG_NC) == 0) {
1.71      pk       5298:                                setcontext4(0); /* ??? */
1.69      pk       5299:                                cache_flush_page((int)va);
1.34      pk       5300:                        }
1.1       deraadt  5301:                }
                   5302:        } else {
                   5303:                /* adding new entry */
1.43      pk       5304:                sp->sg_npte++;
1.1       deraadt  5305:        }
                   5306:
                   5307:        /*
                   5308:         * If the new mapping is for a managed PA, enter into pvlist.
                   5309:         * Note that the mapping for a malloc page will always be
                   5310:         * unique (hence will never cause a second call to malloc).
                   5311:         */
                   5312:        if (pv != NULL)
1.115     pk       5313:                pteproto |= pv_link4_4c(pv, pm, va, pteproto & PG_NC);
1.1       deraadt  5314:
1.43      pk       5315:        if (sp->sg_pmeg == seginval) {
1.124   ! pk       5316:                int tva;
1.1       deraadt  5317:
                   5318:                /*
                   5319:                 * Allocate an MMU entry now (on locked list),
                   5320:                 * and map it into every context.  Set all its
                   5321:                 * PTEs invalid (we will then overwrite one, but
                   5322:                 * this is more efficient than looping twice).
                   5323:                 */
                   5324: #ifdef DEBUG
                   5325:                if (pm->pm_ctx == NULL || pm->pm_ctxnum != 0)
                   5326:                        panic("pmap_enk: kern seg but no kern ctx");
                   5327: #endif
1.43      pk       5328:                sp->sg_pmeg = me_alloc(&segm_locked, pm, vr, vs)->me_cookie;
                   5329:                rp->rg_nsegmap++;
                   5330:
1.69      pk       5331: #if defined(SUN4_MMU3L)
                   5332:                if (HASSUN4_MMU3L)
1.43      pk       5333:                        setsegmap(va, sp->sg_pmeg);
                   5334:                else
                   5335: #endif
                   5336:                {
                   5337:                        i = ncontext - 1;
                   5338:                        do {
1.71      pk       5339:                                setcontext4(i);
1.43      pk       5340:                                setsegmap(va, sp->sg_pmeg);
                   5341:                        } while (--i >= 0);
                   5342:                }
1.1       deraadt  5343:
                   5344:                /* set all PTEs to invalid, then overwrite one PTE below */
                   5345:                tva = VA_ROUNDDOWNTOSEG(va);
                   5346:                i = NPTESG;
                   5347:                do {
1.55      pk       5348:                        setpte4(tva, 0);
1.1       deraadt  5349:                        tva += NBPG;
                   5350:                } while (--i > 0);
                   5351:        }
                   5352:
                   5353:        /* ptes kept in hardware only */
1.55      pk       5354:        setpte4(va, pteproto);
1.1       deraadt  5355:        splx(s);
                   5356: }
                   5357:
                   5358: /* enter new (or change existing) user mapping */
1.53      christos 5359: void
1.55      pk       5360: pmap_enu4_4c(pm, va, prot, wired, pv, pteproto)
1.124   ! pk       5361:        struct pmap *pm;
        !          5362:        vaddr_t va;
1.1       deraadt  5363:        vm_prot_t prot;
                   5364:        int wired;
1.124   ! pk       5365:        struct pvlist *pv;
        !          5366:        int pteproto;
1.1       deraadt  5367: {
1.124   ! pk       5368:        int vr, vs, *pte, tpte, pmeg, s, doflush;
1.43      pk       5369:        struct regmap *rp;
                   5370:        struct segmap *sp;
1.1       deraadt  5371:
                   5372:        write_user_windows();           /* XXX conservative */
1.43      pk       5373:        vr = VA_VREG(va);
                   5374:        vs = VA_VSEG(va);
                   5375:        rp = &pm->pm_regmap[vr];
1.1       deraadt  5376:        s = splpmap();                  /* XXX conservative */
                   5377:
                   5378:        /*
                   5379:         * If there is no space in which the PTEs can be written
                   5380:         * while they are not in the hardware, this must be a new
                   5381:         * virtual segment.  Get PTE space and count the segment.
                   5382:         *
                   5383:         * TO SPEED UP CTX ALLOC, PUT SEGMENT BOUNDS STUFF HERE
                   5384:         * AND IN pmap_rmu()
                   5385:         */
1.13      pk       5386:
1.43      pk       5387:        GAP_SHRINK(pm,vr);
1.13      pk       5388:
                   5389: #ifdef DEBUG
                   5390:        if (pm->pm_gap_end < pm->pm_gap_start) {
1.91      fair     5391:                printf("pmap_enu: gap_start 0x%x, gap_end 0x%x",
1.13      pk       5392:                        pm->pm_gap_start, pm->pm_gap_end);
                   5393:                panic("pmap_enu: gap botch");
                   5394:        }
                   5395: #endif
                   5396:
1.43      pk       5397: rretry:
                   5398:        if (rp->rg_segmap == NULL) {
                   5399:                /* definitely a new mapping */
1.124   ! pk       5400:                int i;
        !          5401:                int size = NSEGRG * sizeof (struct segmap);
1.43      pk       5402:
                   5403:                sp = (struct segmap *)malloc((u_long)size, M_VMPMAP, M_WAITOK);
                   5404:                if (rp->rg_segmap != NULL) {
1.66      christos 5405: printf("pmap_enter: segment filled during sleep\n");   /* can this happen? */
1.49      pk       5406:                        free(sp, M_VMPMAP);
1.43      pk       5407:                        goto rretry;
                   5408:                }
1.55      pk       5409:                qzero((caddr_t)sp, size);
1.43      pk       5410:                rp->rg_segmap = sp;
                   5411:                rp->rg_nsegmap = 0;
                   5412:                for (i = NSEGRG; --i >= 0;)
                   5413:                        sp++->sg_pmeg = seginval;
                   5414:        }
                   5415:
                   5416:        sp = &rp->rg_segmap[vs];
                   5417:
                   5418: sretry:
                   5419:        if ((pte = sp->sg_pte) == NULL) {
1.1       deraadt  5420:                /* definitely a new mapping */
1.124   ! pk       5421:                int size = NPTESG * sizeof *pte;
1.1       deraadt  5422:
                   5423:                pte = (int *)malloc((u_long)size, M_VMPMAP, M_WAITOK);
1.43      pk       5424:                if (sp->sg_pte != NULL) {
1.66      christos 5425: printf("pmap_enter: pte filled during sleep\n");       /* can this happen? */
1.49      pk       5426:                        free(pte, M_VMPMAP);
1.43      pk       5427:                        goto sretry;
1.1       deraadt  5428:                }
                   5429: #ifdef DEBUG
1.43      pk       5430:                if (sp->sg_pmeg != seginval)
1.1       deraadt  5431:                        panic("pmap_enter: new ptes, but not seginval");
                   5432: #endif
1.55      pk       5433:                qzero((caddr_t)pte, size);
1.43      pk       5434:                sp->sg_pte = pte;
                   5435:                sp->sg_npte = 1;
                   5436:                rp->rg_nsegmap++;
1.1       deraadt  5437:        } else {
                   5438:                /* might be a change: fetch old pte */
                   5439:                doflush = 0;
1.55      pk       5440:                if ((pmeg = sp->sg_pmeg) == seginval) {
                   5441:                        /* software pte */
                   5442:                        tpte = pte[VA_VPG(va)];
                   5443:                } else {
                   5444:                        /* hardware pte */
                   5445:                        if (CTX_USABLE(pm,rp)) {
1.71      pk       5446:                                setcontext4(pm->pm_ctxnum);
1.55      pk       5447:                                tpte = getpte4(va);
1.69      pk       5448:                                doflush = CACHEINFO.c_vactype != VAC_NONE;
1.55      pk       5449:                        } else {
1.71      pk       5450:                                setcontext4(0);
1.55      pk       5451:                                /* XXX use per-cpu pteva? */
1.69      pk       5452:                                if (HASSUN4_MMU3L)
1.55      pk       5453:                                        setregmap(0, tregion);
                   5454:                                setsegmap(0, pmeg);
                   5455:                                tpte = getpte4(VA_VPG(va) << PGSHIFT);
                   5456:                        }
                   5457:                }
                   5458:                if (tpte & PG_V) {
1.124   ! pk       5459:                        int addr;
1.55      pk       5460:
                   5461:                        /* old mapping exists, and is of the same pa type */
                   5462:                        if ((tpte & (PG_PFNUM|PG_TYPE)) ==
                   5463:                            (pteproto & (PG_PFNUM|PG_TYPE))) {
                   5464:                                /* just changing prot and/or wiring */
                   5465:                                splx(s);
                   5466:                                /* caller should call this directly: */
1.60      pk       5467:                                pmap_changeprot4_4c(pm, va, prot, wired);
1.55      pk       5468:                                if (wired)
                   5469:                                        pm->pm_stats.wired_count++;
                   5470:                                else
                   5471:                                        pm->pm_stats.wired_count--;
                   5472:                                return;
                   5473:                        }
                   5474:                        /*
                   5475:                         * Switcheroo: changing pa for this va.
                   5476:                         * If old pa was managed, remove from pvlist.
                   5477:                         * If old page was cached, flush cache.
                   5478:                         */
1.65      christos 5479: #if 0
1.91      fair     5480: printf("%s[%d]: pmap_enu: changing existing va(0x%x)=>pa entry\n",
1.65      christos 5481:        curproc->p_comm, curproc->p_pid, va);
                   5482: #endif
1.55      pk       5483:                        if ((tpte & PG_TYPE) == PG_OBMEM) {
1.60      pk       5484:                                addr = ptoa(tpte & PG_PFNUM);
1.55      pk       5485:                                if (managed(addr))
1.58      pk       5486:                                        pv_unlink4_4c(pvhead(addr), pm, va);
1.69      pk       5487:                                if (doflush && (tpte & PG_NC) == 0)
1.55      pk       5488:                                        cache_flush_page((int)va);
                   5489:                        }
                   5490:                } else {
                   5491:                        /* adding new entry */
                   5492:                        sp->sg_npte++;
                   5493:
                   5494:                        /*
                   5495:                         * Increment counters
                   5496:                         */
                   5497:                        if (wired)
                   5498:                                pm->pm_stats.wired_count++;
                   5499:                }
                   5500:        }
                   5501:
                   5502:        if (pv != NULL)
1.115     pk       5503:                pteproto |= pv_link4_4c(pv, pm, va, pteproto & PG_NC);
1.55      pk       5504:
                   5505:        /*
                   5506:         * Update hardware & software PTEs.
                   5507:         */
                   5508:        if ((pmeg = sp->sg_pmeg) != seginval) {
1.81      pk       5509:                /* ptes are in hardware */
1.55      pk       5510:                if (CTX_USABLE(pm,rp))
1.71      pk       5511:                        setcontext4(pm->pm_ctxnum);
1.55      pk       5512:                else {
1.71      pk       5513:                        setcontext4(0);
1.55      pk       5514:                        /* XXX use per-cpu pteva? */
1.69      pk       5515:                        if (HASSUN4_MMU3L)
1.55      pk       5516:                                setregmap(0, tregion);
                   5517:                        setsegmap(0, pmeg);
                   5518:                        va = VA_VPG(va) << PGSHIFT;
                   5519:                }
                   5520:                setpte4(va, pteproto);
                   5521:        }
                   5522:        /* update software copy */
                   5523:        pte += VA_VPG(va);
                   5524:        *pte = pteproto;
                   5525:
                   5526:        splx(s);
                   5527: }
                   5528:
                   5529: #endif /*sun4,4c*/
                   5530:
                   5531: #if defined(SUN4M)             /* Sun4M versions of enter routines */
                   5532: /*
                   5533:  * Insert (MI) physical page pa at virtual address va in the given pmap.
                   5534:  * NB: the pa parameter includes type bits PMAP_OBIO, PMAP_NC as necessary.
                   5535:  *
                   5536:  * If pa is not in the `managed' range it will not be `bank mapped'.
                   5537:  * This works during bootstrap only because the first 4MB happens to
                   5538:  * map one-to-one.
                   5539:  *
                   5540:  * There may already be something else there, or we might just be
                   5541:  * changing protections and/or wiring on an existing mapping.
                   5542:  *     XXX     should have different entry points for changing!
                   5543:  */
                   5544:
                   5545: void
                   5546: pmap_enter4m(pm, va, pa, prot, wired)
1.124   ! pk       5547:        struct pmap *pm;
        !          5548:        vaddr_t va;
        !          5549:        paddr_t pa;
1.55      pk       5550:        vm_prot_t prot;
                   5551:        int wired;
                   5552: {
1.124   ! pk       5553:        struct pvlist *pv;
        !          5554:        int pteproto, ctx;
1.55      pk       5555:
                   5556:        if (pm == NULL)
                   5557:                return;
                   5558:
                   5559: #ifdef DEBUG
                   5560:        if (pmapdebug & PDB_ENTER)
1.91      fair     5561:                printf("pmap_enter(%p, 0x%lx, 0x%lx, 0x%x, 0x%x)\n",
1.55      pk       5562:                    pm, va, pa, prot, wired);
                   5563: #endif
1.60      pk       5564:
                   5565:        /* Initialise pteproto with cache bit */
                   5566:        pteproto = (pa & PMAP_NC) == 0 ? SRMMU_PG_C : 0;
1.55      pk       5567:
1.82      pk       5568: #ifdef DEBUG
                   5569:        if (pa & PMAP_TYPE_SRMMU) {     /* this page goes in an iospace */
1.69      pk       5570:                if (cpuinfo.cpu_type == CPUTYP_MS1)
1.58      pk       5571:                        panic("pmap_enter4m: attempt to use 36-bit iospace on"
                   5572:                              " MicroSPARC");
1.55      pk       5573:        }
1.82      pk       5574: #endif
                   5575:        pteproto |= PMAP_T2PTE_SRMMU(pa);
1.55      pk       5576:
                   5577:        /* Make sure we get a pte with appropriate perms! */
                   5578:        pteproto |= SRMMU_TEPTE | PPROT_RX_RX;
                   5579:
1.82      pk       5580:        pa &= ~PMAP_TNC_SRMMU;
1.55      pk       5581:        /*
                   5582:         * Set up prototype for new PTE.  Cannot set PG_NC from PV_NC yet
                   5583:         * since the pvlist no-cache bit might change as a result of the
                   5584:         * new mapping.
                   5585:         */
                   5586:        if ((pteproto & SRMMU_PGTYPE) == PG_SUN4M_OBMEM && managed(pa)) {
                   5587: #ifdef DIAGNOSTIC
                   5588:                if (!pmap_pa_exists(pa))
1.91      fair     5589:                        panic("pmap_enter: no such address: 0x%lx", pa);
1.55      pk       5590: #endif
                   5591:                pv = pvhead(pa);
                   5592:        } else {
                   5593:                pv = NULL;
                   5594:        }
1.60      pk       5595:        pteproto |= (atop(pa) << SRMMU_PPNSHIFT);
1.55      pk       5596:
                   5597:        if (prot & VM_PROT_WRITE)
                   5598:                pteproto |= PPROT_WRITE;
                   5599:
1.71      pk       5600:        ctx = getcontext4m();
1.55      pk       5601:
                   5602:        if (pm == pmap_kernel())
1.58      pk       5603:                pmap_enk4m(pm, va, prot, wired, pv, pteproto | PPROT_S);
1.55      pk       5604:        else
1.58      pk       5605:                pmap_enu4m(pm, va, prot, wired, pv, pteproto);
1.55      pk       5606:
1.71      pk       5607:        setcontext4m(ctx);
1.55      pk       5608: }
                   5609:
                   5610: /* enter new (or change existing) kernel mapping */
                   5611: void
                   5612: pmap_enk4m(pm, va, prot, wired, pv, pteproto)
1.124   ! pk       5613:        struct pmap *pm;
        !          5614:        vaddr_t va;
1.55      pk       5615:        vm_prot_t prot;
                   5616:        int wired;
1.124   ! pk       5617:        struct pvlist *pv;
        !          5618:        int pteproto;
1.55      pk       5619: {
1.124   ! pk       5620:        int vr, vs, tpte, s;
1.55      pk       5621:        struct regmap *rp;
                   5622:        struct segmap *sp;
                   5623:
                   5624: #ifdef DEBUG
                   5625:        if (va < KERNBASE)
1.72      pk       5626:                panic("pmap_enk4m: can't enter va 0x%lx below KERNBASE", va);
1.55      pk       5627: #endif
                   5628:        vr = VA_VREG(va);
                   5629:        vs = VA_VSEG(va);
                   5630:        rp = &pm->pm_regmap[vr];
                   5631:        sp = &rp->rg_segmap[vs];
                   5632:
                   5633:        s = splpmap();          /* XXX way too conservative */
                   5634:
                   5635:        if (rp->rg_seg_ptps == NULL) /* enter new region */
1.91      fair     5636:                panic("pmap_enk4m: missing kernel region table for va 0x%lx",va);
1.55      pk       5637:
1.72      pk       5638:        tpte = sp->sg_pte[VA_SUN4M_VPG(va)];
                   5639:        if ((tpte & SRMMU_TETYPE) == SRMMU_TEPTE) {
1.124   ! pk       5640:                int addr;
1.55      pk       5641:
                   5642:                /* old mapping exists, and is of the same pa type */
                   5643:
                   5644:                if ((tpte & SRMMU_PPNMASK) == (pteproto & SRMMU_PPNMASK)) {
                   5645:                        /* just changing protection and/or wiring */
                   5646:                        splx(s);
1.81      pk       5647:                        pmap_changeprot4m(pm, va, prot, wired);
1.55      pk       5648:                        return;
                   5649:                }
                   5650:
                   5651:                if ((tpte & SRMMU_PGTYPE) == PG_SUN4M_OBMEM) {
                   5652: #ifdef DEBUG
1.91      fair     5653: printf("pmap_enk4m: changing existing va=>pa entry: va 0x%lx, pteproto 0x%x, "
                   5654:        "oldpte 0x%x\n", va, pteproto, tpte);
1.55      pk       5655: #endif
                   5656:                        /*
                   5657:                         * Switcheroo: changing pa for this va.
                   5658:                         * If old pa was managed, remove from pvlist.
                   5659:                         * If old page was cached, flush cache.
                   5660:                         */
1.60      pk       5661:                        addr = ptoa((tpte & SRMMU_PPNMASK) >> SRMMU_PPNSHIFT);
1.55      pk       5662:                        if (managed(addr))
1.58      pk       5663:                                pv_unlink4m(pvhead(addr), pm, va);
1.55      pk       5664:                        if (tpte & SRMMU_PG_C) {
1.71      pk       5665:                                setcontext4m(0);        /* ??? */
1.69      pk       5666:                                cache_flush_page((int)va);
1.55      pk       5667:                        }
                   5668:                }
                   5669:        } else {
                   5670:                /* adding new entry */
                   5671:                sp->sg_npte++;
                   5672:        }
                   5673:
                   5674:        /*
                   5675:         * If the new mapping is for a managed PA, enter into pvlist.
                   5676:         * Note that the mapping for a malloc page will always be
                   5677:         * unique (hence will never cause a second call to malloc).
                   5678:         */
                   5679:        if (pv != NULL)
1.115     pk       5680:                pteproto &= ~(pv_link4m(pv, pm, va, (pteproto & SRMMU_PG_C) == 0));
1.55      pk       5681:
1.72      pk       5682: #ifdef DEBUG
1.55      pk       5683:        if (sp->sg_pte == NULL) /* If no existing pagetable */
1.60      pk       5684:                panic("pmap_enk4m: missing segment table for va 0x%lx",va);
1.72      pk       5685: #endif
1.55      pk       5686:
1.72      pk       5687:        tlb_flush_page(va);
                   5688:        setpgt4m(&sp->sg_pte[VA_SUN4M_VPG(va)], pteproto);
1.55      pk       5689:
                   5690:        splx(s);
                   5691: }
                   5692:
                   5693: /* enter new (or change existing) user mapping */
                   5694: void
                   5695: pmap_enu4m(pm, va, prot, wired, pv, pteproto)
1.124   ! pk       5696:        struct pmap *pm;
        !          5697:        vaddr_t va;
1.55      pk       5698:        vm_prot_t prot;
                   5699:        int wired;
1.124   ! pk       5700:        struct pvlist *pv;
        !          5701:        int pteproto;
1.55      pk       5702: {
1.124   ! pk       5703:        int vr, vs, *pte, tpte, s;
1.55      pk       5704:        struct regmap *rp;
                   5705:        struct segmap *sp;
                   5706:
1.72      pk       5707: #ifdef DEBUG
                   5708:        if (KERNBASE < va)
                   5709:                panic("pmap_enu4m: can't enter va 0x%lx above KERNBASE", va);
                   5710: #endif
                   5711:
1.55      pk       5712:        write_user_windows();           /* XXX conservative */
                   5713:        vr = VA_VREG(va);
                   5714:        vs = VA_VSEG(va);
                   5715:        rp = &pm->pm_regmap[vr];
                   5716:        s = splpmap();                  /* XXX conservative */
                   5717:
                   5718: rretry:
                   5719:        if (rp->rg_segmap == NULL) {
                   5720:                /* definitely a new mapping */
1.124   ! pk       5721:                int size = NSEGRG * sizeof (struct segmap);
1.55      pk       5722:
                   5723:                sp = (struct segmap *)malloc((u_long)size, M_VMPMAP, M_WAITOK);
                   5724:                if (rp->rg_segmap != NULL) {
                   5725: #ifdef DEBUG
1.66      christos 5726: printf("pmap_enu4m: segment filled during sleep\n");   /* can this happen? */
1.55      pk       5727: #endif
                   5728:                        free(sp, M_VMPMAP);
                   5729:                        goto rretry;
                   5730:                }
                   5731:                qzero((caddr_t)sp, size);
                   5732:                rp->rg_segmap = sp;
                   5733:                rp->rg_nsegmap = 0;
                   5734:                rp->rg_seg_ptps = NULL;
                   5735:        }
                   5736: rgretry:
                   5737:        if (rp->rg_seg_ptps == NULL) {
                   5738:                /* Need a segment table */
1.100     pk       5739:                int i, *ptd;
1.73      pk       5740:
1.121     pk       5741:                ptd = pool_get(&L23_pool, PR_WAITOK);
1.55      pk       5742:                if (rp->rg_seg_ptps != NULL) {
                   5743: #ifdef DEBUG
1.66      christos 5744: printf("pmap_enu4m: bizarre segment table fill during sleep\n");
1.55      pk       5745: #endif
1.121     pk       5746:                        pool_put(&L23_pool, ptd);
1.55      pk       5747:                        goto rgretry;
                   5748:                }
                   5749:
1.73      pk       5750:                rp->rg_seg_ptps = ptd;
                   5751:                for (i = 0; i < SRMMU_L2SIZE; i++)
1.74      pk       5752:                        setpgt4m(&ptd[i], SRMMU_TEINVALID);
1.72      pk       5753:                setpgt4m(&pm->pm_reg_ptps[vr],
1.73      pk       5754:                         (VA2PA((caddr_t)ptd) >> SRMMU_PPNPASHIFT) | SRMMU_TEPTD);
1.55      pk       5755:        }
                   5756:
                   5757:        sp = &rp->rg_segmap[vs];
                   5758:
                   5759: sretry:
                   5760:        if ((pte = sp->sg_pte) == NULL) {
                   5761:                /* definitely a new mapping */
1.100     pk       5762:                int i;
1.55      pk       5763:
1.121     pk       5764:                pte = pool_get(&L23_pool, PR_WAITOK);
1.55      pk       5765:                if (sp->sg_pte != NULL) {
1.66      christos 5766: printf("pmap_enter: pte filled during sleep\n");       /* can this happen? */
1.121     pk       5767:                        pool_put(&L23_pool, pte);
1.55      pk       5768:                        goto sretry;
                   5769:                }
                   5770:
                   5771:                sp->sg_pte = pte;
                   5772:                sp->sg_npte = 1;
                   5773:                rp->rg_nsegmap++;
1.74      pk       5774:                for (i = 0; i < SRMMU_L3SIZE; i++)
                   5775:                        setpgt4m(&pte[i], SRMMU_TEINVALID);
1.72      pk       5776:                setpgt4m(&rp->rg_seg_ptps[vs],
                   5777:                        (VA2PA((caddr_t)pte) >> SRMMU_PPNPASHIFT) | SRMMU_TEPTD);
1.55      pk       5778:        } else {
1.72      pk       5779:                /*
                   5780:                 * Might be a change: fetch old pte
                   5781:                 */
1.100     pk       5782:                tlb_flush_page(va);
1.72      pk       5783:                tpte = pte[VA_SUN4M_VPG(va)];
1.55      pk       5784:
                   5785:                if ((tpte & SRMMU_TETYPE) == SRMMU_TEPTE) {
1.124   ! pk       5786:                        int addr;
1.1       deraadt  5787:
1.34      pk       5788:                        /* old mapping exists, and is of the same pa type */
1.55      pk       5789:                        if ((tpte & SRMMU_PPNMASK) ==
                   5790:                            (pteproto & SRMMU_PPNMASK)) {
1.1       deraadt  5791:                                /* just changing prot and/or wiring */
                   5792:                                splx(s);
                   5793:                                /* caller should call this directly: */
1.60      pk       5794:                                pmap_changeprot4m(pm, va, prot, wired);
1.15      deraadt  5795:                                if (wired)
                   5796:                                        pm->pm_stats.wired_count++;
                   5797:                                else
                   5798:                                        pm->pm_stats.wired_count--;
1.1       deraadt  5799:                                return;
                   5800:                        }
                   5801:                        /*
                   5802:                         * Switcheroo: changing pa for this va.
                   5803:                         * If old pa was managed, remove from pvlist.
                   5804:                         * If old page was cached, flush cache.
                   5805:                         */
1.60      pk       5806: #ifdef DEBUG
1.72      pk       5807: if (pmapdebug & PDB_SWITCHMAP)
1.91      fair     5808: printf("%s[%d]: pmap_enu: changing existing va(0x%x)=>pa(pte=0x%x) entry\n",
1.72      pk       5809:        curproc->p_comm, curproc->p_pid, (int)va, (int)pte);
1.60      pk       5810: #endif
1.55      pk       5811:                        if ((tpte & SRMMU_PGTYPE) == PG_SUN4M_OBMEM) {
1.60      pk       5812:                                addr = ptoa( (tpte & SRMMU_PPNMASK) >>
                   5813:                                             SRMMU_PPNSHIFT);
1.100     pk       5814:                                if (managed(addr)) {
                   5815:                                        pvhead(addr)->pv_flags |= MR4M(tpte);
1.58      pk       5816:                                        pv_unlink4m(pvhead(addr), pm, va);
1.100     pk       5817:                                }
1.72      pk       5818:                                if (pm->pm_ctx && (tpte & SRMMU_PG_C))
1.34      pk       5819:                                        cache_flush_page((int)va);
1.31      pk       5820:                        }
1.1       deraadt  5821:                } else {
                   5822:                        /* adding new entry */
1.43      pk       5823:                        sp->sg_npte++;
1.15      deraadt  5824:
                   5825:                        /*
                   5826:                         * Increment counters
                   5827:                         */
                   5828:                        if (wired)
                   5829:                                pm->pm_stats.wired_count++;
1.1       deraadt  5830:                }
                   5831:        }
                   5832:        if (pv != NULL)
1.115     pk       5833:                pteproto &= ~(pv_link4m(pv, pm, va, (pteproto & SRMMU_PG_C) == 0));
1.1       deraadt  5834:
                   5835:        /*
1.72      pk       5836:         * Update PTEs, flush TLB as necessary.
1.1       deraadt  5837:         */
1.72      pk       5838:        if (pm->pm_ctx) {
1.71      pk       5839:                setcontext4m(pm->pm_ctxnum);
1.72      pk       5840:                tlb_flush_page(va);
                   5841:        }
                   5842:        setpgt4m(&sp->sg_pte[VA_SUN4M_VPG(va)], pteproto);
1.1       deraadt  5843:
                   5844:        splx(s);
                   5845: }
1.55      pk       5846: #endif /* sun4m */
1.1       deraadt  5847:
                   5848: /*
                   5849:  * Change the wiring attribute for a map/virtual-address pair.
                   5850:  */
                   5851: /* ARGSUSED */
                   5852: void
                   5853: pmap_change_wiring(pm, va, wired)
                   5854:        struct pmap *pm;
1.124   ! pk       5855:        vaddr_t va;
1.1       deraadt  5856:        int wired;
                   5857: {
                   5858:
                   5859:        pmap_stats.ps_useless_changewire++;
                   5860: }
                   5861:
                   5862: /*
                   5863:  * Extract the physical page address associated
                   5864:  * with the given map/virtual_address pair.
                   5865:  * GRR, the vm code knows; we should not have to do this!
                   5866:  */
1.55      pk       5867:
                   5868: #if defined(SUN4) || defined(SUN4C)
1.124   ! pk       5869: paddr_t
1.55      pk       5870: pmap_extract4_4c(pm, va)
1.124   ! pk       5871:        struct pmap *pm;
        !          5872:        vaddr_t va;
1.1       deraadt  5873: {
1.124   ! pk       5874:        int tpte;
        !          5875:        int vr, vs;
1.43      pk       5876:        struct regmap *rp;
                   5877:        struct segmap *sp;
1.1       deraadt  5878:
                   5879:        if (pm == NULL) {
1.90      pk       5880: #ifdef DEBUG
                   5881:                if (pmapdebug & PDB_FOLLOW)
                   5882:                        printf("pmap_extract: null pmap\n");
                   5883: #endif
1.1       deraadt  5884:                return (0);
                   5885:        }
1.43      pk       5886:        vr = VA_VREG(va);
                   5887:        vs = VA_VSEG(va);
                   5888:        rp = &pm->pm_regmap[vr];
                   5889:        if (rp->rg_segmap == NULL) {
1.90      pk       5890: #ifdef DEBUG
                   5891:                if (pmapdebug & PDB_FOLLOW)
                   5892:                        printf("pmap_extract: invalid segment (%d)\n", vr);
                   5893: #endif
1.43      pk       5894:                return (0);
                   5895:        }
                   5896:        sp = &rp->rg_segmap[vs];
                   5897:
                   5898:        if (sp->sg_pmeg != seginval) {
1.124   ! pk       5899:                int ctx = getcontext4();
1.1       deraadt  5900:
1.43      pk       5901:                if (CTX_USABLE(pm,rp)) {
1.61      pk       5902:                        CHANGE_CONTEXTS(ctx, pm->pm_ctxnum);
1.55      pk       5903:                        tpte = getpte4(va);
1.1       deraadt  5904:                } else {
1.61      pk       5905:                        CHANGE_CONTEXTS(ctx, 0);
1.69      pk       5906:                        if (HASSUN4_MMU3L)
1.43      pk       5907:                                setregmap(0, tregion);
                   5908:                        setsegmap(0, sp->sg_pmeg);
1.55      pk       5909:                        tpte = getpte4(VA_VPG(va) << PGSHIFT);
1.1       deraadt  5910:                }
1.71      pk       5911:                setcontext4(ctx);
1.1       deraadt  5912:        } else {
1.124   ! pk       5913:                int *pte = sp->sg_pte;
1.1       deraadt  5914:
                   5915:                if (pte == NULL) {
1.90      pk       5916: #ifdef DEBUG
                   5917:                        if (pmapdebug & PDB_FOLLOW)
                   5918:                                printf("pmap_extract: invalid segment\n");
                   5919: #endif
1.1       deraadt  5920:                        return (0);
                   5921:                }
                   5922:                tpte = pte[VA_VPG(va)];
                   5923:        }
                   5924:        if ((tpte & PG_V) == 0) {
1.90      pk       5925: #ifdef DEBUG
                   5926:                if (pmapdebug & PDB_FOLLOW)
                   5927:                        printf("pmap_extract: invalid pte\n");
                   5928: #endif
1.1       deraadt  5929:                return (0);
                   5930:        }
                   5931:        tpte &= PG_PFNUM;
1.60      pk       5932:        tpte = tpte;
1.1       deraadt  5933:        return ((tpte << PGSHIFT) | (va & PGOFSET));
                   5934: }
1.55      pk       5935: #endif /*4,4c*/
                   5936:
                   5937: #if defined(SUN4M)             /* 4m version of pmap_extract */
                   5938: /*
                   5939:  * Extract the physical page address associated
                   5940:  * with the given map/virtual_address pair.
                   5941:  * GRR, the vm code knows; we should not have to do this!
                   5942:  */
1.124   ! pk       5943: paddr_t
1.55      pk       5944: pmap_extract4m(pm, va)
1.124   ! pk       5945:        struct pmap *pm;
        !          5946:        vaddr_t va;
1.55      pk       5947: {
1.90      pk       5948:        struct regmap *rm;
                   5949:        struct segmap *sm;
                   5950:        int pte;
1.55      pk       5951:
                   5952:        if (pm == NULL) {
1.90      pk       5953: #ifdef DEBUG
                   5954:                if (pmapdebug & PDB_FOLLOW)
                   5955:                        printf("pmap_extract: null pmap\n");
                   5956: #endif
1.55      pk       5957:                return (0);
                   5958:        }
                   5959:
1.113     pk       5960:        if ((rm = pm->pm_regmap) == NULL) {
1.90      pk       5961: #ifdef DEBUG
                   5962:                if (pmapdebug & PDB_FOLLOW)
1.92      pk       5963:                        printf("pmap_extract: no regmap entry");
1.90      pk       5964: #endif
                   5965:                return (0);
                   5966:        }
1.113     pk       5967:
                   5968:        rm += VA_VREG(va);
                   5969:        if ((sm = rm->rg_segmap) == NULL) {
1.90      pk       5970: #ifdef DEBUG
                   5971:                if (pmapdebug & PDB_FOLLOW)
1.92      pk       5972:                        panic("pmap_extract: no segmap");
1.90      pk       5973: #endif
                   5974:                return (0);
                   5975:        }
1.113     pk       5976:
                   5977:        sm += VA_VSEG(va);
                   5978:        if (sm->sg_pte == NULL) {
                   5979: #ifdef DEBUG
                   5980:                if (pmapdebug & PDB_FOLLOW)
                   5981:                        panic("pmap_extract: no ptes");
                   5982: #endif
                   5983:                return (0);
                   5984:        }
                   5985:
1.90      pk       5986:        pte = sm->sg_pte[VA_SUN4M_VPG(va)];
                   5987:        if ((pte & SRMMU_TETYPE) != SRMMU_TEPTE) {
1.72      pk       5988: #ifdef DEBUG
1.90      pk       5989:                if (pmapdebug & PDB_FOLLOW)
                   5990:                        printf("pmap_extract: invalid pte of type %d\n",
                   5991:                               pte & SRMMU_TETYPE);
                   5992: #endif
1.72      pk       5993:                return (0);
                   5994:        }
1.55      pk       5995:
1.79      pk       5996:        return (ptoa((pte & SRMMU_PPNMASK) >> SRMMU_PPNSHIFT) | VA_OFF(va));
1.55      pk       5997: }
                   5998: #endif /* sun4m */
1.1       deraadt  5999:
                   6000: /*
                   6001:  * Copy the range specified by src_addr/len
                   6002:  * from the source map to the range dst_addr/len
                   6003:  * in the destination map.
                   6004:  *
                   6005:  * This routine is only advisory and need not do anything.
                   6006:  */
                   6007: /* ARGSUSED */
1.94      pk       6008: int pmap_copy_disabled=0;
1.1       deraadt  6009: void
                   6010: pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
                   6011:        struct pmap *dst_pmap, *src_pmap;
1.124   ! pk       6012:        vaddr_t dst_addr;
        !          6013:        vsize_t len;
        !          6014:        vaddr_t src_addr;
1.1       deraadt  6015: {
1.94      pk       6016: #if notyet
1.92      pk       6017:        struct regmap *rm;
                   6018:        struct segmap *sm;
                   6019:
1.94      pk       6020:        if (pmap_copy_disabled)
                   6021:                return;
1.92      pk       6022: #ifdef DIAGNOSTIC
                   6023:        if (VA_OFF(src_addr) != 0)
                   6024:                printf("pmap_copy: addr not page aligned: 0x%lx\n", src_addr);
                   6025:        if ((len & (NBPG-1)) != 0)
                   6026:                printf("pmap_copy: length not page aligned: 0x%lx\n", len);
                   6027: #endif
                   6028:
                   6029:        if (src_pmap == NULL)
                   6030:                return;
                   6031:
1.55      pk       6032:        if (CPU_ISSUN4M) {
1.92      pk       6033:                int i, npg, pte;
1.124   ! pk       6034:                paddr_t pa;
1.92      pk       6035:
                   6036:                npg = len >> PGSHIFT;
                   6037:                for (i = 0; i < npg; i++) {
                   6038:                        tlb_flush_page(src_addr);
1.115     pk       6039:                        if ((rm = src_pmap->pm_regmap) == NULL)
                   6040:                                continue;
                   6041:                        rm += VA_VREG(src_addr);
                   6042:
                   6043:                        if ((sm = rm->rg_segmap) == NULL)
1.92      pk       6044:                                continue;
1.115     pk       6045:                        sm += VA_VSEG(src_addr);
                   6046:                        if (sm->sg_npte == 0)
1.92      pk       6047:                                continue;
1.115     pk       6048:
1.92      pk       6049:                        pte = sm->sg_pte[VA_SUN4M_VPG(src_addr)];
                   6050:                        if ((pte & SRMMU_TETYPE) != SRMMU_TEPTE)
                   6051:                                continue;
                   6052:
                   6053:                        pa = ptoa((pte & SRMMU_PPNMASK) >> SRMMU_PPNSHIFT);
1.55      pk       6054:                        pmap_enter(dst_pmap, dst_addr,
1.92      pk       6055:                                   pa,
1.60      pk       6056:                                   (pte & PPROT_WRITE)
1.92      pk       6057:                                        ? (VM_PROT_WRITE | VM_PROT_READ)
1.60      pk       6058:                                        : VM_PROT_READ,
1.55      pk       6059:                                   0);
                   6060:                        src_addr += NBPG;
                   6061:                        dst_addr += NBPG;
                   6062:                }
                   6063:        }
                   6064: #endif
1.1       deraadt  6065: }
                   6066:
                   6067: /*
                   6068:  * Require that all active physical maps contain no
                   6069:  * incorrect entries NOW.  [This update includes
                   6070:  * forcing updates of any address map caching.]
                   6071:  */
                   6072: void
                   6073: pmap_update()
                   6074: {
1.55      pk       6075: #if defined(SUN4M)
                   6076:        if (CPU_ISSUN4M)
                   6077:                tlb_flush_all();        /* %%%: Extreme Paranoia?  */
                   6078: #endif
1.1       deraadt  6079: }
                   6080:
                   6081: /*
                   6082:  * Garbage collects the physical map system for
                   6083:  * pages which are no longer used.
                   6084:  * Success need not be guaranteed -- that is, there
                   6085:  * may well be pages which are not referenced, but
                   6086:  * others may be collected.
                   6087:  * Called by the pageout daemon when pages are scarce.
                   6088:  */
                   6089: /* ARGSUSED */
                   6090: void
                   6091: pmap_collect(pm)
                   6092:        struct pmap *pm;
                   6093: {
                   6094: }
                   6095:
1.55      pk       6096: #if defined(SUN4) || defined(SUN4C)
                   6097:
1.1       deraadt  6098: /*
                   6099:  * Clear the modify bit for the given physical page.
                   6100:  */
                   6101: void
1.55      pk       6102: pmap_clear_modify4_4c(pa)
1.124   ! pk       6103:        paddr_t pa;
1.1       deraadt  6104: {
1.124   ! pk       6105:        struct pvlist *pv;
1.1       deraadt  6106:
1.82      pk       6107:        if ((pa & (PMAP_TNC_4 & ~PMAP_NC)) == 0 && managed(pa)) {
1.1       deraadt  6108:                pv = pvhead(pa);
1.58      pk       6109:                (void) pv_syncflags4_4c(pv);
1.1       deraadt  6110:                pv->pv_flags &= ~PV_MOD;
                   6111:        }
                   6112: }
                   6113:
                   6114: /*
                   6115:  * Tell whether the given physical page has been modified.
                   6116:  */
                   6117: int
1.55      pk       6118: pmap_is_modified4_4c(pa)
1.124   ! pk       6119:        paddr_t pa;
1.1       deraadt  6120: {
1.124   ! pk       6121:        struct pvlist *pv;
1.1       deraadt  6122:
1.82      pk       6123:        if ((pa & (PMAP_TNC_4 & ~PMAP_NC)) == 0 && managed(pa)) {
1.1       deraadt  6124:                pv = pvhead(pa);
1.58      pk       6125:                if (pv->pv_flags & PV_MOD || pv_syncflags4_4c(pv) & PV_MOD)
1.1       deraadt  6126:                        return (1);
                   6127:        }
                   6128:        return (0);
                   6129: }
                   6130:
                   6131: /*
                   6132:  * Clear the reference bit for the given physical page.
                   6133:  */
                   6134: void
1.55      pk       6135: pmap_clear_reference4_4c(pa)
1.124   ! pk       6136:        paddr_t pa;
1.1       deraadt  6137: {
1.124   ! pk       6138:        struct pvlist *pv;
1.1       deraadt  6139:
1.82      pk       6140:        if ((pa & (PMAP_TNC_4 & ~PMAP_NC)) == 0 && managed(pa)) {
1.1       deraadt  6141:                pv = pvhead(pa);
1.58      pk       6142:                (void) pv_syncflags4_4c(pv);
1.1       deraadt  6143:                pv->pv_flags &= ~PV_REF;
                   6144:        }
                   6145: }
                   6146:
                   6147: /*
                   6148:  * Tell whether the given physical page has been referenced.
                   6149:  */
                   6150: int
1.55      pk       6151: pmap_is_referenced4_4c(pa)
1.124   ! pk       6152:        paddr_t pa;
1.1       deraadt  6153: {
1.124   ! pk       6154:        struct pvlist *pv;
1.1       deraadt  6155:
1.82      pk       6156:        if ((pa & (PMAP_TNC_4 & ~PMAP_NC)) == 0 && managed(pa)) {
1.1       deraadt  6157:                pv = pvhead(pa);
1.58      pk       6158:                if (pv->pv_flags & PV_REF || pv_syncflags4_4c(pv) & PV_REF)
1.1       deraadt  6159:                        return (1);
                   6160:        }
                   6161:        return (0);
                   6162: }
1.55      pk       6163: #endif /*4,4c*/
                   6164:
1.58      pk       6165: #if defined(SUN4M)
                   6166:
                   6167: /*
                   6168:  * 4m versions of bit test/set routines
                   6169:  *
                   6170:  * Note that the 4m-specific routines should eventually service these
                   6171:  * requests from their page tables, and the whole pvlist bit mess should
                   6172:  * be dropped for the 4m (unless this causes a performance hit from
                   6173:  * tracing down pagetables/regmap/segmaps).
                   6174:  */
                   6175:
1.55      pk       6176: /*
                   6177:  * Clear the modify bit for the given physical page.
                   6178:  */
                   6179: void
                   6180: pmap_clear_modify4m(pa)           /* XXX %%%: Should service from swpagetbl for 4m */
1.124   ! pk       6181:        paddr_t pa;
1.55      pk       6182: {
1.124   ! pk       6183:        struct pvlist *pv;
1.55      pk       6184:
1.82      pk       6185:        if ((pa & (PMAP_TNC_SRMMU & ~PMAP_NC)) == 0 && managed(pa)) {
1.55      pk       6186:                pv = pvhead(pa);
1.58      pk       6187:                (void) pv_syncflags4m(pv);
1.55      pk       6188:                pv->pv_flags &= ~PV_MOD4M;
                   6189:        }
                   6190: }
                   6191:
                   6192: /*
                   6193:  * Tell whether the given physical page has been modified.
                   6194:  */
                   6195: int
                   6196: pmap_is_modified4m(pa) /* Test performance with SUN4M && SUN4/4C. XXX */
1.124   ! pk       6197:        paddr_t pa;
1.55      pk       6198: {
1.124   ! pk       6199:        struct pvlist *pv;
1.55      pk       6200:
1.82      pk       6201:        if ((pa & (PMAP_TNC_SRMMU & ~PMAP_NC)) == 0 && managed(pa)) {
1.55      pk       6202:                pv = pvhead(pa);
                   6203:                if (pv->pv_flags & PV_MOD4M || pv_syncflags4m(pv) & PV_MOD4M)
                   6204:                        return(1);
                   6205:        }
                   6206:        return (0);
                   6207: }
                   6208:
                   6209: /*
                   6210:  * Clear the reference bit for the given physical page.
                   6211:  */
                   6212: void
                   6213: pmap_clear_reference4m(pa)
1.124   ! pk       6214:        paddr_t pa;
1.55      pk       6215: {
1.124   ! pk       6216:        struct pvlist *pv;
1.55      pk       6217:
1.82      pk       6218:        if ((pa & (PMAP_TNC_SRMMU & ~PMAP_NC)) == 0 && managed(pa)) {
1.55      pk       6219:                pv = pvhead(pa);
1.58      pk       6220:                (void) pv_syncflags4m(pv);
1.55      pk       6221:                pv->pv_flags &= ~PV_REF4M;
                   6222:        }
                   6223: }
                   6224:
                   6225: /*
                   6226:  * Tell whether the given physical page has been referenced.
                   6227:  */
                   6228: int
                   6229: pmap_is_referenced4m(pa)
1.124   ! pk       6230:        paddr_t pa;
1.55      pk       6231: {
1.124   ! pk       6232:        struct pvlist *pv;
1.55      pk       6233:
1.82      pk       6234:        if ((pa & (PMAP_TNC_SRMMU & ~PMAP_NC)) == 0 && managed(pa)) {
1.55      pk       6235:                pv = pvhead(pa);
                   6236:                if (pv->pv_flags & PV_REF4M || pv_syncflags4m(pv) & PV_REF4M)
                   6237:                        return(1);
                   6238:        }
                   6239:        return (0);
                   6240: }
                   6241: #endif /* 4m */
1.1       deraadt  6242:
                   6243: /*
                   6244:  * Make the specified pages (by pmap, offset) pageable (or not) as requested.
                   6245:  *
                   6246:  * A page which is not pageable may not take a fault; therefore, its page
                   6247:  * table entry must remain valid for the duration (or at least, the trap
                   6248:  * handler must not call vm_fault).
                   6249:  *
                   6250:  * This routine is merely advisory; pmap_enter will specify that these pages
                   6251:  * are to be wired down (or not) as appropriate.
                   6252:  */
                   6253: /* ARGSUSED */
                   6254: void
                   6255: pmap_pageable(pm, start, end, pageable)
                   6256:        struct pmap *pm;
1.124   ! pk       6257:        vaddr_t start, end;
1.1       deraadt  6258:        int pageable;
                   6259: {
1.2       deraadt  6260: }
                   6261:
                   6262: /*
1.1       deraadt  6263:  * Fill the given MI physical page with zero bytes.
                   6264:  *
                   6265:  * We avoid stomping on the cache.
                   6266:  * XXX might be faster to use destination's context and allow cache to fill?
                   6267:  */
1.55      pk       6268:
                   6269: #if defined(SUN4) || defined(SUN4C)
                   6270:
1.1       deraadt  6271: void
1.55      pk       6272: pmap_zero_page4_4c(pa)
1.124   ! pk       6273:        paddr_t pa;
1.1       deraadt  6274: {
1.124   ! pk       6275:        caddr_t va;
        !          6276:        int pte;
1.1       deraadt  6277:
1.82      pk       6278:        if (((pa & (PMAP_TNC_4 & ~PMAP_NC)) == 0) && managed(pa)) {
1.1       deraadt  6279:                /*
                   6280:                 * The following might not be necessary since the page
                   6281:                 * is being cleared because it is about to be allocated,
                   6282:                 * i.e., is in use by no one.
                   6283:                 */
1.69      pk       6284:                pv_flushcache(pvhead(pa));
1.60      pk       6285:        }
                   6286:        pte = PG_V | PG_S | PG_W | PG_NC | (atop(pa) & PG_PFNUM);
1.1       deraadt  6287:
                   6288:        va = vpage[0];
1.55      pk       6289:        setpte4(va, pte);
1.1       deraadt  6290:        qzero(va, NBPG);
1.55      pk       6291:        setpte4(va, 0);
1.1       deraadt  6292: }
                   6293:
                   6294: /*
                   6295:  * Copy the given MI physical source page to its destination.
                   6296:  *
                   6297:  * We avoid stomping on the cache as above (with same `XXX' note).
                   6298:  * We must first flush any write-back cache for the source page.
                   6299:  * We go ahead and stomp on the kernel's virtual cache for the
                   6300:  * source page, since the cache can read memory MUCH faster than
                   6301:  * the processor.
                   6302:  */
                   6303: void
1.55      pk       6304: pmap_copy_page4_4c(src, dst)
1.124   ! pk       6305:        paddr_t src, dst;
1.1       deraadt  6306: {
1.124   ! pk       6307:        caddr_t sva, dva;
        !          6308:        int spte, dpte;
1.1       deraadt  6309:
                   6310:        if (managed(src)) {
1.69      pk       6311:                if (CACHEINFO.c_vactype == VAC_WRITEBACK)
1.1       deraadt  6312:                        pv_flushcache(pvhead(src));
1.60      pk       6313:        }
                   6314:        spte = PG_V | PG_S | (atop(src) & PG_PFNUM);
1.1       deraadt  6315:
                   6316:        if (managed(dst)) {
                   6317:                /* similar `might not be necessary' comment applies */
1.69      pk       6318:                if (CACHEINFO.c_vactype != VAC_NONE)
1.1       deraadt  6319:                        pv_flushcache(pvhead(dst));
1.60      pk       6320:        }
                   6321:        dpte = PG_V | PG_S | PG_W | PG_NC | (atop(dst) & PG_PFNUM);
1.1       deraadt  6322:
                   6323:        sva = vpage[0];
                   6324:        dva = vpage[1];
1.55      pk       6325:        setpte4(sva, spte);
                   6326:        setpte4(dva, dpte);
1.1       deraadt  6327:        qcopy(sva, dva, NBPG);  /* loads cache, so we must ... */
1.69      pk       6328:        cache_flush_page((int)sva);
1.55      pk       6329:        setpte4(sva, 0);
                   6330:        setpte4(dva, 0);
                   6331: }
                   6332: #endif /* 4, 4c */
                   6333:
                   6334: #if defined(SUN4M)             /* Sun4M version of copy/zero routines */
                   6335: /*
                   6336:  * Fill the given MI physical page with zero bytes.
                   6337:  *
                   6338:  * We avoid stomping on the cache.
                   6339:  * XXX might be faster to use destination's context and allow cache to fill?
                   6340:  */
                   6341: void
                   6342: pmap_zero_page4m(pa)
1.124   ! pk       6343:        paddr_t pa;
1.55      pk       6344: {
1.124   ! pk       6345:        caddr_t va;
        !          6346:        int pte;
1.55      pk       6347:
1.82      pk       6348:        if (((pa & (PMAP_TNC_SRMMU & ~PMAP_NC)) == 0) && managed(pa)) {
1.55      pk       6349:                /*
                   6350:                 * The following might not be necessary since the page
                   6351:                 * is being cleared because it is about to be allocated,
                   6352:                 * i.e., is in use by no one.
                   6353:                 */
1.69      pk       6354:                if (CACHEINFO.c_vactype != VAC_NONE)
1.55      pk       6355:                        pv_flushcache(pvhead(pa));
1.60      pk       6356:        }
1.68      abrown   6357:        pte = (SRMMU_TEPTE | PPROT_S | PPROT_WRITE |
                   6358:               (atop(pa) << SRMMU_PPNSHIFT));
1.69      pk       6359:        if (cpuinfo.flags & CPUFLG_CACHE_MANDATORY)
1.68      abrown   6360:                pte |= SRMMU_PG_C;
                   6361:        else
                   6362:                pte &= ~SRMMU_PG_C;
                   6363:
1.55      pk       6364:        va = vpage[0];
1.101     pk       6365:        *vpage_pte[0] = pte;
1.55      pk       6366:        qzero(va, NBPG);
1.101     pk       6367:        /* Remove temporary mapping */
                   6368:        tlb_flush_page((int)va);
                   6369:        *vpage_pte[0] = SRMMU_TEINVALID;
1.55      pk       6370: }
                   6371:
                   6372: /*
                   6373:  * Copy the given MI physical source page to its destination.
                   6374:  *
                   6375:  * We avoid stomping on the cache as above (with same `XXX' note).
                   6376:  * We must first flush any write-back cache for the source page.
                   6377:  * We go ahead and stomp on the kernel's virtual cache for the
                   6378:  * source page, since the cache can read memory MUCH faster than
                   6379:  * the processor.
                   6380:  */
                   6381: void
                   6382: pmap_copy_page4m(src, dst)
1.124   ! pk       6383:        paddr_t src, dst;
1.55      pk       6384: {
1.124   ! pk       6385:        caddr_t sva, dva;
        !          6386:        int spte, dpte;
1.55      pk       6387:
                   6388:        if (managed(src)) {
1.69      pk       6389:                if (CACHEINFO.c_vactype == VAC_WRITEBACK)
1.55      pk       6390:                        pv_flushcache(pvhead(src));
1.60      pk       6391:        }
                   6392:        spte = SRMMU_TEPTE | SRMMU_PG_C | PPROT_S |
                   6393:                (atop(src) << SRMMU_PPNSHIFT);
1.55      pk       6394:
                   6395:        if (managed(dst)) {
                   6396:                /* similar `might not be necessary' comment applies */
1.69      pk       6397:                if (CACHEINFO.c_vactype != VAC_NONE)
1.55      pk       6398:                        pv_flushcache(pvhead(dst));
1.60      pk       6399:        }
1.68      abrown   6400:        dpte = (SRMMU_TEPTE | PPROT_S | PPROT_WRITE |
                   6401:               (atop(dst) << SRMMU_PPNSHIFT));
1.69      pk       6402:        if (cpuinfo.flags & CPUFLG_CACHE_MANDATORY)
1.68      abrown   6403:                dpte |= SRMMU_PG_C;
                   6404:        else
                   6405:                dpte &= ~SRMMU_PG_C;
1.60      pk       6406:
1.55      pk       6407:        sva = vpage[0];
                   6408:        dva = vpage[1];
1.101     pk       6409:        *vpage_pte[0] = spte;
                   6410:        *vpage_pte[1] = dpte;
1.55      pk       6411:        qcopy(sva, dva, NBPG);  /* loads cache, so we must ... */
1.69      pk       6412:        cache_flush_page((int)sva);
1.101     pk       6413:        *vpage_pte[0] = SRMMU_TEINVALID;
                   6414:        *vpage_pte[1] = SRMMU_TEINVALID;
                   6415:        tlb_flush_page((int)sva);
                   6416:        tlb_flush_page((int)dva);
1.1       deraadt  6417: }
1.55      pk       6418: #endif /* Sun4M */
1.1       deraadt  6419:
                   6420: /*
                   6421:  * Turn a cdevsw d_mmap value into a byte address for pmap_enter.
                   6422:  * XXX this should almost certainly be done differently, and
                   6423:  *     elsewhere, or even not at all
                   6424:  */
1.124   ! pk       6425: paddr_t
1.1       deraadt  6426: pmap_phys_address(x)
                   6427:        int x;
                   6428: {
                   6429:
1.124   ! pk       6430:        return ((paddr_t)x);
1.1       deraadt  6431: }
                   6432:
                   6433: /*
                   6434:  * Turn off cache for a given (va, number of pages).
                   6435:  *
                   6436:  * We just assert PG_NC for each PTE; the addresses must reside
                   6437:  * in locked kernel space.  A cache flush is also done.
                   6438:  */
1.53      christos 6439: void
1.1       deraadt  6440: kvm_uncache(va, npages)
1.115     pk       6441:        caddr_t va;
                   6442:        int npages;
1.1       deraadt  6443: {
1.115     pk       6444:        int pte;
1.124   ! pk       6445:        paddr_t pa;
1.88      pk       6446:
1.55      pk       6447:        if (CPU_ISSUN4M) {
                   6448: #if defined(SUN4M)
1.100     pk       6449:                int ctx = getcontext4m();
                   6450:
                   6451:                setcontext4m(0);
1.55      pk       6452:                for (; --npages >= 0; va += NBPG) {
1.124   ! pk       6453:                        pte = getpte4m((vaddr_t) va);
1.55      pk       6454:                        if ((pte & SRMMU_TETYPE) != SRMMU_TEPTE)
                   6455:                                panic("kvm_uncache: table entry not pte");
1.115     pk       6456:
                   6457:                        pa = ptoa((pte & SRMMU_PPNMASK) >> SRMMU_PPNSHIFT);
                   6458:                        if ((pte & SRMMU_PGTYPE) == PG_SUN4M_OBMEM &&
                   6459:                            managed(pa)) {
                   6460:                                pv_changepte4m(pvhead(pa), 0, SRMMU_PG_C);
                   6461:                        }
1.116     pk       6462:                        pte &= ~SRMMU_PG_C;
1.124   ! pk       6463:                        setpte4m((vaddr_t) va, pte);
1.116     pk       6464:                        if ((pte & SRMMU_PGTYPE) == PG_SUN4M_OBMEM)
                   6465:                                cache_flush_page((int)va);
                   6466:
1.55      pk       6467:                }
1.100     pk       6468:                setcontext4m(ctx);
1.55      pk       6469: #endif
                   6470:        } else {
                   6471: #if defined(SUN4) || defined(SUN4C)
                   6472:                for (; --npages >= 0; va += NBPG) {
                   6473:                        pte = getpte4(va);
                   6474:                        if ((pte & PG_V) == 0)
                   6475:                                panic("kvm_uncache !pg_v");
1.115     pk       6476:
                   6477:                        pa = ptoa(pte & PG_PFNUM);
                   6478:                        if ((pte & PG_TYPE) == PG_OBMEM &&
                   6479:                            managed(pa)) {
                   6480:                                pv_changepte4_4c(pvhead(pa), PG_NC, 0);
                   6481:                        }
1.116     pk       6482:                        pte |= PG_NC;
                   6483:                        setpte4(va, pte);
                   6484:                        if ((pte & PG_TYPE) == PG_OBMEM)
                   6485:                                cache_flush_page((int)va);
1.55      pk       6486:                }
                   6487: #endif
1.1       deraadt  6488:        }
1.21      deraadt  6489: }
                   6490:
1.46      pk       6491: /*
                   6492:  * Turn on IO cache for a given (va, number of pages).
                   6493:  *
                   6494:  * We just assert PG_NC for each PTE; the addresses must reside
                   6495:  * in locked kernel space.  A cache flush is also done.
                   6496:  */
1.53      christos 6497: void
1.46      pk       6498: kvm_iocache(va, npages)
1.124   ! pk       6499:        caddr_t va;
        !          6500:        int npages;
1.46      pk       6501: {
                   6502:
1.55      pk       6503: #ifdef SUN4M
                   6504:        if (CPU_ISSUN4M) /* %%%: Implement! */
                   6505:                panic("kvm_iocache: 4m iocache not implemented");
                   6506: #endif
                   6507: #if defined(SUN4) || defined(SUN4C)
1.46      pk       6508:        for (; --npages >= 0; va += NBPG) {
1.124   ! pk       6509:                int pte = getpte4(va);
1.46      pk       6510:                if ((pte & PG_V) == 0)
                   6511:                        panic("kvm_iocache !pg_v");
                   6512:                pte |= PG_IOC;
1.55      pk       6513:                setpte4(va, pte);
1.46      pk       6514:        }
1.55      pk       6515: #endif
1.46      pk       6516: }
                   6517:
1.21      deraadt  6518: int
                   6519: pmap_count_ptes(pm)
1.124   ! pk       6520:        struct pmap *pm;
1.21      deraadt  6521: {
1.124   ! pk       6522:        int idx, total;
        !          6523:        struct regmap *rp;
        !          6524:        struct segmap *sp;
1.21      deraadt  6525:
1.43      pk       6526:        if (pm == pmap_kernel()) {
                   6527:                rp = &pm->pm_regmap[NUREG];
                   6528:                idx = NKREG;
                   6529:        } else {
                   6530:                rp = pm->pm_regmap;
                   6531:                idx = NUREG;
                   6532:        }
1.21      deraadt  6533:        for (total = 0; idx;)
1.43      pk       6534:                if ((sp = rp[--idx].rg_segmap) != NULL)
                   6535:                        total += sp->sg_npte;
1.21      deraadt  6536:        pm->pm_stats.resident_count = total;
                   6537:        return (total);
1.24      pk       6538: }
                   6539:
                   6540: /*
1.51      gwr      6541:  * Find first virtual address >= *va that is
                   6542:  * least likely to cause cache aliases.
                   6543:  * (This will just seg-align mappings.)
1.24      pk       6544:  */
1.51      gwr      6545: void
1.52      pk       6546: pmap_prefer(foff, vap)
1.124   ! pk       6547:        vaddr_t foff;
        !          6548:        vaddr_t *vap;
1.24      pk       6549: {
1.124   ! pk       6550:        vaddr_t va = *vap;
        !          6551:        long d, m;
1.52      pk       6552:
                   6553:        if (VA_INHOLE(va))
                   6554:                va = MMU_HOLE_END;
1.24      pk       6555:
1.48      pk       6556:        m = CACHE_ALIAS_DIST;
                   6557:        if (m == 0)             /* m=0 => no cache aliasing */
1.51      gwr      6558:                return;
1.24      pk       6559:
1.52      pk       6560:        d = foff - va;
                   6561:        d &= (m - 1);
                   6562:        *vap = va + d;
1.23      deraadt  6563: }
                   6564:
1.53      christos 6565: void
1.23      deraadt  6566: pmap_redzone()
                   6567: {
1.100     pk       6568:        pmap_remove(pmap_kernel(), KERNBASE, KERNBASE+NBPG);
1.104     thorpej  6569: }
                   6570:
                   6571: /*
                   6572:  * Activate the address space for the specified process.  If the
                   6573:  * process is the current process, load the new MMU context.
                   6574:  */
                   6575: void
                   6576: pmap_activate(p)
                   6577:        struct proc *p;
                   6578: {
                   6579:        pmap_t pmap = p->p_vmspace->vm_map.pmap;
                   6580:        int s;
                   6581:
                   6582:        /*
                   6583:         * This is essentially the same thing that happens in cpu_switch()
                   6584:         * when the newly selected process is about to run, except that we
                   6585:         * have to make sure to clean the register windows before we set
                   6586:         * the new context.
                   6587:         */
                   6588:
                   6589:        s = splpmap();
                   6590:        if (p == curproc) {
                   6591:                write_user_windows();
1.123     pk       6592:                if (pmap->pm_ctx == NULL) {
1.104     thorpej  6593:                        ctx_alloc(pmap);        /* performs setcontext() */
1.123     pk       6594:                } else {
                   6595:                        /* Do any cache flush needed on context switch */
                   6596:                        (*cpuinfo.pure_vcache_flush)();
1.104     thorpej  6597:                        setcontext(pmap->pm_ctxnum);
1.123     pk       6598:                }
1.104     thorpej  6599:        }
                   6600:        splx(s);
                   6601: }
                   6602:
                   6603: /*
                   6604:  * Deactivate the address space of the specified process.
                   6605:  */
                   6606: void
                   6607: pmap_deactivate(p)
                   6608:        struct proc *p;
                   6609: {
1.1       deraadt  6610: }
1.43      pk       6611:
                   6612: #ifdef DEBUG
                   6613: /*
                   6614:  * Check consistency of a pmap (time consuming!).
                   6615:  */
1.53      christos 6616: void
1.43      pk       6617: pm_check(s, pm)
                   6618:        char *s;
                   6619:        struct pmap *pm;
                   6620: {
                   6621:        if (pm == pmap_kernel())
                   6622:                pm_check_k(s, pm);
                   6623:        else
                   6624:                pm_check_u(s, pm);
                   6625: }
                   6626:
1.53      christos 6627: void
1.43      pk       6628: pm_check_u(s, pm)
                   6629:        char *s;
                   6630:        struct pmap *pm;
                   6631: {
                   6632:        struct regmap *rp;
                   6633:        struct segmap *sp;
                   6634:        int n, vs, vr, j, m, *pte;
                   6635:
1.55      pk       6636:        if (pm->pm_regmap == NULL)
1.72      pk       6637:                panic("%s: CHK(pmap %p): no region mapping", s, pm);
1.55      pk       6638:
                   6639: #if defined(SUN4M)
                   6640:        if (CPU_ISSUN4M &&
                   6641:            (pm->pm_reg_ptps == NULL ||
                   6642:             pm->pm_reg_ptps_pa != VA2PA((caddr_t)pm->pm_reg_ptps)))
1.72      pk       6643:                panic("%s: CHK(pmap %p): no SRMMU region table or bad pa: "
                   6644:                      "tblva=%p, tblpa=0x%x",
                   6645:                        s, pm, pm->pm_reg_ptps, pm->pm_reg_ptps_pa);
1.55      pk       6646:
                   6647:        if (CPU_ISSUN4M && pm->pm_ctx != NULL &&
1.69      pk       6648:            (cpuinfo.ctx_tbl[pm->pm_ctxnum] != ((VA2PA((caddr_t)pm->pm_reg_ptps)
1.55      pk       6649:                                              >> SRMMU_PPNPASHIFT) |
                   6650:                                             SRMMU_TEPTD)))
1.91      fair     6651:            panic("%s: CHK(pmap %p): SRMMU region table at 0x%x not installed "
1.55      pk       6652:                  "for context %d", s, pm, pm->pm_reg_ptps_pa, pm->pm_ctxnum);
                   6653: #endif
                   6654:
1.43      pk       6655:        for (vr = 0; vr < NUREG; vr++) {
                   6656:                rp = &pm->pm_regmap[vr];
                   6657:                if (rp->rg_nsegmap == 0)
                   6658:                        continue;
                   6659:                if (rp->rg_segmap == NULL)
                   6660:                        panic("%s: CHK(vr %d): nsegmap = %d; sp==NULL",
                   6661:                                s, vr, rp->rg_nsegmap);
1.55      pk       6662: #if defined(SUN4M)
                   6663:                if (CPU_ISSUN4M && rp->rg_seg_ptps == NULL)
                   6664:                    panic("%s: CHK(vr %d): nsegmap=%d; no SRMMU segment table",
                   6665:                          s, vr, rp->rg_nsegmap);
                   6666:                if (CPU_ISSUN4M &&
                   6667:                    pm->pm_reg_ptps[vr] != ((VA2PA((caddr_t)rp->rg_seg_ptps) >>
                   6668:                                            SRMMU_PPNPASHIFT) | SRMMU_TEPTD))
                   6669:                    panic("%s: CHK(vr %d): SRMMU segtbl not installed",s,vr);
                   6670: #endif
1.43      pk       6671:                if ((unsigned int)rp < KERNBASE)
1.54      christos 6672:                        panic("%s: rp=%p", s, rp);
1.43      pk       6673:                n = 0;
                   6674:                for (vs = 0; vs < NSEGRG; vs++) {
                   6675:                        sp = &rp->rg_segmap[vs];
                   6676:                        if ((unsigned int)sp < KERNBASE)
1.54      christos 6677:                                panic("%s: sp=%p", s, sp);
1.43      pk       6678:                        if (sp->sg_npte != 0) {
                   6679:                                n++;
                   6680:                                if (sp->sg_pte == NULL)
                   6681:                                        panic("%s: CHK(vr %d, vs %d): npte=%d, "
                   6682:                                           "pte=NULL", s, vr, vs, sp->sg_npte);
1.55      pk       6683: #if defined(SUN4M)
                   6684:                                if (CPU_ISSUN4M &&
                   6685:                                    rp->rg_seg_ptps[vs] !=
                   6686:                                     ((VA2PA((caddr_t)sp->sg_pte)
                   6687:                                        >> SRMMU_PPNPASHIFT) |
                   6688:                                       SRMMU_TEPTD))
                   6689:                                    panic("%s: CHK(vr %d, vs %d): SRMMU page "
                   6690:                                          "table not installed correctly",s,vr,
                   6691:                                          vs);
                   6692: #endif
1.43      pk       6693:                                pte=sp->sg_pte;
                   6694:                                m = 0;
                   6695:                                for (j=0; j<NPTESG; j++,pte++)
1.55      pk       6696:                                    if ((CPU_ISSUN4M
                   6697:                                         ?((*pte & SRMMU_TETYPE) == SRMMU_TEPTE)
                   6698:                                         :(*pte & PG_V)))
                   6699:                                        m++;
1.43      pk       6700:                                if (m != sp->sg_npte)
                   6701:                                    /*if (pmapdebug & 0x10000)*/
1.66      christos 6702:                                        printf("%s: user CHK(vr %d, vs %d): "
1.43      pk       6703:                                            "npte(%d) != # valid(%d)\n",
                   6704:                                                s, vr, vs, sp->sg_npte, m);
                   6705:                        }
                   6706:                }
                   6707:                if (n != rp->rg_nsegmap)
                   6708:                        panic("%s: CHK(vr %d): inconsistent "
                   6709:                                "# of pte's: %d, should be %d",
                   6710:                                s, vr, rp->rg_nsegmap, n);
                   6711:        }
1.53      christos 6712:        return;
1.43      pk       6713: }
                   6714:
1.53      christos 6715: void
1.55      pk       6716: pm_check_k(s, pm)              /* Note: not as extensive as pm_check_u. */
1.43      pk       6717:        char *s;
                   6718:        struct pmap *pm;
                   6719: {
                   6720:        struct regmap *rp;
                   6721:        int vr, vs, n;
                   6722:
1.55      pk       6723:        if (pm->pm_regmap == NULL)
1.122     pk       6724:                panic("%s: CHK(pmap %p): no region mapping", s, pm);
1.55      pk       6725:
                   6726: #if defined(SUN4M)
                   6727:        if (CPU_ISSUN4M &&
                   6728:            (pm->pm_reg_ptps == NULL ||
                   6729:             pm->pm_reg_ptps_pa != VA2PA((caddr_t)pm->pm_reg_ptps)))
1.91      fair     6730:            panic("%s: CHK(pmap %p): no SRMMU region table or bad pa: tblva=%p, tblpa=0x%x",
1.55      pk       6731:                  s, pm, pm->pm_reg_ptps, pm->pm_reg_ptps_pa);
                   6732:
                   6733:        if (CPU_ISSUN4M &&
1.69      pk       6734:            (cpuinfo.ctx_tbl[0] != ((VA2PA((caddr_t)pm->pm_reg_ptps) >>
1.55      pk       6735:                                             SRMMU_PPNPASHIFT) | SRMMU_TEPTD)))
1.91      fair     6736:            panic("%s: CHK(pmap %p): SRMMU region table at 0x%x not installed "
1.55      pk       6737:                  "for context %d", s, pm, pm->pm_reg_ptps_pa, 0);
                   6738: #endif
1.43      pk       6739:        for (vr = NUREG; vr < NUREG+NKREG; vr++) {
                   6740:                rp = &pm->pm_regmap[vr];
                   6741:                if (rp->rg_segmap == NULL)
                   6742:                        panic("%s: CHK(vr %d): nsegmap = %d; sp==NULL",
                   6743:                                s, vr, rp->rg_nsegmap);
                   6744:                if (rp->rg_nsegmap == 0)
                   6745:                        continue;
1.55      pk       6746: #if defined(SUN4M)
                   6747:                if (CPU_ISSUN4M && rp->rg_seg_ptps == NULL)
                   6748:                    panic("%s: CHK(vr %d): nsegmap=%d; no SRMMU segment table",
                   6749:                          s, vr, rp->rg_nsegmap);
                   6750:                if (CPU_ISSUN4M &&
                   6751:                    pm->pm_reg_ptps[vr] != ((VA2PA((caddr_t)rp->rg_seg_ptps) >>
                   6752:                                            SRMMU_PPNPASHIFT) | SRMMU_TEPTD))
                   6753:                    panic("%s: CHK(vr %d): SRMMU segtbl not installed",s,vr);
                   6754: #endif
1.72      pk       6755:                if (CPU_ISSUN4M) {
                   6756:                        n = NSEGRG;
                   6757:                } else {
                   6758:                        for (n = 0, vs = 0; vs < NSEGRG; vs++) {
                   6759:                                if (rp->rg_segmap[vs].sg_npte)
                   6760:                                        n++;
                   6761:                        }
1.43      pk       6762:                }
                   6763:                if (n != rp->rg_nsegmap)
1.66      christos 6764:                        printf("%s: kernel CHK(vr %d): inconsistent "
1.43      pk       6765:                                "# of pte's: %d, should be %d\n",
                   6766:                                s, vr, rp->rg_nsegmap, n);
                   6767:        }
1.53      christos 6768:        return;
1.43      pk       6769: }
                   6770: #endif
1.46      pk       6771:
                   6772: /*
1.98      pk       6773:  * Return the number of disk blocks that pmap_dumpmmu() will dump.
1.46      pk       6774:  */
                   6775: int
                   6776: pmap_dumpsize()
                   6777: {
1.98      pk       6778:        int     sz;
1.67      pk       6779:
                   6780:        sz = ALIGN(sizeof(kcore_seg_t)) + ALIGN(sizeof(cpu_kcore_hdr_t));
                   6781:        sz += npmemarr * sizeof(phys_ram_seg_t);
1.98      pk       6782:        sz += sizeof(kernel_segmap_store);
1.55      pk       6783:
                   6784:        if (CPU_ISSUN4OR4C)
1.98      pk       6785:                /* For each pmeg in the MMU, we'll write NPTESG PTEs. */
1.67      pk       6786:                sz += (seginval + 1) * NPTESG * sizeof(int);
                   6787:
1.98      pk       6788:        return btodb(sz + DEV_BSIZE - 1);
1.46      pk       6789: }
                   6790:
                   6791: /*
1.98      pk       6792:  * Write the core dump headers and MD data to the dump device.
                   6793:  * We dump the following items:
                   6794:  *
                   6795:  *     kcore_seg_t              MI header defined in <sys/kcore.h>)
                   6796:  *     cpu_kcore_hdr_t          MD header defined in <machine/kcore.h>)
                   6797:  *     phys_ram_seg_t[npmemarr] physical memory segments
                   6798:  *     segmap_t[NKREG*NSEGRG]   the kernel's segment map
                   6799:  *     the MMU pmegs on sun4/sun4c
1.46      pk       6800:  */
                   6801: int
                   6802: pmap_dumpmmu(dump, blkno)
1.124   ! pk       6803:        daddr_t blkno;
        !          6804:        int (*dump)     __P((dev_t, daddr_t, caddr_t, size_t));
1.46      pk       6805: {
1.67      pk       6806:        kcore_seg_t     *ksegp;
                   6807:        cpu_kcore_hdr_t *kcpup;
                   6808:        phys_ram_seg_t  memseg;
1.124   ! pk       6809:        int             error = 0;
        !          6810:        int             i, memsegoffset, segmapoffset, pmegoffset;
1.67      pk       6811:        int             buffer[dbtob(1) / sizeof(int)];
                   6812:        int             *bp, *ep;
1.55      pk       6813: #if defined(SUN4C) || defined(SUN4)
1.124   ! pk       6814:        int     pmeg;
1.55      pk       6815: #endif
1.46      pk       6816:
1.67      pk       6817: #define EXPEDITE(p,n) do {                                             \
                   6818:        int *sp = (int *)(p);                                           \
                   6819:        int sz = (n);                                                   \
                   6820:        while (sz > 0) {                                                \
                   6821:                *bp++ = *sp++;                                          \
                   6822:                if (bp >= ep) {                                         \
                   6823:                        error = (*dump)(dumpdev, blkno,                 \
                   6824:                                        (caddr_t)buffer, dbtob(1));     \
                   6825:                        if (error != 0)                                 \
                   6826:                                return (error);                         \
                   6827:                        ++blkno;                                        \
                   6828:                        bp = buffer;                                    \
                   6829:                }                                                       \
                   6830:                sz -= 4;                                                \
                   6831:        }                                                               \
                   6832: } while (0)
                   6833:
                   6834:        setcontext(0);
                   6835:
                   6836:        /* Setup bookkeeping pointers */
                   6837:        bp = buffer;
                   6838:        ep = &buffer[sizeof(buffer) / sizeof(buffer[0])];
                   6839:
                   6840:        /* Fill in MI segment header */
                   6841:        ksegp = (kcore_seg_t *)bp;
                   6842:        CORE_SETMAGIC(*ksegp, KCORE_MAGIC, MID_MACHINE, CORE_CPU);
1.98      pk       6843:        ksegp->c_size = dbtob(pmap_dumpsize()) - ALIGN(sizeof(kcore_seg_t));
1.67      pk       6844:
                   6845:        /* Fill in MD segment header (interpreted by MD part of libkvm) */
                   6846:        kcpup = (cpu_kcore_hdr_t *)((int)bp + ALIGN(sizeof(kcore_seg_t)));
                   6847:        kcpup->cputype = cputyp;
1.98      pk       6848:        kcpup->kernbase = KERNBASE;
1.67      pk       6849:        kcpup->nmemseg = npmemarr;
                   6850:        kcpup->memsegoffset = memsegoffset = ALIGN(sizeof(cpu_kcore_hdr_t));
1.98      pk       6851:        kcpup->nsegmap = NKREG*NSEGRG;
                   6852:        kcpup->segmapoffset = segmapoffset =
                   6853:                memsegoffset + npmemarr * sizeof(phys_ram_seg_t);
                   6854:
1.67      pk       6855:        kcpup->npmeg = (CPU_ISSUN4OR4C) ? seginval + 1 : 0;
                   6856:        kcpup->pmegoffset = pmegoffset =
1.98      pk       6857:                segmapoffset + kcpup->nsegmap * sizeof(struct segmap);
1.67      pk       6858:
                   6859:        /* Note: we have assumed everything fits in buffer[] so far... */
1.98      pk       6860:        bp = (int *)((int)kcpup + ALIGN(sizeof(cpu_kcore_hdr_t)));
1.67      pk       6861:
1.98      pk       6862: #if 0
1.67      pk       6863:        /* Align storage for upcoming quad-aligned segment array */
                   6864:        while (bp != (int *)ALIGN(bp)) {
                   6865:                int dummy = 0;
                   6866:                EXPEDITE(&dummy, 4);
                   6867:        }
1.98      pk       6868: #endif
                   6869:
1.67      pk       6870:        for (i = 0; i < npmemarr; i++) {
                   6871:                memseg.start = pmemarr[i].addr;
                   6872:                memseg.size = pmemarr[i].len;
                   6873:                EXPEDITE(&memseg, sizeof(phys_ram_seg_t));
                   6874:        }
1.98      pk       6875:
                   6876:        EXPEDITE(&kernel_segmap_store, sizeof(kernel_segmap_store));
1.67      pk       6877:
                   6878:        if (CPU_ISSUN4M)
                   6879:                goto out;
1.55      pk       6880:
                   6881: #if defined(SUN4C) || defined(SUN4)
1.46      pk       6882:        /*
                   6883:         * dump page table entries
                   6884:         *
                   6885:         * We dump each pmeg in order (by segment number).  Since the MMU
                   6886:         * automatically maps the given virtual segment to a pmeg we must
                   6887:         * iterate over the segments by incrementing an unused segment slot
                   6888:         * in the MMU.  This fixed segment number is used in the virtual
                   6889:         * address argument to getpte().
                   6890:         */
1.55      pk       6891:
1.46      pk       6892:        /*
                   6893:         * Go through the pmegs and dump each one.
                   6894:         */
                   6895:        for (pmeg = 0; pmeg <= seginval; ++pmeg) {
1.124   ! pk       6896:                int va = 0;
1.46      pk       6897:
                   6898:                setsegmap(va, pmeg);
                   6899:                i = NPTESG;
                   6900:                do {
1.67      pk       6901:                        int pte = getpte4(va);
                   6902:                        EXPEDITE(&pte, sizeof(pte));
1.46      pk       6903:                        va += NBPG;
                   6904:                } while (--i > 0);
                   6905:        }
                   6906:        setsegmap(0, seginval);
1.67      pk       6907: #endif
1.46      pk       6908:
1.67      pk       6909: out:
                   6910:        if (bp != buffer)
1.46      pk       6911:                error = (*dump)(dumpdev, blkno++, (caddr_t)buffer, dbtob(1));
                   6912:
                   6913:        return (error);
1.92      pk       6914: }
                   6915:
                   6916: /*
                   6917:  * Helper function for debuggers.
                   6918:  */
                   6919: void
                   6920: pmap_writetext(dst, ch)
                   6921:        unsigned char *dst;
                   6922:        int ch;
                   6923: {
1.95      pk       6924:        int s, pte0, pte, ctx;
1.124   ! pk       6925:        vaddr_t va;
1.92      pk       6926:
                   6927:        s = splpmap();
                   6928:        va = (unsigned long)dst & (~PGOFSET);
                   6929:        cpuinfo.cache_flush(dst, 1);
                   6930:
1.95      pk       6931:        ctx = getcontext();
                   6932:        setcontext(0);
                   6933:
1.92      pk       6934: #if defined(SUN4M)
                   6935:        if (CPU_ISSUN4M) {
                   6936:                pte0 = getpte4m(va);
                   6937:                if ((pte0 & SRMMU_TETYPE) != SRMMU_TEPTE) {
                   6938:                        splx(s);
                   6939:                        return;
                   6940:                }
                   6941:                pte = pte0 | PPROT_WRITE;
                   6942:                setpte4m(va, pte);
                   6943:                *dst = (unsigned char)ch;
                   6944:                setpte4m(va, pte0);
                   6945:
                   6946:        }
                   6947: #endif
                   6948: #if defined(SUN4) || defined(SUN4C)
                   6949:        if (CPU_ISSUN4C || CPU_ISSUN4) {
                   6950:                pte0 = getpte4(va);
                   6951:                if ((pte0 & PG_V) == 0) {
                   6952:                        splx(s);
                   6953:                        return;
                   6954:                }
                   6955:                pte = pte0 | PG_W;
                   6956:                setpte4(va, pte);
                   6957:                *dst = (unsigned char)ch;
                   6958:                setpte4(va, pte0);
                   6959:        }
                   6960: #endif
                   6961:        cpuinfo.cache_flush(dst, 1);
1.95      pk       6962:        setcontext(ctx);
1.92      pk       6963:        splx(s);
1.55      pk       6964: }
                   6965:
                   6966: #ifdef EXTREME_DEBUG
                   6967:
                   6968: static void test_region __P((int, int, int));
                   6969:
                   6970: void
                   6971: debug_pagetables()
                   6972: {
1.124   ! pk       6973:        int i;
        !          6974:        int *regtbl;
        !          6975:        int te;
1.55      pk       6976:
1.66      christos 6977:        printf("\nncontext=%d. ",ncontext);
                   6978:        printf("Context table is at va 0x%x. Level 0 PTP: 0x%x\n",
1.69      pk       6979:               cpuinfo.ctx_tbl, cpuinfo.ctx_tbl[0]);
1.66      christos 6980:        printf("Context 0 region table is at va 0x%x, pa 0x%x. Contents:\n",
1.55      pk       6981:               pmap_kernel()->pm_reg_ptps, pmap_kernel()->pm_reg_ptps_pa);
                   6982:
                   6983:        regtbl = pmap_kernel()->pm_reg_ptps;
                   6984:
1.66      christos 6985:        printf("PROM vector is at 0x%x\n",promvec);
                   6986:        printf("PROM reboot routine is at 0x%x\n",promvec->pv_reboot);
                   6987:        printf("PROM abort routine is at 0x%x\n",promvec->pv_abort);
                   6988:        printf("PROM halt routine is at 0x%x\n",promvec->pv_halt);
1.55      pk       6989:
1.66      christos 6990:        printf("Testing region 0xfe: ");
1.55      pk       6991:        test_region(0xfe,0,16*1024*1024);
1.66      christos 6992:        printf("Testing region 0xff: ");
1.55      pk       6993:        test_region(0xff,0,16*1024*1024);
1.96      pk       6994:        printf("Testing kernel region 0x%x: ", VA_VREG(KERNBASE));
                   6995:        test_region(VA_VREG(KERNBASE), 4096, avail_start);
1.55      pk       6996:        cngetc();
                   6997:
                   6998:        for (i = 0; i < SRMMU_L1SIZE; i++) {
                   6999:                te = regtbl[i];
                   7000:                if ((te & SRMMU_TETYPE) == SRMMU_TEINVALID)
                   7001:                    continue;
1.66      christos 7002:                printf("Region 0x%x: PTE=0x%x <%s> L2PA=0x%x kernL2VA=0x%x\n",
1.55      pk       7003:                       i, te, ((te & SRMMU_TETYPE) == SRMMU_TEPTE ? "pte" :
                   7004:                               ((te & SRMMU_TETYPE) == SRMMU_TEPTD ? "ptd" :
                   7005:                                ((te & SRMMU_TETYPE) == SRMMU_TEINVALID ?
                   7006:                                 "invalid" : "reserved"))),
                   7007:                       (te & ~0x3) << SRMMU_PPNPASHIFT,
                   7008:                       pmap_kernel()->pm_regmap[i].rg_seg_ptps);
                   7009:        }
1.66      christos 7010:        printf("Press q to halt...\n");
1.55      pk       7011:        if (cngetc()=='q')
                   7012:            callrom();
                   7013: }
                   7014:
                   7015: static u_int
                   7016: VA2PAsw(ctx, addr, pte)
1.124   ! pk       7017:        int ctx;
        !          7018:        caddr_t addr;
1.55      pk       7019:        int *pte;
                   7020: {
1.124   ! pk       7021:        int *curtbl;
        !          7022:        int curpte;
1.55      pk       7023:
                   7024: #ifdef EXTREME_EXTREME_DEBUG
1.66      christos 7025:        printf("Looking up addr 0x%x in context 0x%x\n",addr,ctx);
1.55      pk       7026: #endif
                   7027:        /* L0 */
1.69      pk       7028:        *pte = curpte = cpuinfo.ctx_tbl[ctx];
1.55      pk       7029: #ifdef EXTREME_EXTREME_DEBUG
1.66      christos 7030:        printf("Got L0 pte 0x%x\n",pte);
1.55      pk       7031: #endif
                   7032:        if ((curpte & SRMMU_TETYPE) == SRMMU_TEPTE) {
                   7033:                return (((curpte & SRMMU_PPNMASK) << SRMMU_PPNPASHIFT) |
                   7034:                        ((u_int)addr & 0xffffffff));
                   7035:        }
                   7036:        if ((curpte & SRMMU_TETYPE) != SRMMU_TEPTD) {
1.66      christos 7037:                printf("Bad context table entry 0x%x for context 0x%x\n",
1.55      pk       7038:                       curpte, ctx);
                   7039:                return 0;
                   7040:        }
                   7041:        /* L1 */
1.96      pk       7042:        curtbl = ((curpte & ~0x3) << 4) | KERNBASE; /* correct for krn*/
1.55      pk       7043:        *pte = curpte = curtbl[VA_VREG(addr)];
                   7044: #ifdef EXTREME_EXTREME_DEBUG
1.66      christos 7045:        printf("L1 table at 0x%x.\nGot L1 pte 0x%x\n",curtbl,curpte);
1.55      pk       7046: #endif
                   7047:        if ((curpte & SRMMU_TETYPE) == SRMMU_TEPTE)
                   7048:            return (((curpte & SRMMU_PPNMASK) << SRMMU_PPNPASHIFT) |
                   7049:                    ((u_int)addr & 0xffffff));
                   7050:        if ((curpte & SRMMU_TETYPE) != SRMMU_TEPTD) {
1.66      christos 7051:                printf("Bad region table entry 0x%x for region 0x%x\n",
1.55      pk       7052:                       curpte, VA_VREG(addr));
                   7053:                return 0;
                   7054:        }
                   7055:        /* L2 */
1.96      pk       7056:        curtbl = ((curpte & ~0x3) << 4) | KERNBASE; /* correct for krn*/
1.55      pk       7057:        *pte = curpte = curtbl[VA_VSEG(addr)];
                   7058: #ifdef EXTREME_EXTREME_DEBUG
1.66      christos 7059:        printf("L2 table at 0x%x.\nGot L2 pte 0x%x\n",curtbl,curpte);
1.55      pk       7060: #endif
                   7061:        if ((curpte & SRMMU_TETYPE) == SRMMU_TEPTE)
                   7062:            return (((curpte & SRMMU_PPNMASK) << SRMMU_PPNPASHIFT) |
                   7063:                    ((u_int)addr & 0x3ffff));
                   7064:        if ((curpte & SRMMU_TETYPE) != SRMMU_TEPTD) {
1.66      christos 7065:                printf("Bad segment table entry 0x%x for reg 0x%x, seg 0x%x\n",
1.55      pk       7066:                       curpte, VA_VREG(addr), VA_VSEG(addr));
                   7067:                return 0;
                   7068:        }
                   7069:        /* L3 */
1.96      pk       7070:        curtbl = ((curpte & ~0x3) << 4) | KERNBASE; /* correct for krn*/
1.55      pk       7071:        *pte = curpte = curtbl[VA_VPG(addr)];
                   7072: #ifdef EXTREME_EXTREME_DEBUG
1.66      christos 7073:        printf("L3 table at 0x%x.\nGot L3 pte 0x%x\n",curtbl,curpte);
1.55      pk       7074: #endif
                   7075:        if ((curpte & SRMMU_TETYPE) == SRMMU_TEPTE)
                   7076:            return (((curpte & SRMMU_PPNMASK) << SRMMU_PPNPASHIFT) |
                   7077:                    ((u_int)addr & 0xfff));
                   7078:        else {
1.66      christos 7079:                printf("Bad L3 pte 0x%x for reg 0x%x, seg 0x%x, pg 0x%x\n",
1.55      pk       7080:                       curpte, VA_VREG(addr), VA_VSEG(addr), VA_VPG(addr));
                   7081:                return 0;
                   7082:        }
1.66      christos 7083:        printf("Bizarreness with address 0x%x!\n",addr);
1.55      pk       7084: }
                   7085:
                   7086: void test_region(reg, start, stop)
1.124   ! pk       7087:        int reg;
        !          7088:        int start, stop;
1.55      pk       7089: {
1.124   ! pk       7090:        int i;
        !          7091:        int addr;
        !          7092:        int pte;
1.55      pk       7093:        int ptesw;
                   7094: /*     int cnt=0;
                   7095: */
                   7096:
                   7097:        for (i = start; i < stop; i+= NBPG) {
                   7098:                addr = (reg << RGSHIFT) | i;
                   7099:                pte=lda(((u_int)(addr)) | ASI_SRMMUFP_LN, ASI_SRMMUFP);
                   7100:                if (pte) {
1.66      christos 7101: /*                     printf("Valid address 0x%x\n",addr);
1.55      pk       7102:                        if (++cnt == 20) {
                   7103:                                cngetc();
                   7104:                                cnt=0;
                   7105:                        }
                   7106: */
                   7107:                        if (VA2PA(addr) != VA2PAsw(0,addr,&ptesw)) {
1.66      christos 7108:                                printf("Mismatch at address 0x%x.\n",addr);
1.55      pk       7109:                                if (cngetc()=='q') break;
                   7110:                        }
1.96      pk       7111:                        if (reg == VA_VREG(KERNBASE))
                   7112:                                /* kernel permissions are different */
                   7113:                                continue;
1.55      pk       7114:                        if ((pte&SRMMU_PROT_MASK)!=(ptesw&SRMMU_PROT_MASK)) {
1.66      christos 7115:                                printf("Mismatched protections at address "
1.55      pk       7116:                                       "0x%x; pte=0x%x, ptesw=0x%x\n",
                   7117:                                       addr,pte,ptesw);
                   7118:                                if (cngetc()=='q') break;
                   7119:                        }
                   7120:                }
                   7121:        }
1.66      christos 7122:        printf("done.\n");
1.46      pk       7123: }
1.55      pk       7124:
                   7125:
                   7126: void print_fe_map(void)
                   7127: {
                   7128:        u_int i, pte;
                   7129:
1.66      christos 7130:        printf("map of region 0xfe:\n");
1.55      pk       7131:        for (i = 0xfe000000; i < 0xff000000; i+=4096) {
                   7132:                if (((pte = getpte4m(i)) & SRMMU_TETYPE) != SRMMU_TEPTE)
                   7133:                    continue;
1.91      fair     7134:                printf("0x%x -> 0x%x%x (pte 0x%x)\n", i, pte >> 28,
1.55      pk       7135:                       (pte & ~0xff) << 4, pte);
                   7136:        }
1.66      christos 7137:        printf("done\n");
1.55      pk       7138: }
                   7139:
                   7140: #endif

CVSweb <webmaster@jp.NetBSD.org>