
Annotation of src/sys/arch/sparc/sparc/pmap.c, Revision 1.83

1.83    ! pk          1: /*     $NetBSD: pmap.c,v 1.82 1997/05/24 20:09:45 pk Exp $ */
1.22      deraadt     2:
1.1       deraadt     3: /*
1.55      pk          4:  * Copyright (c) 1996
1.57      abrown      5:  *     The President and Fellows of Harvard College. All rights reserved.
1.1       deraadt     6:  * Copyright (c) 1992, 1993
                      7:  *     The Regents of the University of California.  All rights reserved.
                      8:  *
                      9:  * This software was developed by the Computer Systems Engineering group
                     10:  * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
                     11:  * contributed to Berkeley.
                     12:  *
                     13:  * All advertising materials mentioning features or use of this software
                     14:  * must display the following acknowledgement:
1.55      pk         15:  *     This product includes software developed by Harvard University.
1.1       deraadt    16:  *     This product includes software developed by the University of
                     17:  *     California, Lawrence Berkeley Laboratory.
                     18:  *
                     19:  * Redistribution and use in source and binary forms, with or without
                     20:  * modification, are permitted provided that the following conditions
                     21:  * are met:
1.55      pk         22:  *
1.1       deraadt    23:  * 1. Redistributions of source code must retain the above copyright
                     24:  *    notice, this list of conditions and the following disclaimer.
                     25:  * 2. Redistributions in binary form must reproduce the above copyright
                     26:  *    notice, this list of conditions and the following disclaimer in the
                     27:  *    documentation and/or other materials provided with the distribution.
                     28:  * 3. All advertising materials mentioning features or use of this software
                     29:  *    must display the following acknowledgement:
1.55      pk         30:  *     This product includes software developed by Aaron Brown and
                     31:  *     Harvard University.
                     32:  *      This product includes software developed by the University of
                     33:  *      California, Berkeley and its contributors.
1.1       deraadt    34:  * 4. Neither the name of the University nor the names of its contributors
                     35:  *    may be used to endorse or promote products derived from this software
                     36:  *    without specific prior written permission.
                     37:  *
                     38:  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
                     39:  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
                     40:  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
                     41:  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
                     42:  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
                     43:  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
                     44:  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
                     45:  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
                     46:  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
                     47:  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
                     48:  * SUCH DAMAGE.
                     49:  *
1.22      deraadt    50:  *     @(#)pmap.c      8.4 (Berkeley) 2/5/94
1.55      pk         51:  *
1.1       deraadt    52:  */
                     53:
                     54: /*
                     55:  * SPARC physical map management code.
                     56:  * Does not function on multiprocessors (yet).
                     57:  */
                     58:
                     59: #include <sys/param.h>
                     60: #include <sys/systm.h>
                     61: #include <sys/device.h>
                     62: #include <sys/proc.h>
1.43      pk         63: #include <sys/queue.h>
1.1       deraadt    64: #include <sys/malloc.h>
1.67      pk         65: #include <sys/exec.h>
                     66: #include <sys/core.h>
                     67: #include <sys/kcore.h>
1.1       deraadt    68:
                     69: #include <vm/vm.h>
                     70: #include <vm/vm_kern.h>
                     71: #include <vm/vm_prot.h>
                     72: #include <vm/vm_page.h>
                     73:
                     74: #include <machine/autoconf.h>
                     75: #include <machine/bsd_openprom.h>
1.19      deraadt    76: #include <machine/oldmon.h>
1.1       deraadt    77: #include <machine/cpu.h>
                     78: #include <machine/ctlreg.h>
1.67      pk         79: #include <machine/kcore.h>
1.1       deraadt    80:
                     81: #include <sparc/sparc/asm.h>
                     82: #include <sparc/sparc/cache.h>
1.3       deraadt    83: #include <sparc/sparc/vaddrs.h>
1.69      pk         84: #include <sparc/sparc/cpuvar.h>
1.1       deraadt    85:
                     86: #ifdef DEBUG
                     87: #define PTE_BITS "\20\40V\37W\36S\35NC\33IO\32U\31M"
1.55      pk         88: #define PTE_BITS4M "\20\10C\7M\6R\5ACC3\4ACC2\3ACC1\2TYP2\1TYP1"
1.1       deraadt    89: #endif
                     90:
                     91: /*
                     92:  * The SPARCstation offers us the following challenges:
                     93:  *
                     94:  *   1. A virtual address cache.  This is, strictly speaking, not
                     95:  *     part of the architecture, but the code below assumes one.
                     96:  *     This is a write-through cache on the 4c and a write-back cache
                     97:  *     on others.
                     98:  *
1.55      pk         99:  *   2. (4/4c only) An MMU that acts like a cache.  There is not enough
                    100:  *     space in the MMU to map everything all the time.  Instead, we need
1.1       deraadt   101:  *     to load MMU with the `working set' of translations for each
1.55      pk        102:  *     process. The sun4m does not act like a cache; tables are maintained
                    103:  *     in physical memory.
1.1       deraadt   104:  *
                     105:  *   3. Segmented virtual and physical spaces.  The upper 12 bits of
                    106:  *     a virtual address (the virtual segment) index a segment table,
                    107:  *     giving a physical segment.  The physical segment selects a
                    108:  *     `Page Map Entry Group' (PMEG) and the virtual page number---the
                    109:  *     next 5 or 6 bits of the virtual address---select the particular
                    110:  *     `Page Map Entry' for the page.  We call the latter a PTE and
                    111:  *     call each Page Map Entry Group a pmeg (for want of a better name).
1.55      pk        112:  *     Note that the sun4m has an unsegmented 36-bit physical space.
1.1       deraadt   113:  *
                    114:  *     Since there are no valid bits in the segment table, the only way
                    115:  *     to have an invalid segment is to make one full pmeg of invalid PTEs.
1.55      pk        116:  *     We use the last one (since the ROM does as well) (sun4/4c only)
1.1       deraadt   117:  *
                    118:  *   4. Discontiguous physical pages.  The Mach VM expects physical pages
                    119:  *     to be in one sequential lump.
                    120:  *
                    121:  *   5. The MMU is always on: it is not possible to disable it.  This is
                    122:  *     mainly a startup hassle.
                    123:  */
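
/*
 * Illustrative sketch, not part of the original file: how a virtual
 * address selects the software region/segment maps implied by item 3
 * above, using the VA_VREG()/VA_VSEG() macros and the pm_regmap/
 * rg_segmap fields that the rest of this file relies on (see pmap.h).
 */
#if 0
static struct segmap *
example_va2segmap(pm, va)
	register struct pmap *pm;
	register vm_offset_t va;
{
	register struct regmap *rp = &pm->pm_regmap[VA_VREG(va)];

	return (&rp->rg_segmap[VA_VSEG(va)]);
}
#endif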
                    124:
                    125: struct pmap_stats {
                    126:        int     ps_unlink_pvfirst;      /* # of pv_unlinks on head */
                    127:        int     ps_unlink_pvsearch;     /* # of pv_unlink searches */
                    128:        int     ps_changeprots;         /* # of calls to changeprot */
                    129:        int     ps_useless_changeprots; /* # of changeprots for wiring */
                    130:        int     ps_enter_firstpv;       /* pv heads entered */
                    131:        int     ps_enter_secondpv;      /* pv nonheads entered */
                    132:        int     ps_useless_changewire;  /* useless wiring changes */
                    133:        int     ps_npg_prot_all;        /* # of active pages protected */
                    134:        int     ps_npg_prot_actual;     /* # pages actually affected */
1.70      pk        135:        int     ps_npmeg_free;          /* # of free pmegs */
                    136:        int     ps_npmeg_locked;        /* # of pmegs on locked list */
                    137:        int     ps_npmeg_lru;           /* # of pmegs on lru list */
1.1       deraadt   138: } pmap_stats;
                    139:
                    140: #ifdef DEBUG
                    141: #define        PDB_CREATE      0x0001
                    142: #define        PDB_DESTROY     0x0002
                    143: #define        PDB_REMOVE      0x0004
                    144: #define        PDB_CHANGEPROT  0x0008
                    145: #define        PDB_ENTER       0x0010
                    146:
                    147: #define        PDB_MMU_ALLOC   0x0100
                    148: #define        PDB_MMU_STEAL   0x0200
                    149: #define        PDB_CTX_ALLOC   0x0400
                    150: #define        PDB_CTX_STEAL   0x0800
1.43      pk        151: #define        PDB_MMUREG_ALLOC        0x1000
                    152: #define        PDB_MMUREG_STEAL        0x2000
1.55      pk        153: #define        PDB_CACHESTUFF  0x4000
1.72      pk        154: #define        PDB_SWITCHMAP   0x8000
                    155: #define        PDB_SANITYCHK   0x10000
1.55      pk        156: int    pmapdebug = 0;
1.1       deraadt   157: #endif
                    158:
1.55      pk        159: #if 0
1.10      deraadt   160: #define        splpmap() splimp()
1.55      pk        161: #endif
1.1       deraadt   162:
                    163: /*
                    164:  * First and last managed physical addresses.
                    165:  */
                    166: vm_offset_t    vm_first_phys, vm_num_phys;
                    167:
                    168: /*
                    169:  * For each managed physical page, there is a list of all currently
                    170:  * valid virtual mappings of that page.  Since there is usually one
                    171:  * (or zero) mapping per page, the table begins with an initial entry,
                    172:  * rather than a pointer; this head entry is empty iff its pv_pmap
                    173:  * field is NULL.
                    174:  *
                     175:  * Note that these are per machine-independent page (so there may
                     176:  * be, e.g., only one for every two hardware pages).  Since the virtual
                    177:  * address is aligned on a page boundary, the low order bits are free
                    178:  * for storing flags.  Only the head of each list has flags.
                    179:  *
                    180:  * THIS SHOULD BE PART OF THE CORE MAP
                    181:  */
                    182: struct pvlist {
                    183:        struct  pvlist *pv_next;        /* next pvlist, if any */
                    184:        struct  pmap *pv_pmap;          /* pmap of this va */
                    185:        int     pv_va;                  /* virtual address */
                    186:        int     pv_flags;               /* flags (below) */
                    187: };
                    188:
                    189: /*
                    190:  * Flags in pv_flags.  Note that PV_MOD must be 1 and PV_REF must be 2
                    191:  * since they must line up with the bits in the hardware PTEs (see pte.h).
1.55      pk        192:  * Sun4M bits are different (reversed), and at a different location in the
                    193:  * pte. Now why did they do that?
1.1       deraadt   194:  */
                    195: #define PV_MOD 1               /* page modified */
                    196: #define PV_REF 2               /* page referenced */
                    197: #define PV_NC  4               /* page cannot be cached */
1.55      pk        198: #define PV_REF4M       1       /* page referenced on sun4m */
                    199: #define PV_MOD4M       2       /* page modified on 4m (why reversed?!?) */
                    200: #define PV_C4M         4       /* page _can_ be cached on 4m */
1.1       deraadt   201: /*efine        PV_ALLF 7               ** all of the above */
                    202:
                    203: struct pvlist *pv_table;       /* array of entries, one per physical page */
                    204:
                    205: #define pvhead(pa)     (&pv_table[atop((pa) - vm_first_phys)])
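
/*
 * Illustrative sketch, not part of the original file: visiting every
 * current mapping of a managed physical page through its pv list.
 * The head entry lives in pv_table itself and is empty iff its
 * pv_pmap field is NULL; non-head entries always have a pmap.
 */
#if 0
static void
example_walk_mappings(pa)
	register vm_offset_t pa;
{
	register struct pvlist *pv;

	for (pv = pvhead(pa); pv != NULL && pv->pv_pmap != NULL;
	     pv = pv->pv_next)
		/* pv->pv_pmap maps pa at virtual address pv->pv_va */;
}
#endif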
                    206:
                    207: /*
                    208:  * Each virtual segment within each pmap is either valid or invalid.
                    209:  * It is valid if pm_npte[VA_VSEG(va)] is not 0.  This does not mean
                    210:  * it is in the MMU, however; that is true iff pm_segmap[VA_VSEG(va)]
                    211:  * does not point to the invalid PMEG.
                    212:  *
1.55      pk        213:  * In the older SPARC architectures (pre-4m), page tables are cached in the
                    214:  * MMU. The following discussion applies to these architectures:
                    215:  *
1.1       deraadt   216:  * If a virtual segment is valid and loaded, the correct PTEs appear
                    217:  * in the MMU only.  If it is valid and unloaded, the correct PTEs appear
                    218:  * in the pm_pte[VA_VSEG(va)] only.  However, some effort is made to keep
                    219:  * the software copies consistent enough with the MMU so that libkvm can
                    220:  * do user address translations.  In particular, pv_changepte() and
                    221:  * pmap_enu() maintain consistency, while less critical changes are
                    222:  * not maintained.  pm_pte[VA_VSEG(va)] always points to space for those
                    223:  * PTEs, unless this is the kernel pmap, in which case pm_pte[x] is not
                    224:  * used (sigh).
                    225:  *
                    226:  * Each PMEG in the MMU is either free or contains PTEs corresponding to
                    227:  * some pmap and virtual segment.  If it contains some PTEs, it also contains
                    228:  * reference and modify bits that belong in the pv_table.  If we need
                    229:  * to steal a PMEG from some process (if we need one and none are free)
                    230:  * we must copy the ref and mod bits, and update pm_segmap in the other
                    231:  * pmap to show that its virtual segment is no longer in the MMU.
                    232:  *
                    233:  * There are 128 PMEGs in a small Sun-4, of which only a few dozen are
                    234:  * tied down permanently, leaving `about' 100 to be spread among
                    235:  * running processes.  These are managed as an LRU cache.  Before
                    236:  * calling the VM paging code for a user page fault, the fault handler
                    237:  * calls mmu_load(pmap, va) to try to get a set of PTEs put into the
                    238:  * MMU.  mmu_load will check the validity of the segment and tell whether
                    239:  * it did something.
                    240:  *
                    241:  * Since I hate the name PMEG I call this data structure an `mmu entry'.
                    242:  * Each mmuentry is on exactly one of three `usage' lists: free, LRU,
                    243:  * or locked.  The LRU list is for user processes; the locked list is
                    244:  * for kernel entries; both are doubly linked queues headed by `mmuhd's.
                    245:  * The free list is a simple list, headed by a free list pointer.
1.55      pk        246:  *
                    247:  * In the sun4m architecture using the SPARC Reference MMU (SRMMU), three
                    248:  * levels of page tables are maintained in physical memory. We use the same
                    249:  * structures as with the 3-level old-style MMU (pm_regmap, pm_segmap,
                    250:  * rg_segmap, sg_pte, etc) to maintain kernel-edible page tables; we also
                    251:  * build a parallel set of physical tables that can be used by the MMU.
                    252:  * (XXX: This seems redundant, but is it necessary for the unified kernel?)
                    253:  *
                    254:  * If a virtual segment is valid, its entries will be in both parallel lists.
                    255:  * If it is not valid, then its entry in the kernel tables will be zero, and
                    256:  * its entry in the MMU tables will either be nonexistent or zero as well.
1.72      pk        257:  *
                    258:  * The Reference MMU generally uses a Translation Look-aside Buffer (TLB)
                    259:  * to cache the result of recently executed page table walks. When
                    260:  * manipulating page tables, we need to ensure consistency of the
                    261:  * in-memory and TLB copies of the page table entries. This is handled
                    262:  * by flushing (and invalidating) a TLB entry when appropriate before
                    263:  * altering an in-memory page table entry.
1.1       deraadt   264:  */
                    265: struct mmuentry {
1.43      pk        266:        TAILQ_ENTRY(mmuentry)   me_list;        /* usage list link */
                    267:        TAILQ_ENTRY(mmuentry)   me_pmchain;     /* pmap owner link */
1.1       deraadt   268:        struct  pmap *me_pmap;          /* pmap, if in use */
1.43      pk        269:        u_short me_vreg;                /* associated virtual region/segment */
                    270:        u_short me_vseg;                /* associated virtual region/segment */
1.45      pk        271:        u_short me_cookie;              /* hardware SMEG/PMEG number */
1.1       deraadt   272: };
1.43      pk        273: struct mmuentry *mmusegments;  /* allocated in pmap_bootstrap */
                    274: struct mmuentry *mmuregions;   /* allocated in pmap_bootstrap */
1.1       deraadt   275:
1.43      pk        276: struct mmuhd segm_freelist, segm_lru, segm_locked;
                    277: struct mmuhd region_freelist, region_lru, region_locked;
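
/*
 * Illustrative sketch, not part of the original file: the usage lists
 * above are <sys/queue.h> tail queues, so stealing the least-recently
 * used user segment and pinning it for the kernel is just a matter of
 * moving the head entry of segm_lru onto segm_locked.  The real code
 * in this file must additionally save the ref/mod bits and update the
 * victim pmap's pm_segmap, as described in the comment above.
 */
#if 0
static struct mmuentry *
example_steal_lru_pmeg()
{
	register struct mmuentry *me = segm_lru.tqh_first;

	if (me != NULL) {
		TAILQ_REMOVE(&segm_lru, me, me_list);
		TAILQ_INSERT_TAIL(&segm_locked, me, me_list);
	}
	return (me);
}
#endif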
1.1       deraadt   278:
1.69      pk        279: int    seginval;               /* [4/4c] the invalid segment number */
                    280: int    reginval;               /* [4/3mmu] the invalid region number */
1.1       deraadt   281:
                    282: /*
1.55      pk        283:  * (sun4/4c)
1.1       deraadt   284:  * A context is simply a small number that dictates which set of 4096
                    285:  * segment map entries the MMU uses.  The Sun 4c has eight such sets.
                     286:  * These are allotted in an `almost MRU' fashion.
1.55      pk        287:  * (sun4m)
                    288:  * A context is simply a small number that indexes the context table, the
                    289:  * root-level page table mapping 4G areas. Each entry in this table points
                    290:  * to a 1st-level region table. A SPARC reference MMU will usually use 16
                    291:  * such contexts, but some offer as many as 64k contexts; the theoretical
                    292:  * maximum is 2^32 - 1, but this would create overlarge context tables.
1.1       deraadt   293:  *
                    294:  * Each context is either free or attached to a pmap.
                    295:  *
                    296:  * Since the virtual address cache is tagged by context, when we steal
                    297:  * a context we have to flush (that part of) the cache.
                    298:  */
                    299: union ctxinfo {
                    300:        union   ctxinfo *c_nextfree;    /* free list (if free) */
                    301:        struct  pmap *c_pmap;           /* pmap (if busy) */
                    302: };
1.69      pk        303:
                    304: #define ncontext       (cpuinfo.mmu_ncontext)
                    305: #define ctx_kick       (cpuinfo.ctx_kick)
                    306: #define ctx_kickdir    (cpuinfo.ctx_kickdir)
                    307: #define ctx_freelist   (cpuinfo.ctx_freelist)
                    308:
                    309: #if 0
1.1       deraadt   310: union ctxinfo *ctxinfo;                /* allocated in pmap_bootstrap */
                    311:
                    312: union  ctxinfo *ctx_freelist;  /* context free list */
                    313: int    ctx_kick;               /* allocation rover when none free */
                    314: int    ctx_kickdir;            /* ctx_kick roves both directions */
                    315:
1.69      pk        316: char   *ctxbusyvector;         /* [4m] tells what contexts are busy (XXX) */
                    317: #endif
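
/*
 * Illustrative sketch, not part of the original file: with the free
 * list threaded through c_nextfree, handing a context to a pmap is a
 * simple pop.  ctx_alloc() (declared further below) also handles the
 * case where no context is free, stealing one with the ctx_kick/
 * ctx_kickdir rover.
 */
#if 0
static union ctxinfo *
example_ctx_pop(pm)
	register struct pmap *pm;
{
	register union ctxinfo *c = ctx_freelist;

	if (c != NULL) {
		ctx_freelist = c->c_nextfree;
		c->c_pmap = pm;		/* context is now busy */
	}
	return (c);
}
#endif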
1.55      pk        318:
1.1       deraadt   319: caddr_t        vpage[2];               /* two reserved MD virtual pages */
1.41      mycroft   320: caddr_t        vmmap;                  /* one reserved MI vpage for /dev/mem */
1.55      pk        321: caddr_t        vdumppages;             /* 32KB worth of reserved dump pages */
1.1       deraadt   322:
1.69      pk        323: smeg_t         tregion;        /* [4/3mmu] Region for temporary mappings */
                    324:
1.43      pk        325: struct pmap    kernel_pmap_store;              /* the kernel's pmap */
                    326: struct regmap  kernel_regmap_store[NKREG];     /* the kernel's regmap */
                    327: struct segmap  kernel_segmap_store[NKREG*NSEGRG];/* the kernel's segmaps */
1.1       deraadt   328:
1.69      pk        329: #if defined(SUN4M)
1.55      pk        330: u_int  *kernel_regtable_store;         /* 1k of storage to map the kernel */
                    331: u_int  *kernel_segtable_store;         /* 2k of storage to map the kernel */
                    332: u_int  *kernel_pagtable_store;         /* 128k of storage to map the kernel */
                    333:
                    334: u_int  *kernel_iopte_table;            /* 64k of storage for iommu */
                    335: u_int  kernel_iopte_table_pa;
                    336: #endif
                    337:
1.30      pk        338: #define        MA_SIZE 32              /* size of memory descriptor arrays */
1.1       deraadt   339: struct memarr pmemarr[MA_SIZE]; /* physical memory regions */
                    340: int    npmemarr;               /* number of entries in pmemarr */
1.29      pk        341: int    cpmemarr;               /* pmap_next_page() state */
                    342: /*static*/ vm_offset_t avail_start;    /* first free physical page */
                    343: /*static*/ vm_offset_t avail_end;      /* last free physical page */
                    344: /*static*/ vm_offset_t avail_next;     /* pmap_next_page() state:
                    345:                                           next free physical page */
1.77      pk        346: /*static*/ vm_offset_t unavail_start;  /* first stolen free physical page */
                    347: /*static*/ vm_offset_t unavail_end;    /* last stolen free physical page */
1.29      pk        348: /*static*/ vm_offset_t virtual_avail;  /* first free virtual page number */
                    349: /*static*/ vm_offset_t virtual_end;    /* last free virtual page number */
                    350:
1.45      pk        351: int mmu_has_hole;
                    352:
1.29      pk        353: vm_offset_t prom_vstart;       /* For /dev/kmem */
                    354: vm_offset_t prom_vend;
1.1       deraadt   355:
1.55      pk        356: #if defined(SUN4)
1.31      pk        357: /*
1.55      pk        358:  * [sun4]: segfixmask: on some systems (4/110) "getsegmap()" returns a
                    359:  * partly invalid value. getsegmap returns a 16 bit value on the sun4,
                    360:  * but only the first 8 or so bits are valid (the rest are *supposed* to
                     361:  * be zero). On the 4/110 the bits that are supposed to be zero are
                    362:  * all one instead. e.g. KERNBASE is usually mapped by pmeg number zero.
                    363:  * On a 4/300 getsegmap(KERNBASE) == 0x0000, but
1.31      pk        364:  * on a 4/100 getsegmap(KERNBASE) == 0xff00
                    365:  *
1.55      pk        366:  * This confuses mmu_reservemon() and causes it to not reserve the PROM's
                    367:  * pmegs. Then the PROM's pmegs get used during autoconfig and everything
1.31      pk        368:  * falls apart!  (not very fun to debug, BTW.)
                    369:  *
1.43      pk        370:  * solution: mask the invalid bits in the getsegmap macro.
1.31      pk        371:  */
                    372:
                    373: static u_long segfixmask = 0xffffffff; /* all bits valid to start */
1.55      pk        374: #else
                    375: #define segfixmask 0xffffffff  /* It's in getsegmap's scope */
1.31      pk        376: #endif
                    377:
1.1       deraadt   378: /*
                    379:  * pseudo-functions for mnemonic value
                    380:  */
1.71      pk        381: #define getcontext4()          lduba(AC_CONTEXT, ASI_CONTROL)
                    382: #define getcontext4m()         lda(SRMMU_CXR, ASI_SRMMU)
1.55      pk        383: #define getcontext()           (CPU_ISSUN4M \
1.71      pk        384:                                        ? getcontext4m() \
                    385:                                        : getcontext4()  )
                    386:
                    387: #define setcontext4(c)         stba(AC_CONTEXT, ASI_CONTROL, c)
                    388: #define setcontext4m(c)                sta(SRMMU_CXR, ASI_SRMMU, c)
1.55      pk        389: #define setcontext(c)          (CPU_ISSUN4M \
1.71      pk        390:                                        ? setcontext4m(c) \
                    391:                                        : setcontext4(c)  )
1.55      pk        392:
                    393: #define        getsegmap(va)           (CPU_ISSUN4C \
                    394:                                        ? lduba(va, ASI_SEGMAP) \
                    395:                                        : (lduha(va, ASI_SEGMAP) & segfixmask))
                    396: #define        setsegmap(va, pmeg)     (CPU_ISSUN4C \
                    397:                                        ? stba(va, ASI_SEGMAP, pmeg) \
                    398:                                        : stha(va, ASI_SEGMAP, pmeg))
                    399:
                    400: /* 3-level sun4 MMU only: */
                    401: #define        getregmap(va)           ((unsigned)lduha((va)+2, ASI_REGMAP) >> 8)
                     402: #define        setregmap(va, smeg)     stha((va)+2, ASI_REGMAP, ((smeg) << 8))
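
/*
 * Illustrative sketch, not part of the original file: the common idiom
 * built on the pseudo-functions above.  Code that needs to prod MMU
 * state belonging to another context saves the current context number,
 * switches, operates, and switches back.
 */
#if 0
static void
example_with_context(cnum)
	register int cnum;
{
	register int octx = getcontext();

	setcontext(cnum);
	/* ... getsegmap()/setsegmap() etc. now apply to context cnum ... */
	setcontext(octx);
}
#endif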
                    403:
                    404: #if defined(SUN4M)
                     405: #define getpte4m(va)           lda(((va) & 0xFFFFF000) | ASI_SRMMUFP_L3, \
                    406:                                    ASI_SRMMUFP)
1.72      pk        407: void   setpgt4m __P((int *ptep, int pte));
1.55      pk        408: void   setpte4m __P((vm_offset_t va, int pte));
                    409: void   setptesw4m __P((struct pmap *pm, vm_offset_t va, int pte));
                    410: u_int  getptesw4m __P((struct pmap *pm, vm_offset_t va));
                    411: #endif
                    412:
                    413: #if defined(SUN4) || defined(SUN4C)
                    414: #define        getpte4(va)             lda(va, ASI_PTE)
                    415: #define        setpte4(va, pte)        sta(va, ASI_PTE, pte)
                    416: #endif
                    417:
                    418: /* Function pointer messiness for supporting multiple sparc architectures
                    419:  * within a single kernel: notice that there are two versions of many of the
                    420:  * functions within this file/module, one for the sun4/sun4c and the other
                    421:  * for the sun4m. For performance reasons (since things like pte bits don't
                    422:  * map nicely between the two architectures), there are separate functions
                    423:  * rather than unified functions which test the cputyp variable. If only
                    424:  * one architecture is being used, then the non-suffixed function calls
                    425:  * are macro-translated into the appropriate xxx4_4c or xxx4m call. If
                    426:  * multiple architectures are defined, the calls translate to (*xxx_p),
                    427:  * i.e. they indirect through function pointers initialized as appropriate
                    428:  * to the run-time architecture in pmap_bootstrap. See also pmap.h.
                    429:  */
                    430:
                    431: #if defined(SUN4M)
1.71      pk        432: static void mmu_setup4m_L1 __P((int, struct pmap *));
                    433: static void mmu_setup4m_L2 __P((int, struct regmap *));
                    434: static void  mmu_setup4m_L3 __P((int, struct segmap *));
1.77      pk        435: /*static*/ void        mmu_reservemon4m __P((struct pmap *));
1.58      pk        436:
1.55      pk        437: /*static*/ void pmap_rmk4m __P((struct pmap *, vm_offset_t, vm_offset_t,
                    438:                           int, int));
                    439: /*static*/ void pmap_rmu4m __P((struct pmap *, vm_offset_t, vm_offset_t,
                    440:                           int, int));
                    441: /*static*/ void pmap_enk4m __P((struct pmap *, vm_offset_t, vm_prot_t,
                    442:                          int, struct pvlist *, int));
                    443: /*static*/ void pmap_enu4m __P((struct pmap *, vm_offset_t, vm_prot_t,
                    444:                          int, struct pvlist *, int));
                    445: /*static*/ void pv_changepte4m __P((struct pvlist *, int, int));
                    446: /*static*/ int  pv_syncflags4m __P((struct pvlist *));
                    447: /*static*/ int  pv_link4m __P((struct pvlist *, struct pmap *, vm_offset_t));
                    448: /*static*/ void pv_unlink4m __P((struct pvlist *, struct pmap *, vm_offset_t));
                    449: #endif
                    450:
                    451: #if defined(SUN4) || defined(SUN4C)
1.58      pk        452: /*static*/ void        mmu_reservemon4_4c __P((int *, int *));
1.55      pk        453: /*static*/ void pmap_rmk4_4c __P((struct pmap *, vm_offset_t, vm_offset_t,
                    454:                           int, int));
                    455: /*static*/ void pmap_rmu4_4c __P((struct pmap *, vm_offset_t, vm_offset_t,
                    456:                           int, int));
                    457: /*static*/ void pmap_enk4_4c __P((struct pmap *, vm_offset_t, vm_prot_t,
                    458:                          int, struct pvlist *, int));
                    459: /*static*/ void pmap_enu4_4c __P((struct pmap *, vm_offset_t, vm_prot_t,
                    460:                          int, struct pvlist *, int));
                    461: /*static*/ void pv_changepte4_4c __P((struct pvlist *, int, int));
                    462: /*static*/ int  pv_syncflags4_4c __P((struct pvlist *));
                    463: /*static*/ int  pv_link4_4c __P((struct pvlist *, struct pmap *, vm_offset_t));
                    464: /*static*/ void pv_unlink4_4c __P((struct pvlist *, struct pmap *, vm_offset_t));
                    465: #endif
                    466:
                    467: #if !defined(SUN4M) && (defined(SUN4) || defined(SUN4C))
                    468: #define                pmap_rmk        pmap_rmk4_4c
                    469: #define                pmap_rmu        pmap_rmu4_4c
                    470:
                    471: #elif defined(SUN4M) && !(defined(SUN4) || defined(SUN4C))
                    472: #define                pmap_rmk        pmap_rmk4m
                    473: #define                pmap_rmu        pmap_rmu4m
                    474:
                    475: #else  /* must use function pointers */
                    476:
                    477: /* function pointer declarations */
                    478: /* from pmap.h: */
                    479: void           (*pmap_clear_modify_p) __P((vm_offset_t pa));
                    480: void            (*pmap_clear_reference_p) __P((vm_offset_t pa));
                    481: void            (*pmap_copy_page_p) __P((vm_offset_t, vm_offset_t));
                    482: void            (*pmap_enter_p) __P((pmap_t,
                    483:                     vm_offset_t, vm_offset_t, vm_prot_t, boolean_t));
                    484: vm_offset_t     (*pmap_extract_p) __P((pmap_t, vm_offset_t));
                    485: boolean_t       (*pmap_is_modified_p) __P((vm_offset_t pa));
                    486: boolean_t       (*pmap_is_referenced_p) __P((vm_offset_t pa));
                    487: void            (*pmap_page_protect_p) __P((vm_offset_t, vm_prot_t));
                    488: void            (*pmap_protect_p) __P((pmap_t,
                    489:                     vm_offset_t, vm_offset_t, vm_prot_t));
                    490: void            (*pmap_zero_page_p) __P((vm_offset_t));
                    491: void           (*pmap_changeprot_p) __P((pmap_t, vm_offset_t,
                    492:                     vm_prot_t, int));
                    493: /* local: */
                    494: void           (*pmap_rmk_p) __P((struct pmap *, vm_offset_t, vm_offset_t,
                    495:                           int, int));
                    496: void           (*pmap_rmu_p) __P((struct pmap *, vm_offset_t, vm_offset_t,
                    497:                           int, int));
                    498:
                    499: #define                pmap_rmk        (*pmap_rmk_p)
                    500: #define                pmap_rmu        (*pmap_rmu_p)
                    501:
                    502: #endif
                    503:
                    504: /* --------------------------------------------------------------*/
                    505:
                    506: /*
                    507:  * Next we have some Sun4m-specific routines which have no 4/4c
                    508:  * counterparts, or which are 4/4c macros.
                    509:  */
                    510:
                    511: #if defined(SUN4M)
                    512:
                    513: /* Macros which implement SRMMU TLB flushing/invalidation */
                    514:
                     515: #define tlb_flush_page(va)    sta(((va) & ~0xfff) | ASI_SRMMUFP_L3, ASI_SRMMUFP, 0)
                     516: #define tlb_flush_segment(vreg, vseg) sta(((vreg) << RGSHIFT) | ((vseg) << SGSHIFT)\
                     517:                                          | ASI_SRMMUFP_L2, ASI_SRMMUFP, 0)
                    518: #define tlb_flush_context()   sta(ASI_SRMMUFP_L1, ASI_SRMMUFP, 0)
                    519: #define tlb_flush_all()              sta(ASI_SRMMUFP_LN, ASI_SRMMUFP, 0)
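
/*
 * Illustrative sketch, not part of the original file: the flushing
 * discipline described earlier -- invalidate any cached TLB copy
 * before changing an in-memory PTE so the MMU cannot keep using a
 * stale translation.  setpte4m() below follows exactly this order.
 */
#if 0
static void
example_update_pte(va, ptep, newpte)
	register vm_offset_t va;
	register int *ptep;
	register int newpte;
{
	tlb_flush_page(va);		/* drop any stale TLB entry first */
	setpgt4m(ptep, newpte);		/* then write the in-memory PTE */
}
#endif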
                    520:
                    521: static u_int   VA2PA __P((caddr_t));
                    522:
                    523: /*
                    524:  * VA2PA(addr) -- converts a virtual address to a physical address using
                    525:  * the MMU's currently-installed page tables. As a side effect, the address
                    526:  * translation used may cause the associated pte to be encached. The correct
                    527:  * context for VA must be set before this is called.
                    528:  *
                    529:  * This routine should work with any level of mapping, as it is used
                    530:  * during bootup to interact with the ROM's initial L1 mapping of the kernel.
                    531:  */
                    532: static __inline u_int
                    533: VA2PA(addr)
                    534:        register caddr_t addr;
                    535: {
                    536:        register u_int pte;
                    537:
                    538:        /* we'll use that handy SRMMU flush/probe! %%%: make consts below! */
                    539:        /* Try each level in turn until we find a valid pte. Otherwise panic */
                    540:
                    541:        pte = lda(((u_int)addr & ~0xfff) | ASI_SRMMUFP_L3, ASI_SRMMUFP);
                    542:        if ((pte & SRMMU_TETYPE) == SRMMU_TEPTE)
                    543:            return (((pte & SRMMU_PPNMASK) << SRMMU_PPNPASHIFT) |
                    544:                    ((u_int)addr & 0xfff));
1.60      pk        545:
                    546:        /* A `TLB Flush Entire' is required before any L0, L1 or L2 probe */
                    547:        tlb_flush_all();
                    548:
1.55      pk        549:        pte = lda(((u_int)addr & ~0xfff) | ASI_SRMMUFP_L2, ASI_SRMMUFP);
                    550:        if ((pte & SRMMU_TETYPE) == SRMMU_TEPTE)
                    551:            return (((pte & SRMMU_PPNMASK) << SRMMU_PPNPASHIFT) |
                    552:                    ((u_int)addr & 0x3ffff));
                    553:        pte = lda(((u_int)addr & ~0xfff) | ASI_SRMMUFP_L1, ASI_SRMMUFP);
                    554:        if ((pte & SRMMU_TETYPE) == SRMMU_TEPTE)
                    555:            return (((pte & SRMMU_PPNMASK) << SRMMU_PPNPASHIFT) |
                    556:                    ((u_int)addr & 0xffffff));
                    557:        pte = lda(((u_int)addr & ~0xfff) | ASI_SRMMUFP_L0, ASI_SRMMUFP);
                    558:        if ((pte & SRMMU_TETYPE) == SRMMU_TEPTE)
                    559:            return (((pte & SRMMU_PPNMASK) << SRMMU_PPNPASHIFT) |
                    560:                    ((u_int)addr & 0xffffffff));
                    561:
                    562:        panic("VA2PA: Asked to translate unmapped VA %p", addr);
                    563: }
                    564:
                    565: /*
                    566:  * Get the page table entry (PTE) for va by looking it up in the software
                    567:  * page tables. These are the same tables that are used by the MMU; this
                    568:  * routine allows easy access to the page tables even if the context
                    569:  * corresponding to the table is not loaded or selected.
                    570:  * This routine should NOT be used if there is any chance that the desired
                    571:  * pte is in the TLB cache, since it will return stale data in that case.
                    572:  * For that case, and for general use, use getpte4m, which is much faster
                    573:  * and avoids walking in-memory page tables if the page is in the cache.
                    574:  * Note also that this routine only works if a kernel mapping has been
                    575:  * installed for the given page!
                    576:  */
                    577: __inline u_int
                    578: getptesw4m(pm, va)             /* Assumes L3 mapping! */
                    579:        register struct pmap *pm;
                    580:        register vm_offset_t va;
                    581: {
                    582:        register struct regmap *rm;
                    583:        register struct segmap *sm;
                    584:
                    585:        rm = &pm->pm_regmap[VA_VREG(va)];
                    586: #ifdef DEBUG
                    587:        if (rm == NULL)
1.58      pk        588:                panic("getptesw4m: no regmap entry");
1.55      pk        589: #endif
                    590:        sm = &rm->rg_segmap[VA_VSEG(va)];
                    591: #ifdef DEBUG
                    592:        if (sm == NULL)
1.58      pk        593:                panic("getptesw4m: no segmap");
1.55      pk        594: #endif
                    595:        return (sm->sg_pte[VA_SUN4M_VPG(va)]);  /* return pte */
                    596: }
                    597:
                    598: /*
                    599:  * Set the page table entry for va to pte. Only affects software MMU page-
                    600:  * tables (the in-core pagetables read by the MMU). Ignores TLB, and
                    601:  * thus should _not_ be called if the pte translation could be in the TLB.
                    602:  * In this case, use setpte4m().
                    603:  */
                    604: __inline void
                    605: setptesw4m(pm, va, pte)
                    606:        register struct pmap *pm;
                    607:        register vm_offset_t va;
                    608:        register int pte;
                    609: {
                    610:        register struct regmap *rm;
                    611:        register struct segmap *sm;
                    612:
                    613:        rm = &pm->pm_regmap[VA_VREG(va)];
                    614:
                    615: #ifdef DEBUG
                    616:        if (pm->pm_regmap == NULL || rm == NULL)
1.82      pk        617:                panic("setptesw4m: no regmap entry");
1.55      pk        618: #endif
                    619:        sm = &rm->rg_segmap[VA_VSEG(va)];
                    620:
                    621: #ifdef DEBUG
                    622:        if (rm->rg_segmap == NULL || sm == NULL || sm->sg_pte == NULL)
1.82      pk        623:                panic("setptesw4m: no segmap for va %p", (caddr_t)va);
1.55      pk        624: #endif
                    625:        sm->sg_pte[VA_SUN4M_VPG(va)] = pte; /* set new pte */
                    626: }
                    627:
1.72      pk        628: __inline void
                    629: setpgt4m(ptep, pte)
                    630:        int *ptep;
                    631:        int pte;
                    632: {
                    633:        *ptep = pte;
                    634:        if ((cpuinfo.flags & CPUFLG_CACHEPAGETABLES) == 0)
                    635:                cpuinfo.pcache_flush_line((int)ptep, VA2PA((caddr_t)ptep));
                    636: }
                    637:
1.55      pk        638: /* Set the page table entry for va to pte. Flushes cache. */
                    639: __inline void
                    640: setpte4m(va, pte)
                    641:        register vm_offset_t va;
                    642:        register int pte;
                    643: {
                    644:        register struct pmap *pm;
                    645:        register struct regmap *rm;
                    646:        register struct segmap *sm;
                    647:        register union ctxinfo *c;
                    648:
1.69      pk        649:        cache_flush_page(va);
1.55      pk        650:
1.58      pk        651:        /*
                    652:         * Now walk tables to find pte. We use ctxinfo to locate the pmap
1.55      pk        653:         * from the current context
                    654:         */
1.69      pk        655: #if 0
1.55      pk        656: #ifdef DEBUG
1.71      pk        657:        if (ctxbusyvector[getcontext4m()] == 0)
1.55      pk        658:                panic("setpte4m: no pmap for current context (%d)",
1.71      pk        659:                      getcontext4m());
1.1       deraadt   660: #endif
1.69      pk        661: #endif
1.71      pk        662:        c = &cpuinfo.ctxinfo[getcontext4m()];
1.55      pk        663:        pm = c->c_pmap;
                    664:
                    665:        /* Note: inline version of setptesw4m() */
                    666: #ifdef DEBUG
                    667:        if (pm->pm_regmap == NULL)
                    668:                panic("setpte4m: no regmap entry");
1.43      pk        669: #endif
1.55      pk        670:        rm = &pm->pm_regmap[VA_VREG(va)];
                    671:        sm = &rm->rg_segmap[VA_VSEG(va)];
1.1       deraadt   672:
1.55      pk        673: #ifdef DEBUG
                    674:        if (rm->rg_segmap == NULL || sm == NULL || sm->sg_pte == NULL)
                    675:                panic("setpte4m: no segmap for va %p", (caddr_t)va);
                    676: #endif
                    677:        tlb_flush_page(va);
1.72      pk        678:        setpgt4m(sm->sg_pte + VA_SUN4M_VPG(va), pte);
1.55      pk        679: }
1.72      pk        680:
1.55      pk        681: #endif /* 4m only */
1.1       deraadt   682:
                    683: /*----------------------------------------------------------------*/
                    684:
1.72      pk        685: /*
                    686:  * The following three macros are to be used in sun4/sun4c code only.
                    687:  */
1.69      pk        688: #if defined(SUN4_MMU3L)
                    689: #define CTX_USABLE(pm,rp) (                                    \
1.72      pk        690:                ((pm)->pm_ctx != NULL &&                        \
                    691:                 (!HASSUN4_MMU3L || (rp)->rg_smeg != reginval)) \
1.69      pk        692: )
1.43      pk        693: #else
1.55      pk        694: #define CTX_USABLE(pm,rp)      ((pm)->pm_ctx != NULL )
1.43      pk        695: #endif
                    696:
1.55      pk        697: #define GAP_WIDEN(pm,vr) do if (CPU_ISSUN4OR4C) {      \
                    698:        if (vr + 1 == pm->pm_gap_start)                 \
                    699:                pm->pm_gap_start = vr;                  \
                    700:        if (vr == pm->pm_gap_end)                       \
                    701:                pm->pm_gap_end = vr + 1;                \
1.43      pk        702: } while (0)
                    703:
1.55      pk        704: #define GAP_SHRINK(pm,vr) do if (CPU_ISSUN4OR4C) {                     \
1.43      pk        705:        register int x;                                                 \
                    706:        x = pm->pm_gap_start + (pm->pm_gap_end - pm->pm_gap_start) / 2; \
                    707:        if (vr > x) {                                                   \
                    708:                if (vr < pm->pm_gap_end)                                \
                    709:                        pm->pm_gap_end = vr;                            \
                    710:        } else {                                                        \
                    711:                if (vr >= pm->pm_gap_start && x != pm->pm_gap_start)    \
                    712:                        pm->pm_gap_start = vr + 1;                      \
                    713:        }                                                               \
                    714: } while (0)
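
/*
 * Illustrative sketch, not part of the original file: pm_gap_start and
 * pm_gap_end bracket a run of user regions known to hold no valid
 * mappings, so per-region scans can hop over it.  NUREG (the number of
 * user regions) is assumed here from pmap.h.
 */
#if 0
static void
example_scan_regions(pm)
	register struct pmap *pm;
{
	register int vr;

	for (vr = 0; vr < NUREG; vr++) {
		if (vr == pm->pm_gap_start)
			vr = pm->pm_gap_end;	/* hop over the hole */
		if (vr >= NUREG)
			break;
		/* ... visit region vr ... */
	}
}
#endif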
                    715:
1.72      pk        716:
1.53      christos  717: static void sortm __P((struct memarr *, int));
                    718: void   ctx_alloc __P((struct pmap *));
                    719: void   ctx_free __P((struct pmap *));
                    720: void   pv_flushcache __P((struct pvlist *));
                    721: void   kvm_iocache __P((caddr_t, int));
                    722: #ifdef DEBUG
                    723: void   pm_check __P((char *, struct pmap *));
                    724: void   pm_check_k __P((char *, struct pmap *));
                    725: void   pm_check_u __P((char *, struct pmap *));
                    726: #endif
                    727:
                    728:
1.2       deraadt   729: /*
                    730:  * Sort a memory array by address.
                    731:  */
                    732: static void
                    733: sortm(mp, n)
                    734:        register struct memarr *mp;
                    735:        register int n;
                    736: {
                    737:        register struct memarr *mpj;
                    738:        register int i, j;
                    739:        register u_int addr, len;
                    740:
                    741:        /* Insertion sort.  This is O(n^2), but so what? */
                    742:        for (i = 1; i < n; i++) {
                    743:                /* save i'th entry */
                    744:                addr = mp[i].addr;
                    745:                len = mp[i].len;
                    746:                /* find j such that i'th entry goes before j'th */
                    747:                for (j = 0, mpj = mp; j < i; j++, mpj++)
                    748:                        if (addr < mpj->addr)
                    749:                                break;
                    750:                /* slide up any additional entries */
                    751:                ovbcopy(mpj, mpj + 1, (i - j) * sizeof(*mp));
                    752:                mpj->addr = addr;
                    753:                mpj->len = len;
                    754:        }
                    755: }
                    756:
1.29      pk        757: /*
                    758:  * For our convenience, vm_page.c implements:
                    759:  *       pmap_startup(), pmap_steal_memory()
                    760:  * using the functions:
                    761:  *       pmap_virtual_space(), pmap_free_pages(), pmap_next_page(),
                    762:  * which are much simpler to implement.
                    763:  */
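
/*
 * Illustrative sketch, not part of the original file: roughly how the
 * machine-independent startup code consumes this interface -- it sizes
 * its per-page arrays with pmap_free_pages(), then pulls physical
 * pages one at a time until pmap_next_page() runs dry.
 */
#if 0
static void
example_startup_consumer()
{
	vm_offset_t pa;

	/* pmap_free_pages() bounds the number of pages requested here */
	while (pmap_next_page(&pa))
		/* hand the page at pa to the VM free list */;
}
#endif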
                    764:
                    765: /*
                    766:  * How much virtual space does this kernel have?
                    767:  * (After mapping kernel text, data, etc.)
                    768:  */
                    769: void
                    770: pmap_virtual_space(v_start, v_end)
                    771:         vm_offset_t *v_start;
                    772:         vm_offset_t *v_end;
                    773: {
                    774:         *v_start = virtual_avail;
                    775:         *v_end   = virtual_end;
                    776: }
                    777:
                    778: /*
                    779:  * Return the number of page indices in the range of
                    780:  * possible return values for pmap_page_index() for
                    781:  * all addresses provided by pmap_next_page().  This
                    782:  * return value is used to allocate per-page data.
                    783:  *
                    784:  */
                    785: u_int
                    786: pmap_free_pages()
                    787: {
                     788:        long bytes;
                    789:        int nmem;
                    790:        register struct memarr *mp;
                    791:
1.36      pk        792:        bytes = -avail_start;
                    793:        for (mp = pmemarr, nmem = npmemarr; --nmem >= 0; mp++)
1.29      pk        794:                bytes += mp->len;
                    795:
                    796:         return atop(bytes);
                    797: }
                    798:
                    799: /*
                    800:  * If there are still physical pages available, put the address of
                    801:  * the next available one at paddr and return TRUE.  Otherwise,
                    802:  * return FALSE to indicate that there are no more free pages.
                    803:  * Note that avail_next is set to avail_start in pmap_bootstrap().
                    804:  *
                     805:  * Important:  The page indices of the pages returned here must be
                    806:  * in ascending order.
                    807:  */
                    808: int
                    809: pmap_next_page(paddr)
                    810:         vm_offset_t *paddr;
                    811: {
                    812:
                    813:         /* Is it time to skip over a hole? */
                    814:        if (avail_next == pmemarr[cpmemarr].addr + pmemarr[cpmemarr].len) {
                    815:                if (++cpmemarr == npmemarr)
                    816:                        return FALSE;
                    817:                avail_next = pmemarr[cpmemarr].addr;
1.77      pk        818:        } else if (avail_next == unavail_start)
                    819:                avail_next = unavail_end;
1.29      pk        820:
                    821: #ifdef DIAGNOSTIC
                    822:         /* Any available memory remaining? */
                    823:         if (avail_next >= avail_end) {
1.30      pk        824:                panic("pmap_next_page: too much memory?!");
1.29      pk        825:        }
                    826: #endif
                    827:
                    828:         /* Have memory, will travel... */
                    829:         *paddr = avail_next;
                    830:         avail_next += NBPG;
                    831:         return TRUE;
                    832: }
                    833:
                    834: /*
                    835:  * pmap_page_index()
                    836:  *
                    837:  * Given a physical address, return a page index.
                    838:  *
                    839:  * There can be some values that we never return (i.e. a hole)
                    840:  * as long as the range of indices returned by this function
                    841:  * is smaller than the value returned by pmap_free_pages().
                    842:  * The returned index does NOT need to start at zero.
                    843:  *
                    844:  */
1.50      christos  845: int
1.29      pk        846: pmap_page_index(pa)
                    847:        vm_offset_t pa;
                    848: {
                    849:        int idx;
                    850:        int nmem;
                    851:        register struct memarr *mp;
                    852:
                    853: #ifdef  DIAGNOSTIC
                    854:        if (pa < avail_start || pa >= avail_end)
1.54      christos  855:                panic("pmap_page_index: pa=0x%lx", pa);
1.29      pk        856: #endif
                    857:
                    858:        for (idx = 0, mp = pmemarr, nmem = npmemarr; --nmem >= 0; mp++) {
                    859:                if (pa >= mp->addr && pa < mp->addr + mp->len)
                    860:                        break;
                    861:                idx += atop(mp->len);
                    862:        }
                    863:
                    864:        return (idx + atop(pa - mp->addr));
                    865: }
1.39      pk        866:
                    867: int
                    868: pmap_pa_exists(pa)
                    869:        vm_offset_t pa;
                    870: {
                    871:        register int nmem;
                    872:        register struct memarr *mp;
                    873:
                    874:        for (mp = pmemarr, nmem = npmemarr; --nmem >= 0; mp++) {
                    875:                if (pa >= mp->addr && pa < mp->addr + mp->len)
                    876:                        return 1;
                    877:        }
                    878:
                    879:        return 0;
                    880: }
1.29      pk        881:
1.1       deraadt   882: /* update pv_flags given a valid pte */
1.55      pk        883: #define        MR4_4C(pte) (((pte) >> PG_M_SHIFT) & (PV_MOD | PV_REF))
                    884: #define MR4M(pte) (((pte) >> PG_M_SHIFT4M) & (PV_MOD4M | PV_REF4M))
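
/*
 * Illustrative sketch, not part of the original file: folding the
 * ref/mod bits of a valid hardware PTE into a pv head's flags.  The
 * macros above shift them onto PV_REF/PV_MOD (or the reversed sun4m
 * equivalents), per the PV_* definitions earlier in this file.
 */
#if 0
static void
example_sync_flags(pv, pte)
	register struct pvlist *pv;
	register int pte;
{
	pv->pv_flags |= (CPU_ISSUN4M ? MR4M(pte) : MR4_4C(pte));
}
#endif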
1.1       deraadt   885:
                    886: /*----------------------------------------------------------------*/
                    887:
                    888: /*
                    889:  * Agree with the monitor ROM as to how many MMU entries are
                    890:  * to be reserved, and map all of its segments into all contexts.
                    891:  *
                    892:  * Unfortunately, while the Version 0 PROM had a nice linked list of
                    893:  * taken virtual memory, the Version 2 PROM provides instead a convoluted
                    894:  * description of *free* virtual memory.  Rather than invert this, we
                    895:  * resort to two magic constants from the PROM vector description file.
                    896:  */
1.55      pk        897: #if defined(SUN4) || defined(SUN4C)
1.43      pk        898: void
1.58      pk        899: mmu_reservemon4_4c(nrp, nsp)
1.43      pk        900:        register int *nrp, *nsp;
1.1       deraadt   901: {
1.53      christos  902:        register u_int va = 0, eva = 0;
                    903:        register int mmuseg, i, nr, ns, vr, lastvr;
1.69      pk        904: #if defined(SUN4_MMU3L)
1.53      christos  905:        register int mmureg;
                    906: #endif
1.43      pk        907:        register struct regmap *rp;
1.1       deraadt   908:
1.55      pk        909: #if defined(SUN4M)
                    910:        if (CPU_ISSUN4M) {
1.81      pk        911:                panic("mmu_reservemon4_4c called on Sun4M machine");
1.55      pk        912:                return;
                    913:        }
                    914: #endif
                    915:
1.20      deraadt   916: #if defined(SUN4)
1.55      pk        917:        if (CPU_ISSUN4) {
1.29      pk        918:                prom_vstart = va = OLDMON_STARTVADDR;
                    919:                prom_vend = eva = OLDMON_ENDVADDR;
1.20      deraadt   920:        }
                    921: #endif
                    922: #if defined(SUN4C)
1.55      pk        923:        if (CPU_ISSUN4C) {
1.29      pk        924:                prom_vstart = va = OPENPROM_STARTVADDR;
                    925:                prom_vend = eva = OPENPROM_ENDVADDR;
1.19      deraadt   926:        }
1.20      deraadt   927: #endif
1.43      pk        928:        ns = *nsp;
                    929:        nr = *nrp;
                    930:        lastvr = 0;
1.1       deraadt   931:        while (va < eva) {
1.43      pk        932:                vr = VA_VREG(va);
                    933:                rp = &pmap_kernel()->pm_regmap[vr];
                    934:
1.69      pk        935: #if defined(SUN4_MMU3L)
                    936:                if (HASSUN4_MMU3L && vr != lastvr) {
1.43      pk        937:                        lastvr = vr;
                    938:                        mmureg = getregmap(va);
                    939:                        if (mmureg < nr)
                    940:                                rp->rg_smeg = nr = mmureg;
                    941:                        /*
                    942:                         * On 3-level MMU machines, we distribute regions,
                    943:                         * rather than segments, amongst the contexts.
                    944:                         */
                    945:                        for (i = ncontext; --i > 0;)
                    946:                                (*promvec->pv_setctxt)(i, (caddr_t)va, mmureg);
                    947:                }
                    948: #endif
1.1       deraadt   949:                mmuseg = getsegmap(va);
1.43      pk        950:                if (mmuseg < ns)
                    951:                        ns = mmuseg;
1.69      pk        952:
                    953:                if (!HASSUN4_MMU3L)
1.43      pk        954:                        for (i = ncontext; --i > 0;)
                    955:                                (*promvec->pv_setctxt)(i, (caddr_t)va, mmuseg);
                    956:
1.1       deraadt   957:                if (mmuseg == seginval) {
                    958:                        va += NBPSG;
                    959:                        continue;
                    960:                }
1.43      pk        961:                /*
                    962:                 * Another PROM segment. Enter into region map.
                    963:                 * Assume the entire segment is valid.
                    964:                 */
                    965:                rp->rg_nsegmap += 1;
                    966:                rp->rg_segmap[VA_VSEG(va)].sg_pmeg = mmuseg;
                    967:                rp->rg_segmap[VA_VSEG(va)].sg_npte = NPTESG;
                    968:
1.1       deraadt   969:                /* PROM maps its memory user-accessible: fix it. */
                    970:                for (i = NPTESG; --i >= 0; va += NBPG)
1.55      pk        971:                        setpte4(va, getpte4(va) | PG_S);
1.1       deraadt   972:        }
1.43      pk        973:        *nsp = ns;
                    974:        *nrp = nr;
                    975:        return;
1.1       deraadt   976: }
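                          /*
                           * Worked example (numbers are illustrative only): if the
                           * PROM's mappings turn out to occupy PMEGs 120-127, `ns'
                           * drops to 120 and the caller may treat PMEGs 0-119 as free
                           * for kernel use; `nr' plays the same role for region maps
                           * on 3-level MMU machines.
                           */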
1.55      pk        977: #endif
                    978:
                    979: #if defined(SUN4M) /* Sun4M versions of above */
                    980:
                    981: /*
                    982:  * Take the monitor's initial page table layout, convert it to 3rd-level pte's
                    983:  * (it starts out as a L1 mapping), and install it along with a set of kernel
                    984:  * mapping tables as the kernel's initial page table setup. Also create and
                    985:  * enable a context table. I suppose we also want to block user-mode access
                    986:  * to the new kernel/ROM mappings.
                    987:  */
                    988:
1.58      pk        989: /*
                    990:  * mmu_reservemon4m(): Copies the existing (ROM) page tables to kernel space,
1.55      pk        991:  * converting any L1/L2 PTEs to L3 PTEs. Does *not* copy the L1 entry mapping
1.71      pk        992:  * the kernel at KERNBASE (0xf8000000) since we don't want to map 16M of
1.55      pk        993:  * physical memory for the kernel. Thus the kernel must be installed later!
                    994:  * Also installs ROM mappings into the kernel pmap.
                    995:  * NOTE: This also revokes all user-mode access to the mapped regions.
                    996:  */
                    997: void
1.77      pk        998: mmu_reservemon4m(kpmap)
1.55      pk        999:        struct pmap *kpmap;
                   1000: {
1.71      pk       1001:        unsigned int rom_ctxtbl;
1.55      pk       1002:        register int te;
1.69      pk       1003:        unsigned int mmupcrsave;
1.55      pk       1004:
1.69      pk       1005: /*XXX-GCC!*/mmupcrsave = 0;
1.55      pk       1006:
                   1007:        /*
                   1008:         * XXX: although the Sun4M can handle 36 bits of physical
                   1009:         * address space, we assume that all these page tables, etc
                   1010:         * are in the lower 4G (32-bits) of address space, i.e. out of I/O
                   1011:         * space. Eventually this should be changed to support the 36 bit
                   1012:         * physical addressing, in case some crazed ROM designer decides to
                   1013:         * stick the pagetables up there. In that case, we should use MMU
                   1014:         * transparent mode, (i.e. ASI 0x20 to 0x2f) to access
                   1015:         * physical memory.
                   1016:         */
                   1017:
1.71      pk       1018:        rom_ctxtbl = (lda(SRMMU_CXTPTR,ASI_SRMMU) << SRMMU_PPNPASHIFT);
1.55      pk       1019:
                   1020:        /* We're going to have to use MMU passthrough. If we're on a
                    1021:         * Viking MicroSparc without an MBus, we need to turn off traps
                   1022:         * and set the AC bit at 0x8000 in the MMU's control register. Ugh.
                   1023:         * XXX: Once we've done this, can we still access kernel vm?
                   1024:         */
1.69      pk       1025:        if (cpuinfo.cpu_vers == 4 && cpuinfo.mxcc) {
1.55      pk       1026:                sta(SRMMU_PCR, ASI_SRMMU,       /* set MMU AC bit */
1.75      pk       1027:                    ((mmupcrsave = lda(SRMMU_PCR,ASI_SRMMU)) | VIKING_PCR_AC));
1.55      pk       1028:        }
1.69      pk       1029:
1.71      pk       1030:        te = lda(rom_ctxtbl, ASI_BYPASS);       /* i.e. context 0 */
1.55      pk       1031:        switch (te & SRMMU_TETYPE) {
1.62      pk       1032:        case SRMMU_TEINVALID:
1.69      pk       1033:                cpuinfo.ctx_tbl[0] = SRMMU_TEINVALID;
1.77      pk       1034:                panic("mmu_reservemon4m: no existing L0 mapping! "
                    1035:                      "(How are we running?)");
1.55      pk       1036:                break;
1.62      pk       1037:        case SRMMU_TEPTE:
1.55      pk       1038: #ifdef DEBUG
1.66      christos 1039:                printf("mmu_reservemon4m: trying to remap 4G segment!\n");
1.55      pk       1040: #endif
                   1041:                panic("mmu_reservemon4m: can't handle ROM 4G page size");
                   1042:                /* XXX: Should make this work, however stupid it is */
                   1043:                break;
1.62      pk       1044:        case SRMMU_TEPTD:
1.71      pk       1045:                mmu_setup4m_L1(te, kpmap);
1.55      pk       1046:                break;
1.62      pk       1047:        default:
1.55      pk       1048:                panic("mmu_reservemon4m: unknown pagetable entry type");
                   1049:        }
                   1050:
1.69      pk       1051:        if (cpuinfo.cpu_vers == 4 && cpuinfo.mxcc) {
                   1052:                sta(SRMMU_PCR, ASI_SRMMU, mmupcrsave);
1.55      pk       1053:        }
                   1054: }
                   1055:
                   1056: void
1.71      pk       1057: mmu_setup4m_L1(regtblptd, kpmap)
1.55      pk       1058:        int regtblptd;          /* PTD for region table to be remapped */
                   1059:        struct pmap *kpmap;
                   1060: {
                   1061:        register unsigned int regtblrover;
                   1062:        register int i;
                   1063:        unsigned int te;
1.71      pk       1064:        struct regmap *rp;
1.55      pk       1065:        int j, k;
                   1066:
1.69      pk       1067:        /*
                   1068:         * Here we scan the region table to copy any entries which appear.
1.55      pk       1069:         * We are only concerned with regions in kernel space and above
                   1070:         * (i.e. regions 0xf8 to 0xff). We also ignore region 0xf8, since
                   1071:         * that is the 16MB L1 mapping that the ROM used to map the kernel
                   1072:         * in initially. Later, we will rebuild a new L3 mapping for the
                   1073:         * kernel and install it before switching to the new pagetables.
                   1074:         */
1.71      pk       1075:        regtblrover =
                   1076:                ((regtblptd & ~SRMMU_TETYPE) << SRMMU_PPNPASHIFT) +
                   1077:                (VA_VREG(KERNBASE)+1) * sizeof(long);   /* kernel only */
1.55      pk       1078:
                   1079:        for (i = VA_VREG(KERNBASE) + 1; i < SRMMU_L1SIZE;
                   1080:             i++, regtblrover += sizeof(long)) {
1.71      pk       1081:
                   1082:                /* The region we're dealing with */
                   1083:                rp = &kpmap->pm_regmap[i];
                   1084:
1.55      pk       1085:                te = lda(regtblrover, ASI_BYPASS);
                    1086:                switch (te & SRMMU_TETYPE) {
1.62      pk       1087:                case SRMMU_TEINVALID:
1.55      pk       1088:                        break;
1.71      pk       1089:
1.62      pk       1090:                case SRMMU_TEPTE:
1.55      pk       1091: #ifdef DEBUG
1.81      pk       1092:                        printf("mmu_setup4m_L1: "
1.77      pk       1093:                               "converting region 0x%x from L1->L3\n", i);
1.55      pk       1094: #endif
1.71      pk       1095:                        /*
                    1096:                         * This region entry covers 16MB of memory -- or
                   1097:                         * (NSEGRG * NPTESG) pages -- which we must convert
                   1098:                         * into a 3-level description.
1.55      pk       1099:                         */
1.71      pk       1100:
1.55      pk       1101:                        for (j = 0; j < SRMMU_L2SIZE; j++) {
1.71      pk       1102:                                struct segmap *sp = &rp->rg_segmap[j];
1.55      pk       1103:
                   1104:                                for (k = 0; k < SRMMU_L3SIZE; k++) {
1.71      pk       1105:                                        sp->sg_npte++;
                   1106:                                        (sp->sg_pte)[k] =
1.55      pk       1107:                                            (te & SRMMU_L1PPNMASK) |
                   1108:                                            (j << SRMMU_L2PPNSHFT) |
                   1109:                                            (k << SRMMU_L3PPNSHFT) |
                   1110:                                            (te & SRMMU_PGBITSMSK) |
                   1111:                                            ((te & SRMMU_PROT_MASK) |
                   1112:                                             PPROT_U2S_OMASK) |
                   1113:                                            SRMMU_TEPTE;
                   1114:                                }
                   1115:                        }
                   1116:                        break;
1.71      pk       1117:
1.62      pk       1118:                case SRMMU_TEPTD:
1.71      pk       1119:                        mmu_setup4m_L2(te, rp);
1.55      pk       1120:                        break;
1.71      pk       1121:
1.62      pk       1122:                default:
1.55      pk       1123:                        panic("mmu_setup4m_L1: unknown pagetable entry type");
                   1124:                }
                   1125:        }
                   1126: }
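                          /*
                           * Worked example of the L1->L3 expansion above (made-up
                           * numbers): if an L1 PTE maps its 16MB region at physical
                           * 0x04000000, then page (j = 2, k = 5) of the expanded
                           * tables describes physical address
                           * 0x04000000 + 2*256KB + 5*4KB = 0x04085000, with the L1
                           * entry's protection bits copied and user access revoked
                           * via PPROT_U2S_OMASK.
                           */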
                   1127:
                   1128: void
1.71      pk       1129: mmu_setup4m_L2(segtblptd, rp)
1.55      pk       1130:        int segtblptd;
1.71      pk       1131:        struct regmap *rp;
1.55      pk       1132: {
                   1133:        register unsigned int segtblrover;
                   1134:        register int i, k;
                   1135:        unsigned int te;
1.71      pk       1136:        struct segmap *sp;
1.55      pk       1137:
                   1138:        segtblrover = (segtblptd & ~SRMMU_TETYPE) << SRMMU_PPNPASHIFT;
                   1139:        for (i = 0; i < SRMMU_L2SIZE; i++, segtblrover += sizeof(long)) {
1.71      pk       1140:
                   1141:                sp = &rp->rg_segmap[i];
                   1142:
1.55      pk       1143:                te = lda(segtblrover, ASI_BYPASS);
                    1144:                switch (te & SRMMU_TETYPE) {
1.62      pk       1145:                case SRMMU_TEINVALID:
1.55      pk       1146:                        break;
1.71      pk       1147:
1.62      pk       1148:                case SRMMU_TEPTE:
1.55      pk       1149: #ifdef DEBUG
1.81      pk       1150:                        printf("mmu_setup4m_L2: "
                                               "converting L2 entry at segment 0x%x to L3\n", i);
1.55      pk       1151: #endif
1.71      pk       1152:                        /*
                   1153:                         * This segment entry covers 256KB of memory -- or
                   1154:                         * (NPTESG) pages -- which we must convert
                   1155:                         * into a 3-level description.
                   1156:                         */
1.55      pk       1157:                        for (k = 0; k < SRMMU_L3SIZE; k++) {
1.71      pk       1158:                                sp->sg_npte++;
                   1159:                                (sp->sg_pte)[k] =
1.55      pk       1160:                                    (te & SRMMU_L1PPNMASK) |
                   1161:                                    (te & SRMMU_L2PPNMASK) |
                   1162:                                    (k << SRMMU_L3PPNSHFT) |
                   1163:                                    (te & SRMMU_PGBITSMSK) |
                   1164:                                    ((te & SRMMU_PROT_MASK) |
                   1165:                                     PPROT_U2S_OMASK) |
                   1166:                                    SRMMU_TEPTE;
                   1167:                        }
                   1168:                        break;
1.71      pk       1169:
1.62      pk       1170:                case SRMMU_TEPTD:
1.71      pk       1171:                        mmu_setup4m_L3(te, sp);
1.55      pk       1172:                        break;
1.71      pk       1173:
1.62      pk       1174:                default:
1.55      pk       1175:                        panic("mmu_setup4m_L2: unknown pagetable entry type");
                   1176:                }
                   1177:        }
                   1178: }
                   1179:
1.71      pk       1180: void
                   1181: mmu_setup4m_L3(pagtblptd, sp)
1.55      pk       1182:        register int pagtblptd;
1.71      pk       1183:        struct segmap *sp;
1.55      pk       1184: {
                   1185:        register unsigned int pagtblrover;
1.71      pk       1186:        register int i;
1.55      pk       1187:        register unsigned int te;
                   1188:
                   1189:        pagtblrover = (pagtblptd & ~SRMMU_TETYPE) << SRMMU_PPNPASHIFT;
                   1190:        for (i = 0; i < SRMMU_L3SIZE; i++, pagtblrover += sizeof(long)) {
                   1191:                te = lda(pagtblrover, ASI_BYPASS);
                    1192:                switch (te & SRMMU_TETYPE) {
1.62      pk       1193:                case SRMMU_TEINVALID:
1.55      pk       1194:                        break;
1.62      pk       1195:                case SRMMU_TEPTE:
1.71      pk       1196:                        sp->sg_npte++;
                   1197:                        sp->sg_pte[i] = te | PPROT_U2S_OMASK;
1.55      pk       1198:                        break;
1.62      pk       1199:                case SRMMU_TEPTD:
1.55      pk       1200:                        panic("mmu_setup4m_L3: PTD found in L3 page table");
1.62      pk       1201:                default:
1.55      pk       1202:                        panic("mmu_setup4m_L3: unknown pagetable entry type");
                   1203:                }
                   1204:        }
                   1205: }
                   1206: #endif /* defined SUN4M */
1.1       deraadt  1207:
                   1208: /*----------------------------------------------------------------*/
                   1209:
                   1210: /*
                   1211:  * MMU management.
                   1212:  */
1.43      pk       1213: struct mmuentry *me_alloc __P((struct mmuhd *, struct pmap *, int, int));
                   1214: void           me_free __P((struct pmap *, u_int));
                   1215: struct mmuentry        *region_alloc __P((struct mmuhd *, struct pmap *, int));
                   1216: void           region_free __P((struct pmap *, u_int));
1.1       deraadt  1217:
                   1218: /*
                   1219:  * Change contexts.  We need the old context number as well as the new
                   1220:  * one.  If the context is changing, we must write all user windows
                   1221:  * first, lest an interrupt cause them to be written to the (other)
                   1222:  * user whose context we set here.
                   1223:  */
                   1224: #define        CHANGE_CONTEXTS(old, new) \
                   1225:        if ((old) != (new)) { \
                   1226:                write_user_windows(); \
                   1227:                setcontext(new); \
                   1228:        }
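                          /*
                           * N.B.: the macro expands to a bare `if', so an `else'
                           * following an unbraced CHANGE_CONTEXTS() would bind to the
                           * macro's `if'.  A conventional hardening, should that ever
                           * bite (a sketch only, not the definition used here):
                           */
                          #if 0
                          #define CHANGE_CONTEXTS(old, new) do {  \
                                  if ((old) != (new)) {           \
                                          write_user_windows();   \
                                          setcontext(new);        \
                                  }                               \
                          } while (0)
                          #endif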
                   1229:
1.55      pk       1230: #if defined(SUN4) || defined(SUN4C) /* This is old sun MMU stuff */
1.1       deraadt  1231: /*
                   1232:  * Allocate an MMU entry (i.e., a PMEG).
                   1233:  * If necessary, steal one from someone else.
                   1234:  * Put it on the tail of the given queue
                   1235:  * (which is either the LRU list or the locked list).
                   1236:  * The locked list is not actually ordered, but this is easiest.
                   1237:  * Also put it on the given (new) pmap's chain,
                   1238:  * enter its pmeg number into that pmap's segmap,
                   1239:  * and store the pmeg's new virtual segment number (me->me_vseg).
                   1240:  *
                   1241:  * This routine is large and complicated, but it must be fast
                   1242:  * since it implements the dynamic allocation of MMU entries.
                   1243:  */
                   1244: struct mmuentry *
1.43      pk       1245: me_alloc(mh, newpm, newvreg, newvseg)
1.1       deraadt  1246:        register struct mmuhd *mh;
                   1247:        register struct pmap *newpm;
1.43      pk       1248:        register int newvreg, newvseg;
1.1       deraadt  1249: {
                   1250:        register struct mmuentry *me;
                   1251:        register struct pmap *pm;
                   1252:        register int i, va, pa, *pte, tpte;
                   1253:        int ctx;
1.43      pk       1254:        struct regmap *rp;
                   1255:        struct segmap *sp;
1.1       deraadt  1256:
                   1257:        /* try free list first */
1.43      pk       1258:        if ((me = segm_freelist.tqh_first) != NULL) {
                   1259:                TAILQ_REMOVE(&segm_freelist, me, me_list);
1.1       deraadt  1260: #ifdef DEBUG
                   1261:                if (me->me_pmap != NULL)
                   1262:                        panic("me_alloc: freelist entry has pmap");
                   1263:                if (pmapdebug & PDB_MMU_ALLOC)
1.66      christos 1264:                        printf("me_alloc: got pmeg %d\n", me->me_cookie);
1.1       deraadt  1265: #endif
1.43      pk       1266:                TAILQ_INSERT_TAIL(mh, me, me_list);
1.1       deraadt  1267:
                    1268:                /* onto pmap chain; pmap is already locked, if needed */
1.43      pk       1269:                TAILQ_INSERT_TAIL(&newpm->pm_seglist, me, me_pmchain);
1.70      pk       1270: #ifdef DIAGNOSTIC
                   1271:                pmap_stats.ps_npmeg_free--;
                   1272:                if (mh == &segm_locked)
                   1273:                        pmap_stats.ps_npmeg_locked++;
                   1274:                else
                   1275:                        pmap_stats.ps_npmeg_lru++;
                   1276: #endif
1.1       deraadt  1277:
                   1278:                /* into pmap segment table, with backpointers */
1.43      pk       1279:                newpm->pm_regmap[newvreg].rg_segmap[newvseg].sg_pmeg = me->me_cookie;
1.1       deraadt  1280:                me->me_pmap = newpm;
                   1281:                me->me_vseg = newvseg;
1.43      pk       1282:                me->me_vreg = newvreg;
1.1       deraadt  1283:
                   1284:                return (me);
                   1285:        }
                   1286:
                   1287:        /* no luck, take head of LRU list */
1.43      pk       1288:        if ((me = segm_lru.tqh_first) == NULL)
1.1       deraadt  1289:                panic("me_alloc: all pmegs gone");
1.43      pk       1290:
1.1       deraadt  1291:        pm = me->me_pmap;
                   1292:        if (pm == NULL)
                   1293:                panic("me_alloc: LRU entry has no pmap");
1.42      mycroft  1294:        if (pm == pmap_kernel())
1.1       deraadt  1295:                panic("me_alloc: stealing from kernel");
1.12      pk       1296: #ifdef DEBUG
1.1       deraadt  1297:        if (pmapdebug & (PDB_MMU_ALLOC | PDB_MMU_STEAL))
1.66      christos 1298:                printf("me_alloc: stealing pmeg %x from pmap %p\n",
1.43      pk       1299:                    me->me_cookie, pm);
1.1       deraadt  1300: #endif
                   1301:        /*
                   1302:         * Remove from LRU list, and insert at end of new list
                   1303:         * (probably the LRU list again, but so what?).
                   1304:         */
1.43      pk       1305:        TAILQ_REMOVE(&segm_lru, me, me_list);
                   1306:        TAILQ_INSERT_TAIL(mh, me, me_list);
                   1307:
1.70      pk       1308: #ifdef DIAGNOSTIC
                   1309:        if (mh == &segm_locked) {
                   1310:                pmap_stats.ps_npmeg_lru--;
                   1311:                pmap_stats.ps_npmeg_locked++;
                   1312:        }
                   1313: #endif
                   1314:
1.43      pk       1315:        rp = &pm->pm_regmap[me->me_vreg];
                   1316:        if (rp->rg_segmap == NULL)
                   1317:                panic("me_alloc: LRU entry's pmap has no segments");
                   1318:        sp = &rp->rg_segmap[me->me_vseg];
                   1319:        pte = sp->sg_pte;
                   1320:        if (pte == NULL)
                   1321:                panic("me_alloc: LRU entry's pmap has no ptes");
1.1       deraadt  1322:
                   1323:        /*
                   1324:         * The PMEG must be mapped into some context so that we can
                   1325:         * read its PTEs.  Use its current context if it has one;
                   1326:         * if not, and since context 0 is reserved for the kernel,
                   1327:         * the simplest method is to switch to 0 and map the PMEG
                   1328:         * to virtual address 0---which, being a user space address,
                   1329:         * is by definition not in use.
                   1330:         *
                   1331:         * XXX for ncpus>1 must use per-cpu VA?
                   1332:         * XXX do not have to flush cache immediately
                   1333:         */
1.71      pk       1334:        ctx = getcontext4();
1.43      pk       1335:        if (CTX_USABLE(pm,rp)) {
1.1       deraadt  1336:                CHANGE_CONTEXTS(ctx, pm->pm_ctxnum);
1.69      pk       1337:                cache_flush_segment(me->me_vreg, me->me_vseg);
1.43      pk       1338:                va = VSTOVA(me->me_vreg,me->me_vseg);
1.1       deraadt  1339:        } else {
                   1340:                CHANGE_CONTEXTS(ctx, 0);
1.69      pk       1341:                if (HASSUN4_MMU3L)
1.43      pk       1342:                        setregmap(0, tregion);
                   1343:                setsegmap(0, me->me_cookie);
1.1       deraadt  1344:                /*
                   1345:                 * No cache flush needed: it happened earlier when
                   1346:                 * the old context was taken.
                   1347:                 */
                   1348:                va = 0;
                   1349:        }
                   1350:
                   1351:        /*
                   1352:         * Record reference and modify bits for each page,
                   1353:         * and copy PTEs into kernel memory so that they can
                   1354:         * be reloaded later.
                   1355:         */
                   1356:        i = NPTESG;
                   1357:        do {
1.55      pk       1358:                tpte = getpte4(va);
1.33      pk       1359:                if ((tpte & (PG_V | PG_TYPE)) == (PG_V | PG_OBMEM)) {
1.60      pk       1360:                        pa = ptoa(tpte & PG_PFNUM);
1.1       deraadt  1361:                        if (managed(pa))
1.55      pk       1362:                                pvhead(pa)->pv_flags |= MR4_4C(tpte);
1.1       deraadt  1363:                }
                   1364:                *pte++ = tpte & ~(PG_U|PG_M);
                   1365:                va += NBPG;
                   1366:        } while (--i > 0);
                   1367:
                   1368:        /* update segment tables */
                   1369:        simple_lock(&pm->pm_lock); /* what if other cpu takes mmuentry ?? */
1.43      pk       1370:        if (CTX_USABLE(pm,rp))
                   1371:                setsegmap(VSTOVA(me->me_vreg,me->me_vseg), seginval);
                   1372:        sp->sg_pmeg = seginval;
1.1       deraadt  1373:
                   1374:        /* off old pmap chain */
1.43      pk       1375:        TAILQ_REMOVE(&pm->pm_seglist, me, me_pmchain);
1.1       deraadt  1376:        simple_unlock(&pm->pm_lock);
1.71      pk       1377:        setcontext4(ctx);       /* done with old context */
1.1       deraadt  1378:
                   1379:        /* onto new pmap chain; new pmap is already locked, if needed */
1.43      pk       1380:        TAILQ_INSERT_TAIL(&newpm->pm_seglist, me, me_pmchain);
1.1       deraadt  1381:
                   1382:        /* into new segment table, with backpointers */
1.43      pk       1383:        newpm->pm_regmap[newvreg].rg_segmap[newvseg].sg_pmeg = me->me_cookie;
1.1       deraadt  1384:        me->me_pmap = newpm;
                   1385:        me->me_vseg = newvseg;
1.43      pk       1386:        me->me_vreg = newvreg;
1.1       deraadt  1387:
                   1388:        return (me);
                   1389: }
                   1390:
                   1391: /*
                   1392:  * Free an MMU entry.
                   1393:  *
                   1394:  * Assumes the corresponding pmap is already locked.
                   1395:  * Does NOT flush cache, but does record ref and mod bits.
                   1396:  * The rest of each PTE is discarded.
                   1397:  * CALLER MUST SET CONTEXT to pm->pm_ctxnum (if pmap has
                   1398:  * a context) or to 0 (if not).  Caller must also update
                   1399:  * pm->pm_segmap and (possibly) the hardware.
                   1400:  */
                   1401: void
                   1402: me_free(pm, pmeg)
                   1403:        register struct pmap *pm;
                   1404:        register u_int pmeg;
                   1405: {
1.43      pk       1406:        register struct mmuentry *me = &mmusegments[pmeg];
1.1       deraadt  1407:        register int i, va, pa, tpte;
1.43      pk       1408:        register int vr;
                   1409:        register struct regmap *rp;
                   1410:
                   1411:        vr = me->me_vreg;
1.1       deraadt  1412:
                   1413: #ifdef DEBUG
                   1414:        if (pmapdebug & PDB_MMU_ALLOC)
1.66      christos 1415:                printf("me_free: freeing pmeg %d from pmap %p\n",
1.43      pk       1416:                    me->me_cookie, pm);
                   1417:        if (me->me_cookie != pmeg)
1.1       deraadt  1418:                panic("me_free: wrong mmuentry");
                   1419:        if (pm != me->me_pmap)
                   1420:                panic("me_free: pm != me_pmap");
                   1421: #endif
                   1422:
1.43      pk       1423:        rp = &pm->pm_regmap[vr];
                   1424:
1.1       deraadt  1425:        /* just like me_alloc, but no cache flush, and context already set */
1.43      pk       1426:        if (CTX_USABLE(pm,rp)) {
                   1427:                va = VSTOVA(vr,me->me_vseg);
                   1428:        } else {
                   1429: #ifdef DEBUG
1.71      pk       1430:                if (getcontext4() != 0)
                                                panic("me_free: ctx != 0");
1.43      pk       1431: #endif
1.69      pk       1432:                if (HASSUN4_MMU3L)
1.43      pk       1433:                        setregmap(0, tregion);
                   1434:                setsegmap(0, me->me_cookie);
1.1       deraadt  1435:                va = 0;
                   1436:        }
                   1437:        i = NPTESG;
                   1438:        do {
1.55      pk       1439:                tpte = getpte4(va);
1.33      pk       1440:                if ((tpte & (PG_V | PG_TYPE)) == (PG_V | PG_OBMEM)) {
1.60      pk       1441:                        pa = ptoa(tpte & PG_PFNUM);
1.1       deraadt  1442:                        if (managed(pa))
1.55      pk       1443:                                pvhead(pa)->pv_flags |= MR4_4C(tpte);
1.1       deraadt  1444:                }
                   1445:                va += NBPG;
                   1446:        } while (--i > 0);
                   1447:
                   1448:        /* take mmu entry off pmap chain */
1.43      pk       1449:        TAILQ_REMOVE(&pm->pm_seglist, me, me_pmchain);
                   1450:        /* ... and remove from segment map */
                   1451:        if (rp->rg_segmap == NULL)
                   1452:                panic("me_free: no segments in pmap");
                   1453:        rp->rg_segmap[me->me_vseg].sg_pmeg = seginval;
                   1454:
                   1455:        /* off LRU or lock chain */
                   1456:        if (pm == pmap_kernel()) {
                   1457:                TAILQ_REMOVE(&segm_locked, me, me_list);
1.70      pk       1458: #ifdef DIAGNOSTIC
                   1459:                pmap_stats.ps_npmeg_locked--;
                   1460: #endif
1.43      pk       1461:        } else {
                   1462:                TAILQ_REMOVE(&segm_lru, me, me_list);
1.70      pk       1463: #ifdef DIAGNOSTIC
                   1464:                pmap_stats.ps_npmeg_lru--;
                   1465: #endif
1.43      pk       1466:        }
                   1467:
                   1468:        /* no associated pmap; on free list */
                   1469:        me->me_pmap = NULL;
                   1470:        TAILQ_INSERT_TAIL(&segm_freelist, me, me_list);
1.70      pk       1471: #ifdef DIAGNOSTIC
                   1472:        pmap_stats.ps_npmeg_free++;
                   1473: #endif
1.43      pk       1474: }
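                          /*
                           * Sketch of the caller contract described above, in this
                           * file's own idioms (illustrative fragment, not compiled;
                           * assumes the caller's ctx/pm/pmeg declarations):
                           */
                          #if 0
                                  ctx = getcontext4();
                                  CHANGE_CONTEXTS(ctx, pm->pm_ctx ? pm->pm_ctxnum : 0);
                                  me_free(pm, pmeg);
                                  /* ... update pm->pm_segmap and, if needed, the MMU ... */
                                  setcontext4(ctx);
                          #endif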
                   1475:
1.69      pk       1476: #if defined(SUN4_MMU3L)
1.43      pk       1477:
                   1478: /* XXX - Merge with segm_alloc/segm_free ? */
                   1479:
                   1480: struct mmuentry *
                   1481: region_alloc(mh, newpm, newvr)
                   1482:        register struct mmuhd *mh;
                   1483:        register struct pmap *newpm;
                   1484:        register int newvr;
                   1485: {
                   1486:        register struct mmuentry *me;
                   1487:        register struct pmap *pm;
                   1488:        int ctx;
                   1489:        struct regmap *rp;
                   1490:
                   1491:        /* try free list first */
                   1492:        if ((me = region_freelist.tqh_first) != NULL) {
                   1493:                TAILQ_REMOVE(&region_freelist, me, me_list);
                   1494: #ifdef DEBUG
                   1495:                if (me->me_pmap != NULL)
                   1496:                        panic("region_alloc: freelist entry has pmap");
                   1497:                if (pmapdebug & PDB_MMUREG_ALLOC)
1.66      christos 1498:                        printf("region_alloc: got smeg %x\n", me->me_cookie);
1.43      pk       1499: #endif
                   1500:                TAILQ_INSERT_TAIL(mh, me, me_list);
                   1501:
                    1502:                /* onto pmap chain; pmap is already locked, if needed */
                   1503:                TAILQ_INSERT_TAIL(&newpm->pm_reglist, me, me_pmchain);
                   1504:
                   1505:                /* into pmap segment table, with backpointers */
                   1506:                newpm->pm_regmap[newvr].rg_smeg = me->me_cookie;
                   1507:                me->me_pmap = newpm;
                   1508:                me->me_vreg = newvr;
                   1509:
                   1510:                return (me);
                   1511:        }
                   1512:
                   1513:        /* no luck, take head of LRU list */
                   1514:        if ((me = region_lru.tqh_first) == NULL)
                   1515:                panic("region_alloc: all smegs gone");
                   1516:
                   1517:        pm = me->me_pmap;
                   1518:        if (pm == NULL)
                   1519:                panic("region_alloc: LRU entry has no pmap");
                   1520:        if (pm == pmap_kernel())
                   1521:                panic("region_alloc: stealing from kernel");
                   1522: #ifdef DEBUG
                   1523:        if (pmapdebug & (PDB_MMUREG_ALLOC | PDB_MMUREG_STEAL))
1.66      christos 1524:                printf("region_alloc: stealing smeg %x from pmap %p\n",
1.43      pk       1525:                    me->me_cookie, pm);
                   1526: #endif
                   1527:        /*
                   1528:         * Remove from LRU list, and insert at end of new list
                   1529:         * (probably the LRU list again, but so what?).
                   1530:         */
                   1531:        TAILQ_REMOVE(&region_lru, me, me_list);
                   1532:        TAILQ_INSERT_TAIL(mh, me, me_list);
                   1533:
                   1534:        rp = &pm->pm_regmap[me->me_vreg];
1.71      pk       1535:        ctx = getcontext4();
1.43      pk       1536:        if (pm->pm_ctx) {
                   1537:                CHANGE_CONTEXTS(ctx, pm->pm_ctxnum);
1.69      pk       1538:                cache_flush_region(me->me_vreg);
1.43      pk       1539:        }
                   1540:
                   1541:        /* update region tables */
                   1542:        simple_lock(&pm->pm_lock); /* what if other cpu takes mmuentry ?? */
                   1543:        if (pm->pm_ctx)
                   1544:                setregmap(VRTOVA(me->me_vreg), reginval);
                   1545:        rp->rg_smeg = reginval;
                   1546:
                   1547:        /* off old pmap chain */
                   1548:        TAILQ_REMOVE(&pm->pm_reglist, me, me_pmchain);
                   1549:        simple_unlock(&pm->pm_lock);
1.71      pk       1550:        setcontext4(ctx);       /* done with old context */
1.43      pk       1551:
                   1552:        /* onto new pmap chain; new pmap is already locked, if needed */
                   1553:        TAILQ_INSERT_TAIL(&newpm->pm_reglist, me, me_pmchain);
                   1554:
                   1555:        /* into new segment table, with backpointers */
                   1556:        newpm->pm_regmap[newvr].rg_smeg = me->me_cookie;
                   1557:        me->me_pmap = newpm;
                   1558:        me->me_vreg = newvr;
                   1559:
                   1560:        return (me);
                   1561: }
                   1562:
                   1563: /*
                   1564:  * Free an MMU entry.
                   1565:  *
                   1566:  * Assumes the corresponding pmap is already locked.
                    1567:  * Flushes the cache, but only if the pmap has a context.
                   1568:  * CALLER MUST SET CONTEXT to pm->pm_ctxnum (if pmap has
                   1569:  * a context) or to 0 (if not).  Caller must also update
                   1570:  * pm->pm_regmap and (possibly) the hardware.
                   1571:  */
                   1572: void
                   1573: region_free(pm, smeg)
                   1574:        register struct pmap *pm;
                   1575:        register u_int smeg;
                   1576: {
                   1577:        register struct mmuentry *me = &mmuregions[smeg];
                   1578:
                   1579: #ifdef DEBUG
                   1580:        if (pmapdebug & PDB_MMUREG_ALLOC)
1.66      christos 1581:                printf("region_free: freeing smeg %x from pmap %p\n",
1.43      pk       1582:                    me->me_cookie, pm);
                   1583:        if (me->me_cookie != smeg)
                   1584:                panic("region_free: wrong mmuentry");
                   1585:        if (pm != me->me_pmap)
                   1586:                panic("region_free: pm != me_pmap");
                   1587: #endif
                   1588:
                   1589:        if (pm->pm_ctx)
1.69      pk       1590:                cache_flush_region(me->me_vreg);
1.43      pk       1591:
                   1592:        /* take mmu entry off pmap chain */
                   1593:        TAILQ_REMOVE(&pm->pm_reglist, me, me_pmchain);
1.1       deraadt  1594:        /* ... and remove from segment map */
1.43      pk       1595:        pm->pm_regmap[smeg].rg_smeg = reginval;
1.1       deraadt  1596:
                   1597:        /* off LRU or lock chain */
1.43      pk       1598:        if (pm == pmap_kernel()) {
                   1599:                TAILQ_REMOVE(&region_locked, me, me_list);
                   1600:        } else {
                   1601:                TAILQ_REMOVE(&region_lru, me, me_list);
                   1602:        }
1.1       deraadt  1603:
                   1604:        /* no associated pmap; on free list */
                   1605:        me->me_pmap = NULL;
1.43      pk       1606:        TAILQ_INSERT_TAIL(&region_freelist, me, me_list);
1.1       deraadt  1607: }
1.43      pk       1608: #endif
1.1       deraadt  1609:
                   1610: /*
                   1611:  * `Page in' (load or inspect) an MMU entry; called on page faults.
                   1612:  * Returns 1 if we reloaded the segment, -1 if the segment was
                   1613:  * already loaded and the page was marked valid (in which case the
                   1614:  * fault must be a bus error or something), or 0 (segment loaded but
                   1615:  * PTE not valid, or segment not loaded at all).
                   1616:  */
                   1617: int
1.61      pk       1618: mmu_pagein(pm, va, prot)
1.1       deraadt  1619:        register struct pmap *pm;
1.45      pk       1620:        register int va, prot;
1.1       deraadt  1621: {
                   1622:        register int *pte;
1.45      pk       1623:        register int vr, vs, pmeg, i, s, bits;
1.43      pk       1624:        struct regmap *rp;
                   1625:        struct segmap *sp;
                   1626:
1.45      pk       1627:        if (prot != VM_PROT_NONE)
                   1628:                bits = PG_V | ((prot & VM_PROT_WRITE) ? PG_W : 0);
                   1629:        else
                   1630:                bits = 0;
                   1631:
1.43      pk       1632:        vr = VA_VREG(va);
                   1633:        vs = VA_VSEG(va);
                   1634:        rp = &pm->pm_regmap[vr];
                   1635: #ifdef DEBUG
                    1636:        if (pm == pmap_kernel())
1.66      christos 1637:                printf("mmu_pagein: kernel wants map at va %x, "
                                       "vr %d, vs %d\n", va, vr, vs);
1.43      pk       1638: #endif
                   1639:
                   1640:        /* return 0 if we have no PMEGs to load */
                   1641:        if (rp->rg_segmap == NULL)
                   1642:                return (0);
1.69      pk       1643: #if defined(SUN4_MMU3L)
                   1644:        if (HASSUN4_MMU3L && rp->rg_smeg == reginval) {
1.43      pk       1645:                smeg_t smeg;
                   1646:                unsigned int tva = VA_ROUNDDOWNTOREG(va);
                   1647:                struct segmap *sp = rp->rg_segmap;
                   1648:
                   1649:                s = splpmap();          /* paranoid */
                   1650:                smeg = region_alloc(&region_lru, pm, vr)->me_cookie;
                   1651:                setregmap(tva, smeg);
                   1652:                i = NSEGRG;
                   1653:                do {
                   1654:                        setsegmap(tva, sp++->sg_pmeg);
                   1655:                        tva += NBPSG;
                   1656:                } while (--i > 0);
                   1657:                splx(s);
                   1658:        }
                   1659: #endif
                   1660:        sp = &rp->rg_segmap[vs];
1.1       deraadt  1661:
                   1662:        /* return 0 if we have no PTEs to load */
1.43      pk       1663:        if ((pte = sp->sg_pte) == NULL)
1.1       deraadt  1664:                return (0);
1.43      pk       1665:
1.1       deraadt  1666:        /* return -1 if the fault is `hard', 0 if not */
1.43      pk       1667:        if (sp->sg_pmeg != seginval)
1.55      pk       1668:                return (bits && (getpte4(va) & bits) == bits ? -1 : 0);
1.1       deraadt  1669:
                   1670:        /* reload segment: write PTEs into a new LRU entry */
                   1671:        va = VA_ROUNDDOWNTOSEG(va);
                   1672:        s = splpmap();          /* paranoid */
1.43      pk       1673:        pmeg = me_alloc(&segm_lru, pm, vr, vs)->me_cookie;
1.1       deraadt  1674:        setsegmap(va, pmeg);
                   1675:        i = NPTESG;
                   1676:        do {
1.55      pk       1677:                setpte4(va, *pte++);
1.1       deraadt  1678:                va += NBPG;
                   1679:        } while (--i > 0);
                   1680:        splx(s);
                   1681:        return (1);
                   1682: }
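                          /*
                           * Sketch of how a fault handler might act on the three
                           * return values documented above; `send_bus_error' is a
                           * hypothetical name, not a routine in this kernel
                           * (illustrative fragment; assumes pm/va/prot in scope):
                           */
                          #if 0
                                  switch (mmu_pagein(pm, va, prot)) {
                                  case 1:         /* segment reloaded: retry access */
                                          return;
                                  case -1:        /* already loaded and valid: hard fault */
                                          send_bus_error(va);
                                          return;
                                  case 0:         /* no valid PTE: fall back to vm_fault() */
                                          break;
                                  }
                          #endif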
1.55      pk       1683: #endif /* defined SUN4 or SUN4C */
                   1684:
1.1       deraadt  1685: /*
                   1686:  * Allocate a context.  If necessary, steal one from someone else.
                   1687:  * Changes hardware context number and loads segment map.
                   1688:  *
                   1689:  * This routine is only ever called from locore.s just after it has
                   1690:  * saved away the previous process, so there are no active user windows.
                   1691:  */
                   1692: void
                   1693: ctx_alloc(pm)
                   1694:        register struct pmap *pm;
                   1695: {
                   1696:        register union ctxinfo *c;
1.49      pk       1697:        register int s, cnum, i, doflush;
1.43      pk       1698:        register struct regmap *rp;
1.13      pk       1699:        register int gap_start, gap_end;
                   1700:        register unsigned long va;
1.1       deraadt  1701:
1.55      pk       1702: /*XXX-GCC!*/gap_start=gap_end=0;
1.1       deraadt  1703: #ifdef DEBUG
                   1704:        if (pm->pm_ctx)
                   1705:                panic("ctx_alloc pm_ctx");
                   1706:        if (pmapdebug & PDB_CTX_ALLOC)
1.66      christos 1707:                printf("ctx_alloc(%p)\n", pm);
1.1       deraadt  1708: #endif
1.55      pk       1709:        if (CPU_ISSUN4OR4C) {
                   1710:                gap_start = pm->pm_gap_start;
                   1711:                gap_end = pm->pm_gap_end;
                   1712:        }
1.13      pk       1713:
1.49      pk       1714:        s = splpmap();
1.1       deraadt  1715:        if ((c = ctx_freelist) != NULL) {
                   1716:                ctx_freelist = c->c_nextfree;
1.69      pk       1717:                cnum = c - cpuinfo.ctxinfo;
1.49      pk       1718:                doflush = 0;
1.1       deraadt  1719:        } else {
                   1720:                if ((ctx_kick += ctx_kickdir) >= ncontext) {
                   1721:                        ctx_kick = ncontext - 1;
                   1722:                        ctx_kickdir = -1;
                   1723:                } else if (ctx_kick < 1) {
                   1724:                        ctx_kick = 1;
                   1725:                        ctx_kickdir = 1;
                   1726:                }
1.69      pk       1727:                c = &cpuinfo.ctxinfo[cnum = ctx_kick];
1.1       deraadt  1728: #ifdef DEBUG
                   1729:                if (c->c_pmap == NULL)
                   1730:                        panic("ctx_alloc cu_pmap");
                   1731:                if (pmapdebug & (PDB_CTX_ALLOC | PDB_CTX_STEAL))
1.66      christos 1732:                        printf("ctx_alloc: steal context %d from %p\n",
1.1       deraadt  1733:                            cnum, c->c_pmap);
                   1734: #endif
                   1735:                c->c_pmap->pm_ctx = NULL;
1.69      pk       1736:                doflush = (CACHEINFO.c_vactype != VAC_NONE);
1.55      pk       1737:                if (CPU_ISSUN4OR4C) {
                   1738:                        if (gap_start < c->c_pmap->pm_gap_start)
                   1739:                                gap_start = c->c_pmap->pm_gap_start;
                   1740:                        if (gap_end > c->c_pmap->pm_gap_end)
                   1741:                                gap_end = c->c_pmap->pm_gap_end;
                   1742:                }
1.1       deraadt  1743:        }
1.49      pk       1744:
1.1       deraadt  1745:        c->c_pmap = pm;
                   1746:        pm->pm_ctx = c;
                   1747:        pm->pm_ctxnum = cnum;
                   1748:
1.55      pk       1749:        if (CPU_ISSUN4OR4C) {
                   1750:                /*
                   1751:                 * Write pmap's region (3-level MMU) or segment table into
                   1752:                 * the MMU.
                   1753:                 *
                   1754:                 * Only write those entries that actually map something in
                   1755:                 * this context by maintaining a pair of region numbers in
                   1756:                 * between which the pmap has no valid mappings.
                   1757:                 *
                   1758:                 * If a context was just allocated from the free list, trust
                   1759:                 * that all its pmeg numbers are `seginval'. We make sure this
                   1760:                 * is the case initially in pmap_bootstrap(). Otherwise, the
                   1761:                 * context was freed by calling ctx_free() in pmap_release(),
                   1762:                 * which in turn is supposedly called only when all mappings
                   1763:                 * have been removed.
                   1764:                 *
                   1765:                 * On the other hand, if the context had to be stolen from
                    1766:                 * another pmap, we may have to shrink the gap to the
                    1767:                 * intersection of the gaps in the new and the previous map.
                   1768:                 */
1.43      pk       1769:
1.80      pk       1770:                setcontext4(cnum);
1.55      pk       1771:                splx(s);
                   1772:                if (doflush)
                   1773:                        cache_flush_context();
1.43      pk       1774:
1.55      pk       1775:                rp = pm->pm_regmap;
                   1776:                for (va = 0, i = NUREG; --i >= 0; ) {
                   1777:                        if (VA_VREG(va) >= gap_start) {
                   1778:                                va = VRTOVA(gap_end);
                   1779:                                i -= gap_end - gap_start;
                   1780:                                rp += gap_end - gap_start;
                   1781:                                if (i < 0)
                   1782:                                        break;
                   1783:                                /* mustn't re-enter this branch */
                   1784:                                gap_start = NUREG;
                   1785:                        }
1.69      pk       1786:                        if (HASSUN4_MMU3L) {
1.55      pk       1787:                                setregmap(va, rp++->rg_smeg);
                   1788:                                va += NBPRG;
1.69      pk       1789:                        } else {
1.55      pk       1790:                                register int j;
                   1791:                                register struct segmap *sp = rp->rg_segmap;
                   1792:                                for (j = NSEGRG; --j >= 0; va += NBPSG)
                   1793:                                        setsegmap(va,
                    1794:                                                  sp ? sp++->sg_pmeg : seginval);
                   1795:                                rp++;
                   1796:                        }
1.43      pk       1797:                }
1.55      pk       1798:
                   1799:        } else if (CPU_ISSUN4M) {
                   1800:
1.80      pk       1801: #if defined(SUN4M)
1.55      pk       1802:                /*
                   1803:                 * Reload page and context tables to activate the page tables
                   1804:                 * for this context.
                   1805:                 *
                   1806:                 * The gap stuff isn't really needed in the Sun4m architecture,
                   1807:                 * since we don't have to worry about excessive mappings (all
                   1808:                 * mappings exist since the page tables must be complete for
                   1809:                 * the mmu to be happy).
                   1810:                 *
                   1811:                 * If a context was just allocated from the free list, trust
                   1812:                 * that all of its mmu-edible page tables are zeroed out
                   1813:                 * (except for those associated with the kernel). We make
                   1814:                 * sure this is the case initially in pmap_bootstrap() and
                   1815:                 * pmap_init() (?).
                   1816:                 * Otherwise, the context was freed by calling ctx_free() in
                   1817:                 * pmap_release(), which in turn is supposedly called only
                   1818:                 * when all mappings have been removed.
                   1819:                 *
                   1820:                 * XXX: Do we have to flush cache after reloading ctx tbl?
                   1821:                 */
                   1822:
1.79      pk       1823: #ifdef DEBUG
1.69      pk       1824: #if 0
1.61      pk       1825:                ctxbusyvector[cnum] = 1; /* mark context as busy */
1.69      pk       1826: #endif
1.55      pk       1827:                if (pm->pm_reg_ptps_pa == 0)
                   1828:                        panic("ctx_alloc: no region table in current pmap");
                   1829: #endif
                    1830:        /* setcontext(0);  XXX paranoia? can we modify the current ctx? */
1.79      pk       1831:                setpgt4m(&cpuinfo.ctx_tbl[cnum],
                   1832:                        (pm->pm_reg_ptps_pa >> SRMMU_PPNPASHIFT) | SRMMU_TEPTD);
1.55      pk       1833:
1.80      pk       1834:                setcontext4m(cnum);
1.55      pk       1835:                if (doflush)
                   1836:                        cache_flush_context();
                   1837:                tlb_flush_context(); /* remove any remnant garbage from tlb */
1.43      pk       1838: #endif
1.55      pk       1839:                splx(s);
1.13      pk       1840:        }
1.1       deraadt  1841: }
                   1842:
                   1843: /*
                   1844:  * Give away a context.  Flushes cache and sets current context to 0.
                   1845:  */
                   1846: void
                   1847: ctx_free(pm)
                   1848:        struct pmap *pm;
                   1849: {
                   1850:        register union ctxinfo *c;
                   1851:        register int newc, oldc;
                   1852:
                   1853:        if ((c = pm->pm_ctx) == NULL)
                   1854:                panic("ctx_free");
                   1855:        pm->pm_ctx = NULL;
                   1856:        oldc = getcontext();
1.55      pk       1857:
1.69      pk       1858:        if (CACHEINFO.c_vactype != VAC_NONE) {
1.1       deraadt  1859:                newc = pm->pm_ctxnum;
                   1860:                CHANGE_CONTEXTS(oldc, newc);
                   1861:                cache_flush_context();
1.55      pk       1862: #if defined(SUN4M)
                   1863:                if (CPU_ISSUN4M)
                   1864:                        tlb_flush_context();
                   1865: #endif
1.1       deraadt  1866:                setcontext(0);
                   1867:        } else {
1.55      pk       1868: #if defined(SUN4M)
                   1869:                if (CPU_ISSUN4M)
                   1870:                        tlb_flush_context();
                   1871: #endif
1.1       deraadt  1872:                CHANGE_CONTEXTS(oldc, 0);
                   1873:        }
                   1874:        c->c_nextfree = ctx_freelist;
                   1875:        ctx_freelist = c;
1.55      pk       1876:
1.69      pk       1877: #if 0
1.55      pk       1878: #if defined(SUN4M)
                   1879:        if (CPU_ISSUN4M) {
                   1880:                /* Map kernel back into unused context */
                   1881:                newc = pm->pm_ctxnum;
1.69      pk       1882:                cpuinfo.ctx_tbl[newc] = cpuinfo.ctx_tbl[0];
1.55      pk       1883:                if (newc)
                   1884:                        ctxbusyvector[newc] = 0; /* mark as free */
                   1885:        }
                   1886: #endif
1.69      pk       1887: #endif
1.1       deraadt  1888: }
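/*
 * Illustrative sketch (not compiled in): the assumed caller pattern for
 * giving back a context, cf. the pmap_release() path mentioned in the
 * comment in ctx_alloc() above.
 */
#if 0
        if (pm->pm_ctx != NULL)
                ctx_free(pm);           /* flushes cache; leaves us in context 0 */
#endif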
                   1889:
                   1890:
                   1891: /*----------------------------------------------------------------*/
                   1892:
                   1893: /*
                   1894:  * pvlist functions.
                   1895:  */
                   1896:
                   1897: /*
                   1898:  * Walk the given pv list, and for each PTE, set or clear some bits
                   1899:  * (e.g., PG_W or PG_NC).
                   1900:  *
                   1901:  * As a special case, this never clears PG_W on `pager' pages.
                   1902:  * These, being kernel addresses, are always in hardware and have
                   1903:  * a context.
                   1904:  *
                   1905:  * This routine flushes the cache for any page whose PTE changes,
                   1906:  * as long as the process has a context; this is overly conservative.
                   1907:  * It also copies ref and mod bits to the pvlist, on the theory that
                   1908:  * this might save work later.  (XXX should test this theory)
                   1909:  */
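/*
 * Illustrative sketch (not compiled in): how callers later in this file
 * use the bis/bic arguments to toggle the 4/4c `no-cache' bit on every
 * mapping of a page.
 */
#if 0
        pv_changepte4_4c(pv, PG_NC, 0); /* set PG_NC: discache all mappings */
        pv_changepte4_4c(pv, 0, PG_NC); /* clear PG_NC: allow caching again */
#endif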
1.55      pk       1910:
                   1911: #if defined(SUN4) || defined(SUN4C)
                   1912:
1.1       deraadt  1913: void
1.55      pk       1914: pv_changepte4_4c(pv0, bis, bic)
1.1       deraadt  1915:        register struct pvlist *pv0;
                   1916:        register int bis, bic;
                   1917: {
                   1918:        register int *pte;
                   1919:        register struct pvlist *pv;
                   1920:        register struct pmap *pm;
1.53      christos 1921:        register int va, vr, vs, flags;
1.1       deraadt  1922:        int ctx, s;
1.43      pk       1923:        struct regmap *rp;
                   1924:        struct segmap *sp;
1.1       deraadt  1925:
                   1926:        write_user_windows();           /* paranoid? */
                   1927:
                   1928:        s = splpmap();                  /* paranoid? */
                   1929:        if (pv0->pv_pmap == NULL) {
                   1930:                splx(s);
                   1931:                return;
                   1932:        }
1.71      pk       1933:        ctx = getcontext4();
1.1       deraadt  1934:        flags = pv0->pv_flags;
                   1935:        for (pv = pv0; pv != NULL; pv = pv->pv_next) {
                   1936:                pm = pv->pv_pmap;
1.81      pk       1937: #ifdef DIAGNOSTIC
                   1938:                if (pm == NULL)
                   1939:                        panic("pv_changepte: pm == NULL");
                   1940: #endif
1.1       deraadt  1941:                va = pv->pv_va;
1.43      pk       1942:                vr = VA_VREG(va);
                   1943:                vs = VA_VSEG(va);
                   1944:                rp = &pm->pm_regmap[vr];
                   1945:                if (rp->rg_segmap == NULL)
                   1946:                        panic("pv_changepte: no segments");
                   1947:
                   1948:                sp = &rp->rg_segmap[vs];
                   1949:                pte = sp->sg_pte;
                   1950:
                   1951:                if (sp->sg_pmeg == seginval) {
                   1952:                        /* not in hardware: just fix software copy */
                   1953:                        if (pte == NULL)
1.81      pk       1954:                                panic("pv_changepte: pte == NULL");
1.43      pk       1955:                        pte += VA_VPG(va);
                   1956:                        *pte = (*pte | bis) & ~bic;
                   1957:                } else {
1.1       deraadt  1958:                        register int tpte;
                   1959:
                   1960:                        /* in hardware: fix hardware copy */
1.43      pk       1961:                        if (CTX_USABLE(pm,rp)) {
1.1       deraadt  1962:                                extern vm_offset_t pager_sva, pager_eva;
                   1963:
1.8       pk       1964:                                /*
                   1965:                                 * Bizarreness:  we never clear PG_W on
                   1966:                                 * pager pages, nor PG_NC on DVMA pages.
                   1967:                                 */
1.1       deraadt  1968:                                if (bic == PG_W &&
                   1969:                                    va >= pager_sva && va < pager_eva)
1.3       deraadt  1970:                                        continue;
                   1971:                                if (bic == PG_NC &&
                   1972:                                    va >= DVMA_BASE && va < DVMA_END)
1.1       deraadt  1973:                                        continue;
1.71      pk       1974:                                setcontext4(pm->pm_ctxnum);
1.1       deraadt  1975:                                /* XXX should flush only when necessary */
1.55      pk       1976:                                tpte = getpte4(va);
1.69      pk       1977:                                if (tpte & PG_M)
1.43      pk       1978:                                        cache_flush_page(va);
1.1       deraadt  1979:                        } else {
                   1980:                                /* XXX per-cpu va? */
1.71      pk       1981:                                setcontext4(0);
1.69      pk       1982:                                if (HASSUN4_MMU3L)
1.43      pk       1983:                                        setregmap(0, tregion);
                   1984:                                setsegmap(0, sp->sg_pmeg);
1.18      deraadt  1985:                                va = VA_VPG(va) << PGSHIFT;
1.55      pk       1986:                                tpte = getpte4(va);
1.1       deraadt  1987:                        }
                   1988:                        if (tpte & PG_V)
1.63      pk       1989:                                flags |= (tpte >> PG_M_SHIFT) & (PV_MOD|PV_REF);
1.1       deraadt  1990:                        tpte = (tpte | bis) & ~bic;
1.55      pk       1991:                        setpte4(va, tpte);
1.1       deraadt  1992:                        if (pte != NULL)        /* update software copy */
                   1993:                                pte[VA_VPG(va)] = tpte;
                   1994:                }
                   1995:        }
                   1996:        pv0->pv_flags = flags;
1.71      pk       1997:        setcontext4(ctx);
1.1       deraadt  1998:        splx(s);
                   1999: }
                   2000:
                   2001: /*
                   2002:  * Sync ref and mod bits in pvlist (turns off same in hardware PTEs).
                   2003:  * Returns the new flags.
                   2004:  *
                   2005:  * This is just like pv_changepte, but we never add or remove bits,
                   2006:  * hence never need to adjust software copies.
                   2007:  */
                   2008: int
1.55      pk       2009: pv_syncflags4_4c(pv0)
1.1       deraadt  2010:        register struct pvlist *pv0;
                   2011: {
                   2012:        register struct pvlist *pv;
                   2013:        register struct pmap *pm;
1.53      christos 2014:        register int tpte, va, vr, vs, pmeg, flags;
1.1       deraadt  2015:        int ctx, s;
1.43      pk       2016:        struct regmap *rp;
                   2017:        struct segmap *sp;
1.1       deraadt  2018:
                   2019:        write_user_windows();           /* paranoid? */
                   2020:
                   2021:        s = splpmap();                  /* paranoid? */
                   2022:        if (pv0->pv_pmap == NULL) {     /* paranoid */
                   2023:                splx(s);
                   2024:                return (0);
                   2025:        }
1.71      pk       2026:        ctx = getcontext4();
1.1       deraadt  2027:        flags = pv0->pv_flags;
                   2028:        for (pv = pv0; pv != NULL; pv = pv->pv_next) {
                   2029:                pm = pv->pv_pmap;
                   2030:                va = pv->pv_va;
1.43      pk       2031:                vr = VA_VREG(va);
                   2032:                vs = VA_VSEG(va);
                   2033:                rp = &pm->pm_regmap[vr];
                   2034:                if (rp->rg_segmap == NULL)
                   2035:                        panic("pv_syncflags: no segments");
                   2036:                sp = &rp->rg_segmap[vs];
                   2037:
                   2038:                if ((pmeg = sp->sg_pmeg) == seginval)
1.1       deraadt  2039:                        continue;
1.43      pk       2040:
                   2041:                if (CTX_USABLE(pm,rp)) {
1.71      pk       2042:                        setcontext4(pm->pm_ctxnum);
1.1       deraadt  2043:                        /* XXX should flush only when necessary */
1.55      pk       2044:                        tpte = getpte4(va);
1.69      pk       2045:                        if (tpte & PG_M)
1.34      pk       2046:                                cache_flush_page(va);
1.1       deraadt  2047:                } else {
                   2048:                        /* XXX per-cpu va? */
1.71      pk       2049:                        setcontext4(0);
1.69      pk       2050:                        if (HASSUN4_MMU3L)
1.43      pk       2051:                                setregmap(0, tregion);
1.1       deraadt  2052:                        setsegmap(0, pmeg);
1.18      deraadt  2053:                        va = VA_VPG(va) << PGSHIFT;
1.55      pk       2054:                        tpte = getpte4(va);
1.1       deraadt  2055:                }
                   2056:                if ((tpte & (PG_M|PG_U)) && (tpte & PG_V)) {
                   2057:                        flags |= (tpte >> PG_M_SHIFT) &
                   2058:                            (PV_MOD|PV_REF);
                   2059:                        tpte &= ~(PG_M|PG_U);
1.55      pk       2060:                        setpte4(va, tpte);
1.1       deraadt  2061:                }
                   2062:        }
                   2063:        pv0->pv_flags = flags;
1.71      pk       2064:        setcontext4(ctx);
1.1       deraadt  2065:        splx(s);
                   2066:        return (flags);
                   2067: }
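/*
 * Illustrative sketch (not compiled in): an assumed caller of
 * pv_syncflags4_4c(), e.g. a pmap_is_modified()-style query that tests
 * the freshly synced software flags.
 */
#if 0
        if (pv_syncflags4_4c(pv) & PV_MOD)
                return (1);             /* page was modified */
#endif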
                   2068:
                   2069: /*
                   2070:  * pv_unlink is a helper function for pmap_remove.
                   2071:  * It takes a pointer to the pv_table head for some physical address
                   2072:  * and removes the appropriate (pmap, va) entry.
                   2073:  *
                   2074:  * Once the entry is removed, if the pv_table head has the cache
                   2075:  * inhibit bit set, see if we can turn that off; if so, walk the
                   2076:  * pvlist and turn off PG_NC in each PTE.  (The pvlist is by
                   2077:  * definition nonempty, since it must have at least two elements
                   2078:  * in it to have PV_NC set, and we only remove one here.)
                   2079:  */
1.43      pk       2080: /*static*/ void
1.55      pk       2081: pv_unlink4_4c(pv, pm, va)
1.1       deraadt  2082:        register struct pvlist *pv;
                   2083:        register struct pmap *pm;
                   2084:        register vm_offset_t va;
                   2085: {
                   2086:        register struct pvlist *npv;
                   2087:
1.11      pk       2088: #ifdef DIAGNOSTIC
                   2089:        if (pv->pv_pmap == NULL)
                   2090:                panic("pv_unlink0");
                   2091: #endif
1.1       deraadt  2092:        /*
                   2093:         * First entry is special (sigh).
                   2094:         */
                   2095:        npv = pv->pv_next;
                   2096:        if (pv->pv_pmap == pm && pv->pv_va == va) {
                   2097:                pmap_stats.ps_unlink_pvfirst++;
                   2098:                if (npv != NULL) {
                   2099:                        pv->pv_next = npv->pv_next;
                   2100:                        pv->pv_pmap = npv->pv_pmap;
                   2101:                        pv->pv_va = npv->pv_va;
1.81      pk       2102:                        FREE(npv, M_VMPVENT);
1.1       deraadt  2103:                } else
                   2104:                        pv->pv_pmap = NULL;
                   2105:        } else {
                   2106:                register struct pvlist *prev;
                   2107:
                   2108:                for (prev = pv;; prev = npv, npv = npv->pv_next) {
                   2109:                        pmap_stats.ps_unlink_pvsearch++;
                   2110:                        if (npv == NULL)
                   2111:                                panic("pv_unlink");
                   2112:                        if (npv->pv_pmap == pm && npv->pv_va == va)
                   2113:                                break;
                   2114:                }
                   2115:                prev->pv_next = npv->pv_next;
1.81      pk       2116:                FREE(npv, M_VMPVENT);
1.1       deraadt  2117:        }
                   2118:        if (pv->pv_flags & PV_NC) {
                   2119:                /*
                   2120:                 * Not cached: check to see if we can fix that now.
                   2121:                 */
                   2122:                va = pv->pv_va;
                   2123:                for (npv = pv->pv_next; npv != NULL; npv = npv->pv_next)
                   2124:                        if (BADALIAS(va, npv->pv_va))
                   2125:                                return;
                   2126:                pv->pv_flags &= ~PV_NC;
1.58      pk       2127:                pv_changepte4_4c(pv, 0, PG_NC);
1.1       deraadt  2128:        }
                   2129: }
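/*
 * Illustrative sketch (not compiled in): pv_unlink4_4c() is called from
 * the mapping-removal paths once per managed page.  The pvhead() lookup
 * from a physical address is an assumption about the caller here.
 */
#if 0
        pv_unlink4_4c(pvhead(pa), pm, va);
#endif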
                   2130:
                   2131: /*
                   2132:  * pv_link is the inverse of pv_unlink, and is used in pmap_enter.
                   2133:  * It returns PG_NC if the (new) pvlist says that the address cannot
                   2134:  * be cached.
                   2135:  */
1.43      pk       2136: /*static*/ int
1.55      pk       2137: pv_link4_4c(pv, pm, va)
1.1       deraadt  2138:        register struct pvlist *pv;
                   2139:        register struct pmap *pm;
                   2140:        register vm_offset_t va;
                   2141: {
                   2142:        register struct pvlist *npv;
                   2143:        register int ret;
                   2144:
                   2145:        if (pv->pv_pmap == NULL) {
                   2146:                /* no pvlist entries yet */
                   2147:                pmap_stats.ps_enter_firstpv++;
                   2148:                pv->pv_next = NULL;
                   2149:                pv->pv_pmap = pm;
                   2150:                pv->pv_va = va;
                   2151:                return (0);
                   2152:        }
                   2153:        /*
                   2154:         * Before entering the new mapping, see if
                   2155:         * it will cause old mappings to become aliased
                   2156:         * and thus need to be `discached'.
                   2157:         */
                   2158:        ret = 0;
                   2159:        pmap_stats.ps_enter_secondpv++;
                   2160:        if (pv->pv_flags & PV_NC) {
                   2161:                /* already uncached, just stay that way */
                   2162:                ret = PG_NC;
                   2163:        } else {
                   2164:                /* MAY NEED TO DISCACHE ANYWAY IF va IS IN DVMA SPACE? */
                   2165:                for (npv = pv; npv != NULL; npv = npv->pv_next) {
                   2166:                        if (BADALIAS(va, npv->pv_va)) {
1.43      pk       2167: #ifdef DEBUG
1.66      christos 2168:                                if (pmapdebug) printf(
1.54      christos 2169:                                "pv_link: badalias: pid %d, %lx<=>%x, pa %lx\n",
1.43      pk       2170:                                curproc?curproc->p_pid:-1, va, npv->pv_va,
                   2171:                                vm_first_phys + (pv-pv_table)*NBPG);
                   2172: #endif
1.1       deraadt  2173:                                pv->pv_flags |= PV_NC;
1.58      pk       2174:                                pv_changepte4_4c(pv, ret = PG_NC, 0);
1.1       deraadt  2175:                                break;
                   2176:                        }
                   2177:                }
                   2178:        }
1.81      pk       2179:        MALLOC(npv, struct pvlist *, sizeof *npv, M_VMPVENT, M_WAITOK);
1.1       deraadt  2180:        npv->pv_next = pv->pv_next;
                   2181:        npv->pv_pmap = pm;
                   2182:        npv->pv_va = va;
                   2183:        pv->pv_next = npv;
                   2184:        return (ret);
                   2185: }
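/*
 * Illustrative sketch (not compiled in): the caller (assumed to be the
 * 4/4c pmap_enter path) ORs the result into its prototype PTE, so an
 * uncacheable pvlist forces PG_NC into the new mapping; `pteproto' is
 * an assumed name.
 */
#if 0
        pteproto |= pv_link4_4c(pv, pm, va);
#endif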
                   2186:
1.55      pk       2187: #endif /* sun4, sun4c code */
                   2188:
                   2189: #if defined(SUN4M)             /* Sun4M versions of above */
1.1       deraadt  2190: /*
1.55      pk       2191:  * Walk the given pv list, and for each PTE, set or clear some bits
                   2192:  * (e.g., PPROT_WRITE or SRMMU_PG_C).
                   2193:  *
                   2194:  * As a special case, this never clears PPROT_WRITE on `pager' pages.
                   2195:  * These, being kernel addresses, are always in hardware and have
                   2196:  * a context.
                   2197:  *
                   2198:  * This routine flushes the cache for any page whose PTE changes,
                   2199:  * as long as the process has a context; this is overly conservative.
                   2200:  * It also copies ref and mod bits to the pvlist, on the theory that
                   2201:  * this might save work later.  (XXX should test this theory)
1.1       deraadt  2202:  */
1.53      christos 2203: void
1.55      pk       2204: pv_changepte4m(pv0, bis, bic)
                   2205:        register struct pvlist *pv0;
                   2206:        register int bis, bic;
                   2207: {
1.1       deraadt  2208:        register struct pvlist *pv;
                   2209:        register struct pmap *pm;
1.61      pk       2210:        register int va, vr, flags;
1.55      pk       2211:        int ctx, s;
                   2212:        struct regmap *rp;
1.72      pk       2213:        struct segmap *sp;
1.1       deraadt  2214:
1.55      pk       2215:        write_user_windows();           /* paranoid? */
1.1       deraadt  2216:
1.55      pk       2217:        s = splpmap();                  /* paranoid? */
                   2218:        if (pv0->pv_pmap == NULL) {
                   2219:                splx(s);
                   2220:                return;
1.1       deraadt  2221:        }
1.71      pk       2222:        ctx = getcontext4m();
1.55      pk       2223:        flags = pv0->pv_flags;
                   2224:        for (pv = pv0; pv != NULL; pv = pv->pv_next) {
                   2225:                register int tpte;
                   2226:                pm = pv->pv_pmap;
1.81      pk       2227: #ifdef DIAGNOSTIC
1.61      pk       2228:                if (pm == NULL)
1.81      pk       2229:                        panic("pv_changepte: pm == NULL");
                   2230: #endif
1.55      pk       2231:                va = pv->pv_va;
                   2232:                vr = VA_VREG(va);
                   2233:                rp = &pm->pm_regmap[vr];
                   2234:                if (rp->rg_segmap == NULL)
                   2235:                        panic("pv_changepte: no segments");
                   2236:
1.72      pk       2237:                sp = &rp->rg_segmap[VA_VSEG(va)];
                   2238:
                   2239:                if (pm->pm_ctx) {
1.55      pk       2240:                        extern vm_offset_t pager_sva, pager_eva;
1.1       deraadt  2241:
1.55      pk       2242:                        /*
                   2243:                         * Bizarreness:  we never clear PPROT_WRITE on
                   2244:                         * pager pages, nor set SRMMU_PG_C on DVMA pages.
                   2245:                         */
                   2246:                        if ((bic & PPROT_WRITE) &&
                   2247:                            va >= pager_sva && va < pager_eva)
1.60      pk       2248:                                continue;
1.55      pk       2249:                        if ((bis & SRMMU_PG_C) &&
                   2250:                            va >= DVMA_BASE && va < DVMA_END)
1.60      pk       2251:                                continue;
1.72      pk       2252:
                   2253:                        /* Flush TLB so memory copy is up-to-date */
1.71      pk       2254:                        setcontext4m(pm->pm_ctxnum);
1.72      pk       2255:                        tlb_flush_page(va);
                   2256:                }
                   2257:
                   2258:                tpte = sp->sg_pte[VA_SUN4M_VPG(va)];
                   2259:                if ((tpte & SRMMU_TETYPE) != SRMMU_TEPTE) {
                   2260:                        printf("pv_changepte: invalid PTE for 0x%x\n", va);
                   2261:                        continue;
1.55      pk       2262:                }
                   2263:
1.72      pk       2264:                flags |= (tpte >> PG_M_SHIFT4M) & (PV_MOD4M|PV_REF4M|PV_C4M);
1.55      pk       2265:                tpte = (tpte | bis) & ~bic;
                   2266:
1.72      pk       2267:                if (pm->pm_ctx) {
                   2268:                        if (flags & PV_MOD4M)
                   2269:                                /* XXX: Do we need to always flush? */
                   2270:                                cache_flush_page(va);
                   2271:                        tlb_flush_page(va);
                   2272:                }
                   2273:                setpgt4m(&sp->sg_pte[VA_SUN4M_VPG(va)], tpte);
1.55      pk       2274:        }
                   2275:        pv0->pv_flags = flags;
1.71      pk       2276:        setcontext4m(ctx);
1.55      pk       2277:        splx(s);
                   2278: }
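/*
 * Illustrative sketch (not compiled in): SRMMU callers later in this
 * file toggle the cacheable bit; note the inverted sense compared to
 * the 4/4c PG_NC bit.
 */
#if 0
        pv_changepte4m(pv, SRMMU_PG_C, 0);      /* set PG_C: cacheable again */
        pv_changepte4m(pv, 0, SRMMU_PG_C);      /* clear PG_C: discache */
#endif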
                   2279:
                   2280: /*
                   2281:  * Sync ref and mod bits in pvlist. If page has been ref'd or modified,
                   2282:  * update ref/mod bits in pvlist, and clear the hardware bits.
                   2283:  *
                   2284:  * Return the new flags.
                   2285:  */
                   2286: int
                   2287: pv_syncflags4m(pv0)
                   2288:        register struct pvlist *pv0;
                   2289: {
                   2290:        register struct pvlist *pv;
                   2291:        register struct pmap *pm;
                   2292:        register int tpte, va, vr, vs, flags;
                   2293:        int ctx, s;
                   2294:        struct regmap *rp;
                   2295:        struct segmap *sp;
                   2296:
                   2297:        write_user_windows();           /* paranoid? */
                   2298:
                   2299:        s = splpmap();                  /* paranoid? */
                   2300:        if (pv0->pv_pmap == NULL) {     /* paranoid */
                   2301:                splx(s);
                   2302:                return (0);
                   2303:        }
1.71      pk       2304:        ctx = getcontext4m();
1.55      pk       2305:        flags = pv0->pv_flags;
                   2306:        for (pv = pv0; pv != NULL; pv = pv->pv_next) {
                   2307:                pm = pv->pv_pmap;
                   2308:                va = pv->pv_va;
                   2309:                vr = VA_VREG(va);
                   2310:                vs = VA_VSEG(va);
                   2311:                rp = &pm->pm_regmap[vr];
                   2312:                if (rp->rg_segmap == NULL)
                   2313:                        panic("pv_syncflags: no segments");
                   2314:                sp = &rp->rg_segmap[vs];
                   2315:
                   2316:                if (sp->sg_pte == NULL) /* invalid */
1.60      pk       2317:                        continue;
1.55      pk       2318:
1.62      pk       2319:                /*
                   2320:                 * We need the PTE from memory as the TLB version will
                   2321:                 * always have the SRMMU_PG_R bit on.
                   2322:                 */
1.72      pk       2323:                if (pm->pm_ctx) {
1.71      pk       2324:                        setcontext4m(pm->pm_ctxnum);
1.55      pk       2325:                        tlb_flush_page(va);
                   2326:                }
1.72      pk       2327:                tpte = sp->sg_pte[VA_SUN4M_VPG(va)];
1.62      pk       2328:
1.55      pk       2329:                if ((tpte & SRMMU_TETYPE) == SRMMU_TEPTE && /* if valid pte */
                   2330:                    (tpte & (SRMMU_PG_M|SRMMU_PG_R))) {   /* and mod/refd */
1.72      pk       2331:
1.55      pk       2332:                        flags |= (tpte >> PG_M_SHIFT4M) &
1.60      pk       2333:                                 (PV_MOD4M|PV_REF4M|PV_C4M);
1.72      pk       2334:
                   2335:                        if (pm->pm_ctx && (tpte & SRMMU_PG_M)) {
                   2336:                                cache_flush_page(va); /* XXX: do we need this? */
                   2337:                                tlb_flush_page(va); /* paranoid? */
                   2338:                        }
                   2339:
                   2340:                        /* Clear mod/ref bits from PTE and write it back */
1.55      pk       2341:                        tpte &= ~(SRMMU_PG_M | SRMMU_PG_R);
1.72      pk       2342:                        setpgt4m(&sp->sg_pte[VA_SUN4M_VPG(va)], tpte);
1.55      pk       2343:                }
                   2344:        }
                   2345:        pv0->pv_flags = flags;
1.71      pk       2346:        setcontext4m(ctx);
1.55      pk       2347:        splx(s);
                   2348:        return (flags);
                   2349: }
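/*
 * Illustrative sketch (not compiled in): an assumed caller pattern,
 * mirroring the 4/4c case but with the 4m flag names.
 */
#if 0
        if (pv_syncflags4m(pv) & PV_MOD4M)
                return (1);             /* page was modified */
#endif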
                   2350:
                   2351: void
                   2352: pv_unlink4m(pv, pm, va)
                   2353:        register struct pvlist *pv;
                   2354:        register struct pmap *pm;
                   2355:        register vm_offset_t va;
                   2356: {
                   2357:        register struct pvlist *npv;
                   2358:
                   2359: #ifdef DIAGNOSTIC
                   2360:        if (pv->pv_pmap == NULL)
                   2361:                panic("pv_unlink0");
                   2362: #endif
                   2363:        /*
                   2364:         * First entry is special (sigh).
                   2365:         */
                   2366:        npv = pv->pv_next;
                   2367:        if (pv->pv_pmap == pm && pv->pv_va == va) {
                   2368:                pmap_stats.ps_unlink_pvfirst++;
                   2369:                if (npv != NULL) {
                   2370:                        pv->pv_next = npv->pv_next;
                   2371:                        pv->pv_pmap = npv->pv_pmap;
                   2372:                        pv->pv_va = npv->pv_va;
1.81      pk       2373:                        FREE(npv, M_VMPVENT);
1.55      pk       2374:                } else
                   2375:                        pv->pv_pmap = NULL;
                   2376:        } else {
                   2377:                register struct pvlist *prev;
                   2378:
                   2379:                for (prev = pv;; prev = npv, npv = npv->pv_next) {
                   2380:                        pmap_stats.ps_unlink_pvsearch++;
                   2381:                        if (npv == NULL)
                   2382:                                panic("pv_unlink");
                   2383:                        if (npv->pv_pmap == pm && npv->pv_va == va)
                   2384:                                break;
                   2385:                }
                   2386:                prev->pv_next = npv->pv_next;
1.81      pk       2387:                FREE(npv, M_VMPVENT);
1.55      pk       2388:        }
                   2389:        if (!(pv->pv_flags & PV_C4M)) {
                   2390:                /*
                   2391:                 * Not cached: check to see if we can fix that now.
                   2392:                 */
                   2393:                va = pv->pv_va;
                   2394:                for (npv = pv->pv_next; npv != NULL; npv = npv->pv_next)
                   2395:                        if (BADALIAS(va, npv->pv_va))
                   2396:                                return;
                   2397:                pv->pv_flags |= PV_C4M;
                   2398:                pv_changepte4m(pv, SRMMU_PG_C, 0);
                   2399:        }
                   2400: }
                   2401:
                   2402: /*
                   2403:  * pv_link is the inverse of pv_unlink, and is used in pmap_enter.
                   2404:  * It returns SRMMU_PG_C if the (new) pvlist says that the address cannot
                   2405:  * be cached (i.e. its result must be (& ~)'d in).
                   2406:  */
                   2407: /*static*/ int
                   2408: pv_link4m(pv, pm, va)
                   2409:        register struct pvlist *pv;
                   2410:        register struct pmap *pm;
                   2411:        register vm_offset_t va;
                   2412: {
                   2413:        register struct pvlist *npv;
                   2414:        register int ret;
                   2415:
                   2416:        if (pv->pv_pmap == NULL) {
                   2417:                /* no pvlist entries yet */
                   2418:                pmap_stats.ps_enter_firstpv++;
                   2419:                pv->pv_next = NULL;
                   2420:                pv->pv_pmap = pm;
                   2421:                pv->pv_va = va;
                   2422:                pv->pv_flags |= PV_C4M;
                   2423:                return (0);
                   2424:        }
                   2425:        /*
                   2426:         * Before entering the new mapping, see if
                   2427:         * it will cause old mappings to become aliased
                   2428:         * and thus need to be `discached'.
                   2429:         */
                   2430:        ret = 0;
                   2431:        pmap_stats.ps_enter_secondpv++;
                   2432:        if (!(pv->pv_flags & PV_C4M)) {
                   2433:                /* already uncached, just stay that way */
                   2434:                ret = SRMMU_PG_C;
                   2435:        } else {
                   2436:                for (npv = pv; npv != NULL; npv = npv->pv_next) {
                   2437:                        if (BADALIAS(va, npv->pv_va)) {
                   2438: #ifdef DEBUG
1.66      christos 2439:                                if (pmapdebug & PDB_CACHESTUFF) printf(
1.55      pk       2440:                                "pv_link: badalias: pid %d, %lx<=>%x, pa %lx\n",
                   2441:                                curproc?curproc->p_pid:-1, va, npv->pv_va,
                   2442:                                vm_first_phys + (pv-pv_table)*NBPG);
                   2443: #endif
                   2444:                                pv->pv_flags &= ~PV_C4M;
1.58      pk       2445:                                pv_changepte4m(pv, 0, ret = SRMMU_PG_C);
1.55      pk       2446:                                /* cache_flush_page(va); XXX: needed? */
                   2447:                                break;
                   2448:                        }
                   2449:                }
                   2450:        }
1.81      pk       2451:        MALLOC(npv, struct pvlist *, sizeof *npv, M_VMPVENT, M_WAITOK);
1.55      pk       2452:        npv->pv_next = pv->pv_next;
                   2453:        npv->pv_pmap = pm;
                   2454:        npv->pv_va = va;
                   2455:        npv->pv_flags |= (ret == SRMMU_PG_C ? 0 : PV_C4M);
                   2456:        pv->pv_next = npv;
                   2457:        return (ret);
                   2458: }
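/*
 * Illustrative sketch (not compiled in): since pv_link4m() returns
 * SRMMU_PG_C when the page must run uncached, the caller clears the
 * result from its prototype PTE instead of ORing it in; `pteproto'
 * is an assumed name.
 */
#if 0
        pteproto &= ~pv_link4m(pv, pm, va);
#endif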
                   2459: #endif /* SUN4M */
                   2460:
                   2461: /*
                   2462:  * Walk the given list and flush the cache for each (MI) page that is
                   2463:  * potentially in the cache. Called only if vactype != VAC_NONE.
                   2464:  */
                   2465: void
                   2466: pv_flushcache(pv)
                   2467:        register struct pvlist *pv;
                   2468: {
                   2469:        register struct pmap *pm;
                   2470:        register int s, ctx;
                   2471:
                   2472:        write_user_windows();   /* paranoia? */
                   2473:
                   2474:        s = splpmap();          /* XXX extreme paranoia */
                   2475:        if ((pm = pv->pv_pmap) != NULL) {
                   2476:                ctx = getcontext();
                   2477:                for (;;) {
                   2478:                        if (pm->pm_ctx) {
                   2479:                                setcontext(pm->pm_ctxnum);
                   2480:                                cache_flush_page(pv->pv_va);
                   2481:                        }
                   2482:                        pv = pv->pv_next;
                   2483:                        if (pv == NULL)
                   2484:                                break;
                   2485:                        pm = pv->pv_pmap;
                   2486:                }
                   2487:                setcontext(ctx);
                   2488:        }
                   2489:        splx(s);
                   2490: }
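/*
 * Illustrative sketch (not compiled in): the VAC guard that callers are
 * expected to apply before walking the list.
 */
#if 0
        if (CACHEINFO.c_vactype != VAC_NONE)
                pv_flushcache(pv);
#endif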
                   2491:
                   2492: /*----------------------------------------------------------------*/
                   2493:
                   2494: /*
                   2495:  * At last, pmap code.
                   2496:  */
1.1       deraadt  2497:
1.18      deraadt  2498: #if defined(SUN4) && (defined(SUN4C) || defined(SUN4M))
                   2499: int nptesg;
                   2500: #endif
                   2501:
1.55      pk       2502: #if defined(SUN4M)
                   2503: static void pmap_bootstrap4m __P((void));
                   2504: #endif
                   2505: #if defined(SUN4) || defined(SUN4C)
                   2506: static void pmap_bootstrap4_4c __P((int, int, int));
                   2507: #endif
                   2508:
1.1       deraadt  2509: /*
                   2510:  * Bootstrap the system enough to run with VM enabled.
                   2511:  *
1.43      pk       2512:  * nsegment is the number of mmu segment entries (``PMEGs'');
                   2513:  * nregion is the number of mmu region entries (``SMEGs'');
1.1       deraadt  2514:  * nctx is the number of contexts.
                   2515:  */
                   2516: void
1.43      pk       2517: pmap_bootstrap(nctx, nregion, nsegment)
                   2518:        int nsegment, nctx, nregion;
1.1       deraadt  2519: {
1.55      pk       2520:
                   2521:        cnt.v_page_size = NBPG;
                   2522:        vm_set_page_size();
                   2523:
                   2524: #if defined(SUN4) && (defined(SUN4C) || defined(SUN4M))
                   2525:        /* In this case NPTESG is not a #define */
                   2526:        nptesg = (NBPSG >> pgshift);
                   2527: #endif
                   2528:
1.69      pk       2529: #if 0
1.55      pk       2530:        ncontext = nctx;
1.69      pk       2531: #endif
1.55      pk       2532:
                   2533: #if defined(SUN4M)
                   2534:        if (CPU_ISSUN4M) {
                   2535:                pmap_bootstrap4m();
                   2536:                return;
                   2537:        }
                   2538: #endif
                   2539: #if defined(SUN4) || defined(SUN4C)
                   2540:        if (CPU_ISSUN4OR4C) {
                   2541:                pmap_bootstrap4_4c(nctx, nregion, nsegment);
                   2542:                return;
                   2543:        }
                   2544: #endif
                   2545: }
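/*
 * Illustrative sketch (not compiled in): the three-level carving of a
 * virtual address used throughout the 4/4c code below.  The macros are
 * the real ones; the local variable names are not.
 */
#if 0
        vr = VA_VREG(va);       /* region number (NBPRG bytes per region) */
        vs = VA_VSEG(va);       /* segment within the region (NSEGRG per region) */
        vpg = VA_VPG(va);       /* page within the segment */
#endif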
                   2546:
                   2547: #if defined(SUN4) || defined(SUN4C)
                   2548: void
                   2549: pmap_bootstrap4_4c(nctx, nregion, nsegment)
                   2550:        int nsegment, nctx, nregion;
                   2551: {
1.1       deraadt  2552:        register union ctxinfo *ci;
1.53      christos 2553:        register struct mmuentry *mmuseg;
1.77      pk       2554: #if defined(SUN4_MMU3L)
1.53      christos 2555:        register struct mmuentry *mmureg;
                   2556: #endif
1.43      pk       2557:        struct   regmap *rp;
                   2558:        register int i, j;
                   2559:        register int npte, zseg, vr, vs;
                   2560:        register int rcookie, scookie;
1.1       deraadt  2561:        register caddr_t p;
1.37      pk       2562:        register struct memarr *mp;
1.1       deraadt  2563:        register void (*rom_setmap)(int ctx, caddr_t va, int pmeg);
                   2564:        int lastpage;
                   2565:        extern char end[];
1.7       pk       2566: #ifdef DDB
                   2567:        extern char *esym;
                   2568: #endif
1.1       deraadt  2569:
1.45      pk       2570:        switch (cputyp) {
                   2571:        case CPU_SUN4C:
                   2572:                mmu_has_hole = 1;
                   2573:                break;
                   2574:        case CPU_SUN4:
1.69      pk       2575:                if (cpuinfo.cpu_type != CPUTYP_4_400) {
1.45      pk       2576:                        mmu_has_hole = 1;
                   2577:                        break;
                   2578:                }
                   2579:        }
                   2580:
1.19      deraadt  2581:        cnt.v_page_size = NBPG;
                   2582:        vm_set_page_size();
                   2583:
1.31      pk       2584: #if defined(SUN4)
                   2585:        /*
                   2586:         * set up the segfixmask to mask off invalid bits
                   2587:         */
1.43      pk       2588:        segfixmask = nsegment - 1; /* assume nsegment is a power of 2 */
                   2589: #ifdef DIAGNOSTIC
                   2590:        if ((nsegment & segfixmask) != 0) {
1.66      christos 2591:                printf("pmap_bootstrap: unsuitable number of segments (%d)\n",
1.43      pk       2592:                        nsegment);
                   2593:                callrom();
                   2594:        }
                   2595: #endif
1.31      pk       2596: #endif
                   2597:
1.55      pk       2598: #if defined(SUN4M) /* We're in a dual-arch kernel. Setup 4/4c fn. ptrs */
                   2599:        pmap_clear_modify_p     =       pmap_clear_modify4_4c;
                   2600:        pmap_clear_reference_p  =       pmap_clear_reference4_4c;
                   2601:        pmap_copy_page_p        =       pmap_copy_page4_4c;
                   2602:        pmap_enter_p            =       pmap_enter4_4c;
                   2603:        pmap_extract_p          =       pmap_extract4_4c;
                   2604:        pmap_is_modified_p      =       pmap_is_modified4_4c;
                   2605:        pmap_is_referenced_p    =       pmap_is_referenced4_4c;
                   2606:        pmap_page_protect_p     =       pmap_page_protect4_4c;
                   2607:        pmap_protect_p          =       pmap_protect4_4c;
                   2608:        pmap_zero_page_p        =       pmap_zero_page4_4c;
                   2609:        pmap_changeprot_p       =       pmap_changeprot4_4c;
                   2610:        pmap_rmk_p              =       pmap_rmk4_4c;
                   2611:        pmap_rmu_p              =       pmap_rmu4_4c;
                   2612: #endif /* defined SUN4M */
1.43      pk       2613:
1.1       deraadt  2614:        /*
                   2615:         * Last segment is the `invalid' one (one PMEG of pte's with !pg_v).
                   2616:         * It will never be used for anything else.
                   2617:         */
1.43      pk       2618:        seginval = --nsegment;
                   2619:
1.69      pk       2620: #if defined(SUN4_MMU3L)
                   2621:        if (HASSUN4_MMU3L)
1.43      pk       2622:                reginval = --nregion;
                   2623: #endif
                   2624:
                   2625:        /*
                   2626:         * Initialize the kernel pmap.
                   2627:         */
                   2628:        /* kernel_pmap_store.pm_ctxnum = 0; */
                   2629:        simple_lock_init(kernel_pmap_store.pm_lock);
                   2630:        kernel_pmap_store.pm_refcount = 1;
1.69      pk       2631: #if defined(SUN4_MMU3L)
1.43      pk       2632:        TAILQ_INIT(&kernel_pmap_store.pm_reglist);
                   2633: #endif
                   2634:        TAILQ_INIT(&kernel_pmap_store.pm_seglist);
                   2635:
                   2636:        kernel_pmap_store.pm_regmap = &kernel_regmap_store[-NUREG];
                   2637:        for (i = NKREG; --i >= 0;) {
1.69      pk       2638: #if defined(SUN4_MMU3L)
1.43      pk       2639:                kernel_regmap_store[i].rg_smeg = reginval;
                   2640: #endif
                   2641:                kernel_regmap_store[i].rg_segmap =
                   2642:                        &kernel_segmap_store[i * NSEGRG];
                   2643:                for (j = NSEGRG; --j >= 0;)
                   2644:                        kernel_segmap_store[i * NSEGRG + j].sg_pmeg = seginval;
                   2645:        }
1.1       deraadt  2646:
                   2647:        /*
                   2648:         * Preserve the monitor ROM's reserved VM region, so that
                   2649:         * we can use L1-A or the monitor's debugger.  As a side
                   2650:         * effect we map the ROM's reserved VM into all contexts
                   2651:         * (otherwise L1-A crashes the machine!).
                   2652:         */
1.43      pk       2653:
1.58      pk       2654:        mmu_reservemon4_4c(&nregion, &nsegment);
1.43      pk       2655:
1.69      pk       2656: #if defined(SUN4_MMU3L)
1.43      pk       2657:        /* Reserve one region for temporary mappings */
                   2658:        tregion = --nregion;
                   2659: #endif
1.1       deraadt  2660:
                   2661:        /*
1.43      pk       2662:         * Allocate and clear mmu entries and context structures.
1.1       deraadt  2663:         */
                   2664:        p = end;
1.7       pk       2665: #ifdef DDB
                   2666:        if (esym != 0)
1.78      pk       2667:                p = esym;
1.7       pk       2668: #endif
1.69      pk       2669: #if defined(SUN4_MMU3L)
1.43      pk       2670:        mmuregions = mmureg = (struct mmuentry *)p;
                   2671:        p += nregion * sizeof(struct mmuentry);
1.78      pk       2672:        bzero(mmuregions, nregion * sizeof(struct mmuentry));
1.43      pk       2673: #endif
                   2674:        mmusegments = mmuseg = (struct mmuentry *)p;
                   2675:        p += nsegment * sizeof(struct mmuentry);
1.78      pk       2676:        bzero(mmusegments, nsegment * sizeof(struct mmuentry));
                   2677:
1.69      pk       2678:        pmap_kernel()->pm_ctx = cpuinfo.ctxinfo = ci = (union ctxinfo *)p;
1.1       deraadt  2679:        p += nctx * sizeof *ci;
                   2680:
1.43      pk       2681:        /* Initialize MMU resource queues */
1.69      pk       2682: #if defined(SUN4_MMU3L)
1.43      pk       2683:        TAILQ_INIT(&region_freelist);
                   2684:        TAILQ_INIT(&region_lru);
                   2685:        TAILQ_INIT(&region_locked);
                   2686: #endif
                   2687:        TAILQ_INIT(&segm_freelist);
                   2688:        TAILQ_INIT(&segm_lru);
                   2689:        TAILQ_INIT(&segm_locked);
                   2690:
1.1       deraadt  2691:        /*
                   2692:         * Set up the `constants' for the call to vm_init()
                   2693:         * in main().  All pages beginning at p (rounded up to
                   2694:         * the next whole page) and continuing through the number
                   2695:         * of available pages are free, but they start at a higher
                   2696:         * virtual address.  This gives us two mappable MD pages
                   2697:         * for pmap_zero_page and pmap_copy_page, and one MI page
                   2698:         * for /dev/mem, all with no associated physical memory.
                   2699:         */
                   2700:        p = (caddr_t)(((u_int)p + NBPG - 1) & ~PGOFSET);
                   2701:        avail_start = (int)p - KERNBASE;
1.36      pk       2702:
                   2703:        /*
                   2704:         * Grab physical memory list, so pmap_next_page() can do its bit.
                   2705:         */
                   2706:        npmemarr = makememarr(pmemarr, MA_SIZE, MEMARR_AVAILPHYS);
                   2707:        sortm(pmemarr, npmemarr);
                   2708:        if (pmemarr[0].addr != 0) {
1.66      christos 2709:                printf("pmap_bootstrap: no kernel memory?!\n");
1.36      pk       2710:                callrom();
                   2711:        }
                   2712:        avail_end = pmemarr[npmemarr-1].addr + pmemarr[npmemarr-1].len;
1.38      pk       2713:        avail_next = avail_start;
                   2714:        for (physmem = 0, mp = pmemarr, j = npmemarr; --j >= 0; mp++)
                   2715:                physmem += btoc(mp->len);
                   2716:
                   2717:        i = (int)p;
                   2718:        vpage[0] = p, p += NBPG;
                   2719:        vpage[1] = p, p += NBPG;
1.41      mycroft  2720:        vmmap = p, p += NBPG;
1.38      pk       2721:        p = reserve_dumppages(p);
1.39      pk       2722:
1.37      pk       2723:        /*
1.38      pk       2724:         * Allocate virtual memory for pv_table[], which will be mapped
                   2725:         * sparsely in pmap_init().
1.37      pk       2726:         */
                   2727:        pv_table = (struct pvlist *)p;
                   2728:        p += round_page(sizeof(struct pvlist) * atop(avail_end - avail_start));
1.36      pk       2729:
1.1       deraadt  2730:        virtual_avail = (vm_offset_t)p;
                   2731:        virtual_end = VM_MAX_KERNEL_ADDRESS;
                   2732:
                   2733:        p = (caddr_t)i;                 /* retract to first free phys */
                   2734:
                   2735:        /*
                   2736:         * All contexts are free except the kernel's.
                   2737:         *
                   2738:         * XXX sun4c could use context 0 for users?
                   2739:         */
1.42      mycroft  2740:        ci->c_pmap = pmap_kernel();
1.1       deraadt  2741:        ctx_freelist = ci + 1;
                   2742:        for (i = 1; i < ncontext; i++) {
                   2743:                ci++;
                   2744:                ci->c_nextfree = ci + 1;
                   2745:        }
                   2746:        ci->c_nextfree = NULL;
                   2747:        ctx_kick = 0;
                   2748:        ctx_kickdir = -1;
                   2749:
                   2750:        /*
                   2751:         * Init mmu entries that map the kernel physical addresses.
                   2752:         *
                   2753:         * All the other MMU entries are free.
                   2754:         *
                   2755:         * THIS ASSUMES SEGMENT i IS MAPPED BY MMU ENTRY i DURING THE
                   2756:         * BOOT PROCESS
                   2757:         */
1.43      pk       2758:
                   2759:        rom_setmap = promvec->pv_setctxt;
                   2760:        zseg = ((((u_int)p + NBPSG - 1) & ~SGOFSET) - KERNBASE) >> SGSHIFT;
1.1       deraadt  2761:        lastpage = VA_VPG(p);
                   2762:        if (lastpage == 0)
1.43      pk       2763:                /*
                   2764:                 * If the page bits in p are 0, we filled the last segment
                   2765:                 * exactly (now how did that happen?); if not, it is
                   2766:                 * the last page filled in the last segment.
                   2767:                 */
1.1       deraadt  2768:                lastpage = NPTESG;
1.43      pk       2769:
1.1       deraadt  2770:        p = (caddr_t)KERNBASE;          /* first va */
                   2771:        vs = VA_VSEG(KERNBASE);         /* first virtual segment */
1.43      pk       2772:        vr = VA_VREG(KERNBASE);         /* first virtual region */
                   2773:        rp = &pmap_kernel()->pm_regmap[vr];
                   2774:
                   2775:        for (rcookie = 0, scookie = 0;;) {
                   2776:
1.1       deraadt  2777:                /*
1.43      pk       2778:                 * Distribute each kernel region/segment into all contexts.
1.1       deraadt  2779:                 * This is done through the monitor ROM, rather than
                   2780:                 * directly here: if we do a setcontext we will fault,
                   2781:                 * as we are not (yet) mapped in any other context.
                   2782:                 */
1.43      pk       2783:
                   2784:                if ((vs % NSEGRG) == 0) {
                   2785:                        /* Entering a new region */
                   2786:                        if (VA_VREG(p) > vr) {
                   2787: #ifdef DEBUG
1.66      christos 2788:                                printf("note: giant kernel!\n");
1.43      pk       2789: #endif
                   2790:                                vr++, rp++;
                   2791:                        }
1.69      pk       2792: #if defined(SUN4_MMU3L)
                   2793:                        if (HASSUN4_MMU3L) {
1.43      pk       2794:                                for (i = 1; i < nctx; i++)
                   2795:                                        rom_setmap(i, p, rcookie);
                   2796:
                   2797:                                TAILQ_INSERT_TAIL(&region_locked,
                   2798:                                                  mmureg, me_list);
                   2799:                                TAILQ_INSERT_TAIL(&pmap_kernel()->pm_reglist,
                   2800:                                                  mmureg, me_pmchain);
                   2801:                                mmureg->me_cookie = rcookie;
                   2802:                                mmureg->me_pmap = pmap_kernel();
                   2803:                                mmureg->me_vreg = vr;
                   2804:                                rp->rg_smeg = rcookie;
                   2805:                                mmureg++;
                   2806:                                rcookie++;
                   2807:                        }
                   2808: #endif
                   2809:                }
                   2810:
1.69      pk       2811: #if defined(SUN4_MMU3L)
                   2812:                if (!HASSUN4_MMU3L)
1.43      pk       2813: #endif
                   2814:                        for (i = 1; i < nctx; i++)
                   2815:                                rom_setmap(i, p, scookie);
1.1       deraadt  2816:
                   2817:                /* set up the mmu entry */
1.43      pk       2818:                TAILQ_INSERT_TAIL(&segm_locked, mmuseg, me_list);
                   2819:                TAILQ_INSERT_TAIL(&pmap_kernel()->pm_seglist, mmuseg, me_pmchain);
1.70      pk       2820:                pmap_stats.ps_npmeg_locked++;
1.43      pk       2821:                mmuseg->me_cookie = scookie;
                   2822:                mmuseg->me_pmap = pmap_kernel();
                   2823:                mmuseg->me_vreg = vr;
                   2824:                mmuseg->me_vseg = vs % NSEGRG;
                   2825:                rp->rg_segmap[vs % NSEGRG].sg_pmeg = scookie;
                   2826:                npte = ++scookie < zseg ? NPTESG : lastpage;
                   2827:                rp->rg_segmap[vs % NSEGRG].sg_npte = npte;
                   2828:                rp->rg_nsegmap += 1;
                   2829:                mmuseg++;
1.1       deraadt  2830:                vs++;
1.43      pk       2831:                if (scookie < zseg) {
1.1       deraadt  2832:                        p += NBPSG;
                   2833:                        continue;
                   2834:                }
1.43      pk       2835:
1.1       deraadt  2836:                /*
                   2837:                 * Unmap the pages, if any, that are not part of
                   2838:                 * the final segment.
                   2839:                 */
1.43      pk       2840:                for (p += npte << PGSHIFT; npte < NPTESG; npte++, p += NBPG)
1.55      pk       2841:                        setpte4(p, 0);
1.43      pk       2842:
1.69      pk       2843: #if defined(SUN4_MMU3L)
                   2844:                if (HASSUN4_MMU3L) {
1.43      pk       2845:                        /*
                   2846:                         * Unmap the segments, if any, that are not part of
                   2847:                         * the final region.
                   2848:                         */
                   2849:                        for (i = rp->rg_nsegmap; i < NSEGRG; i++, p += NBPSG)
                   2850:                                setsegmap(p, seginval);
                   2851:                }
                   2852: #endif
1.1       deraadt  2853:                break;
                   2854:        }
1.43      pk       2855:
1.69      pk       2856: #if defined(SUN4_MMU3L)
                   2857:        if (HASSUN4_MMU3L)
1.43      pk       2858:                for (; rcookie < nregion; rcookie++, mmureg++) {
                   2859:                        mmureg->me_cookie = rcookie;
                   2860:                        TAILQ_INSERT_TAIL(&region_freelist, mmureg, me_list);
                   2861:                }
                   2862: #endif
                   2863:
                   2864:        for (; scookie < nsegment; scookie++, mmuseg++) {
                   2865:                mmuseg->me_cookie = scookie;
                   2866:                TAILQ_INSERT_TAIL(&segm_freelist, mmuseg, me_list);
1.70      pk       2867:                pmap_stats.ps_npmeg_free++;
1.1       deraadt  2868:        }
                   2869:
1.13      pk       2870:        /* Erase all spurious user-space segmaps */
                   2871:        for (i = 1; i < ncontext; i++) {
1.71      pk       2872:                setcontext4(i);
1.69      pk       2873:                if (HASSUN4_MMU3L)
1.43      pk       2874:                        for (p = 0, j = NUREG; --j >= 0; p += NBPRG)
                   2875:                                setregmap(p, reginval);
                   2876:                else
                   2877:                        for (p = 0, vr = 0; vr < NUREG; vr++) {
1.45      pk       2878:                                if (VA_INHOLE(p)) {
                   2879:                                        p = (caddr_t)MMU_HOLE_END;
                   2880:                                        vr = VA_VREG(p);
1.43      pk       2881:                                }
                   2882:                                for (j = NSEGRG; --j >= 0; p += NBPSG)
                   2883:                                        setsegmap(p, seginval);
                   2884:                        }
1.13      pk       2885:        }
1.71      pk       2886:        setcontext4(0);
1.13      pk       2887:
1.1       deraadt  2888:        /*
                   2889:         * write protect & encache kernel text;
                   2890:         * set red zone at kernel base; enable cache on message buffer.
                   2891:         */
                   2892:        {
1.23      deraadt  2893:                extern char etext[];
1.1       deraadt  2894: #ifdef KGDB
                   2895:                register int mask = ~PG_NC;     /* XXX chgkprot is busted */
                   2896: #else
                   2897:                register int mask = ~(PG_W | PG_NC);
                   2898: #endif
1.2       deraadt  2899:
1.23      deraadt  2900:                for (p = (caddr_t)trapbase; p < etext; p += NBPG)
1.55      pk       2901:                        setpte4(p, getpte4(p) & mask);
1.1       deraadt  2902:        }
                   2903: }
1.55      pk       2904: #endif
1.1       deraadt  2905:
1.55      pk       2906: #if defined(SUN4M)             /* Sun4M version of pmap_bootstrap */
                   2907: /*
                   2908:  * Bootstrap the system enough to run with VM enabled on a Sun4M machine.
                   2909:  *
                   2910:  * Switches from ROM to kernel page tables, and sets up initial mappings.
                   2911:  */
                   2912: static void
                   2913: pmap_bootstrap4m(void)
1.36      pk       2914: {
1.55      pk       2915:        register int i, j;
1.71      pk       2916:        caddr_t p;
1.55      pk       2917:        register caddr_t q;
                   2918:        register union ctxinfo *ci;
1.36      pk       2919:        register struct memarr *mp;
1.55      pk       2920:        register int reg, seg;
1.71      pk       2921:        unsigned int ctxtblsize;
1.79      pk       2922:        caddr_t pagetables_start, pagetables_end;
1.55      pk       2923:        extern char end[];
                   2924:        extern char etext[];
1.78      pk       2925:        extern caddr_t reserve_dumppages(caddr_t);
1.55      pk       2926: #ifdef DDB
                   2927:        extern char *esym;
                   2928: #endif
1.36      pk       2929:
1.55      pk       2930: #if defined(SUN4) || defined(SUN4C) /* setup 4M fn. ptrs for dual-arch kernel */
                   2931:        pmap_clear_modify_p     =       pmap_clear_modify4m;
                   2932:        pmap_clear_reference_p  =       pmap_clear_reference4m;
                   2933:        pmap_copy_page_p        =       pmap_copy_page4m;
                   2934:        pmap_enter_p            =       pmap_enter4m;
                   2935:        pmap_extract_p          =       pmap_extract4m;
                   2936:        pmap_is_modified_p      =       pmap_is_modified4m;
                   2937:        pmap_is_referenced_p    =       pmap_is_referenced4m;
                   2938:        pmap_page_protect_p     =       pmap_page_protect4m;
                   2939:        pmap_protect_p          =       pmap_protect4m;
                   2940:        pmap_zero_page_p        =       pmap_zero_page4m;
                   2941:        pmap_changeprot_p       =       pmap_changeprot4m;
                   2942:        pmap_rmk_p              =       pmap_rmk4m;
                   2943:        pmap_rmu_p              =       pmap_rmu4m;
                   2944: #endif /* defined Sun4/Sun4c */
1.37      pk       2945:
1.36      pk       2946:        /*
1.55      pk       2947:         * Initialize the kernel pmap.
                   2948:         */
                   2949:        /* kernel_pmap_store.pm_ctxnum = 0; */
                   2950:        simple_lock_init(&kernel_pmap_store.pm_lock);
                   2951:        kernel_pmap_store.pm_refcount = 1;
1.71      pk       2952:
                   2953:        /*
                   2954:         * Set up pm_regmap for kernel to point NUREG *below* the beginning
1.55      pk       2955:         * of kernel regmap storage. Since the kernel only uses regions
                   2956:         * above NUREG, we save storage space and can index kernel and
                   2957:         * user regions in the same way.
1.36      pk       2958:         */
1.55      pk       2959:        kernel_pmap_store.pm_regmap = &kernel_regmap_store[-NUREG];
                   2960:        kernel_pmap_store.pm_reg_ptps = NULL;
                   2961:        kernel_pmap_store.pm_reg_ptps_pa = 0;
                   2962:        bzero(kernel_regmap_store, NKREG * sizeof(struct regmap));
                   2963:        bzero(kernel_segmap_store, NKREG * NSEGRG * sizeof(struct segmap));
                   2964:        for (i = NKREG; --i >= 0;) {
                   2965:                kernel_regmap_store[i].rg_segmap =
                   2966:                        &kernel_segmap_store[i * NSEGRG];
                   2967:                kernel_regmap_store[i].rg_seg_ptps = NULL;
                   2968:                for (j = NSEGRG; --j >= 0;)
                   2969:                        kernel_segmap_store[i * NSEGRG + j].sg_pte = NULL;
                   2970:        }
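                                /*
                                 * Illustrative sketch of the negative-offset trick above,
                                 * assuming KERNBASE == 0xf8000000 (so VA_VREG(KERNBASE) ==
                                 * 0xf8 == 248) and NUREG == 248: for any kernel va,
                                 *
                                 *      pm_regmap[VA_VREG(va)]
                                 *          == kernel_regmap_store[VA_VREG(va) - NUREG]
                                 *
                                 * so regions 248..255 land in slots 0..NKREG-1 of the
                                 * backing store, and the same indexing expression works
                                 * unchanged for user and kernel addresses.
                                 */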
1.38      pk       2971:
1.55      pk       2972:        p = end;                /* p points to top of kernel mem */
                   2973: #ifdef DDB
                   2974:        if (esym != 0)
1.78      pk       2975:                p = esym;
1.55      pk       2976: #endif
                   2977:
1.77      pk       2978:
1.71      pk       2979:        /* Allocate context administration */
1.69      pk       2980:        pmap_kernel()->pm_ctx = cpuinfo.ctxinfo = ci = (union ctxinfo *)p;
1.55      pk       2981:        p += ncontext * sizeof *ci;
1.69      pk       2982:        bzero((caddr_t)ci, (u_int)p - (u_int)ci);
1.77      pk       2983: #if 0
1.55      pk       2984:        ctxbusyvector = p;
                   2985:        p += ncontext;
                   2986:        bzero(ctxbusyvector, ncontext);
                   2987:        ctxbusyvector[0] = 1;   /* context 0 is always in use */
1.69      pk       2988: #endif
1.55      pk       2989:
1.77      pk       2990:
                   2991:        /*
                   2992:         * Set up the `constants' for the call to vm_init()
                   2993:         * in main().  All pages beginning at p (rounded up to
                   2994:         * the next whole page) and continuing through the number
                   2995:         * of available pages are free.
                   2996:         */
                   2997:        p = (caddr_t)(((u_int)p + NBPG - 1) & ~PGOFSET);
                   2998:        avail_start = (int)p - KERNBASE;
                   2999:        /*
                   3000:         * Grab the physical memory list and use it to compute `physmem' and
                   3001:         * `avail_end'. The latter is used in conjunction with
                   3002:         * `avail_start' and `avail_next' to dispatch left-over
                   3003:         * physical pages to the VM system.
                   3004:         */
                   3005:        npmemarr = makememarr(pmemarr, MA_SIZE, MEMARR_AVAILPHYS);
                   3006:        sortm(pmemarr, npmemarr);
                   3007:        if (pmemarr[0].addr != 0) {
                   3008:                printf("pmap_bootstrap: no kernel memory?!\n");
                   3009:                callrom();
                   3010:        }
                   3011:        avail_end = pmemarr[npmemarr-1].addr + pmemarr[npmemarr-1].len;
                   3012:        avail_next = avail_start;
                   3013:        for (physmem = 0, mp = pmemarr, j = npmemarr; --j >= 0; mp++)
                   3014:                physmem += btoc(mp->len);
                   3015:
                   3016:        /*
                   3017:         * Reserve memory for MMU pagetables. Some of these have severe
                   3018:         * alignment restrictions. We allocate in a sequence that
                   3019:         * minimizes alignment gaps.
                   3020:         * The amount of physical memory that becomes unavailable for
                   3021:         * general VM use is marked by [unavail_start, unavail_end).
                   3022:         */
                   3023:
1.55      pk       3024:        /*
1.71      pk       3025:         * Reserve memory for I/O pagetables. This takes 64k of memory
1.55      pk       3026:         * since we want to have 64M of dvma space (this actually depends
1.77      pk       3027:         * on the definition of DVMA4M_BASE...we may drop it back to 32M).
                   3028:         * The table must be aligned to its own size, (0 - DVMA4M_BASE) / 1024
                   3029:         * bytes (i.e. 64K for 64M of dvma space).
1.55      pk       3030:         */
                   3031: #ifdef DEBUG
                   3032:        if ((0 - DVMA4M_BASE) % (16*1024*1024))
1.71      pk       3033:            panic("pmap_bootstrap4m: invalid DVMA4M_BASE of 0x%x", DVMA4M_BASE);
1.55      pk       3034: #endif
                   3035:
1.77      pk       3036:        p = (caddr_t) roundup((u_int)p, (0 - DVMA4M_BASE) / 1024);
                   3037:        unavail_start = (int)p - KERNBASE;
1.55      pk       3038:
                   3039:        kernel_iopte_table = (u_int *)p;
                   3040:        kernel_iopte_table_pa = VA2PA((caddr_t)kernel_iopte_table);
                   3041:        p += (0 - DVMA4M_BASE) / 1024;
                   3042:        bzero(kernel_iopte_table, p - (caddr_t) kernel_iopte_table);
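                                /*
                                 * Illustrative arithmetic for the table just allocated, assuming
                                 * DVMA4M_BASE == 0xfc000000 (64MB of DVMA space) and NBPG == 4096:
                                 * the table needs (0 - DVMA4M_BASE) / NBPG == 16384 ioptes of
                                 * sizeof(int) == 4 bytes each, i.e. 64KB, which is exactly
                                 * (0 - DVMA4M_BASE) / 1024 -- the same value used above both for
                                 * its size and for its roundup() alignment.
                                 */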
                   3043:
1.79      pk       3044:        pagetables_start = p;
1.55      pk       3045:        /*
1.77      pk       3046:         * Allocate context table.
1.71      pk       3047:         * To keep the supersparc happy, the minimum alignment is on a 4K boundary.
                   3048:         */
                   3049:        ctxtblsize = max(ncontext,1024) * sizeof(int);
                   3050:        cpuinfo.ctx_tbl = (int *)roundup((u_int)p, ctxtblsize);
                   3051:        p = (caddr_t)((u_int)cpuinfo.ctx_tbl + ctxtblsize);
                   3052:        qzero(cpuinfo.ctx_tbl, ctxtblsize);
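                                /*
                                 * e.g. (illustrative): with ncontext == 256, max(ncontext, 1024)
                                 * still yields 1024 entries * sizeof(int) == 4096 bytes, and the
                                 * roundup() above aligns the table to its own size, satisfying
                                 * the 4K minimum noted above.
                                 */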
                   3053:
                   3054:        /*
                   3055:         * Reserve memory for segment and page tables needed to map the entire
1.55      pk       3056:         * kernel (from regions 0xf8 -> 0xff). This takes 130k of space, but
                   3057:         * unfortunately is necessary since pmap_enk *must* be able to enter
                   3058:         * a kernel mapping without resorting to malloc, or else the
                   3059:         * possibility of deadlock arises (pmap_enk4m is called to enter a
                   3060:         * mapping; it needs to malloc a page table; malloc then calls
                   3061:         * pmap_enk4m to enter the new malloc'd page; pmap_enk4m needs to
                   3062:         * malloc a page table to enter _that_ mapping; malloc deadlocks since
                   3063:         * it is already allocating that object).
                   3064:         */
1.77      pk       3065:        p = (caddr_t) roundup((u_int)p, SRMMU_L1SIZE * sizeof(long));
                   3066:        kernel_regtable_store = (u_int *)p;
                   3067:        p += SRMMU_L1SIZE * sizeof(long);
                   3068:        bzero(kernel_regtable_store,
                   3069:              p - (caddr_t) kernel_regtable_store);
                   3070:
                   3071:        p = (caddr_t) roundup((u_int)p, SRMMU_L2SIZE * sizeof(long));
                   3072:        kernel_segtable_store = (u_int *)p;
                   3073:        p += (SRMMU_L2SIZE * sizeof(long)) * NKREG;
                   3074:        bzero(kernel_segtable_store,
                   3075:              p - (caddr_t) kernel_segtable_store);
                   3076:
                   3077:        p = (caddr_t) roundup((u_int)p, SRMMU_L3SIZE * sizeof(long));
                   3078:        kernel_pagtable_store = (u_int *)p;
                   3079:        p += ((SRMMU_L3SIZE * sizeof(long)) * NKREG) * NSEGRG;
                   3080:        bzero(kernel_pagtable_store,
                   3081:              p - (caddr_t) kernel_pagtable_store);
                   3082:
                   3083:        /* Round to next page and mark end of stolen pages */
                   3084:        p = (caddr_t)(((u_int)p + NBPG - 1) & ~PGOFSET);
1.79      pk       3085:        pagetables_end = p;
1.77      pk       3086:        unavail_end = (int)p - KERNBASE;
1.71      pk       3087:
                   3088:        /*
                   3089:         * Since we've statically allocated space to map the entire kernel,
                   3090:         * we might as well pre-wire the mappings to save time in pmap_enter.
                   3091:         * This also gets around nasty problems with caching of L1/L2 ptp's.
                   3092:         *
                   3093:         * XXX WHY DO WE HAVE THIS CACHING PROBLEM WITH L1/L2 PTPS????? %%%
                   3094:         */
                   3095:
                   3096:        pmap_kernel()->pm_reg_ptps = (int *) kernel_regtable_store;
                   3097:        pmap_kernel()->pm_reg_ptps_pa =
                   3098:                VA2PA((caddr_t)pmap_kernel()->pm_reg_ptps);
                   3099:
                   3100:        /* Install L1 table in context 0 */
1.79      pk       3101:        setpgt4m(&cpuinfo.ctx_tbl[0],
                   3102:            (pmap_kernel()->pm_reg_ptps_pa >> SRMMU_PPNPASHIFT) | SRMMU_TEPTD);
1.71      pk       3103:
                   3104:        /* XXX:rethink - Store pointer to region table address */
                   3105:        cpuinfo.L1_ptps = pmap_kernel()->pm_reg_ptps;
1.55      pk       3106:
1.71      pk       3107:        for (reg = VA_VREG(KERNBASE); reg < NKREG+VA_VREG(KERNBASE); reg++) {
1.77      pk       3108:                struct regmap *rp;
1.71      pk       3109:                caddr_t kphyssegtbl;
                   3110:
                   3111:                /*
1.77      pk       3112:                 * Entering new region; install & build segtbl
1.71      pk       3113:                 */
                   3114:                int kregnum = reg - VA_VREG(KERNBASE);
                   3115:
                   3116:                rp = &pmap_kernel()->pm_regmap[reg];
                   3117:
                   3118:                kphyssegtbl = (caddr_t)
                   3119:                    &kernel_segtable_store[kregnum * SRMMU_L2SIZE];
                   3120:
1.77      pk       3121:                setpgt4m(&pmap_kernel()->pm_reg_ptps[reg],
                   3122:                    (VA2PA(kphyssegtbl) >> SRMMU_PPNPASHIFT) | SRMMU_TEPTD);
1.71      pk       3123:
                   3124:                rp->rg_seg_ptps = (int *)kphyssegtbl;
                   3125:
                   3126:                if (rp->rg_segmap == NULL) {
                   3127:                        printf("rp->rg_segmap == NULL!\n");
                   3128:                        rp->rg_segmap = &kernel_segmap_store[kregnum * NSEGRG];
                   3129:                }
                   3130:
                   3131:                for (seg = 0; seg < NSEGRG; seg++) {
1.77      pk       3132:                        struct segmap *sp;
1.71      pk       3133:                        caddr_t kphyspagtbl;
                   3134:
                   3135:                        rp->rg_nsegmap++;
                   3136:
                   3137:                        sp = &rp->rg_segmap[seg];
                   3138:                        kphyspagtbl = (caddr_t)
                   3139:                            &kernel_pagtable_store
                   3140:                                [((kregnum * NSEGRG) + seg) * SRMMU_L3SIZE];
                   3141:
1.77      pk       3142:                        setpgt4m(&rp->rg_seg_ptps[seg],
                   3143:                                 (VA2PA(kphyspagtbl) >> SRMMU_PPNPASHIFT) |
                   3144:                                 SRMMU_TEPTD);
1.71      pk       3145:                        sp->sg_pte = (int *) kphyspagtbl;
                   3146:                }
                   3147:        }
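                                /*
                                 * Illustrative PTD encoding, as installed by the loop above,
                                 * assuming SRMMU_PPNPASHIFT == 4 and SRMMU_TEPTD == 1: a table
                                 * at a hypothetical PA 0x00400000 yields
                                 *
                                 *      (0x00400000 >> 4) | SRMMU_TEPTD == 0x00040001
                                 *
                                 * i.e. the table's physical address in the PTP field with the
                                 * low two bits marking the entry as a page table descriptor.
                                 */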
                   3148:
                   3149:        /*
                   3150:         * Preserve the monitor ROM's reserved VM region, so that
                   3151:         * we can use L1-A or the monitor's debugger.
1.55      pk       3152:         */
1.77      pk       3153:        mmu_reservemon4m(&kernel_pmap_store);
1.55      pk       3154:
                   3155:        /*
1.77      pk       3156:         * Reserve virtual address space for two mappable MD pages
                   3157:         * for pmap_zero_page and pmap_copy_page, one MI page
                   3158:         * for /dev/mem, and some more for dumpsys().
1.55      pk       3159:         */
1.77      pk       3160:        q = p;
1.55      pk       3161:        vpage[0] = p, p += NBPG;
                   3162:        vpage[1] = p, p += NBPG;
                   3163:        vmmap = p, p += NBPG;
                   3164:        p = reserve_dumppages(p);
                   3165:
                   3166:        /*
                   3167:         * Allocate virtual memory for pv_table[], which will be mapped
                   3168:         * sparsely in pmap_init().
                   3169:         */
                   3170:        pv_table = (struct pvlist *)p;
                   3171:        p += round_page(sizeof(struct pvlist) * atop(avail_end - avail_start));
                   3172:
                   3173:        virtual_avail = (vm_offset_t)p;
                   3174:        virtual_end = VM_MAX_KERNEL_ADDRESS;
                   3175:
1.77      pk       3176:        p = q;                  /* retract to first free phys */
1.55      pk       3177:
1.69      pk       3178:        /*
                   3179:         * Set up the ctxinfo structures (freelist of contexts)
1.55      pk       3180:         */
                   3181:        ci->c_pmap = pmap_kernel();
                   3182:        ctx_freelist = ci + 1;
                   3183:        for (i = 1; i < ncontext; i++) {
                   3184:                ci++;
                   3185:                ci->c_nextfree = ci + 1;
                   3186:        }
                   3187:        ci->c_nextfree = NULL;
                   3188:        ctx_kick = 0;
                   3189:        ctx_kickdir = -1;
                   3190:
1.69      pk       3191:        /*
                   3192:         * Now map the kernel into our new set of page tables, then
1.55      pk       3193:         * (finally) switch over to our running page tables.
                   3194:         * We map from KERNBASE to p into context 0's page tables (and
                   3195:         * the kernel pmap).
                   3196:         */
                   3197: #ifdef DEBUG                   /* Sanity checks */
                   3198:        if ((u_int)p % NBPG != 0)
1.69      pk       3199:                panic("pmap_bootstrap4m: p misaligned?!?");
1.55      pk       3200:        if (KERNBASE % NBPRG != 0)
1.69      pk       3201:                panic("pmap_bootstrap4m: KERNBASE not region-aligned");
1.55      pk       3202: #endif
1.69      pk       3203:
                   3204:        for (q = (caddr_t) KERNBASE; q < p; q += NBPG) {
1.77      pk       3205:                struct regmap *rp;
                   3206:                struct segmap *sp;
                   3207:                int pte;
                   3208:
1.79      pk       3209:                if ((int)q >= KERNBASE + avail_start &&
                   3210:                    (int)q < KERNBASE + unavail_start)
1.77      pk       3211:                        /* This gap is part of VM-managed pages */
                   3212:                        continue;
                   3213:
1.69      pk       3214:                /*
1.71      pk       3215:                 * Now install entry for current page.
1.69      pk       3216:                 */
1.77      pk       3217:                rp = &pmap_kernel()->pm_regmap[VA_VREG(q)];
                   3218:                sp = &rp->rg_segmap[VA_VSEG(q)];
                   3219:                sp->sg_npte++;
                   3220:
                   3221:                pte = ((int)q - KERNBASE) >> SRMMU_PPNPASHIFT;
                   3222:                pte |= PPROT_N_RX | SRMMU_PG_C | SRMMU_TEPTE;
                   3223:                /* write-protect kernel text */
                   3224:                if (q < (caddr_t) trapbase || q >= etext)
                   3225:                        pte |= PPROT_WRITE;
                   3226:
                   3227:                setpgt4m(&sp->sg_pte[VA_VPG(q)], pte);
1.69      pk       3228:        }
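                                /*
                                 * Illustrative PTE from the loop above (the kernel is loaded
                                 * at physical 0 here, so PA == va - KERNBASE; assume
                                 * SRMMU_PPNPASHIFT == 4): a hypothetical text page at
                                 * KERNBASE + 0x10000, lying between trapbase and etext, gets
                                 *
                                 *      (0x10000 >> 4) | PPROT_N_RX | SRMMU_PG_C | SRMMU_TEPTE
                                 *
                                 * with PPROT_WRITE deliberately left off.
                                 */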
                   3229:
1.77      pk       3230: #if 0
1.55      pk       3231:        /*
                   3232:         * We also install the kernel mapping into all other contexts by
1.69      pk       3233:         * copying the context 0 L1 PTP from cpuinfo.ctx_tbl[0] into the
1.55      pk       3234:         * remainder of the context table (i.e. we share the kernel page-
                   3235:         * tables). Each user pmap automatically gets the kernel mapped
                   3236:         * into it when it is created, but we do this extra step early on
                   3237:         * in case some twit decides to switch to a context with no user
                   3238:         * pmap associated with it.
                   3239:         */
                   3240:        for (i = 1; i < ncontext; i++)
1.69      pk       3241:                cpuinfo.ctx_tbl[i] = cpuinfo.ctx_tbl[0];
                   3242: #endif
1.55      pk       3243:
                   3244:        /*
                   3245:         * Now switch to kernel pagetables (finally!)
                   3246:         */
1.69      pk       3247:        mmu_install_tables(&cpuinfo);
1.79      pk       3248:
                   3249:        /* Mark all MMU tables uncacheable, if required */
                   3250:        if ((cpuinfo.flags & CPUFLG_CACHEPAGETABLES) == 0)
                   3251:                kvm_uncache(pagetables_start,
                   3252:                            (pagetables_end - pagetables_start) >> PGSHIFT);
                   3253:
1.69      pk       3254: }
                   3255:
                   3256: void
                   3257: mmu_install_tables(sc)
                   3258:        struct cpu_softc *sc;
                   3259: {
                   3260:
                   3261: #ifdef DEBUG
                   3262:        printf("pmap_bootstrap: installing kernel page tables...");
                   3263: #endif
1.71      pk       3264:        setcontext4m(0);        /* paranoia? %%%: Make 0x3 a define! below */
1.69      pk       3265:
                   3266:        /* Enable MMU tablewalk caching, flush TLB */
                   3267:        if (sc->mmu_enable != 0)
                   3268:                sc->mmu_enable();
                   3269:
                   3270:        tlb_flush_all();
                   3271:
                   3272:        sta(SRMMU_CXTPTR, ASI_SRMMU,
                   3273:            (VA2PA((caddr_t)sc->ctx_tbl) >> SRMMU_PPNPASHIFT) & ~0x3);
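                                /*
                                 * Illustrative value, assuming SRMMU_PPNPASHIFT == 4 and a
                                 * hypothetical context table at PA 0x00500000: the context
                                 * table pointer register receives
                                 * (0x00500000 >> 4) & ~0x3 == 0x00050000, i.e. the table
                                 * address in PTP form with the type bits cleared.
                                 */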
                   3274:
                   3275:        tlb_flush_all();
                   3276:
                   3277: #ifdef DEBUG
                   3278:        printf("done.\n");
                   3279: #endif
                   3280: }
1.55      pk       3281:
1.69      pk       3282: /*
                   3283:  * Allocate per-CPU page tables.
                   3284:  * Note: this routine is called in the context of the boot CPU
                   3285:  * during autoconfig.
                   3286:  */
                   3287: void
                   3288: pmap_alloc_cpu(sc)
                   3289:        struct cpu_softc *sc;
                   3290: {
1.72      pk       3291:        caddr_t cpustore;
                   3292:        int *ctxtable;
                   3293:        int *regtable;
                   3294:        int *segtable;
                   3295:        int *pagtable;
                   3296:        int vr, vs, vpg;
                   3297:        struct regmap *rp;
                   3298:        struct segmap *sp;
                   3299:
                   3300:        /* Allocate properly aligned and physically contiguous memory here */
                   3301:        cpustore = 0;
                   3302:        ctxtable = 0;
                   3303:        regtable = 0;
                   3304:        segtable = 0;
                   3305:        pagtable = 0;
                   3306:
                   3307:        vr = VA_VREG(CPUINFO_VA);
                   3308:        vs = VA_VSEG(CPUINFO_VA);
                   3309:        vpg = VA_VPG(CPUINFO_VA);
                   3310:        rp = &pmap_kernel()->pm_regmap[vr];
                   3311:        sp = &rp->rg_segmap[vs];
                   3312:
                   3313:        /*
                   3314:         * Copy page tables, then modify entry for CPUINFO_VA so that
                   3315:         * it points at the per-CPU pages.
                   3316:         */
                   3317:        bcopy(cpuinfo.L1_ptps, regtable, SRMMU_L1SIZE * sizeof(int));
                   3318:        regtable[vr] =
                   3319:                (VA2PA((caddr_t)segtable) >> SRMMU_PPNPASHIFT) | SRMMU_TEPTD;
                   3320:
                   3321:        bcopy(rp->rg_seg_ptps, segtable, SRMMU_L2SIZE * sizeof(int));
                   3322:        segtable[vs] =
                   3323:                (VA2PA((caddr_t)pagtable) >> SRMMU_PPNPASHIFT) | SRMMU_TEPTD;
                   3324:
                   3325:        bcopy(sp->sg_pte, pagtable, SRMMU_L3SIZE * sizeof(int));
                   3326:        pagtable[vpg] =
                   3327:                (VA2PA((caddr_t)cpustore) >> SRMMU_PPNPASHIFT) |
                   3328:                (SRMMU_TEPTE | PPROT_RWX_RWX | SRMMU_PG_C);
1.69      pk       3329:
1.72      pk       3330:        /* Install L1 table in context 0 */
                   3331:        ctxtable[0] = ((u_int)regtable >> SRMMU_PPNPASHIFT) | SRMMU_TEPTD;
                   3332:
                   3333:        sc->ctx_tbl = ctxtable;
                   3334:        sc->L1_ptps = regtable;
1.69      pk       3335:
1.72      pk       3336: #if 0
1.69      pk       3337:        if ((sc->flags & CPUFLG_CACHEPAGETABLES) == 0) {
1.72      pk       3338:                kvm_uncache((caddr_t)0, 1);
1.69      pk       3339:        }
1.72      pk       3340: #endif
1.55      pk       3341: }
                   3342: #endif /* defined sun4m */
                   3343:
1.69      pk       3344:
1.55      pk       3345: void
                   3346: pmap_init()
                   3347: {
                   3348:        register vm_size_t s;
                   3349:        int pass1, nmem;
                   3350:        register struct memarr *mp;
                   3351:        vm_offset_t sva, va, eva;
                   3352:        vm_offset_t pa = 0;
                   3353:
                   3354:        if (PAGE_SIZE != NBPG)
                   3355:                panic("pmap_init: CLSIZE!=1");
                   3356:
                   3357:        /*
                   3358:         * Map pv_table[] as a `sparse' array. This requires two passes
                   3359:         * over the `pmemarr': (1) to determine the number of physical
                   3360:         * pages needed, and (2), to map the correct pieces of virtual
                   3361:         * memory allocated to pv_table[].
                   3362:         */
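                                /*
                                 * Sketch of the two passes (illustrative figures): given
                                 * pmemarr == { { 0, 32MB }, { 128MB, 32MB } } and
                                 * avail_start == 4MB, pass 1 totals the page-rounded pv_table
                                 * spans covering [4MB, 32MB) and [128MB, 160MB); pass 2 then
                                 * backs just those spans with physical pages, leaving the
                                 * stretch of pv_table corresponding to the 96MB hole unmapped.
                                 */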
                   3363:
                   3364:        s = 0;
                   3365:        pass1 = 1;
1.38      pk       3366:
                   3367: pass2:
1.37      pk       3368:        sva = eva = 0;
1.36      pk       3369:        for (mp = pmemarr, nmem = npmemarr; --nmem >= 0; mp++) {
1.37      pk       3370:                int len;
                   3371:                vm_offset_t addr;
1.36      pk       3372:
1.37      pk       3373:                len = mp->len;
1.38      pk       3374:                if ((addr = mp->addr) < avail_start) {
1.36      pk       3375:                        /*
1.38      pk       3376:                         * pv_table[] covers everything above `avail_start'.
1.36      pk       3377:                         */
1.38      pk       3378:                        addr = avail_start;
                   3379:                        len -= avail_start;
1.36      pk       3380:                }
1.37      pk       3381:                len = sizeof(struct pvlist) * atop(len);
                   3382:
1.38      pk       3383:                if (addr < avail_start || addr >= avail_end)
1.54      christos 3384:                        panic("pmap_init: unmanaged address: 0x%lx", addr);
1.36      pk       3385:
1.38      pk       3386:                va = (vm_offset_t)&pv_table[atop(addr - avail_start)];
1.37      pk       3387:                sva = trunc_page(va);
1.55      pk       3388:
1.37      pk       3389:                if (sva < eva) {
1.77      pk       3390:                        /* This chunk overlaps the previous in pv_table[] */
1.37      pk       3391:                        sva += PAGE_SIZE;
                   3392:                        if (sva < eva)
1.54      christos 3393:                                panic("pmap_init: sva(%lx) < eva(%lx)",
1.55      pk       3394:                                      sva, eva);
1.37      pk       3395:                }
                   3396:                eva = round_page(va + len);
1.38      pk       3397:                if (pass1) {
                   3398:                        /* Just counting */
                   3399:                        s += eva - sva;
                   3400:                        continue;
                   3401:                }
                   3402:
                   3403:                /* Map this piece of pv_table[] */
1.37      pk       3404:                for (va = sva; va < eva; va += PAGE_SIZE) {
1.42      mycroft  3405:                        pmap_enter(pmap_kernel(), va, pa,
1.36      pk       3406:                                   VM_PROT_READ|VM_PROT_WRITE, 1);
                   3407:                        pa += PAGE_SIZE;
                   3408:                }
1.38      pk       3409:                bzero((caddr_t)sva, eva - sva);
                   3410:        }
1.36      pk       3411:
1.38      pk       3412:        if (pass1) {
1.42      mycroft  3413:                pa = pmap_extract(pmap_kernel(), kmem_alloc(kernel_map, s));
1.38      pk       3414:                pass1 = 0;
                   3415:                goto pass2;
1.36      pk       3416:        }
1.38      pk       3417:
                   3418:        vm_first_phys = avail_start;
                   3419:        vm_num_phys = avail_end - avail_start;
1.36      pk       3420: }
                   3421:
1.1       deraadt  3422:
                   3423: /*
                   3424:  * Map physical addresses into kernel VM.
                   3425:  */
                   3426: vm_offset_t
                   3427: pmap_map(va, pa, endpa, prot)
                   3428:        register vm_offset_t va, pa, endpa;
                   3429:        register int prot;
                   3430: {
                   3431:        register int pgsize = PAGE_SIZE;
                   3432:
                   3433:        while (pa < endpa) {
1.42      mycroft  3434:                pmap_enter(pmap_kernel(), va, pa, prot, 1);
1.1       deraadt  3435:                va += pgsize;
                   3436:                pa += pgsize;
                   3437:        }
                   3438:        return (va);
                   3439: }
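                         /*
                          * Typical (illustrative) use from machine-dependent startup code,
                          * mapping a hypothetical device frame buffer:
                          *
                          *      va = pmap_map(va, fb_pa, fb_pa + fb_size,
                          *                    VM_PROT_READ | VM_PROT_WRITE);
                          */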
                   3440:
                   3441: /*
                   3442:  * Create and return a physical map.
                   3443:  *
                   3444:  * If size is nonzero, the map is useless. (ick)
                   3445:  */
                   3446: struct pmap *
                   3447: pmap_create(size)
                   3448:        vm_size_t size;
                   3449: {
                   3450:        register struct pmap *pm;
                   3451:
                   3452:        if (size)
                   3453:                return (NULL);
                   3454:        pm = (struct pmap *)malloc(sizeof *pm, M_VMPMAP, M_WAITOK);
                   3455: #ifdef DEBUG
                   3456:        if (pmapdebug & PDB_CREATE)
1.66      christos 3457:                printf("pmap_create: created %p\n", pm);
1.1       deraadt  3458: #endif
                   3459:        bzero((caddr_t)pm, sizeof *pm);
                   3460:        pmap_pinit(pm);
                   3461:        return (pm);
                   3462: }
                   3463:
                   3464: /*
                   3465:  * Initialize a preallocated and zeroed pmap structure,
                   3466:  * such as one in a vmspace structure.
                   3467:  */
                   3468: void
                   3469: pmap_pinit(pm)
                   3470:        register struct pmap *pm;
                   3471: {
1.53      christos 3472:        register int size;
1.43      pk       3473:        void *urp;
1.1       deraadt  3474:
                   3475: #ifdef DEBUG
                   3476:        if (pmapdebug & PDB_CREATE)
1.66      christos 3477:                printf("pmap_pinit(%p)\n", pm);
1.1       deraadt  3478: #endif
1.13      pk       3479:
1.43      pk       3480:        size = NUREG * sizeof(struct regmap);
1.55      pk       3481:
1.43      pk       3482:        pm->pm_regstore = urp = malloc(size, M_VMPMAP, M_WAITOK);
1.55      pk       3483:        qzero((caddr_t)urp, size);
1.1       deraadt  3484:        /* pm->pm_ctx = NULL; */
                   3485:        simple_lock_init(&pm->pm_lock);
                   3486:        pm->pm_refcount = 1;
1.43      pk       3487:        pm->pm_regmap = urp;
1.55      pk       3488:
                   3489:        if (CPU_ISSUN4OR4C) {
                   3490:                TAILQ_INIT(&pm->pm_seglist);
1.69      pk       3491: #if defined(SUN4_MMU3L)
1.55      pk       3492:                TAILQ_INIT(&pm->pm_reglist);
1.69      pk       3493:                if (HASSUN4_MMU3L) {
                   3494:                        int i;
                   3495:                        for (i = NUREG; --i >= 0;)
                   3496:                                pm->pm_regmap[i].rg_smeg = reginval;
                   3497:                }
1.43      pk       3498: #endif
1.55      pk       3499:        }
                   3500: #if defined(SUN4M)
                   3501:        else {
1.79      pk       3502:                int i;
                   3503:
1.55      pk       3504:                /*
                   3505:                 * We must allocate and initialize hardware-readable (MMU)
                   3506:                 * pagetables. We must also map the kernel regions into this
                   3507:                 * pmap's pagetables, so that we can access the kernel from
                   3508:                 * user mode!
                   3509:                 *
                   3510:                 * Note: pm->pm_regmap's have been zeroed already, so we don't
                   3511:                 * need to explicitly mark them as invalid (a null
                   3512:                 * rg_seg_ptps pointer indicates invalid for the 4m)
                   3513:                 */
                   3514:                urp = malloc(SRMMU_L1SIZE * sizeof(int), M_VMPMAP, M_WAITOK);
1.72      pk       3515: #if 0
1.69      pk       3516:                if ((cpuinfo.flags & CPUFLG_CACHEPAGETABLES) == 0)
1.61      pk       3517:                        kvm_uncache(urp,
                   3518:                                    ((SRMMU_L1SIZE*sizeof(int))+NBPG-1)/NBPG);
1.72      pk       3519: #endif
1.55      pk       3520:
                   3521: #ifdef DEBUG
                   3522:                if ((u_int) urp % (SRMMU_L1SIZE * sizeof(int)))
1.61      pk       3523:                        panic("pmap_pinit: malloc() not giving aligned memory");
1.55      pk       3524: #endif
                   3525:                pm->pm_reg_ptps = urp;
                   3526:                pm->pm_reg_ptps_pa = VA2PA(urp);
                   3527:                qzero(urp, SRMMU_L1SIZE * sizeof(int));
                   3528:
1.79      pk       3529:                /* Copy kernel regions */
                   3530:                for (i = 0; i < NKREG; i++) {
                   3531:                        setpgt4m(&pm->pm_reg_ptps[VA_VREG(KERNBASE) + i],
                   3532:                                 cpuinfo.L1_ptps[VA_VREG(KERNBASE) + i]);
                   3533:                }
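                                        /*
                                         * Illustrative effect of the copy above: the new
                                         * pmap's L1 table now holds the same region PTDs
                                         * as the kernel's, so it shares the kernel's L2
                                         * tables for regions VA_VREG(KERNBASE)..
                                         * VA_VREG(KERNBASE) + NKREG - 1, and kernel
                                         * mappings are visible from this pmap's context
                                         * without any per-pmap bookkeeping.
                                         */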
1.55      pk       3534:        }
                   3535: #endif
                   3536:
1.43      pk       3537:        pm->pm_gap_end = VA_VREG(VM_MAXUSER_ADDRESS);
                   3538:
                   3539:        return;
1.1       deraadt  3540: }
                   3541:
                   3542: /*
                   3543:  * Retire the given pmap from service.
                   3544:  * Should only be called if the map contains no valid mappings.
                   3545:  */
                   3546: void
                   3547: pmap_destroy(pm)
                   3548:        register struct pmap *pm;
                   3549: {
                   3550:        int count;
                   3551:
                   3552:        if (pm == NULL)
                   3553:                return;
                   3554: #ifdef DEBUG
                   3555:        if (pmapdebug & PDB_DESTROY)
1.66      christos 3556:                printf("pmap_destroy(%p)\n", pm);
1.1       deraadt  3557: #endif
                   3558:        simple_lock(&pm->pm_lock);
                   3559:        count = --pm->pm_refcount;
                   3560:        simple_unlock(&pm->pm_lock);
                   3561:        if (count == 0) {
                   3562:                pmap_release(pm);
1.49      pk       3563:                free(pm, M_VMPMAP);
1.1       deraadt  3564:        }
                   3565: }
                   3566:
                   3567: /*
                   3568:  * Release any resources held by the given physical map.
                   3569:  * Called when a pmap initialized by pmap_pinit is being released.
                   3570:  */
                   3571: void
                   3572: pmap_release(pm)
                   3573:        register struct pmap *pm;
                   3574: {
                   3575:        register union ctxinfo *c;
                   3576:        register int s = splpmap();     /* paranoia */
                   3577:
                   3578: #ifdef DEBUG
                   3579:        if (pmapdebug & PDB_DESTROY)
1.66      christos 3580:                printf("pmap_release(%p)\n", pm);
1.1       deraadt  3581: #endif
1.55      pk       3582:
                   3583:        if (CPU_ISSUN4OR4C) {
1.69      pk       3584: #if defined(SUN4_MMU3L)
1.55      pk       3585:                if (pm->pm_reglist.tqh_first)
                   3586:                        panic("pmap_release: region list not empty");
1.43      pk       3587: #endif
1.55      pk       3588:                if (pm->pm_seglist.tqh_first)
                   3589:                        panic("pmap_release: segment list not empty");
                   3590:
                   3591:                if ((c = pm->pm_ctx) != NULL) {
                   3592:                        if (pm->pm_ctxnum == 0)
                   3593:                                panic("pmap_release: releasing kernel");
                   3594:                        ctx_free(pm);
                   3595:                }
1.1       deraadt  3596:        }
                   3597:        splx(s);
1.55      pk       3598:
1.43      pk       3599: #ifdef DEBUG
1.55      pk       3600: if (pmapdebug) {
1.43      pk       3601:        int vs, vr;
                   3602:        for (vr = 0; vr < NUREG; vr++) {
                   3603:                struct regmap *rp = &pm->pm_regmap[vr];
                   3604:                if (rp->rg_nsegmap != 0)
1.66      christos 3605:                        printf("pmap_release: %d segments remain in "
1.43      pk       3606:                                "region %d\n", rp->rg_nsegmap, vr);
                   3607:                if (rp->rg_segmap != NULL) {
1.66      christos 3608:                        printf("pmap_release: segments still "
1.43      pk       3609:                                "allocated in region %d\n", vr);
                   3610:                        for (vs = 0; vs < NSEGRG; vs++) {
                   3611:                                struct segmap *sp = &rp->rg_segmap[vs];
                   3612:                                if (sp->sg_npte != 0)
1.66      christos 3613:                                        printf("pmap_release: %d ptes "
1.43      pk       3614:                                             "remain in segment %d\n",
                   3615:                                                sp->sg_npte, vs);
                   3616:                                if (sp->sg_pte != NULL) {
1.66      christos 3617:                                        printf("pmap_release: ptes still "
1.43      pk       3618:                                             "allocated in segment %d\n", vs);
                   3619:                                }
                   3620:                        }
                   3621:                }
                   3622:        }
                   3623: }
                   3624: #endif
                   3625:        if (pm->pm_regstore)
1.49      pk       3626:                free(pm->pm_regstore, M_VMPMAP);
1.55      pk       3627:
                   3628:        if (CPU_ISSUN4M) {
                   3629:                if ((c = pm->pm_ctx) != NULL) {
                   3630:                        if (pm->pm_ctxnum == 0)
                   3631:                                panic("pmap_release: releasing kernel");
                   3632:                        ctx_free(pm);
                   3633:                }
                   3634:                free(pm->pm_reg_ptps, M_VMPMAP);
                   3635:                pm->pm_reg_ptps = NULL;
                   3636:                pm->pm_reg_ptps_pa = 0;
                   3637:        }
1.1       deraadt  3638: }
                   3639:
                   3640: /*
                   3641:  * Add a reference to the given pmap.
                   3642:  */
                   3643: void
                   3644: pmap_reference(pm)
                   3645:        struct pmap *pm;
                   3646: {
                   3647:
                   3648:        if (pm != NULL) {
                   3649:                simple_lock(&pm->pm_lock);
                   3650:                pm->pm_refcount++;
                   3651:                simple_unlock(&pm->pm_lock);
                   3652:        }
                   3653: }
                   3654:
                   3655: /*
                   3656:  * Remove the given range of mapping entries.
                   3657:  * The starting and ending addresses are already rounded to pages.
                   3658:  * Sheer lunacy: pmap_remove is often asked to remove nonexistent
                   3659:  * mappings.
                   3660:  */
                   3661: void
                   3662: pmap_remove(pm, va, endva)
                   3663:        register struct pmap *pm;
                   3664:        register vm_offset_t va, endva;
                   3665: {
                   3666:        register vm_offset_t nva;
1.43      pk       3667:        register int vr, vs, s, ctx;
                   3668:        register void (*rm)(struct pmap *, vm_offset_t, vm_offset_t, int, int);
1.1       deraadt  3669:
                   3670:        if (pm == NULL)
                   3671:                return;
1.13      pk       3672:
1.1       deraadt  3673: #ifdef DEBUG
                   3674:        if (pmapdebug & PDB_REMOVE)
1.66      christos 3675:                printf("pmap_remove(%p, %lx, %lx)\n", pm, va, endva);
1.1       deraadt  3676: #endif
                   3677:
1.42      mycroft  3678:        if (pm == pmap_kernel()) {
1.1       deraadt  3679:                /*
                   3680:                 * Removing from kernel address space.
                   3681:                 */
                   3682:                rm = pmap_rmk;
                   3683:        } else {
                   3684:                /*
                   3685:                 * Removing from user address space.
                   3686:                 */
                   3687:                write_user_windows();
                   3688:                rm = pmap_rmu;
                   3689:        }
                   3690:
                   3691:        ctx = getcontext();
                   3692:        s = splpmap();          /* XXX conservative */
                   3693:        simple_lock(&pm->pm_lock);
                   3694:        for (; va < endva; va = nva) {
                   3695:                /* do one virtual segment at a time */
1.43      pk       3696:                vr = VA_VREG(va);
                   3697:                vs = VA_VSEG(va);
                   3698:                nva = VSTOVA(vr, vs + 1);
1.1       deraadt  3699:                if (nva == 0 || nva > endva)
                   3700:                        nva = endva;
1.76      pk       3701:                if (pm->pm_regmap[vr].rg_nsegmap != 0)
                   3702:                        (*rm)(pm, va, nva, vr, vs);
1.1       deraadt  3703:        }
                   3704:        simple_unlock(&pm->pm_lock);
                   3705:        splx(s);
                   3706:        setcontext(ctx);
                   3707: }
                   3708:
                   3709: /*
                   3710:  * The following magic number was chosen because:
                   3711:  *     1. It is the same amount of work to cache_flush_page 4 pages
                   3712:  *        as to cache_flush_segment 1 segment (so at 4 the cost of
                   3713:  *        flush is the same).
                   3714:  *     2. Flushing extra pages is bad (causes cache not to work).
                   3715:  *     3. The current code, which malloc()s 5 pages for each process
                   3716:  *        for a user vmspace/pmap, almost never touches all 5 of those
                   3717:  *        pages.
                   3718:  */
1.13      pk       3719: #if 0
                   3720: #define        PMAP_RMK_MAGIC  (cacheinfo.c_hwflush?5:64)      /* if > magic, use cache_flush_segment */
                   3721: #else
1.1       deraadt  3722: #define        PMAP_RMK_MAGIC  5       /* if > magic, use cache_flush_segment */
1.13      pk       3723: #endif
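                         /*
                          * Illustrative cost model (assuming 64 pages per segment, as on
                          * the sun4c): removing more than PMAP_RMK_MAGIC == 5 pages flushes
                          * the whole segment once instead of issuing up to 64 per-page
                          * flushes, consistent with point (1) above where ~4 page flushes
                          * cost about one segment flush.
                          */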
1.1       deraadt  3724:
                   3725: /*
                   3726:  * Remove a range contained within a single segment.
                   3727:  * These are egregiously complicated routines.
                   3728:  */
                   3729:
1.55      pk       3730: #if defined(SUN4) || defined(SUN4C)
                   3731:
1.43      pk       3732: /* remove from kernel */
1.55      pk       3733: /*static*/ void
                   3734: pmap_rmk4_4c(pm, va, endva, vr, vs)
1.1       deraadt  3735:        register struct pmap *pm;
                   3736:        register vm_offset_t va, endva;
1.43      pk       3737:        register int vr, vs;
1.1       deraadt  3738: {
                   3739:        register int i, tpte, perpage, npg;
                   3740:        register struct pvlist *pv;
1.43      pk       3741:        register int nleft, pmeg;
                   3742:        struct regmap *rp;
                   3743:        struct segmap *sp;
                   3744:
                   3745:        rp = &pm->pm_regmap[vr];
                   3746:        sp = &rp->rg_segmap[vs];
                   3747:
                   3748:        if (rp->rg_nsegmap == 0)
                   3749:                return;
                   3750:
                   3751: #ifdef DEBUG
                   3752:        if (rp->rg_segmap == NULL)
                   3753:                panic("pmap_rmk: no segments");
                   3754: #endif
                   3755:
                   3756:        if ((nleft = sp->sg_npte) == 0)
                   3757:                return;
                   3758:
                   3759:        pmeg = sp->sg_pmeg;
1.1       deraadt  3760:
                   3761: #ifdef DEBUG
                   3762:        if (pmeg == seginval)
                   3763:                panic("pmap_rmk: not loaded");
                   3764:        if (pm->pm_ctx == NULL)
                   3765:                panic("pmap_rmk: lost context");
                   3766: #endif
                   3767:
1.71      pk       3768:        setcontext4(0);
1.1       deraadt  3769:        /* decide how to flush cache */
                   3770:        npg = (endva - va) >> PGSHIFT;
                   3771:        if (npg > PMAP_RMK_MAGIC) {
                   3772:                /* flush the whole segment */
                   3773:                perpage = 0;
1.69      pk       3774:                cache_flush_segment(vr, vs);
1.1       deraadt  3775:        } else {
                   3776:                /* flush each page individually; some never need flushing */
1.69      pk       3777:                perpage = (CACHEINFO.c_vactype != VAC_NONE);
1.1       deraadt  3778:        }
                   3779:        while (va < endva) {
1.55      pk       3780:                tpte = getpte4(va);
1.1       deraadt  3781:                if ((tpte & PG_V) == 0) {
1.63      pk       3782:                        va += NBPG;
1.1       deraadt  3783:                        continue;
                   3784:                }
1.35      pk       3785:                if ((tpte & PG_TYPE) == PG_OBMEM) {
                   3786:                        /* if cacheable, flush page as needed */
                   3787:                        if (perpage && (tpte & PG_NC) == 0)
1.1       deraadt  3788:                                cache_flush_page(va);
1.60      pk       3789:                        i = ptoa(tpte & PG_PFNUM);
1.1       deraadt  3790:                        if (managed(i)) {
                   3791:                                pv = pvhead(i);
1.55      pk       3792:                                pv->pv_flags |= MR4_4C(tpte);
1.58      pk       3793:                                pv_unlink4_4c(pv, pm, va);
1.1       deraadt  3794:                        }
                   3795:                }
                   3796:                nleft--;
1.55      pk       3797:                setpte4(va, 0);
1.1       deraadt  3798:                va += NBPG;
                   3799:        }
                   3800:
                   3801:        /*
                   3802:         * If the segment is all gone, remove it from everyone and
                   3803:         * free the MMU entry.
                   3804:         */
1.43      pk       3805:        if ((sp->sg_npte = nleft) == 0) {
                   3806:                va = VSTOVA(vr,vs);             /* retract */
1.69      pk       3807: #if defined(SUN4_MMU3L)
                   3808:                if (HASSUN4_MMU3L)
1.1       deraadt  3809:                        setsegmap(va, seginval);
1.43      pk       3810:                else
                   3811: #endif
                   3812:                        for (i = ncontext; --i >= 0;) {
1.71      pk       3813:                                setcontext4(i);
1.43      pk       3814:                                setsegmap(va, seginval);
                   3815:                        }
                   3816:                me_free(pm, pmeg);
                   3817:                if (--rp->rg_nsegmap == 0) {
1.69      pk       3818: #if defined(SUN4_MMU3L)
                   3819:                        if (HASSUN4_MMU3L) {
1.43      pk       3820:                                for (i = ncontext; --i >= 0;) {
1.71      pk       3821:                                        setcontext4(i);
1.43      pk       3822:                                        setregmap(va, reginval);
                   3823:                                }
                   3824:                                /* note: context is 0 */
                   3825:                                region_free(pm, rp->rg_smeg);
                   3826:                        }
                   3827: #endif
1.1       deraadt  3828:                }
                   3829:        }
                   3830: }
                   3831:
1.55      pk       3832: #endif /* sun4, sun4c */
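                          /*
                           * Editor's note (illustrative aside, not part of the source):
                           * pmap_rmk4_4c above relies on a common pmap idiom: the hardware
                           * referenced/modified bits are folded into the software pv entry
                           * *before* the PTE is destroyed, so pmap_is_referenced() and
                           * pmap_is_modified() still see them afterwards.  A condensed
                           * sketch, using only this file's own primitives:
                           */
                          #if 0   /* editor's sketch, condensed from pmap_rmk4_4c */
                                  tpte = getpte4(va);
                                  i = ptoa(tpte & PG_PFNUM);         /* physical address */
                                  if (managed(i)) {
                                          pv = pvhead(i);
                                          pv->pv_flags |= MR4_4C(tpte);  /* save ref/mod bits */
                                          pv_unlink4_4c(pv, pm, va);     /* drop this mapping */
                                  }
                                  setpte4(va, 0);                    /* now safe to clear PTE */
                          #endif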
1.1       deraadt  3833:
1.55      pk       3834: #if defined(SUN4M)             /* 4M version of pmap_rmk */
                    3835: /* remove from kernel (4m) */
                   3836: /*static*/ void
                   3837: pmap_rmk4m(pm, va, endva, vr, vs)
1.1       deraadt  3838:        register struct pmap *pm;
                   3839:        register vm_offset_t va, endva;
1.43      pk       3840:        register int vr, vs;
1.1       deraadt  3841: {
1.55      pk       3842:        register int i, tpte, perpage, npg;
1.1       deraadt  3843:        register struct pvlist *pv;
1.55      pk       3844:        register int nleft;
1.43      pk       3845:        struct regmap *rp;
                   3846:        struct segmap *sp;
                   3847:
                   3848:        rp = &pm->pm_regmap[vr];
1.55      pk       3849:        sp = &rp->rg_segmap[vs];
                   3850:
1.43      pk       3851:        if (rp->rg_nsegmap == 0)
                   3852:                return;
1.55      pk       3853:
                   3854: #ifdef DEBUG
1.43      pk       3855:        if (rp->rg_segmap == NULL)
1.55      pk       3856:                panic("pmap_rmk: no segments");
                   3857: #endif
1.43      pk       3858:
                   3859:        if ((nleft = sp->sg_npte) == 0)
                   3860:                return;
                   3861:
1.55      pk       3862: #ifdef DEBUG
                   3863:        if (sp->sg_pte == NULL || rp->rg_seg_ptps == NULL)
                   3864:                panic("pmap_rmk: segment/region does not exist");
                   3865:        if (pm->pm_ctx == NULL)
                   3866:                panic("pmap_rmk: lost context");
                   3867: #endif
1.43      pk       3868:
1.71      pk       3869:        setcontext4m(0);
1.55      pk       3870:        /* decide how to flush cache */
                   3871:        npg = (endva - va) >> PGSHIFT;
                   3872:        if (npg > PMAP_RMK_MAGIC) {
                   3873:                /* flush the whole segment */
                   3874:                perpage = 0;
1.69      pk       3875:                if (CACHEINFO.c_vactype != VAC_NONE)
1.55      pk       3876:                        cache_flush_segment(vr, vs);
                   3877:        } else {
                   3878:                /* flush each page individually; some never need flushing */
1.69      pk       3879:                perpage = (CACHEINFO.c_vactype != VAC_NONE);
1.55      pk       3880:        }
                   3881:        while (va < endva) {
1.72      pk       3882:                tpte = sp->sg_pte[VA_SUN4M_VPG(va)];
1.55      pk       3883:                if ((tpte & SRMMU_TETYPE) != SRMMU_TEPTE) {
1.72      pk       3884: #ifdef DEBUG
                   3885:                        if ((pmapdebug & PDB_SANITYCHK) &&
                   3886:                            (getpte4m(va) & SRMMU_TETYPE) == SRMMU_TEPTE)
1.81      pk       3887:                                panic("pmap_rmk: Spurious kTLB entry for %lx",
                   3888:                                      va);
1.72      pk       3889: #endif
1.61      pk       3890:                        va += NBPG;
1.55      pk       3891:                        continue;
                   3892:                }
                   3893:                if ((tpte & SRMMU_PGTYPE) == PG_SUN4M_OBMEM) {
                   3894:                        /* if cacheable, flush page as needed */
                   3895:                        if (perpage && (tpte & SRMMU_PG_C))
1.69      pk       3896:                                cache_flush_page(va);
1.60      pk       3897:                        i = ptoa((tpte & SRMMU_PPNMASK) >> SRMMU_PPNSHIFT);
1.55      pk       3898:                        if (managed(i)) {
                   3899:                                pv = pvhead(i);
                   3900:                                pv->pv_flags |= MR4M(tpte);
1.58      pk       3901:                                pv_unlink4m(pv, pm, va);
1.55      pk       3902:                        }
                   3903:                }
                   3904:                nleft--;
1.72      pk       3905:                tlb_flush_page(va);
                   3906:                setpgt4m(&sp->sg_pte[VA_SUN4M_VPG(va)], SRMMU_TEINVALID);
1.55      pk       3907:                va += NBPG;
                   3908:        }
                   3909:
                   3910:        /*
                   3911:         * If the segment is all gone, remove it from everyone and
                   3912:         * flush the TLB.
                   3913:         */
                   3914:        if ((sp->sg_npte = nleft) == 0) {
                   3915:                va = VSTOVA(vr,vs);             /* retract */
                   3916:
                   3917:                tlb_flush_segment(vr, vs);      /* Paranoia? */
                   3918:
1.58      pk       3919:                /*
                   3920:                 * We need to free the segment table. The problem is that
1.55      pk       3921:                 * we can't free the initial (bootstrap) mapping, so
                   3922:                 * we have to explicitly check for this case (ugh).
                   3923:                 */
                   3924:                if (va < virtual_avail) {
                   3925: #ifdef DEBUG
1.66      christos 3926:                        printf("pmap_rmk4m: attempt to free base kernel alloc\n");
1.55      pk       3927: #endif
                   3928:                        /* sp->sg_pte = NULL; */
                   3929:                        sp->sg_npte = 0;
                   3930:                        return;
                   3931:                }
                   3932:                /* no need to free the table; it is statically allocated */
                   3933:                qzero(sp->sg_pte, SRMMU_L3SIZE * sizeof(long));
                   3934:        }
                   3935:        /* if we're done with a region, leave it wired */
                   3936: }
                   3937: #endif /* sun4m */
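                          /*
                           * Editor's note (illustrative aside, not part of the source): on
                           * the sun4m the MMU walks page tables kept in main memory, so a
                           * mapping lives in two places: the TLB and the in-memory PTE.
                           * pmap_rmk4m above therefore pairs every PTE invalidation with a
                           * TLB flush.  Condensed sketch, using only this file's own
                           * primitives:
                           */
                          #if 0   /* editor's sketch, condensed from pmap_rmk4m */
                                  tlb_flush_page(va);     /* evict the cached translation */
                                  setpgt4m(&sp->sg_pte[VA_SUN4M_VPG(va)], SRMMU_TEINVALID);
                          #endif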
                   3938: /*
                    3939:  * Just like PMAP_RMK_MAGIC, but with a different threshold.
                   3940:  * Note that this may well deserve further tuning work.
                   3941:  */
                   3942: #if 0
                   3943: #define        PMAP_RMU_MAGIC  (cacheinfo.c_hwflush?4:64)      /* if > magic, use cache_flush_segment */
                   3944: #else
                   3945: #define        PMAP_RMU_MAGIC  4       /* if > magic, use cache_flush_segment */
                   3946: #endif
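                          /*
                           * Editor's note (illustrative aside, not part of the source): the
                           * MAGIC thresholds above encode a break-even point between one
                           * segment-wide cache flush and `npg' per-page flushes.  pmap_rmk
                           * and pmap_rmu make the same decision; only the threshold differs:
                           */
                          #if 0   /* editor's sketch of the flush decision */
                                  npg = (endva - va) >> PGSHIFT;
                                  if (npg > PMAP_RMU_MAGIC) {
                                          perpage = 0;    /* one big flush is cheaper */
                                          cache_flush_segment(vr, vs);
                                  } else
                                          perpage = (CACHEINFO.c_vactype != VAC_NONE);
                          #endif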
                   3947:
                   3948: #if defined(SUN4) || defined(SUN4C)
                   3949:
                   3950: /* remove from user */
                   3951: /*static*/ void
                   3952: pmap_rmu4_4c(pm, va, endva, vr, vs)
                   3953:        register struct pmap *pm;
                   3954:        register vm_offset_t va, endva;
                   3955:        register int vr, vs;
                   3956: {
                   3957:        register int *pte0, i, pteva, tpte, perpage, npg;
                   3958:        register struct pvlist *pv;
                   3959:        register int nleft, pmeg;
                   3960:        struct regmap *rp;
                   3961:        struct segmap *sp;
                   3962:
                   3963:        rp = &pm->pm_regmap[vr];
                   3964:        if (rp->rg_nsegmap == 0)
                   3965:                return;
                   3966:        if (rp->rg_segmap == NULL)
                   3967:                panic("pmap_rmu: no segments");
                   3968:
                   3969:        sp = &rp->rg_segmap[vs];
                   3970:        if ((nleft = sp->sg_npte) == 0)
                   3971:                return;
                   3972:        if (sp->sg_pte == NULL)
                   3973:                panic("pmap_rmu: no pages");
                   3974:
                   3975:
                   3976:        pmeg = sp->sg_pmeg;
                   3977:        pte0 = sp->sg_pte;
1.1       deraadt  3978:
                   3979:        if (pmeg == seginval) {
                   3980:                register int *pte = pte0 + VA_VPG(va);
                   3981:
                   3982:                /*
                   3983:                 * PTEs are not in MMU.  Just invalidate software copies.
                   3984:                 */
1.63      pk       3985:                for (; va < endva; pte++, va += NBPG) {
1.1       deraadt  3986:                        tpte = *pte;
                   3987:                        if ((tpte & PG_V) == 0) {
                   3988:                                /* nothing to remove (braindead VM layer) */
                   3989:                                continue;
                   3990:                        }
                   3991:                        if ((tpte & PG_TYPE) == PG_OBMEM) {
1.60      pk       3992:                                i = ptoa(tpte & PG_PFNUM);
1.21      deraadt  3993:                                if (managed(i))
1.58      pk       3994:                                        pv_unlink4_4c(pvhead(i), pm, va);
1.1       deraadt  3995:                        }
                   3996:                        nleft--;
                   3997:                        *pte = 0;
                   3998:                }
1.43      pk       3999:                if ((sp->sg_npte = nleft) == 0) {
1.49      pk       4000:                        free(pte0, M_VMPMAP);
1.43      pk       4001:                        sp->sg_pte = NULL;
                   4002:                        if (--rp->rg_nsegmap == 0) {
1.49      pk       4003:                                free(rp->rg_segmap, M_VMPMAP);
1.43      pk       4004:                                rp->rg_segmap = NULL;
1.69      pk       4005: #if defined(SUN4_MMU3L)
                   4006:                                if (HASSUN4_MMU3L && rp->rg_smeg != reginval) {
1.43      pk       4007:                                        if (pm->pm_ctx) {
1.71      pk       4008:                                                setcontext4(pm->pm_ctxnum);
1.43      pk       4009:                                                setregmap(va, reginval);
                   4010:                                        } else
1.71      pk       4011:                                                setcontext4(0);
1.43      pk       4012:                                        region_free(pm, rp->rg_smeg);
                   4013:                                }
                   4014: #endif
                   4015:                        }
1.1       deraadt  4016:                }
1.43      pk       4017:                return;
1.1       deraadt  4018:        }
                   4019:
                   4020:        /*
                   4021:         * PTEs are in MMU.  Invalidate in hardware, update ref &
                   4022:         * mod bits, and flush cache if required.
                   4023:         */
1.43      pk       4024:        if (CTX_USABLE(pm,rp)) {
1.1       deraadt  4025:                /* process has a context, must flush cache */
                   4026:                npg = (endva - va) >> PGSHIFT;
1.71      pk       4027:                setcontext4(pm->pm_ctxnum);
1.1       deraadt  4028:                if (npg > PMAP_RMU_MAGIC) {
                   4029:                        perpage = 0; /* flush the whole segment */
1.69      pk       4030:                        cache_flush_segment(vr, vs);
1.1       deraadt  4031:                } else
1.69      pk       4032:                        perpage = (CACHEINFO.c_vactype != VAC_NONE);
1.1       deraadt  4033:                pteva = va;
                   4034:        } else {
                   4035:                /* no context, use context 0; cache flush unnecessary */
1.71      pk       4036:                setcontext4(0);
1.69      pk       4037:                if (HASSUN4_MMU3L)
1.43      pk       4038:                        setregmap(0, tregion);
1.1       deraadt  4039:                /* XXX use per-cpu pteva? */
                   4040:                setsegmap(0, pmeg);
1.18      deraadt  4041:                pteva = VA_VPG(va) << PGSHIFT;
1.1       deraadt  4042:                perpage = 0;
                   4043:        }
1.63      pk       4044:        for (; va < endva; pteva += NBPG, va += NBPG) {
1.55      pk       4045:                tpte = getpte4(pteva);
1.1       deraadt  4046:                if ((tpte & PG_V) == 0)
                   4047:                        continue;
1.35      pk       4048:                if ((tpte & PG_TYPE) == PG_OBMEM) {
                   4049:                        /* if cacheable, flush page as needed */
                   4050:                        if (perpage && (tpte & PG_NC) == 0)
1.1       deraadt  4051:                                cache_flush_page(va);
1.60      pk       4052:                        i = ptoa(tpte & PG_PFNUM);
1.1       deraadt  4053:                        if (managed(i)) {
                   4054:                                pv = pvhead(i);
1.55      pk       4055:                                pv->pv_flags |= MR4_4C(tpte);
1.58      pk       4056:                                pv_unlink4_4c(pv, pm, va);
1.1       deraadt  4057:                        }
                   4058:                }
                   4059:                nleft--;
1.55      pk       4060:                setpte4(pteva, 0);
1.43      pk       4061: #define PMAP_PTESYNC
                   4062: #ifdef PMAP_PTESYNC
                   4063:                pte0[VA_VPG(pteva)] = 0;
                   4064: #endif
1.1       deraadt  4065:        }
                   4066:
                   4067:        /*
                   4068:         * If the segment is all gone, and the context is loaded, give
                   4069:         * the segment back.
                   4070:         */
1.43      pk       4071:        if ((sp->sg_npte = nleft) == 0 /* ??? && pm->pm_ctx != NULL*/) {
                   4072: #ifdef DEBUG
                    4073:                if (pm->pm_ctx == NULL) {
1.66      christos 4074:                        printf("pmap_rmu: no context here...");
1.43      pk       4075:                }
                   4076: #endif
                   4077:                va = VSTOVA(vr,vs);             /* retract */
                   4078:                if (CTX_USABLE(pm,rp))
                   4079:                        setsegmap(va, seginval);
1.69      pk       4080:                else if (HASSUN4_MMU3L && rp->rg_smeg != reginval) {
1.43      pk       4081:                        /* note: context already set earlier */
                   4082:                        setregmap(0, rp->rg_smeg);
                   4083:                        setsegmap(vs << SGSHIFT, seginval);
                   4084:                }
1.49      pk       4085:                free(pte0, M_VMPMAP);
1.43      pk       4086:                sp->sg_pte = NULL;
1.1       deraadt  4087:                me_free(pm, pmeg);
1.13      pk       4088:
1.43      pk       4089:                if (--rp->rg_nsegmap == 0) {
1.49      pk       4090:                        free(rp->rg_segmap, M_VMPMAP);
1.43      pk       4091:                        rp->rg_segmap = NULL;
                   4092:                        GAP_WIDEN(pm,vr);
                   4093:
1.69      pk       4094: #if defined(SUN4_MMU3L)
                   4095:                        if (HASSUN4_MMU3L && rp->rg_smeg != reginval) {
1.43      pk       4096:                                /* note: context already set */
                   4097:                                if (pm->pm_ctx)
                   4098:                                        setregmap(va, reginval);
                   4099:                                region_free(pm, rp->rg_smeg);
                   4100:                        }
                   4101: #endif
                   4102:                }
1.13      pk       4103:
1.1       deraadt  4104:        }
                   4105: }
                   4106:
1.55      pk       4107: #endif /* sun4,4c */
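                          /*
                           * Editor's note (illustrative aside, not part of the source):
                           * when a user pmap owns no hardware context, pmap_rmu4_4c above
                           * borrows context 0 and temporarily maps the victim pmeg at
                           * virtual address 0 so its PTEs become addressable; `pteva' then
                           * walks that borrowed window instead of the real va.  Condensed
                           * sketch:
                           */
                          #if 0   /* editor's sketch, condensed from pmap_rmu4_4c */
                                  setcontext4(0);
                                  if (HASSUN4_MMU3L)
                                          setregmap(0, tregion);   /* scratch region */
                                  setsegmap(0, pmeg);              /* window the pmeg at va 0 */
                                  pteva = VA_VPG(va) << PGSHIFT;   /* page offset in window */
                          #endif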
                   4108:
                   4109: #if defined(SUN4M)             /* 4M version of pmap_rmu */
                   4110: /* remove from user */
                   4111: /*static*/ void
                   4112: pmap_rmu4m(pm, va, endva, vr, vs)
                   4113:        register struct pmap *pm;
                   4114:        register vm_offset_t va, endva;
                   4115:        register int vr, vs;
                   4116: {
1.72      pk       4117:        register int *pte0, i, perpage, npg;
1.55      pk       4118:        register struct pvlist *pv;
                   4119:        register int nleft;
                   4120:        struct regmap *rp;
                   4121:        struct segmap *sp;
                   4122:
                   4123:        rp = &pm->pm_regmap[vr];
                   4124:        if (rp->rg_nsegmap == 0)
                   4125:                return;
                   4126:        if (rp->rg_segmap == NULL)
                   4127:                panic("pmap_rmu: no segments");
                   4128:
                   4129:        sp = &rp->rg_segmap[vs];
                   4130:        if ((nleft = sp->sg_npte) == 0)
                   4131:                return;
1.76      pk       4132:
1.55      pk       4133:        if (sp->sg_pte == NULL)
                   4134:                panic("pmap_rmu: no pages");
                   4135:
                   4136:        pte0 = sp->sg_pte;
                   4137:
                   4138:        /*
                   4139:         * Invalidate PTE in MMU pagetables. Flush cache if necessary.
                   4140:         */
1.72      pk       4141:        if (pm->pm_ctx) {
1.55      pk       4142:                /* process has a context, must flush cache */
1.71      pk       4143:                setcontext4m(pm->pm_ctxnum);
1.69      pk       4144:                if (CACHEINFO.c_vactype != VAC_NONE) {
1.63      pk       4145:                        npg = (endva - va) >> PGSHIFT;
                   4146:                        if (npg > PMAP_RMU_MAGIC) {
                   4147:                                perpage = 0; /* flush the whole segment */
1.55      pk       4148:                                cache_flush_segment(vr, vs);
1.63      pk       4149:                        } else
                   4150:                                perpage = 1;
1.55      pk       4151:                } else
1.63      pk       4152:                        perpage = 0;
1.55      pk       4153:        } else {
                   4154:                /* no context; cache flush unnecessary */
                   4155:                perpage = 0;
                   4156:        }
1.63      pk       4157:        for (; va < endva; va += NBPG) {
1.72      pk       4158:
                   4159:                int tpte = pte0[VA_SUN4M_VPG(va)];
                   4160:
                   4161:                if ((tpte & SRMMU_TETYPE) != SRMMU_TEPTE) {
                   4162: #ifdef DEBUG
                   4163:                        if ((pmapdebug & PDB_SANITYCHK) &&
                   4164:                            pm->pm_ctx &&
                    4165:            (getpte4m(va) & SRMMU_TETYPE) == SRMMU_TEPTE)
1.81      pk       4166:                                panic("pmap_rmu: Spurious uTLB entry for %lx",
                   4167:                                      va);
1.72      pk       4168: #endif
1.55      pk       4169:                        continue;
1.72      pk       4170:                }
                   4171:
1.55      pk       4172:                if ((tpte & SRMMU_PGTYPE) == PG_SUN4M_OBMEM) {
                   4173:                        /* if cacheable, flush page as needed */
                   4174:                        if (perpage && (tpte & SRMMU_PG_C))
1.60      pk       4175:                                cache_flush_page(va);
                   4176:                        i = ptoa((tpte & SRMMU_PPNMASK) >> SRMMU_PPNSHIFT);
1.55      pk       4177:                        if (managed(i)) {
                   4178:                                pv = pvhead(i);
                   4179:                                pv->pv_flags |= MR4M(tpte);
1.58      pk       4180:                                pv_unlink4m(pv, pm, va);
1.55      pk       4181:                        }
                   4182:                }
                   4183:                nleft--;
1.72      pk       4184:                if (pm->pm_ctx)
                   4185:                        tlb_flush_page(va);
                   4186:                setpgt4m(&pte0[VA_SUN4M_VPG(va)], SRMMU_TEINVALID);
1.55      pk       4187:        }
                   4188:
                   4189:        /*
                   4190:         * If the segment is all gone, and the context is loaded, give
                   4191:         * the segment back.
                   4192:         */
1.72      pk       4193:        if ((sp->sg_npte = nleft) == 0) {
1.55      pk       4194: #ifdef DEBUG
                   4195:                if (pm->pm_ctx == NULL) {
1.66      christos 4196:                        printf("pmap_rmu: no context here...");
1.55      pk       4197:                }
                   4198: #endif
                   4199:                va = VSTOVA(vr,vs);             /* retract */
                   4200:
                   4201:                tlb_flush_segment(vr, vs);      /* Paranoia? */
1.73      pk       4202:                setpgt4m(&rp->rg_seg_ptps[vs], SRMMU_TEINVALID);
1.55      pk       4203:                free(pte0, M_VMPMAP);
                   4204:                sp->sg_pte = NULL;
                   4205:
                   4206:                if (--rp->rg_nsegmap == 0) {
                   4207:                        free(rp->rg_segmap, M_VMPMAP);
                   4208:                        rp->rg_segmap = NULL;
                   4209:                        free(rp->rg_seg_ptps, M_VMPMAP);
1.73      pk       4210:                        setpgt4m(&pm->pm_reg_ptps[vr], SRMMU_TEINVALID);
1.55      pk       4211:                }
                   4212:        }
                   4213: }
                   4214: #endif /* sun4m */
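                          /*
                           * Editor's note (illustrative aside, not part of the source):
                           * tearing down an empty user segment on the sun4m unhooks the
                           * three-level SRMMU tables leaf first, as pmap_rmu4m does above.
                           * Condensed sketch:
                           */
                          #if 0   /* editor's sketch, condensed from pmap_rmu4m */
                                  setpgt4m(&rp->rg_seg_ptps[vs], SRMMU_TEINVALID); /* cut L2->L3 link */
                                  free(pte0, M_VMPMAP);                            /* free L3 page table */
                                  if (--rp->rg_nsegmap == 0)
                                          setpgt4m(&pm->pm_reg_ptps[vr], SRMMU_TEINVALID); /* cut L1->L2 */
                          #endif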
                   4215:
1.1       deraadt  4216: /*
                   4217:  * Lower (make more strict) the protection on the specified
                   4218:  * physical page.
                   4219:  *
                   4220:  * There are only two cases: either the protection is going to 0
                    4221:  * (in which case we do the dirty work here), or it is going
                    4222:  * to read-only (in which case pv_changepte does the trick).
                   4223:  */
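                          /*
                           * Editor's note (illustrative aside, not part of the source): the
                           * two cases described above reduce to a dispatch of roughly this
                           * shape, condensed from pmap_page_protect4_4c below:
                           */
                          #if 0   /* editor's sketch of the dispatch */
                                  if (prot & VM_PROT_WRITE)
                                          return;         /* nothing is being revoked */
                                  if (prot & VM_PROT_READ) {
                                          pv_changepte4_4c(pvhead(pa), 0, PG_W); /* strip write bit */
                                          return;
                                  }
                                  /* else: walk the pv list and remove every mapping of the page */
                          #endif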
1.55      pk       4224:
                   4225: #if defined(SUN4) || defined(SUN4C)
1.1       deraadt  4226: void
1.55      pk       4227: pmap_page_protect4_4c(pa, prot)
1.1       deraadt  4228:        vm_offset_t pa;
                   4229:        vm_prot_t prot;
                   4230: {
                   4231:        register struct pvlist *pv, *pv0, *npv;
                   4232:        register struct pmap *pm;
1.43      pk       4233:        register int va, vr, vs, pteva, tpte;
1.53      christos 4234:        register int flags, nleft, i, s, ctx;
1.43      pk       4235:        struct regmap *rp;
                   4236:        struct segmap *sp;
1.1       deraadt  4237:
                   4238: #ifdef DEBUG
1.43      pk       4239:        if (!pmap_pa_exists(pa))
1.54      christos 4240:                panic("pmap_page_protect: no such address: %lx", pa);
1.1       deraadt  4241:        if ((pmapdebug & PDB_CHANGEPROT) ||
                   4242:            (pmapdebug & PDB_REMOVE && prot == VM_PROT_NONE))
1.66      christos 4243:                printf("pmap_page_protect(%lx, %x)\n", pa, prot);
1.1       deraadt  4244: #endif
                   4245:        /*
                   4246:         * Skip unmanaged pages, or operations that do not take
                   4247:         * away write permission.
                   4248:         */
1.82      pk       4249:        if ((pa & (PMAP_TNC_4 & ~PMAP_NC)) ||
1.34      pk       4250:             !managed(pa) || prot & VM_PROT_WRITE)
1.1       deraadt  4251:                return;
                   4252:        write_user_windows();   /* paranoia */
                   4253:        if (prot & VM_PROT_READ) {
1.58      pk       4254:                pv_changepte4_4c(pvhead(pa), 0, PG_W);
1.1       deraadt  4255:                return;
                   4256:        }
                   4257:
                   4258:        /*
                   4259:         * Remove all access to all people talking to this page.
                   4260:         * Walk down PV list, removing all mappings.
                   4261:         * The logic is much like that for pmap_remove,
                   4262:         * but we know we are removing exactly one page.
                   4263:         */
                   4264:        pv = pvhead(pa);
                   4265:        s = splpmap();
                   4266:        if ((pm = pv->pv_pmap) == NULL) {
                   4267:                splx(s);
                   4268:                return;
                   4269:        }
1.71      pk       4270:        ctx = getcontext4();
1.1       deraadt  4271:        pv0 = pv;
                   4272:        flags = pv->pv_flags & ~PV_NC;
                   4273:        for (;; pm = pv->pv_pmap) {
                   4274:                va = pv->pv_va;
1.43      pk       4275:                vr = VA_VREG(va);
                   4276:                vs = VA_VSEG(va);
                   4277:                rp = &pm->pm_regmap[vr];
                   4278:                if (rp->rg_nsegmap == 0)
                    4279:                        panic("pmap_page_protect: empty vreg");
                   4280:                sp = &rp->rg_segmap[vs];
                   4281:                if ((nleft = sp->sg_npte) == 0)
1.1       deraadt  4282:                        panic("pmap_page_protect: empty vseg");
                   4283:                nleft--;
1.43      pk       4284:                sp->sg_npte = nleft;
                   4285:
                   4286:                if (sp->sg_pmeg == seginval) {
                   4287:                        /* Definitely not a kernel map */
1.1       deraadt  4288:                        if (nleft) {
1.43      pk       4289:                                sp->sg_pte[VA_VPG(va)] = 0;
1.1       deraadt  4290:                        } else {
1.49      pk       4291:                                free(sp->sg_pte, M_VMPMAP);
1.43      pk       4292:                                sp->sg_pte = NULL;
                   4293:                                if (--rp->rg_nsegmap == 0) {
1.49      pk       4294:                                        free(rp->rg_segmap, M_VMPMAP);
1.43      pk       4295:                                        rp->rg_segmap = NULL;
                   4296:                                        GAP_WIDEN(pm,vr);
1.69      pk       4297: #if defined(SUN4_MMU3L)
                   4298:                                        if (HASSUN4_MMU3L && rp->rg_smeg != reginval) {
1.43      pk       4299:                                                if (pm->pm_ctx) {
1.71      pk       4300:                                                        setcontext4(pm->pm_ctxnum);
1.43      pk       4301:                                                        setregmap(va, reginval);
                   4302:                                                } else
1.71      pk       4303:                                                        setcontext4(0);
1.43      pk       4304:                                                region_free(pm, rp->rg_smeg);
                   4305:                                        }
                   4306: #endif
                   4307:                                }
1.1       deraadt  4308:                        }
                   4309:                        goto nextpv;
                   4310:                }
1.43      pk       4311:                if (CTX_USABLE(pm,rp)) {
1.71      pk       4312:                        setcontext4(pm->pm_ctxnum);
1.1       deraadt  4313:                        pteva = va;
1.69      pk       4314:                        cache_flush_page(va);
1.1       deraadt  4315:                } else {
1.71      pk       4316:                        setcontext4(0);
1.1       deraadt  4317:                        /* XXX use per-cpu pteva? */
1.69      pk       4318:                        if (HASSUN4_MMU3L)
1.43      pk       4319:                                setregmap(0, tregion);
                   4320:                        setsegmap(0, sp->sg_pmeg);
1.18      deraadt  4321:                        pteva = VA_VPG(va) << PGSHIFT;
1.1       deraadt  4322:                }
1.43      pk       4323:
1.55      pk       4324:                tpte = getpte4(pteva);
1.43      pk       4325:                if ((tpte & PG_V) == 0)
                   4326:                        panic("pmap_page_protect !PG_V");
1.55      pk       4327:                flags |= MR4_4C(tpte);
1.43      pk       4328:
1.1       deraadt  4329:                if (nleft) {
1.55      pk       4330:                        setpte4(pteva, 0);
1.43      pk       4331: #ifdef PMAP_PTESYNC
1.44      pk       4332:                        if (sp->sg_pte != NULL)
                   4333:                                sp->sg_pte[VA_VPG(pteva)] = 0;
1.43      pk       4334: #endif
1.1       deraadt  4335:                } else {
1.43      pk       4336:                        if (pm == pmap_kernel()) {
1.69      pk       4337: #if defined(SUN4_MMU3L)
                   4338:                                if (!HASSUN4_MMU3L)
1.43      pk       4339: #endif
                   4340:                                        for (i = ncontext; --i >= 0;) {
1.71      pk       4341:                                                setcontext4(i);
1.1       deraadt  4342:                                                setsegmap(va, seginval);
                   4343:                                        }
1.43      pk       4344:                                me_free(pm, sp->sg_pmeg);
                   4345:                                if (--rp->rg_nsegmap == 0) {
1.69      pk       4346: #if defined(SUN4_MMU3L)
                   4347:                                        if (HASSUN4_MMU3L) {
1.43      pk       4348:                                                for (i = ncontext; --i >= 0;) {
1.71      pk       4349:                                                        setcontext4(i);
1.43      pk       4350:                                                        setregmap(va, reginval);
                   4351:                                                }
                   4352:                                                region_free(pm, rp->rg_smeg);
                   4353:                                        }
                   4354: #endif
                   4355:                                }
                   4356:                        } else {
                   4357:                                if (CTX_USABLE(pm,rp))
                   4358:                                        /* `pteva'; we might be using tregion */
                   4359:                                        setsegmap(pteva, seginval);
1.69      pk       4360: #if defined(SUN4_MMU3L)
1.72      pk       4361:                                else if (HASSUN4_MMU3L &&
                   4362:                                         rp->rg_smeg != reginval) {
1.43      pk       4363:                                        /* note: context already set earlier */
                   4364:                                        setregmap(0, rp->rg_smeg);
                   4365:                                        setsegmap(vs << SGSHIFT, seginval);
                   4366:                                }
                   4367: #endif
1.49      pk       4368:                                free(sp->sg_pte, M_VMPMAP);
1.43      pk       4369:                                sp->sg_pte = NULL;
                   4370:                                me_free(pm, sp->sg_pmeg);
                   4371:
                   4372:                                if (--rp->rg_nsegmap == 0) {
1.69      pk       4373: #if defined(SUN4_MMU3L)
1.72      pk       4374:                                        if (HASSUN4_MMU3L &&
                   4375:                                            rp->rg_smeg != reginval) {
1.43      pk       4376:                                                if (pm->pm_ctx)
                   4377:                                                        setregmap(va, reginval);
                   4378:                                                region_free(pm, rp->rg_smeg);
                   4379:                                        }
                   4380: #endif
1.49      pk       4381:                                        free(rp->rg_segmap, M_VMPMAP);
1.43      pk       4382:                                        rp->rg_segmap = NULL;
                   4383:                                        GAP_WIDEN(pm,vr);
1.1       deraadt  4384:                                }
                   4385:                        }
                   4386:                }
                   4387:        nextpv:
                   4388:                npv = pv->pv_next;
                   4389:                if (pv != pv0)
1.81      pk       4390:                        FREE(pv, M_VMPVENT);
1.1       deraadt  4391:                if ((pv = npv) == NULL)
                   4392:                        break;
                   4393:        }
                   4394:        pv0->pv_pmap = NULL;
1.11      pk       4395:        pv0->pv_next = NULL; /* ? */
1.1       deraadt  4396:        pv0->pv_flags = flags;
1.71      pk       4397:        setcontext4(ctx);
1.1       deraadt  4398:        splx(s);
                   4399: }
                   4400:
                   4401: /*
                   4402:  * Lower (make more strict) the protection on the specified
                   4403:  * range of this pmap.
                   4404:  *
                   4405:  * There are only two cases: either the protection is going to 0
                   4406:  * (in which case we call pmap_remove to do the dirty work), or
                   4407:  * it is going from read/write to read-only.  The latter is
                   4408:  * fairly easy.
                   4409:  */
                   4410: void
1.55      pk       4411: pmap_protect4_4c(pm, sva, eva, prot)
1.1       deraadt  4412:        register struct pmap *pm;
                   4413:        vm_offset_t sva, eva;
                   4414:        vm_prot_t prot;
                   4415: {
1.53      christos 4416:        register int va, nva, vr, vs;
1.1       deraadt  4417:        register int s, ctx;
1.43      pk       4418:        struct regmap *rp;
                   4419:        struct segmap *sp;
1.1       deraadt  4420:
                   4421:        if (pm == NULL || prot & VM_PROT_WRITE)
                   4422:                return;
1.43      pk       4423:
1.1       deraadt  4424:        if ((prot & VM_PROT_READ) == 0) {
                   4425:                pmap_remove(pm, sva, eva);
                   4426:                return;
                   4427:        }
                   4428:
                   4429:        write_user_windows();
1.71      pk       4430:        ctx = getcontext4();
1.1       deraadt  4431:        s = splpmap();
                   4432:        simple_lock(&pm->pm_lock);
                   4433:
                   4434:        for (va = sva; va < eva;) {
1.43      pk       4435:                vr = VA_VREG(va);
                   4436:                vs = VA_VSEG(va);
                   4437:                rp = &pm->pm_regmap[vr];
                   4438:                nva = VSTOVA(vr,vs + 1);
1.1       deraadt  4439:                if (nva == 0) panic("pmap_protect: last segment");      /* cannot happen */
                   4440:                if (nva > eva)
                   4441:                        nva = eva;
1.43      pk       4442:                if (rp->rg_nsegmap == 0) {
1.1       deraadt  4443:                        va = nva;
                   4444:                        continue;
                   4445:                }
1.43      pk       4446: #ifdef DEBUG
                   4447:                if (rp->rg_segmap == NULL)
                   4448:                        panic("pmap_protect: no segments");
                   4449: #endif
                   4450:                sp = &rp->rg_segmap[vs];
                   4451:                if (sp->sg_npte == 0) {
                   4452:                        va = nva;
                   4453:                        continue;
                   4454:                }
                   4455: #ifdef DEBUG
                   4456:                if (pm != pmap_kernel() && sp->sg_pte == NULL)
                   4457:                        panic("pmap_protect: no pages");
                   4458: #endif
                   4459:                if (sp->sg_pmeg == seginval) {
                   4460:                        register int *pte = &sp->sg_pte[VA_VPG(va)];
1.1       deraadt  4461:
                   4462:                        /* not in MMU; just clear PG_W from core copies */
                   4463:                        for (; va < nva; va += NBPG)
                   4464:                                *pte++ &= ~PG_W;
                   4465:                } else {
                   4466:                        /* in MMU: take away write bits from MMU PTEs */
1.43      pk       4467:                        if (CTX_USABLE(pm,rp)) {
1.1       deraadt  4468:                                register int tpte;
                   4469:
                   4470:                                /*
                   4471:                                 * Flush cache so that any existing cache
                   4472:                                 * tags are updated.  This is really only
                   4473:                                 * needed for PTEs that lose PG_W.
                   4474:                                 */
1.71      pk       4475:                                setcontext4(pm->pm_ctxnum);
1.1       deraadt  4476:                                for (; va < nva; va += NBPG) {
1.55      pk       4477:                                        tpte = getpte4(va);
1.1       deraadt  4478:                                        pmap_stats.ps_npg_prot_all++;
1.35      pk       4479:                                        if ((tpte & (PG_W|PG_TYPE)) ==
                   4480:                                            (PG_W|PG_OBMEM)) {
1.1       deraadt  4481:                                                pmap_stats.ps_npg_prot_actual++;
1.69      pk       4482:                                                cache_flush_page(va);
1.55      pk       4483:                                                setpte4(va, tpte & ~PG_W);
1.1       deraadt  4484:                                        }
                   4485:                                }
                   4486:                        } else {
                   4487:                                register int pteva;
                   4488:
                   4489:                                /*
                   4490:                                 * No context, hence not cached;
                   4491:                                 * just update PTEs.
                   4492:                                 */
1.71      pk       4493:                                setcontext4(0);
1.1       deraadt  4494:                                /* XXX use per-cpu pteva? */
1.69      pk       4495:                                if (HASSUN4_MMU3L)
1.43      pk       4496:                                        setregmap(0, tregion);
                   4497:                                setsegmap(0, sp->sg_pmeg);
1.18      deraadt  4498:                                pteva = VA_VPG(va) << PGSHIFT;
1.1       deraadt  4499:                                for (; va < nva; pteva += NBPG, va += NBPG)
1.55      pk       4500:                                        setpte4(pteva, getpte4(pteva) & ~PG_W);
1.1       deraadt  4501:                        }
                   4502:                }
                   4503:        }
                   4504:        simple_unlock(&pm->pm_lock);
1.12      pk       4505:        splx(s);
1.71      pk       4506:        setcontext4(ctx);
1.1       deraadt  4507: }
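                          /*
                           * Editor's note (illustrative aside, not part of the source): the
                           * cache flush before clearing PG_W in pmap_protect4_4c above is
                           * what keeps a write-back VAC coherent: dirty lines must reach
                           * memory while the mapping is still writable.  Condensed sketch:
                           */
                          #if 0   /* editor's sketch, condensed from pmap_protect4_4c */
                                  tpte = getpte4(va);
                                  if ((tpte & (PG_W|PG_TYPE)) == (PG_W|PG_OBMEM)) {
                                          cache_flush_page(va);       /* write back dirty lines */
                                          setpte4(va, tpte & ~PG_W);  /* then revoke write access */
                                  }
                          #endif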
                   4508:
                   4509: /*
                   4510:  * Change the protection and/or wired status of the given (MI) virtual page.
                   4511:  * XXX: should have separate function (or flag) telling whether only wiring
                   4512:  * is changing.
                   4513:  */
                   4514: void
1.55      pk       4515: pmap_changeprot4_4c(pm, va, prot, wired)
1.1       deraadt  4516:        register struct pmap *pm;
                   4517:        register vm_offset_t va;
                   4518:        vm_prot_t prot;
                   4519:        int wired;
                   4520: {
1.53      christos 4521:        register int vr, vs, tpte, newprot, ctx, s;
1.43      pk       4522:        struct regmap *rp;
                   4523:        struct segmap *sp;
1.1       deraadt  4524:
                   4525: #ifdef DEBUG
                   4526:        if (pmapdebug & PDB_CHANGEPROT)
1.66      christos 4527:                printf("pmap_changeprot(%p, %lx, %x, %x)\n",
1.1       deraadt  4528:                    pm, va, prot, wired);
                   4529: #endif
                   4530:
                   4531:        write_user_windows();   /* paranoia */
                   4532:
1.64      pk       4533:        va &= ~(NBPG-1);
1.42      mycroft  4534:        if (pm == pmap_kernel())
1.1       deraadt  4535:                newprot = prot & VM_PROT_WRITE ? PG_S|PG_W : PG_S;
                   4536:        else
                   4537:                newprot = prot & VM_PROT_WRITE ? PG_W : 0;
1.43      pk       4538:        vr = VA_VREG(va);
                   4539:        vs = VA_VSEG(va);
1.1       deraadt  4540:        s = splpmap();          /* conservative */
1.43      pk       4541:        rp = &pm->pm_regmap[vr];
                    4542:        if (rp->rg_nsegmap == 0) {
1.66      christos 4543:                printf("pmap_changeprot: no segments in %d\n", vr);
                                        splx(s);
1.43      pk       4544:                return;
                    4545:        }
                    4546:        if (rp->rg_segmap == NULL) {
1.66      christos 4547:                printf("pmap_changeprot: no segment map in %d\n", vr);
                                        splx(s);
1.43      pk       4548:                return;
                    4549:        }
                   4550:        sp = &rp->rg_segmap[vs];
                   4551:
1.1       deraadt  4552:        pmap_stats.ps_changeprots++;
                   4553:
1.43      pk       4554: #ifdef DEBUG
                   4555:        if (pm != pmap_kernel() && sp->sg_pte == NULL)
                   4556:                panic("pmap_changeprot: no pages");
                   4557: #endif
                   4558:
1.1       deraadt  4559:        /* update PTEs in software or hardware */
1.43      pk       4560:        if (sp->sg_pmeg == seginval) {
                   4561:                register int *pte = &sp->sg_pte[VA_VPG(va)];
1.1       deraadt  4562:
                   4563:                /* update in software */
                   4564:                if ((*pte & PG_PROT) == newprot)
                   4565:                        goto useless;
                   4566:                *pte = (*pte & ~PG_PROT) | newprot;
                   4567:        } else {
                   4568:                /* update in hardware */
1.71      pk       4569:                ctx = getcontext4();
1.43      pk       4570:                if (CTX_USABLE(pm,rp)) {
1.1       deraadt  4571:                        /* use current context; flush writeback cache */
1.71      pk       4572:                        setcontext4(pm->pm_ctxnum);
1.55      pk       4573:                        tpte = getpte4(va);
1.11      pk       4574:                        if ((tpte & PG_PROT) == newprot) {
1.71      pk       4575:                                setcontext4(ctx);
1.1       deraadt  4576:                                goto useless;
1.11      pk       4577:                        }
1.69      pk       4578:                        if (CACHEINFO.c_vactype == VAC_WRITEBACK &&
1.35      pk       4579:                            (tpte & (PG_U|PG_NC|PG_TYPE)) == (PG_U|PG_OBMEM))
1.1       deraadt  4580:                                cache_flush_page((int)va);
                   4581:                } else {
1.71      pk       4582:                        setcontext4(0);
1.1       deraadt  4583:                        /* XXX use per-cpu va? */
1.69      pk       4584:                        if (HASSUN4_MMU3L)
1.43      pk       4585:                                setregmap(0, tregion);
                   4586:                        setsegmap(0, sp->sg_pmeg);
1.18      deraadt  4587:                        va = VA_VPG(va) << PGSHIFT;
1.55      pk       4588:                        tpte = getpte4(va);
1.11      pk       4589:                        if ((tpte & PG_PROT) == newprot) {
1.71      pk       4590:                                setcontext4(ctx);
1.1       deraadt  4591:                                goto useless;
1.11      pk       4592:                        }
1.1       deraadt  4593:                }
                   4594:                tpte = (tpte & ~PG_PROT) | newprot;
1.55      pk       4595:                setpte4(va, tpte);
1.71      pk       4596:                setcontext4(ctx);
1.1       deraadt  4597:        }
                   4598:        splx(s);
                   4599:        return;
                   4600:
                   4601: useless:
                   4602:        /* only wiring changed, and we ignore wiring */
                   4603:        pmap_stats.ps_useless_changeprots++;
                   4604:        splx(s);
                   4605: }
                   4606:
1.55      pk       4607: #endif /* sun4, 4c */
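                          /*
                           * Editor's note (illustrative aside, not part of the source):
                           * pmap_changeprot4_4c above short-circuits when the PTE already
                           * holds the requested protection; since wiring is ignored on
                           * these MMUs, such calls merely bump a counter.  Condensed
                           * sketch:
                           */
                          #if 0   /* editor's sketch of the short-circuit */
                                  if ((tpte & PG_PROT) == newprot) {
                                          /* only wiring changed, and we ignore wiring */
                                          pmap_stats.ps_useless_changeprots++;
                                          return;
                                  }
                          #endif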
                   4608:
                   4609: #if defined(SUN4M)             /* 4M version of protection routines above */
1.1       deraadt  4610: /*
1.55      pk       4611:  * Lower (make more strict) the protection on the specified
                   4612:  * physical page.
1.1       deraadt  4613:  *
1.55      pk       4614:  * There are only two cases: either the protection is going to 0
                   4615:  * (in which case we do the dirty work here), or it is going
                   4616:  * to read-only (in which case pv_changepte does the trick).
1.1       deraadt  4617:  */
                   4618: void
1.55      pk       4619: pmap_page_protect4m(pa, prot)
                   4620:        vm_offset_t pa;
1.1       deraadt  4621:        vm_prot_t prot;
                   4622: {
1.55      pk       4623:        register struct pvlist *pv, *pv0, *npv;
                   4624:        register struct pmap *pm;
                   4625:        register int va, vr, vs, tpte;
                   4626:        register int flags, nleft, s, ctx;
                   4627:        struct regmap *rp;
                   4628:        struct segmap *sp;
1.45      pk       4629:
                   4630: #ifdef DEBUG
1.55      pk       4631:        if (!pmap_pa_exists(pa))
                   4632:                panic("pmap_page_protect: no such address: 0x%lx", pa);
                   4633:        if ((pmapdebug & PDB_CHANGEPROT) ||
                   4634:            (pmapdebug & PDB_REMOVE && prot == VM_PROT_NONE))
1.66      christos 4635:                printf("pmap_page_protect(%lx, %x)\n", pa, prot);
1.45      pk       4636: #endif
1.55      pk       4637:        /*
                   4638:         * Skip unmanaged pages, or operations that do not take
                   4639:         * away write permission.
                   4640:         */
                   4641:        if (!managed(pa) || prot & VM_PROT_WRITE)
                   4642:                return;
                   4643:        write_user_windows();   /* paranoia */
                   4644:        if (prot & VM_PROT_READ) {
                   4645:                pv_changepte4m(pvhead(pa), 0, PPROT_WRITE);
1.45      pk       4646:                return;
                   4647:        }
1.39      pk       4648:
1.1       deraadt  4649:        /*
1.55      pk       4650:         * Remove all access to all people talking to this page.
                   4651:         * Walk down PV list, removing all mappings.
                   4652:         * The logic is much like that for pmap_remove,
                   4653:         * but we know we are removing exactly one page.
1.1       deraadt  4654:         */
1.55      pk       4655:        pv = pvhead(pa);
                   4656:        s = splpmap();
                   4657:        if ((pm = pv->pv_pmap) == NULL) {
                   4658:                splx(s);
                   4659:                return;
1.1       deraadt  4660:        }
1.71      pk       4661:        ctx = getcontext4m();
1.55      pk       4662:        pv0 = pv;
                   4663:        flags = pv->pv_flags /*| PV_C4M*/;      /* %%%: ???? */
                   4664:        for (;; pm = pv->pv_pmap) {
                   4665:                va = pv->pv_va;
                   4666:                vr = VA_VREG(va);
                   4667:                vs = VA_VSEG(va);
                   4668:                rp = &pm->pm_regmap[vr];
                   4669:                if (rp->rg_nsegmap == 0)
                    4670:                        panic("pmap_page_protect: empty vreg");
                   4671:                sp = &rp->rg_segmap[vs];
                   4672:                if ((nleft = sp->sg_npte) == 0)
                    4673:                        panic("pmap_page_protect: empty vseg");
                   4674:                nleft--;
                   4675:                sp->sg_npte = nleft;
1.1       deraadt  4676:
1.55      pk       4677:                /* Invalidate PTE in MMU pagetables. Flush cache if necessary */
1.72      pk       4678:                if (pm->pm_ctx) {
1.71      pk       4679:                        setcontext4m(pm->pm_ctxnum);
1.69      pk       4680:                        cache_flush_page(va);
1.55      pk       4681:                        tlb_flush_page(va);
1.72      pk       4682:                }
                   4683:
                   4684:                tpte = sp->sg_pte[VA_SUN4M_VPG(va)];
1.1       deraadt  4685:
1.55      pk       4686:                if ((tpte & SRMMU_TETYPE) != SRMMU_TEPTE)
                   4687:                        panic("pmap_page_protect !PG_V");
1.72      pk       4688:
1.55      pk       4689:                flags |= MR4M(tpte);
1.43      pk       4690:
1.83    ! pk       4691:                if (nleft) {
        !          4692:                        setpgt4m(&sp->sg_pte[VA_SUN4M_VPG(va)], SRMMU_TEINVALID);
        !          4693:                        goto nextpv;
        !          4694:                }
        !          4695:
        !          4696:                /* Entire segment is gone */
        !          4697:                if (pm == pmap_kernel()) {
        !          4698:                        tlb_flush_segment(vr, vs); /* Paranoid? */
1.72      pk       4699:                        setpgt4m(&sp->sg_pte[VA_SUN4M_VPG(va)], SRMMU_TEINVALID);
1.83    ! pk       4700:                        if (va < virtual_avail) {
1.55      pk       4701: #ifdef DEBUG
1.83    ! pk       4702:                                printf(
        !          4703:                                 "pmap_page_protect: attempt to free"
        !          4704:                                 " base kernel allocation\n");
1.55      pk       4705: #endif
1.83    ! pk       4706:                                goto nextpv;
        !          4707:                        }
1.72      pk       4708: #if 0 /* no need for this */
1.83    ! pk       4709:                        /* no need to free the table; it is static */
        !          4710:                        qzero(sp->sg_pte, SRMMU_L3SIZE * sizeof(int));
1.72      pk       4711: #endif
1.43      pk       4712:
1.83    ! pk       4713:                        /* if we're done with a region, leave it wired */
1.55      pk       4714:
1.83    ! pk       4715:                } else {        /* User mode mapping */
        !          4716:                        if (pm->pm_ctx)
        !          4717:                                tlb_flush_segment(vr, vs);
        !          4718:                        setpgt4m(&rp->rg_seg_ptps[vs], SRMMU_TEINVALID);
        !          4719:                        free(sp->sg_pte, M_VMPMAP);
        !          4720:                        sp->sg_pte = NULL;
1.55      pk       4721:
1.83    ! pk       4722:                        if (--rp->rg_nsegmap == 0) {
        !          4723:                                free(rp->rg_segmap, M_VMPMAP);
        !          4724:                                rp->rg_segmap = NULL;
        !          4725:                                free(rp->rg_seg_ptps, M_VMPMAP);
        !          4726:                                setpgt4m(&pm->pm_reg_ptps[vr],
        !          4727:                                        SRMMU_TEINVALID);
1.55      pk       4728:                        }
                   4729:                }
1.83    ! pk       4730:
1.55      pk       4731:        nextpv:
                   4732:                npv = pv->pv_next;
                   4733:                if (pv != pv0)
1.81      pk       4734:                        FREE(pv, M_VMPVENT);
1.55      pk       4735:                if ((pv = npv) == NULL)
                   4736:                        break;
                   4737:        }
                   4738:        pv0->pv_pmap = NULL;
                   4739:        pv0->pv_next = NULL; /* ? */
                   4740:        pv0->pv_flags = flags;
1.71      pk       4741:        setcontext4m(ctx);
1.55      pk       4742:        splx(s);
                   4743: }
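                          /*
                           * Editor's note (illustrative aside, not part of the source): the
                           * 4m page-protect path above must flush both the cache and the
                           * TLB while the owning context is still loaded, before the
                           * in-memory PTE is invalidated.  Condensed sketch:
                           */
                          #if 0   /* editor's sketch, condensed from pmap_page_protect4m */
                                  if (pm->pm_ctx) {
                                          setcontext4m(pm->pm_ctxnum);
                                          cache_flush_page(va);
                                          tlb_flush_page(va);
                                  }
                                  setpgt4m(&sp->sg_pte[VA_SUN4M_VPG(va)], SRMMU_TEINVALID);
                          #endif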
                   4744:
                   4745: /*
                   4746:  * Lower (make more strict) the protection on the specified
                   4747:  * range of this pmap.
                   4748:  *
                   4749:  * There are only two cases: either the protection is going to 0
                   4750:  * (in which case we call pmap_remove to do the dirty work), or
                   4751:  * it is going from read/write to read-only.  The latter is
                   4752:  * fairly easy.
                   4753:  */
                   4754: void
                   4755: pmap_protect4m(pm, sva, eva, prot)
                   4756:        register struct pmap *pm;
                   4757:        vm_offset_t sva, eva;
                   4758:        vm_prot_t prot;
                   4759: {
                   4760:        register int va, nva, vr, vs;
                   4761:        register int s, ctx;
                   4762:        struct regmap *rp;
                   4763:        struct segmap *sp;
                   4764:
                   4765:        if (pm == NULL || prot & VM_PROT_WRITE)
                   4766:                return;
                   4767:
                   4768:        if ((prot & VM_PROT_READ) == 0) {
                   4769:                pmap_remove(pm, sva, eva);
                   4770:                return;
                   4771:        }
                   4772:
                   4773:        write_user_windows();
1.71      pk       4774:        ctx = getcontext4m();
1.55      pk       4775:        s = splpmap();
                   4776:        simple_lock(&pm->pm_lock);
                   4777:
                   4778:        for (va = sva; va < eva;) {
                   4779:                vr = VA_VREG(va);
                   4780:                vs = VA_VSEG(va);
                   4781:                rp = &pm->pm_regmap[vr];
                   4782:                nva = VSTOVA(vr,vs + 1);
                   4783:                if (nva == 0)   /* XXX */
                   4784:                        panic("pmap_protect: last segment"); /* cannot happen (why?) */
                   4785:                if (nva > eva)
                   4786:                        nva = eva;
                   4787:                if (rp->rg_nsegmap == 0) {
                   4788:                        va = nva;
                   4789:                        continue;
                   4790:                }
                   4791: #ifdef DEBUG
                   4792:                if (rp->rg_segmap == NULL)
                   4793:                        panic("pmap_protect: no segments");
                   4794: #endif
                   4795:                sp = &rp->rg_segmap[vs];
                   4796:                if (sp->sg_npte == 0) {
                   4797:                        va = nva;
                   4798:                        continue;
                   4799:                }
                   4800: #ifdef DEBUG
                   4801:                if (sp->sg_pte == NULL)
                   4802:                        panic("pmap_protect: no pages");
                   4803: #endif
1.72      pk       4804:                /* pages loaded: take away write bits from MMU PTEs */
                   4805:                if (pm->pm_ctx)
                   4806:                        setcontext4m(pm->pm_ctxnum);
                   4807:
                   4808:                pmap_stats.ps_npg_prot_all += (nva - va) >> PGSHIFT;
                   4809:                for (; va < nva; va += NBPG) {
                   4810:                        int tpte;
                   4811:                        tpte = sp->sg_pte[VA_SUN4M_VPG(va)];
1.55      pk       4812:                        /*
                   4813:                         * Flush cache so that any existing cache
                   4814:                         * tags are updated.  This is really only
                   4815:                         * needed for PTEs that lose PG_W.
                   4816:                         */
1.72      pk       4817:                        if ((tpte & (PPROT_WRITE|SRMMU_PGTYPE)) ==
                   4818:                            (PPROT_WRITE|PG_SUN4M_OBMEM)) {
                   4819:                                pmap_stats.ps_npg_prot_actual++;
                   4820:                                if (pm->pm_ctx) {
1.69      pk       4821:                                        cache_flush_page(va);
1.72      pk       4822:                                        tlb_flush_page(va);
1.55      pk       4823:                                }
1.72      pk       4824:                                setpgt4m(&sp->sg_pte[VA_SUN4M_VPG(va)],
                   4825:                                         tpte & ~PPROT_WRITE);
1.55      pk       4826:                        }
                   4827:                }
                   4828:        }
                   4829:        simple_unlock(&pm->pm_lock);
                   4830:        splx(s);
1.71      pk       4831:        setcontext4m(ctx);
1.55      pk       4832: }
                   4833:
                   4834: /*
                   4835:  * Change the protection and/or wired status of the given (MI) virtual page.
                   4836:  * XXX: should have separate function (or flag) telling whether only wiring
                   4837:  * is changing.
                   4838:  */
                   4839: void
                   4840: pmap_changeprot4m(pm, va, prot, wired)
                   4841:        register struct pmap *pm;
                   4842:        register vm_offset_t va;
                   4843:        vm_prot_t prot;
                   4844:        int wired;
                   4845: {
                   4846:        register int tpte, newprot, ctx, s;
                   4847:
                   4848: #ifdef DEBUG
                   4849:        if (pmapdebug & PDB_CHANGEPROT)
1.66      christos 4850:                printf("pmap_changeprot(%p, %lx, %x, %x)\n",
1.55      pk       4851:                    pm, va, prot, wired);
                   4852: #endif
                   4853:
                   4854:        write_user_windows();   /* paranoia */
                   4855:
1.64      pk       4856:        va &= ~(NBPG-1);
1.55      pk       4857:        if (pm == pmap_kernel())
                   4858:                newprot = prot & VM_PROT_WRITE ? PPROT_N_RWX : PPROT_N_RX;
                   4859:        else
                   4860:                newprot = prot & VM_PROT_WRITE ? PPROT_RWX_RWX : PPROT_RX_RX;
                   4861:
                   4862:        pmap_stats.ps_changeprots++;
                   4863:
                   4864:        s = splpmap();          /* conservative */
1.71      pk       4865:        ctx = getcontext4m();
1.55      pk       4866:        if (pm->pm_ctx) {
1.71      pk       4867:                setcontext4m(pm->pm_ctxnum);
1.55      pk       4868:                tpte = getpte4m(va);
1.69      pk       4869:                if (CACHEINFO.c_vactype == VAC_WRITEBACK &&
1.60      pk       4870:                    (tpte & SRMMU_PGTYPE) == PG_SUN4M_OBMEM)
                   4871:                        cache_flush_page(va); /* XXX: paranoia? */
1.55      pk       4872:        } else {
                   4873:                tpte = getptesw4m(pm, va);
                   4874:        }
                   4875:        if ((tpte & SRMMU_PROT_MASK) == newprot) {
1.72      pk       4876:                /* only wiring changed, and we ignore wiring */
                   4877:                pmap_stats.ps_useless_changeprots++;
                   4878:                goto out;
1.55      pk       4879:        }
                   4880:        if (pm->pm_ctx)
1.60      pk       4881:                setpte4m(va, (tpte & ~SRMMU_PROT_MASK) | newprot);
1.55      pk       4882:        else
1.60      pk       4883:                setptesw4m(pm, va, (tpte & ~SRMMU_PROT_MASK) | newprot);
1.72      pk       4884:
                   4885: out:
1.71      pk       4886:        setcontext4m(ctx);
1.55      pk       4887:        splx(s);
                   4888: }
                   4889: #endif /* 4m */
                   4890:
                   4891: /*
                   4892:  * Insert (MI) physical page pa at virtual address va in the given pmap.
                   4893:  * NB: the pa parameter includes type bits PMAP_OBIO, PMAP_NC as necessary.
                   4894:  *
                   4895:  * If pa is not in the `managed' range it will not be `bank mapped'.
                   4896:  * This works during bootstrap only because the first 4MB happens to
                   4897:  * map one-to-one.
                   4898:  *
                   4899:  * There may already be something else there, or we might just be
                   4900:  * changing protections and/or wiring on an existing mapping.
                   4901:  *     XXX     should have different entry points for changing!
                   4902:  */
                   4903:
                   4904: #if defined(SUN4) || defined(SUN4C)
                   4905:
                   4906: void
                   4907: pmap_enter4_4c(pm, va, pa, prot, wired)
                   4908:        register struct pmap *pm;
                   4909:        vm_offset_t va, pa;
                   4910:        vm_prot_t prot;
                   4911:        int wired;
                   4912: {
                   4913:        register struct pvlist *pv;
                   4914:        register int pteproto, ctx;
                   4915:
                   4916:        if (pm == NULL)
                   4917:                return;
                   4918:
                   4919:        if (VA_INHOLE(va)) {
                   4920: #ifdef DEBUG
1.66      christos 4921:                printf("pmap_enter: pm %p, va %lx, pa %lx: in MMU hole\n",
1.55      pk       4922:                        pm, va, pa);
                   4923: #endif
                   4924:                return;
                   4925:        }
                   4926:
                   4927: #ifdef DEBUG
                   4928:        if (pmapdebug & PDB_ENTER)
1.66      christos 4929:                printf("pmap_enter(%p, %lx, %lx, %x, %x)\n",
1.55      pk       4930:                    pm, va, pa, prot, wired);
                   4931: #endif
                   4932:
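                                 /*
                                  * PMAP_T2PTE_4 converts the PMAP_OBIO/PMAP_NC hint bits
                                  * passed in `pa' into the corresponding PTE bits; the
                                  * hints are then stripped from the physical address.
                                  */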
1.82      pk       4933:        pteproto = PG_V | PMAP_T2PTE_4(pa);
                   4934:        pa &= ~PMAP_TNC_4;
1.55      pk       4935:        /*
                   4936:         * Set up prototype for new PTE.  Cannot set PG_NC from PV_NC yet
                   4937:         * since the pvlist no-cache bit might change as a result of the
                   4938:         * new mapping.
                   4939:         */
                   4940:        if ((pteproto & PG_TYPE) == PG_OBMEM && managed(pa)) {
                   4941: #ifdef DIAGNOSTIC
                   4942:                if (!pmap_pa_exists(pa))
                   4943:                        panic("pmap_enter: no such address: %lx", pa);
                   4944: #endif
                   4945:                pv = pvhead(pa);
                   4946:        } else {
                   4947:                pv = NULL;
                   4948:        }
1.60      pk       4949:        pteproto |= atop(pa) & PG_PFNUM;
1.55      pk       4950:        if (prot & VM_PROT_WRITE)
                   4951:                pteproto |= PG_W;
                   4952:
1.71      pk       4953:        ctx = getcontext4();
1.55      pk       4954:        if (pm == pmap_kernel())
                   4955:                pmap_enk4_4c(pm, va, prot, wired, pv, pteproto | PG_S);
                   4956:        else
                   4957:                pmap_enu4_4c(pm, va, prot, wired, pv, pteproto);
1.71      pk       4958:        setcontext4(ctx);
1.55      pk       4959: }
                   4960:
                   4961: /* enter new (or change existing) kernel mapping */
                   4962: void
                   4963: pmap_enk4_4c(pm, va, prot, wired, pv, pteproto)
                   4964:        register struct pmap *pm;
                   4965:        vm_offset_t va;
                   4966:        vm_prot_t prot;
                   4967:        int wired;
                   4968:        register struct pvlist *pv;
                   4969:        register int pteproto;
                   4970: {
                   4971:        register int vr, vs, tpte, i, s;
                   4972:        struct regmap *rp;
                   4973:        struct segmap *sp;
                   4974:
                   4975:        vr = VA_VREG(va);
                   4976:        vs = VA_VSEG(va);
                   4977:        rp = &pm->pm_regmap[vr];
                   4978:        sp = &rp->rg_segmap[vs];
                   4979:        s = splpmap();          /* XXX way too conservative */
                   4980:
1.69      pk       4981: #if defined(SUN4_MMU3L)
                   4982:        if (HASSUN4_MMU3L && rp->rg_smeg == reginval) {
1.55      pk       4983:                vm_offset_t tva;
                   4984:                rp->rg_smeg = region_alloc(&region_locked, pm, vr)->me_cookie;
                   4985:                i = ncontext - 1;
                   4986:                do {
1.71      pk       4987:                        setcontext4(i);
1.55      pk       4988:                        setregmap(va, rp->rg_smeg);
                   4989:                } while (--i >= 0);
1.1       deraadt  4990:
1.43      pk       4991:                /* load the segment maps for all segments in this region */
                   4992:                tva = VA_ROUNDDOWNTOREG(va);
                   4993:                for (i = 0; i < NSEGRG; i++) {
                   4994:                        setsegmap(tva, rp->rg_segmap[i].sg_pmeg);
                   4995:                        tva += NBPSG;
                   4996:                }
                   4997:        }
                   4998: #endif
1.55      pk       4999:        if (sp->sg_pmeg != seginval && (tpte = getpte4(va)) & PG_V) {
1.34      pk       5000:                register int addr;
1.1       deraadt  5001:
1.34      pk       5002:                /* old mapping exists, and is of the same pa type */
                   5003:                if ((tpte & (PG_PFNUM|PG_TYPE)) ==
                   5004:                    (pteproto & (PG_PFNUM|PG_TYPE))) {
1.1       deraadt  5005:                        /* just changing protection and/or wiring */
                   5006:                        splx(s);
1.81      pk       5007:                        pmap_changeprot4_4c(pm, va, prot, wired);
1.1       deraadt  5008:                        return;
                   5009:                }
                   5010:
1.34      pk       5011:                if ((tpte & PG_TYPE) == PG_OBMEM) {
1.43      pk       5012: #ifdef DEBUG
1.66      christos 5013: printf("pmap_enk: changing existing va=>pa entry: va %lx, pteproto %x\n",
1.43      pk       5014:        va, pteproto);
                   5015: #endif
1.34      pk       5016:                        /*
                   5017:                         * Switcheroo: changing pa for this va.
                   5018:                         * If old pa was managed, remove from pvlist.
                   5019:                         * If old page was cached, flush cache.
                   5020:                         */
1.60      pk       5021:                        addr = ptoa(tpte & PG_PFNUM);
1.31      pk       5022:                        if (managed(addr))
1.58      pk       5023:                                pv_unlink4_4c(pvhead(addr), pm, va);
1.34      pk       5024:                        if ((tpte & PG_NC) == 0) {
1.71      pk       5025:                                setcontext4(0); /* ??? */
1.69      pk       5026:                                cache_flush_page((int)va);
1.34      pk       5027:                        }
1.1       deraadt  5028:                }
                   5029:        } else {
                   5030:                /* adding new entry */
1.43      pk       5031:                sp->sg_npte++;
1.1       deraadt  5032:        }
                   5033:
                   5034:        /*
                   5035:         * If the new mapping is for a managed PA, enter into pvlist.
                   5036:         * Note that the mapping for a malloc page will always be
                   5037:         * unique (hence will never cause a second call to malloc).
                   5038:         */
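                                 /*
                                  * pv_link4m returns PG_NC if a cache alias forces this
                                  * mapping to be entered uncached.
                                  */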
                   5039:        if (pv != NULL)
1.58      pk       5040:                pteproto |= pv_link4_4c(pv, pm, va);
1.1       deraadt  5041:
1.43      pk       5042:        if (sp->sg_pmeg == seginval) {
1.1       deraadt  5043:                register int tva;
                   5044:
                   5045:                /*
                   5046:                 * Allocate an MMU entry now (on locked list),
                   5047:                 * and map it into every context.  Set all its
                   5048:                 * PTEs invalid (we will then overwrite one, but
                   5049:                 * this is more efficient than looping twice).
                   5050:                 */
                   5051: #ifdef DEBUG
                   5052:                if (pm->pm_ctx == NULL || pm->pm_ctxnum != 0)
                   5053:                        panic("pmap_enk: kern seg but no kern ctx");
                   5054: #endif
1.43      pk       5055:                sp->sg_pmeg = me_alloc(&segm_locked, pm, vr, vs)->me_cookie;
                   5056:                rp->rg_nsegmap++;
                   5057:
1.69      pk       5058: #if defined(SUN4_MMU3L)
                   5059:                if (HASSUN4_MMU3L)
1.43      pk       5060:                        setsegmap(va, sp->sg_pmeg);
                   5061:                else
                   5062: #endif
                   5063:                {
                   5064:                        i = ncontext - 1;
                   5065:                        do {
1.71      pk       5066:                                setcontext4(i);
1.43      pk       5067:                                setsegmap(va, sp->sg_pmeg);
                   5068:                        } while (--i >= 0);
                   5069:                }
1.1       deraadt  5070:
                   5071:                /* set all PTEs to invalid, then overwrite one PTE below */
                   5072:                tva = VA_ROUNDDOWNTOSEG(va);
                   5073:                i = NPTESG;
                   5074:                do {
1.55      pk       5075:                        setpte4(tva, 0);
1.1       deraadt  5076:                        tva += NBPG;
                   5077:                } while (--i > 0);
                   5078:        }
                   5079:
                   5080:        /* ptes kept in hardware only */
1.55      pk       5081:        setpte4(va, pteproto);
1.1       deraadt  5082:        splx(s);
                   5083: }
                   5084:
                   5085: /* enter new (or change existing) user mapping */
1.53      christos 5086: void
1.55      pk       5087: pmap_enu4_4c(pm, va, prot, wired, pv, pteproto)
1.1       deraadt  5088:        register struct pmap *pm;
                   5089:        vm_offset_t va;
                   5090:        vm_prot_t prot;
                   5091:        int wired;
                   5092:        register struct pvlist *pv;
                   5093:        register int pteproto;
                   5094: {
1.43      pk       5095:        register int vr, vs, *pte, tpte, pmeg, s, doflush;
                   5096:        struct regmap *rp;
                   5097:        struct segmap *sp;
1.1       deraadt  5098:
                   5099:        write_user_windows();           /* XXX conservative */
1.43      pk       5100:        vr = VA_VREG(va);
                   5101:        vs = VA_VSEG(va);
                   5102:        rp = &pm->pm_regmap[vr];
1.1       deraadt  5103:        s = splpmap();                  /* XXX conservative */
                   5104:
                   5105:        /*
                   5106:         * If there is no space in which the PTEs can be written
                   5107:         * while they are not in the hardware, this must be a new
                   5108:         * virtual segment.  Get PTE space and count the segment.
                   5109:         *
                   5110:         * TO SPEED UP CTX ALLOC, PUT SEGMENT BOUNDS STUFF HERE
                   5111:         * AND IN pmap_rmu()
                   5112:         */
1.13      pk       5113:
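                                 /*
                                  * Shrink the recorded gap of empty regions so that vr
                                  * lies outside it; the gap bounds the work done when
                                  * this pmap's mappings are loaded into a new context.
                                  */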
1.43      pk       5114:        GAP_SHRINK(pm,vr);
1.13      pk       5115:
                   5116: #ifdef DEBUG
                   5117:        if (pm->pm_gap_end < pm->pm_gap_start) {
1.66      christos 5118:                printf("pmap_enu: gap_start %x, gap_end %x",
1.13      pk       5119:                        pm->pm_gap_start, pm->pm_gap_end);
                   5120:                panic("pmap_enu: gap botch");
                   5121:        }
                   5122: #endif
                   5123:
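                                 /*
                                  * Allocate the segment array for this region if needed.
                                  * malloc(M_WAITOK) may sleep, so re-check for a race
                                  * after waking and retry from rretry if we lost.
                                  */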
1.43      pk       5124: rretry:
                   5125:        if (rp->rg_segmap == NULL) {
                   5126:                /* definitely a new mapping */
                   5127:                register int i;
                   5128:                register int size = NSEGRG * sizeof (struct segmap);
                   5129:
                   5130:                sp = (struct segmap *)malloc((u_long)size, M_VMPMAP, M_WAITOK);
                   5131:                if (rp->rg_segmap != NULL) {
1.66      christos 5132: printf("pmap_enter: segment filled during sleep\n");   /* can this happen? */
1.49      pk       5133:                        free(sp, M_VMPMAP);
1.43      pk       5134:                        goto rretry;
                   5135:                }
1.55      pk       5136:                qzero((caddr_t)sp, size);
1.43      pk       5137:                rp->rg_segmap = sp;
                   5138:                rp->rg_nsegmap = 0;
                   5139:                for (i = NSEGRG; --i >= 0;)
                   5140:                        sp++->sg_pmeg = seginval;
                   5141:        }
                   5142:
                   5143:        sp = &rp->rg_segmap[vs];
                   5144:
                   5145: sretry:
                   5146:        if ((pte = sp->sg_pte) == NULL) {
1.1       deraadt  5147:                /* definitely a new mapping */
                   5148:                register int size = NPTESG * sizeof *pte;
                   5149:
                   5150:                pte = (int *)malloc((u_long)size, M_VMPMAP, M_WAITOK);
1.43      pk       5151:                if (sp->sg_pte != NULL) {
1.66      christos 5152: printf("pmap_enter: pte filled during sleep\n");       /* can this happen? */
1.49      pk       5153:                        free(pte, M_VMPMAP);
1.43      pk       5154:                        goto sretry;
1.1       deraadt  5155:                }
                   5156: #ifdef DEBUG
1.43      pk       5157:                if (sp->sg_pmeg != seginval)
1.1       deraadt  5158:                        panic("pmap_enter: new ptes, but not seginval");
                   5159: #endif
1.55      pk       5160:                qzero((caddr_t)pte, size);
1.43      pk       5161:                sp->sg_pte = pte;
                   5162:                sp->sg_npte = 1;
                   5163:                rp->rg_nsegmap++;
1.1       deraadt  5164:        } else {
                   5165:                /* might be a change: fetch old pte */
                   5166:                doflush = 0;
1.55      pk       5167:                if ((pmeg = sp->sg_pmeg) == seginval) {
                   5168:                        /* software pte */
                   5169:                        tpte = pte[VA_VPG(va)];
                   5170:                } else {
                   5171:                        /* hardware pte */
                   5172:                        if (CTX_USABLE(pm,rp)) {
1.71      pk       5173:                                setcontext4(pm->pm_ctxnum);
1.55      pk       5174:                                tpte = getpte4(va);
1.69      pk       5175:                                doflush = CACHEINFO.c_vactype != VAC_NONE;
1.55      pk       5176:                        } else {
1.71      pk       5177:                                setcontext4(0);
1.55      pk       5178:                                /* XXX use per-cpu pteva? */
1.69      pk       5179:                                if (HASSUN4_MMU3L)
1.55      pk       5180:                                        setregmap(0, tregion);
                   5181:                                setsegmap(0, pmeg);
                   5182:                                tpte = getpte4(VA_VPG(va) << PGSHIFT);
                   5183:                        }
                   5184:                }
                   5185:                if (tpte & PG_V) {
                   5186:                        register int addr;
                   5187:
                   5188:                        /* old mapping exists, and is of the same pa type */
                   5189:                        if ((tpte & (PG_PFNUM|PG_TYPE)) ==
                   5190:                            (pteproto & (PG_PFNUM|PG_TYPE))) {
                   5191:                                /* just changing prot and/or wiring */
                   5192:                                splx(s);
                   5193:                                /* caller should call this directly: */
1.60      pk       5194:                                pmap_changeprot4_4c(pm, va, prot, wired);
1.55      pk       5195:                                if (wired)
                   5196:                                        pm->pm_stats.wired_count++;
                   5197:                                else
                   5198:                                        pm->pm_stats.wired_count--;
                   5199:                                return;
                   5200:                        }
                   5201:                        /*
                   5202:                         * Switcheroo: changing pa for this va.
                   5203:                         * If old pa was managed, remove from pvlist.
                   5204:                         * If old page was cached, flush cache.
                   5205:                         */
1.65      christos 5206: #if 0
1.66      christos 5207: printf("%s[%d]: pmap_enu: changing existing va(%x)=>pa entry\n",
1.65      christos 5208:        curproc->p_comm, curproc->p_pid, va);
                   5209: #endif
1.55      pk       5210:                        if ((tpte & PG_TYPE) == PG_OBMEM) {
1.60      pk       5211:                                addr = ptoa(tpte & PG_PFNUM);
1.55      pk       5212:                                if (managed(addr))
1.58      pk       5213:                                        pv_unlink4_4c(pvhead(addr), pm, va);
1.69      pk       5214:                                if (doflush && (tpte & PG_NC) == 0)
1.55      pk       5215:                                        cache_flush_page((int)va);
                   5216:                        }
                   5217:                } else {
                   5218:                        /* adding new entry */
                   5219:                        sp->sg_npte++;
                   5220:
                   5221:                        /*
                   5222:                         * Increment counters
                   5223:                         */
                   5224:                        if (wired)
                   5225:                                pm->pm_stats.wired_count++;
                   5226:                }
                   5227:        }
                   5228:
                   5229:        if (pv != NULL)
1.58      pk       5230:                pteproto |= pv_link4_4c(pv, pm, va);
1.55      pk       5231:
                   5232:        /*
                   5233:         * Update hardware & software PTEs.
                   5234:         */
                   5235:        if ((pmeg = sp->sg_pmeg) != seginval) {
1.81      pk       5236:                /* ptes are in hardware */
1.55      pk       5237:                if (CTX_USABLE(pm,rp))
1.71      pk       5238:                        setcontext4(pm->pm_ctxnum);
1.55      pk       5239:                else {
1.71      pk       5240:                        setcontext4(0);
1.55      pk       5241:                        /* XXX use per-cpu pteva? */
1.69      pk       5242:                        if (HASSUN4_MMU3L)
1.55      pk       5243:                                setregmap(0, tregion);
                   5244:                        setsegmap(0, pmeg);
                   5245:                        va = VA_VPG(va) << PGSHIFT;
                   5246:                }
                   5247:                setpte4(va, pteproto);
                   5248:        }
                   5249:        /* update software copy */
                   5250:        pte += VA_VPG(va);
                   5251:        *pte = pteproto;
                   5252:
                   5253:        splx(s);
                   5254: }
                   5255:
                   5256: #endif /*sun4,4c*/
                   5257:
                   5258: #if defined(SUN4M)             /* Sun4M versions of enter routines */
                   5259: /*
                   5260:  * Insert (MI) physical page pa at virtual address va in the given pmap.
                   5261:  * NB: the pa parameter includes type bits PMAP_OBIO, PMAP_NC as necessary.
                   5262:  *
                   5263:  * If pa is not in the `managed' range it will not be `bank mapped'.
                   5264:  * This works during bootstrap only because the first 4MB happens to
                   5265:  * map one-to-one.
                   5266:  *
                   5267:  * There may already be something else there, or we might just be
                   5268:  * changing protections and/or wiring on an existing mapping.
                   5269:  *     XXX     should have different entry points for changing!
                   5270:  */
                   5271:
                   5272: void
                   5273: pmap_enter4m(pm, va, pa, prot, wired)
                   5274:        register struct pmap *pm;
                   5275:        vm_offset_t va, pa;
                   5276:        vm_prot_t prot;
                   5277:        int wired;
                   5278: {
                   5279:        register struct pvlist *pv;
                   5280:        register int pteproto, ctx;
                   5281:
                   5282:        if (pm == NULL)
                   5283:                return;
                   5284:
                   5285: #ifdef DEBUG
                   5286:        if (pmapdebug & PDB_ENTER)
1.66      christos 5287:                printf("pmap_enter(%p, %lx, %lx, %x, %x)\n",
1.55      pk       5288:                    pm, va, pa, prot, wired);
                   5289: #endif
1.60      pk       5290:
                   5291:        /* Initialise pteproto with cache bit */
                   5292:        pteproto = (pa & PMAP_NC) == 0 ? SRMMU_PG_C : 0;
1.55      pk       5293:
1.82      pk       5294: #ifdef DEBUG
                   5295:        if (pa & PMAP_TYPE_SRMMU) {     /* this page goes in an iospace */
1.69      pk       5296:                if (cpuinfo.cpu_type == CPUTYP_MS1)
1.58      pk       5297:                        panic("pmap_enter4m: attempt to use 36-bit iospace on"
                   5298:                              " MicroSPARC");
1.55      pk       5299:        }
1.82      pk       5300: #endif
                   5301:        pteproto |= PMAP_T2PTE_SRMMU(pa);
1.55      pk       5302:
                   5303:        /* Make sure we get a pte with appropriate perms! */
                   5304:        pteproto |= SRMMU_TEPTE | PPROT_RX_RX;
                   5305:
1.82      pk       5306:        pa &= ~PMAP_TNC_SRMMU;
1.55      pk       5307:        /*
                   5308:         * Set up prototype for new PTE.  Cannot set PG_NC from PV_NC yet
                   5309:         * since the pvlist no-cache bit might change as a result of the
                   5310:         * new mapping.
                   5311:         */
                   5312:        if ((pteproto & SRMMU_PGTYPE) == PG_SUN4M_OBMEM && managed(pa)) {
                   5313: #ifdef DIAGNOSTIC
                   5314:                if (!pmap_pa_exists(pa))
                   5315:                        panic("pmap_enter: no such address: %lx", pa);
                   5316: #endif
                   5317:                pv = pvhead(pa);
                   5318:        } else {
                   5319:                pv = NULL;
                   5320:        }
1.60      pk       5321:        pteproto |= (atop(pa) << SRMMU_PPNSHIFT);
1.55      pk       5322:
                   5323:        if (prot & VM_PROT_WRITE)
                   5324:                pteproto |= PPROT_WRITE;
                   5325:
1.71      pk       5326:        ctx = getcontext4m();
1.55      pk       5327:
                   5328:        if (pm == pmap_kernel())
1.58      pk       5329:                pmap_enk4m(pm, va, prot, wired, pv, pteproto | PPROT_S);
1.55      pk       5330:        else
1.58      pk       5331:                pmap_enu4m(pm, va, prot, wired, pv, pteproto);
1.55      pk       5332:
1.71      pk       5333:        setcontext4m(ctx);
1.55      pk       5334: }
                   5335:
                   5336: /* enter new (or change existing) kernel mapping */
                   5337: void
                   5338: pmap_enk4m(pm, va, prot, wired, pv, pteproto)
                   5339:        register struct pmap *pm;
                   5340:        vm_offset_t va;
                   5341:        vm_prot_t prot;
                   5342:        int wired;
                   5343:        register struct pvlist *pv;
                   5344:        register int pteproto;
                   5345: {
                   5346:        register int vr, vs, tpte, s;
                   5347:        struct regmap *rp;
                   5348:        struct segmap *sp;
                   5349:
                   5350: #ifdef DEBUG
                   5351:        if (va < KERNBASE)
1.72      pk       5352:                panic("pmap_enk4m: can't enter va 0x%lx below KERNBASE", va);
1.55      pk       5353: #endif
                   5354:        vr = VA_VREG(va);
                   5355:        vs = VA_VSEG(va);
                   5356:        rp = &pm->pm_regmap[vr];
                   5357:        sp = &rp->rg_segmap[vs];
                   5358:
                   5359:        s = splpmap();          /* XXX way too conservative */
                   5360:
                   5361:        if (rp->rg_seg_ptps == NULL) /* enter new region */
                   5362:                panic("pmap_enk4m: missing kernel region table for va 0x%lx", va);
                   5363:
1.72      pk       5364:        tpte = sp->sg_pte[VA_SUN4M_VPG(va)];
                   5365:        if ((tpte & SRMMU_TETYPE) == SRMMU_TEPTE) {
1.55      pk       5366:                register int addr;
                   5367:
                   5368:                /* old mapping exists, and is of the same pa type */
                   5369:
                   5370:                if ((tpte & SRMMU_PPNMASK) == (pteproto & SRMMU_PPNMASK)) {
                   5371:                        /* just changing protection and/or wiring */
                   5372:                        splx(s);
1.81      pk       5373:                        pmap_changeprot4m(pm, va, prot, wired);
1.55      pk       5374:                        return;
                   5375:                }
                   5376:
                   5377:                if ((tpte & SRMMU_PGTYPE) == PG_SUN4M_OBMEM) {
                   5378: #ifdef DEBUG
1.66      christos 5379: printf("pmap_enk4m: changing existing va=>pa entry: va %lx, pteproto %x, "
1.55      pk       5380:        "oldpte %x\n", va, pteproto, tpte);
                   5381: #endif
                   5382:                        /*
                   5383:                         * Switcheroo: changing pa for this va.
                   5384:                         * If old pa was managed, remove from pvlist.
                   5385:                         * If old page was cached, flush cache.
                   5386:                         */
1.60      pk       5387:                        addr = ptoa((tpte & SRMMU_PPNMASK) >> SRMMU_PPNSHIFT);
1.55      pk       5388:                        if (managed(addr))
1.58      pk       5389:                                pv_unlink4m(pvhead(addr), pm, va);
1.55      pk       5390:                        if (tpte & SRMMU_PG_C) {
1.71      pk       5391:                                setcontext4m(0);        /* ??? */
1.69      pk       5392:                                cache_flush_page((int)va);
1.55      pk       5393:                        }
                   5394:                }
                   5395:        } else {
                   5396:                /* adding new entry */
                   5397:                sp->sg_npte++;
                   5398:        }
                   5399:
                   5400:        /*
                   5401:         * If the new mapping is for a managed PA, enter into pvlist.
                   5402:         * Note that the mapping for a malloc page will always be
                   5403:         * unique (hence will never cause a second call to malloc).
                   5404:         */
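                                 /*
                                  * pv_link4m returns a mask of PTE bits to clear; in
                                  * practice SRMMU_PG_C, when a cache alias forces the
                                  * page to be mapped uncached.
                                  */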
                   5405:        if (pv != NULL)
                   5406:                pteproto &= ~(pv_link4m(pv, pm, va));
                   5407:
1.72      pk       5408: #ifdef DEBUG
1.55      pk       5409:        if (sp->sg_pte == NULL) /* If no existing pagetable */
1.60      pk       5410:                panic("pmap_enk4m: missing segment table for va 0x%lx",va);
1.72      pk       5411: #endif
1.55      pk       5412:
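                                 /* Flush any stale translation before installing the new PTE */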
1.72      pk       5413:        tlb_flush_page(va);
                   5414:        setpgt4m(&sp->sg_pte[VA_SUN4M_VPG(va)], pteproto);
1.55      pk       5415:
                   5416:        splx(s);
                   5417: }
                   5418:
                   5419: /* enter new (or change existing) user mapping */
                   5420: void
                   5421: pmap_enu4m(pm, va, prot, wired, pv, pteproto)
                   5422:        register struct pmap *pm;
                   5423:        vm_offset_t va;
                   5424:        vm_prot_t prot;
                   5425:        int wired;
                   5426:        register struct pvlist *pv;
                   5427:        register int pteproto;
                   5428: {
1.72      pk       5429:        register int vr, vs, *pte, tpte, s;
1.55      pk       5430:        struct regmap *rp;
                   5431:        struct segmap *sp;
                   5432:
1.72      pk       5433: #ifdef DEBUG
                   5434:        if (KERNBASE < va)
                   5435:                panic("pmap_enu4m: can't enter va 0x%lx above KERNBASE", va);
                   5436: #endif
                   5437:
1.55      pk       5438:        write_user_windows();           /* XXX conservative */
                   5439:        vr = VA_VREG(va);
                   5440:        vs = VA_VSEG(va);
                   5441:        rp = &pm->pm_regmap[vr];
                   5442:        s = splpmap();                  /* XXX conservative */
                   5443:
                   5444: rretry:
                   5445:        if (rp->rg_segmap == NULL) {
                   5446:                /* definitely a new mapping */
                   5447:                register int size = NSEGRG * sizeof (struct segmap);
                   5448:
                   5449:                sp = (struct segmap *)malloc((u_long)size, M_VMPMAP, M_WAITOK);
                   5450:                if (rp->rg_segmap != NULL) {
                   5451: #ifdef DEBUG
1.66      christos 5452: printf("pmap_enu4m: segment filled during sleep\n");   /* can this happen? */
1.55      pk       5453: #endif
                   5454:                        free(sp, M_VMPMAP);
                   5455:                        goto rretry;
                   5456:                }
                   5457:                qzero((caddr_t)sp, size);
                   5458:                rp->rg_segmap = sp;
                   5459:                rp->rg_nsegmap = 0;
                   5460:                rp->rg_seg_ptps = NULL;
                   5461:        }
                   5462: rgretry:
                   5463:        if (rp->rg_seg_ptps == NULL) {
                   5464:                /* Need a segment table */
1.73      pk       5465:                int size, i, *ptd;
                   5466:
1.55      pk       5467:                size = SRMMU_L2SIZE * sizeof(long);
1.73      pk       5468:                ptd = (int *)malloc(size, M_VMPMAP, M_WAITOK);
1.55      pk       5469:                if (rp->rg_seg_ptps != NULL) {
                   5470: #ifdef DEBUG
1.66      christos 5471: printf("pmap_enu4m: bizarre segment table fill during sleep\n");
1.55      pk       5472: #endif
1.73      pk       5473:                        free(ptd, M_VMPMAP);
1.55      pk       5474:                        goto rgretry;
                   5475:                }
1.72      pk       5476: #if 0
1.69      pk       5477:                if ((cpuinfo.flags & CPUFLG_CACHEPAGETABLES) == 0)
1.73      pk       5478:                        kvm_uncache((char *)ptd, (size+NBPG-1)/NBPG);
1.72      pk       5479: #endif
1.55      pk       5480:
1.73      pk       5481:                rp->rg_seg_ptps = ptd;
                   5482:                for (i = 0; i < SRMMU_L2SIZE; i++)
1.74      pk       5483:                        setpgt4m(&ptd[i], SRMMU_TEINVALID);
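                                         /*
                                          * Point the region table entry at the new level-2
                                          * table: a page-table descriptor (PTD) carries the
                                          * table's physical address in its page-number field.
                                          */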
1.72      pk       5484:                setpgt4m(&pm->pm_reg_ptps[vr],
1.73      pk       5485:                         (VA2PA((caddr_t)ptd) >> SRMMU_PPNPASHIFT) | SRMMU_TEPTD);
1.55      pk       5486:        }
                   5487:
                   5488:        sp = &rp->rg_segmap[vs];
                   5489:
                   5490: sretry:
                   5491:        if ((pte = sp->sg_pte) == NULL) {
                   5492:                /* definitely a new mapping */
1.73      pk       5493:                int i, size = SRMMU_L3SIZE * sizeof(*pte);
1.55      pk       5494:
                   5495:                pte = (int *)malloc((u_long)size, M_VMPMAP, M_WAITOK);
                   5496:                if (sp->sg_pte != NULL) {
1.66      christos 5497: printf("pmap_enter: pte filled during sleep\n");       /* can this happen? */
1.55      pk       5498:                        free(pte, M_VMPMAP);
                   5499:                        goto sretry;
                   5500:                }
1.72      pk       5501: #if 0
1.69      pk       5502:                if ((cpuinfo.flags & CPUFLG_CACHEPAGETABLES) == 0)
1.57      abrown   5503:                        kvm_uncache((caddr_t)pte, (size+NBPG-1)/NBPG);
1.72      pk       5504: #endif
1.55      pk       5505:
                   5506:                sp->sg_pte = pte;
                   5507:                sp->sg_npte = 1;
                   5508:                rp->rg_nsegmap++;
1.74      pk       5509:                for (i = 0; i < SRMMU_L3SIZE; i++)
                   5510:                        setpgt4m(&pte[i], SRMMU_TEINVALID);
1.72      pk       5511:                setpgt4m(&rp->rg_seg_ptps[vs],
                   5512:                        (VA2PA((caddr_t)pte) >> SRMMU_PPNPASHIFT) | SRMMU_TEPTD);
1.55      pk       5513:        } else {
1.72      pk       5514:                /*
                   5515:                 * Might be a change: fetch old pte
                   5516:                 * Note we're only interested in the PTE's page frame
                   5517:                 * number and type bits, so the memory copy will do.
                   5518:                 */
                   5519:                tpte = pte[VA_SUN4M_VPG(va)];
1.55      pk       5520:
                   5521:                if ((tpte & SRMMU_TETYPE) == SRMMU_TEPTE) {
1.34      pk       5522:                        register int addr;
1.1       deraadt  5523:
1.34      pk       5524:                        /* old mapping exists, and is of the same pa type */
1.55      pk       5525:                        if ((tpte & SRMMU_PPNMASK) ==
                   5526:                            (pteproto & SRMMU_PPNMASK)) {
1.1       deraadt  5527:                                /* just changing prot and/or wiring */
                   5528:                                splx(s);
                   5529:                                /* caller should call this directly: */
1.60      pk       5530:                                pmap_changeprot4m(pm, va, prot, wired);
1.15      deraadt  5531:                                if (wired)
                   5532:                                        pm->pm_stats.wired_count++;
                   5533:                                else
                   5534:                                        pm->pm_stats.wired_count--;
1.1       deraadt  5535:                                return;
                   5536:                        }
                   5537:                        /*
                   5538:                         * Switcheroo: changing pa for this va.
                   5539:                         * If old pa was managed, remove from pvlist.
                   5540:                         * If old page was cached, flush cache.
                   5541:                         */
1.60      pk       5542: #ifdef DEBUG
1.72      pk       5543: if (pmapdebug & PDB_SWITCHMAP)
1.66      christos 5544: printf("%s[%d]: pmap_enu: changing existing va(%x)=>pa(pte=%x) entry\n",
1.72      pk       5545:        curproc->p_comm, curproc->p_pid, (int)va, tpte);
1.60      pk       5546: #endif
1.55      pk       5547:                        if ((tpte & SRMMU_PGTYPE) == PG_SUN4M_OBMEM) {
1.60      pk       5548:                                addr = ptoa( (tpte & SRMMU_PPNMASK) >>
                   5549:                                             SRMMU_PPNSHIFT);
1.31      pk       5550:                                if (managed(addr))
1.58      pk       5551:                                        pv_unlink4m(pvhead(addr), pm, va);
1.72      pk       5552:                                if (pm->pm_ctx && (tpte & SRMMU_PG_C))
1.34      pk       5553:                                        cache_flush_page((int)va);
1.31      pk       5554:                        }
1.1       deraadt  5555:                } else {
                   5556:                        /* adding new entry */
1.43      pk       5557:                        sp->sg_npte++;
1.15      deraadt  5558:
                   5559:                        /*
                   5560:                         * Increment counters
                   5561:                         */
                   5562:                        if (wired)
                   5563:                                pm->pm_stats.wired_count++;
1.1       deraadt  5564:                }
                   5565:        }
                   5566:        if (pv != NULL)
1.55      pk       5567:                pteproto &= ~(pv_link4m(pv, pm, va));
1.1       deraadt  5568:
                   5569:        /*
1.72      pk       5570:         * Update PTEs, flush TLB as necessary.
1.1       deraadt  5571:         */
1.72      pk       5572:        if (pm->pm_ctx) {
1.71      pk       5573:                setcontext4m(pm->pm_ctxnum);
1.72      pk       5574:                tlb_flush_page(va);
                   5575:        }
                   5576:        setpgt4m(&sp->sg_pte[VA_SUN4M_VPG(va)], pteproto);
1.1       deraadt  5577:
                   5578:        splx(s);
                   5579: }
1.55      pk       5580: #endif /* sun4m */
1.1       deraadt  5581:
                   5582: /*
                   5583:  * Change the wiring attribute for a map/virtual-address pair.
                   5584:  */
                   5585: /* ARGSUSED */
                   5586: void
                   5587: pmap_change_wiring(pm, va, wired)
                   5588:        struct pmap *pm;
                   5589:        vm_offset_t va;
                   5590:        int wired;
                   5591: {
                   5592:
                   5593:        pmap_stats.ps_useless_changewire++;
                   5594: }
                   5595:
                   5596: /*
                   5597:  * Extract the physical page address associated
                   5598:  * with the given map/virtual_address pair.
                   5599:  * GRR, the vm code knows; we should not have to do this!
                   5600:  */
1.55      pk       5601:
                   5602: #if defined(SUN4) || defined(SUN4C)
1.1       deraadt  5603: vm_offset_t
1.55      pk       5604: pmap_extract4_4c(pm, va)
1.1       deraadt  5605:        register struct pmap *pm;
                   5606:        vm_offset_t va;
                   5607: {
                   5608:        register int tpte;
1.43      pk       5609:        register int vr, vs;
                   5610:        struct regmap *rp;
                   5611:        struct segmap *sp;
1.1       deraadt  5612:
                   5613:        if (pm == NULL) {
1.66      christos 5614:                printf("pmap_extract: null pmap\n");
1.1       deraadt  5615:                return (0);
                   5616:        }
1.43      pk       5617:        vr = VA_VREG(va);
                   5618:        vs = VA_VSEG(va);
                   5619:        rp = &pm->pm_regmap[vr];
                   5620:        if (rp->rg_segmap == NULL) {
1.66      christos 5621:                printf("pmap_extract: invalid segment (%d)\n", vr);
1.43      pk       5622:                return (0);
                   5623:        }
                   5624:        sp = &rp->rg_segmap[vs];
                   5625:
                   5626:        if (sp->sg_pmeg != seginval) {
1.71      pk       5627:                register int ctx = getcontext4();
1.1       deraadt  5628:
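                                         /*
                                          * The segment is resident in an MMU PMEG: read the
                                          * PTE from the hardware, in the pmap's own context
                                          * if usable, else via a borrowed mapping in context 0.
                                          */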
1.43      pk       5629:                if (CTX_USABLE(pm,rp)) {
1.61      pk       5630:                        CHANGE_CONTEXTS(ctx, pm->pm_ctxnum);
1.55      pk       5631:                        tpte = getpte4(va);
1.1       deraadt  5632:                } else {
1.61      pk       5633:                        CHANGE_CONTEXTS(ctx, 0);
1.69      pk       5634:                        if (HASSUN4_MMU3L)
1.43      pk       5635:                                setregmap(0, tregion);
                   5636:                        setsegmap(0, sp->sg_pmeg);
1.55      pk       5637:                        tpte = getpte4(VA_VPG(va) << PGSHIFT);
1.1       deraadt  5638:                }
1.71      pk       5639:                setcontext4(ctx);
1.1       deraadt  5640:        } else {
1.43      pk       5641:                register int *pte = sp->sg_pte;
1.1       deraadt  5642:
                   5643:                if (pte == NULL) {
1.66      christos 5644:                        printf("pmap_extract: invalid segment\n");
1.1       deraadt  5645:                        return (0);
                   5646:                }
                   5647:                tpte = pte[VA_VPG(va)];
                   5648:        }
                   5649:        if ((tpte & PG_V) == 0) {
1.66      christos 5650:                printf("pmap_extract: invalid pte\n");
1.1       deraadt  5651:                return (0);
                   5652:        }
                   5653:        tpte &= PG_PFNUM;
1.1       deraadt  5655:        return ((tpte << PGSHIFT) | (va & PGOFSET));
                   5656: }
1.55      pk       5657: #endif /*4,4c*/
                   5658:
                   5659: #if defined(SUN4M)             /* 4m version of pmap_extract */
                   5660: /*
                   5661:  * Extract the physical page address associated
                   5662:  * with the given map/virtual_address pair.
                   5663:  * GRR, the vm code knows; we should not have to do this!
                   5664:  */
                   5665: vm_offset_t
                   5666: pmap_extract4m(pm, va)
                   5667:        register struct pmap *pm;
                   5668:        vm_offset_t va;
                   5669: {
1.79      pk       5670:        register int pte;
1.55      pk       5671:
                   5672:        if (pm == NULL) {
1.66      christos 5673:                printf("pmap_extract: null pmap\n");
1.55      pk       5674:                return (0);
                   5675:        }
                   5676:
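                                 /*
                                  * getptesw4m() walks the software page tables, so no
                                  * hardware probe (and no context switch) is needed here.
                                  */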
1.79      pk       5677:        pte = getptesw4m(pm, va);
1.55      pk       5678:
1.72      pk       5679: #ifdef DEBUG
1.79      pk       5680:        if ((pte & SRMMU_TETYPE) != SRMMU_TEPTE) {
1.72      pk       5681:                printf("pmap_extract: invalid pte of type %d\n",
1.79      pk       5682:                       pte & SRMMU_TETYPE);
1.72      pk       5683:                return (0);
                   5684:        }
                   5685: #endif
1.55      pk       5686:
1.79      pk       5687:        return (ptoa((pte & SRMMU_PPNMASK) >> SRMMU_PPNSHIFT) | VA_OFF(va));
1.55      pk       5688: }
                   5689: #endif /* sun4m */
1.1       deraadt  5690:
                   5691: /*
                   5692:  * Copy the range specified by src_addr/len
                   5693:  * from the source map to the range dst_addr/len
                   5694:  * in the destination map.
                   5695:  *
                   5696:  * This routine is only advisory and need not do anything.
                   5697:  */
                   5698: /* ARGSUSED */
                   5699: void
                   5700: pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
                   5701:        struct pmap *dst_pmap, *src_pmap;
                   5702:        vm_offset_t dst_addr;
                   5703:        vm_size_t len;
                   5704:        vm_offset_t src_addr;
                   5705: {
1.55      pk       5706: #if 0
                   5707:        if (CPU_ISSUN4M) {
                   5708:                register int i, pte;
                   5709:                for (i = 0; i < len/NBPG; i++) {
                   5710:                        pte = getptesw4m(src_pmap, src_addr);
                   5711:                        pmap_enter(dst_pmap, dst_addr,
1.60      pk       5712:                                   ptoa((pte & SRMMU_PPNMASK) >>
                   5713:                                        SRMMU_PPNSHIFT) |
                   5714:                                    VA_OFF(src_addr),
                   5715:                                   (pte & PPROT_WRITE)
                   5716:                                        ? VM_PROT_WRITE| VM_PROT_READ
                   5717:                                        : VM_PROT_READ,
1.55      pk       5718:                                   0);
                   5719:                        src_addr += NBPG;
                   5720:                        dst_addr += NBPG;
                   5721:                }
                   5722:        }
                   5723: #endif
1.1       deraadt  5724: }
                   5725:
                   5726: /*
                   5727:  * Require that all active physical maps contain no
                   5728:  * incorrect entries NOW.  [This update includes
                   5729:  * forcing updates of any address map caching.]
                   5730:  */
                   5731: void
                   5732: pmap_update()
                   5733: {
1.55      pk       5734: #if defined(SUN4M)
                   5735:        if (CPU_ISSUN4M)
                   5736:                tlb_flush_all();        /* %%%: Extreme Paranoia?  */
                   5737: #endif
1.1       deraadt  5738: }
                   5739:
                   5740: /*
                   5741:  * Garbage collects the physical map system for
                   5742:  * pages which are no longer used.
                   5743:  * Success need not be guaranteed -- that is, there
                   5744:  * may well be pages which are not referenced, but
                   5745:  * others may be collected.
                   5746:  * Called by the pageout daemon when pages are scarce.
                   5747:  */
                   5748: /* ARGSUSED */
                   5749: void
                   5750: pmap_collect(pm)
                   5751:        struct pmap *pm;
                   5752: {
                   5753: }
                   5754:
1.55      pk       5755: #if defined(SUN4) || defined(SUN4C)
                   5756:
1.1       deraadt  5757: /*
                   5758:  * Clear the modify bit for the given physical page.
                   5759:  */
                   5760: void
1.55      pk       5761: pmap_clear_modify4_4c(pa)
1.1       deraadt  5762:        register vm_offset_t pa;
                   5763: {
                   5764:        register struct pvlist *pv;
                   5765:
1.82      pk       5766:        if ((pa & (PMAP_TNC_4 & ~PMAP_NC)) == 0 && managed(pa)) {
1.1       deraadt  5767:                pv = pvhead(pa);
1.58      pk       5768:                (void) pv_syncflags4_4c(pv);
1.1       deraadt  5769:                pv->pv_flags &= ~PV_MOD;
                   5770:        }
                   5771: }
                   5772:
                   5773: /*
                   5774:  * Tell whether the given physical page has been modified.
                   5775:  */
                   5776: int
1.55      pk       5777: pmap_is_modified4_4c(pa)
1.1       deraadt  5778:        register vm_offset_t pa;
                   5779: {
                   5780:        register struct pvlist *pv;
                   5781:
1.82      pk       5782:        if ((pa & (PMAP_TNC_4 & ~PMAP_NC)) == 0 && managed(pa)) {
1.1       deraadt  5783:                pv = pvhead(pa);
1.58      pk       5784:                if (pv->pv_flags & PV_MOD || pv_syncflags4_4c(pv) & PV_MOD)
1.1       deraadt  5785:                        return (1);
                   5786:        }
                   5787:        return (0);
                   5788: }
                   5789:
                   5790: /*
                   5791:  * Clear the reference bit for the given physical page.
                   5792:  */
                   5793: void
1.55      pk       5794: pmap_clear_reference4_4c(pa)
1.1       deraadt  5795:        vm_offset_t pa;
                   5796: {
                   5797:        register struct pvlist *pv;
                   5798:
1.82      pk       5799:        if ((pa & (PMAP_TNC_4 & ~PMAP_NC)) == 0 && managed(pa)) {
1.1       deraadt  5800:                pv = pvhead(pa);
1.58      pk       5801:                (void) pv_syncflags4_4c(pv);
1.1       deraadt  5802:                pv->pv_flags &= ~PV_REF;
                   5803:        }
                   5804: }
                   5805:
                   5806: /*
                   5807:  * Tell whether the given physical page has been referenced.
                   5808:  */
                   5809: int
1.55      pk       5810: pmap_is_referenced4_4c(pa)
1.1       deraadt  5811:        vm_offset_t pa;
                   5812: {
                   5813:        register struct pvlist *pv;
                   5814:
1.82      pk       5815:        if ((pa & (PMAP_TNC_4 & ~PMAP_NC)) == 0 && managed(pa)) {
1.1       deraadt  5816:                pv = pvhead(pa);
1.58      pk       5817:                if (pv->pv_flags & PV_REF || pv_syncflags4_4c(pv) & PV_REF)
1.1       deraadt  5818:                        return (1);
                   5819:        }
                   5820:        return (0);
                   5821: }
1.55      pk       5822: #endif /*4,4c*/
                   5823:
1.58      pk       5824: #if defined(SUN4M)
                   5825:
                   5826: /*
                   5827:  * 4m versions of bit test/set routines
                   5828:  *
                   5829:  * Note that the 4m-specific routines should eventually service these
                   5830:  * requests from their page tables, and the whole pvlist bit mess should
                   5831:  * be dropped for the 4m (unless this causes a performance hit from
                   5832:  * tracing down pagetables/regmap/segmaps).
                   5833:  */
                   5834:
1.55      pk       5835: /*
                   5836:  * Clear the modify bit for the given physical page.
                   5837:  */
                   5838: void
                   5839: pmap_clear_modify4m(pa)           /* XXX %%%: Should service from swpagetbl for 4m */
                   5840:        register vm_offset_t pa;
                   5841: {
                   5842:        register struct pvlist *pv;
                   5843:
1.82      pk       5844:        if ((pa & (PMAP_TNC_SRMMU & ~PMAP_NC)) == 0 && managed(pa)) {
1.55      pk       5845:                pv = pvhead(pa);
1.58      pk       5846:                (void) pv_syncflags4m(pv);
1.55      pk       5847:                pv->pv_flags &= ~PV_MOD4M;
                   5848:        }
                   5849: }
                   5850:
                   5851: /*
                   5852:  * Tell whether the given physical page has been modified.
                   5853:  */
                   5854: int
                   5855: pmap_is_modified4m(pa) /* Test performance with SUN4M && SUN4/4C. XXX */
                   5856:        register vm_offset_t pa;
                   5857: {
                   5858:        register struct pvlist *pv;
                   5859:
1.82      pk       5860:        if ((pa & (PMAP_TNC_SRMMU & ~PMAP_NC)) == 0 && managed(pa)) {
1.55      pk       5861:                pv = pvhead(pa);
                   5862:                if (pv->pv_flags & PV_MOD4M || pv_syncflags4m(pv) & PV_MOD4M)
                   5863:                        return(1);
                   5864:        }
                   5865:        return (0);
                   5866: }
                   5867:
                   5868: /*
                   5869:  * Clear the reference bit for the given physical page.
                   5870:  */
                   5871: void
                   5872: pmap_clear_reference4m(pa)
                   5873:        vm_offset_t pa;
                   5874: {
                   5875:        register struct pvlist *pv;
                   5876:
1.82      pk       5877:        if ((pa & (PMAP_TNC_SRMMU & ~PMAP_NC)) == 0 && managed(pa)) {
1.55      pk       5878:                pv = pvhead(pa);
1.58      pk       5879:                (void) pv_syncflags4m(pv);
1.55      pk       5880:                pv->pv_flags &= ~PV_REF4M;
                   5881:        }
                   5882: }
                   5883:
                   5884: /*
                   5885:  * Tell whether the given physical page has been referenced.
                   5886:  */
                   5887: int
                   5888: pmap_is_referenced4m(pa)
                   5889:        vm_offset_t pa;
                   5890: {
                   5891:        register struct pvlist *pv;
                   5892:
1.82      pk       5893:        if ((pa & (PMAP_TNC_SRMMU & ~PMAP_NC)) == 0 && managed(pa)) {
1.55      pk       5894:                pv = pvhead(pa);
                   5895:                if (pv->pv_flags & PV_REF4M || pv_syncflags4m(pv) & PV_REF4M)
                   5896:                        return(1);
                   5897:        }
                   5898:        return (0);
                   5899: }
                   5900: #endif /* 4m */
1.1       deraadt  5901:
                   5902: /*
                   5903:  * Make the specified pages (by pmap, offset) pageable (or not) as requested.
                   5904:  *
                   5905:  * A page which is not pageable may not take a fault; therefore, its page
                   5906:  * table entry must remain valid for the duration (or at least, the trap
                   5907:  * handler must not call vm_fault).
                   5908:  *
                   5909:  * This routine is merely advisory; pmap_enter will specify that these pages
                   5910:  * are to be wired down (or not) as appropriate.
                   5911:  */
                   5912: /* ARGSUSED */
                   5913: void
                   5914: pmap_pageable(pm, start, end, pageable)
                   5915:        struct pmap *pm;
                   5916:        vm_offset_t start, end;
                   5917:        int pageable;
                   5918: {
1.2       deraadt  5919: }
                   5920:
                   5921: /*
1.1       deraadt  5922:  * Fill the given MI physical page with zero bytes.
                   5923:  *
                   5924:  * We avoid stomping on the cache.
                   5925:  * XXX might be faster to use destination's context and allow cache to fill?
                   5926:  */
1.55      pk       5927:
                   5928: #if defined(SUN4) || defined(SUN4C)
                   5929:
1.1       deraadt  5930: void
1.55      pk       5931: pmap_zero_page4_4c(pa)
1.1       deraadt  5932:        register vm_offset_t pa;
                   5933: {
                   5934:        register caddr_t va;
                   5935:        register int pte;
                   5936:
1.82      pk       5937:        if (((pa & (PMAP_TNC_4 & ~PMAP_NC)) == 0) && managed(pa)) {
1.1       deraadt  5938:                /*
                   5939:                 * The following might not be necessary since the page
                   5940:                 * is being cleared because it is about to be allocated,
                   5941:                 * i.e., is in use by no one.
                   5942:                 */
1.69      pk       5943:                pv_flushcache(pvhead(pa));
1.60      pk       5944:        }
                   5945:        pte = PG_V | PG_S | PG_W | PG_NC | (atop(pa) & PG_PFNUM);
1.1       deraadt  5946:
                   5947:        va = vpage[0];
1.55      pk       5948:        setpte4(va, pte);
1.1       deraadt  5949:        qzero(va, NBPG);
1.55      pk       5950:        setpte4(va, 0);
1.1       deraadt  5951: }
                   5952:
                   5953: /*
                   5954:  * Copy the given MI physical source page to its destination.
                   5955:  *
                   5956:  * We avoid stomping on the cache as above (with same `XXX' note).
                   5957:  * We must first flush any write-back cache for the source page.
                   5958:  * We go ahead and stomp on the kernel's virtual cache for the
                   5959:  * source page, since the cache can read memory MUCH faster than
                   5960:  * the processor.
                   5961:  */
                   5962: void
1.55      pk       5963: pmap_copy_page4_4c(src, dst)
1.1       deraadt  5964:        vm_offset_t src, dst;
                   5965: {
                   5966:        register caddr_t sva, dva;
                   5967:        register int spte, dpte;
                   5968:
                   5969:        if (managed(src)) {
1.69      pk       5970:                if (CACHEINFO.c_vactype == VAC_WRITEBACK)
1.1       deraadt  5971:                        pv_flushcache(pvhead(src));
1.60      pk       5972:        }
                   5973:        spte = PG_V | PG_S | (atop(src) & PG_PFNUM);
1.1       deraadt  5974:
                   5975:        if (managed(dst)) {
                   5976:                /* similar `might not be necessary' comment applies */
1.69      pk       5977:                if (CACHEINFO.c_vactype != VAC_NONE)
1.1       deraadt  5978:                        pv_flushcache(pvhead(dst));
1.60      pk       5979:        }
                   5980:        dpte = PG_V | PG_S | PG_W | PG_NC | (atop(dst) & PG_PFNUM);
1.1       deraadt  5981:
                   5982:        sva = vpage[0];
                   5983:        dva = vpage[1];
1.55      pk       5984:        setpte4(sva, spte);
                   5985:        setpte4(dva, dpte);
1.1       deraadt  5986:        qcopy(sva, dva, NBPG);  /* loads cache, so we must ... */
1.69      pk       5987:        cache_flush_page((int)sva);
1.55      pk       5988:        setpte4(sva, 0);
                   5989:        setpte4(dva, 0);
                   5990: }
                   5991: #endif /* 4, 4c */
                   5992:
                   5993: #if defined(SUN4M)             /* Sun4M version of copy/zero routines */
                   5994: /*
                   5995:  * Fill the given MI physical page with zero bytes.
                   5996:  *
                   5997:  * We avoid stomping on the cache.
                   5998:  * XXX might be faster to use destination's context and allow cache to fill?
                   5999:  */
                   6000: void
                   6001: pmap_zero_page4m(pa)
                   6002:        register vm_offset_t pa;
                   6003: {
                   6004:        register caddr_t va;
                   6005:        register int pte;
1.79      pk       6006:        int ctx;
1.55      pk       6007:
1.82      pk       6008:        if (((pa & (PMAP_TNC_SRMMU & ~PMAP_NC)) == 0) && managed(pa)) {
1.55      pk       6009:                /*
                   6010:                 * The following might not be necessary since the page
                   6011:                 * is being cleared because it is about to be allocated,
                   6012:                 * i.e., is in use by no one.
                   6013:                 */
1.69      pk       6014:                if (CACHEINFO.c_vactype != VAC_NONE)
1.55      pk       6015:                        pv_flushcache(pvhead(pa));
1.60      pk       6016:        }
1.68      abrown   6017:        pte = (SRMMU_TEPTE | PPROT_S | PPROT_WRITE |
                   6018:               (atop(pa) << SRMMU_PPNSHIFT));
1.69      pk       6019:        if (cpuinfo.flags & CPUFLG_CACHE_MANDATORY)
1.68      abrown   6020:                pte |= SRMMU_PG_C;
                   6021:        else
                   6022:                pte &= ~SRMMU_PG_C;
                   6023:
1.79      pk       6024:        /* XXX - must use context 0 or else setpte4m() will fail */
                   6025:        ctx = getcontext4m();
                   6026:        setcontext4m(0);
1.55      pk       6027:        va = vpage[0];
                   6028:        setpte4m((vm_offset_t) va, pte);
                   6029:        qzero(va, NBPG);
                   6030:        setpte4m((vm_offset_t) va, SRMMU_TEINVALID);
1.79      pk       6031:        setcontext4m(ctx);
1.55      pk       6032: }
                   6033:
                   6034: /*
                   6035:  * Copy the given MI physical source page to its destination.
                   6036:  *
                   6037:  * We avoid stomping on the cache as above (with same `XXX' note).
                   6038:  * We must first flush any write-back cache for the source page.
                   6039:  * We go ahead and stomp on the kernel's virtual cache for the
                   6040:  * source page, since the cache can read memory MUCH faster than
                   6041:  * the processor.
                   6042:  */
                   6043: void
                   6044: pmap_copy_page4m(src, dst)
                   6045:        vm_offset_t src, dst;
                   6046: {
                   6047:        register caddr_t sva, dva;
                   6048:        register int spte, dpte;
1.79      pk       6049:        int ctx;
1.55      pk       6050:
                   6051:        if (managed(src)) {
1.69      pk       6052:                if (CACHEINFO.c_vactype == VAC_WRITEBACK)
1.55      pk       6053:                        pv_flushcache(pvhead(src));
1.60      pk       6054:        }
                   6055:        spte = SRMMU_TEPTE | SRMMU_PG_C | PPROT_S |
                   6056:                (atop(src) << SRMMU_PPNSHIFT);
1.55      pk       6057:
                   6058:        if (managed(dst)) {
                   6059:                /* similar `might not be necessary' comment applies */
1.69      pk       6060:                if (CACHEINFO.c_vactype != VAC_NONE)
1.55      pk       6061:                        pv_flushcache(pvhead(dst));
1.60      pk       6062:        }
1.68      abrown   6063:        dpte = (SRMMU_TEPTE | PPROT_S | PPROT_WRITE |
                   6064:               (atop(dst) << SRMMU_PPNSHIFT));
1.69      pk       6065:        if (cpuinfo.flags & CPUFLG_CACHE_MANDATORY)
1.68      abrown   6066:                dpte |= SRMMU_PG_C;
                   6067:        else
                   6068:                dpte &= ~SRMMU_PG_C;
1.60      pk       6069:
1.79      pk       6070:        /* XXX - must use context 0 or else setpte4m() will fail */
                   6071:        ctx = getcontext4m();
                   6072:        setcontext4m(0);
1.55      pk       6073:        sva = vpage[0];
                   6074:        dva = vpage[1];
                   6075:        setpte4m((vm_offset_t) sva, spte);
                   6076:        setpte4m((vm_offset_t) dva, dpte);
                   6077:        qcopy(sva, dva, NBPG);  /* loads cache, so we must ... */
1.69      pk       6078:        cache_flush_page((int)sva);
1.55      pk       6079:        setpte4m((vm_offset_t) sva, SRMMU_TEINVALID);
                   6080:        setpte4m((vm_offset_t) dva, SRMMU_TEINVALID);
1.79      pk       6081:        setcontext4m(ctx);
1.1       deraadt  6082: }
1.55      pk       6083: #endif /* Sun4M */
1.1       deraadt  6084:
                   6085: /*
                   6086:  * Turn a cdevsw d_mmap value into a byte address for pmap_enter.
                   6087:  * XXX this should almost certainly be done differently, and
                   6088:  *     elsewhere, or even not at all
                   6089:  */
                   6090: vm_offset_t
                   6091: pmap_phys_address(x)
                   6092:        int x;
                   6093: {
                   6094:
                   6095:        return (x);
                   6096: }
                   6097:
                   6098: /*
                   6099:  * Turn off cache for a given (va, number of pages).
                   6100:  *
                   6101:  * We just assert PG_NC for each PTE (on sun4m, clear SRMMU_PG_C);
                   6102:  * the addresses must reside in locked kernel space.  A cache flush is also done.
                   6103:  */
1.53      christos 6104: void
1.1       deraadt  6105: kvm_uncache(va, npages)
                   6106:        register caddr_t va;
                   6107:        register int npages;
                   6108: {
                   6109:        register int pte;
1.55      pk       6110:        if (CPU_ISSUN4M) {
                   6111: #if defined(SUN4M)
                   6112:                for (; --npages >= 0; va += NBPG) {
                   6113:                        pte = getpte4m((vm_offset_t) va);
                   6114:                        if ((pte & SRMMU_TETYPE) != SRMMU_TEPTE)
                   6115:                                panic("kvm_uncache: table entry not pte");
                   6116:                        pte &= ~SRMMU_PG_C;
                   6117:                        setpte4m((vm_offset_t) va, pte);
1.69      pk       6118:                        if ((pte & PG_TYPE) == PG_OBMEM)
1.55      pk       6119:                                cache_flush_page((int)va);
                   6120:                }
                   6121: #endif
                   6122:        } else {
                   6123: #if defined(SUN4) || defined(SUN4C)
                   6124:                for (; --npages >= 0; va += NBPG) {
                   6125:                        pte = getpte4(va);
                   6126:                        if ((pte & PG_V) == 0)
                   6127:                                panic("kvm_uncache !pg_v");
                   6128:                        pte |= PG_NC;
                   6129:                        setpte4(va, pte);
1.69      pk       6130:                        if ((pte & PG_TYPE) == PG_OBMEM)
1.55      pk       6131:                                cache_flush_page((int)va);
                   6132:                }
                   6133: #endif
1.1       deraadt  6134:        }
1.21      deraadt  6135: }
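                         /*
                          * Usage sketch (hypothetical names): a driver that has mapped
                          * `npg' pages of device memory at `fbva' and must keep them
                          * uncached would call
                          *
                          *	kvm_uncache(fbva, npg);
                          */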
                   6136:
1.46      pk       6137: /*
                   6138:  * Turn on IO cache for a given (va, number of pages).
                   6139:  *
                   6140:  * We just assert PG_IOC for each PTE; the addresses must reside
                   6141:  * in locked kernel space.  (Unlike kvm_uncache, no cache flush is done.)
                   6142:  */
1.53      christos 6143: void
1.46      pk       6144: kvm_iocache(va, npages)
                   6145:        register caddr_t va;
                   6146:        register int npages;
                   6147: {
                   6148:
1.55      pk       6149: #ifdef SUN4M
                   6150:        if (CPU_ISSUN4M) /* %%%: Implement! */
                   6151:                panic("kvm_iocache: 4m iocache not implemented");
                   6152: #endif
                   6153: #if defined(SUN4) || defined(SUN4C)
1.46      pk       6154:        for (; --npages >= 0; va += NBPG) {
1.55      pk       6155:                register int pte = getpte4(va);
1.46      pk       6156:                if ((pte & PG_V) == 0)
                   6157:                        panic("kvm_iocache !pg_v");
                   6158:                pte |= PG_IOC;
1.55      pk       6159:                setpte4(va, pte);
1.46      pk       6160:        }
1.55      pk       6161: #endif
1.46      pk       6162: }
                   6163:
1.21      deraadt  6164: int
                   6165: pmap_count_ptes(pm)
                   6166:        register struct pmap *pm;
                   6167: {
                   6168:        register int idx, total;
1.43      pk       6169:        register struct regmap *rp;
                   6170:        register struct segmap *sp;
1.21      deraadt  6171:
1.43      pk       6172:        if (pm == pmap_kernel()) {
                   6173:                rp = &pm->pm_regmap[NUREG];
                   6174:                idx = NKREG;
                   6175:        } else {
                   6176:                rp = pm->pm_regmap;
                   6177:                idx = NUREG;
                   6178:        }
1.21      deraadt  6179:        for (total = 0; idx;)
1.43      pk       6180:                if ((sp = rp[--idx].rg_segmap) != NULL)
                   6181:                        total += sp->sg_npte;
1.21      deraadt  6182:        pm->pm_stats.resident_count = total;
                   6183:        return (total);
1.24      pk       6184: }
                   6185:
                   6186: /*
1.51      gwr      6187:  * Find first virtual address >= *va that is
                   6188:  * least likely to cause cache aliases.
                   6189:  * (This will just seg-align mappings.)
1.24      pk       6190:  */
1.51      gwr      6191: void
1.52      pk       6192: pmap_prefer(foff, vap)
1.51      gwr      6193:        register vm_offset_t foff;
1.52      pk       6194:        register vm_offset_t *vap;
1.24      pk       6195: {
1.52      pk       6196:        register vm_offset_t va = *vap;
                   6197:        register long d, m;
                   6198:
                   6199:        if (VA_INHOLE(va))
                   6200:                va = MMU_HOLE_END;
1.24      pk       6201:
1.48      pk       6202:        m = CACHE_ALIAS_DIST;
                   6203:        if (m == 0) {           /* m=0 => no cache aliasing */
                   6204:                *vap = va;      /* preserve any MMU-hole adjustment */
                   6205:                return;
                   6206:        }
1.24      pk       6205:
1.52      pk       6206:        d = foff - va;
                   6207:        d &= (m - 1);
                   6208:        *vap = va + d;
1.23      deraadt  6209: }
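                         /*
                          * Worked example of the arithmetic above (made-up numbers):
                          * with m = CACHE_ALIAS_DIST = 0x10000 (64KB), foff = 0x23000
                          * and va = 0x40000, d = (0x23000 - 0x40000) & 0xffff = 0x3000,
                          * so *vap becomes 0x43000: the first address >= va that is
                          * congruent to foff modulo the alias distance, and hence falls
                          * in the same cache alias bin as the file offset.
                          */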
                   6210:
1.53      christos 6211: void
1.23      deraadt  6212: pmap_redzone()
                   6213: {
1.55      pk       6214: #if defined(SUN4M)
                   6215:        if (CPU_ISSUN4M) {
                   6216:                setpte4m(KERNBASE, 0);
                   6217:                return;
                   6218:        }
                   6219: #endif
                   6220: #if defined(SUN4) || defined(SUN4C)
                   6221:        if (CPU_ISSUN4OR4C) {
                   6222:                setpte4(KERNBASE, 0);
                   6223:                return;
                   6224:        }
                   6225: #endif
1.1       deraadt  6226: }
1.43      pk       6227:
                   6228: #ifdef DEBUG
                   6229: /*
                   6230:  * Check consistency of a pmap (time consuming!).
                   6231:  */
1.53      christos 6232: void
1.43      pk       6233: pm_check(s, pm)
                   6234:        char *s;
                   6235:        struct pmap *pm;
                   6236: {
                   6237:        if (pm == pmap_kernel())
                   6238:                pm_check_k(s, pm);
                   6239:        else
                   6240:                pm_check_u(s, pm);
                   6241: }
                   6242:
1.53      christos 6243: void
1.43      pk       6244: pm_check_u(s, pm)
                   6245:        char *s;
                   6246:        struct pmap *pm;
                   6247: {
                   6248:        struct regmap *rp;
                   6249:        struct segmap *sp;
                   6250:        int n, vs, vr, j, m, *pte;
                   6251:
1.55      pk       6252:        if (pm->pm_regmap == NULL)
1.72      pk       6253:                panic("%s: CHK(pmap %p): no region mapping", s, pm);
1.55      pk       6254:
                   6255: #if defined(SUN4M)
                   6256:        if (CPU_ISSUN4M &&
                   6257:            (pm->pm_reg_ptps == NULL ||
                   6258:             pm->pm_reg_ptps_pa != VA2PA((caddr_t)pm->pm_reg_ptps)))
1.72      pk       6259:                panic("%s: CHK(pmap %p): no SRMMU region table or bad pa: "
                   6260:                      "tblva=%p, tblpa=0x%x",
                   6261:                        s, pm, pm->pm_reg_ptps, pm->pm_reg_ptps_pa);
1.55      pk       6262:
                   6263:        if (CPU_ISSUN4M && pm->pm_ctx != NULL &&
1.69      pk       6264:            (cpuinfo.ctx_tbl[pm->pm_ctxnum] != ((VA2PA((caddr_t)pm->pm_reg_ptps)
1.55      pk       6265:                                              >> SRMMU_PPNPASHIFT) |
                   6266:                                             SRMMU_TEPTD)))
                   6267:            panic("%s: CHK(pmap %p): SRMMU region table at %x not installed "
                   6268:                  "for context %d", s, pm, pm->pm_reg_ptps_pa, pm->pm_ctxnum);
                   6269: #endif
                   6270:
1.43      pk       6271:        for (vr = 0; vr < NUREG; vr++) {
                   6272:                rp = &pm->pm_regmap[vr];
                   6273:                if (rp->rg_nsegmap == 0)
                   6274:                        continue;
                   6275:                if (rp->rg_segmap == NULL)
                   6276:                        panic("%s: CHK(vr %d): nsegmap = %d; sp==NULL",
                   6277:                                s, vr, rp->rg_nsegmap);
1.55      pk       6278: #if defined(SUN4M)
                   6279:                if (CPU_ISSUN4M && rp->rg_seg_ptps == NULL)
                   6280:                    panic("%s: CHK(vr %d): nsegmap=%d; no SRMMU segment table",
                   6281:                          s, vr, rp->rg_nsegmap);
                   6282:                if (CPU_ISSUN4M &&
                   6283:                    pm->pm_reg_ptps[vr] != ((VA2PA((caddr_t)rp->rg_seg_ptps) >>
                   6284:                                            SRMMU_PPNPASHIFT) | SRMMU_TEPTD))
                   6285:                    panic("%s: CHK(vr %d): SRMMU segtbl not installed",s,vr);
                   6286: #endif
1.43      pk       6287:                if ((unsigned int)rp < KERNBASE)
1.54      christos 6288:                        panic("%s: rp=%p", s, rp);
1.43      pk       6289:                n = 0;
                   6290:                for (vs = 0; vs < NSEGRG; vs++) {
                   6291:                        sp = &rp->rg_segmap[vs];
                   6292:                        if ((unsigned int)sp < KERNBASE)
1.54      christos 6293:                                panic("%s: sp=%p", s, sp);
1.43      pk       6294:                        if (sp->sg_npte != 0) {
                   6295:                                n++;
                   6296:                                if (sp->sg_pte == NULL)
                   6297:                                        panic("%s: CHK(vr %d, vs %d): npte=%d, "
                   6298:                                           "pte=NULL", s, vr, vs, sp->sg_npte);
1.55      pk       6299: #if defined(SUN4M)
                   6300:                                if (CPU_ISSUN4M &&
                   6301:                                    rp->rg_seg_ptps[vs] !=
                   6302:                                     ((VA2PA((caddr_t)sp->sg_pte)
                   6303:                                        >> SRMMU_PPNPASHIFT) |
                   6304:                                       SRMMU_TEPTD))
                   6305:                                    panic("%s: CHK(vr %d, vs %d): SRMMU page "
                   6306:                                          "table not installed correctly",s,vr,
                   6307:                                          vs);
                   6308: #endif
1.43      pk       6309:                                pte=sp->sg_pte;
                   6310:                                m = 0;
                   6311:                                for (j=0; j<NPTESG; j++,pte++)
1.55      pk       6312:                                    if ((CPU_ISSUN4M
                   6313:                                         ?((*pte & SRMMU_TETYPE) == SRMMU_TEPTE)
                   6314:                                         :(*pte & PG_V)))
                   6315:                                        m++;
1.43      pk       6316:                                if (m != sp->sg_npte)
                   6317:                                    /*if (pmapdebug & 0x10000)*/
1.66      christos 6318:                                        printf("%s: user CHK(vr %d, vs %d): "
1.43      pk       6319:                                            "npte(%d) != # valid(%d)\n",
                   6320:                                                s, vr, vs, sp->sg_npte, m);
                   6321:                        }
                   6322:                }
                   6323:                if (n != rp->rg_nsegmap)
                   6324:                        panic("%s: CHK(vr %d): inconsistent "
                   6325:                                "# of pte's: %d, should be %d",
                   6326:                                s, vr, rp->rg_nsegmap, n);
                   6327:        }
1.53      christos 6328:        return;
1.43      pk       6329: }
                   6330:
1.53      christos 6331: void
1.55      pk       6332: pm_check_k(s, pm)              /* Note: not as extensive as pm_check_u. */
1.43      pk       6333:        char *s;
                   6334:        struct pmap *pm;
                   6335: {
                   6336:        struct regmap *rp;
                   6337:        int vr, vs, n;
                   6338:
1.55      pk       6339:        if (pm->pm_regmap == NULL)
                   6340:            panic("%s: CHK(pmap %p): no region mapping", s, pm);
                   6341:
                   6342: #if defined(SUN4M)
                   6343:        if (CPU_ISSUN4M &&
                   6344:            (pm->pm_reg_ptps == NULL ||
                   6345:             pm->pm_reg_ptps_pa != VA2PA((caddr_t)pm->pm_reg_ptps)))
                   6346:            panic("%s: CHK(pmap %p): no SRMMU region table or bad pa: tblva=%p, tblpa=%x",
                   6347:                  s, pm, pm->pm_reg_ptps, pm->pm_reg_ptps_pa);
                   6348:
                   6349:        if (CPU_ISSUN4M &&
1.69      pk       6350:            (cpuinfo.ctx_tbl[0] != ((VA2PA((caddr_t)pm->pm_reg_ptps) >>
1.55      pk       6351:                                             SRMMU_PPNPASHIFT) | SRMMU_TEPTD)))
                   6352:            panic("%s: CHK(pmap %p): SRMMU region table at %x not installed "
                   6353:                  "for context %d", s, pm, pm->pm_reg_ptps_pa, 0);
                   6354: #endif
1.43      pk       6355:        for (vr = NUREG; vr < NUREG+NKREG; vr++) {
                   6356:                rp = &pm->pm_regmap[vr];
                   6357:                if (rp->rg_segmap == NULL)
                   6358:                        panic("%s: CHK(vr %d): nsegmap = %d; sp==NULL",
                   6359:                                s, vr, rp->rg_nsegmap);
                   6360:                if (rp->rg_nsegmap == 0)
                   6361:                        continue;
1.55      pk       6362: #if defined(SUN4M)
                   6363:                if (CPU_ISSUN4M && rp->rg_seg_ptps == NULL)
                   6364:                    panic("%s: CHK(vr %d): nsegmap=%d; no SRMMU segment table",
                   6365:                          s, vr, rp->rg_nsegmap);
                   6366:                if (CPU_ISSUN4M &&
                   6367:                    pm->pm_reg_ptps[vr] != ((VA2PA((caddr_t)rp->rg_seg_ptps) >>
                   6368:                                            SRMMU_PPNPASHIFT) | SRMMU_TEPTD))
                   6369:                    panic("%s: CHK(vr %d): SRMMU segtbl not installed",s,vr);
                   6370: #endif
1.72      pk       6371:                if (CPU_ISSUN4M) {
                   6372:                        n = NSEGRG;
                   6373:                } else {
                   6374:                        for (n = 0, vs = 0; vs < NSEGRG; vs++) {
                   6375:                                if (rp->rg_segmap[vs].sg_npte)
                   6376:                                        n++;
                   6377:                        }
1.43      pk       6378:                }
                   6379:                if (n != rp->rg_nsegmap)
1.66      christos 6380:                        printf("%s: kernel CHK(vr %d): inconsistent "
1.43      pk       6381:                                "# of pte's: %d, should be %d\n",
                   6382:                                s, vr, rp->rg_nsegmap, n);
                   6383:        }
1.53      christos 6384:        return;
1.43      pk       6385: }
                   6386: #endif
1.46      pk       6387:
                   6388: /*
                   6389:  * Return the number of pages that pmap_dumpmmu() will dump.
                   6390:  * For each pmeg in the MMU, we'll write NPTESG PTEs.
                   6391:  * The last page or two contains the headers and maps libkvm needs to bootstrap.
                   6392:  */
                   6393: int
                   6394: pmap_dumpsize()
                   6395: {
1.67      pk       6396:        long    sz;
                   6397:
                   6398:        sz = ALIGN(sizeof(kcore_seg_t)) + ALIGN(sizeof(cpu_kcore_hdr_t));
                   6399:        sz += npmemarr * sizeof(phys_ram_seg_t);
1.55      pk       6400:
                   6401:        if (CPU_ISSUN4OR4C)
1.67      pk       6402:                sz += (seginval + 1) * NPTESG * sizeof(int);
                   6403:
                   6404:        return (btoc(sz));
1.46      pk       6405: }
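                         /*
                          * The MMU data appended to the crash dump by pmap_dumpmmu()
                          * below is laid out as follows (as accounted for above):
                          *
                          *	kcore_seg_t		MI segment header
                          *	cpu_kcore_hdr_t		MD header: cputype, offsets,
                          *				npmeg, kernel segmap store
                          *	phys_ram_seg_t[nmemseg]	physical memory ranges
                          *	int[npmeg][NPTESG]	PTEs per pmeg (sun4/4c only)
                          */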
                   6406:
                   6407: /*
                   6408:  * Write the mmu contents to the dump device.
                   6409:  * This gets appended to the end of a crash dump since
1.55      pk       6410:  * there is no in-core copy of kernel memory mappings on a 4/4c machine.
1.46      pk       6411:  */
                   6412: int
                   6413: pmap_dumpmmu(dump, blkno)
                   6414:        register daddr_t blkno;
                   6415:        register int (*dump)    __P((dev_t, daddr_t, caddr_t, size_t));
                   6416: {
1.67      pk       6417:        kcore_seg_t     *ksegp;
                   6418:        cpu_kcore_hdr_t *kcpup;
                   6419:        phys_ram_seg_t  memseg;
                   6420:        register int    error = 0;
                   6421:        register int    i, memsegoffset, pmegoffset;
                   6422:        int             buffer[dbtob(1) / sizeof(int)];
                   6423:        int             *bp, *ep;
1.55      pk       6424: #if defined(SUN4C) || defined(SUN4)
1.67      pk       6425:        register int    pmeg;
1.55      pk       6426: #endif
1.46      pk       6427:
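                         /*
                          * EXPEDITE copies `n' bytes (a multiple of 4) from `p' into the
                          * staging buffer, flushing the buffer to the dump device and
                          * advancing `blkno' whenever a full disk block accumulates.
                          */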
1.67      pk       6428: #define EXPEDITE(p,n) do {                                             \
                   6429:        int *sp = (int *)(p);                                           \
                   6430:        int sz = (n);                                                   \
                   6431:        while (sz > 0) {                                                \
                   6432:                *bp++ = *sp++;                                          \
                   6433:                if (bp >= ep) {                                         \
                   6434:                        error = (*dump)(dumpdev, blkno,                 \
                   6435:                                        (caddr_t)buffer, dbtob(1));     \
                   6436:                        if (error != 0)                                 \
                   6437:                                return (error);                         \
                   6438:                        ++blkno;                                        \
                   6439:                        bp = buffer;                                    \
                   6440:                }                                                       \
                   6441:                sz -= 4;                                                \
                   6442:        }                                                               \
                   6443: } while (0)
                   6444:
                   6445:        setcontext(0);
                   6446:
                   6447:        /* Setup bookkeeping pointers */
                   6448:        bp = buffer;
                   6449:        ep = &buffer[sizeof(buffer) / sizeof(buffer[0])];
                   6450:
                   6451:        /* Fill in MI segment header */
                   6452:        ksegp = (kcore_seg_t *)bp;
                   6453:        CORE_SETMAGIC(*ksegp, KCORE_MAGIC, MID_MACHINE, CORE_CPU);
                   6454:        ksegp->c_size = ctob(pmap_dumpsize()) - ALIGN(sizeof(kcore_seg_t));
                   6455:
                   6456:        /* Fill in MD segment header (interpreted by MD part of libkvm) */
                   6457:        kcpup = (cpu_kcore_hdr_t *)((int)bp + ALIGN(sizeof(kcore_seg_t)));
                   6458:        kcpup->cputype = cputyp;
                   6459:        kcpup->nmemseg = npmemarr;
                   6460:        kcpup->memsegoffset = memsegoffset = ALIGN(sizeof(cpu_kcore_hdr_t));
                   6461:        kcpup->npmeg = (CPU_ISSUN4OR4C) ? seginval + 1 : 0;
                   6462:        kcpup->pmegoffset = pmegoffset =
                   6463:                memsegoffset + npmemarr * sizeof(phys_ram_seg_t);
                   6464:
                   6465:        /* Note: we have assumed everything fits in buffer[] so far... */
                   6466:        bp = (int *)&kcpup->segmap_store;
                   6467:        EXPEDITE(&kernel_segmap_store, sizeof(kernel_segmap_store));
                   6468:
                   6469:        /* Align storage for upcoming quad-aligned segment array */
                   6470:        while (bp != (int *)ALIGN(bp)) {
                   6471:                int dummy = 0;
                   6472:                EXPEDITE(&dummy, 4);
                   6473:        }
                   6474:        for (i = 0; i < npmemarr; i++) {
                   6475:                memseg.start = pmemarr[i].addr;
                   6476:                memseg.size = pmemarr[i].len;
                   6477:                EXPEDITE(&memseg, sizeof(phys_ram_seg_t));
                   6478:        }
                   6479:
                   6480:        if (CPU_ISSUN4M)
                   6481:                goto out;
1.55      pk       6482:
                   6483: #if defined(SUN4C) || defined(SUN4)
1.46      pk       6484:        /*
                   6485:         * dump page table entries
                   6486:         *
                   6487:         * We dump each pmeg in order (by segment number).  Since the MMU
                   6488:         * automatically maps the given virtual segment to a pmeg we must
                   6489:         * iterate over the segments by incrementing an unused segment slot
                   6490:         * in the MMU.  This fixed segment number is used in the virtual
                   6491:         * address argument to getpte().
                   6492:         */
1.55      pk       6493:
1.46      pk       6494:        /*
                   6495:         * Go through the pmegs and dump each one.
                   6496:         */
                   6497:        for (pmeg = 0; pmeg <= seginval; ++pmeg) {
                   6498:                register int va = 0;
                   6499:
                   6500:                setsegmap(va, pmeg);
                   6501:                i = NPTESG;
                   6502:                do {
1.67      pk       6503:                        int pte = getpte4(va);
                   6504:                        EXPEDITE(&pte, sizeof(pte));
1.46      pk       6505:                        va += NBPG;
                   6506:                } while (--i > 0);
                   6507:        }
                   6508:        setsegmap(0, seginval);
1.67      pk       6509: #endif
1.46      pk       6510:
1.67      pk       6511: out:
                   6512:        if (bp != buffer)
1.46      pk       6513:                error = (*dump)(dumpdev, blkno++, (caddr_t)buffer, dbtob(1));
                   6514:
                   6515:        return (error);
1.55      pk       6516: }
                   6517:
                   6518: #ifdef EXTREME_DEBUG
                   6519:
                   6520: static void test_region __P((int, int, int));
                   6521:
                   6522: void
                   6523: debug_pagetables()
                   6524: {
                   6525:        register int i;
                   6526:        register int *regtbl;
                   6527:        register int te;
                   6528:
1.66      christos 6529:        printf("\nncontext=%d. ",ncontext);
                   6530:        printf("Context table is at va 0x%x. Level 0 PTP: 0x%x\n",
1.69      pk       6531:               cpuinfo.ctx_tbl, cpuinfo.ctx_tbl[0]);
1.66      christos 6532:        printf("Context 0 region table is at va 0x%x, pa 0x%x. Contents:\n",
1.55      pk       6533:               pmap_kernel()->pm_reg_ptps, pmap_kernel()->pm_reg_ptps_pa);
                   6534:
                   6535:        regtbl = pmap_kernel()->pm_reg_ptps;
                   6536:
1.66      christos 6537:        printf("PROM vector is at 0x%x\n",promvec);
                   6538:        printf("PROM reboot routine is at 0x%x\n",promvec->pv_reboot);
                   6539:        printf("PROM abort routine is at 0x%x\n",promvec->pv_abort);
                   6540:        printf("PROM halt routine is at 0x%x\n",promvec->pv_halt);
1.55      pk       6541:
1.66      christos 6542:        printf("Testing region 0xfe: ");
1.55      pk       6543:        test_region(0xfe,0,16*1024*1024);
1.66      christos 6544:        printf("Testing region 0xff: ");
1.55      pk       6545:        test_region(0xff,0,16*1024*1024);
1.66      christos 6546:        printf("Testing kernel region 0xf8: ");
1.55      pk       6547:        test_region(0xf8, 4096, avail_start);
                   6548:        cngetc();
                   6549:
                   6550:        for (i = 0; i < SRMMU_L1SIZE; i++) {
                   6551:                te = regtbl[i];
                   6552:                if ((te & SRMMU_TETYPE) == SRMMU_TEINVALID)
                   6553:                    continue;
1.66      christos 6554:                printf("Region 0x%x: PTE=0x%x <%s> L2PA=0x%x kernL2VA=0x%x\n",
1.55      pk       6555:                       i, te, ((te & SRMMU_TETYPE) == SRMMU_TEPTE ? "pte" :
                   6556:                               ((te & SRMMU_TETYPE) == SRMMU_TEPTD ? "ptd" :
                   6557:                                ((te & SRMMU_TETYPE) == SRMMU_TEINVALID ?
                   6558:                                 "invalid" : "reserved"))),
                   6559:                       (te & ~0x3) << SRMMU_PPNPASHIFT,
                   6560:                       pmap_kernel()->pm_regmap[i].rg_seg_ptps);
                   6561:        }
1.66      christos 6562:        printf("Press q to halt...\n");
1.55      pk       6563:        if (cngetc()=='q')
                   6564:            callrom();
                   6565: }
                   6566:
                   6567: static u_int
                   6568: VA2PAsw(ctx, addr, pte)
                   6569:        register int ctx;
                   6570:        register caddr_t addr;
                   6571:        int *pte;
                   6572: {
                   6573:        register int *curtbl;
                   6574:        register int curpte;
                   6575:
                   6576: #ifdef EXTREME_EXTREME_DEBUG
1.66      christos 6577:        printf("Looking up addr 0x%x in context 0x%x\n",addr,ctx);
1.55      pk       6578: #endif
                   6579:        /* L0 */
1.69      pk       6580:        *pte = curpte = cpuinfo.ctx_tbl[ctx];
1.55      pk       6581: #ifdef EXTREME_EXTREME_DEBUG
1.66      christos 6582:        printf("Got L0 pte 0x%x\n", curpte);
1.55      pk       6583: #endif
                   6584:        if ((curpte & SRMMU_TETYPE) == SRMMU_TEPTE) {
                   6585:                return (((curpte & SRMMU_PPNMASK) << SRMMU_PPNPASHIFT) |
                   6586:                        ((u_int)addr & 0xffffffff));
                   6587:        }
                   6588:        if ((curpte & SRMMU_TETYPE) != SRMMU_TEPTD) {
1.66      christos 6589:                printf("Bad context table entry 0x%x for context 0x%x\n",
1.55      pk       6590:                       curpte, ctx);
                   6591:                return 0;
                   6592:        }
                   6593:        /* L1 */
                   6594:        curtbl = (int *)(((curpte & ~0x3) << 4) | (0xf8 << RGSHIFT)); /* correct for kernel */
                   6595:        *pte = curpte = curtbl[VA_VREG(addr)];
                   6596: #ifdef EXTREME_EXTREME_DEBUG
1.66      christos 6597:        printf("L1 table at 0x%x.\nGot L1 pte 0x%x\n",curtbl,curpte);
1.55      pk       6598: #endif
                   6599:        if ((curpte & SRMMU_TETYPE) == SRMMU_TEPTE)
                   6600:            return (((curpte & SRMMU_PPNMASK) << SRMMU_PPNPASHIFT) |
                   6601:                    ((u_int)addr & 0xffffff));
                   6602:        if ((curpte & SRMMU_TETYPE) != SRMMU_TEPTD) {
1.66      christos 6603:                printf("Bad region table entry 0x%x for region 0x%x\n",
1.55      pk       6604:                       curpte, VA_VREG(addr));
                   6605:                return 0;
                   6606:        }
                   6607:        /* L2 */
                   6608:        curtbl = (int *)(((curpte & ~0x3) << 4) | (0xf8 << RGSHIFT)); /* correct for kernel */
                   6609:        *pte = curpte = curtbl[VA_VSEG(addr)];
                   6610: #ifdef EXTREME_EXTREME_DEBUG
1.66      christos 6611:        printf("L2 table at 0x%x.\nGot L2 pte 0x%x\n",curtbl,curpte);
1.55      pk       6612: #endif
                   6613:        if ((curpte & SRMMU_TETYPE) == SRMMU_TEPTE)
                   6614:            return (((curpte & SRMMU_PPNMASK) << SRMMU_PPNPASHIFT) |
                   6615:                    ((u_int)addr & 0x3ffff));
                   6616:        if ((curpte & SRMMU_TETYPE) != SRMMU_TEPTD) {
1.66      christos 6617:                printf("Bad segment table entry 0x%x for reg 0x%x, seg 0x%x\n",
1.55      pk       6618:                       curpte, VA_VREG(addr), VA_VSEG(addr));
                   6619:                return 0;
                   6620:        }
                   6621:        /* L3 */
                   6622:        curtbl = (int *)(((curpte & ~0x3) << 4) | (0xf8 << RGSHIFT)); /* correct for kernel */
                   6623:        *pte = curpte = curtbl[VA_VPG(addr)];
                   6624: #ifdef EXTREME_EXTREME_DEBUG
1.66      christos 6625:        printf("L3 table at 0x%x.\nGot L3 pte 0x%x\n",curtbl,curpte);
1.55      pk       6626: #endif
                   6627:        if ((curpte & SRMMU_TETYPE) == SRMMU_TEPTE)
                   6628:            return (((curpte & SRMMU_PPNMASK) << SRMMU_PPNPASHIFT) |
                   6629:                    ((u_int)addr & 0xfff));
                   6630:        else {
1.66      christos 6631:                printf("Bad L3 pte 0x%x for reg 0x%x, seg 0x%x, pg 0x%x\n",
1.55      pk       6632:                       curpte, VA_VREG(addr), VA_VSEG(addr), VA_VPG(addr));
                   6633:                return 0;
                   6634:        }
1.66      christos 6635:        printf("Bizarreness with address 0x%x!\n",addr);
1.55      pk       6636: }
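                         /*
                          * For reference, the SRMMU splits a 32-bit virtual address into
                          * four fields, matching the four-level walk above:
                          *
                          *	bits 31-24	region index  (256 16MB regions, level 1)
                          *	bits 23-18	segment index (64 256KB segments, level 2)
                          *	bits 17-12	page index    (64 4KB pages, level 3)
                          *	bits 11-0	byte offset within the page
                          *
                          * hence the offset masks 0xffffff, 0x3ffff and 0xfff above.
                          */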
                   6637:
                   6638: static void test_region(reg, start, stop)
                   6639:        register int reg;
                   6640:        register int start, stop;
                   6641: {
                   6642:        register int i;
                   6643:        register int addr;
                   6644:        register int pte;
                   6645:        int ptesw;
                   6646: /*     int cnt=0;
                   6647: */
                   6648:
                   6649:        for (i = start; i < stop; i+= NBPG) {
                   6650:                addr = (reg << RGSHIFT) | i;
                   6651:                pte=lda(((u_int)(addr)) | ASI_SRMMUFP_LN, ASI_SRMMUFP);
                   6652:                if (pte) {
1.66      christos 6653: /*                     printf("Valid address 0x%x\n",addr);
1.55      pk       6654:                        if (++cnt == 20) {
                   6655:                                cngetc();
                   6656:                                cnt=0;
                   6657:                        }
                   6658: */
                   6659:                        if (VA2PA(addr) != VA2PAsw(0,addr,&ptesw)) {
1.66      christos 6660:                                printf("Mismatch at address 0x%x.\n",addr);
1.55      pk       6661:                                if (cngetc()=='q') break;
                   6662:                        }
                   6663:                        if (reg == 0xf8) /* kernel permissions are different */
                   6664:                            continue;
                   6665:                        if ((pte&SRMMU_PROT_MASK)!=(ptesw&SRMMU_PROT_MASK)) {
1.66      christos 6666:                                printf("Mismatched protections at address "
1.55      pk       6667:                                       "0x%x; pte=0x%x, ptesw=0x%x\n",
                   6668:                                       addr,pte,ptesw);
                   6669:                                if (cngetc()=='q') break;
                   6670:                        }
                   6671:                }
                   6672:        }
1.66      christos 6673:        printf("done.\n");
1.46      pk       6674: }
1.55      pk       6675:
                   6676:
                   6677: void print_fe_map(void)
                   6678: {
                   6679:        u_int i, pte;
                   6680:
1.66      christos 6681:        printf("map of region 0xfe:\n");
1.55      pk       6682:        for (i = 0xfe000000; i < 0xff000000; i+=4096) {
                   6683:                if (((pte = getpte4m(i)) & SRMMU_TETYPE) != SRMMU_TEPTE)
                   6684:                    continue;
1.66      christos 6685:        printf("0x%x -> 0x%x%08x (pte %x)\n", i, pte >> 28,
1.55      pk       6686:                       (pte & ~0xff) << 4, pte);
                   6687:        }
1.66      christos 6688:        printf("done\n");
1.55      pk       6689: }
                   6690:
                   6691: #endif
