
Annotation of src/sys/arch/sparc/sparc/pmap.c, Revision 1.74

1.74    ! pk          1: /*     $NetBSD: pmap.c,v 1.73 1997/03/21 15:19:29 pk Exp $ */
1.22      deraadt     2:
1.1       deraadt     3: /*
1.55      pk          4:  * Copyright (c) 1996
1.57      abrown      5:  *     The President and Fellows of Harvard College. All rights reserved.
1.1       deraadt     6:  * Copyright (c) 1992, 1993
                      7:  *     The Regents of the University of California.  All rights reserved.
                      8:  *
                      9:  * This software was developed by the Computer Systems Engineering group
                     10:  * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
                     11:  * contributed to Berkeley.
                     12:  *
                     13:  * All advertising materials mentioning features or use of this software
                     14:  * must display the following acknowledgement:
1.55      pk         15:  *     This product includes software developed by Harvard University.
1.1       deraadt    16:  *     This product includes software developed by the University of
                     17:  *     California, Lawrence Berkeley Laboratory.
                     18:  *
                     19:  * Redistribution and use in source and binary forms, with or without
                     20:  * modification, are permitted provided that the following conditions
                     21:  * are met:
1.55      pk         22:  *
1.1       deraadt    23:  * 1. Redistributions of source code must retain the above copyright
                     24:  *    notice, this list of conditions and the following disclaimer.
                     25:  * 2. Redistributions in binary form must reproduce the above copyright
                     26:  *    notice, this list of conditions and the following disclaimer in the
                     27:  *    documentation and/or other materials provided with the distribution.
                     28:  * 3. All advertising materials mentioning features or use of this software
                     29:  *    must display the following acknowledgement:
1.55      pk         30:  *     This product includes software developed by Aaron Brown and
                     31:  *     Harvard University.
                     32:  *      This product includes software developed by the University of
                     33:  *      California, Berkeley and its contributors.
1.1       deraadt    34:  * 4. Neither the name of the University nor the names of its contributors
                     35:  *    may be used to endorse or promote products derived from this software
                     36:  *    without specific prior written permission.
                     37:  *
                     38:  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
                     39:  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
                     40:  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
                     41:  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
                     42:  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
                     43:  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
                     44:  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
                     45:  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
                     46:  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
                     47:  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
                     48:  * SUCH DAMAGE.
                     49:  *
1.22      deraadt    50:  *     @(#)pmap.c      8.4 (Berkeley) 2/5/94
1.55      pk         51:  *
1.1       deraadt    52:  */
                     53:
                     54: /*
                     55:  * SPARC physical map management code.
                     56:  * Does not function on multiprocessors (yet).
                     57:  */
                     58:
                     59: #include <sys/param.h>
                     60: #include <sys/systm.h>
                     61: #include <sys/device.h>
                     62: #include <sys/proc.h>
1.43      pk         63: #include <sys/queue.h>
1.1       deraadt    64: #include <sys/malloc.h>
1.67      pk         65: #include <sys/exec.h>
                     66: #include <sys/core.h>
                     67: #include <sys/kcore.h>
1.1       deraadt    68:
                     69: #include <vm/vm.h>
                     70: #include <vm/vm_kern.h>
                     71: #include <vm/vm_prot.h>
                     72: #include <vm/vm_page.h>
                     73:
                     74: #include <machine/autoconf.h>
                     75: #include <machine/bsd_openprom.h>
1.19      deraadt    76: #include <machine/oldmon.h>
1.1       deraadt    77: #include <machine/cpu.h>
                     78: #include <machine/ctlreg.h>
1.67      pk         79: #include <machine/kcore.h>
1.1       deraadt    80:
                     81: #include <sparc/sparc/asm.h>
                     82: #include <sparc/sparc/cache.h>
1.3       deraadt    83: #include <sparc/sparc/vaddrs.h>
1.69      pk         84: #include <sparc/sparc/cpuvar.h>
1.1       deraadt    85:
                     86: #ifdef DEBUG
                     87: #define PTE_BITS "\20\40V\37W\36S\35NC\33IO\32U\31M"
1.55      pk         88: #define PTE_BITS4M "\20\10C\7M\6R\5ACC3\4ACC2\3ACC1\2TYP2\1TYP1"
1.1       deraadt    89: #endif
                     90:
                     91: /*
                     92:  * The SPARCstation offers us the following challenges:
                     93:  *
                     94:  *   1. A virtual address cache.  This is, strictly speaking, not
                     95:  *     part of the architecture, but the code below assumes one.
                     96:  *     This is a write-through cache on the 4c and a write-back cache
                     97:  *     on others.
                     98:  *
1.55      pk         99:  *   2. (4/4c only) An MMU that acts like a cache.  There is not enough
                    100:  *     space in the MMU to map everything all the time.  Instead, we need
1.1       deraadt   101:  *     to load the MMU with the `working set' of translations for each
1.55      pk        102:  *     process. The sun4m does not act like a cache; tables are maintained
                    103:  *     in physical memory.
1.1       deraadt   104:  *
                    105:  *   3.        Segmented virtual and physical spaces.  The upper 12 bits of
                    106:  *     a virtual address (the virtual segment) index a segment table,
                    107:  *     giving a physical segment.  The physical segment selects a
                    108:  *     `Page Map Entry Group' (PMEG) and the virtual page number---the
                    109:  *     next 5 or 6 bits of the virtual address---select the particular
                    110:  *     `Page Map Entry' for the page.  We call the latter a PTE and
                    111:  *     call each Page Map Entry Group a pmeg (for want of a better name).
1.55      pk        112:  *     Note that the sun4m has an unsegmented 36-bit physical space.
1.1       deraadt   113:  *
                    114:  *     Since there are no valid bits in the segment table, the only way
                    115:  *     to have an invalid segment is to make one full pmeg of invalid PTEs.
1.55      pk        116:  *     We use the last one (since the ROM does as well) (sun4/4c only)
1.1       deraadt   117:  *
                    118:  *   4. Discontiguous physical pages.  The Mach VM expects physical pages
                    119:  *     to be in one sequential lump.
                    120:  *
                    121:  *   5. The MMU is always on: it is not possible to disable it.  This is
                    122:  *     mainly a startup hassle.
                    123:  */
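/*
 * Editor's sketch (hypothetical, not part of the original source):
 * the 4/4c virtual-address split described in (3) above, assuming
 * 4KB pages and 64 PTEs per PMEG.  The kernel proper uses the
 * VA_VREG()/VA_VSEG() macros from pte.h; the EX_* names below are
 * illustrative stand-ins only.
 */
#define EX_PGSHIFT	12			/* 4KB page offset */
#define EX_SGSHIFT	(EX_PGSHIFT + 6)	/* 64 pages per segment */
#define EX_VSEG(va)	(((unsigned)(va) >> EX_SGSHIFT) & 0xfff) /* 12 bits */
#define EX_VPG(va)	(((unsigned)(va) >> EX_PGSHIFT) & 0x3f)  /* 6 bits */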
                    124:
                    125: struct pmap_stats {
                    126:        int     ps_unlink_pvfirst;      /* # of pv_unlinks on head */
                    127:        int     ps_unlink_pvsearch;     /* # of pv_unlink searches */
                    128:        int     ps_changeprots;         /* # of calls to changeprot */
                    129:        int     ps_useless_changeprots; /* # of changeprots for wiring */
                    130:        int     ps_enter_firstpv;       /* pv heads entered */
                    131:        int     ps_enter_secondpv;      /* pv nonheads entered */
                    132:        int     ps_useless_changewire;  /* useless wiring changes */
                    133:        int     ps_npg_prot_all;        /* # of active pages protected */
                    134:        int     ps_npg_prot_actual;     /* # pages actually affected */
1.70      pk        135:        int     ps_npmeg_free;          /* # of free pmegs */
                    136:        int     ps_npmeg_locked;        /* # of pmegs on locked list */
                    137:        int     ps_npmeg_lru;           /* # of pmegs on lru list */
1.1       deraadt   138: } pmap_stats;
                    139:
                    140: #ifdef DEBUG
                    141: #define        PDB_CREATE      0x0001
                    142: #define        PDB_DESTROY     0x0002
                    143: #define        PDB_REMOVE      0x0004
                    144: #define        PDB_CHANGEPROT  0x0008
                    145: #define        PDB_ENTER       0x0010
                    146:
                    147: #define        PDB_MMU_ALLOC   0x0100
                    148: #define        PDB_MMU_STEAL   0x0200
                    149: #define        PDB_CTX_ALLOC   0x0400
                    150: #define        PDB_CTX_STEAL   0x0800
1.43      pk        151: #define        PDB_MMUREG_ALLOC        0x1000
                    152: #define        PDB_MMUREG_STEAL        0x2000
1.55      pk        153: #define        PDB_CACHESTUFF  0x4000
1.72      pk        154: #define        PDB_SWITCHMAP   0x8000
                    155: #define        PDB_SANITYCHK   0x10000
1.55      pk        156: int    pmapdebug = 0;
1.1       deraadt   157: #endif
                    158:
1.55      pk        159: #if 0
1.10      deraadt   160: #define        splpmap() splimp()
1.55      pk        161: #endif
1.1       deraadt   162:
                    163: /*
                    164:  * First and last managed physical addresses.
                    165:  */
                    166: vm_offset_t    vm_first_phys, vm_num_phys;
                    167:
                    168: /*
                    169:  * For each managed physical page, there is a list of all currently
                    170:  * valid virtual mappings of that page.  Since there is usually one
                    171:  * (or zero) mapping per page, the table begins with an initial entry,
                    172:  * rather than a pointer; this head entry is empty iff its pv_pmap
                    173:  * field is NULL.
                    174:  *
                     175:  * Note that these are per machine-independent page (so there may be
                    176:  * only one for every two hardware pages, e.g.).  Since the virtual
                    177:  * address is aligned on a page boundary, the low order bits are free
                    178:  * for storing flags.  Only the head of each list has flags.
                    179:  *
                    180:  * THIS SHOULD BE PART OF THE CORE MAP
                    181:  */
                    182: struct pvlist {
                    183:        struct  pvlist *pv_next;        /* next pvlist, if any */
                    184:        struct  pmap *pv_pmap;          /* pmap of this va */
                    185:        int     pv_va;                  /* virtual address */
                    186:        int     pv_flags;               /* flags (below) */
                    187: };
                    188:
                    189: /*
                    190:  * Flags in pv_flags.  Note that PV_MOD must be 1 and PV_REF must be 2
                    191:  * since they must line up with the bits in the hardware PTEs (see pte.h).
1.55      pk        192:  * Sun4M bits are different (reversed), and at a different location in the
                    193:  * pte. Now why did they do that?
1.1       deraadt   194:  */
                    195: #define PV_MOD 1               /* page modified */
                    196: #define PV_REF 2               /* page referenced */
                    197: #define PV_NC  4               /* page cannot be cached */
1.55      pk        198: #define PV_REF4M       1       /* page referenced on sun4m */
                    199: #define PV_MOD4M       2       /* page modified on 4m (why reversed?!?) */
                    200: #define PV_C4M         4       /* page _can_ be cached on 4m */
1.1       deraadt   201: /*efine        PV_ALLF 7               ** all of the above */
                    202:
                    203: struct pvlist *pv_table;       /* array of entries, one per physical page */
                    204:
                    205: #define pvhead(pa)     (&pv_table[atop((pa) - vm_first_phys)])
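/*
 * Editor's sketch: walking the mapping list of a managed physical
 * page via pvhead() above.  The head entry is embedded in pv_table
 * and is empty iff pv_pmap == NULL; later entries chain through
 * pv_next.  count_mappings() is a hypothetical helper, not part of
 * the original source.
 */
static int
count_mappings(pa)
	vm_offset_t pa;
{
	register struct pvlist *pv;
	register int n = 0;

	for (pv = pvhead(pa); pv != NULL; pv = pv->pv_next)
		if (pv->pv_pmap != NULL)
			n++;
	return (n);
}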
                    206:
                    207: /*
                    208:  * Each virtual segment within each pmap is either valid or invalid.
                    209:  * It is valid if pm_npte[VA_VSEG(va)] is not 0.  This does not mean
                    210:  * it is in the MMU, however; that is true iff pm_segmap[VA_VSEG(va)]
                    211:  * does not point to the invalid PMEG.
                    212:  *
1.55      pk        213:  * In the older SPARC architectures (pre-4m), page tables are cached in the
                    214:  * MMU. The following discussion applies to these architectures:
                    215:  *
1.1       deraadt   216:  * If a virtual segment is valid and loaded, the correct PTEs appear
                    217:  * in the MMU only.  If it is valid and unloaded, the correct PTEs appear
                    218:  * in the pm_pte[VA_VSEG(va)] only.  However, some effort is made to keep
                    219:  * the software copies consistent enough with the MMU so that libkvm can
                    220:  * do user address translations.  In particular, pv_changepte() and
                    221:  * pmap_enu() maintain consistency, while less critical changes are
                    222:  * not maintained.  pm_pte[VA_VSEG(va)] always points to space for those
                    223:  * PTEs, unless this is the kernel pmap, in which case pm_pte[x] is not
                    224:  * used (sigh).
                    225:  *
                    226:  * Each PMEG in the MMU is either free or contains PTEs corresponding to
                    227:  * some pmap and virtual segment.  If it contains some PTEs, it also contains
                    228:  * reference and modify bits that belong in the pv_table.  If we need
                    229:  * to steal a PMEG from some process (if we need one and none are free)
                    230:  * we must copy the ref and mod bits, and update pm_segmap in the other
                    231:  * pmap to show that its virtual segment is no longer in the MMU.
                    232:  *
                    233:  * There are 128 PMEGs in a small Sun-4, of which only a few dozen are
                    234:  * tied down permanently, leaving `about' 100 to be spread among
                    235:  * running processes.  These are managed as an LRU cache.  Before
                    236:  * calling the VM paging code for a user page fault, the fault handler
                    237:  * calls mmu_load(pmap, va) to try to get a set of PTEs put into the
                    238:  * MMU.  mmu_load will check the validity of the segment and tell whether
                    239:  * it did something.
                    240:  *
                    241:  * Since I hate the name PMEG I call this data structure an `mmu entry'.
                    242:  * Each mmuentry is on exactly one of three `usage' lists: free, LRU,
                    243:  * or locked.  The LRU list is for user processes; the locked list is
                    244:  * for kernel entries; both are doubly linked queues headed by `mmuhd's.
                    245:  * The free list is a simple list, headed by a free list pointer.
1.55      pk        246:  *
                    247:  * In the sun4m architecture using the SPARC Reference MMU (SRMMU), three
                    248:  * levels of page tables are maintained in physical memory. We use the same
                    249:  * structures as with the 3-level old-style MMU (pm_regmap, pm_segmap,
                    250:  * rg_segmap, sg_pte, etc) to maintain kernel-edible page tables; we also
                    251:  * build a parallel set of physical tables that can be used by the MMU.
                    252:  * (XXX: This seems redundant, but is it necessary for the unified kernel?)
                    253:  *
                    254:  * If a virtual segment is valid, its entries will be in both parallel lists.
                    255:  * If it is not valid, then its entry in the kernel tables will be zero, and
                    256:  * its entry in the MMU tables will either be nonexistent or zero as well.
1.72      pk        257:  *
                    258:  * The Reference MMU generally uses a Translation Look-aside Buffer (TLB)
                    259:  * to cache the result of recently executed page table walks. When
                    260:  * manipulating page tables, we need to ensure consistency of the
                    261:  * in-memory and TLB copies of the page table entries. This is handled
                    262:  * by flushing (and invalidating) a TLB entry when appropriate before
                    263:  * altering an in-memory page table entry.
1.1       deraadt   264:  */
                    265: struct mmuentry {
1.43      pk        266:        TAILQ_ENTRY(mmuentry)   me_list;        /* usage list link */
                    267:        TAILQ_ENTRY(mmuentry)   me_pmchain;     /* pmap owner link */
1.1       deraadt   268:        struct  pmap *me_pmap;          /* pmap, if in use */
1.43      pk        269:        u_short me_vreg;                /* associated virtual region */
                    270:        u_short me_vseg;                /* associated virtual segment */
1.45      pk        271:        u_short me_cookie;              /* hardware SMEG/PMEG number */
1.1       deraadt   272: };
1.43      pk        273: struct mmuentry *mmusegments;  /* allocated in pmap_bootstrap */
                    274: struct mmuentry *mmuregions;   /* allocated in pmap_bootstrap */
1.1       deraadt   275:
1.43      pk        276: struct mmuhd segm_freelist, segm_lru, segm_locked;
                    277: struct mmuhd region_freelist, region_lru, region_locked;
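/*
 * Editor's sketch: how an mmu entry might move from the LRU list to
 * the locked list when its segment is wired down, using the queue(3)
 * macros that manage the lists above.  me_lock_example() is a
 * hypothetical helper, not part of the original source.
 */
static void
me_lock_example(me)
	register struct mmuentry *me;
{
	TAILQ_REMOVE(&segm_lru, me, me_list);
	TAILQ_INSERT_TAIL(&segm_locked, me, me_list);
}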
1.1       deraadt   278:
1.69      pk        279: int    seginval;               /* [4/4c] the invalid segment number */
                    280: int    reginval;               /* [4/3mmu] the invalid region number */
1.1       deraadt   281:
                    282: /*
1.55      pk        283:  * (sun4/4c)
1.1       deraadt   284:  * A context is simply a small number that dictates which set of 4096
                    285:  * segment map entries the MMU uses.  The Sun 4c has eight such sets.
                     286:  * These are allotted in an `almost MRU' fashion.
1.55      pk        287:  * (sun4m)
                    288:  * A context is simply a small number that indexes the context table, the
                    289:  * root-level page table mapping 4G areas. Each entry in this table points
                    290:  * to a 1st-level region table. A SPARC reference MMU will usually use 16
                    291:  * such contexts, but some offer as many as 64k contexts; the theoretical
                    292:  * maximum is 2^32 - 1, but this would create overlarge context tables.
1.1       deraadt   293:  *
                    294:  * Each context is either free or attached to a pmap.
                    295:  *
                    296:  * Since the virtual address cache is tagged by context, when we steal
                    297:  * a context we have to flush (that part of) the cache.
                    298:  */
                    299: union ctxinfo {
                    300:        union   ctxinfo *c_nextfree;    /* free list (if free) */
                    301:        struct  pmap *c_pmap;           /* pmap (if busy) */
                    302: };
1.69      pk        303:
                    304: #define ncontext       (cpuinfo.mmu_ncontext)
                    305: #define ctx_kick       (cpuinfo.ctx_kick)
                    306: #define ctx_kickdir    (cpuinfo.ctx_kickdir)
                    307: #define ctx_freelist   (cpuinfo.ctx_freelist)
                    308:
                    309: #if 0
1.1       deraadt   310: union ctxinfo *ctxinfo;                /* allocated in pmap_bootstrap */
                    311:
                    312: union  ctxinfo *ctx_freelist;  /* context free list */
                    313: int    ctx_kick;               /* allocation rover when none free */
                    314: int    ctx_kickdir;            /* ctx_kick roves both directions */
                    315:
1.69      pk        316: char   *ctxbusyvector;         /* [4m] tells what contexts are busy (XXX)*/
                    317: #endif
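/*
 * Editor's sketch: the fast path of context allocation -- pop a free
 * context off the list kept in cpuinfo and bind it to the pmap.  The
 * authoritative version is ctx_alloc() later in this file, which also
 * handles the `almost MRU' steal-and-flush path when none are free.
 */
static void
ctx_alloc_fastpath_example(pm)
	register struct pmap *pm;
{
	register union ctxinfo *c;

	if ((c = ctx_freelist) != NULL) {
		ctx_freelist = c->c_nextfree;
		c->c_pmap = pm;		/* real code also records the
					   context number in the pmap */
	}
}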
1.55      pk        318:
1.1       deraadt   319: caddr_t        vpage[2];               /* two reserved MD virtual pages */
1.41      mycroft   320: caddr_t        vmmap;                  /* one reserved MI vpage for /dev/mem */
1.55      pk        321: caddr_t        vdumppages;             /* 32KB worth of reserved dump pages */
1.1       deraadt   322:
1.69      pk        323: smeg_t         tregion;        /* [4/3mmu] Region for temporary mappings */
                    324:
1.43      pk        325: struct pmap    kernel_pmap_store;              /* the kernel's pmap */
                    326: struct regmap  kernel_regmap_store[NKREG];     /* the kernel's regmap */
                    327: struct segmap  kernel_segmap_store[NKREG*NSEGRG];/* the kernel's segmaps */
1.1       deraadt   328:
1.69      pk        329: #if defined(SUN4M)
1.55      pk        330: u_int  *kernel_regtable_store;         /* 1k of storage to map the kernel */
                    331: u_int  *kernel_segtable_store;         /* 2k of storage to map the kernel */
                    332: u_int  *kernel_pagtable_store;         /* 128k of storage to map the kernel */
                    333:
                    334: u_int  *kernel_iopte_table;            /* 64k of storage for iommu */
                    335: u_int  kernel_iopte_table_pa;
                    336: #endif
                    337:
1.30      pk        338: #define        MA_SIZE 32              /* size of memory descriptor arrays */
1.1       deraadt   339: struct memarr pmemarr[MA_SIZE];/* physical memory regions */
                    340: int    npmemarr;               /* number of entries in pmemarr */
1.29      pk        341: int    cpmemarr;               /* pmap_next_page() state */
                    342: /*static*/ vm_offset_t avail_start;    /* first free physical page */
                    343: /*static*/ vm_offset_t avail_end;      /* last free physical page */
                    344: /*static*/ vm_offset_t avail_next;     /* pmap_next_page() state:
                    345:                                           next free physical page */
                    346: /*static*/ vm_offset_t virtual_avail;  /* first free virtual page number */
                    347: /*static*/ vm_offset_t virtual_end;    /* last free virtual page number */
                    348:
1.45      pk        349: int mmu_has_hole;
                    350:
1.29      pk        351: vm_offset_t prom_vstart;       /* For /dev/kmem */
                    352: vm_offset_t prom_vend;
1.1       deraadt   353:
1.55      pk        354: #if defined(SUN4)
1.31      pk        355: /*
1.55      pk        356:  * [sun4]: segfixmask: on some systems (4/110) "getsegmap()" returns a
                    357:  * partly invalid value. getsegmap returns a 16 bit value on the sun4,
                     358:  * but only the first 8 or so bits are valid (the rest are *supposed*
                     359:  * to be zero). On the 4/110 the bits that are supposed to be zero are
                     360:  * all ones instead. e.g. KERNBASE is usually mapped by pmeg number zero:
                     361:  * on a 4/300 getsegmap(KERNBASE) == 0x0000, but
1.31      pk        362:  * on a 4/110 getsegmap(KERNBASE) == 0xff00.
                    363:  *
1.55      pk        364:  * This confuses mmu_reservemon() and causes it to not reserve the PROM's
                    365:  * pmegs. Then the PROM's pmegs get used during autoconfig and everything
1.31      pk        366:  * falls apart!  (not very fun to debug, BTW.)
                    367:  *
1.43      pk        368:  * solution: mask the invalid bits in the getsegmap macro.
1.31      pk        369:  */
                    370:
                    371: static u_long segfixmask = 0xffffffff; /* all bits valid to start */
1.55      pk        372: #else
                    373: #define segfixmask 0xffffffff  /* It's in getsegmap's scope */
1.31      pk        374: #endif
                    375:
1.1       deraadt   376: /*
                    377:  * pseudo-functions for mnemonic value
                    378:  */
1.71      pk        379: #define getcontext4()          lduba(AC_CONTEXT, ASI_CONTROL)
                    380: #define getcontext4m()         lda(SRMMU_CXR, ASI_SRMMU)
1.55      pk        381: #define getcontext()           (CPU_ISSUN4M \
1.71      pk        382:                                        ? getcontext4m() \
                    383:                                        : getcontext4()  )
                    384:
                    385: #define setcontext4(c)         stba(AC_CONTEXT, ASI_CONTROL, c)
                    386: #define setcontext4m(c)                sta(SRMMU_CXR, ASI_SRMMU, c)
1.55      pk        387: #define setcontext(c)          (CPU_ISSUN4M \
1.71      pk        388:                                        ? setcontext4m(c) \
                    389:                                        : setcontext4(c)  )
1.55      pk        390:
                    391: #define        getsegmap(va)           (CPU_ISSUN4C \
                    392:                                        ? lduba(va, ASI_SEGMAP) \
                    393:                                        : (lduha(va, ASI_SEGMAP) & segfixmask))
                    394: #define        setsegmap(va, pmeg)     (CPU_ISSUN4C \
                    395:                                        ? stba(va, ASI_SEGMAP, pmeg) \
                    396:                                        : stha(va, ASI_SEGMAP, pmeg))
                    397:
                    398: /* 3-level sun4 MMU only: */
                    399: #define        getregmap(va)           ((unsigned)lduha((va)+2, ASI_REGMAP) >> 8)
                     400: #define       setregmap(va, smeg)     stha((va)+2, ASI_REGMAP, ((smeg) << 8))
                    401:
                    402: #if defined(SUN4M)
                     403: #define getpte4m(va)           lda(((va) & 0xFFFFF000) | ASI_SRMMUFP_L3, \
                    404:                                    ASI_SRMMUFP)
1.72      pk        405: void   setpgt4m __P((int *ptep, int pte));
1.55      pk        406: void   setpte4m __P((vm_offset_t va, int pte));
                    407: void   setptesw4m __P((struct pmap *pm, vm_offset_t va, int pte));
                    408: u_int  getptesw4m __P((struct pmap *pm, vm_offset_t va));
                    409: #endif
                    410:
                    411: #if defined(SUN4) || defined(SUN4C)
                    412: #define        getpte4(va)             lda(va, ASI_PTE)
                    413: #define        setpte4(va, pte)        sta(va, ASI_PTE, pte)
                    414: #endif
                    415:
                    416: /* Function pointer messiness for supporting multiple sparc architectures
                    417:  * within a single kernel: notice that there are two versions of many of the
                    418:  * functions within this file/module, one for the sun4/sun4c and the other
                    419:  * for the sun4m. For performance reasons (since things like pte bits don't
                    420:  * map nicely between the two architectures), there are separate functions
                    421:  * rather than unified functions which test the cputyp variable. If only
                    422:  * one architecture is being used, then the non-suffixed function calls
                    423:  * are macro-translated into the appropriate xxx4_4c or xxx4m call. If
                    424:  * multiple architectures are defined, the calls translate to (*xxx_p),
                    425:  * i.e. they indirect through function pointers initialized as appropriate
                    426:  * to the run-time architecture in pmap_bootstrap. See also pmap.h.
                    427:  */
                    428:
                    429: #if defined(SUN4M)
1.71      pk        430: static void mmu_setup4m_L1 __P((int, struct pmap *));
                    431: static void mmu_setup4m_L2 __P((int, struct regmap *));
                    432: static void  mmu_setup4m_L3 __P((int, struct segmap *));
1.58      pk        433: /*static*/ void        mmu_reservemon4m __P((struct pmap *, caddr_t *));
                    434:
1.55      pk        435: /*static*/ void pmap_rmk4m __P((struct pmap *, vm_offset_t, vm_offset_t,
                    436:                           int, int));
                    437: /*static*/ void pmap_rmu4m __P((struct pmap *, vm_offset_t, vm_offset_t,
                    438:                           int, int));
                    439: /*static*/ void pmap_enk4m __P((struct pmap *, vm_offset_t, vm_prot_t,
                    440:                          int, struct pvlist *, int));
                    441: /*static*/ void pmap_enu4m __P((struct pmap *, vm_offset_t, vm_prot_t,
                    442:                          int, struct pvlist *, int));
                    443: /*static*/ void pv_changepte4m __P((struct pvlist *, int, int));
                    444: /*static*/ int  pv_syncflags4m __P((struct pvlist *));
                    445: /*static*/ int  pv_link4m __P((struct pvlist *, struct pmap *, vm_offset_t));
                    446: /*static*/ void pv_unlink4m __P((struct pvlist *, struct pmap *, vm_offset_t));
                    447: #endif
                    448:
                    449: #if defined(SUN4) || defined(SUN4C)
1.58      pk        450: /*static*/ void        mmu_reservemon4_4c __P((int *, int *));
1.55      pk        451: /*static*/ void pmap_rmk4_4c __P((struct pmap *, vm_offset_t, vm_offset_t,
                    452:                           int, int));
                    453: /*static*/ void pmap_rmu4_4c __P((struct pmap *, vm_offset_t, vm_offset_t,
                    454:                           int, int));
                    455: /*static*/ void pmap_enk4_4c __P((struct pmap *, vm_offset_t, vm_prot_t,
                    456:                          int, struct pvlist *, int));
                    457: /*static*/ void pmap_enu4_4c __P((struct pmap *, vm_offset_t, vm_prot_t,
                    458:                          int, struct pvlist *, int));
                    459: /*static*/ void pv_changepte4_4c __P((struct pvlist *, int, int));
                    460: /*static*/ int  pv_syncflags4_4c __P((struct pvlist *));
                    461: /*static*/ int  pv_link4_4c __P((struct pvlist *, struct pmap *, vm_offset_t));
                    462: /*static*/ void pv_unlink4_4c __P((struct pvlist *, struct pmap *, vm_offset_t));
                    463: #endif
                    464:
                    465: #if !defined(SUN4M) && (defined(SUN4) || defined(SUN4C))
                    466: #define                pmap_rmk        pmap_rmk4_4c
                    467: #define                pmap_rmu        pmap_rmu4_4c
                    468:
                    469: #elif defined(SUN4M) && !(defined(SUN4) || defined(SUN4C))
                    470: #define                pmap_rmk        pmap_rmk4m
                    471: #define                pmap_rmu        pmap_rmu4m
                    472:
                    473: #else  /* must use function pointers */
                    474:
                    475: /* function pointer declarations */
                    476: /* from pmap.h: */
                    477: void           (*pmap_clear_modify_p) __P((vm_offset_t pa));
                    478: void            (*pmap_clear_reference_p) __P((vm_offset_t pa));
                    479: void            (*pmap_copy_page_p) __P((vm_offset_t, vm_offset_t));
                    480: void            (*pmap_enter_p) __P((pmap_t,
                    481:                     vm_offset_t, vm_offset_t, vm_prot_t, boolean_t));
                    482: vm_offset_t     (*pmap_extract_p) __P((pmap_t, vm_offset_t));
                    483: boolean_t       (*pmap_is_modified_p) __P((vm_offset_t pa));
                    484: boolean_t       (*pmap_is_referenced_p) __P((vm_offset_t pa));
                    485: void            (*pmap_page_protect_p) __P((vm_offset_t, vm_prot_t));
                    486: void            (*pmap_protect_p) __P((pmap_t,
                    487:                     vm_offset_t, vm_offset_t, vm_prot_t));
                    488: void            (*pmap_zero_page_p) __P((vm_offset_t));
                    489: void           (*pmap_changeprot_p) __P((pmap_t, vm_offset_t,
                    490:                     vm_prot_t, int));
                    491: /* local: */
                    492: void           (*pmap_rmk_p) __P((struct pmap *, vm_offset_t, vm_offset_t,
                    493:                           int, int));
                    494: void           (*pmap_rmu_p) __P((struct pmap *, vm_offset_t, vm_offset_t,
                    495:                           int, int));
                    496:
                    497: #define                pmap_rmk        (*pmap_rmk_p)
                    498: #define                pmap_rmu        (*pmap_rmu_p)
                    499:
                    500: #endif
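/*
 * Editor's sketch (hypothetical): with more than one architecture
 * configured, pmap_bootstrap would aim the indirection slots above at
 * the run-time variant along these lines.
 */
#if defined(SUN4M) && (defined(SUN4) || defined(SUN4C))
static void
pmap_choose_funcs_example()
{
	if (CPU_ISSUN4M) {
		pmap_rmk_p = pmap_rmk4m;
		pmap_rmu_p = pmap_rmu4m;
	} else {
		pmap_rmk_p = pmap_rmk4_4c;
		pmap_rmu_p = pmap_rmu4_4c;
	}
}
#endif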
                    501:
                    502: /* --------------------------------------------------------------*/
                    503:
                    504: /*
                    505:  * Next we have some Sun4m-specific routines which have no 4/4c
                    506:  * counterparts, or which are 4/4c macros.
                    507:  */
                    508:
                    509: #if defined(SUN4M)
                    510:
                    511: /* Macros which implement SRMMU TLB flushing/invalidation */
                    512:
                     513: #define tlb_flush_page(va)    sta(((va) & ~0xfff) | ASI_SRMMUFP_L3, ASI_SRMMUFP,0)
                     514: #define tlb_flush_segment(vreg, vseg) sta(((vreg) << RGSHIFT) | ((vseg) << SGSHIFT)\
                     515:                                          | ASI_SRMMUFP_L2, ASI_SRMMUFP,0)
                    516: #define tlb_flush_context()   sta(ASI_SRMMUFP_L1, ASI_SRMMUFP, 0)
                    517: #define tlb_flush_all()              sta(ASI_SRMMUFP_LN, ASI_SRMMUFP, 0)
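/*
 * Editor's note: per the TLB discussion near the top of this file,
 * the update order for an in-memory PTE is: invalidate the TLB copy
 * first, then write the new entry, e.g.
 *
 *	tlb_flush_page(va);
 *	setpgt4m(ptep, newpte);
 *
 * (setpte4m() below performs exactly this sequence.)
 */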
                    518:
                    519: static u_int   VA2PA __P((caddr_t));
                    520:
                    521: /*
                    522:  * VA2PA(addr) -- converts a virtual address to a physical address using
                    523:  * the MMU's currently-installed page tables. As a side effect, the address
                    524:  * translation used may cause the associated pte to be encached. The correct
                    525:  * context for VA must be set before this is called.
                    526:  *
                    527:  * This routine should work with any level of mapping, as it is used
                    528:  * during bootup to interact with the ROM's initial L1 mapping of the kernel.
                    529:  */
                    530: static __inline u_int
                    531: VA2PA(addr)
                    532:        register caddr_t addr;
                    533: {
                    534:        register u_int pte;
                    535:
                    536:        /* we'll use that handy SRMMU flush/probe! %%%: make consts below! */
                    537:        /* Try each level in turn until we find a valid pte. Otherwise panic */
                    538:
                    539:        pte = lda(((u_int)addr & ~0xfff) | ASI_SRMMUFP_L3, ASI_SRMMUFP);
                    540:        if ((pte & SRMMU_TETYPE) == SRMMU_TEPTE)
                    541:            return (((pte & SRMMU_PPNMASK) << SRMMU_PPNPASHIFT) |
                    542:                    ((u_int)addr & 0xfff));
1.60      pk        543:
                    544:        /* A `TLB Flush Entire' is required before any L0, L1 or L2 probe */
                    545:        tlb_flush_all();
                    546:
1.55      pk        547:        pte = lda(((u_int)addr & ~0xfff) | ASI_SRMMUFP_L2, ASI_SRMMUFP);
                    548:        if ((pte & SRMMU_TETYPE) == SRMMU_TEPTE)
                    549:            return (((pte & SRMMU_PPNMASK) << SRMMU_PPNPASHIFT) |
                    550:                    ((u_int)addr & 0x3ffff));
                    551:        pte = lda(((u_int)addr & ~0xfff) | ASI_SRMMUFP_L1, ASI_SRMMUFP);
                    552:        if ((pte & SRMMU_TETYPE) == SRMMU_TEPTE)
                    553:            return (((pte & SRMMU_PPNMASK) << SRMMU_PPNPASHIFT) |
                    554:                    ((u_int)addr & 0xffffff));
                    555:        pte = lda(((u_int)addr & ~0xfff) | ASI_SRMMUFP_L0, ASI_SRMMUFP);
                    556:        if ((pte & SRMMU_TETYPE) == SRMMU_TEPTE)
                    557:            return (((pte & SRMMU_PPNMASK) << SRMMU_PPNPASHIFT) |
                    558:                    ((u_int)addr & 0xffffffff));
                    559:
                    560:        panic("VA2PA: Asked to translate unmapped VA %p", addr);
                    561: }
                    562:
                    563: /*
                    564:  * Get the page table entry (PTE) for va by looking it up in the software
                    565:  * page tables. These are the same tables that are used by the MMU; this
                    566:  * routine allows easy access to the page tables even if the context
                    567:  * corresponding to the table is not loaded or selected.
                    568:  * This routine should NOT be used if there is any chance that the desired
                    569:  * pte is in the TLB cache, since it will return stale data in that case.
                    570:  * For that case, and for general use, use getpte4m, which is much faster
                    571:  * and avoids walking in-memory page tables if the page is in the cache.
                    572:  * Note also that this routine only works if a kernel mapping has been
                    573:  * installed for the given page!
                    574:  */
                    575: __inline u_int
                    576: getptesw4m(pm, va)             /* Assumes L3 mapping! */
                    577:        register struct pmap *pm;
                    578:        register vm_offset_t va;
                    579: {
                    580:        register struct regmap *rm;
                    581:        register struct segmap *sm;
                    582:
                    583:        rm = &pm->pm_regmap[VA_VREG(va)];
                    584: #ifdef DEBUG
                    585:        if (rm == NULL)
1.58      pk        586:                panic("getptesw4m: no regmap entry");
1.55      pk        587: #endif
                    588:        sm = &rm->rg_segmap[VA_VSEG(va)];
                    589: #ifdef DEBUG
                    590:        if (sm == NULL)
1.58      pk        591:                panic("getptesw4m: no segmap");
1.55      pk        592: #endif
                    593:        return (sm->sg_pte[VA_SUN4M_VPG(va)]);  /* return pte */
                    594: }
                    595:
                    596: /*
                    597:  * Set the page table entry for va to pte. Only affects software MMU page-
                    598:  * tables (the in-core pagetables read by the MMU). Ignores TLB, and
                    599:  * thus should _not_ be called if the pte translation could be in the TLB.
                    600:  * In this case, use setpte4m().
                    601:  */
                    602: __inline void
                    603: setptesw4m(pm, va, pte)
                    604:        register struct pmap *pm;
                    605:        register vm_offset_t va;
                    606:        register int pte;
                    607: {
                    608:        register struct regmap *rm;
                    609:        register struct segmap *sm;
                    610:
                    611:        rm = &pm->pm_regmap[VA_VREG(va)];
                    612:
                    613: #ifdef DEBUG
                    614:        if (pm->pm_regmap == NULL || rm == NULL)
                     615:                panic("setptesw4m: no regmap entry");
                    616: #endif
                    617:        sm = &rm->rg_segmap[VA_VSEG(va)];
                    618:
                    619: #ifdef DEBUG
                    620:        if (rm->rg_segmap == NULL || sm == NULL || sm->sg_pte == NULL)
                     621:                panic("setptesw4m: no segmap for va %p", (caddr_t)va);
                    622: #endif
                    623:        sm->sg_pte[VA_SUN4M_VPG(va)] = pte; /* set new pte */
                    624: }
                    625:
1.72      pk        626: __inline void
                    627: setpgt4m(ptep, pte)
                    628:        int *ptep;
                    629:        int pte;
                    630: {
                    631:        *ptep = pte;
                    632:        if ((cpuinfo.flags & CPUFLG_CACHEPAGETABLES) == 0)
                    633:                cpuinfo.pcache_flush_line((int)ptep, VA2PA((caddr_t)ptep));
                    634: }
                    635:
1.55      pk        636: /* Set the page table entry for va to pte. Flushes cache. */
                    637: __inline void
                    638: setpte4m(va, pte)
                    639:        register vm_offset_t va;
                    640:        register int pte;
                    641: {
                    642:        register struct pmap *pm;
                    643:        register struct regmap *rm;
                    644:        register struct segmap *sm;
                    645:        register union ctxinfo *c;
                    646:
1.69      pk        647:        cache_flush_page(va);
1.55      pk        648:
1.58      pk        649:        /*
                    650:         * Now walk tables to find pte. We use ctxinfo to locate the pmap
1.55      pk        651:         * from the current context
                    652:         */
1.69      pk        653: #if 0
1.55      pk        654: #ifdef DEBUG
1.71      pk        655:        if (ctxbusyvector[getcontext4m()] == 0)
1.55      pk        656:                panic("setpte4m: no pmap for current context (%d)",
1.71      pk        657:                      getcontext4m());
1.1       deraadt   658: #endif
1.69      pk        659: #endif
1.71      pk        660:        c = &cpuinfo.ctxinfo[getcontext4m()];
1.55      pk        661:        pm = c->c_pmap;
                    662:
                    663:        /* Note: inline version of setptesw4m() */
                    664: #ifdef DEBUG
                    665:        if (pm->pm_regmap == NULL)
                    666:                panic("setpte4m: no regmap entry");
1.43      pk        667: #endif
1.55      pk        668:        rm = &pm->pm_regmap[VA_VREG(va)];
                    669:        sm = &rm->rg_segmap[VA_VSEG(va)];
1.1       deraadt   670:
1.55      pk        671: #ifdef DEBUG
                    672:        if (rm->rg_segmap == NULL || sm == NULL || sm->sg_pte == NULL)
                    673:                panic("setpte4m: no segmap for va %p", (caddr_t)va);
                    674: #endif
                    675:        tlb_flush_page(va);
1.72      pk        676:        setpgt4m(sm->sg_pte + VA_SUN4M_VPG(va), pte);
1.55      pk        677: }
1.72      pk        678:
1.55      pk        679: #endif /* 4m only */
1.1       deraadt   680:
                    681: /*----------------------------------------------------------------*/
                    682:
1.72      pk        683: /*
                    684:  * The following three macros are to be used in sun4/sun4c code only.
                    685:  */
1.69      pk        686: #if defined(SUN4_MMU3L)
                    687: #define CTX_USABLE(pm,rp) (                                    \
1.72      pk        688:                ((pm)->pm_ctx != NULL &&                        \
                    689:                 (!HASSUN4_MMU3L || (rp)->rg_smeg != reginval)) \
1.69      pk        690: )
1.43      pk        691: #else
1.55      pk        692: #define CTX_USABLE(pm,rp)      ((pm)->pm_ctx != NULL )
1.43      pk        693: #endif
                    694:
1.55      pk        695: #define GAP_WIDEN(pm,vr) do if (CPU_ISSUN4OR4C) {      \
                    696:        if (vr + 1 == pm->pm_gap_start)                 \
                    697:                pm->pm_gap_start = vr;                  \
                    698:        if (vr == pm->pm_gap_end)                       \
                    699:                pm->pm_gap_end = vr + 1;                \
1.43      pk        700: } while (0)
                    701:
1.55      pk        702: #define GAP_SHRINK(pm,vr) do if (CPU_ISSUN4OR4C) {                     \
1.43      pk        703:        register int x;                                                 \
                    704:        x = pm->pm_gap_start + (pm->pm_gap_end - pm->pm_gap_start) / 2; \
                    705:        if (vr > x) {                                                   \
                    706:                if (vr < pm->pm_gap_end)                                \
                    707:                        pm->pm_gap_end = vr;                            \
                    708:        } else {                                                        \
                    709:                if (vr >= pm->pm_gap_start && x != pm->pm_gap_start)    \
                    710:                        pm->pm_gap_start = vr + 1;                      \
                    711:        }                                                               \
                    712: } while (0)
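/*
 * Editor's sketch: a hypothetical scan that uses the gap bounds
 * maintained by GAP_WIDEN/GAP_SHRINK to skip the (usually large) run
 * of empty regions between a process' data and stack segments.
 * NUREG is assumed to be the number of user regions.
 */
static void
scan_regions_example(pm)
	register struct pmap *pm;
{
	register int vr;

	for (vr = 0; vr < NUREG; vr++) {
		if (vr == pm->pm_gap_start) {
			vr = pm->pm_gap_end;	/* first region past the gap */
			if (vr >= NUREG)
				break;
		}
		/* ... inspect pm->pm_regmap[vr] here ... */
	}
}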
                    713:
1.72      pk        714:
1.53      christos  715: static void sortm __P((struct memarr *, int));
                    716: void   ctx_alloc __P((struct pmap *));
                    717: void   ctx_free __P((struct pmap *));
                    718: void   pv_flushcache __P((struct pvlist *));
                    719: void   kvm_iocache __P((caddr_t, int));
                    720: #ifdef DEBUG
                    721: void   pm_check __P((char *, struct pmap *));
                    722: void   pm_check_k __P((char *, struct pmap *));
                    723: void   pm_check_u __P((char *, struct pmap *));
                    724: #endif
                    725:
                    726:
1.2       deraadt   727: /*
                    728:  * Sort a memory array by address.
                    729:  */
                    730: static void
                    731: sortm(mp, n)
                    732:        register struct memarr *mp;
                    733:        register int n;
                    734: {
                    735:        register struct memarr *mpj;
                    736:        register int i, j;
                    737:        register u_int addr, len;
                    738:
                    739:        /* Insertion sort.  This is O(n^2), but so what? */
                    740:        for (i = 1; i < n; i++) {
                    741:                /* save i'th entry */
                    742:                addr = mp[i].addr;
                    743:                len = mp[i].len;
                    744:                /* find j such that i'th entry goes before j'th */
                    745:                for (j = 0, mpj = mp; j < i; j++, mpj++)
                    746:                        if (addr < mpj->addr)
                    747:                                break;
                    748:                /* slide up any additional entries */
                    749:                ovbcopy(mpj, mpj + 1, (i - j) * sizeof(*mp));
                    750:                mpj->addr = addr;
                    751:                mpj->len = len;
                    752:        }
                    753: }
                    754:
1.29      pk        755: /*
                    756:  * For our convenience, vm_page.c implements:
                    757:  *       pmap_startup(), pmap_steal_memory()
                    758:  * using the functions:
                    759:  *       pmap_virtual_space(), pmap_free_pages(), pmap_next_page(),
                    760:  * which are much simpler to implement.
                    761:  */
                    762:
                    763: /*
                    764:  * How much virtual space does this kernel have?
                    765:  * (After mapping kernel text, data, etc.)
                    766:  */
                    767: void
                    768: pmap_virtual_space(v_start, v_end)
                    769:         vm_offset_t *v_start;
                    770:         vm_offset_t *v_end;
                    771: {
                    772:         *v_start = virtual_avail;
                    773:         *v_end   = virtual_end;
                    774: }
                    775:
                    776: /*
                    777:  * Return the number of page indices in the range of
                    778:  * possible return values for pmap_page_index() for
                    779:  * all addresses provided by pmap_next_page().  This
                    780:  * return value is used to allocate per-page data.
                    781:  *
                    782:  */
                    783: u_int
                    784: pmap_free_pages()
                    785: {
                     786:        long bytes;
                    787:        int nmem;
                    788:        register struct memarr *mp;
                    789:
1.36      pk        790:        bytes = -avail_start;
                    791:        for (mp = pmemarr, nmem = npmemarr; --nmem >= 0; mp++)
1.29      pk        792:                bytes += mp->len;
                    793:
                    794:         return atop(bytes);
                    795: }
                    796:
                    797: /*
                    798:  * If there are still physical pages available, put the address of
                    799:  * the next available one at paddr and return TRUE.  Otherwise,
                    800:  * return FALSE to indicate that there are no more free pages.
                    801:  * Note that avail_next is set to avail_start in pmap_bootstrap().
                    802:  *
                     803:  * Important:  The page indices of the pages returned here must be
                    804:  * in ascending order.
                    805:  */
                    806: int
                    807: pmap_next_page(paddr)
                    808:         vm_offset_t *paddr;
                    809: {
                    810:
                    811:         /* Is it time to skip over a hole? */
                    812:        if (avail_next == pmemarr[cpmemarr].addr + pmemarr[cpmemarr].len) {
                    813:                if (++cpmemarr == npmemarr)
                    814:                        return FALSE;
                    815:                avail_next = pmemarr[cpmemarr].addr;
                    816:        }
                    817:
                    818: #ifdef DIAGNOSTIC
                    819:         /* Any available memory remaining? */
                    820:         if (avail_next >= avail_end) {
1.30      pk        821:                panic("pmap_next_page: too much memory?!");
1.29      pk        822:        }
                    823: #endif
                    824:
                    825:         /* Have memory, will travel... */
                    826:         *paddr = avail_next;
                    827:         avail_next += NBPG;
                    828:         return TRUE;
                    829: }
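/*
 * Editor's sketch: how the MI startup code in vm_page.c might consume
 * the interface above (purely illustrative, under the conventions
 * noted in the comment before pmap_virtual_space()).
 */
static void
startup_example()
{
	vm_offset_t pa;

	while (pmap_next_page(&pa)) {
		/* hand the page at `pa' to the VM system, using
		   pmap_page_index(pa) to index per-page data */
	}
}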
                    830:
                    831: /*
                    832:  * pmap_page_index()
                    833:  *
                    834:  * Given a physical address, return a page index.
                    835:  *
                    836:  * There can be some values that we never return (i.e. a hole)
                    837:  * as long as the range of indices returned by this function
                    838:  * is smaller than the value returned by pmap_free_pages().
                    839:  * The returned index does NOT need to start at zero.
                    840:  *
                    841:  */
1.50      christos  842: int
1.29      pk        843: pmap_page_index(pa)
                    844:        vm_offset_t pa;
                    845: {
                    846:        int idx;
                    847:        int nmem;
                    848:        register struct memarr *mp;
                    849:
                    850: #ifdef  DIAGNOSTIC
                    851:        if (pa < avail_start || pa >= avail_end)
1.54      christos  852:                panic("pmap_page_index: pa=0x%lx", pa);
1.29      pk        853: #endif
                    854:
                    855:        for (idx = 0, mp = pmemarr, nmem = npmemarr; --nmem >= 0; mp++) {
                    856:                if (pa >= mp->addr && pa < mp->addr + mp->len)
                    857:                        break;
                    858:                idx += atop(mp->len);
                    859:        }
                    860:
                    861:        return (idx + atop(pa - mp->addr));
                    862: }
1.39      pk        863:
                    864: int
                    865: pmap_pa_exists(pa)
                    866:        vm_offset_t pa;
                    867: {
                    868:        register int nmem;
                    869:        register struct memarr *mp;
                    870:
                    871:        for (mp = pmemarr, nmem = npmemarr; --nmem >= 0; mp++) {
                    872:                if (pa >= mp->addr && pa < mp->addr + mp->len)
                    873:                        return 1;
                    874:        }
                    875:
                    876:        return 0;
                    877: }
1.29      pk        878:
1.1       deraadt   879: /* update pv_flags given a valid pte */
1.55      pk        880: #define        MR4_4C(pte) (((pte) >> PG_M_SHIFT) & (PV_MOD | PV_REF))
                    881: #define MR4M(pte) (((pte) >> PG_M_SHIFT4M) & (PV_MOD4M | PV_REF4M))
1.1       deraadt   882:
                    883: /*----------------------------------------------------------------*/
                    884:
                    885: /*
                    886:  * Agree with the monitor ROM as to how many MMU entries are
                    887:  * to be reserved, and map all of its segments into all contexts.
                    888:  *
                    889:  * Unfortunately, while the Version 0 PROM had a nice linked list of
                    890:  * taken virtual memory, the Version 2 PROM provides instead a convoluted
                    891:  * description of *free* virtual memory.  Rather than invert this, we
                    892:  * resort to two magic constants from the PROM vector description file.
                    893:  */
1.55      pk        894: #if defined(SUN4) || defined(SUN4C)
1.43      pk        895: void
1.58      pk        896: mmu_reservemon4_4c(nrp, nsp)
1.43      pk        897:        register int *nrp, *nsp;
1.1       deraadt   898: {
1.53      christos  899:        register u_int va = 0, eva = 0;
                    900:        register int mmuseg, i, nr, ns, vr, lastvr;
1.69      pk        901: #if defined(SUN4_MMU3L)
1.53      christos  902:        register int mmureg;
                    903: #endif
1.43      pk        904:        register struct regmap *rp;
1.1       deraadt   905:
1.55      pk        906: #if defined(SUN4M)
                    907:        if (CPU_ISSUN4M) {
                    908:                panic("mmu_reservemon4_4c called on Sun4M machine");
                    910:        }
                    911: #endif
                    912:
1.20      deraadt   913: #if defined(SUN4)
1.55      pk        914:        if (CPU_ISSUN4) {
1.29      pk        915:                prom_vstart = va = OLDMON_STARTVADDR;
                    916:                prom_vend = eva = OLDMON_ENDVADDR;
1.20      deraadt   917:        }
                    918: #endif
                    919: #if defined(SUN4C)
1.55      pk        920:        if (CPU_ISSUN4C) {
1.29      pk        921:                prom_vstart = va = OPENPROM_STARTVADDR;
                    922:                prom_vend = eva = OPENPROM_ENDVADDR;
1.19      deraadt   923:        }
1.20      deraadt   924: #endif
1.43      pk        925:        ns = *nsp;
                    926:        nr = *nrp;
                    927:        lastvr = 0;
1.1       deraadt   928:        while (va < eva) {
1.43      pk        929:                vr = VA_VREG(va);
                    930:                rp = &pmap_kernel()->pm_regmap[vr];
                    931:
1.69      pk        932: #if defined(SUN4_MMU3L)
                    933:                if (HASSUN4_MMU3L && vr != lastvr) {
1.43      pk        934:                        lastvr = vr;
                    935:                        mmureg = getregmap(va);
                    936:                        if (mmureg < nr)
                    937:                                rp->rg_smeg = nr = mmureg;
                    938:                        /*
                    939:                         * On 3-level MMU machines, we distribute regions,
                    940:                         * rather than segments, amongst the contexts.
                    941:                         */
                    942:                        for (i = ncontext; --i > 0;)
                    943:                                (*promvec->pv_setctxt)(i, (caddr_t)va, mmureg);
                    944:                }
                    945: #endif
1.1       deraadt   946:                mmuseg = getsegmap(va);
1.43      pk        947:                if (mmuseg < ns)
                    948:                        ns = mmuseg;
1.69      pk        949:
                    950:                if (!HASSUN4_MMU3L)
1.43      pk        951:                        for (i = ncontext; --i > 0;)
                    952:                                (*promvec->pv_setctxt)(i, (caddr_t)va, mmuseg);
                    953:
1.1       deraadt   954:                if (mmuseg == seginval) {
                    955:                        va += NBPSG;
                    956:                        continue;
                    957:                }
1.43      pk        958:                /*
                    959:                 * Another PROM segment. Enter into region map.
                    960:                 * Assume the entire segment is valid.
                    961:                 */
                    962:                rp->rg_nsegmap += 1;
                    963:                rp->rg_segmap[VA_VSEG(va)].sg_pmeg = mmuseg;
                    964:                rp->rg_segmap[VA_VSEG(va)].sg_npte = NPTESG;
                    965:
1.1       deraadt   966:                /* PROM maps its memory user-accessible: fix it. */
                    967:                for (i = NPTESG; --i >= 0; va += NBPG)
1.55      pk        968:                        setpte4(va, getpte4(va) | PG_S);
1.1       deraadt   969:        }
1.43      pk        970:        *nsp = ns;
                    971:        *nrp = nr;
                    972:        return;
1.1       deraadt   973: }
1.55      pk        974: #endif
                    975:
                    976: #if defined(SUN4M) /* Sun4M versions of above */
                    977:
                    978: /*
                    979:  * Take the monitor's initial page table layout, convert it to 3rd-level pte's
                    980:  * (it starts out as an L1 mapping), and install it along with a set of kernel
                    981:  * mapping tables as the kernel's initial page table setup. Also create and
                    982:  * enable a context table. I suppose we also want to block user-mode access
                    983:  * to the new kernel/ROM mappings.
                    984:  */
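
                          /*
                           * Background for the entry-type switches below (editorial):
                           * bits [1:0] of an SRMMU table entry (SRMMU_TETYPE) encode its
                           * type: invalid, PTD (a pointer to a next-level table), PTE (a
                           * terminal mapping), or reserved.  A PTE found above level 3
                           * maps an entire 4GB (context level), 16MB (level 1) or 256KB
                           * (level 2) chunk, and must be expanded into individual level-3
                           * PTEs; that expansion is what the mmu_setup4m_L{1,2,3}()
                           * routines below implement.
                           */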
                    985:
1.58      pk        986: /*
                    987:  * mmu_reservemon4m(): Copies the existing (ROM) page tables to kernel space,
1.55      pk        988:  * converting any L1/L2 PTEs to L3 PTEs. Does *not* copy the L1 entry mapping
1.71      pk        989:  * the kernel at KERNBASE (0xf8000000) since we don't want to map 16M of
1.55      pk        990:  * physical memory for the kernel. Thus the kernel must be installed later!
                    991:  * Also installs ROM mappings into the kernel pmap.
                    992:  * NOTE: This also revokes all user-mode access to the mapped regions.
                    993:  */
                    994: void
                    995: mmu_reservemon4m(kpmap, kmemtop)
                    996:        struct pmap *kpmap;
                    997:        register caddr_t *kmemtop; /* Note: this is a *virtual* address! */
                    998: {
1.71      pk        999:        unsigned int rom_ctxtbl;
1.55      pk       1000:        register int te;
1.69      pk       1001:        unsigned int mmupcrsave;
1.55      pk       1002:
1.69      pk       1003: /*XXX-GCC!*/mmupcrsave = 0;
1.55      pk       1004:
                   1005:        /*
                   1006:         * XXX: although the Sun4M can handle 36 bits of physical
                   1007:         * address space, we assume that all these page tables, etc
                   1008:         * are in the lower 4G (32-bits) of address space, i.e. out of I/O
                   1009:         * space. Eventually this should be changed to support the 36 bit
                   1010:         * physical addressing, in case some crazed ROM designer decides to
                   1011:         * stick the pagetables up there. In that case, we should use MMU
                   1012:         * transparent mode, (i.e. ASI 0x20 to 0x2f) to access
                   1013:         * physical memory.
                   1014:         */
                   1015:
1.71      pk       1016:        rom_ctxtbl = (lda(SRMMU_CXTPTR,ASI_SRMMU) << SRMMU_PPNPASHIFT);
1.55      pk       1017:
                   1018:        /* We're going to have to use MMU passthrough. If we're on a
                   1019:         * Viking MicroSparc without an mbus, we need to turn off traps
                   1020:         * and set the AC bit at 0x8000 in the MMU's control register. Ugh.
                   1021:         * XXX: Once we've done this, can we still access kernel vm?
                   1022:         */
1.69      pk       1023:        if (cpuinfo.cpu_vers == 4 && cpuinfo.mxcc) {
1.55      pk       1024:                sta(SRMMU_PCR, ASI_SRMMU,       /* set MMU AC bit */
1.69      pk       1025:                    ((mmupcrsave = lda(SRMMU_PCR,ASI_SRMMU)) | SRMMU_PCR_AC));
1.55      pk       1026:        }
1.69      pk       1027:
1.71      pk       1028:        te = lda(rom_ctxtbl, ASI_BYPASS);       /* i.e. context 0 */
1.55      pk       1029:        switch (te & SRMMU_TETYPE) {
1.62      pk       1030:        case SRMMU_TEINVALID:
1.69      pk       1031:                cpuinfo.ctx_tbl[0] = SRMMU_TEINVALID;
1.55      pk       1032:                panic("mmu_reservemon4m: no existing L0 mapping! (How are we running?)");
                   1033:                break;
1.62      pk       1034:        case SRMMU_TEPTE:
1.55      pk       1035: #ifdef DEBUG
1.66      christos 1036:                printf("mmu_reservemon4m: trying to remap 4G segment!\n");
1.55      pk       1037: #endif
                   1038:                panic("mmu_reservemon4m: can't handle ROM 4G page size");
                   1039:                /* XXX: Should make this work, however stupid it is */
                   1040:                break;
1.62      pk       1041:        case SRMMU_TEPTD:
1.71      pk       1042:                mmu_setup4m_L1(te, kpmap);
1.55      pk       1043:                break;
1.62      pk       1044:        default:
1.55      pk       1045:                panic("mmu_reservemon4m: unknown pagetable entry type");
                   1046:        }
                   1047:
1.69      pk       1048:        if (cpuinfo.cpu_vers == 4 && cpuinfo.mxcc) {
                   1049:                sta(SRMMU_PCR, ASI_SRMMU, mmupcrsave);
1.55      pk       1050:        }
                   1051: }
                   1052:
                   1053: void
1.71      pk       1054: mmu_setup4m_L1(regtblptd, kpmap)
1.55      pk       1055:        int regtblptd;          /* PTD for region table to be remapped */
                   1056:        struct pmap *kpmap;
                   1057: {
                   1058:        register unsigned int regtblrover;
                   1059:        register int i;
                   1060:        unsigned int te;
1.71      pk       1061:        struct regmap *rp;
1.55      pk       1062:        int j, k;
                   1063:
1.69      pk       1064:        /*
                   1065:         * Here we scan the region table to copy any entries which appear.
1.55      pk       1066:         * We are only concerned with regions in kernel space and above
                   1067:         * (i.e. regions 0xf8 to 0xff). We also ignore region 0xf8, since
                   1068:         * that is the 16MB L1 mapping that the ROM used to map the kernel
                   1069:         * in initially. Later, we will rebuild a new L3 mapping for the
                   1070:         * kernel and install it before switching to the new pagetables.
                   1071:         */
1.71      pk       1072:        regtblrover =
                   1073:                ((regtblptd & ~SRMMU_TETYPE) << SRMMU_PPNPASHIFT) +
                   1074:                (VA_VREG(KERNBASE)+1) * sizeof(long);   /* kernel only */
1.55      pk       1075:
                   1076:        for (i = VA_VREG(KERNBASE) + 1; i < SRMMU_L1SIZE;
                   1077:             i++, regtblrover += sizeof(long)) {
1.71      pk       1078:
                   1079:                /* The region we're dealing with */
                   1080:                rp = &kpmap->pm_regmap[i];
                   1081:
1.55      pk       1082:                te = lda(regtblrover, ASI_BYPASS);
                   1083:                switch(te & SRMMU_TETYPE) {
1.62      pk       1084:                case SRMMU_TEINVALID:
1.55      pk       1085:                        break;
1.71      pk       1086:
1.62      pk       1087:                case SRMMU_TEPTE:
1.55      pk       1088: #ifdef DEBUG
1.66      christos 1089:                        printf("mmu_setup4m_L1: converting region 0x%x from L1->L3\n", i);
1.55      pk       1090: #endif
1.71      pk       1091:                        /*
                    1092:                         * This region entry covers 16MB of memory -- or
                   1093:                         * (NSEGRG * NPTESG) pages -- which we must convert
                   1094:                         * into a 3-level description.
1.55      pk       1095:                         */
1.71      pk       1096:
1.55      pk       1097:                        for (j = 0; j < SRMMU_L2SIZE; j++) {
1.71      pk       1098:                                struct segmap *sp = &rp->rg_segmap[j];
1.55      pk       1099:
                   1100:                                for (k = 0; k < SRMMU_L3SIZE; k++) {
1.71      pk       1101:                                        sp->sg_npte++;
                   1102:                                        (sp->sg_pte)[k] =
1.55      pk       1103:                                            (te & SRMMU_L1PPNMASK) |
                   1104:                                            (j << SRMMU_L2PPNSHFT) |
                   1105:                                            (k << SRMMU_L3PPNSHFT) |
                   1106:                                            (te & SRMMU_PGBITSMSK) |
                   1107:                                            ((te & SRMMU_PROT_MASK) |
                   1108:                                             PPROT_U2S_OMASK) |
                   1109:                                            SRMMU_TEPTE;
                   1110:                                }
                   1111:                        }
                   1112:                        break;
1.71      pk       1113:
1.62      pk       1114:                case SRMMU_TEPTD:
1.71      pk       1115:                        mmu_setup4m_L2(te, rp);
1.55      pk       1116:                        break;
1.71      pk       1117:
1.62      pk       1118:                default:
1.55      pk       1119:                        panic("mmu_setup4m_L1: unknown pagetable entry type");
                   1120:                }
                   1121:        }
                   1122: }
                   1123:
                   1124: void
1.71      pk       1125: mmu_setup4m_L2(segtblptd, rp)
1.55      pk       1126:        int segtblptd;
1.71      pk       1127:        struct regmap *rp;
1.55      pk       1128: {
                   1129:        register unsigned int segtblrover;
                   1130:        register int i, k;
                   1131:        unsigned int te;
1.71      pk       1132:        struct segmap *sp;
1.55      pk       1133:
                   1134:        segtblrover = (segtblptd & ~SRMMU_TETYPE) << SRMMU_PPNPASHIFT;
                   1135:        for (i = 0; i < SRMMU_L2SIZE; i++, segtblrover += sizeof(long)) {
1.71      pk       1136:
                   1137:                sp = &rp->rg_segmap[i];
                   1138:
1.55      pk       1139:                te = lda(segtblrover, ASI_BYPASS);
                   1140:                switch(te & SRMMU_TETYPE) {
1.62      pk       1141:                case SRMMU_TEINVALID:
1.55      pk       1142:                        break;
1.71      pk       1143:
1.62      pk       1144:                case SRMMU_TEPTE:
1.55      pk       1145: #ifdef DEBUG
1.66      christos 1146:                        printf("mmu_setup4m_L2: converting L2 entry at segment 0x%x to L3\n", i);
1.55      pk       1147: #endif
1.71      pk       1148:                        /*
                   1149:                         * This segment entry covers 256KB of memory -- or
                   1150:                         * (NPTESG) pages -- which we must convert
                   1151:                         * into a 3-level description.
                   1152:                         */
1.55      pk       1153:                        for (k = 0; k < SRMMU_L3SIZE; k++) {
1.71      pk       1154:                                sp->sg_npte++;
                   1155:                                (sp->sg_pte)[k] =
1.55      pk       1156:                                    (te & SRMMU_L1PPNMASK) |
                   1157:                                    (te & SRMMU_L2PPNMASK) |
                   1158:                                    (k << SRMMU_L3PPNSHFT) |
                   1159:                                    (te & SRMMU_PGBITSMSK) |
                   1160:                                    ((te & SRMMU_PROT_MASK) |
                   1161:                                     PPROT_U2S_OMASK) |
                   1162:                                    SRMMU_TEPTE;
                   1163:                        }
                   1164:                        break;
1.71      pk       1165:
1.62      pk       1166:                case SRMMU_TEPTD:
1.71      pk       1167:                        mmu_setup4m_L3(te, sp);
1.55      pk       1168:                        break;
1.71      pk       1169:
1.62      pk       1170:                default:
1.55      pk       1171:                        panic("mmu_setup4m_L2: unknown pagetable entry type");
                   1172:                }
                   1173:        }
                   1174: }
                   1175:
1.71      pk       1176: void
                   1177: mmu_setup4m_L3(pagtblptd, sp)
1.55      pk       1178:        register int pagtblptd;
1.71      pk       1179:        struct segmap *sp;
1.55      pk       1180: {
                   1181:        register unsigned int pagtblrover;
1.71      pk       1182:        register int i;
1.55      pk       1183:        register unsigned int te;
                   1184:
                   1185:        pagtblrover = (pagtblptd & ~SRMMU_TETYPE) << SRMMU_PPNPASHIFT;
                   1186:        for (i = 0; i < SRMMU_L3SIZE; i++, pagtblrover += sizeof(long)) {
                   1187:                te = lda(pagtblrover, ASI_BYPASS);
                   1188:                switch(te & SRMMU_TETYPE) {
1.62      pk       1189:                case SRMMU_TEINVALID:
1.55      pk       1190:                        break;
1.62      pk       1191:                case SRMMU_TEPTE:
1.71      pk       1192:                        sp->sg_npte++;
                   1193:                        sp->sg_pte[i] = te | PPROT_U2S_OMASK;
1.55      pk       1194:                        break;
1.62      pk       1195:                case SRMMU_TEPTD:
1.55      pk       1196:                        panic("mmu_setup4m_L3: PTD found in L3 page table");
1.62      pk       1197:                default:
1.55      pk       1198:                        panic("mmu_setup4m_L3: unknown pagetable entry type");
                   1199:                }
                   1200:        }
                   1201: }
                   1202: #endif /* defined SUN4M */
1.1       deraadt  1203:
                   1204: /*----------------------------------------------------------------*/
                   1205:
                   1206: /*
                   1207:  * MMU management.
                   1208:  */
1.43      pk       1209: struct mmuentry *me_alloc __P((struct mmuhd *, struct pmap *, int, int));
                   1210: void           me_free __P((struct pmap *, u_int));
                   1211: struct mmuentry        *region_alloc __P((struct mmuhd *, struct pmap *, int));
                   1212: void           region_free __P((struct pmap *, u_int));
1.1       deraadt  1213:
                   1214: /*
                   1215:  * Change contexts.  We need the old context number as well as the new
                   1216:  * one.  If the context is changing, we must write all user windows
                   1217:  * first, lest an interrupt cause them to be written to the (other)
                   1218:  * user whose context we set here.
                   1219:  */
                   1220: #define        CHANGE_CONTEXTS(old, new) \
                   1221:        if ((old) != (new)) { \
                   1222:                write_user_windows(); \
                   1223:                setcontext(new); \
                   1224:        }
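
                          /*
                           * Typical use, as in me_alloc()/region_alloc() below: save the
                           * current context, switch (flushing user windows only if a
                           * switch actually happens), do the work, then restore:
                           *
                           *      ctx = getcontext();
                           *      CHANGE_CONTEXTS(ctx, pm->pm_ctxnum);
                           *      ... inspect or update the mappings ...
                           *      setcontext(ctx);
                           */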
                   1225:
1.55      pk       1226: #if defined(SUN4) || defined(SUN4C) /* This is old sun MMU stuff */
1.1       deraadt  1227: /*
                   1228:  * Allocate an MMU entry (i.e., a PMEG).
                   1229:  * If necessary, steal one from someone else.
                   1230:  * Put it on the tail of the given queue
                   1231:  * (which is either the LRU list or the locked list).
                   1232:  * The locked list is not actually ordered, but this is easiest.
                   1233:  * Also put it on the given (new) pmap's chain,
                   1234:  * enter its pmeg number into that pmap's segmap,
                   1235:  * and store the pmeg's new virtual segment number (me->me_vseg).
                   1236:  *
                   1237:  * This routine is large and complicated, but it must be fast
                   1238:  * since it implements the dynamic allocation of MMU entries.
                   1239:  */
                   1240: struct mmuentry *
1.43      pk       1241: me_alloc(mh, newpm, newvreg, newvseg)
1.1       deraadt  1242:        register struct mmuhd *mh;
                   1243:        register struct pmap *newpm;
1.43      pk       1244:        register int newvreg, newvseg;
1.1       deraadt  1245: {
                   1246:        register struct mmuentry *me;
                   1247:        register struct pmap *pm;
                   1248:        register int i, va, pa, *pte, tpte;
                   1249:        int ctx;
1.43      pk       1250:        struct regmap *rp;
                   1251:        struct segmap *sp;
1.1       deraadt  1252:
                   1253:        /* try free list first */
1.43      pk       1254:        if ((me = segm_freelist.tqh_first) != NULL) {
                   1255:                TAILQ_REMOVE(&segm_freelist, me, me_list);
1.1       deraadt  1256: #ifdef DEBUG
                   1257:                if (me->me_pmap != NULL)
                   1258:                        panic("me_alloc: freelist entry has pmap");
                   1259:                if (pmapdebug & PDB_MMU_ALLOC)
1.66      christos 1260:                        printf("me_alloc: got pmeg %d\n", me->me_cookie);
1.1       deraadt  1261: #endif
1.43      pk       1262:                TAILQ_INSERT_TAIL(mh, me, me_list);
1.1       deraadt  1263:
                   1264:                /* onto on pmap chain; pmap is already locked, if needed */
1.43      pk       1265:                TAILQ_INSERT_TAIL(&newpm->pm_seglist, me, me_pmchain);
1.70      pk       1266: #ifdef DIAGNOSTIC
                   1267:                pmap_stats.ps_npmeg_free--;
                   1268:                if (mh == &segm_locked)
                   1269:                        pmap_stats.ps_npmeg_locked++;
                   1270:                else
                   1271:                        pmap_stats.ps_npmeg_lru++;
                   1272: #endif
1.1       deraadt  1273:
                   1274:                /* into pmap segment table, with backpointers */
1.43      pk       1275:                newpm->pm_regmap[newvreg].rg_segmap[newvseg].sg_pmeg = me->me_cookie;
1.1       deraadt  1276:                me->me_pmap = newpm;
                   1277:                me->me_vseg = newvseg;
1.43      pk       1278:                me->me_vreg = newvreg;
1.1       deraadt  1279:
                   1280:                return (me);
                   1281:        }
                   1282:
                   1283:        /* no luck, take head of LRU list */
1.43      pk       1284:        if ((me = segm_lru.tqh_first) == NULL)
1.1       deraadt  1285:                panic("me_alloc: all pmegs gone");
1.43      pk       1286:
1.1       deraadt  1287:        pm = me->me_pmap;
                   1288:        if (pm == NULL)
                   1289:                panic("me_alloc: LRU entry has no pmap");
1.42      mycroft  1290:        if (pm == pmap_kernel())
1.1       deraadt  1291:                panic("me_alloc: stealing from kernel");
1.12      pk       1292: #ifdef DEBUG
1.1       deraadt  1293:        if (pmapdebug & (PDB_MMU_ALLOC | PDB_MMU_STEAL))
1.66      christos 1294:                printf("me_alloc: stealing pmeg %x from pmap %p\n",
1.43      pk       1295:                    me->me_cookie, pm);
1.1       deraadt  1296: #endif
                   1297:        /*
                   1298:         * Remove from LRU list, and insert at end of new list
                   1299:         * (probably the LRU list again, but so what?).
                   1300:         */
1.43      pk       1301:        TAILQ_REMOVE(&segm_lru, me, me_list);
                   1302:        TAILQ_INSERT_TAIL(mh, me, me_list);
                   1303:
1.70      pk       1304: #ifdef DIAGNOSTIC
                   1305:        if (mh == &segm_locked) {
                   1306:                pmap_stats.ps_npmeg_lru--;
                   1307:                pmap_stats.ps_npmeg_locked++;
                   1308:        }
                   1309: #endif
                   1310:
1.43      pk       1311:        rp = &pm->pm_regmap[me->me_vreg];
                   1312:        if (rp->rg_segmap == NULL)
                   1313:                panic("me_alloc: LRU entry's pmap has no segments");
                   1314:        sp = &rp->rg_segmap[me->me_vseg];
                   1315:        pte = sp->sg_pte;
                   1316:        if (pte == NULL)
                   1317:                panic("me_alloc: LRU entry's pmap has no ptes");
1.1       deraadt  1318:
                   1319:        /*
                   1320:         * The PMEG must be mapped into some context so that we can
                   1321:         * read its PTEs.  Use its current context if it has one;
                   1322:         * if not, and since context 0 is reserved for the kernel,
                   1323:         * the simplest method is to switch to 0 and map the PMEG
                   1324:         * to virtual address 0---which, being a user space address,
                   1325:         * is by definition not in use.
                   1326:         *
                   1327:         * XXX for ncpus>1 must use per-cpu VA?
                   1328:         * XXX do not have to flush cache immediately
                   1329:         */
1.71      pk       1330:        ctx = getcontext4();
1.43      pk       1331:        if (CTX_USABLE(pm,rp)) {
1.1       deraadt  1332:                CHANGE_CONTEXTS(ctx, pm->pm_ctxnum);
1.69      pk       1333:                cache_flush_segment(me->me_vreg, me->me_vseg);
1.43      pk       1334:                va = VSTOVA(me->me_vreg,me->me_vseg);
1.1       deraadt  1335:        } else {
                   1336:                CHANGE_CONTEXTS(ctx, 0);
1.69      pk       1337:                if (HASSUN4_MMU3L)
1.43      pk       1338:                        setregmap(0, tregion);
                   1339:                setsegmap(0, me->me_cookie);
1.1       deraadt  1340:                /*
                   1341:                 * No cache flush needed: it happened earlier when
                   1342:                 * the old context was taken.
                   1343:                 */
                   1344:                va = 0;
                   1345:        }
                   1346:
                   1347:        /*
                   1348:         * Record reference and modify bits for each page,
                   1349:         * and copy PTEs into kernel memory so that they can
                   1350:         * be reloaded later.
                   1351:         */
                   1352:        i = NPTESG;
                   1353:        do {
1.55      pk       1354:                tpte = getpte4(va);
1.33      pk       1355:                if ((tpte & (PG_V | PG_TYPE)) == (PG_V | PG_OBMEM)) {
1.60      pk       1356:                        pa = ptoa(tpte & PG_PFNUM);
1.1       deraadt  1357:                        if (managed(pa))
1.55      pk       1358:                                pvhead(pa)->pv_flags |= MR4_4C(tpte);
1.1       deraadt  1359:                }
                   1360:                *pte++ = tpte & ~(PG_U|PG_M);
                   1361:                va += NBPG;
                   1362:        } while (--i > 0);
                   1363:
                   1364:        /* update segment tables */
                   1365:        simple_lock(&pm->pm_lock); /* what if other cpu takes mmuentry ?? */
1.43      pk       1366:        if (CTX_USABLE(pm,rp))
                   1367:                setsegmap(VSTOVA(me->me_vreg,me->me_vseg), seginval);
                   1368:        sp->sg_pmeg = seginval;
1.1       deraadt  1369:
                   1370:        /* off old pmap chain */
1.43      pk       1371:        TAILQ_REMOVE(&pm->pm_seglist, me, me_pmchain);
1.1       deraadt  1372:        simple_unlock(&pm->pm_lock);
1.71      pk       1373:        setcontext4(ctx);       /* done with old context */
1.1       deraadt  1374:
                   1375:        /* onto new pmap chain; new pmap is already locked, if needed */
1.43      pk       1376:        TAILQ_INSERT_TAIL(&newpm->pm_seglist, me, me_pmchain);
1.1       deraadt  1377:
                   1378:        /* into new segment table, with backpointers */
1.43      pk       1379:        newpm->pm_regmap[newvreg].rg_segmap[newvseg].sg_pmeg = me->me_cookie;
1.1       deraadt  1380:        me->me_pmap = newpm;
                   1381:        me->me_vseg = newvseg;
1.43      pk       1382:        me->me_vreg = newvreg;
1.1       deraadt  1383:
                   1384:        return (me);
                   1385: }
                   1386:
                   1387: /*
                   1388:  * Free an MMU entry.
                   1389:  *
                   1390:  * Assumes the corresponding pmap is already locked.
                   1391:  * Does NOT flush cache, but does record ref and mod bits.
                   1392:  * The rest of each PTE is discarded.
                   1393:  * CALLER MUST SET CONTEXT to pm->pm_ctxnum (if pmap has
                   1394:  * a context) or to 0 (if not).  Caller must also update
                   1395:  * pm->pm_segmap and (possibly) the hardware.
                   1396:  */
                   1397: void
                   1398: me_free(pm, pmeg)
                   1399:        register struct pmap *pm;
                   1400:        register u_int pmeg;
                   1401: {
1.43      pk       1402:        register struct mmuentry *me = &mmusegments[pmeg];
1.1       deraadt  1403:        register int i, va, pa, tpte;
1.43      pk       1404:        register int vr;
                   1405:        register struct regmap *rp;
                   1406:
                   1407:        vr = me->me_vreg;
1.1       deraadt  1408:
                   1409: #ifdef DEBUG
                   1410:        if (pmapdebug & PDB_MMU_ALLOC)
1.66      christos 1411:                printf("me_free: freeing pmeg %d from pmap %p\n",
1.43      pk       1412:                    me->me_cookie, pm);
                   1413:        if (me->me_cookie != pmeg)
1.1       deraadt  1414:                panic("me_free: wrong mmuentry");
                   1415:        if (pm != me->me_pmap)
                   1416:                panic("me_free: pm != me_pmap");
                   1417: #endif
                   1418:
1.43      pk       1419:        rp = &pm->pm_regmap[vr];
                   1420:
1.1       deraadt  1421:        /* just like me_alloc, but no cache flush, and context already set */
1.43      pk       1422:        if (CTX_USABLE(pm,rp)) {
                   1423:                va = VSTOVA(vr,me->me_vseg);
                   1424:        } else {
                   1425: #ifdef DEBUG
1.71      pk       1426:                if (getcontext4() != 0) panic("me_free: ctx != 0");
1.43      pk       1427: #endif
1.69      pk       1428:                if (HASSUN4_MMU3L)
1.43      pk       1429:                        setregmap(0, tregion);
                   1430:                setsegmap(0, me->me_cookie);
1.1       deraadt  1431:                va = 0;
                   1432:        }
                   1433:        i = NPTESG;
                   1434:        do {
1.55      pk       1435:                tpte = getpte4(va);
1.33      pk       1436:                if ((tpte & (PG_V | PG_TYPE)) == (PG_V | PG_OBMEM)) {
1.60      pk       1437:                        pa = ptoa(tpte & PG_PFNUM);
1.1       deraadt  1438:                        if (managed(pa))
1.55      pk       1439:                                pvhead(pa)->pv_flags |= MR4_4C(tpte);
1.1       deraadt  1440:                }
                   1441:                va += NBPG;
                   1442:        } while (--i > 0);
                   1443:
                   1444:        /* take mmu entry off pmap chain */
1.43      pk       1445:        TAILQ_REMOVE(&pm->pm_seglist, me, me_pmchain);
                   1446:        /* ... and remove from segment map */
                   1447:        if (rp->rg_segmap == NULL)
                   1448:                panic("me_free: no segments in pmap");
                   1449:        rp->rg_segmap[me->me_vseg].sg_pmeg = seginval;
                   1450:
                   1451:        /* off LRU or lock chain */
                   1452:        if (pm == pmap_kernel()) {
                   1453:                TAILQ_REMOVE(&segm_locked, me, me_list);
1.70      pk       1454: #ifdef DIAGNOSTIC
                   1455:                pmap_stats.ps_npmeg_locked--;
                   1456: #endif
1.43      pk       1457:        } else {
                   1458:                TAILQ_REMOVE(&segm_lru, me, me_list);
1.70      pk       1459: #ifdef DIAGNOSTIC
                   1460:                pmap_stats.ps_npmeg_lru--;
                   1461: #endif
1.43      pk       1462:        }
                   1463:
                   1464:        /* no associated pmap; on free list */
                   1465:        me->me_pmap = NULL;
                   1466:        TAILQ_INSERT_TAIL(&segm_freelist, me, me_list);
1.70      pk       1467: #ifdef DIAGNOSTIC
                   1468:        pmap_stats.ps_npmeg_free++;
                   1469: #endif
1.43      pk       1470: }
                   1471:
1.69      pk       1472: #if defined(SUN4_MMU3L)
1.43      pk       1473:
                   1474: /* XXX - Merge with segm_alloc/segm_free ? */
                   1475:
                   1476: struct mmuentry *
                   1477: region_alloc(mh, newpm, newvr)
                   1478:        register struct mmuhd *mh;
                   1479:        register struct pmap *newpm;
                   1480:        register int newvr;
                   1481: {
                   1482:        register struct mmuentry *me;
                   1483:        register struct pmap *pm;
                   1484:        int ctx;
                   1485:        struct regmap *rp;
                   1486:
                   1487:        /* try free list first */
                   1488:        if ((me = region_freelist.tqh_first) != NULL) {
                   1489:                TAILQ_REMOVE(&region_freelist, me, me_list);
                   1490: #ifdef DEBUG
                   1491:                if (me->me_pmap != NULL)
                   1492:                        panic("region_alloc: freelist entry has pmap");
                   1493:                if (pmapdebug & PDB_MMUREG_ALLOC)
1.66      christos 1494:                        printf("region_alloc: got smeg %x\n", me->me_cookie);
1.43      pk       1495: #endif
                   1496:                TAILQ_INSERT_TAIL(mh, me, me_list);
                   1497:
                    1498:                /* onto pmap chain; pmap is already locked, if needed */
                   1499:                TAILQ_INSERT_TAIL(&newpm->pm_reglist, me, me_pmchain);
                   1500:
                   1501:                /* into pmap segment table, with backpointers */
                   1502:                newpm->pm_regmap[newvr].rg_smeg = me->me_cookie;
                   1503:                me->me_pmap = newpm;
                   1504:                me->me_vreg = newvr;
                   1505:
                   1506:                return (me);
                   1507:        }
                   1508:
                   1509:        /* no luck, take head of LRU list */
                   1510:        if ((me = region_lru.tqh_first) == NULL)
                   1511:                panic("region_alloc: all smegs gone");
                   1512:
                   1513:        pm = me->me_pmap;
                   1514:        if (pm == NULL)
                   1515:                panic("region_alloc: LRU entry has no pmap");
                   1516:        if (pm == pmap_kernel())
                   1517:                panic("region_alloc: stealing from kernel");
                   1518: #ifdef DEBUG
                   1519:        if (pmapdebug & (PDB_MMUREG_ALLOC | PDB_MMUREG_STEAL))
1.66      christos 1520:                printf("region_alloc: stealing smeg %x from pmap %p\n",
1.43      pk       1521:                    me->me_cookie, pm);
                   1522: #endif
                   1523:        /*
                   1524:         * Remove from LRU list, and insert at end of new list
                   1525:         * (probably the LRU list again, but so what?).
                   1526:         */
                   1527:        TAILQ_REMOVE(&region_lru, me, me_list);
                   1528:        TAILQ_INSERT_TAIL(mh, me, me_list);
                   1529:
                   1530:        rp = &pm->pm_regmap[me->me_vreg];
1.71      pk       1531:        ctx = getcontext4();
1.43      pk       1532:        if (pm->pm_ctx) {
                   1533:                CHANGE_CONTEXTS(ctx, pm->pm_ctxnum);
1.69      pk       1534:                cache_flush_region(me->me_vreg);
1.43      pk       1535:        }
                   1536:
                   1537:        /* update region tables */
                   1538:        simple_lock(&pm->pm_lock); /* what if other cpu takes mmuentry ?? */
                   1539:        if (pm->pm_ctx)
                   1540:                setregmap(VRTOVA(me->me_vreg), reginval);
                   1541:        rp->rg_smeg = reginval;
                   1542:
                   1543:        /* off old pmap chain */
                   1544:        TAILQ_REMOVE(&pm->pm_reglist, me, me_pmchain);
                   1545:        simple_unlock(&pm->pm_lock);
1.71      pk       1546:        setcontext4(ctx);       /* done with old context */
1.43      pk       1547:
                   1548:        /* onto new pmap chain; new pmap is already locked, if needed */
                   1549:        TAILQ_INSERT_TAIL(&newpm->pm_reglist, me, me_pmchain);
                   1550:
                   1551:        /* into new segment table, with backpointers */
                   1552:        newpm->pm_regmap[newvr].rg_smeg = me->me_cookie;
                   1553:        me->me_pmap = newpm;
                   1554:        me->me_vreg = newvr;
                   1555:
                   1556:        return (me);
                   1557: }
                   1558:
                   1559: /*
                   1560:  * Free an MMU entry.
                   1561:  *
                   1562:  * Assumes the corresponding pmap is already locked.
                    1563:  * Flushes the cache for the region if the pmap has a context.
                   1564:  * CALLER MUST SET CONTEXT to pm->pm_ctxnum (if pmap has
                   1565:  * a context) or to 0 (if not).  Caller must also update
                   1566:  * pm->pm_regmap and (possibly) the hardware.
                   1567:  */
                   1568: void
                   1569: region_free(pm, smeg)
                   1570:        register struct pmap *pm;
                   1571:        register u_int smeg;
                   1572: {
                   1573:        register struct mmuentry *me = &mmuregions[smeg];
                   1574:
                   1575: #ifdef DEBUG
                   1576:        if (pmapdebug & PDB_MMUREG_ALLOC)
1.66      christos 1577:                printf("region_free: freeing smeg %x from pmap %p\n",
1.43      pk       1578:                    me->me_cookie, pm);
                   1579:        if (me->me_cookie != smeg)
                   1580:                panic("region_free: wrong mmuentry");
                   1581:        if (pm != me->me_pmap)
                   1582:                panic("region_free: pm != me_pmap");
                   1583: #endif
                   1584:
                   1585:        if (pm->pm_ctx)
1.69      pk       1586:                cache_flush_region(me->me_vreg);
1.43      pk       1587:
                   1588:        /* take mmu entry off pmap chain */
                   1589:        TAILQ_REMOVE(&pm->pm_reglist, me, me_pmchain);
1.1       deraadt  1590:        /* ... and remove from segment map */
1.43      pk       1591:        pm->pm_regmap[smeg].rg_smeg = reginval;
1.1       deraadt  1592:
                   1593:        /* off LRU or lock chain */
1.43      pk       1594:        if (pm == pmap_kernel()) {
                   1595:                TAILQ_REMOVE(&region_locked, me, me_list);
                   1596:        } else {
                   1597:                TAILQ_REMOVE(&region_lru, me, me_list);
                   1598:        }
1.1       deraadt  1599:
                   1600:        /* no associated pmap; on free list */
                   1601:        me->me_pmap = NULL;
1.43      pk       1602:        TAILQ_INSERT_TAIL(&region_freelist, me, me_list);
1.1       deraadt  1603: }
1.43      pk       1604: #endif
1.1       deraadt  1605:
                   1606: /*
                   1607:  * `Page in' (load or inspect) an MMU entry; called on page faults.
                   1608:  * Returns 1 if we reloaded the segment, -1 if the segment was
                   1609:  * already loaded and the page was marked valid (in which case the
                   1610:  * fault must be a bus error or something), or 0 (segment loaded but
                   1611:  * PTE not valid, or segment not loaded at all).
                   1612:  */
                   1613: int
1.61      pk       1614: mmu_pagein(pm, va, prot)
1.1       deraadt  1615:        register struct pmap *pm;
1.45      pk       1616:        register int va, prot;
1.1       deraadt  1617: {
                   1618:        register int *pte;
1.45      pk       1619:        register int vr, vs, pmeg, i, s, bits;
1.43      pk       1620:        struct regmap *rp;
                   1621:        struct segmap *sp;
                   1622:
1.45      pk       1623:        if (prot != VM_PROT_NONE)
                   1624:                bits = PG_V | ((prot & VM_PROT_WRITE) ? PG_W : 0);
                   1625:        else
                   1626:                bits = 0;
                   1627:
1.43      pk       1628:        vr = VA_VREG(va);
                   1629:        vs = VA_VSEG(va);
                   1630:        rp = &pm->pm_regmap[vr];
                   1631: #ifdef DEBUG
                    1632:        if (pm == pmap_kernel())
1.66      christos 1633:                printf("mmu_pagein: kernel wants map at va %x, vr %d, vs %d\n", va, vr, vs);
1.43      pk       1634: #endif
                   1635:
                   1636:        /* return 0 if we have no PMEGs to load */
                   1637:        if (rp->rg_segmap == NULL)
                   1638:                return (0);
1.69      pk       1639: #if defined(SUN4_MMU3L)
                   1640:        if (HASSUN4_MMU3L && rp->rg_smeg == reginval) {
1.43      pk       1641:                smeg_t smeg;
                   1642:                unsigned int tva = VA_ROUNDDOWNTOREG(va);
                   1643:                struct segmap *sp = rp->rg_segmap;
                   1644:
                   1645:                s = splpmap();          /* paranoid */
                   1646:                smeg = region_alloc(&region_lru, pm, vr)->me_cookie;
                   1647:                setregmap(tva, smeg);
                   1648:                i = NSEGRG;
                   1649:                do {
                   1650:                        setsegmap(tva, sp++->sg_pmeg);
                   1651:                        tva += NBPSG;
                   1652:                } while (--i > 0);
                   1653:                splx(s);
                   1654:        }
                   1655: #endif
                   1656:        sp = &rp->rg_segmap[vs];
1.1       deraadt  1657:
                   1658:        /* return 0 if we have no PTEs to load */
1.43      pk       1659:        if ((pte = sp->sg_pte) == NULL)
1.1       deraadt  1660:                return (0);
1.43      pk       1661:
1.1       deraadt  1662:        /* return -1 if the fault is `hard', 0 if not */
1.43      pk       1663:        if (sp->sg_pmeg != seginval)
1.55      pk       1664:                return (bits && (getpte4(va) & bits) == bits ? -1 : 0);
1.1       deraadt  1665:
                   1666:        /* reload segment: write PTEs into a new LRU entry */
                   1667:        va = VA_ROUNDDOWNTOSEG(va);
                   1668:        s = splpmap();          /* paranoid */
1.43      pk       1669:        pmeg = me_alloc(&segm_lru, pm, vr, vs)->me_cookie;
1.1       deraadt  1670:        setsegmap(va, pmeg);
                   1671:        i = NPTESG;
                   1672:        do {
1.55      pk       1673:                setpte4(va, *pte++);
1.1       deraadt  1674:                va += NBPG;
                   1675:        } while (--i > 0);
                   1676:        splx(s);
                   1677:        return (1);
                   1678: }
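
                          /*
                           * Editorial sketch of how a trap-level caller might interpret
                           * the return value (the actual caller lives in the fault
                           * handling code, which is not part of this file):
                           *
                           *      switch (mmu_pagein(pm, va, prot)) {
                           *      case 1:         -- segment reloaded; just retry
                           *              return;
                           *      case -1:        -- PTE was valid: a genuine fault
                           *              goto fault;
                           *      case 0:         -- hand the fault to the VM system
                           *              break;
                           *      }
                           */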
1.55      pk       1679: #endif /* defined SUN4 or SUN4C */
                   1680:
1.1       deraadt  1681: /*
                   1682:  * Allocate a context.  If necessary, steal one from someone else.
                   1683:  * Changes hardware context number and loads segment map.
                   1684:  *
                   1685:  * This routine is only ever called from locore.s just after it has
                   1686:  * saved away the previous process, so there are no active user windows.
                   1687:  */
                   1688: void
                   1689: ctx_alloc(pm)
                   1690:        register struct pmap *pm;
                   1691: {
                   1692:        register union ctxinfo *c;
1.49      pk       1693:        register int s, cnum, i, doflush;
1.43      pk       1694:        register struct regmap *rp;
1.13      pk       1695:        register int gap_start, gap_end;
                   1696:        register unsigned long va;
1.1       deraadt  1697:
1.55      pk       1698: /*XXX-GCC!*/gap_start=gap_end=0;
1.1       deraadt  1699: #ifdef DEBUG
                   1700:        if (pm->pm_ctx)
                   1701:                panic("ctx_alloc pm_ctx");
                   1702:        if (pmapdebug & PDB_CTX_ALLOC)
1.66      christos 1703:                printf("ctx_alloc(%p)\n", pm);
1.1       deraadt  1704: #endif
1.55      pk       1705:        if (CPU_ISSUN4OR4C) {
                   1706:                gap_start = pm->pm_gap_start;
                   1707:                gap_end = pm->pm_gap_end;
                   1708:        }
1.13      pk       1709:
1.49      pk       1710:        s = splpmap();
1.1       deraadt  1711:        if ((c = ctx_freelist) != NULL) {
                   1712:                ctx_freelist = c->c_nextfree;
1.69      pk       1713:                cnum = c - cpuinfo.ctxinfo;
1.49      pk       1714:                doflush = 0;
1.1       deraadt  1715:        } else {
                   1716:                if ((ctx_kick += ctx_kickdir) >= ncontext) {
                   1717:                        ctx_kick = ncontext - 1;
                   1718:                        ctx_kickdir = -1;
                   1719:                } else if (ctx_kick < 1) {
                   1720:                        ctx_kick = 1;
                   1721:                        ctx_kickdir = 1;
                   1722:                }
1.69      pk       1723:                c = &cpuinfo.ctxinfo[cnum = ctx_kick];
1.1       deraadt  1724: #ifdef DEBUG
                   1725:                if (c->c_pmap == NULL)
                   1726:                        panic("ctx_alloc cu_pmap");
                   1727:                if (pmapdebug & (PDB_CTX_ALLOC | PDB_CTX_STEAL))
1.66      christos 1728:                        printf("ctx_alloc: steal context %d from %p\n",
1.1       deraadt  1729:                            cnum, c->c_pmap);
                   1730: #endif
                   1731:                c->c_pmap->pm_ctx = NULL;
1.69      pk       1732:                doflush = (CACHEINFO.c_vactype != VAC_NONE);
1.55      pk       1733:                if (CPU_ISSUN4OR4C) {
                   1734:                        if (gap_start < c->c_pmap->pm_gap_start)
                   1735:                                gap_start = c->c_pmap->pm_gap_start;
                   1736:                        if (gap_end > c->c_pmap->pm_gap_end)
                   1737:                                gap_end = c->c_pmap->pm_gap_end;
                   1738:                }
1.1       deraadt  1739:        }
1.49      pk       1740:
1.1       deraadt  1741:        c->c_pmap = pm;
                   1742:        pm->pm_ctx = c;
                   1743:        pm->pm_ctxnum = cnum;
                   1744:
1.55      pk       1745:        if (CPU_ISSUN4OR4C) {
                   1746:                /*
                   1747:                 * Write pmap's region (3-level MMU) or segment table into
                   1748:                 * the MMU.
                   1749:                 *
                   1750:                 * Only write those entries that actually map something in
                   1751:                 * this context by maintaining a pair of region numbers in
                   1752:                 * between which the pmap has no valid mappings.
                   1753:                 *
                   1754:                 * If a context was just allocated from the free list, trust
                   1755:                 * that all its pmeg numbers are `seginval'. We make sure this
                   1756:                 * is the case initially in pmap_bootstrap(). Otherwise, the
                   1757:                 * context was freed by calling ctx_free() in pmap_release(),
                   1758:                 * which in turn is supposedly called only when all mappings
                   1759:                 * have been removed.
                   1760:                 *
                   1761:                 * On the other hand, if the context had to be stolen from
                    1762:                 * another pmap, we possibly shrink the gap to the
                    1763:                 * intersection of the new and the previous pmap's gaps.
                   1764:                 */
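
                                          /*
                                           * Worked example (editorial, hypothetical numbers): if this
                                           * pmap maps user regions 0-9 and 200-219, its gap is
                                           * [10, 200).  If the context was stolen from a pmap whose
                                           * gap was [50, 150), only [50, 150) is known to be empty in
                                           * *both* maps, so only those segment map writes may safely
                                           * be skipped below.
                                           */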
1.43      pk       1765:
1.55      pk       1766:                setcontext(cnum);
                   1767:                splx(s);
                   1768:                if (doflush)
                   1769:                        cache_flush_context();
1.43      pk       1770:
1.55      pk       1771:                rp = pm->pm_regmap;
                   1772:                for (va = 0, i = NUREG; --i >= 0; ) {
                   1773:                        if (VA_VREG(va) >= gap_start) {
                   1774:                                va = VRTOVA(gap_end);
                   1775:                                i -= gap_end - gap_start;
                   1776:                                rp += gap_end - gap_start;
                   1777:                                if (i < 0)
                   1778:                                        break;
                   1779:                                /* mustn't re-enter this branch */
                   1780:                                gap_start = NUREG;
                   1781:                        }
1.69      pk       1782:                        if (HASSUN4_MMU3L) {
1.55      pk       1783:                                setregmap(va, rp++->rg_smeg);
                   1784:                                va += NBPRG;
1.69      pk       1785:                        } else {
1.55      pk       1786:                                register int j;
                   1787:                                register struct segmap *sp = rp->rg_segmap;
                   1788:                                for (j = NSEGRG; --j >= 0; va += NBPSG)
                   1789:                                        setsegmap(va,
                   1790:                                                  sp?sp++->sg_pmeg:seginval);
                   1791:                                rp++;
                   1792:                        }
1.43      pk       1793:                }
1.55      pk       1794:
                   1795:        } else if (CPU_ISSUN4M) {
                   1796:
                   1797:                /*
                   1798:                 * Reload page and context tables to activate the page tables
                   1799:                 * for this context.
                   1800:                 *
                   1801:                 * The gap stuff isn't really needed in the Sun4m architecture,
                   1802:                 * since we don't have to worry about excessive mappings (all
                   1803:                 * mappings exist since the page tables must be complete for
                   1804:                 * the mmu to be happy).
                   1805:                 *
                   1806:                 * If a context was just allocated from the free list, trust
                   1807:                 * that all of its mmu-edible page tables are zeroed out
                   1808:                 * (except for those associated with the kernel). We make
                   1809:                 * sure this is the case initially in pmap_bootstrap() and
                   1810:                 * pmap_init() (?).
                   1811:                 * Otherwise, the context was freed by calling ctx_free() in
                   1812:                 * pmap_release(), which in turn is supposedly called only
                   1813:                 * when all mappings have been removed.
                   1814:                 *
                   1815:                 * XXX: Do we have to flush cache after reloading ctx tbl?
                   1816:                 */
                   1817:
                   1818:                /*
                   1819:                 * We install kernel mappings into the pmap here, since when
                   1820:                 * the kernel expands it only propagates the expansion to pmaps
                   1821:                 * corresponding to valid contexts. Thus it is possible (and
                   1822:                 * it has happened!) that a pmap is created just before
                   1823:                 * the kernel expands, but the pmap gets a context *after*
                    1824:                 * the kernel expands, and thus misses the new kernel mappings.
                   1825:                 */
1.69      pk       1826: #if 0
1.55      pk       1827:                qcopy(&pmap_kernel()->pm_reg_ptps[VA_VREG(KERNBASE)],
                   1828:                      &pm->pm_reg_ptps[VA_VREG(KERNBASE)],
                   1829:                      NKREG * sizeof(int));
1.69      pk       1830: #endif
                   1831:                qcopy(&cpuinfo.L1_ptps[VA_VREG(KERNBASE)],
                   1832:                      &pm->pm_reg_ptps[VA_VREG(KERNBASE)],
                   1833:                      NKREG * sizeof(int));
1.55      pk       1834:                /*
                   1835:                 * We must also install the regmap/segmap/etc stuff for
                   1836:                 * kernel maps.
                   1837:                 */
                   1838:                qcopy(&pmap_kernel()->pm_regmap[VA_VREG(KERNBASE)],
                   1839:                       &pm->pm_regmap[VA_VREG(KERNBASE)],
                   1840:                       NKREG * sizeof(struct regmap));
                   1841:
1.69      pk       1842: #if 0
1.61      pk       1843:                ctxbusyvector[cnum] = 1; /* mark context as busy */
1.69      pk       1844: #endif
1.55      pk       1845: #ifdef DEBUG
                   1846:                if (pm->pm_reg_ptps_pa == 0)
                   1847:                        panic("ctx_alloc: no region table in current pmap");
                   1848: #endif
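                                         /*
                                          * Point this context's entry in the MMU context table at the
                                          * pmap's level-1 (region) page table: the table's physical
                                          * address is shifted into page-number position and tagged as
                                          * a page-table descriptor (PTD).
                                          */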
                   1849:                /* setcontext(0);  -- paranoia? can we modify the current ctx? */
1.69      pk       1850:                cpuinfo.ctx_tbl[cnum] =
1.55      pk       1851:                        (pm->pm_reg_ptps_pa >> SRMMU_PPNPASHIFT) | SRMMU_TEPTD;
                   1852:
                   1853:                setcontext(cnum);
                   1854:                if (doflush)
                   1855:                        cache_flush_context();
                   1856: #if defined(SUN4M)
                   1857:                tlb_flush_context(); /* remove any remnant garbage from tlb */
1.43      pk       1858: #endif
1.55      pk       1859:                splx(s);
1.13      pk       1860:        }
1.1       deraadt  1861: }
                   1862:
                   1863: /*
                   1864:  * Give away a context.  Flushes cache and sets current context to 0.
                   1865:  */
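                         /*
                          * (CHANGE_CONTEXTS(), used below, is a helper macro defined earlier in
                          * this file; it issues a setcontext() only when the two context numbers
                          * actually differ.)
                          */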
                   1866: void
                   1867: ctx_free(pm)
                   1868:        struct pmap *pm;
                   1869: {
                   1870:        register union ctxinfo *c;
                   1871:        register int newc, oldc;
                   1872:
                   1873:        if ((c = pm->pm_ctx) == NULL)
                   1874:                panic("ctx_free");
                   1875:        pm->pm_ctx = NULL;
                   1876:        oldc = getcontext();
1.55      pk       1877:
1.69      pk       1878:        if (CACHEINFO.c_vactype != VAC_NONE) {
1.1       deraadt  1879:                newc = pm->pm_ctxnum;
                   1880:                CHANGE_CONTEXTS(oldc, newc);
                   1881:                cache_flush_context();
1.55      pk       1882: #if defined(SUN4M)
                   1883:                if (CPU_ISSUN4M)
                   1884:                        tlb_flush_context();
                   1885: #endif
1.1       deraadt  1886:                setcontext(0);
                   1887:        } else {
1.55      pk       1888: #if defined(SUN4M)
                   1889:                if (CPU_ISSUN4M)
                   1890:                        tlb_flush_context();
                   1891: #endif
1.1       deraadt  1892:                CHANGE_CONTEXTS(oldc, 0);
                   1893:        }
                   1894:        c->c_nextfree = ctx_freelist;
                   1895:        ctx_freelist = c;
1.55      pk       1896:
1.69      pk       1897: #if 0
1.55      pk       1898: #if defined(SUN4M)
                   1899:        if (CPU_ISSUN4M) {
                   1900:                /* Map kernel back into unused context */
                   1901:                newc = pm->pm_ctxnum;
1.69      pk       1902:                cpuinfo.ctx_tbl[newc] = cpuinfo.ctx_tbl[0];
1.55      pk       1903:                if (newc)
                   1904:                        ctxbusyvector[newc] = 0; /* mark as free */
                   1905:        }
                   1906: #endif
1.69      pk       1907: #endif
1.1       deraadt  1908: }
                   1909:
                   1910:
                   1911: /*----------------------------------------------------------------*/
                   1912:
                   1913: /*
                   1914:  * pvlist functions.
                   1915:  */
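                         /*
                          * For orientation: each managed physical page has a pv list head in
                          * pv_table[]; further mappings of the same page are chained behind it.
                          * The fields used below are (see the declarations earlier in this
                          * file for the authoritative layout):
                          *
                          *      pv_next  - next entry in the chain, or NULL
                          *      pv_pmap  - pmap owning this mapping (NULL in an empty list head)
                          *      pv_va    - virtual address of the mapping
                          *      pv_flags - ref/mod/cacheability flags; the summary for the page
                          *                 is kept on the head entry
                          */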
                   1916:
                   1917: /*
                   1918:  * Walk the given pv list, and for each PTE, set or clear some bits
                   1919:  * (e.g., PG_W or PG_NC).
                   1920:  *
                   1921:  * As a special case, this never clears PG_W on `pager' pages.
                   1922:  * These, being kernel addresses, are always in hardware and have
                   1923:  * a context.
                   1924:  *
                   1925:  * This routine flushes the cache for any page whose PTE changes,
                   1926:  * as long as the process has a context; this is overly conservative.
                   1927:  * It also copies ref and mod bits to the pvlist, on the theory that
                   1928:  * this might save work later.  (XXX should test this theory)
                   1929:  */
1.55      pk       1930:
                   1931: #if defined(SUN4) || defined(SUN4C)
                   1932:
1.1       deraadt  1933: void
1.55      pk       1934: pv_changepte4_4c(pv0, bis, bic)
1.1       deraadt  1935:        register struct pvlist *pv0;
                   1936:        register int bis, bic;
                   1937: {
                   1938:        register int *pte;
                   1939:        register struct pvlist *pv;
                   1940:        register struct pmap *pm;
1.53      christos 1941:        register int va, vr, vs, flags;
1.1       deraadt  1942:        int ctx, s;
1.43      pk       1943:        struct regmap *rp;
                   1944:        struct segmap *sp;
1.1       deraadt  1945:
                   1946:        write_user_windows();           /* paranoid? */
                   1947:
                   1948:        s = splpmap();                  /* paranoid? */
                   1949:        if (pv0->pv_pmap == NULL) {
                   1950:                splx(s);
                   1951:                return;
                   1952:        }
1.71      pk       1953:        ctx = getcontext4();
1.1       deraadt  1954:        flags = pv0->pv_flags;
                   1955:        for (pv = pv0; pv != NULL; pv = pv->pv_next) {
                   1956:                pm = pv->pv_pmap;
                   1957:                if (pm == NULL) panic("pv_changepte 1");
                   1958:                va = pv->pv_va;
1.43      pk       1959:                vr = VA_VREG(va);
                   1960:                vs = VA_VSEG(va);
                   1961:                rp = &pm->pm_regmap[vr];
                   1962:                if (rp->rg_segmap == NULL)
                   1963:                        panic("pv_changepte: no segments");
                   1964:
                   1965:                sp = &rp->rg_segmap[vs];
                   1966:                pte = sp->sg_pte;
                   1967:
                   1968:                if (sp->sg_pmeg == seginval) {
                   1969:                        /* not in hardware: just fix software copy */
                   1970:                        if (pte == NULL)
                   1971:                                panic("pv_changepte 2");
                   1972:                        pte += VA_VPG(va);
                   1973:                        *pte = (*pte | bis) & ~bic;
                   1974:                } else {
1.1       deraadt  1975:                        register int tpte;
                   1976:
                   1977:                        /* in hardware: fix hardware copy */
1.43      pk       1978:                        if (CTX_USABLE(pm,rp)) {
1.1       deraadt  1979:                                extern vm_offset_t pager_sva, pager_eva;
                   1980:
1.8       pk       1981:                                /*
                   1982:                                 * Bizarreness:  we never clear PG_W on
                   1983:                                 * pager pages, nor PG_NC on DVMA pages.
                   1984:                                 */
1.1       deraadt  1985:                                if (bic == PG_W &&
                   1986:                                    va >= pager_sva && va < pager_eva)
1.3       deraadt  1987:                                        continue;
                   1988:                                if (bic == PG_NC &&
                   1989:                                    va >= DVMA_BASE && va < DVMA_END)
1.1       deraadt  1990:                                        continue;
1.71      pk       1991:                                setcontext4(pm->pm_ctxnum);
1.1       deraadt  1992:                                /* XXX should flush only when necessary */
1.55      pk       1993:                                tpte = getpte4(va);
1.69      pk       1994:                                if (tpte & PG_M)
1.43      pk       1995:                                        cache_flush_page(va);
1.1       deraadt  1996:                        } else {
                   1997:                                /* XXX per-cpu va? */
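                                                                 /*
                                                                  * The pmap has no context of its own: make the
                                                                  * PMEG temporarily addressable by plugging it
                                                                  * into (region 0,) segment 0 of context 0, then
                                                                  * touch the PTE through the matching page
                                                                  * offset at low virtual addresses.
                                                                  */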
1.71      pk       1998:                                setcontext4(0);
1.69      pk       1999:                                if (HASSUN4_MMU3L)
1.43      pk       2000:                                        setregmap(0, tregion);
                   2001:                                setsegmap(0, sp->sg_pmeg);
1.18      deraadt  2002:                                va = VA_VPG(va) << PGSHIFT;
1.55      pk       2003:                                tpte = getpte4(va);
1.1       deraadt  2004:                        }
                   2005:                        if (tpte & PG_V)
1.63      pk       2006:                                flags |= (tpte >> PG_M_SHIFT) & (PV_MOD|PV_REF);
1.1       deraadt  2007:                        tpte = (tpte | bis) & ~bic;
1.55      pk       2008:                        setpte4(va, tpte);
1.1       deraadt  2009:                        if (pte != NULL)        /* update software copy */
                   2010:                                pte[VA_VPG(va)] = tpte;
                   2011:                }
                   2012:        }
                   2013:        pv0->pv_flags = flags;
1.71      pk       2014:        setcontext4(ctx);
1.1       deraadt  2015:        splx(s);
                   2016: }
                   2017:
                   2018: /*
                   2019:  * Sync ref and mod bits in pvlist (turns off same in hardware PTEs).
                   2020:  * Returns the new flags.
                   2021:  *
                   2022:  * This is just like pv_changepte, but we never add or remove bits,
                   2023:  * hence never need to adjust software copies.
                   2024:  */
                   2025: int
1.55      pk       2026: pv_syncflags4_4c(pv0)
1.1       deraadt  2027:        register struct pvlist *pv0;
                   2028: {
                   2029:        register struct pvlist *pv;
                   2030:        register struct pmap *pm;
1.53      christos 2031:        register int tpte, va, vr, vs, pmeg, flags;
1.1       deraadt  2032:        int ctx, s;
1.43      pk       2033:        struct regmap *rp;
                   2034:        struct segmap *sp;
1.1       deraadt  2035:
                   2036:        write_user_windows();           /* paranoid? */
                   2037:
                   2038:        s = splpmap();                  /* paranoid? */
                   2039:        if (pv0->pv_pmap == NULL) {     /* paranoid */
                   2040:                splx(s);
                   2041:                return (0);
                   2042:        }
1.71      pk       2043:        ctx = getcontext4();
1.1       deraadt  2044:        flags = pv0->pv_flags;
                   2045:        for (pv = pv0; pv != NULL; pv = pv->pv_next) {
                   2046:                pm = pv->pv_pmap;
                   2047:                va = pv->pv_va;
1.43      pk       2048:                vr = VA_VREG(va);
                   2049:                vs = VA_VSEG(va);
                   2050:                rp = &pm->pm_regmap[vr];
                   2051:                if (rp->rg_segmap == NULL)
                   2052:                        panic("pv_syncflags: no segments");
                   2053:                sp = &rp->rg_segmap[vs];
                   2054:
                   2055:                if ((pmeg = sp->sg_pmeg) == seginval)
1.1       deraadt  2056:                        continue;
1.43      pk       2057:
                   2058:                if (CTX_USABLE(pm,rp)) {
1.71      pk       2059:                        setcontext4(pm->pm_ctxnum);
1.1       deraadt  2060:                        /* XXX should flush only when necessary */
1.55      pk       2061:                        tpte = getpte4(va);
1.69      pk       2062:                        if (tpte & PG_M)
1.34      pk       2063:                                cache_flush_page(va);
1.1       deraadt  2064:                } else {
                   2065:                        /* XXX per-cpu va? */
1.71      pk       2066:                        setcontext4(0);
1.69      pk       2067:                        if (HASSUN4_MMU3L)
1.43      pk       2068:                                setregmap(0, tregion);
1.1       deraadt  2069:                        setsegmap(0, pmeg);
1.18      deraadt  2070:                        va = VA_VPG(va) << PGSHIFT;
1.55      pk       2071:                        tpte = getpte4(va);
1.1       deraadt  2072:                }
                   2073:                if ((tpte & (PG_M|PG_U)) && (tpte & PG_V)) {
                   2074:                        flags |= (tpte >> PG_M_SHIFT) &
                   2075:                            (PV_MOD|PV_REF);
                   2076:                        tpte &= ~(PG_M|PG_U);
1.55      pk       2077:                        setpte4(va, tpte);
1.1       deraadt  2078:                }
                   2079:        }
                   2080:        pv0->pv_flags = flags;
1.71      pk       2081:        setcontext4(ctx);
1.1       deraadt  2082:        splx(s);
                   2083:        return (flags);
                   2084: }
                   2085:
                   2086: /*
                   2087:  * pv_unlink is a helper function for pmap_remove.
                   2088:  * It takes a pointer to the pv_table head for some physical address
                   2089:  * and removes the appropriate (pmap, va) entry.
                   2090:  *
                   2091:  * Once the entry is removed, if the pv_table head has the cache
                   2092:  * inhibit bit set, see if we can turn that off; if so, walk the
                   2093:  * pvlist and turn off PG_NC in each PTE.  (The pvlist is by
                   2094:  * definition nonempty, since it must have at least two elements
                   2095:  * in it to have PV_NC set, and we only remove one here.)
                   2096:  */
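                         /*
                          * (BADALIAS(va1, va2), defined earlier in this file, is the virtual
                          * cache alias test: loosely, it is true when the two virtual addresses
                          * differ in the bits that index the virtually-addressed cache, so the
                          * two mappings cannot safely be cached at the same time.)
                          */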
1.43      pk       2097: /*static*/ void
1.55      pk       2098: pv_unlink4_4c(pv, pm, va)
1.1       deraadt  2099:        register struct pvlist *pv;
                   2100:        register struct pmap *pm;
                   2101:        register vm_offset_t va;
                   2102: {
                   2103:        register struct pvlist *npv;
                   2104:
1.11      pk       2105: #ifdef DIAGNOSTIC
                   2106:        if (pv->pv_pmap == NULL)
                   2107:                panic("pv_unlink0");
                   2108: #endif
1.1       deraadt  2109:        /*
                   2110:         * First entry is special (sigh).
                   2111:         */
                   2112:        npv = pv->pv_next;
                   2113:        if (pv->pv_pmap == pm && pv->pv_va == va) {
                   2114:                pmap_stats.ps_unlink_pvfirst++;
                   2115:                if (npv != NULL) {
                   2116:                        pv->pv_next = npv->pv_next;
                   2117:                        pv->pv_pmap = npv->pv_pmap;
                   2118:                        pv->pv_va = npv->pv_va;
1.49      pk       2119:                        free(npv, M_VMPVENT);
1.1       deraadt  2120:                } else
                   2121:                        pv->pv_pmap = NULL;
                   2122:        } else {
                   2123:                register struct pvlist *prev;
                   2124:
                   2125:                for (prev = pv;; prev = npv, npv = npv->pv_next) {
                   2126:                        pmap_stats.ps_unlink_pvsearch++;
                   2127:                        if (npv == NULL)
                   2128:                                panic("pv_unlink");
                   2129:                        if (npv->pv_pmap == pm && npv->pv_va == va)
                   2130:                                break;
                   2131:                }
                   2132:                prev->pv_next = npv->pv_next;
1.49      pk       2133:                free(npv, M_VMPVENT);
1.1       deraadt  2134:        }
                   2135:        if (pv->pv_flags & PV_NC) {
                   2136:                /*
                   2137:                 * Not cached: check to see if we can fix that now.
                   2138:                 */
                   2139:                va = pv->pv_va;
                   2140:                for (npv = pv->pv_next; npv != NULL; npv = npv->pv_next)
                   2141:                        if (BADALIAS(va, npv->pv_va))
                   2142:                                return;
                   2143:                pv->pv_flags &= ~PV_NC;
1.58      pk       2144:                pv_changepte4_4c(pv, 0, PG_NC);
1.1       deraadt  2145:        }
                   2146: }
                   2147:
                   2148: /*
                   2149:  * pv_link is the inverse of pv_unlink, and is used in pmap_enter.
                   2150:  * It returns PG_NC if the (new) pvlist says that the address cannot
                   2151:  * be cached.
                   2152:  */
1.43      pk       2153: /*static*/ int
1.55      pk       2154: pv_link4_4c(pv, pm, va)
1.1       deraadt  2155:        register struct pvlist *pv;
                   2156:        register struct pmap *pm;
                   2157:        register vm_offset_t va;
                   2158: {
                   2159:        register struct pvlist *npv;
                   2160:        register int ret;
                   2161:
                   2162:        if (pv->pv_pmap == NULL) {
                   2163:                /* no pvlist entries yet */
                   2164:                pmap_stats.ps_enter_firstpv++;
                   2165:                pv->pv_next = NULL;
                   2166:                pv->pv_pmap = pm;
                   2167:                pv->pv_va = va;
                   2168:                return (0);
                   2169:        }
                   2170:        /*
                   2171:         * Before entering the new mapping, see if
                   2172:         * it will cause old mappings to become aliased
                   2173:         * and thus need to be `discached'.
                   2174:         */
                   2175:        ret = 0;
                   2176:        pmap_stats.ps_enter_secondpv++;
                   2177:        if (pv->pv_flags & PV_NC) {
                   2178:                /* already uncached, just stay that way */
                   2179:                ret = PG_NC;
                   2180:        } else {
                   2181:                /* MAY NEED TO DISCACHE ANYWAY IF va IS IN DVMA SPACE? */
                   2182:                for (npv = pv; npv != NULL; npv = npv->pv_next) {
                   2183:                        if (BADALIAS(va, npv->pv_va)) {
1.43      pk       2184: #ifdef DEBUG
1.66      christos 2185:                                if (pmapdebug) printf(
1.54      christos 2186:                                "pv_link: badalias: pid %d, %lx<=>%x, pa %lx\n",
1.43      pk       2187:                                curproc?curproc->p_pid:-1, va, npv->pv_va,
                   2188:                                vm_first_phys + (pv-pv_table)*NBPG);
                   2189: #endif
1.1       deraadt  2190:                                pv->pv_flags |= PV_NC;
1.58      pk       2191:                                pv_changepte4_4c(pv, ret = PG_NC, 0);
1.1       deraadt  2192:                                break;
                   2193:                        }
                   2194:                }
                   2195:        }
                   2196:        npv = (struct pvlist *)malloc(sizeof *npv, M_VMPVENT, M_WAITOK);
                   2197:        npv->pv_next = pv->pv_next;
                   2198:        npv->pv_pmap = pm;
                   2199:        npv->pv_va = va;
                   2200:        pv->pv_next = npv;
                   2201:        return (ret);
                   2202: }
                   2203:
1.55      pk       2204: #endif /* sun4, sun4c code */
                   2205:
                   2206: #if defined(SUN4M)             /* Sun4M versions of above */
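                         /*
                          * (Sun4m note: the in-memory page tables are authoritative, but the
                          * TLB may hold a stale copy with SRMMU_PG_R already set; hence the
                          * tlb_flush_page() calls below before a PTE is read and after one is
                          * rewritten.)
                          */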
1.1       deraadt  2207: /*
1.55      pk       2208:  * Walk the given pv list, and for each PTE, set or clear some bits
                   2209:  * (e.g., PG_W or PG_NC).
                   2210:  *
                   2211:  * As a special case, this never clears PG_W on `pager' pages.
                   2212:  * These, being kernel addresses, are always in hardware and have
                   2213:  * a context.
                   2214:  *
                   2215:  * This routine flushes the cache for any page whose PTE changes,
                   2216:  * as long as the process has a context; this is overly conservative.
                   2217:  * It also copies ref and mod bits to the pvlist, on the theory that
                   2218:  * this might save work later.  (XXX should test this theory)
1.1       deraadt  2219:  */
1.53      christos 2220: void
1.55      pk       2221: pv_changepte4m(pv0, bis, bic)
                   2222:        register struct pvlist *pv0;
                   2223:        register int bis, bic;
                   2224: {
1.1       deraadt  2225:        register struct pvlist *pv;
                   2226:        register struct pmap *pm;
1.61      pk       2227:        register int va, vr, flags;
1.55      pk       2228:        int ctx, s;
                   2229:        struct regmap *rp;
1.72      pk       2230:        struct segmap *sp;
1.1       deraadt  2231:
1.55      pk       2232:        write_user_windows();           /* paranoid? */
1.1       deraadt  2233:
1.55      pk       2234:        s = splpmap();                  /* paranoid? */
                   2235:        if (pv0->pv_pmap == NULL) {
                   2236:                splx(s);
                   2237:                return;
1.1       deraadt  2238:        }
1.71      pk       2239:        ctx = getcontext4m();
1.55      pk       2240:        flags = pv0->pv_flags;
                   2241:        for (pv = pv0; pv != NULL; pv = pv->pv_next) {
                   2242:                register int tpte;
                   2243:                pm = pv->pv_pmap;
1.61      pk       2244:                if (pm == NULL)
                   2245:                        panic("pv_changepte 1");
1.55      pk       2246:                va = pv->pv_va;
                   2247:                vr = VA_VREG(va);
                   2248:                rp = &pm->pm_regmap[vr];
                   2249:                if (rp->rg_segmap == NULL)
                   2250:                        panic("pv_changepte: no segments");
                   2251:
1.72      pk       2252:                sp = &rp->rg_segmap[VA_VSEG(va)];
                   2253:
                   2254:                if (pm->pm_ctx) {
1.55      pk       2255:                        extern vm_offset_t pager_sva, pager_eva;
1.1       deraadt  2256:
1.55      pk       2257:                        /*
                   2258:                         * Bizarreness:  we never clear PG_W on
                   2259:                         * pager pages, nor set PG_C on DVMA pages.
                   2260:                         */
                   2261:                        if ((bic & PPROT_WRITE) &&
                   2262:                            va >= pager_sva && va < pager_eva)
1.60      pk       2263:                                continue;
1.55      pk       2264:                        if ((bis & SRMMU_PG_C) &&
                   2265:                            va >= DVMA_BASE && va < DVMA_END)
1.60      pk       2266:                                continue;
1.72      pk       2267:
                   2268:                        /* Flush TLB so memory copy is up-to-date */
1.71      pk       2269:                        setcontext4m(pm->pm_ctxnum);
1.72      pk       2270:                        tlb_flush_page(va);
                   2271:                }
                   2272:
                   2273:                tpte = sp->sg_pte[VA_SUN4M_VPG(va)];
                   2274:                if ((tpte & SRMMU_TETYPE) != SRMMU_TEPTE) {
                   2275:                        printf("pv_changepte: invalid PTE for 0x%x\n", va);
                   2276:                        continue;
1.55      pk       2277:                }
                   2278:
1.72      pk       2279:                flags |= (tpte >> PG_M_SHIFT4M) & (PV_MOD4M|PV_REF4M|PV_C4M);
1.55      pk       2280:                tpte = (tpte | bis) & ~bic;
                   2281:
1.72      pk       2282:                if (pm->pm_ctx) {
                   2283:                        if (flags & PV_MOD4M)
                   2284:                                /* XXX: Do we need to always flush? */
                   2285:                                cache_flush_page(va);
                   2286:                        tlb_flush_page(va);
                   2287:                }
                   2288:                setpgt4m(&sp->sg_pte[VA_SUN4M_VPG(va)], tpte);
1.55      pk       2289:        }
                   2290:        pv0->pv_flags = flags;
1.71      pk       2291:        setcontext4m(ctx);
1.55      pk       2292:        splx(s);
                   2293: }
                   2294:
                   2295: /*
                   2296:  * Sync ref and mod bits in pvlist. If page has been ref'd or modified,
                   2297:  * update ref/mod bits in pvlist, and clear the hardware bits.
                   2298:  *
                   2299:  * Return the new flags.
                   2300:  */
                   2301: int
                   2302: pv_syncflags4m(pv0)
                   2303:        register struct pvlist *pv0;
                   2304: {
                   2305:        register struct pvlist *pv;
                   2306:        register struct pmap *pm;
                   2307:        register int tpte, va, vr, vs, flags;
                   2308:        int ctx, s;
                   2309:        struct regmap *rp;
                   2310:        struct segmap *sp;
                   2311:
                   2312:        write_user_windows();           /* paranoid? */
                   2313:
                   2314:        s = splpmap();                  /* paranoid? */
                   2315:        if (pv0->pv_pmap == NULL) {     /* paranoid */
                   2316:                splx(s);
                   2317:                return (0);
                   2318:        }
1.71      pk       2319:        ctx = getcontext4m();
1.55      pk       2320:        flags = pv0->pv_flags;
                   2321:        for (pv = pv0; pv != NULL; pv = pv->pv_next) {
                   2322:                pm = pv->pv_pmap;
                   2323:                va = pv->pv_va;
                   2324:                vr = VA_VREG(va);
                   2325:                vs = VA_VSEG(va);
                   2326:                rp = &pm->pm_regmap[vr];
                   2327:                if (rp->rg_segmap == NULL)
                   2328:                        panic("pv_syncflags: no segments");
                   2329:                sp = &rp->rg_segmap[vs];
                   2330:
                   2331:                if (sp->sg_pte == NULL) /* invalid */
1.60      pk       2332:                        continue;
1.55      pk       2333:
1.62      pk       2334:                /*
                   2335:                 * We need the PTE from memory as the TLB version will
                   2336:                 * always have the SRMMU_PG_R bit on.
                   2337:                 */
1.72      pk       2338:                if (pm->pm_ctx) {
1.71      pk       2339:                        setcontext4m(pm->pm_ctxnum);
1.55      pk       2340:                        tlb_flush_page(va);
                   2341:                }
1.72      pk       2342:                tpte = sp->sg_pte[VA_SUN4M_VPG(va)];
1.62      pk       2343:
1.55      pk       2344:                if ((tpte & SRMMU_TETYPE) == SRMMU_TEPTE && /* if valid pte */
                   2345:                    (tpte & (SRMMU_PG_M|SRMMU_PG_R))) {   /* and mod/refd */
1.72      pk       2346:
1.55      pk       2347:                        flags |= (tpte >> PG_M_SHIFT4M) &
1.60      pk       2348:                                 (PV_MOD4M|PV_REF4M|PV_C4M);
1.72      pk       2349:
                   2350:                        if (pm->pm_ctx && (tpte & SRMMU_PG_M)) {
                   2351:                                cache_flush_page(va); /* XXX: do we need this?*/
                   2352:                                tlb_flush_page(va); /* paranoid? */
                   2353:                        }
                   2354:
                   2355:                        /* Clear mod/ref bits from PTE and write it back */
1.55      pk       2356:                        tpte &= ~(SRMMU_PG_M | SRMMU_PG_R);
1.72      pk       2357:                        setpgt4m(&sp->sg_pte[VA_SUN4M_VPG(va)], tpte);
1.55      pk       2358:                }
                   2359:        }
                   2360:        pv0->pv_flags = flags;
1.71      pk       2361:        setcontext4m(ctx);
1.55      pk       2362:        splx(s);
                   2363:        return (flags);
                   2364: }
                   2365:
                   2366: void
                   2367: pv_unlink4m(pv, pm, va)
                   2368:        register struct pvlist *pv;
                   2369:        register struct pmap *pm;
                   2370:        register vm_offset_t va;
                   2371: {
                   2372:        register struct pvlist *npv;
                   2373:
                   2374: #ifdef DIAGNOSTIC
                   2375:        if (pv->pv_pmap == NULL)
                   2376:                panic("pv_unlink0");
                   2377: #endif
                   2378:        /*
                   2379:         * First entry is special (sigh).
                   2380:         */
                   2381:        npv = pv->pv_next;
                   2382:        if (pv->pv_pmap == pm && pv->pv_va == va) {
                   2383:                pmap_stats.ps_unlink_pvfirst++;
                   2384:                if (npv != NULL) {
                   2385:                        pv->pv_next = npv->pv_next;
                   2386:                        pv->pv_pmap = npv->pv_pmap;
                   2387:                        pv->pv_va = npv->pv_va;
                   2388:                        free(npv, M_VMPVENT);
                   2389:                } else
                   2390:                        pv->pv_pmap = NULL;
                   2391:        } else {
                   2392:                register struct pvlist *prev;
                   2393:
                   2394:                for (prev = pv;; prev = npv, npv = npv->pv_next) {
                   2395:                        pmap_stats.ps_unlink_pvsearch++;
                   2396:                        if (npv == NULL)
                   2397:                                panic("pv_unlink");
                   2398:                        if (npv->pv_pmap == pm && npv->pv_va == va)
                   2399:                                break;
                   2400:                }
                   2401:                prev->pv_next = npv->pv_next;
                   2402:                free(npv, M_VMPVENT);
                   2403:        }
                   2404:        if (!(pv->pv_flags & PV_C4M)) {
                   2405:                /*
                   2406:                 * Not cached: check to see if we can fix that now.
                   2407:                 */
                   2408:                va = pv->pv_va;
                   2409:                for (npv = pv->pv_next; npv != NULL; npv = npv->pv_next)
                   2410:                        if (BADALIAS(va, npv->pv_va))
                   2411:                                return;
                   2412:                pv->pv_flags |= PV_C4M;
                   2413:                pv_changepte4m(pv, SRMMU_PG_C, 0);
                   2414:        }
                   2415: }
                   2416:
                   2417: /*
                   2418:  * pv_link is the inverse of pv_unlink, and is used in pmap_enter.
                   2419:  * It returns SRMMU_PG_C if the (new) pvlist says that the address cannot
                   2420:  * be cached (i.e., its result must be (& ~)'d into the PTE).
                   2421:  */
                   2422: /*static*/ int
                   2423: pv_link4m(pv, pm, va)
                   2424:        register struct pvlist *pv;
                   2425:        register struct pmap *pm;
                   2426:        register vm_offset_t va;
                   2427: {
                   2428:        register struct pvlist *npv;
                   2429:        register int ret;
                   2430:
                   2431:        if (pv->pv_pmap == NULL) {
                   2432:                /* no pvlist entries yet */
                   2433:                pmap_stats.ps_enter_firstpv++;
                   2434:                pv->pv_next = NULL;
                   2435:                pv->pv_pmap = pm;
                   2436:                pv->pv_va = va;
                   2437:                pv->pv_flags |= PV_C4M;
                   2438:                return (0);
                   2439:        }
                   2440:        /*
                   2441:         * Before entering the new mapping, see if
                   2442:         * it will cause old mappings to become aliased
                   2443:         * and thus need to be `discached'.
                   2444:         */
                   2445:        ret = 0;
                   2446:        pmap_stats.ps_enter_secondpv++;
                   2447:        if (!(pv->pv_flags & PV_C4M)) {
                   2448:                /* already uncached, just stay that way */
                   2449:                ret = SRMMU_PG_C;
                   2450:        } else {
                   2451:                for (npv = pv; npv != NULL; npv = npv->pv_next) {
                   2452:                        if (BADALIAS(va, npv->pv_va)) {
                   2453: #ifdef DEBUG
1.66      christos 2454:                                if (pmapdebug & PDB_CACHESTUFF) printf(
1.55      pk       2455:                                "pv_link: badalias: pid %d, %lx<=>%x, pa %lx\n",
                   2456:                                curproc?curproc->p_pid:-1, va, npv->pv_va,
                   2457:                                vm_first_phys + (pv-pv_table)*NBPG);
                   2458: #endif
                   2459:                                pv->pv_flags &= ~PV_C4M;
1.58      pk       2460:                                pv_changepte4m(pv, 0, ret = SRMMU_PG_C);
1.55      pk       2461:                                /* cache_flush_page(va); XXX: needed? */
                   2462:                                break;
                   2463:                        }
                   2464:                }
                   2465:        }
                   2466:        npv = (struct pvlist *)malloc(sizeof *npv, M_VMPVENT, M_WAITOK);
                   2467:        npv->pv_next = pv->pv_next;
                   2468:        npv->pv_pmap = pm;
                   2469:        npv->pv_va = va;
                   2470:        npv->pv_flags = (ret == SRMMU_PG_C ? 0 : PV_C4M);
                   2471:        pv->pv_next = npv;
                   2472:        return (ret);
                   2473: }
                   2474: #endif
                   2475:
                   2476: /*
                   2477:  * Walk the given list and flush the cache for each (MI) page that is
                   2478:  * potentially in the cache. Called only if vactype != VAC_NONE.
                   2479:  */
                   2480: void
                   2481: pv_flushcache(pv)
                   2482:        register struct pvlist *pv;
                   2483: {
                   2484:        register struct pmap *pm;
                   2485:        register int s, ctx;
                   2486:
                   2487:        write_user_windows();   /* paranoia? */
                   2488:
                   2489:        s = splpmap();          /* XXX extreme paranoia */
                   2490:        if ((pm = pv->pv_pmap) != NULL) {
                   2491:                ctx = getcontext();
                   2492:                for (;;) {
                   2493:                        if (pm->pm_ctx) {
                   2494:                                setcontext(pm->pm_ctxnum);
                   2495:                                cache_flush_page(pv->pv_va);
                   2496:                        }
                   2497:                        pv = pv->pv_next;
                   2498:                        if (pv == NULL)
                   2499:                                break;
                   2500:                        pm = pv->pv_pmap;
                   2501:                }
                   2502:                setcontext(ctx);
                   2503:        }
                   2504:        splx(s);
                   2505: }
                   2506:
                   2507: /*----------------------------------------------------------------*/
                   2508:
                   2509: /*
                   2510:  * At last, pmap code.
                   2511:  */
1.1       deraadt  2512:
1.18      deraadt  2513: #if defined(SUN4) && (defined(SUN4C) || defined(SUN4M))
                   2514: int nptesg;
                   2515: #endif
                   2516:
1.55      pk       2517: #if defined(SUN4M)
                   2518: static void pmap_bootstrap4m __P((void));
                   2519: #endif
                   2520: #if defined(SUN4) || defined(SUN4C)
                   2521: static void pmap_bootstrap4_4c __P((int, int, int));
                   2522: #endif
                   2523:
1.1       deraadt  2524: /*
                   2525:  * Bootstrap the system enough to run with VM enabled.
                   2526:  *
1.43      pk       2527:  * nsegment is the number of mmu segment entries (``PMEGs'');
                   2528:  * nregion is the number of mmu region entries (``SMEGs'');
1.1       deraadt  2529:  * nctx is the number of contexts.
                   2530:  */
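                         /*
                          * (For orientation: on the 4/4c MMU a virtual address decomposes into
                          * a region (VA_VREG), a segment within the region (VA_VSEG) and a page
                          * within the segment (VA_VPG); the software regmap/segmap arrays set
                          * up below mirror that hierarchy.  The actual counts come from the
                          * boot code and vary by model; a small sun4c, for instance, has on
                          * the order of 8 contexts and 128 PMEGs.)
                          */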
                   2531: void
1.43      pk       2532: pmap_bootstrap(nctx, nregion, nsegment)
                   2533:        int nsegment, nctx, nregion;
1.1       deraadt  2534: {
1.55      pk       2535:
                   2536:        cnt.v_page_size = NBPG;
                   2537:        vm_set_page_size();
                   2538:
                   2539: #if defined(SUN4) && (defined(SUN4C) || defined(SUN4M))
                   2540:        /* In this case NPTESG is not a #define */
                   2541:        nptesg = (NBPSG >> pgshift);
                   2542: #endif
                   2543:
1.69      pk       2544: #if 0
1.55      pk       2545:        ncontext = nctx;
1.69      pk       2546: #endif
1.55      pk       2547:
                   2548: #if defined(SUN4M)
                   2549:        if (CPU_ISSUN4M) {
                   2550:                pmap_bootstrap4m();
                   2551:                return;
                   2552:        }
                   2553: #endif
                   2554: #if defined(SUN4) || defined(SUN4C)
                   2555:        if (CPU_ISSUN4OR4C) {
                   2556:                pmap_bootstrap4_4c(nctx, nregion, nsegment);
                   2557:                return;
                   2558:        }
                   2559: #endif
                   2560: }
                   2561:
                   2562: #if defined(SUN4) || defined(SUN4C)
                   2563: void
                   2564: pmap_bootstrap4_4c(nctx, nregion, nsegment)
                   2565:        int nsegment, nctx, nregion;
                   2566: {
1.1       deraadt  2567:        register union ctxinfo *ci;
1.53      christos 2568:        register struct mmuentry *mmuseg;
1.69      pk       2569: #if defined(SUN4_MMU3L)
1.53      christos 2570:        register struct mmuentry *mmureg;
                   2571: #endif
1.43      pk       2572:        struct   regmap *rp;
                   2573:        register int i, j;
                   2574:        register int npte, zseg, vr, vs;
                   2575:        register int rcookie, scookie;
1.1       deraadt  2576:        register caddr_t p;
1.37      pk       2577:        register struct memarr *mp;
1.1       deraadt  2578:        register void (*rom_setmap)(int ctx, caddr_t va, int pmeg);
                   2579:        int lastpage;
                   2580:        extern char end[];
1.7       pk       2581: #ifdef DDB
                   2582:        extern char *esym;
                   2583:        char *theend = end;
                   2584: #endif
1.1       deraadt  2585:
1.45      pk       2586:        switch (cputyp) {
                   2587:        case CPU_SUN4C:
                   2588:                mmu_has_hole = 1;
                   2589:                break;
                   2590:        case CPU_SUN4:
1.69      pk       2591:                if (cpuinfo.cpu_type != CPUTYP_4_400) {
1.45      pk       2592:                        mmu_has_hole = 1;
                   2593:                        break;
                   2594:                }
                   2595:        }
                   2596:
1.19      deraadt  2597:        cnt.v_page_size = NBPG;
                   2598:        vm_set_page_size();
                   2599:
1.31      pk       2600: #if defined(SUN4)
                   2601:        /*
                   2602:         * set up the segfixmask to mask off invalid bits
                   2603:         */
1.43      pk       2604:        segfixmask =  nsegment - 1; /* assume nsegment is a power of 2 */
                   2605: #ifdef DIAGNOSTIC
                   2606:        if ((nsegment & segfixmask) != 0) {     /* not a power of 2 */
1.66      christos 2607:                printf("pmap_bootstrap: unsuitable number of segments (%d)\n",
1.43      pk       2608:                        nsegment);
                   2609:                callrom();
                   2610:        }
                   2611: #endif
1.31      pk       2612: #endif
                   2613:
1.55      pk       2614: #if defined(SUN4M) /* We're in a dual-arch kernel. Setup 4/4c fn. ptrs */
                   2615:        pmap_clear_modify_p     =       pmap_clear_modify4_4c;
                   2616:        pmap_clear_reference_p  =       pmap_clear_reference4_4c;
                   2617:        pmap_copy_page_p        =       pmap_copy_page4_4c;
                   2618:        pmap_enter_p            =       pmap_enter4_4c;
                   2619:        pmap_extract_p          =       pmap_extract4_4c;
                   2620:        pmap_is_modified_p      =       pmap_is_modified4_4c;
                   2621:        pmap_is_referenced_p    =       pmap_is_referenced4_4c;
                   2622:        pmap_page_protect_p     =       pmap_page_protect4_4c;
                   2623:        pmap_protect_p          =       pmap_protect4_4c;
                   2624:        pmap_zero_page_p        =       pmap_zero_page4_4c;
                   2625:        pmap_changeprot_p       =       pmap_changeprot4_4c;
                   2626:        pmap_rmk_p              =       pmap_rmk4_4c;
                   2627:        pmap_rmu_p              =       pmap_rmu4_4c;
                   2628: #endif /* defined SUN4M */
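                                 /*
                                  * (All pmap entry points are then called through these pointers;
                                  * pmap_bootstrap4m() installs the corresponding 4m versions.)
                                  */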
1.43      pk       2629:
1.1       deraadt  2630:        /*
                   2631:         * Last segment is the `invalid' one (one PMEG of pte's with !pg_v).
                   2632:         * It will never be used for anything else.
                   2633:         */
1.43      pk       2634:        seginval = --nsegment;
                   2635:
1.69      pk       2636: #if defined(SUN4_MMU3L)
                   2637:        if (HASSUN4_MMU3L)
1.43      pk       2638:                reginval = --nregion;
                   2639: #endif
                   2640:
                   2641:        /*
                   2642:         * Initialize the kernel pmap.
                   2643:         */
                   2644:        /* kernel_pmap_store.pm_ctxnum = 0; */
                   2645:        simple_lock_init(&kernel_pmap_store.pm_lock);
                   2646:        kernel_pmap_store.pm_refcount = 1;
1.69      pk       2647: #if defined(SUN4_MMU3L)
1.43      pk       2648:        TAILQ_INIT(&kernel_pmap_store.pm_reglist);
                   2649: #endif
                   2650:        TAILQ_INIT(&kernel_pmap_store.pm_seglist);
                   2651:
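                                 /*
                                  * (pm_regmap is biased by -NUREG so that indexing it with a
                                  * kernel virtual region number -- always >= NUREG -- lands
                                  * within kernel_regmap_store[].)
                                  */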
                   2652:        kernel_pmap_store.pm_regmap = &kernel_regmap_store[-NUREG];
                   2653:        for (i = NKREG; --i >= 0;) {
1.69      pk       2654: #if defined(SUN4_MMU3L)
1.43      pk       2655:                kernel_regmap_store[i].rg_smeg = reginval;
                   2656: #endif
                   2657:                kernel_regmap_store[i].rg_segmap =
                   2658:                        &kernel_segmap_store[i * NSEGRG];
                   2659:                for (j = NSEGRG; --j >= 0;)
                   2660:                        kernel_segmap_store[i * NSEGRG + j].sg_pmeg = seginval;
                   2661:        }
1.1       deraadt  2662:
                   2663:        /*
                   2664:         * Preserve the monitor ROM's reserved VM region, so that
                   2665:         * we can use L1-A or the monitor's debugger.  As a side
                   2666:         * effect we map the ROM's reserved VM into all contexts
                   2667:         * (otherwise L1-A crashes the machine!).
                   2668:         */
1.43      pk       2669:
1.58      pk       2670:        mmu_reservemon4_4c(&nregion, &nsegment);
1.43      pk       2671:
1.69      pk       2672: #if defined(SUN4_MMU3L)
1.43      pk       2673:        /* Reserve one region for temporary mappings */
                   2674:        tregion = --nregion;
                   2675: #endif
1.1       deraadt  2676:
                   2677:        /*
1.43      pk       2678:         * Allocate and clear mmu entries and context structures.
1.1       deraadt  2679:         */
                   2680:        p = end;
1.7       pk       2681: #ifdef DDB
                   2682:        if (esym != 0)
                   2683:                theend = p = esym;
                   2684: #endif
1.69      pk       2685: #if defined(SUN4_MMU3L)
1.43      pk       2686:        mmuregions = mmureg = (struct mmuentry *)p;
                   2687:        p += nregion * sizeof(struct mmuentry);
                   2688: #endif
                   2689:        mmusegments = mmuseg = (struct mmuentry *)p;
                   2690:        p += nsegment * sizeof(struct mmuentry);
1.69      pk       2691:        pmap_kernel()->pm_ctx = cpuinfo.ctxinfo = ci = (union ctxinfo *)p;
1.1       deraadt  2692:        p += nctx * sizeof *ci;
1.7       pk       2693: #ifdef DDB
                   2694:        bzero(theend, p - theend);
                   2695: #else
1.1       deraadt  2696:        bzero(end, p - end);
1.7       pk       2697: #endif
1.1       deraadt  2698:
1.43      pk       2699:        /* Initialize MMU resource queues */
1.69      pk       2700: #if defined(SUN4_MMU3L)
1.43      pk       2701:        TAILQ_INIT(&region_freelist);
                   2702:        TAILQ_INIT(&region_lru);
                   2703:        TAILQ_INIT(&region_locked);
                   2704: #endif
                   2705:        TAILQ_INIT(&segm_freelist);
                   2706:        TAILQ_INIT(&segm_lru);
                   2707:        TAILQ_INIT(&segm_locked);
                   2708:
1.1       deraadt  2709:        /*
                   2710:         * Set up the `constants' for the call to vm_init()
                   2711:         * in main().  All pages beginning at p (rounded up to
                   2712:         * the next whole page) and continuing through the number
                   2713:         * of available pages are free, but they start at a higher
                   2714:         * virtual address.  This gives us two mappable MD pages
                   2715:         * for pmap_zero_page and pmap_copy_page, and one MI page
                   2716:         * for /dev/mem, all with no associated physical memory.
                   2717:         */
                   2718:        p = (caddr_t)(((u_int)p + NBPG - 1) & ~PGOFSET);
                   2719:        avail_start = (int)p - KERNBASE;
1.36      pk       2720:
                   2721:        /*
                   2722:         * Grab physical memory list, so pmap_next_page() can do its bit.
                   2723:         */
                   2724:        npmemarr = makememarr(pmemarr, MA_SIZE, MEMARR_AVAILPHYS);
                   2725:        sortm(pmemarr, npmemarr);
                   2726:        if (pmemarr[0].addr != 0) {
1.66      christos 2727:                printf("pmap_bootstrap: no kernel memory?!\n");
1.36      pk       2728:                callrom();
                   2729:        }
                   2730:        avail_end = pmemarr[npmemarr-1].addr + pmemarr[npmemarr-1].len;
1.38      pk       2731:        avail_next = avail_start;
                   2732:        for (physmem = 0, mp = pmemarr, j = npmemarr; --j >= 0; mp++)
                   2733:                physmem += btoc(mp->len);
                   2734:
                   2735:        i = (int)p;
                   2736:        vpage[0] = p, p += NBPG;
                   2737:        vpage[1] = p, p += NBPG;
1.41      mycroft  2738:        vmmap = p, p += NBPG;
1.38      pk       2739:        p = reserve_dumppages(p);
1.39      pk       2740:
1.37      pk       2741:        /*
1.38      pk       2742:         * Allocate virtual memory for pv_table[], which will be mapped
                   2743:         * sparsely in pmap_init().
1.37      pk       2744:         */
                   2745:        pv_table = (struct pvlist *)p;
                   2746:        p += round_page(sizeof(struct pvlist) * atop(avail_end - avail_start));
1.36      pk       2747:
1.1       deraadt  2748:        virtual_avail = (vm_offset_t)p;
                   2749:        virtual_end = VM_MAX_KERNEL_ADDRESS;
                   2750:
                   2751:        p = (caddr_t)i;                 /* retract to first free phys */
                   2752:
                   2753:        /*
                   2754:         * All contexts are free except the kernel's.
                   2755:         *
                   2756:         * XXX sun4c could use context 0 for users?
                   2757:         */
1.42      mycroft  2758:        ci->c_pmap = pmap_kernel();
1.1       deraadt  2759:        ctx_freelist = ci + 1;
                   2760:        for (i = 1; i < ncontext; i++) {
                   2761:                ci++;
                   2762:                ci->c_nextfree = ci + 1;
                   2763:        }
                   2764:        ci->c_nextfree = NULL;
                   2765:        ctx_kick = 0;
                   2766:        ctx_kickdir = -1;
                   2767:
                   2768:        /*
                   2769:         * Init mmu entries that map the kernel physical addresses.
                   2770:         *
                   2771:         * All the other MMU entries are free.
                   2772:         *
                   2773:         * THIS ASSUMES SEGMENT i IS MAPPED BY MMU ENTRY i DURING THE
                   2774:         * BOOT PROCESS
                   2775:         */
1.43      pk       2776:
                   2777:        rom_setmap = promvec->pv_setctxt;
                   2778:        zseg = ((((u_int)p + NBPSG - 1) & ~SGOFSET) - KERNBASE) >> SGSHIFT;
1.1       deraadt  2779:        lastpage = VA_VPG(p);
                   2780:        if (lastpage == 0)
1.43      pk       2781:                /*
                   2782:                 * If the page bits in p are 0, we filled the last segment
                   2783:                 * exactly (now how did that happen?); if not, it is
                   2784:                 * the last page filled in the last segment.
                   2785:                 */
1.1       deraadt  2786:                lastpage = NPTESG;
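                                 /*
                                  * (Worked example: on a sun4c, with 4K pages and 256K segments,
                                  * NPTESG is 64; a kernel image ending 3 pages into its final
                                  * segment yields lastpage = 3, and pages 3..63 of that segment
                                  * are invalidated further below.)
                                  */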
1.43      pk       2787:
1.1       deraadt  2788:        p = (caddr_t)KERNBASE;          /* first va */
                   2789:        vs = VA_VSEG(KERNBASE);         /* first virtual segment */
1.43      pk       2790:        vr = VA_VREG(KERNBASE);         /* first virtual region */
                   2791:        rp = &pmap_kernel()->pm_regmap[vr];
                   2792:
                   2793:        for (rcookie = 0, scookie = 0;;) {
                   2794:
1.1       deraadt  2795:                /*
1.43      pk       2796:                 * Distribute each kernel region/segment into all contexts.
1.1       deraadt  2797:                 * This is done through the monitor ROM, rather than
                   2798:                 * directly here: if we do a setcontext we will fault,
                   2799:                 * as we are not (yet) mapped in any other context.
                   2800:                 */
1.43      pk       2801:
                   2802:                if ((vs % NSEGRG) == 0) {
                   2803:                        /* Entering a new region */
                   2804:                        if (VA_VREG(p) > vr) {
                   2805: #ifdef DEBUG
1.66      christos 2806:                                printf("note: giant kernel!\n");
1.43      pk       2807: #endif
                   2808:                                vr++, rp++;
                   2809:                        }
1.69      pk       2810: #if defined(SUN4_MMU3L)
                   2811:                        if (HASSUN4_MMU3L) {
1.43      pk       2812:                                for (i = 1; i < nctx; i++)
                   2813:                                        rom_setmap(i, p, rcookie);
                   2814:
                   2815:                                TAILQ_INSERT_TAIL(&region_locked,
                   2816:                                                  mmureg, me_list);
                   2817:                                TAILQ_INSERT_TAIL(&pmap_kernel()->pm_reglist,
                   2818:                                                  mmureg, me_pmchain);
                   2819:                                mmureg->me_cookie = rcookie;
                   2820:                                mmureg->me_pmap = pmap_kernel();
                   2821:                                mmureg->me_vreg = vr;
                   2822:                                rp->rg_smeg = rcookie;
                   2823:                                mmureg++;
                   2824:                                rcookie++;
                   2825:                        }
                   2826: #endif
                   2827:                }
                   2828:
1.69      pk       2829: #if defined(SUN4_MMU3L)
                   2830:                if (!HASSUN4_MMU3L)
1.43      pk       2831: #endif
                   2832:                        for (i = 1; i < nctx; i++)
                   2833:                                rom_setmap(i, p, scookie);
1.1       deraadt  2834:
                   2835:                /* set up the mmu entry */
1.43      pk       2836:                TAILQ_INSERT_TAIL(&segm_locked, mmuseg, me_list);
                   2837:                TAILQ_INSERT_TAIL(&pmap_kernel()->pm_seglist, mmuseg, me_pmchain);
1.70      pk       2838:                pmap_stats.ps_npmeg_locked++;
1.43      pk       2839:                mmuseg->me_cookie = scookie;
                   2840:                mmuseg->me_pmap = pmap_kernel();
                   2841:                mmuseg->me_vreg = vr;
                   2842:                mmuseg->me_vseg = vs % NSEGRG;
                   2843:                rp->rg_segmap[vs % NSEGRG].sg_pmeg = scookie;
                   2844:                npte = ++scookie < zseg ? NPTESG : lastpage;
                   2845:                rp->rg_segmap[vs % NSEGRG].sg_npte = npte;
                   2846:                rp->rg_nsegmap += 1;
                   2847:                mmuseg++;
1.1       deraadt  2848:                vs++;
1.43      pk       2849:                if (scookie < zseg) {
1.1       deraadt  2850:                        p += NBPSG;
                   2851:                        continue;
                   2852:                }
1.43      pk       2853:
1.1       deraadt  2854:                /*
                   2855:                 * Unmap the pages, if any, that are not part of
                   2856:                 * the final segment.
                   2857:                 */
1.43      pk       2858:                for (p += npte << PGSHIFT; npte < NPTESG; npte++, p += NBPG)
1.55      pk       2859:                        setpte4(p, 0);
1.43      pk       2860:
1.69      pk       2861: #if defined(SUN4_MMU3L)
                   2862:                if (HASSUN4_MMU3L) {
1.43      pk       2863:                        /*
                   2864:                         * Unmap the segments, if any, that are not part of
                   2865:                         * the final region.
                   2866:                         */
                   2867:                        for (i = rp->rg_nsegmap; i < NSEGRG; i++, p += NBPSG)
                   2868:                                setsegmap(p, seginval);
                   2869:                }
                   2870: #endif
1.1       deraadt  2871:                break;
                   2872:        }
1.43      pk       2873:
1.69      pk       2874: #if defined(SUN4_MMU3L)
                   2875:        if (HASSUN4_MMU3L)
1.43      pk       2876:                for (; rcookie < nregion; rcookie++, mmureg++) {
                   2877:                        mmureg->me_cookie = rcookie;
                   2878:                        TAILQ_INSERT_TAIL(&region_freelist, mmureg, me_list);
                   2879:                }
                   2880: #endif
                   2881:
                   2882:        for (; scookie < nsegment; scookie++, mmuseg++) {
                   2883:                mmuseg->me_cookie = scookie;
                   2884:                TAILQ_INSERT_TAIL(&segm_freelist, mmuseg, me_list);
1.70      pk       2885:                pmap_stats.ps_npmeg_free++;
1.1       deraadt  2886:        }
                   2887:
1.13      pk       2888:        /* Erase all spurious user-space segmaps */
                   2889:        for (i = 1; i < ncontext; i++) {
1.71      pk       2890:                setcontext4(i);
1.69      pk       2891:                if (HASSUN4_MMU3L)
1.43      pk       2892:                        for (p = 0, j = NUREG; --j >= 0; p += NBPRG)
                   2893:                                setregmap(p, reginval);
                   2894:                else
                   2895:                        for (p = 0, vr = 0; vr < NUREG; vr++) {
1.45      pk       2896:                                if (VA_INHOLE(p)) {
                   2897:                                        p = (caddr_t)MMU_HOLE_END;
                   2898:                                        vr = VA_VREG(p);
1.43      pk       2899:                                }
                   2900:                                for (j = NSEGRG; --j >= 0; p += NBPSG)
                   2901:                                        setsegmap(p, seginval);
                   2902:                        }
1.13      pk       2903:        }
1.71      pk       2904:        setcontext4(0);
1.13      pk       2905:
1.1       deraadt  2906:        /*
                   2907:         * write protect & encache kernel text;
                   2908:         * set red zone at kernel base; enable cache on message buffer.
                   2909:         */
                   2910:        {
1.23      deraadt  2911:                extern char etext[];
1.1       deraadt  2912: #ifdef KGDB
                   2913:                register int mask = ~PG_NC;     /* XXX chgkprot is busted */
                   2914: #else
                   2915:                register int mask = ~(PG_W | PG_NC);
                   2916: #endif
1.2       deraadt  2917:
1.23      deraadt  2918:                for (p = (caddr_t)trapbase; p < etext; p += NBPG)
1.55      pk       2919:                        setpte4(p, getpte4(p) & mask);
1.1       deraadt  2920:        }
                   2921: }
1.55      pk       2922: #endif
1.1       deraadt  2923:
1.55      pk       2924: #if defined(SUN4M)             /* Sun4M version of pmap_bootstrap */
                   2925: /*
                   2926:  * Bootstrap the system enough to run with VM enabled on a Sun4M machine.
                   2927:  *
                   2928:  * Switches from ROM to kernel page tables, and sets up initial mappings.
                   2929:  */
                   2930: static void
                   2931: pmap_bootstrap4m(void)
1.36      pk       2932: {
1.55      pk       2933:        register int i, j;
1.71      pk       2934:        caddr_t p;
1.55      pk       2935:        register caddr_t q;
                   2936:        register union ctxinfo *ci;
1.36      pk       2937:        register struct memarr *mp;
1.55      pk       2938:        struct   regmap *rmapp = NULL;
                   2939:        struct   segmap *smapp = NULL;
                   2940:        register int reg, seg;
1.71      pk       2941:        unsigned int ctxtblsize;
1.55      pk       2942: #if 0
                   2943:        int nkreg, nkseg, nkpag, kernsize, newpgs;
                   2944: #endif
                   2945:        int deadfill, deadspace;
                   2946:        extern char end[];
                   2947:        extern char etext[];
                   2948: #ifdef DDB
                   2949:        extern char *esym;
                   2950:        char *theend = end;
                   2951: #endif
                   2952:        extern caddr_t reserve_dumppages(caddr_t);
1.36      pk       2953:
1.55      pk       2954: #if defined(SUN4) || defined(SUN4C) /* setup 4M fn. ptrs for dual-arch kernel */
                   2955:        pmap_clear_modify_p     =       pmap_clear_modify4m;
                   2956:        pmap_clear_reference_p  =       pmap_clear_reference4m;
                   2957:        pmap_copy_page_p        =       pmap_copy_page4m;
                   2958:        pmap_enter_p            =       pmap_enter4m;
                   2959:        pmap_extract_p          =       pmap_extract4m;
                   2960:        pmap_is_modified_p      =       pmap_is_modified4m;
                   2961:        pmap_is_referenced_p    =       pmap_is_referenced4m;
                   2962:        pmap_page_protect_p     =       pmap_page_protect4m;
                   2963:        pmap_protect_p          =       pmap_protect4m;
                   2964:        pmap_zero_page_p        =       pmap_zero_page4m;
                   2965:        pmap_changeprot_p       =       pmap_changeprot4m;
                   2966:        pmap_rmk_p              =       pmap_rmk4m;
                   2967:        pmap_rmu_p              =       pmap_rmu4m;
                   2968: #endif /* defined Sun4/Sun4c */
1.37      pk       2969:
1.71      pk       2970: /*XXX-GCC!*/ci = 0;
                   2971:
1.36      pk       2972:        /*
1.55      pk       2973:         * Initialize the kernel pmap.
                   2974:         */
                   2975:        /* kernel_pmap_store.pm_ctxnum = 0; */
                   2976:        simple_lock_init(&kernel_pmap_store.pm_lock);
                   2977:        kernel_pmap_store.pm_refcount = 1;
1.71      pk       2978:
                   2979:        /*
                   2980:         * Set up pm_regmap for the kernel to point NUREG entries *below*
1.55      pk       2981:         * the beginning of kernel regmap storage. Since the kernel only
                   2982:         * uses regions above NUREG, we save storage space and can index
                   2983:         * kernel and user regions in the same way.
1.36      pk       2984:         */
1.55      pk       2985:        kernel_pmap_store.pm_regmap = &kernel_regmap_store[-NUREG];
                   2986:        kernel_pmap_store.pm_reg_ptps = NULL;
                   2987:        kernel_pmap_store.pm_reg_ptps_pa = 0;
                   2988:        bzero(kernel_regmap_store, NKREG * sizeof(struct regmap));
                   2989:        bzero(kernel_segmap_store, NKREG * NSEGRG * sizeof(struct segmap));
                   2990:        for (i = NKREG; --i >= 0;) {
                   2991:                kernel_regmap_store[i].rg_segmap =
                   2992:                        &kernel_segmap_store[i * NSEGRG];
                   2993:                kernel_regmap_store[i].rg_seg_ptps = NULL;
                   2994:                for (j = NSEGRG; --j >= 0;)
                   2995:                        kernel_segmap_store[i * NSEGRG + j].sg_pte = NULL;
                   2996:        }
1.38      pk       2997:
1.55      pk       2998:        p = end;                /* p points to top of kernel mem */
                   2999: #ifdef DDB
                   3000:        if (esym != 0)
                   3001:                theend = p = esym;
                   3002: #endif
                   3003:
1.71      pk       3004: #if 0
                   3005:        /* Allocate context administration */
1.69      pk       3006:        pmap_kernel()->pm_ctx = cpuinfo.ctxinfo = ci = (union ctxinfo *)p;
1.55      pk       3007:        p += ncontext * sizeof *ci;
1.69      pk       3008:        bzero((caddr_t)ci, (u_int)p - (u_int)ci);
1.55      pk       3009:        ctxbusyvector = p;
                   3010:        p += ncontext;
                   3011:        bzero(ctxbusyvector, ncontext);
                   3012:        ctxbusyvector[0] = 1;   /* context 0 is always in use */
1.69      pk       3013: #endif
1.55      pk       3014:
                   3015:        /*
1.71      pk       3016:         * Reserve memory for the I/O pagetables. Since we want 64M of
1.55      pk       3017:         * DVMA space, this takes 64k of memory (how much exactly depends
                   3018:         * on the definition of DVMA4M_BASE...we may drop it back to 32M).
                   3019:         * But since the table must be aligned to its own size, we might
                   3020:         * end up using as much as 128K. (Note 1024 = NBPG / sizeof(iopte_t).)
                   3021:         *
                   3022:         * We optimize with some space saving song and dance to
                   3023:         * squeeze other pagetables in the dead space.
                   3024:         */
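        /*
         * Worked numbers (illustrative, assuming NBPG == 4096 and
         * 4-byte ioptes): a 64M DVMA range spans 64M / 4K = 16384
         * pages, so the IOPTE table is 16384 * 4 = 64K, which is
         * exactly (0 - DVMA4M_BASE) / 1024 bytes.  Aligning the table
         * to its own size can waste up to another 64K, hence the 128K
         * worst case mentioned above.
         */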
                   3025: #ifdef DEBUG
                   3026:        if ((0 - DVMA4M_BASE) % (16*1024*1024))
1.71      pk       3027:            panic("pmap_bootstrap4m: invalid DVMA4M_BASE of 0x%x", DVMA4M_BASE);
1.55      pk       3028: #endif
                   3029:
                   3030:        deadfill = 0;
1.71      pk       3031:        p = (caddr_t) roundup((u_int) p, sizeof(long) *
                   3032:                        max(SRMMU_L1SIZE, max(SRMMU_L2SIZE, SRMMU_L3SIZE)));
1.55      pk       3033:
1.71      pk       3034:        deadspace = (int) (
                   3035:                ((caddr_t)roundup((u_int)p, (0 - DVMA4M_BASE) / 1024)) - p);
1.55      pk       3036:
                   3037:        if (deadspace >= SRMMU_L3SIZE * sizeof(long) * NKREG * NSEGRG) {
                   3038:                p = (caddr_t) roundup((u_int)p, SRMMU_L3SIZE * sizeof(long));
                   3039:                kernel_pagtable_store = (u_int *)p;
                   3040:                p += ((SRMMU_L3SIZE * sizeof(long)) * NKREG) * NSEGRG;
                   3041:                bzero(kernel_pagtable_store,
                   3042:                      p - (caddr_t) kernel_pagtable_store);
1.71      pk       3043:                deadfill |= 8;
                   3044:                deadspace -= (int)(p - (caddr_t) kernel_pagtable_store);
                   3045:        }
                   3046:        if (deadspace >= ncontext * sizeof(union ctxinfo)) {
                   3047:                /* Allocate context administration */
                   3048:                ci = (union ctxinfo *)p;
                   3049:                p += ncontext * sizeof *ci;
                   3050:                bzero((caddr_t)ci, (u_int)p - (u_int)ci);
1.55      pk       3051:                deadfill |= 4;
1.71      pk       3052:                deadspace -= (int)(p - (caddr_t)ci);
1.55      pk       3053:        }
                   3054:        if (deadspace >= SRMMU_L2SIZE * sizeof(long) * NKREG) {
                   3055:                p = (caddr_t) roundup((u_int)p, SRMMU_L2SIZE * sizeof(long));
                   3056:                kernel_segtable_store = (u_int *)p;
                   3057:                p += (SRMMU_L2SIZE * sizeof(long)) * NKREG;
                   3058:                bzero(kernel_segtable_store,
                   3059:                      p - (caddr_t) kernel_segtable_store);
                   3060:                deadfill |= 2;
                   3061:                deadspace -= (int)(p - (caddr_t) kernel_segtable_store);
                   3062:        }
                   3063:        if (deadspace >= SRMMU_L1SIZE * sizeof(long)) {
                   3064:                p = (caddr_t) roundup((u_int)p, SRMMU_L1SIZE * sizeof(long));
                   3065:                kernel_regtable_store = (u_int *)p;
                   3066:                p += SRMMU_L1SIZE * sizeof(long);
                   3067:                bzero(kernel_regtable_store,
                   3068:                      p - (caddr_t) kernel_regtable_store);
                   3069:                deadfill |= 1;
                   3070:                deadspace -= (int)(p - (caddr_t) kernel_regtable_store);
                   3071:        }
                   3072:        if (deadspace < 0)
1.69      pk       3073:                printf("pmap_bootstrap4m: botch in memory-saver\n");
1.55      pk       3074:
                   3075:        p = (caddr_t) roundup((u_int)p, (0 - DVMA4M_BASE) / 1024);
                   3076:        kernel_iopte_table = (u_int *)p;
                   3077:        kernel_iopte_table_pa = VA2PA((caddr_t)kernel_iopte_table);
                   3078:        p += (0 - DVMA4M_BASE) / 1024;
                   3079:        bzero(kernel_iopte_table, p - (caddr_t) kernel_iopte_table);
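        /*
         * Illustrative sketch of the packing arithmetic above (not
         * compiled; assumes 4-byte table entries, NKREG == 8,
         * NSEGRG == 64 and a 64M DVMA range):
         */
#if 0
        int l3bytes = SRMMU_L3SIZE * sizeof(long) * NKREG * NSEGRG;
                                                /* 64*4*8*64 = 128K */
        int l2bytes = SRMMU_L2SIZE * sizeof(long) * NKREG;
                                                /* 64*4*8 = 2K */
        int l1bytes = SRMMU_L1SIZE * sizeof(long);
                                                /* 256*4 = 1K */
        int iobytes = (0 - DVMA4M_BASE) / 1024; /* 64M/1024 = 64K */

        /*
         * The dead space between the roundup() above and the next
         * iobytes-aligned boundary is filled with the biggest
         * allocations first, so ideally only the IOPTE table itself
         * costs fresh, alignment-padded memory.
         */
#endif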
                   3080:
                   3081:        /*
1.71      pk       3082:         * Allocate context table. We put it right after the IOPTEs,
                   3083:         * so we avoid alignment-induced wastage.
                   3084:         * To keep the SuperSPARC happy, minimum alignment is on a 4K boundary.
                   3085:         */
                   3086:        ctxtblsize = max(ncontext,1024) * sizeof(int);
                   3087:        cpuinfo.ctx_tbl = (int *)roundup((u_int)p, ctxtblsize);
                   3088:        p = (caddr_t)((u_int)cpuinfo.ctx_tbl + ctxtblsize);
                   3089:        qzero(cpuinfo.ctx_tbl, ctxtblsize);
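        /*
         * Example (illustrative): with ncontext == 256, ctxtblsize is
         * max(256, 1024) * 4 == 4096 bytes, so rounding p up to
         * ctxtblsize gives the 4K alignment required above; larger
         * context counts automatically yield larger (and therefore
         * still sufficient) alignment.
         */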
                   3090:
                   3091:
                   3092:        /*
                   3093:         * Reserve memory for segment and page tables needed to map the entire
1.55      pk       3094:         * kernel (from regions 0xf8 -> 0xff). This takes 130k of space, but
                   3095:         * unfortunately is necessary since pmap_enk *must* be able to enter
                   3096:         * a kernel mapping without resorting to malloc, or else the
                   3097:         * possibility of deadlock arises (pmap_enk4m is called to enter a
                   3098:         * mapping; it needs to malloc a page table; malloc then calls
                   3099:         * pmap_enk4m to enter the new malloc'd page; pmap_enk4m needs to
                   3100:         * malloc a page table to enter _that_ mapping; malloc deadlocks since
                   3101:         * it is already allocating that object).
                   3102:         *
                   3103:         * We only do this if it wasn't done above...
                   3104:         */
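        /*
         * The 130k figure breaks down as follows (assuming 4-byte
         * table entries, NKREG == 8 and NSEGRG == 64):
         *      L3 page tables:    64 * 4 * 8 * 64 = 128K
         *      L2 segment tables: 64 * 4 * 8      =   2K
         *      L1 region table:   256 * 4         =   1K
         */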
                   3105:        if (!(deadfill & 2)) {
                   3106:                p = (caddr_t) roundup((u_int)p, SRMMU_L2SIZE * sizeof(long));
                   3107:                kernel_segtable_store = (u_int *)p;
                   3108:                p += (SRMMU_L2SIZE * sizeof(long)) * NKREG;
                   3109:                bzero(kernel_segtable_store,
                   3110:                      p - (caddr_t) kernel_segtable_store);
                   3111:        }
1.71      pk       3112:        if (!(deadfill & 8)) {
1.55      pk       3113:                p = (caddr_t) roundup((u_int)p, SRMMU_L3SIZE * sizeof(long));
                   3114:                kernel_pagtable_store = (u_int *)p;
                   3115:                p += ((SRMMU_L3SIZE * sizeof(long)) * NKREG) * NSEGRG;
                   3116:                bzero(kernel_pagtable_store,
                   3117:                      p - (caddr_t) kernel_pagtable_store);
                   3118:        }
                   3119:        if (!(deadfill & 1)) {
                   3120:                p = (caddr_t) roundup((u_int)p, SRMMU_L1SIZE * sizeof(long));
                   3121:                kernel_regtable_store = (u_int *)p;
                   3122:                p += SRMMU_L1SIZE * sizeof(long);
                   3123:                bzero(kernel_regtable_store,
                   3124:                      p - (caddr_t) kernel_regtable_store);
                   3125:        }
1.71      pk       3126:        if (!(deadfill & 4)) {
                   3127:                /* Allocate context administration */
                   3128:                p = (caddr_t) roundup((u_int)p, sizeof(long));
                   3129:                ci = (union ctxinfo *)p;
                   3130:                p += ncontext * sizeof *ci;
                   3131:                bzero((caddr_t)ci, (u_int)p - (u_int)ci);
                   3132:        }
                   3133:
                   3134:        pmap_kernel()->pm_ctx = cpuinfo.ctxinfo = ci;
                   3135:
                   3136:        /*
                   3137:         * Since we've statically allocated space to map the entire kernel,
                   3138:         * we might as well pre-wire the mappings to save time in pmap_enter.
                   3139:         * This also gets around nasty problems with caching of L1/L2 ptp's.
                   3140:         *
                   3141:         * XXX WHY DO WE HAVE THIS CACHING PROBLEM WITH L1/L2 PTPS????? %%%
                   3142:         */
                   3143:
                   3144:        pmap_kernel()->pm_reg_ptps = (int *) kernel_regtable_store;
                   3145:        pmap_kernel()->pm_reg_ptps_pa =
                   3146:                VA2PA((caddr_t)pmap_kernel()->pm_reg_ptps);
                   3147:
                   3148:        /* Install L1 table in context 0 */
                   3149:        cpuinfo.ctx_tbl[0] =
                   3150:            (pmap_kernel()->pm_reg_ptps_pa >> SRMMU_PPNPASHIFT) | SRMMU_TEPTD;
                   3151:
                   3152:        /* XXX:rethink - Store a pointer to the region table */
                   3153:        cpuinfo.L1_ptps = pmap_kernel()->pm_reg_ptps;
1.55      pk       3154:
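        /*
         * A note on the descriptor arithmetic used throughout
         * (illustrative; `table' and `page' below are hypothetical
         * virtual addresses, and SRMMU_PPNPASHIFT is assumed to be 4):
         * both table descriptors and page table entries hold a
         * physical address shifted right by 4, leaving the low bits
         * free for the type, protection and cacheability fields:
         */
#if 0
        int ptd = (VA2PA(table) >> SRMMU_PPNPASHIFT) | SRMMU_TEPTD;
        int pte = (VA2PA(page) >> SRMMU_PPNPASHIFT) |
            PPROT_N_RWX | SRMMU_PG_C | SRMMU_TEPTE;
#endif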
1.71      pk       3155:        for (reg = VA_VREG(KERNBASE); reg < NKREG+VA_VREG(KERNBASE); reg++) {
                   3156:                struct   regmap *rp;
                   3157:                caddr_t kphyssegtbl;
                   3158:
                   3159:                /*
                   3160:                 * entering new region; install & build segtbl
                   3161:                 * XXX: WE TRASH ANY EXISTING MAPPINGS IN THE KERNEL
                   3162:                 *      REGION. SHOULD BE FIXED!
                   3163:                 */
                   3164:                int kregnum = reg - VA_VREG(KERNBASE);
                   3165:
                   3166:                rp = &pmap_kernel()->pm_regmap[reg];
                   3167:
                   3168:                kphyssegtbl = (caddr_t)
                   3169:                    &kernel_segtable_store[kregnum * SRMMU_L2SIZE];
                   3170:
                   3171:                bzero(kphyssegtbl, SRMMU_L2SIZE * sizeof(long));
                   3172:                (pmap_kernel()->pm_reg_ptps)[reg] =
                   3173:                    (VA2PA(kphyssegtbl) >> SRMMU_PPNPASHIFT) | SRMMU_TEPTD;
                   3174:
                   3175:                rp->rg_seg_ptps = (int *)kphyssegtbl;
                   3176:
                   3177:                if (rp->rg_segmap == NULL) {
                   3178:                        printf("rp->rg_segmap == NULL!\n");
                   3179:                        rp->rg_segmap = &kernel_segmap_store[kregnum * NSEGRG];
                   3180:                }
                   3181:
                   3182:                for (seg = 0; seg < NSEGRG; seg++) {
                   3183:                        struct   segmap *sp;
                   3184:                        caddr_t kphyspagtbl;
                   3185:
                   3186:                        rp->rg_nsegmap++;
                   3187:
                   3188:                        sp = &rp->rg_segmap[seg];
                   3189:                        kphyspagtbl = (caddr_t)
                   3190:                            &kernel_pagtable_store
                   3191:                                [((kregnum * NSEGRG) + seg) * SRMMU_L3SIZE];
                   3192:
                   3193:                        bzero(kphyspagtbl, SRMMU_L3SIZE * sizeof(long));
                   3194:
                   3195:                        rp->rg_seg_ptps[seg] =
                   3196:                            (VA2PA(kphyspagtbl) >> SRMMU_PPNPASHIFT) |
                   3197:                                SRMMU_TEPTD;
                   3198:                        sp->sg_pte = (int *) kphyspagtbl;
                   3199:                }
                   3200:        }
                   3201:
                   3202:        /*
                   3203:         * Preserve the monitor ROM's reserved VM region, so that
                   3204:         * we can use L1-A or the monitor's debugger.
1.55      pk       3205:         */
1.71      pk       3206:        mmu_reservemon4m(&kernel_pmap_store, &p);
                   3207:
1.55      pk       3208:
                   3209:        /*
                   3210:         * Set up the `constants' for the call to vm_init()
                   3211:         * in main().  All pages beginning at p (rounded up to
                   3212:         * the next whole page) and continuing through the number
                   3213:         * of available pages are free, but they start at a higher
                   3214:         * virtual address.  This gives us two mappable MD pages
                   3215:         * for pmap_zero_page and pmap_copy_page, and one MI page
                   3216:         * for /dev/mem, all with no associated physical memory.
                   3217:         */
                   3218:        p = (caddr_t)(((u_int)p + NBPG - 1) & ~PGOFSET);
                   3219:        avail_start = (int)p - KERNBASE;
                   3220:        /*
                   3221:         * Grab physical memory list, so pmap_next_page() can do its bit.
                   3222:         */
                   3223:        npmemarr = makememarr(pmemarr, MA_SIZE, MEMARR_AVAILPHYS);
                   3224:        sortm(pmemarr, npmemarr);
                   3225:        if (pmemarr[0].addr != 0) {
1.66      christos 3226:                printf("pmap_bootstrap: no kernel memory?!\n");
1.55      pk       3227:                callrom();
                   3228:        }
                   3229:        avail_end = pmemarr[npmemarr-1].addr + pmemarr[npmemarr-1].len;
                   3230:        avail_next = avail_start;
                   3231:        for (physmem = 0, mp = pmemarr, j = npmemarr; --j >= 0; mp++)
                   3232:                physmem += btoc(mp->len);
                   3233:
                   3234:        i = (int)p;
                   3235:        vpage[0] = p, p += NBPG;
                   3236:        vpage[1] = p, p += NBPG;
                   3237:        vmmap = p, p += NBPG;
                   3238:        p = reserve_dumppages(p);
                   3239:
                   3240:        /*
                   3241:         * Allocate virtual memory for pv_table[], which will be mapped
                   3242:         * sparsely in pmap_init().
                   3243:         */
                   3244:        pv_table = (struct pvlist *)p;
                   3245:        p += round_page(sizeof(struct pvlist) * atop(avail_end - avail_start));
                   3246:
                   3247:        virtual_avail = (vm_offset_t)p;
                   3248:        virtual_end = VM_MAX_KERNEL_ADDRESS;
                   3249:
                   3250:        p = (caddr_t)i;                 /* retract to first free phys */
                   3251:
1.69      pk       3252:        /*
                   3253:         * Set up the ctxinfo structures (freelist of contexts)
1.55      pk       3254:         */
                   3255:        ci->c_pmap = pmap_kernel();
                   3256:        ctx_freelist = ci + 1;
                   3257:        for (i = 1; i < ncontext; i++) {
                   3258:                ci++;
                   3259:                ci->c_nextfree = ci + 1;
                   3260:        }
                   3261:        ci->c_nextfree = NULL;
                   3262:        ctx_kick = 0;
                   3263:        ctx_kickdir = -1;
                   3264:
1.69      pk       3265:        /*
                   3266:         * Now map the kernel into our new set of page tables, then
1.55      pk       3267:         * (finally) switch over to our running page tables.
                   3268:         * We map from KERNBASE to p into context 0's page tables (and
                   3269:         * the kernel pmap).
                   3270:         */
                   3271: #ifdef DEBUG                   /* Sanity checks */
                   3272:        if ((u_int)p % NBPG != 0)
1.69      pk       3273:                panic("pmap_bootstrap4m: p misaligned?!?");
1.55      pk       3274:        if (KERNBASE % NBPRG != 0)
1.69      pk       3275:                panic("pmap_bootstrap4m: KERNBASE not region-aligned");
1.55      pk       3276: #endif
1.69      pk       3277:
                   3278:        for (q = (caddr_t) KERNBASE; q < p; q += NBPG) {
                   3279:                /*
1.71      pk       3280:                 * Now install entry for current page.
                   3281:                 * Cache and write-protect kernel text.
1.69      pk       3282:                 */
                   3283:                rmapp = &(pmap_kernel()->pm_regmap[VA_VREG(q)]);
                   3284:                smapp = &(rmapp->rg_segmap[VA_VSEG(q)]);
                   3285:                smapp->sg_npte++;
                   3286:                if (q < (caddr_t) trapbase)
                   3287:                        /* Must map in message buffer in low page. */
                   3288:                        (smapp->sg_pte)[VA_VPG(q)] =
                   3289:                                ((q - (caddr_t)KERNBASE) >> SRMMU_PPNPASHIFT) |
                   3290:                                PPROT_N_RWX | SRMMU_PG_C | SRMMU_TEPTE;
                   3291:                else if (q >= (caddr_t) trapbase && q < etext)
                   3292:                        (smapp->sg_pte)[VA_VPG(q)] =
                   3293:                                (VA2PA(q) >> SRMMU_PPNPASHIFT) |
                   3294:                                PPROT_N_RX | SRMMU_PG_C | SRMMU_TEPTE;
                   3295:                else
                   3296:                        (smapp->sg_pte)[VA_VPG(q)] =
                   3297:                                (VA2PA(q) >> SRMMU_PPNPASHIFT) |
                   3298:                                PPROT_N_RWX | SRMMU_PG_C | SRMMU_TEPTE;
                   3299:        }
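        /*
         * To summarize the three cases above: the low pages holding
         * the message buffer are mapped kernel read/write/execute,
         * the kernel text from trapbase to etext is mapped
         * read/execute only (i.e. write-protected), and everything
         * else gets full kernel access; all of it is cacheable.
         */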
                   3300:
                   3301:
1.55      pk       3302:        /*
                   3303:         * We also install the kernel mapping into all other contexts by
1.69      pk       3304:         * copying the context 0 L1 PTP from cpuinfo.ctx_tbl[0] into the
1.55      pk       3305:         * remainder of the context table (i.e. we share the kernel page-
                   3306:         * tables). Each user pmap automatically gets the kernel mapped
                   3307:         * into it when it is created, but we do this extra step early on
                   3308:         * in case some twit decides to switch to a context with no user
                   3309:         * pmap associated with it.
                   3310:         */
1.69      pk       3311: #if 0
1.55      pk       3312:        for (i = 1; i < ncontext; i++)
1.69      pk       3313:                cpuinfo.ctx_tbl[i] = cpuinfo.ctx_tbl[0];
                   3314: #endif
1.55      pk       3315:
                   3316:        /*
                   3317:         * Now switch to kernel pagetables (finally!)
                   3318:         */
1.69      pk       3319:        mmu_install_tables(&cpuinfo);
1.55      pk       3320:
                   3321:        /*
                   3322:         * On SuperSPARC machines without an MXCC, we *cannot* cache the
                   3323:         * page tables.
                   3324:         */
1.69      pk       3325:        if ((cpuinfo.flags & CPUFLG_CACHEPAGETABLES) == 0) {
1.55      pk       3326:                int bytes, numpages;
                   3327:
                   3328: #define DO_THE_MATH(math)                                              \
                   3329:        bytes = (math);                                                 \
                   3330:        numpages = (bytes >> PGSHIFT) + (bytes % NBPG ? 1 : 0);
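                /*
                 * e.g. for the L3 page tables (assuming 4-byte
                 * entries, NKREG == 8, NSEGRG == 64, NBPG == 4096):
                 * bytes = 64 * 4 * 8 * 64 = 131072, which is exactly
                 * 32 pages to uncache.
                 */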
                   3331:
                   3332:                DO_THE_MATH(SRMMU_L3SIZE * sizeof(long) * NKREG * NSEGRG);
                   3333: #ifdef DEBUG
1.66      christos 3334:                printf("pmap_bootstrap4m: uncaching %d PT pages at 0x%lx\n",
1.55      pk       3335:                    numpages, (long)kernel_pagtable_store);
                   3336: #endif
                   3337:                kvm_uncache((caddr_t)kernel_pagtable_store, numpages);
                   3338:
                   3339:                DO_THE_MATH(SRMMU_L2SIZE * sizeof(long) * NKREG);
                   3340: #ifdef DEBUG
1.66      christos 3341:                printf("pmap_bootstrap4m: uncaching %d ST pages at 0x%lx\n",
1.55      pk       3342:                    numpages, (long)kernel_segtable_store);
                   3343: #endif
                   3344:                kvm_uncache((caddr_t)kernel_segtable_store, numpages);
                   3345:
                   3346:                DO_THE_MATH(SRMMU_L1SIZE * sizeof(long));
                   3347: #ifdef DEBUG
1.66      christos 3348:                printf("pmap_bootstrap4m: uncaching %d RT pages at 0x%lx\n",
1.55      pk       3349:                    numpages, (long)kernel_regtable_store);
                   3350: #endif
                   3351:                kvm_uncache((caddr_t)kernel_regtable_store, numpages);
                   3352:
                   3353: #undef DO_THE_MATH
                   3354:        }
                   3355:
                   3356: #ifdef DEBUG
1.66      christos 3357:        printf("\n");   /* Might as well make it pretty... */
1.55      pk       3358: #endif
1.69      pk       3359:        /* All done! */
                   3360: }
                   3361:
                   3362: void
                   3363: mmu_install_tables(sc)
                   3364:        struct cpu_softc *sc;
                   3365: {
                   3366:
                   3367: #ifdef DEBUG
                   3368:        printf("pmap_bootstrap: installing kernel page tables...");
                   3369: #endif
1.71      pk       3370:        setcontext4m(0);        /* paranoia? %%%: make the 0x3 mask below a #define */
1.69      pk       3371:
                   3372:        /* Enable MMU tablewalk caching, flush TLB */
                   3373:        if (sc->mmu_enable != 0)
                   3374:                sc->mmu_enable();
                   3375:
                   3376:        tlb_flush_all();
                   3377:
                   3378:        sta(SRMMU_CXTPTR, ASI_SRMMU,
                   3379:            (VA2PA((caddr_t)sc->ctx_tbl) >> SRMMU_PPNPASHIFT) & ~0x3);
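        /*
         * The context table pointer register takes the table's
         * physical address shifted right by SRMMU_PPNPASHIFT; since
         * the table was aligned to at least 4K in pmap_bootstrap4m,
         * the low bits are already zero and the "& ~0x3" above is
         * just paranoia.
         */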
                   3380:
                   3381:        tlb_flush_all();
                   3382:
                   3383: #ifdef DEBUG
                   3384:        printf("done.\n");
                   3385: #endif
                   3386: }
1.55      pk       3387:
1.69      pk       3388: /*
                   3389:  * Allocate per-CPU page tables.
                   3390:  * Note: this routine is called in the context of the boot CPU
                   3391:  * during autoconfig.
                   3392:  */
                   3393: void
                   3394: pmap_alloc_cpu(sc)
                   3395:        struct cpu_softc *sc;
                   3396: {
1.72      pk       3397:        caddr_t cpustore;
                   3398:        int *ctxtable;
                   3399:        int *regtable;
                   3400:        int *segtable;
                   3401:        int *pagtable;
                   3402:        int vr, vs, vpg;
                   3403:        struct regmap *rp;
                   3404:        struct segmap *sp;
                   3405:
                   3406:        /* XXX: allocate properly aligned, physically contiguous memory here; the zero assignments below are placeholders */
                   3407:        cpustore = 0;
                   3408:        ctxtable = 0;
                   3409:        regtable = 0;
                   3410:        segtable = 0;
                   3411:        pagtable = 0;
                   3412:
                   3413:        vr = VA_VREG(CPUINFO_VA);
                   3414:        vs = VA_VSEG(CPUINFO_VA);
                   3415:        vpg = VA_VPG(CPUINFO_VA);
                   3416:        rp = &pmap_kernel()->pm_regmap[vr];
                   3417:        sp = &rp->rg_segmap[vs];
                   3418:
                   3419:        /*
                   3420:         * Copy page tables, then modify entry for CPUINFO_VA so that
                   3421:         * it points at the per-CPU pages.
                   3422:         */
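        /*
         * That is, this CPU gets private copies of the L1/L2/L3
         * tables along the translation path for CPUINFO_VA, identical
         * to the master tables except that the final L3 entry is
         * redirected to this CPU's own cpustore page; all other
         * kernel mappings remain shared.
         */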
                   3423:        bcopy(cpuinfo.L1_ptps, regtable, SRMMU_L1SIZE * sizeof(int));
                   3424:        regtable[vr] =
                   3425:                (VA2PA((caddr_t)segtable) >> SRMMU_PPNPASHIFT) | SRMMU_TEPTD;
                   3426:
                   3427:        bcopy(rp->rg_seg_ptps, segtable, SRMMU_L2SIZE * sizeof(int));
                   3428:        segtable[vs] =
                   3429:                (VA2PA((caddr_t)pagtable) >> SRMMU_PPNPASHIFT) | SRMMU_TEPTD;
                   3430:
                   3431:        bcopy(sp->sg_pte, pagtable, SRMMU_L3SIZE * sizeof(int));
                   3432:        pagtable[vpg] =
                   3433:                (VA2PA((caddr_t)cpustore) >> SRMMU_PPNPASHIFT) |
                   3434:                (SRMMU_TEPTE | PPROT_RWX_RWX | SRMMU_PG_C);
1.69      pk       3435:
1.72      pk       3436:        /* Install L1 table in context 0 */
                   3437:        ctxtable[0] = (VA2PA((caddr_t)regtable) >> SRMMU_PPNPASHIFT) | SRMMU_TEPTD;
                   3438:
                   3439:        sc->ctx_tbl = ctxtable;
                   3440:        sc->L1_ptps = regtable;
1.69      pk       3441:
1.72      pk       3442: #if 0
1.69      pk       3443:        if ((sc->flags & CPUFLG_CACHEPAGETABLES) == 0) {
1.72      pk       3444:                kvm_uncache((caddr_t)0, 1);
1.69      pk       3445:        }
1.72      pk       3446: #endif
1.55      pk       3447: }
                   3448: #endif /* defined sun4m */
                   3449:
1.69      pk       3450:
1.55      pk       3451: void
                   3452: pmap_init()
                   3453: {
                   3454:        register vm_size_t s;
                   3455:        int pass1, nmem;
                   3456:        register struct memarr *mp;
                   3457:        vm_offset_t sva, va, eva;
                   3458:        vm_offset_t pa = 0;
                   3459:
                   3460:        if (PAGE_SIZE != NBPG)
                   3461:                panic("pmap_init: CLSIZE!=1");
                   3462:
                   3463:        /*
                   3464:         * Map pv_table[] as a `sparse' array. This requires two passes
                   3465:         * over the `pmemarr': (1) to determine the number of physical
                   3466:         * pages needed, and (2), to map the correct pieces of virtual
                   3467:         * memory allocated to pv_table[].
                   3468:         */
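        /*
         * Illustrative example: given physical memory banks at, say,
         * [0, 32M) and [1G, 1G + 32M), pv_table[] is sized to span
         * the entire range above avail_start, but only the stretches
         * of it that describe pages which actually exist are given
         * physical backing in pass 2; the virtual space covering the
         * hole is simply left unmapped.
         */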
                   3469:
                   3470:        s = 0;
                   3471:        pass1 = 1;
1.38      pk       3472:
                   3473: pass2:
1.37      pk       3474:        sva = eva = 0;
1.36      pk       3475:        for (mp = pmemarr, nmem = npmemarr; --nmem >= 0; mp++) {
1.37      pk       3476:                int len;
                   3477:                vm_offset_t addr;
1.36      pk       3478:
1.37      pk       3479:                len = mp->len;
1.38      pk       3480:                if ((addr = mp->addr) < avail_start) {
1.36      pk       3481:                        /*
1.38      pk       3482:                         * pv_table[] covers everything above `avail_start'.
1.36      pk       3483:                         */
1.38      pk       3484:                        addr = avail_start;
                   3485:                        len -= avail_start;
1.36      pk       3486:                }
1.37      pk       3487:                len = sizeof(struct pvlist) * atop(len);
                   3488:
1.38      pk       3489:                if (addr < avail_start || addr >= avail_end)
1.54      christos 3490:                        panic("pmap_init: unmanaged address: 0x%lx", addr);
1.36      pk       3491:
1.38      pk       3492:                va = (vm_offset_t)&pv_table[atop(addr - avail_start)];
1.37      pk       3493:                sva = trunc_page(va);
1.55      pk       3494:
1.37      pk       3495:                if (sva < eva) {
1.55      pk       3496: #if defined(DEBUG) && !defined(SUN4M)
                   3497:                        /*
                   3498:                         * crowded chunks are normal on SS20s; don't clutter
                   3499:                         * screen with messages
                   3500:                         */
1.66      christos 3501:                        printf("note: crowded chunk at 0x%x\n", mp->addr);
1.38      pk       3502: #endif
1.37      pk       3503:                        sva += PAGE_SIZE;
                   3504:                        if (sva < eva)
1.54      christos 3505:                                panic("pmap_init: sva(%lx) < eva(%lx)",
1.55      pk       3506:                                      sva, eva);
1.37      pk       3507:                }
                   3508:                eva = round_page(va + len);
1.38      pk       3509:                if (pass1) {
                   3510:                        /* Just counting */
                   3511:                        s += eva - sva;
                   3512:                        continue;
                   3513:                }
                   3514:
                   3515:                /* Map this piece of pv_table[] */
1.37      pk       3516:                for (va = sva; va < eva; va += PAGE_SIZE) {
1.42      mycroft  3517:                        pmap_enter(pmap_kernel(), va, pa,
1.36      pk       3518:                                   VM_PROT_READ|VM_PROT_WRITE, 1);
                   3519:                        pa += PAGE_SIZE;
                   3520:                }
1.38      pk       3521:                bzero((caddr_t)sva, eva - sva);
                   3522:        }
1.36      pk       3523:
1.38      pk       3524:        if (pass1) {
1.42      mycroft  3525:                pa = pmap_extract(pmap_kernel(), kmem_alloc(kernel_map, s));
1.38      pk       3526:                pass1 = 0;
                   3527:                goto pass2;
1.36      pk       3528:        }
1.38      pk       3529:
                   3530:        vm_first_phys = avail_start;
                   3531:        vm_num_phys = avail_end - avail_start;
1.36      pk       3532: }
                   3533:
1.1       deraadt  3534:
                   3535: /*
                   3536:  * Map physical addresses into kernel VM.
                   3537:  */
                   3538: vm_offset_t
                   3539: pmap_map(va, pa, endpa, prot)
                   3540:        register vm_offset_t va, pa, endpa;
                   3541:        register int prot;
                   3542: {
                   3543:        register int pgsize = PAGE_SIZE;
                   3544:
                   3545:        while (pa < endpa) {
1.42      mycroft  3546:                pmap_enter(pmap_kernel(), va, pa, prot, 1);
1.1       deraadt  3547:                va += pgsize;
                   3548:                pa += pgsize;
                   3549:        }
                   3550:        return (va);
                   3551: }
                   3552:
                   3553: /*
                   3554:  * Create and return a physical map.
                   3555:  *
                   3556:  * If size is nonzero, the map is useless. (ick)
                   3557:  */
                   3558: struct pmap *
                   3559: pmap_create(size)
                   3560:        vm_size_t size;
                   3561: {
                   3562:        register struct pmap *pm;
                   3563:
                   3564:        if (size)
                   3565:                return (NULL);
                   3566:        pm = (struct pmap *)malloc(sizeof *pm, M_VMPMAP, M_WAITOK);
                   3567: #ifdef DEBUG
                   3568:        if (pmapdebug & PDB_CREATE)
1.66      christos 3569:                printf("pmap_create: created %p\n", pm);
1.1       deraadt  3570: #endif
                   3571:        bzero((caddr_t)pm, sizeof *pm);
                   3572:        pmap_pinit(pm);
                   3573:        return (pm);
                   3574: }
                   3575:
                   3576: /*
                   3577:  * Initialize a preallocated and zeroed pmap structure,
                   3578:  * such as one in a vmspace structure.
                   3579:  */
                   3580: void
                   3581: pmap_pinit(pm)
                   3582:        register struct pmap *pm;
                   3583: {
1.53      christos 3584:        register int size;
1.43      pk       3585:        void *urp;
1.1       deraadt  3586:
                   3587: #ifdef DEBUG
                   3588:        if (pmapdebug & PDB_CREATE)
1.66      christos 3589:                printf("pmap_pinit(%p)\n", pm);
1.1       deraadt  3590: #endif
1.13      pk       3591:
1.43      pk       3592:        size = NUREG * sizeof(struct regmap);
1.55      pk       3593:
1.43      pk       3594:        pm->pm_regstore = urp = malloc(size, M_VMPMAP, M_WAITOK);
1.55      pk       3595:        qzero((caddr_t)urp, size);
1.1       deraadt  3596:        /* pm->pm_ctx = NULL; */
                   3597:        simple_lock_init(&pm->pm_lock);
                   3598:        pm->pm_refcount = 1;
1.43      pk       3599:        pm->pm_regmap = urp;
1.55      pk       3600:
                   3601:        if (CPU_ISSUN4OR4C) {
                   3602:                TAILQ_INIT(&pm->pm_seglist);
1.69      pk       3603: #if defined(SUN4_MMU3L)
1.55      pk       3604:                TAILQ_INIT(&pm->pm_reglist);
1.69      pk       3605:                if (HASSUN4_MMU3L) {
                   3606:                        int i;
                   3607:                        for (i = NUREG; --i >= 0;)
                   3608:                                pm->pm_regmap[i].rg_smeg = reginval;
                   3609:                }
1.43      pk       3610: #endif
1.55      pk       3611:        }
                   3612: #if defined(SUN4M)
                   3613:        else {
                   3614:                /*
                   3615:                 * We must allocate and initialize hardware-readable (MMU)
                   3616:                 * pagetables. We must also map the kernel regions into this
                   3617:                 * pmap's pagetables, so that we can access the kernel from
                   3618:                 * user mode!
                   3619:                 *
                   3620:                 * Note: pm->pm_regmap's have been zeroed already, so we don't
                   3621:                 * need to explicitly mark them as invalid (a null
                   3622:                 * rg_seg_ptps pointer indicates invalid for the 4m)
                   3623:                 */
                   3624:                urp = malloc(SRMMU_L1SIZE * sizeof(int), M_VMPMAP, M_WAITOK);
1.72      pk       3625: #if 0
1.69      pk       3626:                if ((cpuinfo.flags & CPUFLG_CACHEPAGETABLES) == 0)
1.61      pk       3627:                        kvm_uncache(urp,
                   3628:                                    ((SRMMU_L1SIZE*sizeof(int))+NBPG-1)/NBPG);
1.72      pk       3629: #endif
1.55      pk       3630:
                   3631: #ifdef DEBUG
                   3632:                if ((u_int) urp % (SRMMU_L1SIZE * sizeof(int)))
1.61      pk       3633:                        panic("pmap_pinit: malloc() not giving aligned memory");
1.55      pk       3634: #endif
                   3635:                pm->pm_reg_ptps = urp;
                   3636:                pm->pm_reg_ptps_pa = VA2PA(urp);
                   3637:                qzero(urp, SRMMU_L1SIZE * sizeof(int));
                   3638:
                   3639:        }
                   3640: #endif
                   3641:
1.43      pk       3642:        pm->pm_gap_end = VA_VREG(VM_MAXUSER_ADDRESS);
                   3643:
                   3644:        return;
1.1       deraadt  3645: }
                   3646:
                   3647: /*
                   3648:  * Retire the given pmap from service.
                   3649:  * Should only be called if the map contains no valid mappings.
                   3650:  */
                   3651: void
                   3652: pmap_destroy(pm)
                   3653:        register struct pmap *pm;
                   3654: {
                   3655:        int count;
                   3656:
                   3657:        if (pm == NULL)
                   3658:                return;
                   3659: #ifdef DEBUG
                   3660:        if (pmapdebug & PDB_DESTROY)
1.66      christos 3661:                printf("pmap_destroy(%p)\n", pm);
1.1       deraadt  3662: #endif
                   3663:        simple_lock(&pm->pm_lock);
                   3664:        count = --pm->pm_refcount;
                   3665:        simple_unlock(&pm->pm_lock);
                   3666:        if (count == 0) {
                   3667:                pmap_release(pm);
1.49      pk       3668:                free(pm, M_VMPMAP);
1.1       deraadt  3669:        }
                   3670: }
                   3671:
                   3672: /*
                   3673:  * Release any resources held by the given physical map.
                   3674:  * Called when a pmap initialized by pmap_pinit is being released.
                   3675:  */
                   3676: void
                   3677: pmap_release(pm)
                   3678:        register struct pmap *pm;
                   3679: {
                   3680:        register union ctxinfo *c;
                   3681:        register int s = splpmap();     /* paranoia */
                   3682:
                   3683: #ifdef DEBUG
                   3684:        if (pmapdebug & PDB_DESTROY)
1.66      christos 3685:                printf("pmap_release(%p)\n", pm);
1.1       deraadt  3686: #endif
1.55      pk       3687:
                   3688:        if (CPU_ISSUN4OR4C) {
1.69      pk       3689: #if defined(SUN4_MMU3L)
1.55      pk       3690:                if (pm->pm_reglist.tqh_first)
                   3691:                        panic("pmap_release: region list not empty");
1.43      pk       3692: #endif
1.55      pk       3693:                if (pm->pm_seglist.tqh_first)
                   3694:                        panic("pmap_release: segment list not empty");
                   3695:
                   3696:                if ((c = pm->pm_ctx) != NULL) {
                   3697:                        if (pm->pm_ctxnum == 0)
                   3698:                                panic("pmap_release: releasing kernel");
                   3699:                        ctx_free(pm);
                   3700:                }
1.1       deraadt  3701:        }
                   3702:        splx(s);
1.55      pk       3703:
1.43      pk       3704: #ifdef DEBUG
1.55      pk       3705: if (pmapdebug) {
1.43      pk       3706:        int vs, vr;
                   3707:        for (vr = 0; vr < NUREG; vr++) {
                   3708:                struct regmap *rp = &pm->pm_regmap[vr];
                   3709:                if (rp->rg_nsegmap != 0)
1.66      christos 3710:                        printf("pmap_release: %d segments remain in "
1.43      pk       3711:                                "region %d\n", rp->rg_nsegmap, vr);
                   3712:                if (rp->rg_segmap != NULL) {
1.66      christos 3713:                        printf("pmap_release: segments still "
1.43      pk       3714:                                "allocated in region %d\n", vr);
                   3715:                        for (vs = 0; vs < NSEGRG; vs++) {
                   3716:                                struct segmap *sp = &rp->rg_segmap[vs];
                   3717:                                if (sp->sg_npte != 0)
1.66      christos 3718:                                        printf("pmap_release: %d ptes "
1.43      pk       3719:                                             "remain in segment %d\n",
                   3720:                                                sp->sg_npte, vs);
                   3721:                                if (sp->sg_pte != NULL) {
1.66      christos 3722:                                        printf("pmap_release: ptes still "
1.43      pk       3723:                                             "allocated in segment %d\n", vs);
                   3724:                                }
                   3725:                        }
                   3726:                }
                   3727:        }
                   3728: }
                   3729: #endif
                   3730:        if (pm->pm_regstore)
1.49      pk       3731:                free(pm->pm_regstore, M_VMPMAP);
1.55      pk       3732:
                   3733:        if (CPU_ISSUN4M) {
                   3734:                if ((c = pm->pm_ctx) != NULL) {
                   3735:                        if (pm->pm_ctxnum == 0)
                   3736:                                panic("pmap_release: releasing kernel");
                   3737:                        ctx_free(pm);
                   3738:                }
                   3739:                free(pm->pm_reg_ptps, M_VMPMAP);
                   3740:                pm->pm_reg_ptps = NULL;
                   3741:                pm->pm_reg_ptps_pa = 0;
                   3742:        }
1.1       deraadt  3743: }
                   3744:
                   3745: /*
                   3746:  * Add a reference to the given pmap.
                   3747:  */
                   3748: void
                   3749: pmap_reference(pm)
                   3750:        struct pmap *pm;
                   3751: {
                   3752:
                   3753:        if (pm != NULL) {
                   3754:                simple_lock(&pm->pm_lock);
                   3755:                pm->pm_refcount++;
                   3756:                simple_unlock(&pm->pm_lock);
                   3757:        }
                   3758: }
                   3759:
                   3760: /*
                   3761:  * Remove the given range of mapping entries.
                   3762:  * The starting and ending addresses are already rounded to pages.
                   3763:  * Sheer lunacy: pmap_remove is often asked to remove nonexistent
                   3764:  * mappings.
                   3765:  */
                   3766: void
                   3767: pmap_remove(pm, va, endva)
                   3768:        register struct pmap *pm;
                   3769:        register vm_offset_t va, endva;
                   3770: {
                   3771:        register vm_offset_t nva;
1.43      pk       3772:        register int vr, vs, s, ctx;
                   3773:        register void (*rm)(struct pmap *, vm_offset_t, vm_offset_t, int, int);
1.1       deraadt  3774:
                   3775:        if (pm == NULL)
                   3776:                return;
1.13      pk       3777:
1.1       deraadt  3778: #ifdef DEBUG
                   3779:        if (pmapdebug & PDB_REMOVE)
1.66      christos 3780:                printf("pmap_remove(%p, %lx, %lx)\n", pm, va, endva);
1.1       deraadt  3781: #endif
                   3782:
1.42      mycroft  3783:        if (pm == pmap_kernel()) {
1.1       deraadt  3784:                /*
                   3785:                 * Removing from kernel address space.
                   3786:                 */
                   3787:                rm = pmap_rmk;
                   3788:        } else {
                   3789:                /*
                   3790:                 * Removing from user address space.
                   3791:                 */
                   3792:                write_user_windows();
                   3793:                rm = pmap_rmu;
                   3794:        }
                   3795:
                   3796:        ctx = getcontext();
                   3797:        s = splpmap();          /* XXX conservative */
                   3798:        simple_lock(&pm->pm_lock);
                   3799:        for (; va < endva; va = nva) {
                   3800:                /* do one virtual segment at a time */
1.43      pk       3801:                vr = VA_VREG(va);
                   3802:                vs = VA_VSEG(va);
                   3803:                nva = VSTOVA(vr, vs + 1);
1.1       deraadt  3804:                if (nva == 0 || nva > endva)
                   3805:                        nva = endva;
1.43      pk       3806:                (*rm)(pm, va, nva, vr, vs);
1.1       deraadt  3807:        }
                   3808:        simple_unlock(&pm->pm_lock);
                   3809:        splx(s);
                   3810:        setcontext(ctx);
                   3811: }
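
/*
 * Orientation note (added): the loop above advances one virtual
 * segment per iteration:
 *
 *	vr  = VA_VREG(va);		(region index of va)
 *	vs  = VA_VSEG(va);		(segment index within the region)
 *	nva = VSTOVA(vr, vs + 1);	(base of the following segment)
 *
 * so a single (*rm)() call never crosses a segment boundary, letting
 * pmap_rmk/pmap_rmu make one cache-flush decision per segment.
 */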
                   3812:
                   3813: /*
                   3814:  * The following magic number was chosen because:
                   3815:  *     1. It is the same amount of work to cache_flush_page 4 pages
                   3816:  *        as to cache_flush_segment 1 segment (so at 4 the cost of
                   3817:  *        flush is the same).
                   3818:  *     2. Flushing extra pages is bad (it needlessly purges live data from the cache).
                   3819:  *     3. The current code, which malloc()s 5 pages for each process
                   3820:  *        for a user vmspace/pmap, almost never touches all 5 of those
                   3821:  *        pages.
                   3822:  */
1.13      pk       3823: #if 0
                   3824: #define        PMAP_RMK_MAGIC  (cacheinfo.c_hwflush?5:64)      /* if > magic, use cache_flush_segment */
                   3825: #else
1.1       deraadt  3826: #define        PMAP_RMK_MAGIC  5       /* if > magic, use cache_flush_segment */
1.13      pk       3827: #endif
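
/*
 * Worked example (added): by point 1 above, one segment flush costs
 * about as much as four page flushes, i.e. a page flush is roughly
 * 1/4 of a segment flush.  Removing npg pages one at a time thus
 * costs about npg/4 segment-flush equivalents, so once npg exceeds
 * PMAP_RMK_MAGIC (5) a single cache_flush_segment() wins even though
 * it also flushes pages that are not being removed.
 */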
1.1       deraadt  3828:
                   3829: /*
                   3830:  * Remove a range contained within a single segment.
                   3831:  * These are egregiously complicated routines.
                   3832:  */
                   3833:
1.55      pk       3834: #if defined(SUN4) || defined(SUN4C)
                   3835:
1.43      pk       3836: /* remove from kernel */
1.55      pk       3837: /*static*/ void
                   3838: pmap_rmk4_4c(pm, va, endva, vr, vs)
1.1       deraadt  3839:        register struct pmap *pm;
                   3840:        register vm_offset_t va, endva;
1.43      pk       3841:        register int vr, vs;
1.1       deraadt  3842: {
                   3843:        register int i, tpte, perpage, npg;
                   3844:        register struct pvlist *pv;
1.43      pk       3845:        register int nleft, pmeg;
                   3846:        struct regmap *rp;
                   3847:        struct segmap *sp;
                   3848:
                   3849:        rp = &pm->pm_regmap[vr];
                   3850:        sp = &rp->rg_segmap[vs];
                   3851:
                   3852:        if (rp->rg_nsegmap == 0)
                   3853:                return;
                   3854:
                   3855: #ifdef DEBUG
                   3856:        if (rp->rg_segmap == NULL)
                   3857:                panic("pmap_rmk: no segments");
                   3858: #endif
                   3859:
                   3860:        if ((nleft = sp->sg_npte) == 0)
                   3861:                return;
                   3862:
                   3863:        pmeg = sp->sg_pmeg;
1.1       deraadt  3864:
                   3865: #ifdef DEBUG
                   3866:        if (pmeg == seginval)
                   3867:                panic("pmap_rmk: not loaded");
                   3868:        if (pm->pm_ctx == NULL)
                   3869:                panic("pmap_rmk: lost context");
                   3870: #endif
                   3871:
1.71      pk       3872:        setcontext4(0);
1.1       deraadt  3873:        /* decide how to flush cache */
                   3874:        npg = (endva - va) >> PGSHIFT;
                   3875:        if (npg > PMAP_RMK_MAGIC) {
                   3876:                /* flush the whole segment */
                   3877:                perpage = 0;
1.69      pk       3878:                cache_flush_segment(vr, vs);
1.1       deraadt  3879:        } else {
                   3880:                /* flush each page individually; some never need flushing */
1.69      pk       3881:                perpage = (CACHEINFO.c_vactype != VAC_NONE);
1.1       deraadt  3882:        }
                   3883:        while (va < endva) {
1.55      pk       3884:                tpte = getpte4(va);
1.1       deraadt  3885:                if ((tpte & PG_V) == 0) {
1.63      pk       3886:                        va += NBPG;
1.1       deraadt  3887:                        continue;
                   3888:                }
1.35      pk       3889:                if ((tpte & PG_TYPE) == PG_OBMEM) {
                   3890:                        /* if cacheable, flush page as needed */
                   3891:                        if (perpage && (tpte & PG_NC) == 0)
1.1       deraadt  3892:                                cache_flush_page(va);
1.60      pk       3893:                        i = ptoa(tpte & PG_PFNUM);
1.1       deraadt  3894:                        if (managed(i)) {
                   3895:                                pv = pvhead(i);
1.55      pk       3896:                                pv->pv_flags |= MR4_4C(tpte);
1.58      pk       3897:                                pv_unlink4_4c(pv, pm, va);
1.1       deraadt  3898:                        }
                   3899:                }
                   3900:                nleft--;
1.55      pk       3901:                setpte4(va, 0);
1.1       deraadt  3902:                va += NBPG;
                   3903:        }
                   3904:
                   3905:        /*
                   3906:         * If the segment is all gone, remove it from everyone and
                   3907:         * free the MMU entry.
                   3908:         */
1.43      pk       3909:        if ((sp->sg_npte = nleft) == 0) {
                   3910:                va = VSTOVA(vr,vs);             /* retract */
1.69      pk       3911: #if defined(SUN4_MMU3L)
                   3912:                if (HASSUN4_MMU3L)
1.1       deraadt  3913:                        setsegmap(va, seginval);
1.43      pk       3914:                else
                   3915: #endif
                   3916:                        for (i = ncontext; --i >= 0;) {
1.71      pk       3917:                                setcontext4(i);
1.43      pk       3918:                                setsegmap(va, seginval);
                   3919:                        }
                   3920:                me_free(pm, pmeg);
                   3921:                if (--rp->rg_nsegmap == 0) {
1.69      pk       3922: #if defined(SUN4_MMU3L)
                   3923:                        if (HASSUN4_MMU3L) {
1.43      pk       3924:                                for (i = ncontext; --i >= 0;) {
1.71      pk       3925:                                        setcontext4(i);
1.43      pk       3926:                                        setregmap(va, reginval);
                   3927:                                }
                   3928:                                /* note: context is 0 */
                   3929:                                region_free(pm, rp->rg_smeg);
                   3930:                        }
                   3931: #endif
1.1       deraadt  3932:                }
                   3933:        }
                   3934: }
                   3935:
1.55      pk       3936: #endif /* sun4, sun4c */
1.1       deraadt  3937:
1.55      pk       3938: #if defined(SUN4M)             /* 4M version of pmap_rmk */
                   3939: /* remove from kernel (4m) */
                   3940: /*static*/ void
                   3941: pmap_rmk4m(pm, va, endva, vr, vs)
1.1       deraadt  3942:        register struct pmap *pm;
                   3943:        register vm_offset_t va, endva;
1.43      pk       3944:        register int vr, vs;
1.1       deraadt  3945: {
1.55      pk       3946:        register int i, tpte, perpage, npg;
1.1       deraadt  3947:        register struct pvlist *pv;
1.55      pk       3948:        register int nleft;
1.43      pk       3949:        struct regmap *rp;
                   3950:        struct segmap *sp;
                   3951:
                   3952:        rp = &pm->pm_regmap[vr];
1.55      pk       3953:        sp = &rp->rg_segmap[vs];
                   3954:
1.43      pk       3955:        if (rp->rg_nsegmap == 0)
                   3956:                return;
1.55      pk       3957:
                   3958: #ifdef DEBUG
1.43      pk       3959:        if (rp->rg_segmap == NULL)
1.55      pk       3960:                panic("pmap_rmk: no segments");
                   3961: #endif
1.43      pk       3962:
                   3963:        if ((nleft = sp->sg_npte) == 0)
                   3964:                return;
                   3965:
1.55      pk       3966: #ifdef DEBUG
                   3967:        if (sp->sg_pte == NULL || rp->rg_seg_ptps == NULL)
                   3968:                panic("pmap_rmk: segment/region does not exist");
                   3969:        if (pm->pm_ctx == NULL)
                   3970:                panic("pmap_rmk: lost context");
                   3971: #endif
1.43      pk       3972:
1.71      pk       3973:        setcontext4m(0);
1.55      pk       3974:        /* decide how to flush cache */
                   3975:        npg = (endva - va) >> PGSHIFT;
                   3976:        if (npg > PMAP_RMK_MAGIC) {
                   3977:                /* flush the whole segment */
                   3978:                perpage = 0;
1.69      pk       3979:                if (CACHEINFO.c_vactype != VAC_NONE)
1.55      pk       3980:                        cache_flush_segment(vr, vs);
                   3981:        } else {
                   3982:                /* flush each page individually; some never need flushing */
1.69      pk       3983:                perpage = (CACHEINFO.c_vactype != VAC_NONE);
1.55      pk       3984:        }
                   3985:        while (va < endva) {
1.72      pk       3986:                tpte = sp->sg_pte[VA_SUN4M_VPG(va)];
1.55      pk       3987:                if ((tpte & SRMMU_TETYPE) != SRMMU_TEPTE) {
1.72      pk       3988: #ifdef DEBUG
                   3989:                        if ((pmapdebug & PDB_SANITYCHK) &&
                   3990:                            (getpte4m(va) & SRMMU_TETYPE) == SRMMU_TEPTE)
                   3991:                                panic("Spurious kTLB entry for %lx", va);
                   3992: #endif
1.61      pk       3993:                        va += NBPG;
1.55      pk       3994:                        continue;
                   3995:                }
                   3996:                if ((tpte & SRMMU_PGTYPE) == PG_SUN4M_OBMEM) {
                   3997:                        /* if cacheable, flush page as needed */
                   3998:                        if (perpage && (tpte & SRMMU_PG_C))
1.69      pk       3999:                                cache_flush_page(va);
1.60      pk       4000:                        i = ptoa((tpte & SRMMU_PPNMASK) >> SRMMU_PPNSHIFT);
1.55      pk       4001:                        if (managed(i)) {
                   4002:                                pv = pvhead(i);
                   4003:                                pv->pv_flags |= MR4M(tpte);
1.58      pk       4004:                                pv_unlink4m(pv, pm, va);
1.55      pk       4005:                        }
                   4006:                }
                   4007:                nleft--;
1.72      pk       4008:                tlb_flush_page(va);
                   4009:                setpgt4m(&sp->sg_pte[VA_SUN4M_VPG(va)], SRMMU_TEINVALID);
1.55      pk       4010:                va += NBPG;
                   4011:        }
                   4012:
                   4013:        /*
                   4014:         * If the segment is all gone, remove it from everyone and
                   4015:         * flush the TLB.
                   4016:         */
                   4017:        if ((sp->sg_npte = nleft) == 0) {
                   4018:                va = VSTOVA(vr,vs);             /* retract */
                   4019:
                   4020:                tlb_flush_segment(vr, vs);      /* Paranoia? */
                   4021:
1.58      pk       4022:                /*
                   4023:                 * We need to free the segment table. The problem is that
1.55      pk       4024:                 * we can't free the initial (bootstrap) mapping, so
                   4025:                 * we have to explicitly check for this case (ugh).
                   4026:                 */
                   4027:                if (va < virtual_avail) {
                   4028: #ifdef DEBUG
1.66      christos 4029:                        printf("pmap_rmk4m: attempt to free base kernel alloc\n");
1.55      pk       4030: #endif
                   4031:                        /* sp->sg_pte = NULL; */
                   4032:                        sp->sg_npte = 0;
                   4033:                        return;
                   4034:                }
                   4035:                /* no need to free the table; it is statically allocated */
                   4036:                qzero(sp->sg_pte, SRMMU_L3SIZE * sizeof(long));
                   4037:        }
                   4038:        /* if we're done with a region, leave it wired */
                   4039: }
                   4040: #endif /* sun4m */
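
/*
 * Contrast note (added): on the SRMMU (sun4m) the page tables live in
 * memory, so removal above is "invalidate the PTE, then flush the TLB
 * entry":
 *
 *	tlb_flush_page(va);
 *	setpgt4m(&sp->sg_pte[VA_SUN4M_VPG(va)], SRMMU_TEINVALID);
 *
 * whereas the sun4/sun4c code earlier must also evict PMEGs with
 * me_free(), since those MMUs only hold translations while a segment
 * is loaded into hardware.
 */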
                   4041: /*
                   4042:  * Just like PMAP_RMK_MAGIC, but with a different threshold.
                   4043:  * Note that this may well deserve further tuning work.
                   4044:  */
                   4045: #if 0
                   4046: #define        PMAP_RMU_MAGIC  (cacheinfo.c_hwflush?4:64)      /* if > magic, use cache_flush_segment */
                   4047: #else
                   4048: #define        PMAP_RMU_MAGIC  4       /* if > magic, use cache_flush_segment */
                   4049: #endif
                   4050:
                   4051: #if defined(SUN4) || defined(SUN4C)
                   4052:
                   4053: /* remove from user */
                   4054: /*static*/ void
                   4055: pmap_rmu4_4c(pm, va, endva, vr, vs)
                   4056:        register struct pmap *pm;
                   4057:        register vm_offset_t va, endva;
                   4058:        register int vr, vs;
                   4059: {
                   4060:        register int *pte0, i, pteva, tpte, perpage, npg;
                   4061:        register struct pvlist *pv;
                   4062:        register int nleft, pmeg;
                   4063:        struct regmap *rp;
                   4064:        struct segmap *sp;
                   4065:
                   4066:        rp = &pm->pm_regmap[vr];
                   4067:        if (rp->rg_nsegmap == 0)
                   4068:                return;
                   4069:        if (rp->rg_segmap == NULL)
                   4070:                panic("pmap_rmu: no segments");
                   4071:
                   4072:        sp = &rp->rg_segmap[vs];
                   4073:        if ((nleft = sp->sg_npte) == 0)
                   4074:                return;
                   4075:        if (sp->sg_pte == NULL)
                   4076:                panic("pmap_rmu: no pages");
                   4077:
                   4078:
                   4079:        pmeg = sp->sg_pmeg;
                   4080:        pte0 = sp->sg_pte;
1.1       deraadt  4081:
                   4082:        if (pmeg == seginval) {
                   4083:                register int *pte = pte0 + VA_VPG(va);
                   4084:
                   4085:                /*
                   4086:                 * PTEs are not in MMU.  Just invalidate software copies.
                   4087:                 */
1.63      pk       4088:                for (; va < endva; pte++, va += NBPG) {
1.1       deraadt  4089:                        tpte = *pte;
                   4090:                        if ((tpte & PG_V) == 0) {
                   4091:                                /* nothing to remove (braindead VM layer) */
                   4092:                                continue;
                   4093:                        }
                   4094:                        if ((tpte & PG_TYPE) == PG_OBMEM) {
1.60      pk       4095:                                i = ptoa(tpte & PG_PFNUM);
1.21      deraadt  4096:                                if (managed(i))
1.58      pk       4097:                                        pv_unlink4_4c(pvhead(i), pm, va);
1.1       deraadt  4098:                        }
                   4099:                        nleft--;
                   4100:                        *pte = 0;
                   4101:                }
1.43      pk       4102:                if ((sp->sg_npte = nleft) == 0) {
1.49      pk       4103:                        free(pte0, M_VMPMAP);
1.43      pk       4104:                        sp->sg_pte = NULL;
                   4105:                        if (--rp->rg_nsegmap == 0) {
1.49      pk       4106:                                free(rp->rg_segmap, M_VMPMAP);
1.43      pk       4107:                                rp->rg_segmap = NULL;
1.69      pk       4108: #if defined(SUN4_MMU3L)
                   4109:                                if (HASSUN4_MMU3L && rp->rg_smeg != reginval) {
1.43      pk       4110:                                        if (pm->pm_ctx) {
1.71      pk       4111:                                                setcontext4(pm->pm_ctxnum);
1.43      pk       4112:                                                setregmap(va, reginval);
                   4113:                                        } else
1.71      pk       4114:                                                setcontext4(0);
1.43      pk       4115:                                        region_free(pm, rp->rg_smeg);
                   4116:                                }
                   4117: #endif
                   4118:                        }
1.1       deraadt  4119:                }
1.43      pk       4120:                return;
1.1       deraadt  4121:        }
                   4122:
                   4123:        /*
                   4124:         * PTEs are in MMU.  Invalidate in hardware, update ref &
                   4125:         * mod bits, and flush cache if required.
                   4126:         */
1.43      pk       4127:        if (CTX_USABLE(pm,rp)) {
1.1       deraadt  4128:                /* process has a context, must flush cache */
                   4129:                npg = (endva - va) >> PGSHIFT;
1.71      pk       4130:                setcontext4(pm->pm_ctxnum);
1.1       deraadt  4131:                if (npg > PMAP_RMU_MAGIC) {
                   4132:                        perpage = 0; /* flush the whole segment */
1.69      pk       4133:                        cache_flush_segment(vr, vs);
1.1       deraadt  4134:                } else
1.69      pk       4135:                        perpage = (CACHEINFO.c_vactype != VAC_NONE);
1.1       deraadt  4136:                pteva = va;
                   4137:        } else {
                   4138:                /* no context, use context 0; cache flush unnecessary */
1.71      pk       4139:                setcontext4(0);
1.69      pk       4140:                if (HASSUN4_MMU3L)
1.43      pk       4141:                        setregmap(0, tregion);
1.1       deraadt  4142:                /* XXX use per-cpu pteva? */
                   4143:                setsegmap(0, pmeg);
1.18      deraadt  4144:                pteva = VA_VPG(va) << PGSHIFT;
1.1       deraadt  4145:                perpage = 0;
                   4146:        }
1.63      pk       4147:        for (; va < endva; pteva += NBPG, va += NBPG) {
1.55      pk       4148:                tpte = getpte4(pteva);
1.1       deraadt  4149:                if ((tpte & PG_V) == 0)
                   4150:                        continue;
1.35      pk       4151:                if ((tpte & PG_TYPE) == PG_OBMEM) {
                   4152:                        /* if cacheable, flush page as needed */
                   4153:                        if (perpage && (tpte & PG_NC) == 0)
1.1       deraadt  4154:                                cache_flush_page(va);
1.60      pk       4155:                        i = ptoa(tpte & PG_PFNUM);
1.1       deraadt  4156:                        if (managed(i)) {
                   4157:                                pv = pvhead(i);
1.55      pk       4158:                                pv->pv_flags |= MR4_4C(tpte);
1.58      pk       4159:                                pv_unlink4_4c(pv, pm, va);
1.1       deraadt  4160:                        }
                   4161:                }
                   4162:                nleft--;
1.55      pk       4163:                setpte4(pteva, 0);
1.43      pk       4164: #define PMAP_PTESYNC
                   4165: #ifdef PMAP_PTESYNC
                   4166:                pte0[VA_VPG(pteva)] = 0;
                   4167: #endif
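                /*
                 * Note (added): with PMAP_PTESYNC defined just above, the
                 * software PTE copy in pte0[] is cleared along with the
                 * MMU-resident one, so sg_pte cannot go stale; and since
                 * the #define appears inside this function, the #ifdef
                 * PMAP_PTESYNC blocks later in the file are compiled in
                 * as well.
                 */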
1.1       deraadt  4168:        }
                   4169:
                   4170:        /*
                   4171:         * If the segment is all gone, and the context is loaded, give
                   4172:         * the segment back.
                   4173:         */
1.43      pk       4174:        if ((sp->sg_npte = nleft) == 0 /* ??? && pm->pm_ctx != NULL*/) {
                   4175: #ifdef DEBUG
                   4176:                if (pm->pm_ctx == NULL) {
1.66      christos 4177:                        printf("pmap_rmu: no context here...");
1.43      pk       4178:                }
                   4179: #endif
                   4180:                va = VSTOVA(vr,vs);             /* retract */
                   4181:                if (CTX_USABLE(pm,rp))
                   4182:                        setsegmap(va, seginval);
1.69      pk       4183:                else if (HASSUN4_MMU3L && rp->rg_smeg != reginval) {
1.43      pk       4184:                        /* note: context already set earlier */
                   4185:                        setregmap(0, rp->rg_smeg);
                   4186:                        setsegmap(vs << SGSHIFT, seginval);
                   4187:                }
1.49      pk       4188:                free(pte0, M_VMPMAP);
1.43      pk       4189:                sp->sg_pte = NULL;
1.1       deraadt  4190:                me_free(pm, pmeg);
1.13      pk       4191:
1.43      pk       4192:                if (--rp->rg_nsegmap == 0) {
1.49      pk       4193:                        free(rp->rg_segmap, M_VMPMAP);
1.43      pk       4194:                        rp->rg_segmap = NULL;
                   4195:                        GAP_WIDEN(pm,vr);
                   4196:
1.69      pk       4197: #if defined(SUN4_MMU3L)
                   4198:                        if (HASSUN4_MMU3L && rp->rg_smeg != reginval) {
1.43      pk       4199:                                /* note: context already set */
                   4200:                                if (pm->pm_ctx)
                   4201:                                        setregmap(va, reginval);
                   4202:                                region_free(pm, rp->rg_smeg);
                   4203:                        }
                   4204: #endif
                   4205:                }
1.13      pk       4206:
1.1       deraadt  4207:        }
                   4208: }
                   4209:
1.55      pk       4210: #endif /* sun4,4c */
                   4211:
                   4212: #if defined(SUN4M)             /* 4M version of pmap_rmu */
                   4213: /* remove from user */
                   4214: /*static*/ void
                   4215: pmap_rmu4m(pm, va, endva, vr, vs)
                   4216:        register struct pmap *pm;
                   4217:        register vm_offset_t va, endva;
                   4218:        register int vr, vs;
                   4219: {
1.72      pk       4220:        register int *pte0, i, perpage, npg;
1.55      pk       4221:        register struct pvlist *pv;
                   4222:        register int nleft;
                   4223:        struct regmap *rp;
                   4224:        struct segmap *sp;
                   4225:
                   4226:        rp = &pm->pm_regmap[vr];
                   4227:        if (rp->rg_nsegmap == 0)
                   4228:                return;
                   4229:        if (rp->rg_segmap == NULL)
                   4230:                panic("pmap_rmu: no segments");
                   4231:
                   4232:        sp = &rp->rg_segmap[vs];
                   4233:        if ((nleft = sp->sg_npte) == 0)
                   4234:                return;
                   4235:        if (sp->sg_pte == NULL)
                   4236:                panic("pmap_rmu: no pages");
                   4237:
                   4238:        pte0 = sp->sg_pte;
                   4239:
                   4240:        /*
                   4241:         * Invalidate PTE in MMU pagetables. Flush cache if necessary.
                   4242:         */
1.72      pk       4243:        if (pm->pm_ctx) {
1.55      pk       4244:                /* process has a context, must flush cache */
1.71      pk       4245:                setcontext4m(pm->pm_ctxnum);
1.69      pk       4246:                if (CACHEINFO.c_vactype != VAC_NONE) {
1.63      pk       4247:                        npg = (endva - va) >> PGSHIFT;
                   4248:                        if (npg > PMAP_RMU_MAGIC) {
                   4249:                                perpage = 0; /* flush the whole segment */
1.55      pk       4250:                                cache_flush_segment(vr, vs);
1.63      pk       4251:                        } else
                   4252:                                perpage = 1;
1.55      pk       4253:                } else
1.63      pk       4254:                        perpage = 0;
1.55      pk       4255:        } else {
                   4256:                /* no context; cache flush unnecessary */
                   4257:                perpage = 0;
                   4258:        }
1.63      pk       4259:        for (; va < endva; va += NBPG) {
1.72      pk       4260:
                   4261:                int tpte = pte0[VA_SUN4M_VPG(va)];
                   4262:
                   4263:                if ((tpte & SRMMU_TETYPE) != SRMMU_TEPTE) {
                   4264: #ifdef DEBUG
                   4265:                        if ((pmapdebug & PDB_SANITYCHK) &&
                   4266:                            pm->pm_ctx &&
                   4267:            (getpte4m(va) & SRMMU_TETYPE) == SRMMU_TEPTE)
                   4268:                                panic("Spurious uTLB entry for %lx", va);
                   4269: #endif
1.55      pk       4270:                        continue;
1.72      pk       4271:                }
                   4272:
1.55      pk       4273:                if ((tpte & SRMMU_PGTYPE) == PG_SUN4M_OBMEM) {
                   4274:                        /* if cacheable, flush page as needed */
                   4275:                        if (perpage && (tpte & SRMMU_PG_C))
1.60      pk       4276:                                cache_flush_page(va);
                   4277:                        i = ptoa((tpte & SRMMU_PPNMASK) >> SRMMU_PPNSHIFT);
1.55      pk       4278:                        if (managed(i)) {
                   4279:                                pv = pvhead(i);
                   4280:                                pv->pv_flags |= MR4M(tpte);
1.58      pk       4281:                                pv_unlink4m(pv, pm, va);
1.55      pk       4282:                        }
                   4283:                }
                   4284:                nleft--;
1.72      pk       4285:                if (pm->pm_ctx)
                   4286:                        tlb_flush_page(va);
                   4287:                setpgt4m(&pte0[VA_SUN4M_VPG(va)], SRMMU_TEINVALID);
1.55      pk       4288:        }
                   4289:
                   4290:        /*
                   4291:         * If the segment is all gone, and the context is loaded, give
                   4292:         * the segment back.
                   4293:         */
1.72      pk       4294:        if ((sp->sg_npte = nleft) == 0) {
1.55      pk       4295: #ifdef DEBUG
                   4296:                if (pm->pm_ctx == NULL) {
1.66      christos 4297:                        printf("pmap_rmu: no context here...");
1.55      pk       4298:                }
                   4299: #endif
                   4300:                va = VSTOVA(vr,vs);             /* retract */
                   4301:
                   4302:                tlb_flush_segment(vr, vs);      /* Paranoia? */
1.73      pk       4303:                setpgt4m(&rp->rg_seg_ptps[vs], SRMMU_TEINVALID);
1.55      pk       4304:                free(pte0, M_VMPMAP);
                   4305:                sp->sg_pte = NULL;
                   4306:
                   4307:                if (--rp->rg_nsegmap == 0) {
                   4308:                        free(rp->rg_segmap, M_VMPMAP);
                   4309:                        rp->rg_segmap = NULL;
                   4310:                        free(rp->rg_seg_ptps, M_VMPMAP);
1.73      pk       4311:                        setpgt4m(&pm->pm_reg_ptps[vr], SRMMU_TEINVALID);
1.55      pk       4312:                }
                   4313:        }
                   4314: }
                   4315: #endif /* sun4m */
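
/*
 * Ordering note (added): the sun4m segment teardown above is arranged
 * so the MMU can never walk freed memory: the segment's TLB entries
 * are flushed and the level-2 entry is invalidated via
 * setpgt4m(&rp->rg_seg_ptps[vs], SRMMU_TEINVALID) before the level-3
 * table (pte0) is handed back to free().
 */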
                   4316:
1.1       deraadt  4317: /*
                   4318:  * Lower (make more strict) the protection on the specified
                   4319:  * physical page.
                   4320:  *
                   4321:  * There are only two cases: either the protection is going to 0
                   4322:  * (in which case we do the dirty work here), or it is going
                   4323:  * to read-only (in which case pv_changepte does the trick).
                   4324:  */
1.55      pk       4325:
                   4326: #if defined(SUN4) || defined(SUN4C)
1.1       deraadt  4327: void
1.55      pk       4328: pmap_page_protect4_4c(pa, prot)
1.1       deraadt  4329:        vm_offset_t pa;
                   4330:        vm_prot_t prot;
                   4331: {
                   4332:        register struct pvlist *pv, *pv0, *npv;
                   4333:        register struct pmap *pm;
1.43      pk       4334:        register int va, vr, vs, pteva, tpte;
1.53      christos 4335:        register int flags, nleft, i, s, ctx;
1.43      pk       4336:        struct regmap *rp;
                   4337:        struct segmap *sp;
1.1       deraadt  4338:
                   4339: #ifdef DEBUG
1.43      pk       4340:        if (!pmap_pa_exists(pa))
1.54      christos 4341:                panic("pmap_page_protect: no such address: %lx", pa);
1.1       deraadt  4342:        if ((pmapdebug & PDB_CHANGEPROT) ||
                   4343:            (pmapdebug & PDB_REMOVE && prot == VM_PROT_NONE))
1.66      christos 4344:                printf("pmap_page_protect(%lx, %x)\n", pa, prot);
1.1       deraadt  4345: #endif
                   4346:        /*
                   4347:         * Skip unmanaged pages, or operations that do not take
                   4348:         * away write permission.
                   4349:         */
1.34      pk       4350:        if ((pa & (PMAP_TNC & ~PMAP_NC)) ||
                   4351:             !managed(pa) || prot & VM_PROT_WRITE)
1.1       deraadt  4352:                return;
                   4353:        write_user_windows();   /* paranoia */
                   4354:        if (prot & VM_PROT_READ) {
1.58      pk       4355:                pv_changepte4_4c(pvhead(pa), 0, PG_W);
1.1       deraadt  4356:                return;
                   4357:        }
                   4358:
                   4359:        /*
                   4360:         * Remove all access to all people talking to this page.
                   4361:         * Walk down PV list, removing all mappings.
                   4362:         * The logic is much like that for pmap_remove,
                   4363:         * but we know we are removing exactly one page.
                   4364:         */
                   4365:        pv = pvhead(pa);
                   4366:        s = splpmap();
                   4367:        if ((pm = pv->pv_pmap) == NULL) {
                   4368:                splx(s);
                   4369:                return;
                   4370:        }
1.71      pk       4371:        ctx = getcontext4();
1.1       deraadt  4372:        pv0 = pv;
                   4373:        flags = pv->pv_flags & ~PV_NC;
                   4374:        for (;; pm = pv->pv_pmap) {
                   4375:                va = pv->pv_va;
1.43      pk       4376:                vr = VA_VREG(va);
                   4377:                vs = VA_VSEG(va);
                   4378:                rp = &pm->pm_regmap[vr];
                   4379:                if (rp->rg_nsegmap == 0)
                   4380:                        panic("pmap_remove_all: empty vreg");
                   4381:                sp = &rp->rg_segmap[vs];
                   4382:                if ((nleft = sp->sg_npte) == 0)
1.1       deraadt  4383:                        panic("pmap_remove_all: empty vseg");
                   4384:                nleft--;
1.43      pk       4385:                sp->sg_npte = nleft;
                   4386:
                   4387:                if (sp->sg_pmeg == seginval) {
                   4388:                        /* Definitely not a kernel map */
1.1       deraadt  4389:                        if (nleft) {
1.43      pk       4390:                                sp->sg_pte[VA_VPG(va)] = 0;
1.1       deraadt  4391:                        } else {
1.49      pk       4392:                                free(sp->sg_pte, M_VMPMAP);
1.43      pk       4393:                                sp->sg_pte = NULL;
                   4394:                                if (--rp->rg_nsegmap == 0) {
1.49      pk       4395:                                        free(rp->rg_segmap, M_VMPMAP);
1.43      pk       4396:                                        rp->rg_segmap = NULL;
                   4397:                                        GAP_WIDEN(pm,vr);
1.69      pk       4398: #if defined(SUN4_MMU3L)
                   4399:                                        if (HASSUN4_MMU3L && rp->rg_smeg != reginval) {
1.43      pk       4400:                                                if (pm->pm_ctx) {
1.71      pk       4401:                                                        setcontext4(pm->pm_ctxnum);
1.43      pk       4402:                                                        setregmap(va, reginval);
                   4403:                                                } else
1.71      pk       4404:                                                        setcontext4(0);
1.43      pk       4405:                                                region_free(pm, rp->rg_smeg);
                   4406:                                        }
                   4407: #endif
                   4408:                                }
1.1       deraadt  4409:                        }
                   4410:                        goto nextpv;
                   4411:                }
1.43      pk       4412:                if (CTX_USABLE(pm,rp)) {
1.71      pk       4413:                        setcontext4(pm->pm_ctxnum);
1.1       deraadt  4414:                        pteva = va;
1.69      pk       4415:                        cache_flush_page(va);
1.1       deraadt  4416:                } else {
1.71      pk       4417:                        setcontext4(0);
1.1       deraadt  4418:                        /* XXX use per-cpu pteva? */
1.69      pk       4419:                        if (HASSUN4_MMU3L)
1.43      pk       4420:                                setregmap(0, tregion);
                   4421:                        setsegmap(0, sp->sg_pmeg);
1.18      deraadt  4422:                        pteva = VA_VPG(va) << PGSHIFT;
1.1       deraadt  4423:                }
1.43      pk       4424:
1.55      pk       4425:                tpte = getpte4(pteva);
1.43      pk       4426:                if ((tpte & PG_V) == 0)
                   4427:                        panic("pmap_page_protect !PG_V");
1.55      pk       4428:                flags |= MR4_4C(tpte);
1.43      pk       4429:
1.1       deraadt  4430:                if (nleft) {
1.55      pk       4431:                        setpte4(pteva, 0);
1.43      pk       4432: #ifdef PMAP_PTESYNC
1.44      pk       4433:                        if (sp->sg_pte != NULL)
                   4434:                                sp->sg_pte[VA_VPG(pteva)] = 0;
1.43      pk       4435: #endif
1.1       deraadt  4436:                } else {
1.43      pk       4437:                        if (pm == pmap_kernel()) {
1.69      pk       4438: #if defined(SUN4_MMU3L)
                   4439:                                if (!HASSUN4_MMU3L)
1.43      pk       4440: #endif
                   4441:                                        for (i = ncontext; --i >= 0;) {
1.71      pk       4442:                                                setcontext4(i);
1.1       deraadt  4443:                                                setsegmap(va, seginval);
                   4444:                                        }
1.43      pk       4445:                                me_free(pm, sp->sg_pmeg);
                   4446:                                if (--rp->rg_nsegmap == 0) {
1.69      pk       4447: #if defined(SUN4_MMU3L)
                   4448:                                        if (HASSUN4_MMU3L) {
1.43      pk       4449:                                                for (i = ncontext; --i >= 0;) {
1.71      pk       4450:                                                        setcontext4(i);
1.43      pk       4451:                                                        setregmap(va, reginval);
                   4452:                                                }
                   4453:                                                region_free(pm, rp->rg_smeg);
                   4454:                                        }
                   4455: #endif
                   4456:                                }
                   4457:                        } else {
                   4458:                                if (CTX_USABLE(pm,rp))
                   4459:                                        /* `pteva'; we might be using tregion */
                   4460:                                        setsegmap(pteva, seginval);
1.69      pk       4461: #if defined(SUN4_MMU3L)
1.72      pk       4462:                                else if (HASSUN4_MMU3L &&
                   4463:                                         rp->rg_smeg != reginval) {
1.43      pk       4464:                                        /* note: context already set earlier */
                   4465:                                        setregmap(0, rp->rg_smeg);
                   4466:                                        setsegmap(vs << SGSHIFT, seginval);
                   4467:                                }
                   4468: #endif
1.49      pk       4469:                                free(sp->sg_pte, M_VMPMAP);
1.43      pk       4470:                                sp->sg_pte = NULL;
                   4471:                                me_free(pm, sp->sg_pmeg);
                   4472:
                   4473:                                if (--rp->rg_nsegmap == 0) {
1.69      pk       4474: #if defined(SUN4_MMU3L)
1.72      pk       4475:                                        if (HASSUN4_MMU3L &&
                   4476:                                            rp->rg_smeg != reginval) {
1.43      pk       4477:                                                if (pm->pm_ctx)
                   4478:                                                        setregmap(va, reginval);
                   4479:                                                region_free(pm, rp->rg_smeg);
                   4480:                                        }
                   4481: #endif
1.49      pk       4482:                                        free(rp->rg_segmap, M_VMPMAP);
1.43      pk       4483:                                        rp->rg_segmap = NULL;
                   4484:                                        GAP_WIDEN(pm,vr);
1.1       deraadt  4485:                                }
                   4486:                        }
                   4487:                }
                   4488:        nextpv:
                   4489:                npv = pv->pv_next;
                   4490:                if (pv != pv0)
1.49      pk       4491:                        free(pv, M_VMPVENT);
1.1       deraadt  4492:                if ((pv = npv) == NULL)
                   4493:                        break;
                   4494:        }
                   4495:        pv0->pv_pmap = NULL;
1.11      pk       4496:        pv0->pv_next = NULL; /* ? */
1.1       deraadt  4497:        pv0->pv_flags = flags;
1.71      pk       4498:        setcontext4(ctx);
1.1       deraadt  4499:        splx(s);
                   4500: }
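
/*
 * Sketch (added; simplified from the loop above): pmap_page_protect4_4c
 * walks the PV chain rooted at pvhead(pa), one entry per mapping of
 * the physical page, in essence:
 *
 *	for (pv = pvhead(pa); pv != NULL; pv = pv->pv_next)
 *		remove the mapping <pv->pv_pmap, pv->pv_va>;
 *
 * Every entry except the head pv0 is freed; pv0 remains with
 * pv_pmap == NULL and the accumulated ref/mod bits left in pv_flags.
 */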
                   4501:
                   4502: /*
                   4503:  * Lower (make more strict) the protection on the specified
                   4504:  * range of this pmap.
                   4505:  *
                   4506:  * There are only two cases: either the protection is going to 0
                   4507:  * (in which case we call pmap_remove to do the dirty work), or
                   4508:  * it is going from read/write to read-only.  The latter is
                   4509:  * fairly easy.
                   4510:  */
                   4511: void
1.55      pk       4512: pmap_protect4_4c(pm, sva, eva, prot)
1.1       deraadt  4513:        register struct pmap *pm;
                   4514:        vm_offset_t sva, eva;
                   4515:        vm_prot_t prot;
                   4516: {
1.53      christos 4517:        register int va, nva, vr, vs;
1.1       deraadt  4518:        register int s, ctx;
1.43      pk       4519:        struct regmap *rp;
                   4520:        struct segmap *sp;
1.1       deraadt  4521:
                   4522:        if (pm == NULL || prot & VM_PROT_WRITE)
                   4523:                return;
1.43      pk       4524:
1.1       deraadt  4525:        if ((prot & VM_PROT_READ) == 0) {
                   4526:                pmap_remove(pm, sva, eva);
                   4527:                return;
                   4528:        }
                   4529:
                   4530:        write_user_windows();
1.71      pk       4531:        ctx = getcontext4();
1.1       deraadt  4532:        s = splpmap();
                   4533:        simple_lock(&pm->pm_lock);
                   4534:
                   4535:        for (va = sva; va < eva;) {
1.43      pk       4536:                vr = VA_VREG(va);
                   4537:                vs = VA_VSEG(va);
                   4538:                rp = &pm->pm_regmap[vr];
                   4539:                nva = VSTOVA(vr,vs + 1);
1.1       deraadt  4540:                if (nva == 0) panic("pmap_protect: last segment");      /* cannot happen */
                   4541:                if (nva > eva)
                   4542:                        nva = eva;
1.43      pk       4543:                if (rp->rg_nsegmap == 0) {
1.1       deraadt  4544:                        va = nva;
                   4545:                        continue;
                   4546:                }
1.43      pk       4547: #ifdef DEBUG
                   4548:                if (rp->rg_segmap == NULL)
                   4549:                        panic("pmap_protect: no segments");
                   4550: #endif
                   4551:                sp = &rp->rg_segmap[vs];
                   4552:                if (sp->sg_npte == 0) {
                   4553:                        va = nva;
                   4554:                        continue;
                   4555:                }
                   4556: #ifdef DEBUG
                   4557:                if (pm != pmap_kernel() && sp->sg_pte == NULL)
                   4558:                        panic("pmap_protect: no pages");
                   4559: #endif
                   4560:                if (sp->sg_pmeg == seginval) {
                   4561:                        register int *pte = &sp->sg_pte[VA_VPG(va)];
1.1       deraadt  4562:
                   4563:                        /* not in MMU; just clear PG_W from core copies */
                   4564:                        for (; va < nva; va += NBPG)
                   4565:                                *pte++ &= ~PG_W;
                   4566:                } else {
                   4567:                        /* in MMU: take away write bits from MMU PTEs */
1.43      pk       4568:                        if (CTX_USABLE(pm,rp)) {
1.1       deraadt  4569:                                register int tpte;
                   4570:
                   4571:                                /*
                   4572:                                 * Flush cache so that any existing cache
                   4573:                                 * tags are updated.  This is really only
                   4574:                                 * needed for PTEs that lose PG_W.
                   4575:                                 */
1.71      pk       4576:                                setcontext4(pm->pm_ctxnum);
1.1       deraadt  4577:                                for (; va < nva; va += NBPG) {
1.55      pk       4578:                                        tpte = getpte4(va);
1.1       deraadt  4579:                                        pmap_stats.ps_npg_prot_all++;
1.35      pk       4580:                                        if ((tpte & (PG_W|PG_TYPE)) ==
                   4581:                                            (PG_W|PG_OBMEM)) {
1.1       deraadt  4582:                                                pmap_stats.ps_npg_prot_actual++;
1.69      pk       4583:                                                cache_flush_page(va);
1.55      pk       4584:                                                setpte4(va, tpte & ~PG_W);
1.1       deraadt  4585:                                        }
                   4586:                                }
                   4587:                        } else {
                   4588:                                register int pteva;
                   4589:
                   4590:                                /*
                   4591:                                 * No context, hence not cached;
                   4592:                                 * just update PTEs.
                   4593:                                 */
1.71      pk       4594:                                setcontext4(0);
1.1       deraadt  4595:                                /* XXX use per-cpu pteva? */
1.69      pk       4596:                                if (HASSUN4_MMU3L)
1.43      pk       4597:                                        setregmap(0, tregion);
                   4598:                                setsegmap(0, sp->sg_pmeg);
1.18      deraadt  4599:                                pteva = VA_VPG(va) << PGSHIFT;
1.1       deraadt  4600:                                for (; va < nva; pteva += NBPG, va += NBPG)
1.55      pk       4601:                                        setpte4(pteva, getpte4(pteva) & ~PG_W);
1.1       deraadt  4602:                        }
                   4603:                }
                   4604:        }
                   4605:        simple_unlock(&pm->pm_lock);
1.12      pk       4606:        splx(s);
1.71      pk       4607:        setcontext4(ctx);
1.1       deraadt  4608: }
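
/*
 * Note (added): the read-only case above boils down to clearing PG_W
 * in each valid PTE, in essence
 *
 *	setpte4(va, getpte4(va) & ~PG_W);
 *
 * with a prior cache flush for cached obmem pages so a writeback
 * cache retires dirty lines while the old, writable translation is
 * still in place.
 */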
                   4609:
                   4610: /*
                   4611:  * Change the protection and/or wired status of the given (MI) virtual page.
                   4612:  * XXX: should have separate function (or flag) telling whether only wiring
                   4613:  * is changing.
                   4614:  */
                   4615: void
1.55      pk       4616: pmap_changeprot4_4c(pm, va, prot, wired)
1.1       deraadt  4617:        register struct pmap *pm;
                   4618:        register vm_offset_t va;
                   4619:        vm_prot_t prot;
                   4620:        int wired;
                   4621: {
1.53      christos 4622:        register int vr, vs, tpte, newprot, ctx, s;
1.43      pk       4623:        struct regmap *rp;
                   4624:        struct segmap *sp;
1.1       deraadt  4625:
                   4626: #ifdef DEBUG
                   4627:        if (pmapdebug & PDB_CHANGEPROT)
1.66      christos 4628:                printf("pmap_changeprot(%p, %lx, %x, %x)\n",
1.1       deraadt  4629:                    pm, va, prot, wired);
                   4630: #endif
                   4631:
                   4632:        write_user_windows();   /* paranoia */
                   4633:
1.64      pk       4634:        va &= ~(NBPG-1);
1.42      mycroft  4635:        if (pm == pmap_kernel())
1.1       deraadt  4636:                newprot = prot & VM_PROT_WRITE ? PG_S|PG_W : PG_S;
                   4637:        else
                   4638:                newprot = prot & VM_PROT_WRITE ? PG_W : 0;
1.43      pk       4639:        vr = VA_VREG(va);
                   4640:        vs = VA_VSEG(va);
1.1       deraadt  4641:        s = splpmap();          /* conservative */
1.43      pk       4642:        rp = &pm->pm_regmap[vr];
                   4643:        if (rp->rg_nsegmap == 0) {
1.66      christos 4644:                printf("pmap_changeprot: no segments in %d\n", vr);
                                        splx(s);
1.43      pk       4645:                return;
                   4646:        }
                   4647:        if (rp->rg_segmap == NULL) {
1.66      christos 4648:                printf("pmap_changeprot: no segmap in %d\n", vr);
                                        splx(s);
1.43      pk       4649:                return;
                   4650:        }
                   4651:        sp = &rp->rg_segmap[vs];
                   4652:
1.1       deraadt  4653:        pmap_stats.ps_changeprots++;
                   4654:
1.43      pk       4655: #ifdef DEBUG
                   4656:        if (pm != pmap_kernel() && sp->sg_pte == NULL)
                   4657:                panic("pmap_changeprot: no pages");
                   4658: #endif
                   4659:
1.1       deraadt  4660:        /* update PTEs in software or hardware */
1.43      pk       4661:        if (sp->sg_pmeg == seginval) {
                   4662:                register int *pte = &sp->sg_pte[VA_VPG(va)];
1.1       deraadt  4663:
                   4664:                /* update in software */
                   4665:                if ((*pte & PG_PROT) == newprot)
                   4666:                        goto useless;
                   4667:                *pte = (*pte & ~PG_PROT) | newprot;
                   4668:        } else {
                   4669:                /* update in hardware */
1.71      pk       4670:                ctx = getcontext4();
1.43      pk       4671:                if (CTX_USABLE(pm,rp)) {
1.1       deraadt  4672:                        /* use current context; flush writeback cache */
1.71      pk       4673:                        setcontext4(pm->pm_ctxnum);
1.55      pk       4674:                        tpte = getpte4(va);
1.11      pk       4675:                        if ((tpte & PG_PROT) == newprot) {
1.71      pk       4676:                                setcontext4(ctx);
1.1       deraadt  4677:                                goto useless;
1.11      pk       4678:                        }
1.69      pk       4679:                        if (CACHEINFO.c_vactype == VAC_WRITEBACK &&
1.35      pk       4680:                            (tpte & (PG_U|PG_NC|PG_TYPE)) == (PG_U|PG_OBMEM))
1.1       deraadt  4681:                                cache_flush_page((int)va);
                   4682:                } else {
1.71      pk       4683:                        setcontext4(0);
1.1       deraadt  4684:                        /* XXX use per-cpu va? */
1.69      pk       4685:                        if (HASSUN4_MMU3L)
1.43      pk       4686:                                setregmap(0, tregion);
                   4687:                        setsegmap(0, sp->sg_pmeg);
1.18      deraadt  4688:                        va = VA_VPG(va) << PGSHIFT;
1.55      pk       4689:                        tpte = getpte4(va);
1.11      pk       4690:                        if ((tpte & PG_PROT) == newprot) {
1.71      pk       4691:                                setcontext4(ctx);
1.1       deraadt  4692:                                goto useless;
1.11      pk       4693:                        }
1.1       deraadt  4694:                }
                   4695:                tpte = (tpte & ~PG_PROT) | newprot;
1.55      pk       4696:                setpte4(va, tpte);
1.71      pk       4697:                setcontext4(ctx);
1.1       deraadt  4698:        }
                   4699:        splx(s);
                   4700:        return;
                   4701:
                   4702: useless:
                   4703:        /* only wiring changed, and we ignore wiring */
                   4704:        pmap_stats.ps_useless_changeprots++;
                   4705:        splx(s);
                   4706: }
                   4707:
1.55      pk       4708: #endif /* sun4, 4c */
                   4709:
                   4710: #if defined(SUN4M)             /* 4M version of protection routines above */
1.1       deraadt  4711: /*
1.55      pk       4712:  * Lower (make more strict) the protection on the specified
                   4713:  * physical page.
1.1       deraadt  4714:  *
1.55      pk       4715:  * There are only two cases: either the protection is going to 0
                   4716:  * (in which case we do the dirty work here), or it is going
                   4717:  * to read-only (in which case pv_changepte does the trick).
1.1       deraadt  4718:  */
                   4719: void
1.55      pk       4720: pmap_page_protect4m(pa, prot)
                   4721:        vm_offset_t pa;
1.1       deraadt  4722:        vm_prot_t prot;
                   4723: {
1.55      pk       4724:        register struct pvlist *pv, *pv0, *npv;
                   4725:        register struct pmap *pm;
                   4726:        register int va, vr, vs, tpte;
                   4727:        register int flags, nleft, s, ctx;
                   4728:        struct regmap *rp;
                   4729:        struct segmap *sp;
1.45      pk       4730:
                   4731: #ifdef DEBUG
1.55      pk       4732:        if (!pmap_pa_exists(pa))
                   4733:                panic("pmap_page_protect: no such address: 0x%lx", pa);
                   4734:        if ((pmapdebug & PDB_CHANGEPROT) ||
                   4735:            (pmapdebug & PDB_REMOVE && prot == VM_PROT_NONE))
1.66      christos 4736:                printf("pmap_page_protect(%lx, %x)\n", pa, prot);
1.45      pk       4737: #endif
1.55      pk       4738:        /*
                   4739:         * Skip unmanaged pages, or operations that do not take
                   4740:         * away write permission.
                   4741:         */
                   4742:        if (!managed(pa) || prot & VM_PROT_WRITE)
                   4743:                return;
                   4744:        write_user_windows();   /* paranoia */
                   4745:        if (prot & VM_PROT_READ) {
                   4746:                pv_changepte4m(pvhead(pa), 0, PPROT_WRITE);
1.45      pk       4747:                return;
                   4748:        }
1.39      pk       4749:
1.1       deraadt  4750:        /*
1.55      pk       4751:         * Remove all access to all people talking to this page.
                   4752:         * Walk down PV list, removing all mappings.
                   4753:         * The logic is much like that for pmap_remove,
                   4754:         * but we know we are removing exactly one page.
1.1       deraadt  4755:         */
1.55      pk       4756:        pv = pvhead(pa);
                   4757:        s = splpmap();
                   4758:        if ((pm = pv->pv_pmap) == NULL) {
                   4759:                splx(s);
                   4760:                return;
1.1       deraadt  4761:        }
1.71      pk       4762:        ctx = getcontext4m();
1.55      pk       4763:        pv0 = pv;
                   4764:        flags = pv->pv_flags /*| PV_C4M*/;      /* XXX: should PV_C4M be set here? */
                   4765:        for (;; pm = pv->pv_pmap) {
                   4766:                va = pv->pv_va;
                   4767:                vr = VA_VREG(va);
                   4768:                vs = VA_VSEG(va);
                   4769:                rp = &pm->pm_regmap[vr];
                   4770:                if (rp->rg_nsegmap == 0)
                   4771:                        panic("pmap_page_protect4m: empty vreg");
                   4772:                sp = &rp->rg_segmap[vs];
                   4773:                if ((nleft = sp->sg_npte) == 0)
                   4774:                        panic("pmap_page_protect4m: empty vseg");
                   4775:                nleft--;
                   4776:                sp->sg_npte = nleft;
1.1       deraadt  4777:
1.55      pk       4778:                /* Invalidate PTE in MMU pagetables. Flush cache if necessary */
1.72      pk       4779:                if (pm->pm_ctx) {
1.71      pk       4780:                        setcontext4m(pm->pm_ctxnum);
1.69      pk       4781:                        cache_flush_page(va);
1.55      pk       4782:                        tlb_flush_page(va);
1.72      pk       4783:                }
                   4784:
                   4785:                tpte = sp->sg_pte[VA_SUN4M_VPG(va)];
1.1       deraadt  4786:
1.55      pk       4787:                if ((tpte & SRMMU_TETYPE) != SRMMU_TEPTE)
                   4788:                        panic("pmap_page_protect !PG_V");
1.72      pk       4789:
1.55      pk       4790:                flags |= MR4M(tpte);
1.43      pk       4791:
1.55      pk       4792:                if (nleft)
1.72      pk       4793:                        setpgt4m(&sp->sg_pte[VA_SUN4M_VPG(va)], SRMMU_TEINVALID);
1.55      pk       4794:                else {
                   4795:                        if (pm == pmap_kernel()) {
                   4796:                                tlb_flush_segment(vr, vs); /* Paranoid? */
                   4797:                                if (va < virtual_avail) {
                   4798: #ifdef DEBUG
1.66      christos 4799:                                        printf("pmap_rmk4m: attempt to free "
1.55      pk       4800:                                               "base kernel allocation\n");
                   4801: #endif
                   4802:                                        goto nextpv;
                   4803:                                }
1.72      pk       4804: #if 0 /* no need for this */
1.55      pk       4805:                                /* no need to free the table; it is static */
                   4806:                                qzero(sp->sg_pte, SRMMU_L3SIZE * sizeof(int));
1.72      pk       4807: #endif
1.43      pk       4808:
1.55      pk       4809:                                /* if we're done with a region, leave it */
                   4810:
                   4811:                        } else {        /* User mode mapping */
1.72      pk       4812:                                if (pm->pm_ctx)
1.62      pk       4813:                                        tlb_flush_segment(vr, vs);
1.72      pk       4814:                                setpgt4m(&rp->rg_seg_ptps[vs], SRMMU_TEINVALID);
1.55      pk       4815:                                free(sp->sg_pte, M_VMPMAP);
                   4816:                                sp->sg_pte = NULL;
                   4817:
                   4818:                                if (--rp->rg_nsegmap == 0) {
                   4819:                                        free(rp->rg_segmap, M_VMPMAP);
                   4820:                                        rp->rg_segmap = NULL;
                   4821:                                        free(rp->rg_seg_ptps, M_VMPMAP);
1.72      pk       4822:                                        setpgt4m(&pm->pm_reg_ptps[vr],
                   4823:                                                SRMMU_TEINVALID);
1.55      pk       4824:                                }
                   4825:                        }
                   4826:                }
                   4827:        nextpv:
                   4828:                npv = pv->pv_next;
                   4829:                if (pv != pv0)
                   4830:                        free(pv, M_VMPVENT);
                   4831:                if ((pv = npv) == NULL)
                   4832:                        break;
                   4833:        }
                   4834:        pv0->pv_pmap = NULL;
                   4835:        pv0->pv_next = NULL; /* ? */
                   4836:        pv0->pv_flags = flags;
1.71      pk       4837:        setcontext4m(ctx);
1.55      pk       4838:        splx(s);
                   4839: }
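/*
 * Note on the loop above: the `flags |= MR4M(tpte)' step is what keeps
 * the referenced/modified history alive.  Each mapping's PTE is folded
 * into the pv-head flags before it is invalidated, so that later
 * pmap_is_referenced()/pmap_is_modified() queries still see the bits.
 * A sketch of the idea, assuming MR4M() merely repositions the SRMMU
 * R/M bits into PV_* flag form (its real definition is elsewhere in
 * this file):
 *
 *	flags = pv->pv_flags;
 *	for (each mapping of the page) {
 *		tpte = *ptep;
 *		flags |= MR4M(tpte);			(save R/M first)
 *		setpgt4m(ptep, SRMMU_TEINVALID);	(then drop the PTE)
 *	}
 *	pv0->pv_flags = flags;
 */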
                   4840:
                   4841: /*
                   4842:  * Lower (make more strict) the protection on the specified
                   4843:  * range of this pmap.
                   4844:  *
                   4845:  * There are only two cases: either the protection is going to 0
                   4846:  * (in which case we call pmap_remove to do the dirty work), or
                   4847:  * it is going from read/write to read-only.  The latter is
                   4848:  * fairly easy.
                   4849:  */
                   4850: void
                   4851: pmap_protect4m(pm, sva, eva, prot)
                   4852:        register struct pmap *pm;
                   4853:        vm_offset_t sva, eva;
                   4854:        vm_prot_t prot;
                   4855: {
                   4856:        register int va, nva, vr, vs;
                   4857:        register int s, ctx;
                   4858:        struct regmap *rp;
                   4859:        struct segmap *sp;
                   4860:
                   4861:        if (pm == NULL || prot & VM_PROT_WRITE)
                   4862:                return;
                   4863:
                   4864:        if ((prot & VM_PROT_READ) == 0) {
                   4865:                pmap_remove(pm, sva, eva);
                   4866:                return;
                   4867:        }
                   4868:
                   4869:        write_user_windows();
1.71      pk       4870:        ctx = getcontext4m();
1.55      pk       4871:        s = splpmap();
                   4872:        simple_lock(&pm->pm_lock);
                   4873:
                   4874:        for (va = sva; va < eva;) {
                   4875:                vr = VA_VREG(va);
                   4876:                vs = VA_VSEG(va);
                   4877:                rp = &pm->pm_regmap[vr];
                   4878:                nva = VSTOVA(vr,vs + 1);
                   4879:                if (nva == 0)   /* XXX */
                   4880:                        panic("pmap_protect: last segment"); /* cannot happen (why?) */
                   4881:                if (nva > eva)
                   4882:                        nva = eva;
                   4883:                if (rp->rg_nsegmap == 0) {
                   4884:                        va = nva;
                   4885:                        continue;
                   4886:                }
                   4887: #ifdef DEBUG
                   4888:                if (rp->rg_segmap == NULL)
                   4889:                        panic("pmap_protect: no segments");
                   4890: #endif
                   4891:                sp = &rp->rg_segmap[vs];
                   4892:                if (sp->sg_npte == 0) {
                   4893:                        va = nva;
                   4894:                        continue;
                   4895:                }
                   4896: #ifdef DEBUG
                   4897:                if (sp->sg_pte == NULL)
                   4898:                        panic("pmap_protect: no pages");
                   4899: #endif
1.72      pk       4900:                /* pages loaded: take away write bits from MMU PTEs */
                   4901:                if (pm->pm_ctx)
                   4902:                        setcontext4m(pm->pm_ctxnum);
                   4903:
                   4904:                pmap_stats.ps_npg_prot_all += (nva - va) >> PGSHIFT;
                   4905:                for (; va < nva; va += NBPG) {
                   4906:                        int tpte;
                   4907:                        tpte = sp->sg_pte[VA_SUN4M_VPG(va)];
1.55      pk       4908:                        /*
                   4909:                         * Flush cache so that any existing cache
                   4910:                         * tags are updated.  This is really only
                   4911:                         * needed for PTEs that lose PG_W.
                   4912:                         */
1.72      pk       4913:                        if ((tpte & (PPROT_WRITE|SRMMU_PGTYPE)) ==
                   4914:                            (PPROT_WRITE|PG_SUN4M_OBMEM)) {
                   4915:                                pmap_stats.ps_npg_prot_actual++;
                   4916:                                if (pm->pm_ctx) {
1.69      pk       4917:                                        cache_flush_page(va);
1.72      pk       4918:                                        tlb_flush_page(va);
1.55      pk       4919:                                }
1.72      pk       4920:                                setpgt4m(&sp->sg_pte[VA_SUN4M_VPG(va)],
                   4921:                                         tpte & ~PPROT_WRITE);
1.55      pk       4922:                        }
                   4923:                }
                   4924:        }
                   4925:        simple_unlock(&pm->pm_lock);
                   4926:        splx(s);
1.71      pk       4927:        setcontext4m(ctx);
1.55      pk       4928: }
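/*
 * Note: the loop above advances one MMU segment at a time;
 * VSTOVA(vr, vs + 1) is the first VA of the next segment.  With the
 * SRMMU's 64-entry level-3 tables and 4KB pages a segment spans
 * 64 * 4096 = 256KB.  For a hypothetical range [0x10000, 0x50000),
 * the first pass stops nva at the 0x40000 segment boundary and the
 * second pass clamps nva to eva itself.
 */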
                   4929:
                   4930: /*
                   4931:  * Change the protection and/or wired status of the given (MI) virtual page.
                   4932:  * XXX: should have separate function (or flag) telling whether only wiring
                   4933:  * is changing.
                   4934:  */
                   4935: void
                   4936: pmap_changeprot4m(pm, va, prot, wired)
                   4937:        register struct pmap *pm;
                   4938:        register vm_offset_t va;
                   4939:        vm_prot_t prot;
                   4940:        int wired;
                   4941: {
                   4942:        register int tpte, newprot, ctx, s;
                   4943:
                   4944: #ifdef DEBUG
                   4945:        if (pmapdebug & PDB_CHANGEPROT)
1.66      christos 4946:                printf("pmap_changeprot(%p, %lx, %x, %x)\n",
1.55      pk       4947:                    pm, va, prot, wired);
                   4948: #endif
                   4949:
                   4950:        write_user_windows();   /* paranoia */
                   4951:
1.64      pk       4952:        va &= ~(NBPG-1);
1.55      pk       4953:        if (pm == pmap_kernel())
                   4954:                newprot = prot & VM_PROT_WRITE ? PPROT_N_RWX : PPROT_N_RX;
                   4955:        else
                   4956:                newprot = prot & VM_PROT_WRITE ? PPROT_RWX_RWX : PPROT_RX_RX;
                   4957:
                   4958:        pmap_stats.ps_changeprots++;
                   4959:
                   4960:        s = splpmap();          /* conservative */
1.71      pk       4961:        ctx = getcontext4m();
1.55      pk       4962:        if (pm->pm_ctx) {
1.71      pk       4963:                setcontext4m(pm->pm_ctxnum);
1.55      pk       4964:                tpte = getpte4m(va);
1.69      pk       4965:                if (CACHEINFO.c_vactype == VAC_WRITEBACK &&
1.60      pk       4966:                    (tpte & SRMMU_PGTYPE) == PG_SUN4M_OBMEM)
                   4967:                        cache_flush_page(va); /* XXX: paranoia? */
1.55      pk       4968:        } else {
                   4969:                tpte = getptesw4m(pm, va);
                   4970:        }
                   4971:        if ((tpte & SRMMU_PROT_MASK) == newprot) {
1.72      pk       4972:                /* only wiring changed, and we ignore wiring */
                   4973:                pmap_stats.ps_useless_changeprots++;
                   4974:                goto out;
1.55      pk       4975:        }
                   4976:        if (pm->pm_ctx)
1.60      pk       4977:                setpte4m(va, (tpte & ~SRMMU_PROT_MASK) | newprot);
1.55      pk       4978:        else
1.60      pk       4979:                setptesw4m(pm, va, (tpte & ~SRMMU_PROT_MASK) | newprot);
1.72      pk       4980:
                   4981: out:
1.71      pk       4982:        setcontext4m(ctx);
1.55      pk       4983:        splx(s);
                   4984: }
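/*
 * Note: the PPROT_* constants used above select an SRMMU access
 * permission (ACC) code.  One 3-bit PTE field encodes user and
 * supervisor rights together, which is why kernel mappings use the
 * PPROT_N_* values (no user access; supervisor RX or RWX) while user
 * mappings use values granting the same rights in both modes.  A
 * protection change is thus a read-modify-write of that single field,
 * masked by SRMMU_PROT_MASK as above; the exact bit codes are in pte.h
 * and the SPARC Reference MMU specification.
 */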
                   4985: #endif /* 4m */
                   4986:
                   4987: /*
                   4988:  * Insert (MI) physical page pa at virtual address va in the given pmap.
                   4989:  * NB: the pa parameter includes type bits PMAP_OBIO, PMAP_NC as necessary.
                   4990:  *
                   4991:  * If pa is not in the `managed' range it will not be `bank mapped'.
                   4992:  * This works during bootstrap only because the first 4MB happens to
                   4993:  * map one-to-one.
                   4994:  *
                   4995:  * There may already be something else there, or we might just be
                   4996:  * changing protections and/or wiring on an existing mapping.
                   4997:  *     XXX     should have different entry points for changing!
                   4998:  */
                   4999:
                   5000: #if defined(SUN4) || defined(SUN4C)
                   5001:
                   5002: void
                   5003: pmap_enter4_4c(pm, va, pa, prot, wired)
                   5004:        register struct pmap *pm;
                   5005:        vm_offset_t va, pa;
                   5006:        vm_prot_t prot;
                   5007:        int wired;
                   5008: {
                   5009:        register struct pvlist *pv;
                   5010:        register int pteproto, ctx;
                   5011:
                   5012:        if (pm == NULL)
                   5013:                return;
                   5014:
                   5015:        if (VA_INHOLE(va)) {
                   5016: #ifdef DEBUG
1.66      christos 5017:                printf("pmap_enter: pm %p, va %lx, pa %lx: in MMU hole\n",
1.55      pk       5018:                        pm, va, pa);
                   5019: #endif
                   5020:                return;
                   5021:        }
                   5022:
                   5023: #ifdef DEBUG
                   5024:        if (pmapdebug & PDB_ENTER)
1.66      christos 5025:                printf("pmap_enter(%p, %lx, %lx, %x, %x)\n",
1.55      pk       5026:                    pm, va, pa, prot, wired);
                   5027: #endif
                   5028:
                   5029:        pteproto = PG_V | ((pa & PMAP_TNC) << PG_TNC_SHIFT);
                   5030:        pa &= ~PMAP_TNC;
                   5031:        /*
                   5032:         * Set up prototype for new PTE.  Cannot set PG_NC from PV_NC yet
                   5033:         * since the pvlist no-cache bit might change as a result of the
                   5034:         * new mapping.
                   5035:         */
                   5036:        if ((pteproto & PG_TYPE) == PG_OBMEM && managed(pa)) {
                   5037: #ifdef DIAGNOSTIC
                   5038:                if (!pmap_pa_exists(pa))
                   5039:                        panic("pmap_enter: no such address: %lx", pa);
                   5040: #endif
                   5041:                pv = pvhead(pa);
                   5042:        } else {
                   5043:                pv = NULL;
                   5044:        }
1.60      pk       5045:        pteproto |= atop(pa) & PG_PFNUM;
1.55      pk       5046:        if (prot & VM_PROT_WRITE)
                   5047:                pteproto |= PG_W;
                   5048:
1.71      pk       5049:        ctx = getcontext4();
1.55      pk       5050:        if (pm == pmap_kernel())
                   5051:                pmap_enk4_4c(pm, va, prot, wired, pv, pteproto | PG_S);
                   5052:        else
                   5053:                pmap_enu4_4c(pm, va, prot, wired, pv, pteproto);
1.71      pk       5054:        setcontext4(ctx);
1.55      pk       5055: }
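/*
 * A worked example of the prototype PTE assembled above, using a
 * hypothetical pa and 4KB pages: for writable, cacheable OBMEM kernel
 * memory at pa = 0x00345000 the prototype becomes
 *
 *	PG_V | PG_S | PG_W | (atop(0x00345000) & PG_PFNUM)
 *
 * i.e. valid + supervisor + writable, page frame 0x345.  The PG_NC
 * no-cache bit is deliberately left for pv_link4_4c() to supply, since
 * linking the new mapping onto the pvlist may force the page
 * uncacheable.
 */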
                   5056:
                   5057: /* enter new (or change existing) kernel mapping */
                   5058: void
                   5059: pmap_enk4_4c(pm, va, prot, wired, pv, pteproto)
                   5060:        register struct pmap *pm;
                   5061:        vm_offset_t va;
                   5062:        vm_prot_t prot;
                   5063:        int wired;
                   5064:        register struct pvlist *pv;
                   5065:        register int pteproto;
                   5066: {
                   5067:        register int vr, vs, tpte, i, s;
                   5068:        struct regmap *rp;
                   5069:        struct segmap *sp;
                   5070:
                   5071:        vr = VA_VREG(va);
                   5072:        vs = VA_VSEG(va);
                   5073:        rp = &pm->pm_regmap[vr];
                   5074:        sp = &rp->rg_segmap[vs];
                   5075:        s = splpmap();          /* XXX way too conservative */
                   5076:
1.69      pk       5077: #if defined(SUN4_MMU3L)
                   5078:        if (HASSUN4_MMU3L && rp->rg_smeg == reginval) {
1.55      pk       5079:                vm_offset_t tva;
                   5080:                rp->rg_smeg = region_alloc(&region_locked, pm, vr)->me_cookie;
                   5081:                i = ncontext - 1;
                   5082:                do {
1.71      pk       5083:                        setcontext4(i);
1.55      pk       5084:                        setregmap(va, rp->rg_smeg);
                   5085:                } while (--i >= 0);
1.1       deraadt  5086:
1.43      pk       5087:                /* set all PTEs to invalid, then overwrite one PTE below */
                   5088:                tva = VA_ROUNDDOWNTOREG(va);
                   5089:                for (i = 0; i < NSEGRG; i++) {
                   5090:                        setsegmap(tva, rp->rg_segmap[i].sg_pmeg);
                   5091:                        tva += NBPSG;
                   5092:                }
                   5093:        }
                   5094: #endif
1.55      pk       5095:        if (sp->sg_pmeg != seginval && (tpte = getpte4(va)) & PG_V) {
1.34      pk       5096:                register int addr;
1.1       deraadt  5097:
1.34      pk       5098:                /* old mapping exists, and is of the same pa type */
                   5099:                if ((tpte & (PG_PFNUM|PG_TYPE)) ==
                   5100:                    (pteproto & (PG_PFNUM|PG_TYPE))) {
1.1       deraadt  5101:                        /* just changing protection and/or wiring */
                   5102:                        splx(s);
                   5103:                        pmap_changeprot(pm, va, prot, wired);
                   5104:                        return;
                   5105:                }
                   5106:
1.34      pk       5107:                if ((tpte & PG_TYPE) == PG_OBMEM) {
1.43      pk       5108: #ifdef DEBUG
1.66      christos 5109: printf("pmap_enk: changing existing va=>pa entry: va %lx, pteproto %x\n",
1.43      pk       5110:        va, pteproto);
                   5111: #endif
1.34      pk       5112:                        /*
                   5113:                         * Switcheroo: changing pa for this va.
                   5114:                         * If old pa was managed, remove from pvlist.
                   5115:                         * If old page was cached, flush cache.
                   5116:                         */
1.60      pk       5117:                        addr = ptoa(tpte & PG_PFNUM);
1.31      pk       5118:                        if (managed(addr))
1.58      pk       5119:                                pv_unlink4_4c(pvhead(addr), pm, va);
1.34      pk       5120:                        if ((tpte & PG_NC) == 0) {
1.71      pk       5121:                                setcontext4(0); /* ??? */
1.69      pk       5122:                                cache_flush_page((int)va);
1.34      pk       5123:                        }
1.1       deraadt  5124:                }
                   5125:        } else {
                   5126:                /* adding new entry */
1.43      pk       5127:                sp->sg_npte++;
1.1       deraadt  5128:        }
                   5129:
                   5130:        /*
                   5131:         * If the new mapping is for a managed PA, enter into pvlist.
                   5132:         * Note that the mapping for a malloc page will always be
                   5133:         * unique (hence will never cause a second call to malloc).
                   5134:         */
                   5135:        if (pv != NULL)
1.58      pk       5136:                pteproto |= pv_link4_4c(pv, pm, va);
1.1       deraadt  5137:
1.43      pk       5138:        if (sp->sg_pmeg == seginval) {
1.1       deraadt  5139:                register int tva;
                   5140:
                   5141:                /*
                   5142:                 * Allocate an MMU entry now (on locked list),
                   5143:                 * and map it into every context.  Set all its
                   5144:                 * PTEs invalid (we will then overwrite one, but
                   5145:                 * this is more efficient than looping twice).
                   5146:                 */
                   5147: #ifdef DEBUG
                   5148:                if (pm->pm_ctx == NULL || pm->pm_ctxnum != 0)
                   5149:                        panic("pmap_enk: kern seg but no kern ctx");
                   5150: #endif
1.43      pk       5151:                sp->sg_pmeg = me_alloc(&segm_locked, pm, vr, vs)->me_cookie;
                   5152:                rp->rg_nsegmap++;
                   5153:
1.69      pk       5154: #if defined(SUN4_MMU3L)
                   5155:                if (HASSUN4_MMU3L)
1.43      pk       5156:                        setsegmap(va, sp->sg_pmeg);
                   5157:                else
                   5158: #endif
                   5159:                {
                   5160:                        i = ncontext - 1;
                   5161:                        do {
1.71      pk       5162:                                setcontext4(i);
1.43      pk       5163:                                setsegmap(va, sp->sg_pmeg);
                   5164:                        } while (--i >= 0);
                   5165:                }
1.1       deraadt  5166:
                   5167:                /* set all PTEs to invalid, then overwrite one PTE below */
                   5168:                tva = VA_ROUNDDOWNTOSEG(va);
                   5169:                i = NPTESG;
                   5170:                do {
1.55      pk       5171:                        setpte4(tva, 0);
1.1       deraadt  5172:                        tva += NBPG;
                   5173:                } while (--i > 0);
                   5174:        }
                   5175:
                   5176:        /* ptes kept in hardware only */
1.55      pk       5177:        setpte4(va, pteproto);
1.1       deraadt  5178:        splx(s);
                   5179: }
                   5180:
                   5181: /* enter new (or change existing) user mapping */
1.53      christos 5182: void
1.55      pk       5183: pmap_enu4_4c(pm, va, prot, wired, pv, pteproto)
1.1       deraadt  5184:        register struct pmap *pm;
                   5185:        vm_offset_t va;
                   5186:        vm_prot_t prot;
                   5187:        int wired;
                   5188:        register struct pvlist *pv;
                   5189:        register int pteproto;
                   5190: {
1.43      pk       5191:        register int vr, vs, *pte, tpte, pmeg, s, doflush;
                   5192:        struct regmap *rp;
                   5193:        struct segmap *sp;
1.1       deraadt  5194:
                   5195:        write_user_windows();           /* XXX conservative */
1.43      pk       5196:        vr = VA_VREG(va);
                   5197:        vs = VA_VSEG(va);
                   5198:        rp = &pm->pm_regmap[vr];
1.1       deraadt  5199:        s = splpmap();                  /* XXX conservative */
                   5200:
                   5201:        /*
                   5202:         * If there is no space in which the PTEs can be written
                   5203:         * while they are not in the hardware, this must be a new
                   5204:         * virtual segment.  Get PTE space and count the segment.
                   5205:         *
                   5206:         * TO SPEED UP CTX ALLOC, PUT SEGMENT BOUNDS STUFF HERE
                   5207:         * AND IN pmap_rmu()
                   5208:         */
1.13      pk       5209:
1.43      pk       5210:        GAP_SHRINK(pm,vr);
1.13      pk       5211:
                   5212: #ifdef DEBUG
                   5213:        if (pm->pm_gap_end < pm->pm_gap_start) {
1.66      christos 5214:                printf("pmap_enu: gap_start %x, gap_end %x",
1.13      pk       5215:                        pm->pm_gap_start, pm->pm_gap_end);
                   5216:                panic("pmap_enu: gap botch");
                   5217:        }
                   5218: #endif
                   5219:
1.43      pk       5220: rretry:
                   5221:        if (rp->rg_segmap == NULL) {
                   5222:                /* definitely a new mapping */
                   5223:                register int i;
                   5224:                register int size = NSEGRG * sizeof (struct segmap);
                   5225:
                   5226:                sp = (struct segmap *)malloc((u_long)size, M_VMPMAP, M_WAITOK);
                   5227:                if (rp->rg_segmap != NULL) {
1.66      christos 5228: printf("pmap_enter: segment filled during sleep\n");   /* can this happen? */
1.49      pk       5229:                        free(sp, M_VMPMAP);
1.43      pk       5230:                        goto rretry;
                   5231:                }
1.55      pk       5232:                qzero((caddr_t)sp, size);
1.43      pk       5233:                rp->rg_segmap = sp;
                   5234:                rp->rg_nsegmap = 0;
                   5235:                for (i = NSEGRG; --i >= 0;)
                   5236:                        sp++->sg_pmeg = seginval;
                   5237:        }
                   5238:
                   5239:        sp = &rp->rg_segmap[vs];
                   5240:
                   5241: sretry:
                   5242:        if ((pte = sp->sg_pte) == NULL) {
1.1       deraadt  5243:                /* definitely a new mapping */
                   5244:                register int size = NPTESG * sizeof *pte;
                   5245:
                   5246:                pte = (int *)malloc((u_long)size, M_VMPMAP, M_WAITOK);
1.43      pk       5247:                if (sp->sg_pte != NULL) {
1.66      christos 5248: printf("pmap_enter: pte filled during sleep\n");       /* can this happen? */
1.49      pk       5249:                        free(pte, M_VMPMAP);
1.43      pk       5250:                        goto sretry;
1.1       deraadt  5251:                }
                   5252: #ifdef DEBUG
1.43      pk       5253:                if (sp->sg_pmeg != seginval)
1.1       deraadt  5254:                        panic("pmap_enter: new ptes, but not seginval");
                   5255: #endif
1.55      pk       5256:                qzero((caddr_t)pte, size);
1.43      pk       5257:                sp->sg_pte = pte;
                   5258:                sp->sg_npte = 1;
                   5259:                rp->rg_nsegmap++;
1.1       deraadt  5260:        } else {
                   5261:                /* might be a change: fetch old pte */
                   5262:                doflush = 0;
1.55      pk       5263:                if ((pmeg = sp->sg_pmeg) == seginval) {
                   5264:                        /* software pte */
                   5265:                        tpte = pte[VA_VPG(va)];
                   5266:                } else {
                   5267:                        /* hardware pte */
                   5268:                        if (CTX_USABLE(pm,rp)) {
1.71      pk       5269:                                setcontext4(pm->pm_ctxnum);
1.55      pk       5270:                                tpte = getpte4(va);
1.69      pk       5271:                                doflush = CACHEINFO.c_vactype != VAC_NONE;
1.55      pk       5272:                        } else {
1.71      pk       5273:                                setcontext4(0);
1.55      pk       5274:                                /* XXX use per-cpu pteva? */
1.69      pk       5275:                                if (HASSUN4_MMU3L)
1.55      pk       5276:                                        setregmap(0, tregion);
                   5277:                                setsegmap(0, pmeg);
                   5278:                                tpte = getpte4(VA_VPG(va) << PGSHIFT);
                   5279:                        }
                   5280:                }
                   5281:                if (tpte & PG_V) {
                   5282:                        register int addr;
                   5283:
                   5284:                        /* old mapping exists, and is of the same pa type */
                   5285:                        if ((tpte & (PG_PFNUM|PG_TYPE)) ==
                   5286:                            (pteproto & (PG_PFNUM|PG_TYPE))) {
                   5287:                                /* just changing prot and/or wiring */
                   5288:                                splx(s);
                   5289:                                /* caller should call this directly: */
1.60      pk       5290:                                pmap_changeprot4_4c(pm, va, prot, wired);
1.55      pk       5291:                                if (wired)
                   5292:                                        pm->pm_stats.wired_count++;
                   5293:                                else
                   5294:                                        pm->pm_stats.wired_count--;
                   5295:                                return;
                   5296:                        }
                   5297:                        /*
                   5298:                         * Switcheroo: changing pa for this va.
                   5299:                         * If old pa was managed, remove from pvlist.
                   5300:                         * If old page was cached, flush cache.
                   5301:                         */
1.65      christos 5302: #if 0
1.66      christos 5303: printf("%s[%d]: pmap_enu: changing existing va(%x)=>pa entry\n",
1.65      christos 5304:        curproc->p_comm, curproc->p_pid, va);
                   5305: #endif
1.55      pk       5306:                        if ((tpte & PG_TYPE) == PG_OBMEM) {
1.60      pk       5307:                                addr = ptoa(tpte & PG_PFNUM);
1.55      pk       5308:                                if (managed(addr))
1.58      pk       5309:                                        pv_unlink4_4c(pvhead(addr), pm, va);
1.69      pk       5310:                                if (doflush && (tpte & PG_NC) == 0)
1.55      pk       5311:                                        cache_flush_page((int)va);
                   5312:                        }
                   5313:                } else {
                   5314:                        /* adding new entry */
                   5315:                        sp->sg_npte++;
                   5316:
                   5317:                        /*
                   5318:                         * Increment counters
                   5319:                         */
                   5320:                        if (wired)
                   5321:                                pm->pm_stats.wired_count++;
                   5322:                }
                   5323:        }
                   5324:
                   5325:        if (pv != NULL)
1.58      pk       5326:                pteproto |= pv_link4_4c(pv, pm, va);
1.55      pk       5327:
                   5328:        /*
                   5329:         * Update hardware & software PTEs.
                   5330:         */
                   5331:        if ((pmeg = sp->sg_pmeg) != seginval) {
                   5332:                /* ptes are in hardware */
                   5333:                if (CTX_USABLE(pm,rp))
1.71      pk       5334:                        setcontext4(pm->pm_ctxnum);
1.55      pk       5335:                else {
1.71      pk       5336:                        setcontext4(0);
1.55      pk       5337:                        /* XXX use per-cpu pteva? */
1.69      pk       5338:                        if (HASSUN4_MMU3L)
1.55      pk       5339:                                setregmap(0, tregion);
                   5340:                        setsegmap(0, pmeg);
                   5341:                        va = VA_VPG(va) << PGSHIFT;
                   5342:                }
                   5343:                setpte4(va, pteproto);
                   5344:        }
                   5345:        /* update software copy */
                   5346:        pte += VA_VPG(va);
                   5347:        *pte = pteproto;
                   5348:
                   5349:        splx(s);
                   5350: }
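/*
 * The rretry/sretry labels above implement a common sleep-race
 * pattern: malloc(..., M_WAITOK) may block, another process may
 * install the same table meanwhile, so the pointer is re-checked after
 * the allocation and the loser frees its copy.  In outline:
 *
 *	retry:
 *		if (rp->rg_segmap == NULL) {
 *			sp = malloc(size, M_VMPMAP, M_WAITOK);
 *			if (rp->rg_segmap != NULL) {
 *				free(sp, M_VMPMAP);	(raced and lost)
 *				goto retry;
 *			}
 *			rp->rg_segmap = sp;		(raced and won)
 *		}
 */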
                   5351:
                   5352: #endif /*sun4,4c*/
                   5353:
                   5354: #if defined(SUN4M)             /* Sun4M versions of enter routines */
                   5355: /*
                   5356:  * Insert (MI) physical page pa at virtual address va in the given pmap.
                   5357:  * NB: the pa parameter includes type bits PMAP_OBIO, PMAP_NC as necessary.
                   5358:  *
                   5359:  * If pa is not in the `managed' range it will not be `bank mapped'.
                   5360:  * This works during bootstrap only because the first 4MB happens to
                   5361:  * map one-to-one.
                   5362:  *
                   5363:  * There may already be something else there, or we might just be
                   5364:  * changing protections and/or wiring on an existing mapping.
                   5365:  *     XXX     should have different entry points for changing!
                   5366:  */
                   5367:
                   5368: void
                   5369: pmap_enter4m(pm, va, pa, prot, wired)
                   5370:        register struct pmap *pm;
                   5371:        vm_offset_t va, pa;
                   5372:        vm_prot_t prot;
                   5373:        int wired;
                   5374: {
                   5375:        register struct pvlist *pv;
                   5376:        register int pteproto, ctx;
                   5377:
                   5378:        if (pm == NULL)
                   5379:                return;
                   5380:
                   5381: #ifdef DEBUG
                   5382:        if (pmapdebug & PDB_ENTER)
1.66      christos 5383:                printf("pmap_enter(%p, %lx, %lx, %x, %x)\n",
1.55      pk       5384:                    pm, va, pa, prot, wired);
                   5385: #endif
1.60      pk       5386:
                   5387:        /* Initialise pteproto with cache bit */
                   5388:        pteproto = (pa & PMAP_NC) == 0 ? SRMMU_PG_C : 0;
1.55      pk       5389:
                   5390:        if (pa & PMAP_TYPE4M) {         /* this page goes in an iospace */
1.69      pk       5391:                if (cpuinfo.cpu_type == CPUTYP_MS1)
1.58      pk       5392:                        panic("pmap_enter4m: attempt to use 36-bit iospace on"
                   5393:                              " MicroSPARC");
1.55      pk       5394:                pteproto |= (pa & PMAP_TYPE4M) << PMAP_PTESHFT4M;
                   5395:        }
                   5396:
                   5397:        /* Make sure we get a pte with appropriate perms! */
                   5398:        pteproto |= SRMMU_TEPTE | PPROT_RX_RX;
                   5399:
                   5400:        pa &= ~PMAP_TNC;
                   5401:        /*
                   5402:         * Set up prototype for new PTE.  Cannot set PG_NC from PV_NC yet
                   5403:         * since the pvlist no-cache bit might change as a result of the
                   5404:         * new mapping.
                   5405:         */
                   5406:        if ((pteproto & SRMMU_PGTYPE) == PG_SUN4M_OBMEM && managed(pa)) {
                   5407: #ifdef DIAGNOSTIC
                   5408:                if (!pmap_pa_exists(pa))
                   5409:                        panic("pmap_enter: no such address: %lx", pa);
                   5410: #endif
                   5411:                pv = pvhead(pa);
                   5412:        } else {
                   5413:                pv = NULL;
                   5414:        }
1.60      pk       5415:        pteproto |= (atop(pa) << SRMMU_PPNSHIFT);
1.55      pk       5416:
                   5417:        if (prot & VM_PROT_WRITE)
                   5418:                pteproto |= PPROT_WRITE;
                   5419:
1.71      pk       5420:        ctx = getcontext4m();
1.55      pk       5421:
                   5422:        if (pm == pmap_kernel())
1.58      pk       5423:                pmap_enk4m(pm, va, prot, wired, pv, pteproto | PPROT_S);
1.55      pk       5424:        else
1.58      pk       5425:                pmap_enu4m(pm, va, prot, wired, pv, pteproto);
1.55      pk       5426:
1.71      pk       5427:        setcontext4m(ctx);
1.55      pk       5428: }
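/*
 * A worked example of the SRMMU prototype PTE built above, using a
 * hypothetical pa and 4KB pages: for writable, cacheable OBMEM at
 * pa = 0x00345000 the prototype is
 *
 *	SRMMU_PG_C | SRMMU_TEPTE | PPROT_RX_RX | PPROT_WRITE
 *	    | (atop(0x00345000) << SRMMU_PPNSHIFT)
 *
 * The physical page number occupies the high bits of the PTE, the
 * entry type (SRMMU_TEPTE) the low two, and the access code sits in
 * between; 36-bit iospace attributes arrive encoded in the PMAP_TYPE4M
 * bits of pa and are shifted into the PTE the same way.
 */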
                   5429:
                   5430: /* enter new (or change existing) kernel mapping */
                   5431: void
                   5432: pmap_enk4m(pm, va, prot, wired, pv, pteproto)
                   5433:        register struct pmap *pm;
                   5434:        vm_offset_t va;
                   5435:        vm_prot_t prot;
                   5436:        int wired;
                   5437:        register struct pvlist *pv;
                   5438:        register int pteproto;
                   5439: {
                   5440:        register int vr, vs, tpte, s;
                   5441:        struct regmap *rp;
                   5442:        struct segmap *sp;
                   5443:
                   5444: #ifdef DEBUG
                   5445:        if (va < KERNBASE)
1.72      pk       5446:                panic("pmap_enk4m: can't enter va 0x%lx below KERNBASE", va);
1.55      pk       5447: #endif
                   5448:        vr = VA_VREG(va);
                   5449:        vs = VA_VSEG(va);
                   5450:        rp = &pm->pm_regmap[vr];
                   5451:        sp = &rp->rg_segmap[vs];
                   5452:
                   5453:        s = splpmap();          /* XXX way too conservative */
                   5454:
                   5455:        if (rp->rg_seg_ptps == NULL) /* enter new region */
                   5456:                panic("pmap_enk4m: missing kernel region table for va %lx",va);
                   5457:
1.72      pk       5458:        tpte = sp->sg_pte[VA_SUN4M_VPG(va)];
                   5459:        if ((tpte & SRMMU_TETYPE) == SRMMU_TEPTE) {
1.55      pk       5460:                register int addr;
                   5461:
                   5462:                /* old mapping exists, and is of the same pa type */
                   5463:
                   5464:                if ((tpte & SRMMU_PPNMASK) == (pteproto & SRMMU_PPNMASK)) {
                   5465:                        /* just changing protection and/or wiring */
                   5466:                        splx(s);
                   5467:                        pmap_changeprot(pm, va, prot, wired);
                   5468:                        return;
                   5469:                }
                   5470:
                   5471:                if ((tpte & SRMMU_PGTYPE) == PG_SUN4M_OBMEM) {
                   5472: #ifdef DEBUG
1.66      christos 5473: printf("pmap_enk4m: changing existing va=>pa entry: va %lx, pteproto %x, "
1.55      pk       5474:        "oldpte %x\n", va, pteproto, tpte);
                   5475: #endif
                   5476:                        /*
                   5477:                         * Switcheroo: changing pa for this va.
                   5478:                         * If old pa was managed, remove from pvlist.
                   5479:                         * If old page was cached, flush cache.
                   5480:                         */
1.60      pk       5481:                        addr = ptoa((tpte & SRMMU_PPNMASK) >> SRMMU_PPNSHIFT);
1.55      pk       5482:                        if (managed(addr))
1.58      pk       5483:                                pv_unlink4m(pvhead(addr), pm, va);
1.55      pk       5484:                        if (tpte & SRMMU_PG_C) {
1.71      pk       5485:                                setcontext4m(0);        /* ??? */
1.69      pk       5486:                                cache_flush_page((int)va);
1.55      pk       5487:                        }
                   5488:                }
                   5489:        } else {
                   5490:                /* adding new entry */
                   5491:                sp->sg_npte++;
                   5492:        }
                   5493:
                   5494:        /*
                   5495:         * If the new mapping is for a managed PA, enter into pvlist.
                   5496:         * Note that the mapping for a malloc page will always be
                   5497:         * unique (hence will never cause a second call to malloc).
                   5498:         */
                   5499:        if (pv != NULL)
                   5500:                pteproto &= ~(pv_link4m(pv, pm, va));
                   5501:
1.72      pk       5502: #ifdef DEBUG
1.55      pk       5503:        if (sp->sg_pte == NULL) /* If no existing pagetable */
1.60      pk       5504:                panic("pmap_enk4m: missing segment table for va 0x%lx",va);
1.72      pk       5505: #endif
1.55      pk       5506:
1.72      pk       5507:        tlb_flush_page(va);
                   5508:        setpgt4m(&sp->sg_pte[VA_SUN4M_VPG(va)], pteproto);
1.55      pk       5509:
                   5510:        splx(s);
                   5511: }
                   5512:
                   5513: /* enter new (or change existing) user mapping */
                   5514: void
                   5515: pmap_enu4m(pm, va, prot, wired, pv, pteproto)
                   5516:        register struct pmap *pm;
                   5517:        vm_offset_t va;
                   5518:        vm_prot_t prot;
                   5519:        int wired;
                   5520:        register struct pvlist *pv;
                   5521:        register int pteproto;
                   5522: {
1.72      pk       5523:        register int vr, vs, *pte, tpte, s;
1.55      pk       5524:        struct regmap *rp;
                   5525:        struct segmap *sp;
                   5526:
1.72      pk       5527: #ifdef DEBUG
                   5528:        if (KERNBASE < va)
                   5529:                panic("pmap_enu4m: can't enter va 0x%lx above KERNBASE", va);
                   5530: #endif
                   5531:
1.55      pk       5532:        write_user_windows();           /* XXX conservative */
                   5533:        vr = VA_VREG(va);
                   5534:        vs = VA_VSEG(va);
                   5535:        rp = &pm->pm_regmap[vr];
                   5536:        s = splpmap();                  /* XXX conservative */
                   5537:
                   5538: rretry:
                   5539:        if (rp->rg_segmap == NULL) {
                   5540:                /* definitely a new mapping */
                   5541:                register int size = NSEGRG * sizeof (struct segmap);
                   5542:
                   5543:                sp = (struct segmap *)malloc((u_long)size, M_VMPMAP, M_WAITOK);
                   5544:                if (rp->rg_segmap != NULL) {
                   5545: #ifdef DEBUG
1.66      christos 5546: printf("pmap_enu4m: segment filled during sleep\n");   /* can this happen? */
1.55      pk       5547: #endif
                   5548:                        free(sp, M_VMPMAP);
                   5549:                        goto rretry;
                   5550:                }
                   5551:                qzero((caddr_t)sp, size);
                   5552:                rp->rg_segmap = sp;
                   5553:                rp->rg_nsegmap = 0;
                   5554:                rp->rg_seg_ptps = NULL;
                   5555:        }
                   5556: rgretry:
                   5557:        if (rp->rg_seg_ptps == NULL) {
                   5558:                /* Need a segment table */
1.73      pk       5559:                int size, i, *ptd;
                   5560:
1.55      pk       5561:                size = SRMMU_L2SIZE * sizeof(long);
1.73      pk       5562:                ptd = (int *)malloc(size, M_VMPMAP, M_WAITOK);
1.55      pk       5563:                if (rp->rg_seg_ptps != NULL) {
                   5564: #ifdef DEBUG
1.66      christos 5565: printf("pmap_enu4m: bizarre segment table fill during sleep\n");
1.55      pk       5566: #endif
1.73      pk       5567:                        free(ptd, M_VMPMAP);
1.55      pk       5568:                        goto rgretry;
                   5569:                }
1.72      pk       5570: #if 0
1.69      pk       5571:                if ((cpuinfo.flags & CPUFLG_CACHEPAGETABLES) == 0)
1.73      pk       5572:                        kvm_uncache((char *)ptd, (size+NBPG-1)/NBPG);
1.72      pk       5573: #endif
1.55      pk       5574:
1.73      pk       5575:                rp->rg_seg_ptps = ptd;
                   5576:                for (i = 0; i < SRMMU_L2SIZE; i++)
1.74    ! pk       5577:                        setpgt4m(&ptd[i], SRMMU_TEINVALID);
1.72      pk       5578:                setpgt4m(&pm->pm_reg_ptps[vr],
1.73      pk       5579:                         (VA2PA((caddr_t)ptd) >> SRMMU_PPNPASHIFT) | SRMMU_TEPTD);
1.55      pk       5580:        }
                   5581:
                   5582:        sp = &rp->rg_segmap[vs];
                   5583:
                   5584: sretry:
                   5585:        if ((pte = sp->sg_pte) == NULL) {
                   5586:                /* definitely a new mapping */
1.73      pk       5587:                int i, size = SRMMU_L3SIZE * sizeof(*pte);
1.55      pk       5588:
                   5589:                pte = (int *)malloc((u_long)size, M_VMPMAP, M_WAITOK);
                   5590:                if (sp->sg_pte != NULL) {
1.66      christos 5591: printf("pmap_enter: pte filled during sleep\n");       /* can this happen? */
1.55      pk       5592:                        free(pte, M_VMPMAP);
                   5593:                        goto sretry;
                   5594:                }
1.72      pk       5595: #if 0
1.69      pk       5596:                if ((cpuinfo.flags & CPUFLG_CACHEPAGETABLES) == 0)
1.57      abrown   5597:                        kvm_uncache((caddr_t)pte, (size+NBPG-1)/NBPG);
1.72      pk       5598: #endif
1.55      pk       5599:
                   5600:                sp->sg_pte = pte;
                   5601:                sp->sg_npte = 1;
                   5602:                rp->rg_nsegmap++;
1.74    ! pk       5603:                for (i = 0; i < SRMMU_L3SIZE; i++)
        !          5604:                        setpgt4m(&pte[i], SRMMU_TEINVALID);
1.72      pk       5605:                setpgt4m(&rp->rg_seg_ptps[vs],
                   5606:                        (VA2PA((caddr_t)pte) >> SRMMU_PPNPASHIFT) | SRMMU_TEPTD);
1.55      pk       5607:        } else {
1.72      pk       5608:                /*
                   5609:                 * Might be a change: fetch old pte
                   5610:                 * Note we're only interested in the PTE's page frame
                   5611:                 * number and type bits, so the memory copy will do.
                   5612:                 */
                   5613:                tpte = pte[VA_SUN4M_VPG(va)];
1.55      pk       5614:
                   5615:                if ((tpte & SRMMU_TETYPE) == SRMMU_TEPTE) {
1.34      pk       5616:                        register int addr;
1.1       deraadt  5617:
1.34      pk       5618:                        /* old mapping exists, and is of the same pa type */
1.55      pk       5619:                        if ((tpte & SRMMU_PPNMASK) ==
                   5620:                            (pteproto & SRMMU_PPNMASK)) {
1.1       deraadt  5621:                                /* just changing prot and/or wiring */
                   5622:                                splx(s);
                   5623:                                /* caller should call this directly: */
1.60      pk       5624:                                pmap_changeprot4m(pm, va, prot, wired);
1.15      deraadt  5625:                                if (wired)
                   5626:                                        pm->pm_stats.wired_count++;
                   5627:                                else
                   5628:                                        pm->pm_stats.wired_count--;
1.1       deraadt  5629:                                return;
                   5630:                        }
                   5631:                        /*
                   5632:                         * Switcheroo: changing pa for this va.
                   5633:                         * If old pa was managed, remove from pvlist.
                   5634:                         * If old page was cached, flush cache.
                   5635:                         */
1.60      pk       5636: #ifdef DEBUG
1.72      pk       5637: if (pmapdebug & PDB_SWITCHMAP)
1.66      christos 5638: printf("%s[%d]: pmap_enu: changing existing va(%x)=>pa(pte=%x) entry\n",
1.72      pk       5639:        curproc->p_comm, curproc->p_pid, (int)va, tpte);
1.60      pk       5640: #endif
1.55      pk       5641:                        if ((tpte & SRMMU_PGTYPE) == PG_SUN4M_OBMEM) {
1.60      pk       5642:                                addr = ptoa( (tpte & SRMMU_PPNMASK) >>
                   5643:                                             SRMMU_PPNSHIFT);
1.31      pk       5644:                                if (managed(addr))
1.58      pk       5645:                                        pv_unlink4m(pvhead(addr), pm, va);
1.72      pk       5646:                                if (pm->pm_ctx && (tpte & SRMMU_PG_C))
1.34      pk       5647:                                        cache_flush_page((int)va);
1.31      pk       5648:                        }
1.1       deraadt  5649:                } else {
                   5650:                        /* adding new entry */
1.43      pk       5651:                        sp->sg_npte++;
1.15      deraadt  5652:
                   5653:                        /*
                   5654:                         * Increment counters
                   5655:                         */
                   5656:                        if (wired)
                   5657:                                pm->pm_stats.wired_count++;
1.1       deraadt  5658:                }
                   5659:        }
                   5660:        if (pv != NULL)
1.55      pk       5661:                pteproto &= ~(pv_link4m(pv, pm, va));
1.1       deraadt  5662:
                   5663:        /*
1.72      pk       5664:         * Update PTEs, flush TLB as necessary.
1.1       deraadt  5665:         */
1.72      pk       5666:        if (pm->pm_ctx) {
1.71      pk       5667:                setcontext4m(pm->pm_ctxnum);
1.72      pk       5668:                tlb_flush_page(va);
                   5669:        }
                   5670:        setpgt4m(&sp->sg_pte[VA_SUN4M_VPG(va)], pteproto);
1.1       deraadt  5671:
                   5672:        splx(s);
                   5673: }
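/*
 * Note: pmap_enu4m grows the SRMMU's three-level tree on demand: a
 * region-table entry points, via a page-table descriptor (PTD), at a
 * 64-entry segment table whose entries in turn point at 64-entry page
 * tables.  A PTD is simply the table's physical address shifted down
 * and tagged SRMMU_TEPTD, as in
 *
 *	setpgt4m(&rp->rg_seg_ptps[vs],
 *	    (VA2PA((caddr_t)pte) >> SRMMU_PPNPASHIFT) | SRMMU_TEPTD);
 *
 * so a miss in the tree costs at most two mallocs (segment table, then
 * page table) before the final setpgt4m() of the PTE itself.
 */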
1.55      pk       5674: #endif /* sun4m */
1.1       deraadt  5675:
                   5676: /*
                   5677:  * Change the wiring attribute for a map/virtual-address pair.
                   5678:  */
                   5679: /* ARGSUSED */
                   5680: void
                   5681: pmap_change_wiring(pm, va, wired)
                   5682:        struct pmap *pm;
                   5683:        vm_offset_t va;
                   5684:        int wired;
                   5685: {
                   5686:
                   5687:        pmap_stats.ps_useless_changewire++;
                   5688: }
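/*
 * Note: this pmap records wiring in pm_stats.wired_count but never
 * acts on it (cf. the ``we ignore wiring'' comments above), so the MI
 * entry point reduces to bumping a statistics counter.
 */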
                   5689:
                   5690: /*
                   5691:  * Extract the physical page address associated
                   5692:  * with the given map/virtual_address pair.
                   5693:  * GRR, the vm code knows; we should not have to do this!
                   5694:  */
1.55      pk       5695:
                   5696: #if defined(SUN4) || defined(SUN4C)
1.1       deraadt  5697: vm_offset_t
1.55      pk       5698: pmap_extract4_4c(pm, va)
1.1       deraadt  5699:        register struct pmap *pm;
                   5700:        vm_offset_t va;
                   5701: {
                   5702:        register int tpte;
1.43      pk       5703:        register int vr, vs;
                   5704:        struct regmap *rp;
                   5705:        struct segmap *sp;
1.1       deraadt  5706:
                   5707:        if (pm == NULL) {
1.66      christos 5708:                printf("pmap_extract: null pmap\n");
1.1       deraadt  5709:                return (0);
                   5710:        }
1.43      pk       5711:        vr = VA_VREG(va);
                   5712:        vs = VA_VSEG(va);
                   5713:        rp = &pm->pm_regmap[vr];
                   5714:        if (rp->rg_segmap == NULL) {
1.66      christos 5715:                printf("pmap_extract: invalid segment (%d)\n", vr);
1.43      pk       5716:                return (0);
                   5717:        }
                   5718:        sp = &rp->rg_segmap[vs];
                   5719:
                   5720:        if (sp->sg_pmeg != seginval) {
1.71      pk       5721:                register int ctx = getcontext4();
1.1       deraadt  5722:
1.43      pk       5723:                if (CTX_USABLE(pm,rp)) {
1.61      pk       5724:                        CHANGE_CONTEXTS(ctx, pm->pm_ctxnum);
1.55      pk       5725:                        tpte = getpte4(va);
1.1       deraadt  5726:                } else {
1.61      pk       5727:                        CHANGE_CONTEXTS(ctx, 0);
1.69      pk       5728:                        if (HASSUN4_MMU3L)
1.43      pk       5729:                                setregmap(0, tregion);
                   5730:                        setsegmap(0, sp->sg_pmeg);
1.55      pk       5731:                        tpte = getpte4(VA_VPG(va) << PGSHIFT);
1.1       deraadt  5732:                }
1.71      pk       5733:                setcontext4(ctx);
1.1       deraadt  5734:        } else {
1.43      pk       5735:                register int *pte = sp->sg_pte;
1.1       deraadt  5736:
                   5737:                if (pte == NULL) {
1.66      christos 5738:                        printf("pmap_extract: invalid segment\n");
1.1       deraadt  5739:                        return (0);
                   5740:                }
                   5741:                tpte = pte[VA_VPG(va)];
                   5742:        }
                   5743:        if ((tpte & PG_V) == 0) {
1.66      christos 5744:                printf("pmap_extract: invalid pte\n");
1.1       deraadt  5745:                return (0);
                   5746:        }
                   5747:        tpte &= PG_PFNUM;
1.1       deraadt  5749:        return ((tpte << PGSHIFT) | (va & PGOFSET));
                   5750: }
1.55      pk       5751: #endif /*4,4c*/
                   5752:
                   5753: #if defined(SUN4M)             /* 4m version of pmap_extract */
                   5754: /*
                   5755:  * Extract the physical page address associated
                   5756:  * with the given map/virtual_address pair.
                   5757:  * GRR, the vm code knows; we should not have to do this!
                   5758:  */
                   5759: vm_offset_t
                   5760: pmap_extract4m(pm, va)
                   5761:        register struct pmap *pm;
                   5762:        vm_offset_t va;
                   5763: {
1.72      pk       5764:        register int tpte;
1.55      pk       5765:
                   5766:        if (pm == NULL) {
1.66      christos 5767:                printf("pmap_extract: null pmap\n");
1.55      pk       5768:                return (0);
                   5769:        }
                   5770:
1.72      pk       5771: #if 0
1.55      pk       5772:        if (pm->pm_ctx) {
1.72      pk       5773:                int ctx = getcontext4m();
1.61      pk       5774:                CHANGE_CONTEXTS(ctx, pm->pm_ctxnum);
1.55      pk       5775:                tpte = getpte4m(va);
1.71      pk       5776:                setcontext4m(ctx);
1.55      pk       5777:        } else
1.58      pk       5778:                tpte = getptesw4m(pm, va);
1.72      pk       5779: #else
                   5780:        tpte = getptesw4m(pm, va);
                   5781: #endif
1.55      pk       5782:
1.72      pk       5783: #ifdef DEBUG
                   5784:        if ((tpte & SRMMU_TETYPE) != SRMMU_TEPTE) {
                   5785:                printf("pmap_extract: invalid pte of type %d\n",
                   5786:                       tpte & SRMMU_TETYPE);
                   5787:                return (0);
                   5788:        }
                   5789: #endif
1.55      pk       5790:
1.60      pk       5791:        return (ptoa((tpte & SRMMU_PPNMASK) >> SRMMU_PPNSHIFT) | VA_OFF(va));
1.55      pk       5792: }
                   5793: #endif /* sun4m */
1.1       deraadt  5794:
                   5795: /*
                   5796:  * Copy the range specified by src_addr/len
                   5797:  * from the source map to the range dst_addr/len
                   5798:  * in the destination map.
                   5799:  *
                   5800:  * This routine is only advisory and need not do anything.
                   5801:  */
                   5802: /* ARGSUSED */
                   5803: void
                   5804: pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
                   5805:        struct pmap *dst_pmap, *src_pmap;
                   5806:        vm_offset_t dst_addr;
                   5807:        vm_size_t len;
                   5808:        vm_offset_t src_addr;
                   5809: {
1.55      pk       5810: #if 0
                   5811:        if (CPU_ISSUN4M) {
                   5812:                register int i, pte;
                   5813:                for (i = 0; i < len/NBPG; i++) {
                   5814:                        pte = getptesw4m(src_pmap, src_addr);
                   5815:                        pmap_enter(dst_pmap, dst_addr,
1.60      pk       5816:                                   ptoa((pte & SRMMU_PPNMASK) >>
                   5817:                                        SRMMU_PPNSHIFT) |
                   5818:                                    VA_OFF(src_addr),
                   5819:                                   (pte & PPROT_WRITE)
                   5820:                                        ? VM_PROT_WRITE| VM_PROT_READ
                   5821:                                        : VM_PROT_READ,
1.55      pk       5822:                                   0);
                   5823:                        src_addr += NBPG;
                   5824:                        dst_addr += NBPG;
                   5825:                }
                   5826:        }
                   5827: #endif
1.1       deraadt  5828: }
                   5829:
                   5830: /*
                   5831:  * Require that all active physical maps contain no
                   5832:  * incorrect entries NOW.  [This update includes
                   5833:  * forcing updates of any address map caching.]
                   5834:  */
                   5835: void
                   5836: pmap_update()
                   5837: {
1.55      pk       5838: #if defined(SUN4M)
                   5839:        if (CPU_ISSUN4M)
                   5840:                tlb_flush_all();        /* %%%: Extreme Paranoia?  */
                   5841: #endif
1.1       deraadt  5842: }
                   5843:
                   5844: /*
                   5845:  * Garbage collects the physical map system for
                   5846:  * pages which are no longer used.
                   5847:  * Success need not be guaranteed -- that is, there
                   5848:  * may well be pages which are not referenced, but
                   5849:  * others may be collected.
                   5850:  * Called by the pageout daemon when pages are scarce.
                   5851:  */
                   5852: /* ARGSUSED */
                   5853: void
                   5854: pmap_collect(pm)
                   5855:        struct pmap *pm;
                   5856: {
                   5857: }
                   5858:
1.55      pk       5859: #if defined(SUN4) || defined(SUN4C)
                   5860:
1.1       deraadt  5861: /*
                   5862:  * Clear the modify bit for the given physical page.
                   5863:  */
                   5864: void
1.55      pk       5865: pmap_clear_modify4_4c(pa)
1.1       deraadt  5866:        register vm_offset_t pa;
                   5867: {
                   5868:        register struct pvlist *pv;
                   5869:
1.34      pk       5870:        if ((pa & (PMAP_TNC & ~PMAP_NC)) == 0 && managed(pa)) {
1.1       deraadt  5871:                pv = pvhead(pa);
1.58      pk       5872:                (void) pv_syncflags4_4c(pv);
1.1       deraadt  5873:                pv->pv_flags &= ~PV_MOD;
                   5874:        }
                   5875: }
                   5876:
                   5877: /*
                   5878:  * Tell whether the given physical page has been modified.
                   5879:  */
                   5880: int
1.55      pk       5881: pmap_is_modified4_4c(pa)
1.1       deraadt  5882:        register vm_offset_t pa;
                   5883: {
                   5884:        register struct pvlist *pv;
                   5885:
1.34      pk       5886:        if ((pa & (PMAP_TNC & ~PMAP_NC)) == 0 && managed(pa)) {
1.1       deraadt  5887:                pv = pvhead(pa);
1.58      pk       5888:                if (pv->pv_flags & PV_MOD || pv_syncflags4_4c(pv) & PV_MOD)
1.1       deraadt  5889:                        return (1);
                   5890:        }
                   5891:        return (0);
                   5892: }
                   5893:
                   5894: /*
                   5895:  * Clear the reference bit for the given physical page.
                   5896:  */
                   5897: void
1.55      pk       5898: pmap_clear_reference4_4c(pa)
1.1       deraadt  5899:        vm_offset_t pa;
                   5900: {
                   5901:        register struct pvlist *pv;
                   5902:
1.34      pk       5903:        if ((pa & (PMAP_TNC & ~PMAP_NC)) == 0 && managed(pa)) {
1.1       deraadt  5904:                pv = pvhead(pa);
1.58      pk       5905:                (void) pv_syncflags4_4c(pv);
1.1       deraadt  5906:                pv->pv_flags &= ~PV_REF;
                   5907:        }
                   5908: }
                   5909:
                   5910: /*
                   5911:  * Tell whether the given physical page has been referenced.
                   5912:  */
                   5913: int
1.55      pk       5914: pmap_is_referenced4_4c(pa)
1.1       deraadt  5915:        vm_offset_t pa;
                   5916: {
                   5917:        register struct pvlist *pv;
                   5918:
1.34      pk       5919:        if ((pa & (PMAP_TNC & ~PMAP_NC)) == 0 && managed(pa)) {
1.1       deraadt  5920:                pv = pvhead(pa);
1.58      pk       5921:                if (pv->pv_flags & PV_REF || pv_syncflags4_4c(pv) & PV_REF)
1.1       deraadt  5922:                        return (1);
                   5923:        }
                   5924:        return (0);
                   5925: }
1.55      pk       5926: #endif /*4,4c*/
                   5927:
1.58      pk       5928: #if defined(SUN4M)
                   5929:
                   5930: /*
                   5931:  * 4m versions of bit test/set routines
                   5932:  *
                   5933:  * Note that the 4m-specific routines should eventually service these
                   5934:  * requests from their page tables, and the whole pvlist bit mess should
                   5935:  * be dropped for the 4m (unless this causes a performance hit from
                   5936:  * tracing down pagetables/regmap/segmaps).
                   5937:  */
                   5938:
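                          /*
                           * An illustrative sketch (not compiled in) of the alternative
                           * suggested above: serving these queries from the software page
                           * tables rather than from the pvlist flags.  It assumes the SRMMU
                           * `modified' bit SRMMU_PG_M and reuses the getptesw4m() helper
                           * used elsewhere in this file; the routine itself is hypothetical.
                           */
                          #if 0
                          int
                          pmap_is_modified4m_sw(pa)
                          	register vm_offset_t pa;
                          {
                          	register struct pvlist *pv;

                          	if ((pa & (PMAP_TNC & ~PMAP_NC)) != 0 || !managed(pa))
                          		return (0);
                          	/* Any mapping with a dirty software PTE means "modified". */
                          	for (pv = pvhead(pa); pv != NULL; pv = pv->pv_next)
                          		if (pv->pv_pmap != NULL &&
                          		    (getptesw4m(pv->pv_pmap, pv->pv_va) & SRMMU_PG_M) != 0)
                          			return (1);
                          	return (0);
                          }
                          #endif
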
1.55      pk       5939: /*
                   5940:  * Clear the modify bit for the given physical page.
                   5941:  */
                   5942: void
                   5943: pmap_clear_modify4m(pa)           /* XXX %%%: Should service from swpagetbl for 4m */
                   5944:        register vm_offset_t pa;
                   5945: {
                   5946:        register struct pvlist *pv;
                   5947:
                   5948:        if ((pa & (PMAP_TNC & ~PMAP_NC)) == 0 && managed(pa)) {
                   5949:                pv = pvhead(pa);
1.58      pk       5950:                (void) pv_syncflags4m(pv);
1.55      pk       5951:                pv->pv_flags &= ~PV_MOD4M;
                   5952:        }
                   5953: }
                   5954:
                   5955: /*
                   5956:  * Tell whether the given physical page has been modified.
                   5957:  */
                   5958: int
                   5959: pmap_is_modified4m(pa) /* Test performance with SUN4M && SUN4/4C. XXX */
                   5960:        register vm_offset_t pa;
                   5961: {
                   5962:        register struct pvlist *pv;
                   5963:
                   5964:        if ((pa & (PMAP_TNC & ~PMAP_NC)) == 0 && managed(pa)) {
                   5965:                pv = pvhead(pa);
                   5966:                if (pv->pv_flags & PV_MOD4M || pv_syncflags4m(pv) & PV_MOD4M)
                    5967:                        return (1);
                   5968:        }
                   5969:        return (0);
                   5970: }
                   5971:
                   5972: /*
                   5973:  * Clear the reference bit for the given physical page.
                   5974:  */
                   5975: void
                   5976: pmap_clear_reference4m(pa)
                   5977:        vm_offset_t pa;
                   5978: {
                   5979:        register struct pvlist *pv;
                   5980:
                   5981:        if ((pa & (PMAP_TNC & ~PMAP_NC)) == 0 && managed(pa)) {
                   5982:                pv = pvhead(pa);
1.58      pk       5983:                (void) pv_syncflags4m(pv);
1.55      pk       5984:                pv->pv_flags &= ~PV_REF4M;
                   5985:        }
                   5986: }
                   5987:
                   5988: /*
                   5989:  * Tell whether the given physical page has been referenced.
                   5990:  */
                   5991: int
                   5992: pmap_is_referenced4m(pa)
                   5993:        vm_offset_t pa;
                   5994: {
                   5995:        register struct pvlist *pv;
                   5996:
                   5997:        if ((pa & (PMAP_TNC & ~PMAP_NC)) == 0 && managed(pa)) {
                   5998:                pv = pvhead(pa);
                   5999:                if (pv->pv_flags & PV_REF4M || pv_syncflags4m(pv) & PV_REF4M)
                    6000:                        return (1);
                   6001:        }
                   6002:        return (0);
                   6003: }
                   6004: #endif /* 4m */
1.1       deraadt  6005:
                   6006: /*
                   6007:  * Make the specified pages (by pmap, offset) pageable (or not) as requested.
                   6008:  *
                   6009:  * A page which is not pageable may not take a fault; therefore, its page
                   6010:  * table entry must remain valid for the duration (or at least, the trap
                   6011:  * handler must not call vm_fault).
                   6012:  *
                   6013:  * This routine is merely advisory; pmap_enter will specify that these pages
                   6014:  * are to be wired down (or not) as appropriate.
                   6015:  */
                   6016: /* ARGSUSED */
                   6017: void
                   6018: pmap_pageable(pm, start, end, pageable)
                   6019:        struct pmap *pm;
                   6020:        vm_offset_t start, end;
                   6021:        int pageable;
                   6022: {
1.2       deraadt  6023: }
                   6024:
                   6025: /*
1.1       deraadt  6026:  * Fill the given MI physical page with zero bytes.
                   6027:  *
                   6028:  * We avoid stomping on the cache.
                   6029:  * XXX might be faster to use destination's context and allow cache to fill?
                   6030:  */
1.55      pk       6031:
                   6032: #if defined(SUN4) || defined(SUN4C)
                   6033:
1.1       deraadt  6034: void
1.55      pk       6035: pmap_zero_page4_4c(pa)
1.1       deraadt  6036:        register vm_offset_t pa;
                   6037: {
                   6038:        register caddr_t va;
                   6039:        register int pte;
                   6040:
1.34      pk       6041:        if (((pa & (PMAP_TNC & ~PMAP_NC)) == 0) && managed(pa)) {
1.1       deraadt  6042:                /*
                   6043:                 * The following might not be necessary since the page
                   6044:                 * is being cleared because it is about to be allocated,
                   6045:                 * i.e., is in use by no one.
                   6046:                 */
1.69      pk       6047:                pv_flushcache(pvhead(pa));
1.60      pk       6048:        }
                   6049:        pte = PG_V | PG_S | PG_W | PG_NC | (atop(pa) & PG_PFNUM);
1.1       deraadt  6050:
                   6051:        va = vpage[0];
1.55      pk       6052:        setpte4(va, pte);
1.1       deraadt  6053:        qzero(va, NBPG);
1.55      pk       6054:        setpte4(va, 0);
1.1       deraadt  6055: }
                   6056:
                   6057: /*
                   6058:  * Copy the given MI physical source page to its destination.
                   6059:  *
                   6060:  * We avoid stomping on the cache as above (with same `XXX' note).
                   6061:  * We must first flush any write-back cache for the source page.
                   6062:  * We go ahead and stomp on the kernel's virtual cache for the
                   6063:  * source page, since the cache can read memory MUCH faster than
                   6064:  * the processor.
                   6065:  */
                   6066: void
1.55      pk       6067: pmap_copy_page4_4c(src, dst)
1.1       deraadt  6068:        vm_offset_t src, dst;
                   6069: {
                   6070:        register caddr_t sva, dva;
                   6071:        register int spte, dpte;
                   6072:
                   6073:        if (managed(src)) {
1.69      pk       6074:                if (CACHEINFO.c_vactype == VAC_WRITEBACK)
1.1       deraadt  6075:                        pv_flushcache(pvhead(src));
1.60      pk       6076:        }
                   6077:        spte = PG_V | PG_S | (atop(src) & PG_PFNUM);
1.1       deraadt  6078:
                   6079:        if (managed(dst)) {
                   6080:                /* similar `might not be necessary' comment applies */
1.69      pk       6081:                if (CACHEINFO.c_vactype != VAC_NONE)
1.1       deraadt  6082:                        pv_flushcache(pvhead(dst));
1.60      pk       6083:        }
                   6084:        dpte = PG_V | PG_S | PG_W | PG_NC | (atop(dst) & PG_PFNUM);
1.1       deraadt  6085:
                   6086:        sva = vpage[0];
                   6087:        dva = vpage[1];
1.55      pk       6088:        setpte4(sva, spte);
                   6089:        setpte4(dva, dpte);
1.1       deraadt  6090:        qcopy(sva, dva, NBPG);  /* loads cache, so we must ... */
1.69      pk       6091:        cache_flush_page((int)sva);
1.55      pk       6092:        setpte4(sva, 0);
                   6093:        setpte4(dva, 0);
                   6094: }
                   6095: #endif /* 4, 4c */
                   6096:
                   6097: #if defined(SUN4M)             /* Sun4M version of copy/zero routines */
                   6098: /*
                   6099:  * Fill the given MI physical page with zero bytes.
                   6100:  *
                   6101:  * We avoid stomping on the cache.
                   6102:  * XXX might be faster to use destination's context and allow cache to fill?
                   6103:  */
                   6104: void
                   6105: pmap_zero_page4m(pa)
                   6106:        register vm_offset_t pa;
                   6107: {
                   6108:        register caddr_t va;
                   6109:        register int pte;
                   6110:
                   6111:        if (((pa & (PMAP_TNC & ~PMAP_NC)) == 0) && managed(pa)) {
                   6112:                /*
                   6113:                 * The following might not be necessary since the page
                   6114:                 * is being cleared because it is about to be allocated,
                   6115:                 * i.e., is in use by no one.
                   6116:                 */
1.69      pk       6117:                if (CACHEINFO.c_vactype != VAC_NONE)
1.55      pk       6118:                        pv_flushcache(pvhead(pa));
1.60      pk       6119:        }
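                          	/*
                          	 * Build a temporary writable kernel PTE for the page.
                          	 * CPUs flagged CPUFLG_CACHE_MANDATORY must keep the
                          	 * mapping cacheable; all others get an uncached mapping
                          	 * so the zeroing does not displace live cache lines.
                          	 */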
1.68      abrown   6120:        pte = (SRMMU_TEPTE | PPROT_S | PPROT_WRITE |
                   6121:               (atop(pa) << SRMMU_PPNSHIFT));
1.69      pk       6122:        if (cpuinfo.flags & CPUFLG_CACHE_MANDATORY)
1.68      abrown   6123:                pte |= SRMMU_PG_C;
                   6124:        else
                   6125:                pte &= ~SRMMU_PG_C;
                   6126:
1.55      pk       6127:        va = vpage[0];
                   6128:        setpte4m((vm_offset_t) va, pte);
                   6129:        qzero(va, NBPG);
                   6130:        setpte4m((vm_offset_t) va, SRMMU_TEINVALID);
                   6131: }
                   6132:
                   6133: /*
                   6134:  * Copy the given MI physical source page to its destination.
                   6135:  *
                   6136:  * We avoid stomping on the cache as above (with same `XXX' note).
                   6137:  * We must first flush any write-back cache for the source page.
                   6138:  * We go ahead and stomp on the kernel's virtual cache for the
                   6139:  * source page, since the cache can read memory MUCH faster than
                   6140:  * the processor.
                   6141:  */
                   6142: void
                   6143: pmap_copy_page4m(src, dst)
                   6144:        vm_offset_t src, dst;
                   6145: {
                   6146:        register caddr_t sva, dva;
                   6147:        register int spte, dpte;
                   6148:
                   6149:        if (managed(src)) {
1.69      pk       6150:                if (CACHEINFO.c_vactype == VAC_WRITEBACK)
1.55      pk       6151:                        pv_flushcache(pvhead(src));
1.60      pk       6152:        }
                   6153:        spte = SRMMU_TEPTE | SRMMU_PG_C | PPROT_S |
                   6154:                (atop(src) << SRMMU_PPNSHIFT);
1.55      pk       6155:
                   6156:        if (managed(dst)) {
                   6157:                /* similar `might not be necessary' comment applies */
1.69      pk       6158:                if (CACHEINFO.c_vactype != VAC_NONE)
1.55      pk       6159:                        pv_flushcache(pvhead(dst));
1.60      pk       6160:        }
1.68      abrown   6161:        dpte = (SRMMU_TEPTE | PPROT_S | PPROT_WRITE |
                   6162:               (atop(dst) << SRMMU_PPNSHIFT));
1.69      pk       6163:        if (cpuinfo.flags & CPUFLG_CACHE_MANDATORY)
1.68      abrown   6164:                dpte |= SRMMU_PG_C;
                   6165:        else
                   6166:                dpte &= ~SRMMU_PG_C;
1.60      pk       6167:
1.55      pk       6168:        sva = vpage[0];
                   6169:        dva = vpage[1];
                   6170:        setpte4m((vm_offset_t) sva, spte);
                   6171:        setpte4m((vm_offset_t) dva, dpte);
                   6172:        qcopy(sva, dva, NBPG);  /* loads cache, so we must ... */
1.69      pk       6173:        cache_flush_page((int)sva);
1.55      pk       6174:        setpte4m((vm_offset_t) sva, SRMMU_TEINVALID);
                   6175:        setpte4m((vm_offset_t) dva, SRMMU_TEINVALID);
1.1       deraadt  6176: }
1.55      pk       6177: #endif /* Sun4M */
1.1       deraadt  6178:
                   6179: /*
                   6180:  * Turn a cdevsw d_mmap value into a byte address for pmap_enter.
                   6181:  * XXX this should almost certainly be done differently, and
                   6182:  *     elsewhere, or even not at all
                   6183:  */
                   6184: vm_offset_t
                   6185: pmap_phys_address(x)
                   6186:        int x;
                   6187: {
                   6188:
                   6189:        return (x);
                   6190: }
                   6191:
                   6192: /*
                   6193:  * Turn off cache for a given (va, number of pages).
                   6194:  *
                   6195:  * We just assert PG_NC for each PTE; the addresses must reside
                   6196:  * in locked kernel space.  A cache flush is also done.
                   6197:  */
1.53      christos 6198: void
1.1       deraadt  6199: kvm_uncache(va, npages)
                   6200:        register caddr_t va;
                   6201:        register int npages;
                   6202: {
                   6203:        register int pte;
1.55      pk       6204:        if (CPU_ISSUN4M) {
                   6205: #if defined(SUN4M)
                   6206:                for (; --npages >= 0; va += NBPG) {
                   6207:                        pte = getpte4m((vm_offset_t) va);
                   6208:                        if ((pte & SRMMU_TETYPE) != SRMMU_TEPTE)
                   6209:                                panic("kvm_uncache: table entry not pte");
                   6210:                        pte &= ~SRMMU_PG_C;
                   6211:                        setpte4m((vm_offset_t) va, pte);
1.69      pk       6212:                        if ((pte & PG_TYPE) == PG_OBMEM)
1.55      pk       6213:                                cache_flush_page((int)va);
                   6214:                }
                   6215: #endif
                   6216:        } else {
                   6217: #if defined(SUN4) || defined(SUN4C)
                   6218:                for (; --npages >= 0; va += NBPG) {
                   6219:                        pte = getpte4(va);
                   6220:                        if ((pte & PG_V) == 0)
                   6221:                                panic("kvm_uncache !pg_v");
                   6222:                        pte |= PG_NC;
                   6223:                        setpte4(va, pte);
1.69      pk       6224:                        if ((pte & PG_TYPE) == PG_OBMEM)
1.55      pk       6225:                                cache_flush_page((int)va);
                   6226:                }
                   6227: #endif
1.1       deraadt  6228:        }
1.21      deraadt  6229: }
                   6230:
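                          /*
                           * A usage sketch (hypothetical variables): a driver that has
                           * mapped a two-page DMA descriptor area at kernel VA `dva'
                           * would make it uncached with:
                           *
                           *	kvm_uncache(dva, 2);
                           */
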
1.46      pk       6231: /*
                   6232:  * Turn on IO cache for a given (va, number of pages).
                   6233:  *
                    6234:  * We just assert PG_IOC for each PTE; the addresses must reside
                    6235:  * in locked kernel space.  No cache flush is done.
                   6236:  */
1.53      christos 6237: void
1.46      pk       6238: kvm_iocache(va, npages)
                   6239:        register caddr_t va;
                   6240:        register int npages;
                   6241: {
                   6242:
1.55      pk       6243: #ifdef SUN4M
                   6244:        if (CPU_ISSUN4M) /* %%%: Implement! */
                   6245:                panic("kvm_iocache: 4m iocache not implemented");
                   6246: #endif
                   6247: #if defined(SUN4) || defined(SUN4C)
1.46      pk       6248:        for (; --npages >= 0; va += NBPG) {
1.55      pk       6249:                register int pte = getpte4(va);
1.46      pk       6250:                if ((pte & PG_V) == 0)
                   6251:                        panic("kvm_iocache !pg_v");
                   6252:                pte |= PG_IOC;
1.55      pk       6253:                setpte4(va, pte);
1.46      pk       6254:        }
1.55      pk       6255: #endif
1.46      pk       6256: }
                   6257:
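                          /*
                           * Count the valid PTEs in the given pmap (kernel regions for the
                           * kernel pmap, user regions otherwise) by summing the per-segment
                           * sg_npte counts; cache the total in pm_stats.resident_count and
                           * return it.
                           */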
1.21      deraadt  6258: int
                   6259: pmap_count_ptes(pm)
                   6260:        register struct pmap *pm;
                   6261: {
                   6262:        register int idx, total;
1.43      pk       6263:        register struct regmap *rp;
                   6264:        register struct segmap *sp;
1.21      deraadt  6265:
1.43      pk       6266:        if (pm == pmap_kernel()) {
                   6267:                rp = &pm->pm_regmap[NUREG];
                   6268:                idx = NKREG;
                   6269:        } else {
                   6270:                rp = pm->pm_regmap;
                   6271:                idx = NUREG;
                   6272:        }
1.21      deraadt  6273:        for (total = 0; idx;)
1.43      pk       6274:                if ((sp = rp[--idx].rg_segmap) != NULL)
                   6275:                        total += sp->sg_npte;
1.21      deraadt  6276:        pm->pm_stats.resident_count = total;
                   6277:        return (total);
1.24      pk       6278: }
                   6279:
                   6280: /*
1.51      gwr      6281:  * Find first virtual address >= *va that is
                   6282:  * least likely to cause cache aliases.
                   6283:  * (This will just seg-align mappings.)
1.24      pk       6284:  */
1.51      gwr      6285: void
1.52      pk       6286: pmap_prefer(foff, vap)
1.51      gwr      6287:        register vm_offset_t foff;
1.52      pk       6288:        register vm_offset_t *vap;
1.24      pk       6289: {
1.52      pk       6290:        register vm_offset_t va = *vap;
                   6291:        register long d, m;
                   6292:
                   6293:        if (VA_INHOLE(va))
                   6294:                va = MMU_HOLE_END;
1.24      pk       6295:
1.48      pk       6296:        m = CACHE_ALIAS_DIST;
                   6297:        if (m == 0)             /* m=0 => no cache aliasing */
1.51      gwr      6298:                return;
1.24      pk       6299:
1.52      pk       6300:        d = foff - va;
                   6301:        d &= (m - 1);
                   6302:        *vap = va + d;
1.23      deraadt  6303: }
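
                          /*
                           * Worked example (illustrative numbers): with an alias distance
                           * m = 0x10000, foff = 0x12345000 and *vap = 0x20000 on entry:
                           *
                           *	d = (0x12345000 - 0x20000) & 0xffff = 0x5000
                           *	*vap = 0x20000 + 0x5000 = 0x25000
                           *
                           * The result is congruent to foff modulo m, so the file offset
                           * and the mapping share a cache `color' and cannot alias.
                           */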
                   6304:
1.53      christos 6305: void
1.23      deraadt  6306: pmap_redzone()
                   6307: {
1.55      pk       6308: #if defined(SUN4M)
                   6309:        if (CPU_ISSUN4M) {
                   6310:                setpte4m(KERNBASE, 0);
                   6311:                return;
                   6312:        }
                   6313: #endif
                   6314: #if defined(SUN4) || defined(SUN4C)
                   6315:        if (CPU_ISSUN4OR4C) {
                   6316:                setpte4(KERNBASE, 0);
                   6317:                return;
                   6318:        }
                   6319: #endif
1.1       deraadt  6320: }
1.43      pk       6321:
                   6322: #ifdef DEBUG
                   6323: /*
                   6324:  * Check consistency of a pmap (time consuming!).
                   6325:  */
1.53      christos 6326: void
1.43      pk       6327: pm_check(s, pm)
                   6328:        char *s;
                   6329:        struct pmap *pm;
                   6330: {
                   6331:        if (pm == pmap_kernel())
                   6332:                pm_check_k(s, pm);
                   6333:        else
                   6334:                pm_check_u(s, pm);
                   6335: }
                   6336:
1.53      christos 6337: void
1.43      pk       6338: pm_check_u(s, pm)
                   6339:        char *s;
                   6340:        struct pmap *pm;
                   6341: {
                   6342:        struct regmap *rp;
                   6343:        struct segmap *sp;
                   6344:        int n, vs, vr, j, m, *pte;
                   6345:
1.55      pk       6346:        if (pm->pm_regmap == NULL)
1.72      pk       6347:                panic("%s: CHK(pmap %p): no region mapping", s, pm);
1.55      pk       6348:
                   6349: #if defined(SUN4M)
                   6350:        if (CPU_ISSUN4M &&
                   6351:            (pm->pm_reg_ptps == NULL ||
                   6352:             pm->pm_reg_ptps_pa != VA2PA((caddr_t)pm->pm_reg_ptps)))
1.72      pk       6353:                panic("%s: CHK(pmap %p): no SRMMU region table or bad pa: "
                   6354:                      "tblva=%p, tblpa=0x%x",
                   6355:                        s, pm, pm->pm_reg_ptps, pm->pm_reg_ptps_pa);
1.55      pk       6356:
                   6357:        if (CPU_ISSUN4M && pm->pm_ctx != NULL &&
1.69      pk       6358:            (cpuinfo.ctx_tbl[pm->pm_ctxnum] != ((VA2PA((caddr_t)pm->pm_reg_ptps)
1.55      pk       6359:                                              >> SRMMU_PPNPASHIFT) |
                   6360:                                             SRMMU_TEPTD)))
                   6361:            panic("%s: CHK(pmap %p): SRMMU region table at %x not installed "
                   6362:                  "for context %d", s, pm, pm->pm_reg_ptps_pa, pm->pm_ctxnum);
                   6363: #endif
                   6364:
1.43      pk       6365:        for (vr = 0; vr < NUREG; vr++) {
                   6366:                rp = &pm->pm_regmap[vr];
                   6367:                if (rp->rg_nsegmap == 0)
                   6368:                        continue;
                   6369:                if (rp->rg_segmap == NULL)
                   6370:                        panic("%s: CHK(vr %d): nsegmap = %d; sp==NULL",
                   6371:                                s, vr, rp->rg_nsegmap);
1.55      pk       6372: #if defined(SUN4M)
                   6373:                if (CPU_ISSUN4M && rp->rg_seg_ptps == NULL)
                   6374:                    panic("%s: CHK(vr %d): nsegmap=%d; no SRMMU segment table",
                   6375:                          s, vr, rp->rg_nsegmap);
                   6376:                if (CPU_ISSUN4M &&
                   6377:                    pm->pm_reg_ptps[vr] != ((VA2PA((caddr_t)rp->rg_seg_ptps) >>
                   6378:                                            SRMMU_PPNPASHIFT) | SRMMU_TEPTD))
                   6379:                    panic("%s: CHK(vr %d): SRMMU segtbl not installed",s,vr);
                   6380: #endif
1.43      pk       6381:                if ((unsigned int)rp < KERNBASE)
1.54      christos 6382:                        panic("%s: rp=%p", s, rp);
1.43      pk       6383:                n = 0;
                   6384:                for (vs = 0; vs < NSEGRG; vs++) {
                   6385:                        sp = &rp->rg_segmap[vs];
                   6386:                        if ((unsigned int)sp < KERNBASE)
1.54      christos 6387:                                panic("%s: sp=%p", s, sp);
1.43      pk       6388:                        if (sp->sg_npte != 0) {
                   6389:                                n++;
                   6390:                                if (sp->sg_pte == NULL)
                   6391:                                        panic("%s: CHK(vr %d, vs %d): npte=%d, "
                   6392:                                           "pte=NULL", s, vr, vs, sp->sg_npte);
1.55      pk       6393: #if defined(SUN4M)
                   6394:                                if (CPU_ISSUN4M &&
                   6395:                                    rp->rg_seg_ptps[vs] !=
                   6396:                                     ((VA2PA((caddr_t)sp->sg_pte)
                   6397:                                        >> SRMMU_PPNPASHIFT) |
                   6398:                                       SRMMU_TEPTD))
                   6399:                                    panic("%s: CHK(vr %d, vs %d): SRMMU page "
                   6400:                                          "table not installed correctly",s,vr,
                   6401:                                          vs);
                   6402: #endif
1.43      pk       6403:                                pte=sp->sg_pte;
                   6404:                                m = 0;
                   6405:                                for (j=0; j<NPTESG; j++,pte++)
1.55      pk       6406:                                    if ((CPU_ISSUN4M
                   6407:                                         ?((*pte & SRMMU_TETYPE) == SRMMU_TEPTE)
                   6408:                                         :(*pte & PG_V)))
                   6409:                                        m++;
1.43      pk       6410:                                if (m != sp->sg_npte)
                   6411:                                    /*if (pmapdebug & 0x10000)*/
1.66      christos 6412:                                        printf("%s: user CHK(vr %d, vs %d): "
1.43      pk       6413:                                            "npte(%d) != # valid(%d)\n",
                   6414:                                                s, vr, vs, sp->sg_npte, m);
                   6415:                        }
                   6416:                }
                   6417:                if (n != rp->rg_nsegmap)
                   6418:                        panic("%s: CHK(vr %d): inconsistent "
                   6419:                                "# of pte's: %d, should be %d",
                   6420:                                s, vr, rp->rg_nsegmap, n);
                   6421:        }
1.53      christos 6422:        return;
1.43      pk       6423: }
                   6424:
1.53      christos 6425: void
1.55      pk       6426: pm_check_k(s, pm)              /* Note: not as extensive as pm_check_u. */
1.43      pk       6427:        char *s;
                   6428:        struct pmap *pm;
                   6429: {
                   6430:        struct regmap *rp;
                   6431:        int vr, vs, n;
                   6432:
1.55      pk       6433:        if (pm->pm_regmap == NULL)
                   6434:            panic("%s: CHK(pmap %p): no region mapping", s, pm);
                   6435:
                   6436: #if defined(SUN4M)
                   6437:        if (CPU_ISSUN4M &&
                   6438:            (pm->pm_reg_ptps == NULL ||
                   6439:             pm->pm_reg_ptps_pa != VA2PA((caddr_t)pm->pm_reg_ptps)))
                   6440:            panic("%s: CHK(pmap %p): no SRMMU region table or bad pa: tblva=%p, tblpa=%x",
                   6441:                  s, pm, pm->pm_reg_ptps, pm->pm_reg_ptps_pa);
                   6442:
                   6443:        if (CPU_ISSUN4M &&
1.69      pk       6444:            (cpuinfo.ctx_tbl[0] != ((VA2PA((caddr_t)pm->pm_reg_ptps) >>
1.55      pk       6445:                                             SRMMU_PPNPASHIFT) | SRMMU_TEPTD)))
                   6446:            panic("%s: CHK(pmap %p): SRMMU region table at %x not installed "
                   6447:                  "for context %d", s, pm, pm->pm_reg_ptps_pa, 0);
                   6448: #endif
1.43      pk       6449:        for (vr = NUREG; vr < NUREG+NKREG; vr++) {
                   6450:                rp = &pm->pm_regmap[vr];
                   6451:                if (rp->rg_segmap == NULL)
                   6452:                        panic("%s: CHK(vr %d): nsegmap = %d; sp==NULL",
                   6453:                                s, vr, rp->rg_nsegmap);
                   6454:                if (rp->rg_nsegmap == 0)
                   6455:                        continue;
1.55      pk       6456: #if defined(SUN4M)
                   6457:                if (CPU_ISSUN4M && rp->rg_seg_ptps == NULL)
                   6458:                    panic("%s: CHK(vr %d): nsegmap=%d; no SRMMU segment table",
                   6459:                          s, vr, rp->rg_nsegmap);
                   6460:                if (CPU_ISSUN4M &&
                   6461:                    pm->pm_reg_ptps[vr] != ((VA2PA((caddr_t)rp->rg_seg_ptps) >>
                   6462:                                            SRMMU_PPNPASHIFT) | SRMMU_TEPTD))
                   6463:                    panic("%s: CHK(vr %d): SRMMU segtbl not installed",s,vr);
                   6464: #endif
1.72      pk       6465:                if (CPU_ISSUN4M) {
                   6466:                        n = NSEGRG;
                   6467:                } else {
                   6468:                        for (n = 0, vs = 0; vs < NSEGRG; vs++) {
                   6469:                                if (rp->rg_segmap[vs].sg_npte)
                   6470:                                        n++;
                   6471:                        }
1.43      pk       6472:                }
                   6473:                if (n != rp->rg_nsegmap)
1.66      christos 6474:                        printf("%s: kernel CHK(vr %d): inconsistent "
1.43      pk       6475:                                "# of pte's: %d, should be %d\n",
                   6476:                                s, vr, rp->rg_nsegmap, n);
                   6477:        }
1.53      christos 6478:        return;
1.43      pk       6479: }
                   6480: #endif
1.46      pk       6481:
                   6482: /*
                    6483:  * Return the size, in pages, of the data pmap_dumpmmu() will dump.
                   6484:  * For each pmeg in the MMU, we'll write NPTESG PTEs.
                   6485:  * The last page or two contains stuff so libkvm can bootstrap.
                   6486:  */
                   6487: int
                   6488: pmap_dumpsize()
                   6489: {
1.67      pk       6490:        long    sz;
                   6491:
                   6492:        sz = ALIGN(sizeof(kcore_seg_t)) + ALIGN(sizeof(cpu_kcore_hdr_t));
                   6493:        sz += npmemarr * sizeof(phys_ram_seg_t);
1.55      pk       6494:
                   6495:        if (CPU_ISSUN4OR4C)
1.67      pk       6496:                sz += (seginval + 1) * NPTESG * sizeof(int);
                   6497:
                   6498:        return (btoc(sz));
1.46      pk       6499: }
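
                          /*
                           * For example (hypothetical sun4c figures): with npmemarr = 2,
                           * seginval = 127 and NPTESG = 64, sz is the two aligned headers,
                           * plus 2 * sizeof(phys_ram_seg_t) for the memory-segment records,
                           * plus 128 * 64 * 4 = 32768 bytes of PTEs; btoc() then rounds
                           * the byte total up to whole pages.
                           */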
                   6500:
                   6501: /*
                   6502:  * Write the mmu contents to the dump device.
                   6503:  * This gets appended to the end of a crash dump since
1.55      pk       6504:  * there is no in-core copy of kernel memory mappings on a 4/4c machine.
1.46      pk       6505:  */
                   6506: int
                   6507: pmap_dumpmmu(dump, blkno)
                   6508:        register daddr_t blkno;
                   6509:        register int (*dump)    __P((dev_t, daddr_t, caddr_t, size_t));
                   6510: {
1.67      pk       6511:        kcore_seg_t     *ksegp;
                   6512:        cpu_kcore_hdr_t *kcpup;
                   6513:        phys_ram_seg_t  memseg;
                   6514:        register int    error = 0;
                   6515:        register int    i, memsegoffset, pmegoffset;
                   6516:        int             buffer[dbtob(1) / sizeof(int)];
                   6517:        int             *bp, *ep;
1.55      pk       6518: #if defined(SUN4C) || defined(SUN4)
1.67      pk       6519:        register int    pmeg;
1.55      pk       6520: #endif
1.46      pk       6521:
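                          /*
                           * EXPEDITE copies `n' bytes (a multiple of 4) from `p' into the
                           * staging buffer, writing the buffer to the dump device and
                           * advancing blkno each time it fills up.
                           */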
1.67      pk       6522: #define EXPEDITE(p,n) do {                                             \
                   6523:        int *sp = (int *)(p);                                           \
                   6524:        int sz = (n);                                                   \
                   6525:        while (sz > 0) {                                                \
                   6526:                *bp++ = *sp++;                                          \
                   6527:                if (bp >= ep) {                                         \
                   6528:                        error = (*dump)(dumpdev, blkno,                 \
                   6529:                                        (caddr_t)buffer, dbtob(1));     \
                   6530:                        if (error != 0)                                 \
                   6531:                                return (error);                         \
                   6532:                        ++blkno;                                        \
                   6533:                        bp = buffer;                                    \
                   6534:                }                                                       \
                   6535:                sz -= 4;                                                \
                   6536:        }                                                               \
                   6537: } while (0)
                   6538:
                   6539:        setcontext(0);
                   6540:
                   6541:        /* Setup bookkeeping pointers */
                   6542:        bp = buffer;
                   6543:        ep = &buffer[sizeof(buffer) / sizeof(buffer[0])];
                   6544:
                   6545:        /* Fill in MI segment header */
                   6546:        ksegp = (kcore_seg_t *)bp;
                   6547:        CORE_SETMAGIC(*ksegp, KCORE_MAGIC, MID_MACHINE, CORE_CPU);
                   6548:        ksegp->c_size = ctob(pmap_dumpsize()) - ALIGN(sizeof(kcore_seg_t));
                   6549:
                   6550:        /* Fill in MD segment header (interpreted by MD part of libkvm) */
                   6551:        kcpup = (cpu_kcore_hdr_t *)((int)bp + ALIGN(sizeof(kcore_seg_t)));
                   6552:        kcpup->cputype = cputyp;
                   6553:        kcpup->nmemseg = npmemarr;
                   6554:        kcpup->memsegoffset = memsegoffset = ALIGN(sizeof(cpu_kcore_hdr_t));
                   6555:        kcpup->npmeg = (CPU_ISSUN4OR4C) ? seginval + 1 : 0;
                   6556:        kcpup->pmegoffset = pmegoffset =
                   6557:                memsegoffset + npmemarr * sizeof(phys_ram_seg_t);
                   6558:
                   6559:        /* Note: we have assumed everything fits in buffer[] so far... */
                   6560:        bp = (int *)&kcpup->segmap_store;
                   6561:        EXPEDITE(&kernel_segmap_store, sizeof(kernel_segmap_store));
                   6562:
                   6563:        /* Align storage for upcoming quad-aligned segment array */
                   6564:        while (bp != (int *)ALIGN(bp)) {
                   6565:                int dummy = 0;
                   6566:                EXPEDITE(&dummy, 4);
                   6567:        }
                   6568:        for (i = 0; i < npmemarr; i++) {
                   6569:                memseg.start = pmemarr[i].addr;
                   6570:                memseg.size = pmemarr[i].len;
                   6571:                EXPEDITE(&memseg, sizeof(phys_ram_seg_t));
                   6572:        }
                   6573:
                   6574:        if (CPU_ISSUN4M)
                   6575:                goto out;
1.55      pk       6576:
                   6577: #if defined(SUN4C) || defined(SUN4)
1.46      pk       6578:        /*
                   6579:         * dump page table entries
                   6580:         *
                   6581:         * We dump each pmeg in order (by segment number).  Since the MMU
                   6582:         * automatically maps the given virtual segment to a pmeg we must
                   6583:         * iterate over the segments by incrementing an unused segment slot
                   6584:         * in the MMU.  This fixed segment number is used in the virtual
                   6585:         * address argument to getpte().
                   6586:         */
1.55      pk       6587:
1.46      pk       6588:        /*
                   6589:         * Go through the pmegs and dump each one.
                   6590:         */
                   6591:        for (pmeg = 0; pmeg <= seginval; ++pmeg) {
                   6592:                register int va = 0;
                   6593:
                   6594:                setsegmap(va, pmeg);
                   6595:                i = NPTESG;
                   6596:                do {
1.67      pk       6597:                        int pte = getpte4(va);
                   6598:                        EXPEDITE(&pte, sizeof(pte));
1.46      pk       6599:                        va += NBPG;
                   6600:                } while (--i > 0);
                   6601:        }
                   6602:        setsegmap(0, seginval);
1.67      pk       6603: #endif
1.46      pk       6604:
1.67      pk       6605: out:
                   6606:        if (bp != buffer)
1.46      pk       6607:                error = (*dump)(dumpdev, blkno++, (caddr_t)buffer, dbtob(1));
                   6608:
                   6609:        return (error);
1.55      pk       6610: }
                   6611:
                   6612: #ifdef EXTREME_DEBUG
                   6613:
                   6614: static void test_region __P((int, int, int));
                   6615:
                   6616: void
                   6617: debug_pagetables()
                   6618: {
                   6619:        register int i;
                   6620:        register int *regtbl;
                   6621:        register int te;
                   6622:
1.66      christos 6623:        printf("\nncontext=%d. ",ncontext);
                   6624:        printf("Context table is at va 0x%x. Level 0 PTP: 0x%x\n",
1.69      pk       6625:               cpuinfo.ctx_tbl, cpuinfo.ctx_tbl[0]);
1.66      christos 6626:        printf("Context 0 region table is at va 0x%x, pa 0x%x. Contents:\n",
1.55      pk       6627:               pmap_kernel()->pm_reg_ptps, pmap_kernel()->pm_reg_ptps_pa);
                   6628:
                   6629:        regtbl = pmap_kernel()->pm_reg_ptps;
                   6630:
1.66      christos 6631:        printf("PROM vector is at 0x%x\n",promvec);
                   6632:        printf("PROM reboot routine is at 0x%x\n",promvec->pv_reboot);
                   6633:        printf("PROM abort routine is at 0x%x\n",promvec->pv_abort);
                   6634:        printf("PROM halt routine is at 0x%x\n",promvec->pv_halt);
1.55      pk       6635:
1.66      christos 6636:        printf("Testing region 0xfe: ");
1.55      pk       6637:        test_region(0xfe,0,16*1024*1024);
1.66      christos 6638:        printf("Testing region 0xff: ");
1.55      pk       6639:        test_region(0xff,0,16*1024*1024);
1.66      christos 6640:        printf("Testing kernel region 0xf8: ");
1.55      pk       6641:        test_region(0xf8, 4096, avail_start);
                   6642:        cngetc();
                   6643:
                   6644:        for (i = 0; i < SRMMU_L1SIZE; i++) {
                   6645:                te = regtbl[i];
                   6646:                if ((te & SRMMU_TETYPE) == SRMMU_TEINVALID)
                   6647:                    continue;
1.66      christos 6648:                printf("Region 0x%x: PTE=0x%x <%s> L2PA=0x%x kernL2VA=0x%x\n",
1.55      pk       6649:                       i, te, ((te & SRMMU_TETYPE) == SRMMU_TEPTE ? "pte" :
                   6650:                               ((te & SRMMU_TETYPE) == SRMMU_TEPTD ? "ptd" :
                   6651:                                ((te & SRMMU_TETYPE) == SRMMU_TEINVALID ?
                   6652:                                 "invalid" : "reserved"))),
                   6653:                       (te & ~0x3) << SRMMU_PPNPASHIFT,
                   6654:                       pmap_kernel()->pm_regmap[i].rg_seg_ptps);
                   6655:        }
1.66      christos 6656:        printf("Press q to halt...\n");
1.55      pk       6657:        if (cngetc()=='q')
                   6658:            callrom();
                   6659: }
                   6660:
                   6661: static u_int
                   6662: VA2PAsw(ctx, addr, pte)
                   6663:        register int ctx;
                   6664:        register caddr_t addr;
                   6665:        int *pte;
                   6666: {
                   6667:        register int *curtbl;
                   6668:        register int curpte;
                   6669:
                   6670: #ifdef EXTREME_EXTREME_DEBUG
1.66      christos 6671:        printf("Looking up addr 0x%x in context 0x%x\n",addr,ctx);
1.55      pk       6672: #endif
                   6673:        /* L0 */
1.69      pk       6674:        *pte = curpte = cpuinfo.ctx_tbl[ctx];
1.55      pk       6675: #ifdef EXTREME_EXTREME_DEBUG
1.66      christos 6676:        printf("Got L0 pte 0x%x\n",pte);
1.55      pk       6677: #endif
                   6678:        if ((curpte & SRMMU_TETYPE) == SRMMU_TEPTE) {
                   6679:                return (((curpte & SRMMU_PPNMASK) << SRMMU_PPNPASHIFT) |
                   6680:                        ((u_int)addr & 0xffffffff));
                   6681:        }
                   6682:        if ((curpte & SRMMU_TETYPE) != SRMMU_TEPTD) {
1.66      christos 6683:                printf("Bad context table entry 0x%x for context 0x%x\n",
1.55      pk       6684:                       curpte, ctx);
                   6685:                return 0;
                   6686:        }
                   6687:        /* L1 */
                   6688:        curtbl = ((curpte & ~0x3) << 4) | (0xf8 << RGSHIFT); /* correct for krn*/
                   6689:        *pte = curpte = curtbl[VA_VREG(addr)];
                   6690: #ifdef EXTREME_EXTREME_DEBUG
1.66      christos 6691:        printf("L1 table at 0x%x.\nGot L1 pte 0x%x\n",curtbl,curpte);
1.55      pk       6692: #endif
                   6693:        if ((curpte & SRMMU_TETYPE) == SRMMU_TEPTE)
                   6694:            return (((curpte & SRMMU_PPNMASK) << SRMMU_PPNPASHIFT) |
                   6695:                    ((u_int)addr & 0xffffff));
                   6696:        if ((curpte & SRMMU_TETYPE) != SRMMU_TEPTD) {
1.66      christos 6697:                printf("Bad region table entry 0x%x for region 0x%x\n",
1.55      pk       6698:                       curpte, VA_VREG(addr));
                   6699:                return 0;
                   6700:        }
                   6701:        /* L2 */
                    6702:        curtbl = ((curpte & ~0x3) << 4) | (0xf8 << RGSHIFT); /* table PA -> kernel VA (region 0xf8) */
                   6703:        *pte = curpte = curtbl[VA_VSEG(addr)];
                   6704: #ifdef EXTREME_EXTREME_DEBUG
1.66      christos 6705:        printf("L2 table at 0x%x.\nGot L2 pte 0x%x\n",curtbl,curpte);
1.55      pk       6706: #endif
                   6707:        if ((curpte & SRMMU_TETYPE) == SRMMU_TEPTE)
                   6708:            return (((curpte & SRMMU_PPNMASK) << SRMMU_PPNPASHIFT) |
                   6709:                    ((u_int)addr & 0x3ffff));
                   6710:        if ((curpte & SRMMU_TETYPE) != SRMMU_TEPTD) {
1.66      christos 6711:                printf("Bad segment table entry 0x%x for reg 0x%x, seg 0x%x\n",
1.55      pk       6712:                       curpte, VA_VREG(addr), VA_VSEG(addr));
                   6713:                return 0;
                   6714:        }
                   6715:        /* L3 */
                   6716:        curtbl = (int *)(((curpte & ~0x3) << 4) | (0xf8 << RGSHIFT)); /* phys -> kernel VA */
                   6717:        *pte = curpte = curtbl[VA_VPG(addr)];
                   6718: #ifdef EXTREME_EXTREME_DEBUG
1.66      christos 6719:        printf("L3 table at 0x%x.\nGot L3 pte 0x%x\n", curtbl, curpte);
1.55      pk       6720: #endif
                   6721:        if ((curpte & SRMMU_TETYPE) == SRMMU_TEPTE)
                   6722:            return (((curpte & SRMMU_PPNMASK) << SRMMU_PPNPASHIFT) |
                   6723:                    ((u_int)addr & 0xfff));
1.66      christos 6724:        printf("Bad L3 pte 0x%x for reg 0x%x, seg 0x%x, pg 0x%x\n",
1.55      pk       6725:               curpte, VA_VREG(addr), VA_VSEG(addr), VA_VPG(addr));
                   6726:        return 0;
1.55      pk       6730: }
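                         
                         /*
                          * Illustrative sketch only: one way to use VA2PAsw() to
                          * cross-check a single translation against the hardware probe
                          * done by VA2PA().  The guard macro and function name here are
                          * hypothetical, not part of the surrounding code.
                          */
                         #ifdef DEBUG_VA2PA_EXAMPLE
                         static void
                         check_one_va(addr)
                                 register caddr_t addr;
                         {
                                 int ptesw;
                                 u_int pa_sw, pa_hw;
                         
                                 pa_sw = VA2PAsw(0, addr, &ptesw); /* software walk, context 0 */
                                 pa_hw = VA2PA(addr);              /* hardware table-walk probe */
                                 if (pa_sw != pa_hw)
                                         printf("VA 0x%x: sw pa 0x%x != hw pa 0x%x (pte 0x%x)\n",
                                                addr, pa_sw, pa_hw, ptesw);
                         }
                         #endif /* DEBUG_VA2PA_EXAMPLE */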
                   6731:
                   6732: void test_region(reg, start, stop)
                   6733:        register int reg;
                   6734:        register int start, stop;
                   6735: {
                   6736:        register int i;
                   6737:        register int addr;
                   6738:        register int pte;
                   6739:        int ptesw;
                   6740: /*     int cnt=0;
                   6741: */
                   6742:
                   6743:        for (i = start; i < stop; i += NBPG) {
                   6744:                addr = (reg << RGSHIFT) | i;
                   6745:                pte = lda(((u_int)addr) | ASI_SRMMUFP_LN, ASI_SRMMUFP);
                   6746:                if (pte) {
1.66      christos 6747: /*                     printf("Valid address 0x%x\n",addr);
1.55      pk       6748:                        if (++cnt == 20) {
                   6749:                                cngetc();
                   6750:                                cnt=0;
                   6751:                        }
                   6752: */
                   6753:                        if (VA2PA(addr) != VA2PAsw(0, addr, &ptesw)) {
1.66      christos 6754:                                printf("Mismatch at address 0x%x.\n", addr);
1.55      pk       6755:                                if (cngetc() == 'q') break;
                   6756:                        }
                   6757:                        if (reg == 0xf8) /* kernel permissions are different */
                   6758:                            continue;
                   6759:                        if ((pte & SRMMU_PROT_MASK) != (ptesw & SRMMU_PROT_MASK)) {
1.66      christos 6760:                                printf("Mismatched protections at address "
1.55      pk       6761:                                       "0x%x; pte=0x%x, ptesw=0x%x\n",
                   6762:                                       addr, pte, ptesw);
                   6763:                                if (cngetc() == 'q') break;
                   6764:                        }
                   6765:                }
                   6766:        }
1.66      christos 6767:        printf("done.\n");
1.46      pk       6768: }
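                         
                         /*
                          * Illustrative note: test_region() is meant to be driven by hand
                          * while debugging; e.g.
                          *
                          *      test_region(0xf8, 0, 16*1024*1024);
                          *
                          * compares the hardware and software translations for every page
                          * of the 16MB kernel region (the region number is shifted left
                          * by RGSHIFT to form the address, so 0xf8 covers 0xf8000000).
                          */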
1.55      pk       6769:
                   6770:
                   6771: void print_fe_map(void)
                   6772: {
                   6773:        u_int i, pte;
                   6774:
1.66      christos 6775:        printf("map of region 0xfe:\n");
1.55      pk       6776:        for (i = 0xfe000000; i < 0xff000000; i += 4096) {
                   6777:                if (((pte = getpte4m(i)) & SRMMU_TETYPE) != SRMMU_TEPTE)
                   6778:                    continue;
1.66      christos 6779:                printf("0x%x -> 0x%x%08x (pte 0x%x)\n", i, pte >> 28,
1.55      pk       6780:                       (pte & ~0xff) << 4, pte);
                   6781:        }
1.66      christos 6782:        printf("done\n");
1.55      pk       6783: }
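                         
                         /*
                          * Note: the two hex fields printed above together form a 36-bit
                          * physical address: `pte >> 28' supplies bits 35-32 and
                          * `(pte & ~0xff) << 4' (the PPN shifted into place) supplies
                          * bits 31-0.
                          */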
                   6784:
                   6785: #endif
