
Annotation of src/sys/arch/sparc/sparc/pmap.c, Revision 1.348.6.3

1.348.6.3! tls         1: /*     $NetBSD$ */
1.22      deraadt     2:
1.1       deraadt     3: /*
1.55      pk          4:  * Copyright (c) 1996
1.57      abrown      5:  *     The President and Fellows of Harvard College. All rights reserved.
1.1       deraadt     6:  * Copyright (c) 1992, 1993
                      7:  *     The Regents of the University of California.  All rights reserved.
                      8:  *
                      9:  * This software was developed by the Computer Systems Engineering group
                     10:  * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
                     11:  * contributed to Berkeley.
                     12:  *
                     13:  * All advertising materials mentioning features or use of this software
                     14:  * must display the following acknowledgement:
1.55      pk         15:  *     This product includes software developed by Harvard University.
1.1       deraadt    16:  *     This product includes software developed by the University of
                     17:  *     California, Lawrence Berkeley Laboratory.
                     18:  *
                     19:  * Redistribution and use in source and binary forms, with or without
                     20:  * modification, are permitted provided that the following conditions
                     21:  * are met:
1.55      pk         22:  *
1.1       deraadt    23:  * 1. Redistributions of source code must retain the above copyright
                     24:  *    notice, this list of conditions and the following disclaimer.
                     25:  * 2. Redistributions in binary form must reproduce the above copyright
                     26:  *    notice, this list of conditions and the following disclaimer in the
                     27:  *    documentation and/or other materials provided with the distribution.
                     28:  * 3. All advertising materials mentioning features or use of this software
                     29:  *    must display the following acknowledgement:
1.55      pk         30:  *     This product includes software developed by Aaron Brown and
                     31:  *     Harvard University.
                     32:  *      This product includes software developed by the University of
                     33:  *      California, Berkeley and its contributors.
1.1       deraadt    34:  * 4. Neither the name of the University nor the names of its contributors
                     35:  *    may be used to endorse or promote products derived from this software
                     36:  *    without specific prior written permission.
                     37:  *
                     38:  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
                     39:  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
                     40:  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
                     41:  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
                     42:  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
                     43:  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
                     44:  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
                     45:  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
                     46:  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
                     47:  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
                     48:  * SUCH DAMAGE.
                     49:  *
1.22      deraadt    50:  *     @(#)pmap.c      8.4 (Berkeley) 2/5/94
1.55      pk         51:  *
1.1       deraadt    52:  */
                     53:
                     54: /*
                     55:  * SPARC physical map management code.
                     56:  */
1.263     lukem      57:
                     58: #include <sys/cdefs.h>
1.348.6.3! tls        59: __KERNEL_RCSID(0, "$NetBSD$");
1.112     mrg        60:
1.119     jonathan   61: #include "opt_ddb.h"
1.192     pk         62: #include "opt_kgdb.h"
1.200     darrenr    63: #include "opt_sparc_arch.h"
1.1       deraadt    64:
                     65: #include <sys/param.h>
                     66: #include <sys/systm.h>
                     67: #include <sys/device.h>
                     68: #include <sys/proc.h>
1.43      pk         69: #include <sys/queue.h>
1.121     pk         70: #include <sys/pool.h>
1.67      pk         71: #include <sys/exec.h>
                     72: #include <sys/core.h>
                     73: #include <sys/kcore.h>
1.195     mrg        74: #include <sys/kernel.h>
1.322     ad         75: #include <sys/atomic.h>
1.1       deraadt    76:
1.332     matt       77: #include <sys/exec_aout.h>             /* for MID_* */
                     78:
1.110     mrg        79: #include <uvm/uvm.h>
                     80:
1.1       deraadt    81: #include <machine/autoconf.h>
                     82: #include <machine/bsd_openprom.h>
1.19      deraadt    83: #include <machine/oldmon.h>
1.1       deraadt    84: #include <machine/cpu.h>
                     85: #include <machine/ctlreg.h>
1.67      pk         86: #include <machine/kcore.h>
1.1       deraadt    87:
                     88: #include <sparc/sparc/asm.h>
                     89: #include <sparc/sparc/cache.h>
1.3       deraadt    90: #include <sparc/sparc/vaddrs.h>
1.69      pk         91: #include <sparc/sparc/cpuvar.h>
1.1       deraadt    92:
                     93: /*
                     94:  * The SPARCstation offers us the following challenges:
                     95:  *
                     96:  *   1. A virtual address cache.  This is, strictly speaking, not
                     97:  *     part of the architecture, but the code below assumes one.
                     98:  *     This is a write-through cache on the 4c and a write-back cache
                     99:  *     on others.
                    100:  *
1.55      pk        101:  *   2. (4/4c only) An MMU that acts like a cache.  There is not enough
                    102:  *     space in the MMU to map everything all the time.  Instead, we need
1.1       deraadt   103:  *     to load the MMU with the `working set' of translations for each
1.55      pk        104:  *     process. The sun4m does not act like a cache; tables are maintained
                    105:  *     in physical memory.
1.1       deraadt   106:  *
                    107:  *   3.        Segmented virtual and physical spaces.  The upper 12 bits of
                    108:  *     a virtual address (the virtual segment) index a segment table,
                    109:  *     giving a physical segment.  The physical segment selects a
                    110:  *     `Page Map Entry Group' (PMEG) and the virtual page number---the
                    111:  *     next 5 or 6 bits of the virtual address---select the particular
                    112:  *     `Page Map Entry' for the page.  We call the latter a PTE and
                    113:  *     call each Page Map Entry Group a pmeg (for want of a better name).
1.55      pk        114:  *     Note that the sun4m has an unsegmented 36-bit physical space.
1.1       deraadt   115:  *
                    116:  *     Since there are no valid bits in the segment table, the only way
                    117:  *     to have an invalid segment is to make one full pmeg of invalid PTEs.
1.55      pk        118:  *     We use the last one (since the ROM does as well) (sun4/4c only).
1.1       deraadt   119:  *
                    120:  *   4. Discontiguous physical pages.  The Mach VM expects physical pages
                    121:  *     to be in one sequential lump.
                    122:  *
                    123:  *   5. The MMU is always on: it is not possible to disable it.  This is
                    124:  *     mainly a startup hassle.
                    125:  */
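
/*
 * Editor's sketch (not in the original source): how a virtual address
 * decomposes under the segmented model described above, using the
 * VA_VREG/VA_VSEG/VA_VPG macros from <sparc/pmap.h> that the rest of
 * this file relies on.
 */
#if 0	/* illustrative only */
void
va_decompose_example(vaddr_t va)
{
	int vr = VA_VREG(va);	/* virtual region: indexes pm_regmap[] */
	int vs = VA_VSEG(va);	/* virtual segment within that region */
	int pg = VA_VPG(va);	/* page within the segment's PMEG */

	printf("va=%lx -> region %d, segment %d, page %d\n", va, vr, vs, pg);
}
#endif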
                    126:
                    127: struct pmap_stats {
                    128:        int     ps_unlink_pvfirst;      /* # of pv_unlinks on head */
                    129:        int     ps_unlink_pvsearch;     /* # of pv_unlink searches */
                    130:        int     ps_changeprots;         /* # of calls to changeprot */
                    131:        int     ps_enter_firstpv;       /* pv heads entered */
                    132:        int     ps_enter_secondpv;      /* pv nonheads entered */
                    133:        int     ps_useless_changewire;  /* useless wiring changes */
                    134:        int     ps_npg_prot_all;        /* # of active pages protected */
                    135:        int     ps_npg_prot_actual;     /* # pages actually affected */
1.70      pk        136:        int     ps_npmeg_free;          /* # of free pmegs */
                    137:        int     ps_npmeg_locked;        /* # of pmegs on locked list */
                    138:        int     ps_npmeg_lru;           /* # of pmegs on lru list */
1.1       deraadt   139: } pmap_stats;
                    140:
1.294     pk        141: #if defined(SUN4) || defined(SUN4C)
                    142: struct evcnt mmu_stolenpmegs_evcnt =
                    143:        EVCNT_INITIALIZER(EVCNT_TYPE_INTR,0,"mmu","stln pmgs");
                    144: EVCNT_ATTACH_STATIC(mmu_stolenpmegs_evcnt);
                    145:
                    146: struct evcnt mmu_pagein_evcnt =
                    147:        EVCNT_INITIALIZER(EVCNT_TYPE_INTR,0,"mmu","pagein");
                    148: EVCNT_ATTACH_STATIC(mmu_pagein_evcnt);
                    149: #endif /* SUN4 || SUN4C */
                    150:
1.1       deraadt   151: #ifdef DEBUG
                    152: #define        PDB_CREATE      0x0001
                    153: #define        PDB_DESTROY     0x0002
                    154: #define        PDB_REMOVE      0x0004
                    155: #define        PDB_CHANGEPROT  0x0008
                    156: #define        PDB_ENTER       0x0010
1.90      pk        157: #define        PDB_FOLLOW      0x0020
1.1       deraadt   158:
                    159: #define        PDB_MMU_ALLOC   0x0100
                    160: #define        PDB_MMU_STEAL   0x0200
                    161: #define        PDB_CTX_ALLOC   0x0400
                    162: #define        PDB_CTX_STEAL   0x0800
1.43      pk        163: #define        PDB_MMUREG_ALLOC        0x1000
                    164: #define        PDB_MMUREG_STEAL        0x2000
1.55      pk        165: #define        PDB_CACHESTUFF  0x4000
1.72      pk        166: #define        PDB_SWITCHMAP   0x8000
                    167: #define        PDB_SANITYCHK   0x10000
1.55      pk        168: int    pmapdebug = 0;
1.1       deraadt   169: #endif
                    170:
                    171: /*
1.181     pk        172:  * Bounds on managed physical addresses. Used by (MD) users
                    173:  * of uvm_pglistalloc() to provide search hints.
1.1       deraadt   174:  */
1.181     pk        175: paddr_t        vm_first_phys = (paddr_t)-1;
                    176: paddr_t        vm_last_phys = 0;
                    177: psize_t vm_num_phys;
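
/*
 * Editor's sketch: an MD caller can pass the bounds above to
 * uvm_pglistalloc() as search hints when it needs contiguous
 * physical pages:
 */
#if 0	/* illustrative only */
	struct pglist mlist;
	int error;

	error = uvm_pglistalloc(PAGE_SIZE, vm_first_phys,
	    vm_first_phys + vm_num_phys, 0, 0, &mlist, 1, 0);
#endif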
1.1       deraadt   178:
1.322     ad        179: #define        PMAP_LOCK()     KERNEL_LOCK(1, NULL)
                    180: #define        PMAP_UNLOCK()   KERNEL_UNLOCK_ONE(NULL)
1.242     pk        181:
                    182: /*
1.236     pk        183:  * Flags in pvlist.pv_flags.  Note that PV_MOD must be 1 and PV_REF must be 2
1.1       deraadt   184:  * since they must line up with the bits in the hardware PTEs (see pte.h).
1.115     pk        185:  * SUN4M bits are at a slightly different location in the PTE.
1.236     pk        186:  *
1.115     pk        187:  * Note: the REF, MOD and ANC flag bits occur only in the head of a pvlist.
1.182     pk        188:  * The NC bit is meaningful in each individual pv entry and reflects the
                    189:  * requested non-cacheability at the time the entry was made through
                    190:  * pv_link() or when subsequently altered by kvm_uncache() (but the latter
                    191:  * does not happen in kernels as of the time of this writing (March 2001)).
1.115     pk        192:  */
                    193: #define PV_MOD         1       /* page modified */
                    194: #define PV_REF         2       /* page referenced */
                    195: #define PV_NC          4       /* page cannot be cached */
                    196: #define PV_REF4M       1       /* page referenced (SRMMU) */
                    197: #define PV_MOD4M       2       /* page modified (SRMMU) */
                    198: #define PV_ANC         0x10    /* page has incongruent aliases */
1.1       deraadt   199:
1.181     pk        200: static struct pool pv_pool;
1.1       deraadt   201:
1.236     pk        202: /*
                    203:  * pvhead(pte): find a VM page given a PTE entry.
                    204:  */
                    205: #if defined(SUN4) || defined(SUN4C)
1.303     uwe       206: static struct vm_page *
                    207: pvhead4_4c(u_int pte)
1.181     pk        208: {
1.236     pk        209:        paddr_t pa = (pte & PG_PFNUM) << PGSHIFT;
1.181     pk        210:
1.236     pk        211:        return (PHYS_TO_VM_PAGE(pa));
                    212: }
1.181     pk        213: #endif
1.177     pk        214:
1.236     pk        215: #if defined(SUN4M) || defined(SUN4D)
1.303     uwe       216: static struct vm_page *
                    217: pvhead4m(u_int pte)
1.236     pk        218: {
                    219:        paddr_t pa = (pte & SRMMU_PPNMASK) << SRMMU_PPNPASHIFT;
1.122     pk        220:
1.236     pk        221:        return (PHYS_TO_VM_PAGE(pa));
1.181     pk        222: }
1.236     pk        223: #endif
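
/*
 * Example use of the pvhead helpers above (editor's note):
 * PHYS_TO_VM_PAGE() yields NULL for unmanaged physical addresses
 * (e.g. device space), so callers must be prepared for that.
 */
#if 0	/* illustrative only */
	struct vm_page *pg = pvhead4m(pte);

	if (pg == NULL)
		return;		/* unmanaged page: no pv tracking to do */
#endif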
1.122     pk        224:
1.1       deraadt   225: /*
                    226:  * Each virtual segment within each pmap is either valid or invalid.
                    227:  * It is valid if pm_npte[VA_VSEG(va)] is not 0.  This does not mean
                    228:  * it is in the MMU, however; that is true iff pm_segmap[VA_VSEG(va)]
                    229:  * does not point to the invalid PMEG.
                    230:  *
1.203     pk        231:  * In the older SPARC architectures (sun4/sun4c), page tables are cached in
                    232:  * the MMU. The following discussion applies to these architectures:
1.55      pk        233:  *
1.1       deraadt   234:  * If a virtual segment is valid and loaded, the correct PTEs appear
                    235:  * in the MMU only.  If it is valid and unloaded, the correct PTEs appear
                    236:  * in the pm_pte[VA_VSEG(va)] only.  However, some effort is made to keep
                    237:  * the software copies consistent enough with the MMU so that libkvm can
                    238:  * do user address translations.  In particular, pv_changepte() and
                    239:  * pmap_enu() maintain consistency, while less critical changes are
                    240:  * not maintained.  pm_pte[VA_VSEG(va)] always points to space for those
1.265     pk        241:  * PTEs.
1.1       deraadt   242:  *
                    243:  * Each PMEG in the MMU is either free or contains PTEs corresponding to
                    244:  * some pmap and virtual segment.  If it contains some PTEs, it also contains
                    245:  * reference and modify bits that belong in the pv_table.  If we need
                    246:  * to steal a PMEG from some process (if we need one and none are free)
                    247:  * we must copy the ref and mod bits, and update pm_segmap in the other
                    248:  * pmap to show that its virtual segment is no longer in the MMU.
                    249:  *
                    250:  * There are 128 PMEGs in a small Sun-4, of which only a few dozen are
                    251:  * tied down permanently, leaving `about' 100 to be spread among
                    252:  * running processes.  These are managed as an LRU cache.  Before
                    253:  * calling the VM paging code for a user page fault, the fault handler
                    254:  * calls mmu_load(pmap, va) to try to get a set of PTEs put into the
                    255:  * MMU.  mmu_load will check the validity of the segment and tell whether
                    256:  * it did something.
                    257:  *
                    258:  * Since I hate the name PMEG I call this data structure an `mmu entry'.
                    259:  * Each mmuentry is on exactly one of three `usage' lists: free, LRU,
1.265     pk        260:  * or locked.  The locked list is only used for kernel mappings that need
                    261:  * to be wired down.
1.55      pk        262:  *
1.203     pk        263:  *
1.55      pk        264:  * In the sun4m architecture using the SPARC Reference MMU (SRMMU), three
                    265:  * levels of page tables are maintained in physical memory. We use the same
                    266:  * structures as with the 3-level old-style MMU (pm_regmap, pm_segmap,
                    267:  * rg_segmap, sg_pte, etc) to maintain kernel-edible page tables; we also
                    268:  * build a parallel set of physical tables that can be used by the MMU.
                    269:  * (XXX: This seems redundant, but is it necessary for the unified kernel?)
                    270:  *
                    271:  * If a virtual segment is valid, its entries will be in both parallel lists.
                    272:  * If it is not valid, then its entry in the kernel tables will be zero, and
                    273:  * its entry in the MMU tables will either be nonexistent or zero as well.
1.72      pk        274:  *
                    275:  * The Reference MMU generally uses a Translation Look-aside Buffer (TLB)
                    276:  * to cache the result of recently executed page table walks. When
                    277:  * manipulating page tables, we need to ensure consistency of the
                    278:  * in-memory and TLB copies of the page table entries. This is handled
                    279:  * by flushing (and invalidating) a TLB entry when appropriate before
                    280:  * altering an in-memory page table entry.
1.1       deraadt   281:  */
                    282: struct mmuentry {
1.348.6.3! tls       283:        struct {
        !           284:            struct mmuentry *prev, *next;
        !           285:        }                       me_list;        /* usage list link */
1.43      pk        286:        TAILQ_ENTRY(mmuentry)   me_pmchain;     /* pmap owner link */
1.1       deraadt   287:        struct  pmap *me_pmap;          /* pmap, if in use */
1.43      pk        288:        u_short me_vreg;                /* associated virtual region */
                    289:        u_short me_vseg;                /* associated virtual segment */
1.45      pk        290:        u_short me_cookie;              /* hardware SMEG/PMEG number */
1.265     pk        291: #ifdef DIAGNOSTIC
                    292:        int *me_statp;/*XXX*/
                    293: #endif
1.1       deraadt   294: };
1.43      pk        295: struct mmuentry *mmusegments;  /* allocated in pmap_bootstrap */
                    296: struct mmuentry *mmuregions;   /* allocated in pmap_bootstrap */
1.1       deraadt   297:
1.348.6.3! tls       298: #if defined(SUN4) || defined(SUN4C)
        !           299: struct mmuentry segm_freelist, segm_lru, segm_locked;
        !           300: #if defined(SUN4_MMU3L)
        !           301: struct mmuentry region_freelist, region_lru, region_locked;
        !           302: #endif
        !           303: /*
         !           304:  * We use a doubly-linked list looping through its static head (which
         !           305:  * always remains on the list), so we can remove any other member from
        !           306:  * a list without knowing which list it is on.
        !           307:  */
         !           308: static inline void
        !           309: mmuq_remove(struct mmuentry *e)
        !           310: {
        !           311:        e->me_list.next->me_list.prev = e->me_list.prev;
        !           312:        e->me_list.prev->me_list.next = e->me_list.next;
        !           313: }
        !           314:
         !           315: static inline void
        !           316: mmuq_init(struct mmuentry *e)
        !           317: {
        !           318:        memset(e, 0, sizeof(*e));
        !           319:        e->me_list.next = e;
        !           320:        e->me_list.prev = e;
        !           321: }
        !           322:
        !           323: static inline struct mmuentry *
        !           324: mmuq_first(struct mmuentry *head)
        !           325: {
        !           326:        KASSERT(head->me_list.next != head);
        !           327:        return head->me_list.next;
        !           328: }
        !           329:
        !           330: static inline bool
        !           331: mmuq_empty(struct mmuentry *head)
        !           332: {
        !           333:        return head->me_list.next == head;
        !           334: }
        !           335:
        !           336: static inline void
        !           337: mmuq_insert_tail(struct mmuentry *head, struct mmuentry *e)
        !           338: {
        !           339:        e->me_list.prev = head->me_list.prev;
        !           340:        e->me_list.next = head;
        !           341:        head->me_list.prev->me_list.next = e;
        !           342:        head->me_list.prev = e;
        !           343: }
        !           344: #endif
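
/*
 * Example (editor's sketch): with the helpers above, wiring a pmeg down
 * is an unlink from whatever list it currently sits on, followed by an
 * insert at the tail of the locked list:
 */
#if 0	/* illustrative only */
	if (!mmuq_empty(&segm_lru)) {
		struct mmuentry *me = mmuq_first(&segm_lru);

		mmuq_remove(me);		/* list membership not needed */
		mmuq_insert_tail(&segm_locked, me);
	}
#endif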
1.265     pk        345:
1.1       deraadt   346:
1.69      pk        347: int    seginval;               /* [4/4c] the invalid segment number */
                    348: int    reginval;               /* [4/3mmu] the invalid region number */
1.1       deraadt   349:
1.322     ad        350: static kmutex_t demap_lock;
1.340     martin    351: static bool    lock_available = false; /* demap_lock has been initialized */
1.322     ad        352:
1.1       deraadt   353: /*
1.55      pk        354:  * (sun4/4c)
1.1       deraadt   355:  * A context is simply a small number that dictates which set of 4096
1.203     pk        356:  * segment map entries the MMU uses.  The Sun 4c has eight (SS1,IPC) or
                    357:  * sixteen (SS2,IPX) such sets. These are allotted in an `almost MRU' fashion.
1.55      pk        358:  * (sun4m)
                    359:  * A context is simply a small number that indexes the context table, the
                    360:  * root-level page table mapping 4G areas. Each entry in this table points
                    361:  * to a 1st-level region table. A SPARC reference MMU will usually use 16
                    362:  * such contexts, but some offer as many as 64k contexts; the theoretical
                    363:  * maximum is 2^32 - 1, but this would create overlarge context tables.
1.1       deraadt   364:  *
                    365:  * Each context is either free or attached to a pmap.
                    366:  *
                    367:  * Since the virtual address cache is tagged by context, when we steal
                    368:  * a context we have to flush (that part of) the cache.
                    369:  */
                    370: union ctxinfo {
                    371:        union   ctxinfo *c_nextfree;    /* free list (if free) */
                    372:        struct  pmap *c_pmap;           /* pmap (if busy) */
                    373: };
1.69      pk        374:
1.322     ad        375: static kmutex_t        ctx_lock;               /* lock for below */
1.193     mrg       376: union  ctxinfo *ctxinfo;               /* allocated in pmap_bootstrap */
                    377: union  ctxinfo *ctx_freelist;          /* context free list */
                    378: int    ctx_kick;                       /* allocation rover when none free */
                    379: int    ctx_kickdir;                    /* ctx_kick roves both directions */
                    380: int    ncontext;                       /* sizeof ctx_freelist */
1.69      pk        381:
1.246     pk        382: void   ctx_alloc(struct pmap *);
                    383: void   ctx_free(struct pmap *);
1.122     pk        384:
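/*
 * Editor's sketch of the free-list discipline implied by union ctxinfo:
 * ctx_alloc() (defined later in this file) pops a free context roughly
 * as follows when one is available; details are simplified, and `pm' is
 * the pmap acquiring the context.
 */
#if 0	/* illustrative only */
	union ctxinfo *c;

	mutex_spin_enter(&ctx_lock);
	if ((c = ctx_freelist) != NULL) {
		ctx_freelist = c->c_nextfree;
		c->c_pmap = pm;		/* context is now busy */
	}
	mutex_spin_exit(&ctx_lock);
#endif
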
1.311     christos  385: /*void *       vdumppages;     -* 32KB worth of reserved dump pages */
1.1       deraadt   386:
1.69      pk        387: smeg_t         tregion;        /* [4/3mmu] Region for temporary mappings */
                    388:
1.323     pooka     389: static struct pmap     kernel_pmap_store;      /* the kernel's pmap */
1.324     pooka     390: struct pmap *const kernel_pmap_ptr = &kernel_pmap_store; /* pmap_kernel() */
1.43      pk        391: struct regmap  kernel_regmap_store[NKREG];     /* the kernel's regmap */
                    392: struct segmap  kernel_segmap_store[NKREG*NSEGRG];/* the kernel's segmaps */
1.1       deraadt   393:
1.210     thorpej   394: #if defined(SUN4M) || defined(SUN4D)
1.55      pk        395: u_int  *kernel_regtable_store;         /* 1k of storage to map the kernel */
                    396: u_int  *kernel_segtable_store;         /* 2k of storage to map the kernel */
                    397: u_int  *kernel_pagtable_store;         /* 128k of storage to map the kernel */
                    398:
1.121     pk        399: /*
                    400:  * Memory pools and back-end supplier for SRMMU page tables.
                    401:  * Share a pool between the level 2 and level 3 page tables,
                    402:  * since these are equal in size.
                    403:  */
                    404: static struct pool L1_pool;
                    405: static struct pool L23_pool;
                    406:
1.246     pk        407: static void *pgt_page_alloc(struct pool *, int);
                    408: static void  pgt_page_free(struct pool *, void *);
1.206     thorpej   409:
                    410: static struct pool_allocator pgt_page_allocator = {
                    411:        pgt_page_alloc, pgt_page_free, 0,
                    412: };
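
/*
 * Editor's note: these pools are wired to the back-end above during
 * bootstrap; the calls look roughly like the sketch below (sizes and
 * wait-channel strings are illustrative, not authoritative).
 */
#if 0	/* illustrative only */
	pool_init(&L1_pool, SRMMU_L1SIZE * sizeof(u_int),
	    SRMMU_L1SIZE * sizeof(u_int), 0, 0, "L1 pagetable",
	    &pgt_page_allocator, IPL_NONE);
	pool_init(&L23_pool, SRMMU_L2SIZE * sizeof(u_int),
	    SRMMU_L2SIZE * sizeof(u_int), 0, 0, "L2/L3 pagetable",
	    &pgt_page_allocator, IPL_NONE);
#endif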
1.121     pk        413:
1.265     pk        414: #endif /* SUN4M || SUN4D */
                    415:
                    416: #if defined(SUN4) || defined(SUN4C)
                    417: /*
                    418:  * Memory pool for user and kernel PTE tables.
                    419:  */
                    420: static struct pool pte_pool;
1.55      pk        421: #endif
                    422:
1.246     pk        423: struct memarr *pmemarr;        /* physical memory regions */
1.1       deraadt   424: int    npmemarr;               /* number of entries in pmemarr */
1.181     pk        425:
                    426: static paddr_t avail_start;    /* first available physical page, other
                    427:                                   than the `etext gap' defined below */
                    428: static vaddr_t etext_gap_start;/* start of gap between text & data */
                    429: static vaddr_t etext_gap_end;  /* end of gap between text & data */
1.253     thorpej   430: static vaddr_t virtual_avail;  /* first free kernel virtual address */
                    431: static vaddr_t virtual_end;    /* last free kernel virtual address */
1.29      pk        432:
1.246     pk        433: static void pmap_page_upload(void);
1.107     pk        434:
1.45      pk        435: int mmu_has_hole;
                    436:
1.124     pk        437: vaddr_t prom_vstart;   /* For /dev/kmem */
                    438: vaddr_t prom_vend;
1.1       deraadt   439:
1.134     thorpej   440: /*
                    441:  * Memory pool for pmap structures.
                    442:  */
1.320     ad        443: static struct pool_cache pmap_cache;
1.246     pk        444: static int     pmap_pmap_pool_ctor(void *, void *, int);
                    445: static void    pmap_pmap_pool_dtor(void *, void *);
1.265     pk        446: static struct pool segmap_pool;
1.134     thorpej   447:
1.55      pk        448: #if defined(SUN4)
1.31      pk        449: /*
1.55      pk        450:  * [sun4]: segfixmask: on some systems (4/110) "getsegmap()" returns a
                    451:  * partly invalid value. getsegmap returns a 16 bit value on the sun4,
                    452:  * but only the first 8 or so bits are valid (the rest are *supposed* to
                    453:  * be zero). On the 4/110 the bits that are supposed to be zero are
                    454:  * all one instead. e.g. KERNBASE is usually mapped by pmeg number zero.
                    455:  * On a 4/300 getsegmap(KERNBASE) == 0x0000, but
1.31      pk        456:  * on a 4/100 getsegmap(KERNBASE) == 0xff00
                    457:  *
1.55      pk        458:  * This confuses mmu_reservemon() and causes it to not reserve the PROM's
                    459:  * pmegs. Then the PROM's pmegs get used during autoconfig and everything
1.31      pk        460:  * falls apart!  (not very fun to debug, BTW.)
                    461:  *
1.43      pk        462:  * solution: mask the invalid bits in the getsegmap macro.
1.31      pk        463:  */
                    464:
1.273     hannken   465: static u_int segfixmask = 0xffffffff; /* all bits valid to start */
1.55      pk        466: #else
                    467: #define segfixmask 0xffffffff  /* It's in getsegmap's scope */
1.31      pk        468: #endif
                    469:
1.1       deraadt   470: /*
                    471:  * pseudo-functions for mnemonic value
                    472:  */
1.55      pk        473: #define        getsegmap(va)           (CPU_ISSUN4C \
                    474:                                        ? lduba(va, ASI_SEGMAP) \
                    475:                                        : (lduha(va, ASI_SEGMAP) & segfixmask))
                    476: #define        setsegmap(va, pmeg)     (CPU_ISSUN4C \
                    477:                                        ? stba(va, ASI_SEGMAP, pmeg) \
                    478:                                        : stha(va, ASI_SEGMAP, pmeg))
                    479:
                    480: /* 3-level sun4 MMU only: */
                    481: #define        getregmap(va)           ((unsigned)lduha((va)+2, ASI_REGMAP) >> 8)
                    482: #define        setregmap(va, smeg)     stha((va)+2, ASI_REGMAP, (smeg << 8))
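
/*
 * Example (editor's sketch): unmapping a kernel segment on sun4/4c
 * means pointing it at the invalid PMEG, using the macros above:
 */
#if 0	/* illustrative only */
	int pmeg = getsegmap(va);	/* masked via segfixmask on sun4 */

	setsegmap(va, seginval);	/* segment now maps nothing */
#endif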
                    483:
1.286     pk        484:
1.210     thorpej   485: #if defined(SUN4M) || defined(SUN4D)
1.286     pk        486: #if 0
                    487: #if VM_PROT_READ != 1 || VM_PROT_WRITE != 2 || VM_PROT_EXECUTE != 4
                    488: #error fix protection code translation table
                    489: #endif
                    490: #endif
                    491: /*
                    492:  * Translation table for kernel vs. PTE protection bits.
                    493:  */
                    494: const u_int protection_codes[2][8] = {
                    495:        /* kernel */
                    496:        {
                    497:        PPROT_N_RX,     /* VM_PROT_NONE    | VM_PROT_NONE  | VM_PROT_NONE */
                    498:        PPROT_N_RX,     /* VM_PROT_NONE    | VM_PROT_NONE  | VM_PROT_READ */
                    499:        PPROT_N_RWX,    /* VM_PROT_NONE    | VM_PROT_WRITE | VM_PROT_NONE */
                    500:        PPROT_N_RWX,    /* VM_PROT_NONE    | VM_PROT_WRITE | VM_PROT_READ */
                    501:        PPROT_N_RX,     /* VM_PROT_EXECUTE | VM_PROT_NONE  | VM_PROT_NONE */
                    502:        PPROT_N_RX,     /* VM_PROT_EXECUTE | VM_PROT_NONE  | VM_PROT_READ */
                    503:        PPROT_N_RWX,    /* VM_PROT_EXECUTE | VM_PROT_WRITE | VM_PROT_NONE */
                    504:        PPROT_N_RWX,    /* VM_PROT_EXECUTE | VM_PROT_WRITE | VM_PROT_READ */
                    505:        },
                    506:
                    507:        /* user */
                    508:        {
                    509:        PPROT_N_RX,     /* VM_PROT_NONE    | VM_PROT_NONE  | VM_PROT_NONE */
                    510:        PPROT_R_R,      /* VM_PROT_NONE    | VM_PROT_NONE  | VM_PROT_READ */
                    511:        PPROT_RW_RW,    /* VM_PROT_NONE    | VM_PROT_WRITE | VM_PROT_NONE */
                    512:        PPROT_RW_RW,    /* VM_PROT_NONE    | VM_PROT_WRITE | VM_PROT_READ */
                    513:        PPROT_X_X,      /* VM_PROT_EXECUTE | VM_PROT_NONE  | VM_PROT_NONE */
                    514:        PPROT_RX_RX,    /* VM_PROT_EXECUTE | VM_PROT_NONE  | VM_PROT_READ */
                    515:        PPROT_RWX_RWX,  /* VM_PROT_EXECUTE | VM_PROT_WRITE | VM_PROT_NONE */
                    516:        PPROT_RWX_RWX,  /* VM_PROT_EXECUTE | VM_PROT_WRITE | VM_PROT_READ */
                    517:        }
                    518: };
                    519: #define pte_kprot4m(prot) (protection_codes[0][(prot)])
                    520: #define pte_uprot4m(prot) (protection_codes[1][(prot)])
                    521: #define pte_prot4m(pm, prot) \
                    522:        (protection_codes[(pm) == pmap_kernel() ? 0 : 1][(prot)])
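
/*
 * Example (editorial): VM_PROT_READ|VM_PROT_WRITE indexes slot 3 of the
 * table above, yielding PPROT_N_RWX for the kernel and PPROT_RW_RW for
 * user mappings:
 */
#if 0	/* illustrative only */
	int kbits = pte_kprot4m(VM_PROT_READ | VM_PROT_WRITE); /* PPROT_N_RWX */
	int ubits = pte_uprot4m(VM_PROT_READ | VM_PROT_WRITE); /* PPROT_RW_RW */
#endif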
                    523:
1.282     pk        524: void           setpte4m(vaddr_t va, int pte);
1.246     pk        525: void           setpgt4m(int *ptep, int pte);
                    526: void           setpgt4m_va(vaddr_t, int *, int, int, int, u_int);
1.282     pk        527: int            updatepte4m(vaddr_t, int *, int, int, int, u_int);
1.232     pk        528: #endif /* SUN4M || SUN4D */
                    529:
                    530: #if defined(MULTIPROCESSOR)
                    531: #define PMAP_SET_CPUSET(pmap, cpi)     \
                    532:        (pmap->pm_cpuset |= (1 << (cpi)->ci_cpuid))
                    533: #define PMAP_CLR_CPUSET(pmap, cpi)     \
                    534:        (pmap->pm_cpuset &= ~(1 << (cpi)->ci_cpuid))
                    535: #define PMAP_CPUSET(pmap)              (pmap->pm_cpuset)
                    536: #else
                    537: #define PMAP_SET_CPUSET(pmap, cpi)     /* nothing */
                    538: #define PMAP_CLR_CPUSET(pmap, cpi)     /* nothing */
                    539: #define PMAP_CPUSET(pmap)              1       /* XXX: 1 or 0? */
                    540: #endif /* MULTIPROCESSOR */
1.195     mrg       541:
1.55      pk        542:
                    543: /* Function pointer messiness for supporting multiple sparc architectures
                    544:  * within a single kernel: notice that there are two versions of many of the
                    545:  * functions within this file/module, one for the sun4/sun4c and the other
                    546:  * for the sun4m. For performance reasons (since things like pte bits don't
                    547:  * map nicely between the two architectures), there are separate functions
                    548:  * rather than unified functions which test the cputyp variable. If only
                    549:  * one architecture is being used, then the non-suffixed function calls
                    550:  * are macro-translated into the appropriate xxx4_4c or xxx4m call. If
                    551:  * multiple architectures are defined, the calls translate to (*xxx_p),
                    552:  * i.e. they indirect through function pointers initialized as appropriate
                    553:  * to the run-time architecture in pmap_bootstrap. See also pmap.h.
                    554:  */
                    555:
1.210     thorpej   556: #if defined(SUN4M) || defined(SUN4D)
1.246     pk        557: static void mmu_setup4m_L1(int, struct pmap *);
                    558: static void mmu_setup4m_L2(int, struct regmap *);
                    559: static void  mmu_setup4m_L3(int, struct segmap *);
                    560: /*static*/ void        mmu_reservemon4m(struct pmap *);
                    561:
1.248     pk        562: /*static*/ void pmap_changeprot4m(pmap_t, vaddr_t, vm_prot_t, int);
1.246     pk        563: /*static*/ void pmap_rmk4m(struct pmap *, vaddr_t, vaddr_t, int, int);
                    564: /*static*/ void pmap_rmu4m(struct pmap *, vaddr_t, vaddr_t, int, int);
                    565: /*static*/ int  pmap_enk4m(struct pmap *, vaddr_t, vm_prot_t,
                    566:                                int, struct vm_page *, int);
                    567: /*static*/ int  pmap_enu4m(struct pmap *, vaddr_t, vm_prot_t,
                    568:                                int, struct vm_page *, int);
                    569: /*static*/ void pv_changepte4m(struct vm_page *, int, int);
                    570: /*static*/ int  pv_syncflags4m(struct vm_page *);
                    571: /*static*/ int  pv_link4m(struct vm_page *, struct pmap *, vaddr_t, u_int *);
                    572: /*static*/ void pv_unlink4m(struct vm_page *, struct pmap *, vaddr_t);
1.55      pk        573: #endif
                    574:
                    575: #if defined(SUN4) || defined(SUN4C)
1.246     pk        576: /*static*/ void        mmu_reservemon4_4c(int *, int *);
1.248     pk        577: /*static*/ void pmap_changeprot4_4c(pmap_t, vaddr_t, vm_prot_t, int);
1.246     pk        578: /*static*/ void pmap_rmk4_4c(struct pmap *, vaddr_t, vaddr_t, int, int);
                    579: /*static*/ void pmap_rmu4_4c(struct pmap *, vaddr_t, vaddr_t, int, int);
                    580: /*static*/ int  pmap_enk4_4c(struct pmap *, vaddr_t, vm_prot_t,
                    581:                                  int, struct vm_page *, int);
                    582: /*static*/ int  pmap_enu4_4c(struct pmap *, vaddr_t, vm_prot_t,
                    583:                                  int, struct vm_page *, int);
                    584: /*static*/ void pv_changepte4_4c(struct vm_page *, int, int);
                    585: /*static*/ int  pv_syncflags4_4c(struct vm_page *);
1.278     pk        586: /*static*/ int  pv_link4_4c(struct vm_page *, struct pmap *, vaddr_t, u_int *);
1.246     pk        587: /*static*/ void pv_unlink4_4c(struct vm_page *, struct pmap *, vaddr_t);
1.55      pk        588: #endif
                    589:
1.210     thorpej   590: #if !(defined(SUN4M) || defined(SUN4D)) && (defined(SUN4) || defined(SUN4C))
1.55      pk        591: #define                pmap_rmk        pmap_rmk4_4c
                    592: #define                pmap_rmu        pmap_rmu4_4c
                    593:
1.210     thorpej   594: #elif (defined(SUN4M) || defined(SUN4D)) && !(defined(SUN4) || defined(SUN4C))
1.55      pk        595: #define                pmap_rmk        pmap_rmk4m
                    596: #define                pmap_rmu        pmap_rmu4m
                    597:
                    598: #else  /* must use function pointers */
                    599:
                    600: /* function pointer declarations */
                    601: /* from pmap.h: */
1.308     thorpej   602: bool           (*pmap_clear_modify_p)(struct vm_page *);
                    603: bool           (*pmap_clear_reference_p)(struct vm_page *);
1.333     skrll     604: int            (*pmap_enter_p)(pmap_t, vaddr_t, paddr_t, vm_prot_t, u_int);
1.308     thorpej   605: bool           (*pmap_extract_p)(pmap_t, vaddr_t, paddr_t *);
                    606: bool           (*pmap_is_modified_p)(struct vm_page *);
                    607: bool           (*pmap_is_referenced_p)(struct vm_page *);
1.337     cegger    608: void           (*pmap_kenter_pa_p)(vaddr_t, paddr_t, vm_prot_t, u_int);
1.246     pk        609: void           (*pmap_kremove_p)(vaddr_t, vsize_t);
1.248     pk        610: void           (*pmap_kprotect_p)(vaddr_t, vsize_t, vm_prot_t);
1.246     pk        611: void           (*pmap_page_protect_p)(struct vm_page *, vm_prot_t);
                    612: void           (*pmap_protect_p)(pmap_t, vaddr_t, vaddr_t, vm_prot_t);
1.55      pk        613: /* local: */
1.246     pk        614: void           (*pmap_rmk_p)(struct pmap *, vaddr_t, vaddr_t, int, int);
                    615: void           (*pmap_rmu_p)(struct pmap *, vaddr_t, vaddr_t, int, int);
1.55      pk        616:
                    617: #define                pmap_rmk        (*pmap_rmk_p)
                    618: #define                pmap_rmu        (*pmap_rmu_p)
                    619:
                    620: #endif
                    621:
                    622: /* --------------------------------------------------------------*/
                    623:
                    624: /*
1.210     thorpej   625:  * Next we have some sun4m/4d-specific routines which have no 4/4c
1.55      pk        626:  * counterparts, or which are 4/4c macros.
                    627:  */
                    628:
1.210     thorpej   629: #if defined(SUN4M) || defined(SUN4D)
1.215     pk        630: /*
1.222     pk        631:  * SP versions of the tlb flush operations.
                    632:  *
                    633:  * Turn off traps to prevent register window overflows
                    634:  * from writing user windows to the wrong stack.
1.215     pk        635:  */
1.303     uwe       636: static void
                    637: sp_tlb_flush(int va, int ctx, int lvl)
1.221     pk        638: {
1.347     mrg       639:        /*
                    640:         * XXX convert %o3 (oldpsr), %o4 (SRMMU_CXR) and %o5 (old context)
                    641:         * into generically named registers.  Right now we're assuming that
                    642:         * gcc doesn't do anything funny with these registers.
                    643:         */
1.303     uwe       644:
1.222     pk        645:        /* Traps off */
                    646:        __asm("rd       %psr, %o3");
                    647:        __asm("wr       %%o3, %0, %%psr" :: "n" (PSR_ET));
                    648:
                    649:        /* Save context */
                    650:        __asm("mov      %0, %%o4" :: "n"(SRMMU_CXR));
                    651:        __asm("lda      [%%o4]%0, %%o5" :: "n"(ASI_SRMMU));
                    652:
                    653:        /* Set new context and flush type bits */
1.345     mrg       654:        va &= ~0xfff;
                    655:        __asm("sta      %1, [%%o4]%0" :: "n"(ASI_SRMMU), "r"(ctx));
                    656:        va |= lvl;
1.222     pk        657:
                    658:        /* Do the TLB flush */
1.345     mrg       659:        __asm("sta      %%g0, [%1]%0" :: "n"(ASI_SRMMUFP), "r"(va));
1.222     pk        660:
                    661:        /* Restore context */
                    662:        __asm("sta      %%o5, [%%o4]%0" :: "n"(ASI_SRMMU));
                    663:
                    664:        /* and turn traps on again */
                    665:        __asm("wr       %o3, 0, %psr");
                    666:        __asm("nop");
1.270     chs       667:        __asm("nop");
1.222     pk        668:        __asm("nop");
1.215     pk        669: }
                    670:
1.307     perry     671: static inline void
1.303     uwe       672: sp_tlb_flush_all(void)
1.215     pk        673: {
1.303     uwe       674:
1.222     pk        675:        sta(ASI_SRMMUFP_LN, ASI_SRMMUFP, 0);
1.215     pk        676: }
1.195     mrg       677:
                    678: #if defined(MULTIPROCESSOR)
                    679: /*
1.220     mrg       680:  * The SMP versions of the tlb flush routines.  We only need to
1.288     pk        681:  * do a cross call for these on sun4m (Mbus) systems. sun4d systems
                    682:  * have an Xbus which broadcasts TLB demaps in hardware.
1.195     mrg       683:  */
1.220     mrg       684:
1.307     perry     685: static inline void     smp_tlb_flush_page (int va, int ctx, u_int cpuset);
                    686: static inline void     smp_tlb_flush_segment (int va, int ctx, u_int cpuset);
                    687: static inline void     smp_tlb_flush_region (int va, int ctx, u_int cpuset);
                    688: static inline void     smp_tlb_flush_context (int ctx, u_int cpuset);
                    689: static inline void     smp_tlb_flush_all (void);
1.195     mrg       690:
1.289     pk        691: /* From locore: */
                    692: extern void ft_tlb_flush(int va, int ctx, int lvl);
                    693:
1.307     perry     694: static inline void
1.226     mrg       695: smp_tlb_flush_page(int va, int ctx, u_int cpuset)
1.195     mrg       696: {
1.303     uwe       697:
1.220     mrg       698:        if (CPU_ISSUN4D) {
1.222     pk        699:                sp_tlb_flush(va, ctx, ASI_SRMMUFP_L3);
1.220     mrg       700:        } else
1.289     pk        701:                FXCALL3(sp_tlb_flush, ft_tlb_flush, va, ctx, ASI_SRMMUFP_L3, cpuset);
1.195     mrg       702: }
                    703:
1.307     perry     704: static inline void
1.226     mrg       705: smp_tlb_flush_segment(int va, int ctx, u_int cpuset)
1.195     mrg       706: {
1.303     uwe       707:
1.220     mrg       708:        if (CPU_ISSUN4D) {
1.222     pk        709:                sp_tlb_flush(va, ctx, ASI_SRMMUFP_L2);
1.220     mrg       710:        } else
1.289     pk        711:                FXCALL3(sp_tlb_flush, ft_tlb_flush, va, ctx, ASI_SRMMUFP_L2, cpuset);
1.195     mrg       712: }
1.143     pk        713:
1.307     perry     714: static inline void
1.226     mrg       715: smp_tlb_flush_region(int va, int ctx, u_int cpuset)
1.195     mrg       716: {
1.303     uwe       717:
1.220     mrg       718:        if (CPU_ISSUN4D) {
1.222     pk        719:                sp_tlb_flush(va, ctx, ASI_SRMMUFP_L1);
1.220     mrg       720:        } else
1.289     pk        721:                FXCALL3(sp_tlb_flush, ft_tlb_flush, va, ctx, ASI_SRMMUFP_L1, cpuset);
1.195     mrg       722: }
                    723:
1.307     perry     724: static inline void
1.226     mrg       725: smp_tlb_flush_context(int ctx, u_int cpuset)
1.195     mrg       726: {
1.303     uwe       727:
1.220     mrg       728:        if (CPU_ISSUN4D) {
1.345     mrg       729:                sp_tlb_flush(0, ctx, ASI_SRMMUFP_L0);
1.220     mrg       730:        } else
1.289     pk        731:                FXCALL3(sp_tlb_flush, ft_tlb_flush, 0, ctx, ASI_SRMMUFP_L0, cpuset);
1.195     mrg       732: }
                    733:
1.307     perry     734: static inline void
1.303     uwe       735: smp_tlb_flush_all(void)
1.195     mrg       736: {
1.303     uwe       737:
1.220     mrg       738:        if (CPU_ISSUN4D) {
                    739:                sp_tlb_flush_all();
                    740:        } else
                    741:                XCALL0(sp_tlb_flush_all, CPUSET_ALL);
1.195     mrg       742: }
1.222     pk        743: #endif /* MULTIPROCESSOR */
1.195     mrg       744:
1.215     pk        745: #if defined(MULTIPROCESSOR)
1.226     mrg       746: #define tlb_flush_page(va,ctx,s)       smp_tlb_flush_page(va,ctx,s)
                    747: #define tlb_flush_segment(va,ctx,s)    smp_tlb_flush_segment(va,ctx,s)
                    748: #define tlb_flush_region(va,ctx,s)     smp_tlb_flush_region(va,ctx,s)
                    749: #define tlb_flush_context(ctx,s)       smp_tlb_flush_context(ctx,s)
1.195     mrg       750: #define tlb_flush_all()                        smp_tlb_flush_all()
                    751: #else
1.226     mrg       752: #define tlb_flush_page(va,ctx,s)       sp_tlb_flush(va,ctx,ASI_SRMMUFP_L3)
                    753: #define tlb_flush_segment(va,ctx,s)    sp_tlb_flush(va,ctx,ASI_SRMMUFP_L2)
                    754: #define tlb_flush_region(va,ctx,s)     sp_tlb_flush(va,ctx,ASI_SRMMUFP_L1)
1.345     mrg       755: #define tlb_flush_context(ctx,s)       sp_tlb_flush(0,ctx,ASI_SRMMUFP_L0)
1.223     pk        756: #define tlb_flush_all()                        sp_tlb_flush_all()
1.232     pk        757: #endif /* MULTIPROCESSOR */
1.195     mrg       758:
1.311     christos  759: static u_int   VA2PA(void *);
1.246     pk        760: static u_long  srmmu_bypass_read(u_long);
1.55      pk        761:
                    762: /*
                    763:  * VA2PA(addr) -- converts a virtual address to a physical address using
                    764:  * the MMU's currently-installed page tables. As a side effect, the address
                    765:  * translation used may cause the associated pte to be encached. The correct
                    766:  * context for VA must be set before this is called.
                    767:  *
                    768:  * This routine should work with any level of mapping, as it is used
                    769:  * during bootup to interact with the ROM's initial L1 mapping of the kernel.
                    770:  */
1.160     pk        771: static u_int
1.311     christos  772: VA2PA(void *addr)
1.55      pk        773: {
1.124     pk        774:        u_int pte;
1.55      pk        775:
1.217     pk        776:        /*
                    777:         * We'll use that handy SRMMU flush/probe.
                    778:         * Try each level in turn until we find a valid pte. Otherwise panic.
                    779:         */
1.55      pk        780:
                    781:        pte = lda(((u_int)addr & ~0xfff) | ASI_SRMMUFP_L3, ASI_SRMMUFP);
1.143     pk        782:        /* Unlock fault status; required on Hypersparc modules */
                    783:        (void)lda(SRMMU_SFSR, ASI_SRMMU);
1.55      pk        784:        if ((pte & SRMMU_TETYPE) == SRMMU_TEPTE)
                    785:            return (((pte & SRMMU_PPNMASK) << SRMMU_PPNPASHIFT) |
                    786:                    ((u_int)addr & 0xfff));
1.60      pk        787:
                    788:        /* A `TLB Flush Entire' is required before any L0, L1 or L2 probe */
1.195     mrg       789:        tlb_flush_all_real();
1.60      pk        790:
1.55      pk        791:        pte = lda(((u_int)addr & ~0xfff) | ASI_SRMMUFP_L2, ASI_SRMMUFP);
                    792:        if ((pte & SRMMU_TETYPE) == SRMMU_TEPTE)
                    793:            return (((pte & SRMMU_PPNMASK) << SRMMU_PPNPASHIFT) |
                    794:                    ((u_int)addr & 0x3ffff));
                    795:        pte = lda(((u_int)addr & ~0xfff) | ASI_SRMMUFP_L1, ASI_SRMMUFP);
                    796:        if ((pte & SRMMU_TETYPE) == SRMMU_TEPTE)
                    797:            return (((pte & SRMMU_PPNMASK) << SRMMU_PPNPASHIFT) |
                    798:                    ((u_int)addr & 0xffffff));
                    799:        pte = lda(((u_int)addr & ~0xfff) | ASI_SRMMUFP_L0, ASI_SRMMUFP);
                    800:        if ((pte & SRMMU_TETYPE) == SRMMU_TEPTE)
                    801:            return (((pte & SRMMU_PPNMASK) << SRMMU_PPNPASHIFT) |
                    802:                    ((u_int)addr & 0xffffffff));
                    803:
1.160     pk        804: #ifdef DIAGNOSTIC
1.55      pk        805:        panic("VA2PA: Asked to translate unmapped VA %p", addr);
1.160     pk        806: #else
                    807:        return (0);
                    808: #endif
1.55      pk        809: }
                    810:
1.282     pk        811: /*
                    812:  * Atomically update a PTE entry, coping with hardware updating the
                    813:  * PTE at the same time we are.  This is the procedure that is
                    814:  * recommended in the SuperSPARC user's manual.
                    815:  */
                    816: int
1.303     uwe       817: updatepte4m(vaddr_t va, int *pte, int bic, int bis, int ctx, u_int cpuset)
1.282     pk        818: {
                    819:        int oldval, swapval;
                    820:        volatile int *vpte = (volatile int *)pte;
1.340     martin    821:        bool can_lock = lock_available;
1.282     pk        822:
                    823:        /*
                    824:         * Can only be one of these happening in the system
                    825:         * at any one time.
                    826:         */
1.340     martin    827:        if (__predict_true(can_lock))
                    828:                mutex_spin_enter(&demap_lock);
1.282     pk        829:
                    830:        /*
                    831:         * The idea is to loop swapping zero into the pte, flushing
                    832:         * it, and repeating until it stays zero.  At this point,
                    833:         * there should be no more hardware accesses to this PTE
                    834:         * so we can modify it without losing any mod/ref info.
                    835:         */
                    836:        oldval = 0;
                    837:        do {
                    838:                swapval = 0;
                    839:                swap(vpte, swapval);
                    840:                tlb_flush_page(va, ctx, cpuset);
                    841:                oldval |= swapval;
1.291     pk        842:        } while (__predict_false(*vpte != 0));
1.282     pk        843:
                    844:        swapval = (oldval & ~bic) | bis;
                    845:        swap(vpte, swapval);
                    846:
1.340     martin    847:        if (__predict_true(can_lock))
                    848:                mutex_spin_exit(&demap_lock);
1.282     pk        849:
                    850:        return (oldval);
                    851: }
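
/*
 * Example use (editor's sketch): atomically clear the SRMMU modified
 * bit while capturing its previous state.  SRMMU_PG_M is assumed to be
 * the PTE modified-bit name from <sparc/pte.h>.
 */
#if 0	/* illustrative only */
	int opte = updatepte4m(va, ptep, SRMMU_PG_M, 0, ctx, cpuset);

	if (opte & SRMMU_PG_M) {
		/* the page was modified since we last synced flags */
	}
#endif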
                    852:
1.306     perry     853: inline void
1.303     uwe       854: setpgt4m(int *ptep, int pte)
1.85      pk        855: {
1.195     mrg       856:
1.341     mrg       857:        kpreempt_disable();
1.130     pk        858:        swap(ptep, pte);
1.341     mrg       859:        kpreempt_enable();
1.85      pk        860: }
                    861:
1.306     perry     862: inline void
1.303     uwe       863: setpgt4m_va(vaddr_t va, int *ptep, int pte, int pageflush, int ctx,
                    864:            u_int cpuset)
1.195     mrg       865: {
1.303     uwe       866:
1.282     pk        867: #if defined(MULTIPROCESSOR)
1.226     mrg       868:        updatepte4m(va, ptep, 0xffffffff, pte, pageflush ? ctx : 0, cpuset);
1.282     pk        869: #else
1.341     mrg       870:        kpreempt_disable();
1.291     pk        871:        if (__predict_true(pageflush))
1.282     pk        872:                tlb_flush_page(va, ctx, 0);
                    873:        setpgt4m(ptep, pte);
1.341     mrg       874:        kpreempt_enable();
1.282     pk        875: #endif /* MULTIPROCESSOR */
1.195     mrg       876: }
                    877:
1.92      pk        878: /* Set the page table entry for va to pte. */
1.270     chs       879: void
1.303     uwe       880: setpte4m(vaddr_t va, int pte)
1.55      pk        881: {
1.115     pk        882:        struct pmap *pm;
1.267     pk        883:        struct regmap *rp;
                    884:        struct segmap *sp;
1.55      pk        885:
1.279     pk        886: #ifdef DEBUG
1.100     pk        887:        if (getcontext4m() != 0)
                    888:                panic("setpte4m: user context");
1.279     pk        889: #endif
1.100     pk        890:
                    891:        pm = pmap_kernel();
1.267     pk        892:        rp = &pm->pm_regmap[VA_VREG(va)];
                    893:        sp = &rp->rg_segmap[VA_VSEG(va)];
1.1       deraadt   894:
1.226     mrg       895:        tlb_flush_page(va, 0, CPUSET_ALL);
1.267     pk        896:        setpgt4m(sp->sg_pte + VA_SUN4M_VPG(va), pte);
1.55      pk        897: }
1.72      pk        898:
1.100     pk        899: /*
1.121     pk        900:  * Page table pool back-end.
                    901:  */
1.100     pk        902: void *
1.206     thorpej   903: pgt_page_alloc(struct pool *pp, int flags)
1.100     pk        904: {
1.167     pk        905:        int cacheit = (cpuinfo.flags & CPUFLG_CACHEPAGETABLES) != 0;
                    906:        struct vm_page *pg;
                    907:        vaddr_t va;
                    908:        paddr_t pa;
1.100     pk        909:
1.167     pk        910:        /* Allocate a page of physical memory */
                    911:        if ((pg = uvm_pagealloc(NULL, 0, NULL, 0)) == NULL)
                    912:                return (NULL);
1.100     pk        913:
1.167     pk        914:        /* Allocate virtual memory */
1.348     para      915:        va = uvm_km_alloc(kernel_map, PAGE_SIZE, 0, UVM_KMF_VAONLY |
1.241     pk        916:                ((flags & PR_WAITOK) ? 0 : UVM_KMF_NOWAIT | UVM_KMF_TRYLOCK));
1.167     pk        917:        if (va == 0) {
                    918:                uvm_pagefree(pg);
                    919:                return (NULL);
1.100     pk        920:        }
1.167     pk        921:
                    922:        /*
                    923:         * On systems with a physical data cache we need to flush this page
                    924:         * from the cache if the pagetables cannot be cached.
                    925:         * On systems with a virtually indexed data cache, we only need
                    926:         * to map it non-cacheable, since the page is not currently mapped.
                    927:         */
                    928:        pa = VM_PAGE_TO_PHYS(pg);
                    929:        if (cacheit == 0)
                    930:                pcache_flush_page(pa, 1);
                    931:
                    932:        /* Map the page */
1.194     chs       933:        pmap_kenter_pa(va, pa | (cacheit ? 0 : PMAP_NC),
1.337     cegger    934:            VM_PROT_READ | VM_PROT_WRITE, 0);
1.198     chris     935:        pmap_update(pmap_kernel());
1.167     pk        936:
                    937:        return ((void *)va);
1.195     mrg       938: }
                    939:
1.100     pk        940: void
1.206     thorpej   941: pgt_page_free(struct pool *pp, void *v)
1.100     pk        942: {
1.194     chs       943:        vaddr_t va;
                    944:        paddr_t pa;
1.348.6.3! tls       945:        bool rv __diagused;
1.194     chs       946:
                    947:        va = (vaddr_t)v;
                    948:        rv = pmap_extract(pmap_kernel(), va, &pa);
                    949:        KASSERT(rv);
                    950:        uvm_pagefree(PHYS_TO_VM_PAGE(pa));
1.206     thorpej   951:        pmap_kremove(va, PAGE_SIZE);
1.348     para      952:        uvm_km_free(kernel_map, va, PAGE_SIZE, UVM_KMF_VAONLY);
1.100     pk        953: }
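/*
 * Editor's note: pgt_page_alloc()/pgt_page_free() are pool(9)
 * back-end hooks.  A minimal sketch of the hookup follows; the
 * allocator object and pool names are illustrative and may differ
 * from the file's actual declarations.
 */
#if 0
static struct pool_allocator pgt_page_allocator = {
	pgt_page_alloc,
	pgt_page_free,
	0,
};

	/* at initialization time, e.g. for an L1 page-table pool: */
	pool_init(&L1_pool, SRMMU_L1SIZE * sizeof(int),
	    SRMMU_L1SIZE * sizeof(int), 0, 0, "L1 pagetable",
	    &pgt_page_allocator, IPL_NONE);
#endif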
1.215     pk        954: #endif /* SUN4M || SUN4D */
1.1       deraadt   955:
                    956: /*----------------------------------------------------------------*/
                    957:
1.72      pk        958: /*
                    959:  * The following three macros are to be used in sun4/sun4c code only.
                    960:  */
1.69      pk        961: #if defined(SUN4_MMU3L)
                    962: #define CTX_USABLE(pm,rp) (                                    \
1.72      pk        963:                ((pm)->pm_ctx != NULL &&                        \
                    964:                 (!HASSUN4_MMU3L || (rp)->rg_smeg != reginval)) \
1.69      pk        965: )
1.43      pk        966: #else
1.55      pk        967: #define CTX_USABLE(pm,rp)      ((pm)->pm_ctx != NULL )
1.43      pk        968: #endif
                    969:
1.296     chs       970: #define GAP_WIDEN(pm,vr) do if (CPU_HAS_SUNMMU) {              \
1.209     thorpej   971:        if (vr + 1 == pm->pm_gap_start)                         \
                    972:                pm->pm_gap_start = vr;                          \
                    973:        if (vr == pm->pm_gap_end)                               \
                    974:                pm->pm_gap_end = vr + 1;                        \
1.43      pk        975: } while (0)
                    976:
1.296     chs       977: #define GAP_SHRINK(pm,vr) do if (CPU_HAS_SUNMMU) {                     \
1.124     pk        978:        int x;                                                          \
1.43      pk        979:        x = pm->pm_gap_start + (pm->pm_gap_end - pm->pm_gap_start) / 2; \
                    980:        if (vr > x) {                                                   \
                    981:                if (vr < pm->pm_gap_end)                                \
                    982:                        pm->pm_gap_end = vr;                            \
                    983:        } else {                                                        \
                    984:                if (vr >= pm->pm_gap_start && x != pm->pm_gap_start)    \
                    985:                        pm->pm_gap_start = vr + 1;                      \
                    986:        }                                                               \
                    987: } while (0)
                    988:
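/*
 * Editor's note (illustrative, not in the original): pm_gap_start and
 * pm_gap_end bracket a run of virtual regions known to be empty, so
 * scans of the region map can skip it wholesale.  A short trace,
 * starting from pm_gap_start = 16, pm_gap_end = 240:
 *
 *	GAP_SHRINK(pm, 100): midpoint x = 128; 100 <= x and 100 >= 16,
 *	    so pm_gap_start becomes 101 (gap now [101, 240>);
 *	GAP_SHRINK(pm, 200): x = 170; 200 > x and 200 < 240,
 *	    so pm_gap_end becomes 200 (gap now [101, 200>).
 *
 * GAP_WIDEN() re-extends the gap one region at a time when a region
 * at either edge of it becomes empty again.
 */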
1.72      pk        989:
1.311     christos  990: static void get_phys_mem(void **);
1.313     mrg       991: #if 0 /* not used */
                    992: void   kvm_iocache(char *, int);
                    993: #endif
1.122     pk        994:
1.53      christos  995: #ifdef DEBUG
1.246     pk        996: void   pm_check(char *, struct pmap *);
                    997: void   pm_check_k(char *, struct pmap *);
                    998: void   pm_check_u(char *, struct pmap *);
1.53      christos  999: #endif
                   1000:
1.181     pk       1001: /*
                   1002:  * During the PMAP bootstrap, we can use a simple translation to map a
                    1003:  * kernel virtual address to a physical memory address (this is arranged
                   1004:  * in locore).  Usually, KERNBASE maps to physical address 0. This is always
                   1005:  * the case on sun4 and sun4c machines. On sun4m machines -- if no memory is
                   1006:  * installed in the bank corresponding to physical address 0 -- the PROM may
                   1007:  * elect to load us at some other address, presumably at the start of
                    1008:  * the first memory bank that is available. We set up the variable
                   1009:  * `va2pa_offset' to hold the physical address corresponding to KERNBASE.
                   1010:  */
                   1011:
1.238     pk       1012: static u_long va2pa_offset;
1.181     pk       1013: #define PMAP_BOOTSTRAP_VA2PA(v) ((paddr_t)((u_long)(v) - va2pa_offset))
1.325     martin   1014: #define PMAP_BOOTSTRAP_PA2VA(p) ((vaddr_t)((u_long)(p) + va2pa_offset))
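/*
 * Editor's note: a worked example with assumed load addresses.  On
 * NetBSD/sparc, KERNBASE is 0xf0000000.  If the kernel is loaded at
 * physical 0 (the usual sun4/sun4c case), va2pa_offset = 0xf0000000
 * and PMAP_BOOTSTRAP_VA2PA(0xf0004000) == 0x00004000.  If the PROM
 * instead loads us at the first populated bank, say 0x20000000, then
 * va2pa_offset = 0xf0000000 - 0x20000000 = 0xd0000000 and the same
 * virtual address translates to 0x20004000.
 */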
1.53      christos 1015:
1.2       deraadt  1016: /*
1.181     pk       1017:  * Grab physical memory list.
                   1018:  * While here, compute `physmem'.
1.122     pk       1019:  */
                   1020: void
1.311     christos 1021: get_phys_mem(void **top)
1.122     pk       1022: {
                   1023:        struct memarr *mp;
1.312     macallan 1024:        char *p;
1.122     pk       1025:        int i;
                   1026:
1.246     pk       1027:        /* Load the memory descriptor array at the current kernel top */
1.311     christos 1028:        p = (void *)ALIGN(*top);
1.246     pk       1029:        pmemarr = (struct memarr *)p;
                   1030:        npmemarr = prom_makememarr(pmemarr, 1000, MEMARR_AVAILPHYS);
                   1031:
                   1032:        /* Update kernel top */
                   1033:        p += npmemarr * sizeof(struct memarr);
                   1034:        *top = p;
1.136     pk       1035:
1.122     pk       1036:        for (physmem = 0, mp = pmemarr, i = npmemarr; --i >= 0; mp++)
                   1037:                physmem += btoc(mp->len);
                   1038: }
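/*
 * Editor's note (illustrative): with, say, two available banks of
 * 16MB and 48MB and 4KB pages, the loop above computes
 * physmem = btoc(0x1000000) + btoc(0x3000000) = 4096 + 12288 = 16384
 * pages.
 */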
                   1039:
1.2       deraadt  1040:
1.29      pk       1041: /*
1.106     thorpej  1042:  * Support functions for vm_page_bootstrap().
1.29      pk       1043:  */
                   1044:
                   1045: /*
1.253     thorpej  1046:  * How much virtual space does this kernel have?
                   1047:  * (After mapping kernel text, data, etc.)
                   1048:  */
                   1049: void
1.303     uwe      1050: pmap_virtual_space(vaddr_t *v_start, vaddr_t *v_end)
1.253     thorpej  1051: {
1.303     uwe      1052:
1.253     thorpej  1053:         *v_start = virtual_avail;
                   1054:         *v_end   = virtual_end;
                   1055: }
                   1056:
1.265     pk       1057: #ifdef PMAP_GROWKERNEL
                   1058: vaddr_t
                   1059: pmap_growkernel(vaddr_t eva)
                   1060: {
                   1061:        struct regmap *rp;
                   1062:        struct segmap *sp;
                   1063:        int vr, evr, M, N, i;
                   1064:        struct vm_page *pg;
                   1065:        vaddr_t va;
                   1066:
                   1067:        if (eva <= virtual_end)
                   1068:                return (virtual_end);
                   1069:
                   1070:        /* For now, only implemented for sun4/sun4c */
1.296     chs      1071:        KASSERT(CPU_HAS_SUNMMU);
1.265     pk       1072:
                   1073:        /*
                   1074:         * Map in the next region(s)
                   1075:         */
                   1076:
                   1077:        /* Get current end-of-kernel */
                   1078:        vr = virtual_end >> RGSHIFT;
                   1079:        evr = (eva + NBPRG - 1) >> RGSHIFT;
                   1080:        eva = evr << RGSHIFT;
                   1081:
                   1082:        if (eva > VM_MAX_KERNEL_ADDRESS)
                   1083:                panic("growkernel: grown too large: %lx", eva);
                   1084:
                   1085:        /*
                    1086:         * Divide a region into N blocks of M segments each, such that
                    1087:         * the PTEs of each block can be mapped by a single page.
                   1088:         * N should come out to 1 for 8K pages and to 4 for 4K pages.
                   1089:         */
                   1090:        M = NBPG / (NPTESG * sizeof(int));
                   1091:        N = (NBPRG/NBPSG) / M;
                   1092:
                   1093:        while (vr < evr) {
                   1094:                rp = &pmap_kernel()->pm_regmap[vr];
                   1095:                for (i = 0; i < N; i++) {
                   1096:                        sp = &rp->rg_segmap[i * M];
                   1097:                        va = (vaddr_t)sp->sg_pte;
                   1098:                        pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE);
                   1099:                        if (pg == NULL)
                   1100:                                panic("growkernel: out of memory");
                   1101:                        pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg),
1.337     cegger   1102:                                        VM_PROT_READ | VM_PROT_WRITE, 0);
1.265     pk       1103:                }
                                        vr++;   /* advance to the next region */
                    1104:        }
                   1105:
                   1106:        virtual_end = eva;
                   1107:        return (eva);
                   1108: }
                   1109: #endif
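/*
 * Editor's note: worked numbers for the M/N computation in
 * pmap_growkernel() above, assuming the usual sun4/sun4c geometry of
 * 256KB segments and 16MB regions (64 segments per region):
 *
 *	4KB pages: NPTESG = 64, M = 4096/(64*4) = 16, N = 64/16 = 4
 *	8KB pages: NPTESG = 32, M = 8192/(32*4) = 64, N = 64/64 = 1
 *
 * i.e. one physical page holds the PTEs of M consecutive segments,
 * and N such pages map a whole region -- matching the "1 for 8K
 * pages, 4 for 4K pages" remark in the code.
 */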
                   1110:
1.253     thorpej  1111: /*
1.107     pk       1112:  * Helper routine that hands off available physical pages to the VM system.
1.29      pk       1113:  */
1.107     pk       1114: static void
1.303     uwe      1115: pmap_page_upload(void)
1.29      pk       1116: {
1.181     pk       1117:        int     n;
1.163     pk       1118:        paddr_t start, end;
1.29      pk       1119:
1.181     pk       1120:        /* First, the `etext gap' */
                   1121:        start = PMAP_BOOTSTRAP_VA2PA(etext_gap_start);
                   1122:        end = PMAP_BOOTSTRAP_VA2PA(etext_gap_end);
1.244     pk       1123:
1.177     pk       1124: #ifdef DIAGNOSTIC
1.181     pk       1125:        if (avail_start <= start)
1.177     pk       1126:                panic("pmap_page_upload: etext gap overlap: %lx < %lx",
1.181     pk       1127:                        (u_long)avail_start, (u_long)start);
1.177     pk       1128: #endif
1.181     pk       1129:        if (etext_gap_start < etext_gap_end) {
                   1130:                vm_first_phys = start;
1.178     pk       1131:                uvm_page_physload(
1.181     pk       1132:                        atop(start),
                   1133:                        atop(end),
                   1134:                        atop(start),
                   1135:                        atop(end), VM_FREELIST_DEFAULT);
                   1136:        }
1.177     pk       1137:
1.107     pk       1138:        for (n = 0; n < npmemarr; n++) {
1.181     pk       1139:
                   1140:                start = pmemarr[n].addr;
                   1141:                end = start + pmemarr[n].len;
                   1142:
1.244     pk       1143:                /* Update vm_{first,last}_phys */
                   1144:                if (vm_first_phys > start)
                   1145:                        vm_first_phys = start;
                   1146:                if (vm_last_phys < end)
                   1147:                        vm_last_phys = end;
                   1148:
1.107     pk       1149:                /*
1.244     pk       1150:                 * Exclude any memory allocated for the kernel as computed
                   1151:                 * by pmap_bootstrap(), i.e. the range
                   1152:                 *      [KERNBASE_PA, avail_start>.
                   1153:                 * Note that this will also exclude the `etext gap' range
                   1154:                 * already uploaded above.
1.237     pk       1155:                 */
                   1156:                if (start < PMAP_BOOTSTRAP_VA2PA(KERNBASE)) {
1.244     pk       1157:                        /*
                   1158:                         * This segment starts below the kernel load address.
                   1159:                         * Chop it off at the start of the kernel.
                   1160:                         */
                   1161:                        paddr_t chop = PMAP_BOOTSTRAP_VA2PA(KERNBASE);
                   1162:
                   1163:                        if (end < chop)
                   1164:                                chop = end;
1.238     pk       1165: #ifdef DEBUG
1.325     martin   1166:                        prom_printf("bootstrap gap: start %lx, chop %lx, end %lx\n",
1.244     pk       1167:                                start, chop, end);
1.238     pk       1168: #endif
1.237     pk       1169:                        uvm_page_physload(
                   1170:                                atop(start),
1.244     pk       1171:                                atop(chop),
1.237     pk       1172:                                atop(start),
1.244     pk       1173:                                atop(chop),
1.237     pk       1174:                                VM_FREELIST_DEFAULT);
                   1175:
1.244     pk       1176:                        /*
                   1177:                         * Adjust the start address to reflect the
                   1178:                         * uploaded portion of this segment.
                   1179:                         */
                   1180:                        start = chop;
1.237     pk       1181:                }
                   1182:
1.244     pk       1183:                /* Skip the current kernel address range */
1.181     pk       1184:                if (start <= avail_start && avail_start < end)
                   1185:                        start = avail_start;
                   1186:
1.107     pk       1187:                if (start == end)
                   1188:                        continue;
1.29      pk       1189:
1.244     pk       1190:                /* Upload (the rest of) this segment */
1.110     mrg      1191:                uvm_page_physload(
                   1192:                        atop(start),
                   1193:                        atop(end),
                   1194:                        atop(start),
1.120     thorpej  1195:                        atop(end), VM_FREELIST_DEFAULT);
1.29      pk       1196:        }
1.330     mrg      1197:
                   1198: #if defined(MULTIPROCESSOR)
                   1199:        {
                   1200:                CPU_INFO_ITERATOR cpunum;
                   1201:                struct cpu_info *cpi;
                   1202:
                   1203:                for (CPU_INFO_FOREACH(cpunum, cpi)) {
                   1204:                        if (cpi->ci_free_sva1)
                   1205:                                uvm_page_physload(atop(PMAP_BOOTSTRAP_VA2PA(cpi->ci_free_sva1)),
                   1206:                                                  atop(PMAP_BOOTSTRAP_VA2PA(cpi->ci_free_eva1)),
                   1207:                                                  atop(PMAP_BOOTSTRAP_VA2PA(cpi->ci_free_sva1)),
                   1208:                                                  atop(PMAP_BOOTSTRAP_VA2PA(cpi->ci_free_eva1)),
                   1209:                                                  VM_FREELIST_DEFAULT);
                   1210:                        if (cpi->ci_free_sva2)
                   1211:                                uvm_page_physload(atop(PMAP_BOOTSTRAP_VA2PA(cpi->ci_free_sva2)),
                   1212:                                                  atop(PMAP_BOOTSTRAP_VA2PA(cpi->ci_free_eva2)),
                   1213:                                                  atop(PMAP_BOOTSTRAP_VA2PA(cpi->ci_free_sva2)),
                   1214:                                                  atop(PMAP_BOOTSTRAP_VA2PA(cpi->ci_free_eva2)),
                   1215:                                                  VM_FREELIST_DEFAULT);
                   1216:                }
                   1217:        }
                   1218: #endif
1.29      pk       1219: }
                   1220:
1.181     pk       1221: /*
                   1222:  * This routine is used by mmrw() to validate access to `/dev/mem'.
                   1223:  */
1.39      pk       1224: int
1.303     uwe      1225: pmap_pa_exists(paddr_t pa)
1.39      pk       1226: {
1.124     pk       1227:        int nmem;
                   1228:        struct memarr *mp;
1.39      pk       1229:
                   1230:        for (mp = pmemarr, nmem = npmemarr; --nmem >= 0; mp++) {
                   1231:                if (pa >= mp->addr && pa < mp->addr + mp->len)
                   1232:                        return 1;
                   1233:        }
                   1234:
                   1235:        return 0;
                   1236: }
1.29      pk       1237:
1.1       deraadt  1238: /* update pv_flags given a valid pte */
1.55      pk       1239: #define        MR4_4C(pte) (((pte) >> PG_M_SHIFT) & (PV_MOD | PV_REF))
                   1240: #define MR4M(pte) (((pte) >> PG_M_SHIFT4M) & (PV_MOD4M | PV_REF4M))
1.1       deraadt  1241:
                   1242: /*----------------------------------------------------------------*/
                   1243:
                   1244: /*
                   1245:  * Agree with the monitor ROM as to how many MMU entries are
                   1246:  * to be reserved, and map all of its segments into all contexts.
                   1247:  *
                   1248:  * Unfortunately, while the Version 0 PROM had a nice linked list of
                   1249:  * taken virtual memory, the Version 2 PROM provides instead a convoluted
                   1250:  * description of *free* virtual memory.  Rather than invert this, we
                   1251:  * resort to two magic constants from the PROM vector description file.
                   1252:  */
1.55      pk       1253: #if defined(SUN4) || defined(SUN4C)
1.43      pk       1254: void
1.303     uwe      1255: mmu_reservemon4_4c(int *nrp, int *nsp)
1.1       deraadt  1256: {
1.124     pk       1257:        u_int va = 0, eva = 0;
1.348.6.3! tls      1258:        int mmuseg, i, nr, ns, vr;
1.265     pk       1259:        int *pte;
1.69      pk       1260: #if defined(SUN4_MMU3L)
1.348.6.3! tls      1261:        int mmureg, lastvr = 0;
1.53      christos 1262: #endif
1.124     pk       1263:        struct regmap *rp;
1.1       deraadt  1264:
1.20      deraadt  1265: #if defined(SUN4)
1.55      pk       1266:        if (CPU_ISSUN4) {
1.29      pk       1267:                prom_vstart = va = OLDMON_STARTVADDR;
                   1268:                prom_vend = eva = OLDMON_ENDVADDR;
1.20      deraadt  1269:        }
                   1270: #endif
                   1271: #if defined(SUN4C)
1.55      pk       1272:        if (CPU_ISSUN4C) {
1.29      pk       1273:                prom_vstart = va = OPENPROM_STARTVADDR;
                   1274:                prom_vend = eva = OPENPROM_ENDVADDR;
1.19      deraadt  1275:        }
1.20      deraadt  1276: #endif
1.43      pk       1277:        ns = *nsp;
                   1278:        nr = *nrp;
1.1       deraadt  1279:        while (va < eva) {
1.43      pk       1280:                vr = VA_VREG(va);
                   1281:                rp = &pmap_kernel()->pm_regmap[vr];
                   1282:
1.69      pk       1283: #if defined(SUN4_MMU3L)
                   1284:                if (HASSUN4_MMU3L && vr != lastvr) {
1.43      pk       1285:                        lastvr = vr;
                   1286:                        mmureg = getregmap(va);
                   1287:                        if (mmureg < nr)
                   1288:                                rp->rg_smeg = nr = mmureg;
                   1289:                        /*
                   1290:                         * On 3-level MMU machines, we distribute regions,
                   1291:                         * rather than segments, amongst the contexts.
                   1292:                         */
                   1293:                        for (i = ncontext; --i > 0;)
1.311     christos 1294:                                prom_setcontext(i, (void *)va, mmureg);
1.43      pk       1295:                }
                   1296: #endif
1.1       deraadt  1297:                mmuseg = getsegmap(va);
1.43      pk       1298:                if (mmuseg < ns)
                   1299:                        ns = mmuseg;
1.69      pk       1300:
                   1301:                if (!HASSUN4_MMU3L)
1.43      pk       1302:                        for (i = ncontext; --i > 0;)
1.311     christos 1303:                                prom_setcontext(i, (void *)va, mmuseg);
1.43      pk       1304:
1.1       deraadt  1305:                if (mmuseg == seginval) {
                   1306:                        va += NBPSG;
                   1307:                        continue;
                   1308:                }
1.43      pk       1309:                /*
                   1310:                 * Another PROM segment. Enter into region map.
                   1311:                 * Assume the entire segment is valid.
                   1312:                 */
                   1313:                rp->rg_nsegmap += 1;
                   1314:                rp->rg_segmap[VA_VSEG(va)].sg_pmeg = mmuseg;
                   1315:                rp->rg_segmap[VA_VSEG(va)].sg_npte = NPTESG;
1.265     pk       1316:                pte = rp->rg_segmap[VA_VSEG(va)].sg_pte;
1.43      pk       1317:
1.1       deraadt  1318:                /* PROM maps its memory user-accessible: fix it. */
1.265     pk       1319:                for (i = NPTESG; --i >= 0; va += NBPG, pte++) {
                   1320:                        *pte = getpte4(va) | PG_S;
                   1321:                        setpte4(va, *pte);
                   1322:                }
1.1       deraadt  1323:        }
1.43      pk       1324:        *nsp = ns;
                   1325:        *nrp = nr;
                   1326:        return;
1.1       deraadt  1327: }
1.55      pk       1328: #endif
                   1329:
1.210     thorpej  1330: #if defined(SUN4M) || defined(SUN4D) /* SRMMU versions of above */
1.55      pk       1331:
1.97      pk       1332: u_long
1.303     uwe      1333: srmmu_bypass_read(u_long paddr)
1.97      pk       1334: {
                   1335:        unsigned long v;
                   1336:
1.158     pk       1337:        if (cpuinfo.mxcc) {
1.97      pk       1338:                /*
                   1339:                 * We're going to have to use MMU passthrough. If we're on
1.158     pk       1340:                 * a Viking SuperSPARC with a MultiCache Controller, we
                   1341:                 * need to set the AC (Alternate Cacheable) bit in the MMU's
                   1342:                 * control register in order to not by-pass the cache.
1.97      pk       1343:                 */
                   1344:
1.158     pk       1345:                unsigned long s = lda(SRMMU_PCR, ASI_SRMMU);
1.97      pk       1346:
                   1347:                /* set MMU AC bit */
                   1348:                sta(SRMMU_PCR, ASI_SRMMU, s | VIKING_PCR_AC);
                   1349:                v = lda(paddr, ASI_BYPASS);
                   1350:                sta(SRMMU_PCR, ASI_SRMMU, s);
                   1351:        } else
                   1352:                v = lda(paddr, ASI_BYPASS);
                   1353:
                   1354:        return (v);
                   1355: }
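/*
 * Editor's note (not in the original source): ASI_BYPASS loads go
 * straight to physical memory, so srmmu_bypass_read() lets the
 * bootstrap code walk the ROM's page tables before any kernel
 * mapping for them exists, e.g.
 *
 *	te = srmmu_bypass_read(rom_ctxtbl);	(context 0's entry)
 *
 * as done in mmu_reservemon4m() below.  The Viking/MXCC dance above
 * only keeps such loads coherent with the external cache.
 */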
                   1356:
                   1357:
1.55      pk       1358: /*
                   1359:  * Take the monitor's initial page table layout, convert it to 3rd-level pte's
                   1360:  * (it starts out as a L1 mapping), and install it along with a set of kernel
                   1361:  * mapping tables as the kernel's initial page table setup. Also create and
                   1362:  * enable a context table. I suppose we also want to block user-mode access
                   1363:  * to the new kernel/ROM mappings.
                   1364:  */
                   1365:
1.58      pk       1366: /*
                   1367:  * mmu_reservemon4m(): Copies the existing (ROM) page tables to kernel space,
1.55      pk       1368:  * converting any L1/L2 PTEs to L3 PTEs. Does *not* copy the L1 entry mapping
1.96      pk       1369:  * the kernel at KERNBASE since we don't want to map 16M of physical
                   1370:  * memory for the kernel. Thus the kernel must be installed later!
1.55      pk       1371:  * Also installs ROM mappings into the kernel pmap.
                   1372:  * NOTE: This also revokes all user-mode access to the mapped regions.
                   1373:  */
                   1374: void
1.303     uwe      1375: mmu_reservemon4m(struct pmap *kpmap)
1.55      pk       1376: {
1.71      pk       1377:        unsigned int rom_ctxtbl;
1.124     pk       1378:        int te;
1.55      pk       1379:
1.275     martin   1380: #if !(defined(PROM_AT_F0) || defined(MSIIEP))
1.97      pk       1381:        prom_vstart = OPENPROM_STARTVADDR;
                   1382:        prom_vend = OPENPROM_ENDVADDR;
1.275     martin   1383: #else /* OBP3/OFW in JavaStations */
                   1384:        prom_vstart = 0xf0000000;
                   1385: #if defined(MSIIEP)
                   1386:        prom_vend = 0xf0800000;
                   1387: #else
                   1388:        prom_vend = 0xf0080000;
                   1389: #endif
                   1390: #endif
1.55      pk       1391:
                   1392:        /*
1.203     pk       1393:         * XXX: although the sun4m can handle 36 bits of physical
1.55      pk       1394:         * address space, we assume that all these page tables, etc
                   1395:         * are in the lower 4G (32-bits) of address space, i.e. out of I/O
                   1396:         * space. Eventually this should be changed to support the 36 bit
                   1397:         * physical addressing, in case some crazed ROM designer decides to
                   1398:         * stick the pagetables up there. In that case, we should use MMU
                   1399:         * transparent mode, (i.e. ASI 0x20 to 0x2f) to access
                   1400:         * physical memory.
                   1401:         */
                   1402:
1.71      pk       1403:        rom_ctxtbl = (lda(SRMMU_CXTPTR,ASI_SRMMU) << SRMMU_PPNPASHIFT);
1.55      pk       1404:
1.97      pk       1405:        te = srmmu_bypass_read(rom_ctxtbl);     /* i.e. context 0 */
1.69      pk       1406:
1.55      pk       1407:        switch (te & SRMMU_TETYPE) {
1.62      pk       1408:        case SRMMU_TEINVALID:
1.69      pk       1409:                cpuinfo.ctx_tbl[0] = SRMMU_TEINVALID;
1.77      pk       1410:                panic("mmu_reservemon4m: no existing L0 mapping! "
                    1411:                      "(How are we running?)");
1.55      pk       1412:                break;
1.62      pk       1413:        case SRMMU_TEPTE:
1.55      pk       1414:                panic("mmu_reservemon4m: can't handle ROM 4G page size");
                   1415:                /* XXX: Should make this work, however stupid it is */
                   1416:                break;
1.62      pk       1417:        case SRMMU_TEPTD:
1.71      pk       1418:                mmu_setup4m_L1(te, kpmap);
1.55      pk       1419:                break;
1.62      pk       1420:        default:
1.55      pk       1421:                panic("mmu_reservemon4m: unknown pagetable entry type");
                   1422:        }
                   1423: }
                   1424:
1.303     uwe      1425: /* regtblptd - PTD for region table to be remapped */
1.55      pk       1426: void
1.303     uwe      1427: mmu_setup4m_L1(int regtblptd, struct pmap *kpmap)
1.55      pk       1428: {
1.124     pk       1429:        unsigned int regtblrover;
                   1430:        int i;
1.55      pk       1431:        unsigned int te;
1.71      pk       1432:        struct regmap *rp;
1.55      pk       1433:        int j, k;
                   1434:
1.69      pk       1435:        /*
                   1436:         * Here we scan the region table to copy any entries which appear.
1.55      pk       1437:         * We are only concerned with regions in kernel space and above
1.96      pk       1438:         * (i.e. regions VA_VREG(KERNBASE)+1 to 0xff). We ignore the first
                   1439:         * region (at VA_VREG(KERNBASE)), since that is the 16MB L1 mapping
                   1440:         * that the ROM used to map the kernel in initially. Later, we will
                   1441:         * rebuild a new L3 mapping for the kernel and install it before
                   1442:         * switching to the new pagetables.
1.55      pk       1443:         */
1.71      pk       1444:        regtblrover =
                   1445:                ((regtblptd & ~SRMMU_TETYPE) << SRMMU_PPNPASHIFT) +
                   1446:                (VA_VREG(KERNBASE)+1) * sizeof(long);   /* kernel only */
1.55      pk       1447:
                   1448:        for (i = VA_VREG(KERNBASE) + 1; i < SRMMU_L1SIZE;
                   1449:             i++, regtblrover += sizeof(long)) {
1.71      pk       1450:
                   1451:                /* The region we're dealing with */
                   1452:                rp = &kpmap->pm_regmap[i];
                   1453:
1.97      pk       1454:                te = srmmu_bypass_read(regtblrover);
1.55      pk       1455:                switch(te & SRMMU_TETYPE) {
1.62      pk       1456:                case SRMMU_TEINVALID:
1.55      pk       1457:                        break;
1.71      pk       1458:
1.62      pk       1459:                case SRMMU_TEPTE:
1.55      pk       1460: #ifdef DEBUG
1.325     martin   1461:                        prom_printf("mmu_setup4m_L1: "
1.77      pk       1462:                               "converting region 0x%x from L1->L3\n", i);
1.55      pk       1463: #endif
1.71      pk       1464:                        /*
                    1465:                         * This region entry covers 16MB of memory -- or
                   1466:                         * (NSEGRG * NPTESG) pages -- which we must convert
                   1467:                         * into a 3-level description.
1.55      pk       1468:                         */
1.71      pk       1469:
1.55      pk       1470:                        for (j = 0; j < SRMMU_L2SIZE; j++) {
1.71      pk       1471:                                struct segmap *sp = &rp->rg_segmap[j];
1.55      pk       1472:
                   1473:                                for (k = 0; k < SRMMU_L3SIZE; k++) {
1.97      pk       1474:                                        setpgt4m(&sp->sg_pte[k],
                   1475:                                                (te & SRMMU_L1PPNMASK) |
                   1476:                                                (j << SRMMU_L2PPNSHFT) |
                   1477:                                                (k << SRMMU_L3PPNSHFT) |
                   1478:                                                (te & SRMMU_PGBITSMSK) |
                   1479:                                                ((te & SRMMU_PROT_MASK) |
                   1480:                                                 PPROT_U2S_OMASK) |
                   1481:                                                SRMMU_TEPTE);
1.55      pk       1482:                                }
                   1483:                        }
                   1484:                        break;
1.71      pk       1485:
1.62      pk       1486:                case SRMMU_TEPTD:
1.71      pk       1487:                        mmu_setup4m_L2(te, rp);
1.55      pk       1488:                        break;
1.71      pk       1489:
1.62      pk       1490:                default:
1.55      pk       1491:                        panic("mmu_setup4m_L1: unknown pagetable entry type");
                   1492:                }
                   1493:        }
                   1494: }
                   1495:
                   1496: void
1.303     uwe      1497: mmu_setup4m_L2(int segtblptd, struct regmap *rp)
1.55      pk       1498: {
1.124     pk       1499:        unsigned int segtblrover;
                   1500:        int i, k;
1.55      pk       1501:        unsigned int te;
1.71      pk       1502:        struct segmap *sp;
1.55      pk       1503:
                   1504:        segtblrover = (segtblptd & ~SRMMU_TETYPE) << SRMMU_PPNPASHIFT;
                   1505:        for (i = 0; i < SRMMU_L2SIZE; i++, segtblrover += sizeof(long)) {
1.71      pk       1506:
                   1507:                sp = &rp->rg_segmap[i];
                   1508:
1.97      pk       1509:                te = srmmu_bypass_read(segtblrover);
1.55      pk       1510:                switch(te & SRMMU_TETYPE) {
1.62      pk       1511:                case SRMMU_TEINVALID:
1.55      pk       1512:                        break;
1.71      pk       1513:
1.62      pk       1514:                case SRMMU_TEPTE:
1.55      pk       1515: #ifdef DEBUG
1.325     martin   1516:                        prom_printf("mmu_setup4m_L2: converting L2 entry at segment 0x%x to L3\n",i);
1.55      pk       1517: #endif
1.71      pk       1518:                        /*
                   1519:                         * This segment entry covers 256KB of memory -- or
                   1520:                         * (NPTESG) pages -- which we must convert
                   1521:                         * into a 3-level description.
                   1522:                         */
1.55      pk       1523:                        for (k = 0; k < SRMMU_L3SIZE; k++) {
1.97      pk       1524:                                setpgt4m(&sp->sg_pte[k],
                   1525:                                        (te & SRMMU_L1PPNMASK) |
                   1526:                                        (te & SRMMU_L2PPNMASK) |
                   1527:                                        (k << SRMMU_L3PPNSHFT) |
                   1528:                                        (te & SRMMU_PGBITSMSK) |
                   1529:                                        ((te & SRMMU_PROT_MASK) |
                   1530:                                         PPROT_U2S_OMASK) |
                   1531:                                        SRMMU_TEPTE);
1.55      pk       1532:                        }
                   1533:                        break;
1.71      pk       1534:
1.62      pk       1535:                case SRMMU_TEPTD:
1.71      pk       1536:                        mmu_setup4m_L3(te, sp);
1.55      pk       1537:                        break;
1.71      pk       1538:
1.62      pk       1539:                default:
1.55      pk       1540:                        panic("mmu_setup4m_L2: unknown pagetable entry type");
                   1541:                }
                   1542:        }
                   1543: }
                   1544:
1.71      pk       1545: void
1.303     uwe      1546: mmu_setup4m_L3(int pagtblptd, struct segmap *sp)
1.55      pk       1547: {
1.124     pk       1548:        unsigned int pagtblrover;
                   1549:        int i;
                   1550:        unsigned int te;
1.55      pk       1551:
                   1552:        pagtblrover = (pagtblptd & ~SRMMU_TETYPE) << SRMMU_PPNPASHIFT;
                   1553:        for (i = 0; i < SRMMU_L3SIZE; i++, pagtblrover += sizeof(long)) {
1.97      pk       1554:                te = srmmu_bypass_read(pagtblrover);
1.55      pk       1555:                switch(te & SRMMU_TETYPE) {
1.62      pk       1556:                case SRMMU_TEINVALID:
1.55      pk       1557:                        break;
1.62      pk       1558:                case SRMMU_TEPTE:
1.97      pk       1559:                        setpgt4m(&sp->sg_pte[i], te | PPROT_U2S_OMASK);
1.277     pk       1560:                        pmap_kernel()->pm_stats.resident_count++;
1.55      pk       1561:                        break;
1.62      pk       1562:                case SRMMU_TEPTD:
1.55      pk       1563:                        panic("mmu_setup4m_L3: PTD found in L3 page table");
1.62      pk       1564:                default:
1.55      pk       1565:                        panic("mmu_setup4m_L3: unknown pagetable entry type");
                   1566:                }
                   1567:        }
                   1568: }
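/*
 * Editor's note (not in the original source): in all three walkers
 * above, the physical address of the next-level table is recovered
 * from a PTD as
 *
 *	pa = (ptd & ~SRMMU_TETYPE) << SRMMU_PPNPASHIFT;
 *
 * e.g. an (illustrative) PTD of 0x00fd0001 yields the table address
 * 0x00fd0000 << 4 = 0x0fd00000, and successive entries are then read
 * at pa, pa + sizeof(long), ... via srmmu_bypass_read().
 */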
1.210     thorpej  1569: #endif /* defined SUN4M || defined SUN4D */
1.1       deraadt  1570:
                   1571: /*----------------------------------------------------------------*/
                   1572:
1.261     pk       1573: #if defined(SUN4) || defined(SUN4C)
1.1       deraadt  1574: /*
                   1575:  * MMU management.
                   1576:  */
1.348.6.3! tls      1577: static int     me_alloc(struct mmuentry *, struct pmap *, int, int);
1.265     pk       1578: static void    me_free(struct pmap *, u_int);
1.267     pk       1579: #if defined(SUN4_MMU3L)
1.348.6.3! tls      1580: static int     region_alloc(struct mmuentry *, struct pmap *, int);
1.267     pk       1581: static void    region_free(struct pmap *, u_int);
                   1582: #endif
1.1       deraadt  1583:
                   1584:
                   1585: /*
                   1586:  * Allocate an MMU entry (i.e., a PMEG).
                   1587:  * If necessary, steal one from someone else.
                   1588:  * Put it on the tail of the given queue
                   1589:  * (which is either the LRU list or the locked list).
                   1590:  * The locked list is not actually ordered, but this is easiest.
                   1591:  * Also put it on the given (new) pmap's chain,
                   1592:  * enter its pmeg number into that pmap's segmap,
                   1593:  * and store the pmeg's new virtual segment number (me->me_vseg).
                   1594:  *
                   1595:  * This routine is large and complicated, but it must be fast
                   1596:  * since it implements the dynamic allocation of MMU entries.
                   1597:  */
1.265     pk       1598:
1.307     perry    1599: static inline int
1.348.6.3! tls      1600: me_alloc(struct mmuentry *mh, struct pmap *newpm, int newvreg, int newvseg)
1.124     pk       1601: {
                   1602:        struct mmuentry *me;
                   1603:        struct pmap *pm;
1.267     pk       1604:        int i, va, *ptep, pte;
1.1       deraadt  1605:        int ctx;
1.43      pk       1606:        struct regmap *rp;
                   1607:        struct segmap *sp;
1.1       deraadt  1608:
                   1609:        /* try free list first */
1.348.6.3! tls      1610:        if (!mmuq_empty(&segm_freelist)) {
        !          1611:                me = mmuq_first(&segm_freelist);
        !          1612:                mmuq_remove(me);
1.1       deraadt  1613: #ifdef DEBUG
                   1614:                if (me->me_pmap != NULL)
                   1615:                        panic("me_alloc: freelist entry has pmap");
                   1616:                if (pmapdebug & PDB_MMU_ALLOC)
1.66      christos 1617:                        printf("me_alloc: got pmeg %d\n", me->me_cookie);
1.1       deraadt  1618: #endif
1.348.6.3! tls      1619:                mmuq_insert_tail(mh, me);
1.1       deraadt  1620:
                   1621:                /* onto on pmap chain; pmap is already locked, if needed */
1.43      pk       1622:                TAILQ_INSERT_TAIL(&newpm->pm_seglist, me, me_pmchain);
1.70      pk       1623: #ifdef DIAGNOSTIC
                   1624:                pmap_stats.ps_npmeg_free--;
1.265     pk       1625:                if (mh == &segm_locked) {
1.70      pk       1626:                        pmap_stats.ps_npmeg_locked++;
1.265     pk       1627:                        me->me_statp = &pmap_stats.ps_npmeg_locked;
                   1628:                } else {
1.70      pk       1629:                        pmap_stats.ps_npmeg_lru++;
1.265     pk       1630:                        me->me_statp = &pmap_stats.ps_npmeg_lru;
                   1631:                }
1.70      pk       1632: #endif
1.1       deraadt  1633:
                   1634:                /* into pmap segment table, with backpointers */
                   1635:                me->me_pmap = newpm;
                   1636:                me->me_vseg = newvseg;
1.43      pk       1637:                me->me_vreg = newvreg;
1.1       deraadt  1638:
1.262     pk       1639:                return (me->me_cookie);
1.1       deraadt  1640:        }
                   1641:
                   1642:        /* no luck, take head of LRU list */
1.348.6.3! tls      1643:        if (mmuq_empty(&segm_lru))
1.1       deraadt  1644:                panic("me_alloc: all pmegs gone");
1.43      pk       1645:
1.348.6.3! tls      1646:        me = mmuq_first(&segm_lru);
1.1       deraadt  1647:        pm = me->me_pmap;
1.12      pk       1648: #ifdef DEBUG
1.1       deraadt  1649:        if (pmapdebug & (PDB_MMU_ALLOC | PDB_MMU_STEAL))
1.91      fair     1650:                printf("me_alloc: stealing pmeg 0x%x from pmap %p\n",
1.43      pk       1651:                    me->me_cookie, pm);
1.1       deraadt  1652: #endif
1.294     pk       1653:
                   1654:        mmu_stolenpmegs_evcnt.ev_count++;
                   1655:
1.1       deraadt  1656:        /*
                   1657:         * Remove from LRU list, and insert at end of new list
                   1658:         * (probably the LRU list again, but so what?).
                   1659:         */
1.348.6.3! tls      1660:        mmuq_remove(me);
        !          1661:        mmuq_insert_tail(mh, me);
1.43      pk       1662:
1.70      pk       1663: #ifdef DIAGNOSTIC
                   1664:        if (mh == &segm_locked) {
                   1665:                pmap_stats.ps_npmeg_lru--;
                   1666:                pmap_stats.ps_npmeg_locked++;
1.265     pk       1667:                me->me_statp = &pmap_stats.ps_npmeg_locked;
                   1668:        } else {
                   1669:                me->me_statp = &pmap_stats.ps_npmeg_lru;
1.70      pk       1670:        }
                   1671: #endif
                   1672:
1.43      pk       1673:        rp = &pm->pm_regmap[me->me_vreg];
                   1674:        sp = &rp->rg_segmap[me->me_vseg];
1.267     pk       1675:        ptep = sp->sg_pte;
1.1       deraadt  1676:
1.260     pk       1677: #ifdef DEBUG
                   1678:        if (sp->sg_pmeg != me->me_cookie)
                   1679:                panic("me_alloc: wrong sg_pmeg (%d != %d)",
                   1680:                                sp->sg_pmeg, me->me_cookie);
                   1681: #endif
                   1682:
1.1       deraadt  1683:        /*
                   1684:         * The PMEG must be mapped into some context so that we can
                   1685:         * read its PTEs.  Use its current context if it has one;
                   1686:         * if not, and since context 0 is reserved for the kernel,
                   1687:         * the simplest method is to switch to 0 and map the PMEG
                   1688:         * to virtual address 0---which, being a user space address,
                   1689:         * is by definition not in use.
                   1690:         *
                   1691:         * XXX do not have to flush cache immediately
                   1692:         */
1.71      pk       1693:        ctx = getcontext4();
1.259     pk       1694:
                   1695:        /*
                   1696:         * Even if we're stealing a PMEG from ourselves (i.e. if pm==newpm),
                   1697:         * we must make sure there are no user register windows in the CPU
                   1698:         * for the following reasons:
                   1699:         * (1) if we have a write-allocate cache and the segment we are
                   1700:         *     stealing contains stack pages, an interrupt during the
                   1701:         *     interval that starts at cache_flush_segment() below and ends
                    1702:         *     when the segment is finally removed from the MMU may cause
                   1703:         *     dirty cache lines to reappear.
                   1704:         * (2) when re-wiring this PMEG for use by another segment (e.g.
                   1705:         *     in mmu_pagein()) a window exists where the PTEs in this PMEG
                   1706:         *     point at arbitrary pages allocated to this address space.
                   1707:         *     Again, a register window flush at this point is likely to
                   1708:         *     cause data corruption in case the segment being rewired
                   1709:         *     contains stack virtual addresses.
                   1710:         */
                   1711:        write_user_windows();
1.43      pk       1712:        if (CTX_USABLE(pm,rp)) {
1.272     pk       1713:                setcontext4(pm->pm_ctxnum);
1.260     pk       1714:                va = VSTOVA(me->me_vreg, me->me_vseg);
                   1715: #ifdef DEBUG
                   1716:                if (getsegmap(va) != me->me_cookie)
1.272     pk       1717:                        panic("me_alloc: wrong pmeg in MMU (%d != %d)",
1.260     pk       1718:                                getsegmap(va), me->me_cookie);
                   1719: #endif
1.214     pk       1720:                cache_flush_segment(me->me_vreg, me->me_vseg, pm->pm_ctxnum);
1.1       deraadt  1721:        } else {
1.260     pk       1722:                va = 0;
1.259     pk       1723:                setcontext4(0);
1.69      pk       1724:                if (HASSUN4_MMU3L)
1.260     pk       1725:                        setregmap(va, tregion);
                   1726:                setsegmap(va, me->me_cookie);
1.1       deraadt  1727:                /*
                   1728:                 * No cache flush needed: it happened earlier when
                   1729:                 * the old context was taken.
                   1730:                 */
                   1731:        }
                   1732:
                   1733:        /*
                   1734:         * Record reference and modify bits for each page,
                   1735:         * and copy PTEs into kernel memory so that they can
                   1736:         * be reloaded later.
                   1737:         */
                   1738:        i = NPTESG;
                   1739:        do {
1.267     pk       1740:                int swbits = *ptep & PG_MBZ;
                   1741:                pte = getpte4(va);
                   1742:                if ((pte & (PG_V | PG_TYPE)) == (PG_V | PG_OBMEM)) {
1.236     pk       1743:                        struct vm_page *pg;
1.267     pk       1744:                        if ((pg = pvhead4_4c(pte)) != NULL)
                   1745:                                VM_MDPAGE_PVHEAD(pg)->pv_flags |= MR4_4C(pte);
1.1       deraadt  1746:                }
1.267     pk       1747:                *ptep++ = swbits | (pte & ~(PG_U|PG_M));
1.1       deraadt  1748:                va += NBPG;
                   1749:        } while (--i > 0);
                   1750:
                   1751:        /* update segment tables */
1.265     pk       1752:        if (CTX_USABLE(pm,rp)) {
                   1753:                va = VSTOVA(me->me_vreg,me->me_vseg);
                   1754:                if (pm != pmap_kernel() || HASSUN4_MMU3L)
                   1755:                        setsegmap(va, seginval);
                   1756:                else {
                   1757:                        /* Unmap segment from all contexts */
                   1758:                        for (i = ncontext; --i >= 0;) {
                   1759:                                setcontext4(i);
                   1760:                                setsegmap(va, seginval);
                   1761:                        }
                   1762:                }
                   1763:        }
1.43      pk       1764:        sp->sg_pmeg = seginval;
1.1       deraadt  1765:
                   1766:        /* off old pmap chain */
1.43      pk       1767:        TAILQ_REMOVE(&pm->pm_seglist, me, me_pmchain);
1.194     chs      1768:        setcontext4(ctx);
1.1       deraadt  1769:
                   1770:        /* onto new pmap chain; new pmap is already locked, if needed */
1.43      pk       1771:        TAILQ_INSERT_TAIL(&newpm->pm_seglist, me, me_pmchain);
1.1       deraadt  1772:
                   1773:        /* into new segment table, with backpointers */
                   1774:        me->me_pmap = newpm;
                   1775:        me->me_vseg = newvseg;
1.43      pk       1776:        me->me_vreg = newvreg;
1.1       deraadt  1777:
1.262     pk       1778:        return (me->me_cookie);
1.1       deraadt  1779: }
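/*
 * Editor's note: an illustrative call, not from the original source.
 * The MMU fault path (see the reference to mmu_pagein() in the
 * comments above) wires a hardware segment for a faulting address
 * roughly like this; the variable names are assumed.
 */
#if 0
	/* wire a PMEG for virtual region `vr', segment `vs' of pmap `pm' */
	sp->sg_pmeg = me_alloc(&segm_lru, pm, vr, vs);
	/*
	 * ...then reload the software PTEs saved in sp->sg_pte into
	 * the freshly wired hardware segment.
	 */
#endif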
                   1780:
                   1781: /*
                   1782:  * Free an MMU entry.
                   1783:  *
                   1784:  * Assumes the corresponding pmap is already locked.
1.260     pk       1785:  * Caller must update hardware.
1.1       deraadt  1786:  */
1.307     perry    1787: static inline void
1.303     uwe      1788: me_free(struct pmap *pm, u_int pmeg)
1.1       deraadt  1789: {
1.124     pk       1790:        struct mmuentry *me = &mmusegments[pmeg];
1.265     pk       1791: #ifdef DEBUG
1.124     pk       1792:        struct regmap *rp;
1.260     pk       1793:        int i, va, tpte, ctx;
                   1794: #endif
1.43      pk       1795:
1.265     pk       1796: #ifdef DEBUG
1.260     pk       1797:        rp = &pm->pm_regmap[me->me_vreg];
1.1       deraadt  1798:        if (pmapdebug & PDB_MMU_ALLOC)
1.66      christos 1799:                printf("me_free: freeing pmeg %d from pmap %p\n",
1.43      pk       1800:                    me->me_cookie, pm);
                   1801:        if (me->me_cookie != pmeg)
1.1       deraadt  1802:                panic("me_free: wrong mmuentry");
                   1803:        if (pm != me->me_pmap)
                   1804:                panic("me_free: pm != me_pmap");
1.260     pk       1805:        if (rp->rg_segmap[me->me_vseg].sg_pmeg != pmeg &&
                   1806:            rp->rg_segmap[me->me_vseg].sg_pmeg != seginval)
                   1807:                panic("me_free: wrong sg_pmeg (%d != %d)",
                   1808:                        rp->rg_segmap[me->me_vseg].sg_pmeg, pmeg);
1.1       deraadt  1809:
1.260     pk       1810:        /* check for spurious mappings (using temp. mapping in context 0) */
                   1811:        ctx = getcontext4();
                   1812:        setcontext4(0);
                   1813:        if (HASSUN4_MMU3L)
                   1814:                setregmap(0, tregion);
                   1815:        setsegmap(0, me->me_cookie);
                   1816:        va = 0;
1.1       deraadt  1817:        i = NPTESG;
                   1818:        do {
1.55      pk       1819:                tpte = getpte4(va);
1.260     pk       1820:                if ((tpte & PG_V) == PG_V)
                   1821:                        panic("me_free: segment not clean (pte=%x)", tpte);
1.1       deraadt  1822:                va += NBPG;
                   1823:        } while (--i > 0);
1.260     pk       1824:        setcontext4(ctx);
                   1825: #endif /* DEBUG */
1.1       deraadt  1826:
                   1827:        /* take mmu entry off pmap chain */
1.43      pk       1828:        TAILQ_REMOVE(&pm->pm_seglist, me, me_pmchain);
                   1829:
                   1830:        /* off LRU or lock chain */
1.348.6.3! tls      1831:        mmuq_remove(me);
1.70      pk       1832: #ifdef DIAGNOSTIC
1.265     pk       1833:        if (me->me_statp == NULL)
                   1834:                panic("me_statp");
                   1835:        (*me->me_statp)--;
                   1836:        me->me_statp = NULL;
1.70      pk       1837: #endif
1.43      pk       1838:
                   1839:        /* no associated pmap; on free list */
                   1840:        me->me_pmap = NULL;
1.348.6.3! tls      1841:        mmuq_insert_tail(&segm_freelist, me);
1.70      pk       1842: #ifdef DIAGNOSTIC
                   1843:        pmap_stats.ps_npmeg_free++;
                   1844: #endif
1.43      pk       1845: }
                   1846:
1.69      pk       1847: #if defined(SUN4_MMU3L)
1.43      pk       1848:
                   1849: /* XXX - Merge with segm_alloc/segm_free ? */
                   1850:
1.262     pk       1851: int
1.348.6.3! tls      1852: region_alloc(struct mmuentry *mh, struct pmap *newpm, int newvr)
1.43      pk       1853: {
1.124     pk       1854:        struct mmuentry *me;
                   1855:        struct pmap *pm;
1.43      pk       1856:        int ctx;
                   1857:        struct regmap *rp;
                   1858:
                   1859:        /* try free list first */
1.348.6.3! tls      1860:        if (!mmuq_empty(&region_freelist)) {
        !          1861:                me = mmuq_first(&region_freelist);
        !          1862:                mmuq_remove(me);
1.43      pk       1863: #ifdef DEBUG
                   1864:                if (me->me_pmap != NULL)
                   1865:                        panic("region_alloc: freelist entry has pmap");
                   1866:                if (pmapdebug & PDB_MMUREG_ALLOC)
1.91      fair     1867:                        printf("region_alloc: got smeg 0x%x\n", me->me_cookie);
1.43      pk       1868: #endif
1.348.6.3! tls      1869:                mmuq_insert_tail(mh, me);
1.43      pk       1870:
                    1871:                /* onto new pmap chain; pmap is already locked, if needed */
                   1872:                TAILQ_INSERT_TAIL(&newpm->pm_reglist, me, me_pmchain);
                   1873:
                   1874:                /* into pmap segment table, with backpointers */
                   1875:                me->me_pmap = newpm;
                   1876:                me->me_vreg = newvr;
                   1877:
1.262     pk       1878:                return (me->me_cookie);
1.43      pk       1879:        }
                   1880:
                   1881:        /* no luck, take head of LRU list */
1.348.6.3! tls      1882:        if (mmuq_empty(&region_lru))
1.43      pk       1883:                panic("region_alloc: all smegs gone");
                   1884:
1.348.6.3! tls      1885:        me = mmuq_first(&region_lru);
1.265     pk       1886:
1.43      pk       1887:        pm = me->me_pmap;
                   1888:        if (pm == NULL)
                   1889:                panic("region_alloc: LRU entry has no pmap");
                   1890:        if (pm == pmap_kernel())
                   1891:                panic("region_alloc: stealing from kernel");
                   1892: #ifdef DEBUG
                   1893:        if (pmapdebug & (PDB_MMUREG_ALLOC | PDB_MMUREG_STEAL))
1.91      fair     1894:                printf("region_alloc: stealing smeg 0x%x from pmap %p\n",
1.43      pk       1895:                    me->me_cookie, pm);
                   1896: #endif
                   1897:        /*
                   1898:         * Remove from LRU list, and insert at end of new list
                   1899:         * (probably the LRU list again, but so what?).
                   1900:         */
1.348.6.3! tls      1901:        mmuq_remove(me);
        !          1902:        mmuq_insert_tail(mh, me);
1.43      pk       1903:
                   1904:        rp = &pm->pm_regmap[me->me_vreg];
1.71      pk       1905:        ctx = getcontext4();
1.259     pk       1906:
                   1907:        /* Flush register windows; see comment in me_alloc() */
                   1908:        write_user_windows();
1.43      pk       1909:        if (pm->pm_ctx) {
1.259     pk       1910:                setcontext4(pm->pm_ctxnum);
1.214     pk       1911:                cache_flush_region(me->me_vreg, pm->pm_ctxnum);
1.43      pk       1912:        }
                   1913:
                   1914:        /* update region tables */
                   1915:        if (pm->pm_ctx)
                   1916:                setregmap(VRTOVA(me->me_vreg), reginval);
                   1917:        rp->rg_smeg = reginval;
                   1918:
                   1919:        /* off old pmap chain */
                   1920:        TAILQ_REMOVE(&pm->pm_reglist, me, me_pmchain);
1.71      pk       1921:        setcontext4(ctx);       /* done with old context */
1.43      pk       1922:
                   1923:        /* onto new pmap chain; new pmap is already locked, if needed */
                   1924:        TAILQ_INSERT_TAIL(&newpm->pm_reglist, me, me_pmchain);
                   1925:
                   1926:        /* into new segment table, with backpointers */
                   1927:        me->me_pmap = newpm;
                   1928:        me->me_vreg = newvr;
                   1929:
1.262     pk       1930:        return (me->me_cookie);
1.43      pk       1931: }
                   1932:
                   1933: /*
                   1934:  * Free an MMU entry.
                   1935:  * Assumes the corresponding pmap is already locked.
1.265     pk       1936:  * Caller must update hardware.
1.43      pk       1937:  */
                   1938: void
1.303     uwe      1939: region_free(struct pmap *pm, u_int smeg)
1.43      pk       1940: {
1.124     pk       1941:        struct mmuentry *me = &mmuregions[smeg];
1.43      pk       1942:
                   1943: #ifdef DEBUG
                   1944:        if (pmapdebug & PDB_MMUREG_ALLOC)
1.91      fair     1945:                printf("region_free: freeing smeg 0x%x from pmap %p\n",
1.43      pk       1946:                    me->me_cookie, pm);
                   1947:        if (me->me_cookie != smeg)
                   1948:                panic("region_free: wrong mmuentry");
                   1949:        if (pm != me->me_pmap)
                   1950:                panic("region_free: pm != me_pmap");
                   1951: #endif
                   1952:
                   1953:        /* take mmu entry off pmap chain */
                   1954:        TAILQ_REMOVE(&pm->pm_reglist, me, me_pmchain);
1.1       deraadt  1955:
                   1956:        /* off LRU or lock chain */
1.348.6.3! tls      1957:        mmuq_remove(me);
1.265     pk       1958:
                   1959:        /* no associated pmap; on free list */
                   1960:        me->me_pmap = NULL;
1.348.6.3! tls      1961:        mmuq_insert_tail(&region_freelist, me);
1.265     pk       1962: }
                   1963:
                   1964: static void
                   1965: mmu_pagein_reg(struct pmap *pm, struct regmap *rp, vaddr_t va,
1.348.6.3! tls      1966:                int vr, struct mmuentry *mh)
1.265     pk       1967: {
                   1968:        int i, s, smeg;
                   1969:
                   1970:        va = VA_ROUNDDOWNTOREG(va);
                   1971:        rp->rg_smeg = smeg = region_alloc(mh, pm, vr);
                   1972:
                   1973:        s = splvm();
1.43      pk       1974:        if (pm == pmap_kernel()) {
1.265     pk       1975:                /* Map region into all contexts */
                   1976:                int ctx = getcontext4();
                   1977:                i = ncontext - 1;
                   1978:                do {
                   1979:                        setcontext4(i);
                   1980:                        setregmap(va, smeg);
                   1981:                } while (--i >= 0);
                   1982:                setcontext4(ctx);
                   1983:        } else
                   1984:                setregmap(va, smeg);
                   1985:
                   1986:        /* Load PMEGs into this region */
                   1987:        for (i = 0; i < NSEGRG; i++) {
                   1988:                setsegmap(va, rp->rg_segmap[i].sg_pmeg);
                   1989:                va += NBPSG;
                   1990:        }
                   1991:        splx(s);
                   1992: }
                   1993: #endif /* SUN4_MMU3L */
                   1994:
1.267     pk       1995: static void
1.265     pk       1996: mmu_pmeg_lock(int pmeg)
                   1997: {
                   1998:        struct mmuentry *me = &mmusegments[pmeg];
1.303     uwe      1999:
1.348.6.3! tls      2000:        mmuq_remove(me);
        !          2001:        mmuq_insert_tail(&segm_locked, me);
1.268     pk       2002: #ifdef DIAGNOSTIC
                   2003:        (*me->me_statp)--;
                   2004:        pmap_stats.ps_npmeg_locked++;
                   2005:        me->me_statp = &pmap_stats.ps_npmeg_locked;
                   2006: #endif
1.265     pk       2007: }
                   2008:
1.267     pk       2009: static void
                   2010: mmu_pmeg_unlock(int pmeg)
                   2011: {
                   2012:        struct mmuentry *me = &mmusegments[pmeg];
1.303     uwe      2013:
1.348.6.3! tls      2014:        mmuq_remove(me);
        !          2015:        mmuq_insert_tail(&segm_lru, me);
1.268     pk       2016: #ifdef DIAGNOSTIC
                   2017:        (*me->me_statp)--;
                   2018:        pmap_stats.ps_npmeg_lru++;
                   2019:        me->me_statp = &pmap_stats.ps_npmeg_lru;
                   2020: #endif
1.267     pk       2021: }
                   2022:
                   2023: static void
1.265     pk       2024: mmu_pagein_seg(struct pmap *pm, struct segmap *sp, vaddr_t va,
1.348.6.3! tls      2025:                int vr, int vs, struct mmuentry *mh)
1.265     pk       2026: {
                   2027:        int s, i, pmeg, *pte;
                   2028:
1.294     pk       2029:        mmu_pagein_evcnt.ev_count++;
                   2030:
1.265     pk       2031:        va = VA_ROUNDDOWNTOSEG(va);
                   2032:        s = splvm();            /* paranoid */
                   2033:        sp->sg_pmeg = pmeg = me_alloc(mh, pm, vr, vs);
                   2034:        if (pm != pmap_kernel() || HASSUN4_MMU3L)
                   2035:                setsegmap(va, pmeg);
                   2036:        else {
                   2037:                /* Map kernel address into all contexts */
                   2038:                int ctx = getcontext4();
1.301     tsutsui  2039:                i = ncontext - 1;
1.265     pk       2040:                do {
                   2041:                        setcontext4(i);
                   2042:                        setsegmap(va, pmeg);
                   2043:                } while (--i >= 0);
                   2044:                setcontext4(ctx);
1.43      pk       2045:        }
1.1       deraadt  2046:
1.265     pk       2047:        /* reload segment: write PTEs into the MMU */
                   2048:        pte = sp->sg_pte;
                   2049:        i = NPTESG;
                   2050:        do {
1.267     pk       2051:                setpte4(va, *pte++ & ~PG_MBZ);
1.265     pk       2052:                va += NBPG;
                   2053:        } while (--i > 0);
                   2054:        splx(s);
1.1       deraadt  2055: }
                   2056:
                   2057: /*
                   2058:  * `Page in' (load or inspect) an MMU entry; called on page faults.
                   2059:  * Returns 1 if we reloaded the segment, -1 if the segment was
                   2060:  * already loaded and the page was marked valid (in which case the
                   2061:  * fault must be a bus error or something), or 0 (segment loaded but
                   2062:  * PTE not valid, or segment not loaded at all).
                   2063:  */
                   2064: int
1.303     uwe      2065: mmu_pagein(struct pmap *pm, vaddr_t va, int prot)
1.1       deraadt  2066: {
1.265     pk       2067:        int vr, vs, bits;
1.43      pk       2068:        struct regmap *rp;
                   2069:        struct segmap *sp;
                   2070:
1.322     ad       2071:        PMAP_LOCK();
                   2072:
1.45      pk       2073:        if (prot != VM_PROT_NONE)
                   2074:                bits = PG_V | ((prot & VM_PROT_WRITE) ? PG_W : 0);
                   2075:        else
                   2076:                bits = 0;
                   2077:
1.43      pk       2078:        vr = VA_VREG(va);
                   2079:        vs = VA_VSEG(va);
                   2080:        rp = &pm->pm_regmap[vr];
                   2081:
                   2082:        /* return 0 if we have no PMEGs to load */
1.322     ad       2083:        if (rp->rg_nsegmap == 0) {
                   2084:                PMAP_UNLOCK();
1.265     pk       2085:                return (0);
1.322     ad       2086:        }
1.265     pk       2087:
                   2088: #ifdef DIAGNOSTIC
1.43      pk       2089:        if (rp->rg_segmap == NULL)
1.265     pk       2090:                panic("pagein: no segmap");
                   2091: #endif
1.145     pk       2092:
1.69      pk       2093: #if defined(SUN4_MMU3L)
1.265     pk       2094:        if (HASSUN4_MMU3L && rp->rg_smeg == reginval)
                   2095:                mmu_pagein_reg(pm, rp, va, vr, &region_lru);
1.43      pk       2096: #endif
                   2097:        sp = &rp->rg_segmap[vs];
1.1       deraadt  2098:
                   2099:        /* return 0 if we have no PTEs to load */
1.322     ad       2100:        if (sp->sg_npte == 0) {
                   2101:                PMAP_UNLOCK();
1.1       deraadt  2102:                return (0);
1.322     ad       2103:        }
1.43      pk       2104:
1.1       deraadt  2105:        /* return -1 if the fault is `hard', 0 if not */
1.322     ad       2106:        if (sp->sg_pmeg != seginval) {
                   2107:                PMAP_UNLOCK();
1.55      pk       2108:                return (bits && (getpte4(va) & bits) == bits ? -1 : 0);
1.322     ad       2109:        }
1.1       deraadt  2110:
1.265     pk       2111:        mmu_pagein_seg(pm, sp, va, vr, vs, &segm_lru);
1.322     ad       2112:        PMAP_UNLOCK();
1.1       deraadt  2113:        return (1);
                   2114: }
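
                          /*
                           * A minimal sketch of a consumer of mmu_pagein()'s return value;
                           * the caller and the fault-signal helper shown here are
                           * hypothetical, not the actual locore/trap interface:
                           *
                           *	switch (mmu_pagein(pm, va, prot)) {
                           *	case 1:		(segment reloaded: just retry the access)
                           *		return;
                           *	case -1:	(`hard' fault: the PTE was already valid)
                           *		deliver_fault_signal(p);
                           *		return;
                           *	default:	(nothing loaded: hand the fault to uvm_fault())
                           *		break;
                           *	}
                           */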
1.265     pk       2115: #endif /* SUN4 or SUN4C */
1.55      pk       2116:
1.1       deraadt  2117: /*
                   2118:  * Allocate a context.  If necessary, steal one from someone else.
                   2119:  * Changes hardware context number and loads segment map.
                   2120:  *
                   2121:  * This routine is only ever called from locore.s just after it has
                   2122:  * saved away the previous process, so there are no active user windows.
                   2123:  */
                   2124: void
1.303     uwe      2125: ctx_alloc(struct pmap *pm)
1.1       deraadt  2126: {
1.124     pk       2127:        union ctxinfo *c;
1.325     martin   2128:        int cnum, i = 0, doflush;
1.124     pk       2129:        struct regmap *rp;
                   2130:        int gap_start, gap_end;
1.194     chs      2131:        vaddr_t va;
1.336     he       2132: #if defined(SUN4M) || defined(SUN4D)
1.325     martin   2133:        struct cpu_info *cpi;
1.336     he       2134: #endif
1.1       deraadt  2135:
1.55      pk       2136: /*XXX-GCC!*/gap_start=gap_end=0;
1.1       deraadt  2137: #ifdef DEBUG
                   2138:        if (pm->pm_ctx)
                   2139:                panic("ctx_alloc pm_ctx");
                   2140:        if (pmapdebug & PDB_CTX_ALLOC)
1.221     pk       2141:                printf("ctx_alloc[%d](%p)\n", cpu_number(), pm);
1.1       deraadt  2142: #endif
1.296     chs      2143:        if (CPU_HAS_SUNMMU) {
1.55      pk       2144:                gap_start = pm->pm_gap_start;
                   2145:                gap_end = pm->pm_gap_end;
                   2146:        }
1.13      pk       2147:
1.322     ad       2148:        mutex_spin_enter(&ctx_lock);
1.1       deraadt  2149:        if ((c = ctx_freelist) != NULL) {
                   2150:                ctx_freelist = c->c_nextfree;
1.193     mrg      2151:                cnum = c - ctxinfo;
1.49      pk       2152:                doflush = 0;
1.1       deraadt  2153:        } else {
                   2154:                if ((ctx_kick += ctx_kickdir) >= ncontext) {
                   2155:                        ctx_kick = ncontext - 1;
                   2156:                        ctx_kickdir = -1;
                   2157:                } else if (ctx_kick < 1) {
                   2158:                        ctx_kick = 1;
                   2159:                        ctx_kickdir = 1;
                   2160:                }
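                                          /*
                                           * The ping-pong above keeps ctx_kick within [1, ncontext-1],
                                           * so the steal victim picked below is always a user context;
                                           * context 0 is never chosen.  E.g., with ncontext == 8,
                                           * successive steals visit contexts 1,2,...,7,7,6,...,1,1,2,...
                                           */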
1.193     mrg      2161:                c = &ctxinfo[cnum = ctx_kick];
1.1       deraadt  2162: #ifdef DEBUG
                   2163:                if (c->c_pmap == NULL)
                   2164:                        panic("ctx_alloc cu_pmap");
                   2165:                if (pmapdebug & (PDB_CTX_ALLOC | PDB_CTX_STEAL))
1.221     pk       2166:                        printf("ctx_alloc[%d]: steal context %d from %p\n",
                   2167:                            cpu_number(), cnum, c->c_pmap);
1.1       deraadt  2168: #endif
                   2169:                c->c_pmap->pm_ctx = NULL;
1.217     pk       2170:                c->c_pmap->pm_ctxnum = 0;
1.69      pk       2171:                doflush = (CACHEINFO.c_vactype != VAC_NONE);
1.296     chs      2172:                if (CPU_HAS_SUNMMU) {
1.55      pk       2173:                        if (gap_start < c->c_pmap->pm_gap_start)
                   2174:                                gap_start = c->c_pmap->pm_gap_start;
                   2175:                        if (gap_end > c->c_pmap->pm_gap_end)
                   2176:                                gap_end = c->c_pmap->pm_gap_end;
                   2177:                }
1.1       deraadt  2178:        }
1.49      pk       2179:
1.1       deraadt  2180:        c->c_pmap = pm;
                   2181:        pm->pm_ctx = c;
                   2182:        pm->pm_ctxnum = cnum;
                   2183:
1.296     chs      2184:        if (CPU_HAS_SUNMMU) {
                   2185:
1.55      pk       2186:                /*
                   2187:                 * Write pmap's region (3-level MMU) or segment table into
                   2188:                 * the MMU.
                   2189:                 *
                   2190:                 * Only write those entries that actually map something in
                   2191:                 * this context by maintaining a pair of region numbers in
                   2192:                 * between which the pmap has no valid mappings.
                   2193:                 *
                   2194:                 * If a context was just allocated from the free list, trust
                   2195:                 * that all its pmeg numbers are `seginval'. We make sure this
                   2196:                 * is the case initially in pmap_bootstrap(). Otherwise, the
                   2197:                 * context was freed by calling ctx_free() in pmap_release(),
                   2198:                 * which in turn is supposedly called only when all mappings
                   2199:                 * have been removed.
                   2200:                 *
                   2201:                 * On the other hand, if the context had to be stolen from
                    2202:                 * another pmap, we possibly shrink the gap to the
                    2203:                 * intersection of the new and the previous pmap's gaps.
                   2204:                 */
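                                          /*
                                           * A worked example of the gap skip in the loop below, assuming
                                           * gap_start == 8 and gap_end == 120: once va reaches region 8,
                                           * va jumps to VRTOVA(120), i drops by the 112 skipped regions,
                                           * and rp advances past their regmap entries, so the empty
                                           * regions in between are never written into the MMU.
                                           */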
1.43      pk       2205:
1.80      pk       2206:                setcontext4(cnum);
1.55      pk       2207:                if (doflush)
1.214     pk       2208:                        cache_flush_context(cnum);
1.43      pk       2209:
1.55      pk       2210:                rp = pm->pm_regmap;
                   2211:                for (va = 0, i = NUREG; --i >= 0; ) {
                   2212:                        if (VA_VREG(va) >= gap_start) {
                   2213:                                va = VRTOVA(gap_end);
                   2214:                                i -= gap_end - gap_start;
                   2215:                                rp += gap_end - gap_start;
                   2216:                                if (i < 0)
                   2217:                                        break;
                   2218:                                /* mustn't re-enter this branch */
                   2219:                                gap_start = NUREG;
                   2220:                        }
1.69      pk       2221:                        if (HASSUN4_MMU3L) {
1.55      pk       2222:                                setregmap(va, rp++->rg_smeg);
                   2223:                                va += NBPRG;
1.69      pk       2224:                        } else {
1.124     pk       2225:                                int j;
                   2226:                                struct segmap *sp = rp->rg_segmap;
1.55      pk       2227:                                for (j = NSEGRG; --j >= 0; va += NBPSG)
                   2228:                                        setsegmap(va,
                    2229:                                                  sp ? sp++->sg_pmeg : seginval);
                   2230:                                rp++;
                   2231:                        }
1.43      pk       2232:                }
1.55      pk       2233:
1.210     thorpej  2234:        } else if (CPU_HAS_SRMMU) {
1.55      pk       2235:
1.210     thorpej  2236: #if defined(SUN4M) || defined(SUN4D)
1.55      pk       2237:                /*
                   2238:                 * Reload page and context tables to activate the page tables
                   2239:                 * for this context.
                   2240:                 *
1.203     pk       2241:                 * The gap stuff isn't really needed in the sun4m architecture,
1.55      pk       2242:                 * since we don't have to worry about excessive mappings (all
                   2243:                 * mappings exist since the page tables must be complete for
                   2244:                 * the mmu to be happy).
                   2245:                 *
                   2246:                 * If a context was just allocated from the free list, trust
                   2247:                 * that all of its mmu-edible page tables are zeroed out
                   2248:                 * (except for those associated with the kernel). We make
                   2249:                 * sure this is the case initially in pmap_bootstrap() and
                   2250:                 * pmap_init() (?).
                   2251:                 * Otherwise, the context was freed by calling ctx_free() in
                   2252:                 * pmap_release(), which in turn is supposedly called only
                   2253:                 * when all mappings have been removed.
                   2254:                 *
                   2255:                 * XXX: Do we have to flush cache after reloading ctx tbl?
                   2256:                 */
                   2257:
1.157     pk       2258:                /*
                   2259:                 * We need to flush the cache only when stealing a context
                   2260:                 * from another pmap. In that case it's Ok to switch the
1.217     pk       2261:                 * context and leave it set, since the context table
1.157     pk       2262:                 * will have a valid region table entry for this context
                   2263:                 * number.
                   2264:                 *
                   2265:                 * Otherwise, we switch to the new context after loading
                   2266:                 * the context table entry with the new pmap's region.
                   2267:                 */
                   2268:                if (doflush) {
1.214     pk       2269:                        cache_flush_context(cnum);
1.157     pk       2270:                }
1.152     pk       2271:
                   2272:                /*
                   2273:                 * The context allocated to a process is the same on all CPUs.
                   2274:                 * Here we install the per-CPU region table in each CPU's
                   2275:                 * context table slot.
                   2276:                 *
                   2277:                 * Note on multi-threaded processes: a context must remain
1.276     wiz      2278:                 * valid as long as any thread is still running on a CPU.
1.152     pk       2279:                 */
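                                          /*
                                           * The value stored below is an SRMMU page table descriptor
                                           * (PTD): the region table's physical address shifted into the
                                           * page-table-pointer field, with the entry-type bits set to
                                           * `PTD'.  Schematically (field widths per the SRMMU headers):
                                           *
                                           *	ptd = (pa >> SRMMU_PPNPASHIFT) | SRMMU_TEPTD;
                                           */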
1.325     martin   2280:                for (CPU_INFO_FOREACH(i, cpi)) {
1.133     pk       2281:                        setpgt4m(&cpi->ctx_tbl[cnum],
1.152     pk       2282:                                 (pm->pm_reg_ptps_pa[i] >> SRMMU_PPNPASHIFT) |
1.133     pk       2283:                                        SRMMU_TEPTD);
                   2284:                }
1.55      pk       2285:
1.217     pk       2286:                /* And finally switch to the new context */
                   2287:                (*cpuinfo.pure_vcache_flush)();
                   2288:                setcontext4m(cnum);
                   2289: #endif /* SUN4M || SUN4D */
1.13      pk       2290:        }
1.322     ad       2291:        mutex_spin_exit(&ctx_lock);
1.1       deraadt  2292: }
                   2293:
                   2294: /*
1.322     ad       2295:  * Give away a context.
1.1       deraadt  2296:  */
                   2297: void
1.303     uwe      2298: ctx_free(struct pmap *pm)
1.1       deraadt  2299: {
1.124     pk       2300:        union ctxinfo *c;
1.336     he       2301:        int ctx;
                   2302: #if defined(SUN4M) || defined(SUN4D)
1.325     martin   2303:        struct cpu_info *cpi;
1.336     he       2304: #endif
1.1       deraadt  2305:
1.194     chs      2306:        c = pm->pm_ctx;
1.217     pk       2307:        ctx = pm->pm_ctxnum;
1.1       deraadt  2308:        pm->pm_ctx = NULL;
1.217     pk       2309:        pm->pm_ctxnum = 0;
                   2310: #if defined(SUN4) || defined(SUN4C)
1.296     chs      2311:        if (CPU_HAS_SUNMMU) {
1.217     pk       2312:                int octx = getcontext4();
1.235     pk       2313:                setcontext4(ctx);
1.217     pk       2314:                cache_flush_context(ctx);
1.235     pk       2315:                setcontext4(octx);
1.217     pk       2316:        }
                   2317: #endif /* SUN4 || SUN4C */
                   2318:
1.322     ad       2319:        mutex_spin_enter(&ctx_lock);
                   2320:
1.210     thorpej  2321: #if defined(SUN4M) || defined(SUN4D)
1.217     pk       2322:        if (CPU_HAS_SRMMU) {
1.348.6.3! tls      2323:                CPU_INFO_ITERATOR i;
        !          2324:
        !          2325:                __USE(i);
1.293     pk       2326:
1.217     pk       2327:                cache_flush_context(ctx);
1.226     mrg      2328:                tlb_flush_context(ctx, PMAP_CPUSET(pm));
1.325     martin   2329:                for (CPU_INFO_FOREACH(i, cpi)) {
1.293     pk       2330:                        setpgt4m(&cpi->ctx_tbl[ctx], SRMMU_TEINVALID);
                   2331:                }
1.217     pk       2332:        }
1.55      pk       2333: #endif
1.156     pk       2334:
1.1       deraadt  2335:        c->c_nextfree = ctx_freelist;
                   2336:        ctx_freelist = c;
1.322     ad       2337:        mutex_spin_exit(&ctx_lock);
1.1       deraadt  2338: }
                   2339:
                   2340:
                   2341: /*----------------------------------------------------------------*/
                   2342:
                   2343: /*
                   2344:  * pvlist functions.
                   2345:  */
                   2346:
                   2347: /*
                   2348:  * Walk the given pv list, and for each PTE, set or clear some bits
                   2349:  * (e.g., PG_W or PG_NC).
                   2350:  *
                   2351:  * This routine flushes the cache for any page whose PTE changes,
                   2352:  * as long as the process has a context; this is overly conservative.
                   2353:  * It also copies ref and mod bits to the pvlist, on the theory that
                   2354:  * this might save work later.  (XXX should test this theory)
                   2355:  */
1.55      pk       2356:
                   2357: #if defined(SUN4) || defined(SUN4C)
                   2358:
1.1       deraadt  2359: void
1.303     uwe      2360: pv_changepte4_4c(struct vm_page *pg, int bis, int bic)
1.1       deraadt  2361: {
1.267     pk       2362:        int pte, *ptep;
1.115     pk       2363:        struct pvlist *pv;
                   2364:        struct pmap *pm;
                   2365:        int va, vr, vs;
1.1       deraadt  2366:        int ctx, s;
1.43      pk       2367:        struct regmap *rp;
                   2368:        struct segmap *sp;
1.1       deraadt  2369:
1.236     pk       2370:        pv = VM_MDPAGE_PVHEAD(pg);
                   2371:
1.1       deraadt  2372:        write_user_windows();           /* paranoid? */
1.175     thorpej  2373:        s = splvm();                    /* paranoid? */
1.236     pk       2374:        if (pv->pv_pmap == NULL) {
1.1       deraadt  2375:                splx(s);
                   2376:                return;
                   2377:        }
1.71      pk       2378:        ctx = getcontext4();
1.236     pk       2379:        for (; pv != NULL; pv = pv->pv_next) {
1.1       deraadt  2380:                pm = pv->pv_pmap;
                   2381:                va = pv->pv_va;
1.43      pk       2382:                vr = VA_VREG(va);
                   2383:                vs = VA_VSEG(va);
                   2384:                rp = &pm->pm_regmap[vr];
                   2385:                sp = &rp->rg_segmap[vs];
1.267     pk       2386:                ptep = &sp->sg_pte[VA_VPG(va)];
1.43      pk       2387:
                   2388:                if (sp->sg_pmeg == seginval) {
                   2389:                        /* not in hardware: just fix software copy */
1.267     pk       2390:                        *ptep = (*ptep | bis) & ~bic;
1.43      pk       2391:                } else {
1.1       deraadt  2392:                        /* in hardware: fix hardware copy */
1.43      pk       2393:                        if (CTX_USABLE(pm,rp)) {
1.71      pk       2394:                                setcontext4(pm->pm_ctxnum);
1.1       deraadt  2395:                                /* XXX should flush only when necessary */
1.267     pk       2396:                                pte = getpte4(va);
1.88      pk       2397:                                /*
                   2398:                                 * XXX: always flush cache; conservative, but
                   2399:                                 * needed to invalidate cache tag protection
                   2400:                                 * bits and when disabling caching.
                   2401:                                 */
1.214     pk       2402:                                cache_flush_page(va, pm->pm_ctxnum);
1.1       deraadt  2403:                        } else {
1.236     pk       2404:                                /* Make temp map in ctx 0 to access the PTE */
1.71      pk       2405:                                setcontext4(0);
1.69      pk       2406:                                if (HASSUN4_MMU3L)
1.43      pk       2407:                                        setregmap(0, tregion);
                   2408:                                setsegmap(0, sp->sg_pmeg);
1.18      deraadt  2409:                                va = VA_VPG(va) << PGSHIFT;
1.267     pk       2410:                                pte = getpte4(va);
1.1       deraadt  2411:                        }
1.267     pk       2412:                        if (pte & PG_V)
                   2413:                                VM_MDPAGE_PVHEAD(pg)->pv_flags |= MR4_4C(pte);
                   2414:                        pte = (pte | bis) & ~bic;
                   2415:                        setpte4(va, pte);
                   2416:                        *ptep = (*ptep & PG_MBZ) | pte;
1.1       deraadt  2417:                }
                   2418:        }
1.71      pk       2419:        setcontext4(ctx);
1.1       deraadt  2420:        splx(s);
                   2421: }
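
                          /*
                           * Typical uses of pv_changepte4_4c() elsewhere in this file:
                           *
                           *	pv_changepte4_4c(pg, PG_NC, 0);	(uncache all mappings of pg)
                           *	pv_changepte4_4c(pg, 0, PG_NC);	(make them cacheable again)
                           *
                           * pv_link4_4c() does the former when a bad alias appears;
                           * pv_unlink4_4c() does the latter once the aliases are gone.
                           */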
                   2422:
                   2423: /*
                   2424:  * Sync ref and mod bits in pvlist (turns off same in hardware PTEs).
                   2425:  * Returns the new flags.
                   2426:  *
                   2427:  * This is just like pv_changepte, but we never add or remove bits,
                   2428:  * hence never need to adjust software copies.
                   2429:  */
                   2430: int
1.303     uwe      2431: pv_syncflags4_4c(struct vm_page *pg)
1.1       deraadt  2432: {
1.124     pk       2433:        struct pvlist *pv;
                   2434:        struct pmap *pm;
1.267     pk       2435:        int pte, va, vr, vs, pmeg, flags;
1.1       deraadt  2436:        int ctx, s;
1.43      pk       2437:        struct regmap *rp;
                   2438:        struct segmap *sp;
1.1       deraadt  2439:
1.236     pk       2440:        pv = VM_MDPAGE_PVHEAD(pg);
                   2441:
1.175     thorpej  2442:        s = splvm();                    /* paranoid? */
1.287     pk       2443:        if (pv->pv_pmap == NULL) {
                   2444:                /* Page not mapped; pv_flags is already up to date */
1.1       deraadt  2445:                splx(s);
                   2446:                return (0);
                   2447:        }
1.71      pk       2448:        ctx = getcontext4();
1.236     pk       2449:        flags = pv->pv_flags;
                   2450:        for (; pv != NULL; pv = pv->pv_next) {
1.1       deraadt  2451:                pm = pv->pv_pmap;
                   2452:                va = pv->pv_va;
1.43      pk       2453:                vr = VA_VREG(va);
                   2454:                vs = VA_VSEG(va);
                   2455:                rp = &pm->pm_regmap[vr];
                   2456:                sp = &rp->rg_segmap[vs];
                   2457:                if ((pmeg = sp->sg_pmeg) == seginval)
1.1       deraadt  2458:                        continue;
1.43      pk       2459:                if (CTX_USABLE(pm,rp)) {
1.71      pk       2460:                        setcontext4(pm->pm_ctxnum);
1.1       deraadt  2461:                        /* XXX should flush only when necessary */
1.267     pk       2462:                        pte = getpte4(va);
                   2463:                        if (pte & PG_M)
1.214     pk       2464:                                cache_flush_page(va, pm->pm_ctxnum);
1.1       deraadt  2465:                } else {
1.236     pk       2466:                        /* Make temp map in ctx 0 to access the PTE */
1.71      pk       2467:                        setcontext4(0);
1.69      pk       2468:                        if (HASSUN4_MMU3L)
1.43      pk       2469:                                setregmap(0, tregion);
1.1       deraadt  2470:                        setsegmap(0, pmeg);
1.18      deraadt  2471:                        va = VA_VPG(va) << PGSHIFT;
1.267     pk       2472:                        pte = getpte4(va);
1.1       deraadt  2473:                }
1.267     pk       2474:                if ((pte & (PG_M|PG_U)) != 0 && (pte & PG_V) != 0) {
                   2475:                        flags |= MR4_4C(pte);
                   2476:                        pte &= ~(PG_M|PG_U);
                   2477:                        setpte4(va, pte);
1.1       deraadt  2478:                }
                   2479:        }
1.236     pk       2480:
                   2481:        VM_MDPAGE_PVHEAD(pg)->pv_flags = flags;
1.71      pk       2482:        setcontext4(ctx);
1.1       deraadt  2483:        splx(s);
                   2484:        return (flags);
                   2485: }
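
                          /*
                           * A sketch of a consumer of pv_syncflags4_4c(), in the style of
                           * pmap_is_modified(); this assumes MR4_4C() is a pure bit
                           * rearrangement, as its uses above suggest:
                           *
                           *	int flags = pv_syncflags4_4c(pg);
                           *	bool modified = (flags & MR4_4C(PG_M)) != 0;
                           */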
                   2486:
                   2487: /*
                   2488:  * pv_unlink is a helper function for pmap_remove.
                    2489:  * It takes the vm_page of some physical address and removes the
                    2490:  * appropriate (pmap, va) entry from its pvlist.
                   2491:  *
                   2492:  * Once the entry is removed, if the pv_table head has the cache
                   2493:  * inhibit bit set, see if we can turn that off; if so, walk the
                   2494:  * pvlist and turn off PG_NC in each PTE.  (The pvlist is by
                   2495:  * definition nonempty, since it must have at least two elements
                   2496:  * in it to have PV_NC set, and we only remove one here.)
                    2497:  * in it to have PV_ANC set, and we only remove one here.)
1.43      pk       2498: /*static*/ void
1.303     uwe      2499: pv_unlink4_4c(struct vm_page *pg, struct pmap *pm, vaddr_t va)
1.1       deraadt  2500: {
1.236     pk       2501:        struct pvlist *pv0, *npv;
                   2502:
                   2503:        pv0 = VM_MDPAGE_PVHEAD(pg);
                   2504:        npv = pv0->pv_next;
1.1       deraadt  2505:
                   2506:        /*
                   2507:         * First entry is special (sigh).
                   2508:         */
1.236     pk       2509:        if (pv0->pv_pmap == pm && pv0->pv_va == va) {
1.1       deraadt  2510:                pmap_stats.ps_unlink_pvfirst++;
                   2511:                if (npv != NULL) {
1.115     pk       2512:                        /*
                   2513:                         * Shift next entry into the head.
                   2514:                         * Make sure to retain the REF, MOD and ANC flags.
                   2515:                         */
1.236     pk       2516:                        pv0->pv_next = npv->pv_next;
                   2517:                        pv0->pv_pmap = npv->pv_pmap;
                   2518:                        pv0->pv_va = npv->pv_va;
                   2519:                        pv0->pv_flags &= ~PV_NC;
                   2520:                        pv0->pv_flags |= (npv->pv_flags & PV_NC);
1.122     pk       2521:                        pool_put(&pv_pool, npv);
1.86      pk       2522:                } else {
1.115     pk       2523:                        /*
                   2524:                         * No mappings left; we still need to maintain
                    2525:                         * the REF and MOD flags, since pmap_is_modified()
                   2526:                         * can still be called for this page.
                   2527:                         */
1.236     pk       2528:                        pv0->pv_pmap = NULL;
                   2529:                        pv0->pv_flags &= ~(PV_NC|PV_ANC);
1.86      pk       2530:                        return;
                   2531:                }
1.1       deraadt  2532:        } else {
1.124     pk       2533:                struct pvlist *prev;
1.1       deraadt  2534:
1.278     pk       2535:                pmap_stats.ps_unlink_pvsearch++;
1.236     pk       2536:                for (prev = pv0;; prev = npv, npv = npv->pv_next) {
1.278     pk       2537:                        if (npv == NULL) {
                   2538:                                panic("pv_unlink: pm %p is missing on pg %p",
                   2539:                                        pm, pg);
                   2540:                        }
1.1       deraadt  2541:                        if (npv->pv_pmap == pm && npv->pv_va == va)
                   2542:                                break;
                   2543:                }
                   2544:                prev->pv_next = npv->pv_next;
1.122     pk       2545:                pool_put(&pv_pool, npv);
1.1       deraadt  2546:        }
1.236     pk       2547:        if ((pv0->pv_flags & (PV_NC|PV_ANC)) == PV_ANC) {
1.1       deraadt  2548:                /*
1.236     pk       2549:                 * Not cached: check whether we can fix that now.
1.1       deraadt  2550:                 */
1.236     pk       2551:                va = pv0->pv_va;
                   2552:                for (npv = pv0->pv_next; npv != NULL; npv = npv->pv_next)
1.182     pk       2553:                        if (BADALIAS(va, npv->pv_va) ||
                   2554:                            (npv->pv_flags & PV_NC) != 0)
1.1       deraadt  2555:                                return;
1.236     pk       2556:                pv0->pv_flags &= ~PV_ANC;
                   2557:                pv_changepte4_4c(pg, 0, PG_NC);
1.1       deraadt  2558:        }
                   2559: }
                   2560:
                   2561: /*
                   2562:  * pv_link is the inverse of pv_unlink, and is used in pmap_enter.
                    2563:  * It may set PG_NC in the PTE prototype (*pteprotop) if the pvlist
                    2564:  * says the address cannot be cached; returns 0, or ENOMEM on failure.
                   2565:  */
1.43      pk       2566: /*static*/ int
1.303     uwe      2567: pv_link4_4c(struct vm_page *pg, struct pmap *pm, vaddr_t va,
                   2568:            unsigned int *pteprotop)
1.1       deraadt  2569: {
1.278     pk       2570:        struct pvlist *pv0, *pv, *npv;
                   2571:        int nc = (*pteprotop & PG_NC) != 0 ? PV_NC : 0;
1.115     pk       2572:
1.236     pk       2573:        pv0 = VM_MDPAGE_PVHEAD(pg);
1.1       deraadt  2574:
1.236     pk       2575:        if (pv0->pv_pmap == NULL) {
1.1       deraadt  2576:                /* no pvlist entries yet */
                   2577:                pmap_stats.ps_enter_firstpv++;
1.236     pk       2578:                pv0->pv_next = NULL;
                   2579:                pv0->pv_pmap = pm;
                   2580:                pv0->pv_va = va;
1.278     pk       2581:                pv0->pv_flags |= nc;
                   2582:                return (0);
1.1       deraadt  2583:        }
1.278     pk       2584:
                   2585:        /*
1.305     uwe      2586:         * Allocate the new PV entry now, and, if that fails, bail out
1.278     pk       2587:         * before changing the cacheable state of the existing mappings.
                   2588:         */
                   2589:        npv = pool_get(&pv_pool, PR_NOWAIT);
                   2590:        if (npv == NULL)
                   2591:                return (ENOMEM);
                   2592:
                   2593:        pmap_stats.ps_enter_secondpv++;
                   2594:
1.1       deraadt  2595:        /*
                   2596:         * Before entering the new mapping, see if
                   2597:         * it will cause old mappings to become aliased
                   2598:         * and thus need to be `discached'.
                   2599:         */
1.236     pk       2600:        if (pv0->pv_flags & PV_ANC) {
1.1       deraadt  2601:                /* already uncached, just stay that way */
1.278     pk       2602:                *pteprotop |= PG_NC;
                   2603:                goto link_npv;
                   2604:        }
                   2605:
                   2606:        for (pv = pv0; pv != NULL; pv = pv->pv_next) {
                   2607:                if ((pv->pv_flags & PV_NC) != 0) {
                   2608:                        *pteprotop |= PG_NC;
                   2609: #ifdef DEBUG
                   2610:                        /* Check currently illegal condition */
                   2611:                        if (nc == 0)
                   2612:                                printf("pv_link: proc %s, va=0x%lx: "
1.184     pk       2613:                                "unexpected uncached mapping at 0x%lx\n",
1.278     pk       2614:                                    curproc ? curproc->p_comm : "--",
                   2615:                                    va, pv->pv_va);
1.182     pk       2616: #endif
1.278     pk       2617:                }
                   2618:                if (BADALIAS(va, pv->pv_va)) {
1.43      pk       2619: #ifdef DEBUG
1.278     pk       2620:                        if (pmapdebug & PDB_CACHESTUFF)
                   2621:                                printf(
                   2622:                        "pv_link: badalias: proc %s, 0x%lx<=>0x%lx, pg %p\n",
                   2623:                                curproc ? curproc->p_comm : "--",
                   2624:                                va, pv->pv_va, pg);
1.43      pk       2625: #endif
1.278     pk       2626:                        /* Mark list head `uncached due to aliases' */
                   2627:                        pv0->pv_flags |= PV_ANC;
                   2628:                        pv_changepte4_4c(pg, PG_NC, 0);
                   2629:                        *pteprotop |= PG_NC;
                   2630:                        break;
1.1       deraadt  2631:                }
                   2632:        }
1.278     pk       2633:
                   2634: link_npv:
1.236     pk       2635:        npv->pv_next = pv0->pv_next;
1.1       deraadt  2636:        npv->pv_pmap = pm;
                   2637:        npv->pv_va = va;
1.278     pk       2638:        npv->pv_flags = nc;
1.236     pk       2639:        pv0->pv_next = npv;
1.278     pk       2640:        return (0);
1.1       deraadt  2641: }
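
                          /*
                           * An illustration of the BADALIAS() test used in pv_link4_4c()
                           * above: on a virtually indexed cache, two mappings of one
                           * physical page are incoherent when their virtual addresses
                           * differ in the cache-index bits, since the data may then sit in
                           * two cache lines at once.  Assuming, for illustration only, a
                           * 64KB direct-mapped VAC indexed by (va & 0xffff): va1 = 0x10000
                           * and va2 = 0x28000 select different lines, so pv_link4_4c()
                           * sets PV_ANC on the list head and uncaches every mapping via
                           * pv_changepte4_4c(pg, PG_NC, 0).
                           */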
                   2642:
1.236     pk       2643: #endif /* SUN4 || SUN4C */
1.55      pk       2644:
1.210     thorpej  2645: #if defined(SUN4M) || defined(SUN4D)   /* SRMMU versions of above */
1.1       deraadt  2646: /*
1.55      pk       2647:  * Walk the given pv list, and for each PTE, set or clear some bits
                   2648:  * (e.g., PG_W or PG_NC).
                   2649:  *
                   2650:  * This routine flushes the cache for any page whose PTE changes,
                   2651:  * as long as the process has a context; this is overly conservative.
                   2652:  * It also copies ref and mod bits to the pvlist, on the theory that
                   2653:  * this might save work later.  (XXX should test this theory)
1.243     pk       2654:  *
                   2655:  * Called with PV lock and pmap main lock held.
1.1       deraadt  2656:  */
1.53      christos 2657: void
1.303     uwe      2658: pv_changepte4m(struct vm_page *pg, int bis, int bic)
1.55      pk       2659: {
1.115     pk       2660:        struct pvlist *pv;
                   2661:        struct pmap *pm;
1.243     pk       2662:        vaddr_t va;
1.55      pk       2663:        struct regmap *rp;
1.72      pk       2664:        struct segmap *sp;
1.1       deraadt  2665:
1.236     pk       2666:        pv = VM_MDPAGE_PVHEAD(pg);
1.243     pk       2667:        if (pv->pv_pmap == NULL)
                   2668:                return;
1.242     pk       2669:
1.236     pk       2670:        for (; pv != NULL; pv = pv->pv_next) {
1.55      pk       2671:                pm = pv->pv_pmap;
1.243     pk       2672:                /* XXXSMP: should lock pm */
1.55      pk       2673:                va = pv->pv_va;
1.243     pk       2674:                rp = &pm->pm_regmap[VA_VREG(va)];
1.72      pk       2675:                sp = &rp->rg_segmap[VA_VSEG(va)];
                   2676:
                   2677:                if (pm->pm_ctx) {
1.88      pk       2678:                        /*
                   2679:                         * XXX: always flush cache; conservative, but
                   2680:                         * needed to invalidate cache tag protection
                   2681:                         * bits and when disabling caching.
                   2682:                         */
1.214     pk       2683:                        cache_flush_page(va, pm->pm_ctxnum);
1.72      pk       2684:                }
                   2685:
1.348.6.3! tls      2686:                KASSERT((sp->sg_pte[VA_SUN4M_VPG(va)] & SRMMU_TETYPE) ==
        !          2687:                        SRMMU_TEPTE);
1.236     pk       2688:                VM_MDPAGE_PVHEAD(pg)->pv_flags |= MR4M(updatepte4m(va,
1.226     mrg      2689:                    &sp->sg_pte[VA_SUN4M_VPG(va)], bic, bis, pm->pm_ctxnum,
                   2690:                    PMAP_CPUSET(pm)));
1.55      pk       2691:        }
                   2692: }
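
                          /*
                           * Typical uses of pv_changepte4m() elsewhere in this file:
                           *
                           *	pv_changepte4m(pg, 0, SRMMU_PG_C);	(uncache all mappings of pg)
                           *	pv_changepte4m(pg, SRMMU_PG_C, 0);	(make them cacheable again)
                           *
                           * pv_link4m() does the former when a bad alias appears;
                           * pv_unlink4m() does the latter once the aliases are gone.
                           */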
                   2693:
                   2694: /*
                   2695:  * Sync ref and mod bits in pvlist. If page has been ref'd or modified,
                   2696:  * update ref/mod bits in pvlist, and clear the hardware bits.
                   2697:  *
                   2698:  * Return the new flags.
                   2699:  */
                   2700: int
1.303     uwe      2701: pv_syncflags4m(struct vm_page *pg)
1.55      pk       2702: {
1.124     pk       2703:        struct pvlist *pv;
                   2704:        struct pmap *pm;
1.287     pk       2705:        int va, flags;
1.217     pk       2706:        int s;
1.55      pk       2707:        struct regmap *rp;
                   2708:        struct segmap *sp;
1.291     pk       2709:        int tpte;
1.55      pk       2710:
1.243     pk       2711:        s = splvm();
1.322     ad       2712:        PMAP_LOCK();
1.236     pk       2713:        pv = VM_MDPAGE_PVHEAD(pg);
1.287     pk       2714:        if (pv->pv_pmap == NULL) {
                   2715:                /* Page not mapped; pv_flags is already up to date */
1.242     pk       2716:                flags = 0;
                   2717:                goto out;
1.55      pk       2718:        }
1.236     pk       2719:
                   2720:        flags = pv->pv_flags;
                   2721:        for (; pv != NULL; pv = pv->pv_next) {
1.55      pk       2722:                pm = pv->pv_pmap;
                   2723:                va = pv->pv_va;
1.287     pk       2724:                rp = &pm->pm_regmap[VA_VREG(va)];
                   2725:                sp = &rp->rg_segmap[VA_VSEG(va)];
1.194     chs      2726:
1.291     pk       2727:                tpte = sp->sg_pte[VA_SUN4M_VPG(va)];
1.292     pk       2728:                if ((tpte & SRMMU_TETYPE) == SRMMU_TEPTE &&
                   2729:                    (tpte & (SRMMU_PG_R|SRMMU_PG_M)) != 0) {
                   2730:                        /*
                   2731:                         * Flush cache if modified to make sure the PTE
                   2732:                         * M bit will be set again on the next write access.
                   2733:                         */
                   2734:                        if (pm->pm_ctx && (tpte & SRMMU_PG_M) == SRMMU_PG_M)
                   2735:                                cache_flush_page(va, pm->pm_ctxnum);
1.291     pk       2736:
1.292     pk       2737:                        flags |= MR4M(updatepte4m(va,
                   2738:                                        &sp->sg_pte[VA_SUN4M_VPG(va)],
1.217     pk       2739:                                        SRMMU_PG_M | SRMMU_PG_R,
1.287     pk       2740:                                        0, pm->pm_ctxnum, PMAP_CPUSET(pm)));
1.292     pk       2741:                }
1.55      pk       2742:        }
1.242     pk       2743:
1.236     pk       2744:        VM_MDPAGE_PVHEAD(pg)->pv_flags = flags;
1.242     pk       2745: out:
1.322     ad       2746:        PMAP_UNLOCK();
1.55      pk       2747:        splx(s);
                   2748:        return (flags);
                   2749: }
                   2750:
1.195     mrg      2751: /*
                   2752:  * Should be called with pmap already locked.
                   2753:  */
1.55      pk       2754: void
1.303     uwe      2755: pv_unlink4m(struct vm_page *pg, struct pmap *pm, vaddr_t va)
1.55      pk       2756: {
1.236     pk       2757:        struct pvlist *pv0, *npv;
                   2758:
                   2759:        pv0 = VM_MDPAGE_PVHEAD(pg);
1.55      pk       2760:
1.236     pk       2761:        npv = pv0->pv_next;
1.55      pk       2762:        /*
                   2763:         * First entry is special (sigh).
                   2764:         */
1.236     pk       2765:        if (pv0->pv_pmap == pm && pv0->pv_va == va) {
1.55      pk       2766:                pmap_stats.ps_unlink_pvfirst++;
                   2767:                if (npv != NULL) {
1.115     pk       2768:                        /*
                   2769:                         * Shift next entry into the head.
1.155     pk       2770:                         * Make sure to retain the REF, MOD and ANC flags
                   2771:                         * on the list head.
1.115     pk       2772:                         */
1.236     pk       2773:                        pv0->pv_next = npv->pv_next;
                   2774:                        pv0->pv_pmap = npv->pv_pmap;
                   2775:                        pv0->pv_va = npv->pv_va;
                   2776:                        pv0->pv_flags &= ~PV_NC;
                   2777:                        pv0->pv_flags |= (npv->pv_flags & PV_NC);
1.122     pk       2778:                        pool_put(&pv_pool, npv);
1.86      pk       2779:                } else {
1.115     pk       2780:                        /*
1.155     pk       2781:                         * No mappings left; we need to maintain
                   2782:                         * the REF and MOD flags, since pmap_is_modified()
1.115     pk       2783:                         * can still be called for this page.
                   2784:                         */
1.236     pk       2785:                        pv0->pv_pmap = NULL;
                   2786:                        pv0->pv_flags &= ~(PV_NC|PV_ANC);
1.322     ad       2787:                        return;
1.86      pk       2788:                }
1.55      pk       2789:        } else {
1.124     pk       2790:                struct pvlist *prev;
1.55      pk       2791:
1.278     pk       2792:                pmap_stats.ps_unlink_pvsearch++;
1.236     pk       2793:                for (prev = pv0;; prev = npv, npv = npv->pv_next) {
1.242     pk       2794:                        if (npv == NULL) {
1.278     pk       2795:                                panic("pv_unlink: pm %p is missing on pg %p",
                   2796:                                        pm, pg);
1.242     pk       2798:                        }
1.55      pk       2799:                        if (npv->pv_pmap == pm && npv->pv_va == va)
                   2800:                                break;
                   2801:                }
                   2802:                prev->pv_next = npv->pv_next;
1.122     pk       2803:                pool_put(&pv_pool, npv);
1.55      pk       2804:        }
1.236     pk       2805:
                   2806:        if ((pv0->pv_flags & (PV_NC|PV_ANC)) == PV_ANC) {
1.194     chs      2807:
1.55      pk       2808:                /*
1.236     pk       2809:                 * Not cached: check whether we can fix that now.
1.55      pk       2810:                 */
1.236     pk       2811:                va = pv0->pv_va;
                   2812:                for (npv = pv0->pv_next; npv != NULL; npv = npv->pv_next)
1.115     pk       2813:                        if (BADALIAS(va, npv->pv_va) ||
1.182     pk       2814:                            (npv->pv_flags & PV_NC) != 0)
1.322     ad       2815:                                return;
1.182     pk       2816: #ifdef DEBUG
                   2817:                if (pmapdebug & PDB_CACHESTUFF)
                   2818:                        printf(
1.236     pk       2819:                        "pv_unlink: alias ok: proc %s, va 0x%lx, pg %p\n",
1.182     pk       2820:                                curproc ? curproc->p_comm : "--",
1.236     pk       2821:                                va, pg);
1.182     pk       2822: #endif
1.236     pk       2823:                pv0->pv_flags &= ~PV_ANC;
                   2824:                pv_changepte4m(pg, SRMMU_PG_C, 0);
1.55      pk       2825:        }
                   2826: }
                   2827:
                   2828: /*
                   2829:  * pv_link is the inverse of pv_unlink, and is used in pmap_enter.
1.242     pk       2830:  * May turn off the cacheable bit in the pte prototype for the new mapping.
                   2831:  * Called with pm locked.
1.55      pk       2832:  */
                   2833: /*static*/ int
1.303     uwe      2834: pv_link4m(struct vm_page *pg, struct pmap *pm, vaddr_t va,
                   2835:          unsigned int *pteprotop)
1.55      pk       2836: {
1.243     pk       2837:        struct pvlist *pv0, *pv, *npv;
1.242     pk       2838:        int nc = (*pteprotop & SRMMU_PG_C) == 0 ? PV_NC : 0;
                   2839:        int error = 0;
1.115     pk       2840:
1.236     pk       2841:        pv0 = VM_MDPAGE_PVHEAD(pg);
1.55      pk       2842:
1.236     pk       2843:        if (pv0->pv_pmap == NULL) {
1.55      pk       2844:                /* no pvlist entries yet */
                   2845:                pmap_stats.ps_enter_firstpv++;
1.236     pk       2846:                pv0->pv_next = NULL;
                   2847:                pv0->pv_pmap = pm;
                   2848:                pv0->pv_va = va;
1.242     pk       2849:                pv0->pv_flags |= nc;
                   2850:                goto out;
                   2851:        }
                   2852:
1.243     pk       2853:        /*
1.305     uwe      2854:         * Allocate the new PV entry now, and, if that fails, bail out
1.243     pk       2855:         * before changing the cacheable state of the existing mappings.
                   2856:         */
1.242     pk       2857:        npv = pool_get(&pv_pool, PR_NOWAIT);
                   2858:        if (npv == NULL) {
                   2859:                error = ENOMEM;
                   2860:                goto out;
1.55      pk       2861:        }
1.194     chs      2862:
1.278     pk       2863:        pmap_stats.ps_enter_secondpv++;
                   2864:
1.55      pk       2865:        /*
1.242     pk       2866:         * See if the new mapping will cause old mappings to
                   2867:         * become aliased and thus need to be `discached'.
1.55      pk       2868:         */
1.236     pk       2869:        if ((pv0->pv_flags & PV_ANC) != 0) {
1.55      pk       2870:                /* already uncached, just stay that way */
1.242     pk       2871:                *pteprotop &= ~SRMMU_PG_C;
1.245     pk       2872:                goto link_npv;
1.242     pk       2873:        }
                   2874:
1.243     pk       2875:        for (pv = pv0; pv != NULL; pv = pv->pv_next) {
                   2876:                if ((pv->pv_flags & PV_NC) != 0) {
1.242     pk       2877:                        *pteprotop &= ~SRMMU_PG_C;
                   2878: #ifdef DEBUG
                   2879:                        /* Check currently illegal condition */
                   2880:                        if (nc == 0)
                   2881:                                printf("pv_link: proc %s, va=0x%lx: "
1.184     pk       2882:                                "unexpected uncached mapping at 0x%lx\n",
1.242     pk       2883:                                    curproc ? curproc->p_comm : "--",
1.243     pk       2884:                                    va, pv->pv_va);
1.182     pk       2885: #endif
1.242     pk       2886:                }
1.243     pk       2887:                if (BADALIAS(va, pv->pv_va)) {
1.55      pk       2888: #ifdef DEBUG
1.242     pk       2889:                        if (pmapdebug & PDB_CACHESTUFF)
                   2890:                                printf(
1.236     pk       2891:                        "pv_link: badalias: proc %s, 0x%lx<=>0x%lx, pg %p\n",
1.242     pk       2892:                                curproc ? curproc->p_comm : "--",
1.243     pk       2893:                                va, pv->pv_va, pg);
1.55      pk       2894: #endif
1.242     pk       2895:                        /* Mark list head `uncached due to aliases' */
                   2896:                        pv0->pv_flags |= PV_ANC;
                   2897:                        pv_changepte4m(pg, 0, SRMMU_PG_C);
                   2898:                        *pteprotop &= ~SRMMU_PG_C;
                   2899:                        break;
1.55      pk       2900:                }
                   2901:        }
1.236     pk       2902:
1.245     pk       2903: link_npv:
1.243     pk       2904:        /* Now link in the new PV entry */
                   2905:        npv->pv_next = pv0->pv_next;
                   2906:        npv->pv_pmap = pm;
                   2907:        npv->pv_va = va;
                   2908:        npv->pv_flags = nc;
                   2909:        pv0->pv_next = npv;
                   2910:
1.242     pk       2911: out:
1.278     pk       2912:        return (error);
1.55      pk       2913: }
1.184     pk       2914: #endif
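/*
 * For reference: on a machine with a virtually indexed cache, two
 * mappings of the same physical page conflict when their virtual
 * addresses select different cache lines. A minimal sketch of such a
 * test follows (illustrative only; `vac_size' is a hypothetical
 * direct-mapped cache size, a power of two -- the real test is the
 * BADALIAS() macro used above).
 */
#if 0	/* illustrative sketch, not part of the build */
static inline int
bad_alias_sketch(vaddr_t va1, vaddr_t va2, vsize_t vac_size)
{

	/* The alias is harmful iff the VAs differ in the cache-index bits */
	return (((va1 ^ va2) & (vac_size - 1)) != 0);
}
#endif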
1.182     pk       2915:
                   2916: /*
                   2917:  * Uncache all entries on behalf of kvm_uncache(). In addition to
                   2918:  * removing the cache bit from the PTE, we are also setting PV_NC
                   2919:  * in each entry to stop pv_unlink() from re-caching (i.e. when
                   2920:  * a bad alias is going away).
                   2921:  */
1.303     uwe      2922: static void
                   2923: pv_uncache(struct vm_page *pg)
1.182     pk       2924: {
                   2925:        struct pvlist *pv;
1.243     pk       2926:        int s;
1.182     pk       2927:
1.243     pk       2928:        s = splvm();
1.322     ad       2929:        PMAP_LOCK();
1.243     pk       2930:
1.248     pk       2931:        for (pv = VM_MDPAGE_PVHEAD(pg); pv != NULL; pv = pv->pv_next)
                   2932:                pv->pv_flags |= PV_NC;
                   2933:
1.210     thorpej  2934: #if defined(SUN4M) || defined(SUN4D)
                   2935:        if (CPU_HAS_SRMMU)
1.236     pk       2936:                pv_changepte4m(pg, 0, SRMMU_PG_C);
1.182     pk       2937: #endif
                   2938: #if defined(SUN4) || defined(SUN4C)
1.296     chs      2939:        if (CPU_HAS_SUNMMU)
1.236     pk       2940:                pv_changepte4_4c(pg, PG_NC, 0);
1.182     pk       2941: #endif
1.322     ad       2942:        PMAP_UNLOCK();
1.243     pk       2943:        splx(s);
1.182     pk       2944: }
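/*
 * Note on the two flags used above: PV_NC marks an individual mapping
 * that must stay uncached (as set here on behalf of kvm_uncache()),
 * while PV_ANC on the list head marks a page that is uncached only
 * because of a virtual-address alias -- a condition pv_unlink() can
 * undo once the offending mapping goes away.
 */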
1.55      pk       2945:
                   2946: /*
                   2947:  * Walk the given list and flush the cache for each (MI) page that is
                   2948:  * potentially in the cache. Called only if vactype != VAC_NONE.
                   2949:  */
1.224     pk       2950: #if defined(SUN4) || defined(SUN4C)
                   2951: static void
1.236     pk       2952: pv_flushcache4_4c(struct vm_page *pg)
1.55      pk       2953: {
1.236     pk       2954:        struct pvlist *pv;
1.124     pk       2955:        struct pmap *pm;
                   2956:        int s, ctx;
1.55      pk       2957:
1.236     pk       2958:        pv = VM_MDPAGE_PVHEAD(pg);
                   2959:
1.55      pk       2960:        write_user_windows();   /* paranoia? */
1.175     thorpej  2961:        s = splvm();            /* XXX extreme paranoia */
1.55      pk       2962:        if ((pm = pv->pv_pmap) != NULL) {
1.224     pk       2963:                ctx = getcontext4();
1.55      pk       2964:                for (;;) {
                   2965:                        if (pm->pm_ctx) {
1.224     pk       2966:                                setcontext4(pm->pm_ctxnum);
1.214     pk       2967:                                cache_flush_page(pv->pv_va, pm->pm_ctxnum);
1.224     pk       2968:                        }
                   2969:                        pv = pv->pv_next;
                   2970:                        if (pv == NULL)
                   2971:                                break;
                   2972:                        pm = pv->pv_pmap;
                   2973:                }
                   2974:                setcontext4(ctx);
                   2975:        }
                   2976:        splx(s);
                   2977: }
                   2978: #endif /* SUN4 || SUN4C */
                   2979:
1.210     thorpej  2980: #if defined(SUN4M) || defined(SUN4D)
1.224     pk       2981: static void
1.236     pk       2982: pv_flushcache4m(struct vm_page *pg)
1.224     pk       2983: {
1.236     pk       2984:        struct pvlist *pv;
1.224     pk       2985:        struct pmap *pm;
                   2986:        int s;
                   2987:
1.236     pk       2988:        pv = VM_MDPAGE_PVHEAD(pg);
                   2989:
1.224     pk       2990:        s = splvm();            /* XXX extreme paranoia */
                   2991:        if ((pm = pv->pv_pmap) != NULL) {
                   2992:                for (;;) {
                   2993:                        if (pm->pm_ctx) {
                   2994:                                cache_flush_page(pv->pv_va, pm->pm_ctxnum);
1.55      pk       2995:                        }
                   2996:                        pv = pv->pv_next;
                   2997:                        if (pv == NULL)
                   2998:                                break;
                   2999:                        pm = pv->pv_pmap;
                   3000:                }
                   3001:        }
                   3002:        splx(s);
                   3003: }
1.224     pk       3004: #endif /* SUN4M || SUN4D */
1.55      pk       3005:
                   3006: /*----------------------------------------------------------------*/
                   3007:
                   3008: /*
                   3009:  * At last, pmap code.
                   3010:  */
1.1       deraadt  3011:
1.210     thorpej  3012: #if defined(SUN4) && (defined(SUN4C) || defined(SUN4M) || defined(SUN4D))
1.18      deraadt  3013: int nptesg;
                   3014: #endif
                   3015:
1.210     thorpej  3016: #if defined(SUN4M) || defined(SUN4D)
1.311     christos 3017: static void pmap_bootstrap4m(void *);
1.55      pk       3018: #endif
                   3019: #if defined(SUN4) || defined(SUN4C)
1.311     christos 3020: static void pmap_bootstrap4_4c(void *, int, int, int);
1.55      pk       3021: #endif
                   3022:
1.1       deraadt  3023: /*
                   3024:  * Bootstrap the system enough to run with VM enabled.
                   3025:  *
1.43      pk       3026:  * nsegment is the number of mmu segment entries (``PMEGs'');
                   3027:  * nregion is the number of mmu region entries (``SMEGs'');
1.1       deraadt  3028:  * nctx is the number of contexts.
                   3029:  */
                   3030: void
1.303     uwe      3031: pmap_bootstrap(int nctx, int nregion, int nsegment)
1.1       deraadt  3032: {
1.311     christos 3033:        void *p;
1.184     pk       3034:        extern char etext[], kernel_data_start[];
1.246     pk       3035:        extern char *kernel_top;
1.181     pk       3036:
1.110     mrg      3037:        uvmexp.pagesize = NBPG;
                   3038:        uvm_setpagesize();
1.55      pk       3039:
1.210     thorpej  3040: #if defined(SUN4) && (defined(SUN4C) || defined(SUN4M) || defined(SUN4D))
1.181     pk       3041:        /* In this case NPTESG is a variable */
1.55      pk       3042:        nptesg = (NBPSG >> pgshift);
                   3043: #endif
                   3044:
1.177     pk       3045:        /*
1.181     pk       3046:         * Grab physical memory list.
                   3047:         */
1.246     pk       3048:        p = kernel_top;
                   3049:        get_phys_mem(&p);
1.181     pk       3050:
                   3051:        /*
1.177     pk       3052:         * The data segment in sparc ELF images is aligned to a 64KB
                   3053:         * (the maximum page size defined by the ELF/sparc ABI) boundary.
                   3054:  * This results in an unused portion of physical memory between
                   3055:         * the text/rodata and the data segment. We pick up that gap
                   3056:         * here to remove it from the kernel map and give it to the
                   3057:         * VM manager later.
                   3058:         */
                   3059:        etext_gap_start = (vaddr_t)(etext + NBPG - 1) & ~PGOFSET;
                   3060:        etext_gap_end = (vaddr_t)kernel_data_start & ~PGOFSET;
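	/*
	 * For example (hypothetical addresses, 4KB pages): with
	 * etext = 0xf02f1a34 the gap starts at 0xf02f2000, and with
	 * kernel_data_start = 0xf0300000 it ends at 0xf0300000,
	 * handing 14 pages back to the VM manager.
	 */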
1.55      pk       3061:
1.210     thorpej  3062:        if (CPU_HAS_SRMMU) {
                   3063: #if defined(SUN4M) || defined(SUN4D)
1.246     pk       3064:                pmap_bootstrap4m(p);
1.55      pk       3065: #endif
1.296     chs      3066:        } else if (CPU_HAS_SUNMMU) {
1.55      pk       3067: #if defined(SUN4) || defined(SUN4C)
1.246     pk       3068:                pmap_bootstrap4_4c(p, nctx, nregion, nsegment);
1.177     pk       3069: #endif
1.55      pk       3070:        }
1.177     pk       3071:
                   3072:        pmap_page_upload();
1.325     martin   3073:        mutex_init(&demap_lock, MUTEX_DEFAULT, IPL_VM);
                   3074:        mutex_init(&ctx_lock, MUTEX_DEFAULT, IPL_SCHED);
1.340     martin   3075:        lock_available = true;
1.55      pk       3076: }
                   3077:
                   3078: #if defined(SUN4) || defined(SUN4C)
                   3079: void
1.311     christos 3080: pmap_bootstrap4_4c(void *top, int nctx, int nregion, int nsegment)
1.55      pk       3081: {
1.122     pk       3082:        union ctxinfo *ci;
                   3083:        struct mmuentry *mmuseg;
1.77      pk       3084: #if defined(SUN4_MMU3L)
1.122     pk       3085:        struct mmuentry *mmureg;
1.53      christos 3086: #endif
1.267     pk       3087:        struct regmap *rp;
                   3088:        struct segmap *sp;
1.122     pk       3089:        int i, j;
                   3090:        int npte, zseg, vr, vs;
1.244     pk       3091:        int startscookie, scookie;
                   3092: #if defined(SUN4_MMU3L)
1.274     chs      3093:        int startrcookie = 0, rcookie = 0;
1.244     pk       3094: #endif
1.265     pk       3095:        int *kptes;
1.1       deraadt  3096:        int lastpage;
1.139     chs      3097:        vaddr_t va;
1.265     pk       3098:        vaddr_t p;
1.238     pk       3099:        extern char kernel_text[];
1.1       deraadt  3100:
1.237     pk       3101:        /*
1.238     pk       3102:         * Compute `va2pa_offset'.
                   3103:         * Use `kernel_text' to probe the MMU translation since
                   3104:         * the pages at KERNBASE might not be mapped.
1.237     pk       3105:         */
1.238     pk       3106:        va2pa_offset = (vaddr_t)kernel_text -
1.239     pk       3107:                        ((getpte4(kernel_text) & PG_PFNUM) << PGSHIFT);
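	/*
	 * va2pa_offset records the constant VA-to-PA bias, so
	 * PMAP_BOOTSTRAP_VA2PA(va) can presumably reduce to
	 * (va - va2pa_offset). E.g. (hypothetical numbers, 4KB pages)
	 * if kernel_text sits at VA 0xf0004000 and its PTE holds page
	 * frame number 4, the page is at PA 0x4000 and va2pa_offset
	 * is 0xf0000000.
	 */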
1.237     pk       3108:
1.193     mrg      3109:        ncontext = nctx;
                   3110:
1.45      pk       3111:        switch (cputyp) {
                   3112:        case CPU_SUN4C:
                   3113:                mmu_has_hole = 1;
                   3114:                break;
                   3115:        case CPU_SUN4:
1.69      pk       3116:                if (cpuinfo.cpu_type != CPUTYP_4_400) {
1.45      pk       3117:                        mmu_has_hole = 1;
                   3118:                        break;
                   3119:                }
                   3120:        }
                   3121:
1.31      pk       3122: #if defined(SUN4)
                   3123:        /*
                   3124:         * set up the segfixmask to mask off invalid bits
                   3125:         */
1.43      pk       3126:        segfixmask =  nsegment - 1; /* assume nsegment is a power of 2 */
                   3127: #ifdef DIAGNOSTIC
                   3128:        if ((nsegment & segfixmask) != 0) {
1.66      christos 3129:                printf("pmap_bootstrap: unsuitable number of segments (%d)\n",
1.43      pk       3130:                        nsegment);
                   3131:                callrom();
                   3132:        }
                   3133: #endif
1.31      pk       3134: #endif
                   3135:
1.210     thorpej  3136: #if defined(SUN4M) || defined(SUN4D) /* We're in a dual-arch kernel.
                   3137:                                        Setup 4/4c fn. ptrs */
1.55      pk       3138:        pmap_clear_modify_p     =       pmap_clear_modify4_4c;
                   3139:        pmap_clear_reference_p  =       pmap_clear_reference4_4c;
                   3140:        pmap_enter_p            =       pmap_enter4_4c;
                   3141:        pmap_extract_p          =       pmap_extract4_4c;
                   3142:        pmap_is_modified_p      =       pmap_is_modified4_4c;
                   3143:        pmap_is_referenced_p    =       pmap_is_referenced4_4c;
1.151     chs      3144:        pmap_kenter_pa_p        =       pmap_kenter_pa4_4c;
                   3145:        pmap_kremove_p          =       pmap_kremove4_4c;
1.248     pk       3146:        pmap_kprotect_p         =       pmap_kprotect4_4c;
1.55      pk       3147:        pmap_page_protect_p     =       pmap_page_protect4_4c;
                   3148:        pmap_protect_p          =       pmap_protect4_4c;
                   3149:        pmap_rmk_p              =       pmap_rmk4_4c;
                   3150:        pmap_rmu_p              =       pmap_rmu4_4c;
1.210     thorpej  3151: #endif /* defined SUN4M || defined SUN4D */
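	/*
	 * A dual-arch kernel dispatches through the pointers assigned
	 * above; the consuming macros live in <machine/pmap.h> and are
	 * assumed to look roughly like (sketch only):
	 *
	 *	#define pmap_enter(pm, va, pa, prot, flags) \
	 *		(*pmap_enter_p)((pm), (va), (pa), (prot), (flags))
	 */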
1.43      pk       3152:
1.265     pk       3153:        p = (vaddr_t)top;
                   3154:
1.1       deraadt  3155:        /*
                   3156:         * Last segment is the `invalid' one (one PMEG of pte's with !pg_v).
                   3157:         * It will never be used for anything else.
                   3158:         */
1.43      pk       3159:        seginval = --nsegment;
                   3160:
1.69      pk       3161: #if defined(SUN4_MMU3L)
                   3162:        if (HASSUN4_MMU3L)
1.43      pk       3163:                reginval = --nregion;
                   3164: #endif
                   3165:
                   3166:        /*
1.265     pk       3167:         * Allocate and initialise mmu entries and context structures.
                   3168:         */
                   3169: #if defined(SUN4_MMU3L)
                   3170:        mmuregions = mmureg = (struct mmuentry *)p;
                   3171:        p += nregion * sizeof(struct mmuentry);
1.326     cegger   3172:        memset(mmuregions, 0, nregion * sizeof(struct mmuentry));
1.265     pk       3173: #endif
                   3174:        mmusegments = mmuseg = (struct mmuentry *)p;
                   3175:        p += nsegment * sizeof(struct mmuentry);
1.326     cegger   3176:        memset(mmusegments, 0, nsegment * sizeof(struct mmuentry));
1.265     pk       3177:
                   3178:        pmap_kernel()->pm_ctx = ctxinfo = ci = (union ctxinfo *)p;
                   3179:        p += nctx * sizeof *ci;
                   3180:
                   3181:        /* Initialize MMU resource queues */
                   3182: #if defined(SUN4_MMU3L)
1.348.6.3! tls      3183:        mmuq_init(&region_freelist);
        !          3184:        mmuq_init(&region_lru);
        !          3185:        mmuq_init(&region_locked);
        !          3186: #endif
        !          3187:        mmuq_init(&segm_freelist);
        !          3188:        mmuq_init(&segm_lru);
        !          3189:        mmuq_init(&segm_locked);
1.265     pk       3190:
                   3191:
                   3192:        /*
1.43      pk       3193:         * Intialize the kernel pmap.
                   3194:         */
                   3195:        /* kernel_pmap_store.pm_ctxnum = 0; */
                   3196:        kernel_pmap_store.pm_refcount = 1;
1.69      pk       3197: #if defined(SUN4_MMU3L)
1.43      pk       3198:        TAILQ_INIT(&kernel_pmap_store.pm_reglist);
                   3199: #endif
                   3200:        TAILQ_INIT(&kernel_pmap_store.pm_seglist);
                   3201:
1.186     pk       3202:        /*
1.265     pk       3203:         * Allocate memory for kernel PTEs
                   3204:         * XXX Consider allocating memory for only a few regions
                   3205:         * and use growkernel() to allocate more as needed.
                   3206:         */
                   3207:        kptes = (int *)p;
                   3208:        p += NKREG * NSEGRG * NPTESG * sizeof(int);
1.326     cegger   3209:        memset(kptes, 0, NKREG * NSEGRG * NPTESG * sizeof(int));
1.265     pk       3210:
                   3211:        /*
1.186     pk       3212:         * Set up pm_regmap for kernel to point NUREG *below* the beginning
                   3213:         * of kernel regmap storage. Since the kernel only uses regions
                   3214:         * above NUREG, we save storage space and can index kernel and
                   3215:         * user regions in the same way.
                   3216:         */
1.43      pk       3217:        kernel_pmap_store.pm_regmap = &kernel_regmap_store[-NUREG];
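	/*
	 * With this bias a single expression works for any address:
	 * for va >= KERNBASE, VA_VREG(va) falls in [NUREG, NUREG + NKREG),
	 * so pm_regmap[VA_VREG(va)] lands in
	 * kernel_regmap_store[VA_VREG(va) - NUREG].
	 */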
                   3218:        for (i = NKREG; --i >= 0;) {
1.69      pk       3219: #if defined(SUN4_MMU3L)
1.43      pk       3220:                kernel_regmap_store[i].rg_smeg = reginval;
                   3221: #endif
                   3222:                kernel_regmap_store[i].rg_segmap =
                   3223:                        &kernel_segmap_store[i * NSEGRG];
1.265     pk       3224:                for (j = NSEGRG; --j >= 0;) {
1.267     pk       3225:                        sp = &kernel_segmap_store[i * NSEGRG + j];
                   3226:                        sp->sg_pmeg = seginval;
                   3227:                        sp->sg_pte = &kptes[(i * NSEGRG + j) * NPTESG];
1.265     pk       3228:                }
1.43      pk       3229:        }
1.1       deraadt  3230:
                   3231:        /*
                   3232:         * Preserve the monitor ROM's reserved VM region, so that
                   3233:         * we can use L1-A or the monitor's debugger.  As a side
                   3234:         * effect we map the ROM's reserved VM into all contexts
                   3235:         * (otherwise L1-A crashes the machine!).
                   3236:         */
1.43      pk       3237:
1.58      pk       3238:        mmu_reservemon4_4c(&nregion, &nsegment);
1.43      pk       3239:
1.69      pk       3240: #if defined(SUN4_MMU3L)
1.43      pk       3241:        /* Reserve one region for temporary mappings */
1.143     pk       3242:        if (HASSUN4_MMU3L)
                   3243:                tregion = --nregion;
1.43      pk       3244: #endif
1.1       deraadt  3245:
                   3246:        /*
                   3247:         * Set up the `constants' for the call to vm_init()
                   3248:         * in main().  All pages beginning at p (rounded up to
                   3249:         * the next whole page) and continuing through the number
                   3250:         * of available pages are free, but they start at a higher
                   3251:         * virtual address.  This gives us two mappable MD pages
                   3252:         * for pmap_zero_page and pmap_copy_page, and one MI page
                   3253:         * for /dev/mem, all with no associated physical memory.
                   3254:         */
1.265     pk       3255:        p = (p + NBPG - 1) & ~PGOFSET;
1.36      pk       3256:
1.181     pk       3257:        avail_start = PMAP_BOOTSTRAP_VA2PA(p);
1.38      pk       3258:
1.265     pk       3259:        i = p;
1.311     christos 3260:        cpuinfo.vpage[0] = (void *)p, p += NBPG;
                   3261:        cpuinfo.vpage[1] = (void *)p, p += NBPG;
                   3262:        p = (vaddr_t)reserve_dumppages((void *)p);
1.39      pk       3263:
1.265     pk       3264:        virtual_avail = p;
1.1       deraadt  3265:        virtual_end = VM_MAX_KERNEL_ADDRESS;
                   3266:
1.265     pk       3267:        p = i;                  /* retract to first free phys */
1.1       deraadt  3268:
1.322     ad       3269:
1.1       deraadt  3270:        /*
                   3271:         * All contexts are free except the kernel's.
                   3272:         *
                   3273:         * XXX sun4c could use context 0 for users?
                   3274:         */
1.42      mycroft  3275:        ci->c_pmap = pmap_kernel();
1.1       deraadt  3276:        ctx_freelist = ci + 1;
                   3277:        for (i = 1; i < ncontext; i++) {
                   3278:                ci++;
                   3279:                ci->c_nextfree = ci + 1;
                   3280:        }
                   3281:        ci->c_nextfree = NULL;
                   3282:        ctx_kick = 0;
                   3283:        ctx_kickdir = -1;
                   3284:
                   3285:        /*
                   3286:         * Init mmu entries that map the kernel physical addresses.
                   3287:         *
                   3288:         * All the other MMU entries are free.
                   3289:         *
1.244     pk       3290:         * THIS ASSUMES THE KERNEL IS MAPPED BY A CONTIGUOUS RANGE OF
                   3291:         * MMU SEGMENTS/REGIONS DURING THE BOOT PROCESS
1.1       deraadt  3292:         */
1.43      pk       3293:
1.181     pk       3294:        /* Compute the number of segments used by the kernel */
1.265     pk       3295:        zseg = (((p + NBPSG - 1) & ~SGOFSET) - KERNBASE) >> SGSHIFT;
1.1       deraadt  3296:        lastpage = VA_VPG(p);
                   3297:        if (lastpage == 0)
1.43      pk       3298:                /*
                   3299:                 * If the page bits in p are 0, we filled the last segment
1.265     pk       3300:                 * exactly; if not, lastpage is the number of pages
                   3301:                 * used in the last segment.
1.43      pk       3302:                 */
1.1       deraadt  3303:                lastpage = NPTESG;
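	/*
	 * Worked example (hypothetical sun4c-like sizes: 4KB pages,
	 * 256KB segments): if the kernel ends 1MB + 12KB above KERNBASE,
	 * zseg is 5 and lastpage is 3, i.e. four full segments plus
	 * three pages of the fifth.
	 */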
1.43      pk       3304:
1.265     pk       3305:        p = KERNBASE;                   /* first va */
1.1       deraadt  3306:        vs = VA_VSEG(KERNBASE);         /* first virtual segment */
1.43      pk       3307:        vr = VA_VREG(KERNBASE);         /* first virtual region */
                   3308:        rp = &pmap_kernel()->pm_regmap[vr];
                   3309:
1.244     pk       3310:        /* Get region/segment where kernel addresses start */
                   3311: #if defined(SUN4_MMU3L)
                   3312:        if (HASSUN4_MMU3L)
                   3313:                startrcookie = rcookie = getregmap(p);
                   3314:        mmureg = &mmuregions[rcookie];
                   3315: #endif
                   3316:
                   3317:        startscookie = scookie = getsegmap(p);
                   3318:        mmuseg = &mmusegments[scookie];
                   3319:        zseg += scookie;        /* First free segment */
                   3320:
                   3321:        for (;;) {
1.43      pk       3322:
1.1       deraadt  3323:                /*
1.43      pk       3324:                 * Distribute each kernel region/segment into all contexts.
1.1       deraadt  3325:                 * This is done through the monitor ROM, rather than
                   3326:                 * directly here: if we do a setcontext we will fault,
                   3327:                 * as we are not (yet) mapped in any other context.
                   3328:                 */
1.43      pk       3329:
                   3330:                if ((vs % NSEGRG) == 0) {
                   3331:                        /* Entering a new region */
                   3332:                        if (VA_VREG(p) > vr) {
                   3333: #ifdef DEBUG
1.66      christos 3334:                                printf("note: giant kernel!\n");
1.43      pk       3335: #endif
                   3336:                                vr++, rp++;
                   3337:                        }
1.69      pk       3338: #if defined(SUN4_MMU3L)
                   3339:                        if (HASSUN4_MMU3L) {
1.43      pk       3340:                                for (i = 1; i < nctx; i++)
1.311     christos 3341:                                        prom_setcontext(i, (void *)p, rcookie);
1.43      pk       3342:
1.348.6.3! tls      3343:                                mmuq_insert_tail(&region_locked,
        !          3344:                                                  mmureg);
1.43      pk       3345:                                TAILQ_INSERT_TAIL(&pmap_kernel()->pm_reglist,
                   3346:                                                  mmureg, me_pmchain);
1.265     pk       3347: #ifdef DIAGNOSTIC
                   3348:                                mmureg->me_statp = NULL;
                   3349: #endif
1.43      pk       3350:                                mmureg->me_cookie = rcookie;
                   3351:                                mmureg->me_pmap = pmap_kernel();
                   3352:                                mmureg->me_vreg = vr;
                   3353:                                rp->rg_smeg = rcookie;
                   3354:                                mmureg++;
                   3355:                                rcookie++;
                   3356:                        }
1.265     pk       3357: #endif /* SUN4_MMU3L */
1.43      pk       3358:                }
                   3359:
1.69      pk       3360: #if defined(SUN4_MMU3L)
                   3361:                if (!HASSUN4_MMU3L)
1.43      pk       3362: #endif
                   3363:                        for (i = 1; i < nctx; i++)
1.311     christos 3364:                                prom_setcontext(i, (void *)p, scookie);
1.1       deraadt  3365:
                   3366:                /* set up the mmu entry */
1.348.6.3! tls      3367:                mmuq_insert_tail(&segm_locked, mmuseg);
1.265     pk       3368: #ifdef DIAGNOSTIC
1.268     pk       3369:                mmuseg->me_statp = &pmap_stats.ps_npmeg_locked;
1.265     pk       3370: #endif
1.43      pk       3371:                TAILQ_INSERT_TAIL(&pmap_kernel()->pm_seglist, mmuseg, me_pmchain);
1.70      pk       3372:                pmap_stats.ps_npmeg_locked++;
1.43      pk       3373:                mmuseg->me_cookie = scookie;
                   3374:                mmuseg->me_pmap = pmap_kernel();
                   3375:                mmuseg->me_vreg = vr;
                   3376:                mmuseg->me_vseg = vs % NSEGRG;
1.267     pk       3377:                sp = &rp->rg_segmap[vs % NSEGRG];
                   3378:                sp->sg_pmeg = scookie;
1.43      pk       3379:                npte = ++scookie < zseg ? NPTESG : lastpage;
1.267     pk       3380:                sp->sg_npte = npte;
                   3381:                sp->sg_nwired = npte;
1.277     pk       3382:                pmap_kernel()->pm_stats.resident_count += npte;
1.43      pk       3383:                rp->rg_nsegmap += 1;
1.265     pk       3384:                for (i = 0; i < npte; i++)
1.267     pk       3385:                        sp->sg_pte[i] = getpte4(p + i * NBPG) | PG_WIRED;
1.43      pk       3386:                mmuseg++;
1.1       deraadt  3387:                vs++;
1.43      pk       3388:                if (scookie < zseg) {
1.1       deraadt  3389:                        p += NBPSG;
                   3390:                        continue;
                   3391:                }
1.43      pk       3392:
1.1       deraadt  3393:                /*
                   3394:                 * Unmap the pages, if any, that are not part of
                   3395:                 * the final segment.
                   3396:                 */
1.43      pk       3397:                for (p += npte << PGSHIFT; npte < NPTESG; npte++, p += NBPG)
1.55      pk       3398:                        setpte4(p, 0);
1.43      pk       3399:
1.69      pk       3400: #if defined(SUN4_MMU3L)
                   3401:                if (HASSUN4_MMU3L) {
1.43      pk       3402:                        /*
                   3403:                         * Unmap the segments, if any, that are not part of
                   3404:                         * the final region.
                   3405:                         */
                   3406:                        for (i = rp->rg_nsegmap; i < NSEGRG; i++, p += NBPSG)
                   3407:                                setsegmap(p, seginval);
1.139     chs      3408:
                   3409:                        /*
                   3410:                         * Unmap any kernel regions that we aren't using.
                   3411:                         */
                   3412:                        for (i = 0; i < nctx; i++) {
                   3413:                                setcontext4(i);
1.265     pk       3414:                                for (va = p;
1.139     chs      3415:                                     va < (OPENPROM_STARTVADDR & ~(NBPRG - 1));
                   3416:                                     va += NBPRG)
                   3417:                                        setregmap(va, reginval);
                   3418:                        }
                   3419:
                   3420:                } else
                   3421: #endif
                   3422:                {
                   3423:                        /*
                   3424:                         * Unmap any kernel segments that we aren't using.
                   3425:                         */
                   3426:                        for (i = 0; i < nctx; i++) {
                   3427:                                setcontext4(i);
1.265     pk       3428:                                for (va = p;
1.139     chs      3429:                                     va < (OPENPROM_STARTVADDR & ~(NBPSG - 1));
                   3430:                                     va += NBPSG)
                   3431:                                        setsegmap(va, seginval);
                   3432:                        }
1.43      pk       3433:                }
1.1       deraadt  3434:                break;
                   3435:        }
1.43      pk       3436:
1.69      pk       3437: #if defined(SUN4_MMU3L)
                   3438:        if (HASSUN4_MMU3L)
1.265     pk       3439:                for (rcookie = 0; rcookie < nregion; rcookie++) {
1.244     pk       3440:                        if (rcookie == startrcookie)
                   3441:                                /* Kernel must fit in one region! */
                   3442:                                rcookie++;
                   3443:                        mmureg = &mmuregions[rcookie];
1.43      pk       3444:                        mmureg->me_cookie = rcookie;
1.348.6.3! tls      3445:                        mmuq_insert_tail(&region_freelist, mmureg);
1.265     pk       3446: #ifdef DIAGNOSTIC
                   3447:                        mmureg->me_statp = NULL;
                   3448: #endif
1.43      pk       3449:                }
1.265     pk       3450: #endif /* SUN4_MMU3L */
1.43      pk       3451:
1.265     pk       3452:        for (scookie = 0; scookie < nsegment; scookie++) {
1.244     pk       3453:                if (scookie == startscookie)
1.265     pk       3454:                        /* Skip static kernel image */
1.244     pk       3455:                        scookie = zseg;
                   3456:                mmuseg = &mmusegments[scookie];
1.43      pk       3457:                mmuseg->me_cookie = scookie;
1.348.6.3! tls      3458:                mmuq_insert_tail(&segm_freelist, mmuseg);
1.70      pk       3459:                pmap_stats.ps_npmeg_free++;
1.265     pk       3460: #ifdef DIAGNOSTIC
                   3461:                mmuseg->me_statp = NULL;
                   3462: #endif
1.1       deraadt  3463:        }
                   3464:
1.13      pk       3465:        /* Erase all spurious user-space segmaps */
                   3466:        for (i = 1; i < ncontext; i++) {
1.71      pk       3467:                setcontext4(i);
1.69      pk       3468:                if (HASSUN4_MMU3L)
1.43      pk       3469:                        for (p = 0, j = NUREG; --j >= 0; p += NBPRG)
                   3470:                                setregmap(p, reginval);
                   3471:                else
                   3472:                        for (p = 0, vr = 0; vr < NUREG; vr++) {
1.45      pk       3473:                                if (VA_INHOLE(p)) {
1.265     pk       3474:                                        p = MMU_HOLE_END;
1.45      pk       3475:                                        vr = VA_VREG(p);
1.43      pk       3476:                                }
                   3477:                                for (j = NSEGRG; --j >= 0; p += NBPSG)
                   3478:                                        setsegmap(p, seginval);
                   3479:                        }
1.13      pk       3480:        }
1.71      pk       3481:        setcontext4(0);
1.13      pk       3482:
1.1       deraadt  3483:        /*
                   3484:         * write protect & encache kernel text;
1.255     pk       3485:         * set red zone at kernel base;
                   3486:         * enable cache on message buffer and cpuinfo.
1.1       deraadt  3487:         */
                   3488:        {
1.23      deraadt  3489:                extern char etext[];
1.2       deraadt  3490:
1.255     pk       3491:                /* Enable cache on message buffer and cpuinfo */
1.265     pk       3492:                for (p = KERNBASE; p < (vaddr_t)trapbase; p += NBPG)
1.255     pk       3493:                        setpte4(p, getpte4(p) & ~PG_NC);
                   3494:
                   3495:                /* Enable cache and write-protect kernel text */
1.265     pk       3496:                for (p = (vaddr_t)trapbase; p < (vaddr_t)etext; p += NBPG)
1.255     pk       3497:                        setpte4(p, getpte4(p) & ~(PG_NC|PG_W));
1.177     pk       3498:
                   3499:                /*
                   3500:                 * Unmap the `etext gap'; it'll be made available
                   3501:                 * to the VM manager.
                   3502:                 */
1.265     pk       3503:                for (p = etext_gap_start; p < etext_gap_end; p += NBPG) {
                   3504:                        rp = &pmap_kernel()->pm_regmap[VA_VREG(p)];
1.267     pk       3505:                        sp = &rp->rg_segmap[VA_VSEG(p)];
                   3506:                        sp->sg_nwired--;
                   3507:                        sp->sg_npte--;
1.277     pk       3508:                        pmap_kernel()->pm_stats.resident_count--;
1.267     pk       3509:                        sp->sg_pte[VA_VPG(p)] = 0;
1.177     pk       3510:                        setpte4(p, 0);
1.265     pk       3511:                }
                   3512:
                   3513:                /* Enable cache on data & bss */
                   3514:                for (p = etext_gap_end; p < virtual_avail; p += NBPG)
                   3515:                        setpte4(p, getpte4(p) & ~PG_NC);
                   3516:
1.1       deraadt  3517:        }
1.334     tsutsui  3518:        cpus[0] = (struct cpu_info *)CPUINFO_VA;
1.1       deraadt  3519: }
1.55      pk       3520: #endif
1.1       deraadt  3521:
1.210     thorpej  3522: #if defined(SUN4M) || defined(SUN4D)   /* SRMMU version of pmap_bootstrap */
1.55      pk       3523: /*
1.203     pk       3524:  * Bootstrap the system enough to run with VM enabled on a sun4m machine.
1.55      pk       3525:  *
                   3526:  * Switches from ROM to kernel page tables, and sets up initial mappings.
                   3527:  */
                   3528: static void
1.311     christos 3529: pmap_bootstrap4m(void *top)
1.36      pk       3530: {
1.124     pk       3531:        int i, j;
1.265     pk       3532:        vaddr_t p, q;
1.122     pk       3533:        union ctxinfo *ci;
                   3534:        int reg, seg;
1.71      pk       3535:        unsigned int ctxtblsize;
1.265     pk       3536:        vaddr_t pagetables_start, pagetables_end;
1.167     pk       3537:        paddr_t pagetables_start_pa;
1.55      pk       3538:        extern char etext[];
1.238     pk       3539:        extern char kernel_text[];
1.325     martin   3540:        vaddr_t va;
1.344     mrg      3541: #if defined(MULTIPROCESSOR)
1.325     martin   3542:        vsize_t off;
1.330     mrg      3543:        size_t cpuinfo_len = sizeof(struct cpu_info);
1.327     mrg      3544:        uint8_t *cpuinfo_data;
1.330     mrg      3545:        int align = PAGE_SIZE;
                   3546:        vaddr_t sva, cpuinfo_va;
                   3547:        vsize_t sz;
1.325     martin   3548: #endif
1.237     pk       3549:
                   3550:        /*
1.238     pk       3551:         * Compute `va2pa_offset'.
                   3552:         * Use `kernel_text' to probe the MMU translation since
                   3553:         * the pages at KERNBASE might not be mapped.
1.237     pk       3554:         */
1.238     pk       3555:        va2pa_offset = (vaddr_t)kernel_text - VA2PA(kernel_text);
1.36      pk       3556:
1.193     mrg      3557:        ncontext = cpuinfo.mmu_ncontext;
                   3558:
1.210     thorpej  3559: #if defined(SUN4) || defined(SUN4C) /* setup SRMMU fn. ptrs for dual-arch
                   3560:                                       kernel */
1.55      pk       3561:        pmap_clear_modify_p     =       pmap_clear_modify4m;
                   3562:        pmap_clear_reference_p  =       pmap_clear_reference4m;
                   3563:        pmap_enter_p            =       pmap_enter4m;
                   3564:        pmap_extract_p          =       pmap_extract4m;
                   3565:        pmap_is_modified_p      =       pmap_is_modified4m;
                   3566:        pmap_is_referenced_p    =       pmap_is_referenced4m;
1.151     chs      3567:        pmap_kenter_pa_p        =       pmap_kenter_pa4m;
                   3568:        pmap_kremove_p          =       pmap_kremove4m;
1.248     pk       3569:        pmap_kprotect_p         =       pmap_kprotect4m;
1.55      pk       3570:        pmap_page_protect_p     =       pmap_page_protect4m;
                   3571:        pmap_protect_p          =       pmap_protect4m;
                   3572:        pmap_rmk_p              =       pmap_rmk4m;
                   3573:        pmap_rmu_p              =       pmap_rmu4m;
1.203     pk       3574: #endif /* defined SUN4/SUN4C */
1.37      pk       3575:
1.36      pk       3576:        /*
1.152     pk       3577:         * p points to top of kernel mem
                   3578:         */
1.265     pk       3579:        p = (vaddr_t)top;
1.152     pk       3580:
1.330     mrg      3581:        p = (p + NBPG - 1) & ~PGOFSET;
1.329     mrg      3582:
1.152     pk       3583:        /*
1.55      pk       3584:         * Initialize the kernel pmap.
                   3585:         */
                   3586:        /* kernel_pmap_store.pm_ctxnum = 0; */
                   3587:        kernel_pmap_store.pm_refcount = 1;
1.71      pk       3588:
                   3589:        /*
                   3590:         * Set up pm_regmap for kernel to point NUREG *below* the beginning
1.55      pk       3591:         * of kernel regmap storage. Since the kernel only uses regions
                   3592:         * above NUREG, we save storage space and can index kernel and
1.186     pk       3593:         * user regions in the same way.
1.36      pk       3594:         */
1.55      pk       3595:        kernel_pmap_store.pm_regmap = &kernel_regmap_store[-NUREG];
1.326     cegger   3596:        memset(kernel_regmap_store, 0, NKREG * sizeof(struct regmap));
                   3597:        memset(kernel_segmap_store, 0, NKREG * NSEGRG * sizeof(struct segmap));
1.55      pk       3598:        for (i = NKREG; --i >= 0;) {
                   3599:                kernel_regmap_store[i].rg_segmap =
                   3600:                        &kernel_segmap_store[i * NSEGRG];
                   3601:                kernel_regmap_store[i].rg_seg_ptps = NULL;
                   3602:                for (j = NSEGRG; --j >= 0;)
                   3603:                        kernel_segmap_store[i * NSEGRG + j].sg_pte = NULL;
                   3604:        }
1.38      pk       3605:
1.152     pk       3606:        /* Allocate kernel region pointer tables */
                   3607:        pmap_kernel()->pm_reg_ptps = (int **)(q = p);
1.302     briggs   3608:        p += sparc_ncpus * sizeof(int **);
1.326     cegger   3609:        memset((void *)q, 0, (u_int)p - (u_int)q);
1.152     pk       3610:
                   3611:        pmap_kernel()->pm_reg_ptps_pa = (int *)(q = p);
1.302     briggs   3612:        p += sparc_ncpus * sizeof(int *);
1.326     cegger   3613:        memset((void *)q, 0, (u_int)p - (u_int)q);
1.77      pk       3614:
1.71      pk       3615:        /* Allocate context administration */
1.193     mrg      3616:        pmap_kernel()->pm_ctx = ctxinfo = ci = (union ctxinfo *)p;
1.55      pk       3617:        p += ncontext * sizeof *ci;
1.326     cegger   3618:        memset((void *)ci, 0, (u_int)p - (u_int)ci);
1.55      pk       3619:
1.77      pk       3620:        /*
                   3621:         * Set up the `constants' for the call to vm_init()
                   3622:         * in main().  All pages beginning at p (rounded up to
                   3623:         * the next whole page) and continuing through the number
                   3624:         * of available pages are free.
                   3625:         */
1.265     pk       3626:        p = (p + NBPG - 1) & ~PGOFSET;
1.122     pk       3627:
1.77      pk       3628:        /*
                   3629:         * Reserve memory for MMU pagetables. Some of these have severe
                   3630:         * alignment restrictions. We allocate in a sequence that
                   3631:         * minimizes alignment gaps.
                   3632:         */
                   3633:
1.163     pk       3634:        pagetables_start = p;
1.181     pk       3635:        pagetables_start_pa = PMAP_BOOTSTRAP_VA2PA(p);
1.55      pk       3636:
                   3637:        /*
1.77      pk       3638:         * Allocate context table.
1.71      pk       3639:         * To keep supersparc happy, minimum alignment is on a 4K boundary.
                   3640:         */
                   3641:        ctxtblsize = max(ncontext,1024) * sizeof(int);
                   3642:        cpuinfo.ctx_tbl = (int *)roundup((u_int)p, ctxtblsize);
1.181     pk       3643:        cpuinfo.ctx_tbl_pa = PMAP_BOOTSTRAP_VA2PA(cpuinfo.ctx_tbl);
1.265     pk       3644:        p = (u_int)cpuinfo.ctx_tbl + ctxtblsize;
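	/*
	 * E.g. with ncontext = 256: ctxtblsize = max(256, 1024) * 4 = 4KB,
	 * so the table starts on (at least) a 4KB boundary as required.
	 */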
1.71      pk       3645:
1.226     mrg      3646: #if defined(MULTIPROCESSOR)
                   3647:        /*
                   3648:         * Make sure all smp_tlb_flush*() routines for kernel pmap are
                   3649:         * broadcast to all CPU's.
                   3650:         */
                   3651:        pmap_kernel()->pm_cpuset = CPUSET_ALL;
                   3652: #endif
                   3653:
1.71      pk       3654:        /*
                   3655:         * Reserve memory for segment and page tables needed to map the entire
1.163     pk       3656:         * kernel. This takes (2K + NKREG * 16K) of space, but unfortunately
                   3657:         * is necessary since pmap_enter() *must* be able to enter a kernel
                   3658:         * mapping without delay.
1.55      pk       3659:         */
1.265     pk       3660:        p = (vaddr_t) roundup(p, SRMMU_L1SIZE * sizeof(u_int));
                   3661:        qzero((void *)p, SRMMU_L1SIZE * sizeof(u_int));
1.77      pk       3662:        kernel_regtable_store = (u_int *)p;
1.122     pk       3663:        p += SRMMU_L1SIZE * sizeof(u_int);
1.77      pk       3664:
1.265     pk       3665:        p = (vaddr_t) roundup(p, SRMMU_L2SIZE * sizeof(u_int));
                   3666:        qzero((void *)p, (SRMMU_L2SIZE * sizeof(u_int)) * NKREG);
1.77      pk       3667:        kernel_segtable_store = (u_int *)p;
1.122     pk       3668:        p += (SRMMU_L2SIZE * sizeof(u_int)) * NKREG;
1.77      pk       3669:
1.265     pk       3670:        p = (vaddr_t) roundup(p, SRMMU_L3SIZE * sizeof(u_int));
1.122     pk       3671:        /* zero it: all will be SRMMU_TEINVALID */
1.265     pk       3672:        qzero((void *)p, ((SRMMU_L3SIZE * sizeof(u_int)) * NSEGRG) * NKREG);
1.77      pk       3673:        kernel_pagtable_store = (u_int *)p;
1.265     pk       3674:        p += ((SRMMU_L3SIZE * sizeof(u_int)) * NSEGRG) * NKREG;
1.77      pk       3675:
1.163     pk       3676:        /* Round to next page and mark end of pre-wired kernel space */
1.265     pk       3677:        p = (p + NBPG - 1) & ~PGOFSET;
1.79      pk       3678:        pagetables_end = p;
1.174     pk       3679:
1.330     mrg      3680: #if defined(MULTIPROCESSOR)
                   3681:        /*
                   3682:         * Allocate aligned KVA.  `cpuinfo' resides at a fixed virtual
                   3683:         * address. Since we need to access another CPU's cpuinfo
                   3684:         * structure occasionally, this must be done at a virtual address
                   3685:         * that's cache congruent to the fixed address CPUINFO_VA.
                   3686:         *
                   3687:         * NOTE: we're using the cache properties of the boot CPU to
                   3688:         * determine the alignment (XXX).
                   3689:         */
1.331     mrg      3690:        sz = sizeof(struct cpu_info);
1.330     mrg      3691:        if (sparc_ncpus > 1) {
                   3692:                if (CACHEINFO.c_totalsize > align) {
                   3693:                        /* Need a power of two */
                   3694:                        while (align <= CACHEINFO.c_totalsize)
                   3695:                                align <<= 1;
                   3696:                        align >>= 1;
                   3697:                }
                   3698:
                   3699:                sz = (sz + PAGE_SIZE - 1) & -PAGE_SIZE;
                   3700:                cpuinfo_len = sz + align - PAGE_SIZE;
                   3701:
                   3702:                /* Grab as much space as we need */
                   3703:                cpuinfo_data = (uint8_t *)p;
                   3704:                p += (cpuinfo_len * sparc_ncpus);
                   3705:        } else
                   3706:                cpuinfo_data = (uint8_t *)CPUINFO_VA;
                   3707: #endif
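	/*
	 * Sketch of the congruence requirement (hypothetical 64KB VAC):
	 * the loop above leaves align = 64KB, and the extra
	 * (align - PAGE_SIZE) slack per CPU makes it possible to pick a
	 * start address cpuinfo_va within each slice with
	 * (cpuinfo_va & (align - 1)) == (CPUINFO_VA & (align - 1)),
	 * i.e. one that indexes the same cache lines as CPUINFO_VA.
	 */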
                   3708:
1.181     pk       3709:        avail_start = PMAP_BOOTSTRAP_VA2PA(p);
1.71      pk       3710:
                   3711:        /*
1.163     pk       3712:         * Now wire the region and segment tables of the kernel map.
1.71      pk       3713:         */
1.152     pk       3714:        pmap_kernel()->pm_reg_ptps[0] = (int *) kernel_regtable_store;
                   3715:        pmap_kernel()->pm_reg_ptps_pa[0] =
1.233     pk       3716:                 PMAP_BOOTSTRAP_VA2PA(kernel_regtable_store);
1.71      pk       3717:
                   3718:        /* Install L1 table in context 0 */
1.79      pk       3719:        setpgt4m(&cpuinfo.ctx_tbl[0],
1.152     pk       3720:            (pmap_kernel()->pm_reg_ptps_pa[0] >> SRMMU_PPNPASHIFT) | SRMMU_TEPTD);
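	/*
	 * The value stored above is an SRMMU page table descriptor (PTD):
	 * the table's physical address shifted right by SRMMU_PPNPASHIFT
	 * with the low type bits set to SRMMU_TEPTD. E.g. (hypothetical)
	 * an L1 table at PA 0x00400000 yields the descriptor
	 * (0x00400000 >> SRMMU_PPNPASHIFT) | SRMMU_TEPTD.
	 */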
1.71      pk       3721:
1.96      pk       3722:        for (reg = 0; reg < NKREG; reg++) {
1.77      pk       3723:                struct regmap *rp;
1.311     christos 3724:                void *kphyssegtbl;
1.71      pk       3725:
                   3726:                /*
1.77      pk       3727:                 * Entering new region; install & build segtbl
1.71      pk       3728:                 */
                   3729:
1.96      pk       3730:                rp = &pmap_kernel()->pm_regmap[reg + VA_VREG(KERNBASE)];
1.71      pk       3731:
1.311     christos 3732:                kphyssegtbl = (void *)
1.96      pk       3733:                    &kernel_segtable_store[reg * SRMMU_L2SIZE];
1.71      pk       3734:
1.152     pk       3735:                setpgt4m(&pmap_kernel()->pm_reg_ptps[0][reg + VA_VREG(KERNBASE)],
1.233     pk       3736:                    (PMAP_BOOTSTRAP_VA2PA(kphyssegtbl) >> SRMMU_PPNPASHIFT) |
                   3737:                    SRMMU_TEPTD);
1.71      pk       3738:
                   3739:                rp->rg_seg_ptps = (int *)kphyssegtbl;
                   3740:
                   3741:                for (seg = 0; seg < NSEGRG; seg++) {
1.77      pk       3742:                        struct segmap *sp;
1.311     christos 3743:                        void *kphyspagtbl;
1.71      pk       3744:
                   3745:                        rp->rg_nsegmap++;
                   3746:
                   3747:                        sp = &rp->rg_segmap[seg];
1.311     christos 3748:                        kphyspagtbl = (void *)
1.71      pk       3749:                            &kernel_pagtable_store
1.96      pk       3750:                                [((reg * NSEGRG) + seg) * SRMMU_L3SIZE];
1.71      pk       3751:
1.77      pk       3752:                        setpgt4m(&rp->rg_seg_ptps[seg],
1.233     pk       3753:                                 (PMAP_BOOTSTRAP_VA2PA(kphyspagtbl) >> SRMMU_PPNPASHIFT) |
1.77      pk       3754:                                 SRMMU_TEPTD);
1.71      pk       3755:                        sp->sg_pte = (int *) kphyspagtbl;
                   3756:                }
                   3757:        }
                   3758:
                   3759:        /*
                   3760:         * Preserve the monitor ROM's reserved VM region, so that
                   3761:         * we can use L1-A or the monitor's debugger.
1.55      pk       3762:         */
1.77      pk       3763:        mmu_reservemon4m(&kernel_pmap_store);
1.55      pk       3764:
                   3765:        /*
1.77      pk       3766:         * Reserve virtual address space for two mappable MD pages
                   3767:         * for pmap_zero_page and pmap_copy_page, one MI page
                   3768:         * for /dev/mem, and some more for dumpsys().
1.55      pk       3769:         */
1.77      pk       3770:        q = p;
1.311     christos 3771:        cpuinfo.vpage[0] = (void *)p, p += NBPG;
                   3772:        cpuinfo.vpage[1] = (void *)p, p += NBPG;
                   3773:        p = (vaddr_t)reserve_dumppages((void *)p);
1.55      pk       3774:
1.101     pk       3775:        /* Find PTE locations of vpage[] to optimize zero_fill() et al. */
                   3776:        for (i = 0; i < 2; i++) {
                   3777:                struct regmap *rp;
                   3778:                struct segmap *sp;
1.228     pk       3779:                rp = &pmap_kernel()->pm_regmap[VA_VREG(cpuinfo.vpage[i])];
                   3780:                sp = &rp->rg_segmap[VA_VSEG(cpuinfo.vpage[i])];
                   3781:                cpuinfo.vpage_pte[i] =
                   3782:                        &sp->sg_pte[VA_SUN4M_VPG(cpuinfo.vpage[i])];
1.101     pk       3783:        }
                   3784:
1.275     martin   3785: #if !(defined(PROM_AT_F0) || defined(MSIIEP))
1.265     pk       3786:        virtual_avail = p;
1.275     martin   3787: #elif defined(MSIIEP)
                   3788:        virtual_avail = (vaddr_t)0xf0800000; /* Krups */
                   3789: #else
                   3790:        virtual_avail = (vaddr_t)0xf0080000; /* Mr.Coffee/OFW */
                   3791: #endif
1.55      pk       3792:        virtual_end = VM_MAX_KERNEL_ADDRESS;
                   3793:
1.77      pk       3794:        p = q;                  /* retract to first free phys */
1.55      pk       3795:
1.69      pk       3796:        /*
                   3797:         * Set up the ctxinfo structures (freelist of contexts)
1.55      pk       3798:         */
                   3799:        ci->c_pmap = pmap_kernel();
                   3800:        ctx_freelist = ci + 1;
                   3801:        for (i = 1; i < ncontext; i++) {
                   3802:                ci++;
                   3803:                ci->c_nextfree = ci + 1;
                   3804:        }
                   3805:        ci->c_nextfree = NULL;
                   3806:        ctx_kick = 0;
                   3807:        ctx_kickdir = -1;
                   3808:
1.69      pk       3809:        /*
                   3810:         * Now map the kernel into our new set of page tables, then
1.55      pk       3811:         * (finally) switch over to our running page tables.
                   3812:         * We map from KERNBASE to p into context 0's page tables (and
                   3813:         * the kernel pmap).
                   3814:         */
                   3815: #ifdef DEBUG                   /* Sanity checks */
1.265     pk       3816:        if (p % NBPG != 0)
1.69      pk       3817:                panic("pmap_bootstrap4m: p misaligned?!?");
1.55      pk       3818:        if (KERNBASE % NBPRG != 0)
1.69      pk       3819:                panic("pmap_bootstrap4m: KERNBASE not region-aligned");
1.55      pk       3820: #endif
1.69      pk       3821:
1.265     pk       3822:        for (q = KERNBASE; q < p; q += NBPG) {
1.77      pk       3823:                struct regmap *rp;
                   3824:                struct segmap *sp;
1.177     pk       3825:                int pte, *ptep;
1.77      pk       3826:
1.69      pk       3827:                /*
1.71      pk       3828:                 * Now install entry for current page.
1.69      pk       3829:                 */
1.77      pk       3830:                rp = &pmap_kernel()->pm_regmap[VA_VREG(q)];
                   3831:                sp = &rp->rg_segmap[VA_VSEG(q)];
1.177     pk       3832:                ptep = &sp->sg_pte[VA_VPG(q)];
                   3833:
                   3834:                /*
                   3835:                 * Unmap the `etext gap'; it'll be made available
                   3836:                 * to the VM manager.
                   3837:                 */
1.265     pk       3838:                if (q >= etext_gap_start && q < etext_gap_end) {
1.177     pk       3839:                        setpgt4m(ptep, 0);
                   3840:                        continue;
                   3841:                }
                   3842:
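                                        /*
                                         * Construct the level-3 PTE for this page: the
                                         * physical page number in the high bits plus kernel
                                         * read/execute permission; the write and cacheable
                                         * bits are added below.
                                         */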
1.181     pk       3843:                pte = PMAP_BOOTSTRAP_VA2PA(q) >> SRMMU_PPNPASHIFT;
1.122     pk       3844:                pte |= PPROT_N_RX | SRMMU_TEPTE;
                   3845:
                   3846:                /* Deal with the cacheable bit for pagetable memory */
                   3847:                if ((cpuinfo.flags & CPUFLG_CACHEPAGETABLES) != 0 ||
                   3848:                    q < pagetables_start || q >= pagetables_end)
                   3849:                        pte |= SRMMU_PG_C;
                   3850:
1.77      pk       3851:                /* write-protect kernel text: pages outside [trapbase, etext) get write permission */
1.265     pk       3852:                if (q < (vaddr_t)trapbase || q >= (vaddr_t)etext)
1.77      pk       3853:                        pte |= PPROT_WRITE;
                   3854:
1.177     pk       3855:                setpgt4m(ptep, pte);
1.277     pk       3856:                pmap_kernel()->pm_stats.resident_count++;
1.69      pk       3857:        }
                   3858:
1.160     pk       3859:        if ((cpuinfo.flags & CPUFLG_CACHEPAGETABLES) == 0) {
                   3860:                /*
                   3861:                 * The page tables have been setup. Since we're still
                   3862:                 * running on the PROM's memory map, the memory we
                   3863:                 * allocated for our page tables might still be cached.
                   3864:                 * Flush it now, and don't touch it again until we
                   3865:                 * switch to our own tables (will be done immediately below).
                   3866:                 */
1.167     pk       3867:                int size = pagetables_end - pagetables_start;
                   3868:                if (CACHEINFO.c_vactype != VAC_NONE) {
1.325     martin   3869:                        va = (vaddr_t)pagetables_start;
1.265     pk       3870:                        while (size > 0) {
1.214     pk       3871:                                cache_flush_page(va, 0);
1.167     pk       3872:                                va += NBPG;
                   3873:                                size -= NBPG;
                   3874:                        }
                   3875:                } else if (cpuinfo.pcache_flush_page != NULL) {
1.265     pk       3876:                        paddr_t pa = pagetables_start_pa;
                   3877:                        while (size > 0) {
1.167     pk       3878:                                pcache_flush_page(pa, 0);
                   3879:                                pa += NBPG;
                   3880:                                size -= NBPG;
                   3881:                        }
                   3882:                }
1.160     pk       3883:        }
1.100     pk       3884:
1.55      pk       3885:        /*
                   3886:         * Now switch to kernel pagetables (finally!)
                   3887:         */
1.69      pk       3888:        mmu_install_tables(&cpuinfo);
1.325     martin   3889:
1.330     mrg      3890: #if defined(MULTIPROCESSOR)
1.325     martin   3891:        /*
1.329     mrg      3892:         * Initialise any cpu-specific data now.
                   3893:         */
                   3894:        cpu_init_system();
                   3895:
                   3896:        /*
1.327     mrg      3897:         * Setup the cpus[] array and the ci_self links.
                   3898:         */
1.325     martin   3899:        for (i = 0; i < sparc_ncpus; i++) {
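                                        /*
                                         * Advance sva by the smallest amount that makes
                                         * cpuinfo_va congruent to CPUINFO_VA modulo `align',
                                         * i.e. both addresses share the same alignment
                                         * offset (cache colour).
                                         */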
1.330     mrg      3900:                sva = (vaddr_t) (cpuinfo_data + (cpuinfo_len * i));
                   3901:                cpuinfo_va = sva +
                   3902:                   (((CPUINFO_VA & (align - 1)) + align - sva) & (align - 1));
                   3903:
                   3904:                /*
                   3905:                 * Either remap from CPUINFO_VA to the new correct value
                   3906:                 * or clear out this cpuinfo.
                   3907:                 */
                   3908:                if (i == 0) {
                   3909:                        for (off = 0, va = cpuinfo_va;
                   3910:                             sparc_ncpus > 1 && off < sizeof(struct cpu_info);
                   3911:                             va += NBPG, off += NBPG) {
                   3912:                                paddr_t pa =
                   3913:                                    PMAP_BOOTSTRAP_VA2PA(CPUINFO_VA + off);
                   3914:
                   3915:                                pmap_kremove(va, NBPG);
                   3916:                                pmap_kenter_pa(va, pa,
1.337     cegger   3917:                                               VM_PROT_READ | VM_PROT_WRITE, 0);
1.330     mrg      3918:                        }
                   3919:
                   3920:                } else
                   3921:                        memset((void *)cpuinfo_va, 0, sizeof(struct cpu_info));
                   3922:
                   3923:                cpus[i] = (struct cpu_info *)cpuinfo_va;
1.327     mrg      3924:                cpus[i]->ci_self = cpus[i];
1.330     mrg      3925:
                   3926:                /* Unmap and prepare to return unused pages */
                   3927:                if (cpuinfo_va != sva) {
                   3928:                        cpus[i]->ci_free_sva1 = sva;
                   3929:                        cpus[i]->ci_free_eva1 = cpuinfo_va;
                   3930:                        for (va = cpus[i]->ci_free_sva1;
                   3931:                             va < cpus[i]->ci_free_eva1;
                   3932:                             va += NBPG)
                   3933:                                setpte4m(va, 0);
                   3934:                }
                   3935:                if (cpuinfo_va + sz != sva + cpuinfo_len) {
                   3936:                        cpus[i]->ci_free_sva2 = cpuinfo_va + sz;
                   3937:                        cpus[i]->ci_free_eva2 = sva + cpuinfo_len;
                   3938:                        for (va = cpus[i]->ci_free_sva2;
                   3939:                             va < cpus[i]->ci_free_eva2;
                   3940:                             va += NBPG)
                   3941:                                setpte4m(va, 0);
                   3942:                }
1.325     martin   3943:        }
1.329     mrg      3944: #else
                   3945:        cpus[0] = (struct cpu_info *)CPUINFO_VA;
1.325     martin   3946: #endif
1.327     mrg      3947:
1.325     martin   3948:        pmap_update(pmap_kernel());
1.338     mrg      3949:
                   3950: #ifdef DIAGNOSTIC
                   3951:        if (curcpu()->ci_self != cpus[0]) {
                   3952:                prom_printf("curcpu()->ci_self %p != cpus[0] %p\n", curcpu()->ci_self, cpus[0]);
                   3953:                panic("cpuinfo inconsistent");
                   3954:        }
                   3955: #endif
1.69      pk       3956: }
                   3957:
1.97      pk       3958: static u_long prom_ctxreg;
                   3959:
1.69      pk       3960: void
1.303     uwe      3961: mmu_install_tables(struct cpu_info *sc)
1.69      pk       3962: {
                   3963:
                   3964: #ifdef DEBUG
1.325     martin   3965:        prom_printf("pmap_bootstrap: installing kernel page tables...");
1.69      pk       3966: #endif
1.71      pk       3967:        setcontext4m(0);        /* paranoia? %%%: make the 0x3 mask used below a #define */
1.69      pk       3968:
                   3969:        /* Enable MMU tablewalk caching, flush TLB */
                   3970:        if (sc->mmu_enable != 0)
                   3971:                sc->mmu_enable();
                   3972:
1.195     mrg      3973:        tlb_flush_all_real();
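                                /* Save the PROM's context table pointer so srmmu_restore_prom_ctx() can restore it */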
1.97      pk       3974:        prom_ctxreg = lda(SRMMU_CXTPTR, ASI_SRMMU);
1.69      pk       3975:
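                                /*
                                 * Install our own context table: the register takes the
                                 * table's physical address in PTP-like format; the low two
                                 * bits must be zero, hence the ~0x3 mask.
                                 */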
                   3976:        sta(SRMMU_CXTPTR, ASI_SRMMU,
1.128     pk       3977:                (sc->ctx_tbl_pa >> SRMMU_PPNPASHIFT) & ~0x3);
1.69      pk       3978:
1.195     mrg      3979:        tlb_flush_all_real();
1.69      pk       3980:
                   3981: #ifdef DEBUG
1.325     martin   3982:        prom_printf("done.\n");
1.69      pk       3983: #endif
                   3984: }
1.55      pk       3985:
1.246     pk       3986: void srmmu_restore_prom_ctx(void);
1.97      pk       3987:
                   3988: void
1.303     uwe      3989: srmmu_restore_prom_ctx(void)
1.97      pk       3990: {
1.303     uwe      3991:
1.97      pk       3992:        tlb_flush_all();
                   3993:        sta(SRMMU_CXTPTR, ASI_SRMMU, prom_ctxreg);
                   3994:        tlb_flush_all();
1.164     thorpej  3995: }
1.232     pk       3996: #endif /* SUN4M || SUN4D */
1.164     thorpej  3997:
1.232     pk       3998: #if defined(MULTIPROCESSOR)
1.164     thorpej  3999: /*
1.128     pk       4000:  * Allocate per-CPU page tables. One region, segment and page table
                   4001:  * is needed to map CPUINFO_VA to different physical addresses on
                   4002:  * each CPU. Since the kernel region and segment tables are all
                   4003:  * pre-wired (in bootstrap() above) and we also assume that the
                   4004:  * first segment (256K) of kernel space is fully populated with
                   4005:  * pages from the start, these per-CPU tables will never need
                   4006:  * to be updated when mapping kernel virtual memory.
                   4007:  *
1.69      pk       4008:  * Note: this routine is called in the context of the boot CPU
                   4009:  * during autoconfig.
                   4010:  */
                   4011: void
1.303     uwe      4012: pmap_alloc_cpu(struct cpu_info *sc)
1.69      pk       4013: {
1.232     pk       4014: #if defined(SUN4M) || defined(SUN4D)   /* Only implemented for SUN4M/D */
1.128     pk       4015:        vaddr_t va;
1.166     pk       4016:        paddr_t pa;
                   4017:        paddr_t alignment;
1.128     pk       4018:        u_int *ctxtable, *regtable, *segtable, *pagtable;
1.166     pk       4019:        u_int *ctxtable_pa, *regtable_pa, *segtable_pa, *pagtable_pa;
                   4020:        psize_t ctxsize, size;
1.72      pk       4021:        int vr, vs, vpg;
                   4022:        struct regmap *rp;
                   4023:        struct segmap *sp;
1.128     pk       4024:        struct pglist mlist;
                   4025:        int cachebit;
1.166     pk       4026:        int pagesz = NBPG;
1.228     pk       4027:        int i;
1.128     pk       4028:
1.218     pk       4029:        cachebit = (cpuinfo.flags & CPUFLG_CACHEPAGETABLES) != 0;
1.128     pk       4030:
                   4031:        /*
                   4032:         * Allocate properly aligned and contiguous physical memory
1.166     pk       4033:         * for the PTE tables.
1.128     pk       4034:         */
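                                /*
                                 * Note: the SRMMU requires page tables to be aligned to
                                 * their own size; aligning to ctxsize (a page multiple)
                                 * satisfies this for the context table allocated below.
                                 */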
1.166     pk       4035:        ctxsize = (sc->mmu_ncontext * sizeof(int) + pagesz - 1) & -pagesz;
                   4036:        alignment = ctxsize;
                   4037:
                   4038:        /* The region, segment and page table we need fit in one page */
                   4039:        size = ctxsize + pagesz;
                   4040:
                   4041:        if (uvm_pglistalloc(size, vm_first_phys, vm_first_phys+vm_num_phys,
                   4042:                            alignment, 0, &mlist, 1, 0) != 0)
1.128     pk       4043:                panic("pmap_alloc_cpu: no memory");
                   4044:
1.166     pk       4045:        pa = VM_PAGE_TO_PHYS(TAILQ_FIRST(&mlist));
                   4046:
                   4047:        /* Allocate virtual memory */
1.298     yamt     4048:        va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY);
1.128     pk       4049:        if (va == 0)
                   4050:                panic("pmap_alloc_cpu: no memory");
                   4051:
1.166     pk       4052:        /*
                   4053:         * Layout the page tables in our chunk of memory
                   4054:         */
                   4055:        ctxtable = (u_int *)va;
                   4056:        regtable = (u_int *)(va + ctxsize);
                   4057:        segtable = regtable + SRMMU_L1SIZE;
                   4058:        pagtable = segtable + SRMMU_L2SIZE;
1.128     pk       4059:
1.166     pk       4060:        ctxtable_pa = (u_int *)pa;
                   4061:        regtable_pa = (u_int *)(pa + ctxsize);
                   4062:        segtable_pa = regtable_pa + SRMMU_L1SIZE;
                   4063:        pagtable_pa = segtable_pa + SRMMU_L2SIZE;
1.128     pk       4064:
                   4065:        /* Map the pages */
1.166     pk       4066:        while (size != 0) {
1.199     chs      4067:                pmap_kenter_pa(va, pa | (cachebit ? 0 : PMAP_NC),
1.337     cegger   4068:                    VM_PROT_READ | VM_PROT_WRITE, 0);
1.166     pk       4069:                va += pagesz;
                   4070:                pa += pagesz;
                   4071:                size -= pagesz;
1.128     pk       4072:        }
1.198     chris    4073:        pmap_update(pmap_kernel());
1.72      pk       4074:
1.128     pk       4075:        /*
1.152     pk       4076:         * Store the region table pointer (and its corresponding physical
                   4077:         * address) in the CPU's slot in the kernel pmap region table
                   4078:         * pointer table.
1.133     pk       4079:         */
1.176     christos 4080:        pmap_kernel()->pm_reg_ptps[sc->ci_cpuid] = regtable;
                   4081:        pmap_kernel()->pm_reg_ptps_pa[sc->ci_cpuid] = (paddr_t)regtable_pa;
1.133     pk       4082:
1.72      pk       4083:        vr = VA_VREG(CPUINFO_VA);
                   4084:        vs = VA_VSEG(CPUINFO_VA);
                   4085:        vpg = VA_VPG(CPUINFO_VA);
                   4086:        rp = &pmap_kernel()->pm_regmap[vr];
                   4087:        sp = &rp->rg_segmap[vs];
                   4088:
                   4089:        /*
1.166     pk       4090:         * Copy page tables from CPU #0, then modify entry for CPUINFO_VA
                   4091:         * so that it points at the per-CPU pages.
1.72      pk       4092:         */
1.166     pk       4093:        qcopy(pmap_kernel()->pm_reg_ptps[0], regtable,
                   4094:                SRMMU_L1SIZE * sizeof(int));
                   4095:        qcopy(rp->rg_seg_ptps, segtable, SRMMU_L2SIZE * sizeof(int));
                   4096:        qcopy(sp->sg_pte, pagtable, SRMMU_L3SIZE * sizeof(int));
                   4097:
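                                /*
                                 * Link the new tables together: context 0 -> region table
                                 * -> segment table -> page table, then point the
                                 * CPUINFO_VA page at this CPU's own cpu_info structure.
                                 */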
1.128     pk       4098:        setpgt4m(&ctxtable[0],
1.166     pk       4099:                 ((u_long)regtable_pa >> SRMMU_PPNPASHIFT) | SRMMU_TEPTD);
1.128     pk       4100:        setpgt4m(&regtable[vr],
1.166     pk       4101:                 ((u_long)segtable_pa >> SRMMU_PPNPASHIFT) | SRMMU_TEPTD);
1.128     pk       4102:        setpgt4m(&segtable[vs],
1.166     pk       4103:                 ((u_long)pagtable_pa >> SRMMU_PPNPASHIFT) | SRMMU_TEPTD);
1.128     pk       4104:        setpgt4m(&pagtable[vpg],
1.311     christos 4105:                (VA2PA((void *)sc) >> SRMMU_PPNPASHIFT) |
1.129     pk       4106:                (SRMMU_TEPTE | PPROT_N_RWX | SRMMU_PG_C));
1.72      pk       4107:
1.166     pk       4108:        /* Install this CPU's context table */
1.72      pk       4109:        sc->ctx_tbl = ctxtable;
1.166     pk       4110:        sc->ctx_tbl_pa = (paddr_t)ctxtable_pa;
1.228     pk       4111:
                   4112:        /* Pre-compute this CPU's vpage[] PTEs */
                   4113:        for (i = 0; i < 2; i++) {
                   4114:                rp = &pmap_kernel()->pm_regmap[VA_VREG(sc->vpage[i])];
                   4115:                sp = &rp->rg_segmap[VA_VSEG(sc->vpage[i])];
                   4116:                sc->vpage_pte[i] = &sp->sg_pte[VA_SUN4M_VPG(sc->vpage[i])];
                   4117:        }
1.232     pk       4118: #endif /* SUN4M || SUN4D */
1.55      pk       4119: }
1.232     pk       4120: #endif /* MULTIPROCESSOR */
1.55      pk       4121:
1.69      pk       4122:
1.55      pk       4123: void
1.303     uwe      4124: pmap_init(void)
1.55      pk       4125: {
1.265     pk       4126:        u_int sz;
1.55      pk       4127:
                   4128:        if (PAGE_SIZE != NBPG)
1.205     simonb   4129:                panic("pmap_init: PAGE_SIZE!=NBPG");
1.55      pk       4130:
1.181     pk       4131:        vm_num_phys = vm_last_phys - vm_first_phys;
1.121     pk       4132:
1.122     pk       4133:        /* Setup a pool for additional pvlist structures */
1.314     ad       4134:        pool_init(&pv_pool, sizeof(struct pvlist), 0, 0, 0, "pvtable", NULL,
                   4135:            IPL_NONE);
1.121     pk       4136:
1.152     pk       4137:        /*
                   4138:         * Setup a pool for pmap structures.
1.276     wiz      4139:         * The pool size includes space for an array of per-CPU
1.152     pk       4140:         * region table pointers & physical addresses
                   4141:         */
1.265     pk       4142:        sz = ALIGN(sizeof(struct pmap)) +
                   4143:             ALIGN(NUREG * sizeof(struct regmap)) +
1.302     briggs   4144:             sparc_ncpus * sizeof(int *) +      /* pm_reg_ptps */
                   4145:             sparc_ncpus * sizeof(int);         /* pm_reg_ptps_pa */
1.320     ad       4146:        pool_cache_bootstrap(&pmap_cache, sz, 0, 0, 0, "pmappl", NULL,
                   4147:            IPL_NONE, pmap_pmap_pool_ctor, pmap_pmap_pool_dtor, NULL);
1.134     thorpej  4148:
1.265     pk       4149:        sz = NSEGRG * sizeof (struct segmap);
1.314     ad       4150:        pool_init(&segmap_pool, sz, 0, 0, 0, "segmap", NULL, IPL_NONE);
1.265     pk       4151:
1.210     thorpej  4152: #if defined(SUN4M) || defined(SUN4D)
                   4153:        if (CPU_HAS_SRMMU) {
1.121     pk       4154:                /*
                   4155:                 * The SRMMU only ever needs chunks in one of two sizes:
                   4156:                 * 1024 (for region level tables) and 256 (for segment
                   4157:                 * and page level tables).
                   4158:                 */
1.265     pk       4159:                sz = SRMMU_L1SIZE * sizeof(int);
                   4160:                pool_init(&L1_pool, sz, sz, 0, 0, "L1 pagetable",
1.314     ad       4161:                          &pgt_page_allocator, IPL_NONE);
1.121     pk       4162:
1.265     pk       4163:                sz = SRMMU_L2SIZE * sizeof(int);
                   4164:                pool_init(&L23_pool, sz, sz, 0, 0, "L2/L3 pagetable",
1.314     ad       4165:                          &pgt_page_allocator, IPL_NONE);
1.121     pk       4166:        }
1.232     pk       4167: #endif /* SUN4M || SUN4D */
1.265     pk       4168: #if defined(SUN4) || defined(SUN4C)
1.296     chs      4169:        if (CPU_HAS_SUNMMU) {
1.265     pk       4170:                sz = NPTESG * sizeof(int);
1.314     ad       4171:                pool_init(&pte_pool, sz, 0, 0, 0, "ptemap", NULL,
                   4172:                    IPL_NONE);
1.265     pk       4173:        }
                   4174: #endif /* SUN4 || SUN4C */
1.36      pk       4175: }
                   4176:
1.1       deraadt  4177:
                   4178: /*
                   4179:  * Map physical addresses into kernel VM.
                   4180:  */
1.124     pk       4181: vaddr_t
1.303     uwe      4182: pmap_map(vaddr_t va, paddr_t pa, paddr_t endpa, int prot)
1.1       deraadt  4183: {
1.124     pk       4184:        int pgsize = PAGE_SIZE;
1.1       deraadt  4185:
                   4186:        while (pa < endpa) {
1.337     cegger   4187:                pmap_kenter_pa(va, pa, prot, 0);
1.1       deraadt  4188:                va += pgsize;
                   4189:                pa += pgsize;
                   4190:        }
1.198     chris    4191:        pmap_update(pmap_kernel());
1.1       deraadt  4192:        return (va);
                   4193: }
                   4194:
1.186     pk       4195: #ifdef DEBUG
1.1       deraadt  4196: /*
1.186     pk       4197:  * Check a pmap for spuriously lingering mappings
1.1       deraadt  4198:  */
1.307     perry    4199: static inline void
1.195     mrg      4200: pmap_quiet_check(struct pmap *pm)
1.186     pk       4201: {
                   4202:        int vs, vr;
                   4203:
1.296     chs      4204:        if (CPU_HAS_SUNMMU) {
1.186     pk       4205: #if defined(SUN4_MMU3L)
1.296     chs      4206:                if (TAILQ_FIRST(&pm->pm_reglist))
1.186     pk       4207:                        panic("pmap_chk: region list not empty");
                   4208: #endif
1.296     chs      4209:                if (TAILQ_FIRST(&pm->pm_seglist))
1.186     pk       4210:                        panic("pmap_chk: segment list not empty");
                   4211:        }
                   4212:
                   4213:        for (vr = 0; vr < NUREG; vr++) {
                   4214:                struct regmap *rp = &pm->pm_regmap[vr];
                   4215:
1.296     chs      4216:                if (HASSUN4_MMU3L) {
                   4217:                        if (rp->rg_smeg != reginval)
                   4218:                                printf("pmap_chk: spurious smeg in "
                   4219:                                       "user region %d\n", vr);
1.186     pk       4220:                }
1.210     thorpej  4221:                if (CPU_HAS_SRMMU) {
1.186     pk       4222:                        int n;
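                                                /*
                                                 * The block below runs once per CPU on MP
                                                 * kernels, or just once with n == 0 on
                                                 * uniprocessor kernels.
                                                 */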
                   4223: #if defined(MULTIPROCESSOR)
1.302     briggs   4224:                        for (n = 0; n < sparc_ncpus; n++)
1.186     pk       4225: #else
                   4226:                        n = 0;
                   4227: #endif
                   4228:                        {
1.338     mrg      4229:                                /* Did this cpu attach? */
                   4230:                                if (pmap_kernel()->pm_reg_ptps[n] == 0)
                   4231:                                        continue;
                   4232:
1.186     pk       4233:                                if (pm->pm_reg_ptps[n][vr] != SRMMU_TEINVALID)
                   4234:                                        printf("pmap_chk: spurious PTP in user "
1.276     wiz      4235:                                                "region %d on CPU %d\n", vr, n);
1.186     pk       4236:                        }
                   4237:                }
                   4238:                if (rp->rg_nsegmap != 0)
                   4239:                        printf("pmap_chk: %d segments remain in "
                   4240:                                "region %d\n", rp->rg_nsegmap, vr);
                   4241:                if (rp->rg_segmap != NULL) {
                   4242:                        printf("pmap_chk: segments still "
                   4243:                                "allocated in region %d\n", vr);
                   4244:                        for (vs = 0; vs < NSEGRG; vs++) {
                   4245:                                struct segmap *sp = &rp->rg_segmap[vs];
                   4246:                                if (sp->sg_npte != 0)
                   4247:                                        printf("pmap_chk: %d ptes "
                   4248:                                             "remain in segment %d\n",
                   4249:                                                sp->sg_npte, vs);
                   4250:                                if (sp->sg_pte != NULL) {
                   4251:                                        printf("pmap_chk: ptes still "
                   4252:                                             "allocated in segment %d\n", vs);
                   4253:                                }
1.296     chs      4254:                                if (CPU_HAS_SUNMMU) {
1.213     pk       4255:                                        if (sp->sg_pmeg != seginval)
                   4256:                                                printf("pmap_chk: pm %p(%d,%d) "
                   4257:                                                  "spurious soft pmeg %d\n",
                   4258:                                                  pm, vr, vs, sp->sg_pmeg);
                   4259:                                }
                   4260:                        }
                   4261:                }
                   4262:
                   4263:                /* Check for spurious pmeg entries in the MMU */
                   4264:                if (pm->pm_ctx == NULL)
                   4265:                        continue;
1.296     chs      4266:                if (CPU_HAS_SUNMMU) {
1.213     pk       4267:                        int ctx;
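                                                /* Skip regions that fall inside the sun4c MMU hole */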
                   4268:                        if (mmu_has_hole && (vr >= 32 && vr < (256 - 32)))
                   4269:                                continue;
                   4270:                        ctx = getcontext4();
                   4271:                        setcontext4(pm->pm_ctxnum);
                   4272:                        for (vs = 0; vs < NSEGRG; vs++) {
                   4273:                                vaddr_t va = VSTOVA(vr,vs);
                   4274:                                int pmeg = getsegmap(va);
                   4275:                                if (pmeg != seginval)
                   4276:                                        printf("pmap_chk: pm %p(%d,%d:%x): "
                   4277:                                                "spurious pmeg %d\n",
                   4278:                                                pm, vr, vs, (u_int)va, pmeg);
1.186     pk       4279:                        }
1.213     pk       4280:                        setcontext4(ctx);
1.186     pk       4281:                }
                   4282:        }
1.296     chs      4283:        if (pm->pm_stats.resident_count) {
                   4284:                printf("pmap_chk: res count %ld\n",
                   4285:                       pm->pm_stats.resident_count);
                   4286:        }
                   4287:        if (pm->pm_stats.wired_count) {
                   4288:                printf("pmap_chk: wired count %ld\n",
                   4289:                       pm->pm_stats.wired_count);
                   4290:        }
1.186     pk       4291: }
                   4292: #endif /* DEBUG */
                   4293:
                   4294: int
                   4295: pmap_pmap_pool_ctor(void *arg, void *object, int flags)
1.1       deraadt  4296: {
1.186     pk       4297:        struct pmap *pm = object;
1.152     pk       4298:        u_long addr;
1.1       deraadt  4299:
1.326     cegger   4300:        memset(pm, 0, sizeof *pm);
1.1       deraadt  4301:
1.152     pk       4302:        /*
                   4303:         * `pmap_pool' entries include space for the per-CPU
                   4304:         * region table pointer arrays.
                   4305:         */
                   4306:        addr = (u_long)pm + ALIGN(sizeof(struct pmap));
1.186     pk       4307:        pm->pm_regmap = (void *)addr;
                   4308:        addr += ALIGN(NUREG * sizeof(struct regmap));
1.152     pk       4309:        pm->pm_reg_ptps = (int **)addr;
1.302     briggs   4310:        addr += sparc_ncpus * sizeof(int *);
1.152     pk       4311:        pm->pm_reg_ptps_pa = (int *)addr;
                   4312:
1.311     christos 4313:        qzero((void *)pm->pm_regmap, NUREG * sizeof(struct regmap));
1.13      pk       4314:
1.213     pk       4315:        /* pm->pm_ctx = NULL; // already done */
1.55      pk       4316:
1.296     chs      4317:        if (CPU_HAS_SUNMMU) {
1.55      pk       4318:                TAILQ_INIT(&pm->pm_seglist);
1.69      pk       4319: #if defined(SUN4_MMU3L)
1.55      pk       4320:                TAILQ_INIT(&pm->pm_reglist);
1.69      pk       4321:                if (HASSUN4_MMU3L) {
                   4322:                        int i;
                   4323:                        for (i = NUREG; --i >= 0;)
                   4324:                                pm->pm_regmap[i].rg_smeg = reginval;
                   4325:                }
1.43      pk       4326: #endif
1.55      pk       4327:        }
1.210     thorpej  4328: #if defined(SUN4M) || defined(SUN4D)
1.55      pk       4329:        else {
1.152     pk       4330:                int i, n;
1.79      pk       4331:
1.55      pk       4332:                /*
                   4333:                 * We must allocate and initialize hardware-readable (MMU)
                   4334:                 * pagetables. We must also map the kernel regions into this
                   4335:                 * pmap's pagetables, so that we can access the kernel from
1.89      pk       4336:                 * this user context.
1.55      pk       4337:                 */
1.152     pk       4338: #if defined(MULTIPROCESSOR)
1.302     briggs   4339:                for (n = 0; n < sparc_ncpus; n++)
1.152     pk       4340: #else
                   4341:                n = 0;
                   4342: #endif
                   4343:                {
                   4344:                        int *upt, *kpt;
                   4345:
1.339     mrg      4346: #if defined(MULTIPROCESSOR)
1.338     mrg      4347:                        /* Did this cpu attach? */
                   4348:                        if (pmap_kernel()->pm_reg_ptps[n] == 0)
                   4349:                                continue;
1.339     mrg      4350: #endif
1.338     mrg      4351:
1.186     pk       4352:                        upt = pool_get(&L1_pool, flags);
1.152     pk       4353:                        pm->pm_reg_ptps[n] = upt;
                   4354:                        pm->pm_reg_ptps_pa[n] = VA2PA((char *)upt);
                   4355:
                   4356:                        /* Invalidate user space regions */
                   4357:                        for (i = 0; i < NUREG; i++)
                   4358:                                setpgt4m(upt++, SRMMU_TEINVALID);
                   4359:
                   4360:                        /* Copy kernel regions */
                   4361:                        kpt = &pmap_kernel()->pm_reg_ptps[n][VA_VREG(KERNBASE)];
1.217     pk       4362:                        for (i = 0; i < NKREG; i++)
                   4363:                                setpgt4m(upt++, kpt[i]);
1.79      pk       4364:                }
1.55      pk       4365:        }
1.232     pk       4366: #endif /* SUN4M || SUN4D */
1.193     mrg      4367:
1.278     pk       4368:        /* XXX - a peculiar place to do this, but we can't do it in pmap_init
                   4369:         * and here at least it's off the beaten code track.
                   4370:         */
                   4371: { static int x; if (x == 0) { pool_setlowat(&pv_pool, 512); x = 1; } }
                   4372:
1.186     pk       4373:        return (0);
1.1       deraadt  4374: }
                   4375:
                   4376: void
1.186     pk       4377: pmap_pmap_pool_dtor(void *arg, void *object)
1.1       deraadt  4378: {
1.186     pk       4379:        struct pmap *pm = object;
1.124     pk       4380:        union ctxinfo *c;
1.175     thorpej  4381:        int s = splvm();        /* paranoia */
1.1       deraadt  4382:
                   4383: #ifdef DEBUG
                   4384:        if (pmapdebug & PDB_DESTROY)
1.186     pk       4385:                printf("pmap_pmap_pool_dtor(%p)\n", pm);
1.1       deraadt  4386: #endif
1.55      pk       4387:
1.186     pk       4388:        if ((c = pm->pm_ctx) != NULL) {
                   4389:                ctx_free(pm);
1.1       deraadt  4390:        }
1.102     pk       4391:
1.210     thorpej  4392: #if defined(SUN4M) || defined(SUN4D)
                   4393:        if (CPU_HAS_SRMMU) {
1.152     pk       4394:                int n;
1.195     mrg      4395:
1.152     pk       4396: #if defined(MULTIPROCESSOR)
1.302     briggs   4397:                for (n = 0; n < sparc_ncpus; n++)
1.152     pk       4398: #else
                   4399:                n = 0;
                   4400: #endif
                   4401:                {
1.338     mrg      4402:                        int *pt;
                   4403:
1.339     mrg      4404: #if defined(MULTIPROCESSOR)
1.338     mrg      4405:                        /* Did this cpu attach? */
                   4406:                        if (pmap_kernel()->pm_reg_ptps[n] == 0)
                   4407:                                continue;
1.339     mrg      4408: #endif
1.338     mrg      4409:
                   4410:                        pt = pm->pm_reg_ptps[n];
1.152     pk       4411:                        pm->pm_reg_ptps[n] = NULL;
                   4412:                        pm->pm_reg_ptps_pa[n] = 0;
1.157     pk       4413:                        pool_put(&L1_pool, pt);
1.152     pk       4414:                }
1.102     pk       4415:        }
1.232     pk       4416: #endif /* SUN4M || SUN4D */
1.1       deraadt  4417:        splx(s);
1.186     pk       4418: }
1.55      pk       4419:
1.186     pk       4420: /*
                   4421:  * Create and return a physical map.
                   4422:  */
                   4423: struct pmap *
1.303     uwe      4424: pmap_create(void)
1.186     pk       4425: {
                   4426:        struct pmap *pm;
                   4427:
1.320     ad       4428:        pm = pool_cache_get(&pmap_cache, PR_WAITOK);
1.229     pk       4429:
                   4430:        /*
                   4431:         * Reset fields that are not preserved in the pmap cache pool.
                   4432:         */
1.186     pk       4433:        pm->pm_refcount = 1;
1.229     pk       4434: #if defined(MULTIPROCESSOR)
                   4435:        /* reset active CPU set */
                   4436:        pm->pm_cpuset = 0;
                   4437: #endif
1.296     chs      4438:        if (CPU_HAS_SUNMMU) {
1.229     pk       4439:                /* reset the region gap */
                   4440:                pm->pm_gap_start = 0;
                   4441:                pm->pm_gap_end = VA_VREG(VM_MAXUSER_ADDRESS);
                   4442:        }
                   4443:
1.43      pk       4444: #ifdef DEBUG
1.186     pk       4445:        if (pmapdebug & PDB_CREATE)
1.221     pk       4446:                printf("pmap_create[%d]: created %p\n", cpu_number(), pm);
1.186     pk       4447:        pmap_quiet_check(pm);
                   4448: #endif
                   4449:        return (pm);
1.43      pk       4450: }
1.186     pk       4451:
                   4452: /*
                   4453:  * Retire the given pmap from service.
                   4454:  * Should only be called if the map contains no valid mappings.
                   4455:  */
                   4456: void
1.303     uwe      4457: pmap_destroy(struct pmap *pm)
1.186     pk       4458: {
                   4459:
                   4460: #ifdef DEBUG
                   4461:        if (pmapdebug & PDB_DESTROY)
1.221     pk       4462:                printf("pmap_destroy[%d](%p)\n", cpu_number(), pm);
1.186     pk       4463: #endif
1.322     ad       4464:        if (atomic_dec_uint_nv(&pm->pm_refcount) == 0) {
1.186     pk       4465: #ifdef DEBUG
                   4466:                pmap_quiet_check(pm);
                   4467: #endif
1.320     ad       4468:                pool_cache_put(&pmap_cache, pm);
1.186     pk       4469:        }
1.1       deraadt  4470: }
                   4471:
                   4472: /*
                   4473:  * Add a reference to the given pmap.
                   4474:  */
                   4475: void
1.303     uwe      4476: pmap_reference(struct pmap *pm)
1.1       deraadt  4477: {
1.322     ad       4478:
                   4479:        atomic_inc_uint(&pm->pm_refcount);
1.242     pk       4480: }
                   4481:
1.265     pk       4482: #if defined(SUN4) || defined(SUN4C)
1.242     pk       4483: /*
1.265     pk       4484:  * helper to deallocate level 2 & 3 page tables.
1.242     pk       4485:  */
1.303     uwe      4486: static void
                   4487: pgt_lvl23_remove4_4c(struct pmap *pm, struct regmap *rp, struct segmap *sp,
                   4488:                     int vr, int vs)
1.242     pk       4489: {
1.265     pk       4490:        vaddr_t va, tva;
                   4491:        int i, pmeg;
                   4492:
                   4493:        va = VSTOVA(vr,vs);
                   4494:        if ((pmeg = sp->sg_pmeg) != seginval) {
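                                        /* The segment is loaded in the MMU; unmap it from the hardware first */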
                   4495:                if (CTX_USABLE(pm,rp)) {
                   4496:                        setcontext4(pm->pm_ctxnum);
                   4497:                        setsegmap(va, seginval);
                   4498:                } else {
                   4499:                        /* no context, use context 0 */
                   4500:                        setcontext4(0);
                   4501:                        if (HASSUN4_MMU3L && rp->rg_smeg != reginval) {
                   4502:                                setregmap(0, rp->rg_smeg);
                   4503:                                tva = vs << SGSHIFT;
                   4504:                                setsegmap(tva, seginval);
                   4505:                        }
                   4506:                }
                   4507:                if (!HASSUN4_MMU3L) {
                   4508:                        if (pm == pmap_kernel()) {
                   4509:                                /* Unmap segment from all contexts */
                   4510:                                for (i = ncontext; --i >= 0;) {
                   4511:                                        setcontext4(i);
                   4512:                                        setsegmap(va, seginval);
                   4513:                                }
                   4514:                        }
                   4515:                }
                   4516:                me_free(pm, pmeg);
                   4517:                sp->sg_pmeg = seginval;
                   4518:        }
                   4519:        /* Free software tables for non-kernel maps */
                   4520:        if (pm != pmap_kernel()) {
                   4521:                pool_put(&pte_pool, sp->sg_pte);
                   4522:                sp->sg_pte = NULL;
                   4523:        }
1.251     pk       4524:
1.265     pk       4525:        if (rp->rg_nsegmap <= 0)
                   4526:                panic("pgt_rm: pm %p: nsegmap = %d", pm, rp->rg_nsegmap);
1.242     pk       4527:
1.265     pk       4528:        if (--rp->rg_nsegmap == 0) {
1.267     pk       4529: #if defined(SUN4_MMU3L)
1.265     pk       4530:                if (HASSUN4_MMU3L) {
                   4531:                        if (rp->rg_smeg != reginval) {
                   4532:                                if (pm == pmap_kernel()) {
                   4533:                                        /* Unmap from all contexts */
                   4534:                                        for (i = ncontext; --i >= 0;) {
                   4535:                                                setcontext4(i);
                   4536:                                                setregmap(va, reginval);
                   4537:                                        }
                   4538:                                } else if (pm->pm_ctx) {
                   4539:                                        setcontext4(pm->pm_ctxnum);
                   4540:                                        setregmap(va, reginval);
                   4541:                                }
                   4542:
                   4543:                                /* Release MMU resource */
                   4544:                                region_free(pm, rp->rg_smeg);
                   4545:                                rp->rg_smeg = reginval;
                   4546:                        }
                   4547:                }
1.267     pk       4548: #endif /* SUN4_MMU3L */
1.265     pk       4549:                /* Free software tables for non-kernel maps */
                   4550:                if (pm != pmap_kernel()) {
                   4551:                        GAP_WIDEN(pm,vr);
                   4552:                        pool_put(&segmap_pool, rp->rg_segmap);
                   4553:                        rp->rg_segmap = NULL;
                   4554:                }
                   4555:        }
                   4556: }
                   4557: #endif /* SUN4 || SUN4C */
                   4558:
                   4559: #if defined(SUN4M) || defined(SUN4D)
                   4560: /*
                   4561:  * SRMMU helper to deallocate level 2 & 3 page tables.
                   4562:  */
1.296     chs      4563: static void
                   4564: pgt_lvl23_remove4m(struct pmap *pm, struct regmap *rp, struct segmap *sp,
                   4565:     int vr, int vs)
1.265     pk       4566: {
                   4567:
                   4568:        /* Invalidate level 2 PTP entry */
                   4569:        if (pm->pm_ctx)
                   4570:                tlb_flush_segment(VSTOVA(vr,vs), pm->pm_ctxnum,
                   4571:                                  PMAP_CPUSET(pm));
                   4572:        setpgt4m(&rp->rg_seg_ptps[vs], SRMMU_TEINVALID);
                   4573:        pool_put(&L23_pool, sp->sg_pte);
                   4574:        sp->sg_pte = NULL;
                   4575:
                   4576:        /* If region is now empty, remove level 2 pagetable as well */
1.242     pk       4577:        if (--rp->rg_nsegmap == 0) {
                   4578:                int n = 0;
1.251     pk       4579:                if (pm->pm_ctx)
                   4580:                        tlb_flush_region(VRTOVA(vr), pm->pm_ctxnum,
                   4581:                                         PMAP_CPUSET(pm));
1.344     mrg      4582: #if defined(MULTIPROCESSOR)
1.242     pk       4583:                /* Invalidate level 1 PTP entries on all CPUs */
1.316     mrg      4584:                for (; n < sparc_ncpus; n++) {
1.327     mrg      4585:                        if ((cpus[n]->flags & CPUFLG_HATCHED) == 0)
1.316     mrg      4586:                                continue;
1.242     pk       4587: #endif
                   4588:                        setpgt4m(&pm->pm_reg_ptps[n][vr], SRMMU_TEINVALID);
1.344     mrg      4589: #if defined(MULTIPROCESSOR)
1.316     mrg      4590:                }
                   4591: #endif
1.242     pk       4592:
1.265     pk       4593:                pool_put(&segmap_pool, rp->rg_segmap);
1.242     pk       4594:                rp->rg_segmap = NULL;
                   4595:                pool_put(&L23_pool, rp->rg_seg_ptps);
                   4596:        }
1.1       deraadt  4597: }
1.242     pk       4598: #endif /* SUN4M || SUN4D */
1.1       deraadt  4599:
1.296     chs      4600: void
                   4601: pmap_remove_all(struct pmap *pm)
1.264     pk       4602: {
                   4603:        if (pm->pm_ctx == NULL)
                   4604:                return;
                   4605:
                   4606: #if defined(SUN4) || defined(SUN4C)
1.296     chs      4607:        if (CPU_HAS_SUNMMU) {
1.264     pk       4608:                int ctx = getcontext4();
                   4609:                setcontext4(pm->pm_ctxnum);
                   4610:                cache_flush_context(pm->pm_ctxnum);
                   4611:                setcontext4(ctx);
                   4612:        }
                   4613: #endif
                   4614:
                   4615: #if defined(SUN4M) || defined(SUN4D)
                   4616:        if (CPU_HAS_SRMMU) {
                   4617:                cache_flush_context(pm->pm_ctxnum);
                   4618:        }
                   4619: #endif
                   4620:
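                                /*
                                 * Record that the cache holds no more data for this pmap,
                                 * so the subsequent removal of its mappings can skip
                                 * further cache flushes.
                                 */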
                   4621:        pm->pm_flags |= PMAP_USERCACHECLEAN;
                   4622: }
                   4623:
1.1       deraadt  4624: /*
                   4625:  * Remove the given range of mapping entries.
                   4626:  * The starting and ending addresses are already rounded to pages.
                   4627:  * Sheer lunacy: pmap_remove is often asked to remove nonexistent
                   4628:  * mappings.
                   4629:  */
                   4630: void
1.303     uwe      4631: pmap_remove(struct pmap *pm, vaddr_t va, vaddr_t endva)
1.1       deraadt  4632: {
1.124     pk       4633:        vaddr_t nva;
                   4634:        int vr, vs, s, ctx;
                   4635:        void (*rm)(struct pmap *, vaddr_t, vaddr_t, int, int);
1.1       deraadt  4636:
                   4637: #ifdef DEBUG
                   4638:        if (pmapdebug & PDB_REMOVE)
1.221     pk       4639:                printf("pmap_remove[%d](%p, 0x%lx, 0x%lx)\n",
                   4640:                        cpu_number(), pm, va, endva);
1.1       deraadt  4641: #endif
                   4642:
1.258     pk       4643:        if (!CPU_HAS_SRMMU)
                   4644:                write_user_windows();
                   4645:
1.42      mycroft  4646:        if (pm == pmap_kernel()) {
1.1       deraadt  4647:                /*
                   4648:                 * Removing from kernel address space.
                   4649:                 */
                   4650:                rm = pmap_rmk;
                   4651:        } else {
                   4652:                /*
                   4653:                 * Removing from user address space.
                   4654:                 */
                   4655:                rm = pmap_rmu;
                   4656:        }
                   4657:
                   4658:        ctx = getcontext();
1.175     thorpej  4659:        s = splvm();            /* XXX conservative */
1.322     ad       4660:        PMAP_LOCK();
1.1       deraadt  4661:        for (; va < endva; va = nva) {
                   4662:                /* do one virtual segment at a time */
1.43      pk       4663:                vr = VA_VREG(va);
                   4664:                vs = VA_VSEG(va);
                   4665:                nva = VSTOVA(vr, vs + 1);
1.1       deraadt  4666:                if (nva == 0 || nva > endva)
                   4667:                        nva = endva;
1.76      pk       4668:                if (pm->pm_regmap[vr].rg_nsegmap != 0)
                   4669:                        (*rm)(pm, va, nva, vr, vs);
1.1       deraadt  4670:        }
1.322     ad       4671:        PMAP_UNLOCK();
1.1       deraadt  4672:        splx(s);
                   4673:        setcontext(ctx);
                   4674: }
                   4675:
                   4676: /*
1.257     pk       4677:  * It is the same amount of work to cache_flush_page 16 pages
                   4678:  * as to cache_flush_segment 1 segment, assuming a 64K cache size
                   4679:  * and a 4K page size or a 128K cache size and 8K page size.
1.1       deraadt  4680:  */
1.257     pk       4681: #define        PMAP_SFL_THRESHOLD      16      /* if more pages than this, flush the whole segment */
1.1       deraadt  4682:
                   4683: /*
                   4684:  * Remove a range contained within a single segment.
                   4685:  * These are egregiously complicated routines.
                   4686:  */
                   4687:
1.55      pk       4688: #if defined(SUN4) || defined(SUN4C)
                   4689:
1.43      pk       4690: /* remove from kernel */
1.55      pk       4691: /*static*/ void
1.303     uwe      4692: pmap_rmk4_4c(struct pmap *pm, vaddr_t va, vaddr_t endva, int vr, int vs)
1.124     pk       4693: {
1.265     pk       4694:        int pte, mmupte, *ptep, perpage, npg;
1.236     pk       4695:        struct vm_page *pg;
1.265     pk       4696:        int nleft, pmeg, inmmu;
1.43      pk       4697:        struct regmap *rp;
                   4698:        struct segmap *sp;
                   4699:
                   4700:        rp = &pm->pm_regmap[vr];
                   4701:        sp = &rp->rg_segmap[vs];
                   4702:
                   4703:        if (rp->rg_nsegmap == 0)
                   4704:                return;
                   4705:        if ((nleft = sp->sg_npte) == 0)
                   4706:                return;
                   4707:        pmeg = sp->sg_pmeg;
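                                /* inmmu: true if this segment currently has a hardware PMEG loaded */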
1.265     pk       4708:        inmmu = pmeg != seginval;
1.267     pk       4709:        ptep = &sp->sg_pte[VA_VPG(va)];
1.265     pk       4710:
1.1       deraadt  4711:        /* decide how to flush cache */
                   4712:        npg = (endva - va) >> PGSHIFT;
1.265     pk       4713:        if (!inmmu) {
                   4714:                perpage = 0;
                   4715:        } else if (npg > PMAP_SFL_THRESHOLD) {
1.1       deraadt  4716:                /* flush the whole segment */
                   4717:                perpage = 0;
1.214     pk       4718:                cache_flush_segment(vr, vs, 0);
1.1       deraadt  4719:        } else {
                   4720:                /* flush each page individually; some never need flushing */
1.69      pk       4721:                perpage = (CACHEINFO.c_vactype != VAC_NONE);
1.1       deraadt  4722:        }
1.265     pk       4723:
                   4724:        for (; va < endva; va += NBPG, ptep++) {
                   4725:                pte = *ptep;
1.274     chs      4726:                mmupte = inmmu ? getpte4(va) : 0;
1.265     pk       4727:                if ((pte & PG_V) == 0) {
                   4728: #ifdef DIAGNOSTIC
                   4729:                        if (inmmu && (mmupte & PG_V) != 0)
                   4730:                                printf("rmk: inconsistent ptes va=%lx\n", va);
                   4731: #endif
1.1       deraadt  4732:                        continue;
                   4733:                }
1.265     pk       4734:                if ((pte & PG_TYPE) == PG_OBMEM) {
1.35      pk       4735:                        /* if cacheable, flush page as needed */
1.265     pk       4736:                        if (perpage && (mmupte & PG_NC) == 0)
1.214     pk       4737:                                cache_flush_page(va, 0);
1.265     pk       4738:                        if ((pg = pvhead4_4c(pte)) != NULL) {
                   4739:                                if (inmmu)
                   4740:                                        VM_MDPAGE_PVHEAD(pg)->pv_flags |= MR4_4C(mmupte);
1.236     pk       4741:                                pv_unlink4_4c(pg, pm, va);
1.1       deraadt  4742:                        }
                   4743:                }
                   4744:                nleft--;
1.131     pk       4745: #ifdef DIAGNOSTIC
                   4746:                if (nleft < 0)
                   4747:                        panic("pmap_rmk: too many PTEs in segment; "
                   4748:                              "va 0x%lx; endva 0x%lx", va, endva);
                   4749: #endif
1.296     chs      4750:                if (pte & PG_WIRED) {
1.267     pk       4751:                        sp->sg_nwired--;
1.296     chs      4752:                        pm->pm_stats.wired_count--;
                   4753:                }
1.267     pk       4754:
1.265     pk       4755:                if (inmmu)
                   4756:                        setpte4(va, 0);
                   4757:                *ptep = 0;
1.277     pk       4758:                pm->pm_stats.resident_count--;
1.1       deraadt  4759:        }
                   4760:
1.267     pk       4761: #ifdef DIAGNOSTIC
                   4762:        if (sp->sg_nwired > nleft || sp->sg_nwired < 0)
                   4763:                panic("pmap_rmk: pm %p, va %lx: nleft=%d, nwired=%d",
                   4764:                        pm, va, nleft, sp->sg_nwired);
                   4765: #endif
1.265     pk       4766:        if ((sp->sg_npte = nleft) == 0)
                   4767:                pgt_lvl23_remove4_4c(pm, rp, sp, vr, vs);
1.267     pk       4768:        else if (sp->sg_nwired == 0) {
                   4769:                if (sp->sg_pmeg != seginval)
                   4770:                        mmu_pmeg_unlock(sp->sg_pmeg);
                   4771:        }
1.1       deraadt  4772: }
                   4773:
1.236     pk       4774: #endif /* SUN4 || SUN4C */
1.1       deraadt  4775:
1.210     thorpej  4776: #if defined(SUN4M) || defined(SUN4D)   /* SRMMU version of pmap_rmk */
1.303     uwe      4777:
1.55      pk       4778: /* remove from kernel (4m)*/
1.303     uwe      4779: /* pm is already locked */
1.55      pk       4780: /*static*/ void
1.303     uwe      4781: pmap_rmk4m(struct pmap *pm, vaddr_t va, vaddr_t endva, int vr, int vs)
1.124     pk       4782: {
1.181     pk       4783:        int tpte, perpage, npg;
1.236     pk       4784:        struct vm_page *pg;
1.43      pk       4785:        struct regmap *rp;
                   4786:        struct segmap *sp;
                   4787:
                   4788:        rp = &pm->pm_regmap[vr];
1.55      pk       4789:        sp = &rp->rg_segmap[vs];
1.43      pk       4790:        if (rp->rg_nsegmap == 0)
                   4791:                return;
1.281     pk       4792:
1.55      pk       4793:        /* decide how to flush cache */
                   4794:        npg = (endva - va) >> PGSHIFT;
1.257     pk       4795:        if (npg > PMAP_SFL_THRESHOLD) {
1.55      pk       4796:                /* flush the whole segment */
                   4797:                perpage = 0;
1.69      pk       4798:                if (CACHEINFO.c_vactype != VAC_NONE)
1.214     pk       4799:                        cache_flush_segment(vr, vs, 0);
1.55      pk       4800:        } else {
                   4801:                /* flush each page individually; some never need flushing */
1.69      pk       4802:                perpage = (CACHEINFO.c_vactype != VAC_NONE);
1.55      pk       4803:        }
                   4804:        while (va < endva) {
1.72      pk       4805:                tpte = sp->sg_pte[VA_SUN4M_VPG(va)];
1.55      pk       4806:                if ((tpte & SRMMU_TETYPE) != SRMMU_TEPTE) {
1.72      pk       4807: #ifdef DEBUG
                   4808:                        if ((pmapdebug & PDB_SANITYCHK) &&
                   4809:                            (getpte4m(va) & SRMMU_TETYPE) == SRMMU_TEPTE)
1.91      fair     4810:                                panic("pmap_rmk: Spurious kTLB entry for 0x%lx",
1.81      pk       4811:                                      va);
1.72      pk       4812: #endif
1.61      pk       4813:                        va += NBPG;
1.55      pk       4814:                        continue;
                   4815:                }
                   4816:                if ((tpte & SRMMU_PGTYPE) == PG_SUN4M_OBMEM) {
                   4817:                        /* if cacheable, flush page as needed */
                   4818:                        if (perpage && (tpte & SRMMU_PG_C))
1.214     pk       4819:                                cache_flush_page(va, 0);
1.236     pk       4820:                        if ((pg = pvhead4m(tpte)) != NULL) {
                   4821:                                VM_MDPAGE_PVHEAD(pg)->pv_flags |= MR4M(tpte);
                   4822:                                pv_unlink4m(pg, pm, va);
1.55      pk       4823:                        }
                   4824:                }
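                                        /*
                                         * Invalidate the PTE in the page table; kernel
                                         * mappings are shared, so the TLB demap must
                                         * reach every CPU (CPUSET_ALL).
                                         */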
1.196     mrg      4825:                setpgt4m_va(va, &sp->sg_pte[VA_SUN4M_VPG(va)],
1.226     mrg      4826:                    SRMMU_TEINVALID, 1, 0, CPUSET_ALL);
1.277     pk       4827:                pm->pm_stats.resident_count--;
1.55      pk       4828:                va += NBPG;
                   4829:        }
                   4830: }
1.210     thorpej  4831: #endif /* SUN4M || SUN4D */
1.161     pk       4832:
1.55      pk       4833: #if defined(SUN4) || defined(SUN4C)
                   4834:
                   4835: /* remove from user */
                   4836: /*static*/ void
1.303     uwe      4837: pmap_rmu4_4c(struct pmap *pm, vaddr_t va, vaddr_t endva, int vr, int vs)
1.124     pk       4838: {
1.267     pk       4839:        int *ptep, pteva, pte, perpage, npg;
1.236     pk       4840:        struct vm_page *pg;
1.265     pk       4841:        int nleft, pmeg, inmmu;
1.55      pk       4842:        struct regmap *rp;
                   4843:        struct segmap *sp;
                   4844:
                   4845:        rp = &pm->pm_regmap[vr];
                   4846:        if (rp->rg_nsegmap == 0)
                   4847:                return;
                   4848:        sp = &rp->rg_segmap[vs];
                   4849:        if ((nleft = sp->sg_npte) == 0)
                   4850:                return;
                   4851:        pmeg = sp->sg_pmeg;
1.265     pk       4852:        inmmu = pmeg != seginval;
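                                /*
                                 * On sun4/sun4c, user PTEs always live in the software
                                 * copy (sg_pte); the segment may also be resident in a
                                 * hardware PMEG, in which case the MMU copy must be
                                 * kept in sync as well.
                                 */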
1.1       deraadt  4853:
                   4854:        /*
                    4855:         * If the PTEs are in the MMU, invalidate them in hardware,
                    4856:         * update ref & mod bits, and flush the cache if required.
                   4857:         */
1.265     pk       4858:        if (!inmmu) {
                   4859:                perpage = 0;
1.274     chs      4860:                pteva = 0;
1.265     pk       4861:        } else if (CTX_USABLE(pm,rp)) {
1.1       deraadt  4862:                /* process has a context, must flush cache */
                   4863:                npg = (endva - va) >> PGSHIFT;
1.71      pk       4864:                setcontext4(pm->pm_ctxnum);
1.264     pk       4865:                if ((pm->pm_flags & PMAP_USERCACHECLEAN) != 0)
                   4866:                        perpage = 0;
                   4867:                else if (npg > PMAP_SFL_THRESHOLD) {
1.1       deraadt  4868:                        perpage = 0; /* flush the whole segment */
1.214     pk       4869:                        cache_flush_segment(vr, vs, pm->pm_ctxnum);
1.1       deraadt  4870:                } else
1.69      pk       4871:                        perpage = (CACHEINFO.c_vactype != VAC_NONE);
1.1       deraadt  4872:                pteva = va;
                   4873:        } else {
                   4874:                /* no context, use context 0; cache flush unnecessary */
1.71      pk       4875:                setcontext4(0);
1.69      pk       4876:                if (HASSUN4_MMU3L)
1.43      pk       4877:                        setregmap(0, tregion);
1.276     wiz      4878:                /* XXX use per-CPU pteva? */
1.1       deraadt  4879:                setsegmap(0, pmeg);
1.18      deraadt  4880:                pteva = VA_VPG(va) << PGSHIFT;
1.1       deraadt  4881:                perpage = 0;
                   4882:        }
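                                /*
                                 * The no-context case above borrows context 0: the PMEG
                                 * is temporarily mapped at a fixed address so that its
                                 * PTEs can be accessed through pteva.
                                 */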
1.265     pk       4883:
                   4884:        ptep = sp->sg_pte + VA_VPG(va);
                   4885:        for (; va < endva; ptep++, pteva += NBPG, va += NBPG) {
                   4886:                int mmupte;
1.267     pk       4887:                pte = *ptep;
1.274     chs      4888:                mmupte = inmmu ? getpte4(pteva) : 0;
1.265     pk       4889:
1.267     pk       4890:                if ((pte & PG_V) == 0) {
1.265     pk       4891: #ifdef DIAGNOSTIC
                   4892:                        if (inmmu && (mmupte & PG_V) != 0)
1.267     pk       4893:                                printf("pmap_rmu: pte=%x, mmupte=%x\n",
                   4894:                                        pte, getpte4(pteva));
1.265     pk       4895: #endif
1.1       deraadt  4896:                        continue;
1.265     pk       4897:                }
1.267     pk       4898:                if ((pte & PG_TYPE) == PG_OBMEM) {
1.35      pk       4899:                        /* if cacheable, flush page as needed */
1.265     pk       4900:                        if (perpage && (mmupte & PG_NC) == 0)
1.214     pk       4901:                                cache_flush_page(va, pm->pm_ctxnum);
1.267     pk       4902:                        if ((pg = pvhead4_4c(pte)) != NULL) {
1.265     pk       4903:                                if (inmmu)
                   4904:                                        VM_MDPAGE_PVHEAD(pg)->pv_flags |= MR4_4C(mmupte);
1.236     pk       4905:                                pv_unlink4_4c(pg, pm, va);
1.1       deraadt  4906:                        }
                   4907:                }
                   4908:                nleft--;
1.131     pk       4909: #ifdef DIAGNOSTIC
                   4910:                if (nleft < 0)
                   4911:                        panic("pmap_rmu: too many PTEs in segment; "
1.265     pk       4912:                             "va 0x%lx; endva 0x%lx", va, endva);
1.131     pk       4913: #endif
1.265     pk       4914:                if (inmmu)
                   4915:                        setpte4(pteva, 0);
1.267     pk       4916:
1.296     chs      4917:                if (pte & PG_WIRED) {
1.267     pk       4918:                        sp->sg_nwired--;
1.296     chs      4919:                        pm->pm_stats.wired_count--;
                   4920:                }
1.265     pk       4921:                *ptep = 0;
1.277     pk       4922:                pm->pm_stats.resident_count--;
1.1       deraadt  4923:        }
                   4924:
1.267     pk       4925: #ifdef DIAGNOSTIC
                   4926:        if (sp->sg_nwired > nleft || sp->sg_nwired < 0)
                   4927:                panic("pmap_rmu: pm %p, va %lx: nleft=%d, nwired=%d",
                   4928:                        pm, va, nleft, sp->sg_nwired);
                   4929: #endif
1.265     pk       4930:        if ((sp->sg_npte = nleft) == 0)
                   4931:                pgt_lvl23_remove4_4c(pm, rp, sp, vr, vs);
1.267     pk       4932:        else if (sp->sg_nwired == 0) {
                   4933:                if (sp->sg_pmeg != seginval)
                   4934:                        mmu_pmeg_unlock(sp->sg_pmeg);
                   4935:        }
1.1       deraadt  4936: }
                   4937:
1.232     pk       4938: #endif /* SUN4 || SUN4C */
1.55      pk       4939:
1.210     thorpej  4940: #if defined(SUN4M) || defined(SUN4D)   /* SRMMU version of pmap_rmu */
1.55      pk       4941: /* remove from user */
1.195     mrg      4942: /* Note: pm is already locked */
1.55      pk       4943: /*static*/ void
1.303     uwe      4944: pmap_rmu4m(struct pmap *pm, vaddr_t va, vaddr_t endva, int vr, int vs)
1.124     pk       4945: {
1.181     pk       4946:        int *pte0, perpage, npg;
1.236     pk       4947:        struct vm_page *pg;
1.124     pk       4948:        int nleft;
1.55      pk       4949:        struct regmap *rp;
                   4950:        struct segmap *sp;
                   4951:
                   4952:        rp = &pm->pm_regmap[vr];
                   4953:        if (rp->rg_nsegmap == 0)
                   4954:                return;
                   4955:        sp = &rp->rg_segmap[vs];
                   4956:        if ((nleft = sp->sg_npte) == 0)
                   4957:                return;
                   4958:        pte0 = sp->sg_pte;
                   4959:
                   4960:        /*
                   4961:         * Invalidate PTE in MMU pagetables. Flush cache if necessary.
                   4962:         */
1.264     pk       4963:        if (pm->pm_ctx && (pm->pm_flags & PMAP_USERCACHECLEAN) == 0) {
1.55      pk       4964:                /* process has a context, must flush cache */
1.69      pk       4965:                if (CACHEINFO.c_vactype != VAC_NONE) {
1.63      pk       4966:                        npg = (endva - va) >> PGSHIFT;
1.257     pk       4967:                        if (npg > PMAP_SFL_THRESHOLD) {
1.63      pk       4968:                                perpage = 0; /* flush the whole segment */
1.214     pk       4969:                                cache_flush_segment(vr, vs, pm->pm_ctxnum);
1.63      pk       4970:                        } else
                   4971:                                perpage = 1;
1.55      pk       4972:                } else
1.63      pk       4973:                        perpage = 0;
1.55      pk       4974:        } else {
                   4975:                /* no context; cache flush unnecessary */
                   4976:                perpage = 0;
                   4977:        }
1.63      pk       4978:        for (; va < endva; va += NBPG) {
1.100     pk       4979:                int tpte;
                   4980:
                   4981:                tpte = pte0[VA_SUN4M_VPG(va)];
1.72      pk       4982:
                   4983:                if ((tpte & SRMMU_TETYPE) != SRMMU_TEPTE) {
                   4984: #ifdef DEBUG
                   4985:                        if ((pmapdebug & PDB_SANITYCHK) &&
                   4986:                            pm->pm_ctx &&
                    4987:                            (getpte4m(va) & SRMMU_TETYPE) == SRMMU_TEPTE)
1.91      fair     4988:                                panic("pmap_rmu: Spurious uTLB entry for 0x%lx",
1.81      pk       4989:                                      va);
1.72      pk       4990: #endif
1.55      pk       4991:                        continue;
1.72      pk       4992:                }
                   4993:
1.55      pk       4994:                if ((tpte & SRMMU_PGTYPE) == PG_SUN4M_OBMEM) {
                   4995:                        /* if cacheable, flush page as needed */
                   4996:                        if (perpage && (tpte & SRMMU_PG_C))
1.214     pk       4997:                                cache_flush_page(va, pm->pm_ctxnum);
1.236     pk       4998:                        if ((pg = pvhead4m(tpte)) != NULL) {
                   4999:                                VM_MDPAGE_PVHEAD(pg)->pv_flags |= MR4M(tpte);
                   5000:                                pv_unlink4m(pg, pm, va);
1.55      pk       5001:                        }
                   5002:                }
                   5003:                nleft--;
1.131     pk       5004: #ifdef DIAGNOSTIC
                   5005:                if (nleft < 0)
                   5006:                        panic("pmap_rmu: too many PTEs in segment; "
                   5007:                              "va 0x%lx; endva 0x%lx", va, endva);
                   5008: #endif
1.196     mrg      5009:                setpgt4m_va(va, &pte0[VA_SUN4M_VPG(va)], SRMMU_TEINVALID,
1.226     mrg      5010:                    pm->pm_ctx != NULL, pm->pm_ctxnum, PMAP_CPUSET(pm));
1.277     pk       5011:                pm->pm_stats.resident_count--;
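                                        /*
                                         * sg_wiremap is a bitmap with one bit per page in
                                         * the segment (indexed by VA_SUN4M_VPG); drop the
                                         * wired count if this page was wired.
                                         */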
1.296     chs      5012:                if (sp->sg_wiremap & (1 << VA_SUN4M_VPG(va))) {
                   5013:                        sp->sg_wiremap &= ~(1 << VA_SUN4M_VPG(va));
                   5014:                        pm->pm_stats.wired_count--;
                   5015:                }
1.55      pk       5016:        }
                   5017:
                   5018:        /*
                   5019:         * If the segment is all gone, and the context is loaded, give
                   5020:         * the segment back.
                   5021:         */
1.242     pk       5022:        if ((sp->sg_npte = nleft) == 0)
                   5023:                pgt_lvl23_remove4m(pm, rp, sp, vr, vs);
1.55      pk       5024: }
1.210     thorpej  5025: #endif /* SUN4M || SUN4D */
1.55      pk       5026:
1.1       deraadt  5027: /*
                   5028:  * Lower (make more strict) the protection on the specified
                   5029:  * physical page.
                   5030:  *
                   5031:  * There are only two cases: either the protection is going to 0
                    5032:  * (in which case we do the dirty work here), or it is going
                   5033:  * to read-only (in which case pv_changepte does the trick).
                   5034:  */
1.55      pk       5035:
                   5036: #if defined(SUN4) || defined(SUN4C)
1.1       deraadt  5037: void
1.303     uwe      5038: pmap_page_protect4_4c(struct vm_page *pg, vm_prot_t prot)
1.1       deraadt  5039: {
1.236     pk       5040:        struct pvlist *pv, *npv;
1.124     pk       5041:        struct pmap *pm;
1.267     pk       5042:        vaddr_t va;
                   5043:        int vr, vs, pteva, pte, *ptep;
1.265     pk       5044:        int flags, nleft, s, ctx;
1.43      pk       5045:        struct regmap *rp;
                   5046:        struct segmap *sp;
1.1       deraadt  5047:
                   5048: #ifdef DEBUG
                   5049:        if ((pmapdebug & PDB_CHANGEPROT) ||
                   5050:            (pmapdebug & PDB_REMOVE && prot == VM_PROT_NONE))
1.236     pk       5051:                printf("pmap_page_protect(0x%lx, 0x%x)\n",
                   5052:                        VM_PAGE_TO_PHYS(pg), prot);
1.1       deraadt  5053: #endif
                   5054:        /*
1.236     pk       5055:         * Skip unmanaged pages, or operations that do not take
                   5056:         * away write permission.
1.1       deraadt  5057:         */
1.236     pk       5058:        if (prot & VM_PROT_WRITE)
1.1       deraadt  5059:                return;
1.162     pk       5060:
1.1       deraadt  5061:        write_user_windows();   /* paranoia */
                   5062:        if (prot & VM_PROT_READ) {
1.236     pk       5063:                pv_changepte4_4c(pg, 0, PG_W);
1.1       deraadt  5064:                return;
                   5065:        }
                   5066:
                   5067:        /*
                   5068:         * Remove all access to all people talking to this page.
                   5069:         * Walk down PV list, removing all mappings.
                   5070:         * The logic is much like that for pmap_remove,
                   5071:         * but we know we are removing exactly one page.
                   5072:         */
1.175     thorpej  5073:        s = splvm();
1.236     pk       5074:        pv = VM_MDPAGE_PVHEAD(pg);
1.162     pk       5075:        if (pv->pv_pmap == NULL) {
1.1       deraadt  5076:                splx(s);
                   5077:                return;
                   5078:        }
1.71      pk       5079:        ctx = getcontext4();
1.162     pk       5080:
                   5081:        /* This pv head will become empty, so clear caching state flags */
                   5082:        flags = pv->pv_flags & ~(PV_NC|PV_ANC);
                   5083:
                   5084:        while (pv != NULL) {
                   5085:                pm = pv->pv_pmap;
1.1       deraadt  5086:                va = pv->pv_va;
1.43      pk       5087:                vr = VA_VREG(va);
                   5088:                vs = VA_VSEG(va);
                   5089:                rp = &pm->pm_regmap[vr];
                   5090:                sp = &rp->rg_segmap[vs];
1.241     pk       5091:                if ((nleft = sp->sg_npte) <= 0)
                   5092:                        panic("pmap_page_protect: empty vseg");
1.162     pk       5093:                sp->sg_npte = --nleft;
1.267     pk       5094:                ptep = &sp->sg_pte[VA_VPG(va)];
1.43      pk       5095:
1.296     chs      5096:                if (*ptep & PG_WIRED) {
1.267     pk       5097:                        sp->sg_nwired--;
1.296     chs      5098:                        pm->pm_stats.wired_count--;
                   5099:                }
1.267     pk       5100:
                   5101:                if (sp->sg_pmeg != seginval) {
                   5102:                        /* Update PV flags */
                   5103:                        if (CTX_USABLE(pm,rp)) {
                   5104:                                setcontext4(pm->pm_ctxnum);
                   5105:                                pteva = va;
                   5106:                                cache_flush_page(va, pm->pm_ctxnum);
                   5107:                        } else {
                   5108:                                setcontext4(0);
1.276     wiz      5109:                                /* XXX use per-CPU pteva? */
1.267     pk       5110:                                if (HASSUN4_MMU3L)
                   5111:                                        setregmap(0, tregion);
                   5112:                                setsegmap(0, sp->sg_pmeg);
                   5113:                                pteva = VA_VPG(va) << PGSHIFT;
                   5114:                        }
1.84      pk       5115:
1.267     pk       5116:                        pte = getpte4(pteva);
                   5117: #ifdef DIAGNOSTIC
                   5118:                        if ((pte & PG_V) == 0)
1.268     pk       5119:                                panic("pmap_page_protect !PG_V: pg %p "
1.267     pk       5120:                                      "ctx %d, va 0x%lx, pte 0x%x",
1.268     pk       5121:                                      pg, pm->pm_ctxnum, va, pte);
1.267     pk       5122: #endif
                   5123:                        flags |= MR4_4C(pte);
1.43      pk       5124:
1.267     pk       5125:                        setpte4(pteva, 0);
                   5126: #ifdef DIAGNOSTIC
                   5127:                        if (sp->sg_nwired > nleft || sp->sg_nwired < 0)
                   5128:                                panic("pmap_page_protect: pm %p, va %lx: nleft=%d, nwired=%d",
                   5129:                                        pm, va, nleft, sp->sg_nwired);
1.194     chs      5130: #endif
1.267     pk       5131:                        if (sp->sg_nwired == 0)
                   5132:                                mmu_pmeg_unlock(sp->sg_pmeg);
                   5133:                }
1.43      pk       5134:
1.267     pk       5135:                *ptep = 0;
1.277     pk       5136:                pm->pm_stats.resident_count--;
1.265     pk       5137:                if (nleft == 0)
                   5138:                        pgt_lvl23_remove4_4c(pm, rp, sp, vr, vs);
1.1       deraadt  5139:                npv = pv->pv_next;
1.236     pk       5140:                if (pv != VM_MDPAGE_PVHEAD(pg))
1.122     pk       5141:                        pool_put(&pv_pool, pv);
1.162     pk       5142:                pv = npv;
1.1       deraadt  5143:        }
1.162     pk       5144:
                   5145:        /* Finally, update pv head */
1.236     pk       5146:        VM_MDPAGE_PVHEAD(pg)->pv_pmap = NULL;
                   5147:        VM_MDPAGE_PVHEAD(pg)->pv_next = NULL;
                   5148:        VM_MDPAGE_PVHEAD(pg)->pv_flags = flags;
1.71      pk       5149:        setcontext4(ctx);
1.1       deraadt  5150:        splx(s);
                   5151: }
                   5152:
                   5153: /*
                   5154:  * Lower (make more strict) the protection on the specified
                   5155:  * range of this pmap.
                   5156:  *
                   5157:  * There are only two cases: either the protection is going to 0
                   5158:  * (in which case we call pmap_remove to do the dirty work), or
                   5159:  * it is going from read/write to read-only.  The latter is
                   5160:  * fairly easy.
                   5161:  */
                   5162: void
1.303     uwe      5163: pmap_protect4_4c(struct pmap *pm, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
1.1       deraadt  5164: {
1.124     pk       5165:        int va, nva, vr, vs;
                   5166:        int s, ctx;
1.43      pk       5167:        struct regmap *rp;
                   5168:        struct segmap *sp;
1.1       deraadt  5169:
                   5170:        if ((prot & VM_PROT_READ) == 0) {
                   5171:                pmap_remove(pm, sva, eva);
                   5172:                return;
                   5173:        }
                   5174:
                   5175:        write_user_windows();
1.71      pk       5176:        ctx = getcontext4();
1.175     thorpej  5177:        s = splvm();
1.322     ad       5178:        PMAP_LOCK();
1.1       deraadt  5179:        for (va = sva; va < eva;) {
1.43      pk       5180:                vr = VA_VREG(va);
                   5181:                vs = VA_VSEG(va);
                   5182:                rp = &pm->pm_regmap[vr];
                   5183:                nva = VSTOVA(vr,vs + 1);
1.1       deraadt  5184:                if (nva > eva)
                   5185:                        nva = eva;
1.43      pk       5186:                if (rp->rg_nsegmap == 0) {
1.1       deraadt  5187:                        va = nva;
                   5188:                        continue;
                   5189:                }
1.43      pk       5190: #ifdef DEBUG
                   5191:                if (rp->rg_segmap == NULL)
                   5192:                        panic("pmap_protect: no segments");
                   5193: #endif
                   5194:                sp = &rp->rg_segmap[vs];
                   5195:                if (sp->sg_npte == 0) {
                   5196:                        va = nva;
                   5197:                        continue;
                   5198:                }
                   5199: #ifdef DEBUG
1.265     pk       5200:                if (sp->sg_pte == NULL)
1.43      pk       5201:                        panic("pmap_protect: no pages");
                   5202: #endif
                   5203:                if (sp->sg_pmeg == seginval) {
1.267     pk       5204:                        int *ptep = &sp->sg_pte[VA_VPG(va)];
1.1       deraadt  5205:
                   5206:                        /* not in MMU; just clear PG_W from core copies */
                   5207:                        for (; va < nva; va += NBPG)
1.267     pk       5208:                                *ptep++ &= ~PG_W;
1.1       deraadt  5209:                } else {
                   5210:                        /* in MMU: take away write bits from MMU PTEs */
1.43      pk       5211:                        if (CTX_USABLE(pm,rp)) {
1.267     pk       5212:                                int pte;
1.1       deraadt  5213:
                   5214:                                /*
                   5215:                                 * Flush cache so that any existing cache
                   5216:                                 * tags are updated.  This is really only
                   5217:                                 * needed for PTEs that lose PG_W.
                   5218:                                 */
1.265     pk       5219:                                pmap_stats.ps_npg_prot_all +=
                   5220:                                        (nva - va) >> PGSHIFT;
1.71      pk       5221:                                setcontext4(pm->pm_ctxnum);
1.1       deraadt  5222:                                for (; va < nva; va += NBPG) {
1.267     pk       5223:                                        pte = getpte4(va);
                   5224:                                        if ((pte & (PG_W|PG_TYPE)) ==
1.35      pk       5225:                                            (PG_W|PG_OBMEM)) {
1.1       deraadt  5226:                                                pmap_stats.ps_npg_prot_actual++;
1.214     pk       5227:                                                cache_flush_page(va, pm->pm_ctxnum);
1.267     pk       5228:                                                setpte4(va, pte & ~PG_W);
1.1       deraadt  5229:                                        }
                   5230:                                }
                   5231:                        } else {
1.124     pk       5232:                                int pteva;
1.1       deraadt  5233:
                   5234:                                /*
                   5235:                                 * No context, hence not cached;
                   5236:                                 * just update PTEs.
                   5237:                                 */
1.71      pk       5238:                                setcontext4(0);
1.276     wiz      5239:                                /* XXX use per-CPU pteva? */
1.69      pk       5240:                                if (HASSUN4_MMU3L)
1.43      pk       5241:                                        setregmap(0, tregion);
                   5242:                                setsegmap(0, sp->sg_pmeg);
1.18      deraadt  5243:                                pteva = VA_VPG(va) << PGSHIFT;
1.1       deraadt  5244:                                for (; va < nva; pteva += NBPG, va += NBPG)
1.55      pk       5245:                                        setpte4(pteva, getpte4(pteva) & ~PG_W);
1.1       deraadt  5246:                        }
                   5247:                }
                   5248:        }
1.322     ad       5249:        PMAP_UNLOCK();
1.12      pk       5250:        splx(s);
1.71      pk       5251:        setcontext4(ctx);
1.1       deraadt  5252: }
                   5253:
                   5254: /*
                   5255:  * Change the protection and/or wired status of the given (MI) virtual page.
                   5256:  * XXX: should have separate function (or flag) telling whether only wiring
                   5257:  * is changing.
                   5258:  */
                   5259: void
1.303     uwe      5260: pmap_changeprot4_4c(struct pmap *pm, vaddr_t va, vm_prot_t prot, int flags)
1.1       deraadt  5261: {
1.267     pk       5262:        int vr, vs, newprot, ctx, pte, *ptep;
                   5263:        int pmeg;
1.43      pk       5264:        struct regmap *rp;
                   5265:        struct segmap *sp;
1.1       deraadt  5266:
                   5267: #ifdef DEBUG
                   5268:        if (pmapdebug & PDB_CHANGEPROT)
1.91      fair     5269:                printf("pmap_changeprot(%p, 0x%lx, 0x%x, 0x%x)\n",
1.248     pk       5270:                    pm, va, prot, flags);
1.1       deraadt  5271: #endif
                   5272:
1.42      mycroft  5273:        if (pm == pmap_kernel())
1.1       deraadt  5274:                newprot = prot & VM_PROT_WRITE ? PG_S|PG_W : PG_S;
                   5275:        else
                   5276:                newprot = prot & VM_PROT_WRITE ? PG_W : 0;
1.43      pk       5277:        vr = VA_VREG(va);
                   5278:        vs = VA_VSEG(va);
                   5279:        rp = &pm->pm_regmap[vr];
                   5280:        sp = &rp->rg_segmap[vs];
1.267     pk       5281:        ptep = &sp->sg_pte[VA_VPG(va)];
                   5282:
1.1       deraadt  5283:        pmap_stats.ps_changeprots++;
                   5284:
1.267     pk       5285:        pte = *ptep;
                   5286:        if (pte & PG_WIRED && (flags & PMAP_WIRED) == 0) {
                   5287:                pte &= ~PG_WIRED;
                   5288:                sp->sg_nwired--;
1.296     chs      5289:                pm->pm_stats.wired_count--;
1.267     pk       5290:        } else if ((pte & PG_WIRED) == 0 && flags & PMAP_WIRED) {
                   5291:                pte |= PG_WIRED;
                   5292:                sp->sg_nwired++;
1.296     chs      5293:                pm->pm_stats.wired_count++;
1.267     pk       5294:        }
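                                /*
                                 * sg_nwired counts wired PTEs in this segment; the PMEG
                                 * lock state is resynchronized with it below, since a
                                 * segment holding wired pages must keep its PMEG resident.
                                 */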
                   5295:        pte = (pte & ~PG_PROT) | newprot;
                   5296:        /* Update S/W pte entry */
                   5297:        *ptep = pte;
                   5298:
1.1       deraadt  5299:        /* update PTEs in software or hardware */
1.267     pk       5300:        if ((pmeg = sp->sg_pmeg) != seginval) {
1.1       deraadt  5301:                /* update in hardware */
1.71      pk       5302:                ctx = getcontext4();
1.43      pk       5303:                if (CTX_USABLE(pm,rp)) {
1.88      pk       5304:                        /*
                   5305:                         * Use current context.
                   5306:                         * Flush cache if page has been referenced to
                   5307:                         * avoid stale protection bits in the cache tags.
                   5308:                         */
1.71      pk       5309:                        setcontext4(pm->pm_ctxnum);
1.267     pk       5310:                        pte = getpte4(va);
                   5311:                        if ((pte & (PG_U|PG_NC|PG_TYPE)) == (PG_U|PG_OBMEM))
1.214     pk       5312:                                cache_flush_page(va, pm->pm_ctxnum);
1.1       deraadt  5313:                } else {
1.71      pk       5314:                        setcontext4(0);
1.276     wiz      5315:                        /* XXX use per-CPU va? */
1.69      pk       5316:                        if (HASSUN4_MMU3L)
1.43      pk       5317:                                setregmap(0, tregion);
1.267     pk       5318:                        setsegmap(0, pmeg);
1.18      deraadt  5319:                        va = VA_VPG(va) << PGSHIFT;
1.267     pk       5320:                        pte = getpte4(va);
1.1       deraadt  5321:                }
1.267     pk       5322:                pte = (pte & ~PG_PROT) | newprot;
                   5323:                setpte4(va, pte);
1.71      pk       5324:                setcontext4(ctx);
1.267     pk       5325: #ifdef DIAGNOSTIC
                   5326:                if (sp->sg_nwired > sp->sg_npte || sp->sg_nwired < 0)
                    5327:                        panic("pmap_changeprot: pm %p, va %lx: nleft=%d, nwired=%d",
                   5328:                                pm, va, sp->sg_npte, sp->sg_nwired);
                   5329: #endif
                   5330:                if (sp->sg_nwired == 0)
                   5331:                        mmu_pmeg_unlock(pmeg);
                   5332:                else
                   5333:                        mmu_pmeg_lock(pmeg);
1.1       deraadt  5334:        }
                   5335: }
                   5336:
1.232     pk       5337: #endif /* SUN4 || SUN4C */
1.55      pk       5338:
1.265     pk       5339: #if defined(SUN4M) || defined(SUN4D)
1.1       deraadt  5340: /*
1.55      pk       5341:  * Lower (make more strict) the protection on the specified
                   5342:  * physical page.
1.1       deraadt  5343:  *
1.55      pk       5344:  * There are only two cases: either the protection is going to 0
                   5345:  * (in which case we do the dirty work here), or it is going
                   5346:  * to read-only (in which case pv_changepte does the trick).
1.1       deraadt  5347:  */
                   5348: void
1.303     uwe      5349: pmap_page_protect4m(struct vm_page *pg, vm_prot_t prot)
1.1       deraadt  5350: {
1.236     pk       5351:        struct pvlist *pv, *npv;
1.124     pk       5352:        struct pmap *pm;
1.268     pk       5353:        vaddr_t va;
                   5354:        int vr, vs, tpte;
1.217     pk       5355:        int flags, nleft, s;
1.55      pk       5356:        struct regmap *rp;
                   5357:        struct segmap *sp;
1.45      pk       5358:
                   5359: #ifdef DEBUG
1.55      pk       5360:        if ((pmapdebug & PDB_CHANGEPROT) ||
                   5361:            (pmapdebug & PDB_REMOVE && prot == VM_PROT_NONE))
1.221     pk       5362:                printf("pmap_page_protect[%d](0x%lx, 0x%x)\n",
1.236     pk       5363:                        cpu_number(), VM_PAGE_TO_PHYS(pg), prot);
1.45      pk       5364: #endif
1.243     pk       5365:        s = splvm();
1.322     ad       5366:        PMAP_LOCK();
1.243     pk       5367:
1.55      pk       5368:        if (prot & VM_PROT_READ) {
1.236     pk       5369:                pv_changepte4m(pg, 0, PPROT_WRITE);
1.243     pk       5370:                goto out;
1.45      pk       5371:        }
1.39      pk       5372:
1.1       deraadt  5373:        /*
1.55      pk       5374:         * Remove all access to all people talking to this page.
1.243     pk       5375:         * Walk down PV list, removing all mappings. The logic is much
                   5376:         * like that for pmap_remove, but we know we are removing exactly
                   5377:         * one page.
1.1       deraadt  5378:         */
1.236     pk       5379:        pv = VM_MDPAGE_PVHEAD(pg);
1.243     pk       5380:        if (pv->pv_pmap == NULL)
                   5381:                goto out;
1.162     pk       5382:
                   5383:        /* This pv head will become empty, so clear caching state flags */
1.182     pk       5384:        flags = pv->pv_flags & ~(PV_NC|PV_ANC);
1.162     pk       5385:        while (pv != NULL) {
                   5386:                pm = pv->pv_pmap;
1.55      pk       5387:                va = pv->pv_va;
                   5388:                vr = VA_VREG(va);
                   5389:                vs = VA_VSEG(va);
                   5390:                rp = &pm->pm_regmap[vr];
                   5391:                if (rp->rg_nsegmap == 0)
                    5392:                        panic("pmap_page_protect: empty vreg");
                   5393:                sp = &rp->rg_segmap[vs];
1.281     pk       5394:                nleft = sp->sg_npte;
                   5395:                if (pm != pmap_kernel()) {
                   5396:                        if (nleft <= 0)
                   5397:                                panic("pmap_page_protect: empty vseg");
                   5398:                        sp->sg_npte = --nleft;
                   5399:                }
1.1       deraadt  5400:
1.236     pk       5401:                /*
                   5402:                 * Invalidate PTE in MMU pagetables.
                   5403:                 * Flush cache if necessary.
                   5404:                 */
1.72      pk       5405:                if (pm->pm_ctx) {
1.214     pk       5406:                        cache_flush_page(va, pm->pm_ctxnum);
1.72      pk       5407:                }
                   5408:
                   5409:                tpte = sp->sg_pte[VA_SUN4M_VPG(va)];
1.196     mrg      5410:                setpgt4m_va(va, &sp->sg_pte[VA_SUN4M_VPG(va)], SRMMU_TEINVALID,
1.226     mrg      5411:                    pm->pm_ctx != NULL, pm->pm_ctxnum, PMAP_CPUSET(pm));
1.1       deraadt  5412:
1.277     pk       5413:                pm->pm_stats.resident_count--;
1.296     chs      5414:                if (sp->sg_wiremap & (1 << VA_SUN4M_VPG(va))) {
                   5415:                        sp->sg_wiremap &= ~(1 << VA_SUN4M_VPG(va));
                   5416:                        pm->pm_stats.wired_count--;
                   5417:                }
1.277     pk       5418:
1.55      pk       5419:                if ((tpte & SRMMU_TETYPE) != SRMMU_TEPTE)
1.268     pk       5420:                        panic("pmap_page_protect !PG_V: pg %p va %lx", pg, va);
1.72      pk       5421:
1.55      pk       5422:                flags |= MR4M(tpte);
1.43      pk       5423:
1.281     pk       5424:                if (pm != pmap_kernel() && nleft == 0)
1.162     pk       5425:                        /*
                   5426:                         * Entire user mode segment is gone
                   5427:                         */
1.242     pk       5428:                        pgt_lvl23_remove4m(pm, rp, sp, vr, vs);
1.83      pk       5429:
1.55      pk       5430:                npv = pv->pv_next;
1.236     pk       5431:                if (pv != VM_MDPAGE_PVHEAD(pg))
1.122     pk       5432:                        pool_put(&pv_pool, pv);
1.162     pk       5433:                pv = npv;
1.55      pk       5434:        }
1.162     pk       5435:
                   5436:        /* Finally, update pv head */
1.236     pk       5437:        VM_MDPAGE_PVHEAD(pg)->pv_pmap = NULL;
                   5438:        VM_MDPAGE_PVHEAD(pg)->pv_next = NULL;
                   5439:        VM_MDPAGE_PVHEAD(pg)->pv_flags = flags;
1.243     pk       5440:
                   5441: out:
1.322     ad       5442:        PMAP_UNLOCK();
1.55      pk       5443:        splx(s);
                   5444: }
                   5445:
                   5446: /*
                   5447:  * Lower (make more strict) the protection on the specified
                   5448:  * range of this pmap.
                   5449:  */
                   5450: void
1.303     uwe      5451: pmap_protect4m(struct pmap *pm, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
1.55      pk       5452: {
1.145     pk       5453:        vaddr_t va, nva;
1.217     pk       5454:        int s, vr, vs;
1.55      pk       5455:        struct regmap *rp;
                   5456:        struct segmap *sp;
1.269     chs      5457:        int newprot;
                   5458:
1.55      pk       5459:        if ((prot & VM_PROT_READ) == 0) {
                   5460:                pmap_remove(pm, sva, eva);
                   5461:                return;
                   5462:        }
                   5463:
1.145     pk       5464: #ifdef DEBUG
                   5465:        if (pmapdebug & PDB_CHANGEPROT)
1.221     pk       5466:                printf("pmap_protect[%d][curpid %d, ctx %d,%d](%lx, %lx, %x)\n",
1.269     chs      5467:                        cpu_number(), curproc->p_pid,
                   5468:                        getcontext4m(), pm->pm_ctx ? pm->pm_ctxnum : -1,
                   5469:                        sva, eva, prot);
1.145     pk       5470: #endif
                   5471:
1.269     chs      5472:        newprot = pte_prot4m(pm, prot);
                   5473:
                   5474:        write_user_windows();
1.175     thorpej  5475:        s = splvm();
1.322     ad       5476:        PMAP_LOCK();
1.55      pk       5477:
                   5478:        for (va = sva; va < eva;) {
                   5479:                vr = VA_VREG(va);
                   5480:                vs = VA_VSEG(va);
                   5481:                rp = &pm->pm_regmap[vr];
                   5482:                nva = VSTOVA(vr,vs + 1);
                   5483:                if (nva > eva)
                   5484:                        nva = eva;
                   5485:                if (rp->rg_nsegmap == 0) {
                   5486:                        va = nva;
                   5487:                        continue;
                   5488:                }
                   5489:                sp = &rp->rg_segmap[vs];
1.281     pk       5490:                if (pm != pmap_kernel() && sp->sg_npte == 0) {
1.55      pk       5491:                        va = nva;
                   5492:                        continue;
                   5493:                }
1.269     chs      5494:
1.145     pk       5495:                /*
                   5496:                 * pages loaded: take away write bits from MMU PTEs
                   5497:                 */
1.265     pk       5498:                pmap_stats.ps_npg_prot_all += (nva - va) >> PGSHIFT;
1.72      pk       5499:                for (; va < nva; va += NBPG) {
1.269     chs      5500:                        int tpte, npte;
1.100     pk       5501:
1.72      pk       5502:                        tpte = sp->sg_pte[VA_SUN4M_VPG(va)];
1.269     chs      5503:                        if ((tpte & SRMMU_PGTYPE) != PG_SUN4M_OBMEM)
                   5504:                                continue;
1.280     pk       5505:                        if ((tpte & SRMMU_TETYPE) != SRMMU_TEPTE)
                   5506:                                continue;
1.269     chs      5507:                        npte = (tpte & ~SRMMU_PROT_MASK) | newprot;
                   5508:                        if (npte == tpte)
                   5509:                                continue;
                   5510:
1.55      pk       5511:                        /*
                   5512:                         * Flush cache so that any existing cache
1.269     chs      5513:                         * tags are updated.
1.55      pk       5514:                         */
1.269     chs      5515:
                   5516:                        pmap_stats.ps_npg_prot_actual++;
                   5517:                        if (pm->pm_ctx) {
                   5518:                                cache_flush_page(va, pm->pm_ctxnum);
1.55      pk       5519:                        }
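                                                /*
                                                 * Update the PTE and flush the stale
                                                 * translation from the TLBs of all CPUs
                                                 * in this pmap's cpuset.
                                                 */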
1.269     chs      5520:                        updatepte4m(va, &sp->sg_pte[VA_SUN4M_VPG(va)],
                   5521:                            SRMMU_PROT_MASK, newprot, pm->pm_ctxnum,
                   5522:                            PMAP_CPUSET(pm));
1.55      pk       5523:                }
                   5524:        }
1.322     ad       5525:        PMAP_UNLOCK();
1.55      pk       5526:        splx(s);
                   5527: }
                   5528:
                   5529: /*
                   5530:  * Change the protection and/or wired status of the given (MI) virtual page.
                   5531:  * XXX: should have separate function (or flag) telling whether only wiring
                   5532:  * is changing.
                   5533:  */
                   5534: void
1.303     uwe      5535: pmap_changeprot4m(struct pmap *pm, vaddr_t va, vm_prot_t prot, int flags)
1.55      pk       5536: {
1.248     pk       5537:        int pte, newprot;
1.100     pk       5538:        struct regmap *rp;
                   5539:        struct segmap *sp;
1.308     thorpej  5540:        bool owired;
1.55      pk       5541:
                   5542: #ifdef DEBUG
                   5543:        if (pmapdebug & PDB_CHANGEPROT)
1.221     pk       5544:                printf("pmap_changeprot[%d](%p, 0x%lx, 0x%x, 0x%x)\n",
1.248     pk       5545:                    cpu_number(), pm, va, prot, flags);
1.55      pk       5546: #endif
                   5547:
1.269     chs      5548:        newprot = pte_prot4m(pm, prot);
1.55      pk       5549:
                   5550:        pmap_stats.ps_changeprots++;
                   5551:
1.100     pk       5552:        rp = &pm->pm_regmap[VA_VREG(va)];
                   5553:        sp = &rp->rg_segmap[VA_VSEG(va)];
                   5554:
                   5555:        pte = sp->sg_pte[VA_SUN4M_VPG(va)];
1.296     chs      5556:        owired = sp->sg_wiremap & (1 << VA_SUN4M_VPG(va));
                   5557:
                   5558:        if (owired) {
                   5559:                pm->pm_stats.wired_count--;
                   5560:                sp->sg_wiremap &= ~(1 << VA_SUN4M_VPG(va));
                   5561:        }
                   5562:        if (flags & PMAP_WIRED) {
                   5563:                pm->pm_stats.wired_count++;
                   5564:                sp->sg_wiremap |= (1 << VA_SUN4M_VPG(va));
1.100     pk       5565:        }
                   5566:
                   5567:        if (pm->pm_ctx) {
1.88      pk       5568:                /*
                   5569:                 * Use current context.
                   5570:                 * Flush cache if page has been referenced to
                   5571:                 * avoid stale protection bits in the cache tags.
                   5572:                 */
1.145     pk       5573:
1.100     pk       5574:                if ((pte & (SRMMU_PG_C|SRMMU_PGTYPE)) ==
1.88      pk       5575:                    (SRMMU_PG_C|PG_SUN4M_OBMEM))
1.214     pk       5576:                        cache_flush_page(va, pm->pm_ctxnum);
1.55      pk       5577:        }
1.100     pk       5578:
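                                /*
                                 * Install the new protection in the page table; if the
                                 * pmap has a hardware context, the old translation is
                                 * also demapped on all CPUs in this pmap's cpuset.
                                 */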
1.195     mrg      5579:        setpgt4m_va(va, &sp->sg_pte[VA_SUN4M_VPG(va)],
1.196     mrg      5580:                 (pte & ~SRMMU_PROT_MASK) | newprot,
1.226     mrg      5581:                 pm->pm_ctx != NULL, pm->pm_ctxnum, PMAP_CPUSET(pm));
1.195     mrg      5582:
1.55      pk       5583: }
1.210     thorpej  5584: #endif /* SUN4M || SUN4D */
1.55      pk       5585:
                   5586: /*
                   5587:  * Insert (MI) physical page pa at virtual address va in the given pmap.
                   5588:  * NB: the pa parameter includes type bits PMAP_OBIO, PMAP_NC as necessary.
                   5589:  *
                   5590:  * If pa is not in the `managed' range it will not be `bank mapped'.
                   5591:  * This works during bootstrap only because the first 4MB happens to
                   5592:  * map one-to-one.
                   5593:  *
                   5594:  * There may already be something else there, or we might just be
                   5595:  * changing protections and/or wiring on an existing mapping.
                   5596:  *     XXX     should have different entry points for changing!
                   5597:  */
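                         /*
                          * Illustrative only (an assumed, typical MI call sequence, not
                          * part of this file): a fault handler would invoke this roughly as
                          *
                          *	error = pmap_enter(pmap, va, pa, VM_PROT_READ | VM_PROT_WRITE,
                          *	    access_type | PMAP_CANFAIL);
                          *
                          * where `access_type' encodes the faulting access and PMAP_CANFAIL
                          * requests ENOMEM rather than a panic on resource shortage.
                          */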
                   5598:
                   5599: #if defined(SUN4) || defined(SUN4C)
                   5600:
1.153     thorpej  5601: int
1.303     uwe      5602: pmap_enter4_4c(struct pmap *pm, vaddr_t va, paddr_t pa,
1.333     skrll    5603:               vm_prot_t prot, u_int flags)
1.55      pk       5604: {
1.236     pk       5605:        struct vm_page *pg;
1.124     pk       5606:        int pteproto, ctx;
1.187     pk       5607:        int error;
1.55      pk       5608:
                   5609:        if (VA_INHOLE(va)) {
                   5610: #ifdef DEBUG
1.91      fair     5611:                printf("pmap_enter: pm %p, va 0x%lx, pa 0x%lx: in MMU hole\n",
1.55      pk       5612:                        pm, va, pa);
                   5613: #endif
1.185     chs      5614:                return 0;
1.55      pk       5615:        }
                   5616:
                   5617: #ifdef DEBUG
                   5618:        if (pmapdebug & PDB_ENTER)
1.91      fair     5619:                printf("pmap_enter(%p, 0x%lx, 0x%lx, 0x%x, 0x%x)\n",
1.187     pk       5620:                    pm, va, pa, prot, flags);
1.55      pk       5621: #endif
                   5622:
1.236     pk       5623:        pg = PHYS_TO_VM_PAGE(pa);
1.82      pk       5624:        pteproto = PG_V | PMAP_T2PTE_4(pa);
                   5625:        pa &= ~PMAP_TNC_4;
1.299     chs      5626:
1.55      pk       5627:        /*
                   5628:         * Set up prototype for new PTE.  Cannot set PG_NC from PV_NC yet
                   5629:         * since the pvlist no-cache bit might change as a result of the
                   5630:         * new mapping.
                   5631:         */
1.236     pk       5632:        pteproto |= atop(pa) & PG_PFNUM;
1.55      pk       5633:        if (prot & VM_PROT_WRITE)
                   5634:                pteproto |= PG_W;
1.267     pk       5635:        if ((flags & PMAP_WIRED) != 0)
                   5636:                pteproto |= PG_WIRED;
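                                /*
                                 * `flags' also carries the access type that established
                                 * this mapping: pre-set the referenced (PG_U) and modified
                                 * (PG_M) bits so the new PTE reflects that access.
                                 */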
1.299     chs      5637:        if (flags & VM_PROT_ALL) {
                   5638:                pteproto |= PG_U;
                   5639:                if (flags & VM_PROT_WRITE) {
                   5640:                        pteproto |= PG_M;
                   5641:                }
                   5642:        }
1.267     pk       5643:
1.258     pk       5644:        write_user_windows();
1.71      pk       5645:        ctx = getcontext4();
1.55      pk       5646:        if (pm == pmap_kernel())
1.236     pk       5647:                error = pmap_enk4_4c(pm, va, prot, flags, pg, pteproto | PG_S);
1.55      pk       5648:        else
1.236     pk       5649:                error = pmap_enu4_4c(pm, va, prot, flags, pg, pteproto);
1.71      pk       5650:        setcontext4(ctx);
1.187     pk       5651:        return (error);
1.55      pk       5652: }
                   5653:
                   5654: /* enter new (or change existing) kernel mapping */
1.187     pk       5655: int
1.303     uwe      5656: pmap_enk4_4c(struct pmap *pm, vaddr_t va, vm_prot_t prot, int flags,
                   5657:             struct vm_page *pg, int pteproto)
1.55      pk       5658: {
1.267     pk       5659:        int vr, vs, pte, s, inmmu;
1.265     pk       5660:        int *ptep;
1.55      pk       5661:        struct regmap *rp;
                   5662:        struct segmap *sp;
1.278     pk       5663:        int error = 0;
1.55      pk       5664:
                   5665:        vr = VA_VREG(va);
                   5666:        vs = VA_VSEG(va);
                   5667:        rp = &pm->pm_regmap[vr];
                   5668:        sp = &rp->rg_segmap[vs];
1.265     pk       5669:        ptep = &sp->sg_pte[VA_VPG(va)];
1.175     thorpej  5670:        s = splvm();            /* XXX way too conservative */
1.55      pk       5671:
1.69      pk       5672: #if defined(SUN4_MMU3L)
1.265     pk       5673:        if (HASSUN4_MMU3L && rp->rg_smeg == reginval)
                   5674:                mmu_pagein_reg(pm, rp, va, vr, &region_locked);
                   5675: #endif
1.1       deraadt  5676:
1.265     pk       5677:        inmmu = sp->sg_pmeg != seginval;
1.267     pk       5678:        if ((pte = *ptep) & PG_V) {
1.1       deraadt  5679:
1.34      pk       5680:                /* old mapping exists, and is of the same pa type */
1.267     pk       5681:                if ((pte & (PG_PFNUM|PG_TYPE)) ==
1.34      pk       5682:                    (pteproto & (PG_PFNUM|PG_TYPE))) {
1.1       deraadt  5683:                        /* just changing protection and/or wiring */
1.248     pk       5684:                        pmap_changeprot4_4c(pm, va, prot, flags);
1.1       deraadt  5685:                        splx(s);
1.187     pk       5686:                        return (0);
1.1       deraadt  5687:                }
                   5688:
1.267     pk       5689:                if ((pte & PG_TYPE) == PG_OBMEM) {
1.236     pk       5690:                        struct vm_page *opg;
1.194     chs      5691:
1.34      pk       5692:                        /*
                   5693:                         * Switcheroo: changing pa for this va.
                   5694:                         * If old pa was managed, remove from pvlist.
                   5695:                         * If old page was cached, flush cache.
                   5696:                         */
1.267     pk       5697:                        if ((opg = pvhead4_4c(pte)) != NULL)
1.236     pk       5698:                                pv_unlink4_4c(opg, pm, va);
1.267     pk       5699:                        if (inmmu && (pte & PG_NC) == 0) {
1.71      pk       5700:                                setcontext4(0); /* ??? */
1.214     pk       5701:                                cache_flush_page(va, 0);
1.34      pk       5702:                        }
1.1       deraadt  5703:                }
1.278     pk       5704:                *ptep = 0;
                   5705:                if (inmmu)
                   5706:                        setpte4(va, 0);
1.296     chs      5707:                if (pte & PG_WIRED) {
1.267     pk       5708:                        sp->sg_nwired--;
1.296     chs      5709:                        pm->pm_stats.wired_count--;
                   5710:                }
1.277     pk       5711:                pm->pm_stats.resident_count--;
1.1       deraadt  5712:        } else {
                   5713:                /* adding new entry */
1.265     pk       5714:                if (sp->sg_npte++ == 0) {
                   5715: #ifdef DIAGNOSTIC
                   5716:                        int i; for (i = 0; i < NPTESG; i++) {
                   5717:                                if (sp->sg_pte[i] == 0)
                   5718:                                        continue;
                    5719:                                panic("pmap_enk: pm %p, va %lx: pte[%d] not empty",
                   5720:                                        pm, va, i);
                   5721:                        }
                   5722: #endif
                   5723:                        rp->rg_nsegmap++;
                   5724:                }
1.1       deraadt  5725:        }
                   5726:
                   5727:        /*
                   5728:         * If the new mapping is for a managed PA, enter into pvlist.
                   5729:         */
1.278     pk       5730:        if (pg != NULL && (error = pv_link4_4c(pg, pm, va, &pteproto)) != 0) {
                   5731:                if (--sp->sg_npte == 0)
                   5732:                        pgt_lvl23_remove4_4c(pm, rp, sp, vr, vs);
                   5733:                if ((flags & PMAP_CANFAIL) != 0)
                   5734:                        goto out;
                   5735:                panic("pmap_enter: cannot allocate PV entry");
                   5736:        }
1.1       deraadt  5737:
1.265     pk       5738:        /* Update S/W page table */
                   5739:        *ptep = pteproto;
1.296     chs      5740:        if (pteproto & PG_WIRED) {
1.267     pk       5741:                sp->sg_nwired++;
1.296     chs      5742:                pm->pm_stats.wired_count++;
                   5743:        }
1.277     pk       5744:        pm->pm_stats.resident_count++;
1.1       deraadt  5745:
1.267     pk       5746: #ifdef DIAGNOSTIC
                   5747:        if (sp->sg_nwired > sp->sg_npte || sp->sg_nwired < 0)
                   5748:                panic("pmap_enk: pm %p, va %lx: nleft=%d, nwired=%d",
                   5749:                        pm, va, sp->sg_npte, sp->sg_nwired);
                   5750: #endif
1.265     pk       5751:        if (sp->sg_pmeg == seginval)
                   5752:                mmu_pagein_seg(pm, sp, va, vr, vs,
1.267     pk       5753:                        (pteproto & PG_WIRED) != 0 ? &segm_locked : &segm_lru);
                   5754:        else if ((pteproto & PG_WIRED) != 0)
1.265     pk       5755:                mmu_pmeg_lock(sp->sg_pmeg);
1.1       deraadt  5756:
1.265     pk       5757:        /* Update H/W page table */
1.267     pk       5758:        setpte4(va, pteproto & ~PG_MBZ);
1.278     pk       5759: out:
1.1       deraadt  5760:        splx(s);
1.278     pk       5761:        return (error);
1.1       deraadt  5762: }
                   5763:
                   5764: /* enter new (or change existing) user mapping */
1.187     pk       5765: int
1.303     uwe      5766: pmap_enu4_4c(struct pmap *pm, vaddr_t va, vm_prot_t prot, int flags,
                   5767:             struct vm_page *pg, int pteproto)
1.1       deraadt  5768: {
1.267     pk       5769:        int vr, vs, *ptep, pte, pmeg, s;
1.187     pk       5770:        int error = 0;
1.43      pk       5771:        struct regmap *rp;
                   5772:        struct segmap *sp;
1.1       deraadt  5773:
1.264     pk       5774:        pm->pm_flags &= ~PMAP_USERCACHECLEAN;
                   5775:
1.43      pk       5776:        vr = VA_VREG(va);
                   5777:        vs = VA_VSEG(va);
                   5778:        rp = &pm->pm_regmap[vr];
1.175     thorpej  5779:        s = splvm();                    /* XXX conservative */
1.1       deraadt  5780:
                   5781:        /*
                    5782:         * If there is no software copy in which to keep the PTEs
                    5783:         * while they are not loaded in the hardware, this must be
                    5784:         * a new virtual segment.  Get PTE space and count the segment.
                   5785:         *
                   5786:         * TO SPEED UP CTX ALLOC, PUT SEGMENT BOUNDS STUFF HERE
                   5787:         * AND IN pmap_rmu()
                   5788:         */
1.13      pk       5789:
1.43      pk       5790:        GAP_SHRINK(pm,vr);
1.13      pk       5791:
                   5792: #ifdef DEBUG
                   5793:        if (pm->pm_gap_end < pm->pm_gap_start) {
1.91      fair     5794:                printf("pmap_enu: gap_start 0x%x, gap_end 0x%x\n",
1.13      pk       5795:                        pm->pm_gap_start, pm->pm_gap_end);
                   5796:                panic("pmap_enu: gap botch");
                   5797:        }
                   5798: #endif
                   5799:
1.43      pk       5800:        if (rp->rg_segmap == NULL) {
                   5801:                /* definitely a new mapping */
1.124     pk       5802:                int i;
1.265     pk       5803:                int mflag = PR_NOWAIT;
1.43      pk       5804:
1.187     pk       5805:        rretry:
1.265     pk       5806:                sp = (struct segmap *)pool_get(&segmap_pool, mflag);
1.187     pk       5807:                if (sp == NULL) {
                   5808:                        if ((flags & PMAP_CANFAIL) != 0) {
                   5809:                                error = ENOMEM;
                   5810:                                goto out;
                   5811:                        }
1.265     pk       5812:                        mflag = PR_WAITOK;
1.43      pk       5813:                        goto rretry;
                   5814:                }
1.187     pk       5815: #ifdef DEBUG
                   5816:                if (rp->rg_segmap != NULL)
                   5817:                        panic("pmap_enter: segment filled during sleep");
                   5818: #endif
1.311     christos 5819:                qzero((void *)sp, NSEGRG * sizeof (struct segmap));
1.43      pk       5820:                rp->rg_segmap = sp;
                   5821:                rp->rg_nsegmap = 0;
                   5822:                for (i = NSEGRG; --i >= 0;)
                   5823:                        sp++->sg_pmeg = seginval;
                   5824:        }
                   5825:
                   5826:        sp = &rp->rg_segmap[vs];
                   5827:
1.267     pk       5828:        if ((ptep = sp->sg_pte) == NULL) {
1.1       deraadt  5829:                /* definitely a new mapping */
1.267     pk       5830:                int size = NPTESG * sizeof *ptep;
1.265     pk       5831:                int mflag = PR_NOWAIT;
1.1       deraadt  5832:
1.187     pk       5833:        sretry:
1.267     pk       5834:                ptep = (int *)pool_get(&pte_pool, mflag);
                   5835:                if (ptep == NULL) {
1.187     pk       5836:                        if ((flags & PMAP_CANFAIL) != 0) {
                   5837:                                error = ENOMEM;
                   5838:                                goto out;
                   5839:                        }
1.265     pk       5840:                        mflag = PR_WAITOK;
1.43      pk       5841:                        goto sretry;
1.1       deraadt  5842:                }
                   5843: #ifdef DEBUG
1.187     pk       5844:                if (sp->sg_pte != NULL)
                   5845:                        panic("pmap_enter: pte filled during sleep");
1.43      pk       5846:                if (sp->sg_pmeg != seginval)
1.1       deraadt  5847:                        panic("pmap_enter: new ptes, but not seginval");
                   5848: #endif
1.311     christos 5849:                qzero((void *)ptep, size);
1.267     pk       5850:                sp->sg_pte = ptep;
1.43      pk       5851:                sp->sg_npte = 1;
                   5852:                rp->rg_nsegmap++;
1.1       deraadt  5853:        } else {
                   5854:                /* might be a change: fetch old pte */
1.267     pk       5855:                pte = ptep[VA_VPG(va)];
                   5856:                if (pte & PG_V) {
1.55      pk       5857:                        /* old mapping exists, and is of the same pa type */
1.267     pk       5858:                        if ((pte & (PG_PFNUM|PG_TYPE)) ==
1.55      pk       5859:                            (pteproto & (PG_PFNUM|PG_TYPE))) {
                   5860:                                /* just changing prot and/or wiring */
1.248     pk       5861:                                pmap_changeprot4_4c(pm, va, prot, flags);
                   5862:                                splx(s);
1.187     pk       5863:                                return (0);
1.55      pk       5864:                        }
                   5865:                        /*
                   5866:                         * Switcheroo: changing pa for this va.
                   5867:                         * If old pa was managed, remove from pvlist.
                   5868:                         * If old page was cached, flush cache.
                   5869:                         */
1.65      christos 5870: #if 0
1.187     pk       5871:                        printf("%s[%d]: pmap_enu: changing existing "
1.265     pk       5872:                                "va(0x%lx)=>pa entry\n",
1.187     pk       5873:                                curproc->p_comm, curproc->p_pid, va);
1.65      christos 5874: #endif
1.267     pk       5875:                        if ((pte & PG_TYPE) == PG_OBMEM) {
1.236     pk       5876:                                struct vm_page *opg;
1.267     pk       5877:                                if ((opg = pvhead4_4c(pte)) != NULL)
1.236     pk       5878:                                        pv_unlink4_4c(opg, pm, va);
1.265     pk       5879:                                if (CACHEINFO.c_vactype != VAC_NONE &&
                   5880:                                    (pmeg = sp->sg_pmeg) != seginval) {
                   5881:                                        /* hardware pte */
                   5882:                                        if (CTX_USABLE(pm,rp)) {
                   5883:                                                setcontext4(pm->pm_ctxnum);
                   5884:                                        } else {
                   5885:                                                setcontext4(0);
1.276     wiz      5886:                                                /* XXX use per-CPU pteva? */
1.265     pk       5887:                                                if (HASSUN4_MMU3L)
                   5888:                                                        setregmap(0, tregion);
                   5889:                                                setsegmap(0, pmeg);
                   5890:                                        }
1.214     pk       5891:                                        cache_flush_page(va, pm->pm_ctxnum);
1.265     pk       5892:                                }
1.55      pk       5893:                        }
1.296     chs      5894:                        if (pte & PG_WIRED) {
1.267     pk       5895:                                sp->sg_nwired--;
1.296     chs      5896:                                pm->pm_stats.wired_count--;
                   5897:                        }
1.277     pk       5898:                        pm->pm_stats.resident_count--;
1.278     pk       5899:                        ptep[VA_VPG(va)] = 0;
                   5900:                        if (sp->sg_pmeg != seginval)
                   5901:                                setpte4(va, 0);
1.55      pk       5902:                } else {
                   5903:                        /* adding new entry */
                   5904:                        sp->sg_npte++;
                   5905:                }
                   5906:        }
                   5907:
1.278     pk       5908:        if (pg != NULL && (error = pv_link4_4c(pg, pm, va, &pteproto)) != 0) {
                   5909:                if (--sp->sg_npte == 0)
                   5910:                        /* Sigh, undo pgt allocations */
                   5911:                        pgt_lvl23_remove4_4c(pm, rp, sp, vr, vs);
                   5912:
                   5913:                if ((flags & PMAP_CANFAIL) != 0)
                   5914:                        goto out;
                   5915:                panic("pmap_enter: cannot allocate PV entry");
                   5916:        }
1.55      pk       5917:
1.267     pk       5918:        /* Update S/W page table */
                   5919:        ptep += VA_VPG(va);
                   5920:        *ptep = pteproto;
1.296     chs      5921:        if (pteproto & PG_WIRED) {
1.267     pk       5922:                sp->sg_nwired++;
1.296     chs      5923:                pm->pm_stats.wired_count++;
                   5924:        }
1.277     pk       5925:        pm->pm_stats.resident_count++;
1.267     pk       5926:
                   5927: #ifdef DIAGNOSTIC
                   5928:        if (sp->sg_nwired > sp->sg_npte || sp->sg_nwired < 0)
                   5929:                panic("pmap_enu: pm %p, va %lx: nleft=%d, nwired=%d",
                   5930:                        pm, va, sp->sg_npte, sp->sg_nwired);
                   5931: #endif
                   5932:
1.55      pk       5933:        if ((pmeg = sp->sg_pmeg) != seginval) {
1.267     pk       5934:                /* Update H/W page table */
1.55      pk       5935:                if (CTX_USABLE(pm,rp))
1.71      pk       5936:                        setcontext4(pm->pm_ctxnum);
1.55      pk       5937:                else {
1.71      pk       5938:                        setcontext4(0);
1.69      pk       5939:                        if (HASSUN4_MMU3L)
1.55      pk       5940:                                setregmap(0, tregion);
                   5941:                        setsegmap(0, pmeg);
                   5942:                        va = VA_VPG(va) << PGSHIFT;
                   5943:                }
1.267     pk       5944:                setpte4(va, pteproto & ~PG_MBZ);
1.55      pk       5945:        }
                   5946:
1.187     pk       5947: out:
1.55      pk       5948:        splx(s);
1.187     pk       5949:        return (error);
1.55      pk       5950: }
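
/*
 * Both allocation sites in pmap_enu4_4c() above use the same pool(9)
 * retry idiom.  A condensed sketch, assuming this file's existing
 * headers, with `pp' standing in for segmap_pool or pte_pool:
 */
static void *
example_pool_alloc(struct pool *pp, int flags, int *errorp)
{
	void *p;
	int mflag = PR_NOWAIT;		/* first attempt: don't sleep */

retry:
	p = pool_get(pp, mflag);
	if (p == NULL) {
		if ((flags & PMAP_CANFAIL) != 0) {
			/* caller can cope with a transient failure */
			*errorp = ENOMEM;
			return (NULL);
		}
		mflag = PR_WAITOK;	/* otherwise sleep and retry */
		goto retry;
	}
	*errorp = 0;
	return (p);
}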
                   5951:
1.151     chs      5952: void
1.337     cegger   5953: pmap_kenter_pa4_4c(vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
1.151     chs      5954: {
1.194     chs      5955:        struct pmap *pm = pmap_kernel();
                   5956:        struct regmap *rp;
                   5957:        struct segmap *sp;
1.265     pk       5958:        int vr, vs, s;
                   5959:        int *ptep, pteproto;
                   5960:        int lockit = 1;
1.194     chs      5961:
1.202     pk       5962:        pteproto = PG_S | PG_V | PMAP_T2PTE_4(pa);
1.194     chs      5963:        pa &= ~PMAP_TNC_4;
                   5964:        pteproto |= atop(pa) & PG_PFNUM;
                   5965:        if (prot & VM_PROT_WRITE)
                   5966:                pteproto |= PG_W;
                   5967:
                   5968:        vr = VA_VREG(va);
                   5969:        vs = VA_VSEG(va);
                   5970:        rp = &pm->pm_regmap[vr];
                   5971:        sp = &rp->rg_segmap[vs];
1.265     pk       5972:        ptep = &sp->sg_pte[VA_VPG(va)];
                   5973:
1.267     pk       5974:        if (lockit) {
                   5975:                pteproto |= PG_WIRED;
                   5976:                sp->sg_nwired++;
                   5977:        }
                   5978:
1.265     pk       5979:        KASSERT((*ptep & PG_V) == 0);
1.194     chs      5980:
                   5981:        s = splvm();
                   5982: #if defined(SUN4_MMU3L)
1.265     pk       5983:        if (HASSUN4_MMU3L && rp->rg_smeg == reginval)
                   5984:                mmu_pagein_reg(pm, rp, va, vr, &region_locked);
                   5985: #endif
1.194     chs      5986:
1.265     pk       5987:        if (sp->sg_npte++ == 0) {
                   5988: #ifdef DIAGNOSTIC
                   5989:                int i; for (i = 0; i < NPTESG; i++) {
                   5990:                        if (sp->sg_pte[i] == 0)
                   5991:                                continue;
                    5992:                        panic("pmap_kenter: pm %p, va %lx: pte[%d] not empty",
                   5993:                                pm, va, i);
1.194     chs      5994:                }
1.265     pk       5995: #endif
                   5996:                rp->rg_nsegmap++;
1.194     chs      5997:        }
1.265     pk       5998:
                   5999:        /* Update S/W page table */
                   6000:        *ptep = pteproto;
                   6001:
1.267     pk       6002: #ifdef DIAGNOSTIC
                   6003:        if (sp->sg_nwired > sp->sg_npte || sp->sg_nwired < 0)
                   6004:                panic("pmap_kenter: pm %p, va %lx: nleft=%d, nwired=%d",
                   6005:                        pm, va, sp->sg_npte, sp->sg_nwired);
                   6006: #endif
                   6007:
1.194     chs      6008:        if (sp->sg_pmeg == seginval) {
1.265     pk       6009:                mmu_pagein_seg(pm, sp, va, vr, vs,
                   6010:                                lockit ? &segm_locked : &segm_lru);
                   6011:        } else if (lockit)
                   6012:                mmu_pmeg_lock(sp->sg_pmeg);
                   6013:
                   6014:        /* Update H/W page table */
1.267     pk       6015:        setpte4(va, pteproto & ~PG_MBZ);
1.265     pk       6016:        splx(s);
                   6017: }
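
/*
 * Usage sketch (hypothetical caller of the MI interface implemented
 * by pmap_kenter_pa4_4c() above).  Mappings entered this way are
 * unmanaged and wired, so they must be torn down with pmap_kremove(),
 * never pmap_remove().  `kva' and `pa' are assumed page-aligned.
 */
static void
example_kenter_cycle(vaddr_t kva, paddr_t pa)
{
	pmap_kenter_pa(kva, pa, VM_PROT_READ | VM_PROT_WRITE, 0);
	/* ... access the page through kva ... */
	pmap_kremove(kva, NBPG);
	pmap_update(pmap_kernel());	/* complete any deferred work */
}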
1.194     chs      6018:
1.265     pk       6019: #if notyet
                   6020: void pmap_lockmmu(vaddr_t sva, size_t sz);
1.303     uwe      6021:
1.265     pk       6022: void
                   6023: pmap_lockmmu(vaddr_t sva, size_t sz)
                   6024: {
                   6025:        struct pmap *pm = pmap_kernel();
                   6026:        vaddr_t va, eva;
                   6027:        struct regmap *rp;
                   6028:        struct segmap *sp;
                   6029:        int vr, vs;
                   6030:
1.296     chs      6031:        if (CPU_HAS_SRMMU)
1.265     pk       6032:                return;
                   6033:
                   6034:        eva = sva + sz;
                   6035:        va = VA_ROUNDDOWNTOSEG(sva);
                   6036:
                   6037:        for (; va < eva; va += NBPSG) {
                   6038:                vr = VA_VREG(va);
                   6039:                vs = VA_VSEG(va);
                   6040:                rp = &pm->pm_regmap[vr];
                   6041:                sp = &rp->rg_segmap[vs];
1.194     chs      6042:
1.265     pk       6043:                KASSERT(sp->sg_npte != 0);
1.194     chs      6044:
1.265     pk       6045:                if (sp->sg_pmeg == seginval)
                   6046:                        mmu_pagein_seg(pm, sp, va, vr, vs, &segm_locked);
1.194     chs      6047:                else
1.265     pk       6048:                        mmu_pmeg_lock(sp->sg_pmeg);
1.194     chs      6049:        }
1.151     chs      6050: }
1.265     pk       6051: #endif
1.151     chs      6052:
                   6053: void
1.303     uwe      6054: pmap_kremove4_4c(vaddr_t va, vsize_t len)
1.151     chs      6055: {
1.194     chs      6056:        struct pmap *pm = pmap_kernel();
                   6057:        struct regmap *rp;
                   6058:        struct segmap *sp;
                   6059:        vaddr_t nva, endva;
1.265     pk       6060:        int pte, mmupte, *ptep, perpage, npg, inmmu;
1.194     chs      6061:        int nleft, pmeg;
                   6062:        int vr, vs, s, ctx;
                   6063:
                   6064:        endva = va + len;
                   6065: #ifdef DEBUG
                   6066:        if (pmapdebug & PDB_REMOVE)
                   6067:                printf("pmap_kremove(0x%lx, 0x%lx)\n", va, endva);
                   6068: #endif
                   6069:
1.258     pk       6070:        write_user_windows();
                   6071:
1.194     chs      6072:        s = splvm();
                   6073:        ctx = getcontext();
1.322     ad       6074:        PMAP_LOCK();
1.258     pk       6075:        setcontext4(0);
1.194     chs      6076:        for (; va < endva; va = nva) {
                   6077:                /* do one virtual segment at a time */
                   6078:                vr = VA_VREG(va);
                   6079:                vs = VA_VSEG(va);
                   6080:                nva = VSTOVA(vr, vs + 1);
                   6081:                if (nva == 0 || nva > endva)
                   6082:                        nva = endva;
                   6083:
                   6084:                rp = &pm->pm_regmap[vr];
                   6085:                sp = &rp->rg_segmap[vs];
                   6086:
                   6087:                if (rp->rg_nsegmap == 0)
                   6088:                        continue;
                   6089:                nleft = sp->sg_npte;
                   6090:                if (nleft == 0)
                   6091:                        continue;
                   6092:                pmeg = sp->sg_pmeg;
1.265     pk       6093:                inmmu = (pmeg != seginval);
1.267     pk       6094:                ptep = &sp->sg_pte[VA_VPG(va)];
1.265     pk       6095:
1.194     chs      6096:                /* decide how to flush cache */
1.207     pk       6097:                npg = (nva - va) >> PGSHIFT;
1.265     pk       6098:                if (!inmmu) {
                   6099:                        perpage = 0;
                   6100:                } else if (npg > PMAP_SFL_THRESHOLD) {
1.194     chs      6101:                        /* flush the whole segment */
                   6102:                        perpage = 0;
1.214     pk       6103:                        cache_flush_segment(vr, vs, 0);
1.194     chs      6104:                } else {
                   6105:                        /*
                   6106:                         * flush each page individually;
                   6107:                         * some never need flushing
                   6108:                         */
                   6109:                        perpage = (CACHEINFO.c_vactype != VAC_NONE);
                   6110:                }
1.265     pk       6111:
                   6112:                for (; va < nva; va += NBPG, ptep++) {
                   6113:                        pte = *ptep;
1.274     chs      6114:                        mmupte = inmmu ? getpte4(va) : 0;
1.265     pk       6115:                        if ((pte & PG_V) == 0) {
                   6116: #ifdef DIAGNOSTIC
                   6117:                                if (inmmu && (mmupte & PG_V) != 0)
                    6118:                                        printf("pmap_kremove: inconsistent ptes va=%lx\n", va);
                   6119: #endif
1.194     chs      6120:                                continue;
                   6121:                        }
1.265     pk       6122:                        if ((pte & PG_TYPE) == PG_OBMEM) {
1.194     chs      6123:                                /* if cacheable, flush page as needed */
1.265     pk       6124:                                if (perpage && (mmupte & PG_NC) == 0)
1.214     pk       6125:                                        cache_flush_page(va, 0);
1.194     chs      6126:                        }
                   6127:                        nleft--;
                   6128: #ifdef DIAGNOSTIC
                   6129:                        if (nleft < 0)
                   6130:                                panic("pmap_kremove: too many PTEs in segment; "
                   6131:                                      "va 0x%lx; endva 0x%lx", va, endva);
                   6132: #endif
1.267     pk       6133:                        if (pte & PG_WIRED)
                   6134:                                sp->sg_nwired--;
                   6135:
1.265     pk       6136:                        if (inmmu)
                   6137:                                setpte4(va, 0);
                   6138:                        *ptep = 0;
1.194     chs      6139:                }
                   6140:
1.267     pk       6141: #ifdef DIAGNOSTIC
                   6142:                if (sp->sg_nwired > nleft || sp->sg_nwired < 0)
                   6143:                        panic("pmap_kremove: pm %p, va %lx: nleft=%d, nwired=%d",
                   6144:                                pm, va, nleft, sp->sg_nwired);
                   6145: #endif
                   6146:
1.265     pk       6147:                if ((sp->sg_npte = nleft) == 0)
                   6148:                        pgt_lvl23_remove4_4c(pm, rp, sp, vr, vs);
1.267     pk       6149:                else if (sp->sg_nwired == 0) {
                   6150:                        if (sp->sg_pmeg != seginval)
                   6151:                                mmu_pmeg_unlock(sp->sg_pmeg);
                   6152:                }
1.151     chs      6153:        }
1.322     ad       6154:        PMAP_UNLOCK();
1.211     tsutsui  6155:        setcontext4(ctx);
1.194     chs      6156:        splx(s);
1.151     chs      6157: }
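
/*
 * A condensed sketch of the cache flush policy used above: ranges
 * larger than PMAP_SFL_THRESHOLD get one whole-segment flush, smaller
 * ranges are flushed page by page, and physically-indexed caches
 * (c_vactype == VAC_NONE) need no per-page flushing at all.
 */
static void
example_flush_policy(int vr, int vs, int npg, int inmmu, int *perpage)
{
	if (!inmmu) {
		*perpage = 0;		/* not in the MMU: nothing cached */
	} else if (npg > PMAP_SFL_THRESHOLD) {
		*perpage = 0;
		cache_flush_segment(vr, vs, 0);	/* one flush for the lot */
	} else {
		*perpage = (CACHEINFO.c_vactype != VAC_NONE);
	}
}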
                   6158:
1.248     pk       6159: /*
                   6160:  * Change protection on a range of kernel addresses.
                   6161:  */
                   6162: void
                   6163: pmap_kprotect4_4c(vaddr_t va, vsize_t size, vm_prot_t prot)
                   6164: {
                   6165:        int pte, newprot, ctx;
                   6166:
1.250     pk       6167:        size = roundup(size,NBPG);
1.248     pk       6168:        newprot = prot & VM_PROT_WRITE ? PG_S|PG_W : PG_S;
                   6169:
                   6170:        ctx = getcontext4();
                   6171:        setcontext4(0);
                   6172:        while (size > 0) {
                   6173:                pte = getpte4(va);
                   6174:
                   6175:                /*
                   6176:                 * Flush cache if page has been referenced to
                   6177:                 * avoid stale protection bits in the cache tags.
                   6178:                 */
                   6179:                if ((pte & (PG_NC|PG_TYPE)) == PG_OBMEM)
                   6180:                        cache_flush_page(va, 0);
                   6181:
                   6182:                pte = (pte & ~PG_PROT) | newprot;
                   6183:                setpte4(va, pte);
                   6184:
                   6185:                va += NBPG;
1.250     pk       6186:                size -= NBPG;
1.248     pk       6187:        }
                   6188:        setcontext4(ctx);
                   6189: }
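
/*
 * Usage sketch: write-protecting a kernel buffer with the routine
 * above, assuming the usual front-end dispatch to the 4_4c/4m variant
 * (the `pmap_kprotect' name here is that assumed front-end).  The
 * routine itself rounds `len' up to NBPG.
 */
static void
example_write_protect(vaddr_t kva, vsize_t len)
{
	/* drop PG_W: subsequent stores through kva will fault */
	pmap_kprotect(kva, len, VM_PROT_READ);
}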
1.232     pk       6190: #endif /* SUN4 || SUN4C */
1.55      pk       6191:
1.210     thorpej  6192: #if defined(SUN4M) || defined(SUN4D)   /* SRMMU versions of enter routines */
1.55      pk       6193: /*
                   6194:  * Insert (MI) physical page pa at virtual address va in the given pmap.
                   6195:  * NB: the pa parameter includes type bits PMAP_OBIO, PMAP_NC as necessary.
                   6196:  *
                   6197:  * If pa is not in the `managed' range it will not be `bank mapped'.
                   6198:  * This works during bootstrap only because the first 4MB happens to
                   6199:  * map one-to-one.
                   6200:  *
                   6201:  * There may already be something else there, or we might just be
                   6202:  * changing protections and/or wiring on an existing mapping.
                   6203:  *     XXX     should have different entry points for changing!
                   6204:  */
                   6205:
1.153     thorpej  6206: int
1.303     uwe      6207: pmap_enter4m(struct pmap *pm, vaddr_t va, paddr_t pa,
1.333     skrll    6208:             vm_prot_t prot, u_int flags)
1.55      pk       6209: {
1.236     pk       6210:        struct vm_page *pg;
1.217     pk       6211:        int pteproto;
1.187     pk       6212:        int error;
1.55      pk       6213:
                   6214: #ifdef DEBUG
                   6215:        if (pmapdebug & PDB_ENTER)
1.221     pk       6216:                printf("pmap_enter[curcpu %d, curpid %d, ctx %d,%d]"
1.145     pk       6217:                        "(%p, 0x%lx, 0x%lx, 0x%x, 0x%x)\n",
1.221     pk       6218:                        cpu_number(), curproc==NULL ? -1 : curproc->p_pid,
                   6219:                        getcontext4m(), pm->pm_ctx==NULL ? -1 : pm->pm_ctxnum,
1.187     pk       6220:                        pm, va, pa, prot, flags);
1.55      pk       6221: #endif
1.60      pk       6222:
1.236     pk       6223:        pg = PHYS_TO_VM_PAGE(pa);
                   6224:
1.60      pk       6225:        /* Initialise pteproto with cache bit */
                   6226:        pteproto = (pa & PMAP_NC) == 0 ? SRMMU_PG_C : 0;
1.55      pk       6227:
1.82      pk       6228: #ifdef DEBUG
                   6229:        if (pa & PMAP_TYPE_SRMMU) {     /* this page goes in an iospace */
1.69      pk       6230:                if (cpuinfo.cpu_type == CPUTYP_MS1)
1.58      pk       6231:                        panic("pmap_enter4m: attempt to use 36-bit iospace on"
                   6232:                              " MicroSPARC");
1.55      pk       6233:        }
1.82      pk       6234: #endif
1.269     chs      6235:        pteproto |= SRMMU_TEPTE;
1.82      pk       6236:        pteproto |= PMAP_T2PTE_SRMMU(pa);
1.269     chs      6237:        pa &= ~PMAP_TNC_SRMMU;
1.55      pk       6238:
                   6239:        /*
                   6240:         * Set up prototype for new PTE.  Cannot set PG_NC from PV_NC yet
                   6241:         * since the pvlist no-cache bit might change as a result of the
                   6242:         * new mapping.
                   6243:         */
1.236     pk       6244:        pteproto |= (atop(pa) << SRMMU_PPNSHIFT);
1.55      pk       6245:
1.269     chs      6246:        /* Make sure we get a pte with appropriate perms! */
                   6247:        pteproto |= pte_prot4m(pm, prot);
1.299     chs      6248:        if (flags & VM_PROT_ALL) {
                   6249:                pteproto |= SRMMU_PG_R;
                   6250:                if (flags & VM_PROT_WRITE) {
                   6251:                        pteproto |= SRMMU_PG_M;
                   6252:                }
                   6253:        }
1.55      pk       6254:
                   6255:        if (pm == pmap_kernel())
1.236     pk       6256:                error = pmap_enk4m(pm, va, prot, flags, pg, pteproto | PPROT_S);
1.55      pk       6257:        else
1.236     pk       6258:                error = pmap_enu4m(pm, va, prot, flags, pg, pteproto);
1.55      pk       6259:
1.187     pk       6260:        return (error);
1.55      pk       6261: }
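
/*
 * A minimal sketch of the SRMMU PTE prototype built by pmap_enter4m()
 * above, leaving out the iospace bits (PMAP_T2PTE_SRMMU) and the
 * referenced/modified preloading.  `example_pte4m' is a hypothetical
 * helper; the macros are the real field definitions from this file.
 */
static inline int
example_pte4m(struct pmap *pm, paddr_t pa, vm_prot_t prot)
{
	int pte = SRMMU_TEPTE;			/* ETYPE: a valid PTE */

	if ((pa & PMAP_NC) == 0)
		pte |= SRMMU_PG_C;		/* cacheable */
	pte |= pte_prot4m(pm, prot);		/* access permissions */
	pte |= (atop(pa & ~PMAP_TNC_SRMMU) << SRMMU_PPNSHIFT);
	return (pte);
}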
                   6262:
                   6263: /* enter new (or change existing) kernel mapping */
1.187     pk       6264: int
1.303     uwe      6265: pmap_enk4m(struct pmap *pm, vaddr_t va, vm_prot_t prot, int flags,
                   6266:           struct vm_page *pg, int pteproto)
1.55      pk       6267: {
1.124     pk       6268:        int vr, vs, tpte, s;
1.55      pk       6269:        struct regmap *rp;
                   6270:        struct segmap *sp;
1.242     pk       6271:        int error = 0;
1.55      pk       6272:
                   6273: #ifdef DEBUG
                   6274:        if (va < KERNBASE)
1.72      pk       6275:                panic("pmap_enk4m: can't enter va 0x%lx below KERNBASE", va);
1.55      pk       6276: #endif
                   6277:        vr = VA_VREG(va);
                   6278:        vs = VA_VSEG(va);
                   6279:        rp = &pm->pm_regmap[vr];
                   6280:        sp = &rp->rg_segmap[vs];
                   6281:
1.341     mrg      6282:        kpreempt_disable();
1.175     thorpej  6283:        s = splvm();            /* XXX way too conservative */
1.322     ad       6284:        PMAP_LOCK();
1.55      pk       6285:
                   6286:        if (rp->rg_seg_ptps == NULL) /* enter new region */
1.91      fair     6287:                panic("pmap_enk4m: missing kernel region table for va 0x%lx",va);
1.55      pk       6288:
1.72      pk       6289:        tpte = sp->sg_pte[VA_SUN4M_VPG(va)];
                   6290:        if ((tpte & SRMMU_TETYPE) == SRMMU_TEPTE) {
1.55      pk       6291:
                   6292:                /* old mapping exists, and is of the same pa type */
                   6293:
                   6294:                if ((tpte & SRMMU_PPNMASK) == (pteproto & SRMMU_PPNMASK)) {
                   6295:                        /* just changing protection and/or wiring */
1.248     pk       6296:                        pmap_changeprot4m(pm, va, prot, flags);
1.341     mrg      6297:                        error = 0;
                   6298:                        goto out;
1.55      pk       6299:                }
                   6300:
                   6301:                if ((tpte & SRMMU_PGTYPE) == PG_SUN4M_OBMEM) {
1.236     pk       6302:                        struct vm_page *opg;
1.55      pk       6303: #ifdef DEBUG
1.91      fair     6304: printf("pmap_enk4m: changing existing va=>pa entry: va 0x%lx, pteproto 0x%x, "
                   6305:        "oldpte 0x%x\n", va, pteproto, tpte);
1.55      pk       6306: #endif
                   6307:                        /*
                   6308:                         * Switcheroo: changing pa for this va.
                   6309:                         * If old pa was managed, remove from pvlist.
                   6310:                         * If old page was cached, flush cache.
                   6311:                         */
1.236     pk       6312:                        if ((opg = pvhead4m(tpte)) != NULL)
                   6313:                                pv_unlink4m(opg, pm, va);
1.55      pk       6314:                        if (tpte & SRMMU_PG_C) {
1.214     pk       6315:                                cache_flush_page(va, 0);
1.55      pk       6316:                        }
                   6317:                }
1.234     pk       6318:
                   6319:                /*
                   6320:                 * Invalidate the mapping now, so we can avoid the
                   6321:                 * de-map and update protocol when setting the new
                   6322:                 * PTE below.
                   6323:                 */
                   6324:                setpgt4m_va(va, &sp->sg_pte[VA_SUN4M_VPG(va)],
                   6325:                        SRMMU_TEINVALID, pm->pm_ctx != NULL,
                   6326:                        pm->pm_ctxnum, PMAP_CPUSET(pm));
1.277     pk       6327:                pm->pm_stats.resident_count--;
1.55      pk       6328:        }
                   6329:
                   6330:        /*
                   6331:         * If the new mapping is for a managed PA, enter into pvlist.
                   6332:         */
1.278     pk       6333:        if (pg != NULL && (error = pv_link4m(pg, pm, va, &pteproto)) != 0) {
1.242     pk       6334:                if ((flags & PMAP_CANFAIL) != 0)
                   6335:                        goto out;
                   6336:                panic("pmap_enter: cannot allocate PV entry");
                   6337:        }
1.55      pk       6338:
1.234     pk       6339:        setpgt4m(&sp->sg_pte[VA_SUN4M_VPG(va)], pteproto);
1.277     pk       6340:        pm->pm_stats.resident_count++;
1.242     pk       6341: out:
1.322     ad       6342:        PMAP_UNLOCK();
1.55      pk       6343:        splx(s);
1.341     mrg      6344:        kpreempt_enable();
1.242     pk       6345:        return (error);
1.55      pk       6346: }
                   6347:
                   6348: /* enter new (or change existing) user mapping */
1.187     pk       6349: int
1.303     uwe      6350: pmap_enu4m(struct pmap *pm, vaddr_t va, vm_prot_t prot, int flags,
                   6351:           struct vm_page *pg, int pteproto)
1.55      pk       6352: {
1.124     pk       6353:        int vr, vs, *pte, tpte, s;
1.187     pk       6354:        int error = 0;
1.55      pk       6355:        struct regmap *rp;
                   6356:        struct segmap *sp;
1.308     thorpej  6357:        bool owired;
1.55      pk       6358:
1.72      pk       6359: #ifdef DEBUG
                    6360:        if (va >= KERNBASE)
                   6361:                panic("pmap_enu4m: can't enter va 0x%lx above KERNBASE", va);
                   6362: #endif
1.264     pk       6363:
                   6364:        pm->pm_flags &= ~PMAP_USERCACHECLEAN;
1.72      pk       6365:
1.55      pk       6366:        vr = VA_VREG(va);
                   6367:        vs = VA_VSEG(va);
                   6368:        rp = &pm->pm_regmap[vr];
1.175     thorpej  6369:        s = splvm();                    /* XXX conservative */
1.322     ad       6370:        PMAP_LOCK();
1.55      pk       6371:
                   6372:        if (rp->rg_segmap == NULL) {
                   6373:                /* definitely a new mapping */
1.265     pk       6374:                int mflag = PR_NOWAIT;
1.55      pk       6375:
1.265     pk       6376:        rretry:
                   6377:                sp = (struct segmap *)pool_get(&segmap_pool, mflag);
1.187     pk       6378:                if (sp == NULL) {
                   6379:                        if ((flags & PMAP_CANFAIL) != 0) {
                   6380:                                error = ENOMEM;
                   6381:                                goto out;
                   6382:                        }
1.265     pk       6383:                        mflag = PR_WAITOK;
                   6384:                        goto rretry;
1.187     pk       6385:                }
1.55      pk       6386: #ifdef DEBUG
1.187     pk       6387:                if (rp->rg_segmap != NULL)
                   6388:                        panic("pmap_enu4m: segment filled during sleep");
1.55      pk       6389: #endif
1.311     christos 6390:                qzero((void *)sp, NSEGRG * sizeof (struct segmap));
1.55      pk       6391:                rp->rg_segmap = sp;
                   6392:                rp->rg_nsegmap = 0;
                   6393:                rp->rg_seg_ptps = NULL;
                   6394:        }
                   6395:        if (rp->rg_seg_ptps == NULL) {
                   6396:                /* Need a segment table */
1.100     pk       6397:                int i, *ptd;
1.187     pk       6398:                int mflag = PR_NOWAIT;
1.73      pk       6399:
1.265     pk       6400:        sretry:
1.187     pk       6401:                ptd = pool_get(&L23_pool, mflag);
                   6402:                if (ptd == NULL) {
                   6403:                        if ((flags & PMAP_CANFAIL) != 0) {
                   6404:                                error = ENOMEM;
                   6405:                                goto out;
                   6406:                        }
1.265     pk       6407:                        mflag = PR_WAITOK;
                   6408:                        goto sretry;
1.187     pk       6409:                }
1.55      pk       6410:
1.73      pk       6411:                rp->rg_seg_ptps = ptd;
                   6412:                for (i = 0; i < SRMMU_L2SIZE; i++)
1.74      pk       6413:                        setpgt4m(&ptd[i], SRMMU_TEINVALID);
1.152     pk       6414:
                   6415:                /* Replicate segment allocation in each CPU's region table */
1.344     mrg      6416: #if defined(MULTIPROCESSOR)
1.302     briggs   6417:                for (i = 0; i < sparc_ncpus; i++)
1.152     pk       6418: #else
                   6419:                i = 0;
                   6420: #endif
                   6421:                {
1.316     mrg      6422: #if defined(MULTIPROCESSOR)
1.327     mrg      6423:                        if ((cpus[i]->flags & CPUFLG_HATCHED) == 0)
1.316     mrg      6424:                                continue;
                   6425: #endif
1.152     pk       6426:                        setpgt4m(&pm->pm_reg_ptps[i][vr],
1.311     christos 6427:                                 (VA2PA((void *)ptd) >> SRMMU_PPNPASHIFT) |
1.152     pk       6428:                                        SRMMU_TEPTD);
                   6429:                }
1.55      pk       6430:        }
                   6431:
                   6432:        sp = &rp->rg_segmap[vs];
                   6433:
1.310     thorpej  6434:        owired = false;
1.55      pk       6435:        if ((pte = sp->sg_pte) == NULL) {
                   6436:                /* definitely a new mapping */
1.100     pk       6437:                int i;
1.187     pk       6438:                int mflag = PR_NOWAIT;
1.55      pk       6439:
1.187     pk       6440:                pte = pool_get(&L23_pool, mflag);
                   6441:                if (pte == NULL) {
                   6442:                        if ((flags & PMAP_CANFAIL) != 0) {
                   6443:                                error = ENOMEM;
                   6444:                                goto out;
                   6445:                        }
1.242     pk       6446:                        panic("pmap_enter: cannot allocate PTE table");
1.55      pk       6447:                }
                   6448:
                   6449:                sp->sg_pte = pte;
                   6450:                sp->sg_npte = 1;
                   6451:                rp->rg_nsegmap++;
1.74      pk       6452:                for (i = 0; i < SRMMU_L3SIZE; i++)
                   6453:                        setpgt4m(&pte[i], SRMMU_TEINVALID);
1.72      pk       6454:                setpgt4m(&rp->rg_seg_ptps[vs],
1.311     christos 6455:                        (VA2PA((void *)pte) >> SRMMU_PPNPASHIFT) | SRMMU_TEPTD);
1.55      pk       6456:        } else {
1.248     pk       6457: #ifdef DIAGNOSTIC
                   6458:                if (sp->sg_npte <= 0)
                    6459:                        panic("pmap_enu4m: pm %p: npte %d", pm, sp->sg_npte);
                   6460: #endif
1.72      pk       6461:                /*
                   6462:                 * Might be a change: fetch old pte
                   6463:                 */
                   6464:                tpte = pte[VA_SUN4M_VPG(va)];
1.55      pk       6465:
                   6466:                if ((tpte & SRMMU_TETYPE) == SRMMU_TEPTE) {
1.1       deraadt  6467:
1.34      pk       6468:                        /* old mapping exists, and is of the same pa type */
1.55      pk       6469:                        if ((tpte & SRMMU_PPNMASK) ==
                   6470:                            (pteproto & SRMMU_PPNMASK)) {
1.1       deraadt  6471:                                /* just changing prot and/or wiring */
                   6472:                                /* caller should call this directly: */
1.248     pk       6473:                                pmap_changeprot4m(pm, va, prot, flags);
1.341     mrg      6474:                                error = 0;
                   6475:                                goto out;
1.1       deraadt  6476:                        }
                   6477:                        /*
                   6478:                         * Switcheroo: changing pa for this va.
                   6479:                         * If old pa was managed, remove from pvlist.
                   6480:                         * If old page was cached, flush cache.
                   6481:                         */
1.60      pk       6482: #ifdef DEBUG
1.187     pk       6483:                        if (pmapdebug & PDB_SWITCHMAP)
                   6484:                                printf("%s[%d]: pmap_enu: changing existing "
                   6485:                                        "va 0x%x: pte 0x%x=>0x%x\n",
                   6486:                                        curproc->p_comm, curproc->p_pid,
                   6487:                                        (int)va, tpte, pteproto);
1.60      pk       6488: #endif
1.55      pk       6489:                        if ((tpte & SRMMU_PGTYPE) == PG_SUN4M_OBMEM) {
1.236     pk       6490:                                struct vm_page *opg;
                   6491:                                if ((opg = pvhead4m(tpte)) != NULL) {
                   6492:                                        VM_MDPAGE_PVHEAD(opg)->pv_flags |=
                   6493:                                                        MR4M(tpte);
                   6494:                                        pv_unlink4m(opg, pm, va);
1.100     pk       6495:                                }
1.72      pk       6496:                                if (pm->pm_ctx && (tpte & SRMMU_PG_C))
1.214     pk       6497:                                        cache_flush_page(va, pm->pm_ctxnum);
1.31      pk       6498:                        }
1.234     pk       6499:                        /*
                   6500:                         * We end up in this `change map' branch relatively
                   6501:                         * infrequently.
                   6502:                         * Invalidate the mapping now, so we can avoid the
                   6503:                         * de-map and update protocol when setting the new
                   6504:                         * PTE below.
                   6505:                         */
                   6506:                        setpgt4m_va(va, &sp->sg_pte[VA_SUN4M_VPG(va)],
                   6507:                                SRMMU_TEINVALID, pm->pm_ctx != NULL,
                   6508:                                pm->pm_ctxnum, PMAP_CPUSET(pm));
1.277     pk       6509:                        pm->pm_stats.resident_count--;
1.296     chs      6510:                        owired = sp->sg_wiremap & (1 << VA_SUN4M_VPG(va));
1.1       deraadt  6511:                } else {
                   6512:                        /* adding new entry */
1.43      pk       6513:                        sp->sg_npte++;
1.1       deraadt  6514:                }
                   6515:        }
1.234     pk       6516:
1.278     pk       6517:        if (pg != NULL && (error = pv_link4m(pg, pm, va, &pteproto)) != 0) {
1.242     pk       6518:                if (--sp->sg_npte == 0)
                   6519:                        /* Sigh, undo pgt allocations */
                   6520:                        pgt_lvl23_remove4m(pm, rp, sp, vr, vs);
                   6521:
                   6522:                if ((flags & PMAP_CANFAIL) != 0)
                   6523:                        goto out;
                   6524:                panic("pmap_enter: cannot allocate PV entry");
                   6525:        }
1.1       deraadt  6526:
                   6527:        /*
1.72      pk       6528:         * Update PTEs, flush TLB as necessary.
1.1       deraadt  6529:         */
1.234     pk       6530:        setpgt4m(&sp->sg_pte[VA_SUN4M_VPG(va)], pteproto);
1.277     pk       6531:        pm->pm_stats.resident_count++;
1.296     chs      6532:        if (owired) {
                   6533:                pm->pm_stats.wired_count--;
                   6534:                sp->sg_wiremap &= ~(1 << VA_SUN4M_VPG(va));
                   6535:        }
                   6536:        if (flags & PMAP_WIRED) {
                   6537:                pm->pm_stats.wired_count++;
                   6538:                sp->sg_wiremap |= (1 << VA_SUN4M_VPG(va));
                   6539:        }
1.1       deraadt  6540:
1.187     pk       6541: out:
1.322     ad       6542:        PMAP_UNLOCK();
1.1       deraadt  6543:        splx(s);
1.187     pk       6544:        return (error);
1.1       deraadt  6545: }
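
/*
 * The SRMMU has no per-PTE wired bit, so pmap_enu4m() above keeps
 * wiring in the per-segment sg_wiremap bitmap instead.  A condensed
 * sketch of that bookkeeping (`example_set_wired4m' is hypothetical):
 */
static void
example_set_wired4m(struct pmap *pm, struct segmap *sp, vaddr_t va,
		    bool wired)
{
	int bit = 1 << VA_SUN4M_VPG(va);   /* one bit per page in segment */

	if (wired && (sp->sg_wiremap & bit) == 0) {
		sp->sg_wiremap |= bit;
		pm->pm_stats.wired_count++;
	} else if (!wired && (sp->sg_wiremap & bit) != 0) {
		sp->sg_wiremap &= ~bit;
		pm->pm_stats.wired_count--;
	}
}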
1.151     chs      6546:
                   6547: void
1.337     cegger   6548: pmap_kenter_pa4m(vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
1.151     chs      6549: {
1.194     chs      6550:        struct pmap *pm = pmap_kernel();
                   6551:        struct regmap *rp;
                   6552:        struct segmap *sp;
1.281     pk       6553:        int pteproto, vr, vs;
1.194     chs      6554:
                   6555:        /* Initialise pteproto with cache bit */
                   6556:        pteproto = (pa & PMAP_NC) == 0 ? SRMMU_PG_C : 0;
1.269     chs      6557:        pteproto |= SRMMU_TEPTE | PPROT_S;
1.194     chs      6558:        pteproto |= PMAP_T2PTE_SRMMU(pa);
                   6559:        pteproto |= (atop(pa & ~PMAP_TNC_SRMMU) << SRMMU_PPNSHIFT);
1.286     pk       6560:        pteproto |= pte_kprot4m(prot);
1.194     chs      6561:
                   6562:        vr = VA_VREG(va);
                   6563:        vs = VA_VSEG(va);
                   6564:        rp = &pm->pm_regmap[vr];
                   6565:        sp = &rp->rg_segmap[vs];
                   6566:
1.281     pk       6567:        KASSERT((sp->sg_pte[VA_SUN4M_VPG(va)] & SRMMU_TETYPE) != SRMMU_TEPTE);
1.194     chs      6568:
1.235     pk       6569:        setpgt4m(&sp->sg_pte[VA_SUN4M_VPG(va)], pteproto);
1.151     chs      6570: }
                   6571:
                   6572: void
1.303     uwe      6573: pmap_kremove4m(vaddr_t va, vsize_t len)
1.151     chs      6574: {
1.194     chs      6575:        struct pmap *pm = pmap_kernel();
                   6576:        struct regmap *rp;
                   6577:        struct segmap *sp;
                   6578:        vaddr_t endva, nva;
1.217     pk       6579:        int vr, vs;
1.295     pk       6580:        int tpte, perpage, npg, s;
                   6581:
                   6582:        /*
                   6583:         * The kernel pmap doesn't need to be locked, but the demap lock
                   6584:         * in updatepte() requires interrupt protection.
                   6585:         */
1.341     mrg      6586:        kpreempt_disable();
1.295     pk       6587:        s = splvm();
1.194     chs      6588:
                   6589:        endva = va + len;
                   6590:        for (; va < endva; va = nva) {
                   6591:                /* do one virtual segment at a time */
                   6592:                vr = VA_VREG(va);
                   6593:                vs = VA_VSEG(va);
                   6594:                nva = VSTOVA(vr, vs + 1);
                   6595:                if (nva == 0 || nva > endva) {
                   6596:                        nva = endva;
                   6597:                }
                   6598:
                   6599:                rp = &pm->pm_regmap[vr];
                   6600:                sp = &rp->rg_segmap[vs];
                   6601:
1.281     pk       6602:                /* decide how to flush the cache */
1.194     chs      6603:                npg = (nva - va) >> PGSHIFT;
1.257     pk       6604:                if (npg > PMAP_SFL_THRESHOLD) {
1.194     chs      6605:                        /* flush the whole segment */
                   6606:                        perpage = 0;
                   6607:                        if (CACHEINFO.c_vactype != VAC_NONE) {
1.214     pk       6608:                                cache_flush_segment(vr, vs, 0);
1.194     chs      6609:                        }
                   6610:                } else {
                   6611:                        /*
                   6612:                         * flush each page individually;
                   6613:                         * some never need flushing
                   6614:                         */
                   6615:                        perpage = (CACHEINFO.c_vactype != VAC_NONE);
                   6616:                }
                   6617:                for (; va < nva; va += NBPG) {
                   6618:                        tpte = sp->sg_pte[VA_SUN4M_VPG(va)];
1.281     pk       6619:                        if ((tpte & SRMMU_TETYPE) != SRMMU_TEPTE)
1.194     chs      6620:                                continue;
1.281     pk       6621:
1.194     chs      6622:                        if ((tpte & SRMMU_PGTYPE) == PG_SUN4M_OBMEM) {
                   6623:                                /* if cacheable, flush page as needed */
                   6624:                                if (perpage && (tpte & SRMMU_PG_C))
1.214     pk       6625:                                        cache_flush_page(va, 0);
1.194     chs      6626:                        }
1.196     mrg      6627:                        setpgt4m_va(va, &sp->sg_pte[VA_SUN4M_VPG(va)],
1.226     mrg      6628:                                 SRMMU_TEINVALID, 1, 0, CPUSET_ALL);
1.194     chs      6629:                }
1.151     chs      6630:        }
1.295     pk       6631:        splx(s);
1.341     mrg      6632:        kpreempt_enable();
1.151     chs      6633: }
                   6634:
1.248     pk       6635: /*
                   6636:  * Change protection on a range of kernel addresses.
                   6637:  */
                   6638: void
                   6639: pmap_kprotect4m(vaddr_t va, vsize_t size, vm_prot_t prot)
                   6640: {
                   6641:        struct pmap *pm = pmap_kernel();
1.295     pk       6642:        int pte, newprot, s;
1.248     pk       6643:        struct regmap *rp;
                   6644:        struct segmap *sp;
                   6645:
1.250     pk       6646:        size = roundup(size,NBPG);
1.286     pk       6647:        newprot = pte_kprot4m(prot);
1.248     pk       6648:
1.295     pk       6649:        /*
                   6650:         * The kernel pmap doesn't need to be locked, but the demap lock
                   6651:         * in updatepte() requires interrupt protection.
                   6652:         */
1.341     mrg      6653:        kpreempt_disable();
1.295     pk       6654:        s = splvm();
                   6655:
1.248     pk       6656:        while (size > 0) {
                   6657:                rp = &pm->pm_regmap[VA_VREG(va)];
                   6658:                sp = &rp->rg_segmap[VA_VSEG(va)];
                   6659:                pte = sp->sg_pte[VA_SUN4M_VPG(va)];
                   6660:
                   6661:                /*
                   6662:                 * Flush cache if page has been referenced to
                   6663:                 * avoid stale protection bits in the cache tags.
                   6664:                 */
                   6665:                if ((pte & (SRMMU_PG_C|SRMMU_PGTYPE)) ==
                   6666:                    (SRMMU_PG_C|PG_SUN4M_OBMEM))
                   6667:                        cache_flush_page(va, 0);
                   6668:
                   6669:                setpgt4m_va(va, &sp->sg_pte[VA_SUN4M_VPG(va)],
                   6670:                         (pte & ~SRMMU_PROT_MASK) | newprot,
                   6671:                         1, pm->pm_ctxnum, PMAP_CPUSET(pm));
                   6672:
                   6673:                va += NBPG;
1.250     pk       6674:                size -= NBPG;
1.248     pk       6675:        }
1.295     pk       6676:        splx(s);
1.341     mrg      6677:        kpreempt_enable();
1.248     pk       6678: }
1.210     thorpej  6679: #endif /* SUN4M || SUN4D */
1.1       deraadt  6680:
                   6681: /*
1.147     thorpej  6682:  * Clear the wiring attribute for a map/virtual-address pair.
1.1       deraadt  6683:  */
                   6684: /* ARGSUSED */
                   6685: void
1.303     uwe      6686: pmap_unwire(struct pmap *pm, vaddr_t va)
1.1       deraadt  6687: {
1.296     chs      6688:        int vr, vs, *ptep;
1.267     pk       6689:        struct regmap *rp;
                   6690:        struct segmap *sp;
1.308     thorpej  6691:        bool owired;
1.268     pk       6692:
1.341     mrg      6693:        kpreempt_disable();
1.267     pk       6694:        vr = VA_VREG(va);
                   6695:        vs = VA_VSEG(va);
                   6696:        rp = &pm->pm_regmap[vr];
                   6697:        sp = &rp->rg_segmap[vs];
1.268     pk       6698:
1.310     thorpej  6699:        owired = false;
1.296     chs      6700:        if (CPU_HAS_SUNMMU) {
                   6701:                ptep = &sp->sg_pte[VA_VPG(va)];
                   6702:                owired = *ptep & PG_WIRED;
                   6703:                *ptep &= ~PG_WIRED;
                   6704:        }
                   6705:        if (CPU_HAS_SRMMU) {
                   6706:                owired = sp->sg_wiremap & (1 << VA_SUN4M_VPG(va));
                   6707:                sp->sg_wiremap &= ~(1 << VA_SUN4M_VPG(va));
                   6708:        }
                   6709:        if (!owired) {
1.267     pk       6710:                pmap_stats.ps_useless_changewire++;
1.346     martin   6711:                kpreempt_enable();
1.267     pk       6712:                return;
                   6713:        }
1.1       deraadt  6714:
1.296     chs      6715:        pm->pm_stats.wired_count--;
1.297     chs      6716: #if defined(SUN4) || defined(SUN4C)
1.296     chs      6717:        if (CPU_HAS_SUNMMU && --sp->sg_nwired <= 0) {
1.267     pk       6718: #ifdef DIAGNOSTIC
                   6719:                if (sp->sg_nwired > sp->sg_npte || sp->sg_nwired < 0)
                   6720:                        panic("pmap_unwire: pm %p, va %lx: nleft=%d, nwired=%d",
                   6721:                                pm, va, sp->sg_npte, sp->sg_nwired);
                   6722: #endif
                   6723:                if (sp->sg_pmeg != seginval)
                   6724:                        mmu_pmeg_unlock(sp->sg_pmeg);
                   6725:        }
1.297     chs      6726: #endif /* SUN4 || SUN4C */
1.341     mrg      6727:        kpreempt_enable();
1.1       deraadt  6728: }
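
/*
 * Usage sketch (hypothetical caller): the MI VM system unwires a
 * range one page at a time, e.g. when undoing an mlock():
 */
static void
example_unwire_range(struct pmap *pm, vaddr_t sva, vaddr_t eva)
{
	vaddr_t va;

	for (va = sva; va < eva; va += NBPG)
		pmap_unwire(pm, va);
	pmap_update(pm);
}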
                   6729:
                   6730: /*
                   6731:  * Extract the physical page address associated
                   6732:  * with the given map/virtual_address pair.
                   6733:  * GRR, the vm code knows; we should not have to do this!
                   6734:  */
1.55      pk       6735:
                   6736: #if defined(SUN4) || defined(SUN4C)
1.308     thorpej  6737: bool
1.303     uwe      6738: pmap_extract4_4c(struct pmap *pm, vaddr_t va, paddr_t *pap)
1.1       deraadt  6739: {
1.124     pk       6740:        int vr, vs;
1.43      pk       6741:        struct regmap *rp;
                   6742:        struct segmap *sp;
1.265     pk       6743:        int pte, *ptep;
1.1       deraadt  6744:
1.43      pk       6745:        vr = VA_VREG(va);
                   6746:        vs = VA_VSEG(va);
                   6747:        rp = &pm->pm_regmap[vr];
                   6748:        if (rp->rg_segmap == NULL) {
1.90      pk       6749: #ifdef DEBUG
                   6750:                if (pmapdebug & PDB_FOLLOW)
                   6751:                        printf("pmap_extract: invalid segment (%d)\n", vr);
                   6752: #endif
1.310     thorpej  6753:                return (false);
1.43      pk       6754:        }
                   6755:        sp = &rp->rg_segmap[vs];
1.265     pk       6756:        ptep = sp->sg_pte;
                   6757:        if (ptep == NULL) {
1.90      pk       6758: #ifdef DEBUG
1.265     pk       6759:                if (pmapdebug & PDB_FOLLOW)
                   6760:                        printf("pmap_extract: invalid segment\n");
1.90      pk       6761: #endif
1.310     thorpej  6762:                return (false);
1.1       deraadt  6763:        }
1.265     pk       6764:        pte = ptep[VA_VPG(va)];
                   6765:
                   6766:        if ((pte & PG_V) == 0) {
1.90      pk       6767: #ifdef DEBUG
                   6768:                if (pmapdebug & PDB_FOLLOW)
                   6769:                        printf("pmap_extract: invalid pte\n");
                   6770: #endif
1.310     thorpej  6771:                return (false);
1.1       deraadt  6772:        }
1.265     pk       6773:        pte &= PG_PFNUM;
1.149     thorpej  6774:        if (pap != NULL)
1.265     pk       6775:                *pap = (pte << PGSHIFT) | (va & PGOFSET);
1.310     thorpej  6776:        return (true);
1.1       deraadt  6777: }
1.236     pk       6778: #endif /* SUN4 || SUN4C */
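/*
 * Illustrative caller (an editorial sketch, not from this file): MI
 * code invokes the generic pmap_extract(), which this port resolves
 * to one of the per-MMU variants defined here.  A typical use:
 *
 *	paddr_t pa;
 *
 *	if (!pmap_extract(pmap_kernel(), va, &pa))
 *		panic("va 0x%lx not mapped", va);
 *	// ... use pa ...
 */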
1.55      pk       6779:
1.210     thorpej  6780: #if defined(SUN4M) || defined(SUN4D)   /* SRMMU version of pmap_extract */
1.55      pk       6781: /*
                   6782:  * Extract the physical page address associated
                   6783:  * with the given map/virtual_address pair.
                   6784:  * GRR, the vm code knows; we should not have to do this!
                   6785:  */
1.308     thorpej  6786: bool
1.303     uwe      6787: pmap_extract4m(struct pmap *pm, vaddr_t va, paddr_t *pap)
1.55      pk       6788: {
1.265     pk       6789:        struct regmap *rp;
                   6790:        struct segmap *sp;
1.90      pk       6791:        int pte;
1.310     thorpej  6792:        int vr, vs, s, v = false;
1.340     martin   6793:        bool can_lock = lock_available;
1.265     pk       6794:
                   6795:        vr = VA_VREG(va);
                   6796:        vs = VA_VSEG(va);
1.55      pk       6797:
1.295     pk       6798:        /*
                   6799:         * The kernel pmap doesn't need to be locked, but the demap lock
                   6800:         * requires interrupt protection.
                   6801:         */
1.242     pk       6802:        s = splvm();
1.322     ad       6803:        if (pm != pmap_kernel()) {
                   6804:                PMAP_LOCK();
                   6805:        }
1.295     pk       6806:
1.265     pk       6807:        rp = &pm->pm_regmap[vr];
                   6808:        if (rp->rg_segmap == NULL) {
1.90      pk       6809: #ifdef DEBUG
                   6810:                if (pmapdebug & PDB_FOLLOW)
1.145     pk       6811:                        printf("pmap_extract: no segmap\n");
1.90      pk       6812: #endif
1.242     pk       6813:                goto out;
1.90      pk       6814:        }
1.113     pk       6815:
1.265     pk       6816:        sp = &rp->rg_segmap[vs];
                   6817:        if (sp->sg_pte == NULL) {
1.113     pk       6818: #ifdef DEBUG
                   6819:                if (pmapdebug & PDB_FOLLOW)
1.145     pk       6820:                        printf("pmap_extract: no ptes\n");
1.113     pk       6821: #endif
1.242     pk       6822:                goto out;
1.113     pk       6823:        }
                   6824:
1.265     pk       6825:        pte = sp->sg_pte[VA_SUN4M_VPG(va)];
1.90      pk       6826:        if ((pte & SRMMU_TETYPE) != SRMMU_TEPTE) {
1.72      pk       6827: #ifdef DEBUG
1.90      pk       6828:                if (pmapdebug & PDB_FOLLOW)
                   6829:                        printf("pmap_extract: invalid pte of type %d\n",
                   6830:                               pte & SRMMU_TETYPE);
                   6831: #endif
1.290     pk       6832:                /*
                   6833:                 * We can read a spurious invalid pte if the system is in
                   6834:                 * the middle of the PTE update protocol. So, acquire the
                   6835:                 * demap lock and retry.
                   6836:                 */
1.340     martin   6837:                if (__predict_true(can_lock))
                   6838:                        mutex_spin_enter(&demap_lock);
1.290     pk       6839:                pte = sp->sg_pte[VA_SUN4M_VPG(va)];
1.340     martin   6840:                if (__predict_true(can_lock))
                   6841:                        mutex_spin_exit(&demap_lock);
1.290     pk       6842:                if ((pte & SRMMU_TETYPE) != SRMMU_TEPTE)
                   6843:                        goto out;
1.72      pk       6844:        }
1.241     pk       6845: #ifdef DIAGNOSTIC
1.281     pk       6846:        if (pm != pmap_kernel() && sp->sg_npte <= 0)
1.266     hannken  6847:        panic("pmap_extract: pm %p: npte = %d", pm, sp->sg_npte);
1.241     pk       6848: #endif
1.55      pk       6849:
1.149     thorpej  6850:        if (pap != NULL)
                   6851:                *pap = ptoa((pte & SRMMU_PPNMASK) >> SRMMU_PPNSHIFT) |
                   6852:                    VA_OFF(va);
1.242     pk       6853:
1.310     thorpej  6854:        v = true;
1.242     pk       6855: out:
1.322     ad       6856:        if (pm != pmap_kernel()) {
                   6857:                PMAP_UNLOCK();
                   6858:        }
1.242     pk       6859:        splx(s);
                   6860:        return (v);
1.55      pk       6861: }
                   6862: #endif /* SUN4M || SUN4D */
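/*
 * Editorial note on the demap_lock retry above (our reading of the
 * PTE update protocol, hedged): an updater modifying a live SRMMU PTE
 * first takes demap_lock, swaps SRMMU_TEINVALID into the slot,
 * flushes the TLB entry, then stores the new PTE and releases the
 * lock.  A reader that observes an invalid PTE may therefore be
 * racing that window; re-reading under demap_lock serializes with the
 * updater and yields the final value.
 */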
1.1       deraadt  6863:
1.303     uwe      6864: int pmap_copy_disabled = 0;
                   6865:
1.1       deraadt  6866: /*
                   6867:  * Copy the range specified by src_addr/len
                   6868:  * from the source map to the range dst_addr/len
                   6869:  * in the destination map.
                   6870:  *
                   6871:  * This routine is only advisory and need not do anything.
                   6872:  */
                   6873: /* ARGSUSED */
                   6874: void
1.303     uwe      6875: pmap_copy(struct pmap *dst_pmap, struct pmap *src_pmap,
                   6876:          vaddr_t dst_addr, vsize_t len, vaddr_t src_addr)
1.1       deraadt  6877: {
1.94      pk       6878: #if notyet
1.267     pk       6879:        struct regmap *rp;
                   6880:        struct segmap *sp;
1.92      pk       6881:
1.94      pk       6882:        if (pmap_copy_disabled)
                   6883:                return;
1.92      pk       6884: #ifdef DIAGNOSTIC
                   6885:        if (VA_OFF(src_addr) != 0)
                   6886:                printf("pmap_copy: addr not page aligned: 0x%lx\n", src_addr);
                   6887:        if ((len & (NBPG-1)) != 0)
                   6888:                printf("pmap_copy: length not page aligned: 0x%lx\n", len);
                   6889: #endif
                   6890:
                   6891:        if (src_pmap == NULL)
                   6892:                return;
                   6893:
1.210     thorpej  6894:        if (CPU_HAS_SRMMU) {
1.92      pk       6895:                int i, npg, pte;
1.124     pk       6896:                paddr_t pa;
1.92      pk       6897:
                   6898:                npg = len >> PGSHIFT;
                   6899:                for (i = 0; i < npg; i++) {
1.267     pk       6900:                        if ((rp = src_pmap->pm_regmap) == NULL)
1.115     pk       6901:                                continue;
1.267     pk       6902:                        rp += VA_VREG(src_addr);
1.115     pk       6903:
1.267     pk       6904:                        if ((sp = rp->rg_segmap) == NULL)
1.92      pk       6905:                                continue;
1.267     pk       6906:                        sp += VA_VSEG(src_addr);
                   6907:                        if (sp->sg_npte == 0)
1.92      pk       6908:                                continue;
1.115     pk       6909:
1.267     pk       6910:                        pte = sp->sg_pte[VA_SUN4M_VPG(src_addr)];
1.92      pk       6911:                        if ((pte & SRMMU_TETYPE) != SRMMU_TEPTE)
                   6912:                                continue;
                   6913:
                   6914:                        pa = ptoa((pte & SRMMU_PPNMASK) >> SRMMU_PPNSHIFT);
1.55      pk       6915:                        pmap_enter(dst_pmap, dst_addr,
1.92      pk       6916:                                   pa,
1.284     pk       6917:                                   /* XXX - need to copy VM_PROT_EXEC too */
1.60      pk       6918:                                   (pte & PPROT_WRITE)
1.92      pk       6919:                                        ? (VM_PROT_WRITE | VM_PROT_READ)
1.60      pk       6920:                                        : VM_PROT_READ,
1.153     thorpej  6921:                                   0);
1.55      pk       6922:                        src_addr += NBPG;
                   6923:                        dst_addr += NBPG;
                   6924:                }
1.198     chris    6925:                pmap_update(dst_pmap);
1.55      pk       6926:        }
                   6927: #endif
1.1       deraadt  6928: }
                   6929:
1.55      pk       6930: #if defined(SUN4) || defined(SUN4C)
1.1       deraadt  6931: /*
                   6932:  * Clear the modify bit for the given physical page.
                   6933:  */
1.308     thorpej  6934: bool
1.303     uwe      6935: pmap_clear_modify4_4c(struct vm_page *pg)
1.1       deraadt  6936: {
1.308     thorpej  6937:        bool rv;
1.1       deraadt  6938:
1.236     pk       6939:        (void) pv_syncflags4_4c(pg);
                   6940:        rv = VM_MDPAGE_PVHEAD(pg)->pv_flags & PV_MOD;
                   6941:        VM_MDPAGE_PVHEAD(pg)->pv_flags &= ~PV_MOD;
1.305     uwe      6942:        return (rv);
1.1       deraadt  6943: }
                   6944:
                   6945: /*
                   6946:  * Tell whether the given physical page has been modified.
                   6947:  */
1.308     thorpej  6948: bool
1.303     uwe      6949: pmap_is_modified4_4c(struct vm_page *pg)
1.1       deraadt  6950: {
                   6951:
1.236     pk       6952:        return (VM_MDPAGE_PVHEAD(pg)->pv_flags & PV_MOD ||
                   6953:                pv_syncflags4_4c(pg) & PV_MOD);
1.1       deraadt  6954: }
                   6955:
                   6956: /*
                   6957:  * Clear the reference bit for the given physical page.
                   6958:  */
1.308     thorpej  6959: bool
1.303     uwe      6960: pmap_clear_reference4_4c(struct vm_page *pg)
1.1       deraadt  6961: {
1.308     thorpej  6962:        bool rv;
1.1       deraadt  6963:
1.236     pk       6964:        (void) pv_syncflags4_4c(pg);
                   6965:        rv = VM_MDPAGE_PVHEAD(pg)->pv_flags & PV_REF;
                   6966:        VM_MDPAGE_PVHEAD(pg)->pv_flags &= ~PV_REF;
1.305     uwe      6967:        return (rv);
1.1       deraadt  6968: }
                   6969:
                   6970: /*
                   6971:  * Tell whether the given physical page has been referenced.
                   6972:  */
1.308     thorpej  6973: bool
1.303     uwe      6974: pmap_is_referenced4_4c(struct vm_page *pg)
1.1       deraadt  6975: {
1.181     pk       6976:
1.236     pk       6977:        return (VM_MDPAGE_PVHEAD(pg)->pv_flags & PV_REF ||
                   6978:                pv_syncflags4_4c(pg) & PV_REF);
1.1       deraadt  6979: }
1.236     pk       6980: #endif /* SUN4 || SUN4C */
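/*
 * Editorial note: the mod/ref routines (the sun4/sun4c versions above
 * and the SRMMU versions below) share one idiom: the PV_MOD/PV_REF
 * (resp. PV_MOD4M/PV_REF4M) bits in pv_flags cache state already
 * collected from the hardware, while pv_syncflags*() walks the page's
 * mappings to gather and clear the MMU's own modified/referenced bits
 * into that cache.  Hence the pattern
 * "cached flag || pv_syncflags(pg) & flag" in the test routines.
 */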
1.55      pk       6981:
1.210     thorpej  6982: #if defined(SUN4M) || defined(SUN4D)
1.58      pk       6983:
                   6984: /*
1.210     thorpej  6985:  * SRMMU versions of bit test/set routines
1.58      pk       6986:  *
                   6987:  * Note that the 4m-specific routines should eventually service these
                   6988:  * requests from their page tables, and the whole pvlist bit mess should
                   6989:  * be dropped for the 4m (unless this causes a performance hit from
                   6990:  * tracing down pagetables/regmap/segmaps).
                   6991:  */
                   6992:
1.55      pk       6993: /*
                   6994:  * Clear the modify bit for the given physical page.
                   6995:  */
1.308     thorpej  6996: bool
1.303     uwe      6997: pmap_clear_modify4m(struct vm_page *pg)
1.55      pk       6998: {
1.308     thorpej  6999:        bool rv;
1.55      pk       7000:
1.236     pk       7001:        (void) pv_syncflags4m(pg);
                   7002:        rv = VM_MDPAGE_PVHEAD(pg)->pv_flags & PV_MOD4M;
                   7003:        VM_MDPAGE_PVHEAD(pg)->pv_flags &= ~PV_MOD4M;
1.181     pk       7004:        return (rv);
1.55      pk       7005: }
                   7006:
                   7007: /*
                   7008:  * Tell whether the given physical page has been modified.
                   7009:  */
1.308     thorpej  7010: bool
1.303     uwe      7011: pmap_is_modified4m(struct vm_page *pg)
1.55      pk       7012: {
1.181     pk       7013:
1.236     pk       7014:        return (VM_MDPAGE_PVHEAD(pg)->pv_flags & PV_MOD4M ||
                   7015:                pv_syncflags4m(pg) & PV_MOD4M);
1.55      pk       7016: }
                   7017:
                   7018: /*
                   7019:  * Clear the reference bit for the given physical page.
                   7020:  */
1.308     thorpej  7021: bool
1.303     uwe      7022: pmap_clear_reference4m(struct vm_page *pg)
1.55      pk       7023: {
1.308     thorpej  7024:        bool rv;
1.55      pk       7025:
1.236     pk       7026:        (void) pv_syncflags4m(pg);
                   7027:        rv = VM_MDPAGE_PVHEAD(pg)->pv_flags & PV_REF4M;
                   7028:        VM_MDPAGE_PVHEAD(pg)->pv_flags &= ~PV_REF4M;
1.181     pk       7029:        return (rv);
1.55      pk       7030: }
                   7031:
                   7032: /*
                   7033:  * Tell whether the given physical page has been referenced.
                   7034:  */
1.309     matt     7035: bool
1.303     uwe      7036: pmap_is_referenced4m(struct vm_page *pg)
1.55      pk       7037: {
1.181     pk       7038:
1.236     pk       7039:        return (VM_MDPAGE_PVHEAD(pg)->pv_flags & PV_REF4M ||
                   7040:                pv_syncflags4m(pg) & PV_REF4M);
1.55      pk       7041: }
1.236     pk       7042: #endif /* SUN4M || SUN4D */
1.2       deraadt  7043:
                   7044: /*
1.1       deraadt  7045:  * Fill the given MI physical page with zero bytes.
                   7046:  *
                   7047:  * We avoid stomping on the cache.
                   7048:  * XXX might be faster to use destination's context and allow cache to fill?
                   7049:  */
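/*
 * Editorial summary of the technique used by the zero/copy routines
 * below (details hedged): each CPU reserves one or two kernel virtual
 * pages (cpuinfo.vpage[]) as a mapping window.  The physical page is
 * entered there, usually uncached (PG_NC, or SRMMU_PG_C left clear)
 * so the operation does not displace cache contents; the page is then
 * zeroed or copied through the window, and the temporary mapping is
 * torn down again.
 */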
1.55      pk       7050:
                   7051: #if defined(SUN4) || defined(SUN4C)
                   7052:
1.1       deraadt  7053: void
1.303     uwe      7054: pmap_zero_page4_4c(paddr_t pa)
1.1       deraadt  7055: {
1.236     pk       7056:        struct vm_page *pg;
1.311     christos 7057:        void *va;
1.124     pk       7058:        int pte;
1.1       deraadt  7059:
1.236     pk       7060:        if ((pg = PHYS_TO_VM_PAGE(pa)) != NULL) {
1.1       deraadt  7061:                /*
                   7062:                 * The following might not be necessary since the page
                   7063:                 * is being cleared because it is about to be allocated,
                   7064:                 * i.e., is in use by no one.
                   7065:                 */
1.236     pk       7066:                pv_flushcache4_4c(pg);
1.60      pk       7067:        }
1.236     pk       7068:        pte = PG_V | PG_S | PG_W | PG_NC | (atop(pa) & PG_PFNUM);
1.1       deraadt  7069:
1.228     pk       7070:        va = cpuinfo.vpage[0];
1.55      pk       7071:        setpte4(va, pte);
1.1       deraadt  7072:        qzero(va, NBPG);
1.55      pk       7073:        setpte4(va, 0);
1.1       deraadt  7074: }
                   7075:
                   7076: /*
                   7077:  * Copy the given MI physical source page to its destination.
                   7078:  *
                   7079:  * We avoid stomping on the cache as above (with same `XXX' note).
                   7080:  * We must first flush any write-back cache for the source page.
                   7081:  * We go ahead and stomp on the kernel's virtual cache for the
                   7082:  * source page, since the cache can read memory MUCH faster than
                   7083:  * the processor.
                   7084:  */
                   7085: void
1.303     uwe      7086: pmap_copy_page4_4c(paddr_t src, paddr_t dst)
1.1       deraadt  7087: {
1.236     pk       7088:        struct vm_page *pg;
1.313     mrg      7089:        char *sva, *dva;
1.124     pk       7090:        int spte, dpte;
1.1       deraadt  7091:
1.236     pk       7092:        if ((pg = PHYS_TO_VM_PAGE(src)) != NULL) {
1.69      pk       7093:                if (CACHEINFO.c_vactype == VAC_WRITEBACK)
1.236     pk       7094:                        pv_flushcache4_4c(pg);
1.60      pk       7095:        }
1.236     pk       7096:        spte = PG_V | PG_S | (atop(src) & PG_PFNUM);
1.1       deraadt  7097:
1.236     pk       7098:        if ((pg = PHYS_TO_VM_PAGE(dst)) != NULL) {
1.1       deraadt  7099:                /* similar `might not be necessary' comment applies */
1.69      pk       7100:                if (CACHEINFO.c_vactype != VAC_NONE)
1.236     pk       7101:                        pv_flushcache4_4c(pg);
1.60      pk       7102:        }
1.236     pk       7103:        dpte = PG_V | PG_S | PG_W | PG_NC | (atop(dst) & PG_PFNUM);
1.1       deraadt  7104:
1.228     pk       7105:        sva = cpuinfo.vpage[0];
                   7106:        dva = cpuinfo.vpage[1];
1.55      pk       7107:        setpte4(sva, spte);
                   7108:        setpte4(dva, dpte);
1.1       deraadt  7109:        qcopy(sva, dva, NBPG);  /* loads cache, so we must ... */
1.214     pk       7110:        cache_flush_page((vaddr_t)sva, getcontext4());
1.55      pk       7111:        setpte4(sva, 0);
                   7112:        setpte4(dva, 0);
                   7113: }
1.236     pk       7114: #endif /* SUN4 || SUN4C */
1.55      pk       7115:
1.210     thorpej  7116: #if defined(SUN4M) || defined(SUN4D)   /* SRMMU version of copy/zero routines */
1.55      pk       7117: /*
                   7118:  * Fill the given MI physical page with zero bytes.
                   7119:  *
                   7120:  * We avoid stomping on the cache.
                   7121:  * XXX might be faster to use destination's context and allow cache to fill?
                   7122:  */
                   7123: void
1.303     uwe      7124: pmap_zero_page4m(paddr_t pa)
1.55      pk       7125: {
1.236     pk       7126:        struct vm_page *pg;
1.311     christos 7127:        void *va;
1.124     pk       7128:        int pte;
1.55      pk       7129:
1.341     mrg      7130:        kpreempt_disable();
1.236     pk       7131:        if ((pg = PHYS_TO_VM_PAGE(pa)) != NULL) {
1.55      pk       7132:                /*
1.167     pk       7133:                 * The following VAC flush might not be necessary since the
                   7134:                 * page is being cleared because it is about to be allocated,
1.55      pk       7135:                 * i.e., is in use by no one.
1.167     pk       7136:                 * In the case of a physical cache, a flush (or just an
                   7137:                 * invalidate, if possible) is usually necessary when using
                   7138:                 * uncached access to clear it.
1.55      pk       7139:                 */
1.69      pk       7140:                if (CACHEINFO.c_vactype != VAC_NONE)
1.236     pk       7141:                        pv_flushcache4m(pg);
1.167     pk       7142:                else
                   7143:                        pcache_flush_page(pa, 1);
1.60      pk       7144:        }
1.236     pk       7145:        pte = SRMMU_TEPTE | PPROT_N_RWX | (pa >> SRMMU_PPNPASHIFT);
1.69      pk       7146:        if (cpuinfo.flags & CPUFLG_CACHE_MANDATORY)
1.68      abrown   7147:                pte |= SRMMU_PG_C;
                   7148:
1.228     pk       7149:        va = cpuinfo.vpage[0];
                   7150:        setpgt4m(cpuinfo.vpage_pte[0], pte);
1.55      pk       7151:        qzero(va, NBPG);
1.228     pk       7152:        /*
                   7153:         * Remove temporary mapping (which is kernel-only, so the
                   7154:         * context used for TLB flushing does not matter)
                   7155:         */
                   7156:        sp_tlb_flush((int)va, 0, ASI_SRMMUFP_L3);
                   7157:        setpgt4m(cpuinfo.vpage_pte[0], SRMMU_TEINVALID);
1.341     mrg      7158:        kpreempt_enable();
1.55      pk       7159: }
                   7160:
1.159     pk       7161: /*
                   7162:  * Viking/MXCC specific version of pmap_zero_page
                   7163:  */
1.158     pk       7164: void
1.303     uwe      7165: pmap_zero_page_viking_mxcc(paddr_t pa)
1.158     pk       7166: {
                   7167:        u_int offset;
                   7168:        u_int stream_data_addr = MXCC_STREAM_DATA;
1.305     uwe      7169:        uint64_t v = (uint64_t)pa;
1.158     pk       7170:
1.341     mrg      7171:        kpreempt_disable();
1.158     pk       7172:        /* Load MXCC stream data register with 0 (bottom 32 bytes only) */
                   7173:        stda(stream_data_addr+0, ASI_CONTROL, 0);
                   7174:        stda(stream_data_addr+8, ASI_CONTROL, 0);
                   7175:        stda(stream_data_addr+16, ASI_CONTROL, 0);
                   7176:        stda(stream_data_addr+24, ASI_CONTROL, 0);
                   7177:
                   7178:        /* Then write the stream data register to each block in the page */
                   7179:        v |= MXCC_STREAM_C;
                   7180:        for (offset = 0; offset < NBPG; offset += MXCC_STREAM_BLKSZ) {
                   7181:                stda(MXCC_STREAM_DST, ASI_CONTROL, v | offset);
                   7182:        }
1.341     mrg      7183:        kpreempt_enable();
1.158     pk       7184: }
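/*
 * Editorial note (hedged): the MXCC stream operations above take
 * physical addresses, so the page never needs a virtual mapping at
 * all.  The stream data register is zeroed first; each stda() to
 * MXCC_STREAM_DST then emits one MXCC_STREAM_BLKSZ-sized block of the
 * page, with MXCC_STREAM_C requesting cache-coherent stores.
 */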
                   7185:
1.55      pk       7186: /*
1.159     pk       7187:  * HyperSPARC/RT625 specific version of pmap_zero_page
                   7188:  */
                   7189: void
1.303     uwe      7190: pmap_zero_page_hypersparc(paddr_t pa)
1.159     pk       7191: {
1.236     pk       7192:        struct vm_page *pg;
1.311     christos 7193:        void *va;
1.159     pk       7194:        int pte;
                   7195:        int offset;
                   7196:
1.341     mrg      7197:        kpreempt_disable();
1.159     pk       7198:        /*
                   7199:         * We still have to map the page, since ASI_BLOCKFILL
                   7200:         * takes virtual addresses. This also means we have to
                   7201:         * consider cache aliasing; therefore we still need
                   7202:         * to flush the cache here. All we gain is the speed-up
                   7203:         * in the zero-fill loop itself.
                   7204:         */
1.236     pk       7205:        if ((pg = PHYS_TO_VM_PAGE(pa)) != NULL) {
1.159     pk       7206:                /*
                   7207:                 * The following might not be necessary since the page
                   7208:                 * is being cleared because it is about to be allocated,
                   7209:                 * i.e., is in use by no one.
                   7210:                 */
                   7211:                if (CACHEINFO.c_vactype != VAC_NONE)
1.236     pk       7212:                        pv_flushcache4m(pg);
1.159     pk       7213:        }
1.236     pk       7214:        pte = SRMMU_TEPTE | SRMMU_PG_C | PPROT_N_RWX | (pa >> SRMMU_PPNPASHIFT);
1.159     pk       7215:
1.228     pk       7216:        va = cpuinfo.vpage[0];
                   7217:        setpgt4m(cpuinfo.vpage_pte[0], pte);
1.159     pk       7218:        for (offset = 0; offset < NBPG; offset += 32) {
1.312     macallan 7219:                sta((char *)va + offset, ASI_BLOCKFILL, 0);
1.159     pk       7220:        }
                   7221:        /* Remove temporary mapping */
1.228     pk       7222:        sp_tlb_flush((int)va, 0, ASI_SRMMUFP_L3);
                   7223:        setpgt4m(cpuinfo.vpage_pte[0], SRMMU_TEINVALID);
1.341     mrg      7224:        kpreempt_enable();
1.159     pk       7225: }
                   7226:
                   7227: /*
1.55      pk       7228:  * Copy the given MI physical source page to its destination.
                   7229:  *
                   7230:  * We avoid stomping on the cache as above (with same `XXX' note).
                   7231:  * We must first flush any write-back cache for the source page.
                   7232:  * We go ahead and stomp on the kernel's virtual cache for the
                   7233:  * source page, since the cache can read memory MUCH faster than
                   7234:  * the processor.
                   7235:  */
                   7236: void
1.303     uwe      7237: pmap_copy_page4m(paddr_t src, paddr_t dst)
1.55      pk       7238: {
1.236     pk       7239:        struct vm_page *pg;
1.312     macallan 7240:        void *sva, *dva;
1.124     pk       7241:        int spte, dpte;
1.55      pk       7242:
1.341     mrg      7243:        kpreempt_disable();
1.236     pk       7244:        if ((pg = PHYS_TO_VM_PAGE(src)) != NULL) {
1.69      pk       7245:                if (CACHEINFO.c_vactype == VAC_WRITEBACK)
1.236     pk       7246:                        pv_flushcache4m(pg);
1.60      pk       7247:        }
1.145     pk       7248:
                   7249:        spte = SRMMU_TEPTE | SRMMU_PG_C | PPROT_N_RX |
1.236     pk       7250:                (src >> SRMMU_PPNPASHIFT);
1.55      pk       7251:
1.236     pk       7252:        if ((pg = PHYS_TO_VM_PAGE(dst)) != NULL) {
1.55      pk       7253:                /* similar `might not be necessary' comment applies */
1.69      pk       7254:                if (CACHEINFO.c_vactype != VAC_NONE)
1.236     pk       7255:                        pv_flushcache4m(pg);
1.167     pk       7256:                else
                   7257:                        pcache_flush_page(dst, 1);
1.60      pk       7258:        }
1.145     pk       7259:
1.236     pk       7260:        dpte = SRMMU_TEPTE | PPROT_N_RWX | (dst >> SRMMU_PPNPASHIFT);
1.69      pk       7261:        if (cpuinfo.flags & CPUFLG_CACHE_MANDATORY)
1.68      abrown   7262:                dpte |= SRMMU_PG_C;
1.60      pk       7263:
1.228     pk       7264:        sva = cpuinfo.vpage[0];
                   7265:        dva = cpuinfo.vpage[1];
                   7266:        setpgt4m(cpuinfo.vpage_pte[0], spte);
                   7267:        setpgt4m(cpuinfo.vpage_pte[1], dpte);
1.55      pk       7268:        qcopy(sva, dva, NBPG);  /* loads cache, so we must ... */
1.285     pk       7269:        cpuinfo.sp_vcache_flush_page((vaddr_t)sva, getcontext4m());
1.228     pk       7270:        sp_tlb_flush((int)sva, 0, ASI_SRMMUFP_L3);
                   7271:        setpgt4m(cpuinfo.vpage_pte[0], SRMMU_TEINVALID);
                   7272:        sp_tlb_flush((int)dva, 0, ASI_SRMMUFP_L3);
                   7273:        setpgt4m(cpuinfo.vpage_pte[1], SRMMU_TEINVALID);
1.341     mrg      7274:        kpreempt_enable();
1.158     pk       7275: }
                   7276:
1.159     pk       7277: /*
                   7278:  * Viking/MXCC specific version of pmap_copy_page
                   7279:  */
1.158     pk       7280: void
1.303     uwe      7281: pmap_copy_page_viking_mxcc(paddr_t src, paddr_t dst)
1.158     pk       7282: {
                   7283:        u_int offset;
1.305     uwe      7284:        uint64_t v1 = (uint64_t)src;
                   7285:        uint64_t v2 = (uint64_t)dst;
1.158     pk       7286:
1.341     mrg      7287:        kpreempt_disable();
1.158     pk       7288:        /* Enable cache-coherency */
                   7289:        v1 |= MXCC_STREAM_C;
                   7290:        v2 |= MXCC_STREAM_C;
                   7291:
                   7292:        /* Copy through stream data register */
                   7293:        for (offset = 0; offset < NBPG; offset += MXCC_STREAM_BLKSZ) {
                   7294:                stda(MXCC_STREAM_SRC, ASI_CONTROL, v1 | offset);
                   7295:                stda(MXCC_STREAM_DST, ASI_CONTROL, v2 | offset);
                   7296:        }
1.341     mrg      7297:        kpreempt_enable();
1.159     pk       7298: }
                   7299:
                   7300: /*
                   7301:  * HyperSPARC/RT625 specific version of pmap_copy_page
                   7302:  */
                   7303: void
1.303     uwe      7304: pmap_copy_page_hypersparc(paddr_t src, paddr_t dst)
1.159     pk       7305: {
1.236     pk       7306:        struct vm_page *pg;
1.312     macallan 7307:        void *sva, *dva;
1.159     pk       7308:        int spte, dpte;
                   7309:        int offset;
                   7310:
1.341     mrg      7311:        kpreempt_disable();
1.159     pk       7312:        /*
                   7313:         * We still have to map the pages, since ASI_BLOCKCOPY
                   7314:         * takes virtual addresses. This also means we have to
                   7315:         * consider cache aliasing; therefore we still need
                   7316:         * to flush the cache here. All we gain is the speed-up
                   7317:         * in the copy loop itself.
                   7318:         */
                   7319:
1.236     pk       7320:        if ((pg = PHYS_TO_VM_PAGE(src)) != NULL) {
1.159     pk       7321:                if (CACHEINFO.c_vactype == VAC_WRITEBACK)
1.236     pk       7322:                        pv_flushcache4m(pg);
1.159     pk       7323:        }
                   7324:
                   7325:        spte = SRMMU_TEPTE | SRMMU_PG_C | PPROT_N_RX |
1.236     pk       7326:                (src >> SRMMU_PPNPASHIFT);
1.159     pk       7327:
1.236     pk       7328:        if ((pg = PHYS_TO_VM_PAGE(dst)) != NULL) {
1.159     pk       7329:                /* similar `might not be necessary' comment applies */
                   7330:                if (CACHEINFO.c_vactype != VAC_NONE)
1.236     pk       7331:                        pv_flushcache4m(pg);
1.159     pk       7332:        }
                   7333:
                   7334:        dpte = SRMMU_TEPTE | SRMMU_PG_C | PPROT_N_RWX |
1.236     pk       7335:                (dst >> SRMMU_PPNPASHIFT);
1.159     pk       7336:
1.228     pk       7337:        sva = cpuinfo.vpage[0];
                   7338:        dva = cpuinfo.vpage[1];
                   7339:        setpgt4m(cpuinfo.vpage_pte[0], spte);
                   7340:        setpgt4m(cpuinfo.vpage_pte[1], dpte);
1.159     pk       7341:
                   7342:        for (offset = 0; offset < NBPG; offset += 32) {
1.312     macallan 7343:                sta((char *)dva + offset, ASI_BLOCKCOPY, (char *)sva + offset);
1.159     pk       7344:        }
                   7345:
1.228     pk       7346:        sp_tlb_flush((int)sva, 0, ASI_SRMMUFP_L3);
                   7347:        setpgt4m(cpuinfo.vpage_pte[0], SRMMU_TEINVALID);
                   7348:        sp_tlb_flush((int)dva, 0, ASI_SRMMUFP_L3);
                   7349:        setpgt4m(cpuinfo.vpage_pte[1], SRMMU_TEINVALID);
1.341     mrg      7350:        kpreempt_enable();
1.1       deraadt  7351: }
1.210     thorpej  7352: #endif /* SUN4M || SUN4D */
1.1       deraadt  7353:
                   7354: /*
                   7355:  * Turn off cache for a given (va, number of pages).
                   7356:  *
                   7357:  * We just assert PG_NC for each PTE; the addresses must reside
                   7358:  * in locked kernel space.  A cache flush is also done.
                   7359:  */
1.53      christos 7360: void
1.313     mrg      7361: kvm_uncache(char *va, int npages)
1.1       deraadt  7362: {
1.236     pk       7363:        struct vm_page *pg;
1.115     pk       7364:        int pte;
1.88      pk       7365:
1.210     thorpej  7366:        if (CPU_HAS_SRMMU) {
                   7367: #if defined(SUN4M) || defined(SUN4D)
1.312     macallan 7368:                for (; --npages >= 0; va = (char *)va + NBPG) {
1.124     pk       7369:                        pte = getpte4m((vaddr_t) va);
1.55      pk       7370:                        if ((pte & SRMMU_TETYPE) != SRMMU_TEPTE)
                   7371:                                panic("kvm_uncache: table entry not pte");
1.115     pk       7372:
1.182     pk       7373:                        if ((pte & SRMMU_PGTYPE) == PG_SUN4M_OBMEM) {
1.236     pk       7374:                                if ((pg = pvhead4m(pte)) != NULL) {
                   7375:                                        pv_uncache(pg);
1.182     pk       7376:                                        return;
                   7377:                                }
1.214     pk       7378:                                cache_flush_page((vaddr_t)va, 0);
1.182     pk       7379:                        }
1.143     pk       7380:
1.116     pk       7381:                        pte &= ~SRMMU_PG_C;
1.182     pk       7382:                        setpte4m((vaddr_t)va, pte);
1.55      pk       7383:                }
                   7384: #endif
                   7385:        } else {
                   7386: #if defined(SUN4) || defined(SUN4C)
                   7387:                for (; --npages >= 0; va += NBPG) {
                   7388:                        pte = getpte4(va);
                   7389:                        if ((pte & PG_V) == 0)
                   7390:                                panic("kvm_uncache !pg_v");
1.115     pk       7391:
1.182     pk       7392:                        if ((pte & PG_TYPE) == PG_OBMEM) {
1.236     pk       7393:                                if ((pg = pvhead4_4c(pte)) != NULL) {
                   7394:                                        pv_uncache(pg);
1.182     pk       7395:                                        return;
                   7396:                                }
1.214     pk       7397:                                cache_flush_page((vaddr_t)va, 0);
1.115     pk       7398:                        }
1.116     pk       7399:                        pte |= PG_NC;
                   7400:                        setpte4(va, pte);
1.55      pk       7401:                }
                   7402: #endif
1.1       deraadt  7403:        }
1.21      deraadt  7404: }
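/*
 * Editorial note (hedged): in kvm_uncache() above, a page known to
 * the PV system is handed to pv_uncache(), which updates every
 * mapping of that page; the routine returns at that point rather than
 * continuing through any remaining pages of the range.  Unmanaged
 * OBMEM pages instead get a per-PTE cache flush and an explicit
 * PG_NC / ~SRMMU_PG_C update.
 */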
                   7405:
1.313     mrg      7406: #if 0 /* not used */
1.46      pk       7407: /*
                   7408:  * Turn on IO cache for a given (va, number of pages).
                   7409:  *
                   7410:  * We just assert PG_IOC for each PTE; the addresses must reside
                   7411:  * in locked kernel space.
                   7412:  */
1.53      christos 7413: void
1.313     mrg      7414: kvm_iocache(char *va, int npages)
1.46      pk       7415: {
                   7416:
1.210     thorpej  7417: #if defined(SUN4M)
1.55      pk       7418:        if (CPU_ISSUN4M) /* %%%: Implement! */
                   7419:                panic("kvm_iocache: 4m iocache not implemented");
                   7420: #endif
1.210     thorpej  7421: #if defined(SUN4D)
                   7422:        if (CPU_ISSUN4D) /* %%%: Implement! */
                   7423:                panic("kvm_iocache: 4d iocache not implemented");
                   7424: #endif
1.55      pk       7425: #if defined(SUN4) || defined(SUN4C)
1.46      pk       7426:        for (; --npages >= 0; va += NBPG) {
1.124     pk       7427:                int pte = getpte4(va);
1.46      pk       7428:                if ((pte & PG_V) == 0)
                   7429:                        panic("kvm_iocache !pg_v");
                   7430:                pte |= PG_IOC;
1.55      pk       7431:                setpte4(va, pte);
1.46      pk       7432:        }
1.55      pk       7433: #endif
1.46      pk       7434: }
1.313     mrg      7435: #endif
1.46      pk       7436:
1.24      pk       7437: /*
1.51      gwr      7438:  * Find first virtual address >= *va that is
                   7439:  * least likely to cause cache aliases.
                   7440:  * (This will just seg-align mappings.)
1.24      pk       7441:  */
1.51      gwr      7442: void
1.348.6.2  tls      7443: pmap_prefer(vaddr_t foff, vaddr_t *vap, size_t size, int td)
1.24      pk       7444: {
1.124     pk       7445:        vaddr_t va = *vap;
1.348.6.2  tls      7446:        long m;
1.24      pk       7447:
1.48      pk       7448:        m = CACHE_ALIAS_DIST;
                   7449:        if (m == 0)             /* m=0 => no cache aliasing */
1.51      gwr      7450:                return;
1.24      pk       7451:
1.348.6.2  tls      7452:        if (VA_INHOLE(va)) {
                   7453:                if (td)
                   7454:                        va = MMU_HOLE_START - size;
                   7455:                else
                   7456:                        va = MMU_HOLE_END;
                   7457:        }
                   7458:
                   7459:        va = (va & ~(m - 1)) | (foff & (m - 1));
                   7460:
                   7461:        if (td) {
                   7462:                if (va > *vap)
                   7463:                        va -= m;
                   7464:        } else {
                   7465:                if (va < *vap)
                   7466:                        va += m;
                   7467:        }
                   7468:        *vap = va;
1.23      deraadt  7469: }
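/*
 * Worked example for pmap_prefer() (hypothetical numbers): with
 * CACHE_ALIAS_DIST == 0x10000, foff == 0x12345 and *vap == 0x203000,
 * the alignment step gives
 *	va = (0x203000 & ~0xffff) | (0x12345 & 0xffff) = 0x202345;
 * since that is below the requested address and td is 0, m is added,
 * yielding 0x212345, whose low bits match foff, so the mapping lands
 * on the same cache-alias color as the file offset.
 */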
                   7470:
1.53      christos 7471: void
1.303     uwe      7472: pmap_redzone(void)
1.23      deraadt  7473: {
1.303     uwe      7474:
1.100     pk       7475:        pmap_remove(pmap_kernel(), KERNBASE, KERNBASE+NBPG);
1.104     thorpej  7476: }
                   7477:
                   7478: /*
                   7479:  * Activate the address space for the specified process.  If the
                   7480:  * process is the current process, load the new MMU context.
                   7481:  */
                   7482: void
1.303     uwe      7483: pmap_activate(struct lwp *l)
1.104     thorpej  7484: {
1.231     thorpej  7485:        pmap_t pm = l->l_proc->p_vmspace->vm_map.pmap;
1.104     thorpej  7486:
1.348.6.1  tls      7487:        if (pm == pmap_kernel() || l != curlwp) {
                   7488:                return;
                   7489:        }
1.104     thorpej  7490:
1.348.6.1  tls      7491:        PMAP_LOCK();
                   7492:        if (pm->pm_ctx == NULL) {
                   7493:                ctx_alloc(pm);  /* performs setcontext() */
                   7494:        } else {
                   7495:                setcontext(pm->pm_ctxnum);
1.230     pk       7496:        }
1.348.6.1  tls      7497:        PMAP_SET_CPUSET(pm, &cpuinfo);
                   7498:        PMAP_UNLOCK();
1.104     thorpej  7499: }
                   7500:
                   7501: /*
                   7502:  * Deactivate the address space of the specified process.
                   7503:  */
                   7504: void
1.303     uwe      7505: pmap_deactivate(struct lwp *l)
1.104     thorpej  7506: {
1.348.6.1  tls      7507:        struct proc *p = l->l_proc;
                   7508:        pmap_t pm = p->p_vmspace->vm_map.pmap;
                   7509:
                   7510:        if (pm == pmap_kernel() || l != curlwp) {
                   7511:                return;
                   7512:        }
                   7513:
                   7514:        write_user_windows();
                   7515:        PMAP_LOCK();
                   7516:        if (pm->pm_ctx) {
                   7517:                (*cpuinfo.pure_vcache_flush)();
1.226     mrg      7518:
1.232     pk       7519: #if defined(SUN4M) || defined(SUN4D)
1.348.6.1  tls      7520:                if (CPU_HAS_SRMMU)
1.230     pk       7521:                        sp_tlb_flush(0, pm->pm_ctxnum, ASI_SRMMUFP_L0);
1.232     pk       7522: #endif
1.226     mrg      7523:        }
1.348.6.1  tls      7524:
                   7525:        /* we no longer need broadcast tlb flushes for this pmap. */
                   7526:        PMAP_CLR_CPUSET(pm, &cpuinfo);
                   7527:        PMAP_UNLOCK();
1.1       deraadt  7528: }
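/*
 * Editorial note (hedged): the cpuset maintained by pmap_activate()
 * and pmap_deactivate() records which CPUs may hold TLB entries for a
 * pmap, so that cross-CPU TLB flushes (see the PMAP_CPUSET() use
 * earlier in this file) can be limited to those CPUs.  Clearing our
 * bit on deactivation is what makes the "no longer need broadcast tlb
 * flushes" comment above true.
 */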
1.43      pk       7529:
                   7530: #ifdef DEBUG
                   7531: /*
                   7532:  * Check consistency of a pmap (time consuming!).
                   7533:  */
1.53      christos 7534: void
1.303     uwe      7535: pm_check(char *s, struct pmap *pm)
1.43      pk       7536: {
1.303     uwe      7537:
1.43      pk       7538:        if (pm == pmap_kernel())
                   7539:                pm_check_k(s, pm);
                   7540:        else
                   7541:                pm_check_u(s, pm);
                   7542: }
                   7543:
1.53      christos 7544: void
1.303     uwe      7545: pm_check_u(char *s, struct pmap *pm)
1.43      pk       7546: {
                   7547:        struct regmap *rp;
                   7548:        struct segmap *sp;
1.217     pk       7549:        int cpu, n, vs, vr, j, m, *pte;
                   7550:
                   7551:        cpu = cpuinfo.ci_cpuid;
1.43      pk       7552:
1.55      pk       7553:        if (pm->pm_regmap == NULL)
1.276     wiz      7554:                panic("%s: CPU %d: CHK(pmap %p): no region mapping",
1.217     pk       7555:                        s, cpu, pm);
1.55      pk       7556:
1.210     thorpej  7557: #if defined(SUN4M) || defined(SUN4D)
                   7558:        if (CPU_HAS_SRMMU &&
1.217     pk       7559:            (pm->pm_reg_ptps[cpu] == NULL ||
1.311     christos 7560:             pm->pm_reg_ptps_pa[cpu] != VA2PA((void *)pm->pm_reg_ptps[cpu])))
1.276     wiz      7561:                panic("%s: CPU %d: CHK(pmap %p): no SRMMU region table or bad pa: "
1.72      pk       7562:                      "tblva=%p, tblpa=0x%x",
1.217     pk       7563:                        s, cpu, pm, pm->pm_reg_ptps[cpu], pm->pm_reg_ptps_pa[cpu]);
1.55      pk       7564:
1.210     thorpej  7565:        if (CPU_HAS_SRMMU && pm->pm_ctx != NULL &&
1.311     christos 7566:            (cpuinfo.ctx_tbl[pm->pm_ctxnum] != ((VA2PA((void *)pm->pm_reg_ptps[cpu])
1.55      pk       7567:                                              >> SRMMU_PPNPASHIFT) |
                   7568:                                             SRMMU_TEPTD)))
1.276     wiz      7569:            panic("%s: CPU %d: CHK(pmap %p): SRMMU region table at 0x%x not installed "
1.217     pk       7570:                  "for context %d", s, cpu, pm, pm->pm_reg_ptps_pa[cpu], pm->pm_ctxnum);
1.55      pk       7571: #endif
                   7572:
1.43      pk       7573:        for (vr = 0; vr < NUREG; vr++) {
                   7574:                rp = &pm->pm_regmap[vr];
                   7575:                if (rp->rg_nsegmap == 0)
                   7576:                        continue;
                   7577:                if (rp->rg_segmap == NULL)
1.276     wiz      7578:                        panic("%s: CPU %d: CHK(vr %d): nsegmap = %d; sp==NULL",
1.217     pk       7579:                                s, cpu, vr, rp->rg_nsegmap);
1.210     thorpej  7580: #if defined(SUN4M) || defined(SUN4D)
                   7581:                if (CPU_HAS_SRMMU && rp->rg_seg_ptps == NULL)
1.276     wiz      7582:                    panic("%s: CPU %d: CHK(vr %d): nsegmap=%d; no SRMMU segment table",
1.217     pk       7583:                          s, cpu, vr, rp->rg_nsegmap);
1.210     thorpej  7584:                if (CPU_HAS_SRMMU &&
1.311     christos 7585:                    pm->pm_reg_ptps[cpu][vr] != ((VA2PA((void *)rp->rg_seg_ptps) >>
1.55      pk       7586:                                            SRMMU_PPNPASHIFT) | SRMMU_TEPTD))
1.276     wiz      7587:                    panic("%s: CPU %d: CHK(vr %d): SRMMU segtbl not installed",
1.217     pk       7588:                                s, cpu, vr);
1.55      pk       7589: #endif
1.43      pk       7590:                if ((unsigned int)rp < KERNBASE)
1.276     wiz      7591:                        panic("%s: CPU %d: rp=%p", s, cpu, rp);
1.43      pk       7592:                n = 0;
                   7593:                for (vs = 0; vs < NSEGRG; vs++) {
                   7594:                        sp = &rp->rg_segmap[vs];
                   7595:                        if ((unsigned int)sp < KERNBASE)
1.276     wiz      7596:                                panic("%s: CPU %d: sp=%p", s, cpu, sp);
1.43      pk       7597:                        if (sp->sg_npte != 0) {
                   7598:                                n++;
                   7599:                                if (sp->sg_pte == NULL)
1.276     wiz      7600:                                        panic("%s: CPU %d: CHK(vr %d, vs %d): npte=%d, "
1.217     pk       7601:                                           "pte=NULL", s, cpu, vr, vs, sp->sg_npte);
1.210     thorpej  7602: #if defined(SUN4M) || defined(SUN4D)
                   7603:                                if (CPU_HAS_SRMMU &&
1.55      pk       7604:                                    rp->rg_seg_ptps[vs] !=
1.311     christos 7605:                                     ((VA2PA((void *)sp->sg_pte)
1.55      pk       7606:                                        >> SRMMU_PPNPASHIFT) |
                   7607:                                       SRMMU_TEPTD))
1.276     wiz      7608:                                    panic("%s: CPU %d: CHK(vr %d, vs %d): SRMMU page "
1.217     pk       7609:                                          "table not installed correctly",
                   7610:                                                s, cpu, vr, vs);
1.55      pk       7611: #endif
1.43      pk       7612:                                pte = sp->sg_pte;
                   7613:                                m = 0;
                   7614:                                for (j = 0; j < NPTESG; j++, pte++)
1.210     thorpej  7615:                                    if ((CPU_HAS_SRMMU
1.55      pk       7616:                                         ?((*pte & SRMMU_TETYPE) == SRMMU_TEPTE)
                   7617:                                         :(*pte & PG_V)))
                   7618:                                        m++;
1.43      pk       7619:                                if (m != sp->sg_npte)
1.276     wiz      7620:                                        printf("%s: CPU %d: user CHK(vr %d, vs %d): "
1.43      pk       7621:                                            "npte(%d) != # valid(%d)\n",
1.217     pk       7622:                                                s, cpu, vr, vs, sp->sg_npte, m);
1.43      pk       7623:                        }
                   7624:                }
                   7625:                if (n != rp->rg_nsegmap)
1.276     wiz      7626:                        panic("%s: CPU %d: CHK(vr %d): inconsistent "
1.43      pk       7627:                                "# of pte's: %d, should be %d",
1.217     pk       7628:                                s, cpu, vr, rp->rg_nsegmap, n);
1.43      pk       7629:        }
1.53      christos 7630:        return;
1.43      pk       7631: }
                   7632:
1.303     uwe      7633: /* Note: not as extensive as pm_check_u. */
1.53      christos 7634: void
1.303     uwe      7635: pm_check_k(char *s, struct pmap *pm)
1.43      pk       7636: {
                   7637:        struct regmap *rp;
1.217     pk       7638:        int cpu, vr, vs, n;
                   7639:
1.221     pk       7640:        cpu = cpu_number();
1.43      pk       7641:
1.55      pk       7642:        if (pm->pm_regmap == NULL)
1.122     pk       7643:                panic("%s: CHK(pmap %p): no region mapping", s, pm);
1.55      pk       7644:
1.210     thorpej  7645: #if defined(SUN4M) || defined(SUN4D)
                   7646:        if (CPU_HAS_SRMMU &&
1.217     pk       7647:            (pm->pm_reg_ptps[cpu] == NULL ||
1.311     christos 7648:             pm->pm_reg_ptps_pa[cpu] != VA2PA((void *)pm->pm_reg_ptps[cpu])))
1.276     wiz      7649:            panic("%s: CPU %d: CHK(pmap %p): no SRMMU region table or bad pa: tblva=%p, tblpa=0x%x",
1.217     pk       7650:                  s, cpu, pm, pm->pm_reg_ptps[cpu], pm->pm_reg_ptps_pa[cpu]);
1.55      pk       7651:
1.210     thorpej  7652:        if (CPU_HAS_SRMMU &&
1.311     christos 7653:            (cpuinfo.ctx_tbl[0] != ((VA2PA((void *)pm->pm_reg_ptps[cpu]) >>
1.55      pk       7654:                                             SRMMU_PPNPASHIFT) | SRMMU_TEPTD)))
1.276     wiz      7655:            panic("%s: CPU %d: CHK(pmap %p): SRMMU region table at 0x%x not installed "
1.217     pk       7656:                  "for context %d", s, cpu, pm, pm->pm_reg_ptps_pa[cpu], 0);
1.55      pk       7657: #endif
1.43      pk       7658:        for (vr = NUREG; vr < NUREG+NKREG; vr++) {
                   7659:                rp = &pm->pm_regmap[vr];
                   7660:                if (rp->rg_segmap == NULL)
1.276     wiz      7661:                        panic("%s: CPU %d: CHK(vr %d): nsegmap = %d; sp==NULL",
1.217     pk       7662:                                s, cpu, vr, rp->rg_nsegmap);
1.43      pk       7663:                if (rp->rg_nsegmap == 0)
                   7664:                        continue;
1.210     thorpej  7665: #if defined(SUN4M) || defined(SUN4D)
                   7666:                if (CPU_HAS_SRMMU && rp->rg_seg_ptps == NULL)
1.276     wiz      7667:                    panic("%s: CPU %d: CHK(vr %d): nsegmap=%d; no SRMMU segment table",
1.217     pk       7668:                          s, cpu, vr, rp->rg_nsegmap);
                   7669:
1.276     wiz      7670:                if (CPU_HAS_SRMMU && vr != NUREG /* 1st kseg is per CPU */ &&
1.311     christos 7671:                    pm->pm_reg_ptps[cpu][vr] != ((VA2PA((void *)rp->rg_seg_ptps) >>
1.55      pk       7672:                                            SRMMU_PPNPASHIFT) | SRMMU_TEPTD))
1.276     wiz      7673:                    panic("%s: CPU %d: CHK(vr %d): SRMMU segtbl not installed",
1.217     pk       7674:                                s, cpu, vr);
1.55      pk       7675: #endif
1.210     thorpej  7676:                if (CPU_HAS_SRMMU) {
1.72      pk       7677:                        n = NSEGRG;
                   7678:                } else {
                   7679:                        for (n = 0, vs = 0; vs < NSEGRG; vs++) {
                   7680:                                if (rp->rg_segmap[vs].sg_npte)
                   7681:                                        n++;
                   7682:                        }
1.43      pk       7683:                }
                   7684:                if (n != rp->rg_nsegmap)
1.276     wiz      7685:                        printf("%s: CPU %d: kernel CHK(vr %d): inconsistent "
1.43      pk       7686:                                "# of pte's: %d, should be %d\n",
1.217     pk       7687:                                s, cpu, vr, rp->rg_nsegmap, n);
1.43      pk       7688:        }
1.53      christos 7689:        return;
1.43      pk       7690: }
                   7691: #endif
1.46      pk       7692:
                   7693: /*
1.98      pk       7694:  * Return the number of disk blocks that pmap_dumpmmu() will dump.
1.46      pk       7695:  */
                   7696: int
1.303     uwe      7697: pmap_dumpsize(void)
1.46      pk       7698: {
1.98      pk       7699:        int     sz;
1.67      pk       7700:
                   7701:        sz = ALIGN(sizeof(kcore_seg_t)) + ALIGN(sizeof(cpu_kcore_hdr_t));
                   7702:        sz += npmemarr * sizeof(phys_ram_seg_t);
1.98      pk       7703:        sz += sizeof(kernel_segmap_store);
1.55      pk       7704:
1.296     chs      7705:        if (CPU_HAS_SUNMMU)
1.98      pk       7706:                /* For each pmeg in the MMU, we'll write NPTESG PTEs. */
1.67      pk       7707:                sz += (seginval + 1) * NPTESG * sizeof(int);
                   7708:
1.98      pk       7709:        return btodb(sz + DEV_BSIZE - 1);
1.46      pk       7710: }
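/*
 * Editorial example (hypothetical figures): with npmemarr == 2 and a
 * sun4c MMU where seginval == 127, the size above is the two
 * ALIGN()ed headers, 2 * sizeof(phys_ram_seg_t), the kernel segmap,
 * plus 128 * NPTESG * sizeof(int) of PMEG PTEs;
 * btodb(sz + DEV_BSIZE - 1) then rounds the byte count up to whole
 * disk blocks.
 */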
                   7711:
                   7712: /*
1.98      pk       7713:  * Write the core dump headers and MD data to the dump device.
                   7714:  * We dump the following items:
1.195     mrg      7715:  *
1.98      pk       7716:  *     kcore_seg_t              MI header defined in <sys/kcore.h>)
                   7717:  *     cpu_kcore_hdr_t          MD header defined in <machine/kcore.h>)
                   7718:  *     phys_ram_seg_t[npmemarr] physical memory segments
                   7719:  *     segmap_t[NKREG*NSEGRG]   the kernel's segment map
                   7720:  *     the MMU pmegs on sun4/sun4c
1.46      pk       7721:  */
                   7722: int
1.311     christos 7723: pmap_dumpmmu(int (*dump)(dev_t, daddr_t, void *, size_t),
1.303     uwe      7724:             daddr_t blkno)
1.46      pk       7725: {
1.67      pk       7726:        kcore_seg_t     *ksegp;
                   7727:        cpu_kcore_hdr_t *kcpup;
                   7728:        phys_ram_seg_t  memseg;
1.124     pk       7729:        int             error = 0;
                   7730:        int             i, memsegoffset, segmapoffset, pmegoffset;
1.67      pk       7731:        int             buffer[dbtob(1) / sizeof(int)];
                   7732:        int             *bp, *ep;
1.55      pk       7733: #if defined(SUN4C) || defined(SUN4)
1.124     pk       7734:        int     pmeg;
1.55      pk       7735: #endif
1.46      pk       7736:
1.67      pk       7737: #define EXPEDITE(p,n) do {                                             \
                   7738:        int *sp = (int *)(p);                                           \
                   7739:        int sz = (n);                                                   \
                   7740:        while (sz > 0) {                                                \
                   7741:                *bp++ = *sp++;                                          \
                   7742:                if (bp >= ep) {                                         \
                   7743:                        error = (*dump)(dumpdev, blkno,                 \
1.311     christos 7744:                                        (void *)buffer, dbtob(1));      \
1.67      pk       7745:                        if (error != 0)                                 \
                   7746:                                return (error);                         \
                   7747:                        ++blkno;                                        \
                   7748:                        bp = buffer;                                    \
                   7749:                }                                                       \
                   7750:                sz -= 4;                                                \
                   7751:        }                                                               \
                   7752: } while (0)
                   7753:
                   7754:        setcontext(0);
                   7755:
                   7756:        /* Setup bookkeeping pointers */
                   7757:        bp = buffer;
                   7758:        ep = &buffer[sizeof(buffer) / sizeof(buffer[0])];
                   7759:
                   7760:        /* Fill in MI segment header */
                   7761:        ksegp = (kcore_seg_t *)bp;
                   7762:        CORE_SETMAGIC(*ksegp, KCORE_MAGIC, MID_MACHINE, CORE_CPU);
1.98      pk       7763:        ksegp->c_size = dbtob(pmap_dumpsize()) - ALIGN(sizeof(kcore_seg_t));
1.67      pk       7764:
                   7765:        /* Fill in MD segment header (interpreted by MD part of libkvm) */
                   7766:        kcpup = (cpu_kcore_hdr_t *)((int)bp + ALIGN(sizeof(kcore_seg_t)));
                   7767:        kcpup->cputype = cputyp;
1.98      pk       7768:        kcpup->kernbase = KERNBASE;
1.67      pk       7769:        kcpup->nmemseg = npmemarr;
                   7770:        kcpup->memsegoffset = memsegoffset = ALIGN(sizeof(cpu_kcore_hdr_t));
1.98      pk       7771:        kcpup->nsegmap = NKREG*NSEGRG;
                   7772:        kcpup->segmapoffset = segmapoffset =
                   7773:                memsegoffset + npmemarr * sizeof(phys_ram_seg_t);
                   7774:
1.296     chs      7775:        kcpup->npmeg = (CPU_HAS_SUNMMU) ? seginval + 1 : 0;
1.67      pk       7776:        kcpup->pmegoffset = pmegoffset =
1.98      pk       7777:                segmapoffset + kcpup->nsegmap * sizeof(struct segmap);
1.67      pk       7778:
                   7779:        /* Note: we have assumed everything fits in buffer[] so far... */
1.98      pk       7780:        bp = (int *)((int)kcpup + ALIGN(sizeof(cpu_kcore_hdr_t)));
1.67      pk       7781:
1.98      pk       7782: #if 0
1.67      pk       7783:        /* Align storage for upcoming quad-aligned segment array */
                   7784:        while (bp != (int *)ALIGN(bp)) {
                   7785:                int dummy = 0;
                   7786:                EXPEDITE(&dummy, 4);
                   7787:        }
1.98      pk       7788: #endif
                   7789:
1.67      pk       7790:        for (i = 0; i < npmemarr; i++) {
                   7791:                memseg.start = pmemarr[i].addr;
                   7792:                memseg.size = pmemarr[i].len;
1.271     mrg      7793:                EXPEDITE((void *)&memseg, sizeof(phys_ram_seg_t));
1.67      pk       7794:        }
1.98      pk       7795:
                   7796:        EXPEDITE(&kernel_segmap_store, sizeof(kernel_segmap_store));
1.67      pk       7797:
1.210     thorpej  7798:        if (CPU_HAS_SRMMU)
1.67      pk       7799:                goto out;
1.55      pk       7800:
                   7801: #if defined(SUN4C) || defined(SUN4)
1.46      pk       7802:        /*
                   7803:         * Dump the hardware page table entries.
                   7804:         *
                   7805:         * We dump each pmeg in order (by pmeg number).  Since a pmeg
                   7806:         * can only be read through a virtual segment that maps it, we
                   7807:         * map each pmeg in turn at a fixed, otherwise unused virtual
                   7808:         * segment (VA 0) and fetch its PTEs through that address range
                   7809:         * with getpte4().
                   7810:         */
1.55      pk       7811:
1.46      pk       7812:        /*
                   7813:         * Go through the pmegs and dump each one.
                   7814:         */
                   7815:        for (pmeg = 0; pmeg <= seginval; ++pmeg) {
1.124     pk       7816:                int va = 0;
1.46      pk       7817:
                   7818:                setsegmap(va, pmeg);
                   7819:                i = NPTESG;
                   7820:                do {
1.67      pk       7821:                        int pte = getpte4(va);
                   7822:                        EXPEDITE(&pte, sizeof(pte));
1.46      pk       7823:                        va += NBPG;
                   7824:                } while (--i > 0);
                   7825:        }
                   7826:        setsegmap(0, seginval);
1.67      pk       7827: #endif
1.46      pk       7828:
1.67      pk       7829: out:
                   7830:        if (bp != buffer)
1.311     christos 7831:                error = (*dump)(dumpdev, blkno++, (void *)buffer, dbtob(1));
1.46      pk       7832:
                   7833:        return (error);
1.92      pk       7834: }
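
                        /*
                         * Illustrative sketch (hypothetical code, not part of the
                         * kernel): how a libkvm-style consumer of the image written
                         * above might locate each section.  The offsets stored in
                         * cpu_kcore_hdr_t are relative to the start of that header,
                         * matching the layout pmap_dumpmmu() emits.
                         */
                        #if 0
                        static void
                        example_parse_dump_header(char *buf)
                        {
                        	kcore_seg_t *kseg = (kcore_seg_t *)buf;
                        	cpu_kcore_hdr_t *kcpu =
                        	    (cpu_kcore_hdr_t *)(buf + ALIGN(sizeof(kcore_seg_t)));
                        	phys_ram_seg_t *memsegs =
                        	    (phys_ram_seg_t *)((char *)kcpu + kcpu->memsegoffset);

                        	printf("%u-byte image, %d memsegs; first at pa 0x%lx\n",
                        	    kseg->c_size, kcpu->nmemseg, (u_long)memsegs[0].start);
                        	/* segmaps and pmegs follow at kcpu->segmapoffset and
                        	   kcpu->pmegoffset respectively */
                        }
                        #endif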
                   7835:
                   7836: /*
                   7837:  * Helper function for debuggers.
                   7838:  */
                   7839: void
1.303     uwe      7840: pmap_writetext(unsigned char *dst, int ch)
1.92      pk       7841: {
1.95      pk       7842:        int s, pte0, pte, ctx;
1.124     pk       7843:        vaddr_t va;
1.92      pk       7844:
1.175     thorpej  7845:        s = splvm();
1.92      pk       7846:        va = (unsigned long)dst & (~PGOFSET);
1.214     pk       7847:        cache_flush(dst, 1);
1.92      pk       7848:
1.95      pk       7849:        ctx = getcontext();
                   7850:        setcontext(0);
                   7851:
1.210     thorpej  7852: #if defined(SUN4M) || defined(SUN4D)
                   7853:        if (CPU_HAS_SRMMU) {
1.92      pk       7854:                pte0 = getpte4m(va);
                   7855:                if ((pte0 & SRMMU_TETYPE) != SRMMU_TEPTE) {
                                                setcontext(ctx);        /* restore saved context */
                   7856:                        splx(s);
                   7857:                        return;
                   7858:                }
                   7859:                pte = pte0 | PPROT_WRITE;
                   7860:                setpte4m(va, pte);
                   7861:                *dst = (unsigned char)ch;
                   7862:                setpte4m(va, pte0);
                   7863:
                   7864:        }
                   7865: #endif
                   7866: #if defined(SUN4) || defined(SUN4C)
                   7867:        if (CPU_ISSUN4C || CPU_ISSUN4) {
                   7868:                pte0 = getpte4(va);
                   7869:                if ((pte0 & PG_V) == 0) {
                                                setcontext(ctx);        /* restore saved context */
                   7870:                        splx(s);
                   7871:                        return;
                   7872:                }
                   7873:                pte = pte0 | PG_W;
                   7874:                setpte4(va, pte);
                   7875:                *dst = (unsigned char)ch;
                   7876:                setpte4(va, pte0);
                   7877:        }
                   7878: #endif
1.214     pk       7879:        cache_flush(dst, 1);
1.95      pk       7880:        setcontext(ctx);
1.92      pk       7881:        splx(s);
1.55      pk       7882: }
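
                        /*
                         * Illustrative use (hypothetical, not part of the kernel):
                         * a debugger planting the SPARC breakpoint instruction
                         * "ta 1" (0x91d02001) into write-protected kernel text,
                         * one byte at a time.
                         */
                        #if 0
                        static void
                        example_plant_breakpoint(unsigned char *insn)
                        {
                        	static const unsigned char ta1[4] =
                        	    { 0x91, 0xd0, 0x20, 0x01 };	/* "ta 1" */
                        	int i;

                        	for (i = 0; i < 4; i++)
                        		pmap_writetext(insn + i, ta1[i]);
                        }
                        #endif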
                   7883:
                   7884: #ifdef EXTREME_DEBUG
                   7885:
1.304     uwe      7886: void debug_pagetables(void);
                   7887: void print_fe_map(void);
                   7888:
1.303     uwe      7889: static void test_region(int, int, int);
1.55      pk       7890:
1.304     uwe      7891:
1.55      pk       7892: void
1.303     uwe      7893: debug_pagetables(void)
1.55      pk       7894: {
1.304     uwe      7895:        struct promvec *promvec = romp;
1.124     pk       7896:        int *regtbl;
                   7897:        int te;
1.304     uwe      7898:        int i;
1.55      pk       7899:
1.304     uwe      7900:        printf("\nncontext=%d. ", ncontext);
                   7901:        printf("Context table is at va %p. Level 0 PTP: 0x%x\n",
1.69      pk       7902:               cpuinfo.ctx_tbl, cpuinfo.ctx_tbl[0]);
1.304     uwe      7903:        printf("Context 0 region table is at va %p, pa 0x%x. Contents:\n",
1.152     pk       7904:               pmap_kernel()->pm_reg_ptps[0], pmap_kernel()->pm_reg_ptps_pa[0]);
1.55      pk       7905:
1.152     pk       7906:        regtbl = pmap_kernel()->pm_reg_ptps[0];
1.55      pk       7907:
1.304     uwe      7908:        printf("PROM vector is at %p\n", promvec);
                   7909:        printf("PROM reboot routine is at %p\n", promvec->pv_reboot);
                   7910:        printf("PROM abort routine is at %p\n", promvec->pv_abort);
                   7911:        printf("PROM halt routine is at %p\n", promvec->pv_halt);
1.55      pk       7912:
1.66      christos 7913:        printf("Testing region 0xfe: ");
1.55      pk       7914:        test_region(0xfe,0,16*1024*1024);
1.66      christos 7915:        printf("Testing region 0xff: ");
1.55      pk       7916:        test_region(0xff,0,16*1024*1024);
1.96      pk       7917:        printf("Testing kernel region 0x%x: ", VA_VREG(KERNBASE));
                   7918:        test_region(VA_VREG(KERNBASE), 4096, avail_start);
1.55      pk       7919:        cngetc();
                   7920:
                   7921:        for (i = 0; i < SRMMU_L1SIZE; i++) {
                   7922:                te = regtbl[i];
                   7923:                if ((te & SRMMU_TETYPE) == SRMMU_TEINVALID)
                   7924:                    continue;
1.304     uwe      7925:                printf("Region 0x%x: PTE=0x%x <%s> L2PA=0x%x kernL2VA=%p\n",
1.55      pk       7926:                       i, te, ((te & SRMMU_TETYPE) == SRMMU_TEPTE ? "pte" :
                   7927:                               ((te & SRMMU_TETYPE) == SRMMU_TEPTD ? "ptd" :
                   7928:                                ((te & SRMMU_TETYPE) == SRMMU_TEINVALID ?
                   7929:                                 "invalid" : "reserved"))),
                   7930:                       (te & ~0x3) << SRMMU_PPNPASHIFT,
                   7931:                       pmap_kernel()->pm_regmap[i].rg_seg_ptps);
                   7932:        }
1.66      christos 7933:        printf("Press q to halt...\n");
1.55      pk       7934:        if (cngetc()=='q')
                   7935:            callrom();
                   7936: }
                   7937:
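                        /*
                         * Software SRMMU table walk (debug only): translate (ctx,
                         * addr) to a physical address by hand, following the
                         * context table, then the region (L1), segment (L2) and
                         * page (L3) tables, leaving the last entry fetched in
                         * *pte.  A PTE found at an intermediate level terminates
                         * the walk early with a correspondingly larger offset.
                         */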
                   7938: static u_int
1.311     christos 7939: VA2PAsw(int ctx, void *addr, int *pte)
1.55      pk       7940: {
1.124     pk       7941:        int *curtbl;
                   7942:        int curpte;
1.55      pk       7943:
                   7944: #ifdef EXTREME_EXTREME_DEBUG
1.66      christos 7945:        printf("Looking up addr %p in context 0x%x\n", addr, ctx);
1.55      pk       7946: #endif
                   7947:        /* L0 */
1.69      pk       7948:        *pte = curpte = cpuinfo.ctx_tbl[ctx];
1.55      pk       7949: #ifdef EXTREME_EXTREME_DEBUG
1.66      christos 7950:        printf("Got L0 pte 0x%x\n", curpte);
1.55      pk       7951: #endif
                   7952:        if ((curpte & SRMMU_TETYPE) == SRMMU_TEPTE) {
                   7953:                return (((curpte & SRMMU_PPNMASK) << SRMMU_PPNPASHIFT) |
                   7954:                        ((u_int)addr & 0xffffffff));
                   7955:        }
                   7956:        if ((curpte & SRMMU_TETYPE) != SRMMU_TEPTD) {
1.66      christos 7957:                printf("Bad context table entry 0x%x for context 0x%x\n",
1.55      pk       7958:                       curpte, ctx);
                   7959:                return 0;
                   7960:        }
                   7961:        /* L1 */
1.304     uwe      7962:        curtbl = (int *)(((curpte & ~0x3) << 4) | KERNBASE); /* correct for krn */
1.55      pk       7963:        *pte = curpte = curtbl[VA_VREG(addr)];
                   7964: #ifdef EXTREME_EXTREME_DEBUG
1.66      christos 7965:        printf("L1 table at %p.\nGot L1 pte 0x%x\n", curtbl, curpte);
1.55      pk       7966: #endif
                   7967:        if ((curpte & SRMMU_TETYPE) == SRMMU_TEPTE)
                   7968:            return (((curpte & SRMMU_PPNMASK) << SRMMU_PPNPASHIFT) |
                   7969:                    ((u_int)addr & 0xffffff));
                   7970:        if ((curpte & SRMMU_TETYPE) != SRMMU_TEPTD) {
1.66      christos 7971:                printf("Bad region table entry 0x%x for region 0x%x\n",
1.55      pk       7972:                       curpte, VA_VREG(addr));
                   7973:                return 0;
                   7974:        }
                   7975:        /* L2 */
1.304     uwe      7976:        curtbl = (int *)(((curpte & ~0x3) << 4) | KERNBASE); /* correct for krn */
1.55      pk       7977:        *pte = curpte = curtbl[VA_VSEG(addr)];
                   7978: #ifdef EXTREME_EXTREME_DEBUG
1.66      christos 7979:        printf("L2 table at %p.\nGot L2 pte 0x%x\n", curtbl, curpte);
1.55      pk       7980: #endif
                   7981:        if ((curpte & SRMMU_TETYPE) == SRMMU_TEPTE)
                   7982:            return (((curpte & SRMMU_PPNMASK) << SRMMU_PPNPASHIFT) |
                   7983:                    ((u_int)addr & 0x3ffff));
                   7984:        if ((curpte & SRMMU_TETYPE) != SRMMU_TEPTD) {
1.66      christos 7985:                printf("Bad segment table entry 0x%x for reg 0x%x, seg 0x%x\n",
1.55      pk       7986:                       curpte, VA_VREG(addr), VA_VSEG(addr));
                   7987:                return 0;
                   7988:        }
                   7989:        /* L3 */
1.304     uwe      7990:        curtbl = (int *)(((curpte & ~0x3) << 4) | KERNBASE); /* correct for krn */
1.55      pk       7991:        *pte = curpte = curtbl[VA_VPG(addr)];
                   7992: #ifdef EXTREME_EXTREME_DEBUG
1.304     uwe      7993:        printf("L3 table at %p.\nGot L3 pte 0x%x\n", curtbl, curpte);
1.55      pk       7994: #endif
                   7995:        if ((curpte & SRMMU_TETYPE) == SRMMU_TEPTE)
                   7996:            return (((curpte & SRMMU_PPNMASK) << SRMMU_PPNPASHIFT) |
                   7997:                    ((u_int)addr & 0xfff));
                   7998:
1.66      christos 7999:        /* Not a PTE: the walk failed at the page level. */
1.55      pk       8000:        printf("Bad L3 pte 0x%x for reg 0x%x, seg 0x%x, pg 0x%x\n",
                   8001:               curpte, VA_VREG(addr), VA_VSEG(addr), VA_VPG(addr));
                   8002:        return 0;
1.55      pk       8003: }
                   8005:
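                        /*
                         * Consistency check: for each page in the given region,
                         * compare the MMU's hardware table walk (ASI_SRMMUFP
                         * probe) with the software walk in VA2PAsw(), reporting
                         * any PA or protection mismatches.
                         */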
1.303     uwe      8006: static void
                   8007: test_region(int reg, int start, int stop)
1.55      pk       8008: {
1.124     pk       8009:        int i;
                   8010:        int addr;
                   8011:        int pte;
1.55      pk       8012:        int ptesw;
                   8013: /*     int cnt=0;
                   8014: */
                   8015:
1.304     uwe      8016:        for (i = start; i < stop; i += NBPG) {
1.55      pk       8017:                addr = (reg << RGSHIFT) | i;
1.304     uwe      8018:                pte = lda(((u_int)(addr)) | ASI_SRMMUFP_LN, ASI_SRMMUFP);
1.55      pk       8019:                if (pte) {
1.66      christos 8020: /*                     printf("Valid address 0x%x\n",addr);
1.55      pk       8021:                        if (++cnt == 20) {
                   8022:                                cngetc();
1.304     uwe      8023:                                cnt = 0;
1.55      pk       8024:                        }
                   8025: */
1.311     christos 8026:                        if (VA2PA((void *)addr) != VA2PAsw(0, (void *)addr, &ptesw)) {
1.304     uwe      8027:                                printf("Mismatch at address 0x%x.\n", addr);
                   8028:                                if (cngetc() == 'q')
                   8029:                                        break;
1.55      pk       8030:                        }
1.96      pk       8031:                        if (reg == VA_VREG(KERNBASE))
                   8032:                                /* kernel permissions are different */
                   8033:                                continue;
1.304     uwe      8034:                        if ((pte & SRMMU_PROT_MASK) != (ptesw & SRMMU_PROT_MASK)) {
1.66      christos 8035:                                printf("Mismatched protections at address "
1.55      pk       8036:                                       "0x%x; pte=0x%x, ptesw=0x%x\n",
1.304     uwe      8037:                                       addr, pte, ptesw);
                   8038:                                if (cngetc() == 'q')
                   8039:                                        break;
1.55      pk       8040:                        }
                   8041:                }
                   8042:        }
1.66      christos 8043:        printf("done.\n");
1.46      pk       8044: }
1.55      pk       8045:
                   8046:
1.303     uwe      8047: void
                   8048: print_fe_map(void)
1.55      pk       8049: {
                   8050:        u_int i, pte;
                   8051:
1.66      christos 8052:        printf("map of region 0xfe:\n");
1.265     pk       8053:        for (i = 0xfe000000; i < 0xff000000; i += 4096) {
1.55      pk       8054:                if (((pte = getpte4m(i)) & SRMMU_TETYPE) != SRMMU_TEPTE)
                   8055:                    continue;
1.91      fair     8056:        printf("0x%x -> 0x%x%08x (pte 0x%x)\n", i, pte >> 28,
1.55      pk       8057:                       (pte & ~0xff) << 4, pte);
                   8058:        }
1.66      christos 8059:        printf("done\n");
1.55      pk       8060: }
1.265     pk       8061: #endif /* EXTREME_DEBUG */
1.267     pk       8062:
                   8063: #ifdef DDB
                   8064: int pmap_dump(struct pmap *pm);
1.303     uwe      8065:
                   8066: int
                   8067: pmap_dump(struct pmap *pm)
1.267     pk       8068: {
                   8069:        int startvr, endvr, vr, vs, i, n;
                   8070:        struct regmap *rp;
                   8071:        struct segmap *sp;
                   8072:
                   8073:        if (pm == NULL)
                   8074:                pm = pmap_kernel();
                   8075:
                   8076:        if (pm == pmap_kernel()) {
                   8077:                startvr = NUREG;
                   8078:                endvr = 256;
                   8079:        } else {
                   8080:                startvr = 0;
                   8081:                endvr = NUREG;
                   8082:        }
                   8083:
                   8084:        for (vr = startvr; vr < endvr; vr++) {
                   8085:                rp = &pm->pm_regmap[vr];
                   8086:                if (rp->rg_nsegmap == 0)
                   8087:                        continue;
                   8088:                printf("vr %d: %d segments", vr, rp->rg_nsegmap);
                   8089:                if (rp->rg_segmap == NULL) {
                   8090:                        printf("[no segments]\n");
                   8091:                        continue;
                   8092:                }
                   8093:                for (vs = 0; vs < NSEGRG; vs++) {
                   8094:                        sp = &rp->rg_segmap[vs];
                   8095:                        if (sp->sg_npte == 0)
                   8096:                                continue;
                   8097:                        if ((vs & 3) == 0)
                   8098:                                printf("\n   ");
                   8099:                        printf(" %d: n %d w %d p %d,", vs,
                   8100:                                sp->sg_npte, sp->sg_nwired, sp->sg_pmeg);
                   8101:                        if (sp->sg_pte == NULL) {
                   8102:                                printf("[no ptes]");
                   8103:                                continue;
                   8104:                        }
                   8105:                        for (n = 0, i = 0; i < NPTESG; i++) {
1.296     chs      8106:                                if (CPU_HAS_SUNMMU && sp->sg_pte[i] & PG_WIRED)
                   8107:                                        n++;
                   8108:                                if (CPU_HAS_SRMMU && sp->sg_wiremap & (1 << i))
1.267     pk       8109:                                        n++;
                   8110:                        }
                   8111:                        if (n != sp->sg_nwired)
                   8112:                                printf("[wired count %d]", n);
                   8113:                }
                   8114:                printf("\n");
                   8115:        }
                   8116:
                   8117:        return (0);
                   8118: }
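
                        /*
                         * From DDB this can be run by hand, e.g. "call pmap_dump(0)",
                         * to summarize a pmap's segment usage and cross-check the
                         * per-segment wired-page counts.
                         */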
                   8119: #endif /* DDB */
