
Annotation of src/sys/arch/sun3/sun3/pmap.c, Revision 1.164.2.1

1.164.2.1! uebayasi    1: /*     $NetBSD$        */
1.36      cgd         2:
1.64      gwr         3: /*-
                      4:  * Copyright (c) 1996 The NetBSD Foundation, Inc.
1.13      glass       5:  * All rights reserved.
                      6:  *
1.64      gwr         7:  * This code is derived from software contributed to The NetBSD Foundation
                      8:  * by Adam Glass and Gordon W. Ross.
                      9:  *
1.13      glass      10:  * Redistribution and use in source and binary forms, with or without
                     11:  * modification, are permitted provided that the following conditions
                     12:  * are met:
                     13:  * 1. Redistributions of source code must retain the above copyright
                     14:  *    notice, this list of conditions and the following disclaimer.
                     15:  * 2. Redistributions in binary form must reproduce the above copyright
                     16:  *    notice, this list of conditions and the following disclaimer in the
                     17:  *    documentation and/or other materials provided with the distribution.
                     18:  *
1.64      gwr        19:  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
                     20:  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
                     21:  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
1.66      gwr        22:  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
                     23:  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
1.64      gwr        24:  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
                     25:  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
                     26:  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
                     27:  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
                     28:  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
                     29:  * POSSIBILITY OF SUCH DAMAGE.
1.13      glass      30:  */
1.25      gwr        31:
1.3       glass      32: /*
1.1       glass      33:  * Some notes:
                     34:  *
1.84      gwr        35:  * sun3s have eight hardware contexts.  In this pmap design, the kernel is
1.38      gwr        36:  * mapped into all contexts.  Each process occupies a known portion of a
1.1       glass      37:  * context, and processes compete for the available contexts on an LRU basis.
                     38:  *
1.52      gwr        39:  * sun3s also have this evil "PMEG" crapola.  Essentially each "context"'s
1.1       glass      40:  * address space is defined by the 2048 one-byte entries in the segment map.
1.38      gwr        41:  * Each of these 1-byte entries points to a "Page Map Entry Group" (PMEG)
                     42:  * which contains the mappings for that virtual segment.  (This strange
                      43:  * terminology was invented by Sun and is preserved here for consistency.)
                      44:  * Each PMEG maps a segment 128KB in length, with 16 pages of 8KB each.
                     45:  *
1.52      gwr        46:  * As you might guess, these PMEGs are in short supply and heavy demand.
                     47:  * PMEGs allocated to the kernel are "static" in the sense that they can't
                     48:  * be stolen from it.  PMEGs allocated to a particular segment of a
1.1       glass      49:  * pmap's virtual space will be fought over by the other pmaps.
                     50:  */
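                         /*
                          * Editor's sketch (illustrative, not part of the pmap): how a
                          * virtual address is resolved on the sun3, assuming the usual
                          * SEGSHIFT, PGSHIFT and SEGOFSET definitions from the headers:
                          *
                          *    sme = segmap[va >> SEGSHIFT];     (one-byte PMEG number)
                          *    pte = pmeg[sme][(va & SEGOFSET) >> PGSHIFT];  (1 of 16 PTEs)
                          *
                          * Each segmap entry thus covers one 128KB segment, and the PMEG
                          * it names holds the sixteen 8KB page mappings for that segment.
                          */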
                     51:
                     52: /*
1.65      gwr        53:  * Cache management:
                     54:  * All sun3 cache implementations are write-back.
                     55:  * Flushes must be done before removing translations
                     56:  * from the MMU because the cache uses the MMU.
                     57:  */
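                         /*
                          * The resulting pattern used throughout this file (see e.g.
                          * context_free() and the pmap_remove_mmu() path) is:
                          *
                          *    cache_flush_segment(va);   flush dirty lines first,
                          *    set_segmap(va, SEGINV);    then drop the translation.
                          *
                          * Reversing the two would leave dirty cache lines with no
                          * translation left to write them back through.
                          */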
                     58:
                     59: /*
1.1       glass      60:  * wanted attributes:
                     61:  *       pmegs that aren't needed by a pmap remain in the MMU.
                     62:  *       quick context switches between pmaps
                     63:  *       kernel is in all contexts
                     64:  */
                     65:
1.83      gwr        66: /*
1.84      gwr        67:  * Project1:  Use a "null" context for processes that have not
1.83      gwr        68:  * touched any user-space address recently.  This is efficient
                     69:  * for things that stay in the kernel for a while, waking up
                      70:  * to handle some I/O and then going back to sleep (e.g. nfsd).
                     71:  * If and when such a process returns to user-mode, it will
                     72:  * fault and be given a real context at that time.
                     73:  *
                      74:  * This also makes context switching fast, because all we need
                     75:  * to do there for the MMU is slam the context register.
1.84      gwr        76:  *
                     77:  * Project2:  Use a private pool of PV elements.  This pool can be
                     78:  * fixed size because the total mapped virtual space supported by
                     79:  * the MMU H/W (and this pmap) is fixed for all time.
1.83      gwr        80:  */
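                         /*
                          * Editor's sketch of the resulting context-switch fast path
                          * (hedged; the authoritative code is _pmap_switch(), declared
                          * below and called from locore.s):
                          *
                          *    set_context(pmap->pm_ctxnum);   (may be EMPTY_CONTEXT)
                          *
                          * A pmap parked in EMPTY_CONTEXT costs nothing at switch time;
                          * when the process touches user space again it faults, and
                          * context_allocate() hands it a real context at that point.
                          */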
1.144     lukem      81:
                     82: #include <sys/cdefs.h>
1.164.2.1! uebayasi   83: __KERNEL_RCSID(0, "$NetBSD$");
1.83      gwr        84:
1.104     jonathan   85: #include "opt_ddb.h"
1.143     martin     86: #include "opt_pmap_debug.h"
1.103     gwr        87:
1.38      gwr        88: #include <sys/param.h>
                     89: #include <sys/systm.h>
                     90: #include <sys/proc.h>
                     91: #include <sys/malloc.h>
1.132     chs        92: #include <sys/pool.h>
1.38      gwr        93: #include <sys/queue.h>
1.74      gwr        94: #include <sys/kcore.h>
1.38      gwr        95:
1.103     gwr        96: #include <uvm/uvm.h>
1.110     mrg        97:
1.74      gwr        98: #include <machine/cpu.h>
                     99: #include <machine/dvma.h>
1.76      gwr       100: #include <machine/idprom.h>
1.74      gwr       101: #include <machine/kcore.h>
1.38      gwr       102: #include <machine/mon.h>
1.74      gwr       103: #include <machine/pmap.h>
                    104: #include <machine/pte.h>
1.38      gwr       105: #include <machine/vmparam.h>
1.138     chs       106: #include <m68k/cacheops.h>
1.65      gwr       107:
1.99      gwr       108: #include <sun3/sun3/cache.h>
                    109: #include <sun3/sun3/control.h>
                    110: #include <sun3/sun3/fc.h>
                    111: #include <sun3/sun3/machdep.h>
                    112: #include <sun3/sun3/obmem.h>
                    113:
1.81      gwr       114: #ifdef DDB
                    115: #include <ddb/db_output.h>
                    116: #else
                    117: #define db_printf printf
                    118: #endif
                    119:
1.78      gwr       120: /* Verify this correspondence between definitions. */
1.76      gwr       121: #if    (PMAP_OBIO << PG_MOD_SHIFT) != PGT_OBIO
                    122: #error "PMAP_XXX definitions don't match pte.h!"
                    123: #endif
                    124:
1.89      gwr       125: /* Type bits in a "pseudo" physical address. (XXX: pmap.h?) */
                    126: #define PMAP_TYPE      PMAP_VME32
1.75      gwr       127:
1.78      gwr       128: /*
                    129:  * Local convenience macros
                    130:  */
                    131:
1.98      gwr       132: #define DVMA_MAP_END   (DVMA_MAP_BASE + DVMA_MAP_AVAIL)
                    133:
1.80      gwr       134: /* User segments from 0 to KERNBASE */
1.164.2.1! uebayasi  135: #define        NUSEG   (KERNBASE3 / NBSG)
1.80      gwr       136: /* The remainder are kernel segments. */
                    137: #define        NKSEG   (NSEGMAP - NUSEG)
1.78      gwr       138:
                    139: #define VA_SEGNUM(x)   ((u_int)(x) >> SEGSHIFT)
1.50      gwr       140:
1.76      gwr       141: /*
1.78      gwr       142:  * Only "main memory" pages are registered in the pv_lists.
                    143:  * This macro is used to determine if a given pte refers to
                    144:  * "main memory" or not.  One slight hack here deserves more
                    145:  * explanation:  The Sun frame buffers all appear as PG_OBMEM
                    146:  * devices but way up near the end of the address space.
                    147:  * We do not want to consider these as "main memory" so the
                    148:  * macro below treats the high bits of the PFN as type bits.
                    149:  *
                    150:  * Note that on the 3/60 only 16 bits of PFN are stored in the
                    151:  * MMU and the top 3 bits read back as zero.  This means a
                     152:  * translation entered into the MMU for physical address
                    153:  * 0xFF000000 will look like 0x1F000000 after one reads back
                    154:  * the pte and converts the PFN to a physical address.
                    155:  */
1.88      gwr       156: #define MEM_BITS       (PG_TYPE | PA_PGNUM(0xF8000000))
1.78      gwr       157: #define        IS_MAIN_MEM(pte) (((pte) & MEM_BITS) == 0)
                    158:
1.87      gwr       159: /* Does this (pseudo) PA represent device space? */
1.89      gwr       160: #define PA_DEV_MASK   (0xF8000000 | PMAP_TYPE)
1.88      gwr       161: #define PA_IS_DEV(pa) ((pa) & PA_DEV_MASK)
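                         /*
                          * Worked example (editor's illustration): on a 3/60, a PTE
                          * entered for PA 0xFF000000 reads back as if for PA 0x1F000000,
                          * since the top 3 PFN bits are lost.  Either value has PFN bits
                          * inside PA_PGNUM(0xF8000000), so IS_MAIN_MEM() is false for the
                          * frame buffer both ways, and PA_IS_DEV(0x1F000000) is nonzero
                          * (0x1F000000 & 0xF8000000 == 0x18000000).
                          */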
1.87      gwr       162:
1.78      gwr       163: /*
                    164:  * Is there a Virtually Addressed Cache (VAC) alias problem
                    165:  * if one page is mapped at both a1 and a2?
                    166:  */
                    167: #define        BADALIAS(a1, a2)        (((int)(a1) ^ (int)(a2)) & SEGOFSET)
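                         /*
                          * Example: with the usual SEGOFSET of 0x1FFFF (128KB segments),
                          * mapping one page at a1 == 0x2000 and a2 == 0x22000 is safe,
                          * since (a1 ^ a2) & SEGOFSET == 0; a2 == 0x4000 instead gives a
                          * nonzero result, i.e. a potential VAC alias that must be made
                          * non-cacheable (PG_NC).
                          */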
                    168:
                    169:
                    170: /*
                    171:  * Debugging support.
                    172:  */
                    173: #define        PMD_ENTER       1
                    174: #define        PMD_LINK        2
                    175: #define        PMD_PROTECT     4
                    176: #define        PMD_SWITCH      8
                    177: #define PMD_COW                0x10
                    178: #define PMD_MODBIT     0x20
                    179: #define PMD_REFBIT     0x40
                    180: #define PMD_WIRING     0x80
                    181: #define PMD_CONTEXT    0x100
                    182: #define PMD_CREATE     0x200
                    183: #define PMD_SEGMAP     0x400
                    184: #define PMD_SETPTE     0x800
1.132     chs       185: #define PMD_FAULT      0x1000
                    186: #define PMD_KMAP       0x2000
1.78      gwr       187:
                    188: #define        PMD_REMOVE      PMD_ENTER
                    189: #define        PMD_UNLINK      PMD_LINK
                    190:
                    191: #ifdef PMAP_DEBUG
                    192: int pmap_debug = 0;
                    193: int pmap_db_watchva = -1;
                    194: int pmap_db_watchpmeg = -1;
                    195: #endif /* PMAP_DEBUG */
                    196:
                    197: /*
                    198:  * Miscellaneous variables.
                    199:  *
1.76      gwr       200:  * For simplicity, this interface retains the variables
                    201:  * that were used in the old interface (without NONCONTIG).
                    202:  * These are set in pmap_bootstrap() and used in
                    203:  * pmap_next_page().
                    204:  */
1.142     thorpej   205: vaddr_t virtual_avail, virtual_end;
1.132     chs       206: paddr_t avail_start, avail_end;
1.78      gwr       207: #define        managed(pa)     (((pa) >= avail_start) && ((pa) < avail_end))
1.76      gwr       208:
                    209: /* used to skip the Sun3/50 video RAM */
1.132     chs       210: static vaddr_t hole_start, hole_size;
1.38      gwr       211:
1.78      gwr       212: /* This is for pmap_next_page() */
1.132     chs       213: static paddr_t avail_next;
1.78      gwr       214:
                    215: /* This is where we map a PMEG without a context. */
1.132     chs       216: static vaddr_t temp_seg_va;
1.78      gwr       217:
                    218: /*
                    219:  * Location to store virtual addresses
                    220:  * to be used in copy/zero operations.
                    221:  */
1.132     chs       222: vaddr_t tmp_vpages[2] = {
1.99      gwr       223:        SUN3_MONSHORTSEG,
1.140     thorpej   224:        SUN3_MONSHORTSEG + PAGE_SIZE };
1.78      gwr       225: int tmp_vpages_inuse;
                    226:
                    227: static int pmap_version = 1;
1.158     pooka     228: static struct pmap kernel_pmap_store;
1.159     pooka     229: struct pmap *const kernel_pmap_ptr  = &kernel_pmap_store;
1.158     pooka     230: #define kernel_pmap (kernel_pmap_ptr)
1.82      gwr       231: static u_char kernel_segmap[NSEGMAP];
1.132     chs       232:
                    233: /* memory pool for pmap structures */
                    234: struct pool    pmap_pmap_pool;
1.78      gwr       235:
1.38      gwr       236: /* statistics... */
                    237: struct pmap_stats {
                    238:        int     ps_enter_firstpv;       /* pv heads entered */
                    239:        int     ps_enter_secondpv;      /* pv nonheads entered */
1.39      gwr       240:        int     ps_unlink_pvfirst;      /* of pv_unlinks on head */
                    241:        int     ps_unlink_pvsearch;     /* of pv_unlink searches */
1.40      gwr       242:        int     ps_pmeg_faultin;        /* pmegs reloaded */
1.39      gwr       243:        int     ps_changeprots;         /* of calls to changeprot */
                    244:        int     ps_changewire;          /* useless wiring changes */
                    245:        int     ps_npg_prot_all;        /* of active pages protected */
                    246:        int     ps_npg_prot_actual;     /* pages actually affected */
1.60      gwr       247:        int     ps_vac_uncached;        /* non-cached due to bad alias */
                    248:        int     ps_vac_recached;        /* re-cached when bad alias gone */
1.38      gwr       249: } pmap_stats;
                    250:
1.78      gwr       251: #define pmap_lock(pmap) simple_lock(&pmap->pm_lock)
                    252: #define pmap_unlock(pmap) simple_unlock(&pmap->pm_lock)
                    253: #define pmap_add_ref(pmap) ++pmap->pm_refcount
                    254: #define pmap_del_ref(pmap) --pmap->pm_refcount
                    255: #define pmap_refcount(pmap) pmap->pm_refcount
                    256:
1.77      gwr       257: #ifdef PMAP_DEBUG
                    258: #define        CHECK_SPL() do { \
                    259:        if ((getsr() & PSL_IPL) < PSL_IPL4) \
                    260:                panic("pmap: bad spl, line %d", __LINE__); \
                    261: } while (0)
                    262: #else  /* PMAP_DEBUG */
                    263: #define        CHECK_SPL() (void)0
                    264: #endif /* PMAP_DEBUG */
                    265:
1.38      gwr       266:
1.2       glass     267: /*
1.78      gwr       268:  * PV support.
                    269:  * (i.e. Find all virtual mappings of a physical page.)
1.5       glass     270:  */
                    271:
1.38      gwr       272: int pv_initialized = 0;
1.2       glass     273:
1.84      gwr       274: /* One of these for each mapped virtual page. */
1.1       glass     275: struct pv_entry {
1.38      gwr       276:        struct pv_entry *pv_next;
                    277:        pmap_t         pv_pmap;
1.132     chs       278:        vaddr_t        pv_va;
1.1       glass     279: };
1.38      gwr       280: typedef struct pv_entry *pv_entry_t;
1.1       glass     281:
1.84      gwr       282: /* Table of PV list heads (per physical page). */
                    283: static struct pv_entry **pv_head_tbl;
                    284:
                    285: /* Free list of PV entries. */
                    286: static struct pv_entry *pv_free_list;
                    287:
                    288: /* Table of flags (per physical page). */
                    289: static u_char *pv_flags_tbl;
1.1       glass     290:
1.38      gwr       291: /* These are as in the MMU but shifted by PV_SHIFT. */
                    292: #define PV_SHIFT       24
                    293: #define PV_VALID  0x80
                    294: #define PV_WRITE  0x40
                    295: #define PV_SYSTEM 0x20
                    296: #define PV_NC     0x10
                    297: #define PV_PERM   0xF0
                    298: #define PV_TYPE   0x0C
                    299: #define PV_REF    0x02
                    300: #define PV_MOD    0x01
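                         /*
                          * Editor's note: given the pte.h bit layout these mirror, each
                          * PV flag is its MMU PTE counterpart shifted down, e.g.
                          *
                          *    PV_MOD == (PG_MOD >> PV_SHIFT)
                          *    PV_REF == (PG_REF >> PV_SHIFT)
                          *
                          * which is what save_modref_bits() below relies on:
                          *    *pv_flags |= ((pte & PG_MODREF) >> PV_SHIFT);
                          */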
                    301:
                    302:
                    303: /*
1.78      gwr       304:  * Context structures and queues
                    305:  */
                    306:
                    307: struct context_state {
                    308:        TAILQ_ENTRY(context_state) context_link;
                    309:        int            context_num;
                    310:        struct pmap   *context_upmap;
                    311: };
                    312: typedef struct context_state *context_t;
                    313:
1.83      gwr       314: #define INVALID_CONTEXT -1     /* impossible value */
                    315: #define EMPTY_CONTEXT 0
                    316: #define FIRST_CONTEXT 1
                    317: #define        has_context(pmap)       ((pmap)->pm_ctxnum != EMPTY_CONTEXT)
1.78      gwr       318:
1.79      gwr       319: TAILQ_HEAD(context_tailq, context_state)
                    320:        context_free_queue, context_active_queue;
1.50      gwr       321:
1.78      gwr       322: static struct context_state context_array[NCONTEXT];
1.1       glass     323:
                    324:
1.38      gwr       325: /*
1.81      gwr       326:  * PMEG structures, queues, and macros
1.38      gwr       327:  */
                    328: #define PMEGQ_FREE     0
                    329: #define PMEGQ_INACTIVE 1
                    330: #define PMEGQ_ACTIVE   2
                    331: #define PMEGQ_KERNEL   3
                    332: #define PMEGQ_NONE     4
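                         /*
                          * Editor's summary of the queue transitions implemented below:
                          *
                          *    FREE     --pmeg_allocate()-->  ACTIVE or KERNEL
                          *    ACTIVE   --pmeg_release()--->  INACTIVE  (context lost,
                          *                                             contents kept)
                          *    INACTIVE --pmeg_cache()----->  ACTIVE    (reclaimed)
                          *    any      --pmeg_free()------>  FREE
                          */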
                    333:
                    334: struct pmeg_state {
                    335:        TAILQ_ENTRY(pmeg_state) pmeg_link;
                    336:        int            pmeg_index;
                    337:        pmap_t         pmeg_owner;
                    338:        int            pmeg_version;
1.132     chs       339:        vaddr_t        pmeg_va;
1.38      gwr       340:        int            pmeg_wired;
                    341:        int            pmeg_reserved;
                    342:        int            pmeg_vpages;
                    343:        int            pmeg_qstate;
                    344: };
                    345:
                    346: typedef struct pmeg_state *pmeg_t;
                    347:
                    348: #define PMEG_INVAL (NPMEG-1)
                    349: #define PMEG_NULL (pmeg_t) NULL
                    350:
                    351: /* XXX - Replace pmeg_kernel_queue with pmeg_wired_queue ? */
1.79      gwr       352: TAILQ_HEAD(pmeg_tailq, pmeg_state)
                    353:        pmeg_free_queue, pmeg_inactive_queue,
1.38      gwr       354:        pmeg_active_queue, pmeg_kernel_queue;
1.26      gwr       355:
                    356: static struct pmeg_state pmeg_array[NPMEG];
1.20      glass     357:
1.1       glass     358:
1.38      gwr       359: /*
                    360:  * prototypes
                    361:  */
1.145     chs       362: static int get_pte_pmeg(int, int);
                    363: static void set_pte_pmeg(int, int, int);
1.38      gwr       364:
1.145     chs       365: static void context_allocate(pmap_t);
                    366: static void context_free(pmap_t);
                    367: static void context_init(void);
                    368:
                    369: static void pmeg_init(void);
                    370: static void pmeg_reserve(int);
                    371:
                    372: static pmeg_t pmeg_allocate(pmap_t, vaddr_t);
                    373: static void pmeg_mon_init(vaddr_t, vaddr_t, int);
                    374: static void pmeg_release(pmeg_t);
                    375: static void pmeg_free(pmeg_t);
                    376: static pmeg_t pmeg_cache(pmap_t, vaddr_t);
                    377: static void pmeg_set_wiring(pmeg_t, vaddr_t, int);
                    378:
                    379: static int  pv_link  (pmap_t, int, vaddr_t);
                    380: static void pv_unlink(pmap_t, int, vaddr_t);
                    381: static void pv_remove_all(paddr_t);
                    382: static void pv_changepte(paddr_t, int, int);
                    383: static u_int pv_syncflags(pv_entry_t);
                    384: static void pv_init(void);
                    385:
                    386: static void pmeg_clean(pmeg_t);
                    387: static void pmeg_clean_free(void);
                    388:
                    389: static void pmap_common_init(pmap_t);
                    390: static void pmap_kernel_init(pmap_t);
                    391: static void pmap_user_init(pmap_t);
                    392: static void pmap_page_upload(void);
                    393:
1.151     thorpej   394: static void pmap_enter_kernel(vaddr_t, int, bool);
                    395: static void pmap_enter_user(pmap_t, vaddr_t, int, bool);
1.145     chs       396:
                    397: static void pmap_protect1(pmap_t, vaddr_t, vaddr_t);
                    398: static void pmap_protect_mmu(pmap_t, vaddr_t, vaddr_t);
                    399: static void pmap_protect_noctx(pmap_t, vaddr_t, vaddr_t);
                    400:
                    401: static void pmap_remove1(pmap_t, vaddr_t, vaddr_t);
                    402: static void pmap_remove_mmu(pmap_t, vaddr_t, vaddr_t);
                    403: static void pmap_remove_noctx(pmap_t, vaddr_t, vaddr_t);
1.1       glass     404:
1.145     chs       405: static int  pmap_fault_reload(struct pmap *, vaddr_t, int);
1.66      gwr       406:
1.99      gwr       407: /* Called only from locore.s and pmap.c */
1.145     chs       408: void   _pmap_switch(pmap_t);
1.99      gwr       409:
1.79      gwr       410: #ifdef PMAP_DEBUG
1.145     chs       411: void pmap_print(pmap_t);
1.156     tsutsui   412: void pv_print(paddr_t);
1.145     chs       413: void pmeg_print(pmeg_t);
                    414: static void pmeg_verify_empty(vaddr_t);
1.79      gwr       415: #endif /* PMAP_DEBUG */
1.145     chs       416: void pmap_pinit(pmap_t);
                    417: void pmap_release(pmap_t);
1.79      gwr       418:
                    419: /*
                    420:  * Various in-line helper functions.
                    421:  */
                    422:
1.83      gwr       423: static inline pmap_t
1.145     chs       424: current_pmap(void)
1.83      gwr       425: {
                    426:        struct vmspace *vm;
1.130     chs       427:        struct vm_map *map;
1.83      gwr       428:        pmap_t  pmap;
                    429:
1.154     tsutsui   430:        vm = curproc->p_vmspace;
                    431:        map = &vm->vm_map;
                    432:        pmap = vm_map_pmap(map);
1.83      gwr       433:
                    434:        return (pmap);
                    435: }
                    436:
1.84      gwr       437: static inline struct pv_entry **
1.132     chs       438: pa_to_pvhead(paddr_t pa)
1.84      gwr       439: {
                    440:        int idx;
                    441:
                    442:        idx = PA_PGNUM(pa);
1.79      gwr       443: #ifdef DIAGNOSTIC
1.88      gwr       444:        if (PA_IS_DEV(pa) || (idx >= physmem))
1.84      gwr       445:                panic("pmap:pa_to_pvhead: bad pa=0x%lx", pa);
                    446: #endif
                    447:        return (&pv_head_tbl[idx]);
                    448: }
                    449:
                    450: static inline u_char *
1.132     chs       451: pa_to_pvflags(paddr_t pa)
1.79      gwr       452: {
1.84      gwr       453:        int idx;
                    454:
                    455:        idx = PA_PGNUM(pa);
                    456: #ifdef DIAGNOSTIC
1.88      gwr       457:        if (PA_IS_DEV(pa) || (idx >= physmem))
1.87      gwr       458:                panic("pmap:pa_to_pvflags: bad pa=0x%lx", pa);
1.84      gwr       459: #endif
                    460:        return (&pv_flags_tbl[idx]);
1.79      gwr       461: }
                    462:
1.132     chs       463: /*
                     464:  * Save the MOD and REF bits from the given PTE using its PA
                    465:  */
                    466: static inline void
                    467: save_modref_bits(int pte)
                    468: {
                    469:        u_char *pv_flags;
                    470:
                    471:        pv_flags = pa_to_pvflags(PG_PA(pte));
                    472:        *pv_flags |= ((pte & PG_MODREF) >> PV_SHIFT);
                    473: }
                    474:
1.84      gwr       475: static inline pmeg_t
1.79      gwr       476: pmeg_p(int sme)
                    477: {
1.84      gwr       478: #ifdef DIAGNOSTIC
1.79      gwr       479:        if (sme < 0 || sme >= SEGINV)
                    480:                panic("pmeg_p: bad sme");
1.84      gwr       481: #endif
1.79      gwr       482:        return &pmeg_array[sme];
                    483: }
                    484:
                    485: #define is_pmeg_wired(pmegp) (pmegp->pmeg_wired != 0)
                    486:
1.145     chs       487: static void
                    488: pmeg_set_wiring(pmeg_t pmegp, vaddr_t va, int flag)
1.79      gwr       489: {
                    490:        int idx, mask;
                    491:
                    492:        idx = VA_PTE_NUM(va);
                    493:        mask = 1 << idx;
                    494:
                    495:        if (flag)
                    496:                pmegp->pmeg_wired |= mask;
                    497:        else
                    498:                pmegp->pmeg_wired &= ~mask;
                    499: }
                    500:
1.78      gwr       501: /****************************************************************
                    502:  * Context management functions.
1.26      gwr       503:  */
1.39      gwr       504:
1.80      gwr       505: /* part of pmap_bootstrap */
1.145     chs       506: static void
                    507: context_init(void)
1.78      gwr       508: {
                    509:        int i;
                    510:
                    511:        TAILQ_INIT(&context_free_queue);
                    512:        TAILQ_INIT(&context_active_queue);
1.26      gwr       513:
1.83      gwr       514:        /* Leave EMPTY_CONTEXT out of the free list. */
                    515:        context_array[0].context_upmap = kernel_pmap;
                    516:
                    517:        for (i = 1; i < NCONTEXT; i++) {
1.78      gwr       518:                context_array[i].context_num = i;
                    519:                context_array[i].context_upmap = NULL;
                    520:                TAILQ_INSERT_TAIL(&context_free_queue, &context_array[i],
1.145     chs       521:                                  context_link);
1.76      gwr       522: #ifdef PMAP_DEBUG
1.78      gwr       523:                if (pmap_debug & PMD_CONTEXT)
1.81      gwr       524:                        printf("context_init: sizeof(context_array[0])=%d\n",
1.145     chs       525:                               sizeof(context_array[0]));
1.78      gwr       526: #endif
                    527:        }
                    528: }
1.26      gwr       529:
1.80      gwr       530: /* Get us a context (steal one if necessary). */
1.145     chs       531: static void
                    532: context_allocate(pmap_t pmap)
1.38      gwr       533: {
                    534:        context_t context;
                    535:
1.80      gwr       536:        CHECK_SPL();
1.77      gwr       537:
1.80      gwr       538: #ifdef DIAGNOSTIC
1.50      gwr       539:        if (pmap == kernel_pmap)
                    540:                panic("context_allocate: kernel_pmap");
1.38      gwr       541:        if (has_context(pmap))
                    542:                panic("pmap: pmap already has context allocated to it");
1.80      gwr       543: #endif
                    544:
                    545:        context = TAILQ_FIRST(&context_free_queue);
1.65      gwr       546:        if (context == NULL) {
                    547:                /* Steal the head of the active queue. */
1.80      gwr       548:                context = TAILQ_FIRST(&context_active_queue);
1.65      gwr       549:                if (context == NULL)
                    550:                        panic("pmap: no contexts left?");
1.38      gwr       551: #ifdef PMAP_DEBUG
                    552:                if (pmap_debug & PMD_CONTEXT)
1.80      gwr       553:                        printf("context_allocate: steal ctx %d from pmap %p\n",
1.145     chs       554:                               context->context_num, context->context_upmap);
1.38      gwr       555: #endif
1.80      gwr       556:                context_free(context->context_upmap);
                    557:                context = TAILQ_FIRST(&context_free_queue);
1.38      gwr       558:        }
1.80      gwr       559:        TAILQ_REMOVE(&context_free_queue, context, context_link);
                    560:
1.132     chs       561: #ifdef DIAGNOSTIC
1.38      gwr       562:        if (context->context_upmap != NULL)
                    563:                panic("pmap: context in use???");
1.132     chs       564: #endif
1.80      gwr       565:
                    566:        context->context_upmap = pmap;
1.38      gwr       567:        pmap->pm_ctxnum = context->context_num;
1.80      gwr       568:
                    569:        TAILQ_INSERT_TAIL(&context_active_queue, context, context_link);
1.38      gwr       570:
                    571:        /*
                    572:         * We could reload the MMU here, but that would
                    573:         * artificially move PMEGs from the inactive queue
                    574:         * to the active queue, so do lazy reloading.
                    575:         * XXX - Need to reload wired pmegs though...
1.80      gwr       576:         * XXX: Verify that the context is empty?
1.38      gwr       577:         */
1.1       glass     578: }
1.5       glass     579:
1.80      gwr       580: /*
                    581:  * Unload the context and put it on the free queue.
                    582:  */
1.145     chs       583: static void
                    584: context_free(pmap_t pmap)
1.38      gwr       585: {
                    586:        int saved_ctxnum, ctxnum;
1.80      gwr       587:        int i, sme;
1.38      gwr       588:        context_t contextp;
1.132     chs       589:        vaddr_t va;
1.38      gwr       590:
1.80      gwr       591:        CHECK_SPL();
1.1       glass     592:
1.38      gwr       593:        ctxnum = pmap->pm_ctxnum;
1.83      gwr       594:        if (ctxnum < FIRST_CONTEXT || ctxnum >= NCONTEXT)
1.80      gwr       595:                panic("pmap: context_free ctxnum");
1.38      gwr       596:        contextp = &context_array[ctxnum];
                    597:
1.80      gwr       598:        /* Temporary context change. */
1.38      gwr       599:        saved_ctxnum = get_context();
                    600:        set_context(ctxnum);
                    601:
1.50      gwr       602:        /* Before unloading translations, flush cache. */
                    603: #ifdef HAVECACHE
                    604:        if (cache_size)
                    605:                cache_flush_context();
                    606: #endif
                    607:
1.38      gwr       608:        /* Unload MMU (but keep in SW segmap). */
1.145     chs       609:        for (i = 0, va = 0; i < NUSEG; i++, va += NBSG) {
1.80      gwr       610:
                    611: #if !defined(PMAP_DEBUG)
                    612:                /* Short-cut using the S/W segmap (if !debug). */
                    613:                if (pmap->pm_segmap[i] == SEGINV)
                    614:                        continue;
                    615: #endif
                    616:
                    617:                /* Check the H/W segmap. */
                    618:                sme = get_segmap(va);
                    619:                if (sme == SEGINV)
                    620:                        continue;
                    621:
                    622:                /* Found valid PMEG in the segmap. */
1.38      gwr       623: #ifdef PMAP_DEBUG
1.80      gwr       624:                if (pmap_debug & PMD_SEGMAP)
1.145     chs       625:                        printf("pmap: set_segmap ctx=%d v=0x%lx old=0x%x "
                    626:                               "new=ff (cf)\n", ctxnum, va, sme);
1.81      gwr       627: #endif
                    628: #ifdef DIAGNOSTIC
1.80      gwr       629:                if (sme != pmap->pm_segmap[i])
                    630:                        panic("context_free: unknown sme at va=0x%lx", va);
                    631: #endif
                    632:                /* Did cache flush above (whole context). */
                    633:                set_segmap(va, SEGINV);
                    634:                /* In this case, do not clear pm_segmap. */
1.83      gwr       635:                /* XXX: Maybe inline this call? */
1.80      gwr       636:                pmeg_release(pmeg_p(sme));
1.38      gwr       637:        }
1.80      gwr       638:
                    639:        /* Restore previous context. */
1.38      gwr       640:        set_context(saved_ctxnum);
1.80      gwr       641:
                    642:        /* Dequeue, update, requeue. */
                    643:        TAILQ_REMOVE(&context_active_queue, contextp, context_link);
1.83      gwr       644:        pmap->pm_ctxnum = EMPTY_CONTEXT;
1.38      gwr       645:        contextp->context_upmap = NULL;
1.78      gwr       646:        TAILQ_INSERT_TAIL(&context_free_queue, contextp, context_link);
1.1       glass     647: }
                    648:
1.78      gwr       649:
                    650: /****************************************************************
                    651:  * PMEG management functions.
                    652:  */
                    653:
1.145     chs       654: static void
                    655: pmeg_init(void)
1.26      gwr       656: {
1.78      gwr       657:        int x;
                    658:
                     659:        /* clear pmeg array, put it all on the free pmeg queue */
1.38      gwr       660:
1.78      gwr       661:        TAILQ_INIT(&pmeg_free_queue);
                    662:        TAILQ_INIT(&pmeg_inactive_queue);
                    663:        TAILQ_INIT(&pmeg_active_queue);
                    664:        TAILQ_INIT(&pmeg_kernel_queue);
1.38      gwr       665:
1.133     tsutsui   666:        memset(pmeg_array, 0, NPMEG*sizeof(struct pmeg_state));
1.145     chs       667:        for (x = 0; x < NPMEG; x++) {
                    668:                TAILQ_INSERT_TAIL(&pmeg_free_queue, &pmeg_array[x], pmeg_link);
1.78      gwr       669:                pmeg_array[x].pmeg_qstate = PMEGQ_FREE;
                    670:                pmeg_array[x].pmeg_index = x;
1.38      gwr       671:        }
                    672:
1.78      gwr       673:        /* The last pmeg is not usable. */
                    674:        pmeg_reserve(SEGINV);
1.26      gwr       675: }
                    676:
1.38      gwr       677: /*
                    678:  * Reserve a pmeg (forever) for use by PROM, etc.
                    679:  * Contents are left as-is.  Called very early...
                    680:  */
1.145     chs       681: void
                    682: pmeg_reserve(int sme)
1.1       glass     683: {
1.38      gwr       684:        pmeg_t pmegp;
1.1       glass     685:
1.38      gwr       686:        /* Can not use pmeg_p() because it fails on SEGINV. */
                    687:        pmegp = &pmeg_array[sme];
1.26      gwr       688:
1.67      gwr       689:        if (pmegp->pmeg_reserved) {
1.76      gwr       690:                mon_printf("pmeg_reserve: already reserved\n");
1.67      gwr       691:                sunmon_abort();
                    692:        }
                    693:        if (pmegp->pmeg_owner) {
1.76      gwr       694:                mon_printf("pmeg_reserve: already owned\n");
1.67      gwr       695:                sunmon_abort();
                    696:        }
1.38      gwr       697:
1.78      gwr       698:        /* Owned by kernel, but not really usable... */
1.56      gwr       699:        pmegp->pmeg_owner = kernel_pmap;
1.38      gwr       700:        pmegp->pmeg_reserved++; /* keep count, just in case */
                    701:        TAILQ_REMOVE(&pmeg_free_queue, pmegp, pmeg_link);
                    702:        pmegp->pmeg_qstate = PMEGQ_NONE;
1.1       glass     703: }
                    704:
1.75      gwr       705: /*
                    706:  * Examine PMEGs used by the monitor, and either
                    707:  * reserve them (keep=1) or clear them (keep=0)
                    708:  */
1.145     chs       709: static void
                    710: pmeg_mon_init(vaddr_t sva, vaddr_t eva, int keep)
1.75      gwr       711: {
1.132     chs       712:        vaddr_t pgva, endseg;
1.75      gwr       713:        int pte, valid;
                    714:        unsigned char sme;
                    715:
1.94      gwr       716: #ifdef PMAP_DEBUG
                    717:        if (pmap_debug & PMD_SEGMAP)
                    718:                mon_printf("pmeg_mon_init(0x%x, 0x%x, %d)\n",
1.145     chs       719:                           sva, eva, keep);
1.94      gwr       720: #endif
                    721:
1.145     chs       722:        sva &= ~(NBSG - 1);
1.75      gwr       723:
                    724:        while (sva < eva) {
                    725:                sme = get_segmap(sva);
                    726:                if (sme != SEGINV) {
                    727:                        valid = 0;
                    728:                        endseg = sva + NBSG;
1.140     thorpej   729:                        for (pgva = sva; pgva < endseg; pgva += PAGE_SIZE) {
1.75      gwr       730:                                pte = get_pte(pgva);
                    731:                                if (pte & PG_VALID) {
                    732:                                        valid++;
                    733:                                }
                    734:                        }
1.94      gwr       735: #ifdef PMAP_DEBUG
                    736:                        if (pmap_debug & PMD_SEGMAP)
                    737:                                mon_printf(" sva=0x%x seg=0x%x valid=%d\n",
1.145     chs       738:                                           sva, sme, valid);
1.94      gwr       739: #endif
1.75      gwr       740:                        if (keep && valid)
1.76      gwr       741:                                pmeg_reserve(sme);
1.145     chs       742:                        else
                    743:                                set_segmap(sva, SEGINV);
1.75      gwr       744:                }
                    745:                sva += NBSG;
                    746:        }
                    747: }
                    748:
1.81      gwr       749: /*
                    750:  * This is used only during pmap_bootstrap, so we can
                    751:  * get away with borrowing a slot in the segmap.
                    752:  */
1.145     chs       753: static void
                    754: pmeg_clean(pmeg_t pmegp)
1.7       glass     755: {
1.81      gwr       756:        int sme;
1.132     chs       757:        vaddr_t va;
1.81      gwr       758:
                    759:        sme = get_segmap(0);
                    760:        if (sme != SEGINV)
                    761:                panic("pmeg_clean");
                    762:
                    763:        sme = pmegp->pmeg_index;
                    764:        set_segmap(0, sme);
                    765:
1.140     thorpej   766:        for (va = 0; va < NBSG; va += PAGE_SIZE)
1.81      gwr       767:                set_pte(va, PG_INVAL);
1.38      gwr       768:
1.81      gwr       769:        set_segmap(0, SEGINV);
1.7       glass     770: }
                    771:
                    772: /*
                    773:  * This routine makes sure that pmegs on the pmeg_free_queue contain
                    774:  * no valid ptes.  It pulls things off the queue, cleans them, and
1.80      gwr       775:  * puts them at the end.  The ending condition is finding the first
                    776:  * queue element at the head of the queue again.
1.7       glass     777:  */
1.145     chs       778: static void
                    779: pmeg_clean_free(void)
1.7       glass     780: {
1.38      gwr       781:        pmeg_t pmegp, pmegp_first;
1.7       glass     782:
1.80      gwr       783:        pmegp = TAILQ_FIRST(&pmeg_free_queue);
                    784:        if (pmegp == NULL)
1.38      gwr       785:                panic("pmap: no free pmegs available to clean");
1.26      gwr       786:
1.38      gwr       787:        pmegp_first = NULL;
1.26      gwr       788:
1.38      gwr       789:        for (;;) {
1.80      gwr       790:                pmegp = TAILQ_FIRST(&pmeg_free_queue);
                    791:                TAILQ_REMOVE(&pmeg_free_queue, pmegp, pmeg_link);
1.26      gwr       792:
1.38      gwr       793:                pmegp->pmeg_qstate = PMEGQ_NONE;
                    794:                pmeg_clean(pmegp);
1.80      gwr       795:                pmegp->pmeg_qstate = PMEGQ_FREE;
1.26      gwr       796:
1.38      gwr       797:                TAILQ_INSERT_TAIL(&pmeg_free_queue, pmegp, pmeg_link);
1.26      gwr       798:
1.38      gwr       799:                if (pmegp == pmegp_first)
                    800:                        break;
                    801:                if (pmegp_first == NULL)
                    802:                        pmegp_first = pmegp;
1.1       glass     803:        }
                    804: }
                    805:
1.38      gwr       806: /*
1.46      gwr       807:  * Allocate a PMEG by whatever means necessary.
                    808:  * (May invalidate some mappings!)
1.38      gwr       809:  */
1.145     chs       810: static pmeg_t
                    811: pmeg_allocate(pmap_t pmap, vaddr_t va)
1.1       glass     812: {
1.38      gwr       813:        pmeg_t pmegp;
                    814:
                    815:        CHECK_SPL();
1.1       glass     816:
1.39      gwr       817: #ifdef DIAGNOSTIC
                    818:        if (va & SEGOFSET) {
1.73      fair      819:                panic("pmap:pmeg_allocate: va=0x%lx", va);
1.39      gwr       820:        }
                    821: #endif
                    822:
1.38      gwr       823:        /* Get one onto the free list if necessary. */
1.80      gwr       824:        pmegp = TAILQ_FIRST(&pmeg_free_queue);
1.38      gwr       825:        if (!pmegp) {
                    826:                /* Try inactive queue... */
1.80      gwr       827:                pmegp = TAILQ_FIRST(&pmeg_inactive_queue);
1.38      gwr       828:                if (!pmegp) {
                    829:                        /* Try active queue... */
1.80      gwr       830:                        pmegp = TAILQ_FIRST(&pmeg_active_queue);
1.38      gwr       831:                }
                    832:                if (!pmegp) {
                    833:                        panic("pmeg_allocate: failed");
                    834:                }
1.145     chs       835:
1.40      gwr       836:                /*
                     837:                 * Remove mappings to free up a pmeg
                    838:                 * (so it will go onto the free list).
1.46      gwr       839:                 * XXX - Skip this one if it is wired?
1.40      gwr       840:                 */
1.76      gwr       841:                pmap_remove1(pmegp->pmeg_owner,
1.145     chs       842:                             pmegp->pmeg_va,
                    843:                             pmegp->pmeg_va + NBSG);
1.38      gwr       844:        }
                    845:
                    846:        /* OK, free list has something for us to take. */
1.80      gwr       847:        pmegp = TAILQ_FIRST(&pmeg_free_queue);
                    848: #ifdef DIAGNOSTIC
                    849:        if (pmegp == NULL)
1.38      gwr       850:                panic("pmeg_allocate: still none free?");
1.80      gwr       851:        if ((pmegp->pmeg_qstate != PMEGQ_FREE) ||
1.145     chs       852:            (pmegp->pmeg_index == SEGINV) ||
                    853:            (pmegp->pmeg_vpages))
1.80      gwr       854:                panic("pmeg_allocate: bad pmegp=%p", pmegp);
1.26      gwr       855: #endif
                    856: #ifdef PMAP_DEBUG
1.38      gwr       857:        if (pmegp->pmeg_index == pmap_db_watchpmeg) {
1.81      gwr       858:                db_printf("pmeg_allocate: watch pmegp=%p\n", pmegp);
1.38      gwr       859:                Debugger();
                    860:        }
1.26      gwr       861: #endif
1.80      gwr       862:
                    863:        TAILQ_REMOVE(&pmeg_free_queue, pmegp, pmeg_link);
1.38      gwr       864:
                    865:        /* Reassign this PMEG for the caller. */
                    866:        pmegp->pmeg_owner = pmap;
                    867:        pmegp->pmeg_version = pmap->pm_version;
                    868:        pmegp->pmeg_va = va;
                    869:        pmegp->pmeg_wired = 0;
                    870:        pmegp->pmeg_reserved  = 0;
                    871:        pmegp->pmeg_vpages  = 0;
1.50      gwr       872:        if (pmap == kernel_pmap) {
1.38      gwr       873:                TAILQ_INSERT_TAIL(&pmeg_kernel_queue, pmegp, pmeg_link);
                    874:                pmegp->pmeg_qstate = PMEGQ_KERNEL;
                    875:        } else {
                    876:                TAILQ_INSERT_TAIL(&pmeg_active_queue, pmegp, pmeg_link);
                    877:                pmegp->pmeg_qstate = PMEGQ_ACTIVE;
1.30      gwr       878:        }
1.38      gwr       879:        /* Caller will verify that it's empty (if debugging). */
                    880:        return pmegp;
1.1       glass     881: }
1.7       glass     882:
1.28      gwr       883: /*
1.38      gwr       884:  * Put pmeg on the inactive queue, leaving its contents intact.
                     885:  * This happens when we lose our context.  We may reclaim
                    886:  * this pmeg later if it is still in the inactive queue.
1.28      gwr       887:  */
1.145     chs       888: static void
                    889: pmeg_release(pmeg_t pmegp)
1.1       glass     890: {
1.78      gwr       891:
1.38      gwr       892:        CHECK_SPL();
1.29      gwr       893:
1.38      gwr       894: #ifdef DIAGNOSTIC
1.80      gwr       895:        if ((pmegp->pmeg_owner == kernel_pmap) ||
1.145     chs       896:            (pmegp->pmeg_qstate != PMEGQ_ACTIVE))
1.80      gwr       897:                panic("pmeg_release: bad pmeg=%p", pmegp);
1.38      gwr       898: #endif
                    899:
1.26      gwr       900:        TAILQ_REMOVE(&pmeg_active_queue, pmegp, pmeg_link);
1.38      gwr       901:        pmegp->pmeg_qstate = PMEGQ_INACTIVE;
1.29      gwr       902:        TAILQ_INSERT_TAIL(&pmeg_inactive_queue, pmegp, pmeg_link);
1.1       glass     903: }
1.7       glass     904:
1.26      gwr       905: /*
1.38      gwr       906:  * Move the pmeg to the free queue from wherever it is.
1.50      gwr       907:  * The pmeg will be clean.  It might be in kernel_pmap.
1.26      gwr       908:  */
1.145     chs       909: static void
                    910: pmeg_free(pmeg_t pmegp)
1.38      gwr       911: {
1.78      gwr       912:
1.38      gwr       913:        CHECK_SPL();
                    914:
1.80      gwr       915: #ifdef DIAGNOSTIC
                    916:        /* Caller should verify that it's empty. */
1.38      gwr       917:        if (pmegp->pmeg_vpages != 0)
                    918:                panic("pmeg_free: vpages");
                    919: #endif
                    920:
                    921:        switch (pmegp->pmeg_qstate) {
                    922:        case PMEGQ_ACTIVE:
                    923:                TAILQ_REMOVE(&pmeg_active_queue, pmegp, pmeg_link);
                    924:                break;
                    925:        case PMEGQ_INACTIVE:
                    926:                TAILQ_REMOVE(&pmeg_inactive_queue, pmegp, pmeg_link);
                    927:                break;
                    928:        case PMEGQ_KERNEL:
                    929:                TAILQ_REMOVE(&pmeg_kernel_queue, pmegp, pmeg_link);
                    930:                break;
                    931:        default:
                    932:                panic("pmeg_free: releasing bad pmeg");
                    933:                break;
                    934:        }
                    935:
1.28      gwr       936: #ifdef PMAP_DEBUG
1.38      gwr       937:        if (pmegp->pmeg_index == pmap_db_watchpmeg) {
1.81      gwr       938:                db_printf("pmeg_free: watch pmeg 0x%x\n",
1.145     chs       939:                          pmegp->pmeg_index);
1.38      gwr       940:                Debugger();
                    941:        }
1.28      gwr       942: #endif
                    943:
1.38      gwr       944:        pmegp->pmeg_owner = NULL;
                    945:        pmegp->pmeg_qstate = PMEGQ_FREE;
                    946:        TAILQ_INSERT_TAIL(&pmeg_free_queue, pmegp, pmeg_link);
                    947: }
                    948:
                    949: /*
                    950:  * Find a PMEG that was put on the inactive queue when we
                    951:  * had our context stolen.  If found, move to active queue.
                    952:  */
1.145     chs       953: static pmeg_t
                    954: pmeg_cache(pmap_t pmap, vaddr_t va)
1.38      gwr       955: {
1.39      gwr       956:        int sme, segnum;
1.38      gwr       957:        pmeg_t pmegp;
                    958:
                    959:        CHECK_SPL();
1.26      gwr       960:
1.80      gwr       961: #ifdef DIAGNOSTIC
1.50      gwr       962:        if (pmap == kernel_pmap)
                    963:                panic("pmeg_cache: kernel_pmap");
1.39      gwr       964:        if (va & SEGOFSET) {
1.73      fair      965:                panic("pmap:pmeg_cache: va=0x%lx", va);
1.39      gwr       966:        }
                    967: #endif
                    968:
1.38      gwr       969:        if (pmap->pm_segmap == NULL)
                    970:                return PMEG_NULL;
1.80      gwr       971:
1.38      gwr       972:        segnum = VA_SEGNUM(va);
                     973:        if (segnum >= NUSEG)            /* out of range */
                    974:                return PMEG_NULL;
1.80      gwr       975:
1.39      gwr       976:        sme = pmap->pm_segmap[segnum];
                    977:        if (sme == SEGINV)      /* nothing cached */
1.38      gwr       978:                return PMEG_NULL;
                    979:
1.39      gwr       980:        pmegp = pmeg_p(sme);
1.38      gwr       981:
1.30      gwr       982: #ifdef PMAP_DEBUG
1.38      gwr       983:        if (pmegp->pmeg_index == pmap_db_watchpmeg) {
1.81      gwr       984:                db_printf("pmeg_cache: watch pmeg 0x%x\n", pmegp->pmeg_index);
1.38      gwr       985:                Debugger();
1.30      gwr       986:        }
                    987: #endif
1.38      gwr       988:
                    989:        /*
                    990:         * Our segmap named a PMEG.  If it is no longer ours,
                    991:         * invalidate that entry in our segmap and return NULL.
                    992:         */
                    993:        if ((pmegp->pmeg_owner != pmap) ||
1.145     chs       994:            (pmegp->pmeg_version != pmap->pm_version) ||
                    995:            (pmegp->pmeg_va != va))
1.38      gwr       996:        {
1.30      gwr       997: #ifdef PMAP_DEBUG
1.81      gwr       998:                db_printf("pmap:pmeg_cache: invalid pmeg: sme=0x%x\n", sme);
1.39      gwr       999:                pmeg_print(pmegp);
1.78      gwr      1000:                Debugger();
1.38      gwr      1001: #endif
                   1002:                pmap->pm_segmap[segnum] = SEGINV;
                   1003:                return PMEG_NULL; /* cache lookup failed */
1.30      gwr      1004:        }
1.38      gwr      1005:
1.80      gwr      1006: #ifdef DIAGNOSTIC
1.38      gwr      1007:        /* Make sure it is on the inactive queue. */
                   1008:        if (pmegp->pmeg_qstate != PMEGQ_INACTIVE)
1.80      gwr      1009:                panic("pmeg_cache: pmeg was taken: %p", pmegp);
1.30      gwr      1010: #endif
1.26      gwr      1011:
1.38      gwr      1012:        TAILQ_REMOVE(&pmeg_inactive_queue, pmegp, pmeg_link);
                   1013:        pmegp->pmeg_qstate = PMEGQ_ACTIVE;
                   1014:        TAILQ_INSERT_TAIL(&pmeg_active_queue, pmegp, pmeg_link);
1.30      gwr      1015:
1.38      gwr      1016:        return pmegp;
                   1017: }
1.26      gwr      1018:
1.78      gwr      1019: #ifdef PMAP_DEBUG
1.145     chs      1020: static void
                   1021: pmeg_verify_empty(vaddr_t va)
1.38      gwr      1022: {
1.132     chs      1023:        vaddr_t eva;
1.78      gwr      1024:        int pte;
1.29      gwr      1025:
1.140     thorpej  1026:        for (eva = va + NBSG;  va < eva; va += PAGE_SIZE) {
1.78      gwr      1027:                pte = get_pte(va);
                   1028:                if (pte & PG_VALID)
                   1029:                        panic("pmeg_verify_empty");
                   1030:        }
                   1031: }
                   1032: #endif /* PMAP_DEBUG */
1.1       glass    1033:
1.26      gwr      1034:
1.78      gwr      1035: /****************************************************************
                    1036:  * Physical-to-virtual lookup support
1.84      gwr      1037:  *
                    1038:  * Need memory for the PV list heads and the free-list
                    1039:  * elements.  We know how many to allocate since
                   1040:  * there is one list head for each physical page, and
                   1041:  * at most one element for each PMEG slot.
1.78      gwr      1042:  */
1.145     chs      1043: static void
                   1044: pv_init(void)
1.38      gwr      1045: {
1.84      gwr      1046:        int npp, nvp, sz;
                   1047:        pv_entry_t pv;
                   1048:        char *p;
                   1049:
                   1050:        /* total allocation size */
                   1051:        sz = 0;
                   1052:
                   1053:        /*
                   1054:         * Data for each physical page.
                   1055:         * Each "mod/ref" flag is a char.
                   1056:         * Each PV head is a pointer.
                   1057:         * Note physmem is in pages.
                   1058:         */
                   1059:        npp = ALIGN(physmem);
                   1060:        sz += (npp * sizeof(*pv_flags_tbl));
                   1061:        sz += (npp * sizeof(*pv_head_tbl));
                   1062:
                   1063:        /*
                   1064:         * Data for each virtual page (all PMEGs).
                   1065:         * One pv_entry for each page frame.
                   1066:         */
                   1067:        nvp = NPMEG * NPAGSEG;
                   1068:        sz += (nvp * sizeof(*pv_free_list));
1.38      gwr      1069:
1.84      gwr      1070:        /* Now allocate the whole thing. */
                   1071:        sz = m68k_round_page(sz);
1.146     yamt     1072:        p = (char *)uvm_km_alloc(kernel_map, sz, 0, UVM_KMF_WIRED);
1.84      gwr      1073:        if (p == NULL)
                   1074:                panic("pmap:pv_init: alloc failed");
1.133     tsutsui  1075:        memset(p, 0, sz);
1.29      gwr      1076:
1.84      gwr      1077:        /* Now divide up the space. */
                   1078:        pv_flags_tbl = (void *) p;
                   1079:        p += (npp * sizeof(*pv_flags_tbl));
                   1080:        pv_head_tbl = (void*) p;
                   1081:        p += (npp * sizeof(*pv_head_tbl));
1.132     chs      1082:        pv_free_list = (void *)p;
1.84      gwr      1083:        p += (nvp * sizeof(*pv_free_list));
                   1084:
                   1085:        /* Finally, make pv_free_list into a list. */
1.132     chs      1086:        for (pv = pv_free_list; (char *)pv < p; pv++)
1.84      gwr      1087:                pv->pv_next = &pv[1];
                   1088:        pv[-1].pv_next = 0;
1.78      gwr      1089:
                   1090:        pv_initialized++;
1.1       glass    1091: }
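                          /*
                           * For reference: pv_link() and pv_unlink() below open-code
                           * their free-list handling.  A minimal sketch of the
                           * equivalent helpers (hypothetical; they do not exist in
                           * this file) would be:
                           */
                          #if 0
                          static pv_entry_t
                          pv_alloc(void)
                          {
                                  pv_entry_t pv;
                          
                                  /* Pop the first element off the free list. */
                                  pv = pv_free_list;
                                  if (pv == NULL)
                                          panic("pv_alloc");
                                  pv_free_list = pv->pv_next;
                                  pv->pv_next = NULL;
                                  return pv;
                          }
                          
                          static void
                          pv_free(pv_entry_t pv)
                          {
                                  /* Push the element back onto the free list. */
                                  pv->pv_next = pv_free_list;
                                  pv_free_list = pv;
                          }
                          #endif  /* illustrative sketch only */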
                   1092:
1.38      gwr      1093: /*
                   1094:  * Set or clear bits in all PTEs mapping a page.
                    1095:  * Also does the syncflags work while we are there...
                   1096:  */
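                          /*
                           * For example, pv_link() below uses pv_changepte(pa, PG_NC, 0)
                           * to make every mapping of a page non-cached, and pv_unlink()
                           * uses pv_changepte(pa, 0, PG_NC) to re-enable caching.
                           */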
1.145     chs      1097: static void
                   1098: pv_changepte(paddr_t pa, int set_bits, int clear_bits)
1.38      gwr      1099: {
1.84      gwr      1100:        pv_entry_t *head, pv;
                   1101:        u_char *pv_flags;
1.38      gwr      1102:        pmap_t pmap;
1.132     chs      1103:        vaddr_t va;
1.80      gwr      1104:        int pte, sme;
1.38      gwr      1105:        int saved_ctx;
1.151     thorpej  1106:        bool in_ctx;
1.80      gwr      1107:        u_int flags;
                   1108:
1.84      gwr      1109:        pv_flags = pa_to_pvflags(pa);
                   1110:        head     = pa_to_pvhead(pa);
                   1111:
1.80      gwr      1112:        /* If no mappings, no work to do. */
1.84      gwr      1113:        if (*head == NULL)
1.38      gwr      1114:                return;
1.80      gwr      1115:
1.50      gwr      1116: #ifdef DIAGNOSTIC
                   1117:        /* This function should only clear these bits: */
                   1118:        if (clear_bits & ~(PG_WRITE | PG_NC | PG_REF | PG_MOD))
1.137     provos   1119:                panic("pv_changepte: clear=0x%x", clear_bits);
1.50      gwr      1120: #endif
1.38      gwr      1121:
1.80      gwr      1122:        flags = 0;
1.38      gwr      1123:        saved_ctx = get_context();
1.84      gwr      1124:        for (pv = *head; pv != NULL; pv = pv->pv_next) {
1.38      gwr      1125:                pmap = pv->pv_pmap;
                   1126:                va = pv->pv_va;
1.65      gwr      1127:
1.38      gwr      1128: #ifdef DIAGNOSTIC
1.84      gwr      1129:                if (pmap->pm_segmap == NULL)
                   1130:                        panic("pv_changepte: null segmap");
1.38      gwr      1131: #endif
                   1132:
1.131     wiz      1133:                /* Is the PTE currently accessible in some context? */
1.152     thorpej  1134:                in_ctx = false;
1.107     gwr      1135:                sme = SEGINV;   /* kill warning */
1.50      gwr      1136:                if (pmap == kernel_pmap)
1.152     thorpej  1137:                        in_ctx = true;
1.38      gwr      1138:                else if (has_context(pmap)) {
                   1139:                        /* PMEG may be inactive. */
                   1140:                        set_context(pmap->pm_ctxnum);
                   1141:                        sme = get_segmap(va);
                   1142:                        if (sme != SEGINV)
1.152     thorpej  1143:                                in_ctx = true;
1.38      gwr      1144:                }
                   1145:
1.152     thorpej  1146:                if (in_ctx == true) {
1.38      gwr      1147:                        /*
                   1148:                         * The PTE is in the current context.
1.52      gwr      1149:                         * Make sure PTE is up-to-date with VAC.
1.38      gwr      1150:                         */
1.52      gwr      1151: #ifdef HAVECACHE
                   1152:                        if (cache_size)
                   1153:                                cache_flush_page(va);
                   1154: #endif
1.38      gwr      1155:                        pte = get_pte(va);
                   1156:                } else {
1.132     chs      1157:
1.38      gwr      1158:                        /*
                   1159:                         * The PTE is not in any context.
                   1160:                         */
1.132     chs      1161:
1.38      gwr      1162:                        sme = pmap->pm_segmap[VA_SEGNUM(va)];
1.132     chs      1163: #ifdef DIAGNOSTIC
1.38      gwr      1164:                        if (sme == SEGINV)
                   1165:                                panic("pv_changepte: SEGINV");
1.132     chs      1166: #endif
1.38      gwr      1167:                        pte = get_pte_pmeg(sme, VA_PTE_NUM(va));
                   1168:                }
1.1       glass    1169:
1.38      gwr      1170: #ifdef DIAGNOSTIC
1.92      gwr      1171:                /* PV entries point only to valid mappings. */
1.38      gwr      1172:                if ((pte & PG_VALID) == 0)
1.137     provos   1173:                        panic("pv_changepte: not PG_VALID at va=0x%lx", va);
1.38      gwr      1174: #endif
                   1175:                /* Get these while it's easy. */
                   1176:                if (pte & PG_MODREF) {
1.80      gwr      1177:                        flags |= (pte & PG_MODREF);
1.38      gwr      1178:                        pte &= ~PG_MODREF;
                   1179:                }
                   1180:
                   1181:                /* Finally, set and clear some bits. */
                   1182:                pte |= set_bits;
                   1183:                pte &= ~clear_bits;
                   1184:
1.152     thorpej  1185:                if (in_ctx == true) {
1.52      gwr      1186:                        /* Did cache flush above. */
1.38      gwr      1187:                        set_pte(va, pte);
                   1188:                } else {
                   1189:                        set_pte_pmeg(sme, VA_PTE_NUM(va), pte);
                   1190:                }
                   1191:        }
1.80      gwr      1192:        set_context(saved_ctx);
1.1       glass    1193:
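                                  /* Fold the saved hardware mod/ref bits into the flag byte. */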
1.84      gwr      1194:        *pv_flags |= (flags >> PV_SHIFT);
1.38      gwr      1195: }
1.1       glass    1196:
1.38      gwr      1197: /*
1.84      gwr      1198:  * Return the ref and mod bits from the pvlist,
                    1199:  * and turn off the same bits in the hardware PTEs.
1.38      gwr      1200:  */
1.145     chs      1201: static u_int
                   1202: pv_syncflags(pv_entry_t pv)
1.38      gwr      1203: {
                   1204:        pmap_t pmap;
1.132     chs      1205:        vaddr_t va;
1.80      gwr      1206:        int pte, sme;
1.38      gwr      1207:        int saved_ctx;
1.151     thorpej  1208:        bool in_ctx;
1.80      gwr      1209:        u_int flags;
                   1210:
                   1211:        /* If no mappings, no work to do. */
1.84      gwr      1212:        if (pv == NULL)
                   1213:                return (0);
1.38      gwr      1214:
1.80      gwr      1215:        flags = 0;
1.38      gwr      1216:        saved_ctx = get_context();
1.132     chs      1217:        for (; pv != NULL; pv = pv->pv_next) {
1.38      gwr      1218:                pmap = pv->pv_pmap;
                   1219:                va = pv->pv_va;
1.132     chs      1220:                sme = SEGINV;
1.65      gwr      1221:
1.38      gwr      1222: #ifdef DIAGNOSTIC
                   1223:                /*
                   1224:                 * Only the head may have a null pmap, and
                   1225:                 * we checked for that above.
                   1226:                 */
1.84      gwr      1227:                if (pmap->pm_segmap == NULL)
                   1228:                        panic("pv_syncflags: null segmap");
1.38      gwr      1229: #endif
                   1230:
1.131     wiz      1231:                /* Is the PTE currently accessible in some context? */
1.152     thorpej  1232:                in_ctx = false;
1.50      gwr      1233:                if (pmap == kernel_pmap)
1.152     thorpej  1234:                        in_ctx = true;
1.38      gwr      1235:                else if (has_context(pmap)) {
                   1236:                        /* PMEG may be inactive. */
                   1237:                        set_context(pmap->pm_ctxnum);
                   1238:                        sme = get_segmap(va);
                   1239:                        if (sme != SEGINV)
1.152     thorpej  1240:                                in_ctx = true;
1.38      gwr      1241:                }
                   1242:
1.152     thorpej  1243:                if (in_ctx == true) {
1.132     chs      1244:
1.38      gwr      1245:                        /*
                   1246:                         * The PTE is in the current context.
1.52      gwr      1247:                         * Make sure PTE is up-to-date with VAC.
1.38      gwr      1248:                         */
1.132     chs      1249:
1.52      gwr      1250: #ifdef HAVECACHE
                   1251:                        if (cache_size)
                   1252:                                cache_flush_page(va);
                   1253: #endif
1.38      gwr      1254:                        pte = get_pte(va);
                   1255:                } else {
1.132     chs      1256:
1.38      gwr      1257:                        /*
                   1258:                         * The PTE is not in any context.
                   1259:                         */
1.132     chs      1260:
1.38      gwr      1261:                        sme = pmap->pm_segmap[VA_SEGNUM(va)];
1.132     chs      1262: #ifdef DIAGNOSTIC
1.38      gwr      1263:                        if (sme == SEGINV)
                   1264:                                panic("pv_syncflags: SEGINV");
1.132     chs      1265: #endif
1.38      gwr      1266:                        pte = get_pte_pmeg(sme, VA_PTE_NUM(va));
                   1267:                }
1.29      gwr      1268:
1.38      gwr      1269: #ifdef DIAGNOSTIC
1.92      gwr      1270:                /* PV entries point only to valid mappings. */
1.38      gwr      1271:                if ((pte & PG_VALID) == 0)
1.137     provos   1272:                        panic("pv_syncflags: not PG_VALID at va=0x%lx", va);
1.38      gwr      1273: #endif
                   1274:                /* OK, do what we came here for... */
                   1275:                if (pte & PG_MODREF) {
1.80      gwr      1276:                        flags |= (pte & PG_MODREF);
1.38      gwr      1277:                        pte &= ~PG_MODREF;
                   1278:                }
                   1279:
1.152     thorpej  1280:                if (in_ctx == true) {
1.52      gwr      1281:                        /* Did cache flush above. */
1.38      gwr      1282:                        set_pte(va, pte);
                   1283:                } else {
                   1284:                        set_pte_pmeg(sme, VA_PTE_NUM(va), pte);
                   1285:                }
                   1286:        }
                   1287:        set_context(saved_ctx);
1.19      glass    1288:
1.84      gwr      1289:        return (flags >> PV_SHIFT);
1.1       glass    1290: }
                   1291:
1.78      gwr      1292: /* Remove all mappings for the physical page. */
1.145     chs      1293: static void
                   1294: pv_remove_all(paddr_t pa)
1.38      gwr      1295: {
1.84      gwr      1296:        pv_entry_t *head, pv;
1.38      gwr      1297:        pmap_t pmap;
1.132     chs      1298:        vaddr_t va;
1.1       glass    1299:
1.78      gwr      1300:        CHECK_SPL();
                   1301:
1.19      glass    1302: #ifdef PMAP_DEBUG
1.38      gwr      1303:        if (pmap_debug & PMD_REMOVE)
1.73      fair     1304:                printf("pv_remove_all(0x%lx)\n", pa);
1.38      gwr      1305: #endif
1.78      gwr      1306:
1.84      gwr      1307:        head = pa_to_pvhead(pa);
                   1308:        while ((pv = *head) != NULL) {
1.38      gwr      1309:                pmap = pv->pv_pmap;
                   1310:                va   = pv->pv_va;
1.140     thorpej  1311:                pmap_remove1(pmap, va, va + PAGE_SIZE);
1.38      gwr      1312: #ifdef PMAP_DEBUG
                   1313:                /* Make sure it went away. */
1.84      gwr      1314:                if (pv == *head) {
1.145     chs      1315:                        db_printf("pv_remove_all: "
                   1316:                                  "head unchanged for pa=0x%lx\n", pa);
1.38      gwr      1317:                        Debugger();
                   1318:                }
                   1319: #endif
                   1320:        }
1.1       glass    1321: }
                   1322:
1.38      gwr      1323: /*
1.42      gwr      1324:  * The pmap system is asked to lookup all mappings that point to a
1.38      gwr      1325:  * given physical memory address.  This function adds a new element
                   1326:  * to the list of mappings maintained for the given physical address.
1.42      gwr      1327:  * Returns PV_NC if the (new) pvlist says that the address cannot
1.38      gwr      1328:  * be cached.
                   1329:  */
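                          /*
                           * Typical caller pattern (cf. pmap_enter_kernel() below):
                           *
                           *      if (pv_link(pmap, new_pte, pgva) & PV_NC)
                           *              new_pte |= PG_NC;
                           */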
1.145     chs      1330: static int
                   1331: pv_link(pmap_t pmap, int pte, vaddr_t va)
1.38      gwr      1332: {
1.132     chs      1333:        paddr_t pa;
1.84      gwr      1334:        pv_entry_t *head, pv;
                   1335:        u_char *pv_flags;
1.92      gwr      1336:        int flags;
1.38      gwr      1337:
                   1338:        if (!pv_initialized)
                   1339:                return 0;
1.1       glass    1340:
1.80      gwr      1341:        CHECK_SPL();
                   1342:
1.92      gwr      1343:        /* Only the non-cached bit is of interest here. */
                   1344:        flags = (pte & PG_NC) ? PV_NC : 0;
                   1345:        pa = PG_PA(pte);
                   1346:
1.19      glass    1347: #ifdef PMAP_DEBUG
1.38      gwr      1348:        if ((pmap_debug & PMD_LINK) || (va == pmap_db_watchva)) {
1.92      gwr      1349:                printf("pv_link(%p, 0x%x, 0x%lx)\n", pmap, pte, va);
1.38      gwr      1350:                /* pv_print(pa); */
                   1351:        }
                   1352: #endif
1.1       glass    1353:
1.84      gwr      1354:        pv_flags = pa_to_pvflags(pa);
                   1355:        head     = pa_to_pvhead(pa);
1.2       glass    1356:
1.84      gwr      1357: #ifdef DIAGNOSTIC
                   1358:        /* See if this mapping is already in the list. */
                   1359:        for (pv = *head; pv != NULL; pv = pv->pv_next) {
1.81      gwr      1360:                if ((pv->pv_pmap == pmap) && (pv->pv_va == va))
1.73      fair     1361:                        panic("pv_link: duplicate entry for PA=0x%lx", pa);
1.38      gwr      1362:        }
1.19      glass    1363: #endif
1.132     chs      1364: #ifdef HAVECACHE
1.26      gwr      1365:
1.38      gwr      1366:        /*
1.84      gwr      1367:         * Does this new mapping cause VAC alias problems?
1.38      gwr      1368:         */
1.132     chs      1369:
1.84      gwr      1370:        *pv_flags |= flags;
                   1371:        if ((*pv_flags & PV_NC) == 0) {
                   1372:                for (pv = *head; pv != NULL; pv = pv->pv_next) {
1.81      gwr      1373:                        if (BADALIAS(va, pv->pv_va)) {
1.84      gwr      1374:                                *pv_flags |= PV_NC;
                   1375:                                pv_changepte(pa, PG_NC, 0);
1.60      gwr      1376:                                pmap_stats.ps_vac_uncached++;
1.38      gwr      1377:                                break;
                   1378:                        }
                   1379:                }
                   1380:        }
1.132     chs      1381: #endif
1.84      gwr      1382:
                   1383:        /* Allocate a PV element (pv_alloc()). */
                   1384:        pv = pv_free_list;
                   1385:        if (pv == NULL)
                   1386:                panic("pv_link: pv_alloc");
                   1387:        pv_free_list = pv->pv_next;
                   1388:        pv->pv_next = 0;
                   1389:
                   1390:        /* Insert new entry at the head. */
1.81      gwr      1391:        pv->pv_pmap = pmap;
                   1392:        pv->pv_va   = va;
1.84      gwr      1393:        pv->pv_next = *head;
                   1394:        *head = pv;
1.38      gwr      1395:
1.84      gwr      1396:        return (*pv_flags & PV_NC);
1.38      gwr      1397: }
                   1398:
                   1399: /*
                   1400:  * pv_unlink is a helper function for pmap_remove.
                   1401:  * It removes the appropriate (pmap, pa, va) entry.
                   1402:  *
                   1403:  * Once the entry is removed, if the pv_table head has the cache
                   1404:  * inhibit bit set, see if we can turn that off; if so, walk the
                   1405:  * pvlist and turn off PG_NC in each PTE.  (The pvlist is by
                   1406:  * definition nonempty, since it must have at least two elements
                   1407:  * in it to have PV_NC set, and we only remove one here.)
                   1408:  */
1.145     chs      1409: static void
                   1410: pv_unlink(pmap_t pmap, int pte, vaddr_t va)
1.38      gwr      1411: {
1.132     chs      1412:        paddr_t pa;
1.84      gwr      1413:        pv_entry_t *head, *ppv, pv;
                   1414:        u_char *pv_flags;
1.38      gwr      1415:
1.80      gwr      1416:        CHECK_SPL();
                   1417:
1.92      gwr      1418:        pa = PG_PA(pte);
1.18      glass    1419: #ifdef PMAP_DEBUG
1.80      gwr      1420:        if ((pmap_debug & PMD_LINK) || (va == pmap_db_watchva)) {
1.92      gwr      1421:                printf("pv_unlink(%p, 0x%x, 0x%lx)\n", pmap, pte, va);
1.80      gwr      1422:                /* pv_print(pa); */
1.26      gwr      1423:        }
1.18      glass    1424: #endif
1.81      gwr      1425:
1.84      gwr      1426:        pv_flags = pa_to_pvflags(pa);
                   1427:        head     = pa_to_pvhead(pa);
1.38      gwr      1428:
1.84      gwr      1429:        /*
                   1430:         * Find the entry.
                   1431:         */
                   1432:        ppv = head;
                   1433:        pv = *ppv;
                   1434:        while (pv) {
                   1435:                if ((pv->pv_pmap == pmap) && (pv->pv_va == va))
                   1436:                        goto found;
                   1437:                ppv = &pv->pv_next;
                   1438:                pv  =  pv->pv_next;
                   1439:        }
                   1440: #ifdef PMAP_DEBUG
                   1441:        db_printf("pv_unlink: not found (pa=0x%lx,va=0x%lx)\n", pa, va);
                   1442:        Debugger();
1.38      gwr      1443: #endif
1.84      gwr      1444:        return;
                   1445:
1.145     chs      1446:   found:
1.84      gwr      1447:        /* Unlink this entry from the list and clear it. */
                   1448:        *ppv = pv->pv_next;
                   1449:        pv->pv_pmap = NULL;
                   1450:        pv->pv_va   = 0;
                   1451:
                   1452:        /* Insert it on the head of the free list. (pv_free()) */
                   1453:        pv->pv_next = pv_free_list;
                   1454:        pv_free_list = pv;
                   1455:        pv = NULL;
                   1456:
                   1457:        /* Do any non-cached mappings remain? */
                   1458:        if ((*pv_flags & PV_NC) == 0)
                   1459:                return;
                   1460:        if ((pv = *head) == NULL)
                   1461:                return;
1.1       glass    1462:
                   1463:        /*
1.84      gwr      1464:         * Have non-cached mappings.  See if we can fix that now.
1.1       glass    1465:         */
1.84      gwr      1466:        va = pv->pv_va;
                   1467:        for (pv = pv->pv_next; pv != NULL; pv = pv->pv_next) {
                   1468:                /* If there is a DVMA mapping, leave it NC. */
1.98      gwr      1469:                if (va >= DVMA_MAP_BASE)
1.84      gwr      1470:                        return;
                   1471:                /* If there are VAC alias problems, leave NC. */
                   1472:                if (BADALIAS(va, pv->pv_va))
                   1473:                        return;
1.1       glass    1474:        }
1.84      gwr      1475:        /* OK, there are no "problem" mappings. */
                   1476:        *pv_flags &= ~PV_NC;
                   1477:        pv_changepte(pa, 0, PG_NC);
                   1478:        pmap_stats.ps_vac_recached++;
1.1       glass    1479: }
                   1480:
1.38      gwr      1481:
1.78      gwr      1482: /****************************************************************
                   1483:  * Bootstrap and Initialization, etc.
                   1484:  */
1.38      gwr      1485:
1.145     chs      1486: void
                   1487: pmap_common_init(pmap_t pmap)
1.38      gwr      1488: {
1.133     tsutsui  1489:        memset(pmap, 0, sizeof(struct pmap));
1.132     chs      1490:        pmap->pm_refcount = 1;
1.38      gwr      1491:        pmap->pm_version = pmap_version++;
1.83      gwr      1492:        pmap->pm_ctxnum = EMPTY_CONTEXT;
1.38      gwr      1493:        simple_lock_init(&pmap->pm_lock);
                   1494: }
                   1495:
                   1496: /*
                   1497:  * Prepare the kernel for VM operations.
1.99      gwr      1498:  * This is called by locore2.c:_vm_init()
1.38      gwr      1499:  * after the "start/end" globals are set.
1.75      gwr      1500:  * This function must NOT leave context zero.
1.38      gwr      1501:  */
1.145     chs      1502: void
                   1503: pmap_bootstrap(vaddr_t nextva)
1.38      gwr      1504: {
1.99      gwr      1505:        struct sunromvec *rvec;
1.132     chs      1506:        vaddr_t va, eva;
1.75      gwr      1507:        int i, pte, sme;
1.78      gwr      1508:        extern char etext[];
1.75      gwr      1509:
1.76      gwr      1510:        nextva = m68k_round_page(nextva);
1.75      gwr      1511:        rvec = romVectorPtr;
                   1512:
1.76      gwr      1513:        /* Steal some special-purpose, already mapped pages? */
                   1514:
1.75      gwr      1515:        /*
                   1516:         * Determine the range of kernel virtual space available.
1.76      gwr      1517:         * It is segment-aligned to simplify PMEG management.
1.75      gwr      1518:         */
1.164     tsutsui  1519:        virtual_avail = sun3_round_seg(nextva);
1.75      gwr      1520:        virtual_end = VM_MAX_KERNEL_ADDRESS;
                   1521:
                   1522:        /*
                   1523:         * Determine the range of physical memory available.
                   1524:         * Physical memory at zero was remapped to KERNBASE.
                   1525:         */
1.164.2.1! uebayasi 1526:        avail_start = nextva - KERNBASE3;
1.75      gwr      1527:        if (rvec->romvecVersion < 1) {
                   1528:                mon_printf("Warning: ancient PROM version=%d\n",
1.145     chs      1529:                           rvec->romvecVersion);
1.75      gwr      1530:                /* Guess that PROM version 0.X used two pages. */
1.140     thorpej  1531:                avail_end = *rvec->memorySize - (2*PAGE_SIZE);
1.75      gwr      1532:        } else {
                   1533:                /* PROM version 1 or later. */
                   1534:                avail_end = *rvec->memoryAvail;
                   1535:        }
                   1536:        avail_end = m68k_trunc_page(avail_end);
                   1537:
                   1538:        /*
1.76      gwr      1539:         * Report the actual amount of physical memory,
                   1540:         * even though the PROM takes a few pages.
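                                   * (Rounded up to a multiple of 16 pages.)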
1.75      gwr      1541:         */
1.76      gwr      1542:        physmem = (btoc(avail_end) + 0xF) & ~0xF;
1.75      gwr      1543:
                   1544:        /*
1.76      gwr      1545:         * On the Sun3/50, the video frame buffer is located at
                    1546:         * physical address 1MB, so we must step over it.
1.75      gwr      1547:         */
1.148     thorpej  1548:        if (cpu_machine_id == ID_SUN3_50) {
1.76      gwr      1549:                hole_start = m68k_trunc_page(OBMEM_BW50_ADDR);
                   1550:                hole_size  = m68k_round_page(OBMEM_BW2_SIZE);
1.91      gwr      1551:                if (avail_start > hole_start) {
1.76      gwr      1552:                        mon_printf("kernel too large for Sun3/50\n");
                   1553:                        sunmon_abort();
                   1554:                }
                   1555:        }
1.75      gwr      1556:
                   1557:        /*
                   1558:         * Done allocating PAGES of virtual space, so
                   1559:         * clean out the rest of the last used segment.
                   1560:         */
1.140     thorpej  1561:        for (va = nextva; va < virtual_avail; va += PAGE_SIZE)
1.75      gwr      1562:                set_pte(va, PG_INVAL);
                   1563:
                   1564:        /*
                    1565:         * Now that we are done stealing physical pages, etc.,
                   1566:         * figure out which PMEGs are used by those mappings
1.76      gwr      1567:         * and either reserve them or clear them out.
                   1568:         * -- but first, init PMEG management.
                   1569:         * This puts all PMEGs in the free list.
                    1570:         * We will allocate the in-use ones.
1.75      gwr      1571:         */
1.76      gwr      1572:        pmeg_init();
                   1573:
                   1574:        /*
                   1575:         * Unmap user virtual segments.
                   1576:         * VA range: [0 .. KERNBASE]
                   1577:         */
1.164.2.1! uebayasi 1578:        for (va = 0; va < KERNBASE3; va += NBSG)
1.76      gwr      1579:                set_segmap(va, SEGINV);
1.75      gwr      1580:
                   1581:        /*
                   1582:         * Reserve PMEGS for kernel text/data/bss
                   1583:         * and the misc pages taken above.
1.76      gwr      1584:         * VA range: [KERNBASE .. virtual_avail]
1.75      gwr      1585:         */
1.76      gwr      1586:        for ( ; va < virtual_avail; va += NBSG) {
1.75      gwr      1587:                sme = get_segmap(va);
                   1588:                if (sme == SEGINV) {
                   1589:                        mon_printf("kernel text/data/bss not mapped\n");
                   1590:                        sunmon_abort();
                   1591:                }
1.76      gwr      1592:                pmeg_reserve(sme);
1.75      gwr      1593:        }
                   1594:
                   1595:        /*
1.76      gwr      1596:         * Unmap kernel virtual space.  Make sure to leave no valid
1.75      gwr      1597:         * segmap entries in the MMU unless pmeg_array records them.
1.76      gwr      1598:         * VA range: [virtual_avail .. virtual_end]
1.75      gwr      1599:         */
1.76      gwr      1600:        for ( ; va < virtual_end; va += NBSG)
1.75      gwr      1601:                set_segmap(va, SEGINV);
                   1602:
                   1603:        /*
1.76      gwr      1604:         * Reserve PMEGs used by the PROM monitor (device mappings).
                   1605:         * Free up any pmegs in this range which have no mappings.
                   1606:         * VA range: [0x0FE00000 .. 0x0FF00000]
1.75      gwr      1607:         */
1.152     thorpej  1608:        pmeg_mon_init(SUN3_MONSTART, SUN3_MONEND, true);
1.75      gwr      1609:
                   1610:        /*
1.76      gwr      1611:         * Unmap any pmegs left in DVMA space by the PROM.
                   1612:         * DO NOT kill the last one! (owned by the PROM!)
                   1613:         * VA range: [0x0FF00000 .. 0x0FFE0000]
1.75      gwr      1614:         */
1.152     thorpej  1615:        pmeg_mon_init(SUN3_MONEND, SUN3_MONSHORTSEG, false);
1.75      gwr      1616:
                   1617:        /*
                   1618:         * MONSHORTSEG contains MONSHORTPAGE which is a data page
1.76      gwr      1619:         * allocated by the PROM monitor.  Reserve the segment,
                   1620:         * but clear out all but the last PTE inside it.
                   1621:         * Note we use this for tmp_vpages.
1.75      gwr      1622:         */
1.99      gwr      1623:        va  = SUN3_MONSHORTSEG;
                   1624:        eva = SUN3_MONSHORTPAGE;
1.76      gwr      1625:        sme = get_segmap(va);
                   1626:        pmeg_reserve(sme);
1.140     thorpej  1627:        for ( ; va < eva; va += PAGE_SIZE)
1.75      gwr      1628:                set_pte(va, PG_INVAL);
                   1629:
                   1630:        /*
1.76      gwr      1631:         * Done reserving PMEGs and/or clearing out mappings.
                   1632:         *
                   1633:         * Now verify the mapping protections and such for the
                   1634:         * important parts of the address space (in VA order).
                   1635:         * Note that the Sun PROM usually leaves the memory
                   1636:         * mapped with everything non-cached...
1.75      gwr      1637:         */
                   1638:
                   1639:        /*
1.76      gwr      1640:         * Map the message buffer page at a constant location
                   1641:         * (physical address zero) so its contents will be
                   1642:         * preserved through a reboot.
1.75      gwr      1643:         */
1.164.2.1! uebayasi 1644:        va = KERNBASE3;
1.75      gwr      1645:        pte = get_pte(va);
1.76      gwr      1646:        pte |= (PG_SYSTEM | PG_WRITE | PG_NC);
1.75      gwr      1647:        set_pte(va, pte);
1.140     thorpej  1648:        va += PAGE_SIZE;
1.75      gwr      1649:        /* Initialize msgbufaddr later, in machdep.c */
                   1650:
1.76      gwr      1651:        /* Next is the tmpstack page. */
                   1652:        pte = get_pte(va);
                   1653:        pte &= ~(PG_NC);
                   1654:        pte |= (PG_SYSTEM | PG_WRITE);
                   1655:        set_pte(va, pte);
1.140     thorpej  1656:        va += PAGE_SIZE;
1.75      gwr      1657:
                   1658:        /*
1.76      gwr      1659:         * Next is the kernel text.
                   1660:         *
1.75      gwr      1661:         * Verify protection bits on kernel text/data/bss
                   1662:         * All of kernel text, data, and bss are cached.
                   1663:         * Text is read-only (except in db_write_ktext).
                   1664:         */
                   1665:        eva = m68k_trunc_page(etext);
                   1666:        while (va < eva) {
                   1667:                pte = get_pte(va);
                   1668:                if ((pte & (PG_VALID|PG_TYPE)) != PG_VALID) {
                   1669:                        mon_printf("invalid page at 0x%x\n", va);
                   1670:                }
                   1671:                pte &= ~(PG_WRITE|PG_NC);
                   1672:                /* Kernel text is read-only */
                   1673:                pte |= (PG_SYSTEM);
                   1674:                set_pte(va, pte);
1.140     thorpej  1675:                va += PAGE_SIZE;
1.75      gwr      1676:        }
1.76      gwr      1677:        /* data, bss, etc. */
                   1678:        while (va < nextva) {
1.75      gwr      1679:                pte = get_pte(va);
                   1680:                if ((pte & (PG_VALID|PG_TYPE)) != PG_VALID) {
                   1681:                        mon_printf("invalid page at 0x%x\n", va);
                   1682:                }
                   1683:                pte &= ~(PG_NC);
                   1684:                pte |= (PG_SYSTEM | PG_WRITE);
                   1685:                set_pte(va, pte);
1.140     thorpej  1686:                va += PAGE_SIZE;
1.75      gwr      1687:        }
                   1688:
                   1689:        /*
                   1690:         * Duplicate all mappings in the current context into
                   1691:         * every other context.  We have to let the PROM do the
                   1692:         * actual segmap manipulation because we can only switch
1.76      gwr      1693:         * the MMU context after we are sure that the kernel is
                   1694:         * identically mapped in all contexts.  The PROM can do
                   1695:         * the job using hardware-dependent tricks...
1.75      gwr      1696:         */
                   1697: #ifdef DIAGNOSTIC
                    1698:        /* Note: PROM setcxsegmap function needs sfc=dfc=FC_CONTROL */
                   1699:        if ((getsfc() != FC_CONTROL) || (getdfc() != FC_CONTROL)) {
                   1700:                mon_printf("pmap_bootstrap: bad dfc or sfc\n");
                   1701:                sunmon_abort();
                   1702:        }
                   1703:        /* Near the beginning of locore.s we set context zero. */
                   1704:        if (get_context() != 0) {
                   1705:                mon_printf("pmap_bootstrap: not in context zero?\n");
                   1706:                sunmon_abort();
                   1707:        }
1.76      gwr      1708: #endif /* DIAGNOSTIC */
1.132     chs      1709:        for (va = 0; va < (vaddr_t) (NBSG * NSEGMAP); va += NBSG) {
1.76      gwr      1710:                /* Read the segmap entry from context zero... */
                   1711:                sme = get_segmap(va);
                   1712:                /* ... then copy it into all other contexts. */
1.75      gwr      1713:                for (i = 1; i < NCONTEXT; i++) {
                   1714:                        (*rvec->setcxsegmap)(i, va, sme);
                   1715:                }
                   1716:        }
                   1717:
1.38      gwr      1718:        /*
                   1719:         * Reserve a segment for the kernel to use to access a pmeg
                   1720:         * that is not currently mapped into any context/segmap.
                   1721:         * The kernel temporarily maps such a pmeg into this segment.
1.83      gwr      1722:         *
                   1723:         * XXX: Now that context zero is reserved as kernel-only,
                   1724:         * we could borrow context zero for these temporary uses.
1.38      gwr      1725:         */
                   1726:        temp_seg_va = virtual_avail;
                   1727:        virtual_avail += NBSG;
1.94      gwr      1728: #ifdef DIAGNOSTIC
1.67      gwr      1729:        if (temp_seg_va & SEGOFSET) {
                   1730:                mon_printf("pmap_bootstrap: temp_seg_va\n");
                   1731:                sunmon_abort();
                   1732:        }
1.38      gwr      1733: #endif
                   1734:
                   1735:        /* Initialization for pmap_next_page() */
                   1736:        avail_next = avail_start;
                   1737:
1.140     thorpej  1738:        uvmexp.pagesize = PAGE_SIZE;
1.110     mrg      1739:        uvm_setpagesize();
1.38      gwr      1740:
                   1741:        /* after setting up some structures */
                   1742:
1.50      gwr      1743:        pmap_common_init(kernel_pmap);
1.82      gwr      1744:        pmap_kernel_init(kernel_pmap);
1.38      gwr      1745:
                   1746:        context_init();
                   1747:
                   1748:        pmeg_clean_free();
1.101     gwr      1749:
                   1750:        pmap_page_upload();
1.38      gwr      1751: }
                   1752:
1.82      gwr      1753: /*
                   1754:  * Give the kernel pmap a segmap, just so there are not
                   1755:  * so many special cases required.  Maybe faster too,
                   1756:  * because this lets pmap_remove() and pmap_protect()
                   1757:  * use a S/W copy of the segmap to avoid function calls.
                   1758:  */
1.145     chs      1759: void
                   1760: pmap_kernel_init(pmap_t pmap)
1.82      gwr      1761: {
1.132     chs      1762:        vaddr_t va;
1.82      gwr      1763:        int i, sme;
                   1764:
                   1765:        for (i=0, va=0; i < NSEGMAP; i++, va+=NBSG) {
                   1766:                sme = get_segmap(va);
                   1767:                kernel_segmap[i] = sme;
                   1768:        }
                   1769:        pmap->pm_segmap = kernel_segmap;
                   1770: }
                   1771:
1.81      gwr      1772:
1.78      gwr      1773: /****************************************************************
                   1774:  * PMAP interface functions.
                   1775:  */
                   1776:
1.38      gwr      1777: /*
1.97      thorpej  1778:  * Support functions for vm_page_bootstrap().
1.38      gwr      1779:  */
1.142     thorpej  1780:
                   1781: /*
                   1782:  * How much virtual space does this kernel have?
                   1783:  * (After mapping kernel text, data, etc.)
                   1784:  */
1.145     chs      1785: void
                   1786: pmap_virtual_space(vaddr_t *v_start, vaddr_t *v_end)
1.142     thorpej  1787: {
                   1788:        *v_start = virtual_avail;
                   1789:        *v_end   = virtual_end;
                   1790: }
1.1       glass    1791:
1.101     gwr      1792: /* Provide memory to the VM system. */
1.145     chs      1793: static void
                   1794: pmap_page_upload(void)
1.101     gwr      1795: {
                   1796:        int a, b, c, d;
                   1797:
                   1798:        if (hole_size) {
                   1799:                /*
                   1800:                 * Supply the memory in two segments so the
                   1801:                 * reserved memory (3/50 video ram at 1MB)
                   1802:                 * can be carved from the front of the 2nd.
                   1803:                 */
                   1804:                a = atop(avail_start);
                   1805:                b = atop(hole_start);
1.105     thorpej  1806:                uvm_page_physload(a, b, a, b, VM_FREELIST_DEFAULT);
1.101     gwr      1807:                c = atop(hole_start + hole_size);
                   1808:                d = atop(avail_end);
1.105     thorpej  1809:                uvm_page_physload(b, d, c, d, VM_FREELIST_DEFAULT);
1.101     gwr      1810:        } else {
                   1811:                a = atop(avail_start);
                   1812:                d = atop(avail_end);
1.105     thorpej  1813:                uvm_page_physload(a, d, a, d, VM_FREELIST_DEFAULT);
1.101     gwr      1814:        }
                   1815: }
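                          /*
                           * For example (a sketch, with assumed addresses): on a 3/50
                           * with 4 MB of RAM and avail_start at 0x80000, the calls
                           * above would be roughly
                           *
                           *      uvm_page_physload(atop(0x080000), atop(0x100000),
                           *          atop(0x080000), atop(0x100000), VM_FREELIST_DEFAULT);
                           *      uvm_page_physload(atop(0x100000), atop(0x400000),
                           *          atop(0x120000), atop(0x400000), VM_FREELIST_DEFAULT);
                           *
                           * i.e. the second segment spans the hole, but only the pages
                           * above the 128 KB frame buffer are marked available.
                           */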
1.38      gwr      1816:
1.1       glass    1817: /*
                   1818:  *     Initialize the pmap module.
                   1819:  *     Called by vm_init, to initialize any structures that the pmap
                   1820:  *     system needs to map virtual memory.
                   1821:  */
1.145     chs      1822: void
                   1823: pmap_init(void)
1.1       glass    1824: {
1.132     chs      1825:        pv_init();
1.1       glass    1826:
1.120     tsutsui  1827:        /* Initialize the pmap pool. */
                   1828:        pool_init(&pmap_pmap_pool, sizeof(struct pmap), 0, 0, 0, "pmappl",
1.153     ad       1829:                  &pool_allocator_nointr, IPL_NONE);
1.1       glass    1830: }
                   1831:
1.38      gwr      1832: /*
1.56      gwr      1833:  * Map a range of kernel virtual address space.
                   1834:  * This might be used for device mappings, or to
                   1835:  * record the mapping for kernel text/data/bss.
1.100     gwr      1836:  * Return VA following the mapped range.
1.38      gwr      1837:  */
1.145     chs      1838: vaddr_t
                   1839: pmap_map(vaddr_t va, paddr_t pa, paddr_t endpa, int prot)
1.38      gwr      1840: {
1.100     gwr      1841:        int sz;
                   1842:
                   1843:        sz = endpa - pa;
                   1844:        do {
1.116     thorpej  1845:                pmap_enter(kernel_pmap, va, pa, prot, 0);
1.140     thorpej  1846:                va += PAGE_SIZE;
                   1847:                pa += PAGE_SIZE;
                   1848:                sz -= PAGE_SIZE;
1.100     gwr      1849:        } while (sz > 0);
1.135     chris    1850:        pmap_update(kernel_pmap);
1.100     gwr      1851:        return(va);
1.38      gwr      1852: }
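                          /*
                           * Example use (a sketch; addresses hypothetical): record the
                           * mapping of a three-page region during early bootstrap.
                           *
                           *      va = pmap_map(va, pa, pa + 3 * PAGE_SIZE,
                           *          VM_PROT_READ | VM_PROT_WRITE);
                           */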
                   1853:
1.145     chs      1854: void
                   1855: pmap_user_init(pmap_t pmap)
1.38      gwr      1856: {
                   1857:        int i;
                   1858:        pmap->pm_segmap = malloc(sizeof(char)*NUSEG, M_VMPMAP, M_WAITOK);
1.132     chs      1859:        for (i = 0; i < NUSEG; i++) {
1.38      gwr      1860:                pmap->pm_segmap[i] = SEGINV;
                   1861:        }
1.1       glass    1862: }
                   1863:
                   1864: /*
                   1865:  *     Create and return a physical map.
                    1866:  *
                    1867:  *     The map is always an actual physical map that may
                    1868:  *     be referenced by the hardware.  (The historical
                    1869:  *     "size" argument, which selected a software-only map
                    1870:  *     when non-zero, no longer exists.)
                   1875:  */
1.145     chs      1876: pmap_t
                   1877: pmap_create(void)
1.1       glass    1878: {
1.38      gwr      1879:        pmap_t pmap;
1.2       glass    1880:
1.120     tsutsui  1881:        pmap = pool_get(&pmap_pmap_pool, PR_WAITOK);
1.96      thorpej  1882:        pmap_pinit(pmap);
1.38      gwr      1883:        return pmap;
1.1       glass    1884: }
                   1885:
                   1886: /*
                   1887:  * Release any resources held by the given physical map.
                   1888:  * Called when a pmap initialized by pmap_pinit is being released.
                   1889:  * Should only be called if the map contains no valid mappings.
                   1890:  */
1.145     chs      1891: void
                   1892: pmap_release(struct pmap *pmap)
1.1       glass    1893: {
1.80      gwr      1894:        int s;
                   1895:
1.122     thorpej  1896:        s = splvm();
1.26      gwr      1897:
1.50      gwr      1898:        if (pmap == kernel_pmap)
                   1899:                panic("pmap_release: kernel_pmap!");
1.29      gwr      1900:
1.80      gwr      1901:        if (has_context(pmap)) {
                   1902: #ifdef PMAP_DEBUG
                   1903:                if (pmap_debug & PMD_CONTEXT)
                   1904:                        printf("pmap_release(%p): free ctx %d\n",
1.145     chs      1905:                               pmap, pmap->pm_ctxnum);
1.80      gwr      1906: #endif
1.38      gwr      1907:                context_free(pmap);
1.80      gwr      1908:        }
1.38      gwr      1909:        free(pmap->pm_segmap, M_VMPMAP);
                   1910:        pmap->pm_segmap = NULL;
1.80      gwr      1911:
                   1912:        splx(s);
1.1       glass    1913: }
                   1914:
                   1915:
                   1916: /*
                   1917:  *     Retire the given physical map from service.
                   1918:  *     Should only be called if the map contains
                   1919:  *     no valid mappings.
                   1920:  */
1.145     chs      1921: void
                   1922: pmap_destroy(pmap_t pmap)
1.1       glass    1923: {
1.38      gwr      1924:        int count;
1.1       glass    1925:
1.18      glass    1926: #ifdef PMAP_DEBUG
1.38      gwr      1927:        if (pmap_debug & PMD_CREATE)
1.65      gwr      1928:                printf("pmap_destroy(%p)\n", pmap);
1.18      glass    1929: #endif
1.50      gwr      1930:        if (pmap == kernel_pmap)
                   1931:                panic("pmap_destroy: kernel_pmap!");
1.38      gwr      1932:        pmap_lock(pmap);
                   1933:        count = pmap_del_ref(pmap);
                   1934:        pmap_unlock(pmap);
                   1935:        if (count == 0) {
                   1936:                pmap_release(pmap);
1.120     tsutsui  1937:                pool_put(&pmap_pmap_pool, pmap);
1.38      gwr      1938:        }
1.1       glass    1939: }
                   1940:
                   1941: /*
                   1942:  *     Add a reference to the specified pmap.
                   1943:  */
1.145     chs      1944: void
                   1945: pmap_reference(pmap_t pmap)
1.1       glass    1946: {
1.132     chs      1947:        pmap_lock(pmap);
                   1948:        pmap_add_ref(pmap);
                   1949:        pmap_unlock(pmap);
1.1       glass    1950: }
1.26      gwr      1951:
1.85      gwr      1952:
1.38      gwr      1953: /*
1.85      gwr      1954:  *     Insert the given physical page (p) at
                   1955:  *     the specified virtual address (v) in the
                   1956:  *     target physical map with the protection requested.
                   1957:  *
                   1958:  *     The physical address is page aligned, but may have some
                   1959:  *     low bits set indicating an OBIO or VME bus page, or just
                    1960:  *     that the non-cache bit should be set (i.e. PMAP_NC).
                   1961:  *
                   1962:  *     If specified, the page will be wired down, meaning
                    1963:  *     that the related pte cannot be reclaimed.
                   1964:  *
                   1965:  *     NB:  This is the only routine which MAY NOT lazy-evaluate
                   1966:  *     or lose information.  That is, this routine must actually
                   1967:  *     insert this page into the given map NOW.
1.38      gwr      1968:  */
1.145     chs      1969: int
1.160     cegger   1970: pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
1.85      gwr      1971: {
1.92      gwr      1972:        int new_pte, s;
1.151     thorpej  1973:        bool wired = (flags & PMAP_WIRED) != 0;
1.85      gwr      1974:
                   1975: #ifdef PMAP_DEBUG
                   1976:        if ((pmap_debug & PMD_ENTER) ||
1.145     chs      1977:            (va == pmap_db_watchva))
1.85      gwr      1978:                printf("pmap_enter(%p, 0x%lx, 0x%lx, 0x%x, 0x%x)\n",
1.145     chs      1979:                       pmap, va, pa, prot, wired);
1.85      gwr      1980: #endif
                   1981:
                   1982:        /* Get page-type bits from low part of the PA... */
1.92      gwr      1983:        new_pte = (pa & PMAP_SPEC) << PG_MOD_SHIFT;
1.85      gwr      1984:
                   1985:        /* ...now the valid and writable bits... */
1.92      gwr      1986:        new_pte |= PG_VALID;
1.90      gwr      1987:        if (prot & VM_PROT_WRITE)
1.92      gwr      1988:                new_pte |= PG_WRITE;
1.147     chs      1989:        if (flags & VM_PROT_ALL) {
                   1990:                new_pte |= PG_REF;
                   1991:                if (flags & VM_PROT_WRITE) {
                   1992:                        new_pte |= PG_MOD;
                   1993:                }
                   1994:        }
1.85      gwr      1995:
                   1996:        /* ...and finally the page-frame number. */
1.92      gwr      1997:        new_pte |= PA_PGNUM(pa);
1.85      gwr      1998:
                   1999:        /*
                    2000:         * Treatment varies significantly:
                    2001:         *  Kernel PTEs are in all contexts and are always in the MMU.
                    2002:         *  User PTEs are not necessarily in the MMU, and the pmap
                    2003:         *   itself may not currently be in the MMU either.
                   2004:         *
                   2005:         */
1.122     thorpej  2006:        s = splvm();
1.85      gwr      2007:        if (pmap == kernel_pmap) {
1.92      gwr      2008:                new_pte |= PG_SYSTEM;
                   2009:                pmap_enter_kernel(va, new_pte, wired);
1.85      gwr      2010:        } else {
1.92      gwr      2011:                pmap_enter_user(pmap, va, new_pte, wired);
1.85      gwr      2012:        }
                   2013:        splx(s);
1.124     chs      2014:        return 0;
1.85      gwr      2015: }
                   2016:
1.145     chs      2017: static void
1.151     thorpej  2018: pmap_enter_kernel(vaddr_t pgva, int new_pte, bool wired)
1.38      gwr      2019: {
1.92      gwr      2020:        pmap_t pmap = kernel_pmap;
                   2021:        pmeg_t pmegp;
1.85      gwr      2022:        int do_pv, old_pte, sme;
1.132     chs      2023:        vaddr_t segva;
1.80      gwr      2024:
1.85      gwr      2025:        /*
                    2026:          keep in hardware only, since it's mapped into all contexts anyway;
                    2027:          need to handle possibly allocating additional pmegs;
                    2028:          need to make sure they can't be stolen from the kernel;
                   2029:          map any new pmegs into all contexts, make sure rest of pmeg is null;
                   2030:          deal with pv_stuff; possibly caching problems;
                   2031:          must also deal with changes too.
1.145     chs      2032:        */
1.85      gwr      2033:
                   2034:        /*
                   2035:         * In detail:
                   2036:         *
                   2037:         * (a) lock pmap
                    2038:         * (b) Is the VA in an already mapped segment?  If so,
                   2039:         *       look to see if that VA address is "valid".  If it is, then
                   2040:         *       action is a change to an existing pte
                   2041:         * (c) if not mapped segment, need to allocate pmeg
                    2042:         * (d) if adding a pte entry or changing the physaddr of an
                    2043:         *              existing one, use pv stuff; for a change, possibly pmap_remove().
                   2044:         * (e) change/add pte
                   2045:         */
                   2046:
1.30      gwr      2047: #ifdef DIAGNOSTIC
1.98      gwr      2048:        if ((pgva < virtual_avail) || (pgva >= DVMA_MAP_END))
1.85      gwr      2049:                panic("pmap_enter_kernel: bad va=0x%lx", pgva);
                   2050:        if ((new_pte & (PG_VALID | PG_SYSTEM)) != (PG_VALID | PG_SYSTEM))
                   2051:                panic("pmap_enter_kernel: bad pte");
                   2052: #endif
                   2053:
1.98      gwr      2054:        if (pgva >= DVMA_MAP_BASE) {
1.85      gwr      2055:                /* This is DVMA space.  Always want it non-cached. */
                   2056:                new_pte |= PG_NC;
1.29      gwr      2057:        }
                   2058:
1.164     tsutsui  2059:        segva = sun3_trunc_seg(pgva);
1.152     thorpej  2060:        do_pv = true;
1.85      gwr      2061:
1.90      gwr      2062:        /* Do we have a PMEG? */
1.82      gwr      2063:        sme = get_segmap(segva);
1.90      gwr      2064:        if (sme != SEGINV) {
                   2065:                /* Found a PMEG in the segmap.  Cool. */
                   2066:                pmegp = pmeg_p(sme);
                   2067: #ifdef DIAGNOSTIC
                   2068:                /* Make sure it is the right PMEG. */
1.92      gwr      2069:                if (sme != pmap->pm_segmap[VA_SEGNUM(segva)])
1.145     chs      2070:                        panic("pmap_enter_kernel: wrong sme at VA=0x%lx",
                   2071:                              segva);
1.90      gwr      2072:                /* Make sure it is ours. */
1.92      gwr      2073:                if (pmegp->pmeg_owner != pmap)
1.90      gwr      2074:                        panic("pmap_enter_kernel: MMU has bad pmeg 0x%x", sme);
                   2075: #endif
                   2076:        } else {
                   2077:                /* No PMEG in the segmap.  Have to allocate one. */
1.92      gwr      2078:                pmegp = pmeg_allocate(pmap, segva);
1.85      gwr      2079:                sme = pmegp->pmeg_index;
1.92      gwr      2080:                pmap->pm_segmap[VA_SEGNUM(segva)] = sme;
1.85      gwr      2081:                set_segmap_allctx(segva, sme);
1.90      gwr      2082: #ifdef PMAP_DEBUG
                   2083:                pmeg_verify_empty(segva);
1.85      gwr      2084:                if (pmap_debug & PMD_SEGMAP) {
1.145     chs      2085:                        printf("pmap: set_segmap pmap=%p va=0x%lx sme=0x%x "
                   2086:                               "(ek)\n", pmap, segva, sme);
1.85      gwr      2087:                }
1.29      gwr      2088: #endif
1.85      gwr      2089:                /* There are no existing mappings to deal with. */
                   2090:                old_pte = 0;
                   2091:                goto add_pte;
                   2092:        }
1.80      gwr      2093:
1.85      gwr      2094:        /*
                    2095:         * We have a PMEG.  Is the VA already mapped somewhere?
                    2096:         *      (a) if so, is it the same pa? (really a protection change)
                    2097:         *      (b) if not the same pa, then we have to unlink from the old pa
                   2098:         */
                   2099:        old_pte = get_pte(pgva);
                   2100:        if ((old_pte & PG_VALID) == 0)
                   2101:                goto add_pte;
                   2102:
                   2103:        /* Have valid translation.  Flush cache before changing it. */
1.38      gwr      2104: #ifdef HAVECACHE
1.50      gwr      2105:        if (cache_size) {
1.85      gwr      2106:                cache_flush_page(pgva);
                   2107:                /* Get fresh mod/ref bits from write-back. */
                   2108:                old_pte = get_pte(pgva);
1.50      gwr      2109:        }
1.38      gwr      2110: #endif
                   2111:
1.85      gwr      2112:        /* XXX - removing valid page here, way lame... -glass */
                   2113:        pmegp->pmeg_vpages--;
                   2114:
                   2115:        if (!IS_MAIN_MEM(old_pte)) {
                   2116:                /* Was not main memory, so no pv_entry for it. */
                   2117:                goto add_pte;
1.38      gwr      2118:        }
                   2119:
1.85      gwr      2120:        /* Old mapping was main memory.  Save mod/ref bits. */
                   2121:        save_modref_bits(old_pte);
1.38      gwr      2122:
1.85      gwr      2123:        /*
                   2124:         * If not changing the type or pfnum then re-use pv_entry.
                   2125:         * Note we get here only with old_pte having PGT_OBMEM.
                   2126:         */
1.132     chs      2127:        if ((old_pte & (PG_TYPE|PG_FRAME)) == (new_pte & (PG_TYPE|PG_FRAME))) {
1.152     thorpej  2128:                do_pv = false;          /* re-use pv_entry */
1.85      gwr      2129:                new_pte |= (old_pte & PG_NC);
                   2130:                goto add_pte;
1.38      gwr      2131:        }
                   2132:
1.85      gwr      2133:        /* OK, different type or PA, have to kill old pv_entry. */
1.92      gwr      2134:        pv_unlink(pmap, old_pte, pgva);
1.29      gwr      2135:
1.145     chs      2136: add_pte:       /* can be destructive */
1.85      gwr      2137:        pmeg_set_wiring(pmegp, pgva, wired);
1.80      gwr      2138:
1.85      gwr      2139:        /* Anything but MAIN_MEM is mapped non-cached. */
                   2140:        if (!IS_MAIN_MEM(new_pte)) {
                   2141:                new_pte |= PG_NC;
1.152     thorpej  2142:                do_pv = false;
1.85      gwr      2143:        }
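                                 /*
                                  * Editor's note: pv_link() returns PV_NC when this mapping
                                  * would, presumably, create a cache alias with another
                                  * mapping of the same page; the new mapping is then made
                                  * non-cached.
                                  */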
1.152     thorpej  2144:        if (do_pv == true) {
1.92      gwr      2145:                if (pv_link(pmap, new_pte, pgva) & PV_NC)
1.85      gwr      2146:                        new_pte |= PG_NC;
                   2147:        }
1.39      gwr      2148: #ifdef PMAP_DEBUG
1.85      gwr      2149:        if ((pmap_debug & PMD_SETPTE) || (pgva == pmap_db_watchva)) {
1.145     chs      2150:                printf("pmap: set_pte pmap=%p va=0x%lx old=0x%x new=0x%x "
                   2151:                       "(ek)\n", pmap, pgva, old_pte, new_pte);
1.85      gwr      2152:        }
1.39      gwr      2153: #endif
1.85      gwr      2154:        /* cache flush done above */
                   2155:        set_pte(pgva, new_pte);
                   2156:        pmegp->pmeg_vpages++;
1.38      gwr      2157: }
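
                         /*
                          * Editor's illustration, not part of the pmap proper: the
                          * segmap/PMEG lookup pattern shared by pmap_enter_kernel()
                          * above and pmap_enter_user() below.  A minimal sketch using
                          * only names from this file, assuming the current MMU context
                          * is already the right one for the VA:
                          */
                         #if 0  /* example only */
                         static pmeg_t
                         example_va_to_pmeg(vaddr_t va)
                         {
                                 vaddr_t segva = sun3_trunc_seg(va);    /* 128KB segment base */
                                 int sme = get_segmap(segva);           /* one-byte segmap entry */

                                 return ((sme == SEGINV) ? NULL : pmeg_p(sme));
                         }
                         #endif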
                   2158:
1.80      gwr      2159:
1.145     chs      2160: static void
1.151     thorpej  2161: pmap_enter_user(pmap_t pmap, vaddr_t pgva, int new_pte, bool wired)
1.38      gwr      2162: {
1.80      gwr      2163:        int do_pv, old_pte, sme;
1.132     chs      2164:        vaddr_t segva;
1.38      gwr      2165:        pmeg_t pmegp;
                   2166:
1.85      gwr      2167: #ifdef DIAGNOSTIC
                   2168:        if (pgva >= VM_MAXUSER_ADDRESS)
                   2169:                panic("pmap_enter_user: bad va=0x%lx", pgva);
                   2170:        if ((new_pte & (PG_VALID | PG_SYSTEM)) != PG_VALID)
                   2171:                panic("pmap_enter_user: bad pte");
                   2172: #endif
                   2173: #ifdef PMAP_DEBUG
1.38      gwr      2174:        /*
1.85      gwr      2175:         * Some user pages are wired here, and a later
1.113     thorpej  2176:         * call to pmap_unwire() will unwire them.
1.85      gwr      2177:         * XXX - Need a separate list for wired user pmegs
                   2178:         * so they can not be stolen from the active list.
                   2179:         * XXX - Note: vm_fault.c assumes pmap_extract will
                   2180:         * work on wired mappings, so must preserve them...
                   2181:         * XXX: Maybe keep a list of wired PMEGs?
1.38      gwr      2182:         */
1.85      gwr      2183:        if (wired && (pmap_debug & PMD_WIRING)) {
1.145     chs      2184:                db_printf("pmap_enter_user: attempt to wire user page, "
                   2185:                          "ignored\n");
1.85      gwr      2186:                Debugger();
                   2187:        }
                   2188: #endif
1.15      glass    2189:
1.85      gwr      2190:        /* Validate this assumption. */
                   2191:        if (pmap != current_pmap()) {
                   2192: #ifdef PMAP_DEBUG
1.93      gwr      2193:                /* Apparently, this never happens. */
1.139     thorpej  2194:                db_printf("pmap_enter_user: not curlwp\n");
1.85      gwr      2195:                Debugger();
1.38      gwr      2196: #endif
1.93      gwr      2197:                /* Just throw it out (fault it in later). */
1.85      gwr      2198:                /* XXX: But must remember it if wired... */
                   2199:                return;
1.1       glass    2200:        }
1.38      gwr      2201:
1.164     tsutsui  2202:        segva = sun3_trunc_seg(pgva);
1.152     thorpej  2203:        do_pv = true;
1.38      gwr      2204:
1.85      gwr      2205:        /*
                   2206:         * If this pmap was sharing the "empty" context,
                   2207:         * allocate a real context for its exclusive use.
                   2208:         */
                   2209:        if (!has_context(pmap)) {
                   2210:                context_allocate(pmap);
1.28      gwr      2211: #ifdef PMAP_DEBUG
1.85      gwr      2212:                if (pmap_debug & PMD_CONTEXT)
                   2213:                        printf("pmap_enter(%p) got context %d\n",
1.145     chs      2214:                               pmap, pmap->pm_ctxnum);
1.85      gwr      2215: #endif
                   2216:                set_context(pmap->pm_ctxnum);
                   2217:        } else {
                   2218: #ifdef PMAP_DEBUG
                   2219:                /* Make sure context is correct. */
                   2220:                if (pmap->pm_ctxnum != get_context()) {
                   2221:                        db_printf("pmap_enter_user: wrong context\n");
                   2222:                        Debugger();
                   2223:                        /* XXX: OK to proceed? */
                   2224:                        set_context(pmap->pm_ctxnum);
1.38      gwr      2225:                }
1.28      gwr      2226: #endif
1.38      gwr      2227:        }
                   2228:
1.85      gwr      2229:        /*
                   2230:         * We have a context.  Do we have a PMEG?
                   2231:         */
                   2232:        sme = get_segmap(segva);
                   2233:        if (sme != SEGINV) {
                   2234:                /* Found a PMEG in the segmap.  Cool. */
                   2235:                pmegp = pmeg_p(sme);
1.90      gwr      2236: #ifdef DIAGNOSTIC
1.85      gwr      2237:                /* Make sure it is the right PMEG. */
                   2238:                if (sme != pmap->pm_segmap[VA_SEGNUM(segva)])
                   2239:                        panic("pmap_enter_user: wrong sme at VA=0x%lx", segva);
1.90      gwr      2240:                /* Make sure it is ours. */
                   2241:                if (pmegp->pmeg_owner != pmap)
                   2242:                        panic("pmap_enter_user: MMU has bad pmeg 0x%x", sme);
1.85      gwr      2243: #endif
                   2244:        } else {
                   2245:                /* Not in the segmap.  Try the S/W cache. */
                   2246:                pmegp = pmeg_cache(pmap, segva);
                   2247:                if (pmegp) {
                   2248:                        /* Found PMEG in cache.  Just reload it. */
                   2249:                        sme = pmegp->pmeg_index;
                   2250:                        set_segmap(segva, sme);
                   2251:                } else {
                   2252:                        /* PMEG not in cache, so allocate one. */
                   2253:                        pmegp = pmeg_allocate(pmap, segva);
                   2254:                        sme = pmegp->pmeg_index;
                   2255:                        pmap->pm_segmap[VA_SEGNUM(segva)] = sme;
                   2256:                        set_segmap(segva, sme);
                   2257: #ifdef PMAP_DEBUG
                   2258:                        pmeg_verify_empty(segva);
                   2259: #endif
                   2260:                }
                   2261: #ifdef PMAP_DEBUG
                   2262:                if (pmap_debug & PMD_SEGMAP) {
1.145     chs      2263:                        printf("pmap: set_segmap pmap=%p va=0x%lx sme=0x%x "
                   2264:                               "(eu)\n", pmap, segva, sme);
1.85      gwr      2265:                }
1.30      gwr      2266: #endif
1.85      gwr      2267:        }
1.38      gwr      2268:
                   2269:        /*
1.83      gwr      2270:         * We have a PMEG.  Is the VA already mapped to somewhere?
                   2271:         *      (a) if so, is it same pa? (really a protection change)
                   2272:         *      (b) if not same pa, then we have to unlink from old pa
1.38      gwr      2273:         */
1.82      gwr      2274:        old_pte = get_pte(pgva);
1.38      gwr      2275:        if ((old_pte & PG_VALID) == 0)
                   2276:                goto add_pte;
                   2277:
1.50      gwr      2278:        /* Have valid translation.  Flush cache before changing it. */
                   2279: #ifdef HAVECACHE
1.52      gwr      2280:        if (cache_size) {
1.82      gwr      2281:                cache_flush_page(pgva);
1.52      gwr      2282:                /* Get fresh mod/ref bits from write-back. */
1.82      gwr      2283:                old_pte = get_pte(pgva);
1.52      gwr      2284:        }
1.50      gwr      2285: #endif
                   2286:
                   2287:        /* XXX - removing valid page here, way lame... -glass */
1.38      gwr      2288:        pmegp->pmeg_vpages--;
                   2289:
1.44      gwr      2290:        if (!IS_MAIN_MEM(old_pte)) {
1.38      gwr      2291:                /* Was not main memory, so no pv_entry for it. */
1.33      gwr      2292:                goto add_pte;
                   2293:        }
1.1       glass    2294:
1.38      gwr      2295:        /* Old mapping was main memory.  Save mod/ref bits. */
                   2296:        save_modref_bits(old_pte);
1.1       glass    2297:
1.38      gwr      2298:        /*
                   2299:         * If not changing the type or pfnum then re-use pv_entry.
                   2300:         * Note we get here only with old_pte having PGT_OBMEM.
                   2301:         */
1.132     chs      2302:        if ((old_pte & (PG_TYPE|PG_FRAME)) == (new_pte & (PG_TYPE|PG_FRAME))) {
1.152     thorpej  2303:                do_pv = false;          /* re-use pv_entry */
1.38      gwr      2304:                new_pte |= (old_pte & PG_NC);
                   2305:                goto add_pte;
1.28      gwr      2306:        }
1.1       glass    2307:
1.38      gwr      2308:        /* OK, different type or PA, have to kill old pv_entry. */
1.92      gwr      2309:        pv_unlink(pmap, old_pte, pgva);
1.38      gwr      2310:
1.145     chs      2311:   add_pte:
1.85      gwr      2312:        /* XXX - Wiring changes on user pmaps? */
                   2313:        /* pmeg_set_wiring(pmegp, pgva, wired); */
1.38      gwr      2314:
1.92      gwr      2315:        /* Anything but MAIN_MEM is mapped non-cached. */
1.44      gwr      2316:        if (!IS_MAIN_MEM(new_pte)) {
1.38      gwr      2317:                new_pte |= PG_NC;
1.152     thorpej  2318:                do_pv = false;
1.38      gwr      2319:        }
1.152     thorpej  2320:        if (do_pv == true) {
1.92      gwr      2321:                if (pv_link(pmap, new_pte, pgva) & PV_NC)
1.38      gwr      2322:                        new_pte |= PG_NC;
                   2323:        }
1.39      gwr      2324: #ifdef PMAP_DEBUG
1.82      gwr      2325:        if ((pmap_debug & PMD_SETPTE) || (pgva == pmap_db_watchva)) {
1.145     chs      2326:                printf("pmap: set_pte pmap=%p va=0x%lx old=0x%x new=0x%x "
                   2327:                       "(eu)\n", pmap, pgva, old_pte, new_pte);
1.39      gwr      2328:        }
                   2329: #endif
1.50      gwr      2330:        /* cache flush done above */
1.82      gwr      2331:        set_pte(pgva, new_pte);
1.38      gwr      2332:        pmegp->pmeg_vpages++;
                   2333: }
                   2334:
1.145     chs      2335: void
1.162     cegger   2336: pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
1.115     chs      2337: {
1.132     chs      2338:        int new_pte, s;
                   2339:        pmap_t pmap = kernel_pmap;
                   2340:        pmeg_t pmegp;
                   2341:        int sme;
                   2342:        vaddr_t segva;
                   2343:
                   2344: #ifdef PMAP_DEBUG
                   2345:        if ((pmap_debug & PMD_ENTER) ||
1.145     chs      2346:            (va == pmap_db_watchva))
1.132     chs      2347:                printf("pmap_kenter_pa(0x%lx, 0x%lx, 0x%x)\n",
1.145     chs      2348:                       va, pa, prot);
1.132     chs      2349: #endif
                   2350:
                   2351:        /* Get page-type bits from low part of the PA... */
                   2352:        new_pte = (pa & PMAP_SPEC) << PG_MOD_SHIFT;
                   2353:
                   2354:        /* ...now the valid and writable bits... */
                   2355:        new_pte |= PG_SYSTEM|PG_VALID;
                   2356:        if (prot & VM_PROT_WRITE)
                   2357:                new_pte |= PG_WRITE;
                   2358:
                   2359:        /* ...and finally the page-frame number. */
                   2360:        new_pte |= PA_PGNUM(pa);
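                                 /*
                                  * Example (editor's), assuming no PMAP_SPEC bits in the
                                  * low part of 'pa': a writable kenter of ordinary main
                                  * memory yields
                                  *      new_pte == PG_VALID | PG_SYSTEM | PG_WRITE | PA_PGNUM(pa)
                                  * so IS_MAIN_MEM() below holds and the page stays cached.
                                  */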
                   2361:
                   2362:        /*
                    2363:         * keep in hardware only, since it's mapped into all contexts anyway;
                    2364:         * need to handle possibly allocating additional pmegs;
                    2365:         * need to make sure they can't be stolen from the kernel;
                    2366:         * map any new pmegs into all contexts, make sure rest of pmeg is null;
                    2367:         * must also deal with changes.
                   2368:         */
                   2369:
                   2370:        /*
                   2371:         * In detail:
                   2372:         *
                   2373:         * (a) lock pmap
                    2374:         * (b) Is the VA in an already mapped segment?  If so,
                   2375:         *       look to see if that VA address is "valid".  If it is, then
                   2376:         *       action is a change to an existing pte
                   2377:         * (c) if not mapped segment, need to allocate pmeg
                   2378:         * (d) change/add pte
                   2379:         */
                   2380:
                   2381: #ifdef DIAGNOSTIC
                   2382:        if ((va < virtual_avail) || (va >= DVMA_MAP_END))
1.134     tsutsui  2383:                panic("pmap_kenter_pa: bad va=0x%lx", va);
1.132     chs      2384: #endif
                   2385:
                   2386:        if (va >= DVMA_MAP_BASE) {
                   2387:                /* This is DVMA space.  Always want it non-cached. */
                   2388:                new_pte |= PG_NC;
                   2389:        }
                   2390:
1.164     tsutsui  2391:        segva = sun3_trunc_seg(va);
1.132     chs      2392:
                   2393:        s = splvm();
                   2394:
                   2395:        /* Do we have a PMEG? */
                   2396:        sme = get_segmap(segva);
                   2397:        if (sme != SEGINV) {
                   2398:                KASSERT((get_pte(va) & PG_VALID) == 0);
                   2399:
                   2400:                /* Found a PMEG in the segmap.  Cool. */
                   2401:                pmegp = pmeg_p(sme);
                   2402: #ifdef DIAGNOSTIC
                   2403:                /* Make sure it is the right PMEG. */
                   2404:                if (sme != pmap->pm_segmap[VA_SEGNUM(segva)])
1.134     tsutsui  2405:                        panic("pmap_kenter_pa: wrong sme at VA=0x%lx", segva);
1.132     chs      2406:                /* Make sure it is ours. */
                   2407:                if (pmegp->pmeg_owner != pmap)
                   2408:                        panic("pmap_kenter_pa: MMU has bad pmeg 0x%x", sme);
                   2409: #endif
                   2410:        } else {
                   2411:
                   2412:                /* No PMEG in the segmap.  Have to allocate one. */
                   2413:                pmegp = pmeg_allocate(pmap, segva);
                   2414:                sme = pmegp->pmeg_index;
                   2415:                pmap->pm_segmap[VA_SEGNUM(segva)] = sme;
                   2416:                set_segmap_allctx(segva, sme);
                   2417: #ifdef PMAP_DEBUG
                   2418:                pmeg_verify_empty(segva);
                   2419:                if (pmap_debug & PMD_SEGMAP) {
1.145     chs      2420:                        printf("pmap: set_segmap pmap=%p va=0x%lx sme=0x%x "
                   2421:                               "(ek)\n", pmap, segva, sme);
1.132     chs      2422:                }
                   2423: #endif
                   2424:        }
                   2425:
1.152     thorpej  2426:        pmeg_set_wiring(pmegp, va, true);
1.132     chs      2427:
                   2428:        /* Anything but MAIN_MEM is mapped non-cached. */
                   2429:        if (!IS_MAIN_MEM(new_pte)) {
                   2430:                new_pte |= PG_NC;
                   2431:        }
                   2432: #ifdef PMAP_DEBUG
                   2433:        if ((pmap_debug & PMD_SETPTE) || (va == pmap_db_watchva)) {
1.156     tsutsui  2434:                printf("pmap: set_pte pmap=%p va=0x%lx new=0x%x "
                   2435:                       "(ek)\n", pmap, va, new_pte);
1.132     chs      2436:        }
                   2437: #endif
                   2438:        /* cache flush done above */
                   2439:        set_pte(va, new_pte);
                   2440:        pmegp->pmeg_vpages++;
                   2441:        splx(s);
1.115     chs      2442: }
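
                         /*
                          * Editor's usage sketch: pmap_kenter_pa() above and
                          * pmap_kremove() below pair up for temporary wired kernel
                          * mappings.  A minimal, hypothetical caller, assuming
                          * page-aligned va and pa:
                          */
                         #if 0  /* example only */
                                 pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE, 0);
                                 /* ... use the mapping ... */
                                 pmap_kremove(va, PAGE_SIZE);
                         #endif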
                   2443:
1.145     chs      2444: void
                   2445: pmap_kremove(vaddr_t va, vsize_t len)
1.115     chs      2446: {
1.132     chs      2447:        pmap_t pmap = kernel_pmap;
                   2448:        vaddr_t eva, neva, pgva, segva, segnum;
                   2449:        int pte, sme;
                   2450:        pmeg_t pmegp;
                   2451: #ifdef HAVECACHE
                   2452:        int flush_by_page = 0;
                   2453: #endif
                   2454:        int s;
                   2455:
                   2456:        s = splvm();
                   2457:        segnum = VA_SEGNUM(va);
                   2458:        for (eva = va + len; va < eva; va = neva, segnum++) {
1.164     tsutsui  2459:                neva = sun3_trunc_seg(va) + NBSG;
1.132     chs      2460:                if (neva > eva) {
                   2461:                        neva = eva;
                   2462:                }
                   2463:                if (pmap->pm_segmap[segnum] == SEGINV) {
                   2464:                        continue;
                   2465:                }
                   2466:
1.164     tsutsui  2467:                segva = sun3_trunc_seg(va);
1.132     chs      2468:                sme = get_segmap(segva);
                   2469:                pmegp = pmeg_p(sme);
                   2470:
                   2471: #ifdef HAVECACHE
                   2472:                if (cache_size) {
                   2473:
1.145     chs      2474:                        /*
1.132     chs      2475:                         * If the range to be removed is larger than the cache,
                   2476:                         * it will be cheaper to flush this segment entirely.
                   2477:                         */
                   2478:
                   2479:                        if (cache_size < (eva - va)) {
                   2480:                                /* cheaper to flush whole segment */
                   2481:                                cache_flush_segment(segva);
                   2482:                        } else {
                   2483:                                flush_by_page = 1;
                   2484:                        }
                   2485:                }
                   2486: #endif
                   2487:
                   2488:                /* Invalidate the PTEs in the given range. */
1.140     thorpej  2489:                for (pgva = va; pgva < neva; pgva += PAGE_SIZE) {
1.132     chs      2490:                        pte = get_pte(pgva);
                   2491:                        if (pte & PG_VALID) {
                   2492: #ifdef HAVECACHE
                   2493:                                if (flush_by_page) {
                   2494:                                        cache_flush_page(pgva);
1.145     chs      2495:                                        /* Get fresh mod/ref bits
                   2496:                                           from write-back. */
1.132     chs      2497:                                        pte = get_pte(pgva);
                   2498:                                }
                   2499: #endif
                   2500: #ifdef PMAP_DEBUG
1.145     chs      2501:                                if ((pmap_debug & PMD_SETPTE) ||
                   2502:                                    (pgva == pmap_db_watchva)) {
1.132     chs      2503:                                        printf("pmap: set_pte pmap=%p va=0x%lx"
1.145     chs      2504:                                               " old=0x%x new=0x%x (rrmmu)\n",
                   2505:                                               pmap, pgva, pte, PG_INVAL);
1.132     chs      2506:                                }
                   2507: #endif
                   2508:                                set_pte(pgva, PG_INVAL);
                   2509:                                KASSERT(pmegp->pmeg_vpages > 0);
                   2510:                                pmegp->pmeg_vpages--;
                   2511:                        }
                   2512:                }
                   2513:                KASSERT(pmegp->pmeg_vpages >= 0);
                   2514:                if (pmegp->pmeg_vpages == 0) {
                   2515:                        /* We are done with this pmeg. */
                   2516: #ifdef PMAP_DEBUG
                   2517:                        if (is_pmeg_wired(pmegp)) {
                   2518:                                if (pmap_debug & PMD_WIRING) {
1.145     chs      2519:                                        db_printf("pmap: removing wired "
                   2520:                                                  "pmeg: %p\n", pmegp);
1.132     chs      2521:                                        Debugger();
                   2522:                                }
                   2523:                        }
                   2524:                        if (pmap_debug & PMD_SEGMAP) {
1.145     chs      2525:                                printf("pmap: set_segmap ctx=%d v=0x%lx "
                   2526:                                       "old=0x%x new=ff (rm)\n",
                   2527:                                       pmap->pm_ctxnum, segva,
                   2528:                                       pmegp->pmeg_index);
1.132     chs      2529:                        }
                   2530:                        pmeg_verify_empty(segva);
                   2531: #endif
                   2532:
                   2533:                        /* Remove it from the MMU. */
                   2534:                        set_segmap_allctx(segva, SEGINV);
                   2535:                        pmap->pm_segmap[VA_SEGNUM(segva)] = SEGINV;
                   2536:
                   2537:                        /* Now, put it on the free list. */
                   2538:                        pmeg_free(pmegp);
                   2539:                }
1.115     chs      2540:        }
1.132     chs      2541:        splx(s);
1.115     chs      2542: }
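
                         /*
                          * Editor's note on the flush heuristic in pmap_kremove() above
                          * (also used by pmap_protect_mmu() below): once the range being
                          * torn down exceeds cache_size, every cache line may hold data
                          * from it anyway, so one cache_flush_segment() is cheaper than
                          * flushing page by page.
                          */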
                   2543:
1.38      gwr      2544:
1.85      gwr      2545: /*
                   2546:  * The trap handler calls this so we can try to resolve
                   2547:  * user-level faults by reloading a PMEG.
                    2548:  * If that does not produce a valid mapping,
                    2549:  * call uvm_fault as usual.
                   2550:  *
                   2551:  * XXX: Merge this with the next function?
                   2552:  */
1.145     chs      2553: int
                   2554: _pmap_fault(struct vm_map *map, vaddr_t va, vm_prot_t ftype)
1.85      gwr      2555: {
                   2556:        pmap_t pmap;
                   2557:        int rv;
                   2558:
                   2559:        pmap = vm_map_pmap(map);
                   2560:        if (map == kernel_map) {
                   2561:                /* Do not allow faults below the "managed" space. */
                   2562:                if (va < virtual_avail) {
                   2563:                        /*
                   2564:                         * Most pages below virtual_avail are read-only,
                   2565:                         * so I will assume it is a protection failure.
                   2566:                         */
1.124     chs      2567:                        return EACCES;
1.85      gwr      2568:                }
                   2569:        } else {
                   2570:                /* User map.  Try reload shortcut. */
                   2571:                if (pmap_fault_reload(pmap, va, ftype))
1.124     chs      2572:                        return 0;
1.85      gwr      2573:        }
1.150     drochner 2574:        rv = uvm_fault(map, va, ftype);
1.85      gwr      2575:
                   2576: #ifdef PMAP_DEBUG
                   2577:        if (pmap_debug & PMD_FAULT) {
                   2578:                printf("pmap_fault(%p, 0x%lx, 0x%x) -> 0x%x\n",
1.145     chs      2579:                       map, va, ftype, rv);
1.85      gwr      2580:        }
                   2581: #endif
                   2582:
                   2583:        return (rv);
                   2584: }
                   2585:
                   2586: /*
                   2587:  * This is a shortcut used by the trap handler to
                   2588:  * reload PMEGs into a user segmap without calling
1.152     thorpej  2589:  * the actual VM fault handler.  Returns true if:
1.85      gwr      2590:  *     the PMEG was reloaded, and
                   2591:  *     it has a valid PTE at va.
                    2592:  * Otherwise returns zero and lets the VM code handle it.
                   2593:  */
1.145     chs      2594: int
                   2595: pmap_fault_reload(pmap_t pmap, vaddr_t pgva, vm_prot_t ftype)
1.38      gwr      2596: {
1.85      gwr      2597:        int rv, s, pte, chkpte, sme;
1.132     chs      2598:        vaddr_t segva;
1.38      gwr      2599:        pmeg_t pmegp;
                   2600:
1.82      gwr      2601:        if (pgva >= VM_MAXUSER_ADDRESS)
1.85      gwr      2602:                return (0);
                   2603:        if (pmap->pm_segmap == NULL) {
1.83      gwr      2604: #ifdef PMAP_DEBUG
1.85      gwr      2605:                db_printf("pmap_fault_reload: null segmap\n");
1.83      gwr      2606:                Debugger();
                   2607: #endif
1.85      gwr      2608:                return (0);
1.83      gwr      2609:        }
                   2610:
1.85      gwr      2611:        /* Short-cut using the S/W segmap. */
                   2612:        if (pmap->pm_segmap[VA_SEGNUM(pgva)] == SEGINV)
                   2613:                return (0);
                   2614:
1.164     tsutsui  2615:        segva = sun3_trunc_seg(pgva);
1.85      gwr      2616:        chkpte = PG_VALID;
                   2617:        if (ftype & VM_PROT_WRITE)
                   2618:                chkpte |= PG_WRITE;
                   2619:        rv = 0;
                   2620:
1.122     thorpej  2621:        s = splvm();
1.38      gwr      2622:
                   2623:        /*
1.85      gwr      2624:         * Given that we faulted on a user-space address, we will
                   2625:         * probably need a context.  Get a context now so we can
                   2626:         * try to resolve the fault with a segmap reload.
1.38      gwr      2627:         */
1.83      gwr      2628:        if (!has_context(pmap)) {
                   2629:                context_allocate(pmap);
                   2630: #ifdef PMAP_DEBUG
                   2631:                if (pmap_debug & PMD_CONTEXT)
1.85      gwr      2632:                        printf("pmap_fault(%p) got context %d\n",
1.145     chs      2633:                               pmap, pmap->pm_ctxnum);
1.83      gwr      2634: #endif
                   2635:                set_context(pmap->pm_ctxnum);
                   2636:        } else {
1.38      gwr      2637: #ifdef PMAP_DEBUG
1.83      gwr      2638:                /* Make sure context is correct. */
                   2639:                if (pmap->pm_ctxnum != get_context()) {
1.85      gwr      2640:                        db_printf("pmap_fault_reload: wrong context\n");
1.38      gwr      2641:                        Debugger();
1.83      gwr      2642:                        /* XXX: OK to proceed? */
                   2643:                        set_context(pmap->pm_ctxnum);
1.38      gwr      2644:                }
                   2645: #endif
                   2646:        }
                   2647:
1.82      gwr      2648:        sme = get_segmap(segva);
1.85      gwr      2649:        if (sme == SEGINV) {
                   2650:                /* See if there is something to reload. */
1.82      gwr      2651:                pmegp = pmeg_cache(pmap, segva);
1.80      gwr      2652:                if (pmegp) {
1.85      gwr      2653:                        /* Found one!  OK, reload it. */
                   2654:                        pmap_stats.ps_pmeg_faultin++;
1.80      gwr      2655:                        sme = pmegp->pmeg_index;
1.82      gwr      2656:                        set_segmap(segva, sme);
1.85      gwr      2657:                        pte = get_pte(pgva);
                   2658:                        if (pte & chkpte)
                   2659:                                rv = 1;
                   2660:                }
                   2661:        }
                   2662:
                   2663:        splx(s);
                   2664:        return (rv);
                   2665: }
                   2666:
                   2667:
                   2668: /*
                   2669:  * Clear the modify bit for the given physical page.
                   2670:  */
1.151     thorpej  2671: bool
1.145     chs      2672: pmap_clear_modify(struct vm_page *pg)
1.85      gwr      2673: {
1.115     chs      2674:        paddr_t pa = VM_PAGE_TO_PHYS(pg);
                   2675:        pv_entry_t *head;
1.85      gwr      2676:        u_char *pv_flags;
                   2677:        int s;
1.151     thorpej  2678:        bool rv;
1.85      gwr      2679:
                   2680:        pv_flags = pa_to_pvflags(pa);
                   2681:        head     = pa_to_pvhead(pa);
                   2682:
1.122     thorpej  2683:        s = splvm();
1.85      gwr      2684:        *pv_flags |= pv_syncflags(*head);
1.115     chs      2685:        rv = *pv_flags & PV_MOD;
1.85      gwr      2686:        *pv_flags &= ~PV_MOD;
                   2687:        splx(s);
1.115     chs      2688:        return rv;
1.85      gwr      2689: }
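
                         /*
                          * Editor's note: this function and the three below share one
                          * pattern: pv_syncflags() folds the hardware mod/ref bits from
                          * all of the page's mappings into the software pv flags, which
                          * are then tested and, in the "clear" variants, cleared.
                          */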
                   2690:
                   2691: /*
                   2692:  * Tell whether the given physical page has been modified.
                   2693:  */
1.151     thorpej  2694: bool
1.145     chs      2695: pmap_is_modified(struct vm_page *pg)
1.85      gwr      2696: {
1.115     chs      2697:        paddr_t pa = VM_PAGE_TO_PHYS(pg);
                   2698:        pv_entry_t *head;
1.85      gwr      2699:        u_char *pv_flags;
1.132     chs      2700:        int s;
1.151     thorpej  2701:        bool rv;
1.87      gwr      2702:
1.85      gwr      2703:        pv_flags = pa_to_pvflags(pa);
                   2704:        head     = pa_to_pvhead(pa);
                   2705:
1.122     thorpej  2706:        s = splvm();
1.85      gwr      2707:        if ((*pv_flags & PV_MOD) == 0)
                   2708:                *pv_flags |= pv_syncflags(*head);
                   2709:        rv = (*pv_flags & PV_MOD);
                   2710:        splx(s);
                   2711:        return (rv);
                   2712: }
                   2713:
                   2714: /*
                   2715:  * Clear the reference bit for the given physical page.
                   2716:  * It's OK to just remove mappings if that's easier.
                   2717:  */
1.151     thorpej  2718: bool
1.145     chs      2719: pmap_clear_reference(struct vm_page *pg)
1.85      gwr      2720: {
1.115     chs      2721:        paddr_t pa = VM_PAGE_TO_PHYS(pg);
                   2722:        pv_entry_t *head;
1.85      gwr      2723:        u_char *pv_flags;
                   2724:        int s;
1.151     thorpej  2725:        bool rv;
1.85      gwr      2726:
                   2727:        pv_flags = pa_to_pvflags(pa);
                   2728:        head     = pa_to_pvhead(pa);
                   2729:
1.122     thorpej  2730:        s = splvm();
1.85      gwr      2731:        *pv_flags |= pv_syncflags(*head);
1.115     chs      2732:        rv = *pv_flags & PV_REF;
1.85      gwr      2733:        *pv_flags &= ~PV_REF;
                   2734:        splx(s);
1.115     chs      2735:        return rv;
1.85      gwr      2736: }
                   2737:
                   2738: /*
                   2739:  * Tell whether the given physical page has been referenced.
1.152     thorpej  2740:  * It's OK to just return false if page is not mapped.
1.85      gwr      2741:  */
1.151     thorpej  2742: bool
1.145     chs      2743: pmap_is_referenced(struct vm_page *pg)
1.85      gwr      2744: {
1.115     chs      2745:        paddr_t pa = VM_PAGE_TO_PHYS(pg);
                   2746:        pv_entry_t *head;
1.85      gwr      2747:        u_char *pv_flags;
1.115     chs      2748:        int s;
1.151     thorpej  2749:        bool rv;
1.85      gwr      2750:
                   2751:        pv_flags = pa_to_pvflags(pa);
                   2752:        head     = pa_to_pvhead(pa);
                   2753:
1.122     thorpej  2754:        s = splvm();
1.85      gwr      2755:        if ((*pv_flags & PV_REF) == 0)
                   2756:                *pv_flags |= pv_syncflags(*head);
                   2757:        rv = (*pv_flags & PV_REF);
                   2758:        splx(s);
                   2759:        return (rv);
                   2760: }
                   2761:
                   2762:
                   2763: /*
                   2764:  * This is called by locore.s:cpu_switch() when it is
                   2765:  * switching to a new process.  Load new translations.
1.99      gwr      2766:  * Note: done in-line by locore.s unless PMAP_DEBUG
1.85      gwr      2767:  *
                   2768:  * Note that we do NOT allocate a context here, but
                   2769:  * share the "kernel only" context until we really
                   2770:  * need our own context for user-space mappings in
                   2771:  * pmap_enter_user().
                   2772:  */
1.145     chs      2773: void
                   2774: _pmap_switch(pmap_t pmap)
1.85      gwr      2775: {
                   2776:        set_context(pmap->pm_ctxnum);
                   2777:        ICIA();
                   2778: }
                   2779:
1.95      thorpej  2780: /*
1.99      gwr      2781:  * Exported version of pmap_activate().  This is called from the
                   2782:  * machine-independent VM code when a process is given a new pmap.
1.139     thorpej  2783:  * If (p == curlwp) do like cpu_switch would do; otherwise just
1.99      gwr      2784:  * take this as notification that the process has a new pmap.
1.95      thorpej  2785:  */
1.145     chs      2786: void
                   2787: pmap_activate(struct lwp *l)
1.95      thorpej  2788: {
1.139     thorpej  2789:        pmap_t pmap = l->l_proc->p_vmspace->vm_map.pmap;
1.95      thorpej  2790:
1.154     tsutsui  2791:        if (l->l_proc == curproc) {
1.99      gwr      2792:                _pmap_switch(pmap);
                   2793:        }
1.95      thorpej  2794: }
                   2795:
                   2796: /*
                   2797:  * Deactivate the address space of the specified process.
                   2798:  */
1.145     chs      2799: void
                   2800: pmap_deactivate(struct lwp *l)
1.95      thorpej  2801: {
1.132     chs      2802:        /* Nothing to do. */
1.95      thorpej  2803: }
1.85      gwr      2804:
                   2805: /*
1.113     thorpej  2806:  *     Routine:        pmap_unwire
                   2807:  *     Function:       Clear the wired attribute for a map/virtual-address
1.85      gwr      2808:  *                     pair.
                   2809:  *     In/out conditions:
                   2810:  *                     The mapping must already exist in the pmap.
                   2811:  */
1.145     chs      2812: void
                   2813: pmap_unwire(pmap_t pmap, vaddr_t va)
1.85      gwr      2814: {
                   2815:        int s, sme;
                   2816:        int wiremask, ptenum;
                   2817:        pmeg_t pmegp;
                   2818:
                   2819: #ifdef PMAP_DEBUG
                   2820:        if (pmap_debug & PMD_WIRING)
1.113     thorpej  2821:                printf("pmap_unwire(pmap=%p, va=0x%lx)\n",
1.145     chs      2822:                       pmap, va);
1.80      gwr      2823: #endif
1.85      gwr      2824:        /*
                   2825:         * We are asked to unwire pages that were wired when
                   2826:         * pmap_enter() was called and we ignored wiring.
                   2827:         * (VM code appears to wire a stack page during fork.)
                   2828:         */
                   2829:        if (pmap != kernel_pmap) {
                   2830: #ifdef PMAP_DEBUG
                   2831:                if (pmap_debug & PMD_WIRING) {
                   2832:                        db_printf("  (user pmap -- ignored)\n");
                   2833:                        Debugger();
1.38      gwr      2834:                }
                   2835: #endif
1.85      gwr      2836:                return;
1.38      gwr      2837:        }
                   2838:
1.85      gwr      2839:        ptenum = VA_PTE_NUM(va);
                   2840:        wiremask = 1 << ptenum;
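                                 /*
                                  * Example (editor's): a PMEG maps 16 pages, so pmeg_wired
                                  * serves as a per-page bit mask; e.g. VA_PTE_NUM(va) == 3
                                  * gives wiremask == 0x0008.
                                  */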
                   2841:
1.122     thorpej  2842:        s = splvm();
1.85      gwr      2843:        sme = get_segmap(va);
                   2844:        pmegp = pmeg_p(sme);
1.113     thorpej  2845:        pmegp->pmeg_wired &= ~wiremask;
1.85      gwr      2846:        splx(s);
                   2847: }
1.50      gwr      2848:
1.85      gwr      2849: /*
                   2850:  *     Copy the range specified by src_addr/len
                   2851:  *     from the source map to the range dst_addr/len
                   2852:  *     in the destination map.
                   2853:  *
                   2854:  *     This routine is only advisory and need not do anything.
                   2855:  */
                   2856: void
1.145     chs      2857: pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vaddr_t dst_addr, vsize_t len,
                   2858:          vaddr_t src_addr)
1.85      gwr      2859: {
                   2860: }
1.38      gwr      2861:
1.85      gwr      2862: /*
                   2863:  *     Routine:        pmap_extract
                   2864:  *     Function:
                   2865:  *             Extract the physical page address associated
                   2866:  *             with the given map/virtual_address pair.
                    2867:  *     Returns false if VA not valid.
                   2868:  */
1.151     thorpej  2869: bool
1.145     chs      2870: pmap_extract(pmap_t pmap, vaddr_t va, paddr_t *pap)
1.85      gwr      2871: {
                   2872:        int s, sme, segnum, ptenum, pte;
1.114     thorpej  2873:        paddr_t pa;
1.38      gwr      2874:
1.85      gwr      2875:        pte = 0;
1.122     thorpej  2876:        s = splvm();
1.85      gwr      2877:        if (pmap == kernel_pmap) {
                   2878:                sme = get_segmap(va);
                   2879:                if (sme != SEGINV)
                   2880:                        pte = get_pte(va);
                   2881:        } else {
                   2882:                /* This is rare, so do it the easy way. */
                   2883:                segnum = VA_SEGNUM(va);
                   2884:                sme = pmap->pm_segmap[segnum];
                   2885:                if (sme != SEGINV) {
                   2886:                        ptenum = VA_PTE_NUM(va);
                   2887:                        pte = get_pte_pmeg(sme, ptenum);
                   2888:                }
1.38      gwr      2889:        }
1.85      gwr      2890:        splx(s);
1.38      gwr      2891:
1.85      gwr      2892:        if ((pte & PG_VALID) == 0) {
                   2893: #ifdef PMAP_DEBUG
                   2894:                db_printf("pmap_extract: invalid va=0x%lx\n", va);
                   2895:                Debugger();
                   2896: #endif
1.152     thorpej  2897:                return (false);
1.38      gwr      2898:        }
1.85      gwr      2899:        pa = PG_PA(pte);
                   2900: #ifdef DIAGNOSTIC
                   2901:        if (pte & PG_TYPE) {
1.137     provos   2902:                panic("pmap_extract: not main mem, va=0x%lx", va);
1.39      gwr      2903:        }
                   2904: #endif
1.114     thorpej  2905:        if (pap != NULL)
                   2906:                *pap = pa;
1.152     thorpej  2907:        return (true);
1.1       glass    2908: }
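
                         /*
                          * Editor's usage sketch for pmap_extract(), from a hypothetical
                          * caller that requires the mapping to exist:
                          */
                         #if 0  /* example only */
                                 paddr_t pa;

                                 if (pmap_extract(pmap, va, &pa) == false)
                                         panic("no mapping for va 0x%lx", va);
                         #endif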
1.38      gwr      2909:
1.85      gwr      2910:
1.38      gwr      2911: /*
1.85      gwr      2912:  *       pmap_page_protect:
1.1       glass    2913:  *
1.85      gwr      2914:  *       Lower the permission for all mappings to a given page.
1.1       glass    2915:  */
1.145     chs      2916: void
                   2917: pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
1.38      gwr      2918: {
1.115     chs      2919:        paddr_t pa = VM_PAGE_TO_PHYS(pg);
1.38      gwr      2920:        int s;
                   2921:
1.122     thorpej  2922:        s = splvm();
1.85      gwr      2923: #ifdef PMAP_DEBUG
                   2924:        if (pmap_debug & PMD_PROTECT)
1.156     tsutsui  2925:                printf("pmap_page_protect(0x%lx, 0x%x)\n", pa, prot);
1.38      gwr      2926: #endif
1.85      gwr      2927:        switch (prot) {
                   2928:        case VM_PROT_ALL:
                   2929:                break;
                   2930:        case VM_PROT_READ:
                   2931:        case VM_PROT_READ|VM_PROT_EXECUTE:
                   2932:                pv_changepte(pa, 0, PG_WRITE);
                   2933:                break;
                   2934:        default:
                   2935:                /* remove mapping for all pmaps that have it */
                   2936:                pv_remove_all(pa);
                   2937:                break;
                   2938:        }
1.77      gwr      2939:        splx(s);
1.85      gwr      2940: }
1.66      gwr      2941:
1.85      gwr      2942: /*
                   2943:  * Initialize a preallocated and zeroed pmap structure,
                   2944:  * such as one in a vmspace structure.
                   2945:  */
1.145     chs      2946: void
                   2947: pmap_pinit(pmap_t pmap)
1.85      gwr      2948: {
                   2949:        pmap_common_init(pmap);
                   2950:        pmap_user_init(pmap);
                   2951: }
1.66      gwr      2952:
1.38      gwr      2953: /*
1.85      gwr      2954:  *     Reduce the permissions on the specified
                   2955:  *     range of this map as requested.
                   2956:  *     (Make pages read-only.)
1.38      gwr      2957:  */
1.145     chs      2958: void
                   2959: pmap_protect(pmap_t pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
1.1       glass    2960: {
1.132     chs      2961:        vaddr_t va, neva;
1.85      gwr      2962:        int segnum;
1.5       glass    2963:
1.85      gwr      2964:        /* If leaving writable, nothing to do. */
                   2965:        if (prot & VM_PROT_WRITE)
                   2966:                return;
1.82      gwr      2967:
1.85      gwr      2968:        /* If removing all permissions, just unmap. */
                   2969:        if ((prot & VM_PROT_READ) == 0) {
                   2970:                pmap_remove(pmap, sva, eva);
                   2971:                return;
                   2972:        }
1.38      gwr      2973:
1.85      gwr      2974: #ifdef PMAP_DEBUG
                   2975:        if ((pmap_debug & PMD_PROTECT) ||
1.145     chs      2976:            ((sva <= pmap_db_watchva && eva > pmap_db_watchva)))
1.85      gwr      2977:                printf("pmap_protect(%p, 0x%lx, 0x%lx)\n", pmap, sva, eva);
                   2978: #endif
1.38      gwr      2979:
1.132     chs      2980:        KASSERT((pmap == kernel_pmap) ?
1.145     chs      2981:                sva >= virtual_avail && eva < DVMA_MAP_END :
                   2982:                eva <= VM_MAXUSER_ADDRESS);
1.85      gwr      2983:        va = sva;
                   2984:        segnum = VA_SEGNUM(va);
                   2985:        while (va < eva) {
1.164     tsutsui  2986:                neva = sun3_trunc_seg(va) + NBSG;
1.85      gwr      2987:                if (neva > eva)
                   2988:                        neva = eva;
                   2989:                if (pmap->pm_segmap[segnum] != SEGINV)
                   2990:                        pmap_protect1(pmap, va, neva);
                   2991:                va = neva;
                   2992:                segnum++;
1.38      gwr      2993:        }
1.1       glass    2994: }
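
                         /*
                          * Example (editor's): the loop above splits work at 128KB
                          * segment boundaries.  Assuming both segments are mapped,
                          * sva=0x1f000 and eva=0x22000 yield two calls:
                          * pmap_protect1(pmap, 0x1f000, 0x20000) and
                          * pmap_protect1(pmap, 0x20000, 0x22000).
                          */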
                   2995:
1.38      gwr      2996: /*
1.85      gwr      2997:  * Remove write permissions in given range.
                   2998:  * (guaranteed to be within one segment)
                   2999:  * similar to pmap_remove1()
1.38      gwr      3000:  */
1.145     chs      3001: void
                   3002: pmap_protect1(pmap_t pmap, vaddr_t sva, vaddr_t eva)
1.1       glass    3003: {
1.85      gwr      3004:        int old_ctx, s, sme;
1.151     thorpej  3005:        bool in_ctx;
1.38      gwr      3006:
1.122     thorpej  3007:        s = splvm();
1.84      gwr      3008:
1.85      gwr      3009: #ifdef DIAGNOSTIC
1.164     tsutsui  3010:        if (sun3_trunc_seg(sva) != sun3_trunc_seg(eva-1))
1.85      gwr      3011:                panic("pmap_protect1: bad range!");
                   3012: #endif
1.38      gwr      3013:
1.85      gwr      3014:        if (pmap == kernel_pmap) {
                   3015:                sme = get_segmap(sva);
                   3016:                if (sme != SEGINV)
                   3017:                        pmap_protect_mmu(pmap, sva, eva);
                   3018:                goto out;
                   3019:        }
                   3020:        /* It is a user pmap. */
1.1       glass    3021:
1.85      gwr      3022:        /* There is a PMEG, but maybe not active. */
                   3023:        old_ctx = INVALID_CONTEXT;
1.152     thorpej  3024:        in_ctx = false;
1.85      gwr      3025:        if (has_context(pmap)) {
                   3026:                /* Temporary context change. */
                   3027:                old_ctx = get_context();
                   3028:                set_context(pmap->pm_ctxnum);
                   3029:                sme = get_segmap(sva);
                   3030:                if (sme != SEGINV)
1.152     thorpej  3031:                        in_ctx = true;
1.85      gwr      3032:        }
1.38      gwr      3033:
1.152     thorpej  3034:        if (in_ctx == true)
1.85      gwr      3035:                pmap_protect_mmu(pmap, sva, eva);
                   3036:        else
                   3037:                pmap_protect_noctx(pmap, sva, eva);
1.84      gwr      3038:
1.85      gwr      3039:        if (old_ctx != INVALID_CONTEXT) {
                   3040:                /* Restore previous context. */
                   3041:                set_context(old_ctx);
                   3042:        }
1.80      gwr      3043:
1.85      gwr      3044: out:
1.80      gwr      3045:        splx(s);
1.1       glass    3046: }
                   3047:
1.38      gwr      3048: /*
1.85      gwr      3049:  * Remove write permissions, all in one PMEG,
                   3050:  * where that PMEG is currently in the MMU.
                   3051:  * The current context is already correct.
1.38      gwr      3052:  */
1.145     chs      3053: void
                   3054: pmap_protect_mmu(pmap_t pmap, vaddr_t sva, vaddr_t eva)
1.38      gwr      3055: {
1.85      gwr      3056:        pmeg_t pmegp;
1.132     chs      3057:        vaddr_t pgva, segva;
1.85      gwr      3058:        int pte, sme;
1.107     gwr      3059: #ifdef HAVECACHE
1.85      gwr      3060:        int flush_by_page = 0;
1.107     gwr      3061: #endif
1.85      gwr      3062:
                   3063:        CHECK_SPL();
                   3064:
                   3065: #ifdef DIAGNOSTIC
                   3066:        if (pmap != kernel_pmap) {
                   3067:                if (pmap->pm_ctxnum != get_context())
                   3068:                        panic("pmap_protect_mmu: wrong context");
                   3069:        }
                   3070: #endif
1.1       glass    3071:
1.164     tsutsui  3072:        segva = sun3_trunc_seg(sva);
1.85      gwr      3073:        sme = get_segmap(segva);
1.84      gwr      3074:
1.85      gwr      3075: #ifdef DIAGNOSTIC
                   3076:        /* Make sure it is valid and known. */
                   3077:        if (sme == SEGINV)
                   3078:                panic("pmap_protect_mmu: SEGINV");
                   3079:        if (pmap->pm_segmap && (pmap->pm_segmap[VA_SEGNUM(segva)] != sme))
                   3080:                panic("pmap_protect_mmu: incorrect sme, va=0x%lx", segva);
                   3081: #endif
1.38      gwr      3082:
1.85      gwr      3083:        pmegp = pmeg_p(sme);
                   3084:        /* have pmeg, will travel */
1.38      gwr      3085:
1.85      gwr      3086: #ifdef DIAGNOSTIC
                   3087:        /* Make sure we own the pmeg, right va, etc. */
                   3088:        if ((pmegp->pmeg_va != segva) ||
1.145     chs      3089:            (pmegp->pmeg_owner != pmap) ||
                   3090:            (pmegp->pmeg_version != pmap->pm_version))
1.85      gwr      3091:        {
                   3092:                panic("pmap_protect_mmu: bad pmeg=%p", pmegp);
                   3093:        }
1.132     chs      3094:        if (pmegp->pmeg_vpages < 0)
                   3095:                panic("pmap_protect_mmu: npages corrupted");
                   3096:        if (pmegp->pmeg_vpages == 0)
1.85      gwr      3097:                panic("pmap_protect_mmu: no valid pages?");
                   3098: #endif
1.26      gwr      3099:
1.85      gwr      3100: #ifdef HAVECACHE
                   3101:        if (cache_size) {
                   3102:                /*
                   3103:                 * If the range to be removed is larger than the cache,
                   3104:                 * it will be cheaper to flush this segment entirely.
                   3105:                 */
                   3106:                if (cache_size < (eva - sva)) {
                   3107:                        /* cheaper to flush whole segment */
                   3108:                        cache_flush_segment(segva);
                   3109:                } else {
                   3110:                        flush_by_page = 1;
                   3111:                }
                   3112:        }
                   3113: #endif
1.84      gwr      3114:
1.86      gwr      3115:        /* Remove write permission in the given range. */
1.140     thorpej  3116:        for (pgva = sva; pgva < eva; pgva += PAGE_SIZE) {
1.85      gwr      3117:                pte = get_pte(pgva);
                   3118:                if (pte & PG_VALID) {
                   3119: #ifdef HAVECACHE
                   3120:                        if (flush_by_page) {
                   3121:                                cache_flush_page(pgva);
                   3122:                                /* Get fresh mod/ref bits from write-back. */
                   3123:                                pte = get_pte(pgva);
                   3124:                        }
                   3125: #endif
                   3126:                        if (IS_MAIN_MEM(pte)) {
                   3127:                                save_modref_bits(pte);
                   3128:                        }
                   3129:                        pte &= ~(PG_WRITE | PG_MODREF);
                   3130:                        set_pte(pgva, pte);
                   3131:                }
                   3132:        }
1.38      gwr      3133: }
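
                         /*
                          * Editor's note: in the loop above, save_modref_bits() banks
                          * the hardware mod/ref bits in the software pv flags before
                          * the PTE is rewritten, so stripping PG_MODREF together with
                          * PG_WRITE loses no mod/ref history.
                          */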
1.28      gwr      3134:
1.66      gwr      3135: /*
1.85      gwr      3136:  * Remove write permissions, all in one PMEG,
                   3137:  * where it is not currently in any context.
1.66      gwr      3138:  */
1.145     chs      3139: void
                   3140: pmap_protect_noctx(pmap_t pmap, vaddr_t sva, vaddr_t eva)
1.38      gwr      3141: {
1.86      gwr      3142:        int old_ctx, pte, sme, segnum;
1.132     chs      3143:        vaddr_t pgva, segva;
1.38      gwr      3144:
1.85      gwr      3145: #ifdef DIAGNOSTIC
                   3146:        /* Kernel always in a context (actually, in all contexts). */
                   3147:        if (pmap == kernel_pmap)
                   3148:                panic("pmap_protect_noctx: kernel_pmap");
                   3149:        if (pmap->pm_segmap == NULL)
                   3150:                panic("pmap_protect_noctx: null segmap");
1.38      gwr      3151: #endif
                   3152:
1.164     tsutsui  3153:        segva = sun3_trunc_seg(sva);
1.86      gwr      3154:        segnum = VA_SEGNUM(segva);
1.85      gwr      3155:        sme = pmap->pm_segmap[segnum];
1.28      gwr      3156:        if (sme == SEGINV)
1.85      gwr      3157:                return;
1.86      gwr      3158:
                   3159:        /*
                   3160:         * Borrow the EMPTY_CONTEXT so we can access the PMEG
                   3161:         * at its normal virtual address.
                   3162:         */
                   3163:        old_ctx = get_context();
                   3164:        set_context(EMPTY_CONTEXT);
                   3165:        set_segmap(segva, sme);
1.77      gwr      3166:
1.85      gwr      3167:        /* Remove write permission in the given range. */
1.140     thorpej  3168:        for (pgva = sva; pgva < eva; pgva += PAGE_SIZE) {
1.86      gwr      3169:                pte = get_pte(pgva);
1.85      gwr      3170:                if (pte & PG_VALID) {
1.86      gwr      3171:                        /* No cache flush needed. */
1.85      gwr      3172:                        if (IS_MAIN_MEM(pte)) {
                   3173:                                save_modref_bits(pte);
                   3174:                        }
                   3175:                        pte &= ~(PG_WRITE | PG_MODREF);
1.86      gwr      3176:                        set_pte(pgva, pte);
1.85      gwr      3177:                }
                   3178:        }
1.86      gwr      3179:
                   3180:        /*
                   3181:         * Make the EMPTY_CONTEXT really empty again, and
                   3182:         * restore the previous context.
                   3183:         */
                   3184:        set_segmap(segva, SEGINV);
                   3185:        set_context(old_ctx);
1.2       glass    3186: }
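
/*
 * Editor's note (illustrative sketch, not part of the original source):
 * pmap_protect_noctx() above and pmap_remove_noctx() below share the
 * same "borrow the EMPTY_CONTEXT" pattern around their PTE loops.
 * The pattern could be factored out as below; the helper names are
 * hypothetical.
 */
#ifdef notdef
static int
borrow_empty_context(vaddr_t segva, int sme)
{
	int old_ctx;

	/* Switch to the spare context and map the PMEG there. */
	old_ctx = get_context();
	set_context(EMPTY_CONTEXT);
	set_segmap(segva, sme);
	return (old_ctx);
}

static void
return_empty_context(vaddr_t segva, int old_ctx)
{
	/* Make the EMPTY_CONTEXT really empty again, then switch back. */
	set_segmap(segva, SEGINV);
	set_context(old_ctx);
}
#endif /* notdef */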
1.38      gwr      3187:
1.85      gwr      3188:
1.2       glass    3189: /*
1.85      gwr      3190:  *     Remove the given range of addresses from the specified map.
1.2       glass    3191:  *
1.85      gwr      3192:  *     It is assumed that the start and end are properly
                   3193:  *     rounded to the page size.
1.2       glass    3194:  */
1.145     chs      3195: void
                   3196: pmap_remove(pmap_t pmap, vaddr_t sva, vaddr_t eva)
1.2       glass    3197: {
1.132     chs      3198:        vaddr_t va, neva;
1.85      gwr      3199:        int segnum;
1.2       glass    3200:
1.85      gwr      3201: #ifdef PMAP_DEBUG
                   3202:        if ((pmap_debug & PMD_REMOVE) ||
1.145     chs      3203:            ((sva <= pmap_db_watchva && eva > pmap_db_watchva)))
1.85      gwr      3204:                printf("pmap_remove(%p, 0x%lx, 0x%lx)\n", pmap, sva, eva);
                   3205: #endif
1.80      gwr      3206:
1.132     chs      3207:
                   3208:        KASSERT((pmap == kernel_pmap) ?
1.145     chs      3209:                sva >= virtual_avail && eva < DVMA_MAP_END :
                   3210:                eva <= VM_MAXUSER_ADDRESS);
1.85      gwr      3211:        va = sva;
                   3212:        segnum = VA_SEGNUM(va);
                   3213:        while (va < eva) {
1.164     tsutsui  3214:                neva = sun3_trunc_seg(va) + NBSG;
1.85      gwr      3215:                if (neva > eva)
                   3216:                        neva = eva;
                   3217:                if (pmap->pm_segmap[segnum] != SEGINV)
                   3218:                        pmap_remove1(pmap, va, neva);
                   3219:                va = neva;
                   3220:                segnum++;
1.56      gwr      3221:        }
1.2       glass    3222: }
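
/*
 * Editor's note (worked example, not part of the original source):
 * with 128KB segments (NBSG == 0x20000), a call such as
 * pmap_remove(pmap, 0x1f000, 0x42000) is carved at segment
 * boundaries into pmap_remove1() calls on [0x1f000, 0x20000),
 * [0x20000, 0x40000) and [0x40000, 0x42000), so that each
 * pmap_remove1() call touches exactly one PMEG.
 */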
                   3223:
                   3224: /*
1.85      gwr      3225:  * Remove user mappings, all within one segment
1.2       glass    3226:  */
1.145     chs      3227: void
                   3228: pmap_remove1(pmap_t pmap, vaddr_t sva, vaddr_t eva)
1.2       glass    3229: {
1.85      gwr      3230:        int old_ctx, s, sme;
1.151     thorpej  3231:        bool in_ctx;
1.85      gwr      3232:
1.122     thorpej  3233:        s = splvm();
1.85      gwr      3234:
                   3235: #ifdef DIAGNOSTIC
1.164     tsutsui  3236:        if (sun3_trunc_seg(sva) != sun3_trunc_seg(eva-1))
1.85      gwr      3237:                panic("pmap_remove1: bad range!");
                   3238: #endif
                   3239:
                   3240:        if (pmap == kernel_pmap) {
                   3241:                sme = get_segmap(sva);
                   3242:                if (sme != SEGINV)
                   3243:                        pmap_remove_mmu(pmap, sva, eva);
                   3244:                goto out;
                   3245:        }
                   3246:        /* It is a user pmap. */
                   3247:
                   3248:        /* There is a PMEG, but maybe not active. */
                   3249:        old_ctx = INVALID_CONTEXT;
1.152     thorpej  3250:        in_ctx = false;
1.85      gwr      3251:        if (has_context(pmap)) {
                   3252:                /* Temporary context change. */
                   3253:                old_ctx = get_context();
                   3254:                set_context(pmap->pm_ctxnum);
                   3255:                sme = get_segmap(sva);
                   3256:                if (sme != SEGINV)
1.152     thorpej  3257:                        in_ctx = true;
1.85      gwr      3258:        }
                   3259:
1.152     thorpej  3260:        if (in_ctx)
1.85      gwr      3261:                pmap_remove_mmu(pmap, sva, eva);
                   3262:        else
                   3263:                pmap_remove_noctx(pmap, sva, eva);
                   3264:
                   3265:        if (old_ctx != INVALID_CONTEXT) {
                   3266:                /* Restore previous context. */
                   3267:                set_context(old_ctx);
                   3268:        }
                   3269:
                   3270: out:
                   3271:        splx(s);
1.2       glass    3272: }
1.5       glass    3273:
1.38      gwr      3274: /*
1.85      gwr      3275:  * Remove some mappings, all in one PMEG,
1.38      gwr      3276:  * where that PMEG is currently in the MMU.
                   3277:  * The current context is already correct.
1.85      gwr      3278:  * If no PTEs remain valid in the PMEG, free it.
1.38      gwr      3279:  */
1.145     chs      3280: void
                   3281: pmap_remove_mmu(pmap_t pmap, vaddr_t sva, vaddr_t eva)
1.38      gwr      3282: {
                   3283:        pmeg_t pmegp;
1.132     chs      3284:        vaddr_t pgva, segva;
1.38      gwr      3285:        int pte, sme;
1.107     gwr      3286: #ifdef HAVECACHE
1.52      gwr      3287:        int flush_by_page = 0;
1.107     gwr      3288: #endif
1.38      gwr      3289:
                   3290:        CHECK_SPL();
                   3291:
                   3292: #ifdef DIAGNOSTIC
1.50      gwr      3293:        if (pmap != kernel_pmap) {
1.38      gwr      3294:                if (pmap->pm_ctxnum != get_context())
1.85      gwr      3295:                        panic("pmap_remove_mmu: wrong context");
1.38      gwr      3296:        }
                   3297: #endif
                   3298:
1.164     tsutsui  3299:        segva = sun3_trunc_seg(sva);
1.82      gwr      3300:        sme = get_segmap(segva);
1.80      gwr      3301:
1.38      gwr      3302: #ifdef DIAGNOSTIC
                   3303:        /* Make sure it is valid and known. */
                   3304:        if (sme == SEGINV)
1.85      gwr      3305:                panic("pmap_remove_mmu: SEGINV");
1.82      gwr      3306:        if (pmap->pm_segmap && (pmap->pm_segmap[VA_SEGNUM(segva)] != sme))
1.85      gwr      3307:                panic("pmap_remove_mmu: incorrect sme, va=0x%lx", segva);
1.38      gwr      3308: #endif
1.80      gwr      3309:
1.29      gwr      3310:        pmegp = pmeg_p(sme);
1.38      gwr      3311:        /* have pmeg, will travel */
                   3312:
1.30      gwr      3313: #ifdef DIAGNOSTIC
1.38      gwr      3314:        /* Make sure we own the pmeg, right va, etc. */
1.82      gwr      3315:        if ((pmegp->pmeg_va != segva) ||
1.145     chs      3316:            (pmegp->pmeg_owner != pmap) ||
                   3317:            (pmegp->pmeg_version != pmap->pm_version))
1.38      gwr      3318:        {
1.85      gwr      3319:                panic("pmap_remove_mmu: bad pmeg=%p", pmegp);
1.38      gwr      3320:        }
1.132     chs      3321:        if (pmegp->pmeg_vpages < 0)
                   3322:                panic("pmap_remove_mmu: npages corrupted");
                   3323:        if (pmegp->pmeg_vpages == 0)
1.85      gwr      3324:                panic("pmap_remove_mmu: no valid pages?");
1.38      gwr      3325: #endif
                   3326:
                   3327: #ifdef HAVECACHE
1.52      gwr      3328:        if (cache_size) {
                   3329:                /*
                   3330:                 * If the range to be removed is larger than the cache,
                   3331:                 * it will be cheaper to flush this segment entirely.
                   3332:                 */
                   3333:                if (cache_size < (eva - sva)) {
                   3334:                        /* cheaper to flush whole segment */
1.82      gwr      3335:                        cache_flush_segment(segva);
1.52      gwr      3336:                } else {
                   3337:                        flush_by_page = 1;
                   3338:                }
                   3339:        }
1.30      gwr      3340: #endif
1.38      gwr      3341:
1.85      gwr      3342:        /* Invalidate the PTEs in the given range. */
1.140     thorpej  3343:        for (pgva = sva; pgva < eva; pgva += PAGE_SIZE) {
1.82      gwr      3344:                pte = get_pte(pgva);
1.38      gwr      3345:                if (pte & PG_VALID) {
1.52      gwr      3346: #ifdef HAVECACHE
                   3347:                        if (flush_by_page) {
1.82      gwr      3348:                                cache_flush_page(pgva);
1.52      gwr      3349:                                /* Get fresh mod/ref bits from write-back. */
1.82      gwr      3350:                                pte = get_pte(pgva);
1.52      gwr      3351:                        }
                   3352: #endif
                   3353:                        if (IS_MAIN_MEM(pte)) {
                   3354:                                save_modref_bits(pte);
1.92      gwr      3355:                                pv_unlink(pmap, pte, pgva);
1.85      gwr      3356:                        }
                   3357: #ifdef PMAP_DEBUG
1.145     chs      3358:                        if ((pmap_debug & PMD_SETPTE) ||
                   3359:                            (pgva == pmap_db_watchva)) {
1.85      gwr      3360:                                printf("pmap: set_pte pmap=%p va=0x%lx"
1.145     chs      3361:                                       " old=0x%x new=0x%x (rrmmu)\n",
                   3362:                                       pmap, pgva, pte, PG_INVAL);
1.85      gwr      3363:                        }
                   3364: #endif
                   3365:                        set_pte(pgva, PG_INVAL);
1.132     chs      3366:                        KASSERT(pmegp->pmeg_vpages > 0);
1.85      gwr      3367:                        pmegp->pmeg_vpages--;
                   3368:                }
                   3369:        }
                   3370:
1.132     chs      3371:        KASSERT(pmegp->pmeg_vpages >= 0);
                   3372:        if (pmegp->pmeg_vpages == 0) {
1.85      gwr      3373:                /* We are done with this pmeg. */
                   3374:                if (is_pmeg_wired(pmegp)) {
                   3375: #ifdef PMAP_DEBUG
                   3376:                        if (pmap_debug & PMD_WIRING) {
1.145     chs      3377:                                db_printf("pmap: removing wired pmeg: %p\n",
                   3378:                                          pmegp);
1.85      gwr      3379:                                Debugger();
1.52      gwr      3380:                        }
1.85      gwr      3381: #endif /* PMAP_DEBUG */
                   3382:                }
                   3383:
                   3384: #ifdef PMAP_DEBUG
                   3385:                if (pmap_debug & PMD_SEGMAP) {
1.145     chs      3386:                        printf("pmap: set_segmap ctx=%d v=0x%lx old=0x%x "
                   3387:                               "new=ff (rm)\n",
                   3388:                               pmap->pm_ctxnum, segva, pmegp->pmeg_index);
1.85      gwr      3389:                }
                   3390:                pmeg_verify_empty(segva);
                   3391: #endif
                   3392:
                   3393:                /* Remove it from the MMU. */
                   3394:                if (kernel_pmap == pmap) {
                   3395:                        /* Did cache flush above. */
                   3396:                        set_segmap_allctx(segva, SEGINV);
                   3397:                } else {
                   3398:                        /* Did cache flush above. */
                   3399:                        set_segmap(segva, SEGINV);
1.38      gwr      3400:                }
1.85      gwr      3401:                pmap->pm_segmap[VA_SEGNUM(segva)] = SEGINV;
                   3402:                /* Now, put it on the free list. */
                   3403:                pmeg_free(pmegp);
1.38      gwr      3404:        }
                   3405: }
                   3406:
                   3407: /*
1.85      gwr      3408:  * Remove some mappings, all in one PMEG,
1.38      gwr      3409:  * where that PMEG is not currently in any context.
                   3410:  */
1.145     chs      3411: void
                   3412: pmap_remove_noctx(pmap_t pmap, vaddr_t sva, vaddr_t eva)
1.38      gwr      3413: {
                   3414:        pmeg_t pmegp;
1.86      gwr      3415:        int old_ctx, pte, sme, segnum;
1.132     chs      3416:        vaddr_t pgva, segva;
1.38      gwr      3417:
                   3418:        CHECK_SPL();
                   3419:
1.81      gwr      3420: #ifdef DIAGNOSTIC
1.38      gwr      3421:        /* Kernel always in a context (actually, in all contexts). */
1.50      gwr      3422:        if (pmap == kernel_pmap)
1.85      gwr      3423:                panic("pmap_remove_noctx: kernel_pmap");
1.38      gwr      3424:        if (pmap->pm_segmap == NULL)
1.85      gwr      3425:                panic("pmap_remove_noctx: null segmap");
1.38      gwr      3426: #endif
                   3427:
1.164     tsutsui  3428:        segva = sun3_trunc_seg(sva);
1.86      gwr      3429:        segnum = VA_SEGNUM(segva);
1.38      gwr      3430:        sme = pmap->pm_segmap[segnum];
1.80      gwr      3431:        if (sme == SEGINV)
                   3432:                return;
1.38      gwr      3433:        pmegp = pmeg_p(sme);
                   3434:
1.86      gwr      3435:        /*
                   3436:         * Borrow the EMPTY_CONTEXT so we can access the PMEG
                   3437:         * at its normal virtual address.
                   3438:         */
                   3439:        old_ctx = get_context();
                   3440:        set_context(EMPTY_CONTEXT);
                   3441:        set_segmap(segva, sme);
                   3442:
                   3443:        /* Invalidate the PTEs in the given range. */
1.140     thorpej  3444:        for (pgva = sva; pgva < eva; pgva += PAGE_SIZE) {
1.86      gwr      3445:                pte = get_pte(pgva);
1.38      gwr      3446:                if (pte & PG_VALID) {
1.86      gwr      3447:                        /* No cache flush needed. */
1.52      gwr      3448:                        if (IS_MAIN_MEM(pte)) {
                   3449:                                save_modref_bits(pte);
1.92      gwr      3450:                                pv_unlink(pmap, pte, pgva);
1.52      gwr      3451:                        }
1.76      gwr      3452: #ifdef PMAP_DEBUG
1.145     chs      3453:                        if ((pmap_debug & PMD_SETPTE) ||
                   3454:                            (pgva == pmap_db_watchva)) {
1.85      gwr      3455:                                printf("pmap: set_pte pmap=%p va=0x%lx"
1.145     chs      3456:                                       " old=0x%x new=0x%x (rrncx)\n",
                   3457:                                       pmap, pgva, pte, PG_INVAL);
1.85      gwr      3458:                        }
1.76      gwr      3459: #endif
1.86      gwr      3460:                        set_pte(pgva, PG_INVAL);
1.132     chs      3461:                        KASSERT(pmegp->pmeg_vpages > 0);
1.85      gwr      3462:                        pmegp->pmeg_vpages--;
1.38      gwr      3463:                }
1.66      gwr      3464:        }
1.86      gwr      3465:
                   3466:        /*
                   3467:         * Make the EMPTY_CONTEXT really empty again, and
                   3468:         * restore the previous context.
                   3469:         */
                   3470:        set_segmap(segva, SEGINV);
                   3471:        set_context(old_ctx);
                   3472:
1.132     chs      3473:        KASSERT(pmegp->pmeg_vpages >= 0);
                   3474:        if (pmegp->pmeg_vpages == 0) {
1.86      gwr      3475:                /* We are done with this pmeg. */
                   3476:                if (is_pmeg_wired(pmegp)) {
                   3477: #ifdef PMAP_DEBUG
                   3478:                        if (pmap_debug & PMD_WIRING) {
1.145     chs      3479:                                db_printf("pmap: removing wired pmeg: %p\n",
                   3480:                                          pmegp);
1.86      gwr      3481:                                Debugger();
                   3482:                        }
                   3483: #endif /* PMAP_DEBUG */
                   3484:                }
1.66      gwr      3485:
1.85      gwr      3486:                pmap->pm_segmap[segnum] = SEGINV;
                   3487:                pmeg_free(pmegp);
1.38      gwr      3488:        }
                   3489: }
1.85      gwr      3490:
1.38      gwr      3491:
                   3492: /*
1.69      gwr      3493:  * Count resident pages in this pmap.
                   3494:  * See: kern_sysctl.c:pmap_resident_count
1.38      gwr      3495:  */
1.145     chs      3496: segsz_t
                   3497: pmap_resident_pages(pmap_t pmap)
1.38      gwr      3498: {
                   3499:        int i, sme, pages;
                   3500:        pmeg_t pmeg;
                   3501:
1.69      gwr      3502:        if (pmap->pm_segmap == NULL)
                   3503:                return (0);
                   3504:
1.38      gwr      3505:        pages = 0;
1.69      gwr      3506:        for (i = 0; i < NUSEG; i++) {
                   3507:                sme = pmap->pm_segmap[i];
                   3508:                if (sme != SEGINV) {
                   3509:                        pmeg = pmeg_p(sme);
                   3510:                        pages += pmeg->pmeg_vpages;
                   3511:                }
                   3512:        }
                   3513:        return (pages);
                   3514: }
                   3515:
                   3516: /*
                   3517:  * Count wired pages in this pmap.
                   3518:  * See vm_mmap.c:pmap_wired_count
                   3519:  */
1.145     chs      3520: segsz_t
                   3521: pmap_wired_pages(pmap_t pmap)
1.69      gwr      3522: {
                   3523:        int i, mask, sme, pages;
                   3524:        pmeg_t pmeg;
                   3525:
                   3526:        if (pmap->pm_segmap == NULL)
                   3527:                return (0);
                   3528:
                   3529:        pages = 0;
                   3530:        for (i = 0; i < NUSEG; i++) {
                   3531:                sme = pmap->pm_segmap[i];
                   3532:                if (sme != SEGINV) {
                   3533:                        pmeg = pmeg_p(sme);
                   3534:                        mask = 0x8000;
                   3535:                        do {
                   3536:                                if (pmeg->pmeg_wired & mask)
                   3537:                                        pages++;
                   3538:                                mask = (mask >> 1);
                   3539:                        } while (mask);
1.38      gwr      3540:                }
                   3541:        }
                   3542:        return (pages);
1.2       glass    3543: }
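
/*
 * Editor's note (worked example, not part of the original source):
 * pmeg_wired is treated above as a 16-bit mask, one bit per 8KB
 * page of the 128KB segment; the loop tests bits 0x8000 down to
 * 0x0001.  A PMEG with pmeg_wired == 0x8001, for example,
 * contributes two wired pages to the count.
 */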
                   3544:
1.38      gwr      3545:
                   3546: /*
                   3547:  *     pmap_copy_page copies the specified (machine independent)
                   3548:  *     page by mapping the page into virtual memory and using
                   3549:  *     copypage() to copy the page, one machine dependent page at a
                   3550:  *     time.
                   3551:  */
1.145     chs      3552: void
                   3553: pmap_copy_page(paddr_t src, paddr_t dst)
1.38      gwr      3554: {
                   3555:        int pte;
                   3556:        int s;
                   3557:
1.122     thorpej  3558:        s = splvm();
1.77      gwr      3559:
1.38      gwr      3560: #ifdef PMAP_DEBUG
                   3561:        if (pmap_debug & PMD_COW)
1.73      fair     3562:                printf("pmap_copy_page: 0x%lx -> 0x%lx\n", src, dst);
1.38      gwr      3563: #endif
                   3564:
1.132     chs      3565: #ifdef DIAGNOSTIC
1.38      gwr      3566:        if (tmp_vpages_inuse)
                   3567:                panic("pmap_copy_page: vpages inuse");
                   3568:        tmp_vpages_inuse++;
1.132     chs      3569: #endif
1.38      gwr      3570:
1.50      gwr      3571:        /* PG_PERM is short for (PG_VALID|PG_WRITE|PG_SYSTEM|PG_NC) */
                   3572:        /* All mappings to tmp_vpages are non-cached, so no flush. */
1.38      gwr      3573:        pte = PG_PERM | PA_PGNUM(src);
                   3574:        set_pte(tmp_vpages[0], pte);
                   3575:        pte = PG_PERM | PA_PGNUM(dst);
                   3576:        set_pte(tmp_vpages[1], pte);
1.68      thorpej  3577:        copypage((char *) tmp_vpages[0], (char *) tmp_vpages[1]);
1.38      gwr      3578:        set_pte(tmp_vpages[0], PG_INVAL);
                   3579:        set_pte(tmp_vpages[1], PG_INVAL);
                   3580:
1.132     chs      3581: #ifdef DIAGNOSTIC
1.38      gwr      3582:        tmp_vpages_inuse--;
1.132     chs      3583: #endif
1.77      gwr      3584:
                   3585:        splx(s);
1.38      gwr      3586: }
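
/*
 * Editor's note (illustrative sketch, not part of the original source):
 * the tmp_vpages idiom above -- install a PTE for a physical page at
 * a reserved kernel VA, touch the data, then invalidate the PTE -- is
 * the general way to reach arbitrary physical memory here.  A minimal
 * sketch, assuming PGOFSET is the page-offset mask and pa is
 * word-aligned (splvm/DIAGNOSTIC guards omitted for brevity):
 */
#ifdef notdef
static int
peek_phys(paddr_t pa)
{
	int pte, val;

	/* PG_PERM maps it valid, writable, system, and non-cached. */
	pte = PG_PERM | PA_PGNUM(pa);
	set_pte(tmp_vpages[0], pte);
	val = *(int *)(tmp_vpages[0] + (pa & PGOFSET));
	set_pte(tmp_vpages[0], PG_INVAL);
	return (val);
}
#endif /* notdef */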
                   3587:
1.2       glass    3588: /*
                   3589:  *     pmap_zero_page zeros the specified (machine independent)
                   3590:  *     page by mapping the page into virtual memory and using
                   3591:  *     zeropage() to clear its contents, one machine dependent page
                   3592:  *     at a time.
                   3593:  */
1.145     chs      3594: void
                   3595: pmap_zero_page(paddr_t pa)
1.2       glass    3596: {
1.38      gwr      3597:        int pte;
                   3598:        int s;
1.2       glass    3599:
1.122     thorpej  3600:        s = splvm();
1.77      gwr      3601:
1.26      gwr      3602: #ifdef PMAP_DEBUG
1.38      gwr      3603:        if (pmap_debug & PMD_COW)
1.73      fair     3604:                printf("pmap_zero_page: 0x%lx\n", pa);
1.38      gwr      3605: #endif
                   3606:
1.132     chs      3607: #ifdef DIAGNOSTIC
1.38      gwr      3608:        if (tmp_vpages_inuse)
                   3609:                panic("pmap_zero_page: vpages inuse");
                   3610:        tmp_vpages_inuse++;
1.132     chs      3611: #endif
1.50      gwr      3612:
                   3613:        /* PG_PERM is short for (PG_VALID|PG_WRITE|PG_SYSTEM|PG_NC) */
                   3614:        /* All mappings to tmp_vpages are non-cached, so no flush. */
1.38      gwr      3615:        pte = PG_PERM | PA_PGNUM(pa);
                   3616:        set_pte(tmp_vpages[0], pte);
1.68      thorpej  3617:        zeropage((char *) tmp_vpages[0]);
1.38      gwr      3618:        set_pte(tmp_vpages[0], PG_INVAL);
1.50      gwr      3619:
1.132     chs      3620: #ifdef DIAGNOSTIC
1.38      gwr      3621:        tmp_vpages_inuse--;
1.132     chs      3622: #endif
1.77      gwr      3623:
                   3624:        splx(s);
1.51      gwr      3625: }
                   3626:
                   3627: /*
1.60      gwr      3628:  * Find first virtual address >= *va that is
                   3629:  * least likely to cause cache aliases.
                   3630:  * (This will just seg-align mappings.)
                   3631:  */
1.145     chs      3632: void
                   3633: pmap_prefer(vaddr_t fo, vaddr_t *va)
1.60      gwr      3634: {
1.132     chs      3635:        long d;
1.60      gwr      3636:
                   3637:        d = fo - *va;
                   3638:        d &= SEGOFSET;
                   3639:        *va += d;
                   3640: }
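
/*
 * Editor's note (worked example, not part of the original source):
 * with 128KB segments, SEGOFSET == 0x1ffff.  For fo == 0x23000 and
 * *va == 0x40000, d == (0x23000 - 0x40000) & 0x1ffff == 0x3000, so
 * *va becomes 0x43000.  The returned va then has the same offset
 * within its segment as fo does, so mappings of the same object line
 * up in the cache instead of aliasing.
 */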
1.61      gwr      3641:
                   3642: /*
1.74      gwr      3643:  * Fill in the sun3-specific part of the kernel core header
                   3644:  * for dumpsys().  (See machdep.c for the rest.)
1.61      gwr      3645:  */
1.145     chs      3646: void
                   3647: pmap_kcore_hdr(struct sun3_kcore_hdr *sh)
1.61      gwr      3648: {
1.132     chs      3649:        vaddr_t va;
1.74      gwr      3650:        u_char *cp, *ep;
1.61      gwr      3651:
1.74      gwr      3652:        sh->segshift = SEGSHIFT;
                   3653:        sh->pg_frame = PG_FRAME;
                   3654:        sh->pg_valid = PG_VALID;
                   3655:
                   3656:        /* Copy the kernel segmap (256 bytes). */
1.164.2.1! uebayasi 3657:        va = KERNBASE3;
1.74      gwr      3658:        cp = sh->ksegmap;
                   3659:        ep = cp + sizeof(sh->ksegmap);
1.61      gwr      3660:        do {
                   3661:                *cp = get_segmap(va);
1.74      gwr      3662:                va += NBSG;
1.61      gwr      3663:                cp++;
1.74      gwr      3664:        } while (cp < ep);
1.61      gwr      3665: }
                   3666:
                   3667: /*
                   3668:  * Copy the pagemap RAM into the passed buffer (one page)
                   3669:  * starting at OFF in the pagemap RAM.
                   3670:  */
1.145     chs      3671: void
                   3672: pmap_get_pagemap(int *pt, int off)
1.61      gwr      3673: {
1.132     chs      3674:        vaddr_t va, va_end;
1.61      gwr      3675:        int sme, sme_end;       /* SegMap Entry numbers */
                   3676:
                   3677:        sme = (off >> 6);       /* PMEG to start on (64 bytes of PTEs each) */
                   3678:        sme_end = sme + 128;    /* 128 PMEGs fill one 8KB page */
                   3679:        va_end = temp_seg_va + NBSG;
                   3680:
                   3681:        do {
                   3682:                set_segmap(temp_seg_va, sme);
                   3683:                va = temp_seg_va;
                   3684:                do {
                   3685:                        *pt++ = get_pte(va);
1.140     thorpej  3686:                        va += PAGE_SIZE;
1.61      gwr      3687:                } while (va < va_end);
                   3688:                sme++;
                   3689:        } while (sme < sme_end);
                   3690:        set_segmap(temp_seg_va, SEGINV);
                   3691: }
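
/*
 * Editor's note (illustrative sketch, not part of the original source):
 * each PMEG takes 64 bytes of pagemap RAM (16 PTEs of 4 bytes), so
 * one 8KB page covers 128 PMEGs and off advances in 8KB steps.  A
 * hypothetical caller dumping the whole pagemap, assuming NPMEG is
 * the total PMEG count:
 */
#ifdef notdef
static void
dump_pagemap(void)
{
	static int buf[2048];	/* one 8KB page worth of PTEs */
	int off;

	for (off = 0; off < NPMEG * 64; off += 8192) {
		pmap_get_pagemap(buf, off);
		/* ... hand buf to the dump routine ... */
	}
}
#endif /* notdef */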
                   3692:
1.60      gwr      3693:
                   3694: /*
                   3695:  * Helper functions for changing unloaded PMEGs
1.83      gwr      3696:  * XXX: These should go away.  (Borrow context zero instead.)
1.60      gwr      3697:  */
1.80      gwr      3698:
1.132     chs      3699: #ifdef DIAGNOSTIC
1.38      gwr      3700: static int temp_seg_inuse;
1.132     chs      3701: #endif
1.38      gwr      3702:
                   3703: static int
                   3704: get_pte_pmeg(int pmeg_num, int page_num)
                   3705: {
1.132     chs      3706:        vaddr_t va;
1.38      gwr      3707:        int pte;
                   3708:
1.50      gwr      3709:        CHECK_SPL();
1.132     chs      3710: #ifdef DIAGNOSTIC
1.38      gwr      3711:        if (temp_seg_inuse)
                   3712:                panic("get_pte_pmeg: temp_seg_inuse");
1.50      gwr      3713:        temp_seg_inuse++;
1.132     chs      3714: #endif
1.38      gwr      3715:
                   3716:        va = temp_seg_va;
                   3717:        set_segmap(temp_seg_va, pmeg_num);
1.140     thorpej  3718:        va += PAGE_SIZE*page_num;
1.38      gwr      3719:        pte = get_pte(va);
                   3720:        set_segmap(temp_seg_va, SEGINV);
1.50      gwr      3721:
1.132     chs      3722: #ifdef DIAGNOSTIC
1.38      gwr      3723:        temp_seg_inuse--;
1.132     chs      3724: #endif
1.38      gwr      3725:        return pte;
                   3726: }
                   3727:
                   3728: static void
                   3729: set_pte_pmeg(int pmeg_num, int page_num, int pte)
                   3730: {
1.132     chs      3731:        vaddr_t va;
1.38      gwr      3732:
1.50      gwr      3733:        CHECK_SPL();
1.132     chs      3734: #ifdef DIAGNOSTIC
1.38      gwr      3735:        if (temp_seg_inuse)
                   3736:                panic("set_pte_pmeg: temp_seg_inuse");
1.50      gwr      3737:        temp_seg_inuse++;
1.132     chs      3738: #endif
1.38      gwr      3739:
1.50      gwr      3740:        /* We never access data in temp_seg_va so no need to flush. */
1.38      gwr      3741:        va = temp_seg_va;
                   3742:        set_segmap(temp_seg_va, pmeg_num);
1.140     thorpej  3743:        va += PAGE_SIZE*page_num;
1.38      gwr      3744:        set_pte(va, pte);
                   3745:        set_segmap(temp_seg_va, SEGINV);
1.50      gwr      3746:
1.132     chs      3747: #ifdef DIAGNOSTIC
1.38      gwr      3748:        temp_seg_inuse--;
1.132     chs      3749: #endif
1.2       glass    3750: }
1.109     is       3751:
                   3752: /*
                   3753:  *     Routine:        pmap_procwr
                   3754:  *
                   3755:  *     Function:
                   3756:  *             Synchronize caches corresponding to [addr, addr+len) in p.
                   3757:  */
1.145     chs      3758: void
                   3759: pmap_procwr(struct proc *p, vaddr_t va, size_t len)
1.109     is       3760: {
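	/*
	 * Editor's note (assumption, not part of the original source):
	 * 0x80000004 appears to be CC_EXTPURGE|CC_IPURGE in the m68k
	 * cachectl1() request encoding, i.e. purge the on-chip (and
	 * any external) instruction cache for the range.
	 */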
                   3761:        (void)cachectl1(0x80000004, va, len, p);
                   3762: }
                   3763:
1.78      gwr      3764:
                   3765: #ifdef PMAP_DEBUG
                   3766: /* Things to call from the debugger. */
                   3767:
1.145     chs      3768: void
                   3769: pmap_print(pmap_t pmap)
1.78      gwr      3770: {
1.81      gwr      3771:        db_printf(" pm_ctxnum=%d\n", pmap->pm_ctxnum);
                   3772:        db_printf(" pm_version=0x%x\n", pmap->pm_version);
                   3773:        db_printf(" pm_segmap=%p\n", pmap->pm_segmap);
1.78      gwr      3774: }
                   3775:
1.145     chs      3776: void
                   3777: pmeg_print(pmeg_t pmegp)
1.78      gwr      3778: {
1.81      gwr      3779:        db_printf("link_next=%p  link_prev=%p\n",
1.145     chs      3780:                  TAILQ_NEXT(pmegp, pmeg_link),
1.156     tsutsui  3781:                  TAILQ_PREV(pmegp, pmeg_tailq, pmeg_link));
1.81      gwr      3782:        db_printf("index=0x%x owner=%p own_vers=0x%x\n",
1.145     chs      3783:                  pmegp->pmeg_index, pmegp->pmeg_owner, pmegp->pmeg_version);
1.81      gwr      3784:        db_printf("va=0x%lx wired=0x%x reserved=0x%x vpgs=0x%x qstate=0x%x\n",
1.145     chs      3785:                  pmegp->pmeg_va, pmegp->pmeg_wired,
                   3786:                  pmegp->pmeg_reserved, pmegp->pmeg_vpages,
                   3787:                  pmegp->pmeg_qstate);
1.78      gwr      3788: }
                   3789:
1.145     chs      3790: void
                   3791: pv_print(paddr_t pa)
1.78      gwr      3792: {
                   3793:        pv_entry_t pv;
1.84      gwr      3794:        int idx;
1.78      gwr      3795:
1.84      gwr      3796:        idx = PA_PGNUM(pa);
1.87      gwr      3797:        if (idx >= physmem) {
1.84      gwr      3798:                db_printf("bad address\n");
1.78      gwr      3799:                return;
1.84      gwr      3800:        }
                   3801:        db_printf("pa=0x%lx, flags=0x%x\n",
1.145     chs      3802:                  pa, pv_flags_tbl[idx]);
1.78      gwr      3803:
1.84      gwr      3804:        pv = pv_head_tbl[idx];
1.78      gwr      3805:        while (pv) {
1.84      gwr      3806:                db_printf(" pv_entry %p pmap %p va 0x%lx next %p\n",
1.145     chs      3807:                          pv, pv->pv_pmap, pv->pv_va, pv->pv_next);
1.78      gwr      3808:                pv = pv->pv_next;
                   3809:        }
                   3810: }
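
/*
 * Editor's note (usage example, not part of the original source):
 * these are meant to be invoked from the DDB prompt, e.g.:
 *	call pmap_print(<pmap address>)
 *	call pv_print(<physical address>)
 */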
                   3811: #endif /* PMAP_DEBUG */
