
Annotation of src/sys/arch/sun3/sun3/pmap.c, Revision 1.121

1.121   ! tsutsui     1: /*     $NetBSD: pmap.c,v 1.120 2000/10/27 13:28:54 tsutsui Exp $       */
1.36      cgd         2:
1.64      gwr         3: /*-
                      4:  * Copyright (c) 1996 The NetBSD Foundation, Inc.
1.13      glass       5:  * All rights reserved.
                      6:  *
1.64      gwr         7:  * This code is derived from software contributed to The NetBSD Foundation
                      8:  * by Adam Glass and Gordon W. Ross.
                      9:  *
1.13      glass      10:  * Redistribution and use in source and binary forms, with or without
                     11:  * modification, are permitted provided that the following conditions
                     12:  * are met:
                     13:  * 1. Redistributions of source code must retain the above copyright
                     14:  *    notice, this list of conditions and the following disclaimer.
                     15:  * 2. Redistributions in binary form must reproduce the above copyright
                     16:  *    notice, this list of conditions and the following disclaimer in the
                     17:  *    documentation and/or other materials provided with the distribution.
                     18:  * 3. All advertising materials mentioning features or use of this software
                     19:  *    must display the following acknowledgement:
1.64      gwr        20:  *        This product includes software developed by the NetBSD
                     21:  *        Foundation, Inc. and its contributors.
                     22:  * 4. Neither the name of The NetBSD Foundation nor the names of its
                     23:  *    contributors may be used to endorse or promote products derived
                     24:  *    from this software without specific prior written permission.
1.13      glass      25:  *
1.64      gwr        26:  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
                     27:  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
                     28:  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
1.66      gwr        29:  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
                     30:  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
1.64      gwr        31:  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
                     32:  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
                     33:  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
                     34:  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
                     35:  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
                     36:  * POSSIBILITY OF SUCH DAMAGE.
1.13      glass      37:  */
1.25      gwr        38:
1.3       glass      39: /*
1.1       glass      40:  * Some notes:
                     41:  *
1.84      gwr        42:  * sun3s have contexts (8).  In this pmap design, the kernel is mapped
1.38      gwr        43:  * into all contexts.  Processes take up a known portion of the context,
1.1       glass      44:  * and compete for the available contexts on an LRU basis.
                     45:  *
1.52      gwr        46:  * sun3s also have this evil "PMEG" crapola.  Essentially each "context"'s
1.1       glass      47:  * address space is defined by the 2048 one-byte entries in the segment map.
1.38      gwr        48:  * Each of these 1-byte entries points to a "Page Map Entry Group" (PMEG)
                      49:  * which contains the mappings for that virtual segment.  (This strange
                      50:  * terminology was invented by Sun and is preserved here for consistency.)
                      51:  * Each PMEG maps a 128KB segment as 16 pages of 8KB each.
                     52:  *
1.52      gwr        53:  * As you might guess, these PMEGs are in short supply and heavy demand.
                     54:  * PMEGs allocated to the kernel are "static" in the sense that they can't
                     55:  * be stolen from it.  PMEGs allocated to a particular segment of a
1.1       glass      56:  * pmap's virtual space will be fought over by the other pmaps.
                     57:  */
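/*
 * Editorial sketch, not part of the original source: the geometry
 * above in numbers, using the page/segment sizes assumed throughout
 * this file (NBPG = 0x2000, NBSG = 0x20000, 2048 segmap entries):
 *
 *	pages per PMEG:     NBSG / NBPG = 0x20000 / 0x2000 = 16
 *	space per context:  2048 * NBSG = 2048 * 128KB     = 256MB
 *
 * One byte of segmap names a PMEG, so at most 256 PMEGs exist, one
 * of which (SEGINV) is reserved as the invalid value.
 */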
                     58:
                     59: /*
1.65      gwr        60:  * Cache management:
                     61:  * All sun3 cache implementations are write-back.
                     62:  * Flushes must be done before removing translations
                     63:  * from the MMU because the (virtually addressed) cache needs the
                     64:  * MMU translation to write back dirty lines.
                     64:  */
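/*
 * Editorial sketch, not part of the original source: the ordering
 * this rule implies, exactly as context_free() below performs it:
 *
 *	cache_flush_context();		-- write back dirty lines first
 *	set_segmap(va, SEGINV);		-- only then drop the mapping
 *
 * Done in the other order, dirty lines would be left with no
 * translation through which to write them back.
 */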
                     65:
                     66: /*
1.1       glass      67:  * wanted attributes:
                     68:  *       pmegs that aren't needed by a pmap remain in the MMU.
                     69:  *       quick context switches between pmaps
                     70:  *       kernel is in all contexts
                     71:  */
                     72:
1.83      gwr        73: /*
1.84      gwr        74:  * Project1:  Use a "null" context for processes that have not
1.83      gwr        75:  * touched any user-space address recently.  This is efficient
                     76:  * for things that stay in the kernel for a while, waking up
                     77:  * to handle some I/O then going back to sleep (e.g. nfsd).
                     78:  * If and when such a process returns to user-mode, it will
                     79:  * fault and be given a real context at that time.
                     80:  *
                     81:  * This also makes context switching fast, because all we need
                     82:  * to do there for the MMU is slam the context register.
1.84      gwr        83:  *
                     84:  * Project2:  Use a private pool of PV elements.  This pool can be
                     85:  * fixed size because the total mapped virtual space supported by
                     86:  * the MMU H/W (and this pmap) is fixed for all time.
1.83      gwr        87:  */
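/*
 * Editorial sketch, not part of the original source: a minimal
 * context switch under the Project1 scheme, assuming only the
 * pm_ctxnum field and the set_context() accessor used elsewhere in
 * this file (the real entry point is _pmap_switch(), declared below):
 *
 *	void
 *	pmap_switch_sketch(pmap)	-- hypothetical name
 *		pmap_t pmap;
 *	{
 *		set_context(pmap->pm_ctxnum);
 *		-- If that was EMPTY_CONTEXT, the next user-space
 *		-- access faults and a real context is allocated.
 *	}
 */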
                     88:
1.104     jonathan   89: #include "opt_ddb.h"
1.103     gwr        90:
1.38      gwr        91: #include <sys/param.h>
                     92: #include <sys/systm.h>
                     93: #include <sys/proc.h>
                     94: #include <sys/malloc.h>
1.120     tsutsui    95: #include <sys/pool.h>
1.38      gwr        96: #include <sys/user.h>
                     97: #include <sys/queue.h>
1.74      gwr        98: #include <sys/kcore.h>
1.38      gwr        99:
1.103     gwr       100: #include <uvm/uvm.h>
1.110     mrg       101:
1.107     gwr       102: /* XXX - Pager hacks... (explain?) */
                    103: #define PAGER_SVA (uvm.pager_sva)
                    104: #define PAGER_EVA (uvm.pager_eva)
1.103     gwr       105:
1.78      gwr       106: #include <m68k/m68k.h>
                    107:
1.74      gwr       108: #include <machine/cpu.h>
                    109: #include <machine/dvma.h>
1.76      gwr       110: #include <machine/idprom.h>
1.74      gwr       111: #include <machine/kcore.h>
1.38      gwr       112: #include <machine/mon.h>
1.74      gwr       113: #include <machine/pmap.h>
                    114: #include <machine/pte.h>
1.38      gwr       115: #include <machine/vmparam.h>
1.65      gwr       116:
1.99      gwr       117: #include <sun3/sun3/cache.h>
                    118: #include <sun3/sun3/control.h>
                    119: #include <sun3/sun3/fc.h>
                    120: #include <sun3/sun3/machdep.h>
                    121: #include <sun3/sun3/obmem.h>
                    122:
1.81      gwr       123: #ifdef DDB
                    124: #include <ddb/db_output.h>
                    125: #else
                    126: #define db_printf printf
                    127: #endif
                    128:
1.78      gwr       129: /* Verify this correspondence between definitions. */
1.76      gwr       130: #if    (PMAP_OBIO << PG_MOD_SHIFT) != PGT_OBIO
                    131: #error "PMAP_XXX definitions don't match pte.h!"
                    132: #endif
                    133:
1.89      gwr       134: /* Type bits in a "pseudo" physical address. (XXX: pmap.h?) */
                    135: #define PMAP_TYPE      PMAP_VME32
1.75      gwr       136:
1.78      gwr       137: /*
                    138:  * Local convenience macros
                    139:  */
                    140:
1.98      gwr       141: #define DVMA_MAP_END   (DVMA_MAP_BASE + DVMA_MAP_AVAIL)
                    142:
1.80      gwr       143: /* User segments from 0 to KERNBASE */
                    144: #define        NUSEG   (KERNBASE / NBSG)
                    145: /* The remainder are kernel segments. */
                    146: #define        NKSEG   (NSEGMAP - NUSEG)
1.78      gwr       147:
                    148: #define VA_SEGNUM(x)   ((u_int)(x) >> SEGSHIFT)
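/*
 * Editorial example, not part of the original source: assuming the
 * usual sun3 values KERNBASE = 0x0E000000, NBSG = 0x20000 and 2048
 * segmap entries, the split works out to
 *
 *	NUSEG = 0x0E000000 / 0x20000 = 1792 user segments
 *	NKSEG = 2048 - 1792          =  256 kernel segments
 *
 * so VA_SEGNUM(0x0E020000) = 1793 is the second kernel segment.
 */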
1.50      gwr       149:
1.76      gwr       150: /*
1.78      gwr       151:  * Only "main memory" pages are registered in the pv_lists.
                    152:  * This macro is used to determine if a given pte refers to
                    153:  * "main memory" or not.  One slight hack here deserves more
                    154:  * explanation:  The Sun frame buffers all appear as PG_OBMEM
                    155:  * devices but way up near the end of the address space.
                    156:  * We do not want to consider these as "main memory" so the
                    157:  * macro below treats the high bits of the PFN as type bits.
                    158:  *
                    159:  * Note that on the 3/60 only 16 bits of PFN are stored in the
                    160:  * MMU and the top 3 bits read back as zero.  This means a
                    161:  * translation entered into the mmu for physical address
                    162:  * 0xFF000000 will look like 0x1F000000 after one reads back
                    163:  * the pte and converts the PFN to a physical address.
                    164:  */
1.88      gwr       165: #define MEM_BITS       (PG_TYPE | PA_PGNUM(0xF8000000))
1.78      gwr       166: #define        IS_MAIN_MEM(pte) (((pte) & MEM_BITS) == 0)
                    167:
1.87      gwr       168: /* Does this (pseudo) PA represent device space? */
1.89      gwr       169: #define PA_DEV_MASK   (0xF8000000 | PMAP_TYPE)
1.88      gwr       170: #define PA_IS_DEV(pa) ((pa) & PA_DEV_MASK)
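/*
 * Editorial example, not part of the original source: a 3/60 frame
 * buffer entered at PA 0xFF000000 reads back as PA 0x1F000000 (the
 * top three PFN bits are lost), yet both values have bits inside
 * PA_PGNUM(0xF8000000), so IS_MAIN_MEM() rejects such a pte and
 * PA_IS_DEV() is true either way.  Ordinary DRAM at, say, PA
 * 0x00400000 sets no type or high-PFN bits and is "main memory".
 */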
1.87      gwr       171:
1.78      gwr       172: /*
                    173:  * Is there a Virtually Addressed Cache (VAC) alias problem
                    174:  * if one page is mapped at both a1 and a2?
                    175:  */
                    176: #define        BADALIAS(a1, a2)        (((int)(a1) ^ (int)(a2)) & SEGOFSET)
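/*
 * Editorial example, not part of the original source: with 128KB
 * segments (SEGOFSET = 0x1FFFF), two mappings of one page are safe
 * only when the VAs are congruent modulo the segment size:
 *
 *	(0x2000 ^ 0x42000) & 0x1FFFF = 0       -- no alias problem
 *	(0x2000 ^ 0x43000) & 0x1FFFF = 0x1000  -- bad alias; map NC
 */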
                    177:
                    178:
                    179: /*
                    180:  * Debugging support.
                    181:  */
                    182: #define        PMD_ENTER       1
                    183: #define        PMD_LINK        2
                    184: #define        PMD_PROTECT     4
                    185: #define        PMD_SWITCH      8
                    186: #define PMD_COW                0x10
                    187: #define PMD_MODBIT     0x20
                    188: #define PMD_REFBIT     0x40
                    189: #define PMD_WIRING     0x80
                    190: #define PMD_CONTEXT    0x100
                    191: #define PMD_CREATE     0x200
                    192: #define PMD_SEGMAP     0x400
                    193: #define PMD_SETPTE     0x800
                    194: #define PMD_FAULT  0x1000
                    195:
                    196: #define        PMD_REMOVE      PMD_ENTER
                    197: #define        PMD_UNLINK      PMD_LINK
                    198:
                    199: #ifdef PMAP_DEBUG
                    200: int pmap_debug = 0;
                    201: int pmap_db_watchva = -1;
                    202: int pmap_db_watchpmeg = -1;
                    203: #endif /* PMAP_DEBUG */
                    204:
                    205:
                    206: /*
                    207:  * Miscellaneous variables.
                    208:  *
1.76      gwr       209:  * For simplicity, this interface retains the variables
                    210:  * that were used in the old interface (without NONCONTIG).
                    211:  * These are set in pmap_bootstrap() and used in
                    212:  * pmap_next_page().
                    213:  */
                    214: vm_offset_t virtual_avail, virtual_end;
                    215: vm_offset_t avail_start, avail_end;
1.78      gwr       216: #define        managed(pa)     (((pa) >= avail_start) && ((pa) < avail_end))
1.76      gwr       217:
                    218: /* used to skip the Sun3/50 video RAM */
                    219: static vm_offset_t hole_start, hole_size;
1.38      gwr       220:
1.78      gwr       221: /* This is for pmap_next_page() */
                    222: static vm_offset_t avail_next;
                    223:
                    224: /* This is where we map a PMEG without a context. */
                    225: static vm_offset_t temp_seg_va;
                    226:
                    227: /*
                    228:  * Location to store virtual addresses
                    229:  * to be used in copy/zero operations.
                    230:  */
                    231: vm_offset_t tmp_vpages[2] = {
1.99      gwr       232:        SUN3_MONSHORTSEG,
                    233:        SUN3_MONSHORTSEG + NBPG };
1.78      gwr       234: int tmp_vpages_inuse;
                    235:
                    236: static int pmap_version = 1;
                    237: struct pmap kernel_pmap_store;
                    238: #define kernel_pmap (&kernel_pmap_store)
1.82      gwr       239: static u_char kernel_segmap[NSEGMAP];
1.120     tsutsui   240: 
                    241: /* memory pool for pmap structures */
                    242: struct pool    pmap_pmap_pool;
1.78      gwr       243:
1.38      gwr       244: /* statistics... */
                    245: struct pmap_stats {
                    246:        int     ps_enter_firstpv;       /* pv heads entered */
                    247:        int     ps_enter_secondpv;      /* pv nonheads entered */
1.39      gwr       248:        int     ps_unlink_pvfirst;      /* of pv_unlinks on head */
                    249:        int     ps_unlink_pvsearch;     /* of pv_unlink searches */
1.40      gwr       250:        int     ps_pmeg_faultin;        /* pmegs reloaded */
1.39      gwr       251:        int     ps_changeprots;         /* of calls to changeprot */
                    252:        int     ps_changewire;          /* useless wiring changes */
                    253:        int     ps_npg_prot_all;        /* of active pages protected */
                    254:        int     ps_npg_prot_actual;     /* pages actually affected */
1.60      gwr       255:        int     ps_vac_uncached;        /* non-cached due to bad alias */
                    256:        int     ps_vac_recached;        /* re-cached when bad alias gone */
1.38      gwr       257: } pmap_stats;
                    258:
                    259:
                    260: /*
1.77      gwr       261:  * locking issues:  These used to do spl* stuff.
                    262:  * XXX: Use these for reentrance detection?
1.38      gwr       263:  */
1.77      gwr       264: #define PMAP_LOCK()    (void)0 /* XXX */
                     265: #define PMAP_UNLOCK()  (void)0 /* XXX */
1.2       glass     266:
1.78      gwr       267: #define pmap_lock(pmap) simple_lock(&pmap->pm_lock)
                    268: #define pmap_unlock(pmap) simple_unlock(&pmap->pm_lock)
                    269: #define pmap_add_ref(pmap) ++pmap->pm_refcount
                    270: #define pmap_del_ref(pmap) --pmap->pm_refcount
                    271: #define pmap_refcount(pmap) pmap->pm_refcount
                    272:
1.22      gwr       273: /*
1.77      gwr       274:  * Note that splpmap() is used in routines called at splnet() and
1.22      gwr       275:  * MUST NOT lower the priority.  For this reason we arrange that:
1.55      gwr       276:  *    splimp = max(splnet,splbio)
1.22      gwr       277:  * Would splvm() be more natural here? (same level as splimp).
                    278:  */
1.77      gwr       279:
1.21      glass     280: #define splpmap splimp
1.77      gwr       281:
                    282: #ifdef PMAP_DEBUG
                    283: #define        CHECK_SPL() do { \
                    284:        if ((getsr() & PSL_IPL) < PSL_IPL4) \
                    285:                panic("pmap: bad spl, line %d", __LINE__); \
                    286: } while (0)
                    287: #else  /* PMAP_DEBUG */
                    288: #define        CHECK_SPL() (void)0
                    289: #endif /* PMAP_DEBUG */
                    290:
1.38      gwr       291:
1.2       glass     292: /*
1.78      gwr       293:  * PV support.
                    294:  * (i.e. Find all virtual mappings of a physical page.)
1.5       glass     295:  */
                    296:
                    297: /*
1.38      gwr       298:  * XXX - Could eliminate this by causing managed() to return 0
                    299:  * ( avail_start = avail_end = 0 )
1.2       glass     300:  */
1.38      gwr       301: int pv_initialized = 0;
1.2       glass     302:
1.84      gwr       303: /* One of these for each mapped virtual page. */
1.1       glass     304: struct pv_entry {
1.38      gwr       305:        struct pv_entry *pv_next;
                    306:        pmap_t         pv_pmap;
                    307:        vm_offset_t      pv_va;
1.1       glass     308: };
1.38      gwr       309: typedef struct pv_entry *pv_entry_t;
1.1       glass     310:
1.84      gwr       311: /* Table of PV list heads (per physical page). */
                    312: static struct pv_entry **pv_head_tbl;
                    313:
                    314: /* Free list of PV entries. */
                    315: static struct pv_entry *pv_free_list;
                    316:
                    317: /* Table of flags (per physical page). */
                    318: static u_char *pv_flags_tbl;
1.1       glass     319:
1.38      gwr       320: /* These are as in the MMU but shifted by PV_SHIFT. */
                    321: #define PV_SHIFT       24
                    322: #define PV_VALID  0x80
                    323: #define PV_WRITE  0x40
                    324: #define PV_SYSTEM 0x20
                    325: #define PV_NC     0x10
                    326: #define PV_PERM   0xF0
                    327: #define PV_TYPE   0x0C
                    328: #define PV_REF    0x02
                    329: #define PV_MOD    0x01
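/*
 * Editorial sanity check, not part of the original source: it makes
 * explicit what save_modref_bits() below relies on, in the style of
 * the PMAP_OBIO check near the top of this file.
 */
#if ((PG_MODREF >> PV_SHIFT) != (PV_REF | PV_MOD))
#error "PV_* definitions don't match pte.h!"
#endif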
                    330:
                    331:
                    332: /*
1.78      gwr       333:  * context structures and queues
                    334:  */
                    335:
                    336: struct context_state {
                    337:        TAILQ_ENTRY(context_state) context_link;
                    338:        int            context_num;
                    339:        struct pmap   *context_upmap;
                    340: };
                    341: typedef struct context_state *context_t;
                    342:
1.83      gwr       343: #define INVALID_CONTEXT -1     /* impossible value */
                    344: #define EMPTY_CONTEXT 0
                    345: #define FIRST_CONTEXT 1
                    346: #define        has_context(pmap)       ((pmap)->pm_ctxnum != EMPTY_CONTEXT)
1.78      gwr       347:
1.79      gwr       348: TAILQ_HEAD(context_tailq, context_state)
                    349:        context_free_queue, context_active_queue;
1.50      gwr       350:
1.78      gwr       351: static struct context_state context_array[NCONTEXT];
1.1       glass     352:
                    353:
1.38      gwr       354: /*
1.81      gwr       355:  * PMEG structures, queues, and macros
1.38      gwr       356:  */
                    357: #define PMEGQ_FREE     0
                    358: #define PMEGQ_INACTIVE 1
                    359: #define PMEGQ_ACTIVE   2
                    360: #define PMEGQ_KERNEL   3
                    361: #define PMEGQ_NONE     4
                    362:
                    363: struct pmeg_state {
                    364:        TAILQ_ENTRY(pmeg_state) pmeg_link;
                    365:        int            pmeg_index;
                    366:        pmap_t         pmeg_owner;
                    367:        int            pmeg_version;
                    368:        vm_offset_t    pmeg_va;
                    369:        int            pmeg_wired;
                    370:        int            pmeg_reserved;
                    371:        int            pmeg_vpages;
                    372:        int            pmeg_qstate;
                    373: };
                    374:
                    375: typedef struct pmeg_state *pmeg_t;
                    376:
                    377: #define PMEG_INVAL (NPMEG-1)
                    378: #define PMEG_NULL (pmeg_t) NULL
                    379:
                    380: /* XXX - Replace pmeg_kernel_queue with pmeg_wired_queue ? */
1.79      gwr       381: TAILQ_HEAD(pmeg_tailq, pmeg_state)
                    382:        pmeg_free_queue, pmeg_inactive_queue,
1.38      gwr       383:        pmeg_active_queue, pmeg_kernel_queue;
1.26      gwr       384:
                    385: static struct pmeg_state pmeg_array[NPMEG];
1.20      glass     386:
1.1       glass     387:
1.38      gwr       388: /*
                    389:  * prototypes
                    390:  */
                    391: static int get_pte_pmeg __P((int, int));
                    392: static void set_pte_pmeg __P((int, int, int));
                    393:
                    394: static void context_allocate __P((pmap_t pmap));
                    395: static void context_free __P((pmap_t pmap));
                    396: static void context_init __P((void));
                    397:
1.76      gwr       398: static void pmeg_init __P((void));
                    399: static void pmeg_reserve __P((int pmeg_num));
1.75      gwr       400:
1.38      gwr       401: static pmeg_t pmeg_allocate __P((pmap_t pmap, vm_offset_t va));
1.75      gwr       402: static void pmeg_mon_init __P((vm_offset_t sva, vm_offset_t eva, int keep));
1.38      gwr       403: static void pmeg_release __P((pmeg_t pmegp));
1.76      gwr       404: static void pmeg_free __P((pmeg_t pmegp));
1.38      gwr       405: static pmeg_t pmeg_cache __P((pmap_t pmap, vm_offset_t va));
                    406: static void pmeg_set_wiring __P((pmeg_t pmegp, vm_offset_t va, int));
                    407:
1.92      gwr       408: static int  pv_link   __P((pmap_t pmap, int pte, vm_offset_t va));
                    409: static void pv_unlink __P((pmap_t pmap, int pte, vm_offset_t va));
1.76      gwr       410: static void pv_remove_all __P((vm_offset_t pa));
1.84      gwr       411: static void pv_changepte __P((vm_offset_t pa, int, int));
                    412: static u_int pv_syncflags __P((pv_entry_t));
1.38      gwr       413: static void pv_init __P((void));
1.2       glass     414:
1.65      gwr       415: static void pmeg_clean __P((pmeg_t pmegp));
                    416: static void pmeg_clean_free __P((void));
                    417:
1.38      gwr       418: static void pmap_common_init __P((pmap_t pmap));
1.82      gwr       419: static void pmap_kernel_init __P((pmap_t pmap));
1.76      gwr       420: static void pmap_user_init __P((pmap_t pmap));
1.101     gwr       421: static void pmap_page_upload __P((void));
1.38      gwr       422:
1.92      gwr       423: static void pmap_enter_kernel __P((vm_offset_t va,
                    424:        int new_pte, boolean_t wired));
                    425: static void pmap_enter_user __P((pmap_t pmap, vm_offset_t va,
                    426:        int new_pte, boolean_t wired));
1.38      gwr       427:
1.76      gwr       428: static void pmap_protect1 __P((pmap_t, vm_offset_t, vm_offset_t));
                    429: static void pmap_protect_mmu __P((pmap_t, vm_offset_t, vm_offset_t));
                    430: static void pmap_protect_noctx __P((pmap_t, vm_offset_t, vm_offset_t));
                    431:
                    432: static void pmap_remove1 __P((pmap_t pmap, vm_offset_t, vm_offset_t));
                    433: static void pmap_remove_mmu __P((pmap_t, vm_offset_t, vm_offset_t));
                    434: static void pmap_remove_noctx __P((pmap_t, vm_offset_t, vm_offset_t));
1.1       glass     435:
1.66      gwr       436: static int  pmap_fault_reload __P((struct pmap *, vm_offset_t, int));
                    437:
1.99      gwr       438: /* Called only from locore.s and pmap.c */
                    439: void   _pmap_switch __P((pmap_t pmap));
                    440:
1.79      gwr       441: #ifdef PMAP_DEBUG
                    442: void pmap_print __P((pmap_t pmap));
                    443: void pv_print __P((vm_offset_t pa));
                    444: void pmeg_print __P((pmeg_t pmegp));
                    445: static void pmeg_verify_empty __P((vm_offset_t va));
                    446: #endif /* PMAP_DEBUG */
1.102     thorpej   447: void pmap_pinit __P((pmap_t));
                    448: void pmap_release __P((pmap_t));
1.79      gwr       449:
                    450: /*
                    451:  * Various in-line helper functions.
                    452:  */
                    453:
1.83      gwr       454: static inline pmap_t
                    455: current_pmap __P((void))
                    456: {
                    457:        struct proc *p;
                    458:        struct vmspace *vm;
                    459:        vm_map_t        map;
                    460:        pmap_t  pmap;
                    461:
                    462:        p = curproc;    /* XXX */
                    463:        if (p == NULL)
                    464:                pmap = kernel_pmap;
                    465:        else {
                    466:                vm = p->p_vmspace;
                    467:                map = &vm->vm_map;
                    468:                pmap = vm_map_pmap(map);
                    469:        }
                    470:
                    471:        return (pmap);
                    472: }
                    473:
1.84      gwr       474: static inline struct pv_entry **
                    475: pa_to_pvhead(vm_offset_t pa)
                    476: {
                    477:        int idx;
                    478:
                    479:        idx = PA_PGNUM(pa);
1.79      gwr       480: #ifdef DIAGNOSTIC
1.88      gwr       481:        if (PA_IS_DEV(pa) || (idx >= physmem))
1.84      gwr       482:                panic("pmap:pa_to_pvhead: bad pa=0x%lx", pa);
                    483: #endif
                    484:        return (&pv_head_tbl[idx]);
                    485: }
                    486:
                    487: static inline u_char *
                    488: pa_to_pvflags(vm_offset_t pa)
1.79      gwr       489: {
1.84      gwr       490:        int idx;
                    491:
                    492:        idx = PA_PGNUM(pa);
                    493: #ifdef DIAGNOSTIC
1.88      gwr       494:        if (PA_IS_DEV(pa) || (idx >= physmem))
1.87      gwr       495:                panic("pmap:pa_to_pvflags: bad pa=0x%lx", pa);
1.84      gwr       496: #endif
                    497:        return (&pv_flags_tbl[idx]);
1.79      gwr       498: }
                    499:
1.84      gwr       500: static inline pmeg_t
1.79      gwr       501: pmeg_p(int sme)
                    502: {
1.84      gwr       503: #ifdef DIAGNOSTIC
1.79      gwr       504:        if (sme < 0 || sme >= SEGINV)
                    505:                panic("pmeg_p: bad sme");
1.84      gwr       506: #endif
1.79      gwr       507:        return &pmeg_array[sme];
                    508: }
                    509:
                    510: #define is_pmeg_wired(pmegp) (pmegp->pmeg_wired != 0)
                    511:
                    512: static void
                    513: pmeg_set_wiring(pmegp, va, flag)
                    514:        pmeg_t pmegp;
                    515:        vm_offset_t va;
                    516:        int flag;
                    517: {
                    518:        int idx, mask;
                    519:
                    520:        idx = VA_PTE_NUM(va);
                    521:        mask = 1 << idx;
                    522:
                    523:        if (flag)
                    524:                pmegp->pmeg_wired |= mask;
                    525:        else
                    526:                pmegp->pmeg_wired &= ~mask;
                    527: }
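/*
 * Editorial usage note, not part of the original source: pmeg_wired
 * is a bitmask with one bit per PTE slot in the segment, so wiring
 * the fourth page,
 *
 *	pmeg_set_wiring(pmegp, segva + 3 * NBPG, TRUE);
 *
 * sets bit 3, and is_pmeg_wired() stays true until every wired page
 * in the segment has been unwired again.
 */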
                    528:
                    529: /*
                    530:  * Save the MOD and REF bits from the given PTE using its PA
                    531:  */
                    532: static void
                    533: save_modref_bits(int pte)
                    534: {
1.84      gwr       535:        u_char *pv_flags;
1.79      gwr       536:
                    537:        if (pv_initialized == 0)
                    538:                return;
                    539:
                    540:        /* Only main memory is ever in the pv_lists */
                    541:        if (!IS_MAIN_MEM(pte))
                    542:                return;
                    543:
1.80      gwr       544:        CHECK_SPL();
                    545:
1.84      gwr       546:        pv_flags = pa_to_pvflags(PG_PA(pte));
                    547:        *pv_flags |= ((pte & PG_MODREF) >> PV_SHIFT);
1.79      gwr       548: }
                    549:
1.2       glass     550:
1.78      gwr       551: /****************************************************************
                    552:  * Context management functions.
1.26      gwr       553:  */
1.39      gwr       554:
1.80      gwr       555: /* part of pmap_bootstrap */
1.78      gwr       556: static void
                    557: context_init()
                    558: {
                    559:        int i;
                    560:
                    561:        TAILQ_INIT(&context_free_queue);
                    562:        TAILQ_INIT(&context_active_queue);
1.26      gwr       563:
1.83      gwr       564:        /* Leave EMPTY_CONTEXT out of the free list. */
                    565:        context_array[0].context_upmap = kernel_pmap;
                    566:
                    567:        for (i = 1; i < NCONTEXT; i++) {
1.78      gwr       568:                context_array[i].context_num = i;
                    569:                context_array[i].context_upmap = NULL;
                    570:                TAILQ_INSERT_TAIL(&context_free_queue, &context_array[i],
                    571:                                                  context_link);
1.76      gwr       572: #ifdef PMAP_DEBUG
1.78      gwr       573:                if (pmap_debug & PMD_CONTEXT)
1.81      gwr       574:                        printf("context_init: sizeof(context_array[0])=%d\n",
                    575:                                   sizeof(context_array[0]));
1.78      gwr       576: #endif
                    577:        }
                    578: }
1.26      gwr       579:
1.80      gwr       580: /* Get us a context (steal one if necessary). */
1.38      gwr       581: static void
                    582: context_allocate(pmap)
                    583:        pmap_t pmap;
                    584: {
                    585:        context_t context;
                    586:
1.80      gwr       587:        CHECK_SPL();
1.77      gwr       588:
1.80      gwr       589: #ifdef DIAGNOSTIC
1.50      gwr       590:        if (pmap == kernel_pmap)
                    591:                panic("context_allocate: kernel_pmap");
1.38      gwr       592:        if (has_context(pmap))
                    593:                panic("pmap: pmap already has context allocated to it");
1.80      gwr       594: #endif
                    595:
                    596:        context = TAILQ_FIRST(&context_free_queue);
1.65      gwr       597:        if (context == NULL) {
                    598:                /* Steal the head of the active queue. */
1.80      gwr       599:                context = TAILQ_FIRST(&context_active_queue);
1.65      gwr       600:                if (context == NULL)
                    601:                        panic("pmap: no contexts left?");
1.38      gwr       602: #ifdef PMAP_DEBUG
                    603:                if (pmap_debug & PMD_CONTEXT)
1.80      gwr       604:                        printf("context_allocate: steal ctx %d from pmap %p\n",
                    605:                                   context->context_num, context->context_upmap);
1.38      gwr       606: #endif
1.80      gwr       607:                context_free(context->context_upmap);
                    608:                context = TAILQ_FIRST(&context_free_queue);
1.38      gwr       609:        }
1.80      gwr       610:        TAILQ_REMOVE(&context_free_queue, context, context_link);
                    611:
1.38      gwr       612:        if (context->context_upmap != NULL)
                    613:                panic("pmap: context in use???");
1.80      gwr       614:
                    615:        context->context_upmap = pmap;
1.38      gwr       616:        pmap->pm_ctxnum = context->context_num;
1.80      gwr       617:
                    618:        TAILQ_INSERT_TAIL(&context_active_queue, context, context_link);
1.38      gwr       619:
                    620:        /*
                    621:         * We could reload the MMU here, but that would
                    622:         * artificially move PMEGs from the inactive queue
                    623:         * to the active queue, so do lazy reloading.
                    624:         * XXX - Need to reload wired pmegs though...
1.80      gwr       625:         * XXX: Verify that the context is empty?
1.38      gwr       626:         */
1.1       glass     627: }
1.5       glass     628:
1.80      gwr       629: /*
                    630:  * Unload the context and put it on the free queue.
                    631:  */
1.38      gwr       632: static void
                    633: context_free(pmap)             /* :) */
                    634:        pmap_t pmap;
                    635: {
                    636:        int saved_ctxnum, ctxnum;
1.80      gwr       637:        int i, sme;
1.38      gwr       638:        context_t contextp;
                    639:        vm_offset_t va;
                    640:
1.80      gwr       641:        CHECK_SPL();
1.1       glass     642:
1.38      gwr       643:        ctxnum = pmap->pm_ctxnum;
1.83      gwr       644:        if (ctxnum < FIRST_CONTEXT || ctxnum >= NCONTEXT)
1.80      gwr       645:                panic("pmap: context_free ctxnum");
1.38      gwr       646:        contextp = &context_array[ctxnum];
                    647:
1.80      gwr       648:        /* Temporary context change. */
1.38      gwr       649:        saved_ctxnum = get_context();
                    650:        set_context(ctxnum);
                    651:
1.50      gwr       652:        /* Before unloading translations, flush cache. */
                    653: #ifdef HAVECACHE
                    654:        if (cache_size)
                    655:                cache_flush_context();
                    656: #endif
                    657:
1.38      gwr       658:        /* Unload MMU (but keep in SW segmap). */
1.80      gwr       659:        for (i=0, va=0; i < NUSEG; i++, va+=NBSG) {
                    660:
                    661: #if !defined(PMAP_DEBUG)
                    662:                /* Short-cut using the S/W segmap (if !debug). */
                    663:                if (pmap->pm_segmap[i] == SEGINV)
                    664:                        continue;
                    665: #endif
                    666:
                    667:                /* Check the H/W segmap. */
                    668:                sme = get_segmap(va);
                    669:                if (sme == SEGINV)
                    670:                        continue;
                    671:
                    672:                /* Found valid PMEG in the segmap. */
1.38      gwr       673: #ifdef PMAP_DEBUG
1.80      gwr       674:                if (pmap_debug & PMD_SEGMAP)
                    675:                        printf("pmap: set_segmap ctx=%d v=0x%lx old=0x%x new=ff (cf)\n",
                    676:                                   ctxnum, va, sme);
1.81      gwr       677: #endif
                    678: #ifdef DIAGNOSTIC
1.80      gwr       679:                if (sme != pmap->pm_segmap[i])
                    680:                        panic("context_free: unknown sme at va=0x%lx", va);
                    681: #endif
                    682:                /* Did cache flush above (whole context). */
                    683:                set_segmap(va, SEGINV);
                    684:                /* In this case, do not clear pm_segmap. */
1.83      gwr       685:                /* XXX: Maybe inline this call? */
1.80      gwr       686:                pmeg_release(pmeg_p(sme));
1.38      gwr       687:        }
1.80      gwr       688:
                    689:        /* Restore previous context. */
1.38      gwr       690:        set_context(saved_ctxnum);
1.80      gwr       691:
                    692:        /* Dequeue, update, requeue. */
                    693:        TAILQ_REMOVE(&context_active_queue, contextp, context_link);
1.83      gwr       694:        pmap->pm_ctxnum = EMPTY_CONTEXT;
1.38      gwr       695:        contextp->context_upmap = NULL;
1.78      gwr       696:        TAILQ_INSERT_TAIL(&context_free_queue, contextp, context_link);
1.1       glass     697: }
                    698:
1.78      gwr       699:
                    700: /****************************************************************
                    701:  * PMEG management functions.
                    702:  */
                    703:
1.38      gwr       704: static void
1.78      gwr       705: pmeg_init()
1.26      gwr       706: {
1.78      gwr       707:        int x;
                    708:
                    709:        /* clear pmeg array, put it all on the free pmeg queue */
1.38      gwr       710:
1.78      gwr       711:        TAILQ_INIT(&pmeg_free_queue);
                    712:        TAILQ_INIT(&pmeg_inactive_queue);
                    713:        TAILQ_INIT(&pmeg_active_queue);
                    714:        TAILQ_INIT(&pmeg_kernel_queue);
1.38      gwr       715:
1.78      gwr       716:        bzero(pmeg_array, NPMEG*sizeof(struct pmeg_state));
                    717:        for (x = 0; x < NPMEG; x++) {
                    718:                TAILQ_INSERT_TAIL(&pmeg_free_queue, &pmeg_array[x],
                    719:                                  pmeg_link);
                    720:                pmeg_array[x].pmeg_qstate = PMEGQ_FREE;
                    721:                pmeg_array[x].pmeg_index = x;
1.38      gwr       722:        }
                    723:
1.78      gwr       724:        /* The last pmeg is not usable. */
                    725:        pmeg_reserve(SEGINV);
1.26      gwr       726: }
                    727:
1.38      gwr       728: /*
                    729:  * Reserve a pmeg (forever) for use by PROM, etc.
                    730:  * Contents are left as-is.  Called very early...
                    731:  */
                    732: void
1.76      gwr       733: pmeg_reserve(sme)
1.38      gwr       734:        int sme;
1.1       glass     735: {
1.38      gwr       736:        pmeg_t pmegp;
1.1       glass     737:
1.38      gwr       738:        /* Can not use pmeg_p() because it fails on SEGINV. */
                    739:        pmegp = &pmeg_array[sme];
1.26      gwr       740:
1.67      gwr       741:        if (pmegp->pmeg_reserved) {
1.76      gwr       742:                mon_printf("pmeg_reserve: already reserved\n");
1.67      gwr       743:                sunmon_abort();
                    744:        }
                    745:        if (pmegp->pmeg_owner) {
1.76      gwr       746:                mon_printf("pmeg_reserve: already owned\n");
1.67      gwr       747:                sunmon_abort();
                    748:        }
1.38      gwr       749:
1.78      gwr       750:        /* Owned by kernel, but not really usable... */
1.56      gwr       751:        pmegp->pmeg_owner = kernel_pmap;
1.38      gwr       752:        pmegp->pmeg_reserved++; /* keep count, just in case */
                    753:        TAILQ_REMOVE(&pmeg_free_queue, pmegp, pmeg_link);
                    754:        pmegp->pmeg_qstate = PMEGQ_NONE;
1.1       glass     755: }
                    756:
1.75      gwr       757: /*
                    758:  * Examine PMEGs used by the monitor, and either
                    759:  * reserve them (keep=1) or clear them (keep=0)
                    760:  */
                    761: static void
                    762: pmeg_mon_init(sva, eva, keep)
                    763:        vm_offset_t sva, eva;
                    764:        int keep;       /* true: reserve, false: clear */
                    765: {
                    766:        vm_offset_t pgva, endseg;
                    767:        int pte, valid;
                    768:        unsigned char sme;
                    769:
1.94      gwr       770: #ifdef PMAP_DEBUG
                    771:        if (pmap_debug & PMD_SEGMAP)
                    772:                mon_printf("pmeg_mon_init(0x%x, 0x%x, %d)\n",
                    773:                           sva, eva, keep);
                    774: #endif
                    775:
1.75      gwr       776:        sva &= ~(NBSG-1);
                    777:
                    778:        while (sva < eva) {
                    779:                sme = get_segmap(sva);
                    780:                if (sme != SEGINV) {
                    781:                        valid = 0;
                    782:                        endseg = sva + NBSG;
                    783:                        for (pgva = sva; pgva < endseg; pgva += NBPG) {
                    784:                                pte = get_pte(pgva);
                    785:                                if (pte & PG_VALID) {
                    786:                                        valid++;
                    787:                                }
                    788:                        }
1.94      gwr       789: #ifdef PMAP_DEBUG
                    790:                        if (pmap_debug & PMD_SEGMAP)
                    791:                                mon_printf(" sva=0x%x seg=0x%x valid=%d\n",
                    792:                                           sva, sme, valid);
                    793: #endif
1.75      gwr       794:                        if (keep && valid)
1.76      gwr       795:                                pmeg_reserve(sme);
1.75      gwr       796:                        else set_segmap(sva, SEGINV);
                    797:                }
                    798:                sva += NBSG;
                    799:        }
                    800: }
                    801:
1.81      gwr       802: /*
                    803:  * This is used only during pmap_bootstrap, so we can
                    804:  * get away with borrowing a slot in the segmap.
                    805:  */
1.38      gwr       806: static void
                    807: pmeg_clean(pmegp)
                    808:        pmeg_t pmegp;
1.7       glass     809: {
1.81      gwr       810:        int sme;
                    811:        vm_offset_t va;
                    812:
                    813:        sme = get_segmap(0);
                    814:        if (sme != SEGINV)
                    815:                panic("pmeg_clean");
                    816:
                    817:        sme = pmegp->pmeg_index;
                    818:        set_segmap(0, sme);
                    819:
                    820:        for (va = 0; va < NBSG; va += NBPG)
                    821:                set_pte(va, PG_INVAL);
1.38      gwr       822:
1.81      gwr       823:        set_segmap(0, SEGINV);
1.7       glass     824: }
                    825:
                    826: /*
                    827:  * This routine makes sure that pmegs on the pmeg_free_queue contain
                    828:  * no valid ptes.  It pulls things off the queue, cleans them, and
1.80      gwr       829:  * puts them at the end.  The loop terminates when the first element
                     830:  * it cleaned shows up at the head of the queue again.
1.7       glass     831:  */
1.38      gwr       832: static void
                    833: pmeg_clean_free()
1.7       glass     834: {
1.38      gwr       835:        pmeg_t pmegp, pmegp_first;
1.7       glass     836:
1.80      gwr       837:        pmegp = TAILQ_FIRST(&pmeg_free_queue);
                    838:        if (pmegp == NULL)
1.38      gwr       839:                panic("pmap: no free pmegs available to clean");
1.26      gwr       840:
1.38      gwr       841:        pmegp_first = NULL;
1.26      gwr       842:
1.38      gwr       843:        for (;;) {
1.80      gwr       844:                pmegp = TAILQ_FIRST(&pmeg_free_queue);
                    845:                TAILQ_REMOVE(&pmeg_free_queue, pmegp, pmeg_link);
1.26      gwr       846:
1.38      gwr       847:                pmegp->pmeg_qstate = PMEGQ_NONE;
                    848:                pmeg_clean(pmegp);
1.80      gwr       849:                pmegp->pmeg_qstate = PMEGQ_FREE;
1.26      gwr       850:
1.38      gwr       851:                TAILQ_INSERT_TAIL(&pmeg_free_queue, pmegp, pmeg_link);
1.26      gwr       852:
1.38      gwr       853:                if (pmegp == pmegp_first)
                    854:                        break;
                    855:                if (pmegp_first == NULL)
                    856:                        pmegp_first = pmegp;
1.1       glass     857:        }
                    858: }
                    859:
1.38      gwr       860: /*
1.46      gwr       861:  * Allocate a PMEG by whatever means necessary.
                    862:  * (May invalidate some mappings!)
1.38      gwr       863:  */
                    864: static pmeg_t
                    865: pmeg_allocate(pmap, va)
                    866:        pmap_t pmap;
                    867:        vm_offset_t va;
1.1       glass     868: {
1.38      gwr       869:        pmeg_t pmegp;
                    870:
                    871:        CHECK_SPL();
1.1       glass     872:
1.39      gwr       873: #ifdef DIAGNOSTIC
                    874:        if (va & SEGOFSET) {
1.73      fair      875:                panic("pmap:pmeg_allocate: va=0x%lx", va);
1.39      gwr       876:        }
                    877: #endif
                    878:
1.38      gwr       879:        /* Get one onto the free list if necessary. */
1.80      gwr       880:        pmegp = TAILQ_FIRST(&pmeg_free_queue);
1.38      gwr       881:        if (!pmegp) {
                    882:                /* Try inactive queue... */
1.80      gwr       883:                pmegp = TAILQ_FIRST(&pmeg_inactive_queue);
1.38      gwr       884:                if (!pmegp) {
                    885:                        /* Try active queue... */
1.80      gwr       886:                        pmegp = TAILQ_FIRST(&pmeg_active_queue);
1.38      gwr       887:                }
                    888:                if (!pmegp) {
                    889:                        panic("pmeg_allocate: failed");
                    890:                }
1.40      gwr       891:                /*
                    892:                 * Remove mappings to free-up a pmeg
                    893:                 * (so it will go onto the free list).
                    894:                 * XXX - Should this call up into the VM layer
                    895:                 * to notify it when pages are deactivated?
                    896:                 * See: vm_page.c:vm_page_deactivate(vm_page_t)
1.46      gwr       897:                 * XXX - Skip this one if it is wired?
1.40      gwr       898:                 */
1.76      gwr       899:                pmap_remove1(pmegp->pmeg_owner,
                    900:                             pmegp->pmeg_va,
                    901:                             pmegp->pmeg_va + NBSG);
1.38      gwr       902:        }
                    903:
                    904:        /* OK, free list has something for us to take. */
1.80      gwr       905:        pmegp = TAILQ_FIRST(&pmeg_free_queue);
                    906: #ifdef DIAGNOSTIC
                    907:        if (pmegp == NULL)
1.38      gwr       908:                panic("pmeg_allocagte: still none free?");
1.80      gwr       909:        if ((pmegp->pmeg_qstate != PMEGQ_FREE) ||
                    910:                (pmegp->pmeg_index == SEGINV) ||
                    911:                (pmegp->pmeg_vpages))
                    912:                panic("pmeg_allocate: bad pmegp=%p", pmegp);
1.26      gwr       913: #endif
                    914: #ifdef PMAP_DEBUG
1.38      gwr       915:        if (pmegp->pmeg_index == pmap_db_watchpmeg) {
1.81      gwr       916:                db_printf("pmeg_allocate: watch pmegp=%p\n", pmegp);
1.38      gwr       917:                Debugger();
                    918:        }
1.26      gwr       919: #endif
1.80      gwr       920:
                    921:        TAILQ_REMOVE(&pmeg_free_queue, pmegp, pmeg_link);
1.38      gwr       922:
                    923:        /* Reassign this PMEG for the caller. */
                    924:        pmegp->pmeg_owner = pmap;
                    925:        pmegp->pmeg_version = pmap->pm_version;
                    926:        pmegp->pmeg_va = va;
                    927:        pmegp->pmeg_wired = 0;
                    928:        pmegp->pmeg_reserved  = 0;
                    929:        pmegp->pmeg_vpages  = 0;
1.50      gwr       930:        if (pmap == kernel_pmap) {
1.38      gwr       931:                TAILQ_INSERT_TAIL(&pmeg_kernel_queue, pmegp, pmeg_link);
                    932:                pmegp->pmeg_qstate = PMEGQ_KERNEL;
                    933:        } else {
                    934:                TAILQ_INSERT_TAIL(&pmeg_active_queue, pmegp, pmeg_link);
                    935:                pmegp->pmeg_qstate = PMEGQ_ACTIVE;
1.30      gwr       936:        }
1.38      gwr       937:        /* Caller will verify that it's empty (if debugging). */
                    938:        return pmegp;
1.1       glass     939: }
1.7       glass     940:
1.28      gwr       941: /*
1.38      gwr       942:  * Put pmeg on the inactive queue, leaving its contents intact.
                    943:  * This happens when we lose our context.  We may reclaim
                    944:  * this pmeg later if it is still in the inactive queue.
1.28      gwr       945:  */
1.38      gwr       946: static void
                    947: pmeg_release(pmegp)
                    948:        pmeg_t pmegp;
1.1       glass     949: {
1.78      gwr       950:
1.38      gwr       951:        CHECK_SPL();
1.29      gwr       952:
1.38      gwr       953: #ifdef DIAGNOSTIC
1.80      gwr       954:        if ((pmegp->pmeg_owner == kernel_pmap) ||
                    955:                (pmegp->pmeg_qstate != PMEGQ_ACTIVE))
                    956:                panic("pmeg_release: bad pmeg=%p", pmegp);
1.38      gwr       957: #endif
                    958:
1.26      gwr       959:        TAILQ_REMOVE(&pmeg_active_queue, pmegp, pmeg_link);
1.38      gwr       960:        pmegp->pmeg_qstate = PMEGQ_INACTIVE;
1.29      gwr       961:        TAILQ_INSERT_TAIL(&pmeg_inactive_queue, pmegp, pmeg_link);
1.1       glass     962: }
1.7       glass     963:
1.26      gwr       964: /*
1.38      gwr       965:  * Move the pmeg to the free queue from wherever it is.
1.50      gwr       966:  * The pmeg will be clean.  It might be in kernel_pmap.
1.26      gwr       967:  */
1.38      gwr       968: static void
1.76      gwr       969: pmeg_free(pmegp)
1.38      gwr       970:        pmeg_t pmegp;
                    971: {
1.78      gwr       972:
1.38      gwr       973:        CHECK_SPL();
                    974:
1.80      gwr       975: #ifdef DIAGNOSTIC
                    976:        /* Caller should verify that it's empty. */
1.38      gwr       977:        if (pmegp->pmeg_vpages != 0)
                    978:                panic("pmeg_free: vpages");
                    979: #endif
                    980:
                    981:        switch (pmegp->pmeg_qstate) {
                    982:        case PMEGQ_ACTIVE:
                    983:                TAILQ_REMOVE(&pmeg_active_queue, pmegp, pmeg_link);
                    984:                break;
                    985:        case PMEGQ_INACTIVE:
                    986:                TAILQ_REMOVE(&pmeg_inactive_queue, pmegp, pmeg_link);
                    987:                break;
                    988:        case PMEGQ_KERNEL:
                    989:                TAILQ_REMOVE(&pmeg_kernel_queue, pmegp, pmeg_link);
                    990:                break;
                    991:        default:
                    992:                panic("pmeg_free: releasing bad pmeg");
                    993:                break;
                    994:        }
                    995:
1.28      gwr       996: #ifdef PMAP_DEBUG
1.38      gwr       997:        if (pmegp->pmeg_index == pmap_db_watchpmeg) {
1.81      gwr       998:                db_printf("pmeg_free: watch pmeg 0x%x\n",
1.38      gwr       999:                           pmegp->pmeg_index);
                   1000:                Debugger();
                   1001:        }
1.28      gwr      1002: #endif
                   1003:
1.38      gwr      1004:        pmegp->pmeg_owner = NULL;
                   1005:        pmegp->pmeg_qstate = PMEGQ_FREE;
                   1006:        TAILQ_INSERT_TAIL(&pmeg_free_queue, pmegp, pmeg_link);
                   1007: }
                   1008:
                   1009: /*
                   1010:  * Find a PMEG that was put on the inactive queue when we
                   1011:  * had our context stolen.  If found, move to active queue.
                   1012:  */
                   1013: static pmeg_t
                   1014: pmeg_cache(pmap, va)
                   1015:        pmap_t pmap;
                   1016:        vm_offset_t va;
                   1017: {
1.39      gwr      1018:        int sme, segnum;
1.38      gwr      1019:        pmeg_t pmegp;
                   1020:
                   1021:        CHECK_SPL();
1.26      gwr      1022:
1.80      gwr      1023: #ifdef DIAGNOSTIC
1.50      gwr      1024:        if (pmap == kernel_pmap)
                   1025:                panic("pmeg_cache: kernel_pmap");
1.39      gwr      1026:        if (va & SEGOFSET) {
1.73      fair     1027:                panic("pmap:pmeg_cache: va=0x%lx", va);
1.39      gwr      1028:        }
                   1029: #endif
                   1030:
1.38      gwr      1031:        if (pmap->pm_segmap == NULL)
                   1032:                return PMEG_NULL;
1.80      gwr      1033:
1.38      gwr      1034:        segnum = VA_SEGNUM(va);
                   1035:        if (segnum > NUSEG)             /* out of range */
                   1036:                return PMEG_NULL;
1.80      gwr      1037:
1.39      gwr      1038:        sme = pmap->pm_segmap[segnum];
                   1039:        if (sme == SEGINV)      /* nothing cached */
1.38      gwr      1040:                return PMEG_NULL;
                   1041:
1.39      gwr      1042:        pmegp = pmeg_p(sme);
1.38      gwr      1043:
1.30      gwr      1044: #ifdef PMAP_DEBUG
1.38      gwr      1045:        if (pmegp->pmeg_index == pmap_db_watchpmeg) {
1.81      gwr      1046:                db_printf("pmeg_cache: watch pmeg 0x%x\n", pmegp->pmeg_index);
1.38      gwr      1047:                Debugger();
1.30      gwr      1048:        }
                   1049: #endif
1.38      gwr      1050:
                   1051:        /*
                   1052:         * Our segmap named a PMEG.  If it is no longer ours,
                   1053:         * invalidate that entry in our segmap and return NULL.
                   1054:         */
                   1055:        if ((pmegp->pmeg_owner != pmap) ||
                   1056:                (pmegp->pmeg_version != pmap->pm_version) ||
                   1057:                (pmegp->pmeg_va != va))
                   1058:        {
1.30      gwr      1059: #ifdef PMAP_DEBUG
1.81      gwr      1060:                db_printf("pmap:pmeg_cache: invalid pmeg: sme=0x%x\n", sme);
1.39      gwr      1061:                pmeg_print(pmegp);
1.78      gwr      1062:                Debugger();
1.38      gwr      1063: #endif
                   1064:                pmap->pm_segmap[segnum] = SEGINV;
                   1065:                return PMEG_NULL; /* cache lookup failed */
1.30      gwr      1066:        }
1.38      gwr      1067:
1.80      gwr      1068: #ifdef DIAGNOSTIC
1.38      gwr      1069:        /* Make sure it is on the inactive queue. */
                   1070:        if (pmegp->pmeg_qstate != PMEGQ_INACTIVE)
1.80      gwr      1071:                panic("pmeg_cache: pmeg was taken: %p", pmegp);
1.30      gwr      1072: #endif
1.26      gwr      1073:
1.38      gwr      1074:        TAILQ_REMOVE(&pmeg_inactive_queue, pmegp, pmeg_link);
                   1075:        pmegp->pmeg_qstate = PMEGQ_ACTIVE;
                   1076:        TAILQ_INSERT_TAIL(&pmeg_active_queue, pmegp, pmeg_link);
1.30      gwr      1077:
1.38      gwr      1078:        return pmegp;
                   1079: }
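/*
 * Illustrative sketch, not part of the original source: a typical
 * caller in the user enter/fault path would try this cache before
 * allocating a fresh PMEG (pmeg_allocate() stands for whatever
 * allocator that path really uses, and segva is assumed to be
 * segment-aligned):
 *
 *	pmegp = pmeg_cache(pmap, segva);
 *	if (pmegp == PMEG_NULL)
 *		pmegp = pmeg_allocate(pmap, segva);
 *	set_segmap(segva, pmegp->pmeg_index);
 */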
1.26      gwr      1080:
1.78      gwr      1081: #ifdef PMAP_DEBUG
1.75      gwr      1082: static void
1.78      gwr      1083: pmeg_verify_empty(va)
                   1084:        vm_offset_t va;
1.38      gwr      1085: {
1.78      gwr      1086:        vm_offset_t eva;
                   1087:        int pte;
1.29      gwr      1088:
1.78      gwr      1089:        for (eva = va + NBSG;  va < eva; va += NBPG) {
                   1090:                pte = get_pte(va);
                   1091:                if (pte & PG_VALID)
                   1092:                        panic("pmeg_verify_empty");
                   1093:        }
                   1094: }
                   1095: #endif /* PMAP_DEBUG */
1.1       glass    1096:
1.26      gwr      1097:
1.78      gwr      1098: /****************************************************************
                    1099:  * Physical-to-virtual lookup support
1.84      gwr      1100:  *
                   1101:  * Need memory for the pv_alloc/pv_free list heads
                   1102:  * and elements.  We know how many to allocate since
                   1103:  * there is one list head for each physical page, and
                   1104:  * at most one element for each PMEG slot.
1.78      gwr      1105:  */
                   1106: static void
                   1107: pv_init()
1.38      gwr      1108: {
1.84      gwr      1109:        int npp, nvp, sz;
                   1110:        pv_entry_t pv;
                   1111:        char *p;
                   1112:
                   1113:        /* total allocation size */
                   1114:        sz = 0;
                   1115:
                   1116:        /*
                   1117:         * Data for each physical page.
                   1118:         * Each "mod/ref" flag is a char.
                   1119:         * Each PV head is a pointer.
                   1120:         * Note physmem is in pages.
                   1121:         */
                   1122:        npp = ALIGN(physmem);
                   1123:        sz += (npp * sizeof(*pv_flags_tbl));
                   1124:        sz += (npp * sizeof(*pv_head_tbl));
                   1125:
                   1126:        /*
                   1127:         * Data for each virtual page (all PMEGs).
                   1128:         * One pv_entry for each page frame.
                   1129:         */
                   1130:        nvp = NPMEG * NPAGSEG;
                   1131:        sz += (nvp * sizeof(*pv_free_list));
1.38      gwr      1132:
1.84      gwr      1133:        /* Now allocate the whole thing. */
                   1134:        sz = m68k_round_page(sz);
1.110     mrg      1135:        p = (char*) uvm_km_alloc(kernel_map, sz);
1.84      gwr      1136:        if (p == NULL)
                   1137:                panic("pmap:pv_init: alloc failed");
                   1138:        bzero(p, sz);
1.29      gwr      1139:
1.84      gwr      1140:        /* Now divide up the space. */
                   1141:        pv_flags_tbl = (void *) p;
                   1142:        p += (npp * sizeof(*pv_flags_tbl));
                   1143:        pv_head_tbl = (void*) p;
                   1144:        p += (npp * sizeof(*pv_head_tbl));
                   1145:        pv_free_list = (void *) p;
                   1146:        p += (nvp * sizeof(*pv_free_list));
                   1147:
                   1148:        /* Finally, make pv_free_list into a list. */
                   1149:        for (pv = pv_free_list; (char*)pv < p; pv++)
                   1150:                pv->pv_next = &pv[1];
                   1151:        pv[-1].pv_next = 0;
1.78      gwr      1152:
                   1153:        pv_initialized++;
1.1       glass    1154: }
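/*
 * Sizing sketch with made-up numbers (not from the original source):
 * given, say, physmem = 2048 pages, NPMEG = 256 and NPAGSEG = 16,
 * pv_init() above computes roughly
 *
 *	npp = 2048;		(one flags byte + one list head per page)
 *	nvp = 256 * 16;		(one pv_entry per PMEG page slot)
 *	sz  = npp * sizeof(u_char)
 *	    + npp * sizeof(pv_entry_t)
 *	    + nvp * sizeof(struct pv_entry);
 *
 * all rounded up to a whole page and carved from a single
 * uvm_km_alloc() chunk.
 */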
                   1155:
1.38      gwr      1156: /*
                   1157:  * Set or clear bits in all PTEs mapping a page.
                   1158:  * Also does syncflags work while we are there...
                   1159:  */
                   1160: static void
1.84      gwr      1161: pv_changepte(pa, set_bits, clear_bits)
                   1162:        vm_offset_t pa;
1.38      gwr      1163:        int set_bits;
                   1164:        int clear_bits;
                   1165: {
1.84      gwr      1166:        pv_entry_t *head, pv;
                   1167:        u_char *pv_flags;
1.38      gwr      1168:        pmap_t pmap;
                   1169:        vm_offset_t va;
1.80      gwr      1170:        int pte, sme;
1.38      gwr      1171:        int saved_ctx;
                   1172:        boolean_t in_ctx;
1.80      gwr      1173:        u_int flags;
                   1174:
                   1175:        CHECK_SPL();
1.38      gwr      1176:
                   1177:        if (!pv_initialized)
                   1178:                return;
1.80      gwr      1179:        if ((set_bits == 0) && (clear_bits == 0))
                   1180:                return;
                   1181:
1.84      gwr      1182:        pv_flags = pa_to_pvflags(pa);
                   1183:        head     = pa_to_pvhead(pa);
                   1184:
1.80      gwr      1185:        /* If no mappings, no work to do. */
1.84      gwr      1186:        if (*head == NULL)
1.38      gwr      1187:                return;
1.80      gwr      1188:
1.50      gwr      1189: #ifdef DIAGNOSTIC
                   1190:        /* This function should only clear these bits: */
                   1191:        if (clear_bits & ~(PG_WRITE | PG_NC | PG_REF | PG_MOD))
1.73      fair     1192:                panic("pv_changepte: clear=0x%x\n", clear_bits);
1.50      gwr      1193: #endif
1.38      gwr      1194:
1.80      gwr      1195:        flags = 0;
1.38      gwr      1196:        saved_ctx = get_context();
1.84      gwr      1197:        for (pv = *head; pv != NULL; pv = pv->pv_next) {
1.38      gwr      1198:                pmap = pv->pv_pmap;
                   1199:                va = pv->pv_va;
1.65      gwr      1200:
1.38      gwr      1201: #ifdef DIAGNOSTIC
                   1202:                if (pmap == NULL)
                   1203:                        panic("pv_changepte: null pmap");
1.84      gwr      1204:                if (pmap->pm_segmap == NULL)
                   1205:                        panic("pv_changepte: null segmap");
1.38      gwr      1206: #endif
                   1207:
1.107     gwr      1208:                /* XXX don't write protect pager mappings */
                   1209:                if (clear_bits & PG_WRITE) {
                   1210:                        if (va >= PAGER_SVA && va < PAGER_EVA) {
                   1211: #ifdef PMAP_DEBUG
                   1212:                                /* XXX - Does this actually happen? */
                   1213:                                printf("pv_changepte: in pager!\n");
                   1214:                                Debugger();
                   1215: #endif
                   1216:                                continue;
                   1217:                        }
                   1218:                }
                   1219:
1.38      gwr      1220:                /* Is the PTE currently accessible in some context? */
                   1221:                in_ctx = FALSE;
1.107     gwr      1222:                sme = SEGINV;   /* kill warning */
1.50      gwr      1223:                if (pmap == kernel_pmap)
1.38      gwr      1224:                        in_ctx = TRUE;
                   1225:                else if (has_context(pmap)) {
                   1226:                        /* PMEG may be inactive. */
                   1227:                        set_context(pmap->pm_ctxnum);
                   1228:                        sme = get_segmap(va);
                   1229:                        if (sme != SEGINV)
                   1230:                                in_ctx = TRUE;
                   1231:                }
                   1232:
                   1233:                if (in_ctx == TRUE) {
                   1234:                        /*
                   1235:                         * The PTE is in the current context.
1.52      gwr      1236:                         * Make sure PTE is up-to-date with VAC.
1.38      gwr      1237:                         */
1.52      gwr      1238: #ifdef HAVECACHE
                   1239:                        if (cache_size)
                   1240:                                cache_flush_page(va);
                   1241: #endif
1.38      gwr      1242:                        pte = get_pte(va);
                   1243:                } else {
                   1244:                        /*
                   1245:                         * The PTE is not in any context.
                   1246:                         */
                   1247:                        sme = pmap->pm_segmap[VA_SEGNUM(va)];
                   1248:                        if (sme == SEGINV)
                   1249:                                panic("pv_changepte: SEGINV");
                   1250:                        pte = get_pte_pmeg(sme, VA_PTE_NUM(va));
                   1251:                }
1.1       glass    1252:
1.38      gwr      1253: #ifdef DIAGNOSTIC
1.92      gwr      1254:                /* PV entries point only to valid mappings. */
1.38      gwr      1255:                if ((pte & PG_VALID) == 0)
1.73      fair     1256:                        panic("pv_changepte: not PG_VALID at va=0x%lx\n", va);
1.38      gwr      1257: #endif
                   1258:                /* Get these while it's easy. */
                   1259:                if (pte & PG_MODREF) {
1.80      gwr      1260:                        flags |= (pte & PG_MODREF);
1.38      gwr      1261:                        pte &= ~PG_MODREF;
                   1262:                }
                   1263:
                   1264:                /* Finally, set and clear some bits. */
                   1265:                pte |= set_bits;
                   1266:                pte &= ~clear_bits;
                   1267:
                   1268:                if (in_ctx == TRUE) {
1.52      gwr      1269:                        /* Did cache flush above. */
1.38      gwr      1270:                        set_pte(va, pte);
                   1271:                } else {
                   1272:                        set_pte_pmeg(sme, VA_PTE_NUM(va), pte);
                   1273:                }
                   1274:        }
1.80      gwr      1275:        set_context(saved_ctx);
1.1       glass    1276:
1.84      gwr      1277:        *pv_flags |= (flags >> PV_SHIFT);
1.38      gwr      1278: }
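/*
 * Usage sketch (illustrative): revoking write permission on every
 * mapping of a physical page, as a copy-on-write step might, is
 * simply:
 *
 *	pv_changepte(pa, 0, PG_WRITE);
 *
 * Any mod/ref bits found along the way are folded into the page's
 * pv_flags byte (shifted down by PV_SHIFT) rather than lost.
 */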
1.1       glass    1279:
1.38      gwr      1280: /*
1.84      gwr      1281:  * Return the ref and mod bits from the pvlist,
                    1282:  * and turn off those same bits in the hardware PTEs.
1.38      gwr      1283:  */
1.84      gwr      1284: static u_int
                   1285: pv_syncflags(pv)
                   1286:        pv_entry_t pv;
1.38      gwr      1287: {
                   1288:        pmap_t pmap;
                   1289:        vm_offset_t va;
1.80      gwr      1290:        int pte, sme;
1.38      gwr      1291:        int saved_ctx;
                   1292:        boolean_t in_ctx;
1.80      gwr      1293:        u_int flags;
                   1294:
                   1295:        CHECK_SPL();
1.38      gwr      1296:
                   1297:        if (!pv_initialized)
1.84      gwr      1298:                return(0);
1.80      gwr      1299:
                   1300:        /* If no mappings, no work to do. */
1.84      gwr      1301:        if (pv == NULL)
                   1302:                return (0);
1.38      gwr      1303:
1.80      gwr      1304:        flags = 0;
1.38      gwr      1305:        saved_ctx = get_context();
1.84      gwr      1306:        for ( ; pv != NULL; pv = pv->pv_next) {
1.38      gwr      1307:                pmap = pv->pv_pmap;
                   1308:                va = pv->pv_va;
1.65      gwr      1309:                sme = SEGINV;   /* kill warning */
                   1310:
1.38      gwr      1311: #ifdef DIAGNOSTIC
                   1312:                /*
                   1313:                 * Only the head may have a null pmap, and
                   1314:                 * we checked for that above.
                   1315:                 */
                   1316:                if (pmap == NULL)
                   1317:                        panic("pv_syncflags: null pmap");
1.84      gwr      1318:                if (pmap->pm_segmap == NULL)
                   1319:                        panic("pv_syncflags: null segmap");
1.38      gwr      1320: #endif
                   1321:
                    1322:                /* Is the PTE currently accessible in some context? */
                   1323:                in_ctx = FALSE;
1.50      gwr      1324:                if (pmap == kernel_pmap)
1.38      gwr      1325:                        in_ctx = TRUE;
                   1326:                else if (has_context(pmap)) {
                   1327:                        /* PMEG may be inactive. */
                   1328:                        set_context(pmap->pm_ctxnum);
                   1329:                        sme = get_segmap(va);
                   1330:                        if (sme != SEGINV)
                   1331:                                in_ctx = TRUE;
                   1332:                }
                   1333:
                   1334:                if (in_ctx == TRUE) {
                   1335:                        /*
                   1336:                         * The PTE is in the current context.
1.52      gwr      1337:                         * Make sure PTE is up-to-date with VAC.
1.38      gwr      1338:                         */
1.52      gwr      1339: #ifdef HAVECACHE
                   1340:                        if (cache_size)
                   1341:                                cache_flush_page(va);
                   1342: #endif
1.38      gwr      1343:                        pte = get_pte(va);
                   1344:                } else {
                   1345:                        /*
                   1346:                         * The PTE is not in any context.
                   1347:                         * XXX - Consider syncing MODREF bits
                    1348:                 * when the PMEG loses its context?
                   1349:                         */
                   1350:                        sme = pmap->pm_segmap[VA_SEGNUM(va)];
                   1351:                        if (sme == SEGINV)
                   1352:                                panic("pv_syncflags: SEGINV");
                   1353:                        pte = get_pte_pmeg(sme, VA_PTE_NUM(va));
                   1354:                }
1.29      gwr      1355:
1.38      gwr      1356: #ifdef DIAGNOSTIC
1.92      gwr      1357:                /* PV entries point only to valid mappings. */
1.38      gwr      1358:                if ((pte & PG_VALID) == 0)
1.73      fair     1359:                        panic("pv_syncflags: not PG_VALID at va=0x%lx\n", va);
1.38      gwr      1360: #endif
                   1361:                /* OK, do what we came here for... */
                   1362:                if (pte & PG_MODREF) {
1.80      gwr      1363:                        flags |= (pte & PG_MODREF);
1.38      gwr      1364:                        pte &= ~PG_MODREF;
                   1365:                }
                   1366:
                   1367:                if (in_ctx == TRUE) {
1.52      gwr      1368:                        /* Did cache flush above. */
1.38      gwr      1369:                        set_pte(va, pte);
                   1370:                } else {
                   1371:                        set_pte_pmeg(sme, VA_PTE_NUM(va), pte);
                   1372:                }
                   1373:        }
                   1374:        set_context(saved_ctx);
1.19      glass    1375:
1.84      gwr      1376:        return (flags >> PV_SHIFT);
1.1       glass    1377: }
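/*
 * Usage sketch (illustrative; PV_MOD names the software "modified"
 * flag and is an assumption about the header definitions): a
 * pmap_is_modified()-style query would harvest the hardware bits
 * first, e.g.:
 *
 *	pv_flags = pa_to_pvflags(pa);
 *	*pv_flags |= pv_syncflags(*pa_to_pvhead(pa));
 *	modified = (*pv_flags & PV_MOD) != 0;
 */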
                   1378:
1.78      gwr      1379: /* Remove all mappings for the physical page. */
1.38      gwr      1380: static void
                   1381: pv_remove_all(pa)
                   1382:        vm_offset_t pa;
                   1383: {
1.84      gwr      1384:        pv_entry_t *head, pv;
1.38      gwr      1385:        pmap_t pmap;
                   1386:        vm_offset_t va;
1.1       glass    1387:
1.78      gwr      1388:        CHECK_SPL();
                   1389:
1.19      glass    1390: #ifdef PMAP_DEBUG
1.38      gwr      1391:        if (pmap_debug & PMD_REMOVE)
1.73      fair     1392:                printf("pv_remove_all(0x%lx)\n", pa);
1.38      gwr      1393: #endif
1.78      gwr      1394:
1.38      gwr      1395:        if (!pv_initialized)
                   1396:                return;
1.26      gwr      1397:
1.84      gwr      1398:        head = pa_to_pvhead(pa);
                   1399:        while ((pv = *head) != NULL) {
1.38      gwr      1400:                pmap = pv->pv_pmap;
                   1401:                va   = pv->pv_va;
1.76      gwr      1402:                pmap_remove1(pmap, va, va + NBPG);
1.38      gwr      1403: #ifdef PMAP_DEBUG
                   1404:                /* Make sure it went away. */
1.84      gwr      1405:                if (pv == *head) {
1.81      gwr      1406:                        db_printf("pv_remove_all: head unchanged for pa=0x%lx\n", pa);
1.38      gwr      1407:                        Debugger();
                   1408:                }
                   1409: #endif
                   1410:        }
1.1       glass    1411: }
                   1412:
1.38      gwr      1413: /*
1.42      gwr      1414:  * The pmap system is asked to lookup all mappings that point to a
1.38      gwr      1415:  * given physical memory address.  This function adds a new element
                   1416:  * to the list of mappings maintained for the given physical address.
1.42      gwr      1417:  * Returns PV_NC if the (new) pvlist says that the address cannot
1.38      gwr      1418:  * be cached.
                   1419:  */
                   1420: static int
1.92      gwr      1421: pv_link(pmap, pte, va)
1.38      gwr      1422:        pmap_t pmap;
1.92      gwr      1423:        int pte;
                   1424:        vm_offset_t va;
1.38      gwr      1425: {
1.92      gwr      1426:        vm_offset_t pa;
1.84      gwr      1427:        pv_entry_t *head, pv;
                   1428:        u_char *pv_flags;
1.92      gwr      1429:        int flags;
1.38      gwr      1430:
                   1431:        if (!pv_initialized)
                   1432:                return 0;
1.1       glass    1433:
1.80      gwr      1434:        CHECK_SPL();
                   1435:
1.92      gwr      1436:        /* Only the non-cached bit is of interest here. */
                   1437:        flags = (pte & PG_NC) ? PV_NC : 0;
                   1438:        pa = PG_PA(pte);
                   1439:
1.19      glass    1440: #ifdef PMAP_DEBUG
1.38      gwr      1441:        if ((pmap_debug & PMD_LINK) || (va == pmap_db_watchva)) {
1.92      gwr      1442:                printf("pv_link(%p, 0x%x, 0x%lx)\n", pmap, pte, va);
1.38      gwr      1443:                /* pv_print(pa); */
                   1444:        }
                   1445: #endif
1.1       glass    1446:
1.84      gwr      1447:        pv_flags = pa_to_pvflags(pa);
                   1448:        head     = pa_to_pvhead(pa);
1.2       glass    1449:
1.84      gwr      1450: #ifdef DIAGNOSTIC
                   1451:        /* See if this mapping is already in the list. */
                   1452:        for (pv = *head; pv != NULL; pv = pv->pv_next) {
1.81      gwr      1453:                if ((pv->pv_pmap == pmap) && (pv->pv_va == va))
1.73      fair     1454:                        panic("pv_link: duplicate entry for PA=0x%lx", pa);
1.38      gwr      1455:        }
1.19      glass    1456: #endif
1.26      gwr      1457:
1.38      gwr      1458:        /*
1.84      gwr      1459:         * Does this new mapping cause VAC alias problems?
1.38      gwr      1460:         */
1.84      gwr      1461:        *pv_flags |= flags;
                   1462:        if ((*pv_flags & PV_NC) == 0) {
                   1463:                for (pv = *head; pv != NULL; pv = pv->pv_next) {
1.81      gwr      1464:                        if (BADALIAS(va, pv->pv_va)) {
1.84      gwr      1465:                                *pv_flags |= PV_NC;
                   1466:                                pv_changepte(pa, PG_NC, 0);
1.60      gwr      1467:                                pmap_stats.ps_vac_uncached++;
1.38      gwr      1468:                                break;
                   1469:                        }
                   1470:                }
                   1471:        }
1.84      gwr      1472:
                   1473:        /* Allocate a PV element (pv_alloc()). */
                   1474:        pv = pv_free_list;
                   1475:        if (pv == NULL)
                   1476:                panic("pv_link: pv_alloc");
                   1477:        pv_free_list = pv->pv_next;
                   1478:        pv->pv_next = 0;
                   1479:
                   1480:        /* Insert new entry at the head. */
1.81      gwr      1481:        pv->pv_pmap = pmap;
                   1482:        pv->pv_va   = va;
1.84      gwr      1483:        pv->pv_next = *head;
                   1484:        *head = pv;
1.38      gwr      1485:
1.84      gwr      1486:        return (*pv_flags & PV_NC);
1.38      gwr      1487: }
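/*
 * Note on the alias check above (an assumption, since BADALIAS is
 * defined elsewhere): the sun3 VAC indexes on low virtual address
 * bits, so two mappings of one physical page are only safe when
 * those index bits agree.  For example, with a hypothetical 64KB
 * index:
 *
 *	va1 = 0x00020000;
 *	va2 = 0x00034000;
 *	if (BADALIAS(va1, va2))
 *		pv_changepte(pa, PG_NC, 0);	(uncache all mappings)
 */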
                   1488:
                   1489: /*
                   1490:  * pv_unlink is a helper function for pmap_remove.
                   1491:  * It removes the appropriate (pmap, pa, va) entry.
                   1492:  *
                   1493:  * Once the entry is removed, if the pv_table head has the cache
                   1494:  * inhibit bit set, see if we can turn that off; if so, walk the
                   1495:  * pvlist and turn off PG_NC in each PTE.  (The pvlist is by
                   1496:  * definition nonempty, since it must have at least two elements
                   1497:  * in it to have PV_NC set, and we only remove one here.)
                   1498:  */
                   1499: static void
1.92      gwr      1500: pv_unlink(pmap, pte, va)
1.38      gwr      1501:        pmap_t pmap;
1.92      gwr      1502:        int pte;
                   1503:        vm_offset_t va;
1.38      gwr      1504: {
1.92      gwr      1505:        vm_offset_t pa;
1.84      gwr      1506:        pv_entry_t *head, *ppv, pv;
                   1507:        u_char *pv_flags;
1.38      gwr      1508:
                   1509:        if (!pv_initialized)
                   1510:                return;
                   1511:
1.80      gwr      1512:        CHECK_SPL();
                   1513:
1.92      gwr      1514:        pa = PG_PA(pte);
1.18      glass    1515: #ifdef PMAP_DEBUG
1.80      gwr      1516:        if ((pmap_debug & PMD_LINK) || (va == pmap_db_watchva)) {
1.92      gwr      1517:                printf("pv_unlink(%p, 0x%x, 0x%lx)\n", pmap, pte, va);
1.80      gwr      1518:                /* pv_print(pa); */
1.26      gwr      1519:        }
1.18      glass    1520: #endif
1.81      gwr      1521:
1.84      gwr      1522:        pv_flags = pa_to_pvflags(pa);
                   1523:        head     = pa_to_pvhead(pa);
1.38      gwr      1524:
1.84      gwr      1525:        /*
                   1526:         * Find the entry.
                   1527:         */
                   1528:        ppv = head;
                   1529:        pv = *ppv;
                   1530:        while (pv) {
                   1531:                if ((pv->pv_pmap == pmap) && (pv->pv_va == va))
                   1532:                        goto found;
                   1533:                ppv = &pv->pv_next;
                   1534:                pv  =  pv->pv_next;
                   1535:        }
                   1536: #ifdef PMAP_DEBUG
                   1537:        db_printf("pv_unlink: not found (pa=0x%lx,va=0x%lx)\n", pa, va);
                   1538:        Debugger();
1.38      gwr      1539: #endif
1.84      gwr      1540:        return;
                   1541:
                   1542: found:
                   1543:        /* Unlink this entry from the list and clear it. */
                   1544:        *ppv = pv->pv_next;
                   1545:        pv->pv_pmap = NULL;
                   1546:        pv->pv_va   = 0;
                   1547:
                   1548:        /* Insert it on the head of the free list. (pv_free()) */
                   1549:        pv->pv_next = pv_free_list;
                   1550:        pv_free_list = pv;
                   1551:        pv = NULL;
                   1552:
                   1553:        /* Do any non-cached mappings remain? */
                   1554:        if ((*pv_flags & PV_NC) == 0)
                   1555:                return;
                   1556:        if ((pv = *head) == NULL)
                   1557:                return;
1.1       glass    1558:
                   1559:        /*
1.84      gwr      1560:         * Have non-cached mappings.  See if we can fix that now.
1.1       glass    1561:         */
1.84      gwr      1562:        va = pv->pv_va;
                   1563:        for (pv = pv->pv_next; pv != NULL; pv = pv->pv_next) {
                   1564:                /* If there is a DVMA mapping, leave it NC. */
1.98      gwr      1565:                if (va >= DVMA_MAP_BASE)
1.84      gwr      1566:                        return;
                   1567:                /* If there are VAC alias problems, leave NC. */
                   1568:                if (BADALIAS(va, pv->pv_va))
                   1569:                        return;
1.1       glass    1570:        }
1.84      gwr      1571:        /* OK, there are no "problem" mappings. */
                   1572:        *pv_flags &= ~PV_NC;
                   1573:        pv_changepte(pa, 0, PG_NC);
                   1574:        pmap_stats.ps_vac_recached++;
1.1       glass    1575: }
                   1576:
1.38      gwr      1577:
1.78      gwr      1578: /****************************************************************
                   1579:  * Bootstrap and Initialization, etc.
                   1580:  */
1.38      gwr      1581:
                   1582: void
                   1583: pmap_common_init(pmap)
                   1584:        pmap_t pmap;
                   1585: {
                   1586:        bzero(pmap, sizeof(struct pmap));
                   1587:        pmap->pm_refcount=1;
                   1588:        pmap->pm_version = pmap_version++;
1.83      gwr      1589:        pmap->pm_ctxnum = EMPTY_CONTEXT;
1.38      gwr      1590:        simple_lock_init(&pmap->pm_lock);
                   1591: }
                   1592:
                   1593: /*
                   1594:  * Prepare the kernel for VM operations.
1.99      gwr      1595:  * This is called by locore2.c:_vm_init()
1.38      gwr      1596:  * after the "start/end" globals are set.
1.75      gwr      1597:  * This function must NOT leave context zero.
1.38      gwr      1598:  */
                   1599: void
1.75      gwr      1600: pmap_bootstrap(nextva)
                   1601:        vm_offset_t nextva;
1.38      gwr      1602: {
1.99      gwr      1603:        struct sunromvec *rvec;
1.75      gwr      1604:        vm_offset_t va, eva;
                   1605:        int i, pte, sme;
1.78      gwr      1606:        extern char etext[];
1.75      gwr      1607:
1.76      gwr      1608:        nextva = m68k_round_page(nextva);
1.75      gwr      1609:        rvec = romVectorPtr;
                   1610:
1.76      gwr      1611:        /* Steal some special-purpose, already mapped pages? */
                   1612:
1.75      gwr      1613:        /*
                   1614:         * Determine the range of kernel virtual space available.
1.76      gwr      1615:         * It is segment-aligned to simplify PMEG management.
1.75      gwr      1616:         */
1.76      gwr      1617:        virtual_avail = m68k_round_seg(nextva);
1.75      gwr      1618:        virtual_end = VM_MAX_KERNEL_ADDRESS;
                   1619:
                   1620:        /*
                   1621:         * Determine the range of physical memory available.
                   1622:         * Physical memory at zero was remapped to KERNBASE.
                   1623:         */
1.76      gwr      1624:        avail_start = nextva - KERNBASE;
1.75      gwr      1625:        if (rvec->romvecVersion < 1) {
                   1626:                mon_printf("Warning: ancient PROM version=%d\n",
                   1627:                                   rvec->romvecVersion);
                   1628:                /* Guess that PROM version 0.X used two pages. */
                   1629:                avail_end = *rvec->memorySize - (2*NBPG);
                   1630:        } else {
                   1631:                /* PROM version 1 or later. */
                   1632:                avail_end = *rvec->memoryAvail;
                   1633:        }
                   1634:        avail_end = m68k_trunc_page(avail_end);
                   1635:
                   1636:        /*
1.76      gwr      1637:         * Report the actual amount of physical memory,
                   1638:         * even though the PROM takes a few pages.
1.75      gwr      1639:         */
1.76      gwr      1640:        physmem = (btoc(avail_end) + 0xF) & ~0xF;
1.75      gwr      1641:
                   1642:        /*
1.76      gwr      1643:         * On the Sun3/50, the video frame buffer is located at
                    1644:         * physical address 1MB so we must step over it.
1.75      gwr      1645:         */
1.76      gwr      1646:        if (cpu_machine_id == SUN3_MACH_50) {
                   1647:                hole_start = m68k_trunc_page(OBMEM_BW50_ADDR);
                   1648:                hole_size  = m68k_round_page(OBMEM_BW2_SIZE);
1.91      gwr      1649:                if (avail_start > hole_start) {
1.76      gwr      1650:                        mon_printf("kernel too large for Sun3/50\n");
                   1651:                        sunmon_abort();
                   1652:                }
                   1653:        }
1.75      gwr      1654:
                   1655:        /*
                   1656:         * Done allocating PAGES of virtual space, so
                   1657:         * clean out the rest of the last used segment.
                   1658:         */
1.76      gwr      1659:        for (va = nextva; va < virtual_avail; va += NBPG)
1.75      gwr      1660:                set_pte(va, PG_INVAL);
                   1661:
                   1662:        /*
                    1663:         * Now that we are done stealing physical pages, etc.,
                    1664:         * figure out which PMEGs are used by those mappings
1.76      gwr      1665:         * and either reserve them or clear them out.
                    1666:         * -- but first, init PMEG management.
                    1667:         * This puts all PMEGs in the free list.
                    1668:         * We will allocate the in-use ones.
1.75      gwr      1669:         */
1.76      gwr      1670:        pmeg_init();
                   1671:
                   1672:        /*
                   1673:         * Unmap user virtual segments.
                   1674:         * VA range: [0 .. KERNBASE]
                   1675:         */
                   1676:        for (va = 0; va < KERNBASE; va += NBSG)
                   1677:                set_segmap(va, SEGINV);
1.75      gwr      1678:
                   1679:        /*
                   1680:         * Reserve PMEGS for kernel text/data/bss
                   1681:         * and the misc pages taken above.
1.76      gwr      1682:         * VA range: [KERNBASE .. virtual_avail]
1.75      gwr      1683:         */
1.76      gwr      1684:        for ( ; va < virtual_avail; va += NBSG) {
1.75      gwr      1685:                sme = get_segmap(va);
                   1686:                if (sme == SEGINV) {
                   1687:                        mon_printf("kernel text/data/bss not mapped\n");
                   1688:                        sunmon_abort();
                   1689:                }
1.76      gwr      1690:                pmeg_reserve(sme);
1.75      gwr      1691:        }
                   1692:
                   1693:        /*
1.76      gwr      1694:         * Unmap kernel virtual space.  Make sure to leave no valid
1.75      gwr      1695:         * segmap entries in the MMU unless pmeg_array records them.
1.76      gwr      1696:         * VA range: [virtual_avail .. virtual_end]
1.75      gwr      1697:         */
1.76      gwr      1698:        for ( ; va < virtual_end; va += NBSG)
1.75      gwr      1699:                set_segmap(va, SEGINV);
                   1700:
                   1701:        /*
1.76      gwr      1702:         * Reserve PMEGs used by the PROM monitor (device mappings).
                   1703:         * Free up any pmegs in this range which have no mappings.
                   1704:         * VA range: [0x0FE00000 .. 0x0FF00000]
1.75      gwr      1705:         */
1.99      gwr      1706:        pmeg_mon_init(SUN3_MONSTART, SUN3_MONEND, TRUE);
1.75      gwr      1707:
                   1708:        /*
1.76      gwr      1709:         * Unmap any pmegs left in DVMA space by the PROM.
                   1710:         * DO NOT kill the last one! (owned by the PROM!)
                   1711:         * VA range: [0x0FF00000 .. 0x0FFE0000]
1.75      gwr      1712:         */
1.99      gwr      1713:        pmeg_mon_init(SUN3_MONEND, SUN3_MONSHORTSEG, FALSE);
1.75      gwr      1714:
                   1715:        /*
                   1716:         * MONSHORTSEG contains MONSHORTPAGE which is a data page
1.76      gwr      1717:         * allocated by the PROM monitor.  Reserve the segment,
                   1718:         * but clear out all but the last PTE inside it.
                   1719:         * Note we use this for tmp_vpages.
1.75      gwr      1720:         */
1.99      gwr      1721:        va  = SUN3_MONSHORTSEG;
                   1722:        eva = SUN3_MONSHORTPAGE;
1.76      gwr      1723:        sme = get_segmap(va);
                   1724:        pmeg_reserve(sme);
                   1725:        for ( ; va < eva; va += NBPG)
1.75      gwr      1726:                set_pte(va, PG_INVAL);
                   1727:
                   1728:        /*
1.76      gwr      1729:         * Done reserving PMEGs and/or clearing out mappings.
                   1730:         *
                   1731:         * Now verify the mapping protections and such for the
                   1732:         * important parts of the address space (in VA order).
                   1733:         * Note that the Sun PROM usually leaves the memory
                   1734:         * mapped with everything non-cached...
1.75      gwr      1735:         */
                   1736:
                   1737:        /*
1.76      gwr      1738:         * Map the message buffer page at a constant location
                   1739:         * (physical address zero) so its contents will be
                   1740:         * preserved through a reboot.
1.75      gwr      1741:         */
                   1742:        va = KERNBASE;
                   1743:        pte = get_pte(va);
1.76      gwr      1744:        pte |= (PG_SYSTEM | PG_WRITE | PG_NC);
1.75      gwr      1745:        set_pte(va, pte);
1.76      gwr      1746:        va += NBPG;
1.75      gwr      1747:        /* Initialize msgbufaddr later, in machdep.c */
                   1748:
1.76      gwr      1749:        /* Next is the tmpstack page. */
                   1750:        pte = get_pte(va);
                   1751:        pte &= ~(PG_NC);
                   1752:        pte |= (PG_SYSTEM | PG_WRITE);
                   1753:        set_pte(va, pte);
                   1754:        va += NBPG;
1.75      gwr      1755:
                   1756:        /*
1.76      gwr      1757:         * Next is the kernel text.
                   1758:         *
1.75      gwr      1759:         * Verify protection bits on kernel text/data/bss
                   1760:         * All of kernel text, data, and bss are cached.
                   1761:         * Text is read-only (except in db_write_ktext).
                   1762:         */
                   1763:        eva = m68k_trunc_page(etext);
                   1764:        while (va < eva) {
                   1765:                pte = get_pte(va);
                   1766:                if ((pte & (PG_VALID|PG_TYPE)) != PG_VALID) {
                   1767:                        mon_printf("invalid page at 0x%x\n", va);
                   1768:                }
                   1769:                pte &= ~(PG_WRITE|PG_NC);
                   1770:                /* Kernel text is read-only */
                   1771:                pte |= (PG_SYSTEM);
                   1772:                set_pte(va, pte);
                   1773:                va += NBPG;
                   1774:        }
1.76      gwr      1775:        /* data, bss, etc. */
                   1776:        while (va < nextva) {
1.75      gwr      1777:                pte = get_pte(va);
                   1778:                if ((pte & (PG_VALID|PG_TYPE)) != PG_VALID) {
                   1779:                        mon_printf("invalid page at 0x%x\n", va);
                   1780:                }
                   1781:                pte &= ~(PG_NC);
                   1782:                pte |= (PG_SYSTEM | PG_WRITE);
                   1783:                set_pte(va, pte);
                   1784:                va += NBPG;
                   1785:        }
                   1786:
                   1787:        /*
                   1788:         * Duplicate all mappings in the current context into
                   1789:         * every other context.  We have to let the PROM do the
                   1790:         * actual segmap manipulation because we can only switch
1.76      gwr      1791:         * the MMU context after we are sure that the kernel is
                   1792:         * identically mapped in all contexts.  The PROM can do
                   1793:         * the job using hardware-dependent tricks...
1.75      gwr      1794:         */
                   1795: #ifdef DIAGNOSTIC
                    1796:        /* Note: PROM setcxsegmap function needs sfc=dfc=FC_CONTROL */
                   1797:        if ((getsfc() != FC_CONTROL) || (getdfc() != FC_CONTROL)) {
                   1798:                mon_printf("pmap_bootstrap: bad dfc or sfc\n");
                   1799:                sunmon_abort();
                   1800:        }
                   1801:        /* Near the beginning of locore.s we set context zero. */
                   1802:        if (get_context() != 0) {
                   1803:                mon_printf("pmap_bootstrap: not in context zero?\n");
                   1804:                sunmon_abort();
                   1805:        }
1.76      gwr      1806: #endif /* DIAGNOSTIC */
1.75      gwr      1807:        for (va = 0; va < (vm_offset_t) (NBSG * NSEGMAP); va += NBSG) {
1.76      gwr      1808:                /* Read the segmap entry from context zero... */
                   1809:                sme = get_segmap(va);
                   1810:                /* ... then copy it into all other contexts. */
1.75      gwr      1811:                for (i = 1; i < NCONTEXT; i++) {
                   1812:                        (*rvec->setcxsegmap)(i, va, sme);
                   1813:                }
                   1814:        }
                   1815:
1.38      gwr      1816:        /*
                   1817:         * Reserve a segment for the kernel to use to access a pmeg
                   1818:         * that is not currently mapped into any context/segmap.
                   1819:         * The kernel temporarily maps such a pmeg into this segment.
1.83      gwr      1820:         *
                   1821:         * XXX: Now that context zero is reserved as kernel-only,
                   1822:         * we could borrow context zero for these temporary uses.
1.38      gwr      1823:         */
                   1824:        temp_seg_va = virtual_avail;
                   1825:        virtual_avail += NBSG;
1.94      gwr      1826: #ifdef DIAGNOSTIC
1.67      gwr      1827:        if (temp_seg_va & SEGOFSET) {
                   1828:                mon_printf("pmap_bootstrap: temp_seg_va\n");
                   1829:                sunmon_abort();
                   1830:        }
1.38      gwr      1831: #endif
                   1832:
                   1833:        /* Initialization for pmap_next_page() */
                   1834:        avail_next = avail_start;
                   1835:
1.121   ! tsutsui  1836:        uvmexp.pagesize = NBPG;
1.110     mrg      1837:        uvm_setpagesize();
1.38      gwr      1838:
                   1839:        /* after setting up some structures */
                   1840:
1.50      gwr      1841:        pmap_common_init(kernel_pmap);
1.82      gwr      1842:        pmap_kernel_init(kernel_pmap);
1.38      gwr      1843:
                   1844:        context_init();
                   1845:
                   1846:        pmeg_clean_free();
1.101     gwr      1847:
                   1848:        pmap_page_upload();
1.38      gwr      1849: }
                   1850:
1.82      gwr      1851: /*
                   1852:  * Give the kernel pmap a segmap, just so there are not
                   1853:  * so many special cases required.  Maybe faster too,
                   1854:  * because this lets pmap_remove() and pmap_protect()
                   1855:  * use a S/W copy of the segmap to avoid function calls.
                   1856:  */
                   1857: void
                   1858: pmap_kernel_init(pmap)
                   1859:         pmap_t pmap;
                   1860: {
                   1861:        vm_offset_t va;
                   1862:        int i, sme;
                   1863:
                   1864:        for (i=0, va=0; i < NSEGMAP; i++, va+=NBSG) {
                   1865:                sme = get_segmap(va);
                   1866:                kernel_segmap[i] = sme;
                   1867:        }
                   1868:        pmap->pm_segmap = kernel_segmap;
                   1869: }
                   1870:
1.81      gwr      1871:
1.78      gwr      1872: /****************************************************************
                   1873:  * PMAP interface functions.
                   1874:  */
                   1875:
1.38      gwr      1876: /*
1.97      thorpej  1877:  * Support functions for vm_page_bootstrap().
1.38      gwr      1878:  */
                   1879:
                   1880: /*
                   1881:  * How much virtual space does this kernel have?
                   1882:  * (After mapping kernel text, data, etc.)
                   1883:  */
                   1884: void
                   1885: pmap_virtual_space(v_start, v_end)
                   1886:        vm_offset_t *v_start;
                   1887:        vm_offset_t *v_end;
1.1       glass    1888: {
1.38      gwr      1889:        *v_start = virtual_avail;
                   1890:        *v_end   = virtual_end;
1.1       glass    1891: }
                   1892:
1.101     gwr      1893: /* Provide memory to the VM system. */
                   1894: static void
                   1895: pmap_page_upload()
                   1896: {
                   1897:        int a, b, c, d;
                   1898:
                   1899:        if (hole_size) {
                   1900:                /*
                   1901:                 * Supply the memory in two segments so the
                   1902:                 * reserved memory (3/50 video ram at 1MB)
                   1903:                 * can be carved from the front of the 2nd.
                   1904:                 */
                   1905:                a = atop(avail_start);
                   1906:                b = atop(hole_start);
1.105     thorpej  1907:                uvm_page_physload(a, b, a, b, VM_FREELIST_DEFAULT);
1.101     gwr      1908:                c = atop(hole_start + hole_size);
                   1909:                d = atop(avail_end);
1.105     thorpej  1910:                uvm_page_physload(b, d, c, d, VM_FREELIST_DEFAULT);
1.101     gwr      1911:        } else {
                   1912:                a = atop(avail_start);
                   1913:                d = atop(avail_end);
1.105     thorpej  1914:                uvm_page_physload(a, d, a, d, VM_FREELIST_DEFAULT);
1.101     gwr      1915:        }
                   1916: }
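/*
 * Illustrative example (made-up layout): on a 3/50 with the frame
 * buffer hole at 1MB, the two uvm_page_physload() calls above would
 * describe
 *
 *	[avail_start .. hole_start)	all pages free
 *	[hole_start  .. avail_end)	free range starting at
 *					hole_start + hole_size
 *
 * so the video RAM pages are known to the VM system but never handed
 * out by the page allocator.
 */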
1.24      gwr      1917:
                   1918: /*
1.38      gwr      1919:  * pmap_page_index()
1.24      gwr      1920:  *
1.40      gwr      1921:  * Given a physical address, return a page index.
                   1922:  *
                   1923:  * There can be some values that we never return (i.e. a hole)
                   1924:  * as long as the range of indices returned by this function
                   1925:  * is smaller than the value returned by pmap_free_pages().
                   1926:  * The returned index does NOT need to start at zero.
1.70      gwr      1927:  * (This is normally a macro in pmap.h)
1.24      gwr      1928:  */
1.70      gwr      1929: #ifndef        pmap_page_index
1.59      christos 1930: int
1.38      gwr      1931: pmap_page_index(pa)
                   1932:        vm_offset_t pa;
1.24      gwr      1933: {
1.65      gwr      1934:        int idx;
1.24      gwr      1935:
1.41      gwr      1936: #ifdef DIAGNOSTIC
1.38      gwr      1937:        if (pa < avail_start || pa >= avail_end)
1.73      fair     1938:                panic("pmap_page_index: pa=0x%lx", pa);
1.70      gwr      1939: #endif /* DIAGNOSTIC */
1.24      gwr      1940:
1.70      gwr      1941:        idx = atop(pa);
1.65      gwr      1942:        return (idx);
1.24      gwr      1943: }
1.70      gwr      1944: #endif /* !pmap_page_index */
1.24      gwr      1945:
1.38      gwr      1946:
1.1       glass    1947: /*
                   1948:  *     Initialize the pmap module.
                   1949:  *     Called by vm_init, to initialize any structures that the pmap
                   1950:  *     system needs to map virtual memory.
                   1951:  */
                   1952: void
1.38      gwr      1953: pmap_init()
1.1       glass    1954: {
                   1955:
1.38      gwr      1956:        pv_init();
1.120     tsutsui  1957: 
                   1958:        /* Initialize the pmap pool. */
                   1959:        pool_init(&pmap_pmap_pool, sizeof(struct pmap), 0, 0, 0, "pmappl",
                   1960:            0, pool_page_alloc_nointr, pool_page_free_nointr, M_VMPMAP);
1.1       glass    1961: }
                   1962:
1.38      gwr      1963: /*
1.56      gwr      1964:  * Map a range of kernel virtual address space.
                   1965:  * This might be used for device mappings, or to
                   1966:  * record the mapping for kernel text/data/bss.
1.100     gwr      1967:  * Return VA following the mapped range.
1.38      gwr      1968:  */
1.1       glass    1969: vm_offset_t
1.100     gwr      1970: pmap_map(va, pa, endpa, prot)
                   1971:        vm_offset_t     va;
                   1972:        vm_offset_t     pa;
                   1973:        vm_offset_t     endpa;
1.38      gwr      1974:        int             prot;
                   1975: {
1.100     gwr      1976:        int sz;
                   1977:
                   1978:        sz = endpa - pa;
                   1979:        do {
1.116     thorpej  1980:                pmap_enter(kernel_pmap, va, pa, prot, 0);
1.100     gwr      1981:                va += NBPG;
                   1982:                pa += NBPG;
                   1983:                sz -= NBPG;
                   1984:        } while (sz > 0);
                   1985:        return(va);
1.38      gwr      1986: }
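/*
 * Usage sketch (illustrative): mapping a device or a boot-time
 * region during startup might look like
 *
 *	va = pmap_map(va, pa, pa + sz, VM_PROT_READ | VM_PROT_WRITE);
 *
 * where the returned value is the VA just past the new mapping,
 * ready to be passed to the next pmap_map() call.
 */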
                   1987:
                   1988: void
1.76      gwr      1989: pmap_user_init(pmap)
1.38      gwr      1990:        pmap_t pmap;
                   1991: {
                   1992:        int i;
                   1993:        pmap->pm_segmap = malloc(sizeof(char)*NUSEG, M_VMPMAP, M_WAITOK);
                   1994:        for (i=0; i < NUSEG; i++) {
                   1995:                pmap->pm_segmap[i] = SEGINV;
                   1996:        }
1.1       glass    1997: }
                   1998:
                   1999: /*
                   2000:  *     Create and return a physical map.
                    2001:  *
                    2002:  *     Note: this interface takes no size argument;
                    2003:  *     the map returned is always a real physical
                    2004:  *     map that may be referenced by the hardware.
                   2010:  */
                   2011: pmap_t
1.115     chs      2012: pmap_create()
1.1       glass    2013: {
1.38      gwr      2014:        pmap_t pmap;
1.2       glass    2015:
1.120     tsutsui  2016:        pmap = pool_get(&pmap_pmap_pool, PR_WAITOK);
1.96      thorpej  2017:        pmap_pinit(pmap);
1.38      gwr      2018:        return pmap;
1.1       glass    2019: }
                   2020:
                   2021: /*
                   2022:  * Release any resources held by the given physical map.
                   2023:  * Called when a pmap initialized by pmap_pinit is being released.
                   2024:  * Should only be called if the map contains no valid mappings.
                   2025:  */
                   2026: void
                   2027: pmap_release(pmap)
1.38      gwr      2028:        struct pmap *pmap;
1.1       glass    2029: {
1.80      gwr      2030:        int s;
                   2031:
                   2032:        s = splpmap();
1.26      gwr      2033:
1.50      gwr      2034:        if (pmap == kernel_pmap)
                   2035:                panic("pmap_release: kernel_pmap!");
1.29      gwr      2036:
1.80      gwr      2037:        if (has_context(pmap)) {
                   2038: #ifdef PMAP_DEBUG
                   2039:                if (pmap_debug & PMD_CONTEXT)
                   2040:                        printf("pmap_release(%p): free ctx %d\n",
                   2041:                                   pmap, pmap->pm_ctxnum);
                   2042: #endif
1.38      gwr      2043:                context_free(pmap);
1.80      gwr      2044:        }
1.38      gwr      2045:        free(pmap->pm_segmap, M_VMPMAP);
                   2046:        pmap->pm_segmap = NULL;
1.80      gwr      2047:
                   2048:        splx(s);
1.1       glass    2049: }
                   2050:
                   2051:
                   2052: /*
                   2053:  *     Retire the given physical map from service.
                   2054:  *     Should only be called if the map contains
                   2055:  *     no valid mappings.
                   2056:  */
                   2057: void
                   2058: pmap_destroy(pmap)
1.38      gwr      2059:        pmap_t pmap;
1.1       glass    2060: {
1.38      gwr      2061:        int count;
1.1       glass    2062:
1.38      gwr      2063:        if (pmap == NULL)
1.78      gwr      2064:                return; /* Duh! */
1.26      gwr      2065:
1.18      glass    2066: #ifdef PMAP_DEBUG
1.38      gwr      2067:        if (pmap_debug & PMD_CREATE)
1.65      gwr      2068:                printf("pmap_destroy(%p)\n", pmap);
1.18      glass    2069: #endif
1.50      gwr      2070:        if (pmap == kernel_pmap)
                   2071:                panic("pmap_destroy: kernel_pmap!");
1.38      gwr      2072:        pmap_lock(pmap);
                   2073:        count = pmap_del_ref(pmap);
                   2074:        pmap_unlock(pmap);
                   2075:        if (count == 0) {
                   2076:                pmap_release(pmap);
1.120     tsutsui  2077:                pool_put(&pmap_pmap_pool, pmap);
1.38      gwr      2078:        }
1.1       glass    2079: }
                   2080:
                   2081: /*
                   2082:  *     Add a reference to the specified pmap.
                   2083:  */
                   2084: void
                   2085: pmap_reference(pmap)
                   2086:        pmap_t  pmap;
                   2087: {
1.38      gwr      2088:        if (pmap != NULL) {
                   2089:                pmap_lock(pmap);
                   2090:                pmap_add_ref(pmap);
                   2091:                pmap_unlock(pmap);
                   2092:        }
1.1       glass    2093: }
1.26      gwr      2094:
1.85      gwr      2095:
1.38      gwr      2096: /*
1.85      gwr      2097:  *     Insert the given physical page (p) at
                   2098:  *     the specified virtual address (v) in the
                   2099:  *     target physical map with the protection requested.
                   2100:  *
                   2101:  *     The physical address is page aligned, but may have some
                   2102:  *     low bits set indicating an OBIO or VME bus page, or just
                    2103:  *     that the non-cache bit should be set (i.e. PMAP_NC).
                   2104:  *
                   2105:  *     If specified, the page will be wired down, meaning
                   2106:  *     that the related pte can not be reclaimed.
                   2107:  *
                   2108:  *     NB:  This is the only routine which MAY NOT lazy-evaluate
                   2109:  *     or lose information.  That is, this routine must actually
                   2110:  *     insert this page into the given map NOW.
1.38      gwr      2111:  */
1.116     thorpej  2112: int
                   2113: pmap_enter(pmap, va, pa, prot, flags)
1.38      gwr      2114:        pmap_t pmap;
1.85      gwr      2115:        vm_offset_t va;
                   2116:        vm_offset_t pa;
                   2117:        vm_prot_t prot;
1.116     thorpej  2118:        int flags;
1.85      gwr      2119: {
1.92      gwr      2120:        int new_pte, s;
1.116     thorpej  2121:        boolean_t wired = (flags & PMAP_WIRED) != 0;
1.85      gwr      2122:
                   2123:        if (pmap == NULL)
1.116     thorpej  2124:                return (KERN_SUCCESS);
1.85      gwr      2125: #ifdef PMAP_DEBUG
                   2126:        if ((pmap_debug & PMD_ENTER) ||
                   2127:                (va == pmap_db_watchva))
                   2128:                printf("pmap_enter(%p, 0x%lx, 0x%lx, 0x%x, 0x%x)\n",
                   2129:                           pmap, va, pa, prot, wired);
                   2130: #endif
                   2131:
                   2132:        /* Get page-type bits from low part of the PA... */
1.92      gwr      2133:        new_pte = (pa & PMAP_SPEC) << PG_MOD_SHIFT;
1.85      gwr      2134:
                   2135:        /* ...now the valid and writable bits... */
1.92      gwr      2136:        new_pte |= PG_VALID;
1.90      gwr      2137:        if (prot & VM_PROT_WRITE)
1.92      gwr      2138:                new_pte |= PG_WRITE;
1.85      gwr      2139:
                   2140:        /* ...and finally the page-frame number. */
1.92      gwr      2141:        new_pte |= PA_PGNUM(pa);
1.85      gwr      2142:
                   2143:        /*
                   2144:         * Treatment varies significantly:
                   2145:         *  Kernel PTEs are in all contexts and are always in the MMU.
                   2146:         *  User PTEs are not necessarily in the MMU, and the pmap
                   2147:         *   itself may not be resident in the MMU either.
                   2148:         *
                   2149:         */
                   2150:        s = splpmap();
                   2151:        if (pmap == kernel_pmap) {
1.92      gwr      2152:                new_pte |= PG_SYSTEM;
                   2153:                pmap_enter_kernel(va, new_pte, wired);
1.85      gwr      2154:        } else {
1.92      gwr      2155:                pmap_enter_user(pmap, va, new_pte, wired);
1.85      gwr      2156:        }
                   2157:        splx(s);
1.116     thorpej  2158:        return (KERN_SUCCESS);
1.85      gwr      2159: }
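
/*
 * Illustrative sketch, not part of the original source: a caller
 * mapping a device page non-cached passes the special low bits in
 * the PA, which pmap_enter() decodes above.  The names "regs_va"
 * and "obio_pa" are hypothetical (a kernel VA and a page-aligned
 * OBIO physical address):
 *
 *	pmap_enter(pmap_kernel(), regs_va, obio_pa | PMAP_NC,
 *	    VM_PROT_READ | VM_PROT_WRITE, PMAP_WIRED);
 */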
                   2160:
                   2161: static void
1.92      gwr      2162: pmap_enter_kernel(pgva, new_pte, wired)
1.85      gwr      2163:        vm_offset_t pgva;
1.92      gwr      2164:        int new_pte;
1.85      gwr      2165:        boolean_t wired;
1.38      gwr      2166: {
1.92      gwr      2167:        pmap_t pmap = kernel_pmap;
                   2168:        pmeg_t pmegp;
1.85      gwr      2169:        int do_pv, old_pte, sme;
                   2170:        vm_offset_t segva;
1.32      gwr      2171:
1.80      gwr      2172:        CHECK_SPL();
                   2173:
1.85      gwr      2174:        /*
                   2175:          keep in hardware only, since it's mapped into all contexts anyway;
                   2176:          need to handle possibly allocating additional PMEGs;
                   2177:          need to make sure they can't be stolen from the kernel;
                   2178:          map any new PMEGs into all contexts, make sure rest of PMEG is null;
                   2179:          deal with pv_stuff; possible caching problems;
                   2180:          must also deal with changes too.
                   2181:          */
                   2182:
                   2183:        /*
                   2184:         * In detail:
                   2185:         *
                   2186:         * (a) lock pmap
                   2187:         * (b) Is the VA in an already-mapped segment?  If so,
                   2188:         *       check whether that VA is "valid".  If it is, the
                   2189:         *       action is a change to an existing pte.
                   2190:         * (c) If not in a mapped segment, we need to allocate a pmeg.
                   2191:         * (d) If adding a pte or changing the physaddr of an existing
                   2192:         *       one, use pv_stuff; for a change, possibly pmap_remove().
                   2193:         * (e) change/add pte
                   2194:         */
                   2195:
1.30      gwr      2196: #ifdef DIAGNOSTIC
1.98      gwr      2197:        if ((pgva < virtual_avail) || (pgva >= DVMA_MAP_END))
1.85      gwr      2198:                panic("pmap_enter_kernel: bad va=0x%lx", pgva);
                   2199:        if ((new_pte & (PG_VALID | PG_SYSTEM)) != (PG_VALID | PG_SYSTEM))
                   2200:                panic("pmap_enter_kernel: bad pte");
                   2201: #endif
                   2202:
1.98      gwr      2203:        if (pgva >= DVMA_MAP_BASE) {
1.85      gwr      2204:                /* This is DVMA space.  Always want it non-cached. */
                   2205:                new_pte |= PG_NC;
1.29      gwr      2206:        }
                   2207:
1.85      gwr      2208:        segva = m68k_trunc_seg(pgva);
                   2209:        do_pv = TRUE;
                   2210:
1.90      gwr      2211:        /* Do we have a PMEG? */
1.82      gwr      2212:        sme = get_segmap(segva);
1.90      gwr      2213:        if (sme != SEGINV) {
                   2214:                /* Found a PMEG in the segmap.  Cool. */
                   2215:                pmegp = pmeg_p(sme);
                   2216: #ifdef DIAGNOSTIC
                   2217:                /* Make sure it is the right PMEG. */
1.92      gwr      2218:                if (sme != pmap->pm_segmap[VA_SEGNUM(segva)])
1.90      gwr      2219:                        panic("pmap_enter_kernel: wrong sme at VA=0x%lx", segva);
                   2220:                /* Make sure it is ours. */
1.92      gwr      2221:                if (pmegp->pmeg_owner != pmap)
1.90      gwr      2222:                        panic("pmap_enter_kernel: MMU has bad pmeg 0x%x", sme);
                   2223: #endif
                   2224:        } else {
                   2225:                /* No PMEG in the segmap.  Have to allocate one. */
1.92      gwr      2226:                pmegp = pmeg_allocate(pmap, segva);
1.85      gwr      2227:                sme = pmegp->pmeg_index;
1.92      gwr      2228:                pmap->pm_segmap[VA_SEGNUM(segva)] = sme;
1.85      gwr      2229:                set_segmap_allctx(segva, sme);
1.90      gwr      2230: #ifdef PMAP_DEBUG
                   2231:                pmeg_verify_empty(segva);
1.85      gwr      2232:                if (pmap_debug & PMD_SEGMAP) {
                   2233:                        printf("pmap: set_segmap pmap=%p va=0x%lx sme=0x%x (ek)\n",
1.92      gwr      2234:                                   pmap, segva, sme);
1.85      gwr      2235:                }
1.29      gwr      2236: #endif
1.85      gwr      2237:                /* There are no existing mappings to deal with. */
                   2238:                old_pte = 0;
                   2239:                goto add_pte;
                   2240:        }
1.80      gwr      2241:
1.85      gwr      2242:        /*
                   2243:         * We have a PMEG.  Is the VA already mapped somewhere?
                   2244:         *      (a) if so, is it the same pa? (really a protection change)
                   2245:         *      (b) if not the same pa, we have to unlink it from the old pa
                   2246:         */
                   2247:        old_pte = get_pte(pgva);
                   2248:        if ((old_pte & PG_VALID) == 0)
                   2249:                goto add_pte;
                   2250:
                   2251:        /* Have valid translation.  Flush cache before changing it. */
1.38      gwr      2252: #ifdef HAVECACHE
1.50      gwr      2253:        if (cache_size) {
1.85      gwr      2254:                cache_flush_page(pgva);
                   2255:                /* Get fresh mod/ref bits from write-back. */
                   2256:                old_pte = get_pte(pgva);
1.50      gwr      2257:        }
1.38      gwr      2258: #endif
                   2259:
1.85      gwr      2260:        /* XXX - removing valid page here, way lame... -glass */
                   2261:        pmegp->pmeg_vpages--;
                   2262:
                   2263:        if (!IS_MAIN_MEM(old_pte)) {
                   2264:                /* Was not main memory, so no pv_entry for it. */
                   2265:                goto add_pte;
1.38      gwr      2266:        }
                   2267:
1.85      gwr      2268:        /* Old mapping was main memory.  Save mod/ref bits. */
                   2269:        save_modref_bits(old_pte);
1.38      gwr      2270:
1.85      gwr      2271:        /*
                   2272:         * If not changing the type or pfnum then re-use pv_entry.
                   2273:         * Note we get here only with old_pte having PGT_OBMEM.
                   2274:         */
                   2275:        if ((old_pte & (PG_TYPE|PG_FRAME)) ==
                   2276:                (new_pte & (PG_TYPE|PG_FRAME)) )
                   2277:        {
                   2278:                do_pv = FALSE;          /* re-use pv_entry */
                   2279:                new_pte |= (old_pte & PG_NC);
                   2280:                goto add_pte;
1.38      gwr      2281:        }
                   2282:
1.85      gwr      2283:        /* OK, different type or PA, have to kill old pv_entry. */
1.92      gwr      2284:        pv_unlink(pmap, old_pte, pgva);
1.29      gwr      2285:
1.85      gwr      2286:  add_pte:      /* can be destructive */
                   2287:        pmeg_set_wiring(pmegp, pgva, wired);
1.80      gwr      2288:
1.85      gwr      2289:        /* Anything but MAIN_MEM is mapped non-cached. */
                   2290:        if (!IS_MAIN_MEM(new_pte)) {
                   2291:                new_pte |= PG_NC;
                   2292:                do_pv = FALSE;
                   2293:        }
1.92      gwr      2294:        if (do_pv == TRUE) {
                   2295:                if (pv_link(pmap, new_pte, pgva) & PV_NC)
1.85      gwr      2296:                        new_pte |= PG_NC;
                   2297:        }
1.39      gwr      2298: #ifdef PMAP_DEBUG
1.85      gwr      2299:        if ((pmap_debug & PMD_SETPTE) || (pgva == pmap_db_watchva)) {
                   2300:                printf("pmap: set_pte pmap=%p va=0x%lx old=0x%x new=0x%x (ek)\n",
1.92      gwr      2301:                           pmap, pgva, old_pte, new_pte);
1.85      gwr      2302:        }
1.39      gwr      2303: #endif
1.85      gwr      2304:        /* cache flush done above */
                   2305:        set_pte(pgva, new_pte);
                   2306:        pmegp->pmeg_vpages++;
1.38      gwr      2307: }
                   2308:
1.80      gwr      2309:
1.92      gwr      2310: static void
                   2311: pmap_enter_user(pmap, pgva, new_pte, wired)
1.38      gwr      2312:        pmap_t pmap;
1.82      gwr      2313:        vm_offset_t pgva;
1.92      gwr      2314:        int new_pte;
1.38      gwr      2315:        boolean_t wired;
                   2316: {
1.80      gwr      2317:        int do_pv, old_pte, sme;
1.82      gwr      2318:        vm_offset_t segva;
1.38      gwr      2319:        pmeg_t pmegp;
                   2320:
1.80      gwr      2321:        CHECK_SPL();
                   2322:
1.85      gwr      2323: #ifdef DIAGNOSTIC
                   2324:        if (pgva >= VM_MAXUSER_ADDRESS)
                   2325:                panic("pmap_enter_user: bad va=0x%lx", pgva);
                   2326:        if ((new_pte & (PG_VALID | PG_SYSTEM)) != PG_VALID)
                   2327:                panic("pmap_enter_user: bad pte");
                   2328: #endif
                   2329: #ifdef PMAP_DEBUG
1.38      gwr      2330:        /*
1.85      gwr      2331:         * Some user pages are wired here, and a later
1.113     thorpej  2332:         * call to pmap_unwire() will unwire them.
1.85      gwr      2333:         * XXX - Need a separate list for wired user pmegs
                   2334:         * so they can not be stolen from the active list.
                   2335:         * XXX - Note: vm_fault.c assumes pmap_extract will
                   2336:         * work on wired mappings, so must preserve them...
                   2337:         * XXX: Maybe keep a list of wired PMEGs?
1.38      gwr      2338:         */
1.85      gwr      2339:        if (wired && (pmap_debug & PMD_WIRING)) {
                   2340:                db_printf("pmap_enter_user: attempt to wire user page, ignored\n");
                   2341:                Debugger();
                   2342:        }
                   2343: #endif
1.15      glass    2344:
1.85      gwr      2345:        /* Validate this assumption. */
                   2346:        if (pmap != current_pmap()) {
                   2347: #ifdef PMAP_DEBUG
1.93      gwr      2348:                /* Apparently, this never happens. */
1.85      gwr      2349:                db_printf("pmap_enter_user: not curproc\n");
                   2350:                Debugger();
1.38      gwr      2351: #endif
1.93      gwr      2352:                /* Just throw it out (fault it in later). */
1.85      gwr      2353:                /* XXX: But must remember it if wired... */
                   2354:                return;
1.1       glass    2355:        }
1.38      gwr      2356:
1.82      gwr      2357:        segva = m68k_trunc_seg(pgva);
1.38      gwr      2358:        do_pv = TRUE;
                   2359:
1.85      gwr      2360:        /*
                   2361:         * If this pmap was sharing the "empty" context,
                   2362:         * allocate a real context for its exclusive use.
                   2363:         */
                   2364:        if (!has_context(pmap)) {
                   2365:                context_allocate(pmap);
1.28      gwr      2366: #ifdef PMAP_DEBUG
1.85      gwr      2367:                if (pmap_debug & PMD_CONTEXT)
                   2368:                        printf("pmap_enter(%p) got context %d\n",
                   2369:                                   pmap, pmap->pm_ctxnum);
                   2370: #endif
                   2371:                set_context(pmap->pm_ctxnum);
                   2372:        } else {
                   2373: #ifdef PMAP_DEBUG
                   2374:                /* Make sure context is correct. */
                   2375:                if (pmap->pm_ctxnum != get_context()) {
                   2376:                        db_printf("pmap_enter_user: wrong context\n");
                   2377:                        Debugger();
                   2378:                        /* XXX: OK to proceed? */
                   2379:                        set_context(pmap->pm_ctxnum);
1.38      gwr      2380:                }
1.28      gwr      2381: #endif
1.38      gwr      2382:        }
                   2383:
1.85      gwr      2384:        /*
                   2385:         * We have a context.  Do we have a PMEG?
                   2386:         */
                   2387:        sme = get_segmap(segva);
                   2388:        if (sme != SEGINV) {
                   2389:                /* Found a PMEG in the segmap.  Cool. */
                   2390:                pmegp = pmeg_p(sme);
1.90      gwr      2391: #ifdef DIAGNOSTIC
1.85      gwr      2392:                /* Make sure it is the right PMEG. */
                   2393:                if (sme != pmap->pm_segmap[VA_SEGNUM(segva)])
                   2394:                        panic("pmap_enter_user: wrong sme at VA=0x%lx", segva);
1.90      gwr      2395:                /* Make sure it is ours. */
                   2396:                if (pmegp->pmeg_owner != pmap)
                   2397:                        panic("pmap_enter_user: MMU has bad pmeg 0x%x", sme);
1.85      gwr      2398: #endif
                   2399:        } else {
                   2400:                /* Not in the segmap.  Try the S/W cache. */
                   2401:                pmegp = pmeg_cache(pmap, segva);
                   2402:                if (pmegp) {
                   2403:                        /* Found PMEG in cache.  Just reload it. */
                   2404:                        sme = pmegp->pmeg_index;
                   2405:                        set_segmap(segva, sme);
                   2406:                } else {
                   2407:                        /* PMEG not in cache, so allocate one. */
                   2408:                        pmegp = pmeg_allocate(pmap, segva);
                   2409:                        sme = pmegp->pmeg_index;
                   2410:                        pmap->pm_segmap[VA_SEGNUM(segva)] = sme;
                   2411:                        set_segmap(segva, sme);
                   2412: #ifdef PMAP_DEBUG
                   2413:                        pmeg_verify_empty(segva);
                   2414: #endif
                   2415:                }
                   2416: #ifdef PMAP_DEBUG
                   2417:                if (pmap_debug & PMD_SEGMAP) {
                   2418:                        printf("pmap: set_segmap pmap=%p va=0x%lx sme=0x%x (eu)\n",
                   2419:                                   pmap, segva, sme);
                   2420:                }
1.30      gwr      2421: #endif
1.85      gwr      2422:        }
1.38      gwr      2423:
                   2424:        /*
1.83      gwr      2425:         * We have a PMEG.  Is the VA already mapped somewhere?
                   2426:         *      (a) if so, is it the same pa? (really a protection change)
                   2427:         *      (b) if not the same pa, we have to unlink it from the old pa
1.38      gwr      2428:         */
1.82      gwr      2429:        old_pte = get_pte(pgva);
1.38      gwr      2430:        if ((old_pte & PG_VALID) == 0)
                   2431:                goto add_pte;
                   2432:
1.50      gwr      2433:        /* Have valid translation.  Flush cache before changing it. */
                   2434: #ifdef HAVECACHE
1.52      gwr      2435:        if (cache_size) {
1.82      gwr      2436:                cache_flush_page(pgva);
1.52      gwr      2437:                /* Get fresh mod/ref bits from write-back. */
1.82      gwr      2438:                old_pte = get_pte(pgva);
1.52      gwr      2439:        }
1.50      gwr      2440: #endif
                   2441:
                   2442:        /* XXX - removing valid page here, way lame... -glass */
1.38      gwr      2443:        pmegp->pmeg_vpages--;
                   2444:
1.44      gwr      2445:        if (!IS_MAIN_MEM(old_pte)) {
1.38      gwr      2446:                /* Was not main memory, so no pv_entry for it. */
1.33      gwr      2447:                goto add_pte;
                   2448:        }
1.1       glass    2449:
1.38      gwr      2450:        /* Old mapping was main memory.  Save mod/ref bits. */
                   2451:        save_modref_bits(old_pte);
1.1       glass    2452:
1.38      gwr      2453:        /*
                   2454:         * If not changing the type or pfnum then re-use pv_entry.
                   2455:         * Note we get here only with old_pte having PGT_OBMEM.
                   2456:         */
                   2457:        if ((old_pte & (PG_TYPE|PG_FRAME)) ==
                   2458:                (new_pte & (PG_TYPE|PG_FRAME)) )
                   2459:        {
                   2460:                do_pv = FALSE;          /* re-use pv_entry */
                   2461:                new_pte |= (old_pte & PG_NC);
                   2462:                goto add_pte;
1.28      gwr      2463:        }
1.1       glass    2464:
1.38      gwr      2465:        /* OK, different type or PA, have to kill old pv_entry. */
1.92      gwr      2466:        pv_unlink(pmap, old_pte, pgva);
1.38      gwr      2467:
1.85      gwr      2468:  add_pte:
                   2469:        /* XXX - Wiring changes on user pmaps? */
                   2470:        /* pmeg_set_wiring(pmegp, pgva, wired); */
1.38      gwr      2471:
1.92      gwr      2472:        /* Anything but MAIN_MEM is mapped non-cached. */
1.44      gwr      2473:        if (!IS_MAIN_MEM(new_pte)) {
1.38      gwr      2474:                new_pte |= PG_NC;
                   2475:                do_pv = FALSE;
                   2476:        }
1.92      gwr      2477:        if (do_pv == TRUE) {
                   2478:                if (pv_link(pmap, new_pte, pgva) & PV_NC)
1.38      gwr      2479:                        new_pte |= PG_NC;
                   2480:        }
1.39      gwr      2481: #ifdef PMAP_DEBUG
1.82      gwr      2482:        if ((pmap_debug & PMD_SETPTE) || (pgva == pmap_db_watchva)) {
1.85      gwr      2483:                printf("pmap: set_pte pmap=%p va=0x%lx old=0x%x new=0x%x (eu)\n",
                   2484:                           pmap, pgva, old_pte, new_pte);
1.39      gwr      2485:        }
                   2486: #endif
1.50      gwr      2487:        /* cache flush done above */
1.82      gwr      2488:        set_pte(pgva, new_pte);
1.38      gwr      2489:        pmegp->pmeg_vpages++;
                   2490: }
                   2491:
1.115     chs      2492: void
                   2493: pmap_kenter_pa(va, pa, prot)
                   2494:        vaddr_t va;
                   2495:        paddr_t pa;
                   2496:        vm_prot_t prot;
                   2497: {
1.116     thorpej  2498:        pmap_enter(pmap_kernel(), va, pa, prot, PMAP_WIRED);
1.115     chs      2499: }
                   2500:
                   2501: void
                   2502: pmap_kenter_pgs(va, pgs, npgs)
                   2503:        vaddr_t va;
                   2504:        struct vm_page **pgs;
                   2505:        int npgs;
                   2506: {
                   2507:        int i;
                   2508:
                   2509:        for (i = 0; i < npgs; i++, va += PAGE_SIZE) {
                   2510:                pmap_enter(pmap_kernel(), va, VM_PAGE_TO_PHYS(pgs[i]),
1.116     thorpej  2511:                                VM_PROT_READ|VM_PROT_WRITE, PMAP_WIRED);
1.115     chs      2512:        }
                   2513: }
                   2514:
                   2515: void
                   2516: pmap_kremove(va, len)
                   2517:        vaddr_t va;
                   2518:        vsize_t len;
                   2519: {
                   2520:        for (len >>= PAGE_SHIFT; len > 0; len--, va += PAGE_SIZE) {
                   2521:                pmap_remove(pmap_kernel(), va, va + PAGE_SIZE);
                   2522:        }
                   2523: }
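
/*
 * Usage sketch (assumed, for illustration): pmap_kenter_pa() and
 * pmap_kremove() pair up to give the kernel a temporary wired
 * mapping of one page.  "tmp_va" is a hypothetical kernel VA
 * reserved by the caller:
 *
 *	pmap_kenter_pa(tmp_va, VM_PAGE_TO_PHYS(pg),
 *	    VM_PROT_READ | VM_PROT_WRITE);
 *	...use the mapping...
 *	pmap_kremove(tmp_va, PAGE_SIZE);
 */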
                   2524:
1.38      gwr      2525:
1.85      gwr      2526: /*
                   2527:  * The trap handler calls this so we can try to resolve
                   2528:  * user-level faults by reloading a PMEG.
                   2529:  * If that does not produce a valid mapping,
                   2530:  * call uvm_fault() as usual.
                   2531:  *
                   2532:  * XXX: Merge this with the next function?
                   2533:  */
                   2534: int
                   2535: _pmap_fault(map, va, ftype)
                   2536:        vm_map_t map;
                   2537:        vm_offset_t va;
                   2538:        vm_prot_t ftype;
                   2539: {
                   2540:        pmap_t pmap;
                   2541:        int rv;
                   2542:
                   2543:        pmap = vm_map_pmap(map);
                   2544:        if (map == kernel_map) {
                   2545:                /* Do not allow faults below the "managed" space. */
                   2546:                if (va < virtual_avail) {
                   2547:                        /*
                   2548:                         * Most pages below virtual_avail are read-only,
                   2549:                         * so I will assume it is a protection failure.
                   2550:                         */
                   2551:                        return KERN_PROTECTION_FAILURE;
                   2552:                }
                   2553:        } else {
                   2554:                /* User map.  Try reload shortcut. */
                   2555:                if (pmap_fault_reload(pmap, va, ftype))
                   2556:                        return KERN_SUCCESS;
                   2557:        }
1.103     gwr      2558:        rv = uvm_fault(map, va, 0, ftype);
1.85      gwr      2559:
                   2560: #ifdef PMAP_DEBUG
                   2561:        if (pmap_debug & PMD_FAULT) {
                   2562:                printf("pmap_fault(%p, 0x%lx, 0x%x) -> 0x%x\n",
                   2563:                           map, va, ftype, rv);
                   2564:        }
                   2565: #endif
                   2566:
                   2567:        return (rv);
                   2568: }
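
/*
 * Caller-side sketch (illustrative; the real call site is in the
 * machine-dependent trap code): on an MMU fault the handler can
 * simply try _pmap_fault() and retry the instruction on success:
 *
 *	if (_pmap_fault(map, va, ftype) == KERN_SUCCESS)
 *		return;		... mapping resolved, retry ...
 */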
                   2569:
                   2570: /*
                   2571:  * This is a shortcut used by the trap handler to
                   2572:  * reload PMEGs into a user segmap without calling
                   2573:  * the actual VM fault handler.  Returns TRUE if:
                   2574:  *     the PMEG was reloaded, and
                   2575:  *     it has a valid PTE at va.
                   2576:  * Otherwise returns zero and lets the VM code handle it.
                   2577:  */
                   2578: int
                   2579: pmap_fault_reload(pmap, pgva, ftype)
1.38      gwr      2580:        pmap_t pmap;
1.82      gwr      2581:        vm_offset_t pgva;
1.85      gwr      2582:        vm_prot_t ftype;
1.38      gwr      2583: {
1.85      gwr      2584:        int rv, s, pte, chkpte, sme;
1.82      gwr      2585:        vm_offset_t segva;
1.38      gwr      2586:        pmeg_t pmegp;
                   2587:
1.82      gwr      2588:        if (pgva >= VM_MAXUSER_ADDRESS)
1.85      gwr      2589:                return (0);
                   2590:        if (pmap->pm_segmap == NULL) {
1.83      gwr      2591: #ifdef PMAP_DEBUG
1.85      gwr      2592:                db_printf("pmap_fault_reload: null segmap\n");
1.83      gwr      2593:                Debugger();
                   2594: #endif
1.85      gwr      2595:                return (0);
1.83      gwr      2596:        }
                   2597:
1.85      gwr      2598:        /* Short-cut using the S/W segmap. */
                   2599:        if (pmap->pm_segmap[VA_SEGNUM(pgva)] == SEGINV)
                   2600:                return (0);
                   2601:
1.82      gwr      2602:        segva = m68k_trunc_seg(pgva);
1.85      gwr      2603:        chkpte = PG_VALID;
                   2604:        if (ftype & VM_PROT_WRITE)
                   2605:                chkpte |= PG_WRITE;
                   2606:        rv = 0;
                   2607:
                   2608:        s = splpmap();
1.38      gwr      2609:
                   2610:        /*
1.85      gwr      2611:         * Given that we faulted on a user-space address, we will
                   2612:         * probably need a context.  Get a context now so we can
                   2613:         * try to resolve the fault with a segmap reload.
1.38      gwr      2614:         */
1.83      gwr      2615:        if (!has_context(pmap)) {
                   2616:                context_allocate(pmap);
                   2617: #ifdef PMAP_DEBUG
                   2618:                if (pmap_debug & PMD_CONTEXT)
1.85      gwr      2619:                        printf("pmap_fault(%p) got context %d\n",
1.83      gwr      2620:                                   pmap, pmap->pm_ctxnum);
                   2621: #endif
                   2622:                set_context(pmap->pm_ctxnum);
                   2623:        } else {
1.38      gwr      2624: #ifdef PMAP_DEBUG
1.83      gwr      2625:                /* Make sure context is correct. */
                   2626:                if (pmap->pm_ctxnum != get_context()) {
1.85      gwr      2627:                        db_printf("pmap_fault_reload: wrong context\n");
1.38      gwr      2628:                        Debugger();
1.83      gwr      2629:                        /* XXX: OK to proceed? */
                   2630:                        set_context(pmap->pm_ctxnum);
1.38      gwr      2631:                }
                   2632: #endif
                   2633:        }
                   2634:
1.82      gwr      2635:        sme = get_segmap(segva);
1.85      gwr      2636:        if (sme == SEGINV) {
                   2637:                /* See if there is something to reload. */
1.82      gwr      2638:                pmegp = pmeg_cache(pmap, segva);
1.80      gwr      2639:                if (pmegp) {
1.85      gwr      2640:                        /* Found one!  OK, reload it. */
                   2641:                        pmap_stats.ps_pmeg_faultin++;
1.80      gwr      2642:                        sme = pmegp->pmeg_index;
1.82      gwr      2643:                        set_segmap(segva, sme);
1.85      gwr      2644:                        pte = get_pte(pgva);
                   2645:                        if (pte & chkpte)
                   2646:                                rv = 1;
                   2647:                }
                   2648:        }
                   2649:
                   2650:        splx(s);
                   2651:        return (rv);
                   2652: }
                   2653:
                   2654:
                   2655: /*
                   2656:  * Clear the modify bit for the given physical page.
                   2657:  */
1.115     chs      2658: boolean_t
                   2659: pmap_clear_modify(pg)
                   2660:        struct vm_page *pg;
1.85      gwr      2661: {
1.115     chs      2662:        paddr_t pa = VM_PAGE_TO_PHYS(pg);
                   2663:        pv_entry_t *head;
1.85      gwr      2664:        u_char *pv_flags;
                   2665:        int s;
1.115     chs      2666:        boolean_t rv;
1.85      gwr      2667:
                   2668:        if (!pv_initialized)
1.115     chs      2669:                return FALSE;
1.85      gwr      2670:
1.87      gwr      2671:        /* The VM code may call this on device addresses! */
                   2672:        if (PA_IS_DEV(pa))
1.115     chs      2673:                return FALSE;
1.87      gwr      2674:
1.85      gwr      2675:        pv_flags = pa_to_pvflags(pa);
                   2676:        head     = pa_to_pvhead(pa);
                   2677:
                   2678:        s = splpmap();
                   2679:        *pv_flags |= pv_syncflags(*head);
1.115     chs      2680:        rv = *pv_flags & PV_MOD;
1.85      gwr      2681:        *pv_flags &= ~PV_MOD;
                   2682:        splx(s);
1.115     chs      2683:        return rv;
1.85      gwr      2684: }
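
/*
 * Usage sketch (illustrative, not actual UVM code): a page cleaner
 * clears the bit and uses the return value to learn whether the
 * page was dirty:
 *
 *	if (pmap_clear_modify(pg)) {
 *		... page was modified; write it back ...
 *	}
 */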
                   2685:
                   2686: /*
                   2687:  * Tell whether the given physical page has been modified.
                   2688:  */
                   2689: boolean_t
1.115     chs      2690: pmap_is_modified(pg)
                   2691:        struct vm_page *pg;
1.85      gwr      2692: {
1.115     chs      2693:        paddr_t pa = VM_PAGE_TO_PHYS(pg);
                   2694:        pv_entry_t *head;
1.85      gwr      2695:        u_char *pv_flags;
                   2696:        int rv, s;
                   2697:
                   2698:        if (!pv_initialized)
                   2699:                return (FALSE);
                   2700:
1.87      gwr      2701:        /* The VM code may call this on device addresses! */
                   2702:        if (PA_IS_DEV(pa))
                   2703:                return (FALSE);
                   2704:
1.85      gwr      2705:        pv_flags = pa_to_pvflags(pa);
                   2706:        head     = pa_to_pvhead(pa);
                   2707:
                   2708:        s = splpmap();
                   2709:        if ((*pv_flags & PV_MOD) == 0)
                   2710:                *pv_flags |= pv_syncflags(*head);
                   2711:        rv = (*pv_flags & PV_MOD);
                   2712:        splx(s);
                   2713:
                   2714:        return (rv);
                   2715: }
                   2716:
                   2717: /*
                   2718:  * Clear the reference bit for the given physical page.
                   2719:  * It's OK to just remove mappings if that's easier.
                   2720:  */
1.115     chs      2721: boolean_t
                   2722: pmap_clear_reference(pg)
                   2723:        struct vm_page *pg;
1.85      gwr      2724: {
1.115     chs      2725:        paddr_t pa = VM_PAGE_TO_PHYS(pg);
                   2726:        pv_entry_t *head;
1.85      gwr      2727:        u_char *pv_flags;
                   2728:        int s;
1.115     chs      2729:        boolean_t rv;
1.85      gwr      2730:
                   2731:        if (!pv_initialized)
1.115     chs      2732:                return FALSE;
1.85      gwr      2733:
1.87      gwr      2734:        /* The VM code may call this on device addresses! */
                   2735:        if (PA_IS_DEV(pa))
1.115     chs      2736:                return FALSE;
1.87      gwr      2737:
1.85      gwr      2738:        pv_flags = pa_to_pvflags(pa);
                   2739:        head     = pa_to_pvhead(pa);
                   2740:
                   2741:        s = splpmap();
                   2742:        *pv_flags |= pv_syncflags(*head);
1.115     chs      2743:        rv = *pv_flags & PV_REF;
1.85      gwr      2744:        *pv_flags &= ~PV_REF;
                   2745:        splx(s);
1.115     chs      2746:        return rv;
1.85      gwr      2747: }
                   2748:
                   2749: /*
                   2750:  * Tell whether the given physical page has been referenced.
                   2751:  * It's OK to just return FALSE if the page is not mapped.
                   2752:  */
1.115     chs      2753: boolean_t
                   2754: pmap_is_referenced(pg)
                   2755:        struct vm_page *pg;
1.85      gwr      2756: {
1.115     chs      2757:        paddr_t pa = VM_PAGE_TO_PHYS(pg);
                   2758:        pv_entry_t *head;
1.85      gwr      2759:        u_char *pv_flags;
1.115     chs      2760:        int s;
                   2761:        boolean_t rv;
1.85      gwr      2762:
                   2763:        if (!pv_initialized)
1.115     chs      2764:                return (FALSE);
1.85      gwr      2765:
1.87      gwr      2766:        /* The VM code may call this on device addresses! */
                   2767:        if (PA_IS_DEV(pa))
1.115     chs      2768:                return (FALSE);
1.87      gwr      2769:
1.85      gwr      2770:        pv_flags = pa_to_pvflags(pa);
                   2771:        head     = pa_to_pvhead(pa);
                   2772:
                   2773:        s = splpmap();
                   2774:        if ((*pv_flags & PV_REF) == 0)
                   2775:                *pv_flags |= pv_syncflags(*head);
                   2776:        rv = (*pv_flags & PV_REF);
                   2777:        splx(s);
                   2778:
                   2779:        return (rv);
                   2780: }
                   2781:
                   2782:
                   2783: /*
                   2784:  * This is called by locore.s:cpu_switch() when it is
                   2785:  * switching to a new process.  Load new translations.
1.99      gwr      2786:  * Note: done in-line by locore.s unless PMAP_DEBUG
1.85      gwr      2787:  *
                   2788:  * Note that we do NOT allocate a context here, but
                   2789:  * share the "kernel only" context until we really
                   2790:  * need our own context for user-space mappings in
                   2791:  * pmap_enter_user().
                   2792:  */
                   2793: void
1.99      gwr      2794: _pmap_switch(pmap)
1.85      gwr      2795:        pmap_t pmap;
                   2796: {
                   2797:
                   2798:        CHECK_SPL();
                   2799:        set_context(pmap->pm_ctxnum);
                   2800:        ICIA();
                   2801: }
                   2802:
1.95      thorpej  2803: /*
1.99      gwr      2804:  * Exported version of pmap_activate().  This is called from the
                   2805:  * machine-independent VM code when a process is given a new pmap.
                   2806:  * If (p == curproc) do like cpu_switch would do; otherwise just
                   2807:  * take this as notification that the process has a new pmap.
1.95      thorpej  2808:  */
                   2809: void
                   2810: pmap_activate(p)
                   2811:        struct proc *p;
                   2812: {
                   2813:        pmap_t pmap = p->p_vmspace->vm_map.pmap;
                   2814:        int s;
                   2815:
1.99      gwr      2816:        if (p == curproc) {
                   2817:                s = splpmap();
                   2818:                _pmap_switch(pmap);
                   2819:                splx(s);
                   2820:        }
1.95      thorpej  2821: }
                   2822:
                   2823: /*
                   2824:  * Deactivate the address space of the specified process.
                   2825:  * XXX The semantics of this function are not currently well-defined.
                   2826:  */
                   2827: void
                   2828: pmap_deactivate(p)
                   2829:        struct proc *p;
                   2830: {
                   2831:        /* not implemented. */
                   2832: }
1.85      gwr      2833:
                   2834: /*
1.113     thorpej  2835:  *     Routine:        pmap_unwire
                   2836:  *     Function:       Clear the wired attribute for a map/virtual-address
1.85      gwr      2837:  *                     pair.
                   2838:  *     In/out conditions:
                   2839:  *                     The mapping must already exist in the pmap.
                   2840:  */
                   2841: void
1.113     thorpej  2842: pmap_unwire(pmap, va)
1.85      gwr      2843:        pmap_t  pmap;
                   2844:        vm_offset_t     va;
                   2845: {
                   2846:        int s, sme;
                   2847:        int wiremask, ptenum;
                   2848:        pmeg_t pmegp;
                   2849:
                   2850:        if (pmap == NULL)
                   2851:                return;
                   2852: #ifdef PMAP_DEBUG
                   2853:        if (pmap_debug & PMD_WIRING)
1.113     thorpej  2854:                printf("pmap_unwire(pmap=%p, va=0x%lx)\n",
                   2855:                           pmap, va);
1.80      gwr      2856: #endif
1.85      gwr      2857:        /*
                   2858:         * We are asked to unwire pages that were wired when
                   2859:         * pmap_enter() was called and we ignored wiring.
                   2860:         * (VM code appears to wire a stack page during fork.)
                   2861:         */
                   2862:        if (pmap != kernel_pmap) {
                   2863: #ifdef PMAP_DEBUG
                   2864:                if (pmap_debug & PMD_WIRING) {
                   2865:                        db_printf("  (user pmap -- ignored)\n");
                   2866:                        Debugger();
1.38      gwr      2867:                }
                   2868: #endif
1.85      gwr      2869:                return;
1.38      gwr      2870:        }
                   2871:
1.85      gwr      2872:        ptenum = VA_PTE_NUM(va);
                   2873:        wiremask = 1 << ptenum;
                   2874:
                   2875:        s = splpmap();
                   2876:
                   2877:        sme = get_segmap(va);
                   2878:        if (sme == SEGINV)
1.113     thorpej  2879:                panic("pmap_unwire: invalid va=0x%lx", va);
1.85      gwr      2880:        pmegp = pmeg_p(sme);
1.113     thorpej  2881:        pmegp->pmeg_wired &= ~wiremask;
1.38      gwr      2882:
1.85      gwr      2883:        splx(s);
                   2884: }
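
/*
 * Worked example (assuming the sun3's 8KB pages and 128KB segments,
 * i.e. 16 PTEs per PMEG): for va = segbase + 3 * NBPG we get
 * VA_PTE_NUM(va) == 3, so the code above clears bit (1 << 3) in
 * pmeg_wired and leaves the other fifteen wired bits alone.
 */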
1.50      gwr      2885:
1.85      gwr      2886: /*
                   2887:  *     Copy the range specified by src_addr/len
                   2888:  *     from the source map to the range dst_addr/len
                   2889:  *     in the destination map.
                   2890:  *
                   2891:  *     This routine is only advisory and need not do anything.
                   2892:  */
                   2893: void
                   2894: pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
                   2895:        pmap_t          dst_pmap;
                   2896:        pmap_t          src_pmap;
                   2897:        vm_offset_t     dst_addr;
                   2898:        vm_size_t       len;
                   2899:        vm_offset_t     src_addr;
                   2900: {
                   2901: }
1.38      gwr      2902:
1.85      gwr      2903: /*
                   2904:  *     Routine:        pmap_extract
                   2905:  *     Function:
                   2906:  *             Extract the physical page address associated
                   2907:  *             with the given map/virtual_address pair.
                   2908:  *     Returns FALSE if the VA is not valid.
                   2909:  */
1.114     thorpej  2910: boolean_t
                   2911: pmap_extract(pmap, va, pap)
1.85      gwr      2912:        pmap_t  pmap;
                   2913:        vm_offset_t va;
1.114     thorpej  2914:        paddr_t *pap;
1.85      gwr      2915: {
                   2916:        int s, sme, segnum, ptenum, pte;
1.114     thorpej  2917:        paddr_t pa;
1.38      gwr      2918:
1.85      gwr      2919:        pte = 0;
                   2920:        s = splpmap();
1.38      gwr      2921:
1.85      gwr      2922:        if (pmap == kernel_pmap) {
                   2923:                sme = get_segmap(va);
                   2924:                if (sme != SEGINV)
                   2925:                        pte = get_pte(va);
                   2926:        } else {
                   2927:                /* This is rare, so do it the easy way. */
                   2928:                segnum = VA_SEGNUM(va);
                   2929:                sme = pmap->pm_segmap[segnum];
                   2930:                if (sme != SEGINV) {
                   2931:                        ptenum = VA_PTE_NUM(va);
                   2932:                        pte = get_pte_pmeg(sme, ptenum);
                   2933:                }
1.38      gwr      2934:        }
                   2935:
1.85      gwr      2936:        splx(s);
1.38      gwr      2937:
1.85      gwr      2938:        if ((pte & PG_VALID) == 0) {
                   2939: #ifdef PMAP_DEBUG
                   2940:                db_printf("pmap_extract: invalid va=0x%lx\n", va);
                   2941:                Debugger();
                   2942: #endif
1.114     thorpej  2943:                return (FALSE);
1.38      gwr      2944:        }
1.85      gwr      2945:        pa = PG_PA(pte);
                   2946: #ifdef DIAGNOSTIC
                   2947:        if (pte & PG_TYPE) {
                   2948:                panic("pmap_extract: not main mem, va=0x%lx", va);
1.39      gwr      2949:        }
                   2950: #endif
1.114     thorpej  2951:        if (pap != NULL)
                   2952:                *pap = pa;
                   2953:        return (TRUE);
1.1       glass    2954: }
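
/*
 * Usage sketch: callers pass a pointer for the PA and must check
 * the boolean result, since an unmapped VA returns FALSE:
 *
 *	paddr_t pa;
 *
 *	if (pmap_extract(pmap_kernel(), va, &pa) == FALSE)
 *		... no valid mapping at va ...
 */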
1.38      gwr      2955:
1.85      gwr      2956:
1.38      gwr      2957: /*
1.85      gwr      2958:  *       pmap_page_protect:
1.1       glass    2959:  *
1.85      gwr      2960:  *       Lower the permission for all mappings to a given page.
1.1       glass    2961:  */
                   2962: void
1.115     chs      2963: pmap_page_protect(pg, prot)
                   2964:        struct vm_page *pg;
1.85      gwr      2965:        vm_prot_t          prot;
1.38      gwr      2966: {
1.115     chs      2967:        paddr_t pa = VM_PAGE_TO_PHYS(pg);
1.38      gwr      2968:        int s;
                   2969:
1.87      gwr      2970:        /* The VM code may call this on device addresses! */
                   2971:        if (PA_IS_DEV(pa))
                   2972:                return;
                   2973:
1.85      gwr      2974:        s = splpmap();
                   2975:
                   2976: #ifdef PMAP_DEBUG
                   2977:        if (pmap_debug & PMD_PROTECT)
                   2978:                printf("pmap_page_protect(0x%lx, 0x%x)\n", pa, prot);
1.38      gwr      2979: #endif
1.85      gwr      2980:        switch (prot) {
                   2981:        case VM_PROT_ALL:
                   2982:                break;
                   2983:        case VM_PROT_READ:
                   2984:        case VM_PROT_READ|VM_PROT_EXECUTE:
                   2985:                pv_changepte(pa, 0, PG_WRITE);
                   2986:                break;
                   2987:        default:
                   2988:                /* remove mapping for all pmaps that have it */
                   2989:                pv_remove_all(pa);
                   2990:                break;
                   2991:        }
1.38      gwr      2992:
1.77      gwr      2993:        splx(s);
1.85      gwr      2994: }
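
/*
 * Illustration (not in the original): the two interesting cases of
 * the switch above, as the MI VM code would invoke them:
 *
 *	pmap_page_protect(pg, VM_PROT_READ);	... revoke write access ...
 *	pmap_page_protect(pg, VM_PROT_NONE);	... remove all mappings ...
 */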
1.66      gwr      2995:
1.85      gwr      2996: /*
                   2997:  * Turn a cdevsw d_mmap value into a byte address for pmap_enter.
                   2998:  * XXX this should almost certainly be done differently, and
                   2999:  *     elsewhere, or even not at all
                   3000:  */
                   3001: #ifndef        pmap_phys_address
                   3002: vm_offset_t
                   3003: pmap_phys_address(x)
                   3004:        int x;
                   3005: {
                   3006:        return (x);
                   3007: }
                   3008: #endif
1.66      gwr      3009:
1.85      gwr      3010: /*
                   3011:  * Initialize a preallocated and zeroed pmap structure,
                   3012:  * such as one in a vmspace structure.
                   3013:  */
                   3014: void
                   3015: pmap_pinit(pmap)
                   3016:        pmap_t pmap;
                   3017: {
                   3018:        pmap_common_init(pmap);
                   3019:        pmap_user_init(pmap);
                   3020: }
1.66      gwr      3021:
1.38      gwr      3022: /*
1.85      gwr      3023:  *     Reduce the permissions on the specified
                   3024:  *     range of this map as requested.
                   3025:  *     (Make pages read-only.)
1.38      gwr      3026:  */
1.85      gwr      3027: void
                   3028: pmap_protect(pmap, sva, eva, prot)
1.38      gwr      3029:        pmap_t pmap;
1.85      gwr      3030:        vm_offset_t sva, eva;
                   3031:        vm_prot_t       prot;
1.1       glass    3032: {
1.85      gwr      3033:        vm_offset_t va, neva;
                   3034:        int segnum;
1.5       glass    3035:
1.85      gwr      3036:        if (pmap == NULL)
                   3037:                return;
1.80      gwr      3038:
1.85      gwr      3039:        /* If leaving writable, nothing to do. */
                   3040:        if (prot & VM_PROT_WRITE)
                   3041:                return;
1.82      gwr      3042:
1.85      gwr      3043:        /* If removing all permissions, just unmap. */
                   3044:        if ((prot & VM_PROT_READ) == 0) {
                   3045:                pmap_remove(pmap, sva, eva);
                   3046:                return;
                   3047:        }
1.38      gwr      3048:
1.85      gwr      3049: #ifdef PMAP_DEBUG
                   3050:        if ((pmap_debug & PMD_PROTECT) ||
                   3051:                ((sva <= pmap_db_watchva && eva > pmap_db_watchva)))
                   3052:                printf("pmap_protect(%p, 0x%lx, 0x%lx)\n", pmap, sva, eva);
                   3053: #endif
1.38      gwr      3054:
1.85      gwr      3055:        if (pmap == kernel_pmap) {
                   3056:                if (sva < virtual_avail)
                   3057:                        sva = virtual_avail;
1.98      gwr      3058:                if (eva > DVMA_MAP_END) {
1.83      gwr      3059: #ifdef PMAP_DEBUG
1.85      gwr      3060:                        db_printf("pmap_protect: eva=0x%lx\n", eva);
1.83      gwr      3061:                        Debugger();
1.85      gwr      3062: #endif
1.98      gwr      3063:                        eva = DVMA_MAP_END;
1.83      gwr      3064:                }
1.85      gwr      3065:        } else {
                   3066:                if (eva > VM_MAXUSER_ADDRESS)
                   3067:                        eva = VM_MAXUSER_ADDRESS;
1.83      gwr      3068:        }
1.80      gwr      3069:
1.85      gwr      3070:        va = sva;
                   3071:        segnum = VA_SEGNUM(va);
                   3072:        while (va < eva) {
                   3073:                neva = m68k_trunc_seg(va) + NBSG;
                   3074:                if (neva > eva)
                   3075:                        neva = eva;
                   3076:                if (pmap->pm_segmap[segnum] != SEGINV)
                   3077:                        pmap_protect1(pmap, va, neva);
                   3078:                va = neva;
                   3079:                segnum++;
1.38      gwr      3080:        }
1.1       glass    3081: }
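
/*
 * Arithmetic sketch for the loop above (assuming 128KB segments):
 * with sva = 0x21000 and eva = 0x61000 the loop computes segment
 * boundaries at 0x40000 and 0x60000, so pmap_protect1() runs at
 * most three times, each call confined to a single segment.
 */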
                   3082:
1.38      gwr      3083: /*
1.85      gwr      3084:  * Remove write permissions in the given range
                   3085:  * (guaranteed to be within one segment).
                   3086:  * Similar to pmap_remove1().
1.38      gwr      3087:  */
                   3088: void
1.85      gwr      3089: pmap_protect1(pmap, sva, eva)
                   3090:        pmap_t pmap;
                   3091:        vm_offset_t sva, eva;
1.1       glass    3092: {
1.85      gwr      3093:        int old_ctx, s, sme;
                   3094:        boolean_t in_ctx;
1.38      gwr      3095:
1.85      gwr      3096:        s = splpmap();
1.84      gwr      3097:
1.85      gwr      3098: #ifdef DIAGNOSTIC
                   3099:        if (m68k_trunc_seg(sva) != m68k_trunc_seg(eva-1))
                   3100:                panic("pmap_protect1: bad range!");
                   3101: #endif
1.38      gwr      3102:
1.85      gwr      3103:        if (pmap == kernel_pmap) {
                   3104:                sme = get_segmap(sva);
                   3105:                if (sme != SEGINV)
                   3106:                        pmap_protect_mmu(pmap, sva, eva);
                   3107:                goto out;
                   3108:        }
                   3109:        /* It is a user pmap. */
1.1       glass    3110:
1.85      gwr      3111:        /* There is a PMEG, but maybe not active. */
                   3112:        old_ctx = INVALID_CONTEXT;
                   3113:        in_ctx = FALSE;
                   3114:        if (has_context(pmap)) {
                   3115:                /* Temporary context change. */
                   3116:                old_ctx = get_context();
                   3117:                set_context(pmap->pm_ctxnum);
                   3118:                sme = get_segmap(sva);
                   3119:                if (sme != SEGINV)
                   3120:                        in_ctx = TRUE;
                   3121:        }
1.38      gwr      3122:
1.85      gwr      3123:        if (in_ctx == TRUE)
                   3124:                pmap_protect_mmu(pmap, sva, eva);
                   3125:        else
                   3126:                pmap_protect_noctx(pmap, sva, eva);
1.84      gwr      3127:
1.85      gwr      3128:        if (old_ctx != INVALID_CONTEXT) {
                   3129:                /* Restore previous context. */
                   3130:                set_context(old_ctx);
                   3131:        }
1.80      gwr      3132:
1.85      gwr      3133: out:
1.80      gwr      3134:        splx(s);
1.1       glass    3135: }
                   3136:
1.38      gwr      3137: /*
1.85      gwr      3138:  * Remove write permissions, all in one PMEG,
                   3139:  * where that PMEG is currently in the MMU.
                   3140:  * The current context is already correct.
1.38      gwr      3141:  */
                   3142: void
1.85      gwr      3143: pmap_protect_mmu(pmap, sva, eva)
                   3144:        pmap_t pmap;
                   3145:        vm_offset_t sva, eva;
1.38      gwr      3146: {
1.85      gwr      3147:        pmeg_t pmegp;
                   3148:        vm_offset_t pgva, segva;
                   3149:        int pte, sme;
1.107     gwr      3150: #ifdef HAVECACHE
1.85      gwr      3151:        int flush_by_page = 0;
1.107     gwr      3152: #endif
1.85      gwr      3153:
                   3154:        CHECK_SPL();
                   3155:
                   3156: #ifdef DIAGNOSTIC
                   3157:        if (pmap != kernel_pmap) {
                   3158:                if (pmap->pm_ctxnum != get_context())
                   3159:                        panic("pmap_protect_mmu: wrong context");
                   3160:        }
                   3161: #endif
1.1       glass    3162:
1.85      gwr      3163:        segva = m68k_trunc_seg(sva);
                   3164:        sme = get_segmap(segva);
1.84      gwr      3165:
1.85      gwr      3166: #ifdef DIAGNOSTIC
                   3167:        /* Make sure it is valid and known. */
                   3168:        if (sme == SEGINV)
                   3169:                panic("pmap_protect_mmu: SEGINV");
                   3170:        if (pmap->pm_segmap && (pmap->pm_segmap[VA_SEGNUM(segva)] != sme))
                   3171:                panic("pmap_protect_mmu: incorrect sme, va=0x%lx", segva);
                   3172: #endif
1.38      gwr      3173:
1.85      gwr      3174:        pmegp = pmeg_p(sme);
                   3175:        /* have pmeg, will travel */
1.38      gwr      3176:
1.85      gwr      3177: #ifdef DIAGNOSTIC
                   3178:        /* Make sure we own the pmeg, right va, etc. */
                   3179:        if ((pmegp->pmeg_va != segva) ||
                   3180:                (pmegp->pmeg_owner != pmap) ||
                   3181:                (pmegp->pmeg_version != pmap->pm_version))
                   3182:        {
                   3183:                panic("pmap_protect_mmu: bad pmeg=%p", pmegp);
                   3184:        }
                   3185:        if (pmegp->pmeg_vpages <= 0)
                   3186:                panic("pmap_protect_mmu: no valid pages?");
                   3187: #endif
1.26      gwr      3188:
1.85      gwr      3189: #ifdef HAVECACHE
                   3190:        if (cache_size) {
                   3191:                /*
                    3192:                 * If the range to be write-protected is larger than
                    3193:                 * the cache, it will be cheaper to flush this segment entirely.
                   3194:                 */
                   3195:                if (cache_size < (eva - sva)) {
                   3196:                        /* cheaper to flush whole segment */
                   3197:                        cache_flush_segment(segva);
                   3198:                } else {
                   3199:                        flush_by_page = 1;
                   3200:                }
                   3201:        }
                   3202: #endif
1.84      gwr      3203:
1.86      gwr      3204:        /* Remove write permission in the given range. */
1.85      gwr      3205:        for (pgva = sva; pgva < eva; pgva += NBPG) {
                   3206:                pte = get_pte(pgva);
                   3207:                if (pte & PG_VALID) {
                   3208: #ifdef HAVECACHE
                   3209:                        if (flush_by_page) {
                   3210:                                cache_flush_page(pgva);
                   3211:                                /* Get fresh mod/ref bits from write-back. */
                   3212:                                pte = get_pte(pgva);
                   3213:                        }
                   3214: #endif
                   3215:                        if (IS_MAIN_MEM(pte)) {
                   3216:                                save_modref_bits(pte);
                   3217:                        }
                   3218:                        pte &= ~(PG_WRITE | PG_MODREF);
                   3219:                        set_pte(pgva, pte);
                   3220:                }
                   3221:        }
1.38      gwr      3222: }
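
                          /*
                           * Worked example of the flush heuristic above, assuming a
                           * 64KB VAC (Sun-3/200 class): write-protecting a 96KB range
                           * exceeds the cache, so one cache_flush_segment() is cheaper;
                           * a 32KB range is only four 8KB pages, so flush_by_page is
                           * used instead.
                           */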
1.28      gwr      3223:
1.66      gwr      3224: /*
1.85      gwr      3225:  * Remove write permissions, all in one PMEG,
                   3226:  * where it is not currently in any context.
1.66      gwr      3227:  */
1.38      gwr      3228: void
1.85      gwr      3229: pmap_protect_noctx(pmap, sva, eva)
1.38      gwr      3230:        pmap_t pmap;
1.85      gwr      3231:        vm_offset_t sva, eva;
1.38      gwr      3232: {
1.86      gwr      3233:        int old_ctx, pte, sme, segnum;
                   3234:        vm_offset_t pgva, segva;
1.66      gwr      3235:
1.38      gwr      3236:        CHECK_SPL();
                   3237:
1.85      gwr      3238: #ifdef DIAGNOSTIC
                   3239:        /* Kernel always in a context (actually, in all contexts). */
                   3240:        if (pmap == kernel_pmap)
                   3241:                panic("pmap_protect_noctx: kernel_pmap");
                   3242:        if (pmap->pm_segmap == NULL)
                   3243:                panic("pmap_protect_noctx: null segmap");
1.38      gwr      3244: #endif
                   3245:
1.86      gwr      3246:        segva = m68k_trunc_seg(sva);
                   3247:        segnum = VA_SEGNUM(segva);
1.85      gwr      3248:        sme = pmap->pm_segmap[segnum];
1.28      gwr      3249:        if (sme == SEGINV)
1.85      gwr      3250:                return;
1.86      gwr      3251:
                   3252:        /*
                   3253:         * Borrow the EMPTY_CONTEXT so we can access the PMEG
                   3254:         * at its normal virtual address.
                   3255:         */
                   3256:        old_ctx = get_context();
                   3257:        set_context(EMPTY_CONTEXT);
                   3258:        set_segmap(segva, sme);
1.77      gwr      3259:
1.85      gwr      3260:        /* Remove write permission in the given range. */
                   3261:        for (pgva = sva; pgva < eva; pgva += NBPG) {
1.86      gwr      3262:                pte = get_pte(pgva);
1.85      gwr      3263:                if (pte & PG_VALID) {
1.86      gwr      3264:                        /* No cache flush needed. */
1.85      gwr      3265:                        if (IS_MAIN_MEM(pte)) {
                   3266:                                save_modref_bits(pte);
                   3267:                        }
                   3268:                        pte &= ~(PG_WRITE | PG_MODREF);
1.86      gwr      3269:                        set_pte(pgva, pte);
1.85      gwr      3270:                }
                   3271:        }
1.86      gwr      3272:
                   3273:        /*
                   3274:         * Make the EMPTY_CONTEXT really empty again, and
                   3275:         * restore the previous context.
                   3276:         */
                   3277:        set_segmap(segva, SEGINV);
                   3278:        set_context(old_ctx);
1.2       glass    3279: }
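
                          /*
                           * The EMPTY_CONTEXT borrowing done above (and again in
                           * pmap_remove_noctx() below) could be factored out.  A
                           * minimal sketch, not compiled here; the helper names
                           * "pmeg_borrow" and "pmeg_return" are hypothetical:
                           */
                          #if 0
                          static int
                          pmeg_borrow(vm_offset_t segva, int sme)
                          {
                                  int old_ctx = get_context();

                                  set_context(EMPTY_CONTEXT);
                                  set_segmap(segva, sme); /* PMEG visible at its normal VA */
                                  return (old_ctx);
                          }

                          static void
                          pmeg_return(vm_offset_t segva, int old_ctx)
                          {
                                  set_segmap(segva, SEGINV);      /* keep EMPTY_CONTEXT truly empty */
                                  set_context(old_ctx);
                          }
                          #endif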
1.38      gwr      3280:
1.85      gwr      3281:
1.2       glass    3282: /*
1.85      gwr      3283:  *     Remove the given range of addresses from the specified map.
1.2       glass    3284:  *
1.85      gwr      3285:  *     It is assumed that the start and end are properly
                   3286:  *     rounded to the page size.
1.2       glass    3287:  */
1.38      gwr      3288: void
1.85      gwr      3289: pmap_remove(pmap, sva, eva)
                   3290:        pmap_t pmap;
                   3291:        vm_offset_t sva, eva;
1.2       glass    3292: {
1.85      gwr      3293:        vm_offset_t va, neva;
                   3294:        int segnum;
1.2       glass    3295:
1.85      gwr      3296:        if (pmap == NULL)
                   3297:                return;
1.38      gwr      3298:
1.85      gwr      3299: #ifdef PMAP_DEBUG
                   3300:        if ((pmap_debug & PMD_REMOVE) ||
                   3301:                ((sva <= pmap_db_watchva && eva > pmap_db_watchva)))
                   3302:                printf("pmap_remove(%p, 0x%lx, 0x%lx)\n", pmap, sva, eva);
                   3303: #endif
1.80      gwr      3304:
1.50      gwr      3305:        if (pmap == kernel_pmap) {
1.85      gwr      3306:                if (sva < virtual_avail)
                   3307:                        sva = virtual_avail;
1.98      gwr      3308:                if (eva > DVMA_MAP_END) {
1.85      gwr      3309: #ifdef PMAP_DEBUG
                   3310:                        db_printf("pmap_remove: eva=0x%lx\n", eva);
                   3311:                        Debugger();
                   3312: #endif
1.98      gwr      3313:                        eva = DVMA_MAP_END;
1.85      gwr      3314:                }
1.38      gwr      3315:        } else {
1.85      gwr      3316:                if (eva > VM_MAXUSER_ADDRESS)
                   3317:                        eva = VM_MAXUSER_ADDRESS;
1.38      gwr      3318:        }
1.80      gwr      3319:
1.85      gwr      3320:        va = sva;
                   3321:        segnum = VA_SEGNUM(va);
                   3322:        while (va < eva) {
                   3323:                neva = m68k_trunc_seg(va) + NBSG;
                   3324:                if (neva > eva)
                   3325:                        neva = eva;
                   3326:                if (pmap->pm_segmap[segnum] != SEGINV)
                   3327:                        pmap_remove1(pmap, va, neva);
                   3328:                va = neva;
                   3329:                segnum++;
1.56      gwr      3330:        }
1.2       glass    3331: }
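
                          /*
                           * Example of the loop above, with sun3's 128KB (0x20000)
                           * segments: pmap_remove(pmap, 0x32000, 0x52000) makes two
                           * calls, pmap_remove1(pmap, 0x32000, 0x40000) and
                           * pmap_remove1(pmap, 0x40000, 0x52000), so that each call
                           * stays within a single segment.
                           */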
                   3332:
                   3333: /*
1.85      gwr      3334:  * Remove user mappings, all within one segment
1.2       glass    3335:  */
                   3336: void
1.85      gwr      3337: pmap_remove1(pmap, sva, eva)
1.38      gwr      3338:        pmap_t pmap;
1.85      gwr      3339:        vm_offset_t sva, eva;
1.2       glass    3340: {
1.85      gwr      3341:        int old_ctx, s, sme;
                   3342:        boolean_t in_ctx;
                   3343:
                   3344:        s = splpmap();
                   3345:
                   3346: #ifdef DIAGNOSTIC
                   3347:        if (m68k_trunc_seg(sva) != m68k_trunc_seg(eva-1))
                   3348:                panic("pmap_remove1: bad range!");
                   3349: #endif
                   3350:
                   3351:        if (pmap == kernel_pmap) {
                   3352:                sme = get_segmap(sva);
                   3353:                if (sme != SEGINV)
                   3354:                        pmap_remove_mmu(pmap, sva, eva);
                   3355:                goto out;
                   3356:        }
                   3357:        /* It is a user pmap. */
                   3358:
                   3359:        /* There is a PMEG, but maybe not active. */
                   3360:        old_ctx = INVALID_CONTEXT;
                   3361:        in_ctx = FALSE;
                   3362:        if (has_context(pmap)) {
                   3363:                /* Temporary context change. */
                   3364:                old_ctx = get_context();
                   3365:                set_context(pmap->pm_ctxnum);
                   3366:                sme = get_segmap(sva);
                   3367:                if (sme != SEGINV)
                   3368:                        in_ctx = TRUE;
                   3369:        }
                   3370:
                   3371:        if (in_ctx == TRUE)
                   3372:                pmap_remove_mmu(pmap, sva, eva);
                   3373:        else
                   3374:                pmap_remove_noctx(pmap, sva, eva);
                   3375:
                   3376:        if (old_ctx != INVALID_CONTEXT) {
                   3377:                /* Restore previous context. */
                   3378:                set_context(old_ctx);
                   3379:        }
                   3380:
                   3381: out:
                   3382:        splx(s);
1.2       glass    3383: }
1.5       glass    3384:
1.38      gwr      3385: /*
1.85      gwr      3386:  * Remove some mappings, all in one PMEG,
1.38      gwr      3387:  * where that PMEG is currently in the MMU.
                   3388:  * The current context is already correct.
1.85      gwr      3389:  * If no PTEs remain valid in the PMEG, free it.
1.38      gwr      3390:  */
                   3391: void
1.85      gwr      3392: pmap_remove_mmu(pmap, sva, eva)
1.38      gwr      3393:        pmap_t pmap;
                   3394:        vm_offset_t sva, eva;
                   3395: {
                   3396:        pmeg_t pmegp;
1.82      gwr      3397:        vm_offset_t pgva, segva;
1.38      gwr      3398:        int pte, sme;
1.107     gwr      3399: #ifdef HAVECACHE
1.52      gwr      3400:        int flush_by_page = 0;
1.107     gwr      3401: #endif
1.38      gwr      3402:
                   3403:        CHECK_SPL();
                   3404:
                   3405: #ifdef DIAGNOSTIC
1.50      gwr      3406:        if (pmap != kernel_pmap) {
1.38      gwr      3407:                if (pmap->pm_ctxnum != get_context())
1.85      gwr      3408:                        panic("pmap_remove_mmu: wrong context");
1.38      gwr      3409:        }
                   3410: #endif
                   3411:
1.82      gwr      3412:        segva = m68k_trunc_seg(sva);
                   3413:        sme = get_segmap(segva);
1.80      gwr      3414:
1.38      gwr      3415: #ifdef DIAGNOSTIC
                   3416:        /* Make sure it is valid and known. */
                   3417:        if (sme == SEGINV)
1.85      gwr      3418:                panic("pmap_remove_mmu: SEGINV");
1.82      gwr      3419:        if (pmap->pm_segmap && (pmap->pm_segmap[VA_SEGNUM(segva)] != sme))
1.85      gwr      3420:                panic("pmap_remove_mmu: incorrect sme, va=0x%lx", segva);
1.38      gwr      3421: #endif
1.80      gwr      3422:
1.29      gwr      3423:        pmegp = pmeg_p(sme);
1.38      gwr      3424:        /* have pmeg, will travel */
                   3425:
1.30      gwr      3426: #ifdef DIAGNOSTIC
1.38      gwr      3427:        /* Make sure we own the pmeg, right va, etc. */
1.82      gwr      3428:        if ((pmegp->pmeg_va != segva) ||
1.38      gwr      3429:                (pmegp->pmeg_owner != pmap) ||
                   3430:                (pmegp->pmeg_version != pmap->pm_version))
                   3431:        {
1.85      gwr      3432:                panic("pmap_remove_mmu: bad pmeg=%p", pmegp);
1.38      gwr      3433:        }
                   3434:        if (pmegp->pmeg_vpages <= 0)
1.85      gwr      3435:                panic("pmap_remove_mmu: no valid pages?");
1.38      gwr      3436: #endif
                   3437:
                   3438: #ifdef HAVECACHE
1.52      gwr      3439:        if (cache_size) {
                   3440:                /*
                   3441:                 * If the range to be removed is larger than the cache,
                   3442:                 * it will be cheaper to flush this segment entirely.
                   3443:                 */
                   3444:                if (cache_size < (eva - sva)) {
                   3445:                        /* cheaper to flush whole segment */
1.82      gwr      3446:                        cache_flush_segment(segva);
1.52      gwr      3447:                } else {
                   3448:                        flush_by_page = 1;
                   3449:                }
                   3450:        }
1.30      gwr      3451: #endif
1.38      gwr      3452:
1.85      gwr      3453:        /* Invalidate the PTEs in the given range. */
1.82      gwr      3454:        for (pgva = sva; pgva < eva; pgva += NBPG) {
                   3455:                pte = get_pte(pgva);
1.38      gwr      3456:                if (pte & PG_VALID) {
1.52      gwr      3457: #ifdef HAVECACHE
                   3458:                        if (flush_by_page) {
1.82      gwr      3459:                                cache_flush_page(pgva);
1.52      gwr      3460:                                /* Get fresh mod/ref bits from write-back. */
1.82      gwr      3461:                                pte = get_pte(pgva);
1.52      gwr      3462:                        }
                   3463: #endif
                   3464:                        if (IS_MAIN_MEM(pte)) {
                   3465:                                save_modref_bits(pte);
1.92      gwr      3466:                                pv_unlink(pmap, pte, pgva);
1.85      gwr      3467:                        }
                   3468: #ifdef PMAP_DEBUG
                   3469:                        if ((pmap_debug & PMD_SETPTE) || (pgva == pmap_db_watchva)) {
                   3470:                                printf("pmap: set_pte pmap=%p va=0x%lx"
                   3471:                                           " old=0x%x new=0x%x (rrmmu)\n",
                   3472:                                           pmap, pgva, pte, PG_INVAL);
                   3473:                        }
                   3474: #endif
                   3475:                        set_pte(pgva, PG_INVAL);
                   3476:                        pmegp->pmeg_vpages--;
                   3477:                }
                   3478:        }
                   3479:
                   3480:        if (pmegp->pmeg_vpages <= 0) {
                   3481:                /* We are done with this pmeg. */
                   3482:                if (is_pmeg_wired(pmegp)) {
                   3483: #ifdef PMAP_DEBUG
                   3484:                        if (pmap_debug & PMD_WIRING) {
                   3485:                                db_printf("pmap: removing wired pmeg: %p\n", pmegp);
                   3486:                                Debugger();
1.52      gwr      3487:                        }
1.85      gwr      3488: #endif /* PMAP_DEBUG */
                   3489:                }
                   3490:
                   3491: #ifdef PMAP_DEBUG
                   3492:                if (pmap_debug & PMD_SEGMAP) {
                   3493:                        printf("pmap: set_segmap ctx=%d v=0x%lx old=0x%x new=ff (rm)\n",
                   3494:                            pmap->pm_ctxnum, segva, pmegp->pmeg_index);
                   3495:                }
                   3496:                pmeg_verify_empty(segva);
                   3497: #endif
                   3498:
                   3499:                /* Remove it from the MMU. */
                   3500:                if (kernel_pmap == pmap) {
                   3501:                        /* Did cache flush above. */
                   3502:                        set_segmap_allctx(segva, SEGINV);
                   3503:                } else {
                   3504:                        /* Did cache flush above. */
                   3505:                        set_segmap(segva, SEGINV);
1.38      gwr      3506:                }
1.85      gwr      3507:                pmap->pm_segmap[VA_SEGNUM(segva)] = SEGINV;
                   3508:                /* Now, put it on the free list. */
                   3509:                pmeg_free(pmegp);
1.38      gwr      3510:        }
                   3511: }
                   3512:
                   3513: /*
1.85      gwr      3514:  * Remove some mappings, all in one PMEG,
1.38      gwr      3515:  * where it is not currently in any context.
                   3516:  */
                   3517: void
1.85      gwr      3518: pmap_remove_noctx(pmap, sva, eva)
1.38      gwr      3519:        pmap_t pmap;
                   3520:        vm_offset_t sva, eva;
                   3521: {
                   3522:        pmeg_t pmegp;
1.86      gwr      3523:        int old_ctx, pte, sme, segnum;
                   3524:        vm_offset_t pgva, segva;
1.38      gwr      3525:
                   3526:        CHECK_SPL();
                   3527:
1.81      gwr      3528: #ifdef DIAGNOSTIC
1.38      gwr      3529:        /* Kernel always in a context (actually, in all contexts). */
1.50      gwr      3530:        if (pmap == kernel_pmap)
1.85      gwr      3531:                panic("pmap_remove_noctx: kernel_pmap");
1.38      gwr      3532:        if (pmap->pm_segmap == NULL)
1.85      gwr      3533:                panic("pmap_remove_noctx: null segmap");
1.38      gwr      3534: #endif
                   3535:
1.86      gwr      3536:        segva = m68k_trunc_seg(sva);
                   3537:        segnum = VA_SEGNUM(segva);
1.38      gwr      3538:        sme = pmap->pm_segmap[segnum];
1.80      gwr      3539:        if (sme == SEGINV)
                   3540:                return;
1.38      gwr      3541:        pmegp = pmeg_p(sme);
                   3542:
1.86      gwr      3543:        /*
                   3544:         * Borrow the EMPTY_CONTEXT so we can access the PMEG
                   3545:         * at its normal virtual address.
                   3546:         */
                   3547:        old_ctx = get_context();
                   3548:        set_context(EMPTY_CONTEXT);
                   3549:        set_segmap(segva, sme);
                   3550:
                   3551:        /* Invalidate the PTEs in the given range. */
1.82      gwr      3552:        for (pgva = sva; pgva < eva; pgva += NBPG) {
1.86      gwr      3553:                pte = get_pte(pgva);
1.38      gwr      3554:                if (pte & PG_VALID) {
1.86      gwr      3555:                        /* No cache flush needed. */
1.52      gwr      3556:                        if (IS_MAIN_MEM(pte)) {
                   3557:                                save_modref_bits(pte);
1.92      gwr      3558:                                pv_unlink(pmap, pte, pgva);
1.52      gwr      3559:                        }
1.76      gwr      3560: #ifdef PMAP_DEBUG
1.85      gwr      3561:                        if ((pmap_debug & PMD_SETPTE) || (pgva == pmap_db_watchva)) {
                   3562:                                printf("pmap: set_pte pmap=%p va=0x%lx"
                   3563:                                           " old=0x%x new=0x%x (rrncx)\n",
                   3564:                                           pmap, pgva, pte, PG_INVAL);
                   3565:                        }
1.76      gwr      3566: #endif
1.86      gwr      3567:                        set_pte(pgva, PG_INVAL);
1.85      gwr      3568:                        pmegp->pmeg_vpages--;
1.38      gwr      3569:                }
1.66      gwr      3570:        }
1.86      gwr      3571:
                   3572:        /*
                   3573:         * Make the EMPTY_CONTEXT really empty again, and
                   3574:         * restore the previous context.
                   3575:         */
                   3576:        set_segmap(segva, SEGINV);
                   3577:        set_context(old_ctx);
                   3578:
1.85      gwr      3579:        if (pmegp->pmeg_vpages <= 0) {
1.86      gwr      3580:                /* We are done with this pmeg. */
                   3581:                if (is_pmeg_wired(pmegp)) {
                   3582: #ifdef PMAP_DEBUG
                   3583:                        if (pmap_debug & PMD_WIRING) {
                   3584:                                db_printf("pmap: removing wired pmeg: %p\n", pmegp);
                   3585:                                Debugger();
                   3586:                        }
                   3587: #endif /* PMAP_DEBUG */
                   3588:                }
1.66      gwr      3589:
1.85      gwr      3590:                pmap->pm_segmap[segnum] = SEGINV;
                   3591:                pmeg_free(pmegp);
1.38      gwr      3592:        }
                   3593: }
1.85      gwr      3594:
1.38      gwr      3595:
                   3596: /*
1.69      gwr      3597:  * Count resident pages in this pmap.
                   3598:  * See: kern_sysctl.c:pmap_resident_count
1.38      gwr      3599:  */
                   3600: segsz_t
                   3601: pmap_resident_pages(pmap)
                   3602:        pmap_t pmap;
                   3603: {
                   3604:        int i, sme, pages;
                   3605:        pmeg_t pmeg;
                   3606:
1.69      gwr      3607:        if (pmap->pm_segmap == NULL)
                   3608:                return (0);
                   3609:
1.38      gwr      3610:        pages = 0;
1.69      gwr      3611:        for (i = 0; i < NUSEG; i++) {
                   3612:                sme = pmap->pm_segmap[i];
                   3613:                if (sme != SEGINV) {
                   3614:                        pmeg = pmeg_p(sme);
                   3615:                        pages += pmeg->pmeg_vpages;
                   3616:                }
                   3617:        }
                   3618:        return (pages);
                   3619: }
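
                          /*
                           * Presumably wired up as something like
                           *   #define pmap_resident_count(pmap) pmap_resident_pages(pmap)
                           * in the MD pmap.h (hypothetical; see the kern_sysctl.c
                           * reference above).
                           */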
                   3620:
                   3621: /*
                   3622:  * Count wired pages in this pmap.
                   3623:  * See vm_mmap.c:pmap_wired_count
                   3624:  */
                   3625: segsz_t
                   3626: pmap_wired_pages(pmap)
                   3627:        pmap_t pmap;
                   3628: {
                   3629:        int i, mask, sme, pages;
                   3630:        pmeg_t pmeg;
                   3631:
                    3632:        if (pmap->pm_segmap == NULL)
                   3633:                return (0);
                   3634:
                   3635:        pages = 0;
                   3636:        for (i = 0; i < NUSEG; i++) {
                   3637:                sme = pmap->pm_segmap[i];
                   3638:                if (sme != SEGINV) {
                   3639:                        pmeg = pmeg_p(sme);
                   3640:                        mask = 0x8000;
                   3641:                        do {
                   3642:                                if (pmeg->pmeg_wired & mask)
                   3643:                                        pages++;
                   3644:                                mask = (mask >> 1);
                   3645:                        } while (mask);
1.38      gwr      3646:                }
                   3647:        }
                   3648:        return (pages);
1.2       glass    3649: }
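
                          /*
                           * Example: pmeg_wired apparently keeps one bit per page of
                           * the 16-page segment, so pmeg_wired == 0x8001 contributes
                           * two wired pages to the count.
                           */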
                   3650:
1.38      gwr      3651:
1.2       glass    3652: /*
                   3653:  *     Require that all active physical maps contain no
                   3654:  *     incorrect entries NOW.  [This update includes
                   3655:  *     forcing updates of any address map caching.]
                   3656:  *
                    3657:  *     Generally used to ensure that a thread about
                   3658:  *     to run will see a semantically correct world.
                   3659:  */
1.38      gwr      3660: void
                   3661: pmap_update()
1.2       glass    3662: {
                   3663: }
1.38      gwr      3664:
                   3665: /*
                   3666:  *     pmap_copy_page copies the specified (machine independent)
                   3667:  *     page by mapping the page into virtual memory and using
                    3668:  *     copypage() to copy the page, one machine dependent page at a
                   3669:  *     time.
                   3670:  */
                   3671: void
                   3672: pmap_copy_page(src, dst)
                   3673:        vm_offset_t     src, dst;
                   3674: {
                   3675:        int pte;
                   3676:        int s;
                   3677:
1.77      gwr      3678:        s = splpmap();
                   3679:
1.38      gwr      3680: #ifdef PMAP_DEBUG
                   3681:        if (pmap_debug & PMD_COW)
1.73      fair     3682:                printf("pmap_copy_page: 0x%lx -> 0x%lx\n", src, dst);
1.38      gwr      3683: #endif
                   3684:
                   3685:        if (tmp_vpages_inuse)
                   3686:                panic("pmap_copy_page: vpages inuse");
                   3687:        tmp_vpages_inuse++;
                   3688:
1.50      gwr      3689:        /* PG_PERM is short for (PG_VALID|PG_WRITE|PG_SYSTEM|PG_NC) */
                    3690:        /* All mappings to tmp_vpages are non-cached, so no flush. */
1.38      gwr      3691:        pte = PG_PERM | PA_PGNUM(src);
                   3692:        set_pte(tmp_vpages[0], pte);
                   3693:        pte = PG_PERM | PA_PGNUM(dst);
                   3694:        set_pte(tmp_vpages[1], pte);
1.68      thorpej  3695:        copypage((char *) tmp_vpages[0], (char *) tmp_vpages[1]);
1.38      gwr      3696:        set_pte(tmp_vpages[0], PG_INVAL);
                    3697:        set_pte(tmp_vpages[1], PG_INVAL);
                   3698:
                   3699:        tmp_vpages_inuse--;
1.77      gwr      3700:
                   3701:        splx(s);
1.38      gwr      3702: }
                   3703:
1.2       glass    3704: /*
                   3705:  *     pmap_zero_page zeros the specified (machine independent)
                   3706:  *     page by mapping the page into virtual memory and using
                    3707:  *     zeropage() to clear its contents, one machine dependent page
                   3708:  *     at a time.
                   3709:  */
1.38      gwr      3710: void
                   3711: pmap_zero_page(pa)
                   3712:        vm_offset_t     pa;
1.2       glass    3713: {
1.38      gwr      3714:        int pte;
                   3715:        int s;
1.2       glass    3716:
1.77      gwr      3717:        s = splpmap();
                   3718:
1.26      gwr      3719: #ifdef PMAP_DEBUG
1.38      gwr      3720:        if (pmap_debug & PMD_COW)
1.73      fair     3721:                printf("pmap_zero_page: 0x%lx\n", pa);
1.38      gwr      3722: #endif
                   3723:
                   3724:        if (tmp_vpages_inuse)
                   3725:                panic("pmap_zero_page: vpages inuse");
                   3726:        tmp_vpages_inuse++;
1.50      gwr      3727:
                   3728:        /* PG_PERM is short for (PG_VALID|PG_WRITE|PG_SYSTEM|PG_NC) */
                    3729:        /* All mappings to tmp_vpages are non-cached, so no flush. */
1.38      gwr      3730:        pte = PG_PERM | PA_PGNUM(pa);
                   3731:        set_pte(tmp_vpages[0], pte);
1.68      thorpej  3732:        zeropage((char *) tmp_vpages[0]);
1.38      gwr      3733:        set_pte(tmp_vpages[0], PG_INVAL);
1.50      gwr      3734:
1.38      gwr      3735:        tmp_vpages_inuse--;
1.77      gwr      3736:
                   3737:        splx(s);
1.51      gwr      3738: }
                   3739:
                   3740: /*
                   3741:  *     Routine:        pmap_collect
                   3742:  *     Function:
                   3743:  *             Garbage collects the physical map system for
                   3744:  *             pages which are no longer used.
                   3745:  *             Success need not be guaranteed -- that is, there
                   3746:  *             may well be pages which are not referenced, but
                   3747:  *             others may be collected.
                   3748:  *     Usage:
                   3749:  *             Called by the pageout daemon when pages are scarce.
                   3750:  */
                   3751: void
                   3752: pmap_collect(pmap)
                   3753:        pmap_t pmap;
                   3754: {
1.38      gwr      3755: }
                   3756:
1.60      gwr      3757: /*
                   3758:  * Find first virtual address >= *va that is
                   3759:  * least likely to cause cache aliases.
                   3760:  * (This will just seg-align mappings.)
                   3761:  */
                   3762: void
                   3763: pmap_prefer(fo, va)
                   3764:        register vm_offset_t fo;
                   3765:        register vm_offset_t *va;
                   3766: {
                   3767:        register long   d;
                   3768:
                   3769:        d = fo - *va;
                   3770:        d &= SEGOFSET;
                   3771:        *va += d;
                   3772: }
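
                          /*
                           * Worked example, with SEGOFSET == 0x1ffff (128KB segments):
                           * for fo == 0x3a000 and *va == 0x100000,
                           * d == (0x3a000 - 0x100000) & 0x1ffff == 0x1a000, so *va
                           * becomes 0x11a000 and shares fo's offset within a segment,
                           * hence its cache alias class.
                           */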
1.61      gwr      3773:
                   3774: /*
1.74      gwr      3775:  * Fill in the sun3-specific part of the kernel core header
                   3776:  * for dumpsys().  (See machdep.c for the rest.)
1.61      gwr      3777:  */
                   3778: void
1.74      gwr      3779: pmap_kcore_hdr(sh)
                   3780:        struct sun3_kcore_hdr *sh;
1.61      gwr      3781: {
                   3782:        vm_offset_t va;
1.74      gwr      3783:        u_char *cp, *ep;
1.61      gwr      3784:
1.74      gwr      3785:        sh->segshift = SEGSHIFT;
                   3786:        sh->pg_frame = PG_FRAME;
                   3787:        sh->pg_valid = PG_VALID;
                   3788:
                   3789:        /* Copy the kernel segmap (256 bytes). */
1.61      gwr      3790:        va = KERNBASE;
1.74      gwr      3791:        cp = sh->ksegmap;
                   3792:        ep = cp + sizeof(sh->ksegmap);
1.61      gwr      3793:        do {
                   3794:                *cp = get_segmap(va);
1.74      gwr      3795:                va += NBSG;
1.61      gwr      3796:                cp++;
1.74      gwr      3797:        } while (cp < ep);
1.61      gwr      3798: }
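
                          /*
                           * With segshift/pg_frame/pg_valid recorded, a dump reader
                           * (libkvm, for instance) can decode ksegmap and the saved
                           * pagemap without compiled-in sun3 constants: index ksegmap
                           * by (va - KERNBASE) >> segshift, check the chosen PTE
                           * against pg_valid, then mask it with pg_frame.
                           */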
                   3799:
                   3800: /*
                   3801:  * Copy the pagemap RAM into the passed buffer (one page)
                   3802:  * starting at OFF in the pagemap RAM.
                   3803:  */
                   3804: void
1.65      gwr      3805: pmap_get_pagemap(pt, off)
                   3806:        int *pt;
1.61      gwr      3807:        int off;
                   3808: {
                   3809:        vm_offset_t va, va_end;
                   3810:        int sme, sme_end;       /* SegMap Entry numbers */
                   3811:
                    3812:        sme = (off >> 6);       /* starting PMEG: 16 PTEs * 4 bytes = 64 bytes each */
                    3813:        sme_end = sme + 128;    /* one 8KB page of PTEs covers 128 PMEGs */
                   3814:        va_end = temp_seg_va + NBSG;
                   3815:
                   3816:        do {
                   3817:                set_segmap(temp_seg_va, sme);
                   3818:                va = temp_seg_va;
                   3819:                do {
                   3820:                        *pt++ = get_pte(va);
                   3821:                        va += NBPG;
                   3822:                } while (va < va_end);
                   3823:                sme++;
                   3824:        } while (sme < sme_end);
                   3825:        set_segmap(temp_seg_va, SEGINV);
                   3826: }
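
                          /*
                           * Example: each PMEG's 16 four-byte PTEs occupy 64 bytes,
                           * so pmap_get_pagemap(pt, 0) copies the PTEs for PMEGs
                           * 0-127 and pmap_get_pagemap(pt, 8192) those for
                           * PMEGs 128-255.
                           */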
                   3827:
1.60      gwr      3828:
                   3829: /*
                   3830:  * Helper functions for changing unloaded PMEGs
1.83      gwr      3831:  * XXX: These should go away.  (Borrow context zero instead.)
1.60      gwr      3832:  */
1.80      gwr      3833:
1.38      gwr      3834: static int temp_seg_inuse;
                   3835:
                   3836: static int
                   3837: get_pte_pmeg(int pmeg_num, int page_num)
                   3838: {
                   3839:        vm_offset_t va;
                   3840:        int pte;
                   3841:
1.50      gwr      3842:        CHECK_SPL();
1.38      gwr      3843:        if (temp_seg_inuse)
                   3844:                panic("get_pte_pmeg: temp_seg_inuse");
1.50      gwr      3845:        temp_seg_inuse++;
1.38      gwr      3846:
                   3847:        va = temp_seg_va;
                   3848:        set_segmap(temp_seg_va, pmeg_num);
                   3849:        va += NBPG*page_num;
                   3850:        pte = get_pte(va);
                   3851:        set_segmap(temp_seg_va, SEGINV);
1.50      gwr      3852:
1.38      gwr      3853:        temp_seg_inuse--;
                   3854:        return pte;
                   3855: }
                   3856:
                   3857: static void
                   3858: set_pte_pmeg(int pmeg_num, int page_num, int pte)
                   3859: {
                   3860:        vm_offset_t va;
                   3861:
1.50      gwr      3862:        CHECK_SPL();
1.38      gwr      3863:        if (temp_seg_inuse)
                   3864:                panic("set_pte_pmeg: temp_seg_inuse");
1.50      gwr      3865:        temp_seg_inuse++;
1.38      gwr      3866:
1.50      gwr      3867:        /* We never access data in temp_seg_va so no need to flush. */
1.38      gwr      3868:        va = temp_seg_va;
                   3869:        set_segmap(temp_seg_va, pmeg_num);
                   3870:        va += NBPG*page_num;
                   3871:        set_pte(va, pte);
                   3872:        set_segmap(temp_seg_va, SEGINV);
1.50      gwr      3873:
1.38      gwr      3874:        temp_seg_inuse--;
1.2       glass    3875: }
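
                          /*
                           * A sketch of the replacement suggested by the XXX above:
                           * borrow EMPTY_CONTEXT the way pmap_remove_noctx() does, so
                           * the temporary mapping never appears in a live context.
                           * Not compiled; untested illustration.
                           */
                          #if 0
                          static int
                          get_pte_pmeg(int pmeg_num, int page_num)
                          {
                                  int old_ctx, pte;

                                  CHECK_SPL();
                                  old_ctx = get_context();
                                  set_context(EMPTY_CONTEXT);
                                  set_segmap(temp_seg_va, pmeg_num);
                                  pte = get_pte(temp_seg_va + NBPG * page_num);
                                  set_segmap(temp_seg_va, SEGINV);
                                  set_context(old_ctx);
                                  return (pte);
                          }
                          #endif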
1.109     is       3876:
                   3877: /*
                   3878:  *     Routine:        pmap_procwr
                   3879:  *
                   3880:  *     Function:
                   3881:  *             Synchronize caches corresponding to [addr, addr+len) in p.
                   3882:  */
                   3883: void
                   3884: pmap_procwr(p, va, len)
                   3885:        struct proc     *p;
                   3886:        vaddr_t         va;
                   3887:        size_t          len;
                   3888: {
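                                 /* 0x80000004 == CC_EXTPURGE | CC_IPURGE */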
                   3889:        (void)cachectl1(0x80000004, va, len, p);
                   3890: }
                   3891:
1.78      gwr      3892:
                   3893: #ifdef PMAP_DEBUG
                   3894: /* Things to call from the debugger. */
                   3895:
                   3896: void
                   3897: pmap_print(pmap)
                   3898:        pmap_t pmap;
                   3899: {
1.81      gwr      3900:        db_printf(" pm_ctxnum=%d\n", pmap->pm_ctxnum);
                   3901:        db_printf(" pm_version=0x%x\n", pmap->pm_version);
                   3902:        db_printf(" pm_segmap=%p\n", pmap->pm_segmap);
1.78      gwr      3903: }
                   3904:
                   3905: void
                   3906: pmeg_print(pmegp)
                   3907:        pmeg_t pmegp;
                   3908: {
1.81      gwr      3909:        db_printf("link_next=%p  link_prev=%p\n",
1.78      gwr      3910:            pmegp->pmeg_link.tqe_next,
                   3911:            pmegp->pmeg_link.tqe_prev);
1.81      gwr      3912:        db_printf("index=0x%x owner=%p own_vers=0x%x\n",
1.78      gwr      3913:            pmegp->pmeg_index, pmegp->pmeg_owner, pmegp->pmeg_version);
1.81      gwr      3914:        db_printf("va=0x%lx wired=0x%x reserved=0x%x vpgs=0x%x qstate=0x%x\n",
1.78      gwr      3915:            pmegp->pmeg_va, pmegp->pmeg_wired,
                   3916:            pmegp->pmeg_reserved, pmegp->pmeg_vpages,
                   3917:            pmegp->pmeg_qstate);
                   3918: }
                   3919:
                   3920: void
                   3921: pv_print(pa)
                   3922:        vm_offset_t pa;
                   3923: {
                   3924:        pv_entry_t pv;
1.84      gwr      3925:        int idx;
1.78      gwr      3926:
1.84      gwr      3927:        if (!pv_initialized) {
                   3928:                db_printf("no pv_flags_tbl\n");
                   3929:                return;
                   3930:        }
                   3931:        idx = PA_PGNUM(pa);
1.87      gwr      3932:        if (idx >= physmem) {
1.84      gwr      3933:                db_printf("bad address\n");
1.78      gwr      3934:                return;
1.84      gwr      3935:        }
                   3936:        db_printf("pa=0x%lx, flags=0x%x\n",
                   3937:                          pa, pv_flags_tbl[idx]);
1.78      gwr      3938:
1.84      gwr      3939:        pv = pv_head_tbl[idx];
1.78      gwr      3940:        while (pv) {
1.84      gwr      3941:                db_printf(" pv_entry %p pmap %p va 0x%lx next %p\n",
1.78      gwr      3942:                           pv, pv->pv_pmap, pv->pv_va, pv->pv_next);
                   3943:                pv = pv->pv_next;
                   3944:        }
                   3945: }
                   3946: #endif /* PMAP_DEBUG */
1.2       glass    3947:
1.26      gwr      3948: /*
                   3949:  * Local Variables:
1.38      gwr      3950:  * tab-width: 4
1.26      gwr      3951:  * End:
                   3952:  */
