
Annotation of src/sys/arch/sun3/sun3/pmap.c, Revision 1.148.2.1

1.148.2.1! yamt        1: /*     $NetBSD: pmap.c,v 1.148 2005/06/19 20:00:28 thorpej Exp $       */
1.36      cgd         2:
1.64      gwr         3: /*-
                      4:  * Copyright (c) 1996 The NetBSD Foundation, Inc.
1.13      glass       5:  * All rights reserved.
                      6:  *
1.64      gwr         7:  * This code is derived from software contributed to The NetBSD Foundation
                      8:  * by Adam Glass and Gordon W. Ross.
                      9:  *
1.13      glass      10:  * Redistribution and use in source and binary forms, with or without
                     11:  * modification, are permitted provided that the following conditions
                     12:  * are met:
                     13:  * 1. Redistributions of source code must retain the above copyright
                     14:  *    notice, this list of conditions and the following disclaimer.
                     15:  * 2. Redistributions in binary form must reproduce the above copyright
                     16:  *    notice, this list of conditions and the following disclaimer in the
                     17:  *    documentation and/or other materials provided with the distribution.
                     18:  * 3. All advertising materials mentioning features or use of this software
                     19:  *    must display the following acknowledgement:
1.64      gwr        20:  *        This product includes software developed by the NetBSD
                     21:  *        Foundation, Inc. and its contributors.
                     22:  * 4. Neither the name of The NetBSD Foundation nor the names of its
                     23:  *    contributors may be used to endorse or promote products derived
                     24:  *    from this software without specific prior written permission.
1.13      glass      25:  *
1.64      gwr        26:  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
                     27:  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
                     28:  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
1.66      gwr        29:  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
                     30:  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
1.64      gwr        31:  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
                     32:  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
                     33:  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
                     34:  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
                     35:  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
                     36:  * POSSIBILITY OF SUCH DAMAGE.
1.13      glass      37:  */
1.25      gwr        38:
1.3       glass      39: /*
1.1       glass      40:  * Some notes:
                     41:  *
1.84      gwr        42:  * sun3s have 8 contexts.  In this pmap design, the kernel is mapped
1.38      gwr        43:  * into all contexts.  Each process takes up a known portion of its
1.1       glass      44:  * context, and processes compete for the available contexts on an LRU basis.
                     45:  *
1.52      gwr        46:  * sun3s also have this evil "PMEG" crapola.  Essentially each "context"'s
1.1       glass      47:  * address space is defined by the 2048 one-byte entries in the segment map.
1.38      gwr        48:  * Each of these 1-byte entries points to a "Page Map Entry Group" (PMEG)
                     49:  * which contains the mappings for that virtual segment.  (This strange
                     50:  * terminology was invented by Sun and is preserved here for consistency.)
                     51:  * Each PMEG maps a 128KB segment, as 16 pages of 8KB each.
                     52:  *
1.52      gwr        53:  * As you might guess, these PMEGs are in short supply and heavy demand.
                     54:  * PMEGs allocated to the kernel are "static" in the sense that they can't
                     55:  * be stolen from it.  PMEGs allocated to a particular segment of a
1.1       glass      56:  * pmap's virtual space will be fought over by the other pmaps.
                     57:  */
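
/*
 * A minimal sketch of the geometry described above, assuming the
 * usual sun3 values (the real constants come from the machine
 * headers included below): 2048 segmap entries per context, 128KB
 * per segment, 16 pages of 8KB per PMEG.
 */
#if 0	/* illustrative sketch; compiled out */
#define SKETCH_NBSG	0x20000		/* 128KB mapped by one PMEG */
#define SKETCH_SEGSHIFT	17		/* log2(SKETCH_NBSG) */
#define SKETCH_NSEGMAP	2048		/* segmap entries per context */

/* 2048 segments * 128KB = 256MB of virtual space per context. */
static unsigned int
sketch_va_segnum(unsigned int va)
{
	return (va >> SKETCH_SEGSHIFT);	/* cf. VA_SEGNUM() below */
}
#endif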
                     58:
                     59: /*
1.65      gwr        60:  * Cache management:
                     61:  * All sun3 cache implementations are write-back.
                     62:  * Flushes must be done before removing translations
                     63:  * from the MMU because the cache uses the MMU.
                     64:  */
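
/*
 * A sketch of that ordering rule, assuming a segment-sized unmap and
 * the cache_flush_segment() routine from cache.h; the real removal
 * paths (e.g. pmap_remove_mmu() below) follow this same
 * flush-then-unmap order.
 */
#if 0	/* illustrative sketch; compiled out */
static void
sketch_unmap_segment(vaddr_t va)
{
	/* Flushing after set_segmap() would strand dirty lines,
	 * because the write-back needs the MMU translation. */
	cache_flush_segment(va);	/* write back dirty lines first */
	set_segmap(va, SEGINV);		/* now safe to drop the mapping */
}
#endif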
                     65:
                     66: /*
1.1       glass      67:  * wanted attributes:
                     68:  *       pmegs that aren't needed by a pmap remain in the MMU.
                     69:  *       quick context switches between pmaps
                     70:  *       kernel is in all contexts
                     71:  */
                     72:
1.83      gwr        73: /*
1.84      gwr        74:  * Project1:  Use a "null" context for processes that have not
1.83      gwr        75:  * touched any user-space address recently.  This is efficient
                     76:  * for things that stay in the kernel for a while, waking up
                     77:  * to handle some I/O, then going back to sleep (e.g. nfsd).
                     78:  * If and when such a process returns to user-mode, it will
                     79:  * fault and be given a real context at that time.
                     80:  *
                     81:  * This also lets context switch be fast, because all we need
                     82:  * to do there for the MMU is slam the context register.
1.84      gwr        83:  *
                     84:  * Project2:  Use a private pool of PV elements.  This pool can be
                     85:  * fixed size because the total mapped virtual space supported by
                     86:  * the MMU H/W (and this pmap) is fixed for all time.
1.83      gwr        87:  */
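
/*
 * A sketch of the Project1 idea, using a hypothetical sketch_switch();
 * the real routine is _pmap_switch(), declared later in this file.
 */
#if 0	/* illustrative sketch; compiled out */
static void
sketch_switch(pmap_t pmap)
{
	/*
	 * A pmap that has not touched user space recently keeps
	 * EMPTY_CONTEXT, so switching to it is just this register
	 * load; a real context is assigned lazily when the process
	 * next faults in user mode.
	 */
	set_context(pmap->pm_ctxnum);
}
#endif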
1.144     lukem      88:
                     89: #include <sys/cdefs.h>
1.148.2.1! yamt       90: __KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.148 2005/06/19 20:00:28 thorpej Exp $");
1.83      gwr        91:
1.104     jonathan   92: #include "opt_ddb.h"
1.143     martin     93: #include "opt_pmap_debug.h"
1.103     gwr        94:
1.38      gwr        95: #include <sys/param.h>
                     96: #include <sys/systm.h>
                     97: #include <sys/proc.h>
                     98: #include <sys/malloc.h>
1.132     chs        99: #include <sys/pool.h>
1.38      gwr       100: #include <sys/user.h>
                    101: #include <sys/queue.h>
1.74      gwr       102: #include <sys/kcore.h>
1.38      gwr       103:
1.103     gwr       104: #include <uvm/uvm.h>
1.110     mrg       105:
1.74      gwr       106: #include <machine/cpu.h>
                    107: #include <machine/dvma.h>
1.76      gwr       108: #include <machine/idprom.h>
1.74      gwr       109: #include <machine/kcore.h>
1.38      gwr       110: #include <machine/mon.h>
1.74      gwr       111: #include <machine/pmap.h>
                    112: #include <machine/pte.h>
1.38      gwr       113: #include <machine/vmparam.h>
1.138     chs       114: #include <m68k/cacheops.h>
1.65      gwr       115:
1.99      gwr       116: #include <sun3/sun3/cache.h>
                    117: #include <sun3/sun3/control.h>
                    118: #include <sun3/sun3/fc.h>
                    119: #include <sun3/sun3/machdep.h>
                    120: #include <sun3/sun3/obmem.h>
                    121:
1.81      gwr       122: #ifdef DDB
                    123: #include <ddb/db_output.h>
                    124: #else
                    125: #define db_printf printf
                    126: #endif
                    127:
1.78      gwr       128: /* Verify the correspondence between these definitions. */
1.76      gwr       129: #if    (PMAP_OBIO << PG_MOD_SHIFT) != PGT_OBIO
                    130: #error "PMAP_XXX definitions don't match pte.h!"
                    131: #endif
                    132:
1.89      gwr       133: /* Type bits in a "pseudo" physical address. (XXX: pmap.h?) */
                    134: #define PMAP_TYPE      PMAP_VME32
1.75      gwr       135:
1.78      gwr       136: /*
                    137:  * Local convenience macros
                    138:  */
                    139:
1.98      gwr       140: #define DVMA_MAP_END   (DVMA_MAP_BASE + DVMA_MAP_AVAIL)
                    141:
1.80      gwr       142: /* User segments from 0 to KERNBASE */
                    143: #define        NUSEG   (KERNBASE / NBSG)
                    144: /* The remainder are kernel segments. */
                    145: #define        NKSEG   (NSEGMAP - NUSEG)
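
/*
 * Worked example, assuming the usual sun3 values KERNBASE=0x0E000000
 * and NBSG=0x20000 (128KB), with NSEGMAP=2048:
 *	NUSEG = 0x0E000000 / 0x20000 = 1792 user segments
 *	NKSEG = 2048 - 1792          =  256 kernel segments
 */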
1.78      gwr       146:
                    147: #define VA_SEGNUM(x)   ((u_int)(x) >> SEGSHIFT)
1.50      gwr       148:
1.76      gwr       149: /*
1.78      gwr       150:  * Only "main memory" pages are registered in the pv_lists.
                    151:  * This macro is used to determine if a given pte refers to
                    152:  * "main memory" or not.  One slight hack here deserves more
                    153:  * explanation:  The Sun frame buffers all appear as PG_OBMEM
                    154:  * devices but way up near the end of the address space.
                    155:  * We do not want to consider these as "main memory" so the
                    156:  * macro below treats the high bits of the PFN as type bits.
                    157:  *
                    158:  * Note that on the 3/60 only 16 bits of PFN are stored in the
                    159:  * MMU and the top 3 bits read back as zero.  This means a
                    160:  * translation entered into the MMU for physical address
                    161:  * 0xFF000000 will look like 0x1F000000 after one reads back
                    162:  * the pte and converts the PFN to a physical address.
                    163:  */
1.88      gwr       164: #define MEM_BITS       (PG_TYPE | PA_PGNUM(0xF8000000))
1.78      gwr       165: #define        IS_MAIN_MEM(pte) (((pte) & MEM_BITS) == 0)
                    166:
1.87      gwr       167: /* Does this (pseudo) PA represent device space? */
1.89      gwr       168: #define PA_DEV_MASK   (0xF8000000 | PMAP_TYPE)
1.88      gwr       169: #define PA_IS_DEV(pa) ((pa) & PA_DEV_MASK)
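
/*
 * Worked example of the 3/60 truncation described above, assuming
 * 8KB pages (13-bit page offset) and a 16-bit stored PFN:
 *	PA 0xFF000000	-> PFN = 0xFF000000 >> 13	= 0x7F800
 *	16 bits stored	-> PFN = 0x7F800 & 0xFFFF	= 0xF800
 *	read back	-> PA  = 0xF800 << 13		= 0x1F000000
 * Hence the 0xF8000000 high-bit mask in MEM_BITS and PA_DEV_MASK.
 */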
1.87      gwr       170:
1.78      gwr       171: /*
                    172:  * Is there a Virtually Addressed Cache (VAC) alias problem
                    173:  * if one page is mapped at both a1 and a2?
                    174:  */
                    175: #define        BADALIAS(a1, a2)        (((int)(a1) ^ (int)(a2)) & SEGOFSET)
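
/*
 * Example, assuming NBSG=0x20000 so SEGOFSET=0x1FFFF: a page mapped
 * at VAs 0x00000 and 0x20000 is safe (BADALIAS() == 0, the VAs differ
 * only above SEGOFSET), but mapped at 0x00000 and 0x12000 it aliases
 * in the VAC (BADALIAS() != 0) and must be made non-cached.
 */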
                    176:
                    177:
                    178: /*
                    179:  * Debugging support.
                    180:  */
                    181: #define        PMD_ENTER       1
                    182: #define        PMD_LINK        2
                    183: #define        PMD_PROTECT     4
                    184: #define        PMD_SWITCH      8
                    185: #define PMD_COW                0x10
                    186: #define PMD_MODBIT     0x20
                    187: #define PMD_REFBIT     0x40
                    188: #define PMD_WIRING     0x80
                    189: #define PMD_CONTEXT    0x100
                    190: #define PMD_CREATE     0x200
                    191: #define PMD_SEGMAP     0x400
                    192: #define PMD_SETPTE     0x800
1.132     chs       193: #define PMD_FAULT      0x1000
                    194: #define PMD_KMAP       0x2000
1.78      gwr       195:
                    196: #define        PMD_REMOVE      PMD_ENTER
                    197: #define        PMD_UNLINK      PMD_LINK
                    198:
                    199: #ifdef PMAP_DEBUG
                    200: int pmap_debug = 0;
                    201: int pmap_db_watchva = -1;
                    202: int pmap_db_watchpmeg = -1;
                    203: #endif /* PMAP_DEBUG */
                    204:
                    205: /*
                    206:  * Miscellaneous variables.
                    207:  *
1.76      gwr       208:  * For simplicity, this interface retains the variables
                    209:  * that were used in the old interface (without NONCONTIG).
                    210:  * These are set in pmap_bootstrap() and used in
                    211:  * pmap_next_page().
                    212:  */
1.142     thorpej   213: vaddr_t virtual_avail, virtual_end;
1.132     chs       214: paddr_t avail_start, avail_end;
1.78      gwr       215: #define        managed(pa)     (((pa) >= avail_start) && ((pa) < avail_end))
1.76      gwr       216:
                    217: /* used to skip the Sun3/50 video RAM */
1.132     chs       218: static vaddr_t hole_start, hole_size;
1.38      gwr       219:
1.78      gwr       220: /* This is for pmap_next_page() */
1.132     chs       221: static paddr_t avail_next;
1.78      gwr       222:
                    223: /* This is where we map a PMEG without a context. */
1.132     chs       224: static vaddr_t temp_seg_va;
1.78      gwr       225:
                    226: /*
                    227:  * Location to store virtual addresses
                    228:  * to be used in copy/zero operations.
                    229:  */
1.132     chs       230: vaddr_t tmp_vpages[2] = {
1.99      gwr       231:        SUN3_MONSHORTSEG,
1.140     thorpej   232:        SUN3_MONSHORTSEG + PAGE_SIZE };
1.78      gwr       233: int tmp_vpages_inuse;
                    234:
                    235: static int pmap_version = 1;
                    236: struct pmap kernel_pmap_store;
                    237: #define kernel_pmap (&kernel_pmap_store)
1.82      gwr       238: static u_char kernel_segmap[NSEGMAP];
1.132     chs       239:
                    240: /* memory pool for pmap structures */
                    241: struct pool    pmap_pmap_pool;
1.78      gwr       242:
1.38      gwr       243: /* statistics... */
                    244: struct pmap_stats {
                    245:        int     ps_enter_firstpv;       /* pv heads entered */
                    246:        int     ps_enter_secondpv;      /* pv nonheads entered */
1.39      gwr       247:        int     ps_unlink_pvfirst;      /* # of pv_unlinks on head */
                    248:        int     ps_unlink_pvsearch;     /* # of pv_unlink searches */
1.40      gwr       249:        int     ps_pmeg_faultin;        /* pmegs reloaded */
1.39      gwr       250:        int     ps_changeprots;         /* # of calls to changeprot */
                    251:        int     ps_changewire;          /* useless wiring changes */
                    252:        int     ps_npg_prot_all;        /* # of active pages protected */
                    253:        int     ps_npg_prot_actual;     /* pages actually affected */
1.60      gwr       254:        int     ps_vac_uncached;        /* non-cached due to bad alias */
                    255:        int     ps_vac_recached;        /* re-cached when bad alias gone */
1.38      gwr       256: } pmap_stats;
                    257:
1.78      gwr       258: #define pmap_lock(pmap) simple_lock(&pmap->pm_lock)
                    259: #define pmap_unlock(pmap) simple_unlock(&pmap->pm_lock)
                    260: #define pmap_add_ref(pmap) ++pmap->pm_refcount
                    261: #define pmap_del_ref(pmap) --pmap->pm_refcount
                    262: #define pmap_refcount(pmap) pmap->pm_refcount
                    263:
1.77      gwr       264: #ifdef PMAP_DEBUG
                    265: #define        CHECK_SPL() do { \
                    266:        if ((getsr() & PSL_IPL) < PSL_IPL4) \
                    267:                panic("pmap: bad spl, line %d", __LINE__); \
                    268: } while (0)
                    269: #else  /* PMAP_DEBUG */
                    270: #define        CHECK_SPL() (void)0
                    271: #endif /* PMAP_DEBUG */
                    272:
1.38      gwr       273:
1.2       glass     274: /*
1.78      gwr       275:  * PV support.
                    276:  * (i.e. Find all virtual mappings of a physical page.)
1.5       glass     277:  */
                    278:
1.38      gwr       279: int pv_initialized = 0;
1.2       glass     280:
1.84      gwr       281: /* One of these for each mapped virtual page. */
1.1       glass     282: struct pv_entry {
1.38      gwr       283:        struct pv_entry *pv_next;
                    284:        pmap_t         pv_pmap;
1.132     chs       285:        vaddr_t        pv_va;
1.1       glass     286: };
1.38      gwr       287: typedef struct pv_entry *pv_entry_t;
1.1       glass     288:
1.84      gwr       289: /* Table of PV list heads (per physical page). */
                    290: static struct pv_entry **pv_head_tbl;
                    291:
                    292: /* Free list of PV entries. */
                    293: static struct pv_entry *pv_free_list;
                    294:
                    295: /* Table of flags (per physical page). */
                    296: static u_char *pv_flags_tbl;
1.1       glass     297:
1.38      gwr       298: /* These are as in the MMU but shifted by PV_SHIFT. */
                    299: #define PV_SHIFT       24
                    300: #define PV_VALID  0x80
                    301: #define PV_WRITE  0x40
                    302: #define PV_SYSTEM 0x20
                    303: #define PV_NC     0x10
                    304: #define PV_PERM   0xF0
                    305: #define PV_TYPE   0x0C
                    306: #define PV_REF    0x02
                    307: #define PV_MOD    0x01
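
/*
 * Worked example, assuming the usual sun3 pte.h encodings
 * (PG_VALID=0x80000000, ..., PG_REF=0x02000000, PG_MOD=0x01000000):
 *	PG_MOD >> PV_SHIFT == 0x01 == PV_MOD
 *	PG_REF >> PV_SHIFT == 0x02 == PV_REF
 * which is the relation save_modref_bits() below relies on.
 */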
                    308:
                    309:
                    310: /*
1.78      gwr       311:  * context structures, and queues
                    312:  */
                    313:
                    314: struct context_state {
                    315:        TAILQ_ENTRY(context_state) context_link;
                    316:        int            context_num;
                    317:        struct pmap   *context_upmap;
                    318: };
                    319: typedef struct context_state *context_t;
                    320:
1.83      gwr       321: #define INVALID_CONTEXT -1     /* impossible value */
                    322: #define EMPTY_CONTEXT 0
                    323: #define FIRST_CONTEXT 1
                    324: #define        has_context(pmap)       ((pmap)->pm_ctxnum != EMPTY_CONTEXT)
1.78      gwr       325:
1.79      gwr       326: TAILQ_HEAD(context_tailq, context_state)
                    327:        context_free_queue, context_active_queue;
1.50      gwr       328:
1.78      gwr       329: static struct context_state context_array[NCONTEXT];
1.1       glass     330:
                    331:
1.38      gwr       332: /*
1.81      gwr       333:  * PMEG structures, queues, and macros
1.38      gwr       334:  */
                    335: #define PMEGQ_FREE     0
                    336: #define PMEGQ_INACTIVE 1
                    337: #define PMEGQ_ACTIVE   2
                    338: #define PMEGQ_KERNEL   3
                    339: #define PMEGQ_NONE     4
                    340:
                    341: struct pmeg_state {
                    342:        TAILQ_ENTRY(pmeg_state) pmeg_link;
                    343:        int            pmeg_index;
                    344:        pmap_t         pmeg_owner;
                    345:        int            pmeg_version;
1.132     chs       346:        vaddr_t        pmeg_va;
1.38      gwr       347:        int            pmeg_wired;
                    348:        int            pmeg_reserved;
                    349:        int            pmeg_vpages;
                    350:        int            pmeg_qstate;
                    351: };
                    352:
                    353: typedef struct pmeg_state *pmeg_t;
                    354:
                    355: #define PMEG_INVAL (NPMEG-1)
                    356: #define PMEG_NULL (pmeg_t) NULL
                    357:
                    358: /* XXX - Replace pmeg_kernel_queue with pmeg_wired_queue ? */
1.79      gwr       359: TAILQ_HEAD(pmeg_tailq, pmeg_state)
                    360:        pmeg_free_queue, pmeg_inactive_queue,
1.38      gwr       361:        pmeg_active_queue, pmeg_kernel_queue;
1.26      gwr       362:
                    363: static struct pmeg_state pmeg_array[NPMEG];
1.20      glass     364:
1.1       glass     365:
1.38      gwr       366: /*
                    367:  * prototypes
                    368:  */
1.145     chs       369: static int get_pte_pmeg(int, int);
                    370: static void set_pte_pmeg(int, int, int);
1.38      gwr       371:
1.145     chs       372: static void context_allocate(pmap_t);
                    373: static void context_free(pmap_t);
                    374: static void context_init(void);
                    375:
                    376: static void pmeg_init(void);
                    377: static void pmeg_reserve(int);
                    378:
                    379: static pmeg_t pmeg_allocate(pmap_t, vaddr_t);
                    380: static void pmeg_mon_init(vaddr_t, vaddr_t, int);
                    381: static void pmeg_release(pmeg_t);
                    382: static void pmeg_free(pmeg_t);
                    383: static pmeg_t pmeg_cache(pmap_t, vaddr_t);
                    384: static void pmeg_set_wiring(pmeg_t, vaddr_t, int);
                    385:
                    386: static int  pv_link  (pmap_t, int, vaddr_t);
                    387: static void pv_unlink(pmap_t, int, vaddr_t);
                    388: static void pv_remove_all(paddr_t);
                    389: static void pv_changepte(paddr_t, int, int);
                    390: static u_int pv_syncflags(pv_entry_t);
                    391: static void pv_init(void);
                    392:
                    393: static void pmeg_clean(pmeg_t);
                    394: static void pmeg_clean_free(void);
                    395:
                    396: static void pmap_common_init(pmap_t);
                    397: static void pmap_kernel_init(pmap_t);
                    398: static void pmap_user_init(pmap_t);
                    399: static void pmap_page_upload(void);
                    400:
                    401: static void pmap_enter_kernel(vaddr_t, int, boolean_t);
                    402: static void pmap_enter_user(pmap_t, vaddr_t, int, boolean_t);
                    403:
                    404: static void pmap_protect1(pmap_t, vaddr_t, vaddr_t);
                    405: static void pmap_protect_mmu(pmap_t, vaddr_t, vaddr_t);
                    406: static void pmap_protect_noctx(pmap_t, vaddr_t, vaddr_t);
                    407:
                    408: static void pmap_remove1(pmap_t, vaddr_t, vaddr_t);
                    409: static void pmap_remove_mmu(pmap_t, vaddr_t, vaddr_t);
                    410: static void pmap_remove_noctx(pmap_t, vaddr_t, vaddr_t);
1.1       glass     411:
1.145     chs       412: static int  pmap_fault_reload(struct pmap *, vaddr_t, int);
1.66      gwr       413:
1.99      gwr       414: /* Called only from locore.s and pmap.c */
1.145     chs       415: void   _pmap_switch(pmap_t);
1.99      gwr       416:
1.79      gwr       417: #ifdef PMAP_DEBUG
1.145     chs       418: void pmap_print(pmap_t);
                    419: void pv_print(struct vm_page *);
                    420: void pmeg_print(pmeg_t);
                    421: static void pmeg_verify_empty(vaddr_t);
1.79      gwr       422: #endif /* PMAP_DEBUG */
1.145     chs       423: void pmap_pinit(pmap_t);
                    424: void pmap_release(pmap_t);
1.79      gwr       425:
                    426: /*
                    427:  * Various in-line helper functions.
                    428:  */
                    429:
1.83      gwr       430: static inline pmap_t
1.145     chs       431: current_pmap(void)
1.83      gwr       432: {
                    433:        struct vmspace *vm;
1.130     chs       434:        struct vm_map *map;
1.83      gwr       435:        pmap_t  pmap;
                    436:
1.139     thorpej   437:        if (curlwp == NULL)
1.83      gwr       438:                pmap = kernel_pmap;
                    439:        else {
1.139     thorpej   440:                vm = curproc->p_vmspace;
1.83      gwr       441:                map = &vm->vm_map;
                    442:                pmap = vm_map_pmap(map);
                    443:        }
                    444:
                    445:        return (pmap);
                    446: }
                    447:
1.84      gwr       448: static inline struct pv_entry **
1.132     chs       449: pa_to_pvhead(paddr_t pa)
1.84      gwr       450: {
                    451:        int idx;
                    452:
                    453:        idx = PA_PGNUM(pa);
1.79      gwr       454: #ifdef DIAGNOSTIC
1.88      gwr       455:        if (PA_IS_DEV(pa) || (idx >= physmem))
1.84      gwr       456:                panic("pmap:pa_to_pvhead: bad pa=0x%lx", pa);
                    457: #endif
                    458:        return (&pv_head_tbl[idx]);
                    459: }
                    460:
                    461: static inline u_char *
1.132     chs       462: pa_to_pvflags(paddr_t pa)
1.79      gwr       463: {
1.84      gwr       464:        int idx;
                    465:
                    466:        idx = PA_PGNUM(pa);
                    467: #ifdef DIAGNOSTIC
1.88      gwr       468:        if (PA_IS_DEV(pa) || (idx >= physmem))
1.87      gwr       469:                panic("pmap:pa_to_pvflags: bad pa=0x%lx", pa);
1.84      gwr       470: #endif
                    471:        return (&pv_flags_tbl[idx]);
1.79      gwr       472: }
                    473:
1.132     chs       474: /*
                    475:  * Save the MOD and REF bits from the given PTE, indexed by its PA.
                    476:  */
                    477: static inline void
                    478: save_modref_bits(int pte)
                    479: {
                    480:        u_char *pv_flags;
                    481:
                    482:        pv_flags = pa_to_pvflags(PG_PA(pte));
                    483:        *pv_flags |= ((pte & PG_MODREF) >> PV_SHIFT);
                    484: }
                    485:
1.84      gwr       486: static inline pmeg_t
1.79      gwr       487: pmeg_p(int sme)
                    488: {
1.84      gwr       489: #ifdef DIAGNOSTIC
1.79      gwr       490:        if (sme < 0 || sme >= SEGINV)
                    491:                panic("pmeg_p: bad sme");
1.84      gwr       492: #endif
1.79      gwr       493:        return &pmeg_array[sme];
                    494: }
                    495:
                    496: #define is_pmeg_wired(pmegp) (pmegp->pmeg_wired != 0)
                    497:
1.145     chs       498: static void
                    499: pmeg_set_wiring(pmeg_t pmegp, vaddr_t va, int flag)
1.79      gwr       500: {
                    501:        int idx, mask;
                    502:
                    503:        idx = VA_PTE_NUM(va);
                    504:        mask = 1 << idx;
                    505:
                    506:        if (flag)
                    507:                pmegp->pmeg_wired |= mask;
                    508:        else
                    509:                pmegp->pmeg_wired &= ~mask;
                    510: }
                    511:
1.78      gwr       512: /****************************************************************
                    513:  * Context management functions.
1.26      gwr       514:  */
1.39      gwr       515:
1.80      gwr       516: /* part of pmap_bootstrap */
1.145     chs       517: static void
                    518: context_init(void)
1.78      gwr       519: {
                    520:        int i;
                    521:
                    522:        TAILQ_INIT(&context_free_queue);
                    523:        TAILQ_INIT(&context_active_queue);
1.26      gwr       524:
1.83      gwr       525:        /* Leave EMPTY_CONTEXT out of the free list. */
                    526:        context_array[0].context_upmap = kernel_pmap;
                    527:
                    528:        for (i = 1; i < NCONTEXT; i++) {
1.78      gwr       529:                context_array[i].context_num = i;
                    530:                context_array[i].context_upmap = NULL;
                    531:                TAILQ_INSERT_TAIL(&context_free_queue, &context_array[i],
1.145     chs       532:                                  context_link);
1.76      gwr       533: #ifdef PMAP_DEBUG
1.78      gwr       534:                if (pmap_debug & PMD_CONTEXT)
1.81      gwr       535:                        printf("context_init: sizeof(context_array[0])=%d\n",
1.145     chs       536:                               sizeof(context_array[0]));
1.78      gwr       537: #endif
                    538:        }
                    539: }
1.26      gwr       540:
1.80      gwr       541: /* Get us a context (steal one if necessary). */
1.145     chs       542: static void
                    543: context_allocate(pmap_t pmap)
1.38      gwr       544: {
                    545:        context_t context;
                    546:
1.80      gwr       547:        CHECK_SPL();
1.77      gwr       548:
1.80      gwr       549: #ifdef DIAGNOSTIC
1.50      gwr       550:        if (pmap == kernel_pmap)
                    551:                panic("context_allocate: kernel_pmap");
1.38      gwr       552:        if (has_context(pmap))
                    553:                panic("pmap: pmap already has context allocated to it");
1.80      gwr       554: #endif
                    555:
                    556:        context = TAILQ_FIRST(&context_free_queue);
1.65      gwr       557:        if (context == NULL) {
                    558:                /* Steal the head of the active queue. */
1.80      gwr       559:                context = TAILQ_FIRST(&context_active_queue);
1.65      gwr       560:                if (context == NULL)
                    561:                        panic("pmap: no contexts left?");
1.38      gwr       562: #ifdef PMAP_DEBUG
                    563:                if (pmap_debug & PMD_CONTEXT)
1.80      gwr       564:                        printf("context_allocate: steal ctx %d from pmap %p\n",
1.145     chs       565:                               context->context_num, context->context_upmap);
1.38      gwr       566: #endif
1.80      gwr       567:                context_free(context->context_upmap);
                    568:                context = TAILQ_FIRST(&context_free_queue);
1.38      gwr       569:        }
1.80      gwr       570:        TAILQ_REMOVE(&context_free_queue, context, context_link);
                    571:
1.132     chs       572: #ifdef DIAGNOSTIC
1.38      gwr       573:        if (context->context_upmap != NULL)
                    574:                panic("pmap: context in use???");
1.132     chs       575: #endif
1.80      gwr       576:
                    577:        context->context_upmap = pmap;
1.38      gwr       578:        pmap->pm_ctxnum = context->context_num;
1.80      gwr       579:
                    580:        TAILQ_INSERT_TAIL(&context_active_queue, context, context_link);
1.38      gwr       581:
                    582:        /*
                    583:         * We could reload the MMU here, but that would
                    584:         * artificially move PMEGs from the inactive queue
                    585:         * to the active queue, so do lazy reloading.
                    586:         * XXX - Need to reload wired pmegs though...
1.80      gwr       587:         * XXX: Verify that the context is empty?
1.38      gwr       588:         */
1.1       glass     589: }
1.5       glass     590:
1.80      gwr       591: /*
                    592:  * Unload the context and put it on the free queue.
                    593:  */
1.145     chs       594: static void
                    595: context_free(pmap_t pmap)
1.38      gwr       596: {
                    597:        int saved_ctxnum, ctxnum;
1.80      gwr       598:        int i, sme;
1.38      gwr       599:        context_t contextp;
1.132     chs       600:        vaddr_t va;
1.38      gwr       601:
1.80      gwr       602:        CHECK_SPL();
1.1       glass     603:
1.38      gwr       604:        ctxnum = pmap->pm_ctxnum;
1.83      gwr       605:        if (ctxnum < FIRST_CONTEXT || ctxnum >= NCONTEXT)
1.80      gwr       606:                panic("pmap: context_free ctxnum");
1.38      gwr       607:        contextp = &context_array[ctxnum];
                    608:
1.80      gwr       609:        /* Temporary context change. */
1.38      gwr       610:        saved_ctxnum = get_context();
                    611:        set_context(ctxnum);
                    612:
1.50      gwr       613:        /* Before unloading translations, flush cache. */
                    614: #ifdef HAVECACHE
                    615:        if (cache_size)
                    616:                cache_flush_context();
                    617: #endif
                    618:
1.38      gwr       619:        /* Unload MMU (but keep in SW segmap). */
1.145     chs       620:        for (i = 0, va = 0; i < NUSEG; i++, va += NBSG) {
1.80      gwr       621:
                    622: #if !defined(PMAP_DEBUG)
                    623:                /* Short-cut using the S/W segmap (if !debug). */
                    624:                if (pmap->pm_segmap[i] == SEGINV)
                    625:                        continue;
                    626: #endif
                    627:
                    628:                /* Check the H/W segmap. */
                    629:                sme = get_segmap(va);
                    630:                if (sme == SEGINV)
                    631:                        continue;
                    632:
                    633:                /* Found valid PMEG in the segmap. */
1.38      gwr       634: #ifdef PMAP_DEBUG
1.80      gwr       635:                if (pmap_debug & PMD_SEGMAP)
1.145     chs       636:                        printf("pmap: set_segmap ctx=%d v=0x%lx old=0x%x "
                    637:                               "new=ff (cf)\n", ctxnum, va, sme);
1.81      gwr       638: #endif
                    639: #ifdef DIAGNOSTIC
1.80      gwr       640:                if (sme != pmap->pm_segmap[i])
                    641:                        panic("context_free: unknown sme at va=0x%lx", va);
                    642: #endif
                    643:                /* Did cache flush above (whole context). */
                    644:                set_segmap(va, SEGINV);
                    645:                /* In this case, do not clear pm_segmap. */
1.83      gwr       646:                /* XXX: Maybe inline this call? */
1.80      gwr       647:                pmeg_release(pmeg_p(sme));
1.38      gwr       648:        }
1.80      gwr       649:
                    650:        /* Restore previous context. */
1.38      gwr       651:        set_context(saved_ctxnum);
1.80      gwr       652:
                    653:        /* Dequeue, update, requeue. */
                    654:        TAILQ_REMOVE(&context_active_queue, contextp, context_link);
1.83      gwr       655:        pmap->pm_ctxnum = EMPTY_CONTEXT;
1.38      gwr       656:        contextp->context_upmap = NULL;
1.78      gwr       657:        TAILQ_INSERT_TAIL(&context_free_queue, contextp, context_link);
1.1       glass     658: }
                    659:
1.78      gwr       660:
                    661: /****************************************************************
                    662:  * PMEG management functions.
                    663:  */
                    664:
1.145     chs       665: static void
                    666: pmeg_init(void)
1.26      gwr       667: {
1.78      gwr       668:        int x;
                    669:
                    670:        /* Clear the pmeg array; put it all on the free pmeg queue. */
1.38      gwr       671:
1.78      gwr       672:        TAILQ_INIT(&pmeg_free_queue);
                    673:        TAILQ_INIT(&pmeg_inactive_queue);
                    674:        TAILQ_INIT(&pmeg_active_queue);
                    675:        TAILQ_INIT(&pmeg_kernel_queue);
1.38      gwr       676:
1.133     tsutsui   677:        memset(pmeg_array, 0, NPMEG*sizeof(struct pmeg_state));
1.145     chs       678:        for (x = 0; x < NPMEG; x++) {
                    679:                TAILQ_INSERT_TAIL(&pmeg_free_queue, &pmeg_array[x], pmeg_link);
1.78      gwr       680:                pmeg_array[x].pmeg_qstate = PMEGQ_FREE;
                    681:                pmeg_array[x].pmeg_index = x;
1.38      gwr       682:        }
                    683:
1.78      gwr       684:        /* The last pmeg is not usable. */
                    685:        pmeg_reserve(SEGINV);
1.26      gwr       686: }
                    687:
1.38      gwr       688: /*
                    689:  * Reserve a pmeg (forever) for use by PROM, etc.
                    690:  * Contents are left as-is.  Called very early...
                    691:  */
1.145     chs       692: void
                    693: pmeg_reserve(int sme)
1.1       glass     694: {
1.38      gwr       695:        pmeg_t pmegp;
1.1       glass     696:
1.38      gwr       697:        /* Can not use pmeg_p() because it fails on SEGINV. */
                    698:        pmegp = &pmeg_array[sme];
1.26      gwr       699:
1.67      gwr       700:        if (pmegp->pmeg_reserved) {
1.76      gwr       701:                mon_printf("pmeg_reserve: already reserved\n");
1.67      gwr       702:                sunmon_abort();
                    703:        }
                    704:        if (pmegp->pmeg_owner) {
1.76      gwr       705:                mon_printf("pmeg_reserve: already owned\n");
1.67      gwr       706:                sunmon_abort();
                    707:        }
1.38      gwr       708:
1.78      gwr       709:        /* Owned by kernel, but not really usable... */
1.56      gwr       710:        pmegp->pmeg_owner = kernel_pmap;
1.38      gwr       711:        pmegp->pmeg_reserved++; /* keep count, just in case */
                    712:        TAILQ_REMOVE(&pmeg_free_queue, pmegp, pmeg_link);
                    713:        pmegp->pmeg_qstate = PMEGQ_NONE;
1.1       glass     714: }
                    715:
1.75      gwr       716: /*
                    717:  * Examine PMEGs used by the monitor, and either
                    718:  * reserve them (keep=1) or clear them (keep=0)
                    719:  */
1.145     chs       720: static void
                    721: pmeg_mon_init(vaddr_t sva, vaddr_t eva, int keep)
1.75      gwr       722: {
1.132     chs       723:        vaddr_t pgva, endseg;
1.75      gwr       724:        int pte, valid;
                    725:        unsigned char sme;
                    726:
1.94      gwr       727: #ifdef PMAP_DEBUG
                    728:        if (pmap_debug & PMD_SEGMAP)
                    729:                mon_printf("pmeg_mon_init(0x%x, 0x%x, %d)\n",
1.145     chs       730:                           sva, eva, keep);
1.94      gwr       731: #endif
                    732:
1.145     chs       733:        sva &= ~(NBSG - 1);
1.75      gwr       734:
                    735:        while (sva < eva) {
                    736:                sme = get_segmap(sva);
                    737:                if (sme != SEGINV) {
                    738:                        valid = 0;
                    739:                        endseg = sva + NBSG;
1.140     thorpej   740:                        for (pgva = sva; pgva < endseg; pgva += PAGE_SIZE) {
1.75      gwr       741:                                pte = get_pte(pgva);
                    742:                                if (pte & PG_VALID) {
                    743:                                        valid++;
                    744:                                }
                    745:                        }
1.94      gwr       746: #ifdef PMAP_DEBUG
                    747:                        if (pmap_debug & PMD_SEGMAP)
                    748:                                mon_printf(" sva=0x%x seg=0x%x valid=%d\n",
1.145     chs       749:                                           sva, sme, valid);
1.94      gwr       750: #endif
1.75      gwr       751:                        if (keep && valid)
1.76      gwr       752:                                pmeg_reserve(sme);
1.145     chs       753:                        else
                    754:                                set_segmap(sva, SEGINV);
1.75      gwr       755:                }
                    756:                sva += NBSG;
                    757:        }
                    758: }
                    759:
1.81      gwr       760: /*
                    761:  * This is used only during pmap_bootstrap, so we can
                    762:  * get away with borrowing a slot in the segmap.
                    763:  */
1.145     chs       764: static void
                    765: pmeg_clean(pmeg_t pmegp)
1.7       glass     766: {
1.81      gwr       767:        int sme;
1.132     chs       768:        vaddr_t va;
1.81      gwr       769:
                    770:        sme = get_segmap(0);
                    771:        if (sme != SEGINV)
                    772:                panic("pmeg_clean");
                    773:
                    774:        sme = pmegp->pmeg_index;
                    775:        set_segmap(0, sme);
                    776:
1.140     thorpej   777:        for (va = 0; va < NBSG; va += PAGE_SIZE)
1.81      gwr       778:                set_pte(va, PG_INVAL);
1.38      gwr       779:
1.81      gwr       780:        set_segmap(0, SEGINV);
1.7       glass     781: }
                    782:
                    783: /*
                    784:  * This routine makes sure that pmegs on the pmeg_free_queue contain
                    785:  * no valid ptes.  It pulls things off the queue, cleans them, and
1.80      gwr       786:  * puts them at the end.  The ending condition is finding the first
                    787:  * queue element at the head of the queue again.
1.7       glass     788:  */
1.145     chs       789: static void
                    790: pmeg_clean_free(void)
1.7       glass     791: {
1.38      gwr       792:        pmeg_t pmegp, pmegp_first;
1.7       glass     793:
1.80      gwr       794:        pmegp = TAILQ_FIRST(&pmeg_free_queue);
                    795:        if (pmegp == NULL)
1.38      gwr       796:                panic("pmap: no free pmegs available to clean");
1.26      gwr       797:
1.38      gwr       798:        pmegp_first = NULL;
1.26      gwr       799:
1.38      gwr       800:        for (;;) {
1.80      gwr       801:                pmegp = TAILQ_FIRST(&pmeg_free_queue);
                    802:                TAILQ_REMOVE(&pmeg_free_queue, pmegp, pmeg_link);
1.26      gwr       803:
1.38      gwr       804:                pmegp->pmeg_qstate = PMEGQ_NONE;
                    805:                pmeg_clean(pmegp);
1.80      gwr       806:                pmegp->pmeg_qstate = PMEGQ_FREE;
1.26      gwr       807:
1.38      gwr       808:                TAILQ_INSERT_TAIL(&pmeg_free_queue, pmegp, pmeg_link);
1.26      gwr       809:
1.38      gwr       810:                if (pmegp == pmegp_first)
                    811:                        break;
                    812:                if (pmegp_first == NULL)
                    813:                        pmegp_first = pmegp;
1.1       glass     814:        }
                    815: }
                    816:
1.38      gwr       817: /*
1.46      gwr       818:  * Allocate a PMEG by whatever means necessary.
                    819:  * (May invalidate some mappings!)
1.38      gwr       820:  */
1.145     chs       821: static pmeg_t
                    822: pmeg_allocate(pmap_t pmap, vaddr_t va)
1.1       glass     823: {
1.38      gwr       824:        pmeg_t pmegp;
                    825:
                    826:        CHECK_SPL();
1.1       glass     827:
1.39      gwr       828: #ifdef DIAGNOSTIC
                    829:        if (va & SEGOFSET) {
1.73      fair      830:                panic("pmap:pmeg_allocate: va=0x%lx", va);
1.39      gwr       831:        }
                    832: #endif
                    833:
1.38      gwr       834:        /* Get one onto the free list if necessary. */
1.80      gwr       835:        pmegp = TAILQ_FIRST(&pmeg_free_queue);
1.38      gwr       836:        if (!pmegp) {
                    837:                /* Try inactive queue... */
1.80      gwr       838:                pmegp = TAILQ_FIRST(&pmeg_inactive_queue);
1.38      gwr       839:                if (!pmegp) {
                    840:                        /* Try active queue... */
1.80      gwr       841:                        pmegp = TAILQ_FIRST(&pmeg_active_queue);
1.38      gwr       842:                }
                    843:                if (!pmegp) {
                    844:                        panic("pmeg_allocate: failed");
                    845:                }
1.145     chs       846:
1.40      gwr       847:                /*
                    848:                 * Remove mappings to free-up a pmeg
                    849:                 * (so it will go onto the free list).
1.46      gwr       850:                 * XXX - Skip this one if it is wired?
1.40      gwr       851:                 */
1.76      gwr       852:                pmap_remove1(pmegp->pmeg_owner,
1.145     chs       853:                             pmegp->pmeg_va,
                    854:                             pmegp->pmeg_va + NBSG);
1.38      gwr       855:        }
                    856:
                    857:        /* OK, free list has something for us to take. */
1.80      gwr       858:        pmegp = TAILQ_FIRST(&pmeg_free_queue);
                    859: #ifdef DIAGNOSTIC
                    860:        if (pmegp == NULL)
1.38      gwr       861:                panic("pmeg_allocate: still none free?");
1.80      gwr       862:        if ((pmegp->pmeg_qstate != PMEGQ_FREE) ||
1.145     chs       863:            (pmegp->pmeg_index == SEGINV) ||
                    864:            (pmegp->pmeg_vpages))
1.80      gwr       865:                panic("pmeg_allocate: bad pmegp=%p", pmegp);
1.26      gwr       866: #endif
                    867: #ifdef PMAP_DEBUG
1.38      gwr       868:        if (pmegp->pmeg_index == pmap_db_watchpmeg) {
1.81      gwr       869:                db_printf("pmeg_allocate: watch pmegp=%p\n", pmegp);
1.38      gwr       870:                Debugger();
                    871:        }
1.26      gwr       872: #endif
1.80      gwr       873:
                    874:        TAILQ_REMOVE(&pmeg_free_queue, pmegp, pmeg_link);
1.38      gwr       875:
                    876:        /* Reassign this PMEG for the caller. */
                    877:        pmegp->pmeg_owner = pmap;
                    878:        pmegp->pmeg_version = pmap->pm_version;
                    879:        pmegp->pmeg_va = va;
                    880:        pmegp->pmeg_wired = 0;
                    881:        pmegp->pmeg_reserved  = 0;
                    882:        pmegp->pmeg_vpages  = 0;
1.50      gwr       883:        if (pmap == kernel_pmap) {
1.38      gwr       884:                TAILQ_INSERT_TAIL(&pmeg_kernel_queue, pmegp, pmeg_link);
                    885:                pmegp->pmeg_qstate = PMEGQ_KERNEL;
                    886:        } else {
                    887:                TAILQ_INSERT_TAIL(&pmeg_active_queue, pmegp, pmeg_link);
                    888:                pmegp->pmeg_qstate = PMEGQ_ACTIVE;
1.30      gwr       889:        }
1.38      gwr       890:        /* Caller will verify that it's empty (if debugging). */
                    891:        return pmegp;
1.1       glass     892: }
1.7       glass     893:
1.28      gwr       894: /*
1.38      gwr       895:  * Put pmeg on the inactive queue, leaving its contents intact.
                    896:  * This happens when we lose our context.  We may reclaim
                    897:  * this pmeg later if it is still in the inactive queue.
1.28      gwr       898:  */
1.145     chs       899: static void
                    900: pmeg_release(pmeg_t pmegp)
1.1       glass     901: {
1.78      gwr       902:
1.38      gwr       903:        CHECK_SPL();
1.29      gwr       904:
1.38      gwr       905: #ifdef DIAGNOSTIC
1.80      gwr       906:        if ((pmegp->pmeg_owner == kernel_pmap) ||
1.145     chs       907:            (pmegp->pmeg_qstate != PMEGQ_ACTIVE))
1.80      gwr       908:                panic("pmeg_release: bad pmeg=%p", pmegp);
1.38      gwr       909: #endif
                    910:
1.26      gwr       911:        TAILQ_REMOVE(&pmeg_active_queue, pmegp, pmeg_link);
1.38      gwr       912:        pmegp->pmeg_qstate = PMEGQ_INACTIVE;
1.29      gwr       913:        TAILQ_INSERT_TAIL(&pmeg_inactive_queue, pmegp, pmeg_link);
1.1       glass     914: }
1.7       glass     915:
1.26      gwr       916: /*
1.38      gwr       917:  * Move the pmeg to the free queue from wherever it is.
1.50      gwr       918:  * The pmeg will be clean.  It might be in kernel_pmap.
1.26      gwr       919:  */
1.145     chs       920: static void
                    921: pmeg_free(pmeg_t pmegp)
1.38      gwr       922: {
1.78      gwr       923:
1.38      gwr       924:        CHECK_SPL();
                    925:
1.80      gwr       926: #ifdef DIAGNOSTIC
                    927:        /* Caller should verify that it's empty. */
1.38      gwr       928:        if (pmegp->pmeg_vpages != 0)
                    929:                panic("pmeg_free: vpages");
                    930: #endif
                    931:
                    932:        switch (pmegp->pmeg_qstate) {
                    933:        case PMEGQ_ACTIVE:
                    934:                TAILQ_REMOVE(&pmeg_active_queue, pmegp, pmeg_link);
                    935:                break;
                    936:        case PMEGQ_INACTIVE:
                    937:                TAILQ_REMOVE(&pmeg_inactive_queue, pmegp, pmeg_link);
                    938:                break;
                    939:        case PMEGQ_KERNEL:
                    940:                TAILQ_REMOVE(&pmeg_kernel_queue, pmegp, pmeg_link);
                    941:                break;
                    942:        default:
                    943:                panic("pmeg_free: releasing bad pmeg");
                    944:                break;
                    945:        }
                    946:
1.28      gwr       947: #ifdef PMAP_DEBUG
1.38      gwr       948:        if (pmegp->pmeg_index == pmap_db_watchpmeg) {
1.81      gwr       949:                db_printf("pmeg_free: watch pmeg 0x%x\n",
1.145     chs       950:                          pmegp->pmeg_index);
1.38      gwr       951:                Debugger();
                    952:        }
1.28      gwr       953: #endif
                    954:
1.38      gwr       955:        pmegp->pmeg_owner = NULL;
                    956:        pmegp->pmeg_qstate = PMEGQ_FREE;
                    957:        TAILQ_INSERT_TAIL(&pmeg_free_queue, pmegp, pmeg_link);
                    958: }
                    959:
                    960: /*
                    961:  * Find a PMEG that was put on the inactive queue when we
                    962:  * had our context stolen.  If found, move to active queue.
                    963:  */
1.145     chs       964: static pmeg_t
                    965: pmeg_cache(pmap_t pmap, vaddr_t va)
1.38      gwr       966: {
1.39      gwr       967:        int sme, segnum;
1.38      gwr       968:        pmeg_t pmegp;
                    969:
                    970:        CHECK_SPL();
1.26      gwr       971:
1.80      gwr       972: #ifdef DIAGNOSTIC
1.50      gwr       973:        if (pmap == kernel_pmap)
                    974:                panic("pmeg_cache: kernel_pmap");
1.39      gwr       975:        if (va & SEGOFSET) {
1.73      fair      976:                panic("pmap:pmeg_cache: va=0x%lx", va);
1.39      gwr       977:        }
                    978: #endif
                    979:
1.38      gwr       980:        if (pmap->pm_segmap == NULL)
                    981:                return PMEG_NULL;
1.80      gwr       982:
1.38      gwr       983:        segnum = VA_SEGNUM(va);
                    984:        if (segnum >= NUSEG)            /* out of range */
                    985:                return PMEG_NULL;
1.80      gwr       986:
1.39      gwr       987:        sme = pmap->pm_segmap[segnum];
                    988:        if (sme == SEGINV)      /* nothing cached */
1.38      gwr       989:                return PMEG_NULL;
                    990:
1.39      gwr       991:        pmegp = pmeg_p(sme);
1.38      gwr       992:
1.30      gwr       993: #ifdef PMAP_DEBUG
1.38      gwr       994:        if (pmegp->pmeg_index == pmap_db_watchpmeg) {
1.81      gwr       995:                db_printf("pmeg_cache: watch pmeg 0x%x\n", pmegp->pmeg_index);
1.38      gwr       996:                Debugger();
1.30      gwr       997:        }
                    998: #endif
1.38      gwr       999:
                   1000:        /*
                   1001:         * Our segmap named a PMEG.  If it is no longer ours,
                   1002:         * invalidate that entry in our segmap and return NULL.
                   1003:         */
                   1004:        if ((pmegp->pmeg_owner != pmap) ||
1.145     chs      1005:            (pmegp->pmeg_version != pmap->pm_version) ||
                   1006:            (pmegp->pmeg_va != va))
1.38      gwr      1007:        {
1.30      gwr      1008: #ifdef PMAP_DEBUG
1.81      gwr      1009:                db_printf("pmap:pmeg_cache: invalid pmeg: sme=0x%x\n", sme);
1.39      gwr      1010:                pmeg_print(pmegp);
1.78      gwr      1011:                Debugger();
1.38      gwr      1012: #endif
                   1013:                pmap->pm_segmap[segnum] = SEGINV;
                   1014:                return PMEG_NULL; /* cache lookup failed */
1.30      gwr      1015:        }
1.38      gwr      1016:
1.80      gwr      1017: #ifdef DIAGNOSTIC
1.38      gwr      1018:        /* Make sure it is on the inactive queue. */
                   1019:        if (pmegp->pmeg_qstate != PMEGQ_INACTIVE)
1.80      gwr      1020:                panic("pmeg_cache: pmeg was taken: %p", pmegp);
1.30      gwr      1021: #endif
1.26      gwr      1022:
1.38      gwr      1023:        TAILQ_REMOVE(&pmeg_inactive_queue, pmegp, pmeg_link);
                   1024:        pmegp->pmeg_qstate = PMEGQ_ACTIVE;
                   1025:        TAILQ_INSERT_TAIL(&pmeg_active_queue, pmegp, pmeg_link);
1.30      gwr      1026:
1.38      gwr      1027:        return pmegp;
                   1028: }
1.26      gwr      1029:
1.78      gwr      1030: #ifdef PMAP_DEBUG
1.145     chs      1031: static void
                   1032: pmeg_verify_empty(vaddr_t va)
1.38      gwr      1033: {
1.132     chs      1034:        vaddr_t eva;
1.78      gwr      1035:        int pte;
1.29      gwr      1036:
1.140     thorpej  1037:        for (eva = va + NBSG;  va < eva; va += PAGE_SIZE) {
1.78      gwr      1038:                pte = get_pte(va);
                   1039:                if (pte & PG_VALID)
                   1040:                        panic("pmeg_verify_empty");
                   1041:        }
                   1042: }
                   1043: #endif /* PMAP_DEBUG */
1.1       glass    1044:
1.26      gwr      1045:
1.78      gwr      1046: /****************************************************************
                   1047:  * Physical-to-virutal lookup support
1.84      gwr      1048:  *
                    1049:  * Need memory for the pv list heads and the pv_entry
                    1050:  * elements.  We know how many to allocate since
                   1051:  * there is one list head for each physical page, and
                   1052:  * at most one element for each PMEG slot.
1.78      gwr      1053:  */
1.145     chs      1054: static void
                   1055: pv_init(void)
1.38      gwr      1056: {
1.84      gwr      1057:        int npp, nvp, sz;
                   1058:        pv_entry_t pv;
                   1059:        char *p;
                   1060:
                   1061:        /* total allocation size */
                   1062:        sz = 0;
                   1063:
                   1064:        /*
                   1065:         * Data for each physical page.
                   1066:         * Each "mod/ref" flag is a char.
                   1067:         * Each PV head is a pointer.
                   1068:         * Note physmem is in pages.
                   1069:         */
                   1070:        npp = ALIGN(physmem);
                   1071:        sz += (npp * sizeof(*pv_flags_tbl));
                   1072:        sz += (npp * sizeof(*pv_head_tbl));
                   1073:
                   1074:        /*
                   1075:         * Data for each virtual page (all PMEGs).
                   1076:         * One pv_entry for each page frame.
                   1077:         */
                   1078:        nvp = NPMEG * NPAGSEG;
                   1079:        sz += (nvp * sizeof(*pv_free_list));
1.38      gwr      1080:
1.84      gwr      1081:        /* Now allocate the whole thing. */
                   1082:        sz = m68k_round_page(sz);
1.146     yamt     1083:        p = (char *)uvm_km_alloc(kernel_map, sz, 0, UVM_KMF_WIRED);
1.84      gwr      1084:        if (p == NULL)
                   1085:                panic("pmap:pv_init: alloc failed");
1.133     tsutsui  1086:        memset(p, 0, sz);
1.29      gwr      1087:
1.84      gwr      1088:        /* Now divide up the space. */
                   1089:        pv_flags_tbl = (void *) p;
                   1090:        p += (npp * sizeof(*pv_flags_tbl));
                   1091:        pv_head_tbl = (void*) p;
                   1092:        p += (npp * sizeof(*pv_head_tbl));
1.132     chs      1093:        pv_free_list = (void *)p;
1.84      gwr      1094:        p += (nvp * sizeof(*pv_free_list));
                   1095:
                   1096:        /* Finally, make pv_free_list into a list. */
1.132     chs      1097:        for (pv = pv_free_list; (char *)pv < p; pv++)
1.84      gwr      1098:                pv->pv_next = &pv[1];
                   1099:        pv[-1].pv_next = 0;
1.78      gwr      1100:
                   1101:        pv_initialized++;
1.1       glass    1102: }
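/*
 * Worked example of the sizing above (the numbers are hypothetical):
 * with physmem = 2048 pages, 4-byte pointers, NPMEG = 256 and
 * NPAGSEG = 16, the allocation comes to
 *
 *	npp = 2048:	2048 * 1 (flag bytes) + 2048 * 4 (head ptrs)
 *	nvp = 4096:	256 * 16 pv_entry elements
 *
 * with the total rounded up to whole pages by m68k_round_page()
 * before the single uvm_km_alloc() call.
 */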
                   1103:
1.38      gwr      1104: /*
                   1105:  * Set or clear bits in all PTEs mapping a page.
                    1106:  * Also does the syncflags work while we are at it.
                   1107:  */
1.145     chs      1108: static void
                   1109: pv_changepte(paddr_t pa, int set_bits, int clear_bits)
1.38      gwr      1110: {
1.84      gwr      1111:        pv_entry_t *head, pv;
                   1112:        u_char *pv_flags;
1.38      gwr      1113:        pmap_t pmap;
1.132     chs      1114:        vaddr_t va;
1.80      gwr      1115:        int pte, sme;
1.38      gwr      1116:        int saved_ctx;
                   1117:        boolean_t in_ctx;
1.80      gwr      1118:        u_int flags;
                   1119:
1.84      gwr      1120:        pv_flags = pa_to_pvflags(pa);
                   1121:        head     = pa_to_pvhead(pa);
                   1122:
1.80      gwr      1123:        /* If no mappings, no work to do. */
1.84      gwr      1124:        if (*head == NULL)
1.38      gwr      1125:                return;
1.80      gwr      1126:
1.50      gwr      1127: #ifdef DIAGNOSTIC
                   1128:        /* This function should only clear these bits: */
                   1129:        if (clear_bits & ~(PG_WRITE | PG_NC | PG_REF | PG_MOD))
1.137     provos   1130:                panic("pv_changepte: clear=0x%x", clear_bits);
1.50      gwr      1131: #endif
1.38      gwr      1132:
1.80      gwr      1133:        flags = 0;
1.38      gwr      1134:        saved_ctx = get_context();
1.84      gwr      1135:        for (pv = *head; pv != NULL; pv = pv->pv_next) {
1.38      gwr      1136:                pmap = pv->pv_pmap;
                   1137:                va = pv->pv_va;
1.65      gwr      1138:
1.38      gwr      1139: #ifdef DIAGNOSTIC
1.84      gwr      1140:                if (pmap->pm_segmap == NULL)
                   1141:                        panic("pv_changepte: null segmap");
1.38      gwr      1142: #endif
                   1143:
1.131     wiz      1144:                /* Is the PTE currently accessible in some context? */
1.38      gwr      1145:                in_ctx = FALSE;
1.107     gwr      1146:                sme = SEGINV;   /* kill warning */
1.50      gwr      1147:                if (pmap == kernel_pmap)
1.38      gwr      1148:                        in_ctx = TRUE;
                   1149:                else if (has_context(pmap)) {
                   1150:                        /* PMEG may be inactive. */
                   1151:                        set_context(pmap->pm_ctxnum);
                   1152:                        sme = get_segmap(va);
                   1153:                        if (sme != SEGINV)
                   1154:                                in_ctx = TRUE;
                   1155:                }
                   1156:
                   1157:                if (in_ctx == TRUE) {
                   1158:                        /*
                   1159:                         * The PTE is in the current context.
1.52      gwr      1160:                         * Make sure PTE is up-to-date with VAC.
1.38      gwr      1161:                         */
1.52      gwr      1162: #ifdef HAVECACHE
                   1163:                        if (cache_size)
                   1164:                                cache_flush_page(va);
                   1165: #endif
1.38      gwr      1166:                        pte = get_pte(va);
                   1167:                } else {
1.132     chs      1168:
1.38      gwr      1169:                        /*
                   1170:                         * The PTE is not in any context.
                   1171:                         */
1.132     chs      1172:
1.38      gwr      1173:                        sme = pmap->pm_segmap[VA_SEGNUM(va)];
1.132     chs      1174: #ifdef DIAGNOSTIC
1.38      gwr      1175:                        if (sme == SEGINV)
                   1176:                                panic("pv_changepte: SEGINV");
1.132     chs      1177: #endif
1.38      gwr      1178:                        pte = get_pte_pmeg(sme, VA_PTE_NUM(va));
                   1179:                }
1.1       glass    1180:
1.38      gwr      1181: #ifdef DIAGNOSTIC
1.92      gwr      1182:                /* PV entries point only to valid mappings. */
1.38      gwr      1183:                if ((pte & PG_VALID) == 0)
1.137     provos   1184:                        panic("pv_changepte: not PG_VALID at va=0x%lx", va);
1.38      gwr      1185: #endif
                   1186:                /* Get these while it's easy. */
                   1187:                if (pte & PG_MODREF) {
1.80      gwr      1188:                        flags |= (pte & PG_MODREF);
1.38      gwr      1189:                        pte &= ~PG_MODREF;
                   1190:                }
                   1191:
                   1192:                /* Finally, set and clear some bits. */
                   1193:                pte |= set_bits;
                   1194:                pte &= ~clear_bits;
                   1195:
                   1196:                if (in_ctx == TRUE) {
1.52      gwr      1197:                        /* Did cache flush above. */
1.38      gwr      1198:                        set_pte(va, pte);
                   1199:                } else {
                   1200:                        set_pte_pmeg(sme, VA_PTE_NUM(va), pte);
                   1201:                }
                   1202:        }
1.80      gwr      1203:        set_context(saved_ctx);
1.1       glass    1204:
1.84      gwr      1205:        *pv_flags |= (flags >> PV_SHIFT);
1.38      gwr      1206: }
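/*
 * Illustrative sketch (not compiled): the PTE access pattern shared
 * by pv_changepte() above and pv_syncflags() below.  When the mapping
 * is reachable through some context we go through the MMU, flushing
 * the VAC first so the mod/ref bits are current; otherwise we reach
 * the PMEG directly.  The helper name is hypothetical, and the caller
 * is assumed to save/restore the context and run at splvm.
 */
#if 0
static int
pv_read_pte(pmap_t pmap, vaddr_t va, int *smep, boolean_t *in_ctxp)
{
	boolean_t in_ctx;

	in_ctx = FALSE;
	if (pmap == kernel_pmap)
		in_ctx = TRUE;
	else if (has_context(pmap)) {
		/* The PMEG may be inactive even with a context. */
		set_context(pmap->pm_ctxnum);
		if (get_segmap(va) != SEGINV)
			in_ctx = TRUE;
	}
	if (in_ctx) {
#ifdef HAVECACHE
		/* Write-back so the PTE's mod/ref bits are current. */
		if (cache_size)
			cache_flush_page(va);
#endif
		*in_ctxp = TRUE;
		return (get_pte(va));
	}
	/* Not in any context; reach the PMEG via the temp segment. */
	*in_ctxp = FALSE;
	*smep = pmap->pm_segmap[VA_SEGNUM(va)];
	return (get_pte_pmeg(*smep, VA_PTE_NUM(va)));
}
#endif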
1.1       glass    1207:
1.38      gwr      1208: /*
1.84      gwr      1209:  * Return the ref and mod bits from the pvlist,
                    1210:  * and turn off the same bits in the hardware PTEs.
1.38      gwr      1211:  */
1.145     chs      1212: static u_int
                   1213: pv_syncflags(pv_entry_t pv)
1.38      gwr      1214: {
                   1215:        pmap_t pmap;
1.132     chs      1216:        vaddr_t va;
1.80      gwr      1217:        int pte, sme;
1.38      gwr      1218:        int saved_ctx;
                   1219:        boolean_t in_ctx;
1.80      gwr      1220:        u_int flags;
                   1221:
                   1222:        /* If no mappings, no work to do. */
1.84      gwr      1223:        if (pv == NULL)
                   1224:                return (0);
1.38      gwr      1225:
1.80      gwr      1226:        flags = 0;
1.38      gwr      1227:        saved_ctx = get_context();
1.132     chs      1228:        for (; pv != NULL; pv = pv->pv_next) {
1.38      gwr      1229:                pmap = pv->pv_pmap;
                   1230:                va = pv->pv_va;
1.132     chs      1231:                sme = SEGINV;
1.65      gwr      1232:
1.38      gwr      1233: #ifdef DIAGNOSTIC
                   1234:                /*
                   1235:                 * Only the head may have a null pmap, and
                   1236:                 * we checked for that above.
                   1237:                 */
1.84      gwr      1238:                if (pmap->pm_segmap == NULL)
                   1239:                        panic("pv_syncflags: null segmap");
1.38      gwr      1240: #endif
                   1241:
1.131     wiz      1242:                /* Is the PTE currently accessible in some context? */
1.38      gwr      1243:                in_ctx = FALSE;
1.50      gwr      1244:                if (pmap == kernel_pmap)
1.38      gwr      1245:                        in_ctx = TRUE;
                   1246:                else if (has_context(pmap)) {
                   1247:                        /* PMEG may be inactive. */
                   1248:                        set_context(pmap->pm_ctxnum);
                   1249:                        sme = get_segmap(va);
                   1250:                        if (sme != SEGINV)
                   1251:                                in_ctx = TRUE;
                   1252:                }
                   1253:
                   1254:                if (in_ctx == TRUE) {
1.132     chs      1255:
1.38      gwr      1256:                        /*
                   1257:                         * The PTE is in the current context.
1.52      gwr      1258:                         * Make sure PTE is up-to-date with VAC.
1.38      gwr      1259:                         */
1.132     chs      1260:
1.52      gwr      1261: #ifdef HAVECACHE
                   1262:                        if (cache_size)
                   1263:                                cache_flush_page(va);
                   1264: #endif
1.38      gwr      1265:                        pte = get_pte(va);
                   1266:                } else {
1.132     chs      1267:
1.38      gwr      1268:                        /*
                   1269:                         * The PTE is not in any context.
                   1270:                         */
1.132     chs      1271:
1.38      gwr      1272:                        sme = pmap->pm_segmap[VA_SEGNUM(va)];
1.132     chs      1273: #ifdef DIAGNOSTIC
1.38      gwr      1274:                        if (sme == SEGINV)
                   1275:                                panic("pv_syncflags: SEGINV");
1.132     chs      1276: #endif
1.38      gwr      1277:                        pte = get_pte_pmeg(sme, VA_PTE_NUM(va));
                   1278:                }
1.29      gwr      1279:
1.38      gwr      1280: #ifdef DIAGNOSTIC
1.92      gwr      1281:                /* PV entries point only to valid mappings. */
1.38      gwr      1282:                if ((pte & PG_VALID) == 0)
1.137     provos   1283:                        panic("pv_syncflags: not PG_VALID at va=0x%lx", va);
1.38      gwr      1284: #endif
                   1285:                /* OK, do what we came here for... */
                   1286:                if (pte & PG_MODREF) {
1.80      gwr      1287:                        flags |= (pte & PG_MODREF);
1.38      gwr      1288:                        pte &= ~PG_MODREF;
                   1289:                }
                   1290:
                   1291:                if (in_ctx == TRUE) {
1.52      gwr      1292:                        /* Did cache flush above. */
1.38      gwr      1293:                        set_pte(va, pte);
                   1294:                } else {
                   1295:                        set_pte_pmeg(sme, VA_PTE_NUM(va), pte);
                   1296:                }
                   1297:        }
                   1298:        set_context(saved_ctx);
1.19      glass    1299:
1.84      gwr      1300:        return (flags >> PV_SHIFT);
1.1       glass    1301: }
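/*
 * Note on the packing used above (an assumption about the header
 * definitions): PG_MOD and PG_REF sit high in the hardware PTE,
 * while the per-page flags table stores a u_char, so PV_SHIFT is
 * presumably chosen so that
 *
 *	(PG_MODREF >> PV_SHIFT)
 *
 * lands the mod/ref bits in their PV_ positions within one byte,
 * letting pv_changepte() and pv_syncflags() both use
 * (flags >> PV_SHIFT) directly.
 */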
                   1302:
1.78      gwr      1303: /* Remove all mappings for the physical page. */
1.145     chs      1304: static void
                   1305: pv_remove_all(paddr_t pa)
1.38      gwr      1306: {
1.84      gwr      1307:        pv_entry_t *head, pv;
1.38      gwr      1308:        pmap_t pmap;
1.132     chs      1309:        vaddr_t va;
1.1       glass    1310:
1.78      gwr      1311:        CHECK_SPL();
                   1312:
1.19      glass    1313: #ifdef PMAP_DEBUG
1.38      gwr      1314:        if (pmap_debug & PMD_REMOVE)
1.73      fair     1315:                printf("pv_remove_all(0x%lx)\n", pa);
1.38      gwr      1316: #endif
1.78      gwr      1317:
1.84      gwr      1318:        head = pa_to_pvhead(pa);
                   1319:        while ((pv = *head) != NULL) {
1.38      gwr      1320:                pmap = pv->pv_pmap;
                   1321:                va   = pv->pv_va;
1.140     thorpej  1322:                pmap_remove1(pmap, va, va + PAGE_SIZE);
1.38      gwr      1323: #ifdef PMAP_DEBUG
                   1324:                /* Make sure it went away. */
1.84      gwr      1325:                if (pv == *head) {
1.145     chs      1326:                        db_printf("pv_remove_all: "
                   1327:                                  "head unchanged for pa=0x%lx\n", pa);
1.38      gwr      1328:                        Debugger();
                   1329:                }
                   1330: #endif
                   1331:        }
1.1       glass    1332: }
                   1333:
1.38      gwr      1334: /*
1.42      gwr      1335:  * The pmap system is asked to lookup all mappings that point to a
1.38      gwr      1336:  * given physical memory address.  This function adds a new element
                   1337:  * to the list of mappings maintained for the given physical address.
1.42      gwr      1338:  * Returns PV_NC if the (new) pvlist says that the address cannot
1.38      gwr      1339:  * be cached.
                   1340:  */
1.145     chs      1341: static int
                   1342: pv_link(pmap_t pmap, int pte, vaddr_t va)
1.38      gwr      1343: {
1.132     chs      1344:        paddr_t pa;
1.84      gwr      1345:        pv_entry_t *head, pv;
                   1346:        u_char *pv_flags;
1.92      gwr      1347:        int flags;
1.38      gwr      1348:
                   1349:        if (!pv_initialized)
                   1350:                return 0;
1.1       glass    1351:
1.80      gwr      1352:        CHECK_SPL();
                   1353:
1.92      gwr      1354:        /* Only the non-cached bit is of interest here. */
                   1355:        flags = (pte & PG_NC) ? PV_NC : 0;
                   1356:        pa = PG_PA(pte);
                   1357:
1.19      glass    1358: #ifdef PMAP_DEBUG
1.38      gwr      1359:        if ((pmap_debug & PMD_LINK) || (va == pmap_db_watchva)) {
1.92      gwr      1360:                printf("pv_link(%p, 0x%x, 0x%lx)\n", pmap, pte, va);
1.38      gwr      1361:                /* pv_print(pa); */
                   1362:        }
                   1363: #endif
1.1       glass    1364:
1.84      gwr      1365:        pv_flags = pa_to_pvflags(pa);
                   1366:        head     = pa_to_pvhead(pa);
1.2       glass    1367:
1.84      gwr      1368: #ifdef DIAGNOSTIC
                   1369:        /* See if this mapping is already in the list. */
                   1370:        for (pv = *head; pv != NULL; pv = pv->pv_next) {
1.81      gwr      1371:                if ((pv->pv_pmap == pmap) && (pv->pv_va == va))
1.73      fair     1372:                        panic("pv_link: duplicate entry for PA=0x%lx", pa);
1.38      gwr      1373:        }
1.19      glass    1374: #endif
1.132     chs      1375: #ifdef HAVECACHE
1.26      gwr      1376:
1.38      gwr      1377:        /*
1.84      gwr      1378:         * Does this new mapping cause VAC alias problems?
1.38      gwr      1379:         */
1.132     chs      1380:
1.84      gwr      1381:        *pv_flags |= flags;
                   1382:        if ((*pv_flags & PV_NC) == 0) {
                   1383:                for (pv = *head; pv != NULL; pv = pv->pv_next) {
1.81      gwr      1384:                        if (BADALIAS(va, pv->pv_va)) {
1.84      gwr      1385:                                *pv_flags |= PV_NC;
                   1386:                                pv_changepte(pa, PG_NC, 0);
1.60      gwr      1387:                                pmap_stats.ps_vac_uncached++;
1.38      gwr      1388:                                break;
                   1389:                        }
                   1390:                }
                   1391:        }
1.132     chs      1392: #endif
1.84      gwr      1393:
                   1394:        /* Allocate a PV element (pv_alloc()). */
                   1395:        pv = pv_free_list;
                   1396:        if (pv == NULL)
                   1397:                panic("pv_link: pv_alloc");
                   1398:        pv_free_list = pv->pv_next;
                   1399:        pv->pv_next = 0;
                   1400:
                   1401:        /* Insert new entry at the head. */
1.81      gwr      1402:        pv->pv_pmap = pmap;
                   1403:        pv->pv_va   = va;
1.84      gwr      1404:        pv->pv_next = *head;
                   1405:        *head = pv;
1.38      gwr      1406:
1.84      gwr      1407:        return (*pv_flags & PV_NC);
1.38      gwr      1408: }
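/*
 * Illustrative note (an assumption, not from this file): BADALIAS()
 * is taken here to test whether two VAs can collide in the sun3's
 * virtually-indexed cache, i.e. something like
 *
 *	#define BADALIAS(a1, a2)  (((a1) ^ (a2)) & CACHE_ALIAS_MASK)
 *
 * where CACHE_ALIAS_MASK is hypothetical.  Two cacheable mappings of
 * the same page are tolerated only when they land on the same cache
 * index; otherwise pv_link() marks the whole page PV_NC.
 */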
                   1409:
                   1410: /*
                   1411:  * pv_unlink is a helper function for pmap_remove.
                   1412:  * It removes the appropriate (pmap, pa, va) entry.
                   1413:  *
                   1414:  * Once the entry is removed, if the pv_table head has the cache
                   1415:  * inhibit bit set, see if we can turn that off; if so, walk the
                   1416:  * pvlist and turn off PG_NC in each PTE.  (The pvlist is by
                   1417:  * definition nonempty, since it must have at least two elements
                   1418:  * in it to have PV_NC set, and we only remove one here.)
                   1419:  */
1.145     chs      1420: static void
                   1421: pv_unlink(pmap_t pmap, int pte, vaddr_t va)
1.38      gwr      1422: {
1.132     chs      1423:        paddr_t pa;
1.84      gwr      1424:        pv_entry_t *head, *ppv, pv;
                   1425:        u_char *pv_flags;
1.38      gwr      1426:
1.80      gwr      1427:        CHECK_SPL();
                   1428:
1.92      gwr      1429:        pa = PG_PA(pte);
1.18      glass    1430: #ifdef PMAP_DEBUG
1.80      gwr      1431:        if ((pmap_debug & PMD_LINK) || (va == pmap_db_watchva)) {
1.92      gwr      1432:                printf("pv_unlink(%p, 0x%x, 0x%lx)\n", pmap, pte, va);
1.80      gwr      1433:                /* pv_print(pa); */
1.26      gwr      1434:        }
1.18      glass    1435: #endif
1.81      gwr      1436:
1.84      gwr      1437:        pv_flags = pa_to_pvflags(pa);
                   1438:        head     = pa_to_pvhead(pa);
1.38      gwr      1439:
1.84      gwr      1440:        /*
                   1441:         * Find the entry.
                   1442:         */
                   1443:        ppv = head;
                   1444:        pv = *ppv;
                   1445:        while (pv) {
                   1446:                if ((pv->pv_pmap == pmap) && (pv->pv_va == va))
                   1447:                        goto found;
                   1448:                ppv = &pv->pv_next;
                   1449:                pv  =  pv->pv_next;
                   1450:        }
                   1451: #ifdef PMAP_DEBUG
                   1452:        db_printf("pv_unlink: not found (pa=0x%lx,va=0x%lx)\n", pa, va);
                   1453:        Debugger();
1.38      gwr      1454: #endif
1.84      gwr      1455:        return;
                   1456:
1.145     chs      1457:   found:
1.84      gwr      1458:        /* Unlink this entry from the list and clear it. */
                   1459:        *ppv = pv->pv_next;
                   1460:        pv->pv_pmap = NULL;
                   1461:        pv->pv_va   = 0;
                   1462:
                   1463:        /* Insert it on the head of the free list. (pv_free()) */
                   1464:        pv->pv_next = pv_free_list;
                   1465:        pv_free_list = pv;
                   1466:        pv = NULL;
                   1467:
                   1468:        /* Do any non-cached mappings remain? */
                   1469:        if ((*pv_flags & PV_NC) == 0)
                   1470:                return;
                   1471:        if ((pv = *head) == NULL)
                   1472:                return;
1.1       glass    1473:
                   1474:        /*
1.84      gwr      1475:         * Have non-cached mappings.  See if we can fix that now.
1.1       glass    1476:         */
1.84      gwr      1477:        va = pv->pv_va;
                   1478:        for (pv = pv->pv_next; pv != NULL; pv = pv->pv_next) {
                   1479:                /* If there is a DVMA mapping, leave it NC. */
1.98      gwr      1480:                if (va >= DVMA_MAP_BASE)
1.84      gwr      1481:                        return;
                   1482:                /* If there are VAC alias problems, leave NC. */
                   1483:                if (BADALIAS(va, pv->pv_va))
                   1484:                        return;
1.1       glass    1485:        }
1.84      gwr      1486:        /* OK, there are no "problem" mappings. */
                   1487:        *pv_flags &= ~PV_NC;
                   1488:        pv_changepte(pa, 0, PG_NC);
                   1489:        pmap_stats.ps_vac_recached++;
1.1       glass    1490: }
                   1491:
1.38      gwr      1492:
1.78      gwr      1493: /****************************************************************
                   1494:  * Bootstrap and Initialization, etc.
                   1495:  */
1.38      gwr      1496:
1.145     chs      1497: void
                   1498: pmap_common_init(pmap_t pmap)
1.38      gwr      1499: {
1.133     tsutsui  1500:        memset(pmap, 0, sizeof(struct pmap));
1.132     chs      1501:        pmap->pm_refcount = 1;
1.38      gwr      1502:        pmap->pm_version = pmap_version++;
1.83      gwr      1503:        pmap->pm_ctxnum = EMPTY_CONTEXT;
1.38      gwr      1504:        simple_lock_init(&pmap->pm_lock);
                   1505: }
                   1506:
                   1507: /*
                   1508:  * Prepare the kernel for VM operations.
1.99      gwr      1509:  * This is called by locore2.c:_vm_init()
1.38      gwr      1510:  * after the "start/end" globals are set.
1.75      gwr      1511:  * This function must NOT leave context zero.
1.38      gwr      1512:  */
1.145     chs      1513: void
                   1514: pmap_bootstrap(vaddr_t nextva)
1.38      gwr      1515: {
1.99      gwr      1516:        struct sunromvec *rvec;
1.132     chs      1517:        vaddr_t va, eva;
1.75      gwr      1518:        int i, pte, sme;
1.78      gwr      1519:        extern char etext[];
1.75      gwr      1520:
1.76      gwr      1521:        nextva = m68k_round_page(nextva);
1.75      gwr      1522:        rvec = romVectorPtr;
                   1523:
1.76      gwr      1524:        /* Steal some special-purpose, already mapped pages? */
                   1525:
1.75      gwr      1526:        /*
                   1527:         * Determine the range of kernel virtual space available.
1.76      gwr      1528:         * It is segment-aligned to simplify PMEG management.
1.75      gwr      1529:         */
1.76      gwr      1530:        virtual_avail = m68k_round_seg(nextva);
1.75      gwr      1531:        virtual_end = VM_MAX_KERNEL_ADDRESS;
                   1532:
                   1533:        /*
                   1534:         * Determine the range of physical memory available.
                   1535:         * Physical memory at zero was remapped to KERNBASE.
                   1536:         */
1.76      gwr      1537:        avail_start = nextva - KERNBASE;
1.75      gwr      1538:        if (rvec->romvecVersion < 1) {
                   1539:                mon_printf("Warning: ancient PROM version=%d\n",
1.145     chs      1540:                           rvec->romvecVersion);
1.75      gwr      1541:                /* Guess that PROM version 0.X used two pages. */
1.140     thorpej  1542:                avail_end = *rvec->memorySize - (2*PAGE_SIZE);
1.75      gwr      1543:        } else {
                   1544:                /* PROM version 1 or later. */
                   1545:                avail_end = *rvec->memoryAvail;
                   1546:        }
                   1547:        avail_end = m68k_trunc_page(avail_end);
                   1548:
                   1549:        /*
1.76      gwr      1550:         * Report the actual amount of physical memory,
                   1551:         * even though the PROM takes a few pages.
1.75      gwr      1552:         */
1.76      gwr      1553:        physmem = (btoc(avail_end) + 0xF) & ~0xF;
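	/*
	 * (Worked example, hypothetical numbers, assuming 8KB pages:
	 * with avail_end = 0x3FC000, btoc() yields 0x1FE pages, and
	 * the rounding above reports physmem = 0x200 pages, i.e. a
	 * full 4MB even though the PROM kept the last few pages.)
	 */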
1.75      gwr      1554:
                   1555:        /*
1.76      gwr      1556:         * On the Sun3/50, the video frame buffer is located at
                    1557:         * physical address 1MB, so we must step over it.
1.75      gwr      1558:         */
1.148     thorpej  1559:        if (cpu_machine_id == ID_SUN3_50) {
1.76      gwr      1560:                hole_start = m68k_trunc_page(OBMEM_BW50_ADDR);
                   1561:                hole_size  = m68k_round_page(OBMEM_BW2_SIZE);
1.91      gwr      1562:                if (avail_start > hole_start) {
1.76      gwr      1563:                        mon_printf("kernel too large for Sun3/50\n");
                   1564:                        sunmon_abort();
                   1565:                }
                   1566:        }
1.75      gwr      1567:
                   1568:        /*
                   1569:         * Done allocating PAGES of virtual space, so
                   1570:         * clean out the rest of the last used segment.
                   1571:         */
1.140     thorpej  1572:        for (va = nextva; va < virtual_avail; va += PAGE_SIZE)
1.75      gwr      1573:                set_pte(va, PG_INVAL);
                   1574:
                   1575:        /*
                   1576:         * Now that we are done stealing physical pages, etc.
                   1577:         * figure out which PMEGs are used by those mappings
1.76      gwr      1578:         * and either reserve them or clear them out.
                   1579:         * -- but first, init PMEG management.
                   1580:         * This puts all PMEGs in the free list.
                    1581:         * We will allocate the in-use ones.
1.75      gwr      1582:         */
1.76      gwr      1583:        pmeg_init();
                   1584:
                   1585:        /*
                   1586:         * Unmap user virtual segments.
                   1587:         * VA range: [0 .. KERNBASE]
                   1588:         */
                   1589:        for (va = 0; va < KERNBASE; va += NBSG)
                   1590:                set_segmap(va, SEGINV);
1.75      gwr      1591:
                   1592:        /*
                   1593:         * Reserve PMEGS for kernel text/data/bss
                   1594:         * and the misc pages taken above.
1.76      gwr      1595:         * VA range: [KERNBASE .. virtual_avail]
1.75      gwr      1596:         */
1.76      gwr      1597:        for ( ; va < virtual_avail; va += NBSG) {
1.75      gwr      1598:                sme = get_segmap(va);
                   1599:                if (sme == SEGINV) {
                   1600:                        mon_printf("kernel text/data/bss not mapped\n");
                   1601:                        sunmon_abort();
                   1602:                }
1.76      gwr      1603:                pmeg_reserve(sme);
1.75      gwr      1604:        }
                   1605:
                   1606:        /*
1.76      gwr      1607:         * Unmap kernel virtual space.  Make sure to leave no valid
1.75      gwr      1608:         * segmap entries in the MMU unless pmeg_array records them.
1.76      gwr      1609:         * VA range: [virtual_avail .. virtual_end]
1.75      gwr      1610:         */
1.76      gwr      1611:        for ( ; va < virtual_end; va += NBSG)
1.75      gwr      1612:                set_segmap(va, SEGINV);
                   1613:
                   1614:        /*
1.76      gwr      1615:         * Reserve PMEGs used by the PROM monitor (device mappings).
                   1616:         * Free up any pmegs in this range which have no mappings.
                   1617:         * VA range: [0x0FE00000 .. 0x0FF00000]
1.75      gwr      1618:         */
1.99      gwr      1619:        pmeg_mon_init(SUN3_MONSTART, SUN3_MONEND, TRUE);
1.75      gwr      1620:
                   1621:        /*
1.76      gwr      1622:         * Unmap any pmegs left in DVMA space by the PROM.
                   1623:         * DO NOT kill the last one! (owned by the PROM!)
                   1624:         * VA range: [0x0FF00000 .. 0x0FFE0000]
1.75      gwr      1625:         */
1.99      gwr      1626:        pmeg_mon_init(SUN3_MONEND, SUN3_MONSHORTSEG, FALSE);
1.75      gwr      1627:
                   1628:        /*
                   1629:         * MONSHORTSEG contains MONSHORTPAGE which is a data page
1.76      gwr      1630:         * allocated by the PROM monitor.  Reserve the segment,
                   1631:         * but clear out all but the last PTE inside it.
                   1632:         * Note we use this for tmp_vpages.
1.75      gwr      1633:         */
1.99      gwr      1634:        va  = SUN3_MONSHORTSEG;
                   1635:        eva = SUN3_MONSHORTPAGE;
1.76      gwr      1636:        sme = get_segmap(va);
                   1637:        pmeg_reserve(sme);
1.140     thorpej  1638:        for ( ; va < eva; va += PAGE_SIZE)
1.75      gwr      1639:                set_pte(va, PG_INVAL);
                   1640:
                   1641:        /*
1.76      gwr      1642:         * Done reserving PMEGs and/or clearing out mappings.
                   1643:         *
                   1644:         * Now verify the mapping protections and such for the
                   1645:         * important parts of the address space (in VA order).
                   1646:         * Note that the Sun PROM usually leaves the memory
                   1647:         * mapped with everything non-cached...
1.75      gwr      1648:         */
                   1649:
                   1650:        /*
1.76      gwr      1651:         * Map the message buffer page at a constant location
                   1652:         * (physical address zero) so its contents will be
                   1653:         * preserved through a reboot.
1.75      gwr      1654:         */
                   1655:        va = KERNBASE;
                   1656:        pte = get_pte(va);
1.76      gwr      1657:        pte |= (PG_SYSTEM | PG_WRITE | PG_NC);
1.75      gwr      1658:        set_pte(va, pte);
1.140     thorpej  1659:        va += PAGE_SIZE;
1.75      gwr      1660:        /* Initialize msgbufaddr later, in machdep.c */
                   1661:
1.76      gwr      1662:        /* Next is the tmpstack page. */
                   1663:        pte = get_pte(va);
                   1664:        pte &= ~(PG_NC);
                   1665:        pte |= (PG_SYSTEM | PG_WRITE);
                   1666:        set_pte(va, pte);
1.140     thorpej  1667:        va += PAGE_SIZE;
1.75      gwr      1668:
                   1669:        /*
1.76      gwr      1670:         * Next is the kernel text.
                   1671:         *
1.75      gwr      1672:         * Verify protection bits on kernel text/data/bss
                   1673:         * All of kernel text, data, and bss are cached.
                   1674:         * Text is read-only (except in db_write_ktext).
                   1675:         */
                   1676:        eva = m68k_trunc_page(etext);
                   1677:        while (va < eva) {
                   1678:                pte = get_pte(va);
                   1679:                if ((pte & (PG_VALID|PG_TYPE)) != PG_VALID) {
                   1680:                        mon_printf("invalid page at 0x%x\n", va);
                   1681:                }
                   1682:                pte &= ~(PG_WRITE|PG_NC);
                   1683:                /* Kernel text is read-only */
                   1684:                pte |= (PG_SYSTEM);
                   1685:                set_pte(va, pte);
1.140     thorpej  1686:                va += PAGE_SIZE;
1.75      gwr      1687:        }
1.76      gwr      1688:        /* data, bss, etc. */
                   1689:        while (va < nextva) {
1.75      gwr      1690:                pte = get_pte(va);
                   1691:                if ((pte & (PG_VALID|PG_TYPE)) != PG_VALID) {
                   1692:                        mon_printf("invalid page at 0x%x\n", va);
                   1693:                }
                   1694:                pte &= ~(PG_NC);
                   1695:                pte |= (PG_SYSTEM | PG_WRITE);
                   1696:                set_pte(va, pte);
1.140     thorpej  1697:                va += PAGE_SIZE;
1.75      gwr      1698:        }
                   1699:
                   1700:        /*
                   1701:         * Duplicate all mappings in the current context into
                   1702:         * every other context.  We have to let the PROM do the
                   1703:         * actual segmap manipulation because we can only switch
1.76      gwr      1704:         * the MMU context after we are sure that the kernel is
                   1705:         * identically mapped in all contexts.  The PROM can do
                   1706:         * the job using hardware-dependent tricks...
1.75      gwr      1707:         */
                   1708: #ifdef DIAGNOSTIC
                    1709:        /* Note: PROM setcxsegmap function needs sfc=dfc=FC_CONTROL */
                   1710:        if ((getsfc() != FC_CONTROL) || (getdfc() != FC_CONTROL)) {
                   1711:                mon_printf("pmap_bootstrap: bad dfc or sfc\n");
                   1712:                sunmon_abort();
                   1713:        }
                   1714:        /* Near the beginning of locore.s we set context zero. */
                   1715:        if (get_context() != 0) {
                   1716:                mon_printf("pmap_bootstrap: not in context zero?\n");
                   1717:                sunmon_abort();
                   1718:        }
1.76      gwr      1719: #endif /* DIAGNOSTIC */
1.132     chs      1720:        for (va = 0; va < (vaddr_t) (NBSG * NSEGMAP); va += NBSG) {
1.76      gwr      1721:                /* Read the segmap entry from context zero... */
                   1722:                sme = get_segmap(va);
                   1723:                /* ... then copy it into all other contexts. */
1.75      gwr      1724:                for (i = 1; i < NCONTEXT; i++) {
                   1725:                        (*rvec->setcxsegmap)(i, va, sme);
                   1726:                }
                   1727:        }
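	/*
	 * (Illustrative figures, assumptions about the sun3 MMU: with
	 * a 128KB segment size and 2048 segmap entries per context,
	 * the loop above walks the entire 256MB address space once,
	 * pushing each context-zero segmap entry into the other
	 * contexts through the PROM's setcxsegmap routine.)
	 */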
                   1728:
1.38      gwr      1729:        /*
                   1730:         * Reserve a segment for the kernel to use to access a pmeg
                   1731:         * that is not currently mapped into any context/segmap.
                   1732:         * The kernel temporarily maps such a pmeg into this segment.
1.83      gwr      1733:         *
                   1734:         * XXX: Now that context zero is reserved as kernel-only,
                   1735:         * we could borrow context zero for these temporary uses.
1.38      gwr      1736:         */
                   1737:        temp_seg_va = virtual_avail;
                   1738:        virtual_avail += NBSG;
1.94      gwr      1739: #ifdef DIAGNOSTIC
1.67      gwr      1740:        if (temp_seg_va & SEGOFSET) {
                   1741:                mon_printf("pmap_bootstrap: temp_seg_va\n");
                   1742:                sunmon_abort();
                   1743:        }
1.38      gwr      1744: #endif
                   1745:
                   1746:        /* Initialization for pmap_next_page() */
                   1747:        avail_next = avail_start;
                   1748:
1.140     thorpej  1749:        uvmexp.pagesize = PAGE_SIZE;
1.110     mrg      1750:        uvm_setpagesize();
1.38      gwr      1751:
                   1752:        /* after setting up some structures */
                   1753:
1.50      gwr      1754:        pmap_common_init(kernel_pmap);
1.82      gwr      1755:        pmap_kernel_init(kernel_pmap);
1.38      gwr      1756:
                   1757:        context_init();
                   1758:
                   1759:        pmeg_clean_free();
1.101     gwr      1760:
                   1761:        pmap_page_upload();
1.38      gwr      1762: }
                   1763:
1.82      gwr      1764: /*
                   1765:  * Give the kernel pmap a segmap, just so there are not
                   1766:  * so many special cases required.  Maybe faster too,
                   1767:  * because this lets pmap_remove() and pmap_protect()
                   1768:  * use a S/W copy of the segmap to avoid function calls.
                   1769:  */
1.145     chs      1770: void
                   1771: pmap_kernel_init(pmap_t pmap)
1.82      gwr      1772: {
1.132     chs      1773:        vaddr_t va;
1.82      gwr      1774:        int i, sme;
                   1775:
                   1776:        for (i=0, va=0; i < NSEGMAP; i++, va+=NBSG) {
                   1777:                sme = get_segmap(va);
                   1778:                kernel_segmap[i] = sme;
                   1779:        }
                   1780:        pmap->pm_segmap = kernel_segmap;
                   1781: }
                   1782:
1.81      gwr      1783:
1.78      gwr      1784: /****************************************************************
                   1785:  * PMAP interface functions.
                   1786:  */
                   1787:
1.38      gwr      1788: /*
1.97      thorpej  1789:  * Support functions for vm_page_bootstrap().
1.38      gwr      1790:  */
1.142     thorpej  1791:
                   1792: /*
                   1793:  * How much virtual space does this kernel have?
                   1794:  * (After mapping kernel text, data, etc.)
                   1795:  */
1.145     chs      1796: void
                   1797: pmap_virtual_space(vaddr_t *v_start, vaddr_t *v_end)
1.142     thorpej  1798: {
                   1799:        *v_start = virtual_avail;
                   1800:        *v_end   = virtual_end;
                   1801: }
1.1       glass    1802:
1.101     gwr      1803: /* Provide memory to the VM system. */
1.145     chs      1804: static void
                   1805: pmap_page_upload(void)
1.101     gwr      1806: {
                   1807:        int a, b, c, d;
                   1808:
                   1809:        if (hole_size) {
                   1810:                /*
                   1811:                 * Supply the memory in two segments so the
                   1812:                 * reserved memory (3/50 video ram at 1MB)
                   1813:                 * can be carved from the front of the 2nd.
                   1814:                 */
                   1815:                a = atop(avail_start);
                   1816:                b = atop(hole_start);
1.105     thorpej  1817:                uvm_page_physload(a, b, a, b, VM_FREELIST_DEFAULT);
1.101     gwr      1818:                c = atop(hole_start + hole_size);
                   1819:                d = atop(avail_end);
1.105     thorpej  1820:                uvm_page_physload(b, d, c, d, VM_FREELIST_DEFAULT);
1.101     gwr      1821:        } else {
                   1822:                a = atop(avail_start);
                   1823:                d = atop(avail_end);
1.105     thorpej  1824:                uvm_page_physload(a, d, a, d, VM_FREELIST_DEFAULT);
1.101     gwr      1825:        }
                   1826: }
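/*
 * Worked example (hypothetical 3/50 figures): with 4MB of RAM and a
 * 128KB frame buffer hole at 1MB, the two calls above are roughly
 *
 *	uvm_page_physload(atop(avail_start), atop(1MB),
 *	    atop(avail_start), atop(1MB), ...);
 *	uvm_page_physload(atop(1MB), atop(4MB),
 *	    atop(1MB + 128KB), atop(4MB), ...);
 *
 * i.e. the second segment is registered whole, but its free range
 * starts past the hole, so the video RAM is never given to UVM.
 */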
1.38      gwr      1827:
1.1       glass    1828: /*
                   1829:  *     Initialize the pmap module.
                   1830:  *     Called by vm_init, to initialize any structures that the pmap
                   1831:  *     system needs to map virtual memory.
                   1832:  */
1.145     chs      1833: void
                   1834: pmap_init(void)
1.1       glass    1835: {
1.132     chs      1836:        pv_init();
1.1       glass    1837:
1.120     tsutsui  1838:        /* Initialize the pmap pool. */
                   1839:        pool_init(&pmap_pmap_pool, sizeof(struct pmap), 0, 0, 0, "pmappl",
1.145     chs      1840:                  &pool_allocator_nointr);
1.1       glass    1841: }
                   1842:
1.38      gwr      1843: /*
1.56      gwr      1844:  * Map a range of kernel virtual address space.
                   1845:  * This might be used for device mappings, or to
                   1846:  * record the mapping for kernel text/data/bss.
1.100     gwr      1847:  * Return VA following the mapped range.
1.38      gwr      1848:  */
1.145     chs      1849: vaddr_t
                   1850: pmap_map(vaddr_t va, paddr_t pa, paddr_t endpa, int prot)
1.38      gwr      1851: {
1.100     gwr      1852:        int sz;
                   1853:
                   1854:        sz = endpa - pa;
                   1855:        do {
1.116     thorpej  1856:                pmap_enter(kernel_pmap, va, pa, prot, 0);
1.140     thorpej  1857:                va += PAGE_SIZE;
                   1858:                pa += PAGE_SIZE;
                   1859:                sz -= PAGE_SIZE;
1.100     gwr      1860:        } while (sz > 0);
1.135     chris    1861:        pmap_update(kernel_pmap);
1.100     gwr      1862:        return(va);
1.38      gwr      1863: }
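/*
 * Usage sketch (illustrative; the variables are hypothetical):
 *
 *	va = pmap_map(va, dev_pa, dev_pa + dev_size,
 *	    VM_PROT_READ | VM_PROT_WRITE);
 *
 * maps [dev_pa .. dev_pa + dev_size) at va and returns the first VA
 * past the new mapping, ready for the next pmap_map() call.
 */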
                   1864:
1.145     chs      1865: void
                   1866: pmap_user_init(pmap_t pmap)
1.38      gwr      1867: {
                   1868:        int i;
                   1869:        pmap->pm_segmap = malloc(sizeof(char)*NUSEG, M_VMPMAP, M_WAITOK);
1.132     chs      1870:        for (i = 0; i < NUSEG; i++) {
1.38      gwr      1871:                pmap->pm_segmap[i] = SEGINV;
                   1872:        }
1.1       glass    1873: }
                   1874:
                   1875: /*
                   1876:  *     Create and return a physical map.
                   1877:  *
                    1878:  *     The map returned is an actual
                    1879:  *     physical map, and may be referenced
                    1880:  *     by the hardware.
                    1881:  *
                    1882:  *     (This pmap_create() takes no size
                    1883:  *     argument, so the old software-only,
                    1884:  *     size-bounded map case no longer
                    1885:  *     applies.)
                   1886:  */
1.145     chs      1887: pmap_t
                   1888: pmap_create(void)
1.1       glass    1889: {
1.38      gwr      1890:        pmap_t pmap;
1.2       glass    1891:
1.120     tsutsui  1892:        pmap = pool_get(&pmap_pmap_pool, PR_WAITOK);
1.96      thorpej  1893:        pmap_pinit(pmap);
1.38      gwr      1894:        return pmap;
1.1       glass    1895: }
                   1896:
                   1897: /*
                   1898:  * Release any resources held by the given physical map.
                   1899:  * Called when a pmap initialized by pmap_pinit is being released.
                   1900:  * Should only be called if the map contains no valid mappings.
                   1901:  */
1.145     chs      1902: void
                   1903: pmap_release(struct pmap *pmap)
1.1       glass    1904: {
1.80      gwr      1905:        int s;
                   1906:
1.122     thorpej  1907:        s = splvm();
1.26      gwr      1908:
1.50      gwr      1909:        if (pmap == kernel_pmap)
                   1910:                panic("pmap_release: kernel_pmap!");
1.29      gwr      1911:
1.80      gwr      1912:        if (has_context(pmap)) {
                   1913: #ifdef PMAP_DEBUG
                   1914:                if (pmap_debug & PMD_CONTEXT)
                   1915:                        printf("pmap_release(%p): free ctx %d\n",
1.145     chs      1916:                               pmap, pmap->pm_ctxnum);
1.80      gwr      1917: #endif
1.38      gwr      1918:                context_free(pmap);
1.80      gwr      1919:        }
1.38      gwr      1920:        free(pmap->pm_segmap, M_VMPMAP);
                   1921:        pmap->pm_segmap = NULL;
1.80      gwr      1922:
                   1923:        splx(s);
1.1       glass    1924: }
                   1925:
                   1926:
                   1927: /*
                   1928:  *     Retire the given physical map from service.
                   1929:  *     Should only be called if the map contains
                   1930:  *     no valid mappings.
                   1931:  */
1.145     chs      1932: void
                   1933: pmap_destroy(pmap_t pmap)
1.1       glass    1934: {
1.38      gwr      1935:        int count;
1.1       glass    1936:
1.18      glass    1937: #ifdef PMAP_DEBUG
1.38      gwr      1938:        if (pmap_debug & PMD_CREATE)
1.65      gwr      1939:                printf("pmap_destroy(%p)\n", pmap);
1.18      glass    1940: #endif
1.50      gwr      1941:        if (pmap == kernel_pmap)
                   1942:                panic("pmap_destroy: kernel_pmap!");
1.38      gwr      1943:        pmap_lock(pmap);
                   1944:        count = pmap_del_ref(pmap);
                   1945:        pmap_unlock(pmap);
                   1946:        if (count == 0) {
                   1947:                pmap_release(pmap);
1.120     tsutsui  1948:                pool_put(&pmap_pmap_pool, pmap);
1.38      gwr      1949:        }
1.1       glass    1950: }
                   1951:
                   1952: /*
                   1953:  *     Add a reference to the specified pmap.
                   1954:  */
1.145     chs      1955: void
                   1956: pmap_reference(pmap_t pmap)
1.1       glass    1957: {
1.132     chs      1958:        pmap_lock(pmap);
                   1959:        pmap_add_ref(pmap);
                   1960:        pmap_unlock(pmap);
1.1       glass    1961: }
1.26      gwr      1962:
1.85      gwr      1963:
1.38      gwr      1964: /*
1.85      gwr      1965:  *     Insert the given physical page (p) at
                   1966:  *     the specified virtual address (v) in the
                   1967:  *     target physical map with the protection requested.
                   1968:  *
                   1969:  *     The physical address is page aligned, but may have some
                   1970:  *     low bits set indicating an OBIO or VME bus page, or just
                   1971:  *     that the non-cache bit should be set (i.e PMAP_NC).
                   1972:  *
                   1973:  *     If specified, the page will be wired down, meaning
                    1974:  *     that the related PTE cannot be reclaimed.
                   1975:  *
                   1976:  *     NB:  This is the only routine which MAY NOT lazy-evaluate
                   1977:  *     or lose information.  That is, this routine must actually
                   1978:  *     insert this page into the given map NOW.
1.38      gwr      1979:  */
1.145     chs      1980: int
                   1981: pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
1.85      gwr      1982: {
1.92      gwr      1983:        int new_pte, s;
1.116     thorpej  1984:        boolean_t wired = (flags & PMAP_WIRED) != 0;
1.85      gwr      1985:
                   1986: #ifdef PMAP_DEBUG
                   1987:        if ((pmap_debug & PMD_ENTER) ||
1.145     chs      1988:            (va == pmap_db_watchva))
1.85      gwr      1989:                printf("pmap_enter(%p, 0x%lx, 0x%lx, 0x%x, 0x%x)\n",
1.145     chs      1990:                       pmap, va, pa, prot, wired);
1.85      gwr      1991: #endif
                   1992:
                   1993:        /* Get page-type bits from low part of the PA... */
1.92      gwr      1994:        new_pte = (pa & PMAP_SPEC) << PG_MOD_SHIFT;
1.85      gwr      1995:
                   1996:        /* ...now the valid and writable bits... */
1.92      gwr      1997:        new_pte |= PG_VALID;
1.90      gwr      1998:        if (prot & VM_PROT_WRITE)
1.92      gwr      1999:                new_pte |= PG_WRITE;
1.147     chs      2000:        if (flags & VM_PROT_ALL) {
                   2001:                new_pte |= PG_REF;
                   2002:                if (flags & VM_PROT_WRITE) {
                   2003:                        new_pte |= PG_MOD;
                   2004:                }
                   2005:        }
1.85      gwr      2006:
                   2007:        /* ...and finally the page-frame number. */
1.92      gwr      2008:        new_pte |= PA_PGNUM(pa);
1.85      gwr      2009:
                   2010:        /*
                    2011:         * Treatment varies significantly:
                    2012:         *  kernel PTEs are in all contexts and are always in the MMU;
                    2013:         *  user PTEs are not necessarily in the MMU, and the pmap
                    2014:         *   itself may not be in the MMU either.
                   2015:         *
                   2016:         */
1.122     thorpej  2017:        s = splvm();
1.85      gwr      2018:        if (pmap == kernel_pmap) {
1.92      gwr      2019:                new_pte |= PG_SYSTEM;
                   2020:                pmap_enter_kernel(va, new_pte, wired);
1.85      gwr      2021:        } else {
1.92      gwr      2022:                pmap_enter_user(pmap, va, new_pte, wired);
1.85      gwr      2023:        }
                   2024:        splx(s);
1.124     chs      2025:        return 0;
1.85      gwr      2026: }
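/*
 * Illustrative example of the PA encoding accepted above (the device
 * addresses are hypothetical): a caller wanting a wired, non-cached
 * mapping ORs PMAP_NC into the page-aligned PA, and pmap_enter()
 * shifts those spec bits up into the PTE type/NC field:
 *
 *	pmap_enter(kernel_pmap, va, pa | PMAP_NC,
 *	    VM_PROT_READ | VM_PROT_WRITE, PMAP_WIRED);
 */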
                   2027:
1.145     chs      2028: static void
                   2029: pmap_enter_kernel(vaddr_t pgva, int new_pte, boolean_t wired)
1.38      gwr      2030: {
1.92      gwr      2031:        pmap_t pmap = kernel_pmap;
                   2032:        pmeg_t pmegp;
1.85      gwr      2033:        int do_pv, old_pte, sme;
1.132     chs      2034:        vaddr_t segva;
1.80      gwr      2035:
1.85      gwr      2036:        /*
                    2037:          Keep this in hardware only, since it's mapped into all contexts
                    2038:          anyway; need to handle possibly allocating additional PMEGs;
                    2039:          need to make sure they can't be stolen from the kernel;
                    2040:          map any new PMEGs into all contexts, and make sure the rest of
                    2041:          the PMEG is null; deal with the pv stuff; watch for possible
                    2042:          caching problems; must also deal with changes too.
1.145     chs      2043:        */
1.85      gwr      2044:
                   2045:        /*
                   2046:         * In detail:
                   2047:         *
                   2048:         * (a) lock pmap
                    2049:         * (b) Is the VA in an already-mapped segment?  If so,
                    2050:         *       look to see if that VA address is "valid".  If it is,
                    2051:         *       the action is a change to an existing PTE.
                    2052:         * (c) If the segment is not mapped, we need to allocate a PMEG.
                    2053:         * (d) If adding a PTE entry or changing the physaddr of an
                    2054:         *              existing one, use the pv stuff; pmap_remove() possibly.
                    2055:         * (e) Change/add the PTE.
                   2056:         */
                   2057:
1.30      gwr      2058: #ifdef DIAGNOSTIC
1.98      gwr      2059:        if ((pgva < virtual_avail) || (pgva >= DVMA_MAP_END))
1.85      gwr      2060:                panic("pmap_enter_kernel: bad va=0x%lx", pgva);
                   2061:        if ((new_pte & (PG_VALID | PG_SYSTEM)) != (PG_VALID | PG_SYSTEM))
                   2062:                panic("pmap_enter_kernel: bad pte");
                   2063: #endif
                   2064:
1.98      gwr      2065:        if (pgva >= DVMA_MAP_BASE) {
1.85      gwr      2066:                /* This is DVMA space.  Always want it non-cached. */
                   2067:                new_pte |= PG_NC;
1.29      gwr      2068:        }
                   2069:
1.85      gwr      2070:        segva = m68k_trunc_seg(pgva);
                   2071:        do_pv = TRUE;
                   2072:
1.90      gwr      2073:        /* Do we have a PMEG? */
1.82      gwr      2074:        sme = get_segmap(segva);
1.90      gwr      2075:        if (sme != SEGINV) {
                   2076:                /* Found a PMEG in the segmap.  Cool. */
                   2077:                pmegp = pmeg_p(sme);
                   2078: #ifdef DIAGNOSTIC
                   2079:                /* Make sure it is the right PMEG. */
1.92      gwr      2080:                if (sme != pmap->pm_segmap[VA_SEGNUM(segva)])
1.145     chs      2081:                        panic("pmap_enter_kernel: wrong sme at VA=0x%lx",
                   2082:                              segva);
1.90      gwr      2083:                /* Make sure it is ours. */
1.92      gwr      2084:                if (pmegp->pmeg_owner != pmap)
1.90      gwr      2085:                        panic("pmap_enter_kernel: MMU has bad pmeg 0x%x", sme);
                   2086: #endif
                   2087:        } else {
                   2088:                /* No PMEG in the segmap.  Have to allocate one. */
1.92      gwr      2089:                pmegp = pmeg_allocate(pmap, segva);
1.85      gwr      2090:                sme = pmegp->pmeg_index;
1.92      gwr      2091:                pmap->pm_segmap[VA_SEGNUM(segva)] = sme;
1.85      gwr      2092:                set_segmap_allctx(segva, sme);
1.90      gwr      2093: #ifdef PMAP_DEBUG
                   2094:                pmeg_verify_empty(segva);
1.85      gwr      2095:                if (pmap_debug & PMD_SEGMAP) {
1.145     chs      2096:                        printf("pmap: set_segmap pmap=%p va=0x%lx sme=0x%x "
                   2097:                               "(ek)\n", pmap, segva, sme);
1.85      gwr      2098:                }
1.29      gwr      2099: #endif
1.85      gwr      2100:                /* There are no existing mappings to deal with. */
                   2101:                old_pte = 0;
                   2102:                goto add_pte;
                   2103:        }
1.80      gwr      2104:
1.85      gwr      2105:        /*
                    2106:         * We have a PMEG.  Is the VA already mapped somewhere?
                    2107:         *      (a) if so, is it the same pa? (really a protection change)
                    2108:         *      (b) if not the same pa, we have to unlink from the old pa
                   2109:         */
                   2110:        old_pte = get_pte(pgva);
                   2111:        if ((old_pte & PG_VALID) == 0)
                   2112:                goto add_pte;
                   2113:
                   2114:        /* Have valid translation.  Flush cache before changing it. */
1.38      gwr      2115: #ifdef HAVECACHE
1.50      gwr      2116:        if (cache_size) {
1.85      gwr      2117:                cache_flush_page(pgva);
                   2118:                /* Get fresh mod/ref bits from write-back. */
                   2119:                old_pte = get_pte(pgva);
1.50      gwr      2120:        }
1.38      gwr      2121: #endif
                   2122:
1.85      gwr      2123:        /* XXX - removing valid page here, way lame... -glass */
                   2124:        pmegp->pmeg_vpages--;
                   2125:
                   2126:        if (!IS_MAIN_MEM(old_pte)) {
                   2127:                /* Was not main memory, so no pv_entry for it. */
                   2128:                goto add_pte;
1.38      gwr      2129:        }
                   2130:
1.85      gwr      2131:        /* Old mapping was main memory.  Save mod/ref bits. */
                   2132:        save_modref_bits(old_pte);
1.38      gwr      2133:
1.85      gwr      2134:        /*
                   2135:         * If not changing the type or pfnum then re-use pv_entry.
                   2136:         * Note we get here only with old_pte having PGT_OBMEM.
                   2137:         */
1.132     chs      2138:        if ((old_pte & (PG_TYPE|PG_FRAME)) == (new_pte & (PG_TYPE|PG_FRAME))) {
1.85      gwr      2139:                do_pv = FALSE;          /* re-use pv_entry */
                   2140:                new_pte |= (old_pte & PG_NC);
                   2141:                goto add_pte;
1.38      gwr      2142:        }
                   2143:
1.85      gwr      2144:        /* OK, different type or PA, have to kill old pv_entry. */
1.92      gwr      2145:        pv_unlink(pmap, old_pte, pgva);
1.29      gwr      2146:
1.145     chs      2147: add_pte:       /* can be destructive */
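                          	/*
                          	 * Record the wiring state in this PMEG's wired-page
                          	 * bitmask (one bit per PTE slot; see pmap_unwire()).
                          	 */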
1.85      gwr      2148:        pmeg_set_wiring(pmegp, pgva, wired);
1.80      gwr      2149:
1.85      gwr      2150:        /* Anything but MAIN_MEM is mapped non-cached. */
                   2151:        if (!IS_MAIN_MEM(new_pte)) {
                   2152:                new_pte |= PG_NC;
                   2153:                do_pv = FALSE;
                   2154:        }
1.92      gwr      2155:        if (do_pv == TRUE) {
                   2156:                if (pv_link(pmap, new_pte, pgva) & PV_NC)
1.85      gwr      2157:                        new_pte |= PG_NC;
                   2158:        }
1.39      gwr      2159: #ifdef PMAP_DEBUG
1.85      gwr      2160:        if ((pmap_debug & PMD_SETPTE) || (pgva == pmap_db_watchva)) {
1.145     chs      2161:                printf("pmap: set_pte pmap=%p va=0x%lx old=0x%x new=0x%x "
                   2162:                       "(ek)\n", pmap, pgva, old_pte, new_pte);
1.85      gwr      2163:        }
1.39      gwr      2164: #endif
1.85      gwr      2165:        /* cache flush done above */
                   2166:        set_pte(pgva, new_pte);
                   2167:        pmegp->pmeg_vpages++;
1.38      gwr      2168: }
                   2169:
1.80      gwr      2170:
1.145     chs      2171: static void
                   2172: pmap_enter_user(pmap_t pmap, vaddr_t pgva, int new_pte, boolean_t wired)
1.38      gwr      2173: {
1.80      gwr      2174:        int do_pv, old_pte, sme;
1.132     chs      2175:        vaddr_t segva;
1.38      gwr      2176:        pmeg_t pmegp;
                   2177:
1.85      gwr      2178: #ifdef DIAGNOSTIC
                   2179:        if (pgva >= VM_MAXUSER_ADDRESS)
                   2180:                panic("pmap_enter_user: bad va=0x%lx", pgva);
                   2181:        if ((new_pte & (PG_VALID | PG_SYSTEM)) != PG_VALID)
                   2182:                panic("pmap_enter_user: bad pte");
                   2183: #endif
                   2184: #ifdef PMAP_DEBUG
1.38      gwr      2185:        /*
1.85      gwr      2186:         * Some user pages are wired here, and a later
1.113     thorpej  2187:         * call to pmap_unwire() will unwire them.
1.85      gwr      2188:         * XXX - Need a separate list for wired user pmegs
                    2189:         * so they cannot be stolen from the active list.
                   2190:         * XXX - Note: vm_fault.c assumes pmap_extract will
                   2191:         * work on wired mappings, so must preserve them...
                   2192:         * XXX: Maybe keep a list of wired PMEGs?
1.38      gwr      2193:         */
1.85      gwr      2194:        if (wired && (pmap_debug & PMD_WIRING)) {
1.145     chs      2195:                db_printf("pmap_enter_user: attempt to wire user page, "
                   2196:                          "ignored\n");
1.85      gwr      2197:                Debugger();
                   2198:        }
                   2199: #endif
1.15      glass    2200:
1.85      gwr      2201:        /* Validate the assumption that this is the current pmap. */
                   2202:        if (pmap != current_pmap()) {
                   2203: #ifdef PMAP_DEBUG
1.93      gwr      2204:                /* Apparently, this never happens. */
1.139     thorpej  2205:                db_printf("pmap_enter_user: not curlwp\n");
1.85      gwr      2206:                Debugger();
1.38      gwr      2207: #endif
1.93      gwr      2208:                /* Just throw it out (fault it in later). */
1.85      gwr      2209:                /* XXX: But must remember it if wired... */
                   2210:                return;
1.1       glass    2211:        }
1.38      gwr      2212:
1.82      gwr      2213:        segva = m68k_trunc_seg(pgva);
1.38      gwr      2214:        do_pv = TRUE;
                   2215:
1.85      gwr      2216:        /*
                   2217:         * If this pmap was sharing the "empty" context,
                   2218:         * allocate a real context for its exclusive use.
                   2219:         */
                   2220:        if (!has_context(pmap)) {
                   2221:                context_allocate(pmap);
1.28      gwr      2222: #ifdef PMAP_DEBUG
1.85      gwr      2223:                if (pmap_debug & PMD_CONTEXT)
                   2224:                        printf("pmap_enter(%p) got context %d\n",
1.145     chs      2225:                               pmap, pmap->pm_ctxnum);
1.85      gwr      2226: #endif
                   2227:                set_context(pmap->pm_ctxnum);
                   2228:        } else {
                   2229: #ifdef PMAP_DEBUG
                   2230:                /* Make sure context is correct. */
                   2231:                if (pmap->pm_ctxnum != get_context()) {
                   2232:                        db_printf("pmap_enter_user: wrong context\n");
                   2233:                        Debugger();
                   2234:                        /* XXX: OK to proceed? */
                   2235:                        set_context(pmap->pm_ctxnum);
1.38      gwr      2236:                }
1.28      gwr      2237: #endif
1.38      gwr      2238:        }
                   2239:
1.85      gwr      2240:        /*
                   2241:         * We have a context.  Do we have a PMEG?
                   2242:         */
                   2243:        sme = get_segmap(segva);
                   2244:        if (sme != SEGINV) {
                   2245:                /* Found a PMEG in the segmap.  Cool. */
                   2246:                pmegp = pmeg_p(sme);
1.90      gwr      2247: #ifdef DIAGNOSTIC
1.85      gwr      2248:                /* Make sure it is the right PMEG. */
                   2249:                if (sme != pmap->pm_segmap[VA_SEGNUM(segva)])
                   2250:                        panic("pmap_enter_user: wrong sme at VA=0x%lx", segva);
1.90      gwr      2251:                /* Make sure it is ours. */
                   2252:                if (pmegp->pmeg_owner != pmap)
                   2253:                        panic("pmap_enter_user: MMU has bad pmeg 0x%x", sme);
1.85      gwr      2254: #endif
                   2255:        } else {
                   2256:                /* Not in the segmap.  Try the S/W cache. */
                   2257:                pmegp = pmeg_cache(pmap, segva);
                   2258:                if (pmegp) {
                   2259:                        /* Found PMEG in cache.  Just reload it. */
                   2260:                        sme = pmegp->pmeg_index;
                   2261:                        set_segmap(segva, sme);
                   2262:                } else {
                   2263:                        /* PMEG not in cache, so allocate one. */
                   2264:                        pmegp = pmeg_allocate(pmap, segva);
                   2265:                        sme = pmegp->pmeg_index;
                   2266:                        pmap->pm_segmap[VA_SEGNUM(segva)] = sme;
                   2267:                        set_segmap(segva, sme);
                   2268: #ifdef PMAP_DEBUG
                   2269:                        pmeg_verify_empty(segva);
                   2270: #endif
                   2271:                }
                   2272: #ifdef PMAP_DEBUG
                   2273:                if (pmap_debug & PMD_SEGMAP) {
1.145     chs      2274:                        printf("pmap: set_segmap pmap=%p va=0x%lx sme=0x%x "
                   2275:                               "(eu)\n", pmap, segva, sme);
1.85      gwr      2276:                }
1.30      gwr      2277: #endif
1.85      gwr      2278:        }
1.38      gwr      2279:
                   2280:        /*
1.83      gwr      2281:         * We have a PMEG.  Is the VA already mapped somewhere?
                    2282:         *      (a) if so, is it the same pa? (really a protection change)
                    2283:         *      (b) if not the same pa, we have to unlink from the old pa
1.38      gwr      2284:         */
1.82      gwr      2285:        old_pte = get_pte(pgva);
1.38      gwr      2286:        if ((old_pte & PG_VALID) == 0)
                   2287:                goto add_pte;
                   2288:
1.50      gwr      2289:        /* Have valid translation.  Flush cache before changing it. */
                   2290: #ifdef HAVECACHE
1.52      gwr      2291:        if (cache_size) {
1.82      gwr      2292:                cache_flush_page(pgva);
1.52      gwr      2293:                /* Get fresh mod/ref bits from write-back. */
1.82      gwr      2294:                old_pte = get_pte(pgva);
1.52      gwr      2295:        }
1.50      gwr      2296: #endif
                   2297:
                   2298:        /* XXX - removing valid page here, way lame... -glass */
1.38      gwr      2299:        pmegp->pmeg_vpages--;
                   2300:
1.44      gwr      2301:        if (!IS_MAIN_MEM(old_pte)) {
1.38      gwr      2302:                /* Was not main memory, so no pv_entry for it. */
1.33      gwr      2303:                goto add_pte;
                   2304:        }
1.1       glass    2305:
1.38      gwr      2306:        /* Old mapping was main memory.  Save mod/ref bits. */
                   2307:        save_modref_bits(old_pte);
1.1       glass    2308:
1.38      gwr      2309:        /*
                   2310:         * If not changing the type or pfnum then re-use pv_entry.
                   2311:         * Note we get here only with old_pte having PGT_OBMEM.
                   2312:         */
1.132     chs      2313:        if ((old_pte & (PG_TYPE|PG_FRAME)) == (new_pte & (PG_TYPE|PG_FRAME))) {
1.38      gwr      2314:                do_pv = FALSE;          /* re-use pv_entry */
                   2315:                new_pte |= (old_pte & PG_NC);
                   2316:                goto add_pte;
1.28      gwr      2317:        }
1.1       glass    2318:
1.38      gwr      2319:        /* OK, different type or PA, have to kill old pv_entry. */
1.92      gwr      2320:        pv_unlink(pmap, old_pte, pgva);
1.38      gwr      2321:
1.145     chs      2322:   add_pte:
1.85      gwr      2323:        /* XXX - Wiring changes on user pmaps? */
                   2324:        /* pmeg_set_wiring(pmegp, pgva, wired); */
1.38      gwr      2325:
1.92      gwr      2326:        /* Anything but MAIN_MEM is mapped non-cached. */
1.44      gwr      2327:        if (!IS_MAIN_MEM(new_pte)) {
1.38      gwr      2328:                new_pte |= PG_NC;
                   2329:                do_pv = FALSE;
                   2330:        }
1.92      gwr      2331:        if (do_pv == TRUE) {
                   2332:                if (pv_link(pmap, new_pte, pgva) & PV_NC)
1.38      gwr      2333:                        new_pte |= PG_NC;
                   2334:        }
1.39      gwr      2335: #ifdef PMAP_DEBUG
1.82      gwr      2336:        if ((pmap_debug & PMD_SETPTE) || (pgva == pmap_db_watchva)) {
1.145     chs      2337:                printf("pmap: set_pte pmap=%p va=0x%lx old=0x%x new=0x%x "
                   2338:                       "(eu)\n", pmap, pgva, old_pte, new_pte);
1.39      gwr      2339:        }
                   2340: #endif
1.50      gwr      2341:        /* cache flush done above */
1.82      gwr      2342:        set_pte(pgva, new_pte);
1.38      gwr      2343:        pmegp->pmeg_vpages++;
                   2344: }
                   2345:
1.145     chs      2346: void
                   2347: pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot)
1.115     chs      2348: {
1.132     chs      2349:        int new_pte, s;
                   2350:        pmap_t pmap = kernel_pmap;
                   2351:        pmeg_t pmegp;
                   2352:        int sme;
                   2353:        vaddr_t segva;
                   2354:
                   2355: #ifdef PMAP_DEBUG
                   2356:        if ((pmap_debug & PMD_ENTER) ||
1.145     chs      2357:            (va == pmap_db_watchva))
1.132     chs      2358:                printf("pmap_kenter_pa(0x%lx, 0x%lx, 0x%x)\n",
1.145     chs      2359:                       va, pa, prot);
1.132     chs      2360: #endif
                   2361:
                   2362:        /* Get page-type bits from low part of the PA... */
                   2363:        new_pte = (pa & PMAP_SPEC) << PG_MOD_SHIFT;
                   2364:
                   2365:        /* ...now the valid and writable bits... */
                   2366:        new_pte |= PG_SYSTEM|PG_VALID;
                   2367:        if (prot & VM_PROT_WRITE)
                   2368:                new_pte |= PG_WRITE;
                   2369:
                   2370:        /* ...and finally the page-frame number. */
                   2371:        new_pte |= PA_PGNUM(pa);
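                          	/*
                          	 * Illustrative sketch only: a caller wanting, say, a
                          	 * non-cached mapping is assumed to pass the spec bits
                          	 * in the low bits of the PA, roughly:
                          	 *
                          	 *      pmap_kenter_pa(va, pa | PMAP_NC,
                          	 *          VM_PROT_READ | VM_PROT_WRITE);
                          	 *
                          	 * (PMAP_NC is assumed here to be one of the PMAP_SPEC
                          	 * bits.)
                          	 */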
                   2372:
                   2373:        /*
                    2374:         * keep in hardware only, since it's mapped into all contexts anyway;
                    2375:         * need to handle possibly allocating additional pmegs;
                    2376:         * need to make sure they can't be stolen from the kernel;
                    2377:         * map any new pmegs into all contexts, make sure rest of pmeg is null;
                    2378:         * must also deal with changes.
                   2379:         */
                   2380:
                   2381:        /*
                   2382:         * In detail:
                   2383:         *
                   2384:         * (a) lock pmap
                    2385:         * (b) Is the VA in an already-mapped segment?  If so,
                    2386:         *       look to see if that VA is "valid".  If it is, the
                    2387:         *       action is a change to an existing pte.
                    2388:         * (c) if not in a mapped segment, need to allocate a pmeg
                   2389:         * (d) change/add pte
                   2390:         */
                   2391:
                   2392: #ifdef DIAGNOSTIC
                   2393:        if ((va < virtual_avail) || (va >= DVMA_MAP_END))
1.134     tsutsui  2394:                panic("pmap_kenter_pa: bad va=0x%lx", va);
1.132     chs      2395: #endif
                   2396:
                   2397:        if (va >= DVMA_MAP_BASE) {
                   2398:                /* This is DVMA space.  Always want it non-cached. */
                   2399:                new_pte |= PG_NC;
                   2400:        }
                   2401:
                   2402:        segva = m68k_trunc_seg(va);
                   2403:
                   2404:        s = splvm();
                   2405:
                   2406:        /* Do we have a PMEG? */
                   2407:        sme = get_segmap(segva);
                   2408:        if (sme != SEGINV) {
                   2409:                KASSERT((get_pte(va) & PG_VALID) == 0);
                   2410:
                   2411:                /* Found a PMEG in the segmap.  Cool. */
                   2412:                pmegp = pmeg_p(sme);
                   2413: #ifdef DIAGNOSTIC
                   2414:                /* Make sure it is the right PMEG. */
                   2415:                if (sme != pmap->pm_segmap[VA_SEGNUM(segva)])
1.134     tsutsui  2416:                        panic("pmap_kenter_pa: wrong sme at VA=0x%lx", segva);
1.132     chs      2417:                /* Make sure it is ours. */
                   2418:                if (pmegp->pmeg_owner != pmap)
                   2419:                        panic("pmap_kenter_pa: MMU has bad pmeg 0x%x", sme);
                   2420: #endif
                   2421:        } else {
                   2422:
                   2423:                /* No PMEG in the segmap.  Have to allocate one. */
                   2424:                pmegp = pmeg_allocate(pmap, segva);
                   2425:                sme = pmegp->pmeg_index;
                   2426:                pmap->pm_segmap[VA_SEGNUM(segva)] = sme;
                   2427:                set_segmap_allctx(segva, sme);
                   2428: #ifdef PMAP_DEBUG
                   2429:                pmeg_verify_empty(segva);
                   2430:                if (pmap_debug & PMD_SEGMAP) {
1.145     chs      2431:                        printf("pmap: set_segmap pmap=%p va=0x%lx sme=0x%x "
                   2432:                               "(ek)\n", pmap, segva, sme);
1.132     chs      2433:                }
                   2434: #endif
                   2435:        }
                   2436:
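                          	/*
                          	 * Mappings entered via pmap_kenter_pa() are always
                          	 * wired; pmap_kremove() is what takes them out again.
                          	 */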
                   2437:        pmeg_set_wiring(pmegp, va, TRUE);
                   2438:
                   2439:        /* Anything but MAIN_MEM is mapped non-cached. */
                   2440:        if (!IS_MAIN_MEM(new_pte)) {
                   2441:                new_pte |= PG_NC;
                   2442:        }
                   2443: #ifdef PMAP_DEBUG
                   2444:        if ((pmap_debug & PMD_SETPTE) || (va == pmap_db_watchva)) {
1.145     chs      2445:                printf("pmap: set_pte pmap=%p va=0x%lx old=0x%x new=0x%x "
                    2446:                       "(ek)\n", pmap, va, get_pte(va), new_pte);
1.132     chs      2447:        }
                   2448: #endif
                    2449:        /* No cache flush needed: no prior valid mapping here. */
                   2450:        set_pte(va, new_pte);
                   2451:        pmegp->pmeg_vpages++;
                   2452:        splx(s);
1.115     chs      2453: }
                   2454:
1.145     chs      2455: void
                   2456: pmap_kremove(vaddr_t va, vsize_t len)
1.115     chs      2457: {
1.132     chs      2458:        pmap_t pmap = kernel_pmap;
                    2459:        vaddr_t eva, neva, pgva, segva;
                    2460:        int pte, sme, segnum;
                   2461:        pmeg_t pmegp;
                   2462: #ifdef HAVECACHE
                   2463:        int flush_by_page = 0;
                   2464: #endif
                   2465:        int s;
                   2466:
                   2467:        s = splvm();
                   2468:        segnum = VA_SEGNUM(va);
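                          	/* Walk the range one hardware segment (NBSG bytes) at a time. */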
                   2469:        for (eva = va + len; va < eva; va = neva, segnum++) {
                   2470:                neva = m68k_trunc_seg(va) + NBSG;
                   2471:                if (neva > eva) {
                   2472:                        neva = eva;
                   2473:                }
                   2474:                if (pmap->pm_segmap[segnum] == SEGINV) {
                   2475:                        continue;
                   2476:                }
                   2477:
                   2478:                segva = m68k_trunc_seg(va);
                   2479:                sme = get_segmap(segva);
                   2480:                pmegp = pmeg_p(sme);
                   2481:
                   2482: #ifdef HAVECACHE
                   2483:                if (cache_size) {
                   2484:
1.145     chs      2485:                        /*
1.132     chs      2486:                         * If the range to be removed is larger than the cache,
                   2487:                         * it will be cheaper to flush this segment entirely.
                   2488:                         */
                   2489:
                   2490:                        if (cache_size < (eva - va)) {
                   2491:                                /* cheaper to flush whole segment */
                   2492:                                cache_flush_segment(segva);
                   2493:                        } else {
                   2494:                                flush_by_page = 1;
                   2495:                        }
                   2496:                }
                   2497: #endif
                   2498:
                   2499:                /* Invalidate the PTEs in the given range. */
1.140     thorpej  2500:                for (pgva = va; pgva < neva; pgva += PAGE_SIZE) {
1.132     chs      2501:                        pte = get_pte(pgva);
                   2502:                        if (pte & PG_VALID) {
                   2503: #ifdef HAVECACHE
                   2504:                                if (flush_by_page) {
                   2505:                                        cache_flush_page(pgva);
1.145     chs      2506:                                        /* Get fresh mod/ref bits
                   2507:                                           from write-back. */
1.132     chs      2508:                                        pte = get_pte(pgva);
                   2509:                                }
                   2510: #endif
                   2511: #ifdef PMAP_DEBUG
1.145     chs      2512:                                if ((pmap_debug & PMD_SETPTE) ||
                   2513:                                    (pgva == pmap_db_watchva)) {
1.132     chs      2514:                                        printf("pmap: set_pte pmap=%p va=0x%lx"
1.145     chs      2515:                                               " old=0x%x new=0x%x (rrmmu)\n",
                   2516:                                               pmap, pgva, pte, PG_INVAL);
1.132     chs      2517:                                }
                   2518: #endif
                   2519:                                set_pte(pgva, PG_INVAL);
                   2520:                                KASSERT(pmegp->pmeg_vpages > 0);
                   2521:                                pmegp->pmeg_vpages--;
                   2522:                        }
                   2523:                }
                   2524:                KASSERT(pmegp->pmeg_vpages >= 0);
                   2525:                if (pmegp->pmeg_vpages == 0) {
                   2526:                        /* We are done with this pmeg. */
                   2527: #ifdef PMAP_DEBUG
                   2528:                        if (is_pmeg_wired(pmegp)) {
                   2529:                                if (pmap_debug & PMD_WIRING) {
1.145     chs      2530:                                        db_printf("pmap: removing wired "
                   2531:                                                  "pmeg: %p\n", pmegp);
1.132     chs      2532:                                        Debugger();
                   2533:                                }
                   2534:                        }
                   2535:                        if (pmap_debug & PMD_SEGMAP) {
1.145     chs      2536:                                printf("pmap: set_segmap ctx=%d v=0x%lx "
                   2537:                                       "old=0x%x new=ff (rm)\n",
                   2538:                                       pmap->pm_ctxnum, segva,
                   2539:                                       pmegp->pmeg_index);
1.132     chs      2540:                        }
                   2541:                        pmeg_verify_empty(segva);
                   2542: #endif
                   2543:
                   2544:                        /* Remove it from the MMU. */
                   2545:                        set_segmap_allctx(segva, SEGINV);
                   2546:                        pmap->pm_segmap[VA_SEGNUM(segva)] = SEGINV;
                   2547:
                   2548:                        /* Now, put it on the free list. */
                   2549:                        pmeg_free(pmegp);
                   2550:                }
1.115     chs      2551:        }
1.132     chs      2552:        splx(s);
1.115     chs      2553: }
                   2554:
1.38      gwr      2555:
1.85      gwr      2556: /*
                   2557:  * The trap handler calls this so we can try to resolve
                   2558:  * user-level faults by reloading a PMEG.
                    2559:  * If that does not produce a valid mapping,
                    2560:  * call uvm_fault() as usual.
                   2561:  *
                   2562:  * XXX: Merge this with the next function?
                   2563:  */
1.145     chs      2564: int
                   2565: _pmap_fault(struct vm_map *map, vaddr_t va, vm_prot_t ftype)
1.85      gwr      2566: {
                   2567:        pmap_t pmap;
                   2568:        int rv;
                   2569:
                   2570:        pmap = vm_map_pmap(map);
                   2571:        if (map == kernel_map) {
                   2572:                /* Do not allow faults below the "managed" space. */
                   2573:                if (va < virtual_avail) {
                   2574:                        /*
                   2575:                         * Most pages below virtual_avail are read-only,
                   2576:                         * so I will assume it is a protection failure.
                   2577:                         */
1.124     chs      2578:                        return EACCES;
1.85      gwr      2579:                }
                   2580:        } else {
                   2581:                /* User map.  Try reload shortcut. */
                   2582:                if (pmap_fault_reload(pmap, va, ftype))
1.124     chs      2583:                        return 0;
1.85      gwr      2584:        }
1.148.2.1! yamt     2585:        rv = uvm_fault(map, va, ftype);
1.85      gwr      2586:
                   2587: #ifdef PMAP_DEBUG
                   2588:        if (pmap_debug & PMD_FAULT) {
                   2589:                printf("pmap_fault(%p, 0x%lx, 0x%x) -> 0x%x\n",
1.145     chs      2590:                       map, va, ftype, rv);
1.85      gwr      2591:        }
                   2592: #endif
                   2593:
                   2594:        return (rv);
                   2595: }
                   2596:
                   2597: /*
                   2598:  * This is a shortcut used by the trap handler to
                   2599:  * reload PMEGs into a user segmap without calling
                    2600:  * the actual VM fault handler.  Returns non-zero if
                    2601:  *     the PMEG was reloaded, and
                    2602:  *     it has a valid PTE at va.
                    2603:  * Otherwise returns zero and lets the VM code handle it.
                   2604:  */
1.145     chs      2605: int
                   2606: pmap_fault_reload(pmap_t pmap, vaddr_t pgva, vm_prot_t ftype)
1.38      gwr      2607: {
1.85      gwr      2608:        int rv, s, pte, chkpte, sme;
1.132     chs      2609:        vaddr_t segva;
1.38      gwr      2610:        pmeg_t pmegp;
                   2611:
1.82      gwr      2612:        if (pgva >= VM_MAXUSER_ADDRESS)
1.85      gwr      2613:                return (0);
                   2614:        if (pmap->pm_segmap == NULL) {
1.83      gwr      2615: #ifdef PMAP_DEBUG
1.85      gwr      2616:                db_printf("pmap_fault_reload: null segmap\n");
1.83      gwr      2617:                Debugger();
                   2618: #endif
1.85      gwr      2619:                return (0);
1.83      gwr      2620:        }
                   2621:
1.85      gwr      2622:        /* Short-cut using the S/W segmap. */
                   2623:        if (pmap->pm_segmap[VA_SEGNUM(pgva)] == SEGINV)
                   2624:                return (0);
                   2625:
1.82      gwr      2626:        segva = m68k_trunc_seg(pgva);
1.85      gwr      2627:        chkpte = PG_VALID;
                   2628:        if (ftype & VM_PROT_WRITE)
                   2629:                chkpte |= PG_WRITE;
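                          	/*
                          	 * chkpte now holds the PTE bits we test in a reloaded
                          	 * mapping before claiming the fault resolved.
                          	 */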
                   2630:        rv = 0;
                   2631:
1.122     thorpej  2632:        s = splvm();
1.38      gwr      2633:
                   2634:        /*
1.85      gwr      2635:         * Given that we faulted on a user-space address, we will
                   2636:         * probably need a context.  Get a context now so we can
                   2637:         * try to resolve the fault with a segmap reload.
1.38      gwr      2638:         */
1.83      gwr      2639:        if (!has_context(pmap)) {
                   2640:                context_allocate(pmap);
                   2641: #ifdef PMAP_DEBUG
                   2642:                if (pmap_debug & PMD_CONTEXT)
1.85      gwr      2643:                        printf("pmap_fault(%p) got context %d\n",
1.145     chs      2644:                               pmap, pmap->pm_ctxnum);
1.83      gwr      2645: #endif
                   2646:                set_context(pmap->pm_ctxnum);
                   2647:        } else {
1.38      gwr      2648: #ifdef PMAP_DEBUG
1.83      gwr      2649:                /* Make sure context is correct. */
                   2650:                if (pmap->pm_ctxnum != get_context()) {
1.85      gwr      2651:                        db_printf("pmap_fault_reload: wrong context\n");
1.38      gwr      2652:                        Debugger();
1.83      gwr      2653:                        /* XXX: OK to proceed? */
                   2654:                        set_context(pmap->pm_ctxnum);
1.38      gwr      2655:                }
                   2656: #endif
                   2657:        }
                   2658:
1.82      gwr      2659:        sme = get_segmap(segva);
1.85      gwr      2660:        if (sme == SEGINV) {
                   2661:                /* See if there is something to reload. */
1.82      gwr      2662:                pmegp = pmeg_cache(pmap, segva);
1.80      gwr      2663:                if (pmegp) {
1.85      gwr      2664:                        /* Found one!  OK, reload it. */
                   2665:                        pmap_stats.ps_pmeg_faultin++;
1.80      gwr      2666:                        sme = pmegp->pmeg_index;
1.82      gwr      2667:                        set_segmap(segva, sme);
1.85      gwr      2668:                        pte = get_pte(pgva);
                   2669:                        if (pte & chkpte)
                   2670:                                rv = 1;
                   2671:                }
                   2672:        }
                   2673:
                   2674:        splx(s);
                   2675:        return (rv);
                   2676: }
                   2677:
                   2678:
                   2679: /*
                   2680:  * Clear the modify bit for the given physical page.
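                           * The mod/ref bits live in the hardware PTEs; pv_syncflags()
                           * collects them for this page, and the result is folded into
                           * the software pv_flags before the soft PV_MOD bit is cleared.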
                   2681:  */
1.115     chs      2682: boolean_t
1.145     chs      2683: pmap_clear_modify(struct vm_page *pg)
1.85      gwr      2684: {
1.115     chs      2685:        paddr_t pa = VM_PAGE_TO_PHYS(pg);
                   2686:        pv_entry_t *head;
1.85      gwr      2687:        u_char *pv_flags;
                   2688:        int s;
1.115     chs      2689:        boolean_t rv;
1.85      gwr      2690:
                   2691:        pv_flags = pa_to_pvflags(pa);
                   2692:        head     = pa_to_pvhead(pa);
                   2693:
1.122     thorpej  2694:        s = splvm();
1.85      gwr      2695:        *pv_flags |= pv_syncflags(*head);
1.115     chs      2696:        rv = *pv_flags & PV_MOD;
1.85      gwr      2697:        *pv_flags &= ~PV_MOD;
                   2698:        splx(s);
1.115     chs      2699:        return rv;
1.85      gwr      2700: }
                   2701:
                   2702: /*
                   2703:  * Tell whether the given physical page has been modified.
                   2704:  */
1.132     chs      2705: boolean_t
1.145     chs      2706: pmap_is_modified(struct vm_page *pg)
1.85      gwr      2707: {
1.115     chs      2708:        paddr_t pa = VM_PAGE_TO_PHYS(pg);
                   2709:        pv_entry_t *head;
1.85      gwr      2710:        u_char *pv_flags;
1.132     chs      2711:        int s;
                   2712:        boolean_t rv;
1.87      gwr      2713:
1.85      gwr      2714:        pv_flags = pa_to_pvflags(pa);
                   2715:        head     = pa_to_pvhead(pa);
                   2716:
1.122     thorpej  2717:        s = splvm();
1.85      gwr      2718:        if ((*pv_flags & PV_MOD) == 0)
                   2719:                *pv_flags |= pv_syncflags(*head);
                   2720:        rv = (*pv_flags & PV_MOD);
                   2721:        splx(s);
                   2722:        return (rv);
                   2723: }
                   2724:
                   2725: /*
                   2726:  * Clear the reference bit for the given physical page.
                   2727:  * It's OK to just remove mappings if that's easier.
                   2728:  */
1.115     chs      2729: boolean_t
1.145     chs      2730: pmap_clear_reference(struct vm_page *pg)
1.85      gwr      2731: {
1.115     chs      2732:        paddr_t pa = VM_PAGE_TO_PHYS(pg);
                   2733:        pv_entry_t *head;
1.85      gwr      2734:        u_char *pv_flags;
                   2735:        int s;
1.115     chs      2736:        boolean_t rv;
1.85      gwr      2737:
                   2738:        pv_flags = pa_to_pvflags(pa);
                   2739:        head     = pa_to_pvhead(pa);
                   2740:
1.122     thorpej  2741:        s = splvm();
1.85      gwr      2742:        *pv_flags |= pv_syncflags(*head);
1.115     chs      2743:        rv = *pv_flags & PV_REF;
1.85      gwr      2744:        *pv_flags &= ~PV_REF;
                   2745:        splx(s);
1.115     chs      2746:        return rv;
1.85      gwr      2747: }
                   2748:
                   2749: /*
                   2750:  * Tell whether the given physical page has been referenced.
                    2751:  * It's OK to just return FALSE if the page is not mapped.
                   2752:  */
1.115     chs      2753: boolean_t
1.145     chs      2754: pmap_is_referenced(struct vm_page *pg)
1.85      gwr      2755: {
1.115     chs      2756:        paddr_t pa = VM_PAGE_TO_PHYS(pg);
                   2757:        pv_entry_t *head;
1.85      gwr      2758:        u_char *pv_flags;
1.115     chs      2759:        int s;
                   2760:        boolean_t rv;
1.85      gwr      2761:
                   2762:        pv_flags = pa_to_pvflags(pa);
                   2763:        head     = pa_to_pvhead(pa);
                   2764:
1.122     thorpej  2765:        s = splvm();
1.85      gwr      2766:        if ((*pv_flags & PV_REF) == 0)
                   2767:                *pv_flags |= pv_syncflags(*head);
                   2768:        rv = (*pv_flags & PV_REF);
                   2769:        splx(s);
                   2770:        return (rv);
                   2771: }
                   2772:
                   2773:
                   2774: /*
                   2775:  * This is called by locore.s:cpu_switch() when it is
                   2776:  * switching to a new process.  Load new translations.
1.99      gwr      2777:  * Note: done in-line by locore.s unless PMAP_DEBUG
1.85      gwr      2778:  *
                   2779:  * Note that we do NOT allocate a context here, but
                   2780:  * share the "kernel only" context until we really
                   2781:  * need our own context for user-space mappings in
                   2782:  * pmap_enter_user().
                   2783:  */
1.145     chs      2784: void
                   2785: _pmap_switch(pmap_t pmap)
1.85      gwr      2786: {
                   2787:        set_context(pmap->pm_ctxnum);
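                          	/* The translations just changed; invalidate the instruction cache. */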
                   2788:        ICIA();
                   2789: }
                   2790:
1.95      thorpej  2791: /*
1.99      gwr      2792:  * Exported version of pmap_activate().  This is called from the
                   2793:  * machine-independent VM code when a process is given a new pmap.
1.139     thorpej  2794:  * If (p == curlwp) do like cpu_switch would do; otherwise just
1.99      gwr      2795:  * take this as notification that the process has a new pmap.
1.95      thorpej  2796:  */
1.145     chs      2797: void
                   2798: pmap_activate(struct lwp *l)
1.95      thorpej  2799: {
1.139     thorpej  2800:        pmap_t pmap = l->l_proc->p_vmspace->vm_map.pmap;
1.95      thorpej  2801:
1.139     thorpej  2802:        if (curlwp && l->l_proc == curproc) {
1.99      gwr      2803:                _pmap_switch(pmap);
                   2804:        }
1.95      thorpej  2805: }
                   2806:
                   2807: /*
                   2808:  * Deactivate the address space of the specified process.
                   2809:  */
1.145     chs      2810: void
                   2811: pmap_deactivate(struct lwp *l)
1.95      thorpej  2812: {
1.132     chs      2813:        /* Nothing to do. */
1.95      thorpej  2814: }
1.85      gwr      2815:
                   2816: /*
1.113     thorpej  2817:  *     Routine:        pmap_unwire
                   2818:  *     Function:       Clear the wired attribute for a map/virtual-address
1.85      gwr      2819:  *                     pair.
                   2820:  *     In/out conditions:
                   2821:  *                     The mapping must already exist in the pmap.
                   2822:  */
1.145     chs      2823: void
                   2824: pmap_unwire(pmap_t pmap, vaddr_t va)
1.85      gwr      2825: {
                   2826:        int s, sme;
                   2827:        int wiremask, ptenum;
                   2828:        pmeg_t pmegp;
                   2829:
                   2830: #ifdef PMAP_DEBUG
                   2831:        if (pmap_debug & PMD_WIRING)
1.113     thorpej  2832:                printf("pmap_unwire(pmap=%p, va=0x%lx)\n",
1.145     chs      2833:                       pmap, va);
1.80      gwr      2834: #endif
1.85      gwr      2835:        /*
                   2836:         * We are asked to unwire pages that were wired when
                   2837:         * pmap_enter() was called and we ignored wiring.
                   2838:         * (VM code appears to wire a stack page during fork.)
                   2839:         */
                   2840:        if (pmap != kernel_pmap) {
                   2841: #ifdef PMAP_DEBUG
                   2842:                if (pmap_debug & PMD_WIRING) {
                   2843:                        db_printf("  (user pmap -- ignored)\n");
                   2844:                        Debugger();
1.38      gwr      2845:                }
                   2846: #endif
1.85      gwr      2847:                return;
1.38      gwr      2848:        }
                   2849:
1.85      gwr      2850:        ptenum = VA_PTE_NUM(va);
                   2851:        wiremask = 1 << ptenum;
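                          	/* One wired bit per page within the PMEG. */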
                   2852:
1.122     thorpej  2853:        s = splvm();
1.85      gwr      2854:        sme = get_segmap(va);
                   2855:        pmegp = pmeg_p(sme);
1.113     thorpej  2856:        pmegp->pmeg_wired &= ~wiremask;
1.85      gwr      2857:        splx(s);
                   2858: }
1.50      gwr      2859:
1.85      gwr      2860: /*
                   2861:  *     Copy the range specified by src_addr/len
                   2862:  *     from the source map to the range dst_addr/len
                   2863:  *     in the destination map.
                   2864:  *
                   2865:  *     This routine is only advisory and need not do anything.
                   2866:  */
                   2867: void
1.145     chs      2868: pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vaddr_t dst_addr, vsize_t len,
                   2869:          vaddr_t src_addr)
1.85      gwr      2870: {
                   2871: }
1.38      gwr      2872:
1.85      gwr      2873: /*
                   2874:  *     Routine:        pmap_extract
                   2875:  *     Function:
                   2876:  *             Extract the physical page address associated
                   2877:  *             with the given map/virtual_address pair.
                   2878:  *     Returns zero if VA not valid.
                   2879:  */
1.145     chs      2880: boolean_t
                   2881: pmap_extract(pmap_t pmap, vaddr_t va, paddr_t *pap)
1.85      gwr      2882: {
                   2883:        int s, sme, segnum, ptenum, pte;
1.114     thorpej  2884:        paddr_t pa;
1.38      gwr      2885:
1.85      gwr      2886:        pte = 0;
1.122     thorpej  2887:        s = splvm();
1.85      gwr      2888:        if (pmap == kernel_pmap) {
                   2889:                sme = get_segmap(va);
                   2890:                if (sme != SEGINV)
                   2891:                        pte = get_pte(va);
                   2892:        } else {
                   2893:                /* This is rare, so do it the easy way. */
                   2894:                segnum = VA_SEGNUM(va);
                   2895:                sme = pmap->pm_segmap[segnum];
                   2896:                if (sme != SEGINV) {
                   2897:                        ptenum = VA_PTE_NUM(va);
                   2898:                        pte = get_pte_pmeg(sme, ptenum);
                   2899:                }
1.38      gwr      2900:        }
1.85      gwr      2901:        splx(s);
1.38      gwr      2902:
1.85      gwr      2903:        if ((pte & PG_VALID) == 0) {
                   2904: #ifdef PMAP_DEBUG
                   2905:                db_printf("pmap_extract: invalid va=0x%lx\n", va);
                   2906:                Debugger();
                   2907: #endif
1.114     thorpej  2908:                return (FALSE);
1.38      gwr      2909:        }
1.85      gwr      2910:        pa = PG_PA(pte);
                   2911: #ifdef DIAGNOSTIC
                   2912:        if (pte & PG_TYPE) {
1.137     provos   2913:                panic("pmap_extract: not main mem, va=0x%lx", va);
1.39      gwr      2914:        }
                   2915: #endif
1.114     thorpej  2916:        if (pap != NULL)
                   2917:                *pap = pa;
                   2918:        return (TRUE);
1.1       glass    2919: }
1.38      gwr      2920:
1.85      gwr      2921:
1.38      gwr      2922: /*
1.85      gwr      2923:  *       pmap_page_protect:
1.1       glass    2924:  *
1.85      gwr      2925:  *       Lower the permissions on all mappings of the given page.
1.1       glass    2926:  */
1.145     chs      2927: void
                   2928: pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
1.38      gwr      2929: {
1.115     chs      2930:        paddr_t pa = VM_PAGE_TO_PHYS(pg);
1.38      gwr      2931:        int s;
                   2932:
1.122     thorpej  2933:        s = splvm();
1.85      gwr      2934: #ifdef PMAP_DEBUG
                   2935:        if (pmap_debug & PMD_PROTECT)
                   2936:                printf("pmap_page_protect(0x%lx, 0x%lx)\n", pa, prot);
1.38      gwr      2937: #endif
1.85      gwr      2938:        switch (prot) {
                   2939:        case VM_PROT_ALL:
                   2940:                break;
                   2941:        case VM_PROT_READ:
                   2942:        case VM_PROT_READ|VM_PROT_EXECUTE:
                   2943:                pv_changepte(pa, 0, PG_WRITE);
                   2944:                break;
                   2945:        default:
                   2946:                /* remove mapping for all pmaps that have it */
                   2947:                pv_remove_all(pa);
                   2948:                break;
                   2949:        }
1.77      gwr      2950:        splx(s);
1.85      gwr      2951: }
1.66      gwr      2952:
1.85      gwr      2953: /*
                   2954:  * Initialize a preallocated and zeroed pmap structure,
                   2955:  * such as one in a vmspace structure.
                   2956:  */
1.145     chs      2957: void
                   2958: pmap_pinit(pmap_t pmap)
1.85      gwr      2959: {
                   2960:        pmap_common_init(pmap);
                   2961:        pmap_user_init(pmap);
                   2962: }
1.66      gwr      2963:
1.38      gwr      2964: /*
1.85      gwr      2965:  *     Reduce the permissions on the specified
                   2966:  *     range of this map as requested.
                   2967:  *     (Make pages read-only.)
1.38      gwr      2968:  */
1.145     chs      2969: void
                   2970: pmap_protect(pmap_t pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
1.1       glass    2971: {
1.132     chs      2972:        vaddr_t va, neva;
1.85      gwr      2973:        int segnum;
1.5       glass    2974:
1.85      gwr      2975:        /* If leaving writable, nothing to do. */
                   2976:        if (prot & VM_PROT_WRITE)
                   2977:                return;
1.82      gwr      2978:
1.85      gwr      2979:        /* If removing all permissions, just unmap. */
                   2980:        if ((prot & VM_PROT_READ) == 0) {
                   2981:                pmap_remove(pmap, sva, eva);
                   2982:                return;
                   2983:        }
1.38      gwr      2984:
1.85      gwr      2985: #ifdef PMAP_DEBUG
                   2986:        if ((pmap_debug & PMD_PROTECT) ||
1.145     chs      2987:            ((sva <= pmap_db_watchva && eva > pmap_db_watchva)))
1.85      gwr      2988:                printf("pmap_protect(%p, 0x%lx, 0x%lx)\n", pmap, sva, eva);
                   2989: #endif
1.38      gwr      2990:
1.132     chs      2991:        KASSERT((pmap == kernel_pmap) ?
1.145     chs      2992:                sva >= virtual_avail && eva < DVMA_MAP_END :
                   2993:                eva <= VM_MAXUSER_ADDRESS);
1.85      gwr      2994:        va = sva;
                   2995:        segnum = VA_SEGNUM(va);
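                          	/* Clip the work to one segment per loop iteration. */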
                   2996:        while (va < eva) {
                   2997:                neva = m68k_trunc_seg(va) + NBSG;
                   2998:                if (neva > eva)
                   2999:                        neva = eva;
                   3000:                if (pmap->pm_segmap[segnum] != SEGINV)
                   3001:                        pmap_protect1(pmap, va, neva);
                   3002:                va = neva;
                   3003:                segnum++;
1.38      gwr      3004:        }
1.1       glass    3005: }
                   3006:
1.38      gwr      3007: /*
1.85      gwr      3008:  * Remove write permissions in the given range
                    3009:  * (guaranteed to be within one segment).
                    3010:  * Similar to pmap_remove1().
1.38      gwr      3011:  */
1.145     chs      3012: void
                   3013: pmap_protect1(pmap_t pmap, vaddr_t sva, vaddr_t eva)
1.1       glass    3014: {
1.85      gwr      3015:        int old_ctx, s, sme;
                   3016:        boolean_t in_ctx;
1.38      gwr      3017:
1.122     thorpej  3018:        s = splvm();
1.84      gwr      3019:
1.85      gwr      3020: #ifdef DIAGNOSTIC
                   3021:        if (m68k_trunc_seg(sva) != m68k_trunc_seg(eva-1))
                   3022:                panic("pmap_protect1: bad range!");
                   3023: #endif
1.38      gwr      3024:
1.85      gwr      3025:        if (pmap == kernel_pmap) {
                   3026:                sme = get_segmap(sva);
                   3027:                if (sme != SEGINV)
                   3028:                        pmap_protect_mmu(pmap, sva, eva);
                   3029:                goto out;
                   3030:        }
                   3031:        /* It is a user pmap. */
1.1       glass    3032:
1.85      gwr      3033:        /* There is a PMEG, but maybe not active. */
                   3034:        old_ctx = INVALID_CONTEXT;
                   3035:        in_ctx = FALSE;
                   3036:        if (has_context(pmap)) {
                   3037:                /* Temporary context change. */
                   3038:                old_ctx = get_context();
                   3039:                set_context(pmap->pm_ctxnum);
                   3040:                sme = get_segmap(sva);
                   3041:                if (sme != SEGINV)
                   3042:                        in_ctx = TRUE;
                   3043:        }
1.38      gwr      3044:
1.85      gwr      3045:        if (in_ctx == TRUE)
                   3046:                pmap_protect_mmu(pmap, sva, eva);
                   3047:        else
                   3048:                pmap_protect_noctx(pmap, sva, eva);
1.84      gwr      3049:
1.85      gwr      3050:        if (old_ctx != INVALID_CONTEXT) {
                   3051:                /* Restore previous context. */
                   3052:                set_context(old_ctx);
                   3053:        }
1.80      gwr      3054:
1.85      gwr      3055: out:
1.80      gwr      3056:        splx(s);
1.1       glass    3057: }
                   3058:
1.38      gwr      3059: /*
1.85      gwr      3060:  * Remove write permissions, all in one PMEG,
                   3061:  * where that PMEG is currently in the MMU.
                   3062:  * The current context is already correct.
1.38      gwr      3063:  */
1.145     chs      3064: void
                   3065: pmap_protect_mmu(pmap_t pmap, vaddr_t sva, vaddr_t eva)
1.38      gwr      3066: {
1.85      gwr      3067:        pmeg_t pmegp;
1.132     chs      3068:        vaddr_t pgva, segva;
1.85      gwr      3069:        int pte, sme;
1.107     gwr      3070: #ifdef HAVECACHE
1.85      gwr      3071:        int flush_by_page = 0;
1.107     gwr      3072: #endif
1.85      gwr      3073:
                   3074:        CHECK_SPL();
                   3075:
                   3076: #ifdef DIAGNOSTIC
                   3077:        if (pmap != kernel_pmap) {
                   3078:                if (pmap->pm_ctxnum != get_context())
                   3079:                        panic("pmap_protect_mmu: wrong context");
                   3080:        }
                   3081: #endif
1.1       glass    3082:
1.85      gwr      3083:        segva = m68k_trunc_seg(sva);
                   3084:        sme = get_segmap(segva);
1.84      gwr      3085:
1.85      gwr      3086: #ifdef DIAGNOSTIC
                   3087:        /* Make sure it is valid and known. */
                   3088:        if (sme == SEGINV)
                   3089:                panic("pmap_protect_mmu: SEGINV");
                   3090:        if (pmap->pm_segmap && (pmap->pm_segmap[VA_SEGNUM(segva)] != sme))
                   3091:                panic("pmap_protect_mmu: incorrect sme, va=0x%lx", segva);
                   3092: #endif
1.38      gwr      3093:
1.85      gwr      3094:        pmegp = pmeg_p(sme);
                   3095:        /* have pmeg, will travel */
1.38      gwr      3096:
1.85      gwr      3097: #ifdef DIAGNOSTIC
                   3098:        /* Make sure we own the pmeg, right va, etc. */
                   3099:        if ((pmegp->pmeg_va != segva) ||
1.145     chs      3100:            (pmegp->pmeg_owner != pmap) ||
                   3101:            (pmegp->pmeg_version != pmap->pm_version))
1.85      gwr      3102:        {
                   3103:                panic("pmap_protect_mmu: bad pmeg=%p", pmegp);
                   3104:        }
1.132     chs      3105:        if (pmegp->pmeg_vpages < 0)
                   3106:                panic("pmap_protect_mmu: npages corrupted");
                   3107:        if (pmegp->pmeg_vpages == 0)
1.85      gwr      3108:                panic("pmap_protect_mmu: no valid pages?");
                   3109: #endif
1.26      gwr      3110:
1.85      gwr      3111: #ifdef HAVECACHE
                   3112:        if (cache_size) {
                   3113:                /*
                   3114:                 * If the range to be removed is larger than the cache,
                   3115:                 * it will be cheaper to flush this segment entirely.
                   3116:                 */
                   3117:                if (cache_size < (eva - sva)) {
                   3118:                        /* cheaper to flush whole segment */
                   3119:                        cache_flush_segment(segva);
                   3120:                } else {
                   3121:                        flush_by_page = 1;
                   3122:                }
                   3123:        }
                   3124: #endif
1.84      gwr      3125:
1.86      gwr      3126:        /* Remove write permission in the given range. */
1.140     thorpej  3127:        for (pgva = sva; pgva < eva; pgva += PAGE_SIZE) {
1.85      gwr      3128:                pte = get_pte(pgva);
                   3129:                if (pte & PG_VALID) {
                   3130: #ifdef HAVECACHE
                   3131:                        if (flush_by_page) {
                   3132:                                cache_flush_page(pgva);
                   3133:                                /* Get fresh mod/ref bits from write-back. */
                   3134:                                pte = get_pte(pgva);
                   3135:                        }
                   3136: #endif
                   3137:                        if (IS_MAIN_MEM(pte)) {
                   3138:                                save_modref_bits(pte);
                   3139:                        }
                   3140:                        pte &= ~(PG_WRITE | PG_MODREF);
                   3141:                        set_pte(pgva, pte);
                   3142:                }
                   3143:        }
1.38      gwr      3144: }
1.28      gwr      3145:
1.66      gwr      3146: /*
1.85      gwr      3147:  * Remove write permissions, all in one PMEG,
                   3148:  * where the PMEG is not currently loaded in any context.
1.66      gwr      3149:  */
1.145     chs      3150: void
                   3151: pmap_protect_noctx(pmap_t pmap, vaddr_t sva, vaddr_t eva)
1.38      gwr      3152: {
1.86      gwr      3153:        int old_ctx, pte, sme, segnum;
1.132     chs      3154:        vaddr_t pgva, segva;
1.38      gwr      3155:
1.85      gwr      3156: #ifdef DIAGNOSTIC
                   3157:        /* Kernel always in a context (actually, in all contexts). */
                   3158:        if (pmap == kernel_pmap)
                   3159:                panic("pmap_protect_noctx: kernel_pmap");
                   3160:        if (pmap->pm_segmap == NULL)
                   3161:                panic("pmap_protect_noctx: null segmap");
1.38      gwr      3162: #endif
                   3163:
1.86      gwr      3164:        segva = m68k_trunc_seg(sva);
                   3165:        segnum = VA_SEGNUM(segva);
1.85      gwr      3166:        sme = pmap->pm_segmap[segnum];
1.28      gwr      3167:        if (sme == SEGINV)
1.85      gwr      3168:                return;
1.86      gwr      3169:
                   3170:        /*
                   3171:         * Borrow the EMPTY_CONTEXT so we can access the PMEG
                   3172:         * at its normal virtual address.
                   3173:         */
                   3174:        old_ctx = get_context();
                   3175:        set_context(EMPTY_CONTEXT);
                   3176:        set_segmap(segva, sme);
1.77      gwr      3177:
1.85      gwr      3178:        /* Remove write permission in the given range. */
1.140     thorpej  3179:        for (pgva = sva; pgva < eva; pgva += PAGE_SIZE) {
1.86      gwr      3180:                pte = get_pte(pgva);
1.85      gwr      3181:                if (pte & PG_VALID) {
1.86      gwr      3182:                        /* No cache flush needed. */
1.85      gwr      3183:                        if (IS_MAIN_MEM(pte)) {
                   3184:                                save_modref_bits(pte);
                   3185:                        }
                   3186:                        pte &= ~(PG_WRITE | PG_MODREF);
1.86      gwr      3187:                        set_pte(pgva, pte);
1.85      gwr      3188:                }
                   3189:        }
1.86      gwr      3190:
                   3191:        /*
                   3192:         * Make the EMPTY_CONTEXT really empty again, and
                   3193:         * restore the previous context.
                   3194:         */
                   3195:        set_segmap(segva, SEGINV);
                   3196:        set_context(old_ctx);
1.2       glass    3197: }
1.38      gwr      3198:
1.85      gwr      3199:
1.2       glass    3200: /*
1.85      gwr      3201:  *     Remove the given range of addresses from the specified map.
1.2       glass    3202:  *
1.85      gwr      3203:  *     It is assumed that the start and end are properly
                   3204:  *     rounded to the page size.
1.2       glass    3205:  */
1.145     chs      3206: void
                   3207: pmap_remove(pmap_t pmap, vaddr_t sva, vaddr_t eva)
1.2       glass    3208: {
1.132     chs      3209:        vaddr_t va, neva;
1.85      gwr      3210:        int segnum;
1.2       glass    3211:
1.85      gwr      3212: #ifdef PMAP_DEBUG
                   3213:        if ((pmap_debug & PMD_REMOVE) ||
1.145     chs      3214:            ((sva <= pmap_db_watchva && eva > pmap_db_watchva)))
1.85      gwr      3215:                printf("pmap_remove(%p, 0x%lx, 0x%lx)\n", pmap, sva, eva);
                   3216: #endif
1.80      gwr      3217:
1.132     chs      3218:
                   3219:        KASSERT((pmap == kernel_pmap) ?
1.145     chs      3220:                sva >= virtual_avail && eva < DVMA_MAP_END :
                   3221:                eva <= VM_MAXUSER_ADDRESS);
1.85      gwr      3222:        va = sva;
                   3223:        segnum = VA_SEGNUM(va);
                   3224:        while (va < eva) {
                   3225:                neva = m68k_trunc_seg(va) + NBSG;
                   3226:                if (neva > eva)
                   3227:                        neva = eva;
                   3228:                if (pmap->pm_segmap[segnum] != SEGINV)
                   3229:                        pmap_remove1(pmap, va, neva);
                   3230:                va = neva;
                   3231:                segnum++;
1.56      gwr      3232:        }
1.2       glass    3233: }
                   3234:
                   3235: /*
1.85      gwr      3236:  * Remove user mappings, all within one segment
1.2       glass    3237:  */
1.145     chs      3238: void
                   3239: pmap_remove1(pmap_t pmap, vaddr_t sva, vaddr_t eva)
1.2       glass    3240: {
1.85      gwr      3241:        int old_ctx, s, sme;
                   3242:        boolean_t in_ctx;
                   3243:
1.122     thorpej  3244:        s = splvm();
1.85      gwr      3245:
                   3246: #ifdef DIAGNOSTIC
                   3247:        if (m68k_trunc_seg(sva) != m68k_trunc_seg(eva-1))
                   3248:                panic("pmap_remove1: bad range!");
                   3249: #endif
                   3250:
                   3251:        if (pmap == kernel_pmap) {
                   3252:                sme = get_segmap(sva);
                   3253:                if (sme != SEGINV)
                   3254:                        pmap_remove_mmu(pmap, sva, eva);
                   3255:                goto out;
                   3256:        }
                   3257:        /* It is a user pmap. */
                   3258:
                   3259:        /* There is a PMEG, but maybe not active. */
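                                /*
                                 * If the pmap currently owns a hardware context,
                                 * switch to it and let pmap_remove_mmu() work on
                                 * the live segmap; otherwise pmap_remove_noctx()
                                 * borrows EMPTY_CONTEXT to reach the PMEG.
                                 */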
                   3260:        old_ctx = INVALID_CONTEXT;
                   3261:        in_ctx = FALSE;
                   3262:        if (has_context(pmap)) {
                   3263:                /* Temporary context change. */
                   3264:                old_ctx = get_context();
                   3265:                set_context(pmap->pm_ctxnum);
                   3266:                sme = get_segmap(sva);
                   3267:                if (sme != SEGINV)
                   3268:                        in_ctx = TRUE;
                   3269:        }
                   3270:
                   3271:        if (in_ctx == TRUE)
                   3272:                pmap_remove_mmu(pmap, sva, eva);
                   3273:        else
                   3274:                pmap_remove_noctx(pmap, sva, eva);
                   3275:
                   3276:        if (old_ctx != INVALID_CONTEXT) {
                   3277:                /* Restore previous context. */
                   3278:                set_context(old_ctx);
                   3279:        }
                   3280:
                   3281: out:
                   3282:        splx(s);
1.2       glass    3283: }
1.5       glass    3284:
1.38      gwr      3285: /*
1.85      gwr      3286:  * Remove some mappings, all in one PMEG,
1.38      gwr      3287:  * where that PMEG is currently in the MMU.
                   3288:  * The current context is already correct.
1.85      gwr      3289:  * If no PTEs remain valid in the PMEG, free it.
1.38      gwr      3290:  */
1.145     chs      3291: void
                   3292: pmap_remove_mmu(pmap_t pmap, vaddr_t sva, vaddr_t eva)
1.38      gwr      3293: {
                   3294:        pmeg_t pmegp;
1.132     chs      3295:        vaddr_t pgva, segva;
1.38      gwr      3296:        int pte, sme;
1.107     gwr      3297: #ifdef HAVECACHE
1.52      gwr      3298:        int flush_by_page = 0;
1.107     gwr      3299: #endif
1.38      gwr      3300:
                   3301:        CHECK_SPL();
                   3302:
                   3303: #ifdef DIAGNOSTIC
1.50      gwr      3304:        if (pmap != kernel_pmap) {
1.38      gwr      3305:                if (pmap->pm_ctxnum != get_context())
1.85      gwr      3306:                        panic("pmap_remove_mmu: wrong context");
1.38      gwr      3307:        }
                   3308: #endif
                   3309:
1.82      gwr      3310:        segva = m68k_trunc_seg(sva);
                   3311:        sme = get_segmap(segva);
1.80      gwr      3312:
1.38      gwr      3313: #ifdef DIAGNOSTIC
                   3314:        /* Make sure it is valid and known. */
                   3315:        if (sme == SEGINV)
1.85      gwr      3316:                panic("pmap_remove_mmu: SEGINV");
1.82      gwr      3317:        if (pmap->pm_segmap && (pmap->pm_segmap[VA_SEGNUM(segva)] != sme))
1.85      gwr      3318:                panic("pmap_remove_mmu: incorrect sme, va=0x%lx", segva);
1.38      gwr      3319: #endif
1.80      gwr      3320:
1.29      gwr      3321:        pmegp = pmeg_p(sme);
1.38      gwr      3322:        /* have pmeg, will travel */
                   3323:
1.30      gwr      3324: #ifdef DIAGNOSTIC
1.38      gwr      3325:        /* Make sure we own the pmeg, right va, etc. */
1.82      gwr      3326:        if ((pmegp->pmeg_va != segva) ||
1.145     chs      3327:            (pmegp->pmeg_owner != pmap) ||
                   3328:            (pmegp->pmeg_version != pmap->pm_version))
1.38      gwr      3329:        {
1.85      gwr      3330:                panic("pmap_remove_mmu: bad pmeg=%p", pmegp);
1.38      gwr      3331:        }
1.132     chs      3332:        if (pmegp->pmeg_vpages < 0)
                   3333:                panic("pmap_remove_mmu: vpages corrupted");
                   3334:        if (pmegp->pmeg_vpages == 0)
1.85      gwr      3335:                panic("pmap_remove_mmu: no valid pages?");
1.38      gwr      3336: #endif
                   3337:
                   3338: #ifdef HAVECACHE
1.52      gwr      3339:        if (cache_size) {
                   3340:                /*
                   3341:                 * If the range to be removed is larger than the cache,
                   3342:                 * it will be cheaper to flush this segment entirely.
                   3343:                 */
                   3344:                if (cache_size < (eva - sva)) {
                   3345:                        /* cheaper to flush whole segment */
1.82      gwr      3346:                        cache_flush_segment(segva);
1.52      gwr      3347:                } else {
                   3348:                        flush_by_page = 1;
                   3349:                }
                   3350:        }
1.30      gwr      3351: #endif
1.38      gwr      3352:
1.85      gwr      3353:        /* Invalidate the PTEs in the given range. */
1.140     thorpej  3354:        for (pgva = sva; pgva < eva; pgva += PAGE_SIZE) {
1.82      gwr      3355:                pte = get_pte(pgva);
1.38      gwr      3356:                if (pte & PG_VALID) {
1.52      gwr      3357: #ifdef HAVECACHE
                   3358:                        if (flush_by_page) {
1.82      gwr      3359:                                cache_flush_page(pgva);
1.52      gwr      3360:                                /* Get fresh mod/ref bits from write-back. */
1.82      gwr      3361:                                pte = get_pte(pgva);
1.52      gwr      3362:                        }
                   3363: #endif
                   3364:                        if (IS_MAIN_MEM(pte)) {
                   3365:                                save_modref_bits(pte);
1.92      gwr      3366:                                pv_unlink(pmap, pte, pgva);
1.85      gwr      3367:                        }
                   3368: #ifdef PMAP_DEBUG
1.145     chs      3369:                        if ((pmap_debug & PMD_SETPTE) ||
                   3370:                            (pgva == pmap_db_watchva)) {
1.85      gwr      3371:                                printf("pmap: set_pte pmap=%p va=0x%lx"
1.145     chs      3372:                                       " old=0x%x new=0x%x (rrmmu)\n",
                   3373:                                       pmap, pgva, pte, PG_INVAL);
1.85      gwr      3374:                        }
                   3375: #endif
                   3376:                        set_pte(pgva, PG_INVAL);
1.132     chs      3377:                        KASSERT(pmegp->pmeg_vpages > 0);
1.85      gwr      3378:                        pmegp->pmeg_vpages--;
                   3379:                }
                   3380:        }
                   3381:
1.132     chs      3382:        KASSERT(pmegp->pmeg_vpages >= 0);
                   3383:        if (pmegp->pmeg_vpages == 0) {
1.85      gwr      3384:                /* We are done with this pmeg. */
                   3385:                if (is_pmeg_wired(pmegp)) {
                   3386: #ifdef PMAP_DEBUG
                   3387:                        if (pmap_debug & PMD_WIRING) {
1.145     chs      3388:                                db_printf("pmap: removing wired pmeg: %p\n",
                   3389:                                          pmegp);
1.85      gwr      3390:                                Debugger();
1.52      gwr      3391:                        }
1.85      gwr      3392: #endif /* PMAP_DEBUG */
                   3393:                }
                   3394:
                   3395: #ifdef PMAP_DEBUG
                   3396:                if (pmap_debug & PMD_SEGMAP) {
1.145     chs      3397:                        printf("pmap: set_segmap ctx=%d v=0x%lx old=0x%x "
                   3398:                               "new=ff (rm)\n",
                   3399:                               pmap->pm_ctxnum, segva, pmegp->pmeg_index);
1.85      gwr      3400:                }
                   3401:                pmeg_verify_empty(segva);
                   3402: #endif
                   3403:
                   3404:                /* Remove it from the MMU. */
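                                        /*
                                         * Kernel segmap entries are valid in all
                                         * contexts, so invalidate this one in
                                         * each of them; a user segment is loaded
                                         * only in its own context.
                                         */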
                   3405:                if (kernel_pmap == pmap) {
                   3406:                        /* Did cache flush above. */
                   3407:                        set_segmap_allctx(segva, SEGINV);
                   3408:                } else {
                   3409:                        /* Did cache flush above. */
                   3410:                        set_segmap(segva, SEGINV);
1.38      gwr      3411:                }
1.85      gwr      3412:                pmap->pm_segmap[VA_SEGNUM(segva)] = SEGINV;
                   3413:                /* Now, put it on the free list. */
                   3414:                pmeg_free(pmegp);
1.38      gwr      3415:        }
                   3416: }
                   3417:
                   3418: /*
1.85      gwr      3419:  * Remove some mappings, all in one PMEG,
1.38      gwr      3420:  * where the PMEG is not currently loaded in any context.
                   3421:  */
1.145     chs      3422: void
                   3423: pmap_remove_noctx(pmap_t pmap, vaddr_t sva, vaddr_t eva)
1.38      gwr      3424: {
                   3425:        pmeg_t pmegp;
1.86      gwr      3426:        int old_ctx, pte, sme, segnum;
1.132     chs      3427:        vaddr_t pgva, segva;
1.38      gwr      3428:
                   3429:        CHECK_SPL();
                   3430:
1.81      gwr      3431: #ifdef DIAGNOSTIC
1.38      gwr      3432:        /* Kernel always in a context (actually, in all contexts). */
1.50      gwr      3433:        if (pmap == kernel_pmap)
1.85      gwr      3434:                panic("pmap_remove_noctx: kernel_pmap");
1.38      gwr      3435:        if (pmap->pm_segmap == NULL)
1.85      gwr      3436:                panic("pmap_remove_noctx: null segmap");
1.38      gwr      3437: #endif
                   3438:
1.86      gwr      3439:        segva = m68k_trunc_seg(sva);
                   3440:        segnum = VA_SEGNUM(segva);
1.38      gwr      3441:        sme = pmap->pm_segmap[segnum];
1.80      gwr      3442:        if (sme == SEGINV)
                   3443:                return;
1.38      gwr      3444:        pmegp = pmeg_p(sme);
                   3445:
1.86      gwr      3446:        /*
                   3447:         * Borrow the EMPTY_CONTEXT so we can access the PMEG
                   3448:         * at its normal virtual address.
                   3449:         */
                   3450:        old_ctx = get_context();
                   3451:        set_context(EMPTY_CONTEXT);
                   3452:        set_segmap(segva, sme);
                   3453:
                   3454:        /* Invalidate the PTEs in the given range. */
1.140     thorpej  3455:        for (pgva = sva; pgva < eva; pgva += PAGE_SIZE) {
1.86      gwr      3456:                pte = get_pte(pgva);
1.38      gwr      3457:                if (pte & PG_VALID) {
1.86      gwr      3458:                        /* No cache flush needed. */
1.52      gwr      3459:                        if (IS_MAIN_MEM(pte)) {
                   3460:                                save_modref_bits(pte);
1.92      gwr      3461:                                pv_unlink(pmap, pte, pgva);
1.52      gwr      3462:                        }
1.76      gwr      3463: #ifdef PMAP_DEBUG
1.145     chs      3464:                        if ((pmap_debug & PMD_SETPTE) ||
                   3465:                            (pgva == pmap_db_watchva)) {
1.85      gwr      3466:                                printf("pmap: set_pte pmap=%p va=0x%lx"
1.145     chs      3467:                                       " old=0x%x new=0x%x (rrncx)\n",
                   3468:                                       pmap, pgva, pte, PG_INVAL);
1.85      gwr      3469:                        }
1.76      gwr      3470: #endif
1.86      gwr      3471:                        set_pte(pgva, PG_INVAL);
1.132     chs      3472:                        KASSERT(pmegp->pmeg_vpages > 0);
1.85      gwr      3473:                        pmegp->pmeg_vpages--;
1.38      gwr      3474:                }
1.66      gwr      3475:        }
1.86      gwr      3476:
                   3477:        /*
                   3478:         * Make the EMPTY_CONTEXT really empty again, and
                   3479:         * restore the previous context.
                   3480:         */
                   3481:        set_segmap(segva, SEGINV);
                   3482:        set_context(old_ctx);
                   3483:
1.132     chs      3484:        KASSERT(pmegp->pmeg_vpages >= 0);
                   3485:        if (pmegp->pmeg_vpages == 0) {
1.86      gwr      3486:                /* We are done with this pmeg. */
                   3487:                if (is_pmeg_wired(pmegp)) {
                   3488: #ifdef PMAP_DEBUG
                   3489:                        if (pmap_debug & PMD_WIRING) {
1.145     chs      3490:                                db_printf("pmap: removing wired pmeg: %p\n",
                   3491:                                          pmegp);
1.86      gwr      3492:                                Debugger();
                   3493:                        }
                   3494: #endif /* PMAP_DEBUG */
                   3495:                }
1.66      gwr      3496:
1.85      gwr      3497:                pmap->pm_segmap[segnum] = SEGINV;
                   3498:                pmeg_free(pmegp);
1.38      gwr      3499:        }
                   3500: }
1.85      gwr      3501:
1.38      gwr      3502:
                   3503: /*
1.69      gwr      3504:  * Count resident pages in this pmap.
                   3505:  * See: kern_sysctl.c:pmap_resident_count
1.38      gwr      3506:  */
1.145     chs      3507: segsz_t
                   3508: pmap_resident_pages(pmap_t pmap)
1.38      gwr      3509: {
                   3510:        int i, sme, pages;
                   3511:        pmeg_t pmeg;
                   3512:
1.69      gwr      3513:        if (pmap->pm_segmap == NULL)
                   3514:                return (0);
                   3515:
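                                /*
                                 * Each loaded PMEG keeps a running count of its
                                 * valid PTEs in pmeg_vpages, so summing those
                                 * counts gives the resident total without
                                 * walking individual PTEs.
                                 */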
1.38      gwr      3516:        pages = 0;
1.69      gwr      3517:        for (i = 0; i < NUSEG; i++) {
                   3518:                sme = pmap->pm_segmap[i];
                   3519:                if (sme != SEGINV) {
                   3520:                        pmeg = pmeg_p(sme);
                   3521:                        pages += pmeg->pmeg_vpages;
                   3522:                }
                   3523:        }
                   3524:        return (pages);
                   3525: }
                   3526:
                   3527: /*
                   3528:  * Count wired pages in this pmap.
                   3529:  * See vm_mmap.c:pmap_wired_count
                   3530:  */
1.145     chs      3531: segsz_t
                   3532: pmap_wired_pages(pmap_t pmap)
1.69      gwr      3533: {
                   3534:        int i, mask, sme, pages;
                   3535:        pmeg_t pmeg;
                   3536:
                   3537:        if (pmap->pm_segmap == NULL)
                   3538:                return (0);
                   3539:
                   3540:        pages = 0;
                   3541:        for (i = 0; i < NUSEG; i++) {
                   3542:                sme = pmap->pm_segmap[i];
                   3543:                if (sme != SEGINV) {
                   3544:                        pmeg = pmeg_p(sme);
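                                                /*
                                                 * pmeg_wired has one bit per
                                                 * page in the segment; count
                                                 * the bits that are set.
                                                 */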
                   3545:                        mask = 0x8000;
                   3546:                        do {
                   3547:                                if (pmeg->pmeg_wired & mask)
                   3548:                                        pages++;
                   3549:                                mask = (mask >> 1);
                   3550:                        } while (mask);
1.38      gwr      3551:                }
                   3552:        }
                   3553:        return (pages);
1.2       glass    3554: }
                   3555:
1.38      gwr      3556:
                   3557: /*
                   3558:  *     pmap_copy_page copies the specified (machine independent)
                   3559:  *     page by mapping the page into virtual memory and using
                   3560:  *     copypage() to copy the page, one machine dependent page at a
                   3561:  *     time.
                   3562:  */
1.145     chs      3563: void
                   3564: pmap_copy_page(paddr_t src, paddr_t dst)
1.38      gwr      3565: {
                   3566:        int pte;
                   3567:        int s;
                   3568:
1.122     thorpej  3569:        s = splvm();
1.77      gwr      3570:
1.38      gwr      3571: #ifdef PMAP_DEBUG
                   3572:        if (pmap_debug & PMD_COW)
1.73      fair     3573:                printf("pmap_copy_page: 0x%lx -> 0x%lx\n", src, dst);
1.38      gwr      3574: #endif
                   3575:
1.132     chs      3576: #ifdef DIAGNOSTIC
1.38      gwr      3577:        if (tmp_vpages_inuse)
                   3578:                panic("pmap_copy_page: vpages inuse");
                   3579:        tmp_vpages_inuse++;
1.132     chs      3580: #endif
1.38      gwr      3581:
1.50      gwr      3582:        /* PG_PERM is short for (PG_VALID|PG_WRITE|PG_SYSTEM|PG_NC) */
                   3583:        /* All mappings to tmp_vpages are non-cached, so no flush. */
1.38      gwr      3584:        pte = PG_PERM | PA_PGNUM(src);
                   3585:        set_pte(tmp_vpages[0], pte);
                   3586:        pte = PG_PERM | PA_PGNUM(dst);
                   3587:        set_pte(tmp_vpages[1], pte);
1.68      thorpej  3588:        copypage((char *) tmp_vpages[0], (char *) tmp_vpages[1]);
1.38      gwr      3589:        set_pte(tmp_vpages[0], PG_INVAL);
                   3590:        set_pte(tmp_vpages[1], PG_INVAL);
                   3591:
1.132     chs      3592: #ifdef DIAGNOSTIC
1.38      gwr      3593:        tmp_vpages_inuse--;
1.132     chs      3594: #endif
1.77      gwr      3595:
                   3596:        splx(s);
1.38      gwr      3597: }
                   3598:
1.2       glass    3599: /*
                   3600:  *     pmap_zero_page zeros the specified (machine independent)
                   3601:  *     page by mapping the page into virtual memory and using
                   3602:  *     zeropage() to clear its contents, one machine dependent page
                   3603:  *     at a time.
                   3604:  */
1.145     chs      3605: void
                   3606: pmap_zero_page(paddr_t pa)
1.2       glass    3607: {
1.38      gwr      3608:        int pte;
                   3609:        int s;
1.2       glass    3610:
1.122     thorpej  3611:        s = splvm();
1.77      gwr      3612:
1.26      gwr      3613: #ifdef PMAP_DEBUG
1.38      gwr      3614:        if (pmap_debug & PMD_COW)
1.73      fair     3615:                printf("pmap_zero_page: 0x%lx\n", pa);
1.38      gwr      3616: #endif
                   3617:
1.132     chs      3618: #ifdef DIAGNOSTIC
1.38      gwr      3619:        if (tmp_vpages_inuse)
                   3620:                panic("pmap_zero_page: vpages inuse");
                   3621:        tmp_vpages_inuse++;
1.132     chs      3622: #endif
1.50      gwr      3623:
                   3624:        /* PG_PERM is short for (PG_VALID|PG_WRITE|PG_SYSTEM|PG_NC) */
                   3625:        /* All mappings to tmp_vpages are non-cached, so no flush. */
1.38      gwr      3626:        pte = PG_PERM | PA_PGNUM(pa);
                   3627:        set_pte(tmp_vpages[0], pte);
1.68      thorpej  3628:        zeropage((char *) tmp_vpages[0]);
1.38      gwr      3629:        set_pte(tmp_vpages[0], PG_INVAL);
1.50      gwr      3630:
1.132     chs      3631: #ifdef DIAGNOSTIC
1.38      gwr      3632:        tmp_vpages_inuse--;
1.132     chs      3633: #endif
1.77      gwr      3634:
                   3635:        splx(s);
1.51      gwr      3636: }
                   3637:
                   3638: /*
                   3639:  *     Routine:        pmap_collect
                   3640:  *     Function:
                   3641:  *             Garbage collects the physical map system for
                   3642:  *             pages which are no longer used.
                   3643:  *             Success need not be guaranteed -- that is, there
                   3644:  *             may well be pages which are not referenced, but
                   3645:  *             others may be collected.
                   3646:  *     Usage:
                   3647:  *             Called by the pageout daemon when pages are scarce.
                   3648:  */
1.145     chs      3649: void
                   3650: pmap_collect(pmap_t pmap)
1.51      gwr      3651: {
1.38      gwr      3652: }
                   3653:
1.60      gwr      3654: /*
                   3655:  * Find first virtual address >= *va that is
                   3656:  * least likely to cause cache aliases.
                   3657:  * (This will just seg-align mappings.)
                   3658:  */
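                         /*
                          * Example: with SEGOFSET == 0x1ffff, fo == 0x32000 and
                          * *va == 0x100000 give d == 0x12000, so *va becomes
                          * 0x112000, which has the same offset within its
                          * segment as fo.
                          */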
1.145     chs      3659: void
                   3660: pmap_prefer(vaddr_t fo, vaddr_t *va)
1.60      gwr      3661: {
1.132     chs      3662:        long d;
1.60      gwr      3663:
                   3664:        d = fo - *va;
                   3665:        d &= SEGOFSET;
                   3666:        *va += d;
                   3667: }
1.61      gwr      3668:
                   3669: /*
1.74      gwr      3670:  * Fill in the sun3-specific part of the kernel core header
                   3671:  * for dumpsys().  (See machdep.c for the rest.)
1.61      gwr      3672:  */
1.145     chs      3673: void
                   3674: pmap_kcore_hdr(struct sun3_kcore_hdr *sh)
1.61      gwr      3675: {
1.132     chs      3676:        vaddr_t va;
1.74      gwr      3677:        u_char *cp, *ep;
1.61      gwr      3678:
1.74      gwr      3679:        sh->segshift = SEGSHIFT;
                   3680:        sh->pg_frame = PG_FRAME;
                   3681:        sh->pg_valid = PG_VALID;
                   3682:
                   3683:        /* Copy the kernel segmap (256 bytes). */
1.61      gwr      3684:        va = KERNBASE;
1.74      gwr      3685:        cp = sh->ksegmap;
                   3686:        ep = cp + sizeof(sh->ksegmap);
1.61      gwr      3687:        do {
                   3688:                *cp = get_segmap(va);
1.74      gwr      3689:                va += NBSG;
1.61      gwr      3690:                cp++;
1.74      gwr      3691:        } while (cp < ep);
1.61      gwr      3692: }
                   3693:
                   3694: /*
                   3695:  * Copy the pagemap RAM into the passed buffer (one page)
                   3696:  * starting at OFF in the pagemap RAM.
                   3697:  */
1.145     chs      3698: void
                   3699: pmap_get_pagemap(int *pt, int off)
1.61      gwr      3700: {
1.132     chs      3701:        vaddr_t va, va_end;
1.61      gwr      3702:        int sme, sme_end;       /* SegMap Entry numbers */
                   3703:
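                                /*
                                 * Each PMEG holds 16 four-byte PTEs, i.e. 64
                                 * bytes of pagemap RAM, so (off >> 6) is the
                                 * first PMEG and the 128 PMEGs copied here fill
                                 * one 8K page of the output buffer.
                                 */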
                   3704:        sme = (off >> 6);       /* PMEG to start on */
                   3705:        sme_end = sme + 128; /* where to stop */
                   3706:        va_end = temp_seg_va + NBSG;
                   3707:
                   3708:        do {
                   3709:                set_segmap(temp_seg_va, sme);
                   3710:                va = temp_seg_va;
                   3711:                do {
                   3712:                        *pt++ = get_pte(va);
1.140     thorpej  3713:                        va += PAGE_SIZE;
1.61      gwr      3714:                } while (va < va_end);
                   3715:                sme++;
                   3716:        } while (sme < sme_end);
                   3717:        set_segmap(temp_seg_va, SEGINV);
                   3718: }
                   3719:
1.60      gwr      3720:
                   3721: /*
                   3722:  * Helper functions for changing unloaded PMEGs
1.83      gwr      3723:  * XXX: These should go away.  (Borrow context zero instead.)
1.60      gwr      3724:  */
1.80      gwr      3725:
1.132     chs      3726: #ifdef DIAGNOSTIC
1.38      gwr      3727: static int temp_seg_inuse;
1.132     chs      3728: #endif
1.38      gwr      3729:
                   3730: static int
                   3731: get_pte_pmeg(int pmeg_num, int page_num)
                   3732: {
1.132     chs      3733:        vaddr_t va;
1.38      gwr      3734:        int pte;
                   3735:
1.50      gwr      3736:        CHECK_SPL();
1.132     chs      3737: #ifdef DIAGNOSTIC
1.38      gwr      3738:        if (temp_seg_inuse)
                   3739:                panic("get_pte_pmeg: temp_seg_inuse");
1.50      gwr      3740:        temp_seg_inuse++;
1.132     chs      3741: #endif
1.38      gwr      3742:
                   3743:        va = temp_seg_va;
                   3744:        set_segmap(temp_seg_va, pmeg_num);
1.140     thorpej  3745:        va += PAGE_SIZE*page_num;
1.38      gwr      3746:        pte = get_pte(va);
                   3747:        set_segmap(temp_seg_va, SEGINV);
1.50      gwr      3748:
1.132     chs      3749: #ifdef DIAGNOSTIC
1.38      gwr      3750:        temp_seg_inuse--;
1.132     chs      3751: #endif
1.38      gwr      3752:        return pte;
                   3753: }
                   3754:
                   3755: static void
                   3756: set_pte_pmeg(int pmeg_num, int page_num, int pte)
                   3757: {
1.132     chs      3758:        vaddr_t va;
1.38      gwr      3759:
1.50      gwr      3760:        CHECK_SPL();
1.132     chs      3761: #ifdef DIAGNOSTIC
1.38      gwr      3762:        if (temp_seg_inuse)
                   3763:                panic("set_pte_pmeg: temp_seg_inuse");
1.50      gwr      3764:        temp_seg_inuse++;
1.132     chs      3765: #endif
1.38      gwr      3766:
1.50      gwr      3767:        /* We never access data in temp_seg_va so no need to flush. */
1.38      gwr      3768:        va = temp_seg_va;
                   3769:        set_segmap(temp_seg_va, pmeg_num);
1.140     thorpej  3770:        va += PAGE_SIZE*page_num;
1.38      gwr      3771:        set_pte(va, pte);
                   3772:        set_segmap(temp_seg_va, SEGINV);
1.50      gwr      3773:
1.132     chs      3774: #ifdef DIAGNOSTIC
1.38      gwr      3775:        temp_seg_inuse--;
1.132     chs      3776: #endif
1.2       glass    3777: }
1.109     is       3778:
                   3779: /*
                   3780:  *     Routine:        pmap_procwr
                   3781:  *
                   3782:  *     Function:
                   3783:  *             Synchronize caches corresponding to [addr, addr+len) in p.
                   3784:  */
1.145     chs      3785: void
                   3786: pmap_procwr(struct proc *p, vaddr_t va, size_t len)
1.109     is       3787: {
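                                /*
                                 * 0x80000004 is CC_EXTPURGE | CC_IPURGE (see
                                 * <m68k/cachectl.h>): purge the instruction
                                 * cache for the given range.
                                 */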
                   3788:        (void)cachectl1(0x80000004, va, len, p);
                   3789: }
                   3790:
1.78      gwr      3791:
                   3792: #ifdef PMAP_DEBUG
                   3793: /* Things to call from the debugger. */
                   3794:
1.145     chs      3795: void
                   3796: pmap_print(pmap_t pmap)
1.78      gwr      3797: {
1.81      gwr      3798:        db_printf(" pm_ctxnum=%d\n", pmap->pm_ctxnum);
                   3799:        db_printf(" pm_version=0x%x\n", pmap->pm_version);
                   3800:        db_printf(" pm_segmap=%p\n", pmap->pm_segmap);
1.78      gwr      3801: }
                   3802:
1.145     chs      3803: void
                   3804: pmeg_print(pmeg_t pmegp)
1.78      gwr      3805: {
1.81      gwr      3806:        db_printf("link_next=%p  link_prev=%p\n",
1.145     chs      3807:                  TAILQ_NEXT(pmegp, pmeg_link),
                   3808:                  TAILQ_PREV(pmegp, pmeg_tailq, pmeg_link));
1.81      gwr      3809:        db_printf("index=0x%x owner=%p own_vers=0x%x\n",
1.145     chs      3810:                  pmegp->pmeg_index, pmegp->pmeg_owner, pmegp->pmeg_version);
1.81      gwr      3811:        db_printf("va=0x%lx wired=0x%x reserved=0x%x vpgs=0x%x qstate=0x%x\n",
1.145     chs      3812:                  pmegp->pmeg_va, pmegp->pmeg_wired,
                   3813:                  pmegp->pmeg_reserved, pmegp->pmeg_vpages,
                   3814:                  pmegp->pmeg_qstate);
1.78      gwr      3815: }
                   3816:
1.145     chs      3817: void
                   3818: pv_print(paddr_t pa)
1.78      gwr      3819: {
                   3820:        pv_entry_t pv;
1.84      gwr      3821:        int idx;
1.78      gwr      3822:
1.84      gwr      3823:        idx = PA_PGNUM(pa);
1.87      gwr      3824:        if (idx >= physmem) {
1.84      gwr      3825:                db_printf("bad address\n");
1.78      gwr      3826:                return;
1.84      gwr      3827:        }
                   3828:        db_printf("pa=0x%lx, flags=0x%x\n",
1.145     chs      3829:                  pa, pv_flags_tbl[idx]);
1.78      gwr      3830:
1.84      gwr      3831:        pv = pv_head_tbl[idx];
1.78      gwr      3832:        while (pv) {
1.84      gwr      3833:                db_printf(" pv_entry %p pmap %p va 0x%lx next %p\n",
1.145     chs      3834:                          pv, pv->pv_pmap, pv->pv_va, pv->pv_next);
1.78      gwr      3835:                pv = pv->pv_next;
                   3836:        }
                   3837: }
                   3838: #endif /* PMAP_DEBUG */
