
Annotation of src/sys/arch/sun2/sun2/pmap.c, Revision 1.6

1.6     ! thorpej     1: /*     $NetBSD: pmap.c,v 1.5 2001/04/21 23:51:22 thorpej Exp $ */
1.1       fredette    2:
                      3: /*-
                      4:  * Copyright (c) 1996 The NetBSD Foundation, Inc.
                      5:  * All rights reserved.
                      6:  *
                      7:  * This code is derived from software contributed to The NetBSD Foundation
                      8:  * by Adam Glass, Gordon W. Ross, and Matthew Fredette.
                      9:  *
                     10:  * Redistribution and use in source and binary forms, with or without
                     11:  * modification, are permitted provided that the following conditions
                     12:  * are met:
                     13:  * 1. Redistributions of source code must retain the above copyright
                     14:  *    notice, this list of conditions and the following disclaimer.
                     15:  * 2. Redistributions in binary form must reproduce the above copyright
                     16:  *    notice, this list of conditions and the following disclaimer in the
                     17:  *    documentation and/or other materials provided with the distribution.
                     18:  * 3. All advertising materials mentioning features or use of this software
                     19:  *    must display the following acknowledgement:
                     20:  *        This product includes software developed by the NetBSD
                     21:  *        Foundation, Inc. and its contributors.
                     22:  * 4. Neither the name of The NetBSD Foundation nor the names of its
                     23:  *    contributors may be used to endorse or promote products derived
                     24:  *    from this software without specific prior written permission.
                     25:  *
                     26:  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
                     27:  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
                     28:  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
                     29:  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
                     30:  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
                     31:  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
                     32:  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
                     33:  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
                     34:  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
                     35:  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
                     36:  * POSSIBILITY OF SUCH DAMAGE.
                     37:  */
                     38:
                     39: /*
                     40:  * Some notes:
                     41:  *
                     42:  * sun2s have contexts (8).  In this pmap design, the kernel is mapped
                     43:  * into context zero.  Processes take up a known portion of the context,
                      44:  * and compete for the available contexts on an LRU basis.
                     45:  *
                     46:  * sun2s also have this evil "PMEG" crapola.  Essentially each "context"'s
                     47:  * address space is defined by the 512 one-byte entries in the segment map.
                     48:  * Each of these 1-byte entries points to a "Page Map Entry Group" (PMEG)
                      49:  * which contains the mappings for that virtual segment.  (This strange
                      50:  * terminology was invented by Sun and is preserved here for consistency.)
                      51:  * Each PMEG maps a segment 32KB in length, containing 16 pages of 2KB each.
                     52:  *
                     53:  * As you might guess, these PMEGs are in short supply and heavy demand.
                     54:  * PMEGs allocated to the kernel are "static" in the sense that they can't
                     55:  * be stolen from it.  PMEGs allocated to a particular segment of a
                     56:  * pmap's virtual space will be fought over by the other pmaps.
                     57:  *
1.2       fredette   58:  * This pmap was once sys/arch/sun3/sun3/pmap.c revision 1.124.
1.1       fredette   59:  */
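                          /*
                           * Illustrative sketch (not part of the original source): with
                           * 32KB segments and 2KB pages, a virtual address decomposes as
                           *
                           *      segnum = VA_SEGNUM(va);   -- segmap index, 0..511
                           *      sme    = segmap[segnum];  -- one-byte PMEG number
                           *      ptenum = VA_PTE_NUM(va);  -- PTE slot, 0..15, in that PMEG
                           *
                           * so each context's 16MB address space is 512 segments of 16
                           * pages each.
                           */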
                     60:
                     61: /*
                     62:  * Cache management:
                      63:  * sun2s don't have cache implementations, but for now the caching
                      64:  * code remains in place.  It's harmless (and, due to our 0 definitions of
                     65:  * PG_NC and BADALIAS, should optimize away), and keeping it in makes
                     66:  * it easier to diff this file against its cousin, sys/arch/sun3/sun3/pmap.c.
                     67:  */
                     68:
                     69: /*
                     70:  * wanted attributes:
                     71:  *       pmegs that aren't needed by a pmap remain in the MMU.
                     72:  *       quick context switches between pmaps
                     73:  */
                     74:
                     75: /*
                     76:  * Project1:  Use a "null" context for processes that have not
                     77:  * touched any user-space address recently.  This is efficient
                     78:  * for things that stay in the kernel for a while, waking up
                      79:  * to handle some I/O and then going back to sleep (e.g. nfsd).
                     80:  * If and when such a process returns to user-mode, it will
                     81:  * fault and be given a real context at that time.
                     82:  *
                     83:  * This also lets context switch be fast, because all we need
                     84:  * to do there for the MMU is slam the context register.
                     85:  *
                     86:  * Project2:  Use a private pool of PV elements.  This pool can be
                     87:  * fixed size because the total mapped virtual space supported by
                     88:  * the MMU H/W (and this pmap) is fixed for all time.
                     89:  */
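                          /*
                           * Sizing sketch for Project2 (reasoning added here, not in the
                           * original): the MMU can hold at most NPMEG * 16 valid page
                           * mappings at once (16 PTEs per PMEG), so a private pool with
                           * that many pv_entry structures can never be exhausted.  See
                           * pv_init() below for the actual computation.
                           */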
                     90:
                     91: #include "opt_ddb.h"
                     92:
                     93: #include <sys/param.h>
                     94: #include <sys/systm.h>
                     95: #include <sys/proc.h>
                     96: #include <sys/malloc.h>
1.2       fredette   97: #include <sys/pool.h>
1.1       fredette   98: #include <sys/user.h>
                     99: #include <sys/queue.h>
                    100: #include <sys/kcore.h>
                    101:
                    102: #include <uvm/uvm.h>
                    103:
                    104: /* XXX - Pager hacks... (explain?) */
                    105: #define PAGER_SVA (uvm.pager_sva)
                    106: #define PAGER_EVA (uvm.pager_eva)
                    107:
                    108: #include <machine/cpu.h>
                    109: #include <machine/dvma.h>
                    110: #include <machine/idprom.h>
                    111: #include <machine/kcore.h>
1.3       fredette  112: #include <machine/promlib.h>
1.1       fredette  113: #include <machine/pmap.h>
                    114: #include <machine/pte.h>
                    115: #include <machine/vmparam.h>
                    116:
                    117: #include <sun2/sun2/control.h>
                    118: #include <sun2/sun2/fc.h>
                    119: #include <sun2/sun2/machdep.h>
                    120:
                    121: #ifdef DDB
                    122: #include <ddb/db_output.h>
                    123: #else
                    124: #define db_printf printf
                    125: #endif
                    126:
                    127: /* Verify this correspondence between definitions. */
                    128: #if    (PMAP_OBIO << PG_MOD_SHIFT) != PGT_OBIO
                    129: #error "PMAP_XXX definitions don't match pte.h!"
                    130: #endif
                    131:
                    132: /* Type bits in a "pseudo" physical address. (XXX: pmap.h?) */
                    133: #define PMAP_TYPE      PMAP_MBIO
                    134:
                    135: /*
                    136:  * Local convenience macros
                    137:  */
                    138:
                    139: #define DVMA_MAP_END   (DVMA_MAP_BASE + DVMA_MAP_AVAIL)
                    140:
                    141: /* User segments are all of them. */
                    142: #define        NUSEG   (NSEGMAP)
                    143:
                    144: #define VA_SEGNUM(x)   ((u_int)(x) >> SEGSHIFT)
                    145:
                    146: /*
                    147:  * Only "main memory" pages are registered in the pv_lists.
                    148:  * This macro is used to determine if a given pte refers to
                    149:  * "main memory" or not.  One slight hack here deserves more
                    150:  * explanation:  On the Sun-2, the bwtwo and zsc1 appear
                    151:  * as PG_OBMEM devices at 0x00700000 and 0x00780000,
                    152:  * respectively.  We do not want to consider these as
                    153:  * "main memory" so the macro below treats obmem addresses
                    154:  * >= 0x00700000 as device addresses.  NB: this means for now,
                    155:  * you can't have a headless Sun-2 with 8MB of main memory.
                    156:  */
                    157: #define        IS_MAIN_MEM(pte) (((pte) & PG_TYPE) == 0 && PG_PA(pte) < 0x00700000)
                    158:
                    159: /* Does this (pseudo) PA represent device space? */
                    160: #define PA_IS_DEV(pa) (((pa) & PMAP_TYPE) != 0 || (pa) >= 0x00700000)
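                          /*
                           * Sketch (an assumption, not from the original): callers encode
                           * a bus type into the "pseudo" PA that they hand to pmap_enter(),
                           * e.g.
                           *
                           *      pmap_enter(pmap, va, pa | PMAP_MBIO, ...);
                           *
                           * and the type bits are later shifted up into the PTE's PGT_*
                           * type field (see the PMAP_OBIO/PGT_OBIO check above).
                           */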
                    161:
                    162: #define        BADALIAS(a1, a2)        (0)
                    163:
                    164:
                    165: /*
                    166:  * Debugging support.
                    167:  */
                    168: #define        PMD_ENTER       1
                    169: #define        PMD_LINK        2
                    170: #define        PMD_PROTECT     4
                    171: #define        PMD_SWITCH      8
                    172: #define PMD_COW                0x10
                    173: #define PMD_MODBIT     0x20
                    174: #define PMD_REFBIT     0x40
                    175: #define PMD_WIRING     0x80
                    176: #define PMD_CONTEXT    0x100
                    177: #define PMD_CREATE     0x200
                    178: #define PMD_SEGMAP     0x400
                    179: #define PMD_SETPTE     0x800
                    180: #define PMD_FAULT  0x1000
                    181:
                    182: #define        PMD_REMOVE      PMD_ENTER
                    183: #define        PMD_UNLINK      PMD_LINK
                    184:
                    185: #ifdef PMAP_DEBUG
                    186: int pmap_debug = 0;
                    187: int pmap_db_watchva = -1;
                    188: int pmap_db_watchpmeg = -1;
                    189: #endif /* PMAP_DEBUG */
                    190:
                    191:
                    192: /*
                    193:  * Miscellaneous variables.
                    194:  *
                    195:  * For simplicity, this interface retains the variables
                    196:  * that were used in the old interface (without NONCONTIG).
                    197:  * These are set in pmap_bootstrap() and used in
                    198:  * pmap_next_page().
                    199:  */
                    200: vm_offset_t virtual_avail, virtual_end;
                    201: vm_offset_t avail_start, avail_end;
                    202: #define        managed(pa)     (((pa) >= avail_start) && ((pa) < avail_end))
                    203:
                    204: /* used to skip a single hole in RAM */
                    205: static vm_offset_t hole_start, hole_size;
                    206:
                    207: /* This is for pmap_next_page() */
                    208: static vm_offset_t avail_next;
                    209:
                    210: /* This is where we map a PMEG without a context. */
                    211: static vm_offset_t temp_seg_va;
                    212: static int temp_seg_inuse;
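                          /*
                           * Sketch (an assumption, not from the original): get_pte_pmeg()
                           * and set_pte_pmeg() below borrow this VA by loading the PMEG
                           * into the segmap at temp_seg_va, touching the wanted PTE, and
                           * unloading it again, roughly:
                           *
                           *      set_segmap(temp_seg_va, sme);
                           *      pte = get_pte(temp_seg_va + ptenum * NBPG);
                           *      set_segmap(temp_seg_va, SEGINV);
                           *
                           * temp_seg_inuse guards against reentrant use.
                           */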
                    213:
                    214: /*
                    215:  * Location to store virtual addresses
                    216:  * to be used in copy/zero operations.
                    217:  */
                    218: vm_offset_t tmp_vpages[2] = {
                    219:        KERNBASE + NBPG * 5,
                    220:        KERNBASE + NBPG * 6 };
                    221: int tmp_vpages_inuse;
                    222:
                    223: static int pmap_version = 1;
                    224: struct pmap kernel_pmap_store;
                    225: #define kernel_pmap (&kernel_pmap_store)
                    226: static u_char kernel_segmap[NSEGMAP];
1.2       fredette  227: 
                    228: /* memory pool for pmap structures */
                    229: struct pool    pmap_pmap_pool;
1.1       fredette  230:
                    231: /* statistics... */
                    232: struct pmap_stats {
                    233:        int     ps_enter_firstpv;       /* pv heads entered */
                    234:        int     ps_enter_secondpv;      /* pv nonheads entered */
                    235:        int     ps_unlink_pvfirst;      /* of pv_unlinks on head */
                    236:        int     ps_unlink_pvsearch;     /* of pv_unlink searches */
                    237:        int     ps_pmeg_faultin;        /* pmegs reloaded */
                    238:        int     ps_changeprots;         /* of calls to changeprot */
                    239:        int     ps_changewire;          /* useless wiring changes */
                    240:        int     ps_npg_prot_all;        /* of active pages protected */
                    241:        int     ps_npg_prot_actual;     /* pages actually affected */
                    242:        int     ps_vac_uncached;        /* non-cached due to bad alias */
                    243:        int     ps_vac_recached;        /* re-cached when bad alias gone */
                    244: } pmap_stats;
                    245:
                    246:
                    247: /*
                    248:  * locking issues:  These used to do spl* stuff.
                    249:  * XXX: Use these for reentrance detection?
                    250:  */
                     251: #define PMAP_LOCK()    (void)0 /* XXX */
                     252: #define PMAP_UNLOCK()  (void)0 /* XXX */
                    253:
                    254: #define pmap_lock(pmap) simple_lock(&pmap->pm_lock)
                    255: #define pmap_unlock(pmap) simple_unlock(&pmap->pm_lock)
                    256: #define pmap_add_ref(pmap) ++pmap->pm_refcount
                    257: #define pmap_del_ref(pmap) --pmap->pm_refcount
                    258: #define pmap_refcount(pmap) pmap->pm_refcount
                    259:
                    260: /*
1.2       fredette  261:  * Note that splvm() is used in routines called at splnet() and
1.1       fredette  262:  * MUST NOT lower the priority.  For this reason we arrange that:
                     263:  *    splvm = max(splnet,splbio)
                    265:  */
                    266:
                    267: #ifdef PMAP_DEBUG
                    268: #define        CHECK_SPL() do { \
                    269:        if ((getsr() & PSL_IPL) < PSL_IPL4) \
                    270:                panic("pmap: bad spl, line %d", __LINE__); \
                    271: } while (0)
                    272: #else  /* PMAP_DEBUG */
                    273: #define        CHECK_SPL() (void)0
                    274: #endif /* PMAP_DEBUG */
                    275:
                    276:
                    277: /*
                    278:  * PV support.
                    279:  * (i.e. Find all virtual mappings of a physical page.)
                    280:  */
                    281:
                    282: /*
                    283:  * XXX - Could eliminate this by causing managed() to return 0
                    284:  * ( avail_start = avail_end = 0 )
                    285:  */
                    286: int pv_initialized = 0;
                    287:
                    288: /* One of these for each mapped virtual page. */
                    289: struct pv_entry {
                    290:        struct pv_entry *pv_next;
                    291:        pmap_t         pv_pmap;
                    292:        vm_offset_t      pv_va;
                    293: };
                    294: typedef struct pv_entry *pv_entry_t;
                    295:
                    296: /* Table of PV list heads (per physical page). */
                    297: static struct pv_entry **pv_head_tbl;
                    298:
                    299: /* Free list of PV entries. */
                    300: static struct pv_entry *pv_free_list;
                    301:
                    302: /* Table of flags (per physical page). */
                    303: static u_char *pv_flags_tbl;
                    304:
                    305: /* These are as in the MMU but shifted by PV_SHIFT. */
                    306: #define PV_SHIFT       20
                    307: #define PV_VALID  (PG_VALID >> PV_SHIFT)
                    308: #define PV_NC     (PG_NC >> PV_SHIFT)
                    309: #define PV_TYPE   (PG_TYPE >> PV_SHIFT)
                    310: #define PV_REF    (PG_REF >> PV_SHIFT)
                    311: #define PV_MOD    (PG_MOD >> PV_SHIFT)
                    312:
                    313:
                    314: /*
                    315:  * context structures, and queues
                    316:  */
                    317:
                    318: struct context_state {
                    319:        TAILQ_ENTRY(context_state) context_link;
                    320:        int            context_num;
                    321:        struct pmap   *context_upmap;
                    322: };
                    323: typedef struct context_state *context_t;
                    324:
                    325: #define INVALID_CONTEXT -1     /* impossible value */
                    326: #define EMPTY_CONTEXT 0
                    327: #define KERNEL_CONTEXT 0
                    328: #define FIRST_CONTEXT 1
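                          /*
                           * has_context() checks that a pmap's context number is consistent
                           * with its identity: the kernel pmap always runs in KERNEL_CONTEXT
                           * (which equals EMPTY_CONTEXT, i.e. zero), while a user pmap "has"
                           * a context only when pm_ctxnum is some non-zero value.
                           */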
1.4       fredette  329: #define        has_context(pmap)       (((pmap)->pm_ctxnum != EMPTY_CONTEXT) == ((pmap) != kernel_pmap))
1.1       fredette  330:
                    331: TAILQ_HEAD(context_tailq, context_state)
                    332:        context_free_queue, context_active_queue;
                    333:
                    334: static struct context_state context_array[NCONTEXT];
                    335:
                    336:
                    337: /*
                    338:  * PMEG structures, queues, and macros
                    339:  */
                    340: #define PMEGQ_FREE     0
                    341: #define PMEGQ_INACTIVE 1
                    342: #define PMEGQ_ACTIVE   2
                    343: #define PMEGQ_KERNEL   3
                    344: #define PMEGQ_NONE     4
                    345:
                    346: struct pmeg_state {
                    347:        TAILQ_ENTRY(pmeg_state) pmeg_link;
                    348:        int            pmeg_index;
                    349:        pmap_t         pmeg_owner;
                    350:        int            pmeg_version;
                    351:        vm_offset_t    pmeg_va;
                    352:        int            pmeg_wired;
                    353:        int            pmeg_reserved;
                    354:        int            pmeg_vpages;
                    355:        int            pmeg_qstate;
                    356: };
                    357:
                    358: typedef struct pmeg_state *pmeg_t;
                    359:
                    360: #define PMEG_INVAL (NPMEG-1)
                    361: #define PMEG_NULL (pmeg_t) NULL
                    362:
                    363: /* XXX - Replace pmeg_kernel_queue with pmeg_wired_queue ? */
                    364: TAILQ_HEAD(pmeg_tailq, pmeg_state)
                    365:        pmeg_free_queue, pmeg_inactive_queue,
                    366:        pmeg_active_queue, pmeg_kernel_queue;
                    367:
                    368: static struct pmeg_state pmeg_array[NPMEG];
                    369:
                    370:
                    371: /*
                    372:  * prototypes
                    373:  */
                    374: static int get_pte_pmeg __P((int, int));
                    375: static void set_pte_pmeg __P((int, int, int));
                    376:
                    377: static void context_allocate __P((pmap_t pmap));
                    378: static void context_free __P((pmap_t pmap));
                    379: static void context_init __P((void));
                    380:
                    381: static void pmeg_init __P((void));
                    382: static void pmeg_reserve __P((int pmeg_num));
                    383:
                    384: static pmeg_t pmeg_allocate __P((pmap_t pmap, vm_offset_t va));
                    385: static void pmeg_mon_init __P((vm_offset_t sva, vm_offset_t eva, int keep));
                    386: static void pmeg_release __P((pmeg_t pmegp));
                    387: static void pmeg_free __P((pmeg_t pmegp));
                    388: static pmeg_t pmeg_cache __P((pmap_t pmap, vm_offset_t va));
                    389: static void pmeg_set_wiring __P((pmeg_t pmegp, vm_offset_t va, int));
                    390:
                    391: static int  pv_link   __P((pmap_t pmap, int pte, vm_offset_t va));
                    392: static void pv_unlink __P((pmap_t pmap, int pte, vm_offset_t va));
                    393: static void pv_remove_all __P((vm_offset_t pa));
                    394: static void pv_changepte __P((vm_offset_t pa, int, int));
                    395: static u_int pv_syncflags __P((pv_entry_t));
                    396: static void pv_init __P((void));
                    397:
                    398: static void pmeg_clean __P((pmeg_t pmegp));
                    399: static void pmeg_clean_free __P((void));
                    400:
                    401: static void pmap_common_init __P((pmap_t pmap));
                    402: static void pmap_kernel_init __P((pmap_t pmap));
                    403: static void pmap_user_init __P((pmap_t pmap));
                    404: static void pmap_page_upload __P((void));
                    405:
                    406: static void pmap_enter_kernel __P((vm_offset_t va,
                    407:        int new_pte, boolean_t wired));
                    408: static void pmap_enter_user __P((pmap_t pmap, vm_offset_t va,
                    409:        int new_pte, boolean_t wired));
                    410:
                    411: static void pmap_protect1 __P((pmap_t, vm_offset_t, vm_offset_t));
                    412: static void pmap_protect_mmu __P((pmap_t, vm_offset_t, vm_offset_t));
                    413: static void pmap_protect_noctx __P((pmap_t, vm_offset_t, vm_offset_t));
                    414:
                    415: static void pmap_remove1 __P((pmap_t pmap, vm_offset_t, vm_offset_t));
                    416: static void pmap_remove_mmu __P((pmap_t, vm_offset_t, vm_offset_t));
                    417: static void pmap_remove_noctx __P((pmap_t, vm_offset_t, vm_offset_t));
                    418:
                    419: static int  pmap_fault_reload __P((struct pmap *, vm_offset_t, int));
                    420:
                    421: /* Called only from locore.s and pmap.c */
                    422: void   _pmap_switch __P((pmap_t pmap));
                    423:
                    424: #ifdef PMAP_DEBUG
                    425: void pmap_print __P((pmap_t pmap));
                    426: void pv_print __P((vm_offset_t pa));
                    427: void pmeg_print __P((pmeg_t pmegp));
                    428: static void pmeg_verify_empty __P((vm_offset_t va));
                    429: #endif /* PMAP_DEBUG */
                    430: void pmap_pinit __P((pmap_t));
                    431: void pmap_release __P((pmap_t));
                    432:
                    433: /*
                    434:  * Various in-line helper functions.
                    435:  */
                    436:
                    437: static inline pmap_t
                    438: current_pmap __P((void))
                    439: {
                    440:        struct proc *p;
                    441:        struct vmspace *vm;
                    442:        vm_map_t        map;
                    443:        pmap_t  pmap;
                    444:
                    445:        p = curproc;    /* XXX */
                    446:        if (p == NULL)
                    447:                pmap = kernel_pmap;
                    448:        else {
                    449:                vm = p->p_vmspace;
                    450:                map = &vm->vm_map;
                    451:                pmap = vm_map_pmap(map);
                    452:        }
                    453:
                    454:        return (pmap);
                    455: }
                    456:
                    457: static inline struct pv_entry **
                    458: pa_to_pvhead(vm_offset_t pa)
                    459: {
                    460:        int idx;
                    461:
                    462:        idx = PA_PGNUM(pa);
                    463: #ifdef DIAGNOSTIC
                    464:        if (PA_IS_DEV(pa) || (idx >= physmem))
                    465:                panic("pmap:pa_to_pvhead: bad pa=0x%lx", pa);
                    466: #endif
                    467:        return (&pv_head_tbl[idx]);
                    468: }
                    469:
                    470: static inline u_char *
                    471: pa_to_pvflags(vm_offset_t pa)
                    472: {
                    473:        int idx;
                    474:
                    475:        idx = PA_PGNUM(pa);
                    476: #ifdef DIAGNOSTIC
                    477:        if (PA_IS_DEV(pa) || (idx >= physmem))
                    478:                panic("pmap:pa_to_pvflags: bad pa=0x%lx", pa);
                    479: #endif
                    480:        return (&pv_flags_tbl[idx]);
                    481: }
                    482:
                    483: static inline pmeg_t
                    484: pmeg_p(int sme)
                    485: {
                    486: #ifdef DIAGNOSTIC
                    487:        if (sme < 0 || sme >= SEGINV)
                    488:                panic("pmeg_p: bad sme");
                    489: #endif
                    490:        return &pmeg_array[sme];
                    491: }
                    492:
                    493: #define is_pmeg_wired(pmegp) (pmegp->pmeg_wired != 0)
                    494:
                    495: static void
                    496: pmeg_set_wiring(pmegp, va, flag)
                    497:        pmeg_t pmegp;
                    498:        vm_offset_t va;
                    499:        int flag;
                    500: {
                    501:        int idx, mask;
                    502:
                    503:        idx = VA_PTE_NUM(va);
                    504:        mask = 1 << idx;
                    505:
                    506:        if (flag)
                    507:                pmegp->pmeg_wired |= mask;
                    508:        else
                    509:                pmegp->pmeg_wired &= ~mask;
                    510: }
                    511:
                    512: /*
                    513:  * Save the MOD bit from the given PTE using its PA
                    514:  */
                    515: static void
                    516: save_modref_bits(int pte)
                    517: {
                    518:        u_char *pv_flags;
                    519:
                    520:        if (pv_initialized == 0)
                    521:                return;
                    522:
                    523:        /* Only main memory is ever in the pv_lists */
                    524:        if (!IS_MAIN_MEM(pte))
                    525:                return;
                    526:
                    527:        CHECK_SPL();
                    528:
                    529:        pv_flags = pa_to_pvflags(PG_PA(pte));
                    530:        *pv_flags |= ((pte & PG_MODREF) >> PV_SHIFT);
                    531: }
                    532:
                    533:
                    534: /****************************************************************
                    535:  * Context management functions.
                    536:  */
                    537:
                    538: /* part of pmap_bootstrap */
                    539: static void
                    540: context_init()
                    541: {
                    542:        int i;
                    543:
                    544:        TAILQ_INIT(&context_free_queue);
                    545:        TAILQ_INIT(&context_active_queue);
                    546:
                    547:        /* Leave EMPTY_CONTEXT out of the free list. */
                    548:        context_array[0].context_upmap = kernel_pmap;
                    549:
                    550:        for (i = 1; i < NCONTEXT; i++) {
                    551:                context_array[i].context_num = i;
                    552:                context_array[i].context_upmap = NULL;
                    553:                TAILQ_INSERT_TAIL(&context_free_queue, &context_array[i],
                    554:                                                  context_link);
                    555: #ifdef PMAP_DEBUG
                    556:                if (pmap_debug & PMD_CONTEXT)
                    557:                        printf("context_init: sizeof(context_array[0])=%d\n",
                    558:                                   sizeof(context_array[0]));
                    559: #endif
                    560:        }
                    561: }
                    562:
                    563: /* Get us a context (steal one if necessary). */
                    564: static void
                    565: context_allocate(pmap)
                    566:        pmap_t pmap;
                    567: {
                    568:        context_t context;
                    569:
                    570:        CHECK_SPL();
                    571:
                    572: #ifdef DIAGNOSTIC
                    573:        if (pmap == kernel_pmap)
                    574:                panic("context_allocate: kernel_pmap");
                    575:        if (has_context(pmap))
                    576:                panic("pmap: pmap already has context allocated to it");
                    577: #endif
                    578:
                    579:        context = TAILQ_FIRST(&context_free_queue);
                    580:        if (context == NULL) {
                    581:                /* Steal the head of the active queue. */
                    582:                context = TAILQ_FIRST(&context_active_queue);
                    583:                if (context == NULL)
                    584:                        panic("pmap: no contexts left?");
                    585: #ifdef PMAP_DEBUG
                    586:                if (pmap_debug & PMD_CONTEXT)
                    587:                        printf("context_allocate: steal ctx %d from pmap %p\n",
                    588:                                   context->context_num, context->context_upmap);
                    589: #endif
                    590:                context_free(context->context_upmap);
                    591:                context = TAILQ_FIRST(&context_free_queue);
                    592:        }
                    593:        TAILQ_REMOVE(&context_free_queue, context, context_link);
                    594:
                    595:        if (context->context_upmap != NULL)
                    596:                panic("pmap: context in use???");
                    597:
                    598:        context->context_upmap = pmap;
                    599:        pmap->pm_ctxnum = context->context_num;
                    600:
                    601:        TAILQ_INSERT_TAIL(&context_active_queue, context, context_link);
                    602:
                    603:        /*
                    604:         * We could reload the MMU here, but that would
                    605:         * artificially move PMEGs from the inactive queue
                    606:         * to the active queue, so do lazy reloading.
                    607:         * XXX - Need to reload wired pmegs though...
                     608:         * XXX: Verify that the context is empty?
                    609:         */
                    610: }
                    611:
                    612: /*
                    613:  * Unload the context and put it on the free queue.
                    614:  */
                    615: static void
                    616: context_free(pmap)             /* :) */
                    617:        pmap_t pmap;
                    618: {
                    619:        int saved_ctxnum, ctxnum;
                    620:        int i, sme;
                    621:        context_t contextp;
                    622:        vm_offset_t va;
                    623:
                    624:        CHECK_SPL();
                    625:
                    626:        ctxnum = pmap->pm_ctxnum;
                    627:        if (ctxnum < FIRST_CONTEXT || ctxnum >= NCONTEXT)
                    628:                panic("pmap: context_free ctxnum");
                    629:        contextp = &context_array[ctxnum];
                    630:
                    631:        /* Temporary context change. */
                    632:        saved_ctxnum = get_context();
                    633:        set_context(ctxnum);
                    634:
                    635:        /* Before unloading translations, flush cache. */
                    636: #ifdef HAVECACHE
                    637:        if (cache_size)
                    638:                cache_flush_context();
                    639: #endif
                    640:
                    641:        /* Unload MMU (but keep in SW segmap). */
                     642:        for (i = 0, va = 0; i < NUSEG; i++, va += NBSG) {
                    643:
                    644: #if !defined(PMAP_DEBUG)
                    645:                /* Short-cut using the S/W segmap (if !debug). */
                    646:                if (pmap->pm_segmap[i] == SEGINV)
                    647:                        continue;
                    648: #endif
                    649:
                    650:                /* Check the H/W segmap. */
                    651:                sme = get_segmap(va);
                    652:                if (sme == SEGINV)
                    653:                        continue;
                    654:
                    655:                /* Found valid PMEG in the segmap. */
                    656: #ifdef PMAP_DEBUG
                    657:                if (pmap_debug & PMD_SEGMAP)
                    658:                        printf("pmap: set_segmap ctx=%d v=0x%lx old=0x%x new=ff (cf)\n",
                    659:                                   ctxnum, va, sme);
                    660: #endif
                    661: #ifdef DIAGNOSTIC
                    662:                if (sme != pmap->pm_segmap[i])
                    663:                        panic("context_free: unknown sme at va=0x%lx", va);
                    664: #endif
                    665:                /* Did cache flush above (whole context). */
                    666:                set_segmap(va, SEGINV);
                    667:                /* In this case, do not clear pm_segmap. */
                    668:                /* XXX: Maybe inline this call? */
                    669:                pmeg_release(pmeg_p(sme));
                    670:        }
                    671:
                    672:        /* Restore previous context. */
                    673:        set_context(saved_ctxnum);
                    674:
                    675:        /* Dequeue, update, requeue. */
                    676:        TAILQ_REMOVE(&context_active_queue, contextp, context_link);
                    677:        pmap->pm_ctxnum = EMPTY_CONTEXT;
                    678:        contextp->context_upmap = NULL;
                    679:        TAILQ_INSERT_TAIL(&context_free_queue, contextp, context_link);
                    680: }
                    681:
                    682:
                    683: /****************************************************************
                    684:  * PMEG management functions.
                    685:  */
                    686:
                    687: static void
                    688: pmeg_init()
                    689: {
                    690:        int x;
                    691:
                     692:        /* Clear the pmeg array and put it all on the free pmeg queue. */
                    693:
                    694:        TAILQ_INIT(&pmeg_free_queue);
                    695:        TAILQ_INIT(&pmeg_inactive_queue);
                    696:        TAILQ_INIT(&pmeg_active_queue);
                    697:        TAILQ_INIT(&pmeg_kernel_queue);
                    698:
                    699:        bzero(pmeg_array, NPMEG*sizeof(struct pmeg_state));
                     700:        for (x = 0; x < NPMEG; x++) {
                    701:                TAILQ_INSERT_TAIL(&pmeg_free_queue, &pmeg_array[x],
                    702:                                  pmeg_link);
                    703:                pmeg_array[x].pmeg_qstate = PMEGQ_FREE;
                    704:                pmeg_array[x].pmeg_index = x;
                    705:        }
                    706:
                    707:        /* The last pmeg is not usable. */
                    708:        pmeg_reserve(SEGINV);
                    709: }
                    710:
                    711: /*
                    712:  * Reserve a pmeg (forever) for use by PROM, etc.
                    713:  * Contents are left as-is.  Called very early...
                    714:  */
                    715: void
                    716: pmeg_reserve(sme)
                    717:        int sme;
                    718: {
                    719:        pmeg_t pmegp;
                    720:
                    721:        /* Can not use pmeg_p() because it fails on SEGINV. */
                    722:        pmegp = &pmeg_array[sme];
                    723:
                    724:        if (pmegp->pmeg_reserved) {
1.3       fredette  725:                prom_printf("pmeg_reserve: already reserved\n");
                    726:                prom_abort();
1.1       fredette  727:        }
                    728:        if (pmegp->pmeg_owner) {
1.3       fredette  729:                prom_printf("pmeg_reserve: already owned\n");
                    730:                prom_abort();
1.1       fredette  731:        }
                    732:
                    733:        /* Owned by kernel, but not really usable... */
                    734:        pmegp->pmeg_owner = kernel_pmap;
                    735:        pmegp->pmeg_reserved++; /* keep count, just in case */
                    736:        TAILQ_REMOVE(&pmeg_free_queue, pmegp, pmeg_link);
                    737:        pmegp->pmeg_qstate = PMEGQ_NONE;
                    738: }
                    739:
                    740: /*
                    741:  * Examine PMEGs used by the monitor, and either
                    742:  * reserve them (keep=1) or clear them (keep=0)
                    743:  */
                    744: static void
                    745: pmeg_mon_init(sva, eva, keep)
                    746:        vm_offset_t sva, eva;
                     747:        int keep;       /* true: reserve, false: clear */
                    748: {
                    749:        vm_offset_t pgva, endseg;
                    750:        int pte, valid;
                    751:        unsigned char sme;
                    752:
                    753: #ifdef PMAP_DEBUG
                    754:        if (pmap_debug & PMD_SEGMAP)
1.3       fredette  755:                prom_printf("pmeg_mon_init(0x%x, 0x%x, %d)\n",
1.1       fredette  756:                           sva, eva, keep);
                    757: #endif
                    758:
                    759:        sva &= ~(NBSG-1);
                    760:
                    761:        while (sva < eva) {
                    762:                sme = get_segmap(sva);
                    763:                if (sme != SEGINV) {
                    764:                        valid = 0;
                    765:                        endseg = sva + NBSG;
                    766:                        for (pgva = sva; pgva < endseg; pgva += NBPG) {
                    767:                                pte = get_pte(pgva);
                    768:                                if (pte & PG_VALID) {
                    769:                                        valid++;
                    770:                                }
                    771:                        }
                    772: #ifdef PMAP_DEBUG
                    773:                        if (pmap_debug & PMD_SEGMAP)
1.3       fredette  774:                                prom_printf(" sva=0x%x seg=0x%x valid=%d\n",
1.1       fredette  775:                                           sva, sme, valid);
                    776: #endif
                    777:                        if (keep && valid)
                    778:                                pmeg_reserve(sme);
                    779:                        else set_segmap(sva, SEGINV);
                    780:                }
                    781:                sva += NBSG;
                    782:        }
                    783: }
                    784:
                    785: /*
                    786:  * This is used only during pmap_bootstrap, so we can
                    787:  * get away with borrowing a slot in the segmap.
                    788:  */
                    789: static void
                    790: pmeg_clean(pmegp)
                    791:        pmeg_t pmegp;
                    792: {
                    793:        int sme;
                    794:        vm_offset_t va;
                    795:
                    796:        sme = get_segmap(0);
                    797:        if (sme != SEGINV)
                    798:                panic("pmeg_clean");
                    799:
                    800:        sme = pmegp->pmeg_index;
                    801:        set_segmap(0, sme);
                    802:
                    803:        for (va = 0; va < NBSG; va += NBPG)
                    804:                set_pte(va, PG_INVAL);
                    805:
                    806:        set_segmap(0, SEGINV);
                    807: }
                    808:
                    809: /*
                    810:  * This routine makes sure that pmegs on the pmeg_free_queue contain
                    811:  * no valid ptes.  It pulls things off the queue, cleans them, and
                    812:  * puts them at the end.  The ending condition is finding the first
                    813:  * queue element at the head of the queue again.
                    814:  */
                    815: static void
                    816: pmeg_clean_free()
                    817: {
                    818:        pmeg_t pmegp, pmegp_first;
                    819:
                    820:        pmegp = TAILQ_FIRST(&pmeg_free_queue);
                    821:        if (pmegp == NULL)
                    822:                panic("pmap: no free pmegs available to clean");
                    823:
                    824:        pmegp_first = NULL;
                    825:
                    826:        for (;;) {
                    827:                pmegp = TAILQ_FIRST(&pmeg_free_queue);
                    828:                TAILQ_REMOVE(&pmeg_free_queue, pmegp, pmeg_link);
                    829:
                    830:                pmegp->pmeg_qstate = PMEGQ_NONE;
                    831:                pmeg_clean(pmegp);
                    832:                pmegp->pmeg_qstate = PMEGQ_FREE;
                    833:
                    834:                TAILQ_INSERT_TAIL(&pmeg_free_queue, pmegp, pmeg_link);
                    835:
                    836:                if (pmegp == pmegp_first)
                    837:                        break;
                    838:                if (pmegp_first == NULL)
                    839:                        pmegp_first = pmegp;
                    840:        }
                    841: }
                    842:
                    843: /*
                    844:  * Allocate a PMEG by whatever means necessary.
                    845:  * (May invalidate some mappings!)
                    846:  */
                    847: static pmeg_t
                    848: pmeg_allocate(pmap, va)
                    849:        pmap_t pmap;
                    850:        vm_offset_t va;
                    851: {
                    852:        pmeg_t pmegp;
                    853:
                    854:        CHECK_SPL();
                    855:
                    856: #ifdef DIAGNOSTIC
                    857:        if (va & SEGOFSET) {
                    858:                panic("pmap:pmeg_allocate: va=0x%lx", va);
                    859:        }
                    860: #endif
                    861:
                    862:        /* Get one onto the free list if necessary. */
                    863:        pmegp = TAILQ_FIRST(&pmeg_free_queue);
                    864:        if (!pmegp) {
                    865:                /* Try inactive queue... */
                    866:                pmegp = TAILQ_FIRST(&pmeg_inactive_queue);
                    867:                if (!pmegp) {
                    868:                        /* Try active queue... */
                    869:                        pmegp = TAILQ_FIRST(&pmeg_active_queue);
                    870:                }
                    871:                if (!pmegp) {
                    872:                        panic("pmeg_allocate: failed");
                    873:                }
                    874:                /*
                    875:                 * Remove mappings to free-up a pmeg
                    876:                 * (so it will go onto the free list).
                    877:                 * XXX - Should this call up into the VM layer
                    878:                 * to notify it when pages are deactivated?
                    879:                 * See: vm_page.c:vm_page_deactivate(vm_page_t)
                    880:                 * XXX - Skip this one if it is wired?
                    881:                 */
                    882:                pmap_remove1(pmegp->pmeg_owner,
                    883:                             pmegp->pmeg_va,
                    884:                             pmegp->pmeg_va + NBSG);
                    885:        }
                    886:
                    887:        /* OK, free list has something for us to take. */
                    888:        pmegp = TAILQ_FIRST(&pmeg_free_queue);
                    889: #ifdef DIAGNOSTIC
                    890:        if (pmegp == NULL)
                    891:                panic("pmeg_allocagte: still none free?");
                    892:        if ((pmegp->pmeg_qstate != PMEGQ_FREE) ||
                    893:                (pmegp->pmeg_index == SEGINV) ||
                    894:                (pmegp->pmeg_vpages))
                    895:                panic("pmeg_allocate: bad pmegp=%p", pmegp);
                    896: #endif
                    897: #ifdef PMAP_DEBUG
                    898:        if (pmegp->pmeg_index == pmap_db_watchpmeg) {
                    899:                db_printf("pmeg_allocate: watch pmegp=%p\n", pmegp);
                    900:                Debugger();
                    901:        }
                    902: #endif
                    903:
                    904:        TAILQ_REMOVE(&pmeg_free_queue, pmegp, pmeg_link);
                    905:
                    906:        /* Reassign this PMEG for the caller. */
                    907:        pmegp->pmeg_owner = pmap;
                    908:        pmegp->pmeg_version = pmap->pm_version;
                    909:        pmegp->pmeg_va = va;
                    910:        pmegp->pmeg_wired = 0;
                    911:        pmegp->pmeg_reserved  = 0;
                    912:        pmegp->pmeg_vpages  = 0;
                    913:        if (pmap == kernel_pmap) {
                    914:                TAILQ_INSERT_TAIL(&pmeg_kernel_queue, pmegp, pmeg_link);
                    915:                pmegp->pmeg_qstate = PMEGQ_KERNEL;
                    916:        } else {
                    917:                TAILQ_INSERT_TAIL(&pmeg_active_queue, pmegp, pmeg_link);
                    918:                pmegp->pmeg_qstate = PMEGQ_ACTIVE;
                    919:        }
                    920:        /* Caller will verify that it's empty (if debugging). */
                    921:        return pmegp;
                    922: }
                    923:
                    924: /*
                    925:  * Put pmeg on the inactive queue, leaving its contents intact.
                     926:  * This happens when we lose our context.  We may reclaim
                    927:  * this pmeg later if it is still in the inactive queue.
                    928:  */
                    929: static void
                    930: pmeg_release(pmegp)
                    931:        pmeg_t pmegp;
                    932: {
                    933:
                    934:        CHECK_SPL();
                    935:
                    936: #ifdef DIAGNOSTIC
                    937:        if ((pmegp->pmeg_owner == kernel_pmap) ||
                    938:                (pmegp->pmeg_qstate != PMEGQ_ACTIVE))
                    939:                panic("pmeg_release: bad pmeg=%p", pmegp);
                    940: #endif
                    941:
                    942:        TAILQ_REMOVE(&pmeg_active_queue, pmegp, pmeg_link);
                    943:        pmegp->pmeg_qstate = PMEGQ_INACTIVE;
                    944:        TAILQ_INSERT_TAIL(&pmeg_inactive_queue, pmegp, pmeg_link);
                    945: }
                    946:
                    947: /*
                    948:  * Move the pmeg to the free queue from wherever it is.
                    949:  * The pmeg will be clean.  It might be in kernel_pmap.
                    950:  */
                    951: static void
                    952: pmeg_free(pmegp)
                    953:        pmeg_t pmegp;
                    954: {
                    955:
                    956:        CHECK_SPL();
                    957:
                    958: #ifdef DIAGNOSTIC
                    959:        /* Caller should verify that it's empty. */
                    960:        if (pmegp->pmeg_vpages != 0)
                    961:                panic("pmeg_free: vpages");
                    962: #endif
                    963:
                    964:        switch (pmegp->pmeg_qstate) {
                    965:        case PMEGQ_ACTIVE:
                    966:                TAILQ_REMOVE(&pmeg_active_queue, pmegp, pmeg_link);
                    967:                break;
                    968:        case PMEGQ_INACTIVE:
                    969:                TAILQ_REMOVE(&pmeg_inactive_queue, pmegp, pmeg_link);
                    970:                break;
                    971:        case PMEGQ_KERNEL:
                    972:                TAILQ_REMOVE(&pmeg_kernel_queue, pmegp, pmeg_link);
                    973:                break;
                    974:        default:
                    975:                panic("pmeg_free: releasing bad pmeg");
                    976:                break;
                    977:        }
                    978:
                    979: #ifdef PMAP_DEBUG
                    980:        if (pmegp->pmeg_index == pmap_db_watchpmeg) {
                    981:                db_printf("pmeg_free: watch pmeg 0x%x\n",
                    982:                           pmegp->pmeg_index);
                    983:                Debugger();
                    984:        }
                    985: #endif
                    986:
                    987:        pmegp->pmeg_owner = NULL;
                    988:        pmegp->pmeg_qstate = PMEGQ_FREE;
                    989:        TAILQ_INSERT_TAIL(&pmeg_free_queue, pmegp, pmeg_link);
                    990: }
                    991:
                    992: /*
                    993:  * Find a PMEG that was put on the inactive queue when we
                    994:  * had our context stolen.  If found, move to active queue.
                    995:  */
                    996: static pmeg_t
                    997: pmeg_cache(pmap, va)
                    998:        pmap_t pmap;
                    999:        vm_offset_t va;
                   1000: {
                   1001:        int sme, segnum;
                   1002:        pmeg_t pmegp;
                   1003:
                   1004:        CHECK_SPL();
                   1005:
                   1006: #ifdef DIAGNOSTIC
                   1007:        if (pmap == kernel_pmap)
                   1008:                panic("pmeg_cache: kernel_pmap");
                   1009:        if (va & SEGOFSET) {
                   1010:                panic("pmap:pmeg_cache: va=0x%lx", va);
                   1011:        }
                   1012: #endif
                   1013:
                   1014:        if (pmap->pm_segmap == NULL)
                   1015:                return PMEG_NULL;
                   1016:
                   1017:        segnum = VA_SEGNUM(va);
                    1018:        if (segnum >= NUSEG)            /* out of range */
                   1019:                return PMEG_NULL;
                   1020:
                   1021:        sme = pmap->pm_segmap[segnum];
                   1022:        if (sme == SEGINV)      /* nothing cached */
                   1023:                return PMEG_NULL;
                   1024:
                   1025:        pmegp = pmeg_p(sme);
                   1026:
                   1027: #ifdef PMAP_DEBUG
                   1028:        if (pmegp->pmeg_index == pmap_db_watchpmeg) {
                   1029:                db_printf("pmeg_cache: watch pmeg 0x%x\n", pmegp->pmeg_index);
                   1030:                Debugger();
                   1031:        }
                   1032: #endif
                   1033:
                   1034:        /*
                   1035:         * Our segmap named a PMEG.  If it is no longer ours,
                   1036:         * invalidate that entry in our segmap and return NULL.
                   1037:         */
                   1038:        if ((pmegp->pmeg_owner != pmap) ||
                   1039:                (pmegp->pmeg_version != pmap->pm_version) ||
                   1040:                (pmegp->pmeg_va != va))
                   1041:        {
                   1042: #ifdef PMAP_DEBUG
                   1043:                db_printf("pmap:pmeg_cache: invalid pmeg: sme=0x%x\n", sme);
                   1044:                pmeg_print(pmegp);
                   1045:                Debugger();
                   1046: #endif
                   1047:                pmap->pm_segmap[segnum] = SEGINV;
                   1048:                return PMEG_NULL; /* cache lookup failed */
                   1049:        }
                   1050:
                   1051: #ifdef DIAGNOSTIC
                   1052:        /* Make sure it is on the inactive queue. */
                   1053:        if (pmegp->pmeg_qstate != PMEGQ_INACTIVE)
                   1054:                panic("pmeg_cache: pmeg was taken: %p", pmegp);
                   1055: #endif
                   1056:
                   1057:        TAILQ_REMOVE(&pmeg_inactive_queue, pmegp, pmeg_link);
                   1058:        pmegp->pmeg_qstate = PMEGQ_ACTIVE;
                   1059:        TAILQ_INSERT_TAIL(&pmeg_active_queue, pmegp, pmeg_link);
                   1060:
                   1061:        return pmegp;
                   1062: }
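
                         /*
                          * Editor's note: a hedged sketch (kept out of the build) of
                          * the intended caller pattern -- try to revive a cached PMEG,
                          * else fall back to the allocator.  pmeg_allocate() is assumed
                          * to be the allocator defined earlier in this file.
                          */
                         #if 0
                         static pmeg_t
                         example_get_user_pmeg(pmap_t pmap, vm_offset_t segva)
                         {
                                 pmeg_t pmegp;
                         
                                 pmegp = pmeg_cache(pmap, segva);
                                 if (pmegp == PMEG_NULL)
                                         pmegp = pmeg_allocate(pmap, segva);
                                 return (pmegp);
                         }
                         #endif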
                   1063:
                   1064: #ifdef PMAP_DEBUG
                   1065: static void
                   1066: pmeg_verify_empty(va)
                   1067:        vm_offset_t va;
                   1068: {
                   1069:        vm_offset_t eva;
                   1070:        int pte;
                   1071:
                   1072:        for (eva = va + NBSG;  va < eva; va += NBPG) {
                   1073:                pte = get_pte(va);
                   1074:                if (pte & PG_VALID)
                   1075:                        panic("pmeg_verify_empty");
                   1076:        }
                   1077: }
                   1078: #endif /* PMAP_DEBUG */
                   1079:
                   1080:
                   1081: /****************************************************************
                    1082:  * Physical-to-virtual lookup support
                   1083:  *
                   1084:  * Need memory for the pv_alloc/pv_free list heads
                   1085:  * and elements.  We know how many to allocate since
                   1086:  * there is one list head for each physical page, and
                   1087:  * at most one element for each PMEG slot.
                   1088:  */
                   1089: static void
                   1090: pv_init()
                   1091: {
                   1092:        int npp, nvp, sz;
                   1093:        pv_entry_t pv;
                   1094:        char *p;
                   1095:
                   1096:        /* total allocation size */
                   1097:        sz = 0;
                   1098:
                   1099:        /*
                   1100:         * Data for each physical page.
                   1101:         * Each "mod/ref" flag is a char.
                   1102:         * Each PV head is a pointer.
                   1103:         * Note physmem is in pages.
                   1104:         */
                   1105:        npp = ALIGN(physmem);
                   1106:        sz += (npp * sizeof(*pv_flags_tbl));
                   1107:        sz += (npp * sizeof(*pv_head_tbl));
                   1108:
                   1109:        /*
                   1110:         * Data for each virtual page (all PMEGs).
                   1111:         * One pv_entry for each page frame.
                   1112:         */
                   1113:        nvp = NPMEG * NPAGSEG;
                   1114:        sz += (nvp * sizeof(*pv_free_list));
                   1115:
                   1116:        /* Now allocate the whole thing. */
                   1117:        sz = m68k_round_page(sz);
                   1118:        p = (char*) uvm_km_alloc(kernel_map, sz);
                   1119:        if (p == NULL)
                   1120:                panic("pmap:pv_init: alloc failed");
                   1121:        bzero(p, sz);
                   1122:
                   1123:        /* Now divide up the space. */
                   1124:        pv_flags_tbl = (void *) p;
                   1125:        p += (npp * sizeof(*pv_flags_tbl));
                   1126:        pv_head_tbl = (void*) p;
                   1127:        p += (npp * sizeof(*pv_head_tbl));
                   1128:        pv_free_list = (void *) p;
                   1129:        p += (nvp * sizeof(*pv_free_list));
                   1130:
                   1131:        /* Finally, make pv_free_list into a list. */
                   1132:        for (pv = pv_free_list; (char*)pv < p; pv++)
                   1133:                pv->pv_next = &pv[1];
                   1134:        pv[-1].pv_next = 0;
                   1135:
                   1136:        pv_initialized++;
                   1137: }
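
                         /*
                          * Editor's note: a worked example of the sizing above, using
                          * hypothetical values (2 KB pages and 4 MB of RAM, so
                          * physmem = 2048; NPMEG = 256; NPAGSEG = 16):
                          *
                          *      npp = 2048:  2048 flag bytes + 2048 four-byte
                          *                   head pointers = 10240 bytes
                          *      nvp = 256 * 16 = 4096 pv_entry elements
                          *
                          * so sz is about 10 KB plus 4096 * sizeof(struct pv_entry),
                          * rounded up to a whole page by m68k_round_page().
                          */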
                   1138:
                   1139: /*
                   1140:  * Set or clear bits in all PTEs mapping a page.
                    1141:  * Also gathers the hardware mod/ref bits into pv_flags while we are there...
                   1142:  */
                   1143: static void
                   1144: pv_changepte(pa, set_bits, clear_bits)
                   1145:        vm_offset_t pa;
                   1146:        int set_bits;
                   1147:        int clear_bits;
                   1148: {
                   1149:        pv_entry_t *head, pv;
                   1150:        u_char *pv_flags;
                   1151:        pmap_t pmap;
                   1152:        vm_offset_t va;
                   1153:        int pte, sme;
                   1154:        int saved_ctx;
                   1155:        boolean_t in_ctx;
                   1156:        u_int flags;
                   1157:
                   1158:        CHECK_SPL();
                   1159:
                   1160:        if (!pv_initialized)
                   1161:                return;
                   1162:        if ((set_bits == 0) && (clear_bits == 0))
                   1163:                return;
                   1164:
                   1165:        pv_flags = pa_to_pvflags(pa);
                   1166:        head     = pa_to_pvhead(pa);
                   1167:
                   1168:        /* If no mappings, no work to do. */
                   1169:        if (*head == NULL)
                   1170:                return;
                   1171:
                   1172: #ifdef DIAGNOSTIC
                   1173:        /* This function should only clear these bits: */
                   1174:        if (clear_bits & ~(PG_WRITE | PG_NC | PG_REF | PG_MOD))
                   1175:                panic("pv_changepte: clear=0x%x\n", clear_bits);
                   1176: #endif
                   1177:
                   1178:        flags = 0;
                   1179:        saved_ctx = get_context();
                   1180:        for (pv = *head; pv != NULL; pv = pv->pv_next) {
                   1181:                pmap = pv->pv_pmap;
                   1182:                va = pv->pv_va;
                   1183:
                   1184: #ifdef DIAGNOSTIC
                   1185:                if (pmap == NULL)
                   1186:                        panic("pv_changepte: null pmap");
                   1187:                if (pmap->pm_segmap == NULL)
                   1188:                        panic("pv_changepte: null segmap");
                   1189: #endif
                   1190:
                   1191:                /* XXX don't write protect pager mappings */
                   1192:                if (clear_bits & PG_WRITE) {
                   1193:                        if (va >= PAGER_SVA && va < PAGER_EVA) {
                   1194: #ifdef PMAP_DEBUG
                   1195:                                /* XXX - Does this actually happen? */
                   1196:                                printf("pv_changepte: in pager!\n");
                   1197:                                Debugger();
                   1198: #endif
                   1199:                                continue;
                   1200:                        }
                   1201:                }
                   1202:
                    1203:                /* Is the PTE currently accessible in some context? */
                   1204:                in_ctx = FALSE;
                   1205:                sme = SEGINV;   /* kill warning */
                   1206:                if (pmap == kernel_pmap) {
                   1207:                        set_context(KERNEL_CONTEXT);
                   1208:                        in_ctx = TRUE;
                   1209:                }
                   1210:                else if (has_context(pmap)) {
                   1211:                        /* PMEG may be inactive. */
                   1212:                        set_context(pmap->pm_ctxnum);
                   1213:                        sme = get_segmap(va);
                   1214:                        if (sme != SEGINV)
                   1215:                                in_ctx = TRUE;
                   1216:                }
                   1217:
                   1218:                if (in_ctx == TRUE) {
                   1219:                        /*
                   1220:                         * The PTE is in the current context.
                   1221:                         * Make sure PTE is up-to-date with VAC.
                   1222:                         */
                   1223: #ifdef HAVECACHE
                   1224:                        if (cache_size)
                   1225:                                cache_flush_page(va);
                   1226: #endif
                   1227:                        pte = get_pte(va);
                   1228:                } else {
                   1229:                        /*
                   1230:                         * The PTE is not in any context.
                   1231:                         */
                   1232:                        sme = pmap->pm_segmap[VA_SEGNUM(va)];
                   1233:                        if (sme == SEGINV)
                   1234:                                panic("pv_changepte: SEGINV");
                   1235:                        pte = get_pte_pmeg(sme, VA_PTE_NUM(va));
                   1236:                }
                   1237:
                   1238: #ifdef DIAGNOSTIC
                   1239:                /* PV entries point only to valid mappings. */
                   1240:                if ((pte & PG_VALID) == 0)
                   1241:                        panic("pv_changepte: not PG_VALID at va=0x%lx\n", va);
                   1242: #endif
                   1243:                /* Get these while it's easy. */
                   1244:                if (pte & PG_MODREF) {
                   1245:                        flags |= (pte & PG_MODREF);
                   1246:                        pte &= ~PG_MODREF;
                   1247:                }
                   1248:
                   1249:                /* Finally, set and clear some bits. */
                   1250:                pte |= set_bits;
                   1251:                pte &= ~clear_bits;
                   1252:
                   1253:                if (in_ctx == TRUE) {
                   1254:                        /* Did cache flush above. */
                   1255:                        set_pte(va, pte);
                   1256:                } else {
                   1257:                        set_pte_pmeg(sme, VA_PTE_NUM(va), pte);
                   1258:                }
                   1259:        }
                   1260:        set_context(saved_ctx);
                   1261:
                   1262:        *pv_flags |= (flags >> PV_SHIFT);
                   1263: }
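
                         /*
                          * Editor's note: hedged examples of how pv_changepte() is
                          * driven; these calls are sketches, not necessarily verbatim
                          * callers from this file:
                          */
                         #if 0
                                 pv_changepte(pa, 0, PG_WRITE);  /* write-protect all mappings */
                                 pv_changepte(pa, PG_NC, 0);     /* make all mappings non-cached */
                         #endif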
                   1264:
                   1265: /*
                   1266:  * Return ref and mod bits from pvlist,
                   1267:  * and turns off same in hardware PTEs.
                   1268:  */
                   1269: static u_int
                   1270: pv_syncflags(pv)
                   1271:        pv_entry_t pv;
                   1272: {
                   1273:        pmap_t pmap;
                   1274:        vm_offset_t va;
                   1275:        int pte, sme;
                   1276:        int saved_ctx;
                   1277:        boolean_t in_ctx;
                   1278:        u_int flags;
                   1279:
                   1280:        CHECK_SPL();
                   1281:
                   1282:        if (!pv_initialized)
                   1283:                return(0);
                   1284:
                   1285:        /* If no mappings, no work to do. */
                   1286:        if (pv == NULL)
                   1287:                return (0);
                   1288:
                   1289:        flags = 0;
                   1290:        saved_ctx = get_context();
                   1291:        for ( ; pv != NULL; pv = pv->pv_next) {
                   1292:                pmap = pv->pv_pmap;
                   1293:                va = pv->pv_va;
                   1294:                sme = SEGINV;   /* kill warning */
                   1295:
                   1296: #ifdef DIAGNOSTIC
                   1297:                /*
                   1298:                 * Only the head may have a null pmap, and
                   1299:                 * we checked for that above.
                   1300:                 */
                   1301:                if (pmap == NULL)
                   1302:                        panic("pv_syncflags: null pmap");
                   1303:                if (pmap->pm_segmap == NULL)
                   1304:                        panic("pv_syncflags: null segmap");
                   1305: #endif
                   1306:
                    1307:                /* Is the PTE currently accessible in some context? */
                   1308:                in_ctx = FALSE;
                   1309:                if (pmap == kernel_pmap) {
                   1310:                        set_context(KERNEL_CONTEXT);
                   1311:                        in_ctx = TRUE;
                   1312:                }
                   1313:                else if (has_context(pmap)) {
                   1314:                        /* PMEG may be inactive. */
                   1315:                        set_context(pmap->pm_ctxnum);
                   1316:                        sme = get_segmap(va);
                   1317:                        if (sme != SEGINV)
                   1318:                                in_ctx = TRUE;
                   1319:                }
                   1320:
                   1321:                if (in_ctx == TRUE) {
                   1322:                        /*
                   1323:                         * The PTE is in the current context.
                   1324:                         * Make sure PTE is up-to-date with VAC.
                   1325:                         */
                   1326: #ifdef HAVECACHE
                   1327:                        if (cache_size)
                   1328:                                cache_flush_page(va);
                   1329: #endif
                   1330:                        pte = get_pte(va);
                   1331:                } else {
                   1332:                        /*
                   1333:                         * The PTE is not in any context.
                   1334:                         * XXX - Consider syncing MODREF bits
                    1335:                         * when the PMEG loses its context?
                   1336:                         */
                   1337:                        sme = pmap->pm_segmap[VA_SEGNUM(va)];
                   1338:                        if (sme == SEGINV)
                   1339:                                panic("pv_syncflags: SEGINV");
                   1340:                        pte = get_pte_pmeg(sme, VA_PTE_NUM(va));
                   1341:                }
                   1342:
                   1343: #ifdef DIAGNOSTIC
                   1344:                /* PV entries point only to valid mappings. */
                   1345:                if ((pte & PG_VALID) == 0)
                   1346:                        panic("pv_syncflags: not PG_VALID at va=0x%lx\n", va);
                   1347: #endif
                   1348:                /* OK, do what we came here for... */
                   1349:                if (pte & PG_MODREF) {
                   1350:                        flags |= (pte & PG_MODREF);
                   1351:                        pte &= ~PG_MODREF;
                   1352:                }
                   1353:
                   1354:                if (in_ctx == TRUE) {
                   1355:                        /* Did cache flush above. */
                   1356:                        set_pte(va, pte);
                   1357:                } else {
                   1358:                        set_pte_pmeg(sme, VA_PTE_NUM(va), pte);
                   1359:                }
                   1360:        }
                   1361:        set_context(saved_ctx);
                   1362:
                   1363:        return (flags >> PV_SHIFT);
                   1364: }
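
                         /*
                          * Editor's note: sketch of a typical mod/ref query built on
                          * pv_syncflags().  PV_MOD is assumed to be the pv-level
                          * counterpart of PG_MOD (cf. the PV_SHIFT use above).
                          */
                         #if 0
                                 u_char *pv_flags = pa_to_pvflags(pa);
                                 boolean_t modified;
                         
                                 *pv_flags |= pv_syncflags(*pa_to_pvhead(pa));
                                 modified = (*pv_flags & PV_MOD) != 0;
                         #endif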
                   1365:
                   1366: /* Remove all mappings for the physical page. */
                   1367: static void
                   1368: pv_remove_all(pa)
                   1369:        vm_offset_t pa;
                   1370: {
                   1371:        pv_entry_t *head, pv;
                   1372:        pmap_t pmap;
                   1373:        vm_offset_t va;
                   1374:
                   1375:        CHECK_SPL();
                   1376:
                   1377: #ifdef PMAP_DEBUG
                   1378:        if (pmap_debug & PMD_REMOVE)
                   1379:                printf("pv_remove_all(0x%lx)\n", pa);
                   1380: #endif
                   1381:
                   1382:        if (!pv_initialized)
                   1383:                return;
                   1384:
                   1385:        head = pa_to_pvhead(pa);
                   1386:        while ((pv = *head) != NULL) {
                   1387:                pmap = pv->pv_pmap;
                   1388:                va   = pv->pv_va;
                   1389:                pmap_remove1(pmap, va, va + NBPG);
                   1390: #ifdef PMAP_DEBUG
                   1391:                /* Make sure it went away. */
                   1392:                if (pv == *head) {
                   1393:                        db_printf("pv_remove_all: head unchanged for pa=0x%lx\n", pa);
                   1394:                        Debugger();
                   1395:                }
                   1396: #endif
                   1397:        }
                   1398: }
                   1399:
                   1400: /*
                    1401:  * The pmap system is asked to look up all mappings that point to a
                   1402:  * given physical memory address.  This function adds a new element
                   1403:  * to the list of mappings maintained for the given physical address.
                   1404:  * Returns PV_NC if the (new) pvlist says that the address cannot
                   1405:  * be cached.
                   1406:  */
                   1407: static int
                   1408: pv_link(pmap, pte, va)
                   1409:        pmap_t pmap;
                   1410:        int pte;
                   1411:        vm_offset_t va;
                   1412: {
                   1413:        vm_offset_t pa;
                   1414:        pv_entry_t *head, pv;
                   1415:        u_char *pv_flags;
                   1416:        int flags;
                   1417:
                   1418:        if (!pv_initialized)
                   1419:                return 0;
                   1420:
                   1421:        CHECK_SPL();
                   1422:
                   1423:        /* Only the non-cached bit is of interest here. */
                   1424:        flags = (pte & PG_NC) ? PV_NC : 0;
                   1425:        pa = PG_PA(pte);
                   1426:
                   1427: #ifdef PMAP_DEBUG
                   1428:        if ((pmap_debug & PMD_LINK) || (va == pmap_db_watchva)) {
                   1429:                printf("pv_link(%p, 0x%x, 0x%lx)\n", pmap, pte, va);
                   1430:                /* pv_print(pa); */
                   1431:        }
                   1432: #endif
                   1433:
                   1434:        pv_flags = pa_to_pvflags(pa);
                   1435:        head     = pa_to_pvhead(pa);
                   1436:
                   1437: #ifdef DIAGNOSTIC
                   1438:        /* See if this mapping is already in the list. */
                   1439:        for (pv = *head; pv != NULL; pv = pv->pv_next) {
                   1440:                if ((pv->pv_pmap == pmap) && (pv->pv_va == va))
                   1441:                        panic("pv_link: duplicate entry for PA=0x%lx", pa);
                   1442:        }
                   1443: #endif
                   1444:
                   1445:        /*
                   1446:         * Does this new mapping cause VAC alias problems?
                   1447:         */
                   1448:        *pv_flags |= flags;
                   1449:        if ((*pv_flags & PV_NC) == 0) {
                   1450:                for (pv = *head; pv != NULL; pv = pv->pv_next) {
                   1451:                        if (BADALIAS(va, pv->pv_va)) {
                   1452:                                *pv_flags |= PV_NC;
                   1453:                                pv_changepte(pa, PG_NC, 0);
                   1454:                                pmap_stats.ps_vac_uncached++;
                   1455:                                break;
                   1456:                        }
                   1457:                }
                   1458:        }
                   1459:
                   1460:        /* Allocate a PV element (pv_alloc()). */
                   1461:        pv = pv_free_list;
                   1462:        if (pv == NULL)
                   1463:                panic("pv_link: pv_alloc");
                   1464:        pv_free_list = pv->pv_next;
                   1465:        pv->pv_next = 0;
                   1466:
                   1467:        /* Insert new entry at the head. */
                   1468:        pv->pv_pmap = pmap;
                   1469:        pv->pv_va   = va;
                   1470:        pv->pv_next = *head;
                   1471:        *head = pv;
                   1472:
                   1473:        return (*pv_flags & PV_NC);
                   1474: }
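
                         /*
                          * Editor's note: BADALIAS() (defined earlier in this file)
                          * decides the alias question above.  A plausible shape, for
                          * illustration only -- two VAs alias badly when they differ
                          * in the bits a virtually-indexed cache uses to pick a line
                          * (CACHE_ALIAS_MASK is hypothetical):
                          */
                         #if 0
                         #define BADALIAS(a1, a2) (((int)(a1) ^ (int)(a2)) & CACHE_ALIAS_MASK)
                         #endif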
                   1475:
                   1476: /*
                   1477:  * pv_unlink is a helper function for pmap_remove.
                   1478:  * It removes the appropriate (pmap, pa, va) entry.
                   1479:  *
                   1480:  * Once the entry is removed, if the pv_table head has the cache
                   1481:  * inhibit bit set, see if we can turn that off; if so, walk the
                   1482:  * pvlist and turn off PG_NC in each PTE.  (The pvlist is by
                   1483:  * definition nonempty, since it must have at least two elements
                   1484:  * in it to have PV_NC set, and we only remove one here.)
                   1485:  */
                   1486: static void
                   1487: pv_unlink(pmap, pte, va)
                   1488:        pmap_t pmap;
                   1489:        int pte;
                   1490:        vm_offset_t va;
                   1491: {
                   1492:        vm_offset_t pa;
                   1493:        pv_entry_t *head, *ppv, pv;
                   1494:        u_char *pv_flags;
                   1495:
                   1496:        if (!pv_initialized)
                   1497:                return;
                   1498:
                   1499:        CHECK_SPL();
                   1500:
                   1501:        pa = PG_PA(pte);
                   1502: #ifdef PMAP_DEBUG
                   1503:        if ((pmap_debug & PMD_LINK) || (va == pmap_db_watchva)) {
                   1504:                printf("pv_unlink(%p, 0x%x, 0x%lx)\n", pmap, pte, va);
                   1505:                /* pv_print(pa); */
                   1506:        }
                   1507: #endif
                   1508:
                   1509:        pv_flags = pa_to_pvflags(pa);
                   1510:        head     = pa_to_pvhead(pa);
                   1511:
                   1512:        /*
                   1513:         * Find the entry.
                   1514:         */
                   1515:        ppv = head;
                   1516:        pv = *ppv;
                   1517:        while (pv) {
                   1518:                if ((pv->pv_pmap == pmap) && (pv->pv_va == va))
                   1519:                        goto found;
                   1520:                ppv = &pv->pv_next;
                   1521:                pv  =  pv->pv_next;
                   1522:        }
                   1523: #ifdef PMAP_DEBUG
                   1524:        db_printf("pv_unlink: not found (pa=0x%lx,va=0x%lx)\n", pa, va);
                   1525:        Debugger();
                   1526: #endif
                   1527:        return;
                   1528:
                   1529: found:
                   1530:        /* Unlink this entry from the list and clear it. */
                   1531:        *ppv = pv->pv_next;
                   1532:        pv->pv_pmap = NULL;
                   1533:        pv->pv_va   = 0;
                   1534:
                   1535:        /* Insert it on the head of the free list. (pv_free()) */
                   1536:        pv->pv_next = pv_free_list;
                   1537:        pv_free_list = pv;
                   1538:        pv = NULL;
                   1539:
                   1540:        /* Do any non-cached mappings remain? */
                   1541:        if ((*pv_flags & PV_NC) == 0)
                   1542:                return;
                   1543:        if ((pv = *head) == NULL)
                   1544:                return;
                   1545:
                   1546:        /*
                   1547:         * Have non-cached mappings.  See if we can fix that now.
                   1548:         */
                   1549:        va = pv->pv_va;
                   1550:        for (pv = pv->pv_next; pv != NULL; pv = pv->pv_next) {
                   1551:                /* If there is a DVMA mapping, leave it NC. */
                   1552:                if (va >= DVMA_MAP_BASE)
                   1553:                        return;
                   1554:                /* If there are VAC alias problems, leave NC. */
                   1555:                if (BADALIAS(va, pv->pv_va))
                   1556:                        return;
                   1557:        }
                   1558:        /* OK, there are no "problem" mappings. */
                   1559:        *pv_flags &= ~PV_NC;
                   1560:        pv_changepte(pa, 0, PG_NC);
                   1561:        pmap_stats.ps_vac_recached++;
                   1562: }
                   1563:
                   1564:
                   1565: /****************************************************************
                   1566:  * Bootstrap and Initialization, etc.
                   1567:  */
                   1568:
                   1569: void
                   1570: pmap_common_init(pmap)
                   1571:        pmap_t pmap;
                   1572: {
                   1573:        bzero(pmap, sizeof(struct pmap));
                    1574:        pmap->pm_refcount = 1;
                   1575:        pmap->pm_version = pmap_version++;
                   1576:        pmap->pm_ctxnum = EMPTY_CONTEXT;
                   1577:        simple_lock_init(&pmap->pm_lock);
                   1578: }
                   1579:
                   1580: /*
                   1581:  * Prepare the kernel for VM operations.
                   1582:  * This is called by locore2.c:_vm_init()
                   1583:  * after the "start/end" globals are set.
                   1584:  * This function must NOT leave context zero.
                   1585:  */
                   1586: void
                   1587: pmap_bootstrap(nextva)
                   1588:        vm_offset_t nextva;
                   1589: {
                   1590:        vm_offset_t va, eva;
                   1591:        int i, pte, sme;
                   1592:        extern char etext[];
                   1593:
                   1594:        nextva = m68k_round_page(nextva);
                   1595:
                   1596:        /* Steal some special-purpose, already mapped pages? */
                   1597:
                   1598:        /*
                   1599:         * Determine the range of kernel virtual space available.
                   1600:         * It is segment-aligned to simplify PMEG management.
                   1601:         */
                   1602:        virtual_avail = m68k_round_seg(nextva);
                   1603:        virtual_end = VM_MAX_KERNEL_ADDRESS;
                   1604:
                   1605:        /*
                   1606:         * Determine the range of physical memory available.
                   1607:         * Physical memory at zero was remapped to KERNBASE.
                   1608:         */
                   1609:        avail_start = nextva - KERNBASE;
1.3       fredette 1610:        avail_end = prom_memsize();
1.1       fredette 1611:        avail_end = m68k_trunc_page(avail_end);
                   1612:
                   1613:        /*
                   1614:         * Report the actual amount of physical memory,
                   1615:         * even though the PROM takes a few pages.
                   1616:         */
                   1617:        physmem = (btoc(avail_end) + 0xF) & ~0xF;
                   1618:
                   1619:        /*
                   1620:         * Done allocating PAGES of virtual space, so
                   1621:         * clean out the rest of the last used segment.
                   1622:         */
                   1623:        for (va = nextva; va < virtual_avail; va += NBPG)
                   1624:                set_pte(va, PG_INVAL);
                   1625:
                   1626:        /*
                    1627:         * Now that we are done stealing physical pages, etc.,
                    1628:         * figure out which PMEGs are used by those mappings
                    1629:         * and either reserve them or clear them out.
                    1630:         * -- but first, init PMEG management.
                    1631:         * This puts all PMEGs in the free list.
                    1632:         * We will allocate the in-use ones.
                   1633:         */
                   1634:        pmeg_init();
                   1635:
                   1636:        /*
                   1637:         * Unmap low virtual segments.
                   1638:         * VA range: [0 .. KERNBASE]
                   1639:         */
                   1640:        for (va = 0; va < KERNBASE; va += NBSG)
                   1641:                set_segmap(va, SEGINV);
                   1642:
                   1643:        /*
                   1644:         * Reserve PMEGS for kernel text/data/bss
                   1645:         * and the misc pages taken above.
                   1646:         * VA range: [KERNBASE .. virtual_avail]
                   1647:         */
                   1648:        for ( ; va < virtual_avail; va += NBSG) {
                   1649:                sme = get_segmap(va);
                   1650:                if (sme == SEGINV) {
1.3       fredette 1651:                        prom_printf("kernel text/data/bss not mapped\n");
                   1652:                        prom_abort();
1.1       fredette 1653:                }
                   1654:                pmeg_reserve(sme);
                   1655:        }
                   1656:
                   1657:        /*
                   1658:         * Unmap kernel virtual space.  Make sure to leave no valid
                   1659:         * segmap entries in the MMU unless pmeg_array records them.
                    1660:         * VA range: [virtual_avail .. virtual_end]
                   1661:         */
                   1662:        for ( ; va < virtual_end; va += NBSG)
                   1663:                set_segmap(va, SEGINV);
                   1664:
                   1665:        /*
                   1666:         * Reserve PMEGs used by the PROM monitor (device mappings).
                   1667:         * Free up any pmegs in this range which have no mappings.
                   1668:         * VA range: [0x00E00000 .. 0x00F00000]
                   1669:         */
                   1670:        pmeg_mon_init(SUN2_MONSTART, SUN2_MONEND, TRUE);
                   1671:
                   1672:        /*
                   1673:         * Unmap any pmegs left in DVMA space by the PROM.
                   1674:         * DO NOT kill the last one! (owned by the PROM!)
                   1675:         * VA range: [0x00F00000 .. 0x00FE0000]
                   1676:         */
                   1677:        pmeg_mon_init(SUN2_MONEND, SUN2_MONEND + DVMA_MAP_SIZE, FALSE);
                   1678:
                   1679:        /*
                   1680:         * Done reserving PMEGs and/or clearing out mappings.
                   1681:         *
                   1682:         * Now verify the mapping protections and such for the
                   1683:         * important parts of the address space (in VA order).
                   1684:         * Note that the Sun PROM usually leaves the memory
                   1685:         * mapped with everything non-cached...
                   1686:         */
                   1687:
                   1688:        /*
                   1689:         * On both a Sun2 and Sun3, the kernel text starts at virtual
                   1690:         * KERNTEXTOFF, physical 0x4000.  locore.s has set up a temporary
                   1691:         * stack starting at virtual (KERNTEXTOFF - sizeof(struct exec)),
                   1692:         * physical 0x3FE0, and growing down.
                   1693:         *
                    1694:         * On the Sun3, this means there are two physical pages before the
                    1695:         * kernel text: page zero is reserved for the msgbuf
                    1696:         * and page one contains the temporary stack.
                   1697:         *
                   1698:         * On the Sun2, there are eight physical pages before the kernel
                   1699:         * text.  Pages 0-3 are used by the PROM.
                   1700:         */
                    1701:        va = KERNBASE;
                    1702:        eva = KERNBASE + (NBPG * 4);
                    1703:        for ( ; va < eva; va += NBPG) {
                    1704:                pte = get_pte(va);
                    1705:                pte |= (PG_SYSTEM | PG_WRITE);
                    1706:                set_pte(va, pte);
                    1707:        }
                   1708:
                   1709:        /*
                   1710:         * We start the msgbuf at page four.
                   1711:         */
                   1712:        pte = get_pte(va);
                   1713:        pte |= (PG_SYSTEM | PG_WRITE | PG_NC);
                   1714:        set_pte(va, pte);
                   1715:        va += NBPG;
                   1716:        /* Initialize msgbufaddr later, in machdep.c */
                   1717:
                   1718:        /*
                   1719:         * On the Sun3, two of the three dead pages in SUN3_MONSHORTSEG
                   1720:         * are used for tmp_vpages.  The Sun2 doesn't have this
                   1721:         * short-segment concept, so we reserve virtual pages five and six
                   1722:         * after KERNBASE for this.
                   1723:         */
                   1724:        set_pte(va, PG_INVAL);
                   1725:        va += NBPG;
                   1726:        set_pte(va, PG_INVAL);
                   1727:        va += NBPG;
                   1728:
                   1729:        /*
                   1730:         * Page seven remains for the temporary kernel stack.  Hopefully
                   1731:         * this is enough space.
                   1732:         */
                   1733:        pte = get_pte(va);
                   1734:        pte &= ~(PG_NC);
                   1735:        pte |= (PG_SYSTEM | PG_WRITE);
                   1736:        set_pte(va, pte);
                   1737:        va += NBPG;
                   1738:
                   1739:        /*
                   1740:         * Next is the kernel text.
                   1741:         *
                    1742:         * Verify protection bits on kernel text/data/bss.
                   1743:         * All of kernel text, data, and bss are cached.
                   1744:         * Text is read-only (except in db_write_ktext).
                   1745:         */
                   1746:        eva = m68k_trunc_page(etext);
                   1747:        while (va < eva) {
                   1748:                pte = get_pte(va);
                   1749:                if ((pte & (PG_VALID|PG_TYPE)) != PG_VALID) {
1.3       fredette 1750:                        prom_printf("invalid page at 0x%x\n", va);
1.1       fredette 1751:                }
                   1752:                pte &= ~(PG_WRITE|PG_NC);
                   1753:                /* Kernel text is read-only */
                   1754:                pte |= (PG_SYSTEM);
                   1755:                set_pte(va, pte);
                   1756:                va += NBPG;
                   1757:        }
                   1758:        /* data, bss, etc. */
                   1759:        while (va < nextva) {
                   1760:                pte = get_pte(va);
                   1761:                if ((pte & (PG_VALID|PG_TYPE)) != PG_VALID) {
1.3       fredette 1762:                        prom_printf("invalid page at 0x%x\n", va);
1.1       fredette 1763:                }
                   1764:                pte &= ~(PG_NC);
                   1765:                pte |= (PG_SYSTEM | PG_WRITE);
                   1766:                set_pte(va, pte);
                   1767:                va += NBPG;
                   1768:        }
                   1769:
                   1770:        /*
                   1771:         * Initialize all of the other contexts.
                   1772:         */
                   1773: #ifdef DIAGNOSTIC
                   1774:        /* Near the beginning of locore.s we set context zero. */
                   1775:        if (get_context() != 0) {
1.3       fredette 1776:                prom_printf("pmap_bootstrap: not in context zero?\n");
                   1777:                prom_abort();
1.1       fredette 1778:        }
                   1779: #endif /* DIAGNOSTIC */
                   1780:        for (va = 0; va < (vm_offset_t) (NBSG * NSEGMAP); va += NBSG) {
                   1781:                for (i = 1; i < NCONTEXT; i++) {
                   1782:                        set_context(i);
                   1783:                        set_segmap(va, SEGINV);
                   1784:                }
                   1785:        }
                   1786:        set_context(KERNEL_CONTEXT);
                   1787:
                   1788:        /*
                   1789:         * Reserve a segment for the kernel to use to access a pmeg
                   1790:         * that is not currently mapped into any context/segmap.
                   1791:         * The kernel temporarily maps such a pmeg into this segment.
                   1792:         */
                   1793:        temp_seg_va = virtual_avail;
                   1794:        virtual_avail += NBSG;
                   1795: #ifdef DIAGNOSTIC
                   1796:        if (temp_seg_va & SEGOFSET) {
1.3       fredette 1797:                prom_printf("pmap_bootstrap: temp_seg_va\n");
                   1798:                prom_abort();
1.1       fredette 1799:        }
                   1800: #endif
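
                         /*
                          * Editor's note: hedged sketch of how get_pte_pmeg() and
                          * set_pte_pmeg() are assumed to use this reserved segment --
                          * map the orphan PMEG in, touch the PTE, unmap it again:
                          */
                         #if 0
                                 set_segmap(temp_seg_va, sme);
                                 pte = get_pte(temp_seg_va + ptenum * NBPG);
                                 set_segmap(temp_seg_va, SEGINV);
                         #endif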
                   1801:
                   1802:        /* Initialization for pmap_next_page() */
                   1803:        avail_next = avail_start;
                   1804:
1.2       fredette 1805:        uvmexp.pagesize = NBPG;
1.1       fredette 1806:        uvm_setpagesize();
                   1807:
                   1808:        /* after setting up some structures */
                   1809:
                   1810:        pmap_common_init(kernel_pmap);
                   1811:        pmap_kernel_init(kernel_pmap);
                   1812:
                   1813:        context_init();
                   1814:
                   1815:        pmeg_clean_free();
                   1816:
                   1817:        pmap_page_upload();
                   1818: }
                   1819:
                   1820: /*
                   1821:  * Give the kernel pmap a segmap, just so there are not
                   1822:  * so many special cases required.  Maybe faster too,
                   1823:  * because this lets pmap_remove() and pmap_protect()
                    1824:  * use a software copy of the segmap to avoid function calls.
                   1825:  */
                   1826: void
                   1827: pmap_kernel_init(pmap)
                   1828:         pmap_t pmap;
                   1829: {
                   1830:        vm_offset_t va;
                   1831:        int i, sme;
                   1832:
                   1833:        for (i=0, va=0; i < NSEGMAP; i++, va+=NBSG) {
                   1834:                sme = get_segmap(va);
                   1835:                kernel_segmap[i] = sme;
                   1836:        }
                   1837:        pmap->pm_segmap = kernel_segmap;
                   1838: }
                   1839:
                   1840:
                   1841: /****************************************************************
                   1842:  * PMAP interface functions.
                   1843:  */
                   1844:
                   1845: /*
                   1846:  * Support functions for vm_page_bootstrap().
                   1847:  */
                   1848:
                   1849: /*
                   1850:  * How much virtual space does this kernel have?
                   1851:  * (After mapping kernel text, data, etc.)
                   1852:  */
                   1853: void
                   1854: pmap_virtual_space(v_start, v_end)
                   1855:        vm_offset_t *v_start;
                   1856:        vm_offset_t *v_end;
                   1857: {
                   1858:        *v_start = virtual_avail;
                   1859:        *v_end   = virtual_end;
                   1860: }
                   1861:
                   1862: /* Provide memory to the VM system. */
                   1863: static void
                   1864: pmap_page_upload()
                   1865: {
                   1866:        int a, b, c, d;
                   1867:
                   1868:        if (hole_size) {
                   1869:                /*
                   1870:                 * Supply the memory in two segments so the
                   1871:                 * reserved memory (3/50 video ram at 1MB)
                   1872:                 * can be carved from the front of the 2nd.
                   1873:                 */
                   1874:                a = atop(avail_start);
                   1875:                b = atop(hole_start);
                   1876:                uvm_page_physload(a, b, a, b, VM_FREELIST_DEFAULT);
                   1877:                c = atop(hole_start + hole_size);
                   1878:                d = atop(avail_end);
                   1879:                uvm_page_physload(b, d, c, d, VM_FREELIST_DEFAULT);
                   1880:        } else {
                   1881:                a = atop(avail_start);
                   1882:                d = atop(avail_end);
                   1883:                uvm_page_physload(a, d, a, d, VM_FREELIST_DEFAULT);
                   1884:        }
                   1885: }
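
                         /*
                          * Editor's note: a worked example with hypothetical numbers.
                          * With avail_start = 0x8000, a 128 KB hole at hole_start =
                          * 0x100000, and avail_end = 0x800000, UVM is handed two
                          * segments:
                          *
                          *      [atop(0x8000),   atop(0x100000))  -- all free
                          *      [atop(0x100000), atop(0x800000))  -- free only from
                          *                                           atop(0x120000)
                          *
                          * so the reserved pages stay managed but never reach the
                          * free list.
                          */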
                   1886:
                   1887: /*
                   1888:  * pmap_page_index()
                   1889:  *
                   1890:  * Given a physical address, return a page index.
                   1891:  *
                   1892:  * There can be some values that we never return (i.e. a hole)
                   1893:  * as long as the range of indices returned by this function
                   1894:  * is smaller than the value returned by pmap_free_pages().
                   1895:  * The returned index does NOT need to start at zero.
                   1896:  * (This is normally a macro in pmap.h)
                   1897:  */
                   1898: #ifndef        pmap_page_index
                   1899: int
                   1900: pmap_page_index(pa)
                   1901:        vm_offset_t pa;
                   1902: {
                   1903:        int idx;
                   1904:
                   1905: #ifdef DIAGNOSTIC
                   1906:        if (pa < avail_start || pa >= avail_end)
                   1907:                panic("pmap_page_index: pa=0x%lx", pa);
                   1908: #endif /* DIAGNOSTIC */
                   1909:
                   1910:        idx = atop(pa);
                   1911:        return (idx);
                   1912: }
                   1913: #endif /* !pmap_page_index */
                   1914:
                   1915:
                   1916: /*
                   1917:  *     Initialize the pmap module.
                   1918:  *     Called by vm_init, to initialize any structures that the pmap
                   1919:  *     system needs to map virtual memory.
                   1920:  */
                   1921: void
                   1922: pmap_init()
                   1923: {
                   1924:
                   1925:        pv_init();
1.2       fredette 1926: 
                   1927:        /* Initialize the pmap pool. */
                   1928:        pool_init(&pmap_pmap_pool, sizeof(struct pmap), 0, 0, 0, "pmappl",
                   1929:            0, pool_page_alloc_nointr, pool_page_free_nointr, M_VMPMAP);
1.1       fredette 1930: }
                   1931:
                   1932: /*
                   1933:  * Map a range of kernel virtual address space.
                   1934:  * This might be used for device mappings, or to
                   1935:  * record the mapping for kernel text/data/bss.
                   1936:  * Return VA following the mapped range.
                   1937:  */
                   1938: vm_offset_t
                   1939: pmap_map(va, pa, endpa, prot)
                   1940:        vm_offset_t     va;
                   1941:        vm_offset_t     pa;
                   1942:        vm_offset_t     endpa;
                   1943:        int             prot;
                   1944: {
                   1945:        int sz;
                   1946:
                   1947:        sz = endpa - pa;
                   1948:        do {
                   1949:                pmap_enter(kernel_pmap, va, pa, prot, 0);
                   1950:                va += NBPG;
                   1951:                pa += NBPG;
                   1952:                sz -= NBPG;
                   1953:        } while (sz > 0);
                   1954:        return(va);
                   1955: }
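
                         /*
                          * Editor's note: hedged usage example -- mapping a
                          * hypothetical 64 KB device region read/write (dev_pa is
                          * made up for illustration):
                          */
                         #if 0
                                 va = pmap_map(va, dev_pa, dev_pa + 0x10000,
                                     VM_PROT_READ | VM_PROT_WRITE);
                         #endif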
                   1956:
                   1957: void
                   1958: pmap_user_init(pmap)
                   1959:        pmap_t pmap;
                   1960: {
                   1961:        int i;
                   1962:        pmap->pm_segmap = malloc(sizeof(char)*NUSEG, M_VMPMAP, M_WAITOK);
                   1963:        for (i=0; i < NUSEG; i++) {
                   1964:                pmap->pm_segmap[i] = SEGINV;
                   1965:        }
                   1966: }
                   1967:
                   1968: /*
                   1969:  *     Create and return a physical map.
                   1970:  *
                    1971:  *     The map is an actual physical
                    1972:  *     map, and may be referenced by
                    1973:  *     the hardware.
                    1974:  *
                    1975:  *     (The historical "size" argument,
                    1976:  *     which selected a software-only
                    1977:  *     map, is gone: pmap_create() now
                    1978:  *     takes no arguments.)
                   1979:  */
                   1980: pmap_t
                   1981: pmap_create()
                   1982: {
                   1983:        pmap_t pmap;
                   1984:
1.2       fredette 1985:        pmap = pool_get(&pmap_pmap_pool, PR_WAITOK);
1.1       fredette 1986:        pmap_pinit(pmap);
                   1987:        return pmap;
                   1988: }
                   1989:
                   1990: /*
                   1991:  * Release any resources held by the given physical map.
                   1992:  * Called when a pmap initialized by pmap_pinit is being released.
                   1993:  * Should only be called if the map contains no valid mappings.
                   1994:  */
                   1995: void
                   1996: pmap_release(pmap)
                   1997:        struct pmap *pmap;
                   1998: {
                   1999:        int s;
                   2000:
1.2       fredette 2001:        s = splvm();
1.1       fredette 2002:
                   2003:        if (pmap == kernel_pmap)
                   2004:                panic("pmap_release: kernel_pmap!");
                   2005:
                   2006:        if (has_context(pmap)) {
                   2007: #ifdef PMAP_DEBUG
                   2008:                if (pmap_debug & PMD_CONTEXT)
                   2009:                        printf("pmap_release(%p): free ctx %d\n",
                   2010:                                   pmap, pmap->pm_ctxnum);
                   2011: #endif
                   2012:                context_free(pmap);
                   2013:        }
                   2014:        free(pmap->pm_segmap, M_VMPMAP);
                   2015:        pmap->pm_segmap = NULL;
                   2016:
                   2017:        splx(s);
                   2018: }
                   2019:
                   2020:
                   2021: /*
                   2022:  *     Retire the given physical map from service.
                   2023:  *     Should only be called if the map contains
                   2024:  *     no valid mappings.
                   2025:  */
                   2026: void
                   2027: pmap_destroy(pmap)
                   2028:        pmap_t pmap;
                   2029: {
                   2030:        int count;
                   2031:
                   2032:        if (pmap == NULL)
                   2033:                return; /* Duh! */
                   2034:
                   2035: #ifdef PMAP_DEBUG
                   2036:        if (pmap_debug & PMD_CREATE)
                   2037:                printf("pmap_destroy(%p)\n", pmap);
                   2038: #endif
                   2039:        if (pmap == kernel_pmap)
                   2040:                panic("pmap_destroy: kernel_pmap!");
                   2041:        pmap_lock(pmap);
                   2042:        count = pmap_del_ref(pmap);
                   2043:        pmap_unlock(pmap);
                   2044:        if (count == 0) {
                   2045:                pmap_release(pmap);
1.2       fredette 2046:                pool_put(&pmap_pmap_pool, pmap);
1.1       fredette 2047:        }
                   2048: }
                   2049:
                   2050: /*
                   2051:  *     Add a reference to the specified pmap.
                   2052:  */
                   2053: void
                   2054: pmap_reference(pmap)
                   2055:        pmap_t  pmap;
                   2056: {
                   2057:        if (pmap != NULL) {
                   2058:                pmap_lock(pmap);
                   2059:                pmap_add_ref(pmap);
                   2060:                pmap_unlock(pmap);
                   2061:        }
                   2062: }
                   2063:
                   2064:
                   2065: /*
                   2066:  *     Insert the given physical page (p) at
                   2067:  *     the specified virtual address (v) in the
                   2068:  *     target physical map with the protection requested.
                   2069:  *
                   2070:  *     The physical address is page aligned, but may have some
                   2071:  *     low bits set indicating an OBIO or VME bus page, or just
                    2072:  *     that the non-cache bit should be set (i.e., PMAP_NC).
                   2073:  *
                   2074:  *     If specified, the page will be wired down, meaning
                   2075:  *     that the related pte can not be reclaimed.
                   2076:  *
                   2077:  *     NB:  This is the only routine which MAY NOT lazy-evaluate
                   2078:  *     or lose information.  That is, this routine must actually
                   2079:  *     insert this page into the given map NOW.
                   2080:  */
                   2081: int
                   2082: pmap_enter(pmap, va, pa, prot, flags)
                   2083:        pmap_t pmap;
                   2084:        vm_offset_t va;
                   2085:        vm_offset_t pa;
                   2086:        vm_prot_t prot;
                   2087:        int flags;
                   2088: {
                   2089:        int new_pte, s;
                   2090:        boolean_t wired = (flags & PMAP_WIRED) != 0;
                   2091:
                   2092: #ifdef PMAP_DEBUG
                   2093:        if ((pmap_debug & PMD_ENTER) ||
                   2094:                (va == pmap_db_watchva))
                   2095:                printf("pmap_enter(%p, 0x%lx, 0x%lx, 0x%x, 0x%x)\n",
                   2096:                           pmap, va, pa, prot, wired);
                   2097: #endif
                   2098:
                   2099:        /* Get page-type bits from low part of the PA... */
                   2100:        new_pte = (pa & PMAP_SPEC) << PG_MOD_SHIFT;
                   2101:
                   2102:        /* ...now the valid and writable bits... */
                   2103:        new_pte |= PG_VALID;
                   2104:        if (prot & VM_PROT_WRITE)
                   2105:                new_pte |= PG_WRITE;
                   2106:
                   2107:        /* ...and finally the page-frame number. */
                   2108:        new_pte |= PA_PGNUM(pa);
                   2109:
                   2110:        /*
                   2111:         * Treatment varies significantly:
1.3       fredette 2112:         *  kernel ptes are always in the mmu;
1.1       fredette 2113:         *  user ptes may or may not be in the mmu, and the pmap
                   2114:         *   itself may not be in the mmu either.
                   2115:         *
                   2116:         */
1.2       fredette 2117:        s = splvm();
1.1       fredette 2118:        if (pmap == kernel_pmap) {
                   2119:                new_pte |= PG_SYSTEM;
                   2120:                pmap_enter_kernel(va, new_pte, wired);
                   2121:        } else {
                   2122:                pmap_enter_user(pmap, va, new_pte, wired);
                   2123:        }
                   2124:        splx(s);
1.2       fredette 2125:        return (0);
1.1       fredette 2126: }
                   2127:
                   2128: static void
                   2129: pmap_enter_kernel(pgva, new_pte, wired)
                   2130:        vm_offset_t pgva;
                   2131:        int new_pte;
                   2132:        boolean_t wired;
                   2133: {
                   2134:        pmap_t pmap = kernel_pmap;
                   2135:        pmeg_t pmegp;
                   2136:        int do_pv, old_pte, sme;
                   2137:        vm_offset_t segva;
                   2138:        int saved_ctx;
                   2139:
                   2140:        CHECK_SPL();
                   2141:
                   2142:        /*
                   2143:          need to handle possibly allocating additional pmegs;
                   2144:          need to make sure they can't be stolen from the kernel;
                   2145:          map any new pmegs into context zero, make sure rest of pmeg is null;
                   2146:          deal with pv_stuff; possible caching problems;
                   2147:          must also deal with changes.
                   2148:          */
                   2149:        saved_ctx = get_context();
                   2150:        set_context(KERNEL_CONTEXT);
                   2151:
                   2152:        /*
                   2153:         * In detail:
                   2154:         *
                   2155:         * (a) lock pmap
                   2156:         * (b) Is the VA in an already mapped segment?  If so,
                   2157:         *       check whether that VA is "valid".  If it is, the
                   2158:         *       action is a change to an existing pte
                   2159:         * (c) if not in a mapped segment, need to allocate a pmeg
                   2160:         * (d) if adding a pte entry or changing the physaddr of an
                   2161:         *              existing one, use pv_stuff; for a change, possibly pmap_remove()
                   2162:         * (e) change/add pte
                   2163:         */
                   2164:
                   2165: #ifdef DIAGNOSTIC
                   2166:        if ((pgva < virtual_avail) || (pgva >= DVMA_MAP_END))
                   2167:                panic("pmap_enter_kernel: bad va=0x%lx", pgva);
                   2168:        if ((new_pte & (PG_VALID | PG_SYSTEM)) != (PG_VALID | PG_SYSTEM))
                   2169:                panic("pmap_enter_kernel: bad pte");
                   2170: #endif
                   2171:
                   2172:        if (pgva >= DVMA_MAP_BASE) {
                   2173:                /* This is DVMA space.  Always want it non-cached. */
                   2174:                new_pte |= PG_NC;
                   2175:        }
                   2176:
                   2177:        segva = m68k_trunc_seg(pgva);
                   2178:        do_pv = TRUE;
                   2179:
                   2180:        /* Do we have a PMEG? */
                   2181:        sme = get_segmap(segva);
                   2182:        if (sme != SEGINV) {
                   2183:                /* Found a PMEG in the segmap.  Cool. */
                   2184:                pmegp = pmeg_p(sme);
                   2185: #ifdef DIAGNOSTIC
                   2186:                /* Make sure it is the right PMEG. */
                   2187:                if (sme != pmap->pm_segmap[VA_SEGNUM(segva)])
                   2188:                        panic("pmap_enter_kernel: wrong sme at VA=0x%lx", segva);
                   2189:                /* Make sure it is ours. */
                   2190:                if (pmegp->pmeg_owner != pmap)
                   2191:                        panic("pmap_enter_kernel: MMU has bad pmeg 0x%x", sme);
                   2192: #endif
                   2193:        } else {
                   2194:                /* No PMEG in the segmap.  Have to allocate one. */
                   2195:                pmegp = pmeg_allocate(pmap, segva);
                   2196:                sme = pmegp->pmeg_index;
                   2197:                pmap->pm_segmap[VA_SEGNUM(segva)] = sme;
                   2198:                set_segmap(segva, sme);
                   2199: #ifdef PMAP_DEBUG
                   2200:                pmeg_verify_empty(segva);
                   2201:                if (pmap_debug & PMD_SEGMAP) {
                   2202:                        printf("pmap: set_segmap pmap=%p va=0x%lx sme=0x%x (ek)\n",
                   2203:                                   pmap, segva, sme);
                   2204:                }
                   2205: #endif
                   2206:                /* There are no existing mappings to deal with. */
                   2207:                old_pte = 0;
                   2208:                goto add_pte;
                   2209:        }
                   2210:
                   2211:        /*
                   2212:         * We have a PMEG.  Is the VA already mapped to somewhere?
                   2213:         *      (a) if so, is it same pa? (really a protection change)
                   2214:         *      (b) if not same pa, then we have to unlink from old pa
                   2215:         */
                   2216:        old_pte = get_pte(pgva);
                   2217:        if ((old_pte & PG_VALID) == 0)
                   2218:                goto add_pte;
                   2219:
                   2220:        /* Have valid translation.  Flush cache before changing it. */
                   2221: #ifdef HAVECACHE
                   2222:        if (cache_size) {
                   2223:                cache_flush_page(pgva);
                   2224:                /* Get fresh mod/ref bits from write-back. */
                   2225:                old_pte = get_pte(pgva);
                   2226:        }
                   2227: #endif
                   2228:
                   2229:        /* XXX - removing valid page here, way lame... -glass */
                   2230:        pmegp->pmeg_vpages--;
                   2231:
                   2232:        if (!IS_MAIN_MEM(old_pte)) {
                   2233:                /* Was not main memory, so no pv_entry for it. */
                   2234:                goto add_pte;
                   2235:        }
                   2236:
                   2237:        /* Old mapping was main memory.  Save mod/ref bits. */
                   2238:        save_modref_bits(old_pte);
                   2239:
                   2240:        /*
                   2241:         * If not changing the type or pfnum then re-use pv_entry.
                   2242:         * Note we get here only with old_pte having PGT_OBMEM.
                   2243:         */
                   2244:        if ((old_pte & (PG_TYPE|PG_FRAME)) ==
                   2245:                (new_pte & (PG_TYPE|PG_FRAME)) )
                   2246:        {
                   2247:                do_pv = FALSE;          /* re-use pv_entry */
                   2248:                new_pte |= (old_pte & PG_NC);
                   2249:                goto add_pte;
                   2250:        }
                   2251:
                   2252:        /* OK, different type or PA, have to kill old pv_entry. */
                   2253:        pv_unlink(pmap, old_pte, pgva);
                   2254:
                   2255:  add_pte:      /* can be destructive */
                   2256:        pmeg_set_wiring(pmegp, pgva, wired);
                   2257:
                   2258:        /* Anything but MAIN_MEM is mapped non-cached. */
                   2259:        if (!IS_MAIN_MEM(new_pte)) {
                   2260:                new_pte |= PG_NC;
                   2261:                do_pv = FALSE;
                   2262:        }
                   2263:        if (do_pv == TRUE) {
                   2264:                if (pv_link(pmap, new_pte, pgva) & PV_NC)
                   2265:                        new_pte |= PG_NC;
                   2266:        }
                   2267: #ifdef PMAP_DEBUG
                   2268:        if ((pmap_debug & PMD_SETPTE) || (pgva == pmap_db_watchva)) {
                   2269:                printf("pmap: set_pte pmap=%p va=0x%lx old=0x%x new=0x%x (ek)\n",
                   2270:                           pmap, pgva, old_pte, new_pte);
                   2271:        }
                   2272: #endif
                   2273:        /* cache flush done above */
                   2274:        set_pte(pgva, new_pte);
                   2275:        set_context(saved_ctx);
                   2276:        pmegp->pmeg_vpages++;
                   2277: }
                   2278:
                   2279:
                   2280: static void
                   2281: pmap_enter_user(pmap, pgva, new_pte, wired)
                   2282:        pmap_t pmap;
                   2283:        vm_offset_t pgva;
                   2284:        int new_pte;
                   2285:        boolean_t wired;
                   2286: {
                   2287:        int do_pv, old_pte, sme;
                   2288:        vm_offset_t segva;
                   2289:        pmeg_t pmegp;
                   2290:
                   2291:        CHECK_SPL();
                   2292:
                   2293: #ifdef DIAGNOSTIC
                   2294:        if (pgva >= VM_MAXUSER_ADDRESS)
                   2295:                panic("pmap_enter_user: bad va=0x%lx", pgva);
                   2296:        if ((new_pte & (PG_VALID | PG_SYSTEM)) != PG_VALID)
                   2297:                panic("pmap_enter_user: bad pte");
                   2298: #endif
                   2299: #ifdef PMAP_DEBUG
                   2300:        /*
                   2301:         * Some user pages are wired here, and a later
                   2302:         * call to pmap_unwire() will unwire them.
                   2303:         * XXX - Need a separate list for wired user pmegs
                   2304:         * so they cannot be stolen from the active list.
                   2305:         * XXX - Note: vm_fault.c assumes pmap_extract will
                   2306:         * work on wired mappings, so must preserve them...
                   2307:         * XXX: Maybe keep a list of wired PMEGs?
                   2308:         */
                   2309:        if (wired && (pmap_debug & PMD_WIRING)) {
                   2310:                db_printf("pmap_enter_user: attempt to wire user page, ignored\n");
                   2311:                Debugger();
                   2312:        }
                   2313: #endif
                   2314:
                   2315:        /* Validate this assumption. */
                   2316:        if (pmap != current_pmap()) {
                   2317: #ifdef PMAP_DEBUG
                   2318:                /* Apparently, this never happens. */
                   2319:                db_printf("pmap_enter_user: not curproc\n");
                   2320:                Debugger();
                   2321: #endif
                   2322:                /* Just throw it out (fault it in later). */
                   2323:                /* XXX: But must remember it if wired... */
                   2324:                return;
                   2325:        }
                   2326:
                   2327:        segva = m68k_trunc_seg(pgva);
                   2328:        do_pv = TRUE;
                   2329:
                   2330:        /*
                   2331:         * If this pmap was sharing the "empty" context,
                   2332:         * allocate a real context for its exclusive use.
                   2333:         */
                   2334:        if (!has_context(pmap)) {
                   2335:                context_allocate(pmap);
                   2336: #ifdef PMAP_DEBUG
                   2337:                if (pmap_debug & PMD_CONTEXT)
                   2338:                        printf("pmap_enter(%p) got context %d\n",
                   2339:                                   pmap, pmap->pm_ctxnum);
                   2340: #endif
                   2341:                set_context(pmap->pm_ctxnum);
                   2342:        } else {
                   2343: #ifdef PMAP_DEBUG
                   2344:                /* Make sure context is correct. */
                   2345:                if (pmap->pm_ctxnum != get_context()) {
                   2346:                        db_printf("pmap_enter_user: wrong context\n");
                   2347:                        Debugger();
                   2348:                        /* XXX: OK to proceed? */
                   2349:                        set_context(pmap->pm_ctxnum);
                   2350:                }
                   2351: #endif
                   2352:        }
                   2353:
                   2354:        /*
                   2355:         * We have a context.  Do we have a PMEG?
                   2356:         */
                   2357:        sme = get_segmap(segva);
                   2358:        if (sme != SEGINV) {
                   2359:                /* Found a PMEG in the segmap.  Cool. */
                   2360:                pmegp = pmeg_p(sme);
                   2361: #ifdef DIAGNOSTIC
                   2362:                /* Make sure it is the right PMEG. */
                   2363:                if (sme != pmap->pm_segmap[VA_SEGNUM(segva)])
                   2364:                        panic("pmap_enter_user: wrong sme at VA=0x%lx", segva);
                   2365:                /* Make sure it is ours. */
                   2366:                if (pmegp->pmeg_owner != pmap)
                   2367:                        panic("pmap_enter_user: MMU has bad pmeg 0x%x", sme);
                   2368: #endif
                   2369:        } else {
                   2370:                /* Not in the segmap.  Try the S/W cache. */
                   2371:                pmegp = pmeg_cache(pmap, segva);
                   2372:                if (pmegp) {
                   2373:                        /* Found PMEG in cache.  Just reload it. */
                   2374:                        sme = pmegp->pmeg_index;
                   2375:                        set_segmap(segva, sme);
                   2376:                } else {
                   2377:                        /* PMEG not in cache, so allocate one. */
                   2378:                        pmegp = pmeg_allocate(pmap, segva);
                   2379:                        sme = pmegp->pmeg_index;
                   2380:                        pmap->pm_segmap[VA_SEGNUM(segva)] = sme;
                   2381:                        set_segmap(segva, sme);
                   2382: #ifdef PMAP_DEBUG
                   2383:                        pmeg_verify_empty(segva);
                   2384: #endif
                   2385:                }
                   2386: #ifdef PMAP_DEBUG
                   2387:                if (pmap_debug & PMD_SEGMAP) {
                   2388:                        printf("pmap: set_segmap pmap=%p va=0x%lx sme=0x%x (eu)\n",
                   2389:                                   pmap, segva, sme);
                   2390:                }
                   2391: #endif
                   2392:        }
                   2393:
                   2394:        /*
                   2395:         * We have a PMEG.  Is the VA already mapped to somewhere?
                   2396:         *      (a) if so, is it same pa? (really a protection change)
                   2397:         *      (b) if not same pa, then we have to unlink from old pa
                   2398:         */
                   2399:        old_pte = get_pte(pgva);
                   2400:        if ((old_pte & PG_VALID) == 0)
                   2401:                goto add_pte;
                   2402:
                   2403:        /* Have valid translation.  Flush cache before changing it. */
                   2404: #ifdef HAVECACHE
                   2405:        if (cache_size) {
                   2406:                cache_flush_page(pgva);
                   2407:                /* Get fresh mod/ref bits from write-back. */
                   2408:                old_pte = get_pte(pgva);
                   2409:        }
                   2410: #endif
                   2411:
                   2412:        /* XXX - removing valid page here, way lame... -glass */
                   2413:        pmegp->pmeg_vpages--;
                   2414:
                   2415:        if (!IS_MAIN_MEM(old_pte)) {
                   2416:                /* Was not main memory, so no pv_entry for it. */
                   2417:                goto add_pte;
                   2418:        }
                   2419:
                   2420:        /* Old mapping was main memory.  Save mod/ref bits. */
                   2421:        save_modref_bits(old_pte);
                   2422:
                   2423:        /*
                   2424:         * If not changing the type or pfnum then re-use pv_entry.
                   2425:         * Note we get here only with old_pte having PGT_OBMEM.
                   2426:         */
                   2427:        if ((old_pte & (PG_TYPE|PG_FRAME)) ==
                   2428:                (new_pte & (PG_TYPE|PG_FRAME)) )
                   2429:        {
                   2430:                do_pv = FALSE;          /* re-use pv_entry */
                   2431:                new_pte |= (old_pte & PG_NC);
                   2432:                goto add_pte;
                   2433:        }
                   2434:
                   2435:        /* OK, different type or PA, have to kill old pv_entry. */
                   2436:        pv_unlink(pmap, old_pte, pgva);
                   2437:
                   2438:  add_pte:
                   2439:        /* XXX - Wiring changes on user pmaps? */
                   2440:        /* pmeg_set_wiring(pmegp, pgva, wired); */
                   2441:
                   2442:        /* Anything but MAIN_MEM is mapped non-cached. */
                   2443:        if (!IS_MAIN_MEM(new_pte)) {
                   2444:                new_pte |= PG_NC;
                   2445:                do_pv = FALSE;
                   2446:        }
                   2447:        if (do_pv == TRUE) {
                   2448:                if (pv_link(pmap, new_pte, pgva) & PV_NC)
                   2449:                        new_pte |= PG_NC;
                   2450:        }
                   2451: #ifdef PMAP_DEBUG
                   2452:        if ((pmap_debug & PMD_SETPTE) || (pgva == pmap_db_watchva)) {
                   2453:                printf("pmap: set_pte pmap=%p va=0x%lx old=0x%x new=0x%x (eu)\n",
                   2454:                           pmap, pgva, old_pte, new_pte);
                   2455:        }
                   2456: #endif
                   2457:        /* cache flush done above */
                   2458:        set_pte(pgva, new_pte);
                   2459:        pmegp->pmeg_vpages++;
                   2460: }
                   2461:
                   2462: void
                   2463: pmap_kenter_pa(va, pa, prot)
                   2464:        vaddr_t va;
                   2465:        paddr_t pa;
                   2466:        vm_prot_t prot;
                   2467: {
                   2468:        pmap_enter(pmap_kernel(), va, pa, prot, PMAP_WIRED);
                   2469: }
                   2470:
                   2471: void
                   2472: pmap_kremove(va, len)
                   2473:        vaddr_t va;
                   2474:        vsize_t len;
                   2475: {
                   2476:        for (len >>= PAGE_SHIFT; len > 0; len--, va += PAGE_SIZE) {
                   2477:                pmap_remove(pmap_kernel(), va, va + PAGE_SIZE);
                   2478:        }
                   2479: }
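                         /*
                          * Illustrative usage (a sketch, not from the original
                          * source): mapping a physically contiguous buffer and
                          * later tearing it down might look like
                          *
                          *	for (off = 0; off < len; off += PAGE_SIZE)
                          *		pmap_kenter_pa(va + off, pa + off,
                          *		    VM_PROT_READ | VM_PROT_WRITE);
                          *	...
                          *	pmap_kremove(va, len);
                          *
                          * Note that this pmap implements pmap_kenter_pa() as a
                          * wired pmap_enter() on the kernel pmap.
                          */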
                   2480:
                   2481:
                   2482: /*
                   2483:  * The trap handler calls this so we can try to resolve
                   2484:  * user-level faults by reloading a PMEG.
                   2485:  * If that does not produce a valid mapping,
                   2486:  * call vm_fault as usual.
                   2487:  *
                   2488:  * XXX: Merge this with the next function?
                   2489:  */
                   2490: int
                   2491: _pmap_fault(map, va, ftype)
                   2492:        vm_map_t map;
                   2493:        vm_offset_t va;
                   2494:        vm_prot_t ftype;
                   2495: {
                   2496:        pmap_t pmap;
                   2497:        int rv;
                   2498:
                   2499:        pmap = vm_map_pmap(map);
                   2500:        if (map == kernel_map) {
                   2501:                /* Do not allow faults below the "managed" space. */
                   2502:                if (va < virtual_avail) {
                   2503:                        /*
                   2504:                         * Most pages below virtual_avail are read-only,
                   2505:                         * so I will assume it is a protection failure.
                   2506:                         */
1.2       fredette 2507:                        return EACCES;
1.1       fredette 2508:                }
                   2509:        } else {
                   2510:                /* User map.  Try reload shortcut. */
                   2511:                if (pmap_fault_reload(pmap, va, ftype))
1.2       fredette 2512:                        return 0;
1.1       fredette 2513:        }
                   2514:        rv = uvm_fault(map, va, 0, ftype);
                   2515:
                   2516: #ifdef PMAP_DEBUG
                   2517:        if (pmap_debug & PMD_FAULT) {
                   2518:                printf("pmap_fault(%p, 0x%lx, 0x%x) -> 0x%x\n",
                   2519:                           map, va, ftype, rv);
                   2520:        }
                   2521: #endif
                   2522:
                   2523:        return (rv);
                   2524: }
                   2525:
                   2526: /*
                   2527:  * This is a shortcut used by the trap handler to
                   2528:  * reload PMEGs into a user segmap without calling
                   2529:  * the actual VM fault handler.  Returns TRUE if:
                   2530:  *     the PMEG was reloaded, and
                   2531:  *     it has a valid PTE at va.
                   2532:  * Otherwise returns zero and lets the VM code handle it.
                   2533:  */
                   2534: int
                   2535: pmap_fault_reload(pmap, pgva, ftype)
                   2536:        pmap_t pmap;
                   2537:        vm_offset_t pgva;
                   2538:        vm_prot_t ftype;
                   2539: {
                   2540:        int rv, s, pte, chkpte, sme;
                   2541:        vm_offset_t segva;
                   2542:        pmeg_t pmegp;
                   2543:
                   2544:        if (pgva >= VM_MAXUSER_ADDRESS)
                   2545:                return (0);
                   2546:        if (pmap->pm_segmap == NULL) {
                   2547: #ifdef PMAP_DEBUG
                   2548:                db_printf("pmap_fault_reload: null segmap\n");
                   2549:                Debugger();
                   2550: #endif
                   2551:                return (0);
                   2552:        }
                   2553:
                   2554:        /* Short-cut using the S/W segmap. */
                   2555:        if (pmap->pm_segmap[VA_SEGNUM(pgva)] == SEGINV)
                   2556:                return (0);
                   2557:
                   2558:        segva = m68k_trunc_seg(pgva);
                   2559:        chkpte = PG_VALID;
                   2560:        if (ftype & VM_PROT_WRITE)
                   2561:                chkpte |= PG_WRITE;
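                                /*
                                 * E.g. (illustrative): a write fault needs both
                                 * PG_VALID and PG_WRITE in the reloaded PTE; a
                                 * read fault needs only PG_VALID.
                                 */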
                   2562:        rv = 0;
                   2563:
1.2       fredette 2564:        s = splvm();
1.1       fredette 2565:
                   2566:        /*
                   2567:         * Given that we faulted on a user-space address, we will
                   2568:         * probably need a context.  Get a context now so we can
                   2569:         * try to resolve the fault with a segmap reload.
                   2570:         */
                   2571:        if (!has_context(pmap)) {
                   2572:                context_allocate(pmap);
                   2573: #ifdef PMAP_DEBUG
                   2574:                if (pmap_debug & PMD_CONTEXT)
                   2575:                        printf("pmap_fault(%p) got context %d\n",
                   2576:                                   pmap, pmap->pm_ctxnum);
                   2577: #endif
                   2578:                set_context(pmap->pm_ctxnum);
                   2579:        } else {
                   2580: #ifdef PMAP_DEBUG
                   2581:                /* Make sure context is correct. */
                   2582:                if (pmap->pm_ctxnum != get_context()) {
                   2583:                        db_printf("pmap_fault_reload: wrong context\n");
                   2584:                        Debugger();
                   2585:                        /* XXX: OK to proceed? */
                   2586:                        set_context(pmap->pm_ctxnum);
                   2587:                }
                   2588: #endif
                   2589:        }
                   2590:
                   2591:        sme = get_segmap(segva);
                   2592:        if (sme == SEGINV) {
                   2593:                /* See if there is something to reload. */
                   2594:                pmegp = pmeg_cache(pmap, segva);
                   2595:                if (pmegp) {
                   2596:                        /* Found one!  OK, reload it. */
                   2597:                        pmap_stats.ps_pmeg_faultin++;
                   2598:                        sme = pmegp->pmeg_index;
                   2599:                        set_segmap(segva, sme);
                   2600:                        pte = get_pte(pgva);
                   2601:                        if (pte & chkpte)
                   2602:                                rv = 1;
                   2603:                }
                   2604:        }
                   2605:
                   2606:        splx(s);
                   2607:        return (rv);
                   2608: }
                   2609:
                   2610:
                   2611: /*
                   2612:  * Clear the modify bit for the given physical page.
                   2613:  */
                   2614: boolean_t
                   2615: pmap_clear_modify(pg)
                   2616:        struct vm_page *pg;
                   2617: {
                   2618:        paddr_t pa = VM_PAGE_TO_PHYS(pg);
                   2619:        pv_entry_t *head;
                   2620:        u_char *pv_flags;
                   2621:        int s;
                   2622:        boolean_t rv;
                   2623:
                   2624:        if (!pv_initialized)
                   2625:                return FALSE;
                   2626:
                   2627:        /* The VM code may call this on device addresses! */
                   2628:        if (PA_IS_DEV(pa))
                   2629:                return FALSE;
                   2630:
                   2631:        pv_flags = pa_to_pvflags(pa);
                   2632:        head     = pa_to_pvhead(pa);
                   2633:
1.2       fredette 2634:        s = splvm();
1.1       fredette 2635:        *pv_flags |= pv_syncflags(*head);
                   2636:        rv = *pv_flags & PV_MOD;
                   2637:        *pv_flags &= ~PV_MOD;
                   2638:        splx(s);
                   2639:        return rv;
                   2640: }
                   2641:
                   2642: /*
                   2643:  * Tell whether the given physical page has been modified.
                   2644:  */
                   2645: boolean_t
                   2646: pmap_is_modified(pg)
                   2647:        struct vm_page *pg;
                   2648: {
                   2649:        paddr_t pa = VM_PAGE_TO_PHYS(pg);
                   2650:        pv_entry_t *head;
                   2651:        u_char *pv_flags;
                   2652:        int rv, s;
                   2653:
                   2654:        if (!pv_initialized)
                   2655:                return (0);
                   2656:
                   2657:        /* The VM code may call this on device addresses! */
                   2658:        if (PA_IS_DEV(pa))
                   2659:                return (0);
                   2660:
                   2661:        pv_flags = pa_to_pvflags(pa);
                   2662:        head     = pa_to_pvhead(pa);
                   2663:
1.2       fredette 2664:        s = splvm();
1.1       fredette 2665:        if ((*pv_flags & PV_MOD) == 0)
                   2666:                *pv_flags |= pv_syncflags(*head);
                   2667:        rv = (*pv_flags & PV_MOD);
                   2668:        splx(s);
                   2669:
                   2670:        return (rv);
                   2671: }
                   2672:
                   2673: /*
                   2674:  * Clear the reference bit for the given physical page.
                   2675:  * It's OK to just remove mappings if that's easier.
                   2676:  */
                   2677: boolean_t
                   2678: pmap_clear_reference(pg)
                   2679:        struct vm_page *pg;
                   2680: {
                   2681:        paddr_t pa = VM_PAGE_TO_PHYS(pg);
                   2682:        pv_entry_t *head;
                   2683:        u_char *pv_flags;
                   2684:        int s;
                   2685:        boolean_t rv;
                   2686:
                   2687:        if (!pv_initialized)
                   2688:                return FALSE;
                   2689:
                   2690:        /* The VM code may call this on device addresses! */
                   2691:        if (PA_IS_DEV(pa))
                   2692:                return FALSE;
                   2693:
                   2694:        pv_flags = pa_to_pvflags(pa);
                   2695:        head     = pa_to_pvhead(pa);
                   2696:
1.2       fredette 2697:        s = splvm();
1.1       fredette 2698:        *pv_flags |= pv_syncflags(*head);
                   2699:        rv = *pv_flags & PV_REF;
                   2700:        *pv_flags &= ~PV_REF;
                   2701:        splx(s);
                   2702:        return rv;
                   2703: }
                   2704:
                   2705: /*
                   2706:  * Tell whether the given physical page has been referenced.
                   2707:  * It's OK to just return FALSE if page is not mapped.
                   2708:  */
                   2709: boolean_t
                   2710: pmap_is_referenced(pg)
                   2711:        struct vm_page *pg;
                   2712: {
                   2713:        paddr_t pa = VM_PAGE_TO_PHYS(pg);
                   2714:        pv_entry_t *head;
                   2715:        u_char *pv_flags;
                   2716:        int s;
                   2717:        boolean_t rv;
                   2718:
                   2719:        if (!pv_initialized)
                   2720:                return (FALSE);
                   2721:
                   2722:        /* The VM code may call this on device addresses! */
                   2723:        if (PA_IS_DEV(pa))
                   2724:                return (FALSE);
                   2725:
                   2726:        pv_flags = pa_to_pvflags(pa);
                   2727:        head     = pa_to_pvhead(pa);
                   2728:
1.2       fredette 2729:        s = splvm();
1.1       fredette 2730:        if ((*pv_flags & PV_REF) == 0)
                   2731:                *pv_flags |= pv_syncflags(*head);
                   2732:        rv = (*pv_flags & PV_REF);
                   2733:        splx(s);
                   2734:
                   2735:        return (rv);
                   2736: }
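                         /*
                          * Illustrative usage (a sketch, not from the original
                          * source): the mod/ref functions above all sync the
                          * hardware bits into the software pv_flags first, so a
                          * pageout-style caller can simply do
                          *
                          *	if (pmap_is_modified(pg)) {
                          *		... clean the page ...
                          *		pmap_clear_modify(pg);
                          *	}
                          */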
                   2737:
                   2738:
                   2739: /*
                   2740:  * This is called by locore.s:cpu_switch() when it is
                   2741:  * switching to a new process.  Load new translations.
                   2742:  */
                   2743: void
                   2744: _pmap_switch(pmap)
                   2745:        pmap_t pmap;
                   2746: {
                   2747:
                   2748:        CHECK_SPL();
                   2749:
                   2750:        /*
                   2751:         * Since we maintain completely separate user and kernel address
                   2752:         * spaces, whenever we switch to a process, we need to make sure
                   2753:         * that it has a context allocated.
                   2754:         */
                   2755:        if (!has_context(pmap)) {
                   2756:                context_allocate(pmap);
                   2757: #ifdef PMAP_DEBUG
                   2758:                if (pmap_debug & PMD_CONTEXT)
                   2759:                        printf("_pmap_switch(%p) got context %d\n",
                   2760:                                   pmap, pmap->pm_ctxnum);
                   2761: #endif
                   2762:        }
                   2763:        set_context(pmap->pm_ctxnum);
                   2764: }
                   2765:
                   2766: /*
                   2767:  * Exported version of pmap_activate().  This is called from the
                   2768:  * machine-independent VM code when a process is given a new pmap.
                   2769:  * If (p == curproc), do what cpu_switch would do; otherwise just
                   2770:  * take this as notification that the process has a new pmap.
                   2771:  */
                   2772: void
                   2773: pmap_activate(p)
                   2774:        struct proc *p;
                   2775: {
                   2776:        pmap_t pmap = p->p_vmspace->vm_map.pmap;
                   2777:        int s;
                   2778:
                   2779:        if (p == curproc) {
1.2       fredette 2780:                s = splvm();
1.1       fredette 2781:                _pmap_switch(pmap);
                   2782:                splx(s);
                   2783:        }
                   2784: }
                   2785:
                   2786: /*
                   2787:  * Deactivate the address space of the specified process.
                   2788:  * XXX The semantics of this function are not currently well-defined.
                   2789:  */
                   2790: void
                   2791: pmap_deactivate(p)
                   2792:        struct proc *p;
                   2793: {
                   2794:        /* not implemented. */
                   2795: }
                   2796:
                   2797: /*
                   2798:  *     Routine:        pmap_unwire
                   2799:  *     Function:       Clear the wired attribute for a map/virtual-address
                   2800:  *                     pair.
                   2801:  *     In/out conditions:
                   2802:  *                     The mapping must already exist in the pmap.
                   2803:  */
                   2804: void
                   2805: pmap_unwire(pmap, va)
                   2806:        pmap_t  pmap;
                   2807:        vm_offset_t     va;
                   2808: {
                   2809:        int s, sme;
                   2810:        int wiremask, ptenum;
                   2811:        pmeg_t pmegp;
                   2812:        int saved_ctx;
                   2813:
                   2814:        if (pmap == NULL)
                   2815:                return;
                   2816: #ifdef PMAP_DEBUG
                   2817:        if (pmap_debug & PMD_WIRING)
                   2818:                printf("pmap_unwire(pmap=%p, va=0x%lx)\n",
                   2819:                           pmap, va);
                   2820: #endif
                   2821:        /*
                   2822:         * We are asked to unwire pages that were wired when
                   2823:         * pmap_enter() was called and we ignored wiring.
                   2824:         * (VM code appears to wire a stack page during fork.)
                   2825:         */
                   2826:        if (pmap != kernel_pmap) {
                   2827: #ifdef PMAP_DEBUG
                   2828:                if (pmap_debug & PMD_WIRING) {
                   2829:                        db_printf("  (user pmap -- ignored)\n");
                   2830:                        Debugger();
                   2831:                }
                   2832: #endif
                   2833:                return;
                   2834:        }
                   2835:
                   2836:        ptenum = VA_PTE_NUM(va);
                   2837:        wiremask = 1 << ptenum;
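                                /*
                                 * E.g. (illustrative): if va falls on the fourth
                                 * page of its segment, VA_PTE_NUM(va) is 3 and
                                 * wiremask is 0x08.
                                 */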
                   2838:
1.2       fredette 2839:        s = splvm();
1.1       fredette 2840:
                   2841:        saved_ctx = get_context();
                   2842:        set_context(KERNEL_CONTEXT);
                   2843:        sme = get_segmap(va);
                   2844:        set_context(saved_ctx);
                   2845:        if (sme == SEGINV)
                   2846:                panic("pmap_unwire: invalid va=0x%lx", va);
                   2847:        pmegp = pmeg_p(sme);
                   2848:        pmegp->pmeg_wired &= ~wiremask;
                   2849:
                   2850:        splx(s);
                   2851: }
                   2852:
                   2853: /*
                   2854:  *     Copy the range specified by src_addr/len
                   2855:  *     from the source map to the range dst_addr/len
                   2856:  *     in the destination map.
                   2857:  *
                   2858:  *     This routine is only advisory and need not do anything.
                   2859:  */
                   2860: void
                   2861: pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
                   2862:        pmap_t          dst_pmap;
                   2863:        pmap_t          src_pmap;
                   2864:        vm_offset_t     dst_addr;
                   2865:        vm_size_t       len;
                   2866:        vm_offset_t     src_addr;
                   2867: {
                   2868: }
                   2869:
                   2870: /*
1.3       fredette 2871:  * This extracts the PMEG associated with the given map/virtual
                   2872:  * address pair.  Returns SEGINV if VA not valid.
                   2873:  */
                   2874: int
                   2875: _pmap_extract_pmeg(pmap, va)
                   2876:        pmap_t  pmap;
                   2877:        vm_offset_t va;
                   2878: {
                   2879:        int s, saved_ctx, segnum, sme;
                   2880:
                   2881:        s = splvm();
                   2882:
                   2883:        if (pmap == kernel_pmap) {
                   2884:                saved_ctx = get_context();
                   2885:                set_context(KERNEL_CONTEXT);
                   2886:                sme = get_segmap(va);
                   2887:                set_context(saved_ctx);
                   2888:        } else {
                   2889:                /* This is rare, so do it the easy way. */
                   2890:                segnum = VA_SEGNUM(va);
                   2891:                sme = pmap->pm_segmap[segnum];
                   2892:        }
                   2893:
                   2894:        splx(s);
                   2895:        return (sme);
                   2896: }
                   2897:
                   2898: /*
1.1       fredette 2899:  *     Routine:        pmap_extract
                   2900:  *     Function:
                   2901:  *             Extract the physical page address associated
                   2902:  *             with the given map/virtual_address pair.
                   2903:  *     Returns zero if VA not valid.
                   2904:  */
                   2905: boolean_t
                   2906: pmap_extract(pmap, va, pap)
                   2907:        pmap_t  pmap;
                   2908:        vm_offset_t va;
                   2909:        paddr_t *pap;
                   2910: {
                   2911:        int s, sme, segnum, ptenum, pte;
                   2912:        paddr_t pa;
                   2913:        int saved_ctx;
                   2914:
                   2915:        pte = 0;
1.2       fredette 2916:        s = splvm();
1.1       fredette 2917:
                   2918:        if (pmap == kernel_pmap) {
                   2919:                saved_ctx = get_context();
                   2920:                set_context(KERNEL_CONTEXT);
                   2921:                sme = get_segmap(va);
                   2922:                if (sme != SEGINV)
                   2923:                        pte = get_pte(va);
                   2924:                set_context(saved_ctx);
                   2925:        } else {
                   2926:                /* This is rare, so do it the easy way. */
                   2927:                segnum = VA_SEGNUM(va);
                   2928:                sme = pmap->pm_segmap[segnum];
                   2929:                if (sme != SEGINV) {
                   2930:                        ptenum = VA_PTE_NUM(va);
                   2931:                        pte = get_pte_pmeg(sme, ptenum);
                   2932:                }
                   2933:        }
                   2934:
                   2935:        splx(s);
                   2936:
                   2937:        if ((pte & PG_VALID) == 0) {
                   2938: #ifdef PMAP_DEBUG
                   2939:                db_printf("pmap_extract: invalid va=0x%lx\n", va);
                   2940:                Debugger();
                   2941: #endif
                   2942:                return (FALSE);
                   2943:        }
                   2944:        pa = PG_PA(pte);
                   2945: #ifdef DIAGNOSTIC
                   2946:        if (pte & PG_TYPE) {
                   2947:                panic("pmap_extract: not main mem, va=0x%lx\n", va);
                   2948:        }
                   2949: #endif
                   2950:        if (pap != NULL)
                   2951:                *pap = pa;
                   2952:        return (TRUE);
                   2953: }
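                         /*
                          * Illustrative usage (a sketch, not from the original
                          * source):
                          *
                          *	paddr_t pa;
                          *
                          *	if (pmap_extract(pmap_kernel(), va, &pa) == FALSE)
                          *		panic("no mapping for va");
                          */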
                   2954:
                   2955:
                   2956: /*
                   2957:  *       pmap_page_protect:
                   2958:  *
                   2959:  *       Lower the permission for all mappings to a given page.
                   2960:  */
                   2961: void
                   2962: pmap_page_protect(pg, prot)
                   2963:        struct vm_page *pg;
                   2964:        vm_prot_t          prot;
                   2965: {
                   2966:        paddr_t pa = VM_PAGE_TO_PHYS(pg);
                   2967:        int s;
                   2968:
                   2969:        /* The VM code may call this on device addresses! */
                   2970:        if (PA_IS_DEV(pa))
                   2971:                return;
                   2972:
1.2       fredette 2973:        s = splvm();
1.1       fredette 2974:
                   2975: #ifdef PMAP_DEBUG
                   2976:        if (pmap_debug & PMD_PROTECT)
                   2977:                printf("pmap_page_protect(0x%lx, 0x%lx)\n", pa, prot);
                   2978: #endif
                   2979:        switch (prot) {
                   2980:        case VM_PROT_ALL:
                   2981:                break;
                   2982:        case VM_PROT_READ:
                   2983:        case VM_PROT_READ|VM_PROT_EXECUTE:
                   2984:                pv_changepte(pa, 0, PG_WRITE);
                   2985:                break;
                   2986:        default:
                   2987:                /* remove mapping for all pmaps that have it */
                   2988:                pv_remove_all(pa);
                   2989:                break;
                   2990:        }
                   2991:
                   2992:        splx(s);
                   2993: }
                   2994:
                   2995: /*
                   2996:  * Turn a cdevsw d_mmap value into a byte address for pmap_enter.
                   2997:  * XXX this should almost certainly be done differently, and
                   2998:  *     elsewhere, or even not at all
                   2999:  */
                   3000: #ifndef        pmap_phys_address
                   3001: vm_offset_t
                   3002: pmap_phys_address(x)
                   3003:        int x;
                   3004: {
                   3005:        return (x);
                   3006: }
                   3007: #endif
                   3008:
                   3009: /*
                   3010:  * Initialize a preallocated and zeroed pmap structure,
                   3011:  * such as one in a vmspace structure.
                   3012:  */
                   3013: void
                   3014: pmap_pinit(pmap)
                   3015:        pmap_t pmap;
                   3016: {
                   3017:        pmap_common_init(pmap);
                   3018:        pmap_user_init(pmap);
                   3019: }
                   3020:
                   3021: /*
                   3022:  *     Reduce the permissions on the specified
                   3023:  *     range of this map as requested.
                   3024:  *     (Make pages read-only.)
                   3025:  */
                   3026: void
                   3027: pmap_protect(pmap, sva, eva, prot)
                   3028:        pmap_t pmap;
                   3029:        vm_offset_t sva, eva;
                   3030:        vm_prot_t       prot;
                   3031: {
                   3032:        vm_offset_t va, neva;
                   3033:        int segnum;
                   3034:
                   3035:        if (pmap == NULL)
                   3036:                return;
                   3037:
                   3038:        /* If leaving writable, nothing to do. */
                   3039:        if (prot & VM_PROT_WRITE)
                   3040:                return;
                   3041:
                   3042:        /* If removing all permissions, just unmap. */
                   3043:        if ((prot & VM_PROT_READ) == 0) {
                   3044:                pmap_remove(pmap, sva, eva);
                   3045:                return;
                   3046:        }
                   3047:
                   3048: #ifdef PMAP_DEBUG
                   3049:        if ((pmap_debug & PMD_PROTECT) ||
                   3050:                ((sva <= pmap_db_watchva && eva > pmap_db_watchva)))
                   3051:                printf("pmap_protect(%p, 0x%lx, 0x%lx)\n", pmap, sva, eva);
                   3052: #endif
                   3053:
                   3054:        if (pmap == kernel_pmap) {
                   3055:                if (sva < virtual_avail)
                   3056:                        sva = virtual_avail;
                   3057:                if (eva > DVMA_MAP_END) {
                   3058: #ifdef PMAP_DEBUG
                   3059:                        db_printf("pmap_protect: eva=0x%lx\n", eva);
                   3060:                        Debugger();
                   3061: #endif
                   3062:                        eva = DVMA_MAP_END;
                   3063:                }
                   3064:        } else {
                   3065:                if (eva > VM_MAXUSER_ADDRESS)
                   3066:                        eva = VM_MAXUSER_ADDRESS;
                   3067:        }
                   3068:
                   3069:        va = sva;
                   3070:        segnum = VA_SEGNUM(va);
                   3071:        while (va < eva) {
                   3072:                neva = m68k_trunc_seg(va) + NBSG;
                   3073:                if (neva > eva)
                   3074:                        neva = eva;
                   3075:                if (pmap->pm_segmap[segnum] != SEGINV)
                   3076:                        pmap_protect1(pmap, va, neva);
                   3077:                va = neva;
                   3078:                segnum++;
                   3079:        }
                   3080: }
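                         /*
                          * Illustrative walk of the loop above (a sketch, not from
                          * the original source): a range spanning three segments
                          * becomes three clipped calls,
                          *
                          *	pmap_protect1(pmap, sva, seg2);
                          *	pmap_protect1(pmap, seg2, seg3);
                          *	pmap_protect1(pmap, seg3, eva);
                          *
                          * where seg2 and seg3 are the intervening segment
                          * boundaries, and any segment whose segmap entry is
                          * SEGINV is skipped entirely.
                          */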
                   3081:
                   3082: /*
                   3083:  * Remove write permissions in given range.
                   3084:  * (guaranteed to be within one segment)
                   3085:  * similar to pmap_remove1()
                   3086:  */
                   3087: void
                   3088: pmap_protect1(pmap, sva, eva)
                   3089:        pmap_t pmap;
                   3090:        vm_offset_t sva, eva;
                   3091: {
                   3092:        int old_ctx, s, sme;
                   3093:        boolean_t in_ctx;
                   3094:
1.2       fredette 3095:        s = splvm();
1.1       fredette 3096:
                   3097: #ifdef DIAGNOSTIC
                   3098:        if (m68k_trunc_seg(sva) != m68k_trunc_seg(eva-1))
                   3099:                panic("pmap_protect1: bad range!");
                   3100: #endif
                   3101:
                   3102:        if (pmap == kernel_pmap) {
                   3103:                old_ctx = get_context();
                   3104:                set_context(KERNEL_CONTEXT);
                   3105:                sme = get_segmap(sva);
                   3106:                if (sme != SEGINV) {
                   3107:                        pmap_protect_mmu(pmap, sva, eva);
                   3108:                }
                   3109:                set_context(old_ctx);
                   3110:                goto out;
                   3111:        }
                   3112:        /* It is a user pmap. */
                   3113:
                   3114:        /* There is a PMEG, but maybe not active. */
                   3115:        old_ctx = INVALID_CONTEXT;
                   3116:        in_ctx = FALSE;
                   3117:        if (has_context(pmap)) {
                   3118:                /* Temporary context change. */
                   3119:                old_ctx = get_context();
                   3120:                set_context(pmap->pm_ctxnum);
                   3121:                sme = get_segmap(sva);
                   3122:                if (sme != SEGINV)
                   3123:                        in_ctx = TRUE;
                   3124:        }
                   3125:
                   3126:        if (in_ctx == TRUE)
                   3127:                pmap_protect_mmu(pmap, sva, eva);
                   3128:        else
                   3129:                pmap_protect_noctx(pmap, sva, eva);
                   3130:
                   3131:        if (old_ctx != INVALID_CONTEXT) {
                   3132:                /* Restore previous context. */
                   3133:                set_context(old_ctx);
                   3134:        }
                   3135:
                   3136: out:
                   3137:        splx(s);
                   3138: }
                   3139:
                   3140: /*
                   3141:  * Remove write permissions, all in one PMEG,
                   3142:  * where that PMEG is currently in the MMU.
                   3143:  * The current context is already correct.
                   3144:  */
                   3145: void
                   3146: pmap_protect_mmu(pmap, sva, eva)
                   3147:        pmap_t pmap;
                   3148:        vm_offset_t sva, eva;
                   3149: {
                   3150:        pmeg_t pmegp;
                   3151:        vm_offset_t pgva, segva;
                   3152:        int pte, sme;
                   3153: #ifdef HAVECACHE
                   3154:        int flush_by_page = 0;
                   3155: #endif
                   3156:
                   3157:        CHECK_SPL();
                   3158:
                   3159: #ifdef DIAGNOSTIC
                   3160:                if (pmap->pm_ctxnum != get_context())
                   3161:                        panic("pmap_protect_mmu: wrong context");
                   3162: #endif
                   3163:
                   3164:        segva = m68k_trunc_seg(sva);
                   3165:        sme = get_segmap(segva);
                   3166:
                   3167: #ifdef DIAGNOSTIC
                   3168:        /* Make sure it is valid and known. */
                   3169:        if (sme == SEGINV)
                   3170:                panic("pmap_protect_mmu: SEGINV");
                   3171:        if (pmap->pm_segmap && (pmap->pm_segmap[VA_SEGNUM(segva)] != sme))
                   3172:                panic("pmap_protect_mmu: incorrect sme, va=0x%lx", segva);
                   3173: #endif
                   3174:
                   3175:        pmegp = pmeg_p(sme);
                   3176:        /* have pmeg, will travel */
                   3177:
                   3178: #ifdef DIAGNOSTIC
                   3179:        /* Make sure we own the pmeg, right va, etc. */
                   3180:        if ((pmegp->pmeg_va != segva) ||
                   3181:                (pmegp->pmeg_owner != pmap) ||
                   3182:                (pmegp->pmeg_version != pmap->pm_version))
                   3183:        {
                   3184:                panic("pmap_protect_mmu: bad pmeg=%p", pmegp);
                   3185:        }
                   3186:        if (pmegp->pmeg_vpages <= 0)
                   3187:                panic("pmap_protect_mmu: no valid pages?");
                   3188: #endif
                   3189:
                   3190: #ifdef HAVECACHE
                   3191:        if (cache_size) {
                   3192:                /*
                    3193:                 * If the range to be write-protected is larger than the cache,
                   3194:                 * it will be cheaper to flush this segment entirely.
                   3195:                 */
                   3196:                if (cache_size < (eva - sva)) {
                   3197:                        /* cheaper to flush whole segment */
                   3198:                        cache_flush_segment(segva);
                   3199:                } else {
                   3200:                        flush_by_page = 1;
                   3201:                }
                   3202:        }
                   3203: #endif
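                                  /*
                                   * Illustrative cost sketch (hypothetical sizes, and
                                   * assuming 2KB pages): with cache_size = 8KB and a
                                   * 32KB (eva - sva) range, at most 8KB of the range
                                   * can be in the cache, so one cache_flush_segment()
                                   * call is cheaper than sixteen cache_flush_page()
                                   * calls would be.
                                   */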
                   3204:
                   3205:        /* Remove write permission in the given range. */
                   3206:        for (pgva = sva; pgva < eva; pgva += NBPG) {
                   3207:                pte = get_pte(pgva);
                   3208:                if (pte & PG_VALID) {
                   3209: #ifdef HAVECACHE
                   3210:                        if (flush_by_page) {
                   3211:                                cache_flush_page(pgva);
                   3212:                                /* Get fresh mod/ref bits from write-back. */
                   3213:                                pte = get_pte(pgva);
                   3214:                        }
                   3215: #endif
                   3216:                        if (IS_MAIN_MEM(pte)) {
                   3217:                                save_modref_bits(pte);
                   3218:                        }
                   3219:                        pte &= ~(PG_WRITE | PG_MODREF);
                   3220:                        set_pte(pgva, pte);
                   3221:                }
                   3222:        }
                   3223: }
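                          /*
                           * Worked sketch of the write-protect step above (bit names
                           * symbolic; this is not the literal sun2 PTE layout):
                           *
                           *      pte = PG_VALID | PG_WRITE | PG_MODREF | PA_PGNUM(pa);
                           *      pte &= ~(PG_WRITE | PG_MODREF);
                           *      now pte == (PG_VALID | PA_PGNUM(pa))
                           *
                           * The mapping stays valid but read-only; the mod/ref state
                           * was saved first by save_modref_bits().
                           */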
                   3224:
                   3225: /*
                   3226:  * Remove write permissions, all in one PMEG,
                    3227:  * where that PMEG is not currently in any context.
                   3228:  */
                   3229: void
                   3230: pmap_protect_noctx(pmap, sva, eva)
                   3231:        pmap_t pmap;
                   3232:        vm_offset_t sva, eva;
                   3233: {
                   3234:        int old_ctx, pte, sme, segnum;
                   3235:        vm_offset_t pgva, segva;
                   3236:
                   3237:        CHECK_SPL();
                   3238:
                   3239: #ifdef DIAGNOSTIC
                   3240:        /* Kernel always in a context (actually, in context zero). */
                   3241:        if (pmap == kernel_pmap)
                   3242:                panic("pmap_protect_noctx: kernel_pmap");
                   3243:        if (pmap->pm_segmap == NULL)
                   3244:                panic("pmap_protect_noctx: null segmap");
                   3245: #endif
                   3246:
                   3247:        segva = m68k_trunc_seg(sva);
                   3248:        segnum = VA_SEGNUM(segva);
                   3249:        sme = pmap->pm_segmap[segnum];
                   3250:        if (sme == SEGINV)
                   3251:                return;
                   3252:
                   3253:        /*
                   3254:         * Switch to the kernel context so we can access the PMEG
                   3255:         * using the temporary segment.
                   3256:         */
                   3257:        old_ctx = get_context();
                   3258:        set_context(KERNEL_CONTEXT);
                   3259:        if (temp_seg_inuse)
                   3260:                panic("pmap_protect_noctx: temp_seg_inuse");
                   3261:        temp_seg_inuse++;
                   3262:        set_segmap(temp_seg_va, sme);
                   3263:        sva += (temp_seg_va - segva);
                   3264:        eva += (temp_seg_va - segva);
                   3265:
                   3266:        /* Remove write permission in the given range. */
                   3267:        for (pgva = sva; pgva < eva; pgva += NBPG) {
                   3268:                pte = get_pte(pgva);
                   3269:                if (pte & PG_VALID) {
                   3270:                        /* No cache flush needed. */
                   3271:                        if (IS_MAIN_MEM(pte)) {
                   3272:                                save_modref_bits(pte);
                   3273:                        }
                   3274:                        pte &= ~(PG_WRITE | PG_MODREF);
                   3275:                        set_pte(pgva, pte);
                   3276:                }
                   3277:        }
                   3278:
                   3279:        /*
                   3280:         * Release the temporary segment, and
                   3281:         * restore the previous context.
                   3282:         */
                   3283:        set_segmap(temp_seg_va, SEGINV);
                   3284:        temp_seg_inuse--;
                   3285:        set_context(old_ctx);
                   3286: }
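                          /*
                           * Worked example of the temporary-segment translation above,
                           * with hypothetical addresses (assuming NBSG = 0x8000):
                           * for segva = 0x40000, sva = 0x42000, temp_seg_va = 0xF00000,
                           * sva becomes 0xF02000 -- the same 0x2000 offset within the
                           * PMEG, now reachable through the kernel temporary segment.
                           */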
                   3287:
                   3288:
                   3289: /*
                   3290:  *     Remove the given range of addresses from the specified map.
                   3291:  *
                   3292:  *     It is assumed that the start and end are properly
                   3293:  *     rounded to the page size.
                   3294:  */
                   3295: void
                   3296: pmap_remove(pmap, sva, eva)
                   3297:        pmap_t pmap;
                   3298:        vm_offset_t sva, eva;
                   3299: {
                   3300:        vm_offset_t va, neva;
                   3301:        int segnum;
                   3302:
                   3303:        if (pmap == NULL)
                   3304:                return;
                   3305:
                   3306: #ifdef PMAP_DEBUG
                   3307:        if ((pmap_debug & PMD_REMOVE) ||
                   3308:                ((sva <= pmap_db_watchva && eva > pmap_db_watchva)))
                   3309:                printf("pmap_remove(%p, 0x%lx, 0x%lx)\n", pmap, sva, eva);
                   3310: #endif
                   3311:
                   3312:        if (pmap == kernel_pmap) {
                   3313:                if (sva < virtual_avail)
                   3314:                        sva = virtual_avail;
                   3315:                if (eva > DVMA_MAP_END) {
                   3316: #ifdef PMAP_DEBUG
                   3317:                        db_printf("pmap_remove: eva=0x%lx\n", eva);
                   3318:                        Debugger();
                   3319: #endif
                   3320:                        eva = DVMA_MAP_END;
                   3321:                }
                   3322:        } else {
                   3323:                if (eva > VM_MAXUSER_ADDRESS)
                   3324:                        eva = VM_MAXUSER_ADDRESS;
                   3325:        }
                   3326:
                   3327:        va = sva;
                   3328:        segnum = VA_SEGNUM(va);
                   3329:        while (va < eva) {
                   3330:                neva = m68k_trunc_seg(va) + NBSG;
                   3331:                if (neva > eva)
                   3332:                        neva = eva;
                   3333:                if (pmap->pm_segmap[segnum] != SEGINV)
                   3334:                        pmap_remove1(pmap, va, neva);
                   3335:                va = neva;
                   3336:                segnum++;
                   3337:        }
                   3338: }
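                          /*
                           * Example of the segment walk above (hypothetical addresses,
                           * NBSG = 0x8000, all three segments mapped): for sva = 0x46000
                           * and eva = 0x52000 the loop makes three calls,
                           *      pmap_remove1(pmap, 0x46000, 0x48000)
                           *      pmap_remove1(pmap, 0x48000, 0x50000)
                           *      pmap_remove1(pmap, 0x50000, 0x52000)
                           * so that each call stays within a single PMEG.
                           */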
                   3339:
                   3340: /*
                    3341:  * Remove mappings (kernel or user), all within one segment.
                   3342:  */
                   3343: void
                   3344: pmap_remove1(pmap, sva, eva)
                   3345:        pmap_t pmap;
                   3346:        vm_offset_t sva, eva;
                   3347: {
                   3348:        int old_ctx, s, sme;
                   3349:        boolean_t in_ctx;
                   3350:
1.2       fredette 3351:        s = splvm();
1.1       fredette 3352:
                   3353: #ifdef DIAGNOSTIC
                   3354:        if (m68k_trunc_seg(sva) != m68k_trunc_seg(eva-1))
                   3355:                panic("pmap_remove1: bad range!");
                   3356: #endif
                   3357:
                   3358:        if (pmap == kernel_pmap) {
                   3359:                old_ctx = get_context();
                   3360:                set_context(KERNEL_CONTEXT);
                   3361:                sme = get_segmap(sva);
                   3362:                if (sme != SEGINV)
                   3363:                        pmap_remove_mmu(pmap, sva, eva);
                   3364:                set_context(old_ctx);
                   3365:                goto out;
                   3366:        }
                   3367:        /* It is a user pmap. */
                   3368:
                   3369:        /* There is a PMEG, but maybe not active. */
                   3370:        old_ctx = INVALID_CONTEXT;
                   3371:        in_ctx = FALSE;
                   3372:        if (has_context(pmap)) {
                   3373:                /* Temporary context change. */
                   3374:                old_ctx = get_context();
                   3375:                set_context(pmap->pm_ctxnum);
                   3376:                sme = get_segmap(sva);
                   3377:                if (sme != SEGINV)
                   3378:                        in_ctx = TRUE;
                   3379:        }
                   3380:
                   3381:        if (in_ctx == TRUE)
                   3382:                pmap_remove_mmu(pmap, sva, eva);
                   3383:        else
                   3384:                pmap_remove_noctx(pmap, sva, eva);
                   3385:
                   3386:        if (old_ctx != INVALID_CONTEXT) {
                   3387:                /* Restore previous context. */
                   3388:                set_context(old_ctx);
                   3389:        }
                   3390:
                   3391: out:
                   3392:        splx(s);
                   3393: }
                   3394:
                   3395: /*
                   3396:  * Remove some mappings, all in one PMEG,
                   3397:  * where that PMEG is currently in the MMU.
                   3398:  * The current context is already correct.
                   3399:  * If no PTEs remain valid in the PMEG, free it.
                   3400:  */
                   3401: void
                   3402: pmap_remove_mmu(pmap, sva, eva)
                   3403:        pmap_t pmap;
                   3404:        vm_offset_t sva, eva;
                   3405: {
                   3406:        pmeg_t pmegp;
                   3407:        vm_offset_t pgva, segva;
                   3408:        int pte, sme;
                   3409: #ifdef HAVECACHE
                   3410:        int flush_by_page = 0;
                   3411: #endif
                   3412:
                   3413:        CHECK_SPL();
                   3414:
                   3415: #ifdef DIAGNOSTIC
                    3416:        if (pmap->pm_ctxnum != get_context())
                    3417:                panic("pmap_remove_mmu: wrong context");
                   3418: #endif
                   3419:
                   3420:        segva = m68k_trunc_seg(sva);
                   3421:        sme = get_segmap(segva);
                   3422:
                   3423: #ifdef DIAGNOSTIC
                   3424:        /* Make sure it is valid and known. */
                   3425:        if (sme == SEGINV)
                   3426:                panic("pmap_remove_mmu: SEGINV");
                   3427:        if (pmap->pm_segmap && (pmap->pm_segmap[VA_SEGNUM(segva)] != sme))
                   3428:                panic("pmap_remove_mmu: incorrect sme, va=0x%lx", segva);
                   3429: #endif
                   3430:
                   3431:        pmegp = pmeg_p(sme);
                   3432:        /* have pmeg, will travel */
                   3433:
                   3434: #ifdef DIAGNOSTIC
                   3435:        /* Make sure we own the pmeg, right va, etc. */
                   3436:        if ((pmegp->pmeg_va != segva) ||
                   3437:                (pmegp->pmeg_owner != pmap) ||
                   3438:                (pmegp->pmeg_version != pmap->pm_version))
                   3439:        {
                   3440:                panic("pmap_remove_mmu: bad pmeg=%p", pmegp);
                   3441:        }
                   3442:        if (pmegp->pmeg_vpages <= 0)
                   3443:                panic("pmap_remove_mmu: no valid pages?");
                   3444: #endif
                   3445:
                   3446: #ifdef HAVECACHE
                   3447:        if (cache_size) {
                   3448:                /*
                   3449:                 * If the range to be removed is larger than the cache,
                   3450:                 * it will be cheaper to flush this segment entirely.
                   3451:                 */
                   3452:                if (cache_size < (eva - sva)) {
                   3453:                        /* cheaper to flush whole segment */
                   3454:                        cache_flush_segment(segva);
                   3455:                } else {
                   3456:                        flush_by_page = 1;
                   3457:                }
                   3458:        }
                   3459: #endif
                   3460:
                   3461:        /* Invalidate the PTEs in the given range. */
                   3462:        for (pgva = sva; pgva < eva; pgva += NBPG) {
                   3463:                pte = get_pte(pgva);
                   3464:                if (pte & PG_VALID) {
                   3465: #ifdef HAVECACHE
                   3466:                        if (flush_by_page) {
                   3467:                                cache_flush_page(pgva);
                   3468:                                /* Get fresh mod/ref bits from write-back. */
                   3469:                                pte = get_pte(pgva);
                   3470:                        }
                   3471: #endif
                   3472:                        if (IS_MAIN_MEM(pte)) {
                   3473:                                save_modref_bits(pte);
                   3474:                                pv_unlink(pmap, pte, pgva);
                   3475:                        }
                   3476: #ifdef PMAP_DEBUG
                   3477:                        if ((pmap_debug & PMD_SETPTE) || (pgva == pmap_db_watchva)) {
                   3478:                                printf("pmap: set_pte pmap=%p va=0x%lx"
                   3479:                                           " old=0x%x new=0x%x (rrmmu)\n",
                   3480:                                           pmap, pgva, pte, PG_INVAL);
                   3481:                        }
                   3482: #endif
                   3483:                        set_pte(pgva, PG_INVAL);
                   3484:                        pmegp->pmeg_vpages--;
                   3485:                }
                   3486:        }
                   3487:
                   3488:        if (pmegp->pmeg_vpages <= 0) {
                   3489:                /* We are done with this pmeg. */
                   3490:                if (is_pmeg_wired(pmegp)) {
                   3491: #ifdef PMAP_DEBUG
                   3492:                        if (pmap_debug & PMD_WIRING) {
                   3493:                                db_printf("pmap: removing wired pmeg: %p\n", pmegp);
                   3494:                                Debugger();
                   3495:                        }
                   3496: #endif /* PMAP_DEBUG */
                   3497:                }
                   3498:
                   3499: #ifdef PMAP_DEBUG
                   3500:                if (pmap_debug & PMD_SEGMAP) {
                   3501:                        printf("pmap: set_segmap ctx=%d v=0x%lx old=0x%x new=ff (rm)\n",
                   3502:                            pmap->pm_ctxnum, segva, pmegp->pmeg_index);
                   3503:                }
                   3504:                pmeg_verify_empty(segva);
                   3505: #endif
                   3506:
                   3507:                /* Remove it from the MMU. */
                    3508:                /* Did cache flush above. */
                    3509:                set_segmap(segva, SEGINV);
                   3515:                pmap->pm_segmap[VA_SEGNUM(segva)] = SEGINV;
                   3516:                /* Now, put it on the free list. */
                   3517:                pmeg_free(pmegp);
                   3518:        }
                   3519: }
                   3520:
                   3521: /*
                   3522:  * Remove some mappings, all in one PMEG,
                    3523:  * where that PMEG is not currently in any context.
                   3524:  */
                   3525: void
                   3526: pmap_remove_noctx(pmap, sva, eva)
                   3527:        pmap_t pmap;
                   3528:        vm_offset_t sva, eva;
                   3529: {
                   3530:        pmeg_t pmegp;
                   3531:        int old_ctx, pte, sme, segnum;
                   3532:        vm_offset_t pgva, segva;
                   3533:
                   3534:        CHECK_SPL();
                   3535:
                   3536: #ifdef DIAGNOSTIC
                   3537:        /* Kernel always in a context (actually, in context zero). */
                   3538:        if (pmap == kernel_pmap)
                   3539:                panic("pmap_remove_noctx: kernel_pmap");
                   3540:        if (pmap->pm_segmap == NULL)
                   3541:                panic("pmap_remove_noctx: null segmap");
                   3542: #endif
                   3543:
                   3544:        segva = m68k_trunc_seg(sva);
                   3545:        segnum = VA_SEGNUM(segva);
                   3546:        sme = pmap->pm_segmap[segnum];
                   3547:        if (sme == SEGINV)
                   3548:                return;
                   3549:        pmegp = pmeg_p(sme);
                   3550:
                   3551:        /*
                   3552:         * Switch to the kernel context so we can access the PMEG
                   3553:         * using the temporary segment.
                   3554:         */
                   3555:        old_ctx = get_context();
                   3556:        set_context(KERNEL_CONTEXT);
                   3557:        if (temp_seg_inuse)
                   3558:                panic("pmap_remove_noctx: temp_seg_inuse");
                   3559:        temp_seg_inuse++;
                   3560:        set_segmap(temp_seg_va, sme);
                   3561:        sva += (temp_seg_va - segva);
                   3562:        eva += (temp_seg_va - segva);
                   3563:
                   3564:        /* Invalidate the PTEs in the given range. */
                   3565:        for (pgva = sva; pgva < eva; pgva += NBPG) {
                   3566:                pte = get_pte(pgva);
                   3567:                if (pte & PG_VALID) {
                   3568:                        /* No cache flush needed. */
                   3569:                        if (IS_MAIN_MEM(pte)) {
                   3570:                                save_modref_bits(pte);
                   3571:                                pv_unlink(pmap, pte, pgva - (temp_seg_va - segva));
                   3572:                        }
                   3573: #ifdef PMAP_DEBUG
                   3574:                        if ((pmap_debug & PMD_SETPTE) || (pgva == pmap_db_watchva)) {
                   3575:                                printf("pmap: set_pte pmap=%p va=0x%lx"
                   3576:                                           " old=0x%x new=0x%x (rrncx)\n",
                   3577:                                           pmap, pgva, pte, PG_INVAL);
                   3578:                        }
                   3579: #endif
                   3580:                        set_pte(pgva, PG_INVAL);
                   3581:                        pmegp->pmeg_vpages--;
                   3582:                }
                   3583:        }
                   3584:
                   3585:        /*
                   3586:         * Release the temporary segment, and
                   3587:         * restore the previous context.
                   3588:         */
                   3589:        set_segmap(temp_seg_va, SEGINV);
                   3590:        temp_seg_inuse--;
                   3591:        set_context(old_ctx);
                   3592:
                   3593:        if (pmegp->pmeg_vpages <= 0) {
                   3594:                /* We are done with this pmeg. */
                   3595:                if (is_pmeg_wired(pmegp)) {
                   3596: #ifdef PMAP_DEBUG
                   3597:                        if (pmap_debug & PMD_WIRING) {
                   3598:                                db_printf("pmap: removing wired pmeg: %p\n", pmegp);
                   3599:                                Debugger();
                   3600:                        }
                   3601: #endif /* PMAP_DEBUG */
                   3602:                }
                   3603:
                   3604:                pmap->pm_segmap[segnum] = SEGINV;
                   3605:                pmeg_free(pmegp);
                   3606:        }
                   3607: }
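                          /*
                           * Note on the pv_unlink() call above: pgva walks the kernel
                           * temporary segment, so the original user VA is recovered by
                           * subtracting the same (temp_seg_va - segva) displacement that
                           * was added to sva and eva.  With hypothetical addresses
                           * (pgva = 0xF02000, segva = 0x40000, temp_seg_va = 0xF00000),
                           * the call unlinks the mapping at VA 0x42000.
                           */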
                   3608:
                   3609:
                   3610: /*
                   3611:  * Count resident pages in this pmap.
                   3612:  * See: kern_sysctl.c:pmap_resident_count
                   3613:  */
                   3614: segsz_t
                   3615: pmap_resident_pages(pmap)
                   3616:        pmap_t pmap;
                   3617: {
                   3618:        int i, sme, pages;
                   3619:        pmeg_t pmeg;
                   3620:
                    3621:        if (pmap->pm_segmap == NULL)
                   3622:                return (0);
                   3623:
                   3624:        pages = 0;
                   3625:        for (i = 0; i < NUSEG; i++) {
                   3626:                sme = pmap->pm_segmap[i];
                   3627:                if (sme != SEGINV) {
                   3628:                        pmeg = pmeg_p(sme);
                   3629:                        pages += pmeg->pmeg_vpages;
                   3630:                }
                   3631:        }
                   3632:        return (pages);
                   3633: }
                   3634:
                   3635: /*
                   3636:  * Count wired pages in this pmap.
                   3637:  * See vm_mmap.c:pmap_wired_count
                   3638:  */
                   3639: segsz_t
                   3640: pmap_wired_pages(pmap)
                   3641:        pmap_t pmap;
                   3642: {
                   3643:        int i, mask, sme, pages;
                   3644:        pmeg_t pmeg;
                   3645:
                    3646:        if (pmap->pm_segmap == NULL)
                   3647:                return (0);
                   3648:
                   3649:        pages = 0;
                   3650:        for (i = 0; i < NUSEG; i++) {
                   3651:                sme = pmap->pm_segmap[i];
                   3652:                if (sme != SEGINV) {
                   3653:                        pmeg = pmeg_p(sme);
                   3654:                        mask = 0x8000;
                   3655:                        do {
                   3656:                                if (pmeg->pmeg_wired & mask)
                   3657:                                        pages++;
                   3658:                                mask = (mask >> 1);
                   3659:                        } while (mask);
                   3660:                }
                   3661:        }
                   3662:        return (pages);
                   3663: }
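                          /*
                           * Example of the scan above: pmeg_wired is treated as a 16-bit
                           * mask with one bit per page in the PMEG.  With
                           * pmeg_wired = 0x8001 the loop counts two wired pages.  (Which
                           * bit maps to which page is not relied upon here; only the
                           * population count matters.)
                           */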
                   3664:
                   3665:
                   3666: /*
                   3667:  *     pmap_copy_page copies the specified (machine independent)
                    3668:  *     page by mapping both pages into virtual memory and using
                    3669:  *     copypage() to copy the data, one machine dependent page at
                    3670:  *     a time.
                   3671:  */
                   3672: void
                   3673: pmap_copy_page(src, dst)
                   3674:        vm_offset_t     src, dst;
                   3675: {
                   3676:        int pte;
                   3677:        int s;
                   3678:        int saved_ctx;
                   3679:
1.2       fredette 3680:        s = splvm();
1.1       fredette 3681:
                   3682: #ifdef PMAP_DEBUG
                   3683:        if (pmap_debug & PMD_COW)
                   3684:                printf("pmap_copy_page: 0x%lx -> 0x%lx\n", src, dst);
                   3685: #endif
                   3686:
                   3687:        /*
                   3688:         * Temporarily switch to the kernel context to use the
                   3689:         * tmp_vpages.
                   3690:         */
                   3691:        saved_ctx = get_context();
                   3692:        set_context(KERNEL_CONTEXT);
                   3693:        if (tmp_vpages_inuse)
                   3694:                panic("pmap_copy_page: vpages inuse");
                   3695:        tmp_vpages_inuse++;
                   3696:
                   3697:        /* PG_PERM is short for (PG_VALID|PG_WRITE|PG_SYSTEM|PG_NC) */
                    3698:        /* All mappings to tmp_vpages are non-cached, so no flush. */
                   3699:        pte = PG_PERM | PA_PGNUM(src);
                   3700:        set_pte(tmp_vpages[0], pte);
                   3701:        pte = PG_PERM | PA_PGNUM(dst);
                   3702:        set_pte(tmp_vpages[1], pte);
                   3703:        copypage((char *) tmp_vpages[0], (char *) tmp_vpages[1]);
                   3704:        set_pte(tmp_vpages[0], PG_INVAL);
                   3705:        set_pte(tmp_vpages[1], PG_INVAL);
                   3706:
                   3707:        tmp_vpages_inuse--;
                   3708:        set_context(saved_ctx);
                   3709:
                   3710:        splx(s);
                   3711: }
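                          /*
                           * Usage sketch (hypothetical, page-aligned physical
                           * addresses): pmap_copy_page(0x10000, 0x22000) maps both
                           * pages at tmp_vpages[0] and tmp_vpages[1], copies the source
                           * page to the destination, then invalidates both temporary
                           * mappings.  The mappings are non-cached, so no flush follows.
                           */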
                   3712:
                   3713: /*
                   3714:  *     pmap_zero_page zeros the specified (machine independent)
                   3715:  *     page by mapping the page into virtual memory and using
                    3716:  *     zeropage() to clear its contents, one machine dependent page
                   3717:  *     at a time.
                   3718:  */
                   3719: void
                   3720: pmap_zero_page(pa)
                   3721:        vm_offset_t     pa;
                   3722: {
                   3723:        int pte;
                   3724:        int s;
                   3725:        int saved_ctx;
                   3726:
1.2       fredette 3727:        s = splvm();
1.1       fredette 3728:
                   3729: #ifdef PMAP_DEBUG
                   3730:        if (pmap_debug & PMD_COW)
                   3731:                printf("pmap_zero_page: 0x%lx\n", pa);
                   3732: #endif
                   3733:
                   3734:        /*
                   3735:         * Temporarily switch to the kernel context to use the
                   3736:         * tmp_vpages.
                   3737:         */
                   3738:        saved_ctx = get_context();
                   3739:        set_context(KERNEL_CONTEXT);
                   3740:        if (tmp_vpages_inuse)
                   3741:                panic("pmap_zero_page: vpages inuse");
                   3742:        tmp_vpages_inuse++;
                   3743:
                   3744:        /* PG_PERM is short for (PG_VALID|PG_WRITE|PG_SYSTEM|PG_NC) */
                    3745:        /* All mappings to tmp_vpages are non-cached, so no flush. */
                   3746:        pte = PG_PERM | PA_PGNUM(pa);
                   3747:        set_pte(tmp_vpages[0], pte);
                   3748:        zeropage((char *) tmp_vpages[0]);
                   3749:        set_pte(tmp_vpages[0], PG_INVAL);
                   3750:
                   3751:        tmp_vpages_inuse--;
                   3752:        set_context(saved_ctx);
                   3753:
                   3754:        splx(s);
                   3755: }
                   3756:
                   3757: /*
                   3758:  *     Routine:        pmap_collect
                   3759:  *     Function:
                   3760:  *             Garbage collects the physical map system for
                   3761:  *             pages which are no longer used.
                   3762:  *             Success need not be guaranteed -- that is, there
                   3763:  *             may well be pages which are not referenced, but
                   3764:  *             others may be collected.
                   3765:  *     Usage:
                   3766:  *             Called by the pageout daemon when pages are scarce.
                   3767:  */
                   3768: void
                   3769: pmap_collect(pmap)
                   3770:        pmap_t pmap;
                   3771: {
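                                  /*
                                   * Nothing to do here.  In this pmap, PMEGs are
                                   * effectively reclaimed on demand when an allocation
                                   * needs one, so there is no separate collection pass.
                                   */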
                   3772: }
                   3773:
                   3774: /*
                   3775:  * Find first virtual address >= *va that is
                   3776:  * least likely to cause cache aliases.
                   3777:  * (This will just seg-align mappings.)
                   3778:  */
                   3779: void
                   3780: pmap_prefer(fo, va)
                   3781:        register vm_offset_t fo;
                   3782:        register vm_offset_t *va;
                   3783: {
                   3784:        register long   d;
                   3785:
                   3786:        d = fo - *va;
                   3787:        d &= SEGOFSET;
                   3788:        *va += d;
                   3789: }
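                          /*
                           * Worked example (hypothetical addresses, and assuming
                           * SEGOFSET = 0x7FFF): for fo = 0x12345 and *va = 0x40000,
                           *      d = (0x12345 - 0x40000) & 0x7FFF = 0x2345
                           * so *va becomes 0x42345, and (*va & SEGOFSET) now equals
                           * (fo & SEGOFSET), i.e. the mapping is seg-aligned with fo.
                           */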
                   3790:
                   3791: /*
                   3792:  * Fill in the sun2-specific part of the kernel core header
                   3793:  * for dumpsys().  (See machdep.c for the rest.)
                   3794:  */
                   3795: void
                   3796: pmap_kcore_hdr(sh)
                   3797:        struct sun2_kcore_hdr *sh;
                   3798: {
                   3799:        vm_offset_t va;
                   3800:        u_char *cp, *ep;
                   3801:        int saved_ctx;
                   3802:
                   3803:        sh->segshift = SEGSHIFT;
                   3804:        sh->pg_frame = PG_FRAME;
                   3805:        sh->pg_valid = PG_VALID;
                   3806:
                   3807:        /* Copy the kernel segmap (256 bytes). */
                   3808:        va = KERNBASE;
                   3809:        cp = sh->ksegmap;
                   3810:        ep = cp + sizeof(sh->ksegmap);
                   3811:        saved_ctx = get_context();
                   3812:        set_context(KERNEL_CONTEXT);
                   3813:        do {
                   3814:                *cp = get_segmap(va);
                   3815:                va += NBSG;
                   3816:                cp++;
                   3817:        } while (cp < ep);
                   3818:        set_context(saved_ctx);
                   3819: }
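                          /*
                           * Size check on the copy above (assuming NBSG = 0x8000):
                           * the 256 one-byte segmap entries, one per NBSG of virtual
                           * space, describe 256 * 32KB = 8MB of kernel address space
                           * starting at KERNBASE.
                           */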
                   3820:
                   3821: /*
                   3822:  * Copy the pagemap RAM into the passed buffer (one page)
                   3823:  * starting at OFF in the pagemap RAM.
                   3824:  */
                   3825: void
                   3826: pmap_get_pagemap(pt, off)
                   3827:        int *pt;
                   3828:        int off;
                   3829: {
                   3830:        vm_offset_t va, va_end;
                   3831:        int sme, sme_end;       /* SegMap Entry numbers */
                   3832:        int saved_ctx;
                   3833:
                   3834:        sme = (off / (NPAGSEG * sizeof(*pt)));  /* PMEG to start on */
                   3835:        sme_end = sme + (NBPG / (NPAGSEG * sizeof(*pt))); /* where to stop */
                   3836:        va_end = temp_seg_va + NBSG;
                   3837:
                   3838:        saved_ctx = get_context();
                   3839:        set_context(KERNEL_CONTEXT);
                   3840:        do {
                   3841:                set_segmap(temp_seg_va, sme);
                   3842:                va = temp_seg_va;
                   3843:                do {
                   3844:                        *pt++ = get_pte(va);
                   3845:                        va += NBPG;
                   3846:                } while (va < va_end);
                   3847:                sme++;
                   3848:        } while (sme < sme_end);
                   3849:        set_segmap(temp_seg_va, SEGINV);
                   3850:        set_context(saved_ctx);
                   3851: }
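                          /*
                           * Arithmetic sketch for the bounds above, with assumed sun2
                           * values (NPAGSEG = 16 pages per PMEG, 4-byte entries, and
                           * NBPG = 2048): each PMEG contributes 16 * 4 = 64 bytes, so
                           * one NBPG-sized buffer holds 2048 / 64 = 32 PMEGs, and
                           * off = 128 starts the copy at PMEG 2.
                           */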
                   3852:
                   3853:
                   3854: /*
                   3855:  * Helper functions for changing unloaded PMEGs
                   3856:  */
                   3857:
                   3858: static int
                   3859: get_pte_pmeg(int pmeg_num, int page_num)
                   3860: {
                   3861:        vm_offset_t va;
                   3862:        int pte;
                   3863:        int saved_ctx;
                   3864:
                   3865:        CHECK_SPL();
                   3866:        saved_ctx = get_context();
                   3867:        set_context(KERNEL_CONTEXT);
                   3868:        if (temp_seg_inuse)
                   3869:                panic("get_pte_pmeg: temp_seg_inuse");
                   3870:        temp_seg_inuse++;
                   3871:
                   3872:        va = temp_seg_va;
                   3873:        set_segmap(temp_seg_va, pmeg_num);
                   3874:        va += NBPG*page_num;
                   3875:        pte = get_pte(va);
                   3876:        set_segmap(temp_seg_va, SEGINV);
                   3877:
                   3878:        temp_seg_inuse--;
                   3879:        set_context(saved_ctx);
                   3880:        return pte;
                   3881: }
                   3882:
                   3883: static void
                   3884: set_pte_pmeg(int pmeg_num, int page_num, int pte)
                   3885: {
                   3886:        vm_offset_t va;
                   3887:        int saved_ctx;
                   3888:
                   3889:        CHECK_SPL();
                   3890:        saved_ctx = get_context();
                   3891:        set_context(KERNEL_CONTEXT);
                   3892:        if (temp_seg_inuse)
                   3893:                panic("set_pte_pmeg: temp_seg_inuse");
                   3894:        temp_seg_inuse++;
                   3895:
                   3896:        /* We never access data in temp_seg_va so no need to flush. */
                   3897:        va = temp_seg_va;
                   3898:        set_segmap(temp_seg_va, pmeg_num);
                   3899:        va += NBPG*page_num;
                   3900:        set_pte(va, pte);
                   3901:        set_segmap(temp_seg_va, SEGINV);
                   3902:
                   3903:        temp_seg_inuse--;
                   3904:        set_context(saved_ctx);
                   3905: }
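                          /*
                           * Usage sketch: these helpers read or write one PTE of a PMEG
                           * that is not loaded in any context, e.g.
                           *
                           *      pte = get_pte_pmeg(sme, 3);
                           *      set_pte_pmeg(sme, 3, pte & ~PG_WRITE);
                           *
                           * borrowing temp_seg_va in the kernel context and leaving the
                           * user contexts untouched.
                           */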
                   3906:
                   3907: /*
                   3908:  *     Routine:        pmap_procwr
                   3909:  *
                   3910:  *     Function:
                   3911:  *             Synchronize caches corresponding to [addr, addr+len) in p.
                   3912:  */
                   3913: void
                   3914: pmap_procwr(p, va, len)
                   3915:        struct proc     *p;
                   3916:        vaddr_t         va;
                   3917:        size_t          len;
                   3918: {
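                                  /*
                                   * Nothing to do here: this assumes the sun2 needs no
                                   * explicit cache synchronization after instructions
                                   * are written into a process's address space.
                                   */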
                   3919: }
                   3920:
                   3921:
                   3922: #ifdef PMAP_DEBUG
                   3923: /* Things to call from the debugger. */
                   3924:
                   3925: void
                   3926: pmap_print(pmap)
                   3927:        pmap_t pmap;
                   3928: {
                   3929:        db_printf(" pm_ctxnum=%d\n", pmap->pm_ctxnum);
                   3930:        db_printf(" pm_version=0x%x\n", pmap->pm_version);
                   3931:        db_printf(" pm_segmap=%p\n", pmap->pm_segmap);
                   3932: }
                   3933:
                   3934: void
                   3935: pmeg_print(pmegp)
                   3936:        pmeg_t pmegp;
                   3937: {
                   3938:        db_printf("link_next=%p  link_prev=%p\n",
                   3939:            pmegp->pmeg_link.tqe_next,
                   3940:            pmegp->pmeg_link.tqe_prev);
                   3941:        db_printf("index=0x%x owner=%p own_vers=0x%x\n",
                   3942:            pmegp->pmeg_index, pmegp->pmeg_owner, pmegp->pmeg_version);
                   3943:        db_printf("va=0x%lx wired=0x%x reserved=0x%x vpgs=0x%x qstate=0x%x\n",
                   3944:            pmegp->pmeg_va, pmegp->pmeg_wired,
                   3945:            pmegp->pmeg_reserved, pmegp->pmeg_vpages,
                   3946:            pmegp->pmeg_qstate);
                   3947: }
                   3948:
                   3949: void
                   3950: pv_print(pa)
                   3951:        vm_offset_t pa;
                   3952: {
                   3953:        pv_entry_t pv;
                   3954:        int idx;
                   3955:
                   3956:        if (!pv_initialized) {
                   3957:                db_printf("no pv_flags_tbl\n");
                   3958:                return;
                   3959:        }
                   3960:        idx = PA_PGNUM(pa);
                   3961:        if (idx >= physmem) {
                   3962:                db_printf("bad address\n");
                   3963:                return;
                   3964:        }
                   3965:        db_printf("pa=0x%lx, flags=0x%x\n",
                   3966:                          pa, pv_flags_tbl[idx]);
                   3967:
                   3968:        pv = pv_head_tbl[idx];
                   3969:        while (pv) {
                   3970:                db_printf(" pv_entry %p pmap %p va 0x%lx next %p\n",
                   3971:                           pv, pv->pv_pmap, pv->pv_va, pv->pv_next);
                   3972:                pv = pv->pv_next;
                   3973:        }
                   3974: }
                   3975: #endif /* PMAP_DEBUG */
                   3976:
                   3977: /*
                   3978:  * Local Variables:
                   3979:  * tab-width: 4
                   3980:  * End:
                   3981:  */
