Annotation of src/sys/arch/m68k/m68k/pmap_motorola.c, Revision 1.35.6.2

1.35.6.1  mjf         1: /*     $NetBSD$        */
1.1       chs         2:
                      3: /*-
                      4:  * Copyright (c) 1999 The NetBSD Foundation, Inc.
                      5:  * All rights reserved.
                      6:  *
                      7:  * This code is derived from software contributed to The NetBSD Foundation
                      8:  * by Jason R. Thorpe.
                      9:  *
                     10:  * Redistribution and use in source and binary forms, with or without
                     11:  * modification, are permitted provided that the following conditions
                     12:  * are met:
                     13:  * 1. Redistributions of source code must retain the above copyright
                     14:  *    notice, this list of conditions and the following disclaimer.
                     15:  * 2. Redistributions in binary form must reproduce the above copyright
                     16:  *    notice, this list of conditions and the following disclaimer in the
                     17:  *    documentation and/or other materials provided with the distribution.
                     18:  *
                     19:  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
                     20:  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
                     21:  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
                     22:  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
                     23:  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
                     24:  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
                     25:  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
                     26:  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
                     27:  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
                     28:  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
                     29:  * POSSIBILITY OF SUCH DAMAGE.
                     30:  */
                     31:
                     32: /*
                     33:  * Copyright (c) 1991, 1993
                     34:  *     The Regents of the University of California.  All rights reserved.
                     35:  *
                     36:  * This code is derived from software contributed to Berkeley by
                     37:  * the Systems Programming Group of the University of Utah Computer
                     38:  * Science Department.
                     39:  *
                     40:  * Redistribution and use in source and binary forms, with or without
                     41:  * modification, are permitted provided that the following conditions
                     42:  * are met:
                     43:  * 1. Redistributions of source code must retain the above copyright
                     44:  *    notice, this list of conditions and the following disclaimer.
                     45:  * 2. Redistributions in binary form must reproduce the above copyright
                     46:  *    notice, this list of conditions and the following disclaimer in the
                     47:  *    documentation and/or other materials provided with the distribution.
1.6       agc        48:  * 3. Neither the name of the University nor the names of its contributors
1.1       chs        49:  *    may be used to endorse or promote products derived from this software
                     50:  *    without specific prior written permission.
                     51:  *
                     52:  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
                     53:  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
                     54:  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
                     55:  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
                     56:  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
                     57:  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
                     58:  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
                     59:  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
                     60:  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
                     61:  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
                     62:  * SUCH DAMAGE.
                     63:  *
                     64:  *     @(#)pmap.c      8.6 (Berkeley) 5/27/94
                     65:  */
                     66:
                     67: /*
                     68:  * Motorola m68k-family physical map management code.
                     69:  *
                     70:  * Supports:
                     71:  *     68020 with 68851 MMU
                     72:  *     68030 with on-chip MMU
                     73:  *     68040 with on-chip MMU
                     74:  *     68060 with on-chip MMU
                     75:  *
                     76:  * Notes:
                     77:  *     Don't even pay lip service to multiprocessor support.
                     78:  *
                     79:  *     We assume TLB entries don't have process tags (except for the
                     80:  *     supervisor/user distinction) so we only invalidate TLB entries
                     81:  *     when changing mappings for the current (or kernel) pmap.  This is
                     82:  *     technically not true for the 68851 but we flush the TLB on every
                     83:  *     context switch, so it effectively winds up that way.
                     84:  *
                     85:  *     Bitwise and/or operations are significantly faster than bitfield
                     86:  *     references so we use them when accessing STE/PTEs in the pmap_pte_*
                     87:  *     macros.  Note also that the two are not always equivalent; e.g.:
                     88:  *             (*pte & PG_PROT) [4] != pte->pg_prot [1]
                     89:  *     and a couple of routines that deal with protection and wiring take
                     90:  *     some shortcuts that assume the and/or definitions.
                     91:  */
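/*
 * Editor's sketch (not from the original source): the "[4] != [1]" note
 * above can be read with a hypothetical PTE layout.  A mask test returns
 * the bit in place, while a bitfield read returns it normalized, so the
 * two agree as truth values but not as integers:
 *
 *	typedef u_int pt_entry_t;
 *	#define PG_PROT	0x00000004	   assumed mask for this sketch
 *
 *	pt_entry_t *pte = ...;
 *	(*pte & PG_PROT)	   yields 0 or 4 (the "[4]")
 *	pte_bits->pg_prot	   a bitfield view would yield 0 or 1 (the "[1]")
 *
 * Macros such as pmap_pte_set_prot() below exploit the mask form directly.
 */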
                     92:
                     93: /*
                     94:  *     Manages physical address maps.
                     95:  *
                     96:  *     In addition to hardware address maps, this
                     97:  *     module is called upon to provide software-use-only
                     98:  *     maps which may or may not be stored in the same
                     99:  *     form as hardware maps.  These pseudo-maps are
                    100:  *     used to store intermediate results from copy
                    101:  *     operations to and from address spaces.
                    102:  *
                    103:  *     Since the information managed by this module is
                    104:  *     also stored by the logical address mapping module,
                    105:  *     this module may throw away valid virtual-to-physical
                    106:  *     mappings at almost any time.  However, invalidations
                    107:  *     of virtual-to-physical mappings must be done as
                    108:  *     requested.
                    109:  *
                    110:  *     In order to cope with hardware architectures which
                    111:  *     make virtual-to-physical map invalidates expensive,
                     112:  * this module may delay invalidation or protection-reduction
                    113:  *     operations until such time as they are actually
                    114:  *     necessary.  This module is given full information as
                    115:  *     to which processors are currently using which maps,
                    116:  *     and to when physical maps must be made correct.
                    117:  */
                    118:
                    119: #include <sys/cdefs.h>
1.35.6.1  mjf       120: __KERNEL_RCSID(0, "$NetBSD$");
1.1       chs       121:
                    122: #include <sys/param.h>
                    123: #include <sys/systm.h>
                    124: #include <sys/proc.h>
                    125: #include <sys/malloc.h>
                    126: #include <sys/user.h>
                    127: #include <sys/pool.h>
                    128:
                    129: #include <machine/pte.h>
                    130:
                    131: #include <uvm/uvm.h>
                    132:
                    133: #include <machine/cpu.h>
                    134: #include <m68k/cacheops.h>
                    135:
                    136: #ifdef DEBUG
                    137: #define PDB_FOLLOW     0x0001
                    138: #define PDB_INIT       0x0002
                    139: #define PDB_ENTER      0x0004
                    140: #define PDB_REMOVE     0x0008
                    141: #define PDB_CREATE     0x0010
                    142: #define PDB_PTPAGE     0x0020
                    143: #define PDB_CACHE      0x0040
                    144: #define PDB_BITS       0x0080
                    145: #define PDB_COLLECT    0x0100
                    146: #define PDB_PROTECT    0x0200
                    147: #define PDB_SEGTAB     0x0400
                    148: #define PDB_MULTIMAP   0x0800
                    149: #define PDB_PARANOIA   0x2000
                    150: #define PDB_WIRING     0x4000
                    151: #define PDB_PVDUMP     0x8000
                    152:
                    153: int debugmap = 0;
                    154: int pmapdebug = PDB_PARANOIA;
                    155:
                    156: #define        PMAP_DPRINTF(l, x)      if (pmapdebug & (l)) printf x
                    157: #else /* ! DEBUG */
                    158: #define        PMAP_DPRINTF(l, x)      /* nothing */
                    159: #endif /* DEBUG */
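/*
 * Editor's note: the extra parentheses in PMAP_DPRINTF's second argument
 * let an entire printf argument list travel through one macro parameter.
 * A typical (hypothetical) call site:
 *
 *	PMAP_DPRINTF(PDB_ENTER, ("pmap_enter(%p, %lx)\n", pmap, va));
 *
 * expands under DEBUG to
 *
 *	if (pmapdebug & (PDB_ENTER)) printf ("pmap_enter(%p, %lx)\n", pmap, va);
 *
 * and to nothing otherwise.
 */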
                    160:
                    161: /*
                    162:  * Get STEs and PTEs for user/kernel address space
                    163:  */
                    164: #if defined(M68040) || defined(M68060)
                    165: #define        pmap_ste1(m, v) \
                    166:        (&((m)->pm_stab[(vaddr_t)(v) >> SG4_SHIFT1]))
                    167: /* XXX assumes physically contiguous ST pages (if more than one) */
                    168: #define pmap_ste2(m, v) \
                    169:        (&((m)->pm_stab[(st_entry_t *)(*(u_int *)pmap_ste1(m, v) & SG4_ADDR1) \
                    170:                        - (m)->pm_stpa + (((v) & SG4_MASK2) >> SG4_SHIFT2)]))
                    171: #if defined(M68020) || defined(M68030)
                    172: #define        pmap_ste(m, v)  \
                    173:        (&((m)->pm_stab[(vaddr_t)(v) \
                    174:                        >> (mmutype == MMU_68040 ? SG4_SHIFT1 : SG_ISHIFT)]))
                    175: #define pmap_ste_v(m, v) \
                    176:        (mmutype == MMU_68040 \
                    177:         ? ((*pmap_ste1(m, v) & SG_V) && \
                    178:            (*pmap_ste2(m, v) & SG_V)) \
                    179:         : (*pmap_ste(m, v) & SG_V))
                    180: #else
                    181: #define        pmap_ste(m, v)  \
                    182:        (&((m)->pm_stab[(vaddr_t)(v) >> SG4_SHIFT1]))
                    183: #define pmap_ste_v(m, v) \
                    184:        ((*pmap_ste1(m, v) & SG_V) && (*pmap_ste2(m, v) & SG_V))
                    185: #endif
                    186: #else
                    187: #define        pmap_ste(m, v)   (&((m)->pm_stab[(vaddr_t)(v) >> SG_ISHIFT]))
                    188: #define pmap_ste_v(m, v) (*pmap_ste(m, v) & SG_V)
                    189: #endif
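/*
 * Editor's note: on the '040/'060 the segment table has two levels.
 * pmap_ste1() indexes the level-1 table with the top VA bits; pmap_ste2()
 * pulls the level-2 table's *physical* address out of the level-1 entry
 * (SG4_ADDR1) and converts it back to a kernel virtual address by adding
 * the pm_stab/pm_stpa offset -- which is exactly why the XXX comment
 * above requires the ST pages to be physically contiguous.
 */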
                    190:
                    191: #define pmap_pte(m, v) (&((m)->pm_ptab[(vaddr_t)(v) >> PG_SHIFT]))
                    192: #define pmap_pte_pa(pte)       (*(pte) & PG_FRAME)
                    193: #define pmap_pte_w(pte)                (*(pte) & PG_W)
                    194: #define pmap_pte_ci(pte)       (*(pte) & PG_CI)
                    195: #define pmap_pte_m(pte)                (*(pte) & PG_M)
                    196: #define pmap_pte_u(pte)                (*(pte) & PG_U)
                    197: #define pmap_pte_prot(pte)     (*(pte) & PG_PROT)
                    198: #define pmap_pte_v(pte)                (*(pte) & PG_V)
                    199:
                    200: #define pmap_pte_set_w(pte, v) \
                    201:        if (v) *(pte) |= PG_W; else *(pte) &= ~PG_W
                    202: #define pmap_pte_set_prot(pte, v) \
                    203:        if (v) *(pte) |= PG_PROT; else *(pte) &= ~PG_PROT
                    204: #define pmap_pte_w_chg(pte, nw)                ((nw) ^ pmap_pte_w(pte))
                    205: #define pmap_pte_prot_chg(pte, np)     ((np) ^ pmap_pte_prot(pte))
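/*
 * Editor's sketch of the intended call pattern (hypothetical caller): the
 * *_chg() macros XOR the requested state against the current bit, so a
 * caller can skip the PTE write -- and any TLB flush -- when nothing
 * would change:
 *
 *	if (pmap_pte_prot_chg(pte, isro)) {
 *		pmap_pte_set_prot(pte, isro);
 *		... flush the TLB entry if the pmap is active ...
 *	}
 */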
                    206:
                    207: /*
                    208:  * Given a map and a machine independent protection code,
                    209:  * convert to an m68k protection code.
                    210:  */
                    211: #define pte_prot(m, p) (protection_codes[p])
                    212: int    protection_codes[8];
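/*
 * Editor's note: eight entries because the vm_prot_t used as the index is
 * a 3-bit combination of VM_PROT_READ/WRITE/EXECUTE.  The table is
 * assumed to be filled in at bootstrap time, outside this excerpt, along
 * the lines of this hypothetical sketch:
 *
 *	protection_codes[VM_PROT_NONE] = 0;
 *	protection_codes[VM_PROT_READ] = PG_RO;
 *	protection_codes[VM_PROT_READ|VM_PROT_WRITE] = PG_RW;
 */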
                    213:
                    214: /*
                    215:  * Kernel page table page management.
                    216:  */
                    217: struct kpt_page {
                    218:        struct kpt_page *kpt_next;      /* link on either used or free list */
                    219:        vaddr_t         kpt_va;         /* always valid kernel VA */
                    220:        paddr_t         kpt_pa;         /* PA of this page (for speed) */
                    221: };
                    222: struct kpt_page *kpt_free_list, *kpt_used_list;
                    223: struct kpt_page *kpt_pages;
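/*
 * Editor's note: kpt_free_list is a singly-linked stack threaded through
 * kpt_next.  pmap_init() below pushes pages with the equivalent of
 *
 *	kptp->kpt_next = kpt_free_list;
 *	kpt_free_list = kptp;
 *
 * and a consumer would pop with (hypothetical; the consumer lies outside
 * this excerpt):
 *
 *	kptp = kpt_free_list;
 *	if (kptp != NULL)
 *		kpt_free_list = kptp->kpt_next;
 */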
                    224:
                    225: /*
                    226:  * Kernel segment/page table and page table map.
                    227:  * The page table map gives us a level of indirection we need to dynamically
                    228:  * expand the page table.  It is essentially a copy of the segment table
                    229:  * with PTEs instead of STEs.  All are initialized in locore at boot time.
                    230:  * Sysmap will initially contain VM_KERNEL_PT_PAGES pages of PTEs.
                     231:  * Segtabzero is an empty segment table which all processes share until they
                    232:  * reference something.
                    233:  */
                    234: st_entry_t     *Sysseg;
                    235: pt_entry_t     *Sysmap, *Sysptmap;
                    236: st_entry_t     *Segtabzero, *Segtabzeropa;
                    237: vsize_t                Sysptsize = VM_KERNEL_PT_PAGES;
                    238:
                    239: struct pmap    kernel_pmap_store;
                    240: struct vm_map  *st_map, *pt_map;
1.12      yamt      241: struct vm_map_kernel st_map_store, pt_map_store;
1.1       chs       242:
                    243: paddr_t                avail_start;    /* PA of first available physical page */
                    244: paddr_t                avail_end;      /* PA of last available physical page */
                    245: vsize_t                mem_size;       /* memory size in bytes */
1.5       thorpej   246: vaddr_t                virtual_avail;  /* VA of first avail page (after kernel bss)*/
                    247: vaddr_t                virtual_end;    /* VA of last avail page (end of kernel AS) */
1.1       chs       248: int            page_cnt;       /* number of pages managed by VM system */
                    249:
1.25      thorpej   250: bool           pmap_initialized = false;       /* Has pmap_init completed? */
1.1       chs       251: struct pv_entry        *pv_table;
                    252: char           *pmap_attributes;       /* reference and modify bits */
                    253: TAILQ_HEAD(pv_page_list, pv_page) pv_page_freelist;
                    254: int            pv_nfree;
                    255:
                    256: #ifdef M68K_MMU_HP
                     257: int            pmap_aliasmask; /* separation at which VA aliasing ok */
                    258: #endif
                    259: #if defined(M68040) || defined(M68060)
                    260: int            protostfree;    /* prototype (default) free ST map */
                    261: #endif
                    262:
                    263: pt_entry_t     *caddr1_pte;    /* PTE for CADDR1 */
                    264: pt_entry_t     *caddr2_pte;    /* PTE for CADDR2 */
                    265:
                    266: struct pool    pmap_pmap_pool; /* memory pool for pmap structures */
                    267:
1.20      tsutsui   268: struct pv_entry *pmap_alloc_pv(void);
                    269: void   pmap_free_pv(struct pv_entry *);
                    270: void   pmap_collect_pv(void);
1.1       chs       271:
                    272: #define        PAGE_IS_MANAGED(pa)     (pmap_initialized &&                    \
                    273:                                 vm_physseg_find(atop((pa)), NULL) != -1)
                    274:
1.35.6.2! mjf       275: static inline struct pv_entry *pa_to_pvh(paddr_t pa);
        !           276: static inline char *pa_to_attribute(paddr_t pa);
        !           277:
        !           278: static inline struct pv_entry *
        !           279: pa_to_pvh(paddr_t pa)
        !           280: {
        !           281:        int bank, pg = 0;       /* XXX gcc4 -Wuninitialized */
        !           282:
        !           283:        bank = vm_physseg_find(atop((pa)), &pg);
        !           284:        return &vm_physmem[bank].pmseg.pvent[pg];
        !           285: }
        !           286:
        !           287: static inline char *
        !           288: pa_to_attribute(paddr_t pa)
        !           289: {
        !           290:        int bank, pg = 0;       /* XXX gcc4 -Wuninitialized */
        !           291:
        !           292:        bank = vm_physseg_find(atop((pa)), &pg);
        !           293:        return &vm_physmem[bank].pmseg.attrs[pg];
        !           294: }
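/*
 * Editor's note: both helpers use the bank index unchecked, so they
 * assume the PA is managed; vm_physseg_find() returns -1 otherwise.
 * Callers are expected to guard with the PAGE_IS_MANAGED() macro above,
 * e.g. (hypothetical):
 *
 *	if (PAGE_IS_MANAGED(pa))
 *		pv = pa_to_pvh(pa);
 */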
1.1       chs       295:
                    296: /*
                    297:  * Internal routines
                    298:  */
1.20      tsutsui   299: void   pmap_remove_mapping(pmap_t, vaddr_t, pt_entry_t *, int);
                    300: void   pmap_do_remove(pmap_t, vaddr_t, vaddr_t, int);
1.23      thorpej   301: bool   pmap_testbit(paddr_t, int);
                    302: bool   pmap_changebit(paddr_t, int, int);
1.24      tsutsui   303: int    pmap_enter_ptpage(pmap_t, vaddr_t, bool);
1.20      tsutsui   304: void   pmap_ptpage_addref(vaddr_t);
                    305: int    pmap_ptpage_delref(vaddr_t);
                    306: void   pmap_collect1(pmap_t, paddr_t, paddr_t);
                    307: void   pmap_pinit(pmap_t);
                    308: void   pmap_release(pmap_t);
1.1       chs       309:
                    310: #ifdef DEBUG
1.20      tsutsui   311: void pmap_pvdump(paddr_t);
                    312: void pmap_check_wiring(const char *, vaddr_t);
1.1       chs       313: #endif
                    314:
                    315: /* pmap_remove_mapping flags */
                    316: #define        PRM_TFLUSH      0x01
                    317: #define        PRM_CFLUSH      0x02
                    318: #define        PRM_KEEPPTPAGE  0x04
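/*
 * Editor's note (hedged; pmap_remove_mapping()'s body lies beyond this
 * excerpt): judging by the call sites below, PRM_TFLUSH requests a TLB
 * invalidate for the removed mapping, PRM_CFLUSH a cache flush, and
 * PRM_KEEPPTPAGE asks that the PT page holding the PTE be preserved even
 * if this was its last mapping.
 */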
                    319:
                    320: /*
1.5       thorpej   321:  * pmap_virtual_space:         [ INTERFACE ]
                    322:  *
                    323:  *     Report the range of available kernel virtual address
                    324:  *     space to the VM system during bootstrap.
                    325:  *
                    326:  *     This is only an interface function if we do not use
                    327:  *     pmap_steal_memory()!
                    328:  *
                    329:  *     Note: no locking is necessary in this function.
                    330:  */
                    331: void
                     332: pmap_virtual_space(vaddr_t *vstartp,
                     333:     vaddr_t *vendp)
                    334: {
                    335:
                    336:        *vstartp = virtual_avail;
                    337:        *vendp = virtual_end;
                    338: }
                    339:
                    340: /*
1.1       chs       341:  * pmap_init:                  [ INTERFACE ]
                    342:  *
                    343:  *     Initialize the pmap module.  Called by vm_init(), to initialize any
                    344:  *     structures that the pmap system needs to map virtual memory.
                    345:  *
                    346:  *     Note: no locking is necessary in this function.
                    347:  */
                    348: void
1.20      tsutsui   349: pmap_init(void)
1.1       chs       350: {
                    351:        vaddr_t         addr, addr2;
                    352:        vsize_t         s;
                    353:        struct pv_entry *pv;
                    354:        char            *attr;
                    355:        int             rv;
                    356:        int             npages;
                    357:        int             bank;
                    358:
                    359:        PMAP_DPRINTF(PDB_FOLLOW, ("pmap_init()\n"));
                    360:
                    361:        /*
                    362:         * Before we do anything else, initialize the PTE pointers
                    363:         * used by pmap_zero_page() and pmap_copy_page().
                    364:         */
                    365:        caddr1_pte = pmap_pte(pmap_kernel(), CADDR1);
                    366:        caddr2_pte = pmap_pte(pmap_kernel(), CADDR2);
                    367:
                    368:        PMAP_DPRINTF(PDB_INIT,
                    369:            ("pmap_init: Sysseg %p, Sysmap %p, Sysptmap %p\n",
                    370:            Sysseg, Sysmap, Sysptmap));
                    371:        PMAP_DPRINTF(PDB_INIT,
                    372:            ("  pstart %lx, pend %lx, vstart %lx, vend %lx\n",
                    373:            avail_start, avail_end, virtual_avail, virtual_end));
                    374:
                    375:        /*
                    376:         * Allocate memory for random pmap data structures.  Includes the
                    377:         * initial segment table, pv_head_table and pmap_attributes.
                    378:         */
                    379:        for (page_cnt = 0, bank = 0; bank < vm_nphysseg; bank++)
                    380:                page_cnt += vm_physmem[bank].end - vm_physmem[bank].start;
                    381:        s = M68K_STSIZE;                                        /* Segtabzero */
                    382:        s += page_cnt * sizeof(struct pv_entry);        /* pv table */
                    383:        s += page_cnt * sizeof(char);                   /* attribute table */
                    384:        s = round_page(s);
1.14      yamt      385:        addr = uvm_km_alloc(kernel_map, s, 0, UVM_KMF_WIRED | UVM_KMF_ZERO);
1.1       chs       386:        if (addr == 0)
                    387:                panic("pmap_init: can't allocate data structures");
                    388:
1.20      tsutsui   389:        Segtabzero = (st_entry_t *)addr;
                    390:        (void)pmap_extract(pmap_kernel(), addr,
                    391:            (paddr_t *)(void *)&Segtabzeropa);
1.1       chs       392:        addr += M68K_STSIZE;
                    393:
                    394:        pv_table = (struct pv_entry *) addr;
                    395:        addr += page_cnt * sizeof(struct pv_entry);
                    396:
1.20      tsutsui   397:        pmap_attributes = (char *)addr;
1.1       chs       398:
                    399:        PMAP_DPRINTF(PDB_INIT, ("pmap_init: %lx bytes: page_cnt %x s0 %p(%p) "
                    400:            "tbl %p atr %p\n",
                    401:            s, page_cnt, Segtabzero, Segtabzeropa,
                    402:            pv_table, pmap_attributes));
                    403:
                    404:        /*
                    405:         * Now that the pv and attribute tables have been allocated,
                    406:         * assign them to the memory segments.
                    407:         */
                    408:        pv = pv_table;
                    409:        attr = pmap_attributes;
                    410:        for (bank = 0; bank < vm_nphysseg; bank++) {
                    411:                npages = vm_physmem[bank].end - vm_physmem[bank].start;
                    412:                vm_physmem[bank].pmseg.pvent = pv;
                    413:                vm_physmem[bank].pmseg.attrs = attr;
                    414:                pv += npages;
                    415:                attr += npages;
                    416:        }
                    417:
                    418:        /*
1.5       thorpej   419:         * Allocate physical memory for kernel PT pages and their management.
                    420:         * We need 1 PT page per possible task plus some slop.
                    421:         */
                    422:        npages = min(atop(M68K_MAX_KPTSIZE), maxproc+16);
                    423:        s = ptoa(npages) + round_page(npages * sizeof(struct kpt_page));
                    424:
                    425:        /*
                     426:         * Verify that space will be allocated in the region for which
                    427:         * we already have kernel PT pages.
                    428:         */
                    429:        addr = 0;
                    430:        rv = uvm_map(kernel_map, &addr, s, NULL, UVM_UNKNOWN_OFFSET, 0,
1.20      tsutsui   431:            UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
                    432:            UVM_ADV_RANDOM, UVM_FLAG_NOMERGE));
1.5       thorpej   433:        if (rv != 0 || (addr + s) >= (vaddr_t)Sysmap)
                    434:                panic("pmap_init: kernel PT too small");
                    435:        uvm_unmap(kernel_map, addr, addr + s);
                    436:
                    437:        /*
                    438:         * Now allocate the space and link the pages together to
                    439:         * form the KPT free list.
                    440:         */
1.14      yamt      441:        addr = uvm_km_alloc(kernel_map, s, 0, UVM_KMF_WIRED | UVM_KMF_ZERO);
1.5       thorpej   442:        if (addr == 0)
                    443:                panic("pmap_init: cannot allocate KPT free list");
                    444:        s = ptoa(npages);
                    445:        addr2 = addr + s;
                    446:        kpt_pages = &((struct kpt_page *)addr2)[npages];
                    447:        kpt_free_list = NULL;
                    448:        do {
                    449:                addr2 -= PAGE_SIZE;
                    450:                (--kpt_pages)->kpt_next = kpt_free_list;
                    451:                kpt_free_list = kpt_pages;
                    452:                kpt_pages->kpt_va = addr2;
                    453:                (void) pmap_extract(pmap_kernel(), addr2,
                    454:                    (paddr_t *)&kpt_pages->kpt_pa);
                    455:        } while (addr != addr2);
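/*
 * Editor's note: the allocation above holds the npages PT pages first and
 * their struct kpt_page descriptors immediately after (at addr2).  The
 * do-while walks both regions backwards in lock step, one page and one
 * descriptor per iteration, so descriptor [i] describes PT page [i] and
 * the lowest-addressed page ends up at the head of kpt_free_list.
 */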
                    456:
                    457:        PMAP_DPRINTF(PDB_INIT, ("pmap_init: KPT: %ld pages from %lx to %lx\n",
                    458:            atop(s), addr, addr + s));
                    459:
                    460:        /*
1.1       chs       461:         * Allocate the segment table map and the page table map.
                    462:         */
                    463:        s = maxproc * M68K_STSIZE;
1.25      thorpej   464:        st_map = uvm_km_suballoc(kernel_map, &addr, &addr2, s, 0, false,
1.1       chs       465:            &st_map_store);
                    466:
                    467:        addr = M68K_PTBASE;
                    468:        if ((M68K_PTMAXSIZE / M68K_MAX_PTSIZE) < maxproc) {
                    469:                s = M68K_PTMAXSIZE;
                    470:                /*
                    471:                 * XXX We don't want to hang when we run out of
                    472:                 * page tables, so we lower maxproc so that fork()
                    473:                 * will fail instead.  Note that root could still raise
                    474:                 * this value via sysctl(3).
                    475:                 */
                    476:                maxproc = (M68K_PTMAXSIZE / M68K_MAX_PTSIZE);
                    477:        } else
                    478:                s = (maxproc * M68K_MAX_PTSIZE);
                    479:        pt_map = uvm_km_suballoc(kernel_map, &addr, &addr2, s, 0,
1.25      thorpej   480:            true, &pt_map_store);
1.1       chs       481:
                    482: #if defined(M68040) || defined(M68060)
                    483:        if (mmutype == MMU_68040) {
                    484:                protostfree = ~l2tobm(0);
                    485:                for (rv = MAXUL2SIZE; rv < sizeof(protostfree)*NBBY; rv++)
                    486:                        protostfree &= ~l2tobm(rv);
                    487:        }
                    488: #endif
                    489:
                    490:        /*
                    491:         * Initialize the pmap pools.
                    492:         */
                    493:        pool_init(&pmap_pmap_pool, sizeof(struct pmap), 0, 0, 0, "pmappl",
1.28      ad        494:            &pool_allocator_nointr, IPL_NONE);
1.1       chs       495:
                    496:        /*
                    497:         * Now that this is done, mark the pages shared with the
                    498:         * hardware page table search as non-CCB (actually, as CI).
                    499:         *
                    500:         * XXX Hm. Given that this is in the kernel map, can't we just
                    501:         * use the va's?
                    502:         */
                    503: #ifdef M68060
                    504: #if defined(M68020) || defined(M68030) || defined(M68040)
                    505:        if (cputype == CPU_68060)
                    506: #endif
                    507:        {
                    508:                struct kpt_page *kptp = kpt_free_list;
                    509:                paddr_t paddr;
                    510:
                    511:                while (kptp) {
                    512:                        pmap_changebit(kptp->kpt_pa, PG_CI, ~PG_CCB);
                    513:                        kptp = kptp->kpt_next;
                    514:                }
                    515:
                    516:                paddr = (paddr_t)Segtabzeropa;
                    517:                while (paddr < (paddr_t)Segtabzeropa + M68K_STSIZE) {
                    518:                        pmap_changebit(paddr, PG_CI, ~PG_CCB);
1.3       thorpej   519:                        paddr += PAGE_SIZE;
1.1       chs       520:                }
                    521:
                    522:                DCIS();
                    523:        }
                    524: #endif
                    525:
                    526:        /*
                    527:         * Now it is safe to enable pv_table recording.
                    528:         */
1.25      thorpej   529:        pmap_initialized = true;
1.1       chs       530: }
                    531:
                    532: /*
                    533:  * pmap_alloc_pv:
                    534:  *
                    535:  *     Allocate a pv_entry.
                    536:  */
                    537: struct pv_entry *
1.20      tsutsui   538: pmap_alloc_pv(void)
1.1       chs       539: {
                    540:        struct pv_page *pvp;
                    541:        struct pv_entry *pv;
                    542:        int i;
                    543:
                    544:        if (pv_nfree == 0) {
1.14      yamt      545:                pvp = (struct pv_page *)uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
                    546:                    UVM_KMF_WIRED | UVM_KMF_ZERO);
1.35.6.2! mjf       547:                if (pvp == NULL)
1.14      yamt      548:                        panic("pmap_alloc_pv: uvm_km_alloc() failed");
1.1       chs       549:                pvp->pvp_pgi.pgi_freelist = pv = &pvp->pvp_pv[1];
                    550:                for (i = NPVPPG - 2; i; i--, pv++)
                    551:                        pv->pv_next = pv + 1;
1.35.6.2! mjf       552:                pv->pv_next = NULL;
1.1       chs       553:                pv_nfree += pvp->pvp_pgi.pgi_nfree = NPVPPG - 1;
                    554:                TAILQ_INSERT_HEAD(&pv_page_freelist, pvp, pvp_pgi.pgi_list);
                    555:                pv = &pvp->pvp_pv[0];
                    556:        } else {
                    557:                --pv_nfree;
1.35.6.2! mjf       558:                pvp = TAILQ_FIRST(&pv_page_freelist);
1.1       chs       559:                if (--pvp->pvp_pgi.pgi_nfree == 0) {
                    560:                        TAILQ_REMOVE(&pv_page_freelist, pvp, pvp_pgi.pgi_list);
                    561:                }
                    562:                pv = pvp->pvp_pgi.pgi_freelist;
                    563: #ifdef DIAGNOSTIC
1.35.6.2! mjf       564:                if (pv == NULL)
1.1       chs       565:                        panic("pmap_alloc_pv: pgi_nfree inconsistent");
                    566: #endif
                    567:                pvp->pvp_pgi.pgi_freelist = pv->pv_next;
                    568:        }
                    569:        return pv;
                    570: }
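/*
 * Editor's note on the fresh-page branch above: slot 0 of pvp_pv[] goes
 * straight to the caller and slots 1..NPVPPG-1 become the page freelist.
 * The for loop runs only NPVPPG-2 times because each pass links a slot to
 * its successor; the final slot's NULL terminator is stored after the
 * loop.  That is also why pgi_nfree starts at NPVPPG - 1, not NPVPPG.
 */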
                    571:
                    572: /*
                    573:  * pmap_free_pv:
                    574:  *
                    575:  *     Free a pv_entry.
                    576:  */
                    577: void
1.20      tsutsui   578: pmap_free_pv(struct pv_entry *pv)
1.1       chs       579: {
                    580:        struct pv_page *pvp;
                    581:
1.20      tsutsui   582:        pvp = (struct pv_page *)trunc_page((vaddr_t)pv);
1.1       chs       583:        switch (++pvp->pvp_pgi.pgi_nfree) {
                    584:        case 1:
                    585:                TAILQ_INSERT_TAIL(&pv_page_freelist, pvp, pvp_pgi.pgi_list);
                    586:        default:
                    587:                pv->pv_next = pvp->pvp_pgi.pgi_freelist;
                    588:                pvp->pvp_pgi.pgi_freelist = pv;
                    589:                ++pv_nfree;
                    590:                break;
                    591:        case NPVPPG:
                    592:                pv_nfree -= NPVPPG - 1;
                    593:                TAILQ_REMOVE(&pv_page_freelist, pvp, pvp_pgi.pgi_list);
1.14      yamt      594:                uvm_km_free(kernel_map, (vaddr_t)pvp, PAGE_SIZE, UVM_KMF_WIRED);
1.1       chs       595:                break;
                    596:        }
                    597: }
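/*
 * Editor's note: the switch above relies on deliberate fallthrough.
 * "case 1:" (this was the page's first free entry) inserts the page on
 * pv_page_freelist and then falls into "default:" to chain the entry
 * itself; only "case NPVPPG:" (the page is now entirely free) unlinks the
 * page and returns it to the kernel map instead.
 */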
                    598:
                    599: /*
                    600:  * pmap_collect_pv:
                    601:  *
                    602:  *     Perform compaction on the PV list, called via pmap_collect().
                    603:  */
                    604: void
1.20      tsutsui   605: pmap_collect_pv(void)
1.1       chs       606: {
                    607:        struct pv_page_list pv_page_collectlist;
                    608:        struct pv_page *pvp, *npvp;
                    609:        struct pv_entry *ph, *ppv, *pv, *npv;
                    610:        int s;
                    611:
                    612:        TAILQ_INIT(&pv_page_collectlist);
                    613:
1.35.6.2! mjf       614:        for (pvp = TAILQ_FIRST(&pv_page_freelist); pvp != NULL; pvp = npvp) {
1.1       chs       615:                if (pv_nfree < NPVPPG)
                    616:                        break;
1.35.6.2! mjf       617:                npvp = TAILQ_NEXT(pvp, pvp_pgi.pgi_list);
1.1       chs       618:                if (pvp->pvp_pgi.pgi_nfree > NPVPPG / 3) {
                    619:                        TAILQ_REMOVE(&pv_page_freelist, pvp, pvp_pgi.pgi_list);
                    620:                        TAILQ_INSERT_TAIL(&pv_page_collectlist, pvp,
                    621:                            pvp_pgi.pgi_list);
                    622:                        pv_nfree -= NPVPPG;
                    623:                        pvp->pvp_pgi.pgi_nfree = -1;
                    624:                }
                    625:        }
                    626:
1.35.6.2! mjf       627:        if (TAILQ_FIRST(&pv_page_collectlist) == NULL)
1.1       chs       628:                return;
                    629:
                    630:        for (ph = &pv_table[page_cnt - 1]; ph >= &pv_table[0]; ph--) {
1.35.6.2! mjf       631:                if (ph->pv_pmap == NULL)
1.1       chs       632:                        continue;
                    633:                s = splvm();
1.35.6.2! mjf       634:                for (ppv = ph; (pv = ppv->pv_next) != NULL; ) {
1.1       chs       635:                        pvp = (struct pv_page *) trunc_page((vaddr_t)pv);
                    636:                        if (pvp->pvp_pgi.pgi_nfree == -1) {
1.35.6.2! mjf       637:                                pvp = TAILQ_FIRST(&pv_page_freelist);
1.1       chs       638:                                if (--pvp->pvp_pgi.pgi_nfree == 0) {
                    639:                                        TAILQ_REMOVE(&pv_page_freelist, pvp,
                    640:                                            pvp_pgi.pgi_list);
                    641:                                }
                    642:                                npv = pvp->pvp_pgi.pgi_freelist;
                    643: #ifdef DIAGNOSTIC
1.35.6.2! mjf       644:                                if (npv == NULL)
1.20      tsutsui   645:                                        panic("pmap_collect_pv: "
                    646:                                            "pgi_nfree inconsistent");
1.1       chs       647: #endif
                    648:                                pvp->pvp_pgi.pgi_freelist = npv->pv_next;
                    649:                                *npv = *pv;
                    650:                                ppv->pv_next = npv;
                    651:                                ppv = npv;
                    652:                        } else
                    653:                                ppv = pv;
                    654:                }
                    655:                splx(s);
                    656:        }
                    657:
1.35.6.2! mjf       658:        for (pvp = TAILQ_FIRST(&pv_page_collectlist); pvp != NULL; pvp = npvp) {
        !           659:                npvp = TAILQ_NEXT(pvp, pvp_pgi.pgi_list);
1.14      yamt      660:                uvm_km_free(kernel_map, (vaddr_t)pvp, PAGE_SIZE, UVM_KMF_WIRED);
1.1       chs       661:        }
                    662: }
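/*
 * Editor's note: compaction runs in two passes.  The first pass pulls
 * mostly-free pages (more than NPVPPG/3 entries free) off the freelist
 * and tags them with pgi_nfree == -1.  The second pass walks every pv
 * chain and, for each entry living on a tagged page, allocates a
 * replacement from a still-live page, copies it with *npv = *pv, and
 * relinks the chain; only then are the tagged pages freed.
 */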
                    663:
                    664: /*
                    665:  * pmap_map:
                    666:  *
                    667:  *     Used to map a range of physical addresses into kernel
                    668:  *     virtual address space.
                    669:  *
                    670:  *     For now, VM is already on, we only need to map the
                    671:  *     specified memory.
                    672:  *
                    673:  *     Note: THIS FUNCTION IS DEPRECATED, AND SHOULD BE REMOVED!
                    674:  */
                    675: vaddr_t
1.20      tsutsui   676: pmap_map(vaddr_t va, paddr_t spa, paddr_t epa, int prot)
1.1       chs       677: {
                    678:
                    679:        PMAP_DPRINTF(PDB_FOLLOW,
                    680:            ("pmap_map(%lx, %lx, %lx, %x)\n", va, spa, epa, prot));
                    681:
                    682:        while (spa < epa) {
                    683:                pmap_enter(pmap_kernel(), va, spa, prot, 0);
1.3       thorpej   684:                va += PAGE_SIZE;
                    685:                spa += PAGE_SIZE;
1.1       chs       686:        }
                    687:        pmap_update(pmap_kernel());
1.20      tsutsui   688:        return va;
1.1       chs       689: }
                    690:
                    691: /*
                    692:  * pmap_create:                        [ INTERFACE ]
                    693:  *
                    694:  *     Create and return a physical map.
                    695:  *
                    696:  *     Note: no locking is necessary in this function.
                    697:  */
                    698: pmap_t
1.20      tsutsui   699: pmap_create(void)
1.1       chs       700: {
                    701:        struct pmap *pmap;
                    702:
                    703:        PMAP_DPRINTF(PDB_FOLLOW|PDB_CREATE,
                    704:            ("pmap_create()\n"));
                    705:
                    706:        pmap = pool_get(&pmap_pmap_pool, PR_WAITOK);
                    707:        memset(pmap, 0, sizeof(*pmap));
                    708:        pmap_pinit(pmap);
1.20      tsutsui   709:        return pmap;
1.1       chs       710: }
                    711:
                    712: /*
                    713:  * pmap_pinit:
                    714:  *
                    715:  *     Initialize a preallocated and zeroed pmap structure.
                    716:  *
                    717:  *     Note: THIS FUNCTION SHOULD BE MOVED INTO pmap_create()!
                    718:  */
                    719: void
1.20      tsutsui   720: pmap_pinit(struct pmap *pmap)
1.1       chs       721: {
                    722:
                    723:        PMAP_DPRINTF(PDB_FOLLOW|PDB_CREATE,
                    724:            ("pmap_pinit(%p)\n", pmap));
                    725:
                    726:        /*
                    727:         * No need to allocate page table space yet but we do need a
                    728:         * valid segment table.  Initially, we point everyone at the
                    729:         * "null" segment table.  On the first pmap_enter, a real
                    730:         * segment table will be allocated.
                    731:         */
                    732:        pmap->pm_stab = Segtabzero;
                    733:        pmap->pm_stpa = Segtabzeropa;
                    734: #if defined(M68040) || defined(M68060)
                    735: #if defined(M68020) || defined(M68030)
                    736:        if (mmutype == MMU_68040)
                    737: #endif
                    738:                pmap->pm_stfree = protostfree;
                    739: #endif
                    740:        pmap->pm_count = 1;
                    741:        simple_lock_init(&pmap->pm_lock);
                    742: }
                    743:
                    744: /*
                    745:  * pmap_destroy:               [ INTERFACE ]
                    746:  *
                    747:  *     Drop the reference count on the specified pmap, releasing
                    748:  *     all resources if the reference count drops to zero.
                    749:  */
                    750: void
1.20      tsutsui   751: pmap_destroy(pmap_t pmap)
1.1       chs       752: {
                    753:        int count;
                    754:
                    755:        PMAP_DPRINTF(PDB_FOLLOW, ("pmap_destroy(%p)\n", pmap));
                    756:
                    757:        simple_lock(&pmap->pm_lock);
                    758:        count = --pmap->pm_count;
                    759:        simple_unlock(&pmap->pm_lock);
                    760:        if (count == 0) {
                    761:                pmap_release(pmap);
                    762:                pool_put(&pmap_pmap_pool, pmap);
                    763:        }
                    764: }
                    765:
                    766: /*
                    767:  * pmap_release:
                    768:  *
                     769:  *     Release the resources held by a pmap.
                    770:  *
                    771:  *     Note: THIS FUNCTION SHOULD BE MOVED INTO pmap_destroy().
                    772:  */
                    773: void
1.20      tsutsui   774: pmap_release(pmap_t pmap)
1.1       chs       775: {
                    776:
                    777:        PMAP_DPRINTF(PDB_FOLLOW, ("pmap_release(%p)\n", pmap));
                    778:
                    779: #ifdef notdef /* DIAGNOSTIC */
                    780:        /* count would be 0 from pmap_destroy... */
                    781:        simple_lock(&pmap->pm_lock);
                    782:        if (pmap->pm_count != 1)
                    783:                panic("pmap_release count");
                    784: #endif
                    785:
                    786:        if (pmap->pm_ptab) {
                    787:                pmap_remove(pmap_kernel(), (vaddr_t)pmap->pm_ptab,
                    788:                    (vaddr_t)pmap->pm_ptab + M68K_MAX_PTSIZE);
1.14      yamt      789:                uvm_km_pgremove((vaddr_t)pmap->pm_ptab,
                    790:                    (vaddr_t)pmap->pm_ptab + M68K_MAX_PTSIZE);
                    791:                uvm_km_free(pt_map, (vaddr_t)pmap->pm_ptab,
                    792:                    M68K_MAX_PTSIZE, UVM_KMF_VAONLY);
1.1       chs       793:        }
                    794:        KASSERT(pmap->pm_stab == Segtabzero);
                    795: }
                    796:
                    797: /*
                    798:  * pmap_reference:             [ INTERFACE ]
                    799:  *
                    800:  *     Add a reference to the specified pmap.
                    801:  */
                    802: void
1.20      tsutsui   803: pmap_reference(pmap_t pmap)
1.1       chs       804: {
                    805:        PMAP_DPRINTF(PDB_FOLLOW, ("pmap_reference(%p)\n", pmap));
                    806:
                    807:        simple_lock(&pmap->pm_lock);
                    808:        pmap->pm_count++;
                    809:        simple_unlock(&pmap->pm_lock);
                    810: }
                    811:
                    812: /*
                    813:  * pmap_activate:              [ INTERFACE ]
                    814:  *
                    815:  *     Activate the pmap used by the specified process.  This includes
                     816:  *     reloading the MMU context if it is the current process, and
                     817:  *     marking the pmap in use by the processor.
                    818:  *
                    819:  *     Note: we may only use spin locks here, since we are called
                    820:  *     by a critical section in cpu_switch()!
                    821:  */
                    822: void
1.20      tsutsui   823: pmap_activate(struct lwp *l)
1.1       chs       824: {
1.20      tsutsui   825:        pmap_t pmap = l->l_proc->p_vmspace->vm_map.pmap;
1.1       chs       826:
                    827:        PMAP_DPRINTF(PDB_FOLLOW|PDB_SEGTAB,
1.2       thorpej   828:            ("pmap_activate(%p)\n", l));
1.1       chs       829:
1.30      mhitch    830:        PMAP_ACTIVATE(pmap, (curlwp->l_flag & LW_IDLE) != 0 ||
                    831:            l->l_proc == curproc);
1.1       chs       832: }
                    833:
                    834: /*
                    835:  * pmap_deactivate:            [ INTERFACE ]
                    836:  *
                    837:  *     Mark that the pmap used by the specified process is no longer
                    838:  *     in use by the processor.
                    839:  *
                    840:  *     The comment above pmap_activate() wrt. locking applies here,
                    841:  *     as well.
                    842:  */
                    843: void
1.20      tsutsui   844: pmap_deactivate(struct lwp *l)
1.1       chs       845: {
                    846:
                    847:        /* No action necessary in this pmap implementation. */
                    848: }
                    849:
                    850: /*
                    851:  * pmap_remove:                        [ INTERFACE ]
                    852:  *
                    853:  *     Remove the given range of addresses from the specified map.
                    854:  *
                    855:  *     It is assumed that the start and end are properly
                    856:  *     rounded to the page size.
                    857:  */
                    858: void
1.20      tsutsui   859: pmap_remove(pmap_t pmap, vaddr_t sva, vaddr_t eva)
1.1       chs       860: {
                    861:
                    862:        pmap_do_remove(pmap, sva, eva, 1);
                    863: }
                    864:
                    865: void
1.20      tsutsui   866: pmap_do_remove(pmap_t pmap, vaddr_t sva, vaddr_t eva, int remove_wired)
1.1       chs       867: {
                    868:        vaddr_t nssva;
                    869:        pt_entry_t *pte;
                    870:        int flags;
                    871: #ifdef M68K_MMU_HP
1.25      thorpej   872:        bool firstpage = true, needcflush = false;
1.1       chs       873: #endif
                    874:
                    875:        PMAP_DPRINTF(PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT,
                    876:            ("pmap_remove(%p, %lx, %lx)\n", pmap, sva, eva));
                    877:
                    878:        flags = active_pmap(pmap) ? PRM_TFLUSH : 0;
                    879:        while (sva < eva) {
                    880:                nssva = m68k_trunc_seg(sva) + NBSEG;
                    881:                if (nssva == 0 || nssva > eva)
                    882:                        nssva = eva;
                    883:
                    884:                /*
                    885:                 * Invalidate every valid mapping within this segment.
                    886:                 * If remove_wired is zero, skip the wired pages.
                    887:                 */
                    888:
                    889:                pte = pmap_pte(pmap, sva);
                    890:                while (sva < nssva) {
                    891:
                    892:                        /*
                    893:                         * If this segment is unallocated,
                    894:                         * skip to the next segment boundary.
                    895:                         */
                    896:
                    897:                        if (!pmap_ste_v(pmap, sva)) {
                    898:                                sva = nssva;
                    899:                                break;
                    900:                        }
                    901:
                    902:
                    903:
                    904:                        if (pmap_pte_v(pte) &&
                    905:                            (remove_wired || !pmap_pte_w(pte))) {
                    906: #ifdef M68K_MMU_HP
                    907:                                if (pmap_aliasmask) {
                    908:
                    909:                                        /*
                    910:                                         * Purge kernel side of VAC to ensure
                    911:                                         * we get the correct state of any
                    912:                                         * hardware maintained bits.
                    913:                                         */
                    914:
                    915:                                        if (firstpage) {
                    916:                                                DCIS();
                    917:                                        }
                    918:
                    919:                                        /*
                    920:                                         * Remember if we may need to
                    921:                                         * flush the VAC due to a non-CI
                    922:                                         * mapping.
                    923:                                         */
                    924:
                    925:                                        if (!needcflush && !pmap_pte_ci(pte))
1.25      thorpej   926:                                                needcflush = true;
1.1       chs       927:
                    928:                                }
1.25      thorpej   929:                                firstpage = false;
1.1       chs       930: #endif
                    931:                                pmap_remove_mapping(pmap, sva, pte, flags);
                    932:                        }
                    933:                        pte++;
1.3       thorpej   934:                        sva += PAGE_SIZE;
1.1       chs       935:                }
                    936:        }
                    937:
                    938: #ifdef M68K_MMU_HP
                    939:
                    940:        /*
                    941:         * Didn't do anything, no need for cache flushes
                    942:         */
                    943:
                    944:        if (firstpage)
                    945:                return;
                    946:
                    947:        /*
                    948:         * In a couple of cases, we don't need to worry about flushing
                    949:         * the VAC:
                    950:         *      1. if this is a kernel mapping,
                    951:         *         we have already done it
                    952:         *      2. if it is a user mapping not for the current process,
                    953:         *         it won't be there
                    954:         */
                    955:
                    956:        if (pmap_aliasmask && !active_user_pmap(pmap))
1.25      thorpej   957:                needcflush = false;
1.1       chs       958:        if (needcflush) {
                    959:                if (pmap == pmap_kernel()) {
                    960:                        DCIS();
                    961:                } else {
                    962:                        DCIU();
                    963:                }
                    964:        }
                    965: #endif
                    966: }
                    967:
                    968: /*
                    969:  * pmap_page_protect:          [ INTERFACE ]
                    970:  *
                    971:  *     Lower the permission for all mappings to a given page to
                    972:  *     the permissions specified.
                    973:  */
                    974: void
1.20      tsutsui   975: pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
1.1       chs       976: {
                    977:        paddr_t pa = VM_PAGE_TO_PHYS(pg);
                    978:        struct pv_entry *pv;
                    979:        pt_entry_t *pte;
                    980:        int s;
                    981:
                    982: #ifdef DEBUG
                    983:        if ((pmapdebug & (PDB_FOLLOW|PDB_PROTECT)) ||
                    984:            (prot == VM_PROT_NONE && (pmapdebug & PDB_REMOVE)))
                    985:                printf("pmap_page_protect(%p, %x)\n", pg, prot);
                    986: #endif
                    987:
                    988:        switch (prot) {
                    989:        case VM_PROT_READ|VM_PROT_WRITE:
                    990:        case VM_PROT_ALL:
                    991:                return;
                    992:
                    993:        /* copy_on_write */
                    994:        case VM_PROT_READ:
                    995:        case VM_PROT_READ|VM_PROT_EXECUTE:
                    996:                pmap_changebit(pa, PG_RO, ~0);
                    997:                return;
                    998:
                    999:        /* remove_all */
                   1000:        default:
                   1001:                break;
                   1002:        }
                   1003:
                   1004:        pv = pa_to_pvh(pa);
                   1005:        s = splvm();
                   1006:        while (pv->pv_pmap != NULL) {
                   1007:
                   1008:                pte = pmap_pte(pv->pv_pmap, pv->pv_va);
                   1009: #ifdef DEBUG
                   1010:                if (!pmap_ste_v(pv->pv_pmap, pv->pv_va) ||
                   1011:                    pmap_pte_pa(pte) != pa)
                   1012:                        panic("pmap_page_protect: bad mapping");
                   1013: #endif
                   1014:                pmap_remove_mapping(pv->pv_pmap, pv->pv_va,
                   1015:                    pte, PRM_TFLUSH|PRM_CFLUSH);
                   1016:        }
                   1017:        splx(s);
                   1018: }
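/*
 * Illustrative sketch, not part of the original source: how a caller
 * is expected to drive pmap_page_protect().  The prot values map onto
 * the three arms of the switch above.
 */
#if 0
	/* Write-protect all mappings of pg, e.g. for copy-on-write. */
	pmap_page_protect(pg, VM_PROT_READ);

	/* Remove all mappings of pg, e.g. before freeing the page. */
	pmap_page_protect(pg, VM_PROT_NONE);

	/* No reduction requested; this returns without doing anything. */
	pmap_page_protect(pg, VM_PROT_ALL);
#endif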
                   1019:
                   1020: /*
                   1021:  * pmap_protect:               [ INTERFACE ]
                   1022:  *
1.29      tnn      1023:  *     Set the physical protection on the specified range of this map
1.1       chs      1024:  *     as requested.
                   1025:  */
                   1026: void
1.20      tsutsui  1027: pmap_protect(pmap_t pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
1.1       chs      1028: {
                   1029:        vaddr_t nssva;
                   1030:        pt_entry_t *pte;
1.23      thorpej  1031:        bool firstpage, needtflush;
1.1       chs      1032:        int isro;
                   1033:
                   1034:        PMAP_DPRINTF(PDB_FOLLOW|PDB_PROTECT,
                   1035:            ("pmap_protect(%p, %lx, %lx, %x)\n",
                   1036:            pmap, sva, eva, prot));
                   1037:
                   1038: #ifdef PMAPSTATS
                   1039:        protect_stats.calls++;
                   1040: #endif
                   1041:        if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
                   1042:                pmap_remove(pmap, sva, eva);
                   1043:                return;
                   1044:        }
                   1045:        isro = pte_prot(pmap, prot);
                   1046:        needtflush = active_pmap(pmap);
1.25      thorpej  1047:        firstpage = true;
1.1       chs      1048:        while (sva < eva) {
                   1049:                nssva = m68k_trunc_seg(sva) + NBSEG;
                   1050:                if (nssva == 0 || nssva > eva)
                   1051:                        nssva = eva;
                   1052:
                   1053:                /*
                   1054:                 * If VA belongs to an unallocated segment,
                   1055:                 * skip to the next segment boundary.
                   1056:                 */
                   1057:
                   1058:                if (!pmap_ste_v(pmap, sva)) {
                   1059:                        sva = nssva;
                   1060:                        continue;
                   1061:                }
                   1062:
                   1063:                /*
                   1064:                 * Change protection on mapping if it is valid and doesn't
                   1065:                 * already have the correct protection.
                   1066:                 */
                   1067:
                   1068:                pte = pmap_pte(pmap, sva);
                   1069:                while (sva < nssva) {
                   1070:                        if (pmap_pte_v(pte) && pmap_pte_prot_chg(pte, isro)) {
                   1071: #ifdef M68K_MMU_HP
                   1072:
                   1073:                                /*
                   1074:                                 * Purge kernel side of VAC to ensure we
                   1075:                                 * get the correct state of any hardware
                   1076:                                 * maintained bits.
                   1077:                                 *
                   1078:                                 * XXX do we need to clear the VAC in
                   1079:                                 * general to reflect the new protection?
                   1080:                                 */
                   1081:
                   1082:                                if (firstpage && pmap_aliasmask)
                   1083:                                        DCIS();
                   1084: #endif
                   1085:
                   1086: #if defined(M68040) || defined(M68060)
                   1087:
                   1088:                                /*
                   1089:                                 * Clear caches if making RO (see section
                   1090:                                 * "7.3 Cache Coherency" in the manual).
                   1091:                                 */
                   1092:
                   1093: #if defined(M68020) || defined(M68030)
                   1094:                                if (isro && mmutype == MMU_68040)
                   1095: #else
                   1096:                                if (isro)
                   1097: #endif
                   1098:                                {
                   1099:                                        paddr_t pa = pmap_pte_pa(pte);
                   1100:
                   1101:                                        DCFP(pa);
                   1102:                                        ICPP(pa);
                   1103:                                }
                   1104: #endif
                   1105:                                pmap_pte_set_prot(pte, isro);
                   1106:                                if (needtflush)
                   1107:                                        TBIS(sva);
1.25      thorpej  1108:                                firstpage = false;
1.1       chs      1109:                        }
                   1110:                        pte++;
1.3       thorpej  1111:                        sva += PAGE_SIZE;
1.1       chs      1112:                }
                   1113:        }
                   1114: }
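/*
 * Illustrative sketch, not part of the original source: the
 * segment-at-a-time walk shared by pmap_protect() and pmap_remove(),
 * reduced to its skeleton.  "visit" stands in for the per-PTE work.
 */
#if 0
	while (sva < eva) {
		/* End of the segment containing sva (0 means wraparound). */
		nssva = m68k_trunc_seg(sva) + NBSEG;
		if (nssva == 0 || nssva > eva)
			nssva = eva;

		/* Unallocated segment: skip it in one step. */
		if (!pmap_ste_v(pmap, sva)) {
			sva = nssva;
			continue;
		}

		/* PTEs within one segment are contiguous. */
		for (pte = pmap_pte(pmap, sva); sva < nssva;
		    pte++, sva += PAGE_SIZE)
			/* visit(pmap, sva, pte) */;
	}
#endif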
                   1115:
                   1116: /*
                   1117:  * pmap_enter:                 [ INTERFACE ]
                   1118:  *
                   1119:  *     Insert the given physical page (pa) at
                   1120:  *     the specified virtual address (va) in the
                   1121:  *     target physical map with the protection requested.
                   1122:  *
                   1123:  *     If specified, the page will be wired down, meaning
                   1124:  *     that the related pte cannot be reclaimed.
                   1125:  *
                   1126:  *     Note: This is the only routine which MAY NOT lazy-evaluate
                    1127:  *     or lose information.  That is, this routine must actually
                   1128:  *     insert this page into the given map NOW.
                   1129:  */
                   1130: int
1.20      tsutsui  1131: pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
1.1       chs      1132: {
                   1133:        pt_entry_t *pte;
                   1134:        int npte;
                   1135:        paddr_t opa;
1.25      thorpej  1136:        bool cacheable = true;
                   1137:        bool checkpv = true;
1.23      thorpej  1138:        bool wired = (flags & PMAP_WIRED) != 0;
                   1139:        bool can_fail = (flags & PMAP_CANFAIL) != 0;
1.1       chs      1140:
                   1141:        PMAP_DPRINTF(PDB_FOLLOW|PDB_ENTER,
                   1142:            ("pmap_enter(%p, %lx, %lx, %x, %x)\n",
                   1143:            pmap, va, pa, prot, wired));
                   1144:
                   1145: #ifdef DIAGNOSTIC
                   1146:        /*
                   1147:         * pmap_enter() should never be used for CADDR1 and CADDR2.
                   1148:         */
                   1149:        if (pmap == pmap_kernel() &&
                   1150:            (va == (vaddr_t)CADDR1 || va == (vaddr_t)CADDR2))
                   1151:                panic("pmap_enter: used for CADDR1 or CADDR2");
                   1152: #endif
                   1153:
                   1154:        /*
                   1155:         * For user mapping, allocate kernel VM resources if necessary.
                   1156:         */
1.22      martin   1157:        if (pmap->pm_ptab == NULL) {
1.1       chs      1158:                pmap->pm_ptab = (pt_entry_t *)
1.14      yamt     1159:                    uvm_km_alloc(pt_map, M68K_MAX_PTSIZE, 0,
1.22      martin   1160:                    UVM_KMF_VAONLY |
                   1161:                    (can_fail ? UVM_KMF_NOWAIT : UVM_KMF_WAITVA));
                   1162:                if (pmap->pm_ptab == NULL)
                   1163:                        return ENOMEM;
                   1164:        }
1.1       chs      1165:
                   1166:        /*
                   1167:         * Segment table entry not valid, we need a new PT page
                   1168:         */
1.22      martin   1169:        if (!pmap_ste_v(pmap, va)) {
                   1170:                int err = pmap_enter_ptpage(pmap, va, can_fail);
                   1171:                if (err)
                   1172:                        return err;
                   1173:        }
1.1       chs      1174:
                   1175:        pa = m68k_trunc_page(pa);
                   1176:        pte = pmap_pte(pmap, va);
                   1177:        opa = pmap_pte_pa(pte);
                   1178:
                   1179:        PMAP_DPRINTF(PDB_ENTER, ("enter: pte %p, *pte %x\n", pte, *pte));
                   1180:
                   1181:        /*
                   1182:         * Mapping has not changed, must be protection or wiring change.
                   1183:         */
                   1184:        if (opa == pa) {
                   1185:                /*
                   1186:                 * Wiring change, just update stats.
                   1187:                 * We don't worry about wiring PT pages as they remain
                   1188:                 * resident as long as there are valid mappings in them.
                   1189:                 * Hence, if a user page is wired, the PT page will be also.
                   1190:                 */
                   1191:                if (pmap_pte_w_chg(pte, wired ? PG_W : 0)) {
                   1192:                        PMAP_DPRINTF(PDB_ENTER,
                   1193:                            ("enter: wiring change -> %x\n", wired));
                   1194:                        if (wired)
                   1195:                                pmap->pm_stats.wired_count++;
                   1196:                        else
                   1197:                                pmap->pm_stats.wired_count--;
                   1198:                }
                   1199:                /*
                   1200:                 * Retain cache inhibition status
                   1201:                 */
1.25      thorpej  1202:                checkpv = false;
1.1       chs      1203:                if (pmap_pte_ci(pte))
1.25      thorpej  1204:                        cacheable = false;
1.1       chs      1205:                goto validate;
                   1206:        }
                   1207:
                   1208:        /*
                   1209:         * Mapping has changed, invalidate old range and fall through to
                   1210:         * handle validating new mapping.
                   1211:         */
                   1212:        if (opa) {
                   1213:                PMAP_DPRINTF(PDB_ENTER,
                   1214:                    ("enter: removing old mapping %lx\n", va));
                   1215:                pmap_remove_mapping(pmap, va, pte,
                   1216:                    PRM_TFLUSH|PRM_CFLUSH|PRM_KEEPPTPAGE);
                   1217:        }
                   1218:
                   1219:        /*
                   1220:         * If this is a new user mapping, increment the wiring count
                   1221:         * on this PT page.  PT pages are wired down as long as there
                   1222:         * is a valid mapping in the page.
                   1223:         */
                   1224:        if (pmap != pmap_kernel())
                   1225:                pmap_ptpage_addref(trunc_page((vaddr_t)pte));
                   1226:
                   1227:        /*
                    1228:         * Enter on the PV list if part of our managed memory.
                   1229:         * Note that we raise IPL while manipulating pv_table
                   1230:         * since pmap_enter can be called at interrupt time.
                   1231:         */
                   1232:        if (PAGE_IS_MANAGED(pa)) {
                   1233:                struct pv_entry *pv, *npv;
                   1234:                int s;
                   1235:
                   1236:                pv = pa_to_pvh(pa);
                   1237:                s = splvm();
                   1238:
                   1239:                PMAP_DPRINTF(PDB_ENTER,
                   1240:                    ("enter: pv at %p: %lx/%p/%p\n",
                   1241:                    pv, pv->pv_va, pv->pv_pmap, pv->pv_next));
                   1242:                /*
                   1243:                 * No entries yet, use header as the first entry
                   1244:                 */
                   1245:                if (pv->pv_pmap == NULL) {
                   1246:                        pv->pv_va = va;
                   1247:                        pv->pv_pmap = pmap;
                   1248:                        pv->pv_next = NULL;
                   1249:                        pv->pv_ptste = NULL;
                   1250:                        pv->pv_ptpmap = NULL;
                   1251:                        pv->pv_flags = 0;
                   1252:                }
                   1253:                /*
                   1254:                 * There is at least one other VA mapping this page.
                   1255:                 * Place this entry after the header.
                   1256:                 */
                   1257:                else {
                   1258: #ifdef DEBUG
                   1259:                        for (npv = pv; npv; npv = npv->pv_next)
                   1260:                                if (pmap == npv->pv_pmap && va == npv->pv_va)
                   1261:                                        panic("pmap_enter: already in pv_tab");
                   1262: #endif
                   1263:                        npv = pmap_alloc_pv();
                   1264:                        npv->pv_va = va;
                   1265:                        npv->pv_pmap = pmap;
                   1266:                        npv->pv_next = pv->pv_next;
                   1267:                        npv->pv_ptste = NULL;
                   1268:                        npv->pv_ptpmap = NULL;
                   1269:                        npv->pv_flags = 0;
                   1270:                        pv->pv_next = npv;
                   1271:
                   1272: #ifdef M68K_MMU_HP
                   1273:
                   1274:                        /*
                   1275:                         * Since there is another logical mapping for the
                   1276:                         * same page we may need to cache-inhibit the
                   1277:                         * descriptors on those CPUs with external VACs.
                   1278:                         * We don't need to CI if:
                   1279:                         *
                    1280:                         * - No two mappings belong to the same user pmap.
                   1281:                         *   Since the cache is flushed on context switches
                   1282:                         *   there is no problem between user processes.
                   1283:                         *
                   1284:                         * - Mappings within a single pmap are a certain
                   1285:                         *   magic distance apart.  VAs at these appropriate
                   1286:                         *   boundaries map to the same cache entries or
                   1287:                         *   otherwise don't conflict.
                   1288:                         *
                   1289:                         * To keep it simple, we only check for these special
                   1290:                         * cases if there are only two mappings, otherwise we
                   1291:                         * punt and always CI.
                   1292:                         *
                   1293:                         * Note that there are no aliasing problems with the
                   1294:                         * on-chip data-cache when the WA bit is set.
                   1295:                         */
                   1296:
                   1297:                        if (pmap_aliasmask) {
                   1298:                                if (pv->pv_flags & PV_CI) {
                   1299:                                        PMAP_DPRINTF(PDB_CACHE,
                   1300:                                            ("enter: pa %lx already CI'ed\n",
                   1301:                                            pa));
1.25      thorpej  1302:                                        checkpv = cacheable = false;
1.1       chs      1303:                                } else if (npv->pv_next ||
                   1304:                                           ((pmap == pv->pv_pmap ||
                   1305:                                             pmap == pmap_kernel() ||
                   1306:                                             pv->pv_pmap == pmap_kernel()) &&
                   1307:                                            ((pv->pv_va & pmap_aliasmask) !=
                   1308:                                             (va & pmap_aliasmask)))) {
                   1309:                                        PMAP_DPRINTF(PDB_CACHE,
                   1310:                                            ("enter: pa %lx CI'ing all\n",
                   1311:                                            pa));
1.25      thorpej  1312:                                        cacheable = false;
1.1       chs      1313:                                        pv->pv_flags |= PV_CI;
                   1314:                                }
                   1315:                        }
                   1316: #endif
                   1317:                }
                   1318:
                   1319:                /*
                   1320:                 * Speed pmap_is_referenced() or pmap_is_modified() based
                   1321:                 * on the hint provided in access_type.
                   1322:                 */
                   1323: #ifdef DIAGNOSTIC
                   1324:                if ((flags & VM_PROT_ALL) & ~prot)
                   1325:                        panic("pmap_enter: access_type exceeds prot");
                   1326: #endif
                   1327:                if (flags & VM_PROT_WRITE)
                   1328:                        *pa_to_attribute(pa) |= (PG_U|PG_M);
                   1329:                else if (flags & VM_PROT_ALL)
                   1330:                        *pa_to_attribute(pa) |= PG_U;
                   1331:
                   1332:                splx(s);
                   1333:        }
                   1334:        /*
                   1335:         * Assumption: if it is not part of our managed memory
                    1336:         * then it must be device memory which may be volatile.
                   1337:         */
                   1338:        else if (pmap_initialized) {
1.25      thorpej  1339:                checkpv = cacheable = false;
1.1       chs      1340:        }
                   1341:
                   1342:        /*
                   1343:         * Increment counters
                   1344:         */
                   1345:        pmap->pm_stats.resident_count++;
                   1346:        if (wired)
                   1347:                pmap->pm_stats.wired_count++;
                   1348:
                   1349: validate:
                   1350: #ifdef M68K_MMU_HP
                   1351:        /*
                   1352:         * Purge kernel side of VAC to ensure we get correct state
                   1353:         * of HW bits so we don't clobber them.
                   1354:         */
                   1355:        if (pmap_aliasmask)
                   1356:                DCIS();
                   1357: #endif
                   1358:
                   1359:        /*
                   1360:         * Build the new PTE.
                   1361:         */
                   1362:
                   1363:        npte = pa | pte_prot(pmap, prot) | (*pte & (PG_M|PG_U)) | PG_V;
                   1364:        if (wired)
                   1365:                npte |= PG_W;
                   1366:        if (!checkpv && !cacheable)
                   1367: #if defined(M68040) || defined(M68060)
                   1368: #if defined(M68020) || defined(M68030)
                   1369:                npte |= (mmutype == MMU_68040 ? PG_CIN : PG_CI);
                   1370: #else
                   1371:                npte |= PG_CIN;
                   1372: #endif
                   1373: #else
                   1374:                npte |= PG_CI;
                   1375: #endif
                   1376: #if defined(M68040) || defined(M68060)
                   1377: #if defined(M68020) || defined(M68030)
                   1378:        else if (mmutype == MMU_68040 && (npte & (PG_PROT|PG_CI)) == PG_RW)
                   1379: #else
                   1380:        else if ((npte & (PG_PROT|PG_CI)) == PG_RW)
                   1381: #endif
                   1382:                npte |= PG_CCB;
                   1383: #endif
                   1384:
                   1385:        PMAP_DPRINTF(PDB_ENTER, ("enter: new pte value %x\n", npte));
                   1386:
                   1387:        /*
                   1388:         * Remember if this was a wiring-only change.
                   1389:         * If so, we need not flush the TLB and caches.
                   1390:         */
                   1391:
                   1392:        wired = ((*pte ^ npte) == PG_W);
                   1393: #if defined(M68040) || defined(M68060)
                   1394: #if defined(M68020) || defined(M68030)
                   1395:        if (mmutype == MMU_68040 && !wired)
                   1396: #else
                   1397:        if (!wired)
                   1398: #endif
                   1399:        {
                   1400:                DCFP(pa);
                   1401:                ICPP(pa);
                   1402:        }
                   1403: #endif
                   1404:        *pte = npte;
                   1405:        if (!wired && active_pmap(pmap))
                   1406:                TBIS(va);
                   1407: #ifdef M68K_MMU_HP
                   1408:        /*
                   1409:         * The following is executed if we are entering a second
                   1410:         * (or greater) mapping for a physical page and the mappings
                   1411:         * may create an aliasing problem.  In this case we must
                   1412:         * cache inhibit the descriptors involved and flush any
                   1413:         * external VAC.
                   1414:         */
                   1415:        if (checkpv && !cacheable) {
                   1416:                pmap_changebit(pa, PG_CI, ~0);
                   1417:                DCIA();
                   1418: #ifdef DEBUG
                   1419:                if ((pmapdebug & (PDB_CACHE|PDB_PVDUMP)) ==
                   1420:                    (PDB_CACHE|PDB_PVDUMP))
                   1421:                        pmap_pvdump(pa);
                   1422: #endif
                   1423:        }
                   1424: #endif
                   1425: #ifdef DEBUG
                   1426:        if ((pmapdebug & PDB_WIRING) && pmap != pmap_kernel())
                   1427:                pmap_check_wiring("enter", trunc_page((vaddr_t)pte));
                   1428: #endif
                   1429:
                   1430:        return 0;
                   1431: }
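/*
 * Illustrative sketch, not part of the original source: the PTE built
 * by the "validate:" path above, flattened for a kernel configured
 * with only '040 support.
 */
#if 0
	npte = pa				/* page frame address */
	    | pte_prot(pmap, prot)		/* PG_RO or PG_RW */
	    | (*pte & (PG_M|PG_U))		/* keep HW-maintained bits */
	    | PG_V;				/* valid */
	if (wired)
		npte |= PG_W;			/* wired (software bit) */
	if (!checkpv && !cacheable)
		npte |= PG_CIN;			/* cache-inhibited ('040
						   analogue of PG_CI) */
	else if ((npte & (PG_PROT|PG_CI)) == PG_RW)
		npte |= PG_CCB;			/* copyback-cacheable */
#endif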
                   1432:
                   1433: void
1.20      tsutsui  1434: pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot)
1.1       chs      1435: {
1.20      tsutsui  1436:        pmap_t pmap = pmap_kernel();
1.1       chs      1437:        pt_entry_t *pte;
                   1438:        int s, npte;
                   1439:
                   1440:        PMAP_DPRINTF(PDB_FOLLOW|PDB_ENTER,
                   1441:            ("pmap_kenter_pa(%lx, %lx, %x)\n", va, pa, prot));
                   1442:
                   1443:        /*
                   1444:         * Segment table entry not valid, we need a new PT page
                   1445:         */
                   1446:
                   1447:        if (!pmap_ste_v(pmap, va)) {
                   1448:                s = splvm();
1.25      thorpej  1449:                pmap_enter_ptpage(pmap, va, false);
1.1       chs      1450:                splx(s);
                   1451:        }
                   1452:
                   1453:        pa = m68k_trunc_page(pa);
                   1454:        pte = pmap_pte(pmap, va);
                   1455:
                   1456:        PMAP_DPRINTF(PDB_ENTER, ("enter: pte %p, *pte %x\n", pte, *pte));
                   1457:        KASSERT(!pmap_pte_v(pte));
                   1458:
                   1459:        /*
                   1460:         * Increment counters
                   1461:         */
                   1462:
                   1463:        pmap->pm_stats.resident_count++;
                   1464:        pmap->pm_stats.wired_count++;
                   1465:
                   1466:        /*
                   1467:         * Build the new PTE.
                   1468:         */
                   1469:
                   1470:        npte = pa | pte_prot(pmap, prot) | PG_V | PG_W;
                   1471: #if defined(M68040) || defined(M68060)
                   1472: #if defined(M68020) || defined(M68030)
                   1473:        if (mmutype == MMU_68040 && (npte & PG_PROT) == PG_RW)
                   1474: #else
                   1475:        if ((npte & PG_PROT) == PG_RW)
                   1476: #endif
                   1477:                npte |= PG_CCB;
                   1478:
                   1479:        if (mmutype == MMU_68040) {
                   1480:                DCFP(pa);
                   1481:                ICPP(pa);
                   1482:        }
                   1483: #endif
                   1484:
                   1485:        *pte = npte;
                   1486:        TBIS(va);
                   1487: }
                   1488:
                   1489: void
1.20      tsutsui  1490: pmap_kremove(vaddr_t va, vsize_t size)
1.1       chs      1491: {
1.20      tsutsui  1492:        pmap_t pmap = pmap_kernel();
1.1       chs      1493:        pt_entry_t *pte;
                   1494:        vaddr_t nssva;
                   1495:        vaddr_t eva = va + size;
                   1496: #ifdef M68K_MMU_HP
1.23      thorpej  1497:        bool firstpage, needcflush;
1.1       chs      1498: #endif
                   1499:
                   1500:        PMAP_DPRINTF(PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT,
                   1501:            ("pmap_kremove(%lx, %lx)\n", va, size));
                   1502:
                   1503: #ifdef M68K_MMU_HP
1.25      thorpej  1504:        firstpage = true;
                   1505:        needcflush = false;
1.1       chs      1506: #endif
                   1507:        while (va < eva) {
                   1508:                nssva = m68k_trunc_seg(va) + NBSEG;
                   1509:                if (nssva == 0 || nssva > eva)
                   1510:                        nssva = eva;
                   1511:
                   1512:                /*
                   1513:                 * If VA belongs to an unallocated segment,
                   1514:                 * skip to the next segment boundary.
                   1515:                 */
                   1516:
                   1517:                if (!pmap_ste_v(pmap, va)) {
                   1518:                        va = nssva;
                   1519:                        continue;
                   1520:                }
                   1521:
                   1522:                /*
                   1523:                 * Invalidate every valid mapping within this segment.
                   1524:                 */
                   1525:
                   1526:                pte = pmap_pte(pmap, va);
                   1527:                while (va < nssva) {
                   1528:                        if (!pmap_pte_v(pte)) {
                   1529:                                pte++;
1.3       thorpej  1530:                                va += PAGE_SIZE;
1.1       chs      1531:                                continue;
                   1532:                        }
                   1533: #ifdef M68K_MMU_HP
                   1534:                        if (pmap_aliasmask) {
                   1535:
                   1536:                                /*
                   1537:                                 * Purge kernel side of VAC to ensure
                   1538:                                 * we get the correct state of any
                   1539:                                 * hardware maintained bits.
                   1540:                                 */
                   1541:
                   1542:                                if (firstpage) {
                   1543:                                        DCIS();
1.25      thorpej  1544:                                        firstpage = false;
1.1       chs      1545:                                }
                   1546:
                   1547:                                /*
                   1548:                                 * Remember if we may need to
                   1549:                                 * flush the VAC.
                   1550:                                 */
                   1551:
1.25      thorpej  1552:                                needcflush = true;
1.1       chs      1553:                        }
                   1554: #endif
                   1555:                        pmap->pm_stats.wired_count--;
                   1556:                        pmap->pm_stats.resident_count--;
                   1557:                        *pte = PG_NV;
                   1558:                        TBIS(va);
                   1559:                        pte++;
1.3       thorpej  1560:                        va += PAGE_SIZE;
1.1       chs      1561:                }
                   1562:        }
                   1563:
                   1564: #ifdef M68K_MMU_HP
                   1565:
                   1566:        /*
                   1567:         * In a couple of cases, we don't need to worry about flushing
                   1568:         * the VAC:
                   1569:         *      1. if this is a kernel mapping,
                   1570:         *         we have already done it
                   1571:         *      2. if it is a user mapping not for the current process,
                   1572:         *         it won't be there
                   1573:         */
                   1574:
                   1575:        if (pmap_aliasmask && !active_user_pmap(pmap))
1.25      thorpej  1576:                needcflush = false;
1.1       chs      1577:        if (needcflush) {
                   1578:                if (pmap == pmap_kernel()) {
                   1579:                        DCIS();
                   1580:                } else {
                   1581:                        DCIU();
                   1582:                }
                   1583:        }
                   1584: #endif
                   1585: }
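/*
 * Illustrative sketch, not part of the original source: the intended
 * pairing of pmap_kenter_pa() and pmap_kremove().  Both deal only in
 * unmanaged, wired kernel mappings with no PV tracking, so a range
 * entered with the former must be torn down with the latter.
 */
#if 0
	pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE);
	pmap_update(pmap_kernel());

	/* ... access the page through va ... */

	pmap_kremove(va, PAGE_SIZE);
	pmap_update(pmap_kernel());
#endif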
                   1586:
                   1587: /*
                   1588:  * pmap_unwire:                        [ INTERFACE ]
                   1589:  *
                   1590:  *     Clear the wired attribute for a map/virtual-address pair.
                   1591:  *
                   1592:  *     The mapping must already exist in the pmap.
                   1593:  */
                   1594: void
1.20      tsutsui  1595: pmap_unwire(pmap_t pmap, vaddr_t va)
1.1       chs      1596: {
                   1597:        pt_entry_t *pte;
                   1598:
                   1599:        PMAP_DPRINTF(PDB_FOLLOW,
                   1600:            ("pmap_unwire(%p, %lx)\n", pmap, va));
                   1601:
                   1602:        pte = pmap_pte(pmap, va);
                   1603:
                   1604:        /*
                   1605:         * If wiring actually changed (always?) clear the wire bit and
                   1606:         * update the wire count.  Note that wiring is not a hardware
                   1607:         * characteristic so there is no need to invalidate the TLB.
                   1608:         */
                   1609:
                   1610:        if (pmap_pte_w_chg(pte, 0)) {
1.25      thorpej  1611:                pmap_pte_set_w(pte, false);
1.1       chs      1612:                pmap->pm_stats.wired_count--;
                   1613:        }
                   1614: }
                   1615:
                   1616: /*
                   1617:  * pmap_extract:               [ INTERFACE ]
                   1618:  *
                   1619:  *     Extract the physical address associated with the given
                   1620:  *     pmap/virtual address pair.
                   1621:  */
1.23      thorpej  1622: bool
1.20      tsutsui  1623: pmap_extract(pmap_t pmap, vaddr_t va, paddr_t *pap)
1.1       chs      1624: {
                   1625:        paddr_t pa;
                   1626:        u_int pte;
1.8       cl       1627:
1.1       chs      1628:        PMAP_DPRINTF(PDB_FOLLOW,
                   1629:            ("pmap_extract(%p, %lx) -> ", pmap, va));
                   1630:
                   1631:        if (pmap_ste_v(pmap, va)) {
                   1632:                pte = *(u_int *)pmap_pte(pmap, va);
                   1633:                if (pte) {
                   1634:                        pa = (pte & PG_FRAME) | (va & ~PG_FRAME);
                   1635:                        if (pap != NULL)
                   1636:                                *pap = pa;
1.9       mycroft  1637: #ifdef DEBUG
                   1638:                        if (pmapdebug & PDB_FOLLOW)
                   1639:                                printf("%lx\n", pa);
                   1640: #endif
1.25      thorpej  1641:                        return true;
1.1       chs      1642:                }
                   1643:        }
                   1644: #ifdef DEBUG
1.9       mycroft  1645:        if (pmapdebug & PDB_FOLLOW)
                   1646:                printf("failed\n");
1.1       chs      1647: #endif
1.25      thorpej  1648:        return false;
1.1       chs      1649: }
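/*
 * Illustrative sketch, not part of the original source: the address
 * arithmetic pmap_extract() performs, worked through assuming 4KB
 * pages.  PG_FRAME keeps the frame bits of the PTE; ~PG_FRAME keeps
 * the offset within the page.
 */
#if 0
	/* pte = 0x00123019 -> frame 0x00123000 plus flag bits,
	 * va  = 0x00045abc -> in-page offset 0xabc */
	pa = (pte & PG_FRAME) | (va & ~PG_FRAME);
	/* pa == 0x00123abc */
#endif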
                   1650:
                   1651: /*
                   1652:  * pmap_copy:          [ INTERFACE ]
                   1653:  *
                   1654:  *     Copy the mapping range specified by src_addr/len
                   1655:  *     from the source map to the range dst_addr/len
                   1656:  *     in the destination map.
                   1657:  *
                   1658:  *     This routine is only advisory and need not do anything.
                   1659:  */
                   1660: void
1.20      tsutsui  1661: pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vaddr_t dst_addr, vsize_t len,
                   1662:     vaddr_t src_addr)
1.1       chs      1663: {
                   1664:
                   1665:        PMAP_DPRINTF(PDB_FOLLOW,
                   1666:            ("pmap_copy(%p, %p, %lx, %lx, %lx)\n",
                   1667:            dst_pmap, src_pmap, dst_addr, len, src_addr));
                   1668: }
                   1669:
                   1670: /*
                   1671:  * pmap_collect:               [ INTERFACE ]
                   1672:  *
                   1673:  *     Garbage collects the physical map system for pages which are no
                   1674:  *     longer used.  Success need not be guaranteed -- that is, there
                   1675:  *     may well be pages which are not referenced, but others may be
                   1676:  *     collected.
                   1677:  *
                   1678:  *     Called by the pageout daemon when pages are scarce.
                   1679:  */
                   1680: void
1.20      tsutsui  1681: pmap_collect(pmap_t pmap)
1.1       chs      1682: {
                   1683:
                   1684:        PMAP_DPRINTF(PDB_FOLLOW, ("pmap_collect(%p)\n", pmap));
                   1685:
                   1686:        if (pmap == pmap_kernel()) {
                   1687:                int bank, s;
                   1688:
                   1689:                /*
                   1690:                 * XXX This is very bogus.  We should handle kernel PT
                   1691:                 * XXX pages much differently.
                   1692:                 */
                   1693:
                   1694:                s = splvm();
                   1695:                for (bank = 0; bank < vm_nphysseg; bank++)
                   1696:                        pmap_collect1(pmap, ptoa(vm_physmem[bank].start),
                   1697:                            ptoa(vm_physmem[bank].end));
                   1698:                splx(s);
                   1699:        } else {
                   1700:                /*
                   1701:                 * This process is about to be swapped out; free all of
                   1702:                 * the PT pages by removing the physical mappings for its
                   1703:                 * entire address space.  Note: pmap_remove() performs
                   1704:                 * all necessary locking.
                   1705:                 */
                   1706:                pmap_do_remove(pmap, VM_MIN_ADDRESS, VM_MAX_ADDRESS, 0);
                   1707:                pmap_update(pmap);
                   1708:        }
                   1709:
                   1710: #ifdef notyet
                   1711:        /* Go compact and garbage-collect the pv_table. */
                   1712:        pmap_collect_pv();
                   1713: #endif
                   1714: }
                   1715:
                   1716: /*
                   1717:  * pmap_collect1():
                   1718:  *
                   1719:  *     Garbage-collect KPT pages.  Helper for the above (bogus)
                   1720:  *     pmap_collect().
                   1721:  *
                   1722:  *     Note: THIS SHOULD GO AWAY, AND BE REPLACED WITH A BETTER
                   1723:  *     WAY OF HANDLING PT PAGES!
                   1724:  */
                   1725: void
1.20      tsutsui  1726: pmap_collect1(pmap_t pmap, paddr_t startpa, paddr_t endpa)
1.1       chs      1727: {
                   1728:        paddr_t pa;
                   1729:        struct pv_entry *pv;
                   1730:        pt_entry_t *pte;
                   1731:        paddr_t kpa;
                   1732: #ifdef DEBUG
                   1733:        st_entry_t *ste;
                   1734:        int opmapdebug = 0;
                   1735: #endif
                   1736:
1.3       thorpej  1737:        for (pa = startpa; pa < endpa; pa += PAGE_SIZE) {
1.1       chs      1738:                struct kpt_page *kpt, **pkpt;
                   1739:
                   1740:                /*
                   1741:                 * Locate physical pages which are being used as kernel
                   1742:                 * page table pages.
                   1743:                 */
                   1744:
                   1745:                pv = pa_to_pvh(pa);
                   1746:                if (pv->pv_pmap != pmap_kernel() || !(pv->pv_flags & PV_PTPAGE))
                   1747:                        continue;
                   1748:                do {
                   1749:                        if (pv->pv_ptste && pv->pv_ptpmap == pmap_kernel())
                   1750:                                break;
                   1751:                } while ((pv = pv->pv_next));
                   1752:                if (pv == NULL)
                   1753:                        continue;
                   1754: #ifdef DEBUG
                   1755:                if (pv->pv_va < (vaddr_t)Sysmap ||
                   1756:                    pv->pv_va >= (vaddr_t)Sysmap + M68K_MAX_PTSIZE) {
                   1757:                        printf("collect: kernel PT VA out of range\n");
                   1758:                        pmap_pvdump(pa);
                   1759:                        continue;
                   1760:                }
                   1761: #endif
1.3       thorpej  1762:                pte = (pt_entry_t *)(pv->pv_va + PAGE_SIZE);
1.1       chs      1763:                while (--pte >= (pt_entry_t *)pv->pv_va && *pte == PG_NV)
                   1764:                        ;
                   1765:                if (pte >= (pt_entry_t *)pv->pv_va)
                   1766:                        continue;
                   1767:
                   1768: #ifdef DEBUG
                   1769:                if (pmapdebug & (PDB_PTPAGE|PDB_COLLECT)) {
                   1770:                        printf("collect: freeing KPT page at %lx (ste %x@%p)\n",
1.20      tsutsui  1771:                            pv->pv_va, *pv->pv_ptste, pv->pv_ptste);
1.1       chs      1772:                        opmapdebug = pmapdebug;
                   1773:                        pmapdebug |= PDB_PTPAGE;
                   1774:                }
                   1775:
                   1776:                ste = pv->pv_ptste;
                   1777: #endif
                   1778:                /*
                   1779:                 * If all entries were invalid we can remove the page.
                   1780:                 * We call pmap_remove_entry to take care of invalidating
                   1781:                 * ST and Sysptmap entries.
                   1782:                 */
                   1783:
                   1784:                (void) pmap_extract(pmap, pv->pv_va, &kpa);
                   1785:                pmap_remove_mapping(pmap, pv->pv_va, NULL,
                   1786:                    PRM_TFLUSH|PRM_CFLUSH);
                   1787:
                   1788:                /*
                   1789:                 * Use the physical address to locate the original
                   1790:                 * (kmem_alloc assigned) address for the page and put
                   1791:                 * that page back on the free list.
                   1792:                 */
                   1793:
                   1794:                for (pkpt = &kpt_used_list, kpt = *pkpt;
                   1795:                     kpt != NULL;
                   1796:                     pkpt = &kpt->kpt_next, kpt = *pkpt)
                   1797:                        if (kpt->kpt_pa == kpa)
                   1798:                                break;
                   1799: #ifdef DEBUG
                   1800:                if (kpt == NULL)
                   1801:                        panic("pmap_collect: lost a KPT page");
                   1802:                if (pmapdebug & (PDB_PTPAGE|PDB_COLLECT))
                   1803:                        printf("collect: %lx (%lx) to free list\n",
1.20      tsutsui  1804:                            kpt->kpt_va, kpa);
1.1       chs      1805: #endif
                   1806:                *pkpt = kpt->kpt_next;
                   1807:                kpt->kpt_next = kpt_free_list;
                   1808:                kpt_free_list = kpt;
                   1809: #ifdef DEBUG
                   1810:                if (pmapdebug & (PDB_PTPAGE|PDB_COLLECT))
                   1811:                        pmapdebug = opmapdebug;
                   1812:
                   1813:                if (*ste != SG_NV)
                   1814:                        printf("collect: kernel STE at %p still valid (%x)\n",
1.20      tsutsui  1815:                            ste, *ste);
1.1       chs      1816:                ste = &Sysptmap[ste - pmap_ste(pmap_kernel(), 0)];
                   1817:                if (*ste != SG_NV)
                   1818:                        printf("collect: kernel PTmap at %p still valid (%x)\n",
1.20      tsutsui  1819:                            ste, *ste);
1.1       chs      1820: #endif
                   1821:        }
                   1822: }
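/*
 * Illustrative sketch, not part of the original source: the
 * pointer-to-pointer unlink idiom used above, which moves a kpt_page
 * from kpt_used_list to kpt_free_list without a special case for the
 * list head.
 */
#if 0
	for (pkpt = &kpt_used_list; (kpt = *pkpt) != NULL;
	    pkpt = &kpt->kpt_next)
		if (kpt->kpt_pa == kpa)
			break;
	if (kpt != NULL) {
		*pkpt = kpt->kpt_next;		/* unlink from used list */
		kpt->kpt_next = kpt_free_list;	/* push onto free list */
		kpt_free_list = kpt;
	}
#endif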
                   1823:
                   1824: /*
                   1825:  * pmap_zero_page:             [ INTERFACE ]
                   1826:  *
                   1827:  *     Zero the specified (machine independent) page by mapping the page
                   1828:  *     into virtual memory and using memset to clear its contents, one
                   1829:  *     machine dependent page at a time.
                   1830:  *
                   1831:  *     Note: WE DO NOT CURRENTLY LOCK THE TEMPORARY ADDRESSES!
                   1832:  *           (Actually, we go to splvm(), and since we don't
                   1833:  *           support multiple processors, this is sufficient.)
                   1834:  */
                   1835: void
1.20      tsutsui  1836: pmap_zero_page(paddr_t phys)
1.1       chs      1837: {
                   1838:        int npte;
                   1839:
                   1840:        PMAP_DPRINTF(PDB_FOLLOW, ("pmap_zero_page(%lx)\n", phys));
                   1841:
                   1842:        npte = phys | PG_V;
                   1843: #ifdef M68K_MMU_HP
                   1844:        if (pmap_aliasmask) {
                   1845:
                   1846:                /*
                   1847:                 * Cache-inhibit the mapping on VAC machines, as we would
                   1848:                 * be wasting the cache load.
                   1849:                 */
                   1850:
                   1851:                npte |= PG_CI;
                   1852:        }
                   1853: #endif
                   1854:
                   1855: #if defined(M68040) || defined(M68060)
                   1856: #if defined(M68020) || defined(M68030)
                   1857:        if (mmutype == MMU_68040)
                   1858: #endif
                   1859:        {
                   1860:                /*
                   1861:                 * Set copyback caching on the page; this is required
                   1862:                 * for cache consistency (since regular mappings are
                   1863:                 * copyback as well).
                   1864:                 */
                   1865:
                   1866:                npte |= PG_CCB;
                   1867:        }
                   1868: #endif
                   1869:
                   1870:        *caddr1_pte = npte;
                   1871:        TBIS((vaddr_t)CADDR1);
                   1872:
                   1873:        zeropage(CADDR1);
                   1874:
                   1875: #ifdef DEBUG
                   1876:        *caddr1_pte = PG_NV;
                   1877:        TBIS((vaddr_t)CADDR1);
                   1878: #endif
                   1879: }
                   1880:
                   1881: /*
                   1882:  * pmap_copy_page:             [ INTERFACE ]
                   1883:  *
                   1884:  *     Copy the specified (machine independent) page by mapping the page
                   1885:  *     into virtual memory and using memcpy to copy the page, one machine
                   1886:  *     dependent page at a time.
                   1887:  *
                   1888:  *     Note: WE DO NOT CURRENTLY LOCK THE TEMPORARY ADDRESSES!
                   1889:  *           (Actually, we go to splvm(), and since we don't
                   1890:  *           support multiple processors, this is sufficient.)
                   1891:  */
                   1892: void
1.20      tsutsui  1893: pmap_copy_page(paddr_t src, paddr_t dst)
1.1       chs      1894: {
                   1895:        int npte1, npte2;
                   1896:
                   1897:        PMAP_DPRINTF(PDB_FOLLOW, ("pmap_copy_page(%lx, %lx)\n", src, dst));
                   1898:
                   1899:        npte1 = src | PG_RO | PG_V;
                   1900:        npte2 = dst | PG_V;
                   1901: #ifdef M68K_MMU_HP
                   1902:        if (pmap_aliasmask) {
                   1903:
                   1904:                /*
                   1905:                 * Cache-inhibit the mapping on VAC machines, as we would
                   1906:                 * be wasting the cache load.
                   1907:                 */
                   1908:
                   1909:                npte1 |= PG_CI;
                   1910:                npte2 |= PG_CI;
                   1911:        }
                   1912: #endif
                   1913:
                   1914: #if defined(M68040) || defined(M68060)
                   1915: #if defined(M68020) || defined(M68030)
                   1916:        if (mmutype == MMU_68040)
                   1917: #endif
                   1918:        {
                   1919:                /*
                   1920:                 * Set copyback caching on the pages; this is required
                   1921:                 * for cache consistency (since regular mappings are
                   1922:                 * copyback as well).
                   1923:                 */
                   1924:
                   1925:                npte1 |= PG_CCB;
                   1926:                npte2 |= PG_CCB;
                   1927:        }
                   1928: #endif
                   1929:
                   1930:        *caddr1_pte = npte1;
                   1931:        TBIS((vaddr_t)CADDR1);
                   1932:
                   1933:        *caddr2_pte = npte2;
                   1934:        TBIS((vaddr_t)CADDR2);
                   1935:
                   1936:        copypage(CADDR1, CADDR2);
                   1937:
                   1938: #ifdef DEBUG
                   1939:        *caddr1_pte = PG_NV;
                   1940:        TBIS((vaddr_t)CADDR1);
                   1941:
                   1942:        *caddr2_pte = PG_NV;
                   1943:        TBIS((vaddr_t)CADDR2);
                   1944: #endif
                   1945: }
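/*
 * Illustrative sketch, not part of the original source: the
 * temporary-mapping idiom shared by pmap_zero_page() and
 * pmap_copy_page().  CADDR1 (and CADDR2) are kernel VAs reserved at
 * bootstrap for exactly this purpose.
 */
#if 0
	*caddr1_pte = phys | PG_V /* | cache bits as above */;
	TBIS((vaddr_t)CADDR1);		/* purge any stale TLB entry */

	/* ... operate on the page through CADDR1 ... */

	*caddr1_pte = PG_NV;		/* invalidate the mapping */
	TBIS((vaddr_t)CADDR1);
#endif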
                   1946:
                   1947: /*
                   1948:  * pmap_clear_modify:          [ INTERFACE ]
                   1949:  *
                   1950:  *     Clear the modify bits on the specified physical page.
                   1951:  */
1.23      thorpej  1952: bool
1.20      tsutsui  1953: pmap_clear_modify(struct vm_page *pg)
1.1       chs      1954: {
                   1955:        paddr_t pa = VM_PAGE_TO_PHYS(pg);
                   1956:
                   1957:        PMAP_DPRINTF(PDB_FOLLOW, ("pmap_clear_modify(%p)\n", pg));
                   1958:
                   1959:        return pmap_changebit(pa, 0, ~PG_M);
                   1960: }
                   1961:
                   1962: /*
                   1963:  * pmap_clear_reference:       [ INTERFACE ]
                   1964:  *
                   1965:  *     Clear the reference bit on the specified physical page.
                   1966:  */
1.23      thorpej  1967: bool
1.20      tsutsui  1968: pmap_clear_reference(struct vm_page *pg)
1.1       chs      1969: {
                   1970:        paddr_t pa = VM_PAGE_TO_PHYS(pg);
                   1971:
                   1972:        PMAP_DPRINTF(PDB_FOLLOW, ("pmap_clear_reference(%p)\n", pg));
                   1973:
                   1974:        return pmap_changebit(pa, 0, ~PG_U);
                   1975: }
                   1976:
                   1977: /*
                   1978:  * pmap_is_referenced:         [ INTERFACE ]
                   1979:  *
                   1980:  *     Return whether or not the specified physical page is referenced
                   1981:  *     by any physical maps.
                   1982:  */
1.23      thorpej  1983: bool
1.20      tsutsui  1984: pmap_is_referenced(struct vm_page *pg)
1.1       chs      1985: {
                   1986:        paddr_t pa = VM_PAGE_TO_PHYS(pg);
                   1987:
1.20      tsutsui  1988:        return pmap_testbit(pa, PG_U);
1.1       chs      1989: }
                   1990:
                   1991: /*
                   1992:  * pmap_is_modified:           [ INTERFACE ]
                   1993:  *
                   1994:  *     Return whether or not the specified physical page is modified
                   1995:  *     by any physical maps.
                   1996:  */
1.23      thorpej  1997: bool
1.20      tsutsui  1998: pmap_is_modified(struct vm_page *pg)
1.1       chs      1999: {
                   2000:        paddr_t pa = VM_PAGE_TO_PHYS(pg);
                   2001:
1.20      tsutsui  2002:        return pmap_testbit(pa, PG_M);
1.1       chs      2003: }
                   2004:
                   2005: /*
                   2006:  * pmap_phys_address:          [ INTERFACE ]
                   2007:  *
                   2008:  *     Return the physical address corresponding to the specified
                   2009:  *     cookie.  Used by the device pager to decode a device driver's
                   2010:  *     mmap entry point return value.
                   2011:  *
                   2012:  *     Note: no locking is necessary in this function.
                   2013:  */
                   2014: paddr_t
1.32      macallan 2015: pmap_phys_address(paddr_t ppn)
1.1       chs      2016: {
1.20      tsutsui  2017:        return m68k_ptob(ppn);
1.1       chs      2018: }
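/*
 * Worked example (sketch): m68k_ptob() is the usual page-frame-number
 * to byte-address conversion, essentially "ppn << PGSHIFT".  With 4KB
 * pages, a driver mmap() entry point that returned page frame number
 * 0x1234 decodes here to physical address 0x1234000.
 */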
                   2019:
                   2020: #ifdef M68K_MMU_HP
                   2021: /*
                   2022:  * pmap_prefer:                        [ INTERFACE ]
                   2023:  *
                   2024:  *     Find the first virtual address >= *vap that does not
                   2025:  *     cause a virtually-addressed cache alias problem.
                   2026:  */
                   2027: void
1.20      tsutsui  2028: pmap_prefer(vaddr_t foff, vaddr_t *vap)
1.1       chs      2029: {
                   2030:        vaddr_t va;
                   2031:        vsize_t d;
                   2032:
                   2033: #ifdef M68K_MMU_MOTOROLA
                   2034:        if (pmap_aliasmask)
                   2035: #endif
                   2036:        {
                   2037:                va = *vap;
                   2038:                d = foff - va;
                   2039:                d &= pmap_aliasmask;
                   2040:                *vap = va + d;
                   2041:        }
                   2042: }
                   2043: #endif /* M68K_MMU_HP */
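/*
 * Worked example (sketch, assuming a 64KB VAC, i.e. pmap_aliasmask ==
 * 0xffff): for foff = 0x12345 and a hint of *vap = 0x20000,
 * d = (0x12345 - 0x20000) & 0xffff = 0x2345, so *vap becomes 0x22345.
 * The result is >= the original hint and shares foff's alignment
 * within the cache, so mappings of the same object offset never land
 * on conflicting VAC lines.
 */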
                   2044:
                   2045: /*
                   2046:  * Miscellaneous support routines follow
                   2047:  */
                   2048:
                   2049: /*
                   2050:  * pmap_remove_mapping:
                   2051:  *
                   2052:  *     Invalidate a single page denoted by pmap/va.
                   2053:  *
                   2054:  *     If (pte != NULL), it is the already computed PTE for the page.
                   2055:  *
                   2056:  *     If (flags & PRM_TFLUSH), we must invalidate any TLB information.
                   2057:  *
                   2058:  *     If (flags & PRM_CFLUSH), we must flush/invalidate any cache
                   2059:  *     information.
                   2060:  *
                   2061:  *     If (flags & PRM_KEEPPTPAGE), we don't free the page table page
                   2062:  *     if the reference drops to zero.
                   2063:  */
                   2064: /* static */
                   2065: void
1.20      tsutsui  2066: pmap_remove_mapping(pmap_t pmap, vaddr_t va, pt_entry_t *pte, int flags)
1.1       chs      2067: {
                   2068:        paddr_t pa;
                   2069:        struct pv_entry *pv, *npv;
                   2070:        struct pmap *ptpmap;
                   2071:        st_entry_t *ste;
                   2072:        int s, bits;
                   2073: #ifdef DEBUG
                   2074:        pt_entry_t opte;
                   2075: #endif
                   2076:
                   2077:        PMAP_DPRINTF(PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT,
                   2078:            ("pmap_remove_mapping(%p, %lx, %p, %x)\n",
                   2079:            pmap, va, pte, flags));
                   2080:
                   2081:        /*
                   2082:         * PTE not provided, compute it from pmap and va.
                   2083:         */
                   2084:
                   2085:        if (pte == NULL) {
                   2086:                pte = pmap_pte(pmap, va);
                   2087:                if (*pte == PG_NV)
                   2088:                        return;
                   2089:        }
                   2090:
                   2091: #ifdef M68K_MMU_HP
                   2092:        if (pmap_aliasmask && (flags & PRM_CFLUSH)) {
                   2093:
                   2094:                /*
                   2095:                 * Purge kernel side of VAC to ensure we get the correct
                   2096:                 * state of any hardware maintained bits.
                   2097:                 */
                   2098:
                   2099:                DCIS();
                   2100:
                   2101:                /*
                   2102:                 * If this is a non-CI user mapping for the current process,
                   2103:                 * flush the VAC.  Note that the kernel side was flushed
                   2104:                 * above so we don't worry about non-CI kernel mappings.
                   2105:                 */
                   2106:
                   2107:                if (active_user_pmap(pmap) && !pmap_pte_ci(pte)) {
                   2108:                        DCIU();
                   2109:                }
                   2110:        }
                   2111: #endif
                   2112:
                   2113:        pa = pmap_pte_pa(pte);
                   2114: #ifdef DEBUG
                   2115:        opte = *pte;
                   2116: #endif
                   2117:
                   2118:        /*
                   2119:         * Update statistics
                   2120:         */
                   2121:
                   2122:        if (pmap_pte_w(pte))
                   2123:                pmap->pm_stats.wired_count--;
                   2124:        pmap->pm_stats.resident_count--;
                   2125:
                   2126: #if defined(M68040) || defined(M68060)
                   2127: #if defined(M68020) || defined(M68030)
                   2128:        if (mmutype == MMU_68040)
                   2129: #endif
                   2130:        if ((flags & PRM_CFLUSH)) {
                   2131:                DCFP(pa);
                   2132:                ICPP(pa);
                   2133:        }
                   2134: #endif
                   2135:
                   2136:        /*
                   2137:         * Invalidate the PTE after saving the reference/modify info.
                   2138:         */
                   2139:
                   2140:        PMAP_DPRINTF(PDB_REMOVE, ("remove: invalidating pte at %p\n", pte));
                   2141:        bits = *pte & (PG_U|PG_M);
                   2142:        *pte = PG_NV;
                   2143:        if ((flags & PRM_TFLUSH) && active_pmap(pmap))
                   2144:                TBIS(va);
                   2145:
                   2146:        /*
                   2147:         * For user mappings decrement the wiring count on
                   2148:         * the PT page.
                   2149:         */
                   2150:
                   2151:        if (pmap != pmap_kernel()) {
                   2152:                vaddr_t ptpva = trunc_page((vaddr_t)pte);
                   2153:                int refs = pmap_ptpage_delref(ptpva);
                   2154: #ifdef DEBUG
                   2155:                if (pmapdebug & PDB_WIRING)
                   2156:                        pmap_check_wiring("remove", ptpva);
                   2157: #endif
                   2158:
                   2159:                /*
                   2160:                 * If reference count drops to 0, and we're not instructed
                   2161:                 * to keep it around, free the PT page.
                   2162:                 */
                   2163:
                   2164:                if (refs == 0 && (flags & PRM_KEEPPTPAGE) == 0) {
                   2165: #ifdef DIAGNOSTIC
1.16      tsutsui  2166:                        struct pv_entry *ptppv;
1.1       chs      2167: #endif
1.15      tsutsui  2168:                        paddr_t ptppa;
1.1       chs      2169:
1.15      tsutsui  2170:                        ptppa = pmap_pte_pa(pmap_pte(pmap_kernel(), ptpva));
1.1       chs      2171: #ifdef DIAGNOSTIC
1.15      tsutsui  2172:                        if (PAGE_IS_MANAGED(ptppa) == 0)
1.1       chs      2173:                                panic("pmap_remove_mapping: unmanaged PT page");
1.16      tsutsui  2174:                        ptppv = pa_to_pvh(ptppa);
                   2175:                        if (ptppv->pv_ptste == NULL)
1.1       chs      2176:                                panic("pmap_remove_mapping: ptste == NULL");
1.16      tsutsui  2177:                        if (ptppv->pv_pmap != pmap_kernel() ||
                   2178:                            ptppv->pv_va != ptpva ||
                   2179:                            ptppv->pv_next != NULL)
1.1       chs      2180:                                panic("pmap_remove_mapping: "
                   2181:                                    "bad PT page pmap %p, va 0x%lx, next %p",
1.16      tsutsui  2182:                                    ptppv->pv_pmap, ptppv->pv_va,
                   2183:                                    ptppv->pv_next);
1.1       chs      2184: #endif
                   2185:                        pmap_remove_mapping(pmap_kernel(), ptpva,
                   2186:                            NULL, PRM_TFLUSH|PRM_CFLUSH);
1.35.6.1  mjf      2187:                        mutex_enter(&uvm_kernel_object->vmobjlock);
1.15      tsutsui  2188:                        uvm_pagefree(PHYS_TO_VM_PAGE(ptppa));
1.35.6.1  mjf      2189:                        mutex_exit(&uvm_kernel_object->vmobjlock);
1.1       chs      2190:                        PMAP_DPRINTF(PDB_REMOVE|PDB_PTPAGE,
                   2191:                            ("remove: PT page 0x%lx (0x%lx) freed\n",
1.15      tsutsui  2192:                            ptpva, ptppa));
1.1       chs      2193:                }
                   2194:        }
                   2195:
                   2196:        /*
                   2197:         * If this isn't a managed page, we are all done.
                   2198:         */
                   2199:
                   2200:        if (PAGE_IS_MANAGED(pa) == 0)
                   2201:                return;
                   2202:
                   2203:        /*
                   2204:         * Otherwise remove it from the PV table
                   2205:         * (raise IPL since we may be called at interrupt time).
                   2206:         */
                   2207:
                   2208:        pv = pa_to_pvh(pa);
                   2209:        ste = NULL;
                   2210:        s = splvm();
                   2211:
                   2212:        /*
                   2213:         * If it is the first entry on the list, it is actually
                   2214:         * in the header and we must copy the following entry up
                   2215:         * to the header.  Otherwise we must search the list for
                   2216:         * the entry.  In either case we free the now unused entry.
                   2217:         */
                   2218:
                   2219:        if (pmap == pv->pv_pmap && va == pv->pv_va) {
                   2220:                ste = pv->pv_ptste;
                   2221:                ptpmap = pv->pv_ptpmap;
                   2222:                npv = pv->pv_next;
                   2223:                if (npv) {
                   2224:                        npv->pv_flags = pv->pv_flags;
                   2225:                        *pv = *npv;
                   2226:                        pmap_free_pv(npv);
                   2227:                } else
                   2228:                        pv->pv_pmap = NULL;
                   2229:        } else {
                   2230:                for (npv = pv->pv_next; npv; npv = npv->pv_next) {
                   2231:                        if (pmap == npv->pv_pmap && va == npv->pv_va)
                   2232:                                break;
                   2233:                        pv = npv;
                   2234:                }
                   2235: #ifdef DEBUG
                   2236:                if (npv == NULL)
                   2237:                        panic("pmap_remove_mapping: PA not in pv table");
                   2238: #endif
                   2239:                ste = npv->pv_ptste;
                   2240:                ptpmap = npv->pv_ptpmap;
                   2241:                pv->pv_next = npv->pv_next;
                   2242:                pmap_free_pv(npv);
                   2243:                pv = pa_to_pvh(pa);
                   2244:        }
                   2245:
                   2246: #ifdef M68K_MMU_HP
                   2247:
                   2248:        /*
                   2249:         * If only one mapping is left, we no longer need to cache-inhibit.
                   2250:         */
                   2251:
                   2252:        if (pmap_aliasmask &&
                   2253:            pv->pv_pmap && pv->pv_next == NULL && (pv->pv_flags & PV_CI)) {
                   2254:                PMAP_DPRINTF(PDB_CACHE,
                   2255:                    ("remove: clearing CI for pa %lx\n", pa));
                   2256:                pv->pv_flags &= ~PV_CI;
                   2257:                pmap_changebit(pa, 0, ~PG_CI);
                   2258: #ifdef DEBUG
                   2259:                if ((pmapdebug & (PDB_CACHE|PDB_PVDUMP)) ==
                   2260:                    (PDB_CACHE|PDB_PVDUMP))
                   2261:                        pmap_pvdump(pa);
                   2262: #endif
                   2263:        }
                   2264: #endif
                   2265:
                   2266:        /*
                   2267:         * If this was a PT page we must also remove the
                   2268:         * mapping from the associated segment table.
                   2269:         */
                   2270:
                   2271:        if (ste) {
                   2272:                PMAP_DPRINTF(PDB_REMOVE|PDB_PTPAGE,
                   2273:                    ("remove: ste was %x@%p pte was %x@%p\n",
                   2274:                    *ste, ste, opte, pmap_pte(pmap, va)));
                   2275: #if defined(M68040) || defined(M68060)
                   2276: #if defined(M68020) || defined(M68030)
                   2277:                if (mmutype == MMU_68040)
                   2278: #endif
                   2279:                {
                   2280:                        st_entry_t *este = &ste[NPTEPG/SG4_LEV3SIZE];
                   2281:
                   2282:                        while (ste < este)
                   2283:                                *ste++ = SG_NV;
                   2284: #ifdef DEBUG
                   2285:                        ste -= NPTEPG/SG4_LEV3SIZE;
                   2286: #endif
                   2287:                }
                   2288: #if defined(M68020) || defined(M68030)
                   2289:                else
                   2290: #endif
                   2291: #endif
                   2292: #if defined(M68020) || defined(M68030)
                   2293:                *ste = SG_NV;
                   2294: #endif
                   2295:
                   2296:                /*
                   2297:                 * If it was a user PT page, we decrement the
                   2298:                 * reference count on the segment table as well,
                   2299:                 * freeing it if it is now empty.
                   2300:                 */
                   2301:
                   2302:                if (ptpmap != pmap_kernel()) {
                   2303:                        PMAP_DPRINTF(PDB_REMOVE|PDB_SEGTAB,
                   2304:                            ("remove: stab %p, refcnt %d\n",
                   2305:                            ptpmap->pm_stab, ptpmap->pm_sref - 1));
                   2306: #ifdef DEBUG
                   2307:                        if ((pmapdebug & PDB_PARANOIA) &&
                   2308:                            ptpmap->pm_stab !=
                   2309:                             (st_entry_t *)trunc_page((vaddr_t)ste))
                   2310:                                panic("remove: bogus ste");
                   2311: #endif
                   2312:                        if (--(ptpmap->pm_sref) == 0) {
                   2313:                                PMAP_DPRINTF(PDB_REMOVE|PDB_SEGTAB,
                   2314:                                    ("remove: free stab %p\n",
                   2315:                                    ptpmap->pm_stab));
1.14      yamt     2316:                                uvm_km_free(st_map, (vaddr_t)ptpmap->pm_stab,
                   2317:                                    M68K_STSIZE, UVM_KMF_WIRED);
1.1       chs      2318:                                ptpmap->pm_stab = Segtabzero;
                   2319:                                ptpmap->pm_stpa = Segtabzeropa;
                   2320: #if defined(M68040) || defined(M68060)
                   2321: #if defined(M68020) || defined(M68030)
                   2322:                                if (mmutype == MMU_68040)
                   2323: #endif
                   2324:                                        ptpmap->pm_stfree = protostfree;
                   2325: #endif
                   2326:
                   2327:                                /*
                   2328:                                 * XXX may have changed segment table
                   2329:                                 * pointer for current process so
                   2330:                                 * update now to reload hardware.
                   2331:                                 */
                   2332:
                   2333:                                if (active_user_pmap(ptpmap))
                   2334:                                        PMAP_ACTIVATE(ptpmap, 1);
                   2335:                        }
                   2336:                }
                   2337:                pv->pv_flags &= ~PV_PTPAGE;
                   2338:                ptpmap->pm_ptpages--;
                   2339:        }
                   2340:
                   2341:        /*
                   2342:         * Update saved attributes for managed page
                   2343:         */
                   2344:
                   2345:        *pa_to_attribute(pa) |= bits;
                   2346:        splx(s);
                   2347: }
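/*
 * Sketch of the PV-list removal idiom used above (illustrative only,
 * not compiled): the head of each PV chain is embedded in the pv_table
 * rather than allocated separately, so removing the first entry copies
 * its successor up into the header, while removing a later entry is a
 * conventional singly-linked unlink.
 */
#if 0	/* example only */
static void
example_pv_remove(struct pv_entry *head, pmap_t pmap, vaddr_t va)
{
	struct pv_entry *pv, *npv;

	if (head->pv_pmap == pmap && head->pv_va == va) {
		npv = head->pv_next;
		if (npv) {
			/* pv_flags are per-page; keep them in the header */
			npv->pv_flags = head->pv_flags;
			*head = *npv;
			pmap_free_pv(npv);
		} else
			head->pv_pmap = NULL;	/* chain is now empty */
	} else {
		for (pv = head; (npv = pv->pv_next) != NULL; pv = npv)
			if (npv->pv_pmap == pmap && npv->pv_va == va)
				break;
		if (npv != NULL) {
			pv->pv_next = npv->pv_next;
			pmap_free_pv(npv);
		}
	}
}
#endif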
                   2348:
                   2349: /*
                   2350:  * pmap_testbit:
                   2351:  *
                   2352:  *     Test the modified/referenced bits of a physical page.
                   2353:  */
                   2354: /* static */
1.23      thorpej  2355: bool
1.20      tsutsui  2356: pmap_testbit(paddr_t pa, int bit)
1.1       chs      2357: {
                   2358:        struct pv_entry *pv;
                   2359:        pt_entry_t *pte;
                   2360:        int s;
                   2361:
                   2362:        pv = pa_to_pvh(pa);
                   2363:        s = splvm();
                   2364:
                   2365:        /*
                   2366:         * Check saved info first
                   2367:         */
                   2368:
                   2369:        if (*pa_to_attribute(pa) & bit) {
                   2370:                splx(s);
1.25      thorpej  2371:                return true;
1.1       chs      2372:        }
                   2373:
                   2374: #ifdef M68K_MMU_HP
                   2375:
                   2376:        /*
                   2377:         * Flush VAC to get correct state of any hardware maintained bits.
                   2378:         */
                   2379:
                   2380:        if (pmap_aliasmask && (bit & (PG_U|PG_M)))
                   2381:                DCIS();
                   2382: #endif
                   2383:
                   2384:        /*
                   2385:         * Not found.  Check current mappings, returning immediately if
                   2386:         * found.  Cache a hit to speed future lookups.
                   2387:         */
                   2388:
                   2389:        if (pv->pv_pmap != NULL) {
                   2390:                for (; pv; pv = pv->pv_next) {
                   2391:                        pte = pmap_pte(pv->pv_pmap, pv->pv_va);
                   2392:                        if (*pte & bit) {
                   2393:                                *pa_to_attribute(pa) |= bit;
                   2394:                                splx(s);
1.25      thorpej  2395:                                return true;
1.1       chs      2396:                        }
                   2397:                }
                   2398:        }
                   2399:        splx(s);
1.25      thorpej  2400:        return false;
1.1       chs      2401: }
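/*
 * Note on the protocol (sketch): the saved-attribute array and the
 * live PTEs together hold the complete reference/modify state.
 * pmap_remove_mapping() folds PG_U|PG_M into *pa_to_attribute(pa)
 * before zapping a PTE, so the "check saved info first" step above
 * never loses history for mappings that have already been torn down.
 */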
                   2402:
                   2403: /*
                   2404:  * pmap_changebit:
                   2405:  *
                   2406:  *     Change the modified/referenced bits, or other PTE bits,
                   2407:  *     for a physical page.
                   2408:  */
                   2409: /* static */
1.23      thorpej  2410: bool
1.20      tsutsui  2411: pmap_changebit(paddr_t pa, int set, int mask)
1.1       chs      2412: {
                   2413:        struct pv_entry *pv;
                   2414:        pt_entry_t *pte, npte;
                   2415:        vaddr_t va;
                   2416:        char *attrp;
                   2417:        int s;
                   2418: #if defined(M68K_MMU_HP) || defined(M68040) || defined(M68060)
1.25      thorpej  2419:        bool firstpage = true;
1.1       chs      2420: #endif
1.23      thorpej  2421:        bool r;
1.1       chs      2422:
                   2423:        PMAP_DPRINTF(PDB_BITS,
                   2424:            ("pmap_changebit(%lx, %x, %x)\n", pa, set, mask));
                   2425:
                   2426:        pv = pa_to_pvh(pa);
                   2427:        s = splvm();
                   2428:
                   2429:        /*
                   2430:         * Clear saved attributes (modify, reference)
                   2431:         */
                   2432:
                   2433:        attrp = pa_to_attribute(pa);
                   2434:        r = *attrp & ~mask;
                   2435:        *attrp &= mask;
                   2436:
                   2437:        /*
                   2438:         * Loop over all current mappings, setting/clearing as appropriate.
                   2439:         * If setting RO, do we need to clear the VAC?
                   2440:         */
                   2441:
                   2442:        if (pv->pv_pmap != NULL) {
                   2443: #ifdef DEBUG
                   2444:                int toflush = 0;
                   2445: #endif
                   2446:                for (; pv; pv = pv->pv_next) {
                   2447: #ifdef DEBUG
                   2448:                        toflush |= (pv->pv_pmap == pmap_kernel()) ? 2 : 1;
                   2449: #endif
                   2450:                        va = pv->pv_va;
                   2451:                        pte = pmap_pte(pv->pv_pmap, va);
                   2452: #ifdef M68K_MMU_HP
                   2453:
                   2454:                        /*
                   2455:                         * Flush VAC to ensure we get correct state of HW bits
                   2456:                         * so we don't clobber them.
                   2457:                         */
                   2458:
                   2459:                        if (firstpage && pmap_aliasmask) {
1.25      thorpej  2460:                                firstpage = false;
1.1       chs      2461:                                DCIS();
                   2462:                        }
                   2463: #endif
                   2464:                        npte = (*pte | set) & mask;
                   2465:                        if (*pte != npte) {
1.25      thorpej  2466:                                r = true;
1.1       chs      2467: #if defined(M68040) || defined(M68060)
                   2468:                                /*
                   2469:                                 * If we are changing caching status or
                   2470:                                 * protection make sure the caches are
                   2471:                                 * flushed (but only once).
                   2472:                                 */
                   2473:                                if (firstpage &&
                   2474: #if defined(M68020) || defined(M68030)
                   2475:                                    (mmutype == MMU_68040) &&
                   2476: #endif
                   2477:                                    ((set == PG_RO) ||
                   2478:                                     (set & PG_CMASK) ||
                   2479:                                     (mask & PG_CMASK) == 0)) {
1.25      thorpej  2480:                                        firstpage = false;
1.1       chs      2481:                                        DCFP(pa);
                   2482:                                        ICPP(pa);
                   2483:                                }
                   2484: #endif
                   2485:                                *pte = npte;
                   2486:                                if (active_pmap(pv->pv_pmap))
                   2487:                                        TBIS(va);
                   2488:                        }
                   2489:                }
                   2490:        }
                   2491:        splx(s);
1.20      tsutsui  2492:        return r;
1.1       chs      2493: }
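/*
 * Note on the (set, mask) convention, with examples from this file:
 * each PTE is rewritten as (*pte | set) & mask, so `set' is an OR-mask
 * of bits to turn on and `mask' is an AND-mask of bits to preserve.
 * The return value reports whether anything actually changed:
 *
 *	pmap_changebit(pa, 0, ~PG_M);		clear the modify bit
 *	pmap_changebit(pa, 0, ~PG_CI);		make cacheable again
 *	pmap_changebit(ptpa, PG_CI, ~PG_CCB);	cache-inhibit a PT page
 */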
                   2494:
                   2495: /*
                   2496:  * pmap_enter_ptpage:
                   2497:  *
                   2498:  *     Allocate and map a PT page for the specified pmap/va pair.
                   2499:  */
                   2500: /* static */
1.22      martin   2501: int
1.23      thorpej  2502: pmap_enter_ptpage(pmap_t pmap, vaddr_t va, bool can_fail)
1.1       chs      2503: {
                   2504:        paddr_t ptpa;
                   2505:        struct vm_page *pg;
                   2506:        struct pv_entry *pv;
                   2507:        st_entry_t *ste;
                   2508:        int s;
                   2509:
                   2510:        PMAP_DPRINTF(PDB_FOLLOW|PDB_ENTER|PDB_PTPAGE,
                   2511:            ("pmap_enter_ptpage: pmap %p, va %lx\n", pmap, va));
                   2512:
                   2513:        /*
                   2514:         * Allocate a segment table if necessary.  Note that it is allocated
                   2515:         * from a private map and not pt_map.  This keeps user page tables
                   2516:         * aligned on segment boundaries in the kernel address space.
                   2517:         * The segment table is wired down.  It will be freed whenever the
                   2518:         * reference count drops to zero.
                   2519:         */
                   2520:        if (pmap->pm_stab == Segtabzero) {
                   2521:                pmap->pm_stab = (st_entry_t *)
1.14      yamt     2522:                    uvm_km_alloc(st_map, M68K_STSIZE, 0,
1.22      martin   2523:                    UVM_KMF_WIRED | UVM_KMF_ZERO |
                   2524:                    (can_fail ? UVM_KMF_NOWAIT : 0));
                   2525:                if (pmap->pm_stab == NULL) {
                   2526:                        pmap->pm_stab = Segtabzero;
                   2527:                        return ENOMEM;
                   2528:                }
1.1       chs      2529:                (void) pmap_extract(pmap_kernel(), (vaddr_t)pmap->pm_stab,
                   2530:                    (paddr_t *)&pmap->pm_stpa);
                   2531: #if defined(M68040) || defined(M68060)
                   2532: #if defined(M68020) || defined(M68030)
                   2533:                if (mmutype == MMU_68040)
                   2534: #endif
                   2535:                {
1.21      mhitch   2536:                        pt_entry_t      *pte;
                   2537:
                   2538:                        pte = pmap_pte(pmap_kernel(), pmap->pm_stab);
                   2539:                        *pte = (*pte & ~PG_CMASK) | PG_CI;
1.1       chs      2540:                        pmap->pm_stfree = protostfree;
                   2541:                }
                   2542: #endif
                   2543:                /*
                   2544:                 * XXX may have changed segment table pointer for current
                   2545:                 * process so update now to reload hardware.
                   2546:                 */
                   2547:                if (active_user_pmap(pmap))
                   2548:                        PMAP_ACTIVATE(pmap, 1);
                   2549:
                   2550:                PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB,
                   2551:                    ("enter: pmap %p stab %p(%p)\n",
                   2552:                    pmap, pmap->pm_stab, pmap->pm_stpa));
                   2553:        }
                   2554:
                   2555:        ste = pmap_ste(pmap, va);
                   2556: #if defined(M68040) || defined(M68060)
                   2557:        /*
                   2558:         * Allocate level 2 descriptor block if necessary
                   2559:         */
                   2560: #if defined(M68020) || defined(M68030)
                   2561:        if (mmutype == MMU_68040)
                   2562: #endif
                   2563:        {
                   2564:                if (*ste == SG_NV) {
                   2565:                        int ix;
1.26      christos 2566:                        void *addr;
1.1       chs      2567:
                   2568:                        ix = bmtol2(pmap->pm_stfree);
                   2569:                        if (ix == -1)
                   2570:                                panic("enter: out of address space"); /* XXX */
                   2571:                        pmap->pm_stfree &= ~l2tobm(ix);
1.26      christos 2572:                        addr = (void *)&pmap->pm_stab[ix*SG4_LEV2SIZE];
1.1       chs      2573:                        memset(addr, 0, SG4_LEV2SIZE*sizeof(st_entry_t));
1.26      christos 2574:                        addr = (void *)&pmap->pm_stpa[ix*SG4_LEV2SIZE];
1.1       chs      2575:                        *ste = (u_int)addr | SG_RW | SG_U | SG_V;
                   2576:
                   2577:                        PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB,
                   2578:                            ("enter: alloc ste2 %d(%p)\n", ix, addr));
                   2579:                }
                   2580:                ste = pmap_ste2(pmap, va);
                   2581:                /*
                   2582:                 * Since a level 2 descriptor maps a block of SG4_LEV3SIZE
                   2583:                 * level 3 descriptors, we need a chunk of NPTEPG/SG4_LEV3SIZE
1.3       thorpej  2584:                 * (16) such descriptors (PAGE_SIZE/SG4_LEV3SIZE bytes) to map a
1.1       chs      2585:                 * PT page--the unit of allocation.  We set `ste' to point
                   2586:                 * to the first entry of that chunk which is validated in its
                   2587:                 * entirety below.
                   2588:                 */
1.3       thorpej  2589:                ste = (st_entry_t *)((int)ste & ~(PAGE_SIZE/SG4_LEV3SIZE-1));
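		/*
		 * Worked numbers for the masking above (sketch, assuming
		 * 4KB pages and 4-byte descriptors): NPTEPG = 4096/4 = 1024
		 * and SG4_LEV3SIZE = 64, so a PT page takes 1024/64 = 16
		 * level 3 descriptors, i.e. PAGE_SIZE/SG4_LEV3SIZE = 64
		 * bytes, and the mask rounds `ste' down to the 64-byte
		 * start of that 16-descriptor chunk.
		 */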
1.1       chs      2590:
                   2591:                PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB,
                   2592:                    ("enter: ste2 %p (%p)\n", pmap_ste2(pmap, va), ste));
                   2593:        }
                   2594: #endif
                   2595:        va = trunc_page((vaddr_t)pmap_pte(pmap, va));
                   2596:
                   2597:        /*
                   2598:         * In the kernel we allocate a page from the kernel PT page
                   2599:         * free list and map it into the kernel page table map (via
                   2600:         * pmap_enter).
                   2601:         */
                   2602:        if (pmap == pmap_kernel()) {
                   2603:                struct kpt_page *kpt;
                   2604:
                   2605:                s = splvm();
                   2606:                if ((kpt = kpt_free_list) == NULL) {
                   2607:                        /*
                   2608:                         * No PT pages available.
                   2609:                         * Try once to free up unused ones.
                   2610:                         */
                   2611:                        PMAP_DPRINTF(PDB_COLLECT,
                   2612:                            ("enter: no KPT pages, collecting...\n"));
                   2613:                        pmap_collect(pmap_kernel());
                   2614:                        if ((kpt = kpt_free_list) == NULL)
                   2615:                                panic("pmap_enter_ptpage: can't get KPT page");
                   2616:                }
                   2617:                kpt_free_list = kpt->kpt_next;
                   2618:                kpt->kpt_next = kpt_used_list;
                   2619:                kpt_used_list = kpt;
                   2620:                ptpa = kpt->kpt_pa;
1.26      christos 2621:                memset((void *)kpt->kpt_va, 0, PAGE_SIZE);
1.1       chs      2622:                pmap_enter(pmap, va, ptpa, VM_PROT_READ | VM_PROT_WRITE,
                   2623:                    VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED);
                   2624:                pmap_update(pmap);
                   2625: #ifdef DEBUG
                   2626:                if (pmapdebug & (PDB_ENTER|PDB_PTPAGE)) {
                   2627:                        int ix = pmap_ste(pmap, va) - pmap_ste(pmap, 0);
                   2628:
                   2629:                        printf("enter: add &Sysptmap[%d]: %x (KPT page %lx)\n",
1.20      tsutsui  2630:                            ix, Sysptmap[ix], kpt->kpt_va);
1.1       chs      2631:                }
                   2632: #endif
                   2633:                splx(s);
                   2634:        } else {
                   2635:
                   2636:                /*
                   2637:                 * For user processes we just allocate a page from the
                   2638:                 * VM system.  Note that we set the page "wired" count to 1,
                   2639:                 * which is what we use to check if the page can be freed.
                   2640:                 * See pmap_remove_mapping().
                   2641:                 *
                   2642:                 * Count the segment table reference first so that we won't
                   2643:                 * lose the segment table when low on memory.
                   2644:                 */
                   2645:
                   2646:                pmap->pm_sref++;
                   2647:                PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE,
                   2648:                    ("enter: about to alloc UPT pg at %lx\n", va));
1.35.6.1  mjf      2649:                mutex_enter(&uvm_kernel_object->vmobjlock);
1.33      dogcow   2650:                while ((pg = uvm_pagealloc(uvm_kernel_object,
1.1       chs      2651:                                           va - vm_map_min(kernel_map),
                   2652:                                           NULL, UVM_PGA_ZERO)) == NULL) {
1.35.6.1  mjf      2653:                        mutex_exit(&uvm_kernel_object->vmobjlock);
1.1       chs      2654:                        uvm_wait("ptpage");
1.35.6.1  mjf      2655:                        mutex_enter(&uvm_kernel_object->vmobjlock);
1.1       chs      2656:                }
1.35.6.1  mjf      2657:                mutex_exit(&uvm_kernel_object->vmobjlock);
1.1       chs      2658:                pg->flags &= ~(PG_BUSY|PG_FAKE);
                   2659:                UVM_PAGE_OWN(pg, NULL);
                   2660:                ptpa = VM_PAGE_TO_PHYS(pg);
                   2661:                pmap_enter(pmap_kernel(), va, ptpa,
                   2662:                    VM_PROT_READ | VM_PROT_WRITE,
                   2663:                    VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED);
                   2664:                pmap_update(pmap_kernel());
                   2665:        }
                   2666: #if defined(M68040) || defined(M68060)
                   2667:        /*
                   2668:         * Turn off copyback caching of page table pages,
                   2669:         * could get ugly otherwise.
                   2670:         */
                   2671: #if defined(M68020) || defined(M68030)
                   2672:        if (mmutype == MMU_68040)
                   2673: #endif
                   2674:        {
                   2675: #ifdef DEBUG
                   2676:                pt_entry_t *pte = pmap_pte(pmap_kernel(), va);
                   2677:                if ((pmapdebug & PDB_PARANOIA) && (*pte & PG_CCB) == 0)
                   2678:                        printf("%s PT no CCB: kva=%lx ptpa=%lx pte@%p=%x\n",
1.20      tsutsui  2679:                            pmap == pmap_kernel() ? "Kernel" : "User",
                   2680:                            va, ptpa, pte, *pte);
1.1       chs      2681: #endif
                   2682:                if (pmap_changebit(ptpa, PG_CI, ~PG_CCB))
                   2683:                        DCIS();
                   2684:        }
                   2685: #endif
                   2686:        /*
                   2687:         * Locate the PV entry in the kernel for this PT page and
                   2688:         * record the STE address.  This is so that we can invalidate
                   2689:         * the STE when we remove the mapping for the page.
                   2690:         */
                   2691:        pv = pa_to_pvh(ptpa);
                   2692:        s = splvm();
                   2693:        if (pv) {
                   2694:                pv->pv_flags |= PV_PTPAGE;
                   2695:                do {
                   2696:                        if (pv->pv_pmap == pmap_kernel() && pv->pv_va == va)
                   2697:                                break;
                   2698:                } while ((pv = pv->pv_next));
                   2699:        }
                   2700: #ifdef DEBUG
                   2701:        if (pv == NULL)
                   2702:                panic("pmap_enter_ptpage: PT page not entered");
                   2703: #endif
                   2704:        pv->pv_ptste = ste;
                   2705:        pv->pv_ptpmap = pmap;
                   2706:
                   2707:        PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE,
                   2708:            ("enter: new PT page at PA %lx, ste at %p\n", ptpa, ste));
                   2709:
                   2710:        /*
                   2711:         * Map the new PT page into the segment table.
                   2712:         * Also increment the reference count on the segment table if this
                   2713:         * was a user page table page.  Note that we don't use vm_map_pageable
                   2714:         * to keep the count like we do for PT pages; this is mostly because
                   2715:         * it would be difficult to identify ST pages in pmap_pageable to
                   2716:         * release them.  We also avoid the overhead of vm_map_pageable.
                   2717:         */
                   2718: #if defined(M68040) || defined(M68060)
                   2719: #if defined(M68020) || defined(M68030)
                   2720:        if (mmutype == MMU_68040)
                   2721: #endif
                   2722:        {
                   2723:                st_entry_t *este;
                   2724:
                   2725:                for (este = &ste[NPTEPG/SG4_LEV3SIZE]; ste < este; ste++) {
                   2726:                        *ste = ptpa | SG_U | SG_RW | SG_V;
                   2727:                        ptpa += SG4_LEV3SIZE * sizeof(st_entry_t);
                   2728:                }
                   2729:        }
                   2730: #if defined(M68020) || defined(M68030)
                   2731:        else
                   2732:                *ste = (ptpa & SG_FRAME) | SG_RW | SG_V;
                   2733: #endif
                   2734: #else
                   2735:        *ste = (ptpa & SG_FRAME) | SG_RW | SG_V;
                   2736: #endif
                   2737:        if (pmap != pmap_kernel()) {
                   2738:                PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB,
                   2739:                    ("enter: stab %p refcnt %d\n",
                   2740:                    pmap->pm_stab, pmap->pm_sref));
                   2741:        }
                   2742:        /*
                   2743:         * Flush stale TLB info.
                   2744:         */
                   2745:        if (pmap == pmap_kernel())
                   2746:                TBIAS();
                   2747:        else
                   2748:                TBIAU();
                   2749:        pmap->pm_ptpages++;
                   2750:        splx(s);
1.22      martin   2751:
                   2752:        return 0;
1.1       chs      2753: }
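/*
 * Caller-side sketch (illustrative, not from this file): when
 * `can_fail' is set, a failed segment-table allocation makes
 * pmap_enter_ptpage() return ENOMEM instead of sleeping, so a caller
 * honoring PMAP_CANFAIL semantics can back out cleanly:
 */
#if 0	/* example only */
	error = pmap_enter_ptpage(pmap, va, (flags & PMAP_CANFAIL) != 0);
	if (error) {
		/* undo any partial state, then report failure to UVM */
		return error;
	}
#endif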
                   2754:
                   2755: /*
                   2756:  * pmap_ptpage_addref:
                   2757:  *
                   2758:  *     Add a reference to the specified PT page.
                   2759:  */
                   2760: void
1.20      tsutsui  2761: pmap_ptpage_addref(vaddr_t ptpva)
1.1       chs      2762: {
                   2763:        struct vm_page *pg;
                   2764:
1.35.6.1  mjf      2765:        mutex_enter(&uvm_kernel_object->vmobjlock);
1.33      dogcow   2766:        pg = uvm_pagelookup(uvm_kernel_object, ptpva - vm_map_min(kernel_map));
1.1       chs      2767:        pg->wire_count++;
                   2768:        PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB,
                   2769:            ("ptpage addref: pg %p now %d\n",
                   2770:             pg, pg->wire_count));
1.35.6.1  mjf      2771:        mutex_exit(&uvm_kernel_object->vmobjlock);
1.1       chs      2772: }
                   2773:
                   2774: /*
                   2775:  * pmap_ptpage_delref:
                   2776:  *
                   2777:  *     Delete a reference to the specified PT page.
                   2778:  */
                   2779: int
1.20      tsutsui  2780: pmap_ptpage_delref(vaddr_t ptpva)
1.1       chs      2781: {
                   2782:        struct vm_page *pg;
                   2783:        int rv;
                   2784:
1.35.6.1  mjf      2785:        mutex_enter(&uvm_kernel_object->vmobjlock);
1.33      dogcow   2786:        pg = uvm_pagelookup(uvm_kernel_object, ptpva - vm_map_min(kernel_map));
1.1       chs      2787:        rv = --pg->wire_count;
                   2788:        PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB,
                   2789:            ("ptpage delref: pg %p now %d\n",
                   2790:             pg, pg->wire_count));
1.35.6.1  mjf      2791:        mutex_exit(&uvm_kernel_object->vmobjlock);
1.20      tsutsui  2792:        return rv;
1.1       chs      2793: }
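/*
 * Illustrative pairing (sketch): a PT page's wire_count doubles as its
 * reference count.  Entering a mapping adds a reference and removing
 * one drops it; when pmap_ptpage_delref() returns zero,
 * pmap_remove_mapping() frees the PT page unless PRM_KEEPPTPAGE was
 * given.
 */
#if 0	/* example only */
	pmap_ptpage_addref(trunc_page((vaddr_t)pte));
	/* ... mapping is live ... */
	if (pmap_ptpage_delref(trunc_page((vaddr_t)pte)) == 0) {
		/* last mapping gone; the PT page may be reclaimed */
	}
#endif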
                   2794:
                   2795: /*
                   2796:  *     Routine:        pmap_procwr
                   2797:  *
                   2798:  *     Function:
                   2799:  *             Synchronize caches corresponding to [addr, addr + len) in p.
                   2800:  */
                   2801: void
1.20      tsutsui  2802: pmap_procwr(struct proc *p, vaddr_t va, size_t len)
1.1       chs      2803: {
1.20      tsutsui  2804:
1.1       chs      2805:        (void)cachectl1(0x80000004, va, len, p);
                   2806: }
                   2807:
                   2808: void
1.20      tsutsui  2809: _pmap_set_page_cacheable(pmap_t pmap, vaddr_t va)
1.1       chs      2810: {
                   2811:
1.20      tsutsui  2812:        if (!pmap_ste_v(pmap, va))
1.1       chs      2813:                return;
                   2814:
                   2815: #if defined(M68040) || defined(M68060)
                   2816: #if defined(M68020) || defined(M68030)
1.20      tsutsui  2817:        if (mmutype == MMU_68040) {
1.1       chs      2818: #endif
1.20      tsutsui  2819:        if (pmap_changebit(pmap_pte_pa(pmap_pte(pmap, va)), PG_CCB, ~PG_CI))
1.1       chs      2820:                DCIS();
                   2821:
                   2822: #if defined(M68020) || defined(M68030)
                   2823:        } else
1.20      tsutsui  2824:                pmap_changebit(pmap_pte_pa(pmap_pte(pmap, va)), 0, ~PG_CI);
1.1       chs      2825: #endif
                   2826: #else
1.20      tsutsui  2827:        pmap_changebit(pmap_pte_pa(pmap_pte(pmap, va)), 0, ~PG_CI);
1.1       chs      2828: #endif
                   2829: }
                   2830:
                   2831: void
1.20      tsutsui  2832: _pmap_set_page_cacheinhibit(pmap_t pmap, vaddr_t va)
1.1       chs      2833: {
                   2834:
1.20      tsutsui  2835:        if (!pmap_ste_v(pmap, va))
1.1       chs      2836:                return;
                   2837:
                   2838: #if defined(M68040) || defined(M68060)
                   2839: #if defined(M68020) || defined(M68030)
1.20      tsutsui  2840:        if (mmutype == MMU_68040) {
1.1       chs      2841: #endif
1.20      tsutsui  2842:        if (pmap_changebit(pmap_pte_pa(pmap_pte(pmap, va)), PG_CI, ~PG_CCB))
1.1       chs      2843:                DCIS();
                   2844: #if defined(M68020) || defined(M68030)
                   2845:        } else
1.20      tsutsui  2846:                pmap_changebit(pmap_pte_pa(pmap_pte(pmap, va)), PG_CI, ~0);
1.1       chs      2847: #endif
                   2848: #else
1.20      tsutsui  2849:        pmap_changebit(pmap_pte_pa(pmap_pte(pmap, va)), PG_CI, ~0);
1.1       chs      2850: #endif
                   2851: }
                   2852:
                   2853: int
1.20      tsutsui  2854: _pmap_page_is_cacheable(pmap_t pmap, vaddr_t va)
1.1       chs      2855: {
                   2856:
1.20      tsutsui  2857:        if (!pmap_ste_v(pmap, va))
                   2858:                return 0;
1.1       chs      2859:
1.20      tsutsui  2860:        return (pmap_pte_ci(pmap_pte(pmap, va)) == 0) ? 1 : 0;
1.1       chs      2861: }
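/*
 * Usage sketch (hypothetical driver code, not from this file): these
 * helpers let machine-dependent code toggle the cacheability of an
 * already-mapped page, e.g. for a buffer shared with a DMA engine:
 */
#if 0	/* example only */
	_pmap_set_page_cacheinhibit(pmap_kernel(), kva);
	/* ... perform uncached accesses ... */
	if (!_pmap_page_is_cacheable(pmap_kernel(), kva))
		_pmap_set_page_cacheable(pmap_kernel(), kva);
#endif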
                   2862:
                   2863: #ifdef DEBUG
                   2864: /*
                   2865:  * pmap_pvdump:
                   2866:  *
                   2867:  *     Dump the contents of the PV list for the specified physical page.
                   2868:  */
                   2869: void
1.20      tsutsui  2870: pmap_pvdump(paddr_t pa)
1.1       chs      2871: {
                   2872:        struct pv_entry *pv;
                   2873:
                   2874:        printf("pa %lx", pa);
                   2875:        for (pv = pa_to_pvh(pa); pv; pv = pv->pv_next)
                   2876:                printf(" -> pmap %p, va %lx, ptste %p, ptpmap %p, flags %x",
1.20      tsutsui  2877:                    pv->pv_pmap, pv->pv_va, pv->pv_ptste, pv->pv_ptpmap,
                   2878:                    pv->pv_flags);
1.1       chs      2879:        printf("\n");
                   2880: }
                   2881:
                   2882: /*
                   2883:  * pmap_check_wiring:
                   2884:  *
                   2885:  *     Count the number of valid mappings in the specified PT page,
                   2886:  *     and ensure that it is consistent with the number of wirings
                   2887:  *     to that page that the VM system has.
                   2888:  */
                   2889: void
1.20      tsutsui  2890: pmap_check_wiring(const char *str, vaddr_t va)
1.1       chs      2891: {
                   2892:        pt_entry_t *pte;
                   2893:        paddr_t pa;
                   2894:        struct vm_page *pg;
                   2895:        int count;
                   2896:
                   2897:        if (!pmap_ste_v(pmap_kernel(), va) ||
                   2898:            !pmap_pte_v(pmap_pte(pmap_kernel(), va)))
                   2899:                return;
                   2900:
                   2901:        pa = pmap_pte_pa(pmap_pte(pmap_kernel(), va));
                   2902:        pg = PHYS_TO_VM_PAGE(pa);
1.13      chs      2903:        if (pg->wire_count > PAGE_SIZE / sizeof(pt_entry_t)) {
1.1       chs      2904:                panic("*%s*: 0x%lx: wire count %d", str, va, pg->wire_count);
                   2905:        }
                   2906:
                   2907:        count = 0;
1.3       thorpej  2908:        for (pte = (pt_entry_t *)va; pte < (pt_entry_t *)(va + PAGE_SIZE);
                   2909:             pte++)
1.1       chs      2910:                if (*pte)
                   2911:                        count++;
                   2912:        if (pg->wire_count != count)
                   2913:                panic("*%s*: 0x%lx: w%d/a%d",
                   2914:                       str, va, pg->wire_count, count);
                   2915: }
                   2916: #endif /* DEBUG */
