
src/sys/arch/m68k/m68k/pmap_motorola.c, Revision 1.55

/*	$NetBSD: pmap_motorola.c,v 1.54 2009/12/06 06:41:30 tsutsui Exp $	*/

/*-
 * Copyright (c) 1999 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)pmap.c	8.6 (Berkeley) 5/27/94
 */

/*
 * Motorola m68k-family physical map management code.
 *
 * Supports:
 *	68020 with 68851 MMU
 *	68030 with on-chip MMU
 *	68040 with on-chip MMU
 *	68060 with on-chip MMU
 *
 * Notes:
 *	Don't even pay lip service to multiprocessor support.
 *
 *	We assume TLB entries don't have process tags (except for the
 *	supervisor/user distinction) so we only invalidate TLB entries
 *	when changing mappings for the current (or kernel) pmap.  This is
 *	technically not true for the 68851 but we flush the TLB on every
 *	context switch, so it effectively winds up that way.
 *
 *	Bitwise and/or operations are significantly faster than bitfield
 *	references so we use them when accessing STE/PTEs in the pmap_pte_*
 *	macros.  Note also that the two are not always equivalent; e.g.:
 *		(*pte & PG_PROT) [4] != pte->pg_prot [1]
 *	and a couple of routines that deal with protection and wiring take
 *	some shortcuts that assume the and/or definitions.
 */
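
/*
 * Editor's illustration (not part of the original source): the
 * bracketed "[4]" and "[1]" above appear to be the resulting values.
 * Assuming the usual m68k layout where PG_PROT is the single
 * write-protect bit 0x04, a read-only PTE gives
 *
 *	*pte & PG_PROT	-> 0x04		(raw masked bit, i.e. PG_RO)
 *	pte->pg_prot	-> 1		(normalized bitfield value)
 *
 * so callers of the masking macros must compare against PG_RO/PG_RW,
 * never against 0/1.
 */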
                     92:
                     93: /*
                     94:  *     Manages physical address maps.
                     95:  *
                     96:  *     In addition to hardware address maps, this
                     97:  *     module is called upon to provide software-use-only
                     98:  *     maps which may or may not be stored in the same
                     99:  *     form as hardware maps.  These pseudo-maps are
                    100:  *     used to store intermediate results from copy
                    101:  *     operations to and from address spaces.
                    102:  *
                    103:  *     Since the information managed by this module is
                    104:  *     also stored by the logical address mapping module,
                    105:  *     this module may throw away valid virtual-to-physical
                    106:  *     mappings at almost any time.  However, invalidations
                    107:  *     of virtual-to-physical mappings must be done as
                    108:  *     requested.
                    109:  *
                    110:  *     In order to cope with hardware architectures which
                    111:  *     make virtual-to-physical map invalidates expensive,
                    112:  *     this module may delay invalidate or reduced protection
                    113:  *     operations until such time as they are actually
                    114:  *     necessary.  This module is given full information as
                    115:  *     to which processors are currently using which maps,
                    116:  *     and to when physical maps must be made correct.
                    117:  */
                    118:
                    119: #include <sys/cdefs.h>
1.55    ! tsutsui   120: __KERNEL_RCSID(0, "$NetBSD: pmap_motorola.c,v 1.54 2009/12/06 06:41:30 tsutsui Exp $");
1.1       chs       121:
                    122: #include <sys/param.h>
                    123: #include <sys/systm.h>
                    124: #include <sys/proc.h>
                    125: #include <sys/malloc.h>
                    126: #include <sys/pool.h>
                    127:
                    128: #include <machine/pte.h>
                    129:
                    130: #include <uvm/uvm.h>
                    131:
                    132: #include <machine/cpu.h>
                    133: #include <m68k/cacheops.h>
                    134:
                    135: #ifdef DEBUG
                    136: #define PDB_FOLLOW     0x0001
                    137: #define PDB_INIT       0x0002
                    138: #define PDB_ENTER      0x0004
                    139: #define PDB_REMOVE     0x0008
                    140: #define PDB_CREATE     0x0010
                    141: #define PDB_PTPAGE     0x0020
                    142: #define PDB_CACHE      0x0040
                    143: #define PDB_BITS       0x0080
                    144: #define PDB_COLLECT    0x0100
                    145: #define PDB_PROTECT    0x0200
                    146: #define PDB_SEGTAB     0x0400
                    147: #define PDB_MULTIMAP   0x0800
                    148: #define PDB_PARANOIA   0x2000
                    149: #define PDB_WIRING     0x4000
                    150: #define PDB_PVDUMP     0x8000
                    151:
                    152: int debugmap = 0;
                    153: int pmapdebug = PDB_PARANOIA;
                    154:
                    155: #define        PMAP_DPRINTF(l, x)      if (pmapdebug & (l)) printf x
                    156: #else /* ! DEBUG */
                    157: #define        PMAP_DPRINTF(l, x)      /* nothing */
                    158: #endif /* DEBUG */

/*
 * Get STEs and PTEs for user/kernel address space
 */
#if defined(M68040) || defined(M68060)
#define	pmap_ste1(m, v) \
	(&((m)->pm_stab[(vaddr_t)(v) >> SG4_SHIFT1]))
/* XXX assumes physically contiguous ST pages (if more than one) */
#define pmap_ste2(m, v) \
	(&((m)->pm_stab[(st_entry_t *)(*(u_int *)pmap_ste1(m, v) & SG4_ADDR1) \
			- (m)->pm_stpa + (((v) & SG4_MASK2) >> SG4_SHIFT2)]))
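
/*
 * Editor's note: pmap_ste2() recovers a usable kernel VA for the
 * level-2 table from the physical pointer stored in the level-1 STE.
 * Subtracting pm_stpa (the segment table's PA) yields an offset that
 * is then applied to pm_stab (its VA) -- which is exactly why the XXX
 * above requires the ST pages to be physically contiguous.
 */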
#if defined(M68020) || defined(M68030)
#define	pmap_ste(m, v)	\
	(&((m)->pm_stab[(vaddr_t)(v) \
			>> (mmutype == MMU_68040 ? SG4_SHIFT1 : SG_ISHIFT)]))
#define pmap_ste_v(m, v) \
	(mmutype == MMU_68040 \
	 ? ((*pmap_ste1(m, v) & SG_V) && \
	    (*pmap_ste2(m, v) & SG_V)) \
	 : (*pmap_ste(m, v) & SG_V))
#else
#define	pmap_ste(m, v)	\
	(&((m)->pm_stab[(vaddr_t)(v) >> SG4_SHIFT1]))
#define pmap_ste_v(m, v) \
	((*pmap_ste1(m, v) & SG_V) && (*pmap_ste2(m, v) & SG_V))
#endif
#else
#define	pmap_ste(m, v)	 (&((m)->pm_stab[(vaddr_t)(v) >> SG_ISHIFT]))
#define pmap_ste_v(m, v) (*pmap_ste(m, v) & SG_V)
#endif

#define pmap_pte(m, v)	(&((m)->pm_ptab[(vaddr_t)(v) >> PG_SHIFT]))
#define pmap_pte_pa(pte)	(*(pte) & PG_FRAME)
#define pmap_pte_w(pte)		(*(pte) & PG_W)
#define pmap_pte_ci(pte)	(*(pte) & PG_CI)
#define pmap_pte_m(pte)		(*(pte) & PG_M)
#define pmap_pte_u(pte)		(*(pte) & PG_U)
#define pmap_pte_prot(pte)	(*(pte) & PG_PROT)
#define pmap_pte_v(pte)		(*(pte) & PG_V)

#define pmap_pte_set_w(pte, v) \
	if (v) *(pte) |= PG_W; else *(pte) &= ~PG_W
#define pmap_pte_set_prot(pte, v) \
	if (v) *(pte) |= PG_PROT; else *(pte) &= ~PG_PROT
#define pmap_pte_w_chg(pte, nw)		((nw) ^ pmap_pte_w(pte))
#define pmap_pte_prot_chg(pte, np)	((np) ^ pmap_pte_prot(pte))
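
/*
 * Editor's sketch (illustrative only, compiled out): how the macros
 * above compose into a software VA -> PA walk for a resident page,
 * mirroring the core of pmap_extract().  The PMAP_MOTOROLA_EXAMPLE
 * guard is hypothetical and never defined.
 */
#ifdef PMAP_MOTOROLA_EXAMPLE
static inline bool
pmap_example_extract(pmap_t pmap, vaddr_t va, paddr_t *pap)
{
	pt_entry_t *pte;

	if (!pmap_ste_v(pmap, va))	/* no valid segment table entry */
		return false;
	pte = pmap_pte(pmap, va);	/* PTE within the page table */
	if (!pmap_pte_v(pte))		/* mapping not resident */
		return false;
	*pap = pmap_pte_pa(pte) | ((paddr_t)va & PGOFSET);
	return true;
}
#endif /* PMAP_MOTOROLA_EXAMPLE */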

/*
 * Given a map and a machine independent protection code,
 * convert to an m68k protection code.
 */
#define pte_prot(m, p)	(protection_codes[p])
u_int	protection_codes[8];
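/*
 * protection_codes[] is indexed by the 3-bit VM_PROT_{READ,WRITE,EXECUTE}
 * mask and is filled in by pmap_bootstrap_finalize() below.
 */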

/*
 * Kernel page table page management.
 */
struct kpt_page {
	struct kpt_page *kpt_next;	/* link on either used or free list */
	vaddr_t		kpt_va;		/* always valid kernel VA */
	paddr_t		kpt_pa;		/* PA of this page (for speed) */
};
struct kpt_page *kpt_free_list, *kpt_used_list;
struct kpt_page *kpt_pages;

/*
 * Kernel segment/page table and page table map.
 * The page table map gives us the level of indirection we need to
 * dynamically expand the page table.  It is essentially a copy of the
 * segment table with PTEs instead of STEs.  All are initialized in
 * locore at boot time.
 * Sysmap will initially contain VM_KERNEL_PT_PAGES pages of PTEs.
 * Segtabzero is an empty segment table which all processes share until
 * they reference something.
 */
paddr_t		Sysseg_pa;
st_entry_t	*Sysseg;
pt_entry_t	*Sysmap, *Sysptmap;
st_entry_t	*Segtabzero, *Segtabzeropa;
vsize_t		Sysptsize = VM_KERNEL_PT_PAGES;

static struct pmap kernel_pmap_store;
struct pmap	*const kernel_pmap_ptr = &kernel_pmap_store;
struct vm_map	*st_map, *pt_map;
struct vm_map_kernel st_map_store, pt_map_store;

vaddr_t		lwp0uarea;	/* lwp0 u-area VA, initialized in bootstrap */

paddr_t		avail_start;	/* PA of first available physical page */
paddr_t		avail_end;	/* PA of last available physical page */
vsize_t		mem_size;	/* memory size in bytes */
vaddr_t		virtual_avail;	/* VA of first avail page (after kernel bss)*/
vaddr_t		virtual_end;	/* VA of last avail page (end of kernel AS) */
int		page_cnt;	/* number of pages managed by VM system */

bool		pmap_initialized = false;	/* Has pmap_init completed? */

struct pv_header {
	struct pv_entry		pvh_first;	/* first PV entry */
	uint16_t		pvh_attrs;	/* attributes:
						   bits 0-7: PTE bits
						   bits 8-15: flags */
	uint16_t		pvh_cimappings;	/* # caller-specified CI
						   mappings */
};
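
/*
 * Editor's note: pvh_attrs caches per-page attribute state (the
 * referenced/modified PTE bits harvested as mappings are torn down,
 * plus the PVH_* flags below) for pmap_testbit()/pmap_changebit().
 */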

#define	PVH_CI		0x10	/* all entries are cache-inhibited */
#define	PVH_PTPAGE	0x20	/* entry maps a page table page */

struct pv_header *pv_table;
TAILQ_HEAD(pv_page_list, pv_page) pv_page_freelist;
int		pv_nfree;

#ifdef M68K_MMU_HP
int		pmap_aliasmask;	/* separation at which VA aliasing ok */
#endif
#if defined(M68040) || defined(M68060)
u_int		protostfree;	/* prototype (default) free ST map */
#endif

pt_entry_t	*caddr1_pte;	/* PTE for CADDR1 */
pt_entry_t	*caddr2_pte;	/* PTE for CADDR2 */

struct pool	pmap_pmap_pool;	/* memory pool for pmap structures */

struct pv_entry *pmap_alloc_pv(void);
void	pmap_free_pv(struct pv_entry *);

#define	PAGE_IS_MANAGED(pa)	(pmap_initialized && uvm_pageismanaged(pa))

static inline struct pv_header *
pa_to_pvh(paddr_t pa)
{
	int bank, pg = 0;	/* XXX gcc4 -Wuninitialized */

	bank = vm_physseg_find(atop((pa)), &pg);
	return &vm_physmem[bank].pmseg.pvheader[pg];
}
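
/*
 * Editor's note: pa_to_pvh() is only meaningful for managed pages;
 * callers must check PAGE_IS_MANAGED(pa) (or otherwise know the page
 * is managed) first, since a vm_physseg_find() failure (-1) is not
 * handled here.
 */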

/*
 * Internal routines
 */
void	pmap_remove_mapping(pmap_t, vaddr_t, pt_entry_t *, int);
bool	pmap_testbit(paddr_t, int);
bool	pmap_changebit(paddr_t, int, int);
int	pmap_enter_ptpage(pmap_t, vaddr_t, bool);
void	pmap_ptpage_addref(vaddr_t);
int	pmap_ptpage_delref(vaddr_t);
void	pmap_pinit(pmap_t);
void	pmap_release(pmap_t);

#ifdef DEBUG
void pmap_pvdump(paddr_t);
void pmap_check_wiring(const char *, vaddr_t);
#endif

/* pmap_remove_mapping flags */
#define	PRM_TFLUSH	0x01
#define	PRM_CFLUSH	0x02
#define	PRM_KEEPPTPAGE	0x04
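
/*
 * Editor's note (illustrative): a typical internal invocation is
 *
 *	pmap_remove_mapping(pmap, va, pte, PRM_TFLUSH|PRM_CFLUSH);
 *
 * PRM_TFLUSH requests a TLB flush for an active pmap, PRM_CFLUSH a
 * cache flush where required, and PRM_KEEPPTPAGE keeps the containing
 * page-table page's reference from being dropped.
 */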

/*
 * pmap_bootstrap_finalize:	[ INTERFACE ]
 *
 *	Initialize lwp0 uarea, curlwp, and curpcb after MMU is turned on,
 *	using lwp0uarea variable saved during pmap_bootstrap().
 */
void
pmap_bootstrap_finalize(void)
{

#if !defined(amiga) && !defined(atari)
	/*
	 * XXX
	 * amiga and atari have different pmap initialization functions
	 * and they require this earlier.
	 */
	uvmexp.pagesize = NBPG;
	uvm_setpagesize();
#endif

	/*
	 * Initialize protection array.
	 * XXX: Could this have port specific values? Can't this be static?
	 */
	protection_codes[VM_PROT_NONE|VM_PROT_NONE|VM_PROT_NONE]     = 0;
	protection_codes[VM_PROT_READ|VM_PROT_NONE|VM_PROT_NONE]     = PG_RO;
	protection_codes[VM_PROT_READ|VM_PROT_NONE|VM_PROT_EXECUTE]  = PG_RO;
	protection_codes[VM_PROT_NONE|VM_PROT_NONE|VM_PROT_EXECUTE]  = PG_RO;
	protection_codes[VM_PROT_NONE|VM_PROT_WRITE|VM_PROT_NONE]    = PG_RW;
	protection_codes[VM_PROT_NONE|VM_PROT_WRITE|VM_PROT_EXECUTE] = PG_RW;
	protection_codes[VM_PROT_READ|VM_PROT_WRITE|VM_PROT_NONE]    = PG_RW;
	protection_codes[VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE] = PG_RW;

	/*
	 * Initialize pmap_kernel().
	 */
	pmap_kernel()->pm_stpa = (st_entry_t *)Sysseg_pa;
	pmap_kernel()->pm_stab = Sysseg;
	pmap_kernel()->pm_ptab = Sysmap;
#if defined(M68040) || defined(M68060)
	if (mmutype == MMU_68040)
		pmap_kernel()->pm_stfree = protostfree;
#endif
	simple_lock_init(&pmap_kernel()->pm_lock);
	pmap_kernel()->pm_count = 1;

	/*
	 * Initialize lwp0 uarea, curlwp, and curpcb.
	 */
	memset((void *)lwp0uarea, 0, USPACE);
	uvm_lwp_setuarea(&lwp0, lwp0uarea);
	curlwp = &lwp0;
	curpcb = lwp_getpcb(&lwp0);
}

/*
 * pmap_virtual_space:		[ INTERFACE ]
 *
 *	Report the range of available kernel virtual address
 *	space to the VM system during bootstrap.
 *
 *	This is only an interface function if we do not use
 *	pmap_steal_memory()!
 *
 *	Note: no locking is necessary in this function.
 */
void
pmap_virtual_space(vaddr_t *vstartp, vaddr_t *vendp)
{

	*vstartp = virtual_avail;
	*vendp = virtual_end;
}

/*
 * pmap_init:			[ INTERFACE ]
 *
 *	Initialize the pmap module.  Called by vm_init(), to initialize any
 *	structures that the pmap system needs to map virtual memory.
 *
 *	Note: no locking is necessary in this function.
 */
void
pmap_init(void)
{
	vaddr_t		addr, addr2;
	vsize_t		s;
	struct pv_header *pvh;
	int		rv;
	int		npages;
	int		bank;

	PMAP_DPRINTF(PDB_FOLLOW, ("pmap_init()\n"));

	/*
	 * Before we do anything else, initialize the PTE pointers
	 * used by pmap_zero_page() and pmap_copy_page().
	 */
	caddr1_pte = pmap_pte(pmap_kernel(), CADDR1);
	caddr2_pte = pmap_pte(pmap_kernel(), CADDR2);

	PMAP_DPRINTF(PDB_INIT,
	    ("pmap_init: Sysseg %p, Sysmap %p, Sysptmap %p\n",
	    Sysseg, Sysmap, Sysptmap));
	PMAP_DPRINTF(PDB_INIT,
	    ("  pstart %lx, pend %lx, vstart %lx, vend %lx\n",
	    avail_start, avail_end, virtual_avail, virtual_end));

	/*
	 * Allocate memory for random pmap data structures.  Includes the
	 * initial segment table (Segtabzero) and the pv_header table.
	 */
	for (page_cnt = 0, bank = 0; bank < vm_nphysseg; bank++)
		page_cnt += vm_physmem[bank].end - vm_physmem[bank].start;
	s = M68K_STSIZE;					/* Segtabzero */
	s += page_cnt * sizeof(struct pv_header);	/* pv table */
	s = round_page(s);
	addr = uvm_km_alloc(kernel_map, s, 0, UVM_KMF_WIRED | UVM_KMF_ZERO);
	if (addr == 0)
		panic("pmap_init: can't allocate data structures");

	Segtabzero = (st_entry_t *)addr;
	(void)pmap_extract(pmap_kernel(), addr,
	    (paddr_t *)(void *)&Segtabzeropa);
	addr += M68K_STSIZE;

	pv_table = (struct pv_header *) addr;
	addr += page_cnt * sizeof(struct pv_header);

	PMAP_DPRINTF(PDB_INIT, ("pmap_init: %lx bytes: page_cnt %x s0 %p(%p) "
	    "tbl %p\n",
	    s, page_cnt, Segtabzero, Segtabzeropa,
	    pv_table));

	/*
	 * Now that the pv_header table has been allocated,
	 * assign it to the memory segments.
	 */
	pvh = pv_table;
	for (bank = 0; bank < vm_nphysseg; bank++) {
		npages = vm_physmem[bank].end - vm_physmem[bank].start;
		vm_physmem[bank].pmseg.pvheader = pvh;
		pvh += npages;
	}

	/*
	 * Allocate physical memory for kernel PT pages and their management.
	 * We need 1 PT page per possible task plus some slop.
	 */
	npages = min(atop(M68K_MAX_KPTSIZE), maxproc+16);
	s = ptoa(npages) + round_page(npages * sizeof(struct kpt_page));

	/*
	 * Verify that space will be allocated in the region for which
	 * we already have kernel PT pages.
	 */
	addr = 0;
	rv = uvm_map(kernel_map, &addr, s, NULL, UVM_UNKNOWN_OFFSET, 0,
	    UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
	    UVM_ADV_RANDOM, UVM_FLAG_NOMERGE));
	if (rv != 0 || (addr + s) >= (vaddr_t)Sysmap)
		panic("pmap_init: kernel PT too small");
	uvm_unmap(kernel_map, addr, addr + s);

	/*
	 * Now allocate the space and link the pages together to
	 * form the KPT free list.
	 */
	addr = uvm_km_alloc(kernel_map, s, 0, UVM_KMF_WIRED | UVM_KMF_ZERO);
	if (addr == 0)
		panic("pmap_init: cannot allocate KPT free list");
	s = ptoa(npages);
	addr2 = addr + s;
	kpt_pages = &((struct kpt_page *)addr2)[npages];
	kpt_free_list = NULL;
	do {
		addr2 -= PAGE_SIZE;
		(--kpt_pages)->kpt_next = kpt_free_list;
		kpt_free_list = kpt_pages;
		kpt_pages->kpt_va = addr2;
		(void) pmap_extract(pmap_kernel(), addr2,
		    (paddr_t *)&kpt_pages->kpt_pa);
	} while (addr != addr2);

	PMAP_DPRINTF(PDB_INIT, ("pmap_init: KPT: %ld pages from %lx to %lx\n",
	    atop(s), addr, addr + s));

	/*
	 * Allocate the segment table map and the page table map.
	 */
	s = maxproc * M68K_STSIZE;
	st_map = uvm_km_suballoc(kernel_map, &addr, &addr2, s, 0, false,
	    &st_map_store);

	addr = M68K_PTBASE;
	if ((M68K_PTMAXSIZE / M68K_MAX_PTSIZE) < maxproc) {
		s = M68K_PTMAXSIZE;
		/*
		 * XXX We don't want to hang when we run out of
		 * page tables, so we lower maxproc so that fork()
		 * will fail instead.  Note that root could still raise
		 * this value via sysctl(3).
		 */
		maxproc = (M68K_PTMAXSIZE / M68K_MAX_PTSIZE);
	} else
		s = (maxproc * M68K_MAX_PTSIZE);
	pt_map = uvm_km_suballoc(kernel_map, &addr, &addr2, s, 0,
	    true, &pt_map_store);

#if defined(M68040) || defined(M68060)
	if (mmutype == MMU_68040) {
		protostfree = ~l2tobm(0);
		for (rv = MAXUL2SIZE; rv < sizeof(protostfree)*NBBY; rv++)
			protostfree &= ~l2tobm(rv);
	}
#endif

	/*
	 * Initialize the pmap pools.
	 */
	pool_init(&pmap_pmap_pool, sizeof(struct pmap), 0, 0, 0, "pmappl",
	    &pool_allocator_nointr, IPL_NONE);

	/*
	 * Now that this is done, mark the pages shared with the
	 * hardware page table search as non-CCB (actually, as CI).
	 *
	 * XXX Hm. Given that this is in the kernel map, can't we just
	 * use the va's?
	 */
#ifdef M68060
#if defined(M68020) || defined(M68030) || defined(M68040)
	if (cputype == CPU_68060)
#endif
	{
		struct kpt_page *kptp = kpt_free_list;
		paddr_t paddr;

		while (kptp) {
			pmap_changebit(kptp->kpt_pa, PG_CI, ~PG_CCB);
			kptp = kptp->kpt_next;
		}

		paddr = (paddr_t)Segtabzeropa;
		while (paddr < (paddr_t)Segtabzeropa + M68K_STSIZE) {
			pmap_changebit(paddr, PG_CI, ~PG_CCB);
			paddr += PAGE_SIZE;
		}

		DCIS();
	}
#endif

	/*
	 * Now it is safe to enable pv_table recording.
	 */
	pmap_initialized = true;
}

/*
 * pmap_alloc_pv:
 *
 *	Allocate a pv_entry.
 */
struct pv_entry *
pmap_alloc_pv(void)
{
	struct pv_page *pvp;
	struct pv_entry *pv;
	int i;

	if (pv_nfree == 0) {
		/*
		 * No free entries anywhere: allocate a fresh page,
		 * hand out entry 0, and chain entries 1..NPVPPG-1
		 * onto the page's free list.
		 */
		pvp = (struct pv_page *)uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
		    UVM_KMF_WIRED | UVM_KMF_ZERO);
		if (pvp == NULL)
			panic("pmap_alloc_pv: uvm_km_alloc() failed");
		pvp->pvp_pgi.pgi_freelist = pv = &pvp->pvp_pv[1];
		for (i = NPVPPG - 2; i; i--, pv++)
			pv->pv_next = pv + 1;
		pv->pv_next = NULL;
		pv_nfree += pvp->pvp_pgi.pgi_nfree = NPVPPG - 1;
		TAILQ_INSERT_HEAD(&pv_page_freelist, pvp, pvp_pgi.pgi_list);
		pv = &pvp->pvp_pv[0];
	} else {
		--pv_nfree;
		pvp = TAILQ_FIRST(&pv_page_freelist);
		if (--pvp->pvp_pgi.pgi_nfree == 0) {
			/* page now fully allocated; drop it from the list */
			TAILQ_REMOVE(&pv_page_freelist, pvp, pvp_pgi.pgi_list);
		}
		pv = pvp->pvp_pgi.pgi_freelist;
#ifdef DIAGNOSTIC
		if (pv == NULL)
			panic("pmap_alloc_pv: pgi_nfree inconsistent");
#endif
		pvp->pvp_pgi.pgi_freelist = pv->pv_next;
	}
	return pv;
}

/*
 * pmap_free_pv:
 *
 *	Free a pv_entry.
 */
void
pmap_free_pv(struct pv_entry *pv)
{
	struct pv_page *pvp;

	pvp = (struct pv_page *)trunc_page((vaddr_t)pv);
	switch (++pvp->pvp_pgi.pgi_nfree) {
	case 1:
		/* first free entry: page re-enters the free list... */
		TAILQ_INSERT_TAIL(&pv_page_freelist, pvp, pvp_pgi.pgi_list);
		/* FALLTHROUGH */
	default:
		/* ...and the entry goes onto the page's free list */
		pv->pv_next = pvp->pvp_pgi.pgi_freelist;
		pvp->pvp_pgi.pgi_freelist = pv;
		++pv_nfree;
		break;
	case NPVPPG:
		/* every entry in the page is free: return the page */
		pv_nfree -= NPVPPG - 1;
		TAILQ_REMOVE(&pv_page_freelist, pvp, pvp_pgi.pgi_list);
		uvm_km_free(kernel_map, (vaddr_t)pvp, PAGE_SIZE, UVM_KMF_WIRED);
		break;
	}
}

/*
 * pmap_collect_pv:
 *
 *	Perform compaction on the PV list, called via pmap_collect().
 */
#ifdef notyet
void
pmap_collect_pv(void)
{
	struct pv_page_list pv_page_collectlist;
	struct pv_page *pvp, *npvp;
	struct pv_entry *ph, *ppv, *pv, *npv;
	struct pv_header *pvh;
	int s;

	TAILQ_INIT(&pv_page_collectlist);

	for (pvp = TAILQ_FIRST(&pv_page_freelist); pvp != NULL; pvp = npvp) {
		if (pv_nfree < NPVPPG)
			break;
		npvp = TAILQ_NEXT(&pvp->pvp_pgi, pgi_list);
		if (pvp->pvp_pgi.pgi_nfree > NPVPPG / 3) {
			TAILQ_REMOVE(&pv_page_freelist, pvp, pvp_pgi.pgi_list);
			TAILQ_INSERT_TAIL(&pv_page_collectlist, pvp,
			    pvp_pgi.pgi_list);
			pv_nfree -= NPVPPG;
			pvp->pvp_pgi.pgi_nfree = -1;
		}
	}

	if (TAILQ_FIRST(&pv_page_collectlist) == NULL)
		return;

	for (pvh = &pv_table[page_cnt - 1]; pvh >= &pv_table[0]; pvh--) {
		ph = &pvh->pvh_first;
		if (ph->pv_pmap == NULL)
			continue;
		s = splvm();
		for (ppv = ph; (pv = ppv->pv_next) != NULL; ) {
			pvp = (struct pv_page *) trunc_page((vaddr_t)pv);
			if (pvp->pvp_pgi.pgi_nfree == -1) {
				pvp = TAILQ_FIRST(&pv_page_freelist);
				if (--pvp->pvp_pgi.pgi_nfree == 0) {
					TAILQ_REMOVE(&pv_page_freelist, pvp,
					    pvp_pgi.pgi_list);
				}
				npv = pvp->pvp_pgi.pgi_freelist;
#ifdef DIAGNOSTIC
				if (npv == NULL)
					panic("pmap_collect_pv: "
					    "pgi_nfree inconsistent");
#endif
				pvp->pvp_pgi.pgi_freelist = npv->pv_next;
				*npv = *pv;
				ppv->pv_next = npv;
				ppv = npv;
			} else
				ppv = pv;
		}
		splx(s);
	}

	for (pvp = TAILQ_FIRST(&pv_page_collectlist); pvp != NULL; pvp = npvp) {
		npvp = TAILQ_NEXT(&pvp->pvp_pgi, pgi_list);
		uvm_km_free(kernel_map, (vaddr_t)pvp, PAGE_SIZE, UVM_KMF_WIRED);
	}
}
#endif

/*
 * pmap_map:
 *
 *	Used to map a range of physical addresses into kernel
 *	virtual address space.
 *
 *	For now, VM is already on; we only need to map the
 *	specified memory.
 *
 *	Note: THIS FUNCTION IS DEPRECATED, AND SHOULD BE REMOVED!
 */
vaddr_t
pmap_map(vaddr_t va, paddr_t spa, paddr_t epa, int prot)
{

	PMAP_DPRINTF(PDB_FOLLOW,
	    ("pmap_map(%lx, %lx, %lx, %x)\n", va, spa, epa, prot));

	while (spa < epa) {
		pmap_enter(pmap_kernel(), va, spa, prot, 0);
		va += PAGE_SIZE;
		spa += PAGE_SIZE;
	}
	pmap_update(pmap_kernel());
	return va;
}

/*
 * pmap_create:			[ INTERFACE ]
 *
 *	Create and return a physical map.
 *
 *	Note: no locking is necessary in this function.
 */
pmap_t
pmap_create(void)
{
	struct pmap *pmap;

	PMAP_DPRINTF(PDB_FOLLOW|PDB_CREATE,
	    ("pmap_create()\n"));

	pmap = pool_get(&pmap_pmap_pool, PR_WAITOK);
	memset(pmap, 0, sizeof(*pmap));
	pmap_pinit(pmap);
	return pmap;
}

/*
 * pmap_pinit:
 *
 *	Initialize a preallocated and zeroed pmap structure.
 *
 *	Note: THIS FUNCTION SHOULD BE MOVED INTO pmap_create()!
 */
void
pmap_pinit(struct pmap *pmap)
{

	PMAP_DPRINTF(PDB_FOLLOW|PDB_CREATE,
	    ("pmap_pinit(%p)\n", pmap));

	/*
	 * No need to allocate page table space yet but we do need a
	 * valid segment table.  Initially, we point everyone at the
	 * "null" segment table.  On the first pmap_enter, a real
	 * segment table will be allocated.
	 */
	pmap->pm_stab = Segtabzero;
	pmap->pm_stpa = Segtabzeropa;
#if defined(M68040) || defined(M68060)
#if defined(M68020) || defined(M68030)
	if (mmutype == MMU_68040)
#endif
		pmap->pm_stfree = protostfree;
#endif
	pmap->pm_count = 1;
	simple_lock_init(&pmap->pm_lock);
}

/*
 * pmap_destroy:		[ INTERFACE ]
 *
 *	Drop the reference count on the specified pmap, releasing
 *	all resources if the reference count drops to zero.
 */
void
pmap_destroy(pmap_t pmap)
{
	int count;

	PMAP_DPRINTF(PDB_FOLLOW, ("pmap_destroy(%p)\n", pmap));

	simple_lock(&pmap->pm_lock);
	count = --pmap->pm_count;
	simple_unlock(&pmap->pm_lock);
	if (count == 0) {
		pmap_release(pmap);
		pool_put(&pmap_pmap_pool, pmap);
	}
}

/*
 * pmap_release:
 *
 *	Release the resources held by a pmap.
 *
 *	Note: THIS FUNCTION SHOULD BE MOVED INTO pmap_destroy().
 */
void
pmap_release(pmap_t pmap)
{

	PMAP_DPRINTF(PDB_FOLLOW, ("pmap_release(%p)\n", pmap));

#ifdef notdef /* DIAGNOSTIC */
	/* count would be 0 from pmap_destroy... */
	simple_lock(&pmap->pm_lock);
	if (pmap->pm_count != 1)
		panic("pmap_release count");
#endif

	if (pmap->pm_ptab) {
		pmap_remove(pmap_kernel(), (vaddr_t)pmap->pm_ptab,
		    (vaddr_t)pmap->pm_ptab + M68K_MAX_PTSIZE);
		uvm_km_pgremove((vaddr_t)pmap->pm_ptab,
		    (vaddr_t)pmap->pm_ptab + M68K_MAX_PTSIZE);
		uvm_km_free(pt_map, (vaddr_t)pmap->pm_ptab,
		    M68K_MAX_PTSIZE, UVM_KMF_VAONLY);
	}
	KASSERT(pmap->pm_stab == Segtabzero);
}

/*
 * pmap_reference:		[ INTERFACE ]
 *
 *	Add a reference to the specified pmap.
 */
void
pmap_reference(pmap_t pmap)
{
	PMAP_DPRINTF(PDB_FOLLOW, ("pmap_reference(%p)\n", pmap));

	simple_lock(&pmap->pm_lock);
	pmap->pm_count++;
	simple_unlock(&pmap->pm_lock);
}

/*
 * pmap_activate:		[ INTERFACE ]
 *
 *	Activate the pmap used by the specified process.  This includes
 *	reloading the MMU context if it is the current process, and marking
 *	the pmap in use by the processor.
 *
 *	Note: we may only use spin locks here, since we are called
 *	by a critical section in cpu_switch()!
 */
void
pmap_activate(struct lwp *l)
{
	pmap_t pmap = l->l_proc->p_vmspace->vm_map.pmap;

	PMAP_DPRINTF(PDB_FOLLOW|PDB_SEGTAB,
	    ("pmap_activate(%p)\n", l));

	PMAP_ACTIVATE(pmap, (curlwp->l_flag & LW_IDLE) != 0 ||
	    l->l_proc == curproc);
}

/*
 * pmap_deactivate:		[ INTERFACE ]
 *
 *	Mark that the pmap used by the specified process is no longer
 *	in use by the processor.
 *
 *	The comment above pmap_activate() wrt. locking applies here,
 *	as well.
 */
void
pmap_deactivate(struct lwp *l)
{

	/* No action necessary in this pmap implementation. */
}

/*
 * pmap_remove:			[ INTERFACE ]
 *
 *	Remove the given range of addresses from the specified map.
 *
 *	It is assumed that the start and end are properly
 *	rounded to the page size.
 */
void
pmap_remove(pmap_t pmap, vaddr_t sva, vaddr_t eva)
{
	vaddr_t nssva;
	pt_entry_t *pte;
	int flags;
#ifdef M68K_MMU_HP
	bool firstpage = true, needcflush = false;
#endif

	PMAP_DPRINTF(PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT,
	    ("pmap_remove(%p, %lx, %lx)\n", pmap, sva, eva));

	flags = active_pmap(pmap) ? PRM_TFLUSH : 0;
	while (sva < eva) {
		nssva = m68k_trunc_seg(sva) + NBSEG;
		/* nssva == 0 catches wraparound at the top of the AS */
		if (nssva == 0 || nssva > eva)
			nssva = eva;

		/*
		 * Invalidate every valid mapping within this segment.
		 */

		pte = pmap_pte(pmap, sva);
		while (sva < nssva) {

			/*
			 * If this segment is unallocated,
			 * skip to the next segment boundary.
			 */

			if (!pmap_ste_v(pmap, sva)) {
				sva = nssva;
				break;
			}

			if (pmap_pte_v(pte)) {
#ifdef M68K_MMU_HP
				if (pmap_aliasmask) {

					/*
					 * Purge kernel side of VAC to ensure
					 * we get the correct state of any
					 * hardware maintained bits.
					 */

					if (firstpage) {
						DCIS();
					}

					/*
					 * Remember if we may need to
					 * flush the VAC due to a non-CI
					 * mapping.
					 */

					if (!needcflush && !pmap_pte_ci(pte))
						needcflush = true;

				}
				firstpage = false;
#endif
				pmap_remove_mapping(pmap, sva, pte, flags);
			}
			pte++;
			sva += PAGE_SIZE;
		}
	}

#ifdef M68K_MMU_HP

	/*
	 * Didn't do anything, no need for cache flushes
	 */

	if (firstpage)
		return;

	/*
	 * In a couple of cases, we don't need to worry about flushing
	 * the VAC:
	 *	1. if this is a kernel mapping,
	 *	   we have already done it
	 *	2. if it is a user mapping not for the current process,
	 *	   it won't be there
	 */

	if (pmap_aliasmask && !active_user_pmap(pmap))
		needcflush = false;
	if (needcflush) {
		if (pmap == pmap_kernel()) {
			DCIS();
		} else {
			DCIU();
		}
	}
#endif
}
                   1006:
                   1007: /*
                   1008:  * pmap_page_protect:          [ INTERFACE ]
                   1009:  *
                    1010:  *     Lower the permissions for all mappings of the given page
                    1011:  *     to the permissions specified.
                   1012:  */
                   1013: void
1.20      tsutsui  1014: pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
1.1       chs      1015: {
                   1016:        paddr_t pa = VM_PAGE_TO_PHYS(pg);
1.46      thorpej  1017:        struct pv_header *pvh;
1.1       chs      1018:        struct pv_entry *pv;
                   1019:        pt_entry_t *pte;
                   1020:        int s;
                   1021:
                   1022: #ifdef DEBUG
                   1023:        if ((pmapdebug & (PDB_FOLLOW|PDB_PROTECT)) ||
                   1024:            (prot == VM_PROT_NONE && (pmapdebug & PDB_REMOVE)))
                   1025:                printf("pmap_page_protect(%p, %x)\n", pg, prot);
                   1026: #endif
                   1027:
                   1028:        switch (prot) {
                   1029:        case VM_PROT_READ|VM_PROT_WRITE:
                   1030:        case VM_PROT_ALL:
                   1031:                return;
                   1032:
                   1033:        /* copy_on_write */
                   1034:        case VM_PROT_READ:
                   1035:        case VM_PROT_READ|VM_PROT_EXECUTE:
                   1036:                pmap_changebit(pa, PG_RO, ~0);
                   1037:                return;
                   1038:
                   1039:        /* remove_all */
                   1040:        default:
                   1041:                break;
                   1042:        }
                   1043:
1.46      thorpej  1044:        pvh = pa_to_pvh(pa);
                   1045:        pv = &pvh->pvh_first;
1.1       chs      1046:        s = splvm();
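                                  /*
                                   * pmap_remove_mapping() unlinks the entry at the head
                                   * of the pv list on each pass, so the loop terminates
                                   * once the list drains and pv_pmap goes NULL.
                                   */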
                   1047:        while (pv->pv_pmap != NULL) {
                   1048:
                   1049:                pte = pmap_pte(pv->pv_pmap, pv->pv_va);
                   1050: #ifdef DEBUG
                   1051:                if (!pmap_ste_v(pv->pv_pmap, pv->pv_va) ||
                   1052:                    pmap_pte_pa(pte) != pa)
                   1053:                        panic("pmap_page_protect: bad mapping");
                   1054: #endif
                   1055:                pmap_remove_mapping(pv->pv_pmap, pv->pv_va,
                   1056:                    pte, PRM_TFLUSH|PRM_CFLUSH);
                   1057:        }
                   1058:        splx(s);
                   1059: }
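                          /*
                           * Illustrative calls (a sketch, not part of the original
                           * source): upgrades are a no-op, read protection downgrades
                           * in place, and VM_PROT_NONE removes the mappings entirely:
                           *
                           *      pmap_page_protect(pg, VM_PROT_READ);    (make all mappings RO)
                           *      pmap_page_protect(pg, VM_PROT_NONE);    (remove all mappings)
                           */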
                   1060:
                   1061: /*
                   1062:  * pmap_protect:               [ INTERFACE ]
                   1063:  *
1.29      tnn      1064:  *     Set the physical protection on the specified range of this map
1.1       chs      1065:  *     as requested.
                   1066:  */
                   1067: void
1.20      tsutsui  1068: pmap_protect(pmap_t pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
1.1       chs      1069: {
                   1070:        vaddr_t nssva;
                   1071:        pt_entry_t *pte;
1.23      thorpej  1072:        bool firstpage, needtflush;
1.1       chs      1073:        int isro;
                   1074:
                   1075:        PMAP_DPRINTF(PDB_FOLLOW|PDB_PROTECT,
                   1076:            ("pmap_protect(%p, %lx, %lx, %x)\n",
                   1077:            pmap, sva, eva, prot));
                   1078:
                   1079: #ifdef PMAPSTATS
                   1080:        protect_stats.calls++;
                   1081: #endif
                   1082:        if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
                   1083:                pmap_remove(pmap, sva, eva);
                   1084:                return;
                   1085:        }
                   1086:        isro = pte_prot(pmap, prot);
                   1087:        needtflush = active_pmap(pmap);
1.25      thorpej  1088:        firstpage = true;
1.1       chs      1089:        while (sva < eva) {
                   1090:                nssva = m68k_trunc_seg(sva) + NBSEG;
                   1091:                if (nssva == 0 || nssva > eva)
                   1092:                        nssva = eva;
                   1093:
                   1094:                /*
                   1095:                 * If VA belongs to an unallocated segment,
                   1096:                 * skip to the next segment boundary.
                   1097:                 */
                   1098:
                   1099:                if (!pmap_ste_v(pmap, sva)) {
                   1100:                        sva = nssva;
                   1101:                        continue;
                   1102:                }
                   1103:
                   1104:                /*
                   1105:                 * Change protection on mapping if it is valid and doesn't
                   1106:                 * already have the correct protection.
                   1107:                 */
                   1108:
                   1109:                pte = pmap_pte(pmap, sva);
                   1110:                while (sva < nssva) {
                   1111:                        if (pmap_pte_v(pte) && pmap_pte_prot_chg(pte, isro)) {
                   1112: #ifdef M68K_MMU_HP
                   1113:
                   1114:                                /*
                   1115:                                 * Purge kernel side of VAC to ensure we
                   1116:                                 * get the correct state of any hardware
                   1117:                                 * maintained bits.
                   1118:                                 *
                   1119:                                 * XXX do we need to clear the VAC in
                   1120:                                 * general to reflect the new protection?
                   1121:                                 */
                   1122:
                   1123:                                if (firstpage && pmap_aliasmask)
                   1124:                                        DCIS();
                   1125: #endif
                   1126:
                   1127: #if defined(M68040) || defined(M68060)
                   1128:
                   1129:                                /*
                   1130:                                 * Clear caches if making RO (see section
                   1131:                                 * "7.3 Cache Coherency" in the manual).
                   1132:                                 */
                   1133:
                   1134: #if defined(M68020) || defined(M68030)
                   1135:                                if (isro && mmutype == MMU_68040)
                   1136: #else
                   1137:                                if (isro)
                   1138: #endif
                   1139:                                {
                   1140:                                        paddr_t pa = pmap_pte_pa(pte);
                   1141:
                   1142:                                        DCFP(pa);
                   1143:                                        ICPP(pa);
                   1144:                                }
                   1145: #endif
                   1146:                                pmap_pte_set_prot(pte, isro);
                   1147:                                if (needtflush)
                   1148:                                        TBIS(sva);
1.25      thorpej  1149:                                firstpage = false;
1.1       chs      1150:                        }
                   1151:                        pte++;
1.3       thorpej  1152:                        sva += PAGE_SIZE;
1.1       chs      1153:                }
                   1154:        }
                   1155: }
                   1156:
                   1157: /*
                   1158:  * pmap_enter:                 [ INTERFACE ]
                   1159:  *
                   1160:  *     Insert the given physical page (pa) at
                   1161:  *     the specified virtual address (va) in the
                   1162:  *     target physical map with the protection requested.
                   1163:  *
                   1164:  *     If specified, the page will be wired down, meaning
                   1165:  *     that the related pte cannot be reclaimed.
                   1166:  *
                   1167:  *     Note: This is the only routine which MAY NOT lazy-evaluate
                    1168:  *     or lose information.  That is, this routine must actually
                   1169:  *     insert this page into the given map NOW.
                   1170:  */
                   1171: int
1.44      cegger   1172: pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
1.1       chs      1173: {
                   1174:        pt_entry_t *pte;
                   1175:        int npte;
                   1176:        paddr_t opa;
1.25      thorpej  1177:        bool cacheable = true;
                   1178:        bool checkpv = true;
1.23      thorpej  1179:        bool wired = (flags & PMAP_WIRED) != 0;
                   1180:        bool can_fail = (flags & PMAP_CANFAIL) != 0;
1.1       chs      1181:
                   1182:        PMAP_DPRINTF(PDB_FOLLOW|PDB_ENTER,
                   1183:            ("pmap_enter(%p, %lx, %lx, %x, %x)\n",
                   1184:            pmap, va, pa, prot, wired));
                   1185:
                   1186: #ifdef DIAGNOSTIC
                   1187:        /*
                   1188:         * pmap_enter() should never be used for CADDR1 and CADDR2.
                   1189:         */
                   1190:        if (pmap == pmap_kernel() &&
                   1191:            (va == (vaddr_t)CADDR1 || va == (vaddr_t)CADDR2))
                   1192:                panic("pmap_enter: used for CADDR1 or CADDR2");
                   1193: #endif
                   1194:
                   1195:        /*
                    1196:         * For user mappings, allocate kernel VM resources if necessary.
                   1197:         */
1.22      martin   1198:        if (pmap->pm_ptab == NULL) {
1.1       chs      1199:                pmap->pm_ptab = (pt_entry_t *)
1.14      yamt     1200:                    uvm_km_alloc(pt_map, M68K_MAX_PTSIZE, 0,
1.22      martin   1201:                    UVM_KMF_VAONLY |
                   1202:                    (can_fail ? UVM_KMF_NOWAIT : UVM_KMF_WAITVA));
                   1203:                if (pmap->pm_ptab == NULL)
                   1204:                        return ENOMEM;
                   1205:        }
1.1       chs      1206:
                   1207:        /*
                   1208:         * Segment table entry not valid, we need a new PT page
                   1209:         */
1.22      martin   1210:        if (!pmap_ste_v(pmap, va)) {
                   1211:                int err = pmap_enter_ptpage(pmap, va, can_fail);
                   1212:                if (err)
                   1213:                        return err;
                   1214:        }
1.1       chs      1215:
                   1216:        pa = m68k_trunc_page(pa);
                   1217:        pte = pmap_pte(pmap, va);
                   1218:        opa = pmap_pte_pa(pte);
                   1219:
                   1220:        PMAP_DPRINTF(PDB_ENTER, ("enter: pte %p, *pte %x\n", pte, *pte));
                   1221:
                   1222:        /*
                   1223:         * Mapping has not changed, must be protection or wiring change.
                   1224:         */
                   1225:        if (opa == pa) {
                   1226:                /*
                   1227:                 * Wiring change, just update stats.
                   1228:                 * We don't worry about wiring PT pages as they remain
                   1229:                 * resident as long as there are valid mappings in them.
                   1230:                 * Hence, if a user page is wired, the PT page will be also.
                   1231:                 */
                   1232:                if (pmap_pte_w_chg(pte, wired ? PG_W : 0)) {
                   1233:                        PMAP_DPRINTF(PDB_ENTER,
                   1234:                            ("enter: wiring change -> %x\n", wired));
                   1235:                        if (wired)
                   1236:                                pmap->pm_stats.wired_count++;
                   1237:                        else
                   1238:                                pmap->pm_stats.wired_count--;
                   1239:                }
                   1240:                /*
                   1241:                 * Retain cache inhibition status
                   1242:                 */
1.25      thorpej  1243:                checkpv = false;
1.1       chs      1244:                if (pmap_pte_ci(pte))
1.25      thorpej  1245:                        cacheable = false;
1.1       chs      1246:                goto validate;
                   1247:        }
                   1248:
                   1249:        /*
                   1250:         * Mapping has changed, invalidate old range and fall through to
                   1251:         * handle validating new mapping.
                   1252:         */
                   1253:        if (opa) {
                   1254:                PMAP_DPRINTF(PDB_ENTER,
                   1255:                    ("enter: removing old mapping %lx\n", va));
                   1256:                pmap_remove_mapping(pmap, va, pte,
                   1257:                    PRM_TFLUSH|PRM_CFLUSH|PRM_KEEPPTPAGE);
                   1258:        }
                   1259:
                   1260:        /*
                   1261:         * If this is a new user mapping, increment the wiring count
                   1262:         * on this PT page.  PT pages are wired down as long as there
                   1263:         * is a valid mapping in the page.
                   1264:         */
                   1265:        if (pmap != pmap_kernel())
                   1266:                pmap_ptpage_addref(trunc_page((vaddr_t)pte));
                   1267:
                   1268:        /*
                   1269:         * Enter on the PV list if part of our managed memory
                   1270:         * Note that we raise IPL while manipulating pv_table
                   1271:         * since pmap_enter can be called at interrupt time.
                   1272:         */
                   1273:        if (PAGE_IS_MANAGED(pa)) {
1.46      thorpej  1274:                struct pv_header *pvh;
1.1       chs      1275:                struct pv_entry *pv, *npv;
                   1276:                int s;
                   1277:
1.46      thorpej  1278:                pvh = pa_to_pvh(pa);
                   1279:                pv = &pvh->pvh_first;
1.1       chs      1280:                s = splvm();
                   1281:
                   1282:                PMAP_DPRINTF(PDB_ENTER,
                   1283:                    ("enter: pv at %p: %lx/%p/%p\n",
                   1284:                    pv, pv->pv_va, pv->pv_pmap, pv->pv_next));
                   1285:                /*
                   1286:                 * No entries yet, use header as the first entry
                   1287:                 */
                   1288:                if (pv->pv_pmap == NULL) {
                   1289:                        pv->pv_va = va;
                   1290:                        pv->pv_pmap = pmap;
                   1291:                        pv->pv_next = NULL;
                   1292:                        pv->pv_ptste = NULL;
                   1293:                        pv->pv_ptpmap = NULL;
1.46      thorpej  1294:                        pvh->pvh_attrs = 0;
1.1       chs      1295:                }
                   1296:                /*
                   1297:                 * There is at least one other VA mapping this page.
                   1298:                 * Place this entry after the header.
                   1299:                 */
                   1300:                else {
                   1301: #ifdef DEBUG
                   1302:                        for (npv = pv; npv; npv = npv->pv_next)
                   1303:                                if (pmap == npv->pv_pmap && va == npv->pv_va)
                   1304:                                        panic("pmap_enter: already in pv_tab");
                   1305: #endif
                   1306:                        npv = pmap_alloc_pv();
                   1307:                        npv->pv_va = va;
                   1308:                        npv->pv_pmap = pmap;
                   1309:                        npv->pv_next = pv->pv_next;
                   1310:                        npv->pv_ptste = NULL;
                   1311:                        npv->pv_ptpmap = NULL;
                   1312:                        pv->pv_next = npv;
                   1313:
                   1314: #ifdef M68K_MMU_HP
                   1315:
                   1316:                        /*
                   1317:                         * Since there is another logical mapping for the
                   1318:                         * same page we may need to cache-inhibit the
                   1319:                         * descriptors on those CPUs with external VACs.
                   1320:                         * We don't need to CI if:
                   1321:                         *
                    1322:                         * - No two mappings belong to the same user pmap.
                   1323:                         *   Since the cache is flushed on context switches
                   1324:                         *   there is no problem between user processes.
                   1325:                         *
                   1326:                         * - Mappings within a single pmap are a certain
                   1327:                         *   magic distance apart.  VAs at these appropriate
                   1328:                         *   boundaries map to the same cache entries or
                   1329:                         *   otherwise don't conflict.
                   1330:                         *
                   1331:                         * To keep it simple, we only check for these special
                   1332:                         * cases if there are only two mappings, otherwise we
                   1333:                         * punt and always CI.
                   1334:                         *
                   1335:                         * Note that there are no aliasing problems with the
                   1336:                         * on-chip data-cache when the WA bit is set.
                   1337:                         */
                   1338:
                   1339:                        if (pmap_aliasmask) {
1.46      thorpej  1340:                                if (pvh->pvh_attrs & PVH_CI) {
1.1       chs      1341:                                        PMAP_DPRINTF(PDB_CACHE,
                   1342:                                            ("enter: pa %lx already CI'ed\n",
                   1343:                                            pa));
1.25      thorpej  1344:                                        checkpv = cacheable = false;
1.1       chs      1345:                                } else if (npv->pv_next ||
                   1346:                                           ((pmap == pv->pv_pmap ||
                   1347:                                             pmap == pmap_kernel() ||
                   1348:                                             pv->pv_pmap == pmap_kernel()) &&
                   1349:                                            ((pv->pv_va & pmap_aliasmask) !=
                   1350:                                             (va & pmap_aliasmask)))) {
                   1351:                                        PMAP_DPRINTF(PDB_CACHE,
                   1352:                                            ("enter: pa %lx CI'ing all\n",
                   1353:                                            pa));
1.25      thorpej  1354:                                        cacheable = false;
1.46      thorpej  1355:                                        pvh->pvh_attrs |= PVH_CI;
1.1       chs      1356:                                }
                   1357:                        }
                   1358: #endif
                   1359:                }
                   1360:
                   1361:                /*
                   1362:                 * Speed pmap_is_referenced() or pmap_is_modified() based
                   1363:                 * on the hint provided in access_type.
                   1364:                 */
                   1365: #ifdef DIAGNOSTIC
                   1366:                if ((flags & VM_PROT_ALL) & ~prot)
                   1367:                        panic("pmap_enter: access_type exceeds prot");
                   1368: #endif
                   1369:                if (flags & VM_PROT_WRITE)
1.46      thorpej  1370:                        pvh->pvh_attrs |= (PG_U|PG_M);
1.1       chs      1371:                else if (flags & VM_PROT_ALL)
1.46      thorpej  1372:                        pvh->pvh_attrs |= PG_U;
1.1       chs      1373:
                   1374:                splx(s);
                   1375:        }
                   1376:        /*
                   1377:         * Assumption: if it is not part of our managed memory
                    1378:         * then it must be device memory which may be volatile.
                   1379:         */
                   1380:        else if (pmap_initialized) {
1.25      thorpej  1381:                checkpv = cacheable = false;
1.1       chs      1382:        }
                   1383:
                   1384:        /*
                   1385:         * Increment counters
                   1386:         */
                   1387:        pmap->pm_stats.resident_count++;
                   1388:        if (wired)
                   1389:                pmap->pm_stats.wired_count++;
                   1390:
                   1391: validate:
                   1392: #ifdef M68K_MMU_HP
                   1393:        /*
                   1394:         * Purge kernel side of VAC to ensure we get correct state
                   1395:         * of HW bits so we don't clobber them.
                   1396:         */
                   1397:        if (pmap_aliasmask)
                   1398:                DCIS();
                   1399: #endif
                   1400:
                   1401:        /*
                   1402:         * Build the new PTE.
                   1403:         */
                   1404:
                   1405:        npte = pa | pte_prot(pmap, prot) | (*pte & (PG_M|PG_U)) | PG_V;
                   1406:        if (wired)
                   1407:                npte |= PG_W;
                   1408:        if (!checkpv && !cacheable)
                   1409: #if defined(M68040) || defined(M68060)
                   1410: #if defined(M68020) || defined(M68030)
                   1411:                npte |= (mmutype == MMU_68040 ? PG_CIN : PG_CI);
                   1412: #else
                   1413:                npte |= PG_CIN;
                   1414: #endif
                   1415: #else
                   1416:                npte |= PG_CI;
                   1417: #endif
                   1418: #if defined(M68040) || defined(M68060)
                   1419: #if defined(M68020) || defined(M68030)
                   1420:        else if (mmutype == MMU_68040 && (npte & (PG_PROT|PG_CI)) == PG_RW)
                   1421: #else
                   1422:        else if ((npte & (PG_PROT|PG_CI)) == PG_RW)
                   1423: #endif
                   1424:                npte |= PG_CCB;
                   1425: #endif
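                                  /*
                                   * In short: a non-cacheable mapping gets PG_CIN on an
                                   * '040/'060 MMU and PG_CI otherwise, while a writable,
                                   * cacheable mapping on an '040/'060 additionally gets
                                   * copyback caching (PG_CCB).
                                   */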
                   1426:
                   1427:        PMAP_DPRINTF(PDB_ENTER, ("enter: new pte value %x\n", npte));
                   1428:
                   1429:        /*
                   1430:         * Remember if this was a wiring-only change.
                   1431:         * If so, we need not flush the TLB and caches.
                   1432:         */
                   1433:
                   1434:        wired = ((*pte ^ npte) == PG_W);
                   1435: #if defined(M68040) || defined(M68060)
                   1436: #if defined(M68020) || defined(M68030)
                   1437:        if (mmutype == MMU_68040 && !wired)
                   1438: #else
                   1439:        if (!wired)
                   1440: #endif
                   1441:        {
                   1442:                DCFP(pa);
                   1443:                ICPP(pa);
                   1444:        }
                   1445: #endif
                   1446:        *pte = npte;
                   1447:        if (!wired && active_pmap(pmap))
                   1448:                TBIS(va);
                   1449: #ifdef M68K_MMU_HP
                   1450:        /*
                   1451:         * The following is executed if we are entering a second
                   1452:         * (or greater) mapping for a physical page and the mappings
                   1453:         * may create an aliasing problem.  In this case we must
                   1454:         * cache inhibit the descriptors involved and flush any
                   1455:         * external VAC.
                   1456:         */
                   1457:        if (checkpv && !cacheable) {
                   1458:                pmap_changebit(pa, PG_CI, ~0);
                   1459:                DCIA();
                   1460: #ifdef DEBUG
                   1461:                if ((pmapdebug & (PDB_CACHE|PDB_PVDUMP)) ==
                   1462:                    (PDB_CACHE|PDB_PVDUMP))
                   1463:                        pmap_pvdump(pa);
                   1464: #endif
                   1465:        }
                   1466: #endif
                   1467: #ifdef DEBUG
                   1468:        if ((pmapdebug & PDB_WIRING) && pmap != pmap_kernel())
                   1469:                pmap_check_wiring("enter", trunc_page((vaddr_t)pte));
                   1470: #endif
                   1471:
                   1472:        return 0;
                   1473: }
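                          /*
                           * Illustrative call (a sketch only; PMAP_WIRED and PMAP_CANFAIL
                           * are the standard pmap(9) flags, and the low flag bits carry
                           * the access type):
                           *
                           *      error = pmap_enter(pmap, va, pa, VM_PROT_READ|VM_PROT_WRITE,
                           *          VM_PROT_WRITE | PMAP_WIRED | PMAP_CANFAIL);
                           *      if (error)
                           *              (only reachable with PMAP_CANFAIL; the caller
                           *               backs off, frees memory, and retries)
                           */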
                   1474:
                   1475: void
1.50      cegger   1476: pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
1.1       chs      1477: {
1.20      tsutsui  1478:        pmap_t pmap = pmap_kernel();
1.1       chs      1479:        pt_entry_t *pte;
                   1480:        int s, npte;
                   1481:
                   1482:        PMAP_DPRINTF(PDB_FOLLOW|PDB_ENTER,
                   1483:            ("pmap_kenter_pa(%lx, %lx, %x)\n", va, pa, prot));
                   1484:
                   1485:        /*
                   1486:         * Segment table entry not valid, we need a new PT page
                   1487:         */
                   1488:
                   1489:        if (!pmap_ste_v(pmap, va)) {
                   1490:                s = splvm();
1.25      thorpej  1491:                pmap_enter_ptpage(pmap, va, false);
1.1       chs      1492:                splx(s);
                   1493:        }
                   1494:
                   1495:        pa = m68k_trunc_page(pa);
                   1496:        pte = pmap_pte(pmap, va);
                   1497:
                   1498:        PMAP_DPRINTF(PDB_ENTER, ("enter: pte %p, *pte %x\n", pte, *pte));
                   1499:        KASSERT(!pmap_pte_v(pte));
                   1500:
                   1501:        /*
                   1502:         * Increment counters
                   1503:         */
                   1504:
                   1505:        pmap->pm_stats.resident_count++;
                   1506:        pmap->pm_stats.wired_count++;
                   1507:
                   1508:        /*
                   1509:         * Build the new PTE.
                   1510:         */
                   1511:
                   1512:        npte = pa | pte_prot(pmap, prot) | PG_V | PG_W;
                   1513: #if defined(M68040) || defined(M68060)
                   1514: #if defined(M68020) || defined(M68030)
                   1515:        if (mmutype == MMU_68040 && (npte & PG_PROT) == PG_RW)
                   1516: #else
                   1517:        if ((npte & PG_PROT) == PG_RW)
                   1518: #endif
                   1519:                npte |= PG_CCB;
                   1520:
                   1521:        if (mmutype == MMU_68040) {
                   1522:                DCFP(pa);
                   1523:                ICPP(pa);
                   1524:        }
                   1525: #endif
                   1526:
                   1527:        *pte = npte;
                   1528:        TBIS(va);
                   1529: }
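                          /*
                           * Mappings entered here are unmanaged (no pv entry) and wired,
                           * and must be removed with pmap_kremove(), e.g. (sketch):
                           *
                           *      pmap_kenter_pa(va, pa, VM_PROT_READ|VM_PROT_WRITE, 0);
                           *      ...
                           *      pmap_kremove(va, PAGE_SIZE);
                           */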
                   1530:
                   1531: void
1.20      tsutsui  1532: pmap_kremove(vaddr_t va, vsize_t size)
1.1       chs      1533: {
1.20      tsutsui  1534:        pmap_t pmap = pmap_kernel();
1.1       chs      1535:        pt_entry_t *pte;
                   1536:        vaddr_t nssva;
                   1537:        vaddr_t eva = va + size;
                   1538: #ifdef M68K_MMU_HP
1.23      thorpej  1539:        bool firstpage, needcflush;
1.1       chs      1540: #endif
                   1541:
                   1542:        PMAP_DPRINTF(PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT,
                   1543:            ("pmap_kremove(%lx, %lx)\n", va, size));
                   1544:
                   1545: #ifdef M68K_MMU_HP
1.25      thorpej  1546:        firstpage = true;
                   1547:        needcflush = false;
1.1       chs      1548: #endif
                   1549:        while (va < eva) {
                   1550:                nssva = m68k_trunc_seg(va) + NBSEG;
                   1551:                if (nssva == 0 || nssva > eva)
                   1552:                        nssva = eva;
                   1553:
                   1554:                /*
                   1555:                 * If VA belongs to an unallocated segment,
                   1556:                 * skip to the next segment boundary.
                   1557:                 */
                   1558:
                   1559:                if (!pmap_ste_v(pmap, va)) {
                   1560:                        va = nssva;
                   1561:                        continue;
                   1562:                }
                   1563:
                   1564:                /*
                   1565:                 * Invalidate every valid mapping within this segment.
                   1566:                 */
                   1567:
                   1568:                pte = pmap_pte(pmap, va);
                   1569:                while (va < nssva) {
                   1570:                        if (!pmap_pte_v(pte)) {
                   1571:                                pte++;
1.3       thorpej  1572:                                va += PAGE_SIZE;
1.1       chs      1573:                                continue;
                   1574:                        }
                   1575: #ifdef M68K_MMU_HP
                   1576:                        if (pmap_aliasmask) {
                   1577:
                   1578:                                /*
                   1579:                                 * Purge kernel side of VAC to ensure
                   1580:                                 * we get the correct state of any
                   1581:                                 * hardware maintained bits.
                   1582:                                 */
                   1583:
                   1584:                                if (firstpage) {
                   1585:                                        DCIS();
1.25      thorpej  1586:                                        firstpage = false;
1.1       chs      1587:                                }
                   1588:
                   1589:                                /*
                   1590:                                 * Remember if we may need to
                   1591:                                 * flush the VAC.
                   1592:                                 */
                   1593:
1.25      thorpej  1594:                                needcflush = true;
1.1       chs      1595:                        }
                   1596: #endif
                   1597:                        pmap->pm_stats.wired_count--;
                   1598:                        pmap->pm_stats.resident_count--;
                   1599:                        *pte = PG_NV;
                   1600:                        TBIS(va);
                   1601:                        pte++;
1.3       thorpej  1602:                        va += PAGE_SIZE;
1.1       chs      1603:                }
                   1604:        }
                   1605:
                   1606: #ifdef M68K_MMU_HP
                   1607:
                   1608:        /*
                   1609:         * In a couple of cases, we don't need to worry about flushing
                   1610:         * the VAC:
                   1611:         *      1. if this is a kernel mapping,
                   1612:         *         we have already done it
                   1613:         *      2. if it is a user mapping not for the current process,
                   1614:         *         it won't be there
                   1615:         */
                   1616:
                   1617:        if (pmap_aliasmask && !active_user_pmap(pmap))
1.25      thorpej  1618:                needcflush = false;
1.1       chs      1619:        if (needcflush) {
                   1620:                if (pmap == pmap_kernel()) {
                   1621:                        DCIS();
                   1622:                } else {
                   1623:                        DCIU();
                   1624:                }
                   1625:        }
                   1626: #endif
                   1627: }
                   1628:
                   1629: /*
                   1630:  * pmap_unwire:                        [ INTERFACE ]
                   1631:  *
                   1632:  *     Clear the wired attribute for a map/virtual-address pair.
                   1633:  *
                   1634:  *     The mapping must already exist in the pmap.
                   1635:  */
                   1636: void
1.20      tsutsui  1637: pmap_unwire(pmap_t pmap, vaddr_t va)
1.1       chs      1638: {
                   1639:        pt_entry_t *pte;
                   1640:
                   1641:        PMAP_DPRINTF(PDB_FOLLOW,
                   1642:            ("pmap_unwire(%p, %lx)\n", pmap, va));
                   1643:
                   1644:        pte = pmap_pte(pmap, va);
                   1645:
                   1646:        /*
                    1647:         * If wiring actually changed (always?), clear the wire bit and
                   1648:         * update the wire count.  Note that wiring is not a hardware
                   1649:         * characteristic so there is no need to invalidate the TLB.
                   1650:         */
                   1651:
                   1652:        if (pmap_pte_w_chg(pte, 0)) {
1.25      thorpej  1653:                pmap_pte_set_w(pte, false);
1.1       chs      1654:                pmap->pm_stats.wired_count--;
                   1655:        }
                   1656: }
                   1657:
                   1658: /*
                   1659:  * pmap_extract:               [ INTERFACE ]
                   1660:  *
                   1661:  *     Extract the physical address associated with the given
                   1662:  *     pmap/virtual address pair.
                   1663:  */
1.23      thorpej  1664: bool
1.20      tsutsui  1665: pmap_extract(pmap_t pmap, vaddr_t va, paddr_t *pap)
1.1       chs      1666: {
                   1667:        paddr_t pa;
                   1668:        u_int pte;
1.8       cl       1669:
1.1       chs      1670:        PMAP_DPRINTF(PDB_FOLLOW,
                   1671:            ("pmap_extract(%p, %lx) -> ", pmap, va));
                   1672:
                   1673:        if (pmap_ste_v(pmap, va)) {
                   1674:                pte = *(u_int *)pmap_pte(pmap, va);
                   1675:                if (pte) {
                   1676:                        pa = (pte & PG_FRAME) | (va & ~PG_FRAME);
                   1677:                        if (pap != NULL)
                   1678:                                *pap = pa;
1.9       mycroft  1679: #ifdef DEBUG
                   1680:                        if (pmapdebug & PDB_FOLLOW)
                   1681:                                printf("%lx\n", pa);
                   1682: #endif
1.25      thorpej  1683:                        return true;
1.1       chs      1684:                }
                   1685:        }
                   1686: #ifdef DEBUG
1.9       mycroft  1687:        if (pmapdebug & PDB_FOLLOW)
                   1688:                printf("failed\n");
1.1       chs      1689: #endif
1.25      thorpej  1690:        return false;
1.1       chs      1691: }
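                          /*
                           * Typical caller pattern (a sketch only):
                           *
                           *      paddr_t pa;
                           *
                           *      if (!pmap_extract(pmap_kernel(), va, &pa))
                           *              panic("lost mapping for va");
                           */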
                   1692:
                   1693: /*
                   1694:  * pmap_copy:          [ INTERFACE ]
                   1695:  *
                   1696:  *     Copy the mapping range specified by src_addr/len
                   1697:  *     from the source map to the range dst_addr/len
                   1698:  *     in the destination map.
                   1699:  *
                   1700:  *     This routine is only advisory and need not do anything.
                   1701:  */
                   1702: void
1.20      tsutsui  1703: pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vaddr_t dst_addr, vsize_t len,
                   1704:     vaddr_t src_addr)
1.1       chs      1705: {
                   1706:
                   1707:        PMAP_DPRINTF(PDB_FOLLOW,
                   1708:            ("pmap_copy(%p, %p, %lx, %lx, %lx)\n",
                   1709:            dst_pmap, src_pmap, dst_addr, len, src_addr));
                   1710: }
                   1711:
                   1712: /*
                   1713:  * pmap_collect1():
                   1714:  *
                    1715:  *     Garbage-collect KPT pages.  Helper for the (bogus)
                    1716:  *     pmap_collect() below.
                   1717:  *
                   1718:  *     Note: THIS SHOULD GO AWAY, AND BE REPLACED WITH A BETTER
                   1719:  *     WAY OF HANDLING PT PAGES!
                   1720:  */
1.49      rmind    1721: static inline void
1.20      tsutsui  1722: pmap_collect1(pmap_t pmap, paddr_t startpa, paddr_t endpa)
1.1       chs      1723: {
                   1724:        paddr_t pa;
1.46      thorpej  1725:        struct pv_header *pvh;
1.1       chs      1726:        struct pv_entry *pv;
                   1727:        pt_entry_t *pte;
                   1728:        paddr_t kpa;
                   1729: #ifdef DEBUG
                   1730:        st_entry_t *ste;
                   1731:        int opmapdebug = 0;
                   1732: #endif
                   1733:
1.3       thorpej  1734:        for (pa = startpa; pa < endpa; pa += PAGE_SIZE) {
1.1       chs      1735:                struct kpt_page *kpt, **pkpt;
                   1736:
                   1737:                /*
                   1738:                 * Locate physical pages which are being used as kernel
                   1739:                 * page table pages.
                   1740:                 */
                   1741:
1.46      thorpej  1742:                pvh = pa_to_pvh(pa);
                   1743:                pv = &pvh->pvh_first;
                   1744:                if (pv->pv_pmap != pmap_kernel() ||
                   1745:                    !(pvh->pvh_attrs & PVH_PTPAGE))
1.1       chs      1746:                        continue;
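                                          /*
                                           * Find the entry that maps this page as a
                                           * kernel PT page, i.e. one with pv_ptste set
                                           * and a kernel pv_ptpmap.
                                           */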
                   1747:                do {
                   1748:                        if (pv->pv_ptste && pv->pv_ptpmap == pmap_kernel())
                   1749:                                break;
                   1750:                } while ((pv = pv->pv_next));
                   1751:                if (pv == NULL)
                   1752:                        continue;
                   1753: #ifdef DEBUG
                   1754:                if (pv->pv_va < (vaddr_t)Sysmap ||
                   1755:                    pv->pv_va >= (vaddr_t)Sysmap + M68K_MAX_PTSIZE) {
                   1756:                        printf("collect: kernel PT VA out of range\n");
                   1757:                        pmap_pvdump(pa);
                   1758:                        continue;
                   1759:                }
                   1760: #endif
1.3       thorpej  1761:                pte = (pt_entry_t *)(pv->pv_va + PAGE_SIZE);
1.1       chs      1762:                while (--pte >= (pt_entry_t *)pv->pv_va && *pte == PG_NV)
                   1763:                        ;
                   1764:                if (pte >= (pt_entry_t *)pv->pv_va)
                   1765:                        continue;
                   1766:
                   1767: #ifdef DEBUG
                   1768:                if (pmapdebug & (PDB_PTPAGE|PDB_COLLECT)) {
                   1769:                        printf("collect: freeing KPT page at %lx (ste %x@%p)\n",
1.20      tsutsui  1770:                            pv->pv_va, *pv->pv_ptste, pv->pv_ptste);
1.1       chs      1771:                        opmapdebug = pmapdebug;
                   1772:                        pmapdebug |= PDB_PTPAGE;
                   1773:                }
                   1774:
                   1775:                ste = pv->pv_ptste;
                   1776: #endif
                   1777:                /*
                   1778:                 * If all entries were invalid we can remove the page.
                    1779:                 * We call pmap_remove_mapping to take care of invalidating
                   1780:                 * ST and Sysptmap entries.
                   1781:                 */
                   1782:
                   1783:                (void) pmap_extract(pmap, pv->pv_va, &kpa);
                   1784:                pmap_remove_mapping(pmap, pv->pv_va, NULL,
                   1785:                    PRM_TFLUSH|PRM_CFLUSH);
                   1786:
                   1787:                /*
                   1788:                 * Use the physical address to locate the original
                   1789:                 * (kmem_alloc assigned) address for the page and put
                   1790:                 * that page back on the free list.
                   1791:                 */
                   1792:
                   1793:                for (pkpt = &kpt_used_list, kpt = *pkpt;
                   1794:                     kpt != NULL;
                   1795:                     pkpt = &kpt->kpt_next, kpt = *pkpt)
                   1796:                        if (kpt->kpt_pa == kpa)
                   1797:                                break;
                   1798: #ifdef DEBUG
                   1799:                if (kpt == NULL)
                   1800:                        panic("pmap_collect: lost a KPT page");
                   1801:                if (pmapdebug & (PDB_PTPAGE|PDB_COLLECT))
                   1802:                        printf("collect: %lx (%lx) to free list\n",
1.20      tsutsui  1803:                            kpt->kpt_va, kpa);
1.1       chs      1804: #endif
                   1805:                *pkpt = kpt->kpt_next;
                   1806:                kpt->kpt_next = kpt_free_list;
                   1807:                kpt_free_list = kpt;
                   1808: #ifdef DEBUG
                   1809:                if (pmapdebug & (PDB_PTPAGE|PDB_COLLECT))
                   1810:                        pmapdebug = opmapdebug;
                   1811:
                   1812:                if (*ste != SG_NV)
                   1813:                        printf("collect: kernel STE at %p still valid (%x)\n",
1.20      tsutsui  1814:                            ste, *ste);
1.1       chs      1815:                ste = &Sysptmap[ste - pmap_ste(pmap_kernel(), 0)];
                   1816:                if (*ste != SG_NV)
                   1817:                        printf("collect: kernel PTmap at %p still valid (%x)\n",
1.20      tsutsui  1818:                            ste, *ste);
1.1       chs      1819: #endif
                   1820:        }
                   1821: }
                   1822:
                   1823: /*
1.49      rmind    1824:  * pmap_collect:
                   1825:  *
                   1826:  *     Helper for pmap_enter_ptpage().
                   1827:  *
                   1828:  *     Garbage collects the physical map system for pages which are no
                   1829:  *     longer used.  Success need not be guaranteed -- that is, there
                   1830:  *     may well be pages which are not referenced, but others may be
                   1831:  *     collected.
                   1832:  */
                   1833: static void
                   1834: pmap_collect(void)
                   1835: {
                   1836:        int bank, s;
                   1837:
                   1838:        /*
                   1839:         * XXX This is very bogus.  We should handle kernel PT
                   1840:         * XXX pages much differently.
                   1841:         */
                   1842:
                   1843:        s = splvm();
                   1844:        for (bank = 0; bank < vm_nphysseg; bank++) {
                   1845:                pmap_collect1(pmap_kernel(), ptoa(vm_physmem[bank].start),
                   1846:                    ptoa(vm_physmem[bank].end));
                   1847:        }
                   1848:        splx(s);
                   1849:
                   1850: #ifdef notyet
                   1851:        /* Go compact and garbage-collect the pv_table. */
                   1852:        pmap_collect_pv();
                   1853: #endif
                   1854: }
                   1855:
                   1856: /*
1.1       chs      1857:  * pmap_zero_page:             [ INTERFACE ]
                   1858:  *
                   1859:  *     Zero the specified (machine independent) page by mapping the page
                    1860:  *     into virtual memory and using zeropage() to clear its contents,
                    1861:  *     one machine dependent page at a time.
                   1862:  *
                   1863:  *     Note: WE DO NOT CURRENTLY LOCK THE TEMPORARY ADDRESSES!
                   1864:  *           (Actually, we go to splvm(), and since we don't
                   1865:  *           support multiple processors, this is sufficient.)
                   1866:  */
                   1867: void
1.20      tsutsui  1868: pmap_zero_page(paddr_t phys)
1.1       chs      1869: {
                   1870:        int npte;
                   1871:
                   1872:        PMAP_DPRINTF(PDB_FOLLOW, ("pmap_zero_page(%lx)\n", phys));
                   1873:
                   1874:        npte = phys | PG_V;
                   1875: #ifdef M68K_MMU_HP
                   1876:        if (pmap_aliasmask) {
                   1877:
                   1878:                /*
                   1879:                 * Cache-inhibit the mapping on VAC machines, as we would
                   1880:                 * be wasting the cache load.
                   1881:                 */
                   1882:
                   1883:                npte |= PG_CI;
                   1884:        }
                   1885: #endif
                   1886:
                   1887: #if defined(M68040) || defined(M68060)
                   1888: #if defined(M68020) || defined(M68030)
                   1889:        if (mmutype == MMU_68040)
                   1890: #endif
                   1891:        {
                   1892:                /*
                   1893:                 * Set copyback caching on the page; this is required
                   1894:                 * for cache consistency (since regular mappings are
                   1895:                 * copyback as well).
                   1896:                 */
                   1897:
                   1898:                npte |= PG_CCB;
                   1899:        }
                   1900: #endif
                   1901:
                   1902:        *caddr1_pte = npte;
                   1903:        TBIS((vaddr_t)CADDR1);
                   1904:
                   1905:        zeropage(CADDR1);
                   1906:
                   1907: #ifdef DEBUG
                   1908:        *caddr1_pte = PG_NV;
                   1909:        TBIS((vaddr_t)CADDR1);
                   1910: #endif
                   1911: }
                   1912:
                   1913: /*
                   1914:  * pmap_copy_page:             [ INTERFACE ]
                   1915:  *
                   1916:  *     Copy the specified (machine independent) page by mapping the page
                    1917:  *     into virtual memory and using copypage() to copy the page, one
                   1918:  *     dependent page at a time.
                   1919:  *
                   1920:  *     Note: WE DO NOT CURRENTLY LOCK THE TEMPORARY ADDRESSES!
                   1921:  *           (Actually, we go to splvm(), and since we don't
                   1922:  *           support multiple processors, this is sufficient.)
                   1923:  */
                   1924: void
1.20      tsutsui  1925: pmap_copy_page(paddr_t src, paddr_t dst)
1.1       chs      1926: {
                   1927:        int npte1, npte2;
                   1928:
                   1929:        PMAP_DPRINTF(PDB_FOLLOW, ("pmap_copy_page(%lx, %lx)\n", src, dst));
                   1930:
                   1931:        npte1 = src | PG_RO | PG_V;
                   1932:        npte2 = dst | PG_V;
                   1933: #ifdef M68K_MMU_HP
                   1934:        if (pmap_aliasmask) {
                   1935:
                   1936:                /*
                   1937:                 * Cache-inhibit the mapping on VAC machines, as we would
                   1938:                 * be wasting the cache load.
                   1939:                 */
                   1940:
                   1941:                npte1 |= PG_CI;
                   1942:                npte2 |= PG_CI;
                   1943:        }
                   1944: #endif
                   1945:
                   1946: #if defined(M68040) || defined(M68060)
                   1947: #if defined(M68020) || defined(M68030)
                   1948:        if (mmutype == MMU_68040)
                   1949: #endif
                   1950:        {
                   1951:                /*
                   1952:                 * Set copyback caching on the pages; this is required
                   1953:                 * for cache consistency (since regular mappings are
                   1954:                 * copyback as well).
                   1955:                 */
                   1956:
                   1957:                npte1 |= PG_CCB;
                   1958:                npte2 |= PG_CCB;
                   1959:        }
                   1960: #endif
                   1961:
                   1962:        *caddr1_pte = npte1;
                   1963:        TBIS((vaddr_t)CADDR1);
                   1964:
                   1965:        *caddr2_pte = npte2;
                   1966:        TBIS((vaddr_t)CADDR2);
                   1967:
                   1968:        copypage(CADDR1, CADDR2);
                   1969:
                   1970: #ifdef DEBUG
                   1971:        *caddr1_pte = PG_NV;
                   1972:        TBIS((vaddr_t)CADDR1);
                   1973:
                   1974:        *caddr2_pte = PG_NV;
                   1975:        TBIS((vaddr_t)CADDR2);
                   1976: #endif
                   1977: }
                   1978:
                   1979: /*
                   1980:  * pmap_clear_modify:          [ INTERFACE ]
                   1981:  *
                   1982:  *     Clear the modify bits on the specified physical page.
                   1983:  */
1.23      thorpej  1984: bool
1.20      tsutsui  1985: pmap_clear_modify(struct vm_page *pg)
1.1       chs      1986: {
                   1987:        paddr_t pa = VM_PAGE_TO_PHYS(pg);
                   1988:
                   1989:        PMAP_DPRINTF(PDB_FOLLOW, ("pmap_clear_modify(%p)\n", pg));
                   1990:
                   1991:        return pmap_changebit(pa, 0, ~PG_M);
                   1992: }
                   1993:
                   1994: /*
                   1995:  * pmap_clear_reference:       [ INTERFACE ]
                   1996:  *
                   1997:  *     Clear the reference bit on the specified physical page.
                   1998:  */
1.23      thorpej  1999: bool
1.20      tsutsui  2000: pmap_clear_reference(struct vm_page *pg)
1.1       chs      2001: {
                   2002:        paddr_t pa = VM_PAGE_TO_PHYS(pg);
                   2003:
                   2004:        PMAP_DPRINTF(PDB_FOLLOW, ("pmap_clear_reference(%p)\n", pg));
                   2005:
                   2006:        return pmap_changebit(pa, 0, ~PG_U);
                   2007: }
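
/*
 * A minimal sketch of how a pagedaemon-style scan might pair
 * pmap_clear_reference() with pmap_is_referenced() (defined below);
 * hypothetical helper, for illustration only:
 */
#if 0
static bool
example_page_was_idle(struct vm_page *pg)
{
	bool referenced = pmap_is_referenced(pg);

	pmap_clear_reference(pg);	/* re-arm the bit for the next scan */
	return !referenced;		/* idle if nothing touched the page */
}
#endif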
                   2008:
                   2009: /*
                   2010:  * pmap_is_referenced:         [ INTERFACE ]
                   2011:  *
                   2012:  *     Return whether or not the specified physical page is referenced
                   2013:  *     by any physical maps.
                   2014:  */
1.23      thorpej  2015: bool
1.20      tsutsui  2016: pmap_is_referenced(struct vm_page *pg)
1.1       chs      2017: {
                   2018:        paddr_t pa = VM_PAGE_TO_PHYS(pg);
                   2019:
1.20      tsutsui  2020:        return pmap_testbit(pa, PG_U);
1.1       chs      2021: }
                   2022:
                   2023: /*
                   2024:  * pmap_is_modified:           [ INTERFACE ]
                   2025:  *
                   2026:  *     Return whether or not the specified physical page is modified
                   2027:  *     by any physical maps.
                   2028:  */
1.23      thorpej  2029: bool
1.20      tsutsui  2030: pmap_is_modified(struct vm_page *pg)
1.1       chs      2031: {
                   2032:        paddr_t pa = VM_PAGE_TO_PHYS(pg);
                   2033:
1.20      tsutsui  2034:        return pmap_testbit(pa, PG_M);
1.1       chs      2035: }
                   2036:
                   2037: /*
                   2038:  * pmap_phys_address:          [ INTERFACE ]
                   2039:  *
                   2040:  *     Return the physical address corresponding to the specified
                   2041:  *     cookie.  Used by the device pager to decode a device driver's
                   2042:  *     mmap entry point return value.
                   2043:  *
                   2044:  *     Note: no locking is necessary in this function.
                   2045:  */
                   2046: paddr_t
1.32      macallan 2047: pmap_phys_address(paddr_t ppn)
1.1       chs      2048: {
1.20      tsutsui  2049:        return m68k_ptob(ppn);
1.1       chs      2050: }
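
/*
 * A worked example for the conversion above, assuming 4KB pages
 * (PGSHIFT == 12): m68k_ptob() shifts the page frame number left by
 * PGSHIFT, so cookie 0x123 decodes to physical address 0x123000.
 */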
                   2051:
                   2052: #ifdef M68K_MMU_HP
                   2053: /*
                   2054:  * pmap_prefer:                        [ INTERFACE ]
                   2055:  *
                   2056:  *     Find the first virtual address >= *vap that does not
                   2057:  *     cause a virtually-addressed cache alias problem.
                   2058:  */
                   2059: void
1.20      tsutsui  2060: pmap_prefer(vaddr_t foff, vaddr_t *vap)
1.1       chs      2061: {
                   2062:        vaddr_t va;
                   2063:        vsize_t d;
                   2064:
                   2065: #ifdef M68K_MMU_MOTOROLA
                   2066:        if (pmap_aliasmask)
                   2067: #endif
                   2068:        {
                   2069:                va = *vap;
                   2070:                d = foff - va;
                   2071:                d &= pmap_aliasmask;
                   2072:                *vap = va + d;
                   2073:        }
                   2074: }
                   2075: #endif /* M68K_MMU_HP */
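
/*
 * A worked example for the arithmetic above, assuming a hypothetical
 * 64KB VAC (pmap_aliasmask == 0xffff): for foff == 0x12345 and
 * *vap == 0x80001000,
 *
 *	d    = (0x12345 - 0x80001000) & 0xffff = 0x1345
 *	*vap = 0x80001000 + 0x1345   = 0x80002345
 *
 * The returned address shares its low 16 bits with foff, so both
 * land on the same cache "color" and cannot alias another mapping
 * of the same object offset.
 */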
                   2076:
                   2077: /*
                   2078:  * Miscellaneous support routines follow
                   2079:  */
                   2080:
                   2081: /*
                   2082:  * pmap_remove_mapping:
                   2083:  *
                   2084:  *     Invalidate a single page denoted by pmap/va.
                   2085:  *
                   2086:  *     If (pte != NULL), it is the already computed PTE for the page.
                   2087:  *
                   2088:  *     If (flags & PRM_TFLUSH), we must invalidate any TLB information.
                   2089:  *
                   2090:  *     If (flags & PRM_CFLUSH), we must flush/invalidate any cache
                   2091:  *     information.
                   2092:  *
                   2093:  *     If (flags & PRM_KEEPPTPAGE), we don't free the page table page
                    2094:  *     if the reference count drops to zero.
                   2095:  */
                   2096: /* static */
                   2097: void
1.20      tsutsui  2098: pmap_remove_mapping(pmap_t pmap, vaddr_t va, pt_entry_t *pte, int flags)
1.1       chs      2099: {
                   2100:        paddr_t pa;
1.46      thorpej  2101:        struct pv_header *pvh;
1.1       chs      2102:        struct pv_entry *pv, *npv;
                   2103:        struct pmap *ptpmap;
                   2104:        st_entry_t *ste;
                   2105:        int s, bits;
                   2106: #ifdef DEBUG
                   2107:        pt_entry_t opte;
                   2108: #endif
                   2109:
                   2110:        PMAP_DPRINTF(PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT,
                   2111:            ("pmap_remove_mapping(%p, %lx, %p, %x)\n",
                   2112:            pmap, va, pte, flags));
                   2113:
                   2114:        /*
                   2115:         * PTE not provided, compute it from pmap and va.
                   2116:         */
                   2117:
                   2118:        if (pte == NULL) {
                   2119:                pte = pmap_pte(pmap, va);
                   2120:                if (*pte == PG_NV)
                   2121:                        return;
                   2122:        }
                   2123:
                   2124: #ifdef M68K_MMU_HP
                   2125:        if (pmap_aliasmask && (flags & PRM_CFLUSH)) {
                   2126:
                   2127:                /*
                   2128:                 * Purge kernel side of VAC to ensure we get the correct
                   2129:                 * state of any hardware maintained bits.
                   2130:                 */
                   2131:
                   2132:                DCIS();
                   2133:
                   2134:                /*
                   2135:                 * If this is a non-CI user mapping for the current process,
                   2136:                 * flush the VAC.  Note that the kernel side was flushed
                   2137:                 * above so we don't worry about non-CI kernel mappings.
                   2138:                 */
                   2139:
                   2140:                if (active_user_pmap(pmap) && !pmap_pte_ci(pte)) {
                   2141:                        DCIU();
                   2142:                }
                   2143:        }
                   2144: #endif
                   2145:
                   2146:        pa = pmap_pte_pa(pte);
                   2147: #ifdef DEBUG
                   2148:        opte = *pte;
                   2149: #endif
                   2150:
                   2151:        /*
                   2152:         * Update statistics
                   2153:         */
                   2154:
                   2155:        if (pmap_pte_w(pte))
                   2156:                pmap->pm_stats.wired_count--;
                   2157:        pmap->pm_stats.resident_count--;
                   2158:
                   2159: #if defined(M68040) || defined(M68060)
                   2160: #if defined(M68020) || defined(M68030)
                   2161:        if (mmutype == MMU_68040)
                   2162: #endif
                   2163:        if ((flags & PRM_CFLUSH)) {
                   2164:                DCFP(pa);
                   2165:                ICPP(pa);
                   2166:        }
                   2167: #endif
                   2168:
                   2169:        /*
                   2170:         * Invalidate the PTE after saving the reference modify info.
                   2171:         */
                   2172:
                   2173:        PMAP_DPRINTF(PDB_REMOVE, ("remove: invalidating pte at %p\n", pte));
                   2174:        bits = *pte & (PG_U|PG_M);
                   2175:        *pte = PG_NV;
                   2176:        if ((flags & PRM_TFLUSH) && active_pmap(pmap))
                   2177:                TBIS(va);
                   2178:
                   2179:        /*
                   2180:         * For user mappings decrement the wiring count on
                   2181:         * the PT page.
                   2182:         */
                   2183:
                   2184:        if (pmap != pmap_kernel()) {
                   2185:                vaddr_t ptpva = trunc_page((vaddr_t)pte);
                   2186:                int refs = pmap_ptpage_delref(ptpva);
                   2187: #ifdef DEBUG
                   2188:                if (pmapdebug & PDB_WIRING)
                   2189:                        pmap_check_wiring("remove", ptpva);
                   2190: #endif
                   2191:
                   2192:                /*
                   2193:                 * If reference count drops to 0, and we're not instructed
                   2194:                 * to keep it around, free the PT page.
                   2195:                 */
                   2196:
                   2197:                if (refs == 0 && (flags & PRM_KEEPPTPAGE) == 0) {
                   2198: #ifdef DIAGNOSTIC
1.46      thorpej  2199:                        struct pv_header *ptppvh;
1.16      tsutsui  2200:                        struct pv_entry *ptppv;
1.1       chs      2201: #endif
1.15      tsutsui  2202:                        paddr_t ptppa;
1.1       chs      2203:
1.15      tsutsui  2204:                        ptppa = pmap_pte_pa(pmap_pte(pmap_kernel(), ptpva));
1.1       chs      2205: #ifdef DIAGNOSTIC
1.15      tsutsui  2206:                        if (PAGE_IS_MANAGED(ptppa) == 0)
1.1       chs      2207:                                panic("pmap_remove_mapping: unmanaged PT page");
1.46      thorpej  2208:                        ptppvh = pa_to_pvh(ptppa);
                   2209:                        ptppv = &ptppvh->pvh_first;
1.16      tsutsui  2210:                        if (ptppv->pv_ptste == NULL)
1.1       chs      2211:                                panic("pmap_remove_mapping: ptste == NULL");
1.16      tsutsui  2212:                        if (ptppv->pv_pmap != pmap_kernel() ||
                   2213:                            ptppv->pv_va != ptpva ||
                   2214:                            ptppv->pv_next != NULL)
1.1       chs      2215:                                panic("pmap_remove_mapping: "
                   2216:                                    "bad PT page pmap %p, va 0x%lx, next %p",
1.16      tsutsui  2217:                                    ptppv->pv_pmap, ptppv->pv_va,
                   2218:                                    ptppv->pv_next);
1.1       chs      2219: #endif
                   2220:                        pmap_remove_mapping(pmap_kernel(), ptpva,
                   2221:                            NULL, PRM_TFLUSH|PRM_CFLUSH);
1.36      tsutsui  2222:                        mutex_enter(&uvm_kernel_object->vmobjlock);
1.15      tsutsui  2223:                        uvm_pagefree(PHYS_TO_VM_PAGE(ptppa));
1.36      tsutsui  2224:                        mutex_exit(&uvm_kernel_object->vmobjlock);
1.1       chs      2225:                        PMAP_DPRINTF(PDB_REMOVE|PDB_PTPAGE,
                   2226:                            ("remove: PT page 0x%lx (0x%lx) freed\n",
1.15      tsutsui  2227:                            ptpva, ptppa));
1.1       chs      2228:                }
                   2229:        }
                   2230:
                   2231:        /*
                   2232:         * If this isn't a managed page, we are all done.
                   2233:         */
                   2234:
                   2235:        if (PAGE_IS_MANAGED(pa) == 0)
                   2236:                return;
                   2237:
                   2238:        /*
                   2239:         * Otherwise remove it from the PV table
                   2240:         * (raise IPL since we may be called at interrupt time).
                   2241:         */
                   2242:
1.46      thorpej  2243:        pvh = pa_to_pvh(pa);
                   2244:        pv = &pvh->pvh_first;
1.1       chs      2245:        ste = NULL;
                   2246:        s = splvm();
                   2247:
                   2248:        /*
                   2249:         * If it is the first entry on the list, it is actually
                   2250:         * in the header and we must copy the following entry up
                   2251:         * to the header.  Otherwise we must search the list for
                   2252:         * the entry.  In either case we free the now unused entry.
                   2253:         */
                   2254:
                   2255:        if (pmap == pv->pv_pmap && va == pv->pv_va) {
                   2256:                ste = pv->pv_ptste;
                   2257:                ptpmap = pv->pv_ptpmap;
                   2258:                npv = pv->pv_next;
                   2259:                if (npv) {
                   2260:                        *pv = *npv;
                   2261:                        pmap_free_pv(npv);
                   2262:                } else
                   2263:                        pv->pv_pmap = NULL;
                   2264:        } else {
                   2265:                for (npv = pv->pv_next; npv; npv = npv->pv_next) {
                   2266:                        if (pmap == npv->pv_pmap && va == npv->pv_va)
                   2267:                                break;
                   2268:                        pv = npv;
                   2269:                }
                   2270: #ifdef DEBUG
                   2271:                if (npv == NULL)
                    2272:                        panic("pmap_remove_mapping: PA not in pv_tab");
                   2273: #endif
                   2274:                ste = npv->pv_ptste;
                   2275:                ptpmap = npv->pv_ptpmap;
                   2276:                pv->pv_next = npv->pv_next;
                   2277:                pmap_free_pv(npv);
1.46      thorpej  2278:                pvh = pa_to_pvh(pa);
                   2279:                pv = &pvh->pvh_first;
1.1       chs      2280:        }
                   2281:
                   2282: #ifdef M68K_MMU_HP
                   2283:
                   2284:        /*
                    2285:         * If only one mapping is left, we no longer need to cache-inhibit.
                   2286:         */
                   2287:
                   2288:        if (pmap_aliasmask &&
1.46      thorpej  2289:            pv->pv_pmap && pv->pv_next == NULL && (pvh->pvh_attrs & PVH_CI)) {
1.1       chs      2290:                PMAP_DPRINTF(PDB_CACHE,
                   2291:                    ("remove: clearing CI for pa %lx\n", pa));
1.46      thorpej  2292:                pvh->pvh_attrs &= ~PVH_CI;
1.1       chs      2293:                pmap_changebit(pa, 0, ~PG_CI);
                   2294: #ifdef DEBUG
                   2295:                if ((pmapdebug & (PDB_CACHE|PDB_PVDUMP)) ==
                   2296:                    (PDB_CACHE|PDB_PVDUMP))
                   2297:                        pmap_pvdump(pa);
                   2298: #endif
                   2299:        }
                   2300: #endif
                   2301:
                   2302:        /*
                   2303:         * If this was a PT page we must also remove the
                   2304:         * mapping from the associated segment table.
                   2305:         */
                   2306:
                   2307:        if (ste) {
                   2308:                PMAP_DPRINTF(PDB_REMOVE|PDB_PTPAGE,
                   2309:                    ("remove: ste was %x@%p pte was %x@%p\n",
                   2310:                    *ste, ste, opte, pmap_pte(pmap, va)));
                   2311: #if defined(M68040) || defined(M68060)
                   2312: #if defined(M68020) || defined(M68030)
                   2313:                if (mmutype == MMU_68040)
                   2314: #endif
                   2315:                {
                   2316:                        st_entry_t *este = &ste[NPTEPG/SG4_LEV3SIZE];
                   2317:
                   2318:                        while (ste < este)
                   2319:                                *ste++ = SG_NV;
                   2320: #ifdef DEBUG
                   2321:                        ste -= NPTEPG/SG4_LEV3SIZE;
                   2322: #endif
                   2323:                }
                   2324: #if defined(M68020) || defined(M68030)
                   2325:                else
                   2326: #endif
                   2327: #endif
                   2328: #if defined(M68020) || defined(M68030)
                   2329:                *ste = SG_NV;
                   2330: #endif
                   2331:
                   2332:                /*
                   2333:                 * If it was a user PT page, we decrement the
                   2334:                 * reference count on the segment table as well,
                   2335:                 * freeing it if it is now empty.
                   2336:                 */
                   2337:
                   2338:                if (ptpmap != pmap_kernel()) {
                   2339:                        PMAP_DPRINTF(PDB_REMOVE|PDB_SEGTAB,
                   2340:                            ("remove: stab %p, refcnt %d\n",
                   2341:                            ptpmap->pm_stab, ptpmap->pm_sref - 1));
                   2342: #ifdef DEBUG
                   2343:                        if ((pmapdebug & PDB_PARANOIA) &&
                   2344:                            ptpmap->pm_stab !=
                   2345:                             (st_entry_t *)trunc_page((vaddr_t)ste))
                   2346:                                panic("remove: bogus ste");
                   2347: #endif
                   2348:                        if (--(ptpmap->pm_sref) == 0) {
                   2349:                                PMAP_DPRINTF(PDB_REMOVE|PDB_SEGTAB,
                   2350:                                    ("remove: free stab %p\n",
                   2351:                                    ptpmap->pm_stab));
1.14      yamt     2352:                                uvm_km_free(st_map, (vaddr_t)ptpmap->pm_stab,
                   2353:                                    M68K_STSIZE, UVM_KMF_WIRED);
1.1       chs      2354:                                ptpmap->pm_stab = Segtabzero;
                   2355:                                ptpmap->pm_stpa = Segtabzeropa;
                   2356: #if defined(M68040) || defined(M68060)
                   2357: #if defined(M68020) || defined(M68030)
                   2358:                                if (mmutype == MMU_68040)
                   2359: #endif
                   2360:                                        ptpmap->pm_stfree = protostfree;
                   2361: #endif
                   2362:
                   2363:                                /*
                   2364:                                 * XXX may have changed segment table
                   2365:                                 * pointer for current process so
                   2366:                                 * update now to reload hardware.
                   2367:                                 */
                   2368:
                   2369:                                if (active_user_pmap(ptpmap))
                   2370:                                        PMAP_ACTIVATE(ptpmap, 1);
                   2371:                        }
                   2372:                }
1.46      thorpej  2373:                pvh->pvh_attrs &= ~PVH_PTPAGE;
1.1       chs      2374:                ptpmap->pm_ptpages--;
                   2375:        }
                   2376:
                   2377:        /*
                   2378:         * Update saved attributes for managed page
                   2379:         */
                   2380:
1.46      thorpej  2381:        pvh->pvh_attrs |= bits;
1.1       chs      2382:        splx(s);
                   2383: }
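
/*
 * A minimal sketch of typical invocations of the routine above
 * (hypothetical fragments; real callers in this file may pass
 * different flag combinations):
 */
#if 0
	/* Drop one user mapping, flushing TLB and cache state. */
	pmap_remove_mapping(pmap, va, NULL, PRM_TFLUSH|PRM_CFLUSH);

	/* Same, but keep the PT page alive even at refcount zero. */
	pmap_remove_mapping(pmap, va, NULL,
	    PRM_TFLUSH|PRM_CFLUSH|PRM_KEEPPTPAGE);
#endif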
                   2384:
                   2385: /*
                   2386:  * pmap_testbit:
                   2387:  *
                   2388:  *     Test the modified/referenced bits of a physical page.
                   2389:  */
                   2390: /* static */
1.23      thorpej  2391: bool
1.20      tsutsui  2392: pmap_testbit(paddr_t pa, int bit)
1.1       chs      2393: {
1.46      thorpej  2394:        struct pv_header *pvh;
1.1       chs      2395:        struct pv_entry *pv;
                   2396:        pt_entry_t *pte;
                   2397:        int s;
                   2398:
1.46      thorpej  2399:        pvh = pa_to_pvh(pa);
                   2400:        pv = &pvh->pvh_first;
1.1       chs      2401:        s = splvm();
                   2402:
                   2403:        /*
                   2404:         * Check saved info first
                   2405:         */
                   2406:
1.46      thorpej  2407:        if (pvh->pvh_attrs & bit) {
1.1       chs      2408:                splx(s);
1.25      thorpej  2409:                return true;
1.1       chs      2410:        }
                   2411:
                   2412: #ifdef M68K_MMU_HP
                   2413:
                   2414:        /*
                   2415:         * Flush VAC to get correct state of any hardware maintained bits.
                   2416:         */
                   2417:
                   2418:        if (pmap_aliasmask && (bit & (PG_U|PG_M)))
                   2419:                DCIS();
                   2420: #endif
                   2421:
                   2422:        /*
                   2423:         * Not found.  Check current mappings, returning immediately if
                   2424:         * found.  Cache a hit to speed future lookups.
                   2425:         */
                   2426:
                   2427:        if (pv->pv_pmap != NULL) {
                   2428:                for (; pv; pv = pv->pv_next) {
                   2429:                        pte = pmap_pte(pv->pv_pmap, pv->pv_va);
                   2430:                        if (*pte & bit) {
1.46      thorpej  2431:                                pvh->pvh_attrs |= bit;
1.1       chs      2432:                                splx(s);
1.25      thorpej  2433:                                return true;
1.1       chs      2434:                        }
                   2435:                }
                   2436:        }
                   2437:        splx(s);
1.25      thorpej  2438:        return false;
1.1       chs      2439: }
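
/*
 * A short illustration of the attribute cache above: once a bit has
 * been latched in pvh_attrs, later queries are answered without
 * walking the PV list (hypothetical fragment):
 */
#if 0
	bool dirty = pmap_testbit(pa, PG_M);	/* may scan every mapping */
	bool again = pmap_testbit(pa, PG_M);	/* if dirty, answered from
						   pvh_attrs immediately */
#endif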
                   2440:
                   2441: /*
                   2442:  * pmap_changebit:
                   2443:  *
                   2444:  *     Change the modified/referenced bits, or other PTE bits,
                   2445:  *     for a physical page.
                   2446:  */
                   2447: /* static */
1.23      thorpej  2448: bool
1.20      tsutsui  2449: pmap_changebit(paddr_t pa, int set, int mask)
1.1       chs      2450: {
1.46      thorpej  2451:        struct pv_header *pvh;
1.1       chs      2452:        struct pv_entry *pv;
                   2453:        pt_entry_t *pte, npte;
                   2454:        vaddr_t va;
                   2455:        int s;
                   2456: #if defined(M68K_MMU_HP) || defined(M68040) || defined(M68060)
1.25      thorpej  2457:        bool firstpage = true;
1.1       chs      2458: #endif
1.23      thorpej  2459:        bool r;
1.1       chs      2460:
                   2461:        PMAP_DPRINTF(PDB_BITS,
                   2462:            ("pmap_changebit(%lx, %x, %x)\n", pa, set, mask));
                   2463:
1.46      thorpej  2464:        pvh = pa_to_pvh(pa);
                   2465:        pv = &pvh->pvh_first;
1.1       chs      2466:        s = splvm();
                   2467:
                   2468:        /*
                   2469:         * Clear saved attributes (modify, reference)
                   2470:         */
                   2471:
1.46      thorpej  2472:        r = (pvh->pvh_attrs & ~mask) != 0;
                   2473:        pvh->pvh_attrs &= mask;
1.1       chs      2474:
                   2475:        /*
                    2476:         * Loop over all current mappings, setting/clearing as appropriate.
                    2477:         * If setting RO, do we need to clear the VAC?
                   2478:         */
                   2479:
                   2480:        if (pv->pv_pmap != NULL) {
                   2481: #ifdef DEBUG
                   2482:                int toflush = 0;
                   2483: #endif
                   2484:                for (; pv; pv = pv->pv_next) {
                   2485: #ifdef DEBUG
                   2486:                        toflush |= (pv->pv_pmap == pmap_kernel()) ? 2 : 1;
                   2487: #endif
                   2488:                        va = pv->pv_va;
                   2489:                        pte = pmap_pte(pv->pv_pmap, va);
                   2490: #ifdef M68K_MMU_HP
                   2491:
                   2492:                        /*
                   2493:                         * Flush VAC to ensure we get correct state of HW bits
                   2494:                         * so we don't clobber them.
                   2495:                         */
                   2496:
                   2497:                        if (firstpage && pmap_aliasmask) {
1.25      thorpej  2498:                                firstpage = false;
1.1       chs      2499:                                DCIS();
                   2500:                        }
                   2501: #endif
                   2502:                        npte = (*pte | set) & mask;
                   2503:                        if (*pte != npte) {
1.25      thorpej  2504:                                r = true;
1.1       chs      2505: #if defined(M68040) || defined(M68060)
                   2506:                                /*
                   2507:                                 * If we are changing caching status or
                   2508:                                 * protection make sure the caches are
                   2509:                                 * flushed (but only once).
                   2510:                                 */
                   2511:                                if (firstpage &&
                   2512: #if defined(M68020) || defined(M68030)
                   2513:                                    (mmutype == MMU_68040) &&
                   2514: #endif
                   2515:                                    ((set == PG_RO) ||
                   2516:                                     (set & PG_CMASK) ||
                   2517:                                     (mask & PG_CMASK) == 0)) {
1.25      thorpej  2518:                                        firstpage = false;
1.1       chs      2519:                                        DCFP(pa);
                   2520:                                        ICPP(pa);
                   2521:                                }
                   2522: #endif
                   2523:                                *pte = npte;
                   2524:                                if (active_pmap(pv->pv_pmap))
                   2525:                                        TBIS(va);
                   2526:                        }
                   2527:                }
                   2528:        }
                   2529:        splx(s);
1.20      tsutsui  2530:        return r;
1.1       chs      2531: }
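
/*
 * A worked example of the (*pte | set) & mask update above: to
 * switch a page from copyback to cache-inhibited, as
 * pmap_enter_ptpage() below does for 68040 PT pages, call
 *
 *	pmap_changebit(pa, PG_CI, ~PG_CCB);
 *
 * Each PTE becomes (*pte | PG_CI) & ~PG_CCB: PG_CI set, PG_CCB
 * cleared, all other bits preserved.  Clearing a bit alone uses
 * set == 0, e.g. pmap_changebit(pa, 0, ~PG_M) in pmap_clear_modify()
 * above.
 */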
                   2532:
                   2533: /*
                   2534:  * pmap_enter_ptpage:
                   2535:  *
                   2536:  *     Allocate and map a PT page for the specified pmap/va pair.
                   2537:  */
                   2538: /* static */
1.22      martin   2539: int
1.23      thorpej  2540: pmap_enter_ptpage(pmap_t pmap, vaddr_t va, bool can_fail)
1.1       chs      2541: {
                   2542:        paddr_t ptpa;
                   2543:        struct vm_page *pg;
1.46      thorpej  2544:        struct pv_header *pvh;
1.1       chs      2545:        struct pv_entry *pv;
                   2546:        st_entry_t *ste;
                   2547:        int s;
                   2548:
                   2549:        PMAP_DPRINTF(PDB_FOLLOW|PDB_ENTER|PDB_PTPAGE,
                   2550:            ("pmap_enter_ptpage: pmap %p, va %lx\n", pmap, va));
                   2551:
                   2552:        /*
                   2553:         * Allocate a segment table if necessary.  Note that it is allocated
                   2554:         * from a private map and not pt_map.  This keeps user page tables
                   2555:         * aligned on segment boundaries in the kernel address space.
                   2556:         * The segment table is wired down.  It will be freed whenever the
                   2557:         * reference count drops to zero.
                   2558:         */
                   2559:        if (pmap->pm_stab == Segtabzero) {
                   2560:                pmap->pm_stab = (st_entry_t *)
1.14      yamt     2561:                    uvm_km_alloc(st_map, M68K_STSIZE, 0,
1.22      martin   2562:                    UVM_KMF_WIRED | UVM_KMF_ZERO |
                   2563:                    (can_fail ? UVM_KMF_NOWAIT : 0));
                   2564:                if (pmap->pm_stab == NULL) {
                   2565:                        pmap->pm_stab = Segtabzero;
                   2566:                        return ENOMEM;
                   2567:                }
1.1       chs      2568:                (void) pmap_extract(pmap_kernel(), (vaddr_t)pmap->pm_stab,
                   2569:                    (paddr_t *)&pmap->pm_stpa);
                   2570: #if defined(M68040) || defined(M68060)
                   2571: #if defined(M68020) || defined(M68030)
                   2572:                if (mmutype == MMU_68040)
                   2573: #endif
                   2574:                {
1.21      mhitch   2575:                        pt_entry_t      *pte;
                   2576:
                   2577:                        pte = pmap_pte(pmap_kernel(), pmap->pm_stab);
                   2578:                        *pte = (*pte & ~PG_CMASK) | PG_CI;
1.1       chs      2579:                        pmap->pm_stfree = protostfree;
                   2580:                }
                   2581: #endif
                   2582:                /*
                   2583:                 * XXX may have changed segment table pointer for current
                   2584:                 * process so update now to reload hardware.
                   2585:                 */
                   2586:                if (active_user_pmap(pmap))
                   2587:                        PMAP_ACTIVATE(pmap, 1);
                   2588:
                   2589:                PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB,
                   2590:                    ("enter: pmap %p stab %p(%p)\n",
                   2591:                    pmap, pmap->pm_stab, pmap->pm_stpa));
                   2592:        }
                   2593:
                   2594:        ste = pmap_ste(pmap, va);
                   2595: #if defined(M68040) || defined(M68060)
                   2596:        /*
                   2597:         * Allocate level 2 descriptor block if necessary
                   2598:         */
                   2599: #if defined(M68020) || defined(M68030)
                   2600:        if (mmutype == MMU_68040)
                   2601: #endif
                   2602:        {
                   2603:                if (*ste == SG_NV) {
                   2604:                        int ix;
1.26      christos 2605:                        void *addr;
1.1       chs      2606:
                   2607:                        ix = bmtol2(pmap->pm_stfree);
                   2608:                        if (ix == -1)
                   2609:                                panic("enter: out of address space"); /* XXX */
                   2610:                        pmap->pm_stfree &= ~l2tobm(ix);
1.26      christos 2611:                        addr = (void *)&pmap->pm_stab[ix*SG4_LEV2SIZE];
1.1       chs      2612:                        memset(addr, 0, SG4_LEV2SIZE*sizeof(st_entry_t));
1.26      christos 2613:                        addr = (void *)&pmap->pm_stpa[ix*SG4_LEV2SIZE];
1.1       chs      2614:                        *ste = (u_int)addr | SG_RW | SG_U | SG_V;
                   2615:
                   2616:                        PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB,
                   2617:                            ("enter: alloc ste2 %d(%p)\n", ix, addr));
                   2618:                }
                   2619:                ste = pmap_ste2(pmap, va);
                   2620:                /*
                   2621:                 * Since a level 2 descriptor maps a block of SG4_LEV3SIZE
                   2622:                 * level 3 descriptors, we need a chunk of NPTEPG/SG4_LEV3SIZE
1.3       thorpej  2623:                 * (16) such descriptors (PAGE_SIZE/SG4_LEV3SIZE bytes) to map a
1.1       chs      2624:                 * PT page--the unit of allocation.  We set `ste' to point
                   2625:                 * to the first entry of that chunk which is validated in its
                   2626:                 * entirety below.
                   2627:                 */
1.3       thorpej  2628:                ste = (st_entry_t *)((int)ste & ~(PAGE_SIZE/SG4_LEV3SIZE-1));
1.1       chs      2629:
                   2630:                PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB,
                   2631:                    ("enter: ste2 %p (%p)\n", pmap_ste2(pmap, va), ste));
                   2632:        }
                   2633: #endif
                   2634:        va = trunc_page((vaddr_t)pmap_pte(pmap, va));
                   2635:
                   2636:        /*
                   2637:         * In the kernel we allocate a page from the kernel PT page
                   2638:         * free list and map it into the kernel page table map (via
                   2639:         * pmap_enter).
                   2640:         */
                   2641:        if (pmap == pmap_kernel()) {
                   2642:                struct kpt_page *kpt;
                   2643:
                   2644:                s = splvm();
                   2645:                if ((kpt = kpt_free_list) == NULL) {
                   2646:                        /*
                   2647:                         * No PT pages available.
                   2648:                         * Try once to free up unused ones.
                   2649:                         */
                   2650:                        PMAP_DPRINTF(PDB_COLLECT,
                   2651:                            ("enter: no KPT pages, collecting...\n"));
1.49      rmind    2652:                        pmap_collect();
1.1       chs      2653:                        if ((kpt = kpt_free_list) == NULL)
                   2654:                                panic("pmap_enter_ptpage: can't get KPT page");
                   2655:                }
                   2656:                kpt_free_list = kpt->kpt_next;
                   2657:                kpt->kpt_next = kpt_used_list;
                   2658:                kpt_used_list = kpt;
                   2659:                ptpa = kpt->kpt_pa;
1.26      christos 2660:                memset((void *)kpt->kpt_va, 0, PAGE_SIZE);
1.1       chs      2661:                pmap_enter(pmap, va, ptpa, VM_PROT_READ | VM_PROT_WRITE,
                   2662:                    VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED);
                   2663:                pmap_update(pmap);
                   2664: #ifdef DEBUG
                   2665:                if (pmapdebug & (PDB_ENTER|PDB_PTPAGE)) {
                   2666:                        int ix = pmap_ste(pmap, va) - pmap_ste(pmap, 0);
                   2667:
                   2668:                        printf("enter: add &Sysptmap[%d]: %x (KPT page %lx)\n",
1.20      tsutsui  2669:                            ix, Sysptmap[ix], kpt->kpt_va);
1.1       chs      2670:                }
                   2671: #endif
                   2672:                splx(s);
                   2673:        } else {
                   2674:
                   2675:                /*
                   2676:                 * For user processes we just allocate a page from the
                   2677:                 * VM system.  Note that we set the page "wired" count to 1,
                   2678:                 * which is what we use to check if the page can be freed.
                   2679:                 * See pmap_remove_mapping().
                   2680:                 *
                   2681:                 * Count the segment table reference first so that we won't
                   2682:                 * lose the segment table when low on memory.
                   2683:                 */
                   2684:
                   2685:                pmap->pm_sref++;
                   2686:                PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE,
                   2687:                    ("enter: about to alloc UPT pg at %lx\n", va));
1.36      tsutsui  2688:                mutex_enter(&uvm_kernel_object->vmobjlock);
1.33      dogcow   2689:                while ((pg = uvm_pagealloc(uvm_kernel_object,
1.1       chs      2690:                                           va - vm_map_min(kernel_map),
                   2691:                                           NULL, UVM_PGA_ZERO)) == NULL) {
1.36      tsutsui  2692:                        mutex_exit(&uvm_kernel_object->vmobjlock);
1.1       chs      2693:                        uvm_wait("ptpage");
1.36      tsutsui  2694:                        mutex_enter(&uvm_kernel_object->vmobjlock);
1.1       chs      2695:                }
1.36      tsutsui  2696:                mutex_exit(&uvm_kernel_object->vmobjlock);
1.1       chs      2697:                pg->flags &= ~(PG_BUSY|PG_FAKE);
                   2698:                UVM_PAGE_OWN(pg, NULL);
                   2699:                ptpa = VM_PAGE_TO_PHYS(pg);
                   2700:                pmap_enter(pmap_kernel(), va, ptpa,
                   2701:                    VM_PROT_READ | VM_PROT_WRITE,
                   2702:                    VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED);
                   2703:                pmap_update(pmap_kernel());
                   2704:        }
                   2705: #if defined(M68040) || defined(M68060)
                   2706:        /*
                    2707:         * Turn off copyback caching of page table pages;
                    2708:         * things could get ugly otherwise.
                   2709:         */
                   2710: #if defined(M68020) || defined(M68030)
                   2711:        if (mmutype == MMU_68040)
                   2712: #endif
                   2713:        {
                   2714: #ifdef DEBUG
                   2715:                pt_entry_t *pte = pmap_pte(pmap_kernel(), va);
                   2716:                if ((pmapdebug & PDB_PARANOIA) && (*pte & PG_CCB) == 0)
                   2717:                        printf("%s PT no CCB: kva=%lx ptpa=%lx pte@%p=%x\n",
1.20      tsutsui  2718:                            pmap == pmap_kernel() ? "Kernel" : "User",
                   2719:                            va, ptpa, pte, *pte);
1.1       chs      2720: #endif
                   2721:                if (pmap_changebit(ptpa, PG_CI, ~PG_CCB))
                   2722:                        DCIS();
                   2723:        }
                   2724: #endif
                   2725:        /*
                   2726:         * Locate the PV entry in the kernel for this PT page and
                   2727:         * record the STE address.  This is so that we can invalidate
                   2728:         * the STE when we remove the mapping for the page.
                   2729:         */
1.46      thorpej  2730:        pvh = pa_to_pvh(ptpa);
1.1       chs      2731:        s = splvm();
1.46      thorpej  2732:        if (pvh) {
                   2733:                pv = &pvh->pvh_first;
                   2734:                pvh->pvh_attrs |= PVH_PTPAGE;
1.1       chs      2735:                do {
                   2736:                        if (pv->pv_pmap == pmap_kernel() && pv->pv_va == va)
                   2737:                                break;
                   2738:                } while ((pv = pv->pv_next));
1.46      thorpej  2739:        } else {
                   2740:                pv = NULL;
1.1       chs      2741:        }
                   2742: #ifdef DEBUG
                   2743:        if (pv == NULL)
                   2744:                panic("pmap_enter_ptpage: PT page not entered");
                   2745: #endif
                   2746:        pv->pv_ptste = ste;
                   2747:        pv->pv_ptpmap = pmap;
                   2748:
                   2749:        PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE,
                   2750:            ("enter: new PT page at PA %lx, ste at %p\n", ptpa, ste));
                   2751:
                   2752:        /*
                   2753:         * Map the new PT page into the segment table.
                   2754:         * Also increment the reference count on the segment table if this
                   2755:         * was a user page table page.  Note that we don't use vm_map_pageable
                    2756:         * to keep the count like we do for PT pages; this is mostly because
                   2757:         * it would be difficult to identify ST pages in pmap_pageable to
                   2758:         * release them.  We also avoid the overhead of vm_map_pageable.
                   2759:         */
                   2760: #if defined(M68040) || defined(M68060)
                   2761: #if defined(M68020) || defined(M68030)
                   2762:        if (mmutype == MMU_68040)
                   2763: #endif
                   2764:        {
                   2765:                st_entry_t *este;
                   2766:
                   2767:                for (este = &ste[NPTEPG/SG4_LEV3SIZE]; ste < este; ste++) {
                   2768:                        *ste = ptpa | SG_U | SG_RW | SG_V;
                   2769:                        ptpa += SG4_LEV3SIZE * sizeof(st_entry_t);
                   2770:                }
                   2771:        }
                   2772: #if defined(M68020) || defined(M68030)
                   2773:        else
                   2774:                *ste = (ptpa & SG_FRAME) | SG_RW | SG_V;
                   2775: #endif
                   2776: #else
                   2777:        *ste = (ptpa & SG_FRAME) | SG_RW | SG_V;
                   2778: #endif
                   2779:        if (pmap != pmap_kernel()) {
                   2780:                PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB,
                   2781:                    ("enter: stab %p refcnt %d\n",
                   2782:                    pmap->pm_stab, pmap->pm_sref));
                   2783:        }
                   2784:        /*
                   2785:         * Flush stale TLB info.
                   2786:         */
                   2787:        if (pmap == pmap_kernel())
                   2788:                TBIAS();
                   2789:        else
                   2790:                TBIAU();
                   2791:        pmap->pm_ptpages++;
                   2792:        splx(s);
1.22      martin   2793:
                   2794:        return 0;
1.1       chs      2795: }
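
/*
 * A worked example for the 68040 descriptor arithmetic above,
 * assuming 4KB pages and 4-byte PTEs: NPTEPG == 1024 and
 * SG4_LEV3SIZE == 64, so the mapping loop writes
 * NPTEPG/SG4_LEV3SIZE == 16 level-3 descriptors, each covering
 * SG4_LEV3SIZE * sizeof(st_entry_t) == 256 bytes of the PT page;
 * 16 * 256 bytes spans the whole 4KB page, matching the "(16)"
 * noted in the level-2 allocation comment above.
 */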
                   2796:
                   2797: /*
                   2798:  * pmap_ptpage_addref:
                   2799:  *
                   2800:  *     Add a reference to the specified PT page.
                   2801:  */
                   2802: void
1.20      tsutsui  2803: pmap_ptpage_addref(vaddr_t ptpva)
1.1       chs      2804: {
                   2805:        struct vm_page *pg;
                   2806:
1.36      tsutsui  2807:        mutex_enter(&uvm_kernel_object->vmobjlock);
1.33      dogcow   2808:        pg = uvm_pagelookup(uvm_kernel_object, ptpva - vm_map_min(kernel_map));
1.1       chs      2809:        pg->wire_count++;
                   2810:        PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB,
                   2811:            ("ptpage addref: pg %p now %d\n",
                   2812:             pg, pg->wire_count));
1.36      tsutsui  2813:        mutex_exit(&uvm_kernel_object->vmobjlock);
1.1       chs      2814: }
                   2815:
                   2816: /*
                   2817:  * pmap_ptpage_delref:
                   2818:  *
                   2819:  *     Delete a reference to the specified PT page.
                   2820:  */
                   2821: int
1.20      tsutsui  2822: pmap_ptpage_delref(vaddr_t ptpva)
1.1       chs      2823: {
                   2824:        struct vm_page *pg;
                   2825:        int rv;
                   2826:
1.36      tsutsui  2827:        mutex_enter(&uvm_kernel_object->vmobjlock);
1.33      dogcow   2828:        pg = uvm_pagelookup(uvm_kernel_object, ptpva - vm_map_min(kernel_map));
1.1       chs      2829:        rv = --pg->wire_count;
                   2830:        PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB,
                   2831:            ("ptpage delref: pg %p now %d\n",
                   2832:             pg, pg->wire_count));
1.36      tsutsui  2833:        mutex_exit(&uvm_kernel_object->vmobjlock);
1.20      tsutsui  2834:        return rv;
1.1       chs      2835: }
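
/*
 * A minimal sketch of how the two routines above pair up over a
 * PTE's lifetime (hypothetical fragment):
 */
#if 0
	vaddr_t ptpva = trunc_page((vaddr_t)pte);

	pmap_ptpage_addref(ptpva);		/* a PTE becomes valid */
	/* ... later, the mapping is removed ... */
	if (pmap_ptpage_delref(ptpva) == 0) {
		/* last PTE on the page gone; see pmap_remove_mapping() */
	}
#endif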
                   2836:
                   2837: /*
                   2838:  *     Routine:        pmap_procwr
                   2839:  *
                   2840:  *     Function:
                   2841:  *             Synchronize caches corresponding to [addr, addr + len) in p.
                   2842:  */
                   2843: void
1.20      tsutsui  2844: pmap_procwr(struct proc *p, vaddr_t va, size_t len)
1.1       chs      2845: {
1.20      tsutsui  2846:
1.1       chs      2847:        (void)cachectl1(0x80000004, va, len, p);
                   2848: }
                   2849:
                   2850: void
1.20      tsutsui  2851: _pmap_set_page_cacheable(pmap_t pmap, vaddr_t va)
1.1       chs      2852: {
                   2853:
1.20      tsutsui  2854:        if (!pmap_ste_v(pmap, va))
1.1       chs      2855:                return;
                   2856:
                   2857: #if defined(M68040) || defined(M68060)
                   2858: #if defined(M68020) || defined(M68030)
1.20      tsutsui  2859:        if (mmutype == MMU_68040) {
1.1       chs      2860: #endif
1.20      tsutsui  2861:        if (pmap_changebit(pmap_pte_pa(pmap_pte(pmap, va)), PG_CCB, ~PG_CI))
1.1       chs      2862:                DCIS();
                   2863:
                   2864: #if defined(M68020) || defined(M68030)
                   2865:        } else
1.20      tsutsui  2866:                pmap_changebit(pmap_pte_pa(pmap_pte(pmap, va)), 0, ~PG_CI);
1.1       chs      2867: #endif
                   2868: #else
1.20      tsutsui  2869:        pmap_changebit(pmap_pte_pa(pmap_pte(pmap, va)), 0, ~PG_CI);
1.1       chs      2870: #endif
                   2871: }
                   2872:
                   2873: void
1.20      tsutsui  2874: _pmap_set_page_cacheinhibit(pmap_t pmap, vaddr_t va)
1.1       chs      2875: {
                   2876:
1.20      tsutsui  2877:        if (!pmap_ste_v(pmap, va))
1.1       chs      2878:                return;
                   2879:
                   2880: #if defined(M68040) || defined(M68060)
                   2881: #if defined(M68020) || defined(M68030)
1.20      tsutsui  2882:        if (mmutype == MMU_68040) {
1.1       chs      2883: #endif
1.20      tsutsui  2884:        if (pmap_changebit(pmap_pte_pa(pmap_pte(pmap, va)), PG_CI, ~PG_CCB))
1.1       chs      2885:                DCIS();
                   2886: #if defined(M68020) || defined(M68030)
                   2887:        } else
1.20      tsutsui  2888:                pmap_changebit(pmap_pte_pa(pmap_pte(pmap, va)), PG_CI, ~0);
1.1       chs      2889: #endif
                   2890: #else
1.20      tsutsui  2891:        pmap_changebit(pmap_pte_pa(pmap_pte(pmap, va)), PG_CI, ~0);
1.1       chs      2892: #endif
                   2893: }
                   2894:
                   2895: int
1.20      tsutsui  2896: _pmap_page_is_cacheable(pmap_t pmap, vaddr_t va)
1.1       chs      2897: {
                   2898:
1.20      tsutsui  2899:        if (!pmap_ste_v(pmap, va))
                   2900:                return 0;
1.1       chs      2901:
1.20      tsutsui  2902:        return (pmap_pte_ci(pmap_pte(pmap, va)) == 0) ? 1 : 0;
1.1       chs      2903: }
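
/*
 * A minimal sketch of how the three cacheability helpers above might
 * bracket device access to a shared page (hypothetical fragment):
 */
#if 0
	_pmap_set_page_cacheinhibit(pmap_kernel(), va);	/* before I/O */
	/* ... device reads or writes the page ... */
	if (!_pmap_page_is_cacheable(pmap_kernel(), va))
		_pmap_set_page_cacheable(pmap_kernel(), va);	/* restore */
#endif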
                   2904:
                   2905: #ifdef DEBUG
                   2906: /*
                   2907:  * pmap_pvdump:
                   2908:  *
                   2909:  *     Dump the contents of the PV list for the specified physical page.
                   2910:  */
                   2911: void
1.20      tsutsui  2912: pmap_pvdump(paddr_t pa)
1.1       chs      2913: {
1.46      thorpej  2914:        struct pv_header *pvh;
1.1       chs      2915:        struct pv_entry *pv;
                   2916:
                   2917:        printf("pa %lx", pa);
1.46      thorpej  2918:        pvh = pa_to_pvh(pa);
                   2919:        for (pv = &pvh->pvh_first; pv; pv = pv->pv_next)
1.47      mhitch   2920:                printf(" -> pmap %p, va %lx, ptste %p, ptpmap %p",
                   2921:                    pv->pv_pmap, pv->pv_va, pv->pv_ptste, pv->pv_ptpmap);
1.1       chs      2922:        printf("\n");
                   2923: }
                   2924:
                   2925: /*
                   2926:  * pmap_check_wiring:
                   2927:  *
                   2928:  *     Count the number of valid mappings in the specified PT page,
                   2929:  *     and ensure that it is consistent with the number of wirings
                   2930:  *     to that page that the VM system has.
                   2931:  */
                   2932: void
1.20      tsutsui  2933: pmap_check_wiring(const char *str, vaddr_t va)
1.1       chs      2934: {
                   2935:        pt_entry_t *pte;
                   2936:        paddr_t pa;
                   2937:        struct vm_page *pg;
                   2938:        int count;
                   2939:
                   2940:        if (!pmap_ste_v(pmap_kernel(), va) ||
                   2941:            !pmap_pte_v(pmap_pte(pmap_kernel(), va)))
                   2942:                return;
                   2943:
                   2944:        pa = pmap_pte_pa(pmap_pte(pmap_kernel(), va));
                   2945:        pg = PHYS_TO_VM_PAGE(pa);
1.13      chs      2946:        if (pg->wire_count > PAGE_SIZE / sizeof(pt_entry_t)) {
1.1       chs      2947:                panic("*%s*: 0x%lx: wire count %d", str, va, pg->wire_count);
                   2948:        }
                   2949:
                   2950:        count = 0;
1.3       thorpej  2951:        for (pte = (pt_entry_t *)va; pte < (pt_entry_t *)(va + PAGE_SIZE);
                   2952:             pte++)
1.1       chs      2953:                if (*pte)
                   2954:                        count++;
                   2955:        if (pg->wire_count != count)
                   2956:                panic("*%s*: 0x%lx: w%d/a%d",
                   2957:                       str, va, pg->wire_count, count);
                   2958: }
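
/*
 * A worked bound for the check above, assuming 4KB pages and 4-byte
 * PTEs: PAGE_SIZE / sizeof(pt_entry_t) == 1024, so a PT page can be
 * wired by at most 1024 valid mappings, one per PTE slot.
 */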
                   2959: #endif /* DEBUG */
