
Annotation of src/sys/arch/m68k/m68k/pmap_motorola.c, Revision 1.71.6.1

1.71.6.1! ad          1: /*     $NetBSD: pmap_motorola.c,v 1.72 2020/02/23 15:46:39 ad Exp $        */
1.1       chs         2:
                      3: /*-
                      4:  * Copyright (c) 1999 The NetBSD Foundation, Inc.
                      5:  * All rights reserved.
                      6:  *
                      7:  * This code is derived from software contributed to The NetBSD Foundation
                      8:  * by Jason R. Thorpe.
                      9:  *
                     10:  * Redistribution and use in source and binary forms, with or without
                     11:  * modification, are permitted provided that the following conditions
                     12:  * are met:
                     13:  * 1. Redistributions of source code must retain the above copyright
                     14:  *    notice, this list of conditions and the following disclaimer.
                     15:  * 2. Redistributions in binary form must reproduce the above copyright
                     16:  *    notice, this list of conditions and the following disclaimer in the
                     17:  *    documentation and/or other materials provided with the distribution.
                     18:  *
                     19:  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
                     20:  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
                     21:  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
                     22:  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
                     23:  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
                     24:  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
                     25:  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
                     26:  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
                     27:  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
                     28:  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
                     29:  * POSSIBILITY OF SUCH DAMAGE.
                     30:  */
                     31:
                     32: /*
                     33:  * Copyright (c) 1991, 1993
                     34:  *     The Regents of the University of California.  All rights reserved.
                     35:  *
                     36:  * This code is derived from software contributed to Berkeley by
                     37:  * the Systems Programming Group of the University of Utah Computer
                     38:  * Science Department.
                     39:  *
                     40:  * Redistribution and use in source and binary forms, with or without
                     41:  * modification, are permitted provided that the following conditions
                     42:  * are met:
                     43:  * 1. Redistributions of source code must retain the above copyright
                     44:  *    notice, this list of conditions and the following disclaimer.
                     45:  * 2. Redistributions in binary form must reproduce the above copyright
                     46:  *    notice, this list of conditions and the following disclaimer in the
                     47:  *    documentation and/or other materials provided with the distribution.
1.6       agc        48:  * 3. Neither the name of the University nor the names of its contributors
1.1       chs        49:  *    may be used to endorse or promote products derived from this software
                     50:  *    without specific prior written permission.
                     51:  *
                     52:  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
                     53:  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
                     54:  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
                     55:  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
                     56:  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
                     57:  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
                     58:  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
                     59:  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
                     60:  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
                     61:  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
                     62:  * SUCH DAMAGE.
                     63:  *
                     64:  *     @(#)pmap.c      8.6 (Berkeley) 5/27/94
                     65:  */
                     66:
                     67: /*
                     68:  * Motorola m68k-family physical map management code.
                     69:  *
                     70:  * Supports:
                     71:  *     68020 with 68851 MMU
                     72:  *     68030 with on-chip MMU
                     73:  *     68040 with on-chip MMU
                     74:  *     68060 with on-chip MMU
                     75:  *
                     76:  * Notes:
                     77:  *     Don't even pay lip service to multiprocessor support.
                     78:  *
                     79:  *     We assume TLB entries don't have process tags (except for the
                     80:  *     supervisor/user distinction) so we only invalidate TLB entries
                     81:  *     when changing mappings for the current (or kernel) pmap.  This is
                     82:  *     technically not true for the 68851 but we flush the TLB on every
                     83:  *     context switch, so it effectively winds up that way.
                     84:  *
                     85:  *     Bitwise and/or operations are significantly faster than bitfield
                     86:  *     references so we use them when accessing STE/PTEs in the pmap_pte_*
                     87:  *     macros.  Note also that the two are not always equivalent; e.g.:
                     88:  *             (*pte & PG_PROT) [4] != pte->pg_prot [1]
                     89:  *     and a couple of routines that deal with protection and wiring take
                     90:  *     some shortcuts that assume the and/or definitions.
                     91:  */
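/*
 * Illustrative sketch (not part of the original source), expanding the
 * example above: with PG_RO == 0x4, the mask form yields the unshifted
 * bit value (4), while a hypothetical pg_prot bitfield would yield the
 * shifted value (1).  The mask form is both faster and not equivalent.
 */
#if 0
	pt_entry_t *pte = pmap_pte(pmap_kernel(), va);
	u_int prot = *pte & PG_PROT;	/* a single AND; 4 for a RO page */
#endif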
                     92:
                     93: /*
                     94:  *     Manages physical address maps.
                     95:  *
                     96:  *     In addition to hardware address maps, this
                     97:  *     module is called upon to provide software-use-only
                     98:  *     maps which may or may not be stored in the same
                     99:  *     form as hardware maps.  These pseudo-maps are
                    100:  *     used to store intermediate results from copy
                    101:  *     operations to and from address spaces.
                    102:  *
                    103:  *     Since the information managed by this module is
                    104:  *     also stored by the logical address mapping module,
                    105:  *     this module may throw away valid virtual-to-physical
                    106:  *     mappings at almost any time.  However, invalidations
                    107:  *     of virtual-to-physical mappings must be done as
                    108:  *     requested.
                    109:  *
                    110:  *     In order to cope with hardware architectures which
                    111:  *     make virtual-to-physical map invalidates expensive,
                     112:  *     this module may delay invalidation or protection-reduction
                    113:  *     operations until such time as they are actually
                    114:  *     necessary.  This module is given full information as
                    115:  *     to which processors are currently using which maps,
                    116:  *     and to when physical maps must be made correct.
                    117:  */
                    118:
1.56      mrg       119: #include "opt_m68k_arch.h"
                    120:
1.1       chs       121: #include <sys/cdefs.h>
1.71.6.1! ad        122: __KERNEL_RCSID(0, "$NetBSD: pmap_motorola.c,v 1.72 2020/02/23 15:46:39 ad Exp $");
1.1       chs       123:
                    124: #include <sys/param.h>
                    125: #include <sys/systm.h>
                    126: #include <sys/proc.h>
                    127: #include <sys/malloc.h>
                    128: #include <sys/pool.h>
1.60      rmind     129: #include <sys/cpu.h>
1.61      tsutsui   130: #include <sys/atomic.h>
1.1       chs       131:
                    132: #include <machine/pte.h>
1.60      rmind     133: #include <machine/pcb.h>
1.1       chs       134:
                    135: #include <uvm/uvm.h>
1.69      cherry    136: #include <uvm/uvm_physseg.h>
1.1       chs       137:
                    138: #include <m68k/cacheops.h>
                    139:
                    140: #ifdef DEBUG
                    141: #define PDB_FOLLOW     0x0001
                    142: #define PDB_INIT       0x0002
                    143: #define PDB_ENTER      0x0004
                    144: #define PDB_REMOVE     0x0008
                    145: #define PDB_CREATE     0x0010
                    146: #define PDB_PTPAGE     0x0020
                    147: #define PDB_CACHE      0x0040
                    148: #define PDB_BITS       0x0080
                    149: #define PDB_COLLECT    0x0100
                    150: #define PDB_PROTECT    0x0200
                    151: #define PDB_SEGTAB     0x0400
                    152: #define PDB_MULTIMAP   0x0800
                    153: #define PDB_PARANOIA   0x2000
                    154: #define PDB_WIRING     0x4000
                    155: #define PDB_PVDUMP     0x8000
                    156:
                    157: int debugmap = 0;
                    158: int pmapdebug = PDB_PARANOIA;
                    159:
                    160: #define        PMAP_DPRINTF(l, x)      if (pmapdebug & (l)) printf x
                    161: #else /* ! DEBUG */
                    162: #define        PMAP_DPRINTF(l, x)      /* nothing */
                    163: #endif /* DEBUG */
                    164:
                    165: /*
                    166:  * Get STEs and PTEs for user/kernel address space
                    167:  */
                    168: #if defined(M68040) || defined(M68060)
                    169: #define        pmap_ste1(m, v) \
                    170:        (&((m)->pm_stab[(vaddr_t)(v) >> SG4_SHIFT1]))
                    171: /* XXX assumes physically contiguous ST pages (if more than one) */
                    172: #define pmap_ste2(m, v) \
                    173:        (&((m)->pm_stab[(st_entry_t *)(*(u_int *)pmap_ste1(m, v) & SG4_ADDR1) \
                    174:                        - (m)->pm_stpa + (((v) & SG4_MASK2) >> SG4_SHIFT2)]))
                    175: #if defined(M68020) || defined(M68030)
                    176: #define        pmap_ste(m, v)  \
                    177:        (&((m)->pm_stab[(vaddr_t)(v) \
                    178:                        >> (mmutype == MMU_68040 ? SG4_SHIFT1 : SG_ISHIFT)]))
                    179: #define pmap_ste_v(m, v) \
                    180:        (mmutype == MMU_68040 \
                    181:         ? ((*pmap_ste1(m, v) & SG_V) && \
                    182:            (*pmap_ste2(m, v) & SG_V)) \
                    183:         : (*pmap_ste(m, v) & SG_V))
                    184: #else
                    185: #define        pmap_ste(m, v)  \
                    186:        (&((m)->pm_stab[(vaddr_t)(v) >> SG4_SHIFT1]))
                    187: #define pmap_ste_v(m, v) \
                    188:        ((*pmap_ste1(m, v) & SG_V) && (*pmap_ste2(m, v) & SG_V))
                    189: #endif
                    190: #else
                    191: #define        pmap_ste(m, v)   (&((m)->pm_stab[(vaddr_t)(v) >> SG_ISHIFT]))
                    192: #define pmap_ste_v(m, v) (*pmap_ste(m, v) & SG_V)
                    193: #endif
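/*
 * Illustrative sketch (not part of the original source): checking that a
 * VA has a valid translation with the macros above.  On the 68040/68060,
 * pmap_ste_v() tests SG_V at both segment-table levels before the PTE
 * itself may be examined.
 */
#if 0
static bool
sketch_mapping_valid(pmap_t pm, vaddr_t va)
{

	if (!pmap_ste_v(pm, va))
		return false;		/* no PT page for this segment */
	return pmap_pte_v(pmap_pte(pm, va)) != 0;
}
#endif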
                    194:
                    195: #define pmap_pte(m, v) (&((m)->pm_ptab[(vaddr_t)(v) >> PG_SHIFT]))
                    196: #define pmap_pte_pa(pte)       (*(pte) & PG_FRAME)
                    197: #define pmap_pte_w(pte)                (*(pte) & PG_W)
                    198: #define pmap_pte_ci(pte)       (*(pte) & PG_CI)
                    199: #define pmap_pte_m(pte)                (*(pte) & PG_M)
                    200: #define pmap_pte_u(pte)                (*(pte) & PG_U)
                    201: #define pmap_pte_prot(pte)     (*(pte) & PG_PROT)
                    202: #define pmap_pte_v(pte)                (*(pte) & PG_V)
                    203:
                    204: #define pmap_pte_set_w(pte, v) \
                    205:        if (v) *(pte) |= PG_W; else *(pte) &= ~PG_W
                    206: #define pmap_pte_set_prot(pte, v) \
                    207:        if (v) *(pte) |= PG_PROT; else *(pte) &= ~PG_PROT
                    208: #define pmap_pte_w_chg(pte, nw)                ((nw) ^ pmap_pte_w(pte))
                    209: #define pmap_pte_prot_chg(pte, np)     ((np) ^ pmap_pte_prot(pte))
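/*
 * Illustrative sketch (not part of the original source): the
 * change-then-set idiom these macros support.  pmap_pte_w_chg() XORs the
 * requested state against the current PG_W bit, so it is non-zero exactly
 * when the PTE actually needs updating.
 */
#if 0
	pt_entry_t *pte = pmap_pte(pmap, va);

	if (pmap_pte_w_chg(pte, wired ? PG_W : 0))
		pmap_pte_set_w(pte, wired);
#endif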
                    210:
                    211: /*
                    212:  * Given a map and a machine independent protection code,
                    213:  * convert to an m68k protection code.
                    214:  */
                    215: #define pte_prot(m, p) (protection_codes[p])
1.42      tsutsui   216: u_int  protection_codes[8];
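/*
 * Illustrative sketch (not part of the original source): the table is
 * indexed directly by the 3-bit VM_PROT_* mask, so the conversion is a
 * single load; see the initialization in pmap_bootstrap_finalize() below.
 */
#if 0
	u_int pgprot = pte_prot(pmap, VM_PROT_READ | VM_PROT_EXECUTE);
	/* pgprot == PG_RO: any readable, non-writable combination is RO */
#endif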
1.1       chs       217:
                    218: /*
                    219:  * Kernel page table page management.
                    220:  */
                    221: struct kpt_page {
                    222:        struct kpt_page *kpt_next;      /* link on either used or free list */
                    223:        vaddr_t         kpt_va;         /* always valid kernel VA */
                    224:        paddr_t         kpt_pa;         /* PA of this page (for speed) */
                    225: };
                    226: struct kpt_page *kpt_free_list, *kpt_used_list;
                    227: struct kpt_page *kpt_pages;
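/*
 * Illustrative sketch (not part of the original source): how a kernel PT
 * page moves from the free list to the used list when one is needed.
 */
#if 0
	struct kpt_page *kpt = kpt_free_list;

	if (kpt != NULL) {
		kpt_free_list = kpt->kpt_next;
		kpt->kpt_next = kpt_used_list;
		kpt_used_list = kpt;
		/* kpt->kpt_va and kpt->kpt_pa describe the page to use. */
	}
#endif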
                    228:
                    229: /*
                    230:  * Kernel segment/page table and page table map.
                    231:  * The page table map gives us a level of indirection we need to dynamically
                    232:  * expand the page table.  It is essentially a copy of the segment table
                    233:  * with PTEs instead of STEs.  All are initialized in locore at boot time.
                    234:  * Sysmap will initially contain VM_KERNEL_PT_PAGES pages of PTEs.
                    235:  * Segtabzero is an empty segment table which all processes share til they
                    236:  * reference something.
                    237:  */
1.54      tsutsui   238: paddr_t                Sysseg_pa;
1.1       chs       239: st_entry_t     *Sysseg;
                    240: pt_entry_t     *Sysmap, *Sysptmap;
                    241: st_entry_t     *Segtabzero, *Segtabzeropa;
                    242: vsize_t                Sysptsize = VM_KERNEL_PT_PAGES;
                    243:
1.41      tsutsui   244: static struct pmap kernel_pmap_store;
1.40      tsutsui   245: struct pmap    *const kernel_pmap_ptr = &kernel_pmap_store;
1.1       chs       246: struct vm_map  *st_map, *pt_map;
1.65      para      247: struct vm_map st_map_store, pt_map_store;
1.1       chs       248:
1.52      tsutsui   249: vaddr_t                lwp0uarea;      /* lwp0 u-area VA, initialized in bootstrap */
                    250:
1.1       chs       251: paddr_t                avail_start;    /* PA of first available physical page */
                    252: paddr_t                avail_end;      /* PA of last available physical page */
                    253: vsize_t                mem_size;       /* memory size in bytes */
1.5       thorpej   254: vaddr_t                virtual_avail;  /* VA of first avail page (after kernel bss)*/
                    255: vaddr_t                virtual_end;    /* VA of last avail page (end of kernel AS) */
1.1       chs       256: int            page_cnt;       /* number of pages managed by VM system */
                    257:
1.25      thorpej   258: bool           pmap_initialized = false;       /* Has pmap_init completed? */
1.46      thorpej   259:
1.66      rkujawa   260: vaddr_t                m68k_uptbase = M68K_PTBASE;
                    261:
1.46      thorpej   262: struct pv_header {
                    263:        struct pv_entry         pvh_first;      /* first PV entry */
                    264:        uint16_t                pvh_attrs;      /* attributes:
                    265:                                                   bits 0-7: PTE bits
                    266:                                                   bits 8-15: flags */
                    267:        uint16_t                pvh_cimappings; /* # caller-specified CI
                    268:                                                   mappings */
                    269: };
                    270:
                    271: #define        PVH_CI          0x10    /* all entries are cache-inhibited */
                    272: #define        PVH_PTPAGE      0x20    /* entry maps a page table page */
                    273:
                    274: struct pv_header *pv_table;
1.1       chs       275: TAILQ_HEAD(pv_page_list, pv_page) pv_page_freelist;
                    276: int            pv_nfree;
                    277:
1.59      tsutsui   278: #ifdef CACHE_HAVE_VAC
1.58      tsutsui   279: u_int          pmap_aliasmask; /* separation at which VA aliasing ok */
1.1       chs       280: #endif
                    281: #if defined(M68040) || defined(M68060)
1.53      tsutsui   282: u_int          protostfree;    /* prototype (default) free ST map */
1.1       chs       283: #endif
                    284:
                    285: pt_entry_t     *caddr1_pte;    /* PTE for CADDR1 */
                    286: pt_entry_t     *caddr2_pte;    /* PTE for CADDR2 */
                    287:
                    288: struct pool    pmap_pmap_pool; /* memory pool for pmap structures */
1.63      tsutsui   289: struct pool    pmap_pv_pool;   /* memory pool for pv entries */
1.1       chs       290:
1.63      tsutsui   291: #define pmap_alloc_pv()                pool_get(&pmap_pv_pool, PR_NOWAIT)
                    292: #define pmap_free_pv(pv)       pool_put(&pmap_pv_pool, (pv))
1.1       chs       293:
1.45      thorpej   294: #define        PAGE_IS_MANAGED(pa)     (pmap_initialized && uvm_pageismanaged(pa))
1.1       chs       295:
1.46      thorpej   296: static inline struct pv_header *
1.38      tsutsui   297: pa_to_pvh(paddr_t pa)
                    298: {
1.69      cherry    299:        uvm_physseg_t bank = 0; /* XXX gcc4 -Wuninitialized */
                    300:        psize_t pg = 0;
                    301:
                    302:        bank = uvm_physseg_find(atop((pa)), &pg);
                    303:        return &uvm_physseg_get_pmseg(bank)->pvheader[pg];
1.38      tsutsui   304: }
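/*
 * Illustrative sketch (not part of the original source): pvh_attrs keeps
 * PTE attribute bits in its low byte (per the pv_header comment above),
 * so a page's modified state can be read straight from the table.
 */
#if 0
static bool
sketch_page_modified(paddr_t pa)
{

	return (pa_to_pvh(pa)->pvh_attrs & PG_M) != 0;
}
#endif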
1.1       chs       305:
                    306: /*
                    307:  * Internal routines
                    308:  */
1.70      chs       309: void   pmap_remove_mapping(pmap_t, vaddr_t, pt_entry_t *, int,
                    310:                            struct pv_entry **);
1.23      thorpej   311: bool   pmap_testbit(paddr_t, int);
                    312: bool   pmap_changebit(paddr_t, int, int);
1.24      tsutsui   313: int    pmap_enter_ptpage(pmap_t, vaddr_t, bool);
1.20      tsutsui   314: void   pmap_ptpage_addref(vaddr_t);
                    315: int    pmap_ptpage_delref(vaddr_t);
                    316: void   pmap_pinit(pmap_t);
                    317: void   pmap_release(pmap_t);
1.1       chs       318:
                    319: #ifdef DEBUG
1.20      tsutsui   320: void pmap_pvdump(paddr_t);
                    321: void pmap_check_wiring(const char *, vaddr_t);
1.1       chs       322: #endif
                    323:
                    324: /* pmap_remove_mapping flags */
                    325: #define        PRM_TFLUSH      0x01
                    326: #define        PRM_CFLUSH      0x02
                    327: #define        PRM_KEEPPTPAGE  0x04
                    328:
                    329: /*
1.52      tsutsui   330:  * pmap_bootstrap_finalize:    [ INTERFACE ]
                    331:  *
                    332:  *     Initialize lwp0 uarea, curlwp, and curpcb after MMU is turned on,
                    333:  *     using lwp0uarea variable saved during pmap_bootstrap().
                    334:  */
                    335: void
                    336: pmap_bootstrap_finalize(void)
                    337: {
                    338:
1.55      tsutsui   339: #if !defined(amiga) && !defined(atari)
                    340:        /*
                    341:         * XXX
                    342:         * amiga and atari have different pmap initialization functions
                    343:         * and they require this earlier.
                    344:         */
                    345:        uvmexp.pagesize = NBPG;
1.68      cherry    346:        uvm_md_init();
1.55      tsutsui   347: #endif
                    348:
1.54      tsutsui   349:        /*
                    350:         * Initialize protection array.
                    351:         * XXX: Could this have port specific values? Can't this be static?
                    352:         */
                    353:        protection_codes[VM_PROT_NONE|VM_PROT_NONE|VM_PROT_NONE]     = 0;
                    354:        protection_codes[VM_PROT_READ|VM_PROT_NONE|VM_PROT_NONE]     = PG_RO;
                    355:        protection_codes[VM_PROT_READ|VM_PROT_NONE|VM_PROT_EXECUTE]  = PG_RO;
                    356:        protection_codes[VM_PROT_NONE|VM_PROT_NONE|VM_PROT_EXECUTE]  = PG_RO;
                    357:        protection_codes[VM_PROT_NONE|VM_PROT_WRITE|VM_PROT_NONE]    = PG_RW;
                    358:        protection_codes[VM_PROT_NONE|VM_PROT_WRITE|VM_PROT_EXECUTE] = PG_RW;
                    359:        protection_codes[VM_PROT_READ|VM_PROT_WRITE|VM_PROT_NONE]    = PG_RW;
                    360:        protection_codes[VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE] = PG_RW;
                    361:
                    362:        /*
                    363:         * Initialize pmap_kernel().
                    364:         */
                    365:        pmap_kernel()->pm_stpa = (st_entry_t *)Sysseg_pa;
                    366:        pmap_kernel()->pm_stab = Sysseg;
                    367:        pmap_kernel()->pm_ptab = Sysmap;
                    368: #if defined(M68040) || defined(M68060)
                    369:        if (mmutype == MMU_68040)
                    370:                pmap_kernel()->pm_stfree = protostfree;
                    371: #endif
                    372:        pmap_kernel()->pm_count = 1;
                    373:
                    374:        /*
                    375:         * Initialize lwp0 uarea, curlwp, and curpcb.
                    376:         */
1.52      tsutsui   377:        memset((void *)lwp0uarea, 0, USPACE);
                    378:        uvm_lwp_setuarea(&lwp0, lwp0uarea);
                    379:        curlwp = &lwp0;
                    380:        curpcb = lwp_getpcb(&lwp0);
                    381: }
                    382:
                    383: /*
1.5       thorpej   384:  * pmap_virtual_space:         [ INTERFACE ]
                    385:  *
                    386:  *     Report the range of available kernel virtual address
                    387:  *     space to the VM system during bootstrap.
                    388:  *
                    389:  *     This is only an interface function if we do not use
                    390:  *     pmap_steal_memory()!
                    391:  *
                    392:  *     Note: no locking is necessary in this function.
                    393:  */
                    394: void
1.43      dsl       395: pmap_virtual_space(vaddr_t *vstartp, vaddr_t *vendp)
1.5       thorpej   396: {
                    397:
                    398:        *vstartp = virtual_avail;
                    399:        *vendp = virtual_end;
                    400: }
                    401:
                    402: /*
1.1       chs       403:  * pmap_init:                  [ INTERFACE ]
                    404:  *
                    405:  *     Initialize the pmap module.  Called by vm_init(), to initialize any
                    406:  *     structures that the pmap system needs to map virtual memory.
                    407:  *
                    408:  *     Note: no locking is necessary in this function.
                    409:  */
                    410: void
1.20      tsutsui   411: pmap_init(void)
1.1       chs       412: {
                    413:        vaddr_t         addr, addr2;
                    414:        vsize_t         s;
1.46      thorpej   415:        struct pv_header *pvh;
1.1       chs       416:        int             rv;
                    417:        int             npages;
1.69      cherry    418:        uvm_physseg_t   bank;
1.1       chs       419:
                    420:        PMAP_DPRINTF(PDB_FOLLOW, ("pmap_init()\n"));
                    421:
                    422:        /*
                    423:         * Before we do anything else, initialize the PTE pointers
                    424:         * used by pmap_zero_page() and pmap_copy_page().
                    425:         */
                    426:        caddr1_pte = pmap_pte(pmap_kernel(), CADDR1);
                    427:        caddr2_pte = pmap_pte(pmap_kernel(), CADDR2);
                    428:
                    429:        PMAP_DPRINTF(PDB_INIT,
                    430:            ("pmap_init: Sysseg %p, Sysmap %p, Sysptmap %p\n",
                    431:            Sysseg, Sysmap, Sysptmap));
                    432:        PMAP_DPRINTF(PDB_INIT,
                    433:            ("  pstart %lx, pend %lx, vstart %lx, vend %lx\n",
                    434:            avail_start, avail_end, virtual_avail, virtual_end));
                    435:
                    436:        /*
                    437:         * Allocate memory for random pmap data structures.  Includes the
                    438:         * initial segment table, pv_head_table and pmap_attributes.
                    439:         */
1.69      cherry    440:        for (page_cnt = 0, bank = uvm_physseg_get_first();
                    441:             uvm_physseg_valid_p(bank);
                    442:             bank = uvm_physseg_get_next(bank))
                    443:                page_cnt += uvm_physseg_get_end(bank) - uvm_physseg_get_start(bank);
1.1       chs       444:        s = M68K_STSIZE;                                        /* Segtabzero */
1.46      thorpej   445:        s += page_cnt * sizeof(struct pv_header);       /* pv table */
1.1       chs       446:        s = round_page(s);
1.14      yamt      447:        addr = uvm_km_alloc(kernel_map, s, 0, UVM_KMF_WIRED | UVM_KMF_ZERO);
1.1       chs       448:        if (addr == 0)
                    449:                panic("pmap_init: can't allocate data structures");
                    450:
1.20      tsutsui   451:        Segtabzero = (st_entry_t *)addr;
                    452:        (void)pmap_extract(pmap_kernel(), addr,
                    453:            (paddr_t *)(void *)&Segtabzeropa);
1.1       chs       454:        addr += M68K_STSIZE;
                    455:
1.46      thorpej   456:        pv_table = (struct pv_header *) addr;
                    457:        addr += page_cnt * sizeof(struct pv_header);
1.1       chs       458:
                    459:        PMAP_DPRINTF(PDB_INIT, ("pmap_init: %lx bytes: page_cnt %x s0 %p(%p) "
1.46      thorpej   460:            "tbl %p\n",
1.1       chs       461:            s, page_cnt, Segtabzero, Segtabzeropa,
1.46      thorpej   462:            pv_table));
1.1       chs       463:
                    464:        /*
                    465:         * Now that the pv and attribute tables have been allocated,
                    466:         * assign them to the memory segments.
                    467:         */
1.46      thorpej   468:        pvh = pv_table;
1.69      cherry    469:        for (bank = uvm_physseg_get_first();
                    470:             uvm_physseg_valid_p(bank);
                    471:             bank = uvm_physseg_get_next(bank)) {
                    472:                npages = uvm_physseg_get_end(bank) - uvm_physseg_get_start(bank);
                    473:                uvm_physseg_get_pmseg(bank)->pvheader = pvh;
1.46      thorpej   474:                pvh += npages;
1.1       chs       475:        }
                    476:
                    477:        /*
1.5       thorpej   478:         * Allocate physical memory for kernel PT pages and their management.
                    479:         * We need 1 PT page per possible task plus some slop.
                    480:         */
1.71      riastrad  481:        npages = uimin(atop(M68K_MAX_KPTSIZE), maxproc+16);
1.5       thorpej   482:        s = ptoa(npages) + round_page(npages * sizeof(struct kpt_page));
                    483:
                    484:        /*
                    485:         * Verify that space will be allocated in region for which
                    486:         * we already have kernel PT pages.
                    487:         */
                    488:        addr = 0;
                    489:        rv = uvm_map(kernel_map, &addr, s, NULL, UVM_UNKNOWN_OFFSET, 0,
1.20      tsutsui   490:            UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
                    491:            UVM_ADV_RANDOM, UVM_FLAG_NOMERGE));
1.5       thorpej   492:        if (rv != 0 || (addr + s) >= (vaddr_t)Sysmap)
                    493:                panic("pmap_init: kernel PT too small");
                    494:        uvm_unmap(kernel_map, addr, addr + s);
                    495:
                    496:        /*
                    497:         * Now allocate the space and link the pages together to
                    498:         * form the KPT free list.
                    499:         */
1.14      yamt      500:        addr = uvm_km_alloc(kernel_map, s, 0, UVM_KMF_WIRED | UVM_KMF_ZERO);
1.5       thorpej   501:        if (addr == 0)
                    502:                panic("pmap_init: cannot allocate KPT free list");
                    503:        s = ptoa(npages);
                    504:        addr2 = addr + s;
                    505:        kpt_pages = &((struct kpt_page *)addr2)[npages];
                    506:        kpt_free_list = NULL;
                    507:        do {
                    508:                addr2 -= PAGE_SIZE;
                    509:                (--kpt_pages)->kpt_next = kpt_free_list;
                    510:                kpt_free_list = kpt_pages;
                    511:                kpt_pages->kpt_va = addr2;
                    512:                (void) pmap_extract(pmap_kernel(), addr2,
                    513:                    (paddr_t *)&kpt_pages->kpt_pa);
                    514:        } while (addr != addr2);
                    515:
                    516:        PMAP_DPRINTF(PDB_INIT, ("pmap_init: KPT: %ld pages from %lx to %lx\n",
                    517:            atop(s), addr, addr + s));
                    518:
                    519:        /*
1.1       chs       520:         * Allocate the segment table map and the page table map.
                    521:         */
                    522:        s = maxproc * M68K_STSIZE;
1.25      thorpej   523:        st_map = uvm_km_suballoc(kernel_map, &addr, &addr2, s, 0, false,
1.1       chs       524:            &st_map_store);
                    525:
1.66      rkujawa   526:        addr = m68k_uptbase;
1.1       chs       527:        if ((M68K_PTMAXSIZE / M68K_MAX_PTSIZE) < maxproc) {
                    528:                s = M68K_PTMAXSIZE;
                    529:                /*
                    530:                 * XXX We don't want to hang when we run out of
                    531:                 * page tables, so we lower maxproc so that fork()
                    532:                 * will fail instead.  Note that root could still raise
                    533:                 * this value via sysctl(3).
                    534:                 */
                    535:                maxproc = (M68K_PTMAXSIZE / M68K_MAX_PTSIZE);
                    536:        } else
                    537:                s = (maxproc * M68K_MAX_PTSIZE);
                    538:        pt_map = uvm_km_suballoc(kernel_map, &addr, &addr2, s, 0,
1.25      thorpej   539:            true, &pt_map_store);
1.1       chs       540:
                    541: #if defined(M68040) || defined(M68060)
                    542:        if (mmutype == MMU_68040) {
                    543:                protostfree = ~l2tobm(0);
                    544:                for (rv = MAXUL2SIZE; rv < sizeof(protostfree)*NBBY; rv++)
                    545:                        protostfree &= ~l2tobm(rv);
                    546:        }
                    547: #endif
                    548:
                    549:        /*
                    550:         * Initialize the pmap pools.
                    551:         */
                    552:        pool_init(&pmap_pmap_pool, sizeof(struct pmap), 0, 0, 0, "pmappl",
1.28      ad        553:            &pool_allocator_nointr, IPL_NONE);
1.1       chs       554:
                    555:        /*
1.63      tsutsui   556:         * Initialize the pv_entry pools.
                    557:         */
                    558:        pool_init(&pmap_pv_pool, sizeof(struct pv_entry), 0, 0, 0, "pvpl",
1.64      tsutsui   559:            &pool_allocator_meta, IPL_NONE);
1.63      tsutsui   560:
                    561:        /*
1.1       chs       562:         * Now that this is done, mark the pages shared with the
                    563:         * hardware page table search as non-CCB (actually, as CI).
                    564:         *
                    565:         * XXX Hm. Given that this is in the kernel map, can't we just
                    566:         * use the va's?
                    567:         */
                    568: #ifdef M68060
                    569: #if defined(M68020) || defined(M68030) || defined(M68040)
                    570:        if (cputype == CPU_68060)
                    571: #endif
                    572:        {
                    573:                struct kpt_page *kptp = kpt_free_list;
                    574:                paddr_t paddr;
                    575:
                    576:                while (kptp) {
                    577:                        pmap_changebit(kptp->kpt_pa, PG_CI, ~PG_CCB);
                    578:                        kptp = kptp->kpt_next;
                    579:                }
                    580:
                    581:                paddr = (paddr_t)Segtabzeropa;
                    582:                while (paddr < (paddr_t)Segtabzeropa + M68K_STSIZE) {
                    583:                        pmap_changebit(paddr, PG_CI, ~PG_CCB);
1.3       thorpej   584:                        paddr += PAGE_SIZE;
1.1       chs       585:                }
                    586:
                    587:                DCIS();
                    588:        }
                    589: #endif
                    590:
                    591:        /*
                    592:         * Now it is safe to enable pv_table recording.
                    593:         */
1.25      thorpej   594:        pmap_initialized = true;
1.1       chs       595: }
                    596:
                    597: /*
                    598:  * pmap_map:
                    599:  *
                    600:  *     Used to map a range of physical addresses into kernel
                    601:  *     virtual address space.
                    602:  *
                    603:  *     For now, VM is already on, we only need to map the
                    604:  *     specified memory.
                    605:  *
                    606:  *     Note: THIS FUNCTION IS DEPRECATED, AND SHOULD BE REMOVED!
                    607:  */
                    608: vaddr_t
1.20      tsutsui   609: pmap_map(vaddr_t va, paddr_t spa, paddr_t epa, int prot)
1.1       chs       610: {
                    611:
                    612:        PMAP_DPRINTF(PDB_FOLLOW,
                    613:            ("pmap_map(%lx, %lx, %lx, %x)\n", va, spa, epa, prot));
                    614:
                    615:        while (spa < epa) {
                    616:                pmap_enter(pmap_kernel(), va, spa, prot, 0);
1.3       thorpej   617:                va += PAGE_SIZE;
                    618:                spa += PAGE_SIZE;
1.1       chs       619:        }
                    620:        pmap_update(pmap_kernel());
1.20      tsutsui   621:        return va;
1.1       chs       622: }
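/*
 * Illustrative sketch (not part of the original source): a typical
 * bootstrap-time use of the deprecated pmap_map().  The physical range
 * shown is made up for the example.
 */
#if 0
	vaddr_t va = virtual_avail;

	virtual_avail = pmap_map(va, 0x00d00000, 0x00d04000,
	    VM_PROT_READ | VM_PROT_WRITE);
	/* va now maps the 16KB physical range read/write. */
#endif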
                    623:
                    624: /*
                    625:  * pmap_create:                        [ INTERFACE ]
                    626:  *
                    627:  *     Create and return a physical map.
                    628:  *
                    629:  *     Note: no locking is necessary in this function.
                    630:  */
                    631: pmap_t
1.20      tsutsui   632: pmap_create(void)
1.1       chs       633: {
                    634:        struct pmap *pmap;
                    635:
                    636:        PMAP_DPRINTF(PDB_FOLLOW|PDB_CREATE,
                    637:            ("pmap_create()\n"));
                    638:
                    639:        pmap = pool_get(&pmap_pmap_pool, PR_WAITOK);
                    640:        memset(pmap, 0, sizeof(*pmap));
                    641:        pmap_pinit(pmap);
1.20      tsutsui   642:        return pmap;
1.1       chs       643: }
                    644:
                    645: /*
                    646:  * pmap_pinit:
                    647:  *
                    648:  *     Initialize a preallocated and zeroed pmap structure.
                    649:  *
                    650:  *     Note: THIS FUNCTION SHOULD BE MOVED INTO pmap_create()!
                    651:  */
                    652: void
1.20      tsutsui   653: pmap_pinit(struct pmap *pmap)
1.1       chs       654: {
                    655:
                    656:        PMAP_DPRINTF(PDB_FOLLOW|PDB_CREATE,
                    657:            ("pmap_pinit(%p)\n", pmap));
                    658:
                    659:        /*
                    660:         * No need to allocate page table space yet but we do need a
                    661:         * valid segment table.  Initially, we point everyone at the
                    662:         * "null" segment table.  On the first pmap_enter, a real
                    663:         * segment table will be allocated.
                    664:         */
                    665:        pmap->pm_stab = Segtabzero;
                    666:        pmap->pm_stpa = Segtabzeropa;
                    667: #if defined(M68040) || defined(M68060)
                    668: #if defined(M68020) || defined(M68030)
                    669:        if (mmutype == MMU_68040)
                    670: #endif
                    671:                pmap->pm_stfree = protostfree;
                    672: #endif
                    673:        pmap->pm_count = 1;
                    674: }
                    675:
                    676: /*
                    677:  * pmap_destroy:               [ INTERFACE ]
                    678:  *
                    679:  *     Drop the reference count on the specified pmap, releasing
                    680:  *     all resources if the reference count drops to zero.
                    681:  */
                    682: void
1.20      tsutsui   683: pmap_destroy(pmap_t pmap)
1.1       chs       684: {
                    685:        int count;
                    686:
                    687:        PMAP_DPRINTF(PDB_FOLLOW, ("pmap_destroy(%p)\n", pmap));
                    688:
1.61      tsutsui   689:        count = atomic_dec_uint_nv(&pmap->pm_count);
1.1       chs       690:        if (count == 0) {
                    691:                pmap_release(pmap);
                    692:                pool_put(&pmap_pmap_pool, pmap);
                    693:        }
                    694: }
                    695:
                    696: /*
                    697:  * pmap_release:
                    698:  *
                     699:  *     Release the resources held by a pmap.
                    700:  *
                    701:  *     Note: THIS FUNCTION SHOULD BE MOVED INTO pmap_destroy().
                    702:  */
                    703: void
1.20      tsutsui   704: pmap_release(pmap_t pmap)
1.1       chs       705: {
                    706:
                    707:        PMAP_DPRINTF(PDB_FOLLOW, ("pmap_release(%p)\n", pmap));
                    708:
                    709: #ifdef notdef /* DIAGNOSTIC */
                    710:        /* count would be 0 from pmap_destroy... */
                    711:        if (pmap->pm_count != 1)
                    712:                panic("pmap_release count");
                    713: #endif
                    714:
                    715:        if (pmap->pm_ptab) {
                    716:                pmap_remove(pmap_kernel(), (vaddr_t)pmap->pm_ptab,
                    717:                    (vaddr_t)pmap->pm_ptab + M68K_MAX_PTSIZE);
1.14      yamt      718:                uvm_km_pgremove((vaddr_t)pmap->pm_ptab,
                    719:                    (vaddr_t)pmap->pm_ptab + M68K_MAX_PTSIZE);
                    720:                uvm_km_free(pt_map, (vaddr_t)pmap->pm_ptab,
                    721:                    M68K_MAX_PTSIZE, UVM_KMF_VAONLY);
1.1       chs       722:        }
                    723:        KASSERT(pmap->pm_stab == Segtabzero);
                    724: }
                    725:
                    726: /*
                    727:  * pmap_reference:             [ INTERFACE ]
                    728:  *
                    729:  *     Add a reference to the specified pmap.
                    730:  */
                    731: void
1.20      tsutsui   732: pmap_reference(pmap_t pmap)
1.1       chs       733: {
                    734:        PMAP_DPRINTF(PDB_FOLLOW, ("pmap_reference(%p)\n", pmap));
                    735:
1.61      tsutsui   736:        atomic_inc_uint(&pmap->pm_count);
1.1       chs       737: }
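/*
 * Illustrative sketch (not part of the original source): pm_count pairs
 * pmap_reference() with pmap_destroy(); resources are released only when
 * the last reference is dropped.
 */
#if 0
	pmap_t pm = pmap_create();	/* pm_count == 1 */

	pmap_reference(pm);		/* pm_count == 2 */
	pmap_destroy(pm);		/* pm_count == 1, pmap still usable */
	pmap_destroy(pm);		/* pm_count == 0, released and freed */
#endif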
                    738:
                    739: /*
                    740:  * pmap_activate:              [ INTERFACE ]
                    741:  *
                    742:  *     Activate the pmap used by the specified process.  This includes
                     743:  *     reloading the MMU context if it is the current process, and marking
                    744:  *     the pmap in use by the processor.
                    745:  *
                    746:  *     Note: we may only use spin locks here, since we are called
                    747:  *     by a critical section in cpu_switch()!
                    748:  */
                    749: void
1.20      tsutsui   750: pmap_activate(struct lwp *l)
1.1       chs       751: {
1.20      tsutsui   752:        pmap_t pmap = l->l_proc->p_vmspace->vm_map.pmap;
1.1       chs       753:
                    754:        PMAP_DPRINTF(PDB_FOLLOW|PDB_SEGTAB,
1.2       thorpej   755:            ("pmap_activate(%p)\n", l));
1.1       chs       756:
1.30      mhitch    757:        PMAP_ACTIVATE(pmap, (curlwp->l_flag & LW_IDLE) != 0 ||
                    758:            l->l_proc == curproc);
1.1       chs       759: }
                    760:
                    761: /*
                    762:  * pmap_deactivate:            [ INTERFACE ]
                    763:  *
                    764:  *     Mark that the pmap used by the specified process is no longer
                    765:  *     in use by the processor.
                    766:  *
                    767:  *     The comment above pmap_activate() wrt. locking applies here,
                    768:  *     as well.
                    769:  */
                    770: void
1.20      tsutsui   771: pmap_deactivate(struct lwp *l)
1.1       chs       772: {
                    773:
                    774:        /* No action necessary in this pmap implementation. */
                    775: }
                    776:
                    777: /*
                    778:  * pmap_remove:                        [ INTERFACE ]
                    779:  *
                    780:  *     Remove the given range of addresses from the specified map.
                    781:  *
                    782:  *     It is assumed that the start and end are properly
                    783:  *     rounded to the page size.
                    784:  */
                    785: void
1.20      tsutsui   786: pmap_remove(pmap_t pmap, vaddr_t sva, vaddr_t eva)
1.1       chs       787: {
                    788:        vaddr_t nssva;
                    789:        pt_entry_t *pte;
                    790:        int flags;
1.59      tsutsui   791: #ifdef CACHE_HAVE_VAC
1.25      thorpej   792:        bool firstpage = true, needcflush = false;
1.1       chs       793: #endif
                    794:
                    795:        PMAP_DPRINTF(PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT,
                    796:            ("pmap_remove(%p, %lx, %lx)\n", pmap, sva, eva));
                    797:
                    798:        flags = active_pmap(pmap) ? PRM_TFLUSH : 0;
                    799:        while (sva < eva) {
                    800:                nssva = m68k_trunc_seg(sva) + NBSEG;
                    801:                if (nssva == 0 || nssva > eva)
                    802:                        nssva = eva;
                    803:
                    804:                /*
                    805:                 * Invalidate every valid mapping within this segment.
                    806:                 */
                    807:
                    808:                pte = pmap_pte(pmap, sva);
                    809:                while (sva < nssva) {
                    810:
                    811:                        /*
                    812:                         * If this segment is unallocated,
                    813:                         * skip to the next segment boundary.
                    814:                         */
                    815:
                    816:                        if (!pmap_ste_v(pmap, sva)) {
                    817:                                sva = nssva;
                    818:                                break;
                    819:                        }
                    820:
1.49      rmind     821:                        if (pmap_pte_v(pte)) {
1.59      tsutsui   822: #ifdef CACHE_HAVE_VAC
1.1       chs       823:                                if (pmap_aliasmask) {
                    824:
                    825:                                        /*
                    826:                                         * Purge kernel side of VAC to ensure
                    827:                                         * we get the correct state of any
                    828:                                         * hardware maintained bits.
                    829:                                         */
                    830:
                    831:                                        if (firstpage) {
                    832:                                                DCIS();
                    833:                                        }
                    834:
                    835:                                        /*
                    836:                                         * Remember if we may need to
                    837:                                         * flush the VAC due to a non-CI
                    838:                                         * mapping.
                    839:                                         */
                    840:
                    841:                                        if (!needcflush && !pmap_pte_ci(pte))
1.25      thorpej   842:                                                needcflush = true;
1.1       chs       843:
                    844:                                }
1.25      thorpej   845:                                firstpage = false;
1.1       chs       846: #endif
1.70      chs       847:                                pmap_remove_mapping(pmap, sva, pte, flags, NULL);
1.1       chs       848:                        }
                    849:                        pte++;
1.3       thorpej   850:                        sva += PAGE_SIZE;
1.1       chs       851:                }
                    852:        }
                    853:
1.59      tsutsui   854: #ifdef CACHE_HAVE_VAC
1.1       chs       855:
                    856:        /*
                    857:         * Didn't do anything, no need for cache flushes
                    858:         */
                    859:
                    860:        if (firstpage)
                    861:                return;
                    862:
                    863:        /*
                    864:         * In a couple of cases, we don't need to worry about flushing
                    865:         * the VAC:
                    866:         *      1. if this is a kernel mapping,
                    867:         *         we have already done it
                    868:         *      2. if it is a user mapping not for the current process,
                    869:         *         it won't be there
                    870:         */
                    871:
                    872:        if (pmap_aliasmask && !active_user_pmap(pmap))
1.25      thorpej   873:                needcflush = false;
1.1       chs       874:        if (needcflush) {
                    875:                if (pmap == pmap_kernel()) {
                    876:                        DCIS();
                    877:                } else {
                    878:                        DCIU();
                    879:                }
                    880:        }
                    881: #endif
                    882: }
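/*
 * Illustrative sketch (not part of the original source): callers are
 * expected to page-align the range themselves, e.g. to unmap an
 * arbitrary [va, va + len) span:
 */
#if 0
	pmap_remove(pmap, trunc_page(va), round_page(va + len));
	pmap_update(pmap);
#endif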
                    883:
                    884: /*
                    885:  * pmap_page_protect:          [ INTERFACE ]
                    886:  *
                     887:  *     Lower the permissions on all mappings of a given page to
                     888:  *     those specified.
                    889:  */
                    890: void
1.20      tsutsui   891: pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
1.1       chs       892: {
                    893:        paddr_t pa = VM_PAGE_TO_PHYS(pg);
1.46      thorpej   894:        struct pv_header *pvh;
1.1       chs       895:        struct pv_entry *pv;
                    896:        pt_entry_t *pte;
                    897:        int s;
                    898:
                    899: #ifdef DEBUG
                    900:        if ((pmapdebug & (PDB_FOLLOW|PDB_PROTECT)) ||
                    901:            (prot == VM_PROT_NONE && (pmapdebug & PDB_REMOVE)))
                    902:                printf("pmap_page_protect(%p, %x)\n", pg, prot);
                    903: #endif
                    904:
                    905:        switch (prot) {
                    906:        case VM_PROT_READ|VM_PROT_WRITE:
                    907:        case VM_PROT_ALL:
                    908:                return;
                    909:
                    910:        /* copy_on_write */
                    911:        case VM_PROT_READ:
                    912:        case VM_PROT_READ|VM_PROT_EXECUTE:
                    913:                pmap_changebit(pa, PG_RO, ~0);
                    914:                return;
                    915:
                    916:        /* remove_all */
                    917:        default:
                    918:                break;
                    919:        }
                    920:
1.46      thorpej   921:        pvh = pa_to_pvh(pa);
                    922:        pv = &pvh->pvh_first;
1.1       chs       923:        s = splvm();
                    924:        while (pv->pv_pmap != NULL) {
                    925:
                    926:                pte = pmap_pte(pv->pv_pmap, pv->pv_va);
                    927: #ifdef DEBUG
                    928:                if (!pmap_ste_v(pv->pv_pmap, pv->pv_va) ||
                    929:                    pmap_pte_pa(pte) != pa)
                    930:                        panic("pmap_page_protect: bad mapping");
                    931: #endif
                    932:                pmap_remove_mapping(pv->pv_pmap, pv->pv_va,
1.70      chs       933:                    pte, PRM_TFLUSH|PRM_CFLUSH, NULL);
1.1       chs       934:        }
                    935:        splx(s);
                    936: }
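/*
 * Illustrative sketch (not part of the original source): how the VM
 * system typically drives pmap_page_protect() on a managed page.
 */
#if 0
	pmap_page_protect(pg, VM_PROT_READ);	/* write-protect (COW) */
	pmap_page_protect(pg, VM_PROT_NONE);	/* remove every mapping */
#endif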
                    937:
                    938: /*
                    939:  * pmap_protect:               [ INTERFACE ]
                    940:  *
1.29      tnn       941:  *     Set the physical protection on the specified range of this map
1.1       chs       942:  *     as requested.
                    943:  */
                    944: void
1.20      tsutsui   945: pmap_protect(pmap_t pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
1.1       chs       946: {
                    947:        vaddr_t nssva;
                    948:        pt_entry_t *pte;
1.67      martin    949:        bool firstpage __unused, needtflush;
1.1       chs       950:        int isro;
                    951:
                    952:        PMAP_DPRINTF(PDB_FOLLOW|PDB_PROTECT,
                    953:            ("pmap_protect(%p, %lx, %lx, %x)\n",
                    954:            pmap, sva, eva, prot));
                    955:
                    956: #ifdef PMAPSTATS
                    957:        protect_stats.calls++;
                    958: #endif
                    959:        if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
                    960:                pmap_remove(pmap, sva, eva);
                    961:                return;
                    962:        }
                    963:        isro = pte_prot(pmap, prot);
                    964:        needtflush = active_pmap(pmap);
1.25      thorpej   965:        firstpage = true;
1.1       chs       966:        while (sva < eva) {
                    967:                nssva = m68k_trunc_seg(sva) + NBSEG;
                    968:                if (nssva == 0 || nssva > eva)
                    969:                        nssva = eva;
                    970:
                    971:                /*
                    972:                 * If VA belongs to an unallocated segment,
                    973:                 * skip to the next segment boundary.
                    974:                 */
                    975:
                    976:                if (!pmap_ste_v(pmap, sva)) {
                    977:                        sva = nssva;
                    978:                        continue;
                    979:                }
                    980:
                    981:                /*
                    982:                 * Change protection on mapping if it is valid and doesn't
                    983:                 * already have the correct protection.
                    984:                 */
                    985:
                    986:                pte = pmap_pte(pmap, sva);
                    987:                while (sva < nssva) {
                    988:                        if (pmap_pte_v(pte) && pmap_pte_prot_chg(pte, isro)) {
1.59      tsutsui   989: #ifdef CACHE_HAVE_VAC
1.1       chs       990:
                    991:                                /*
                    992:                                 * Purge kernel side of VAC to ensure we
                    993:                                 * get the correct state of any hardware
                    994:                                 * maintained bits.
                    995:                                 *
                    996:                                 * XXX do we need to clear the VAC in
                    997:                                 * general to reflect the new protection?
                    998:                                 */
                    999:
                   1000:                                if (firstpage && pmap_aliasmask)
                   1001:                                        DCIS();
                   1002: #endif
                   1003:
                   1004: #if defined(M68040) || defined(M68060)
                   1005:
                   1006:                                /*
                   1007:                                 * Clear caches if making RO (see section
                   1008:                                 * "7.3 Cache Coherency" in the manual).
                   1009:                                 */
                   1010:
                   1011: #if defined(M68020) || defined(M68030)
                   1012:                                if (isro && mmutype == MMU_68040)
                   1013: #else
                   1014:                                if (isro)
                   1015: #endif
                   1016:                                {
                   1017:                                        paddr_t pa = pmap_pte_pa(pte);
                   1018:
                   1019:                                        DCFP(pa);
                   1020:                                        ICPP(pa);
                   1021:                                }
                   1022: #endif
                   1023:                                pmap_pte_set_prot(pte, isro);
                   1024:                                if (needtflush)
                   1025:                                        TBIS(sva);
1.25      thorpej  1026:                                firstpage = false;
1.1       chs      1027:                        }
                   1028:                        pte++;
1.3       thorpej  1029:                        sva += PAGE_SIZE;
1.1       chs      1030:                }
                   1031:        }
                   1032: }
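/*
 * Editor's sketch: pmap_protect() above, like pmap_kremove() below,
 * walks [sva, eva) one MMU segment at a time so whole unallocated
 * segments can be skipped with a single pmap_ste_v() test.  The bare
 * idiom, with the per-PTE work elided, is roughly:
 */
#if 0
	while (sva < eva) {
		nssva = m68k_trunc_seg(sva) + NBSEG;	/* next boundary */
		if (nssva == 0 || nssva > eva)		/* 0 => wrapped */
			nssva = eva;
		if (!pmap_ste_v(pmap, sva)) {		/* no PT page here */
			sva = nssva;
			continue;
		}
		for (pte = pmap_pte(pmap, sva); sva < nssva;
		     pte++, sva += PAGE_SIZE) {
			/* ... per-page work on *pte ... */
		}
	}
#endif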
                   1033:
                   1034: /*
                   1035:  * pmap_enter:                 [ INTERFACE ]
                   1036:  *
                   1037:  *     Insert the given physical page (pa) at
                   1038:  *     the specified virtual address (va) in the
                   1039:  *     target physical map with the protection requested.
                   1040:  *
                   1041:  *     If specified, the page will be wired down, meaning
                   1042:  *     that the related pte cannot be reclaimed.
                   1043:  *
                   1044:  *     Note: This is the only routine which MAY NOT lazy-evaluate
                    1045:  *     or lose information.  That is, this routine must actually
                   1046:  *     insert this page into the given map NOW.
                   1047:  */
                   1048: int
1.44      cegger   1049: pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
1.1       chs      1050: {
                   1051:        pt_entry_t *pte;
1.70      chs      1052:        struct pv_entry *opv = NULL;
1.1       chs      1053:        int npte;
                   1054:        paddr_t opa;
1.25      thorpej  1055:        bool cacheable = true;
                   1056:        bool checkpv = true;
1.23      thorpej  1057:        bool wired = (flags & PMAP_WIRED) != 0;
                   1058:        bool can_fail = (flags & PMAP_CANFAIL) != 0;
1.1       chs      1059:
                   1060:        PMAP_DPRINTF(PDB_FOLLOW|PDB_ENTER,
                   1061:            ("pmap_enter(%p, %lx, %lx, %x, %x)\n",
                   1062:            pmap, va, pa, prot, wired));
                   1063:
                   1064: #ifdef DIAGNOSTIC
                   1065:        /*
                   1066:         * pmap_enter() should never be used for CADDR1 and CADDR2.
                   1067:         */
                   1068:        if (pmap == pmap_kernel() &&
                   1069:            (va == (vaddr_t)CADDR1 || va == (vaddr_t)CADDR2))
                   1070:                panic("pmap_enter: used for CADDR1 or CADDR2");
                   1071: #endif
                   1072:
                   1073:        /*
                   1074:         * For user mapping, allocate kernel VM resources if necessary.
                   1075:         */
1.22      martin   1076:        if (pmap->pm_ptab == NULL) {
1.1       chs      1077:                pmap->pm_ptab = (pt_entry_t *)
1.14      yamt     1078:                    uvm_km_alloc(pt_map, M68K_MAX_PTSIZE, 0,
1.22      martin   1079:                    UVM_KMF_VAONLY |
                   1080:                    (can_fail ? UVM_KMF_NOWAIT : UVM_KMF_WAITVA));
                   1081:                if (pmap->pm_ptab == NULL)
                   1082:                        return ENOMEM;
                   1083:        }
1.1       chs      1084:
                   1085:        /*
                   1086:         * Segment table entry not valid, we need a new PT page
                   1087:         */
1.22      martin   1088:        if (!pmap_ste_v(pmap, va)) {
                   1089:                int err = pmap_enter_ptpage(pmap, va, can_fail);
                   1090:                if (err)
                   1091:                        return err;
                   1092:        }
1.1       chs      1093:
                   1094:        pa = m68k_trunc_page(pa);
                   1095:        pte = pmap_pte(pmap, va);
                   1096:        opa = pmap_pte_pa(pte);
                   1097:
                   1098:        PMAP_DPRINTF(PDB_ENTER, ("enter: pte %p, *pte %x\n", pte, *pte));
                   1099:
                   1100:        /*
                   1101:         * Mapping has not changed, must be protection or wiring change.
                   1102:         */
                   1103:        if (opa == pa) {
                   1104:                /*
                   1105:                 * Wiring change, just update stats.
                   1106:                 * We don't worry about wiring PT pages as they remain
                   1107:                 * resident as long as there are valid mappings in them.
                    1108:                 * Hence, if a user page is wired, the PT page will be as well.
                   1109:                 */
                   1110:                if (pmap_pte_w_chg(pte, wired ? PG_W : 0)) {
                   1111:                        PMAP_DPRINTF(PDB_ENTER,
                   1112:                            ("enter: wiring change -> %x\n", wired));
                   1113:                        if (wired)
                   1114:                                pmap->pm_stats.wired_count++;
                   1115:                        else
                   1116:                                pmap->pm_stats.wired_count--;
                   1117:                }
                   1118:                /*
                   1119:                 * Retain cache inhibition status
                   1120:                 */
1.25      thorpej  1121:                checkpv = false;
1.1       chs      1122:                if (pmap_pte_ci(pte))
1.25      thorpej  1123:                        cacheable = false;
1.1       chs      1124:                goto validate;
                   1125:        }
                   1126:
                   1127:        /*
                   1128:         * Mapping has changed, invalidate old range and fall through to
                   1129:         * handle validating new mapping.
                   1130:         */
                   1131:        if (opa) {
                   1132:                PMAP_DPRINTF(PDB_ENTER,
                   1133:                    ("enter: removing old mapping %lx\n", va));
                   1134:                pmap_remove_mapping(pmap, va, pte,
1.70      chs      1135:                    PRM_TFLUSH|PRM_CFLUSH|PRM_KEEPPTPAGE, &opv);
1.1       chs      1136:        }
                   1137:
                   1138:        /*
                   1139:         * If this is a new user mapping, increment the wiring count
                   1140:         * on this PT page.  PT pages are wired down as long as there
                   1141:         * is a valid mapping in the page.
                   1142:         */
                   1143:        if (pmap != pmap_kernel())
                   1144:                pmap_ptpage_addref(trunc_page((vaddr_t)pte));
                   1145:
                   1146:        /*
                   1147:         * Enter on the PV list if part of our managed memory
                   1148:         * Note that we raise IPL while manipulating pv_table
                   1149:         * since pmap_enter can be called at interrupt time.
                   1150:         */
                   1151:        if (PAGE_IS_MANAGED(pa)) {
1.46      thorpej  1152:                struct pv_header *pvh;
1.1       chs      1153:                struct pv_entry *pv, *npv;
                   1154:                int s;
                   1155:
1.46      thorpej  1156:                pvh = pa_to_pvh(pa);
                   1157:                pv = &pvh->pvh_first;
1.1       chs      1158:                s = splvm();
                   1159:
                   1160:                PMAP_DPRINTF(PDB_ENTER,
                   1161:                    ("enter: pv at %p: %lx/%p/%p\n",
                   1162:                    pv, pv->pv_va, pv->pv_pmap, pv->pv_next));
                   1163:                /*
                   1164:                 * No entries yet, use header as the first entry
                   1165:                 */
                   1166:                if (pv->pv_pmap == NULL) {
                   1167:                        pv->pv_va = va;
                   1168:                        pv->pv_pmap = pmap;
                   1169:                        pv->pv_next = NULL;
                   1170:                        pv->pv_ptste = NULL;
                   1171:                        pv->pv_ptpmap = NULL;
1.46      thorpej  1172:                        pvh->pvh_attrs = 0;
1.1       chs      1173:                }
                   1174:                /*
                   1175:                 * There is at least one other VA mapping this page.
                   1176:                 * Place this entry after the header.
                   1177:                 */
                   1178:                else {
                   1179: #ifdef DEBUG
                   1180:                        for (npv = pv; npv; npv = npv->pv_next)
                   1181:                                if (pmap == npv->pv_pmap && va == npv->pv_va)
                   1182:                                        panic("pmap_enter: already in pv_tab");
                   1183: #endif
1.70      chs      1184:                        if (opv != NULL) {
                   1185:                                npv = opv;
                   1186:                                opv = NULL;
                   1187:                        } else {
                   1188:                                npv = pmap_alloc_pv();
                   1189:                        }
1.63      tsutsui  1190:                        KASSERT(npv != NULL);
1.1       chs      1191:                        npv->pv_va = va;
                   1192:                        npv->pv_pmap = pmap;
                   1193:                        npv->pv_next = pv->pv_next;
                   1194:                        npv->pv_ptste = NULL;
                   1195:                        npv->pv_ptpmap = NULL;
                   1196:                        pv->pv_next = npv;
                   1197:
1.59      tsutsui  1198: #ifdef CACHE_HAVE_VAC
1.1       chs      1199:
                   1200:                        /*
                   1201:                         * Since there is another logical mapping for the
                   1202:                         * same page we may need to cache-inhibit the
                   1203:                         * descriptors on those CPUs with external VACs.
                   1204:                         * We don't need to CI if:
                   1205:                         *
                    1206:                         * - No two mappings belong to the same user pmap.
                   1207:                         *   Since the cache is flushed on context switches
                   1208:                         *   there is no problem between user processes.
                   1209:                         *
                   1210:                         * - Mappings within a single pmap are a certain
                   1211:                         *   magic distance apart.  VAs at these appropriate
                   1212:                         *   boundaries map to the same cache entries or
                   1213:                         *   otherwise don't conflict.
                   1214:                         *
                   1215:                         * To keep it simple, we only check for these special
                   1216:                         * cases if there are only two mappings, otherwise we
                   1217:                         * punt and always CI.
                   1218:                         *
                   1219:                         * Note that there are no aliasing problems with the
                   1220:                         * on-chip data-cache when the WA bit is set.
                   1221:                         */
                   1222:
                   1223:                        if (pmap_aliasmask) {
1.46      thorpej  1224:                                if (pvh->pvh_attrs & PVH_CI) {
1.1       chs      1225:                                        PMAP_DPRINTF(PDB_CACHE,
                   1226:                                            ("enter: pa %lx already CI'ed\n",
                   1227:                                            pa));
1.25      thorpej  1228:                                        checkpv = cacheable = false;
1.1       chs      1229:                                } else if (npv->pv_next ||
                   1230:                                           ((pmap == pv->pv_pmap ||
                   1231:                                             pmap == pmap_kernel() ||
                   1232:                                             pv->pv_pmap == pmap_kernel()) &&
                   1233:                                            ((pv->pv_va & pmap_aliasmask) !=
                   1234:                                             (va & pmap_aliasmask)))) {
                   1235:                                        PMAP_DPRINTF(PDB_CACHE,
                   1236:                                            ("enter: pa %lx CI'ing all\n",
                   1237:                                            pa));
1.25      thorpej  1238:                                        cacheable = false;
1.46      thorpej  1239:                                        pvh->pvh_attrs |= PVH_CI;
1.1       chs      1240:                                }
                   1241:                        }
                   1242: #endif
                   1243:                }
                   1244:
                   1245:                /*
                   1246:                 * Speed pmap_is_referenced() or pmap_is_modified() based
                   1247:                 * on the hint provided in access_type.
                   1248:                 */
                   1249: #ifdef DIAGNOSTIC
                   1250:                if ((flags & VM_PROT_ALL) & ~prot)
                   1251:                        panic("pmap_enter: access_type exceeds prot");
                   1252: #endif
                   1253:                if (flags & VM_PROT_WRITE)
1.46      thorpej  1254:                        pvh->pvh_attrs |= (PG_U|PG_M);
1.1       chs      1255:                else if (flags & VM_PROT_ALL)
1.46      thorpej  1256:                        pvh->pvh_attrs |= PG_U;
1.1       chs      1257:
                   1258:                splx(s);
                   1259:        }
                   1260:        /*
                   1261:         * Assumption: if it is not part of our managed memory
                    1262:         * then it must be device memory which may be volatile.
                   1263:         */
                   1264:        else if (pmap_initialized) {
1.25      thorpej  1265:                checkpv = cacheable = false;
1.1       chs      1266:        }
                   1267:
                   1268:        /*
                   1269:         * Increment counters
                   1270:         */
                   1271:        pmap->pm_stats.resident_count++;
                   1272:        if (wired)
                   1273:                pmap->pm_stats.wired_count++;
                   1274:
                   1275: validate:
1.59      tsutsui  1276: #ifdef CACHE_HAVE_VAC
1.1       chs      1277:        /*
                   1278:         * Purge kernel side of VAC to ensure we get correct state
                   1279:         * of HW bits so we don't clobber them.
                   1280:         */
                   1281:        if (pmap_aliasmask)
                   1282:                DCIS();
                   1283: #endif
                   1284:
                   1285:        /*
                   1286:         * Build the new PTE.
                   1287:         */
                   1288:
                   1289:        npte = pa | pte_prot(pmap, prot) | (*pte & (PG_M|PG_U)) | PG_V;
                   1290:        if (wired)
                   1291:                npte |= PG_W;
                   1292:        if (!checkpv && !cacheable)
                   1293: #if defined(M68040) || defined(M68060)
                   1294: #if defined(M68020) || defined(M68030)
                   1295:                npte |= (mmutype == MMU_68040 ? PG_CIN : PG_CI);
                   1296: #else
                   1297:                npte |= PG_CIN;
                   1298: #endif
                   1299: #else
                   1300:                npte |= PG_CI;
                   1301: #endif
                   1302: #if defined(M68040) || defined(M68060)
                   1303: #if defined(M68020) || defined(M68030)
                   1304:        else if (mmutype == MMU_68040 && (npte & (PG_PROT|PG_CI)) == PG_RW)
                   1305: #else
                   1306:        else if ((npte & (PG_PROT|PG_CI)) == PG_RW)
                   1307: #endif
                   1308:                npte |= PG_CCB;
                   1309: #endif
                   1310:
                   1311:        PMAP_DPRINTF(PDB_ENTER, ("enter: new pte value %x\n", npte));
                   1312:
                   1313:        /*
                   1314:         * Remember if this was a wiring-only change.
                   1315:         * If so, we need not flush the TLB and caches.
                   1316:         */
                   1317:
                   1318:        wired = ((*pte ^ npte) == PG_W);
                   1319: #if defined(M68040) || defined(M68060)
                   1320: #if defined(M68020) || defined(M68030)
                   1321:        if (mmutype == MMU_68040 && !wired)
                   1322: #else
                   1323:        if (!wired)
                   1324: #endif
                   1325:        {
                   1326:                DCFP(pa);
                   1327:                ICPP(pa);
                   1328:        }
                   1329: #endif
                   1330:        *pte = npte;
                   1331:        if (!wired && active_pmap(pmap))
                   1332:                TBIS(va);
1.59      tsutsui  1333: #ifdef CACHE_HAVE_VAC
1.1       chs      1334:        /*
                   1335:         * The following is executed if we are entering a second
                   1336:         * (or greater) mapping for a physical page and the mappings
                   1337:         * may create an aliasing problem.  In this case we must
                   1338:         * cache inhibit the descriptors involved and flush any
                   1339:         * external VAC.
                   1340:         */
                   1341:        if (checkpv && !cacheable) {
                   1342:                pmap_changebit(pa, PG_CI, ~0);
                   1343:                DCIA();
                   1344: #ifdef DEBUG
                   1345:                if ((pmapdebug & (PDB_CACHE|PDB_PVDUMP)) ==
                   1346:                    (PDB_CACHE|PDB_PVDUMP))
                   1347:                        pmap_pvdump(pa);
                   1348: #endif
                   1349:        }
                   1350: #endif
                   1351: #ifdef DEBUG
                   1352:        if ((pmapdebug & PDB_WIRING) && pmap != pmap_kernel())
                   1353:                pmap_check_wiring("enter", trunc_page((vaddr_t)pte));
                   1354: #endif
                   1355:
1.70      chs      1356:        if (opv != NULL)
                   1357:                pmap_free_pv(opv);
                   1358:
1.1       chs      1359:        return 0;
                   1360: }
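/*
 * Editor's sketch: a typical machine-independent caller enters a
 * managed page roughly as below.  The fragment is hypothetical and
 * simplified; uvm_wait() is assumed to be available with this
 * signature in the surrounding kernel.
 */
#if 0
	error = pmap_enter(pmap, va, VM_PAGE_TO_PHYS(pg),
	    VM_PROT_READ | VM_PROT_WRITE,
	    VM_PROT_WRITE | PMAP_CANFAIL);	/* access_type + flags */
	if (error == ENOMEM) {
		/* Only possible with PMAP_CANFAIL; wait and retry. */
		uvm_wait("pmapenter");
	}
#endif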
                   1361:
                   1362: void
1.50      cegger   1363: pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
1.1       chs      1364: {
1.20      tsutsui  1365:        pmap_t pmap = pmap_kernel();
1.1       chs      1366:        pt_entry_t *pte;
                   1367:        int s, npte;
                   1368:
                   1369:        PMAP_DPRINTF(PDB_FOLLOW|PDB_ENTER,
                   1370:            ("pmap_kenter_pa(%lx, %lx, %x)\n", va, pa, prot));
                   1371:
                   1372:        /*
                   1373:         * Segment table entry not valid, we need a new PT page
                   1374:         */
                   1375:
                   1376:        if (!pmap_ste_v(pmap, va)) {
                   1377:                s = splvm();
1.25      thorpej  1378:                pmap_enter_ptpage(pmap, va, false);
1.1       chs      1379:                splx(s);
                   1380:        }
                   1381:
                   1382:        pa = m68k_trunc_page(pa);
                   1383:        pte = pmap_pte(pmap, va);
                   1384:
                   1385:        PMAP_DPRINTF(PDB_ENTER, ("enter: pte %p, *pte %x\n", pte, *pte));
                   1386:        KASSERT(!pmap_pte_v(pte));
                   1387:
                   1388:        /*
                   1389:         * Increment counters
                   1390:         */
                   1391:
                   1392:        pmap->pm_stats.resident_count++;
                   1393:        pmap->pm_stats.wired_count++;
                   1394:
                   1395:        /*
                   1396:         * Build the new PTE.
                   1397:         */
                   1398:
                   1399:        npte = pa | pte_prot(pmap, prot) | PG_V | PG_W;
                   1400: #if defined(M68040) || defined(M68060)
                   1401: #if defined(M68020) || defined(M68030)
                   1402:        if (mmutype == MMU_68040 && (npte & PG_PROT) == PG_RW)
                   1403: #else
                   1404:        if ((npte & PG_PROT) == PG_RW)
                   1405: #endif
                   1406:                npte |= PG_CCB;
                   1407:
                   1408:        if (mmutype == MMU_68040) {
                   1409:                DCFP(pa);
                   1410:                ICPP(pa);
                   1411:        }
                   1412: #endif
                   1413:
                   1414:        *pte = npte;
                   1415:        TBIS(va);
                   1416: }
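/*
 * Editor's sketch: pmap_kenter_pa()/pmap_kremove() manage wired,
 * unmanaged kernel mappings with no pv-list entry, so they must be
 * paired by the caller.  A hypothetical mapping of "n" contiguous
 * pages (pmap_update() is assumed to be the usual NetBSD post-update
 * hook, a no-op on this port):
 */
#if 0
	for (i = 0; i < n; i++)
		pmap_kenter_pa(va + ptoa(i), pa + ptoa(i),
		    VM_PROT_READ | VM_PROT_WRITE, 0);
	pmap_update(pmap_kernel());
	/* ... use the mapping ... */
	pmap_kremove(va, ptoa(n));
	pmap_update(pmap_kernel());
#endif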
                   1417:
                   1418: void
1.20      tsutsui  1419: pmap_kremove(vaddr_t va, vsize_t size)
1.1       chs      1420: {
1.20      tsutsui  1421:        pmap_t pmap = pmap_kernel();
1.1       chs      1422:        pt_entry_t *pte;
                   1423:        vaddr_t nssva;
                   1424:        vaddr_t eva = va + size;
1.59      tsutsui  1425: #ifdef CACHE_HAVE_VAC
1.23      thorpej  1426:        bool firstpage, needcflush;
1.1       chs      1427: #endif
                   1428:
                   1429:        PMAP_DPRINTF(PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT,
                   1430:            ("pmap_kremove(%lx, %lx)\n", va, size));
                   1431:
1.59      tsutsui  1432: #ifdef CACHE_HAVE_VAC
1.25      thorpej  1433:        firstpage = true;
                   1434:        needcflush = false;
1.1       chs      1435: #endif
                   1436:        while (va < eva) {
                   1437:                nssva = m68k_trunc_seg(va) + NBSEG;
                   1438:                if (nssva == 0 || nssva > eva)
                   1439:                        nssva = eva;
                   1440:
                   1441:                /*
                   1442:                 * If VA belongs to an unallocated segment,
                   1443:                 * skip to the next segment boundary.
                   1444:                 */
                   1445:
                   1446:                if (!pmap_ste_v(pmap, va)) {
                   1447:                        va = nssva;
                   1448:                        continue;
                   1449:                }
                   1450:
                   1451:                /*
                   1452:                 * Invalidate every valid mapping within this segment.
                   1453:                 */
                   1454:
                   1455:                pte = pmap_pte(pmap, va);
                   1456:                while (va < nssva) {
                   1457:                        if (!pmap_pte_v(pte)) {
                   1458:                                pte++;
1.3       thorpej  1459:                                va += PAGE_SIZE;
1.1       chs      1460:                                continue;
                   1461:                        }
1.59      tsutsui  1462: #ifdef CACHE_HAVE_VAC
1.1       chs      1463:                        if (pmap_aliasmask) {
                   1464:
                   1465:                                /*
                   1466:                                 * Purge kernel side of VAC to ensure
                   1467:                                 * we get the correct state of any
                   1468:                                 * hardware maintained bits.
                   1469:                                 */
                   1470:
                   1471:                                if (firstpage) {
                   1472:                                        DCIS();
1.25      thorpej  1473:                                        firstpage = false;
1.1       chs      1474:                                }
                   1475:
                   1476:                                /*
                   1477:                                 * Remember if we may need to
                   1478:                                 * flush the VAC.
                   1479:                                 */
                   1480:
1.25      thorpej  1481:                                needcflush = true;
1.1       chs      1482:                        }
                   1483: #endif
                   1484:                        pmap->pm_stats.wired_count--;
                   1485:                        pmap->pm_stats.resident_count--;
                   1486:                        *pte = PG_NV;
                   1487:                        TBIS(va);
                   1488:                        pte++;
1.3       thorpej  1489:                        va += PAGE_SIZE;
1.1       chs      1490:                }
                   1491:        }
                   1492:
1.59      tsutsui  1493: #ifdef CACHE_HAVE_VAC
1.1       chs      1494:
                   1495:        /*
                   1496:         * In a couple of cases, we don't need to worry about flushing
                   1497:         * the VAC:
                   1498:         *      1. if this is a kernel mapping,
                   1499:         *         we have already done it
                   1500:         *      2. if it is a user mapping not for the current process,
                   1501:         *         it won't be there
                   1502:         */
                   1503:
                   1504:        if (pmap_aliasmask && !active_user_pmap(pmap))
1.25      thorpej  1505:                needcflush = false;
1.1       chs      1506:        if (needcflush) {
                   1507:                if (pmap == pmap_kernel()) {
                   1508:                        DCIS();
                   1509:                } else {
                   1510:                        DCIU();
                   1511:                }
                   1512:        }
                   1513: #endif
                   1514: }
                   1515:
                   1516: /*
                   1517:  * pmap_unwire:                        [ INTERFACE ]
                   1518:  *
                   1519:  *     Clear the wired attribute for a map/virtual-address pair.
                   1520:  *
                   1521:  *     The mapping must already exist in the pmap.
                   1522:  */
                   1523: void
1.20      tsutsui  1524: pmap_unwire(pmap_t pmap, vaddr_t va)
1.1       chs      1525: {
                   1526:        pt_entry_t *pte;
                   1527:
                   1528:        PMAP_DPRINTF(PDB_FOLLOW,
                   1529:            ("pmap_unwire(%p, %lx)\n", pmap, va));
                   1530:
                   1531:        pte = pmap_pte(pmap, va);
                   1532:
                   1533:        /*
                    1534:         * If wiring actually changed (always?), clear the wire bit and
                   1535:         * update the wire count.  Note that wiring is not a hardware
                   1536:         * characteristic so there is no need to invalidate the TLB.
                   1537:         */
                   1538:
                   1539:        if (pmap_pte_w_chg(pte, 0)) {
1.25      thorpej  1540:                pmap_pte_set_w(pte, false);
1.1       chs      1541:                pmap->pm_stats.wired_count--;
                   1542:        }
                   1543: }
                   1544:
                   1545: /*
                   1546:  * pmap_extract:               [ INTERFACE ]
                   1547:  *
                   1548:  *     Extract the physical address associated with the given
                   1549:  *     pmap/virtual address pair.
                   1550:  */
1.23      thorpej  1551: bool
1.20      tsutsui  1552: pmap_extract(pmap_t pmap, vaddr_t va, paddr_t *pap)
1.1       chs      1553: {
                   1554:        paddr_t pa;
                   1555:        u_int pte;
1.8       cl       1556:
1.1       chs      1557:        PMAP_DPRINTF(PDB_FOLLOW,
                   1558:            ("pmap_extract(%p, %lx) -> ", pmap, va));
                   1559:
                   1560:        if (pmap_ste_v(pmap, va)) {
                   1561:                pte = *(u_int *)pmap_pte(pmap, va);
                   1562:                if (pte) {
                   1563:                        pa = (pte & PG_FRAME) | (va & ~PG_FRAME);
                   1564:                        if (pap != NULL)
                   1565:                                *pap = pa;
1.9       mycroft  1566: #ifdef DEBUG
                   1567:                        if (pmapdebug & PDB_FOLLOW)
                   1568:                                printf("%lx\n", pa);
                   1569: #endif
1.25      thorpej  1570:                        return true;
1.1       chs      1571:                }
                   1572:        }
                   1573: #ifdef DEBUG
1.9       mycroft  1574:        if (pmapdebug & PDB_FOLLOW)
                   1575:                printf("failed\n");
1.1       chs      1576: #endif
1.25      thorpej  1577:        return false;
1.1       chs      1578: }
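/*
 * Editor's sketch: the usual calling pattern checks pmap_extract()'s
 * boolean result before trusting the returned address (hypothetical
 * fragment):
 */
#if 0
	paddr_t pa;

	if (!pmap_extract(pmap_kernel(), va, &pa))
		panic("no mapping for va 0x%lx", va);
	/* pa now holds the physical address backing va */
#endif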
                   1579:
                   1580: /*
                   1581:  * pmap_copy:          [ INTERFACE ]
                   1582:  *
                   1583:  *     Copy the mapping range specified by src_addr/len
                   1584:  *     from the source map to the range dst_addr/len
                   1585:  *     in the destination map.
                   1586:  *
                   1587:  *     This routine is only advisory and need not do anything.
                   1588:  */
                   1589: void
1.20      tsutsui  1590: pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vaddr_t dst_addr, vsize_t len,
                   1591:     vaddr_t src_addr)
1.1       chs      1592: {
                   1593:
                   1594:        PMAP_DPRINTF(PDB_FOLLOW,
                   1595:            ("pmap_copy(%p, %p, %lx, %lx, %lx)\n",
                   1596:            dst_pmap, src_pmap, dst_addr, len, src_addr));
                   1597: }
                   1598:
                   1599: /*
                   1600:  * pmap_collect1():
                   1601:  *
                    1602:  *     Garbage-collect KPT pages.  Helper for the (bogus)
                    1603:  *     pmap_collect() below.
                   1604:  *
                   1605:  *     Note: THIS SHOULD GO AWAY, AND BE REPLACED WITH A BETTER
                   1606:  *     WAY OF HANDLING PT PAGES!
                   1607:  */
1.49      rmind    1608: static inline void
1.20      tsutsui  1609: pmap_collect1(pmap_t pmap, paddr_t startpa, paddr_t endpa)
1.1       chs      1610: {
                   1611:        paddr_t pa;
1.46      thorpej  1612:        struct pv_header *pvh;
1.1       chs      1613:        struct pv_entry *pv;
                   1614:        pt_entry_t *pte;
                   1615:        paddr_t kpa;
                   1616: #ifdef DEBUG
                   1617:        st_entry_t *ste;
                   1618:        int opmapdebug = 0;
                   1619: #endif
                   1620:
1.3       thorpej  1621:        for (pa = startpa; pa < endpa; pa += PAGE_SIZE) {
1.1       chs      1622:                struct kpt_page *kpt, **pkpt;
                   1623:
                   1624:                /*
                   1625:                 * Locate physical pages which are being used as kernel
                   1626:                 * page table pages.
                   1627:                 */
                   1628:
1.46      thorpej  1629:                pvh = pa_to_pvh(pa);
                   1630:                pv = &pvh->pvh_first;
                   1631:                if (pv->pv_pmap != pmap_kernel() ||
                   1632:                    !(pvh->pvh_attrs & PVH_PTPAGE))
1.1       chs      1633:                        continue;
                   1634:                do {
                   1635:                        if (pv->pv_ptste && pv->pv_ptpmap == pmap_kernel())
                   1636:                                break;
                   1637:                } while ((pv = pv->pv_next));
                   1638:                if (pv == NULL)
                   1639:                        continue;
                   1640: #ifdef DEBUG
                   1641:                if (pv->pv_va < (vaddr_t)Sysmap ||
                   1642:                    pv->pv_va >= (vaddr_t)Sysmap + M68K_MAX_PTSIZE) {
                   1643:                        printf("collect: kernel PT VA out of range\n");
                   1644:                        pmap_pvdump(pa);
                   1645:                        continue;
                   1646:                }
                   1647: #endif
1.3       thorpej  1648:                pte = (pt_entry_t *)(pv->pv_va + PAGE_SIZE);
1.1       chs      1649:                while (--pte >= (pt_entry_t *)pv->pv_va && *pte == PG_NV)
                   1650:                        ;
                   1651:                if (pte >= (pt_entry_t *)pv->pv_va)
                   1652:                        continue;
                   1653:
                   1654: #ifdef DEBUG
                   1655:                if (pmapdebug & (PDB_PTPAGE|PDB_COLLECT)) {
                   1656:                        printf("collect: freeing KPT page at %lx (ste %x@%p)\n",
1.20      tsutsui  1657:                            pv->pv_va, *pv->pv_ptste, pv->pv_ptste);
1.1       chs      1658:                        opmapdebug = pmapdebug;
                   1659:                        pmapdebug |= PDB_PTPAGE;
                   1660:                }
                   1661:
                   1662:                ste = pv->pv_ptste;
                   1663: #endif
                   1664:                /*
                   1665:                 * If all entries were invalid we can remove the page.
                   1666:                 * We call pmap_remove_entry to take care of invalidating
                   1667:                 * ST and Sysptmap entries.
                   1668:                 */
                   1669:
                   1670:                (void) pmap_extract(pmap, pv->pv_va, &kpa);
                   1671:                pmap_remove_mapping(pmap, pv->pv_va, NULL,
1.70      chs      1672:                    PRM_TFLUSH|PRM_CFLUSH, NULL);
1.1       chs      1673:
                   1674:                /*
                   1675:                 * Use the physical address to locate the original
                   1676:                 * (kmem_alloc assigned) address for the page and put
                   1677:                 * that page back on the free list.
                   1678:                 */
                   1679:
                   1680:                for (pkpt = &kpt_used_list, kpt = *pkpt;
                   1681:                     kpt != NULL;
                   1682:                     pkpt = &kpt->kpt_next, kpt = *pkpt)
                   1683:                        if (kpt->kpt_pa == kpa)
                   1684:                                break;
                   1685: #ifdef DEBUG
                   1686:                if (kpt == NULL)
                   1687:                        panic("pmap_collect: lost a KPT page");
                   1688:                if (pmapdebug & (PDB_PTPAGE|PDB_COLLECT))
                   1689:                        printf("collect: %lx (%lx) to free list\n",
1.20      tsutsui  1690:                            kpt->kpt_va, kpa);
1.1       chs      1691: #endif
                   1692:                *pkpt = kpt->kpt_next;
                   1693:                kpt->kpt_next = kpt_free_list;
                   1694:                kpt_free_list = kpt;
                   1695: #ifdef DEBUG
                   1696:                if (pmapdebug & (PDB_PTPAGE|PDB_COLLECT))
                   1697:                        pmapdebug = opmapdebug;
                   1698:
                   1699:                if (*ste != SG_NV)
                   1700:                        printf("collect: kernel STE at %p still valid (%x)\n",
1.20      tsutsui  1701:                            ste, *ste);
1.1       chs      1702:                ste = &Sysptmap[ste - pmap_ste(pmap_kernel(), 0)];
                   1703:                if (*ste != SG_NV)
                   1704:                        printf("collect: kernel PTmap at %p still valid (%x)\n",
1.20      tsutsui  1705:                            ste, *ste);
1.1       chs      1706: #endif
                   1707:        }
                   1708: }
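/*
 * Editor's note: the kpt_used_list manipulation above is the classic
 * pointer-to-pointer unlink for a singly linked list.  Restated in
 * isolation (illustrative only):
 */
#if 0
	struct kpt_page **pkpt, *kpt;

	for (pkpt = &kpt_used_list; (kpt = *pkpt) != NULL;
	     pkpt = &kpt->kpt_next)
		if (kpt->kpt_pa == kpa)
			break;
	if (kpt != NULL) {
		*pkpt = kpt->kpt_next;		/* unlink from used list */
		kpt->kpt_next = kpt_free_list;	/* push onto free list */
		kpt_free_list = kpt;
	}
#endif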
                   1709:
                   1710: /*
1.49      rmind    1711:  * pmap_collect:
                   1712:  *
                   1713:  *     Helper for pmap_enter_ptpage().
                   1714:  *
                   1715:  *     Garbage collects the physical map system for pages which are no
                   1716:  *     longer used.  Success need not be guaranteed -- that is, there
                   1717:  *     may well be pages which are not referenced, but others may be
                   1718:  *     collected.
                   1719:  */
                   1720: static void
                   1721: pmap_collect(void)
                   1722: {
1.69      cherry   1723:        int s;
                   1724:        uvm_physseg_t bank;
1.49      rmind    1725:
                   1726:        /*
                   1727:         * XXX This is very bogus.  We should handle kernel PT
                   1728:         * XXX pages much differently.
                   1729:         */
                   1730:
                   1731:        s = splvm();
1.69      cherry   1732:        for (bank = uvm_physseg_get_first();
                   1733:             uvm_physseg_valid_p(bank);
                   1734:             bank = uvm_physseg_get_next(bank)) {
                   1735:                pmap_collect1(pmap_kernel(), ptoa(uvm_physseg_get_start(bank)),
                   1736:                    ptoa(uvm_physseg_get_end(bank)));
1.49      rmind    1737:        }
                   1738:        splx(s);
                   1739: }
                   1740:
                   1741: /*
1.1       chs      1742:  * pmap_zero_page:             [ INTERFACE ]
                   1743:  *
                   1744:  *     Zero the specified (machine independent) page by mapping the page
                    1745:  *     into virtual memory and using zeropage() to clear its contents, one
                   1746:  *     machine dependent page at a time.
                   1747:  *
                   1748:  *     Note: WE DO NOT CURRENTLY LOCK THE TEMPORARY ADDRESSES!
                   1749:  *           (Actually, we go to splvm(), and since we don't
                   1750:  *           support multiple processors, this is sufficient.)
                   1751:  */
                   1752: void
1.20      tsutsui  1753: pmap_zero_page(paddr_t phys)
1.1       chs      1754: {
                   1755:        int npte;
                   1756:
                   1757:        PMAP_DPRINTF(PDB_FOLLOW, ("pmap_zero_page(%lx)\n", phys));
                   1758:
                   1759:        npte = phys | PG_V;
1.59      tsutsui  1760: #ifdef CACHE_HAVE_VAC
1.1       chs      1761:        if (pmap_aliasmask) {
                   1762:
                   1763:                /*
                   1764:                 * Cache-inhibit the mapping on VAC machines, as we would
                   1765:                 * be wasting the cache load.
                   1766:                 */
                   1767:
                   1768:                npte |= PG_CI;
                   1769:        }
                   1770: #endif
                   1771:
                   1772: #if defined(M68040) || defined(M68060)
                   1773: #if defined(M68020) || defined(M68030)
                   1774:        if (mmutype == MMU_68040)
                   1775: #endif
                   1776:        {
                   1777:                /*
                   1778:                 * Set copyback caching on the page; this is required
                   1779:                 * for cache consistency (since regular mappings are
                   1780:                 * copyback as well).
                   1781:                 */
                   1782:
                   1783:                npte |= PG_CCB;
                   1784:        }
                   1785: #endif
                   1786:
                   1787:        *caddr1_pte = npte;
                   1788:        TBIS((vaddr_t)CADDR1);
                   1789:
                   1790:        zeropage(CADDR1);
                   1791:
                   1792: #ifdef DEBUG
                   1793:        *caddr1_pte = PG_NV;
                   1794:        TBIS((vaddr_t)CADDR1);
                   1795: #endif
                   1796: }
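/*
 * Editor's sketch: CADDR1/CADDR2 are reserved kernel VAs used as
 * scratch windows by pmap_zero_page() and pmap_copy_page().  The
 * general temporary-mapping pattern is, in outline:
 */
#if 0
	*caddr1_pte = pa | PG_V;	/* plus PG_CI/PG_CCB as chosen above */
	TBIS((vaddr_t)CADDR1);		/* kill any stale TLB entry */
	zeropage(CADDR1);		/* operate through the window */
	*caddr1_pte = PG_NV;		/* tear down (DEBUG-only above) */
	TBIS((vaddr_t)CADDR1);
#endif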
                   1797:
                   1798: /*
                   1799:  * pmap_copy_page:             [ INTERFACE ]
                   1800:  *
                   1801:  *     Copy the specified (machine independent) page by mapping the page
                    1802:  *     into virtual memory and using copypage() to copy the page, one machine
                   1803:  *     dependent page at a time.
                   1804:  *
                   1805:  *     Note: WE DO NOT CURRENTLY LOCK THE TEMPORARY ADDRESSES!
                   1806:  *           (Actually, we go to splvm(), and since we don't
                   1807:  *           support multiple processors, this is sufficient.)
                   1808:  */
                   1809: void
1.20      tsutsui  1810: pmap_copy_page(paddr_t src, paddr_t dst)
1.1       chs      1811: {
                   1812:        int npte1, npte2;
                   1813:
                   1814:        PMAP_DPRINTF(PDB_FOLLOW, ("pmap_copy_page(%lx, %lx)\n", src, dst));
                   1815:
                   1816:        npte1 = src | PG_RO | PG_V;
                   1817:        npte2 = dst | PG_V;
1.59      tsutsui  1818: #ifdef CACHE_HAVE_VAC
1.1       chs      1819:        if (pmap_aliasmask) {
                   1820:
                   1821:                /*
                   1822:                 * Cache-inhibit the mapping on VAC machines, as we would
                   1823:                 * be wasting the cache load.
                   1824:                 */
                   1825:
                   1826:                npte1 |= PG_CI;
                   1827:                npte2 |= PG_CI;
                   1828:        }
                   1829: #endif
                   1830:
                   1831: #if defined(M68040) || defined(M68060)
                   1832: #if defined(M68020) || defined(M68030)
                   1833:        if (mmutype == MMU_68040)
                   1834: #endif
                   1835:        {
                   1836:                /*
                   1837:                 * Set copyback caching on the pages; this is required
                   1838:                 * for cache consistency (since regular mappings are
                   1839:                 * copyback as well).
                   1840:                 */
                   1841:
                   1842:                npte1 |= PG_CCB;
                   1843:                npte2 |= PG_CCB;
                   1844:        }
                   1845: #endif
                   1846:
                   1847:        *caddr1_pte = npte1;
                   1848:        TBIS((vaddr_t)CADDR1);
                   1849:
                   1850:        *caddr2_pte = npte2;
                   1851:        TBIS((vaddr_t)CADDR2);
                   1852:
                   1853:        copypage(CADDR1, CADDR2);
                   1854:
                   1855: #ifdef DEBUG
                   1856:        *caddr1_pte = PG_NV;
                   1857:        TBIS((vaddr_t)CADDR1);
                   1858:
                   1859:        *caddr2_pte = PG_NV;
                   1860:        TBIS((vaddr_t)CADDR2);
                   1861: #endif
                   1862: }
                   1863:
                   1864: /*
                   1865:  * pmap_clear_modify:          [ INTERFACE ]
                   1866:  *
                   1867:  *     Clear the modify bits on the specified physical page.
                   1868:  */
1.23      thorpej  1869: bool
1.20      tsutsui  1870: pmap_clear_modify(struct vm_page *pg)
1.1       chs      1871: {
                   1872:        paddr_t pa = VM_PAGE_TO_PHYS(pg);
                   1873:
                   1874:        PMAP_DPRINTF(PDB_FOLLOW, ("pmap_clear_modify(%p)\n", pg));
                   1875:
                   1876:        return pmap_changebit(pa, 0, ~PG_M);
                   1877: }
                   1878:
                   1879: /*
                   1880:  * pmap_clear_reference:       [ INTERFACE ]
                   1881:  *
                   1882:  *     Clear the reference bit on the specified physical page.
                   1883:  */
1.23      thorpej  1884: bool
1.20      tsutsui  1885: pmap_clear_reference(struct vm_page *pg)
1.1       chs      1886: {
                   1887:        paddr_t pa = VM_PAGE_TO_PHYS(pg);
                   1888:
                   1889:        PMAP_DPRINTF(PDB_FOLLOW, ("pmap_clear_reference(%p)\n", pg));
                   1890:
                   1891:        return pmap_changebit(pa, 0, ~PG_U);
                   1892: }
                   1893:
                   1894: /*
                   1895:  * pmap_is_referenced:         [ INTERFACE ]
                   1896:  *
                   1897:  *     Return whether or not the specified physical page is referenced
                   1898:  *     by any physical maps.
                   1899:  */
1.23      thorpej  1900: bool
1.20      tsutsui  1901: pmap_is_referenced(struct vm_page *pg)
1.1       chs      1902: {
                   1903:        paddr_t pa = VM_PAGE_TO_PHYS(pg);
                   1904:
1.20      tsutsui  1905:        return pmap_testbit(pa, PG_U);
1.1       chs      1906: }
                   1907:
                   1908: /*
                   1909:  * pmap_is_modified:           [ INTERFACE ]
                   1910:  *
                   1911:  *     Return whether or not the specified physical page is modified
                   1912:  *     by any physical maps.
                   1913:  */
1.23      thorpej  1914: bool
1.20      tsutsui  1915: pmap_is_modified(struct vm_page *pg)
1.1       chs      1916: {
                   1917:        paddr_t pa = VM_PAGE_TO_PHYS(pg);
                   1918:
1.20      tsutsui  1919:        return pmap_testbit(pa, PG_M);
1.1       chs      1920: }
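/*
 * Editor's note: the four reference/modify routines above all reduce
 * to pmap_testbit()/pmap_changebit() on the PG_U and PG_M bits kept
 * in the PTEs and summarized in pvh_attrs.  Hypothetical fragment
 * showing the usual clean-a-page sequence in MI code:
 */
#if 0
	if (pmap_is_modified(pg)) {
		/* ... write the page back to its backing store ... */
		pmap_clear_modify(pg);	/* clears PG_M via pmap_changebit() */
	}
#endif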
                   1921:
                   1922: /*
                   1923:  * pmap_phys_address:          [ INTERFACE ]
                   1924:  *
                   1925:  *     Return the physical address corresponding to the specified
                   1926:  *     cookie.  Used by the device pager to decode a device driver's
                   1927:  *     mmap entry point return value.
                   1928:  *
                   1929:  *     Note: no locking is necessary in this function.
                   1930:  */
                   1931: paddr_t
1.32      macallan 1932: pmap_phys_address(paddr_t ppn)
1.1       chs      1933: {
1.20      tsutsui  1934:        return m68k_ptob(ppn);
1.1       chs      1935: }
                   1936:
1.59      tsutsui  1937: #ifdef CACHE_HAVE_VAC
1.1       chs      1938: /*
                   1939:  * pmap_prefer:                        [ INTERFACE ]
                   1940:  *
                   1941:  *     Find the first virtual address >= *vap that does not
                   1942:  *     cause a virtually-addressed cache alias problem.
                   1943:  */
                   1944: void
1.20      tsutsui  1945: pmap_prefer(vaddr_t foff, vaddr_t *vap)
1.1       chs      1946: {
                   1947:        vaddr_t va;
                   1948:        vsize_t d;
                   1949:
                   1950: #ifdef M68K_MMU_MOTOROLA
                   1951:        if (pmap_aliasmask)
                   1952: #endif
                   1953:        {
                   1954:                va = *vap;
                   1955:                d = foff - va;
                   1956:                d &= pmap_aliasmask;
                   1957:                *vap = va + d;
                   1958:        }
                   1959: }
1.59      tsutsui  1960: #endif /* CACHE_HAVE_VAC */
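/*
 * Editor's note: pmap_prefer() rounds *vap up so that the new VA and
 * foff index the same VAC line, i.e. (va ^ foff) & pmap_aliasmask is
 * zero.  A worked (hypothetical) example with a 16KB alias mask:
 *
 *	foff = 0x5000, *vap = 0x10400, pmap_aliasmask = 0x3fff
 *	d    = (0x5000 - 0x10400) & 0x3fff = 0xc00
 *	*vap = 0x10400 + 0xc00 = 0x11000
 *	check: 0x11000 & 0x3fff == 0x1000 == 0x5000 & 0x3fff
 */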
1.1       chs      1961:
                   1962: /*
                   1963:  * Miscellaneous support routines follow
                   1964:  */
                   1965:
                   1966: /*
                   1967:  * pmap_remove_mapping:
                   1968:  *
                   1969:  *     Invalidate a single page denoted by pmap/va.
                   1970:  *
                   1971:  *     If (pte != NULL), it is the already computed PTE for the page.
                   1972:  *
                   1973:  *     If (flags & PRM_TFLUSH), we must invalidate any TLB information.
                   1974:  *
                   1975:  *     If (flags & PRM_CFLUSH), we must flush/invalidate any cache
                   1976:  *     information.
                   1977:  *
                   1978:  *     If (flags & PRM_KEEPPTPAGE), we don't free the page table page
                   1979:  *     if the reference drops to zero.
                   1980:  */
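/*
 * Editor's note: callers combine these PRM_* flags; e.g. pmap_enter()
 * above passes PRM_TFLUSH|PRM_CFLUSH|PRM_KEEPPTPAGE so the PT page
 * survives for the mapping about to be re-entered, while
 * pmap_page_protect() passes PRM_TFLUSH|PRM_CFLUSH and lets emptied
 * PT pages be reclaimed.
 */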
                   1981: /* static */
                   1982: void
1.70      chs      1983: pmap_remove_mapping(pmap_t pmap, vaddr_t va, pt_entry_t *pte, int flags,
                   1984:     struct pv_entry **opvp)
1.1       chs      1985: {
                   1986:        paddr_t pa;
1.46      thorpej  1987:        struct pv_header *pvh;
1.70      chs      1988:        struct pv_entry *pv, *npv, *opv = NULL;
1.1       chs      1989:        struct pmap *ptpmap;
                   1990:        st_entry_t *ste;
                   1991:        int s, bits;
                   1992: #ifdef DEBUG
                   1993:        pt_entry_t opte;
                   1994: #endif
                   1995:
                   1996:        PMAP_DPRINTF(PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT,
1.70      chs      1997:            ("pmap_remove_mapping(%p, %lx, %p, %x, %p)\n",
                   1998:            pmap, va, pte, flags, opvp));
1.1       chs      1999:
                   2000:        /*
                   2001:         * PTE not provided, compute it from pmap and va.
                   2002:         */
                   2003:
                   2004:        if (pte == NULL) {
                   2005:                pte = pmap_pte(pmap, va);
                   2006:                if (*pte == PG_NV)
                   2007:                        return;
                   2008:        }
                   2009:
1.59      tsutsui  2010: #ifdef CACHE_HAVE_VAC
1.1       chs      2011:        if (pmap_aliasmask && (flags & PRM_CFLUSH)) {
                   2012:
                   2013:                /*
                   2014:                 * Purge kernel side of VAC to ensure we get the correct
                   2015:                 * state of any hardware maintained bits.
                   2016:                 */
                   2017:
                   2018:                DCIS();
                   2019:
                   2020:                /*
                   2021:                 * If this is a non-CI user mapping for the current process,
                   2022:                 * flush the VAC.  Note that the kernel side was flushed
                   2023:                 * above so we don't worry about non-CI kernel mappings.
                   2024:                 */
                   2025:
                   2026:                if (active_user_pmap(pmap) && !pmap_pte_ci(pte)) {
                   2027:                        DCIU();
                   2028:                }
                   2029:        }
                   2030: #endif
                   2031:
                   2032:        pa = pmap_pte_pa(pte);
                   2033: #ifdef DEBUG
                   2034:        opte = *pte;
                   2035: #endif
                   2036:
                   2037:        /*
                   2038:         * Update statistics
                   2039:         */
                   2040:
                   2041:        if (pmap_pte_w(pte))
                   2042:                pmap->pm_stats.wired_count--;
                   2043:        pmap->pm_stats.resident_count--;
                   2044:
                   2045: #if defined(M68040) || defined(M68060)
                   2046: #if defined(M68020) || defined(M68030)
                   2047:        if (mmutype == MMU_68040)
                   2048: #endif
                   2049:        if ((flags & PRM_CFLUSH)) {
                   2050:                DCFP(pa);
                   2051:                ICPP(pa);
                   2052:        }
                   2053: #endif
                   2054:
                   2055:        /*
                   2056:         * Invalidate the PTE after saving the reference modify info.
                   2057:         */
                   2058:
                   2059:        PMAP_DPRINTF(PDB_REMOVE, ("remove: invalidating pte at %p\n", pte));
                   2060:        bits = *pte & (PG_U|PG_M);
                   2061:        *pte = PG_NV;
                   2062:        if ((flags & PRM_TFLUSH) && active_pmap(pmap))
                   2063:                TBIS(va);
                   2064:
                   2065:        /*
                   2066:         * For user mappings decrement the wiring count on
                   2067:         * the PT page.
                   2068:         */
                   2069:
                   2070:        if (pmap != pmap_kernel()) {
                   2071:                vaddr_t ptpva = trunc_page((vaddr_t)pte);
                   2072:                int refs = pmap_ptpage_delref(ptpva);
                   2073: #ifdef DEBUG
                   2074:                if (pmapdebug & PDB_WIRING)
                   2075:                        pmap_check_wiring("remove", ptpva);
                   2076: #endif
                   2077:
                   2078:                /*
                   2079:                 * If reference count drops to 0, and we're not instructed
                   2080:                 * to keep it around, free the PT page.
                   2081:                 */
                   2082:
                   2083:                if (refs == 0 && (flags & PRM_KEEPPTPAGE) == 0) {
                   2084: #ifdef DIAGNOSTIC
1.46      thorpej  2085:                        struct pv_header *ptppvh;
1.16      tsutsui  2086:                        struct pv_entry *ptppv;
1.1       chs      2087: #endif
1.15      tsutsui  2088:                        paddr_t ptppa;
1.1       chs      2089:
1.15      tsutsui  2090:                        ptppa = pmap_pte_pa(pmap_pte(pmap_kernel(), ptpva));
1.1       chs      2091: #ifdef DIAGNOSTIC
1.15      tsutsui  2092:                        if (PAGE_IS_MANAGED(ptppa) == 0)
1.1       chs      2093:                                panic("pmap_remove_mapping: unmanaged PT page");
1.46      thorpej  2094:                        ptppvh = pa_to_pvh(ptppa);
                   2095:                        ptppv = &ptppvh->pvh_first;
1.16      tsutsui  2096:                        if (ptppv->pv_ptste == NULL)
1.1       chs      2097:                                panic("pmap_remove_mapping: ptste == NULL");
1.16      tsutsui  2098:                        if (ptppv->pv_pmap != pmap_kernel() ||
                   2099:                            ptppv->pv_va != ptpva ||
                   2100:                            ptppv->pv_next != NULL)
1.1       chs      2101:                                panic("pmap_remove_mapping: "
                   2102:                                    "bad PT page pmap %p, va 0x%lx, next %p",
1.16      tsutsui  2103:                                    ptppv->pv_pmap, ptppv->pv_va,
                   2104:                                    ptppv->pv_next);
1.1       chs      2105: #endif
                   2106:                        pmap_remove_mapping(pmap_kernel(), ptpva,
1.70      chs      2107:                            NULL, PRM_TFLUSH|PRM_CFLUSH, NULL);
1.71.6.1! ad       2108:                        rw_enter(uvm_kernel_object->vmobjlock, RW_WRITER);
1.15      tsutsui  2109:                        uvm_pagefree(PHYS_TO_VM_PAGE(ptppa));
1.71.6.1! ad       2110:                        rw_exit(uvm_kernel_object->vmobjlock);
1.1       chs      2111:                        PMAP_DPRINTF(PDB_REMOVE|PDB_PTPAGE,
                   2112:                            ("remove: PT page 0x%lx (0x%lx) freed\n",
1.15      tsutsui  2113:                            ptpva, ptppa));
1.1       chs      2114:                }
                   2115:        }
                   2116:
                   2117:        /*
                   2118:         * If this isn't a managed page, we are all done.
                   2119:         */
                   2120:
                   2121:        if (PAGE_IS_MANAGED(pa) == 0)
                   2122:                return;
                   2123:
                   2124:        /*
                   2125:         * Otherwise remove it from the PV table
                   2126:         * (raise IPL since we may be called at interrupt time).
                   2127:         */
                   2128:
1.46      thorpej  2129:        pvh = pa_to_pvh(pa);
                   2130:        pv = &pvh->pvh_first;
1.1       chs      2131:        ste = NULL;
                   2132:        s = splvm();
                   2133:
                   2134:        /*
                   2135:         * If it is the first entry on the list, it is actually
                   2136:         * in the header and we must copy the following entry up
                   2137:         * to the header.  Otherwise we must search the list for
                   2138:         * the entry.  In either case we free the now unused entry.
                   2139:         */
                   2140:
                   2141:        if (pmap == pv->pv_pmap && va == pv->pv_va) {
                   2142:                ste = pv->pv_ptste;
                   2143:                ptpmap = pv->pv_ptpmap;
                   2144:                npv = pv->pv_next;
                   2145:                if (npv) {
                   2146:                        *pv = *npv;
1.70      chs      2147:                        opv = npv;
1.1       chs      2148:                } else
                   2149:                        pv->pv_pmap = NULL;
                   2150:        } else {
                   2151:                for (npv = pv->pv_next; npv; npv = npv->pv_next) {
                   2152:                        if (pmap == npv->pv_pmap && va == npv->pv_va)
                   2153:                                break;
                   2154:                        pv = npv;
                   2155:                }
                   2156: #ifdef DEBUG
                   2157:                if (npv == NULL)
                    2158:                        panic("pmap_remove_mapping: PA not in pv_tab");
                   2159: #endif
                   2160:                ste = npv->pv_ptste;
                   2161:                ptpmap = npv->pv_ptpmap;
                   2162:                pv->pv_next = npv->pv_next;
1.70      chs      2163:                opv = npv;
1.46      thorpej  2164:                pvh = pa_to_pvh(pa);
                   2165:                pv = &pvh->pvh_first;
1.1       chs      2166:        }
                   2167:
1.59      tsutsui  2168: #ifdef CACHE_HAVE_VAC
1.1       chs      2169:
                   2170:        /*
                   2171:         * If only one mapping left we no longer need to cache inhibit
                   2172:         */
                   2173:
                   2174:        if (pmap_aliasmask &&
1.46      thorpej  2175:            pv->pv_pmap && pv->pv_next == NULL && (pvh->pvh_attrs & PVH_CI)) {
1.1       chs      2176:                PMAP_DPRINTF(PDB_CACHE,
                   2177:                    ("remove: clearing CI for pa %lx\n", pa));
1.46      thorpej  2178:                pvh->pvh_attrs &= ~PVH_CI;
1.1       chs      2179:                pmap_changebit(pa, 0, ~PG_CI);
                   2180: #ifdef DEBUG
                   2181:                if ((pmapdebug & (PDB_CACHE|PDB_PVDUMP)) ==
                   2182:                    (PDB_CACHE|PDB_PVDUMP))
                   2183:                        pmap_pvdump(pa);
                   2184: #endif
                   2185:        }
                   2186: #endif
                   2187:
                   2188:        /*
                   2189:         * If this was a PT page we must also remove the
                   2190:         * mapping from the associated segment table.
                   2191:         */
                   2192:
                   2193:        if (ste) {
                   2194:                PMAP_DPRINTF(PDB_REMOVE|PDB_PTPAGE,
                   2195:                    ("remove: ste was %x@%p pte was %x@%p\n",
                   2196:                    *ste, ste, opte, pmap_pte(pmap, va)));
                   2197: #if defined(M68040) || defined(M68060)
                   2198: #if defined(M68020) || defined(M68030)
                   2199:                if (mmutype == MMU_68040)
                   2200: #endif
                   2201:                {
                   2202:                        st_entry_t *este = &ste[NPTEPG/SG4_LEV3SIZE];
                   2203:
                   2204:                        while (ste < este)
                   2205:                                *ste++ = SG_NV;
                   2206: #ifdef DEBUG
                   2207:                        ste -= NPTEPG/SG4_LEV3SIZE;
                   2208: #endif
                   2209:                }
                   2210: #if defined(M68020) || defined(M68030)
                   2211:                else
                   2212: #endif
                   2213: #endif
                   2214: #if defined(M68020) || defined(M68030)
                   2215:                *ste = SG_NV;
                   2216: #endif
                   2217:
                   2218:                /*
                   2219:                 * If it was a user PT page, we decrement the
                   2220:                 * reference count on the segment table as well,
                   2221:                 * freeing it if it is now empty.
                   2222:                 */
                   2223:
                   2224:                if (ptpmap != pmap_kernel()) {
                   2225:                        PMAP_DPRINTF(PDB_REMOVE|PDB_SEGTAB,
                   2226:                            ("remove: stab %p, refcnt %d\n",
                   2227:                            ptpmap->pm_stab, ptpmap->pm_sref - 1));
                   2228: #ifdef DEBUG
                   2229:                        if ((pmapdebug & PDB_PARANOIA) &&
                   2230:                            ptpmap->pm_stab !=
                   2231:                             (st_entry_t *)trunc_page((vaddr_t)ste))
                   2232:                                panic("remove: bogus ste");
                   2233: #endif
                   2234:                        if (--(ptpmap->pm_sref) == 0) {
                   2235:                                PMAP_DPRINTF(PDB_REMOVE|PDB_SEGTAB,
                   2236:                                    ("remove: free stab %p\n",
                   2237:                                    ptpmap->pm_stab));
1.14      yamt     2238:                                uvm_km_free(st_map, (vaddr_t)ptpmap->pm_stab,
                   2239:                                    M68K_STSIZE, UVM_KMF_WIRED);
1.1       chs      2240:                                ptpmap->pm_stab = Segtabzero;
                   2241:                                ptpmap->pm_stpa = Segtabzeropa;
                   2242: #if defined(M68040) || defined(M68060)
                   2243: #if defined(M68020) || defined(M68030)
                   2244:                                if (mmutype == MMU_68040)
                   2245: #endif
                   2246:                                        ptpmap->pm_stfree = protostfree;
                   2247: #endif
                   2248:
                   2249:                                /*
                   2250:                                 * XXX may have changed segment table
                   2251:                                 * pointer for current process so
                   2252:                                 * update now to reload hardware.
                   2253:                                 */
                   2254:
                   2255:                                if (active_user_pmap(ptpmap))
                   2256:                                        PMAP_ACTIVATE(ptpmap, 1);
                   2257:                        }
                   2258:                }
1.46      thorpej  2259:                pvh->pvh_attrs &= ~PVH_PTPAGE;
1.1       chs      2260:                ptpmap->pm_ptpages--;
                   2261:        }
                   2262:
                   2263:        /*
                   2264:         * Update saved attributes for managed page
                   2265:         */
                   2266:
1.46      thorpej  2267:        pvh->pvh_attrs |= bits;
1.1       chs      2268:        splx(s);
1.70      chs      2269:
                   2270:        if (opvp != NULL)
                   2271:                *opvp = opv;
                   2272:        else if (opv != NULL)
                   2273:                pmap_free_pv(opv);
1.1       chs      2274: }
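/*
 * Illustrative sketch only (not part of this file, and the helper name
 * below is hypothetical): a range-removal routine such as pmap_remove()
 * is expected to reduce to per-page calls of pmap_remove_mapping(),
 * roughly as follows.
 */
#ifdef notdef
static void
example_remove_range(pmap_t pmap, vaddr_t va, vaddr_t endva)
{
	pt_entry_t *pte;

	for (; va < endva; va += PAGE_SIZE) {
		pte = pmap_pte(pmap, va);
		/* Only valid PTEs need to be torn down. */
		if (pmap_pte_v(pte))
			pmap_remove_mapping(pmap, va, pte,
			    PRM_TFLUSH|PRM_CFLUSH, NULL);
	}
}
#endif /* notdef */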
                   2275:
                   2276: /*
                   2277:  * pmap_testbit:
                   2278:  *
                   2279:  *     Test the modified/referenced bits of a physical page.
                   2280:  */
                   2281: /* static */
1.23      thorpej  2282: bool
1.20      tsutsui  2283: pmap_testbit(paddr_t pa, int bit)
1.1       chs      2284: {
1.46      thorpej  2285:        struct pv_header *pvh;
1.1       chs      2286:        struct pv_entry *pv;
                   2287:        pt_entry_t *pte;
                   2288:        int s;
                   2289:
1.46      thorpej  2290:        pvh = pa_to_pvh(pa);
                   2291:        pv = &pvh->pvh_first;
1.1       chs      2292:        s = splvm();
                   2293:
                   2294:        /*
                   2295:         * Check saved info first
                   2296:         */
                   2297:
1.46      thorpej  2298:        if (pvh->pvh_attrs & bit) {
1.1       chs      2299:                splx(s);
1.25      thorpej  2300:                return true;
1.1       chs      2301:        }
                   2302:
1.59      tsutsui  2303: #ifdef CACHE_HAVE_VAC
1.1       chs      2304:
                   2305:        /*
                   2306:         * Flush VAC to get correct state of any hardware maintained bits.
                   2307:         */
                   2308:
                   2309:        if (pmap_aliasmask && (bit & (PG_U|PG_M)))
                   2310:                DCIS();
                   2311: #endif
                   2312:
                   2313:        /*
                   2314:         * Not found.  Check current mappings, returning immediately if
                   2315:         * found.  Cache a hit to speed future lookups.
                   2316:         */
                   2317:
                   2318:        if (pv->pv_pmap != NULL) {
                   2319:                for (; pv; pv = pv->pv_next) {
                   2320:                        pte = pmap_pte(pv->pv_pmap, pv->pv_va);
                   2321:                        if (*pte & bit) {
1.46      thorpej  2322:                                pvh->pvh_attrs |= bit;
1.1       chs      2323:                                splx(s);
1.25      thorpej  2324:                                return true;
1.1       chs      2325:                        }
                   2326:                }
                   2327:        }
                   2328:        splx(s);
1.25      thorpej  2329:        return false;
1.1       chs      2330: }
                   2331:
                   2332: /*
                   2333:  * pmap_changebit:
                   2334:  *
                   2335:  *     Change the modified/referenced bits, or other PTE bits,
                   2336:  *     for a physical page.
                   2337:  */
                   2338: /* static */
1.23      thorpej  2339: bool
1.20      tsutsui  2340: pmap_changebit(paddr_t pa, int set, int mask)
1.1       chs      2341: {
1.46      thorpej  2342:        struct pv_header *pvh;
1.1       chs      2343:        struct pv_entry *pv;
                   2344:        pt_entry_t *pte, npte;
                   2345:        vaddr_t va;
                   2346:        int s;
1.59      tsutsui  2347: #if defined(CACHE_HAVE_VAC) || defined(M68040) || defined(M68060)
1.25      thorpej  2348:        bool firstpage = true;
1.1       chs      2349: #endif
1.23      thorpej  2350:        bool r;
1.1       chs      2351:
                   2352:        PMAP_DPRINTF(PDB_BITS,
                   2353:            ("pmap_changebit(%lx, %x, %x)\n", pa, set, mask));
                   2354:
1.46      thorpej  2355:        pvh = pa_to_pvh(pa);
                   2356:        pv = &pvh->pvh_first;
1.1       chs      2357:        s = splvm();
                   2358:
                   2359:        /*
                   2360:         * Clear saved attributes (modify, reference)
                   2361:         */
                   2362:
1.46      thorpej  2363:        r = (pvh->pvh_attrs & ~mask) != 0;
                   2364:        pvh->pvh_attrs &= mask;
1.1       chs      2365:
                   2366:        /*
                    2367:         * Loop over all current mappings, setting/clearing as appropriate.
                   2368:         * If setting RO do we need to clear the VAC?
                   2369:         */
                   2370:
                   2371:        if (pv->pv_pmap != NULL) {
                   2372: #ifdef DEBUG
                   2373:                int toflush = 0;
                   2374: #endif
                   2375:                for (; pv; pv = pv->pv_next) {
                   2376: #ifdef DEBUG
                   2377:                        toflush |= (pv->pv_pmap == pmap_kernel()) ? 2 : 1;
                   2378: #endif
                   2379:                        va = pv->pv_va;
                   2380:                        pte = pmap_pte(pv->pv_pmap, va);
1.59      tsutsui  2381: #ifdef CACHE_HAVE_VAC
1.1       chs      2382:
                   2383:                        /*
                   2384:                         * Flush VAC to ensure we get correct state of HW bits
                   2385:                         * so we don't clobber them.
                   2386:                         */
                   2387:
                   2388:                        if (firstpage && pmap_aliasmask) {
1.25      thorpej  2389:                                firstpage = false;
1.1       chs      2390:                                DCIS();
                   2391:                        }
                   2392: #endif
                   2393:                        npte = (*pte | set) & mask;
                   2394:                        if (*pte != npte) {
1.25      thorpej  2395:                                r = true;
1.1       chs      2396: #if defined(M68040) || defined(M68060)
                   2397:                                /*
                   2398:                                 * If we are changing caching status or
                   2399:                                 * protection make sure the caches are
                   2400:                                 * flushed (but only once).
                   2401:                                 */
                   2402:                                if (firstpage &&
                   2403: #if defined(M68020) || defined(M68030)
                   2404:                                    (mmutype == MMU_68040) &&
                   2405: #endif
                   2406:                                    ((set == PG_RO) ||
                   2407:                                     (set & PG_CMASK) ||
                   2408:                                     (mask & PG_CMASK) == 0)) {
1.25      thorpej  2409:                                        firstpage = false;
1.1       chs      2410:                                        DCFP(pa);
                   2411:                                        ICPP(pa);
                   2412:                                }
                   2413: #endif
                   2414:                                *pte = npte;
                   2415:                                if (active_pmap(pv->pv_pmap))
                   2416:                                        TBIS(va);
                   2417:                        }
                   2418:                }
                   2419:        }
                   2420:        splx(s);
1.20      tsutsui  2421:        return r;
1.1       chs      2422: }
                   2423:
                   2424: /*
                   2425:  * pmap_enter_ptpage:
                   2426:  *
                   2427:  *     Allocate and map a PT page for the specified pmap/va pair.
                   2428:  */
                   2429: /* static */
1.22      martin   2430: int
1.23      thorpej  2431: pmap_enter_ptpage(pmap_t pmap, vaddr_t va, bool can_fail)
1.1       chs      2432: {
                   2433:        paddr_t ptpa;
                   2434:        struct vm_page *pg;
1.46      thorpej  2435:        struct pv_header *pvh;
1.1       chs      2436:        struct pv_entry *pv;
                   2437:        st_entry_t *ste;
                   2438:        int s;
                   2439:
                   2440:        PMAP_DPRINTF(PDB_FOLLOW|PDB_ENTER|PDB_PTPAGE,
                   2441:            ("pmap_enter_ptpage: pmap %p, va %lx\n", pmap, va));
                   2442:
                   2443:        /*
                   2444:         * Allocate a segment table if necessary.  Note that it is allocated
                   2445:         * from a private map and not pt_map.  This keeps user page tables
                   2446:         * aligned on segment boundaries in the kernel address space.
                   2447:         * The segment table is wired down.  It will be freed whenever the
                   2448:         * reference count drops to zero.
                   2449:         */
                   2450:        if (pmap->pm_stab == Segtabzero) {
                   2451:                pmap->pm_stab = (st_entry_t *)
1.14      yamt     2452:                    uvm_km_alloc(st_map, M68K_STSIZE, 0,
1.22      martin   2453:                    UVM_KMF_WIRED | UVM_KMF_ZERO |
                   2454:                    (can_fail ? UVM_KMF_NOWAIT : 0));
                   2455:                if (pmap->pm_stab == NULL) {
                   2456:                        pmap->pm_stab = Segtabzero;
                   2457:                        return ENOMEM;
                   2458:                }
1.1       chs      2459:                (void) pmap_extract(pmap_kernel(), (vaddr_t)pmap->pm_stab,
                   2460:                    (paddr_t *)&pmap->pm_stpa);
                   2461: #if defined(M68040) || defined(M68060)
                   2462: #if defined(M68020) || defined(M68030)
                   2463:                if (mmutype == MMU_68040)
                   2464: #endif
                   2465:                {
1.21      mhitch   2466:                        pt_entry_t      *pte;
                   2467:
                   2468:                        pte = pmap_pte(pmap_kernel(), pmap->pm_stab);
                   2469:                        *pte = (*pte & ~PG_CMASK) | PG_CI;
1.1       chs      2470:                        pmap->pm_stfree = protostfree;
                   2471:                }
                   2472: #endif
                   2473:                /*
                   2474:                 * XXX may have changed segment table pointer for current
                   2475:                 * process so update now to reload hardware.
                   2476:                 */
                   2477:                if (active_user_pmap(pmap))
                   2478:                        PMAP_ACTIVATE(pmap, 1);
                   2479:
                   2480:                PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB,
                   2481:                    ("enter: pmap %p stab %p(%p)\n",
                   2482:                    pmap, pmap->pm_stab, pmap->pm_stpa));
                   2483:        }
                   2484:
                   2485:        ste = pmap_ste(pmap, va);
                   2486: #if defined(M68040) || defined(M68060)
                   2487:        /*
                   2488:         * Allocate level 2 descriptor block if necessary
                   2489:         */
                   2490: #if defined(M68020) || defined(M68030)
                   2491:        if (mmutype == MMU_68040)
                   2492: #endif
                   2493:        {
                   2494:                if (*ste == SG_NV) {
                   2495:                        int ix;
1.26      christos 2496:                        void *addr;
1.1       chs      2497:
                   2498:                        ix = bmtol2(pmap->pm_stfree);
                   2499:                        if (ix == -1)
                   2500:                                panic("enter: out of address space"); /* XXX */
                   2501:                        pmap->pm_stfree &= ~l2tobm(ix);
1.26      christos 2502:                        addr = (void *)&pmap->pm_stab[ix*SG4_LEV2SIZE];
1.1       chs      2503:                        memset(addr, 0, SG4_LEV2SIZE*sizeof(st_entry_t));
1.26      christos 2504:                        addr = (void *)&pmap->pm_stpa[ix*SG4_LEV2SIZE];
1.1       chs      2505:                        *ste = (u_int)addr | SG_RW | SG_U | SG_V;
                   2506:
                   2507:                        PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB,
                   2508:                            ("enter: alloc ste2 %d(%p)\n", ix, addr));
                   2509:                }
                   2510:                ste = pmap_ste2(pmap, va);
                   2511:                /*
                   2512:                 * Since a level 2 descriptor maps a block of SG4_LEV3SIZE
                   2513:                 * level 3 descriptors, we need a chunk of NPTEPG/SG4_LEV3SIZE
1.3       thorpej  2514:                 * (16) such descriptors (PAGE_SIZE/SG4_LEV3SIZE bytes) to map a
1.1       chs      2515:                 * PT page--the unit of allocation.  We set `ste' to point
                   2516:                 * to the first entry of that chunk which is validated in its
                   2517:                 * entirety below.
                   2518:                 */
1.3       thorpej  2519:                ste = (st_entry_t *)((int)ste & ~(PAGE_SIZE/SG4_LEV3SIZE-1));
1.1       chs      2520:
                   2521:                PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB,
                   2522:                    ("enter: ste2 %p (%p)\n", pmap_ste2(pmap, va), ste));
                   2523:        }
                   2524: #endif
                   2525:        va = trunc_page((vaddr_t)pmap_pte(pmap, va));
                   2526:
                   2527:        /*
                   2528:         * In the kernel we allocate a page from the kernel PT page
                   2529:         * free list and map it into the kernel page table map (via
                   2530:         * pmap_enter).
                   2531:         */
                   2532:        if (pmap == pmap_kernel()) {
                   2533:                struct kpt_page *kpt;
                   2534:
                   2535:                s = splvm();
                   2536:                if ((kpt = kpt_free_list) == NULL) {
                   2537:                        /*
                   2538:                         * No PT pages available.
                   2539:                         * Try once to free up unused ones.
                   2540:                         */
                   2541:                        PMAP_DPRINTF(PDB_COLLECT,
                   2542:                            ("enter: no KPT pages, collecting...\n"));
1.49      rmind    2543:                        pmap_collect();
1.1       chs      2544:                        if ((kpt = kpt_free_list) == NULL)
                   2545:                                panic("pmap_enter_ptpage: can't get KPT page");
                   2546:                }
                   2547:                kpt_free_list = kpt->kpt_next;
                   2548:                kpt->kpt_next = kpt_used_list;
                   2549:                kpt_used_list = kpt;
                   2550:                ptpa = kpt->kpt_pa;
1.26      christos 2551:                memset((void *)kpt->kpt_va, 0, PAGE_SIZE);
1.1       chs      2552:                pmap_enter(pmap, va, ptpa, VM_PROT_READ | VM_PROT_WRITE,
                   2553:                    VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED);
                   2554:                pmap_update(pmap);
                   2555: #ifdef DEBUG
                   2556:                if (pmapdebug & (PDB_ENTER|PDB_PTPAGE)) {
                   2557:                        int ix = pmap_ste(pmap, va) - pmap_ste(pmap, 0);
                   2558:
                   2559:                        printf("enter: add &Sysptmap[%d]: %x (KPT page %lx)\n",
1.20      tsutsui  2560:                            ix, Sysptmap[ix], kpt->kpt_va);
1.1       chs      2561:                }
                   2562: #endif
                   2563:                splx(s);
                   2564:        } else {
                   2565:
                   2566:                /*
                   2567:                 * For user processes we just allocate a page from the
                   2568:                 * VM system.  Note that we set the page "wired" count to 1,
                   2569:                 * which is what we use to check if the page can be freed.
                   2570:                 * See pmap_remove_mapping().
                   2571:                 *
                   2572:                 * Count the segment table reference first so that we won't
                   2573:                 * lose the segment table when low on memory.
                   2574:                 */
                   2575:
                   2576:                pmap->pm_sref++;
                   2577:                PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE,
                   2578:                    ("enter: about to alloc UPT pg at %lx\n", va));
1.71.6.1! ad       2579:                rw_enter(uvm_kernel_object->vmobjlock, RW_WRITER);
1.33      dogcow   2580:                while ((pg = uvm_pagealloc(uvm_kernel_object,
1.1       chs      2581:                                           va - vm_map_min(kernel_map),
                   2582:                                           NULL, UVM_PGA_ZERO)) == NULL) {
1.71.6.1! ad       2583:                        rw_exit(uvm_kernel_object->vmobjlock);
1.1       chs      2584:                        uvm_wait("ptpage");
1.71.6.1! ad       2585:                        rw_enter(uvm_kernel_object->vmobjlock, RW_WRITER);
1.1       chs      2586:                }
1.71.6.1! ad       2587:                rw_exit(uvm_kernel_object->vmobjlock);
1.1       chs      2588:                pg->flags &= ~(PG_BUSY|PG_FAKE);
                   2589:                UVM_PAGE_OWN(pg, NULL);
                   2590:                ptpa = VM_PAGE_TO_PHYS(pg);
                   2591:                pmap_enter(pmap_kernel(), va, ptpa,
                   2592:                    VM_PROT_READ | VM_PROT_WRITE,
                   2593:                    VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED);
                   2594:                pmap_update(pmap_kernel());
                   2595:        }
                   2596: #if defined(M68040) || defined(M68060)
                   2597:        /*
                   2598:         * Turn off copyback caching of page table pages,
                   2599:         * could get ugly otherwise.
                   2600:         */
                   2601: #if defined(M68020) || defined(M68030)
                   2602:        if (mmutype == MMU_68040)
                   2603: #endif
                   2604:        {
                   2605: #ifdef DEBUG
                   2606:                pt_entry_t *pte = pmap_pte(pmap_kernel(), va);
                   2607:                if ((pmapdebug & PDB_PARANOIA) && (*pte & PG_CCB) == 0)
                   2608:                        printf("%s PT no CCB: kva=%lx ptpa=%lx pte@%p=%x\n",
1.20      tsutsui  2609:                            pmap == pmap_kernel() ? "Kernel" : "User",
                   2610:                            va, ptpa, pte, *pte);
1.1       chs      2611: #endif
                   2612:                if (pmap_changebit(ptpa, PG_CI, ~PG_CCB))
                   2613:                        DCIS();
                   2614:        }
                   2615: #endif
                   2616:        /*
                   2617:         * Locate the PV entry in the kernel for this PT page and
                   2618:         * record the STE address.  This is so that we can invalidate
                   2619:         * the STE when we remove the mapping for the page.
                   2620:         */
1.46      thorpej  2621:        pvh = pa_to_pvh(ptpa);
1.1       chs      2622:        s = splvm();
1.46      thorpej  2623:        if (pvh) {
                   2624:                pv = &pvh->pvh_first;
                   2625:                pvh->pvh_attrs |= PVH_PTPAGE;
1.1       chs      2626:                do {
                   2627:                        if (pv->pv_pmap == pmap_kernel() && pv->pv_va == va)
                   2628:                                break;
                   2629:                } while ((pv = pv->pv_next));
1.46      thorpej  2630:        } else {
                   2631:                pv = NULL;
1.1       chs      2632:        }
                   2633: #ifdef DEBUG
                   2634:        if (pv == NULL)
                   2635:                panic("pmap_enter_ptpage: PT page not entered");
                   2636: #endif
                   2637:        pv->pv_ptste = ste;
                   2638:        pv->pv_ptpmap = pmap;
                   2639:
                   2640:        PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE,
                   2641:            ("enter: new PT page at PA %lx, ste at %p\n", ptpa, ste));
                   2642:
                   2643:        /*
                   2644:         * Map the new PT page into the segment table.
                   2645:         * Also increment the reference count on the segment table if this
                    2646:         * to keep the count like we do for PT pages; this is mostly because
                   2647:         * to keep the count like we do for PT pages, this is mostly because
                   2648:         * it would be difficult to identify ST pages in pmap_pageable to
                   2649:         * release them.  We also avoid the overhead of vm_map_pageable.
                   2650:         */
                   2651: #if defined(M68040) || defined(M68060)
                   2652: #if defined(M68020) || defined(M68030)
                   2653:        if (mmutype == MMU_68040)
                   2654: #endif
                   2655:        {
                   2656:                st_entry_t *este;
                   2657:
                   2658:                for (este = &ste[NPTEPG/SG4_LEV3SIZE]; ste < este; ste++) {
                   2659:                        *ste = ptpa | SG_U | SG_RW | SG_V;
                   2660:                        ptpa += SG4_LEV3SIZE * sizeof(st_entry_t);
                   2661:                }
                   2662:        }
                   2663: #if defined(M68020) || defined(M68030)
                   2664:        else
                   2665:                *ste = (ptpa & SG_FRAME) | SG_RW | SG_V;
                   2666: #endif
                   2667: #else
                   2668:        *ste = (ptpa & SG_FRAME) | SG_RW | SG_V;
                   2669: #endif
                   2670:        if (pmap != pmap_kernel()) {
                   2671:                PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB,
                   2672:                    ("enter: stab %p refcnt %d\n",
                   2673:                    pmap->pm_stab, pmap->pm_sref));
                   2674:        }
                   2675:        /*
                   2676:         * Flush stale TLB info.
                   2677:         */
                   2678:        if (pmap == pmap_kernel())
                   2679:                TBIAS();
                   2680:        else
                   2681:                TBIAU();
                   2682:        pmap->pm_ptpages++;
                   2683:        splx(s);
1.22      martin   2684:
                   2685:        return 0;
1.1       chs      2686: }
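/*
 * Hypothetical caller sketch (illustration only): pmap_enter() is
 * expected to invoke pmap_enter_ptpage() when no valid segment table
 * entry covers the target VA, propagating ENOMEM when can_fail is set.
 */
#ifdef notdef
static int
example_ensure_ptpage(pmap_t pmap, vaddr_t va, bool can_fail)
{

	if (!pmap_ste_v(pmap, va))
		return pmap_enter_ptpage(pmap, va, can_fail);
	return 0;
}
#endif /* notdef */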
                   2687:
                   2688: /*
                   2689:  * pmap_ptpage_addref:
                   2690:  *
                   2691:  *     Add a reference to the specified PT page.
                   2692:  */
                   2693: void
1.20      tsutsui  2694: pmap_ptpage_addref(vaddr_t ptpva)
1.1       chs      2695: {
                   2696:        struct vm_page *pg;
                   2697:
1.71.6.1! ad       2698:        rw_enter(uvm_kernel_object->vmobjlock, RW_WRITER);
1.33      dogcow   2699:        pg = uvm_pagelookup(uvm_kernel_object, ptpva - vm_map_min(kernel_map));
1.1       chs      2700:        pg->wire_count++;
                   2701:        PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB,
                   2702:            ("ptpage addref: pg %p now %d\n",
                   2703:             pg, pg->wire_count));
1.71.6.1! ad       2704:        rw_exit(uvm_kernel_object->vmobjlock);
1.1       chs      2705: }
                   2706:
                   2707: /*
                   2708:  * pmap_ptpage_delref:
                   2709:  *
                   2710:  *     Delete a reference to the specified PT page.
                   2711:  */
                   2712: int
1.20      tsutsui  2713: pmap_ptpage_delref(vaddr_t ptpva)
1.1       chs      2714: {
                   2715:        struct vm_page *pg;
                   2716:        int rv;
                   2717:
1.71.6.1! ad       2718:        rw_enter(uvm_kernel_object->vmobjlock, RW_WRITER);
1.33      dogcow   2719:        pg = uvm_pagelookup(uvm_kernel_object, ptpva - vm_map_min(kernel_map));
1.1       chs      2720:        rv = --pg->wire_count;
                   2721:        PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB,
                   2722:            ("ptpage delref: pg %p now %d\n",
                   2723:             pg, pg->wire_count));
1.71.6.1! ad       2724:        rw_exit(uvm_kernel_object->vmobjlock);
1.20      tsutsui  2725:        return rv;
1.1       chs      2726: }
                   2727:
                   2728: /*
                   2729:  *     Routine:        pmap_procwr
                   2730:  *
                   2731:  *     Function:
                   2732:  *             Synchronize caches corresponding to [addr, addr + len) in p.
                   2733:  */
                   2734: void
1.20      tsutsui  2735: pmap_procwr(struct proc *p, vaddr_t va, size_t len)
1.1       chs      2736: {
1.20      tsutsui  2737:
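        /*
         * The magic 0x80000004 appears to request an external purge plus
         * an instruction-cache purge (CC_EXTPURGE|CC_IPURGE in the m68k
         * cachectl interface), making newly written instructions visible.
         */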
1.1       chs      2738:        (void)cachectl1(0x80000004, va, len, p);
                   2739: }
                   2740:
                   2741: void
1.20      tsutsui  2742: _pmap_set_page_cacheable(pmap_t pmap, vaddr_t va)
1.1       chs      2743: {
                   2744:
1.20      tsutsui  2745:        if (!pmap_ste_v(pmap, va))
1.1       chs      2746:                return;
                   2747:
                   2748: #if defined(M68040) || defined(M68060)
                   2749: #if defined(M68020) || defined(M68030)
1.20      tsutsui  2750:        if (mmutype == MMU_68040) {
1.1       chs      2751: #endif
1.20      tsutsui  2752:        if (pmap_changebit(pmap_pte_pa(pmap_pte(pmap, va)), PG_CCB, ~PG_CI))
1.1       chs      2753:                DCIS();
                   2754:
                   2755: #if defined(M68020) || defined(M68030)
                   2756:        } else
1.20      tsutsui  2757:                pmap_changebit(pmap_pte_pa(pmap_pte(pmap, va)), 0, ~PG_CI);
1.1       chs      2758: #endif
                   2759: #else
1.20      tsutsui  2760:        pmap_changebit(pmap_pte_pa(pmap_pte(pmap, va)), 0, ~PG_CI);
1.1       chs      2761: #endif
                   2762: }
                   2763:
                   2764: void
1.20      tsutsui  2765: _pmap_set_page_cacheinhibit(pmap_t pmap, vaddr_t va)
1.1       chs      2766: {
                   2767:
1.20      tsutsui  2768:        if (!pmap_ste_v(pmap, va))
1.1       chs      2769:                return;
                   2770:
                   2771: #if defined(M68040) || defined(M68060)
                   2772: #if defined(M68020) || defined(M68030)
1.20      tsutsui  2773:        if (mmutype == MMU_68040) {
1.1       chs      2774: #endif
1.20      tsutsui  2775:        if (pmap_changebit(pmap_pte_pa(pmap_pte(pmap, va)), PG_CI, ~PG_CCB))
1.1       chs      2776:                DCIS();
                   2777: #if defined(M68020) || defined(M68030)
                   2778:        } else
1.20      tsutsui  2779:                pmap_changebit(pmap_pte_pa(pmap_pte(pmap, va)), PG_CI, ~0);
1.1       chs      2780: #endif
                   2781: #else
1.20      tsutsui  2782:        pmap_changebit(pmap_pte_pa(pmap_pte(pmap, va)), PG_CI, ~0);
1.1       chs      2783: #endif
                   2784: }
                   2785:
                   2786: int
1.20      tsutsui  2787: _pmap_page_is_cacheable(pmap_t pmap, vaddr_t va)
1.1       chs      2788: {
                   2789:
1.20      tsutsui  2790:        if (!pmap_ste_v(pmap, va))
                   2791:                return 0;
1.1       chs      2792:
1.20      tsutsui  2793:        return (pmap_pte_ci(pmap_pte(pmap, va)) == 0) ? 1 : 0;
1.1       chs      2794: }
                   2795:
                   2796: #ifdef DEBUG
                   2797: /*
                   2798:  * pmap_pvdump:
                   2799:  *
                   2800:  *     Dump the contents of the PV list for the specified physical page.
                   2801:  */
                   2802: void
1.20      tsutsui  2803: pmap_pvdump(paddr_t pa)
1.1       chs      2804: {
1.46      thorpej  2805:        struct pv_header *pvh;
1.1       chs      2806:        struct pv_entry *pv;
                   2807:
                   2808:        printf("pa %lx", pa);
1.46      thorpej  2809:        pvh = pa_to_pvh(pa);
                   2810:        for (pv = &pvh->pvh_first; pv; pv = pv->pv_next)
1.47      mhitch   2811:                printf(" -> pmap %p, va %lx, ptste %p, ptpmap %p",
                   2812:                    pv->pv_pmap, pv->pv_va, pv->pv_ptste, pv->pv_ptpmap);
1.1       chs      2813:        printf("\n");
                   2814: }
                   2815:
                   2816: /*
                   2817:  * pmap_check_wiring:
                   2818:  *
                   2819:  *     Count the number of valid mappings in the specified PT page,
                   2820:  *     and ensure that it is consistent with the number of wirings
                   2821:  *     to that page that the VM system has.
                   2822:  */
                   2823: void
1.20      tsutsui  2824: pmap_check_wiring(const char *str, vaddr_t va)
1.1       chs      2825: {
                   2826:        pt_entry_t *pte;
                   2827:        paddr_t pa;
                   2828:        struct vm_page *pg;
                   2829:        int count;
                   2830:
                   2831:        if (!pmap_ste_v(pmap_kernel(), va) ||
                   2832:            !pmap_pte_v(pmap_pte(pmap_kernel(), va)))
                   2833:                return;
                   2834:
                   2835:        pa = pmap_pte_pa(pmap_pte(pmap_kernel(), va));
                   2836:        pg = PHYS_TO_VM_PAGE(pa);
1.13      chs      2837:        if (pg->wire_count > PAGE_SIZE / sizeof(pt_entry_t)) {
1.1       chs      2838:                panic("*%s*: 0x%lx: wire count %d", str, va, pg->wire_count);
                   2839:        }
                   2840:
                   2841:        count = 0;
1.3       thorpej  2842:        for (pte = (pt_entry_t *)va; pte < (pt_entry_t *)(va + PAGE_SIZE);
                   2843:             pte++)
1.1       chs      2844:                if (*pte)
                   2845:                        count++;
                   2846:        if (pg->wire_count != count)
                   2847:                panic("*%s*: 0x%lx: w%d/a%d",
                   2848:                       str, va, pg->wire_count, count);
                   2849: }
                   2850: #endif /* DEBUG */
