
Annotation of src/sys/arch/m68k/m68k/pmap_motorola.c, Revision 1.21.4.1

1.21.4.1! bouyer      1: /*     $NetBSD$        */
1.1       chs         2:
                      3: /*-
                      4:  * Copyright (c) 1999 The NetBSD Foundation, Inc.
                      5:  * All rights reserved.
                      6:  *
                      7:  * This code is derived from software contributed to The NetBSD Foundation
                      8:  * by Jason R. Thorpe.
                      9:  *
                     10:  * Redistribution and use in source and binary forms, with or without
                     11:  * modification, are permitted provided that the following conditions
                     12:  * are met:
                     13:  * 1. Redistributions of source code must retain the above copyright
                     14:  *    notice, this list of conditions and the following disclaimer.
                     15:  * 2. Redistributions in binary form must reproduce the above copyright
                     16:  *    notice, this list of conditions and the following disclaimer in the
                     17:  *    documentation and/or other materials provided with the distribution.
                     18:  * 3. All advertising materials mentioning features or use of this software
                     19:  *    must display the following acknowledgement:
                     20:  *     This product includes software developed by the NetBSD
                     21:  *     Foundation, Inc. and its contributors.
                     22:  * 4. Neither the name of The NetBSD Foundation nor the names of its
                     23:  *    contributors may be used to endorse or promote products derived
                     24:  *    from this software without specific prior written permission.
                     25:  *
                     26:  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
                     27:  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
                     28:  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
                     29:  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
                     30:  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
                     31:  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
                     32:  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
                     33:  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
                     34:  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
                     35:  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
                     36:  * POSSIBILITY OF SUCH DAMAGE.
                     37:  */
                     38:
                     39: /*
                     40:  * Copyright (c) 1991, 1993
                     41:  *     The Regents of the University of California.  All rights reserved.
                     42:  *
                     43:  * This code is derived from software contributed to Berkeley by
                     44:  * the Systems Programming Group of the University of Utah Computer
                     45:  * Science Department.
                     46:  *
                     47:  * Redistribution and use in source and binary forms, with or without
                     48:  * modification, are permitted provided that the following conditions
                     49:  * are met:
                     50:  * 1. Redistributions of source code must retain the above copyright
                     51:  *    notice, this list of conditions and the following disclaimer.
                     52:  * 2. Redistributions in binary form must reproduce the above copyright
                     53:  *    notice, this list of conditions and the following disclaimer in the
                     54:  *    documentation and/or other materials provided with the distribution.
1.6       agc        55:  * 3. Neither the name of the University nor the names of its contributors
1.1       chs        56:  *    may be used to endorse or promote products derived from this software
                     57:  *    without specific prior written permission.
                     58:  *
                     59:  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
                     60:  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
                     61:  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
                     62:  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
                     63:  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
                     64:  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
                     65:  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
                     66:  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
                     67:  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
                     68:  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
                     69:  * SUCH DAMAGE.
                     70:  *
                     71:  *     @(#)pmap.c      8.6 (Berkeley) 5/27/94
                     72:  */
                     73:
                     74: /*
                     75:  * Motorola m68k-family physical map management code.
                     76:  *
                     77:  * Supports:
                     78:  *     68020 with 68851 MMU
                     79:  *     68030 with on-chip MMU
                     80:  *     68040 with on-chip MMU
                     81:  *     68060 with on-chip MMU
                     82:  *
                     83:  * Notes:
                     84:  *     Don't even pay lip service to multiprocessor support.
                     85:  *
                     86:  *     We assume TLB entries don't have process tags (except for the
                     87:  *     supervisor/user distinction) so we only invalidate TLB entries
                     88:  *     when changing mappings for the current (or kernel) pmap.  This is
                     89:  *     technically not true for the 68851 but we flush the TLB on every
                     90:  *     context switch, so it effectively winds up that way.
                     91:  *
                     92:  *     Bitwise and/or operations are significantly faster than bitfield
                     93:  *     references so we use them when accessing STE/PTEs in the pmap_pte_*
                     94:  *     macros.  Note also that the two are not always equivalent; e.g.:
                     95:  *             (*pte & PG_PROT) [4] != pte->pg_prot [1]
                     96:  *     and a couple of routines that deal with protection and wiring take
                     97:  *     some shortcuts that assume the and/or definitions.
                     98:  */
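
/*
 * Editor's note: a minimal sketch of the and/or-vs-bitfield point made
 * above, assuming PG_PROT/PG_RO are the 0x4 protection bit as in
 * <machine/pte.h> on this port.  Dead code, for illustration only.
 */
#if 0
static void
pmap_prot_example(pt_entry_t *pte)
{

	/* and/or form: yields the mask value (4) for a read-only PTE */
	if ((*pte & PG_PROT) == PG_RO) {
		/* ... */
	}
	/*
	 * A one-bit bitfield read such as pte->pg_prot would yield 1
	 * here, so code comparing against the mask value must use the
	 * and/or form; the two are not interchangeable.
	 */
}
#endif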
                     99:
                    100: /*
                    101:  *     Manages physical address maps.
                    102:  *
                    103:  *     In addition to hardware address maps, this
                    104:  *     module is called upon to provide software-use-only
                    105:  *     maps which may or may not be stored in the same
                    106:  *     form as hardware maps.  These pseudo-maps are
                    107:  *     used to store intermediate results from copy
                    108:  *     operations to and from address spaces.
                    109:  *
                    110:  *     Since the information managed by this module is
                    111:  *     also stored by the logical address mapping module,
                    112:  *     this module may throw away valid virtual-to-physical
                    113:  *     mappings at almost any time.  However, invalidations
                    114:  *     of virtual-to-physical mappings must be done as
                    115:  *     requested.
                    116:  *
                    117:  *     In order to cope with hardware architectures which
                    118:  *     make virtual-to-physical map invalidates expensive,
                     119:  * this module may delay invalidate or reduce-protection
                    120:  *     operations until such time as they are actually
                    121:  *     necessary.  This module is given full information as
                    122:  *     to which processors are currently using which maps,
                    123:  *     and to when physical maps must be made correct.
                    124:  */
                    125:
                    126: #include <sys/cdefs.h>
1.21.4.1! bouyer    127: __KERNEL_RCSID(0, "$NetBSD$");
1.1       chs       128:
                    129: #include "opt_compat_hpux.h"
                    130:
                    131: #include <sys/param.h>
                    132: #include <sys/systm.h>
                    133: #include <sys/proc.h>
                    134: #include <sys/malloc.h>
                    135: #include <sys/user.h>
                    136: #include <sys/pool.h>
                    137:
                    138: #include <machine/pte.h>
                    139:
                    140: #include <uvm/uvm.h>
                    141:
                    142: #include <machine/cpu.h>
                    143: #include <m68k/cacheops.h>
                    144:
                    145: #ifdef DEBUG
                    146: #define PDB_FOLLOW     0x0001
                    147: #define PDB_INIT       0x0002
                    148: #define PDB_ENTER      0x0004
                    149: #define PDB_REMOVE     0x0008
                    150: #define PDB_CREATE     0x0010
                    151: #define PDB_PTPAGE     0x0020
                    152: #define PDB_CACHE      0x0040
                    153: #define PDB_BITS       0x0080
                    154: #define PDB_COLLECT    0x0100
                    155: #define PDB_PROTECT    0x0200
                    156: #define PDB_SEGTAB     0x0400
                    157: #define PDB_MULTIMAP   0x0800
                    158: #define PDB_PARANOIA   0x2000
                    159: #define PDB_WIRING     0x4000
                    160: #define PDB_PVDUMP     0x8000
                    161:
                    162: int debugmap = 0;
                    163: int pmapdebug = PDB_PARANOIA;
                    164:
                    165: #define        PMAP_DPRINTF(l, x)      if (pmapdebug & (l)) printf x
                    166: #else /* ! DEBUG */
                    167: #define        PMAP_DPRINTF(l, x)      /* nothing */
                    168: #endif /* DEBUG */
                    169:
                    170: /*
                    171:  * Get STEs and PTEs for user/kernel address space
                    172:  */
                    173: #if defined(M68040) || defined(M68060)
                    174: #define        pmap_ste1(m, v) \
                    175:        (&((m)->pm_stab[(vaddr_t)(v) >> SG4_SHIFT1]))
                    176: /* XXX assumes physically contiguous ST pages (if more than one) */
                    177: #define pmap_ste2(m, v) \
                    178:        (&((m)->pm_stab[(st_entry_t *)(*(u_int *)pmap_ste1(m, v) & SG4_ADDR1) \
                    179:                        - (m)->pm_stpa + (((v) & SG4_MASK2) >> SG4_SHIFT2)]))
                    180: #if defined(M68020) || defined(M68030)
                    181: #define        pmap_ste(m, v)  \
                    182:        (&((m)->pm_stab[(vaddr_t)(v) \
                    183:                        >> (mmutype == MMU_68040 ? SG4_SHIFT1 : SG_ISHIFT)]))
                    184: #define pmap_ste_v(m, v) \
                    185:        (mmutype == MMU_68040 \
                    186:         ? ((*pmap_ste1(m, v) & SG_V) && \
                    187:            (*pmap_ste2(m, v) & SG_V)) \
                    188:         : (*pmap_ste(m, v) & SG_V))
                    189: #else
                    190: #define        pmap_ste(m, v)  \
                    191:        (&((m)->pm_stab[(vaddr_t)(v) >> SG4_SHIFT1]))
                    192: #define pmap_ste_v(m, v) \
                    193:        ((*pmap_ste1(m, v) & SG_V) && (*pmap_ste2(m, v) & SG_V))
                    194: #endif
                    195: #else
                    196: #define        pmap_ste(m, v)   (&((m)->pm_stab[(vaddr_t)(v) >> SG_ISHIFT]))
                    197: #define pmap_ste_v(m, v) (*pmap_ste(m, v) & SG_V)
                    198: #endif
                    199:
                    200: #define pmap_pte(m, v) (&((m)->pm_ptab[(vaddr_t)(v) >> PG_SHIFT]))
                    201: #define pmap_pte_pa(pte)       (*(pte) & PG_FRAME)
                    202: #define pmap_pte_w(pte)                (*(pte) & PG_W)
                    203: #define pmap_pte_ci(pte)       (*(pte) & PG_CI)
                    204: #define pmap_pte_m(pte)                (*(pte) & PG_M)
                    205: #define pmap_pte_u(pte)                (*(pte) & PG_U)
                    206: #define pmap_pte_prot(pte)     (*(pte) & PG_PROT)
                    207: #define pmap_pte_v(pte)                (*(pte) & PG_V)
                    208:
                    209: #define pmap_pte_set_w(pte, v) \
                    210:        if (v) *(pte) |= PG_W; else *(pte) &= ~PG_W
                    211: #define pmap_pte_set_prot(pte, v) \
                    212:        if (v) *(pte) |= PG_PROT; else *(pte) &= ~PG_PROT
                    213: #define pmap_pte_w_chg(pte, nw)                ((nw) ^ pmap_pte_w(pte))
                    214: #define pmap_pte_prot_chg(pte, np)     ((np) ^ pmap_pte_prot(pte))
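
/*
 * Editor's sketch (hypothetical helper, not in this file): how the
 * accessor/mutator macros above are meant to combine, in the style of
 * pmap_change_wiring(): update a mapping's wired bit only when it
 * actually changes.  Dead code, for illustration only.
 */
#if 0
static void
pmap_set_wired_example(pmap_t pmap, vaddr_t va, boolean_t wired)
{
	pt_entry_t *pte = pmap_pte(pmap, va);

	if (pmap_pte_v(pte) && pmap_pte_w_chg(pte, wired ? PG_W : 0))
		pmap_pte_set_w(pte, wired);
}
#endif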
                    215:
                    216: /*
                    217:  * Given a map and a machine independent protection code,
                    218:  * convert to an m68k protection code.
                    219:  */
                    220: #define pte_prot(m, p) (protection_codes[p])
                    221: int    protection_codes[8];
                    222:
                    223: /*
                    224:  * Kernel page table page management.
                    225:  */
                    226: struct kpt_page {
                    227:        struct kpt_page *kpt_next;      /* link on either used or free list */
                    228:        vaddr_t         kpt_va;         /* always valid kernel VA */
                    229:        paddr_t         kpt_pa;         /* PA of this page (for speed) */
                    230: };
                    231: struct kpt_page *kpt_free_list, *kpt_used_list;
                    232: struct kpt_page *kpt_pages;
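
/*
 * Editor's sketch (illustrative only): the free/used-list discipline
 * for KPT pages.  pmap_enter_ptpage() pops a page off kpt_free_list
 * and chains it onto kpt_used_list roughly like this.
 */
#if 0
static struct kpt_page *
kpt_alloc_example(void)
{
	struct kpt_page *kpt = kpt_free_list;

	if (kpt != NULL) {
		kpt_free_list = kpt->kpt_next;	/* pop off free list */
		kpt->kpt_next = kpt_used_list;	/* push onto used list */
		kpt_used_list = kpt;
	}
	return kpt;
}
#endif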
                    233:
                    234: /*
                    235:  * Kernel segment/page table and page table map.
                     236:  * The page table map gives us the level of indirection needed to dynamically
                    237:  * expand the page table.  It is essentially a copy of the segment table
                    238:  * with PTEs instead of STEs.  All are initialized in locore at boot time.
                    239:  * Sysmap will initially contain VM_KERNEL_PT_PAGES pages of PTEs.
                     240:  * Segtabzero is an empty segment table which all processes share until they
                    241:  * reference something.
                    242:  */
                    243: st_entry_t     *Sysseg;
                    244: pt_entry_t     *Sysmap, *Sysptmap;
                    245: st_entry_t     *Segtabzero, *Segtabzeropa;
                    246: vsize_t                Sysptsize = VM_KERNEL_PT_PAGES;
                    247:
                    248: struct pmap    kernel_pmap_store;
                    249: struct vm_map  *st_map, *pt_map;
1.12      yamt      250: struct vm_map_kernel st_map_store, pt_map_store;
1.1       chs       251:
                    252: paddr_t                avail_start;    /* PA of first available physical page */
                    253: paddr_t                avail_end;      /* PA of last available physical page */
                    254: vsize_t                mem_size;       /* memory size in bytes */
1.5       thorpej   255: vaddr_t                virtual_avail;  /* VA of first avail page (after kernel bss)*/
                    256: vaddr_t                virtual_end;    /* VA of last avail page (end of kernel AS) */
1.1       chs       257: int            page_cnt;       /* number of pages managed by VM system */
                    258:
                    259: boolean_t      pmap_initialized = FALSE;       /* Has pmap_init completed? */
                    260: struct pv_entry        *pv_table;
                    261: char           *pmap_attributes;       /* reference and modify bits */
                    262: TAILQ_HEAD(pv_page_list, pv_page) pv_page_freelist;
                    263: int            pv_nfree;
                    264:
                    265: #ifdef M68K_MMU_HP
                     266: int            pmap_aliasmask; /* separation at which VA aliasing is ok */
                    267: #endif
                    268: #if defined(M68040) || defined(M68060)
                    269: int            protostfree;    /* prototype (default) free ST map */
                    270: #endif
                    271:
                    272: extern caddr_t CADDR1, CADDR2;
                    273:
                    274: pt_entry_t     *caddr1_pte;    /* PTE for CADDR1 */
                    275: pt_entry_t     *caddr2_pte;    /* PTE for CADDR2 */
                    276:
                    277: struct pool    pmap_pmap_pool; /* memory pool for pmap structures */
                    278:
1.20      tsutsui   279: struct pv_entry *pmap_alloc_pv(void);
                    280: void   pmap_free_pv(struct pv_entry *);
                    281: void   pmap_collect_pv(void);
1.1       chs       282: #ifdef COMPAT_HPUX
1.20      tsutsui   283: int    pmap_mapmulti(pmap_t, vaddr_t);
1.1       chs       284: #endif /* COMPAT_HPUX */
                    285:
                    286: #define        PAGE_IS_MANAGED(pa)     (pmap_initialized &&                    \
                    287:                                 vm_physseg_find(atop((pa)), NULL) != -1)
                    288:
                    289: #define        pa_to_pvh(pa)                                                   \
                    290: ({                                                                     \
1.19      tsutsui   291:        int bank_, pg_ = 0;     /* XXX gcc4 -Wuninitialized */          \
1.1       chs       292:                                                                        \
                    293:        bank_ = vm_physseg_find(atop((pa)), &pg_);                      \
                    294:        &vm_physmem[bank_].pmseg.pvent[pg_];                            \
                    295: })
                    296:
                    297: #define        pa_to_attribute(pa)                                             \
                    298: ({                                                                     \
1.19      tsutsui   299:        int bank_, pg_ = 0;     /* XXX gcc4 -Wuninitialized */          \
1.1       chs       300:                                                                        \
                    301:        bank_ = vm_physseg_find(atop((pa)), &pg_);                      \
                    302:        &vm_physmem[bank_].pmseg.attrs[pg_];                            \
                    303: })
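
/*
 * Editor's sketch (illustrative only): both statement-expression macros
 * above resolve a managed PA to its per-page metadata via
 * vm_physseg_find(); a caller is expected to check PAGE_IS_MANAGED()
 * first, since an unmanaged PA would index with bank_ == -1.
 */
#if 0
	if (PAGE_IS_MANAGED(pa)) {
		struct pv_entry *pvh = pa_to_pvh(pa);
		char *attrp = pa_to_attribute(pa);
		/* ... link new mappings at pvh, merge bits into *attrp ... */
	}
#endif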
                    304:
                    305: /*
                    306:  * Internal routines
                    307:  */
1.20      tsutsui   308: void   pmap_remove_mapping(pmap_t, vaddr_t, pt_entry_t *, int);
                    309: void   pmap_do_remove(pmap_t, vaddr_t, vaddr_t, int);
                    310: boolean_t pmap_testbit(paddr_t, int);
                    311: boolean_t pmap_changebit(paddr_t, int, int);
1.21.4.1! bouyer    312: boolean_t pmap_enter_ptpage(pmap_t, vaddr_t, boolean_t);
1.20      tsutsui   313: void   pmap_ptpage_addref(vaddr_t);
                    314: int    pmap_ptpage_delref(vaddr_t);
                    315: void   pmap_collect1(pmap_t, paddr_t, paddr_t);
                    316: void   pmap_pinit(pmap_t);
                    317: void   pmap_release(pmap_t);
1.1       chs       318:
                    319: #ifdef DEBUG
1.20      tsutsui   320: void pmap_pvdump(paddr_t);
                    321: void pmap_check_wiring(const char *, vaddr_t);
1.1       chs       322: #endif
                    323:
                    324: /* pmap_remove_mapping flags */
                    325: #define        PRM_TFLUSH      0x01
                    326: #define        PRM_CFLUSH      0x02
                    327: #define        PRM_KEEPPTPAGE  0x04
                    328:
                    329: /*
1.5       thorpej   330:  * pmap_virtual_space:         [ INTERFACE ]
                    331:  *
                    332:  *     Report the range of available kernel virtual address
                    333:  *     space to the VM system during bootstrap.
                    334:  *
                    335:  *     This is only an interface function if we do not use
                    336:  *     pmap_steal_memory()!
                    337:  *
                    338:  *     Note: no locking is necessary in this function.
                    339:  */
                    340: void
                     341: pmap_virtual_space(vaddr_t *vstartp, vaddr_t *vendp)
                    343: {
                    344:
                    345:        *vstartp = virtual_avail;
                    346:        *vendp = virtual_end;
                    347: }
                    348:
                    349: /*
1.1       chs       350:  * pmap_init:                  [ INTERFACE ]
                    351:  *
                    352:  *     Initialize the pmap module.  Called by vm_init(), to initialize any
                    353:  *     structures that the pmap system needs to map virtual memory.
                    354:  *
                    355:  *     Note: no locking is necessary in this function.
                    356:  */
                    357: void
1.20      tsutsui   358: pmap_init(void)
1.1       chs       359: {
                    360:        vaddr_t         addr, addr2;
                    361:        vsize_t         s;
                    362:        struct pv_entry *pv;
                    363:        char            *attr;
                    364:        int             rv;
                    365:        int             npages;
                    366:        int             bank;
                    367:
                    368:        PMAP_DPRINTF(PDB_FOLLOW, ("pmap_init()\n"));
                    369:
                    370:        /*
                    371:         * Before we do anything else, initialize the PTE pointers
                    372:         * used by pmap_zero_page() and pmap_copy_page().
                    373:         */
                    374:        caddr1_pte = pmap_pte(pmap_kernel(), CADDR1);
                    375:        caddr2_pte = pmap_pte(pmap_kernel(), CADDR2);
                    376:
                    377:        PMAP_DPRINTF(PDB_INIT,
                    378:            ("pmap_init: Sysseg %p, Sysmap %p, Sysptmap %p\n",
                    379:            Sysseg, Sysmap, Sysptmap));
                    380:        PMAP_DPRINTF(PDB_INIT,
                    381:            ("  pstart %lx, pend %lx, vstart %lx, vend %lx\n",
                    382:            avail_start, avail_end, virtual_avail, virtual_end));
                    383:
                    384:        /*
                    385:         * Allocate memory for random pmap data structures.  Includes the
                    386:         * initial segment table, pv_head_table and pmap_attributes.
                    387:         */
                    388:        for (page_cnt = 0, bank = 0; bank < vm_nphysseg; bank++)
                    389:                page_cnt += vm_physmem[bank].end - vm_physmem[bank].start;
                    390:        s = M68K_STSIZE;                                        /* Segtabzero */
                    391:        s += page_cnt * sizeof(struct pv_entry);        /* pv table */
                    392:        s += page_cnt * sizeof(char);                   /* attribute table */
                    393:        s = round_page(s);
1.14      yamt      394:        addr = uvm_km_alloc(kernel_map, s, 0, UVM_KMF_WIRED | UVM_KMF_ZERO);
1.1       chs       395:        if (addr == 0)
                    396:                panic("pmap_init: can't allocate data structures");
                    397:
1.20      tsutsui   398:        Segtabzero = (st_entry_t *)addr;
                    399:        (void)pmap_extract(pmap_kernel(), addr,
                    400:            (paddr_t *)(void *)&Segtabzeropa);
1.1       chs       401:        addr += M68K_STSIZE;
                    402:
                    403:        pv_table = (struct pv_entry *) addr;
                    404:        addr += page_cnt * sizeof(struct pv_entry);
                    405:
1.20      tsutsui   406:        pmap_attributes = (char *)addr;
1.1       chs       407:
                    408:        PMAP_DPRINTF(PDB_INIT, ("pmap_init: %lx bytes: page_cnt %x s0 %p(%p) "
                    409:            "tbl %p atr %p\n",
                    410:            s, page_cnt, Segtabzero, Segtabzeropa,
                    411:            pv_table, pmap_attributes));
                    412:
                    413:        /*
                    414:         * Now that the pv and attribute tables have been allocated,
                    415:         * assign them to the memory segments.
                    416:         */
                    417:        pv = pv_table;
                    418:        attr = pmap_attributes;
                    419:        for (bank = 0; bank < vm_nphysseg; bank++) {
                    420:                npages = vm_physmem[bank].end - vm_physmem[bank].start;
                    421:                vm_physmem[bank].pmseg.pvent = pv;
                    422:                vm_physmem[bank].pmseg.attrs = attr;
                    423:                pv += npages;
                    424:                attr += npages;
                    425:        }
                    426:
                    427:        /*
1.5       thorpej   428:         * Allocate physical memory for kernel PT pages and their management.
                    429:         * We need 1 PT page per possible task plus some slop.
                    430:         */
                    431:        npages = min(atop(M68K_MAX_KPTSIZE), maxproc+16);
                    432:        s = ptoa(npages) + round_page(npages * sizeof(struct kpt_page));
                    433:
                    434:        /*
                     435:         * Verify that space will be allocated in the region for which
                    436:         * we already have kernel PT pages.
                    437:         */
                    438:        addr = 0;
                    439:        rv = uvm_map(kernel_map, &addr, s, NULL, UVM_UNKNOWN_OFFSET, 0,
1.20      tsutsui   440:            UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
                    441:            UVM_ADV_RANDOM, UVM_FLAG_NOMERGE));
1.5       thorpej   442:        if (rv != 0 || (addr + s) >= (vaddr_t)Sysmap)
                    443:                panic("pmap_init: kernel PT too small");
                    444:        uvm_unmap(kernel_map, addr, addr + s);
                    445:
                    446:        /*
                    447:         * Now allocate the space and link the pages together to
                    448:         * form the KPT free list.
                    449:         */
1.14      yamt      450:        addr = uvm_km_alloc(kernel_map, s, 0, UVM_KMF_WIRED | UVM_KMF_ZERO);
1.5       thorpej   451:        if (addr == 0)
                    452:                panic("pmap_init: cannot allocate KPT free list");
                    453:        s = ptoa(npages);
                    454:        addr2 = addr + s;
                    455:        kpt_pages = &((struct kpt_page *)addr2)[npages];
                    456:        kpt_free_list = NULL;
                    457:        do {
                    458:                addr2 -= PAGE_SIZE;
                    459:                (--kpt_pages)->kpt_next = kpt_free_list;
                    460:                kpt_free_list = kpt_pages;
                    461:                kpt_pages->kpt_va = addr2;
                    462:                (void) pmap_extract(pmap_kernel(), addr2,
                    463:                    (paddr_t *)&kpt_pages->kpt_pa);
                    464:        } while (addr != addr2);
                    465:
                    466:        PMAP_DPRINTF(PDB_INIT, ("pmap_init: KPT: %ld pages from %lx to %lx\n",
                    467:            atop(s), addr, addr + s));
                    468:
                    469:        /*
1.1       chs       470:         * Allocate the segment table map and the page table map.
                    471:         */
                    472:        s = maxproc * M68K_STSIZE;
                    473:        st_map = uvm_km_suballoc(kernel_map, &addr, &addr2, s, 0, FALSE,
                    474:            &st_map_store);
                    475:
                    476:        addr = M68K_PTBASE;
                    477:        if ((M68K_PTMAXSIZE / M68K_MAX_PTSIZE) < maxproc) {
                    478:                s = M68K_PTMAXSIZE;
                    479:                /*
                    480:                 * XXX We don't want to hang when we run out of
                    481:                 * page tables, so we lower maxproc so that fork()
                    482:                 * will fail instead.  Note that root could still raise
                    483:                 * this value via sysctl(3).
                    484:                 */
                    485:                maxproc = (M68K_PTMAXSIZE / M68K_MAX_PTSIZE);
                    486:        } else
                    487:                s = (maxproc * M68K_MAX_PTSIZE);
                    488:        pt_map = uvm_km_suballoc(kernel_map, &addr, &addr2, s, 0,
                    489:            TRUE, &pt_map_store);
                    490:
                    491: #if defined(M68040) || defined(M68060)
                    492:        if (mmutype == MMU_68040) {
                    493:                protostfree = ~l2tobm(0);
                    494:                for (rv = MAXUL2SIZE; rv < sizeof(protostfree)*NBBY; rv++)
                    495:                        protostfree &= ~l2tobm(rv);
                    496:        }
                    497: #endif
                    498:
                    499:        /*
                    500:         * Initialize the pmap pools.
                    501:         */
                    502:        pool_init(&pmap_pmap_pool, sizeof(struct pmap), 0, 0, 0, "pmappl",
                    503:            &pool_allocator_nointr);
                    504:
                    505:        /*
                    506:         * Now that this is done, mark the pages shared with the
                    507:         * hardware page table search as non-CCB (actually, as CI).
                    508:         *
                    509:         * XXX Hm. Given that this is in the kernel map, can't we just
                    510:         * use the va's?
                    511:         */
                    512: #ifdef M68060
                    513: #if defined(M68020) || defined(M68030) || defined(M68040)
                    514:        if (cputype == CPU_68060)
                    515: #endif
                    516:        {
                    517:                struct kpt_page *kptp = kpt_free_list;
                    518:                paddr_t paddr;
                    519:
                    520:                while (kptp) {
                    521:                        pmap_changebit(kptp->kpt_pa, PG_CI, ~PG_CCB);
                    522:                        kptp = kptp->kpt_next;
                    523:                }
                    524:
                    525:                paddr = (paddr_t)Segtabzeropa;
                    526:                while (paddr < (paddr_t)Segtabzeropa + M68K_STSIZE) {
                    527:                        pmap_changebit(paddr, PG_CI, ~PG_CCB);
1.3       thorpej   528:                        paddr += PAGE_SIZE;
1.1       chs       529:                }
                    530:
                    531:                DCIS();
                    532:        }
                    533: #endif
                    534:
                    535:        /*
                    536:         * Now it is safe to enable pv_table recording.
                    537:         */
                    538:        pmap_initialized = TRUE;
                    539: }
                    540:
                    541: /*
                    542:  * pmap_alloc_pv:
                    543:  *
                    544:  *     Allocate a pv_entry.
                    545:  */
                    546: struct pv_entry *
1.20      tsutsui   547: pmap_alloc_pv(void)
1.1       chs       548: {
                    549:        struct pv_page *pvp;
                    550:        struct pv_entry *pv;
                    551:        int i;
                    552:
                    553:        if (pv_nfree == 0) {
1.14      yamt      554:                pvp = (struct pv_page *)uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
                    555:                    UVM_KMF_WIRED | UVM_KMF_ZERO);
1.1       chs       556:                if (pvp == 0)
1.14      yamt      557:                        panic("pmap_alloc_pv: uvm_km_alloc() failed");
1.1       chs       558:                pvp->pvp_pgi.pgi_freelist = pv = &pvp->pvp_pv[1];
                    559:                for (i = NPVPPG - 2; i; i--, pv++)
                    560:                        pv->pv_next = pv + 1;
                    561:                pv->pv_next = 0;
                    562:                pv_nfree += pvp->pvp_pgi.pgi_nfree = NPVPPG - 1;
                    563:                TAILQ_INSERT_HEAD(&pv_page_freelist, pvp, pvp_pgi.pgi_list);
                    564:                pv = &pvp->pvp_pv[0];
                    565:        } else {
                    566:                --pv_nfree;
                    567:                pvp = pv_page_freelist.tqh_first;
                    568:                if (--pvp->pvp_pgi.pgi_nfree == 0) {
                    569:                        TAILQ_REMOVE(&pv_page_freelist, pvp, pvp_pgi.pgi_list);
                    570:                }
                    571:                pv = pvp->pvp_pgi.pgi_freelist;
                    572: #ifdef DIAGNOSTIC
                    573:                if (pv == 0)
                    574:                        panic("pmap_alloc_pv: pgi_nfree inconsistent");
                    575: #endif
                    576:                pvp->pvp_pgi.pgi_freelist = pv->pv_next;
                    577:        }
                    578:        return pv;
                    579: }
                    580:
                    581: /*
                    582:  * pmap_free_pv:
                    583:  *
                    584:  *     Free a pv_entry.
                    585:  */
                    586: void
1.20      tsutsui   587: pmap_free_pv(struct pv_entry *pv)
1.1       chs       588: {
                    589:        struct pv_page *pvp;
                    590:
1.20      tsutsui   591:        pvp = (struct pv_page *)trunc_page((vaddr_t)pv);
1.1       chs       592:        switch (++pvp->pvp_pgi.pgi_nfree) {
                    593:        case 1:
                     594:                TAILQ_INSERT_TAIL(&pv_page_freelist, pvp, pvp_pgi.pgi_list);
                                        /* FALLTHROUGH */
                    595:        default:
                    596:                pv->pv_next = pvp->pvp_pgi.pgi_freelist;
                    597:                pvp->pvp_pgi.pgi_freelist = pv;
                    598:                ++pv_nfree;
                    599:                break;
                    600:        case NPVPPG:
                    601:                pv_nfree -= NPVPPG - 1;
                    602:                TAILQ_REMOVE(&pv_page_freelist, pvp, pvp_pgi.pgi_list);
1.14      yamt      603:                uvm_km_free(kernel_map, (vaddr_t)pvp, PAGE_SIZE, UVM_KMF_WIRED);
1.1       chs       604:                break;
                    605:        }
                    606: }
                    607:
                    608: /*
                    609:  * pmap_collect_pv:
                    610:  *
                    611:  *     Perform compaction on the PV list, called via pmap_collect().
                    612:  */
                    613: void
1.20      tsutsui   614: pmap_collect_pv(void)
1.1       chs       615: {
                    616:        struct pv_page_list pv_page_collectlist;
                    617:        struct pv_page *pvp, *npvp;
                    618:        struct pv_entry *ph, *ppv, *pv, *npv;
                    619:        int s;
                    620:
                    621:        TAILQ_INIT(&pv_page_collectlist);
                    622:
                    623:        for (pvp = pv_page_freelist.tqh_first; pvp; pvp = npvp) {
                    624:                if (pv_nfree < NPVPPG)
                    625:                        break;
                    626:                npvp = pvp->pvp_pgi.pgi_list.tqe_next;
                    627:                if (pvp->pvp_pgi.pgi_nfree > NPVPPG / 3) {
                    628:                        TAILQ_REMOVE(&pv_page_freelist, pvp, pvp_pgi.pgi_list);
                    629:                        TAILQ_INSERT_TAIL(&pv_page_collectlist, pvp,
                    630:                            pvp_pgi.pgi_list);
                    631:                        pv_nfree -= NPVPPG;
                    632:                        pvp->pvp_pgi.pgi_nfree = -1;
                    633:                }
                    634:        }
                    635:
                    636:        if (pv_page_collectlist.tqh_first == 0)
                    637:                return;
                    638:
                    639:        for (ph = &pv_table[page_cnt - 1]; ph >= &pv_table[0]; ph--) {
                    640:                if (ph->pv_pmap == 0)
                    641:                        continue;
                    642:                s = splvm();
                    643:                for (ppv = ph; (pv = ppv->pv_next) != 0; ) {
                    644:                        pvp = (struct pv_page *) trunc_page((vaddr_t)pv);
                    645:                        if (pvp->pvp_pgi.pgi_nfree == -1) {
                    646:                                pvp = pv_page_freelist.tqh_first;
                    647:                                if (--pvp->pvp_pgi.pgi_nfree == 0) {
                    648:                                        TAILQ_REMOVE(&pv_page_freelist, pvp,
                    649:                                            pvp_pgi.pgi_list);
                    650:                                }
                    651:                                npv = pvp->pvp_pgi.pgi_freelist;
                    652: #ifdef DIAGNOSTIC
                    653:                                if (npv == 0)
1.20      tsutsui   654:                                        panic("pmap_collect_pv: "
                    655:                                            "pgi_nfree inconsistent");
1.1       chs       656: #endif
                    657:                                pvp->pvp_pgi.pgi_freelist = npv->pv_next;
                    658:                                *npv = *pv;
                    659:                                ppv->pv_next = npv;
                    660:                                ppv = npv;
                    661:                        } else
                    662:                                ppv = pv;
                    663:                }
                    664:                splx(s);
                    665:        }
                    666:
                    667:        for (pvp = pv_page_collectlist.tqh_first; pvp; pvp = npvp) {
                    668:                npvp = pvp->pvp_pgi.pgi_list.tqe_next;
1.14      yamt      669:                uvm_km_free(kernel_map, (vaddr_t)pvp, PAGE_SIZE, UVM_KMF_WIRED);
1.1       chs       670:        }
                    671: }
                    672:
                    673: /*
                    674:  * pmap_map:
                    675:  *
                    676:  *     Used to map a range of physical addresses into kernel
                    677:  *     virtual address space.
                    678:  *
                     679:  *     For now, VM is already on; we only need to map the
                    680:  *     specified memory.
                    681:  *
                    682:  *     Note: THIS FUNCTION IS DEPRECATED, AND SHOULD BE REMOVED!
                    683:  */
                    684: vaddr_t
1.20      tsutsui   685: pmap_map(vaddr_t va, paddr_t spa, paddr_t epa, int prot)
1.1       chs       686: {
                    687:
                    688:        PMAP_DPRINTF(PDB_FOLLOW,
                    689:            ("pmap_map(%lx, %lx, %lx, %x)\n", va, spa, epa, prot));
                    690:
                    691:        while (spa < epa) {
                    692:                pmap_enter(pmap_kernel(), va, spa, prot, 0);
1.3       thorpej   693:                va += PAGE_SIZE;
                    694:                spa += PAGE_SIZE;
1.1       chs       695:        }
                    696:        pmap_update(pmap_kernel());
1.20      tsutsui   697:        return va;
1.1       chs       698: }
                    699:
                    700: /*
                    701:  * pmap_create:                        [ INTERFACE ]
                    702:  *
                    703:  *     Create and return a physical map.
                    704:  *
                    705:  *     Note: no locking is necessary in this function.
                    706:  */
                    707: pmap_t
1.20      tsutsui   708: pmap_create(void)
1.1       chs       709: {
                    710:        struct pmap *pmap;
                    711:
                    712:        PMAP_DPRINTF(PDB_FOLLOW|PDB_CREATE,
                    713:            ("pmap_create()\n"));
                    714:
                    715:        pmap = pool_get(&pmap_pmap_pool, PR_WAITOK);
                    716:        memset(pmap, 0, sizeof(*pmap));
                    717:        pmap_pinit(pmap);
1.20      tsutsui   718:        return pmap;
1.1       chs       719: }
                    720:
                    721: /*
                    722:  * pmap_pinit:
                    723:  *
                    724:  *     Initialize a preallocated and zeroed pmap structure.
                    725:  *
                    726:  *     Note: THIS FUNCTION SHOULD BE MOVED INTO pmap_create()!
                    727:  */
                    728: void
1.20      tsutsui   729: pmap_pinit(struct pmap *pmap)
1.1       chs       730: {
                    731:
                    732:        PMAP_DPRINTF(PDB_FOLLOW|PDB_CREATE,
                    733:            ("pmap_pinit(%p)\n", pmap));
                    734:
                    735:        /*
                    736:         * No need to allocate page table space yet but we do need a
                    737:         * valid segment table.  Initially, we point everyone at the
                    738:         * "null" segment table.  On the first pmap_enter, a real
                    739:         * segment table will be allocated.
                    740:         */
                    741:        pmap->pm_stab = Segtabzero;
                    742:        pmap->pm_stpa = Segtabzeropa;
                    743: #if defined(M68040) || defined(M68060)
                    744: #if defined(M68020) || defined(M68030)
                    745:        if (mmutype == MMU_68040)
                    746: #endif
                    747:                pmap->pm_stfree = protostfree;
                    748: #endif
                    749:        pmap->pm_count = 1;
                    750:        simple_lock_init(&pmap->pm_lock);
                    751: }
                    752:
                    753: /*
                    754:  * pmap_destroy:               [ INTERFACE ]
                    755:  *
                    756:  *     Drop the reference count on the specified pmap, releasing
                    757:  *     all resources if the reference count drops to zero.
                    758:  */
                    759: void
1.20      tsutsui   760: pmap_destroy(pmap_t pmap)
1.1       chs       761: {
                    762:        int count;
                    763:
                    764:        PMAP_DPRINTF(PDB_FOLLOW, ("pmap_destroy(%p)\n", pmap));
                    765:
                    766:        simple_lock(&pmap->pm_lock);
                    767:        count = --pmap->pm_count;
                    768:        simple_unlock(&pmap->pm_lock);
                    769:        if (count == 0) {
                    770:                pmap_release(pmap);
                    771:                pool_put(&pmap_pmap_pool, pmap);
                    772:        }
                    773: }
                    774:
                    775: /*
                    776:  * pmap_release:
                    777:  *
                     778:  *     Release the resources held by a pmap.
                    779:  *
                    780:  *     Note: THIS FUNCTION SHOULD BE MOVED INTO pmap_destroy().
                    781:  */
                    782: void
1.20      tsutsui   783: pmap_release(pmap_t pmap)
1.1       chs       784: {
                    785:
                    786:        PMAP_DPRINTF(PDB_FOLLOW, ("pmap_release(%p)\n", pmap));
                    787:
                    788: #ifdef notdef /* DIAGNOSTIC */
                    789:        /* count would be 0 from pmap_destroy... */
                    790:        simple_lock(&pmap->pm_lock);
                    791:        if (pmap->pm_count != 1)
                    792:                panic("pmap_release count");
                    793: #endif
                    794:
                    795:        if (pmap->pm_ptab) {
                    796:                pmap_remove(pmap_kernel(), (vaddr_t)pmap->pm_ptab,
                    797:                    (vaddr_t)pmap->pm_ptab + M68K_MAX_PTSIZE);
1.14      yamt      798:                uvm_km_pgremove((vaddr_t)pmap->pm_ptab,
                    799:                    (vaddr_t)pmap->pm_ptab + M68K_MAX_PTSIZE);
                    800:                uvm_km_free(pt_map, (vaddr_t)pmap->pm_ptab,
                    801:                    M68K_MAX_PTSIZE, UVM_KMF_VAONLY);
1.1       chs       802:        }
                    803:        KASSERT(pmap->pm_stab == Segtabzero);
                    804: }
                    805:
                    806: /*
                    807:  * pmap_reference:             [ INTERFACE ]
                    808:  *
                    809:  *     Add a reference to the specified pmap.
                    810:  */
                    811: void
1.20      tsutsui   812: pmap_reference(pmap_t pmap)
1.1       chs       813: {
                    814:        PMAP_DPRINTF(PDB_FOLLOW, ("pmap_reference(%p)\n", pmap));
                    815:
                    816:        simple_lock(&pmap->pm_lock);
                    817:        pmap->pm_count++;
                    818:        simple_unlock(&pmap->pm_lock);
                    819: }
                    820:
                    821: /*
                    822:  * pmap_activate:              [ INTERFACE ]
                    823:  *
                    824:  *     Activate the pmap used by the specified process.  This includes
                     825:  *     reloading the MMU context if this is the current process, and marking
                    826:  *     the pmap in use by the processor.
                    827:  *
                    828:  *     Note: we may only use spin locks here, since we are called
                    829:  *     by a critical section in cpu_switch()!
                    830:  */
                    831: void
1.20      tsutsui   832: pmap_activate(struct lwp *l)
1.1       chs       833: {
1.20      tsutsui   834:        pmap_t pmap = l->l_proc->p_vmspace->vm_map.pmap;
1.1       chs       835:
                    836:        PMAP_DPRINTF(PDB_FOLLOW|PDB_SEGTAB,
1.2       thorpej   837:            ("pmap_activate(%p)\n", l));
1.1       chs       838:
1.2       thorpej   839:        PMAP_ACTIVATE(pmap, curlwp == NULL || l->l_proc == curproc);
1.1       chs       840: }
                    841:
                    842: /*
                    843:  * pmap_deactivate:            [ INTERFACE ]
                    844:  *
                    845:  *     Mark that the pmap used by the specified process is no longer
                    846:  *     in use by the processor.
                    847:  *
                    848:  *     The comment above pmap_activate() wrt. locking applies here,
                    849:  *     as well.
                    850:  */
                    851: void
1.20      tsutsui   852: pmap_deactivate(struct lwp *l)
1.1       chs       853: {
                    854:
                    855:        /* No action necessary in this pmap implementation. */
                    856: }
                    857:
                    858: /*
                    859:  * pmap_remove:                        [ INTERFACE ]
                    860:  *
                    861:  *     Remove the given range of addresses from the specified map.
                    862:  *
                    863:  *     It is assumed that the start and end are properly
                    864:  *     rounded to the page size.
                    865:  */
                    866: void
1.20      tsutsui   867: pmap_remove(pmap_t pmap, vaddr_t sva, vaddr_t eva)
1.1       chs       868: {
                    869:
                    870:        pmap_do_remove(pmap, sva, eva, 1);
                    871: }
                    872:
                    873: void
1.20      tsutsui   874: pmap_do_remove(pmap_t pmap, vaddr_t sva, vaddr_t eva, int remove_wired)
1.1       chs       875: {
                    876:        vaddr_t nssva;
                    877:        pt_entry_t *pte;
                    878:        int flags;
                    879: #ifdef M68K_MMU_HP
1.11      he        880:        boolean_t firstpage = TRUE, needcflush = FALSE;
1.1       chs       881: #endif
                    882:
                    883:        PMAP_DPRINTF(PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT,
                    884:            ("pmap_remove(%p, %lx, %lx)\n", pmap, sva, eva));
                    885:
                    886:        flags = active_pmap(pmap) ? PRM_TFLUSH : 0;
                    887:        while (sva < eva) {
                    888:                nssva = m68k_trunc_seg(sva) + NBSEG;
                    889:                if (nssva == 0 || nssva > eva)
                    890:                        nssva = eva;
                    891:
                    892:                /*
                    893:                 * Invalidate every valid mapping within this segment.
                    894:                 * If remove_wired is zero, skip the wired pages.
                    895:                 */
                    896:
                    897:                pte = pmap_pte(pmap, sva);
                    898:                while (sva < nssva) {
                    899:
                    900:                        /*
                    901:                         * If this segment is unallocated,
                    902:                         * skip to the next segment boundary.
                    903:                         */
                    904:
                    905:                        if (!pmap_ste_v(pmap, sva)) {
                    906:                                sva = nssva;
                    907:                                break;
                    908:                        }
                    909:
                    910:
                    911:
                    912:                        if (pmap_pte_v(pte) &&
                    913:                            (remove_wired || !pmap_pte_w(pte))) {
                    914: #ifdef M68K_MMU_HP
                    915:                                if (pmap_aliasmask) {
                    916:
                    917:                                        /*
                    918:                                         * Purge kernel side of VAC to ensure
                    919:                                         * we get the correct state of any
                    920:                                         * hardware maintained bits.
                    921:                                         */
                    922:
                    923:                                        if (firstpage) {
                    924:                                                DCIS();
                    925:                                        }
                    926:
                    927:                                        /*
                    928:                                         * Remember if we may need to
                    929:                                         * flush the VAC due to a non-CI
                    930:                                         * mapping.
                    931:                                         */
                    932:
                    933:                                        if (!needcflush && !pmap_pte_ci(pte))
                    934:                                                needcflush = TRUE;
                    935:
                    936:                                }
1.11      he        937:                                firstpage = FALSE;
1.1       chs       938: #endif
                    939:                                pmap_remove_mapping(pmap, sva, pte, flags);
                    940:                        }
                    941:                        pte++;
1.3       thorpej   942:                        sva += PAGE_SIZE;
1.1       chs       943:                }
                    944:        }
                    945:
                    946: #ifdef M68K_MMU_HP
                    947:
                    948:        /*
                    949:         * Didn't do anything, no need for cache flushes
                    950:         */
                    951:
                    952:        if (firstpage)
                    953:                return;
                    954:
                    955:        /*
                    956:         * In a couple of cases, we don't need to worry about flushing
                    957:         * the VAC:
                    958:         *      1. if this is a kernel mapping,
                    959:         *         we have already done it
                    960:         *      2. if it is a user mapping not for the current process,
                    961:         *         it won't be there
                    962:         */
                    963:
                    964:        if (pmap_aliasmask && !active_user_pmap(pmap))
                    965:                needcflush = FALSE;
                    966:        if (needcflush) {
                    967:                if (pmap == pmap_kernel()) {
                    968:                        DCIS();
                    969:                } else {
                    970:                        DCIU();
                    971:                }
                    972:        }
                    973: #endif
                    974: }
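
/*
 * Editor's sketch (not part of the original source): the usual caller
 * pattern for the remove path above.  `map' is a hypothetical vm_map
 * with an attached pmap; pmap_update() commits deferred pmap operations
 * as required by the pmap(9) interface.
 */
#if 0
        pmap_remove(map->pmap, sva, eva);       /* tear down [sva, eva) */
        pmap_update(map->pmap);                 /* commit the removal */
#endif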
                    975:
                    976: /*
                    977:  * pmap_page_protect:          [ INTERFACE ]
                    978:  *
                    979:  *     Lower the permission for all mappings to a given page to
                    980:  *     the permissions specified.
                    981:  */
                    982: void
1.20      tsutsui   983: pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
1.1       chs       984: {
                    985:        paddr_t pa = VM_PAGE_TO_PHYS(pg);
                    986:        struct pv_entry *pv;
                    987:        pt_entry_t *pte;
                    988:        int s;
                    989:
                    990: #ifdef DEBUG
                    991:        if ((pmapdebug & (PDB_FOLLOW|PDB_PROTECT)) ||
                    992:            (prot == VM_PROT_NONE && (pmapdebug & PDB_REMOVE)))
                    993:                printf("pmap_page_protect(%p, %x)\n", pg, prot);
                    994: #endif
                    995:
                    996:        switch (prot) {
                    997:        case VM_PROT_READ|VM_PROT_WRITE:
                    998:        case VM_PROT_ALL:
                    999:                return;
                   1000:
                   1001:        /* copy_on_write */
                   1002:        case VM_PROT_READ:
                   1003:        case VM_PROT_READ|VM_PROT_EXECUTE:
                   1004:                pmap_changebit(pa, PG_RO, ~0);
                   1005:                return;
                   1006:
                   1007:        /* remove_all */
                   1008:        default:
                   1009:                break;
                   1010:        }
                   1011:
                   1012:        pv = pa_to_pvh(pa);
                   1013:        s = splvm();
                   1014:        while (pv->pv_pmap != NULL) {
                   1015:
                   1016:                pte = pmap_pte(pv->pv_pmap, pv->pv_va);
                   1017: #ifdef DEBUG
                   1018:                if (!pmap_ste_v(pv->pv_pmap, pv->pv_va) ||
                   1019:                    pmap_pte_pa(pte) != pa)
                   1020:                        panic("pmap_page_protect: bad mapping");
                   1021: #endif
                   1022:                pmap_remove_mapping(pv->pv_pmap, pv->pv_va,
                   1023:                    pte, PRM_TFLUSH|PRM_CFLUSH);
                   1024:        }
                   1025:        splx(s);
                   1026: }
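
/*
 * Editor's sketch (not part of the original source): how the MI VM
 * layer typically drives pmap_page_protect().  `pg' is assumed to be
 * a managed page obtained elsewhere.
 */
#if 0
        pmap_page_protect(pg, VM_PROT_READ);    /* write-protect (COW) */
        pmap_page_protect(pg, VM_PROT_NONE);    /* remove every mapping */
#endif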
                   1027:
                   1028: /*
                   1029:  * pmap_protect:               [ INTERFACE ]
                   1030:  *
                    1031:  *     Set the physical protection on the specified range of this map
                   1032:  *     as requested.
                   1033:  */
                   1034: void
1.20      tsutsui  1035: pmap_protect(pmap_t pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
1.1       chs      1036: {
                   1037:        vaddr_t nssva;
                   1038:        pt_entry_t *pte;
                   1039:        boolean_t firstpage, needtflush;
                   1040:        int isro;
                   1041:
                   1042:        PMAP_DPRINTF(PDB_FOLLOW|PDB_PROTECT,
                   1043:            ("pmap_protect(%p, %lx, %lx, %x)\n",
                   1044:            pmap, sva, eva, prot));
                   1045:
                   1046: #ifdef PMAPSTATS
                   1047:        protect_stats.calls++;
                   1048: #endif
                   1049:        if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
                   1050:                pmap_remove(pmap, sva, eva);
                   1051:                return;
                   1052:        }
                   1053:        isro = pte_prot(pmap, prot);
                   1054:        needtflush = active_pmap(pmap);
                   1055:        firstpage = TRUE;
                   1056:        while (sva < eva) {
                   1057:                nssva = m68k_trunc_seg(sva) + NBSEG;
                   1058:                if (nssva == 0 || nssva > eva)
                   1059:                        nssva = eva;
                   1060:
                   1061:                /*
                   1062:                 * If VA belongs to an unallocated segment,
                   1063:                 * skip to the next segment boundary.
                   1064:                 */
                   1065:
                   1066:                if (!pmap_ste_v(pmap, sva)) {
                   1067:                        sva = nssva;
                   1068:                        continue;
                   1069:                }
                   1070:
                   1071:                /*
                   1072:                 * Change protection on mapping if it is valid and doesn't
                   1073:                 * already have the correct protection.
                   1074:                 */
                   1075:
                   1076:                pte = pmap_pte(pmap, sva);
                   1077:                while (sva < nssva) {
                   1078:                        if (pmap_pte_v(pte) && pmap_pte_prot_chg(pte, isro)) {
                   1079: #ifdef M68K_MMU_HP
                   1080:
                   1081:                                /*
                   1082:                                 * Purge kernel side of VAC to ensure we
                   1083:                                 * get the correct state of any hardware
                   1084:                                 * maintained bits.
                   1085:                                 *
                   1086:                                 * XXX do we need to clear the VAC in
                   1087:                                 * general to reflect the new protection?
                   1088:                                 */
                   1089:
                   1090:                                if (firstpage && pmap_aliasmask)
                   1091:                                        DCIS();
                   1092: #endif
                   1093:
                   1094: #if defined(M68040) || defined(M68060)
                   1095:
                   1096:                                /*
                   1097:                                 * Clear caches if making RO (see section
                   1098:                                 * "7.3 Cache Coherency" in the manual).
                   1099:                                 */
                   1100:
                   1101: #if defined(M68020) || defined(M68030)
                   1102:                                if (isro && mmutype == MMU_68040)
                   1103: #else
                   1104:                                if (isro)
                   1105: #endif
                   1106:                                {
                   1107:                                        paddr_t pa = pmap_pte_pa(pte);
                   1108:
                   1109:                                        DCFP(pa);
                   1110:                                        ICPP(pa);
                   1111:                                }
                   1112: #endif
                   1113:                                pmap_pte_set_prot(pte, isro);
                   1114:                                if (needtflush)
                   1115:                                        TBIS(sva);
                   1116:                                firstpage = FALSE;
                   1117:                        }
                   1118:                        pte++;
1.3       thorpej  1119:                        sva += PAGE_SIZE;
1.1       chs      1120:                }
                   1121:        }
                   1122: }
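
/*
 * Editor's sketch of the segment-walk idiom used by pmap_protect(),
 * pmap_remove() and pmap_kremove() (standalone, with assumed locals
 * `start' and `end'): advance one NBSEG-sized segment at a time,
 * clamping the per-segment end to eva and catching wraparound at 0.
 */
#if 0
        vaddr_t sva = start, eva = end, nssva;

        while (sva < eva) {
                nssva = m68k_trunc_seg(sva) + NBSEG;    /* next segment */
                if (nssva == 0 || nssva > eva)  /* wrapped, or past end */
                        nssva = eva;
                /* ... operate on the PTEs covering [sva, nssva) ... */
                sva = nssva;
        }
#endif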
                   1123:
                   1124: /*
                   1125:  * pmap_enter:                 [ INTERFACE ]
                   1126:  *
                   1127:  *     Insert the given physical page (pa) at
                   1128:  *     the specified virtual address (va) in the
                   1129:  *     target physical map with the protection requested.
                   1130:  *
                   1131:  *     If specified, the page will be wired down, meaning
                   1132:  *     that the related pte cannot be reclaimed.
                   1133:  *
                   1134:  *     Note: This is the only routine which MAY NOT lazy-evaluate
                    1135:  *     or lose information.  That is, this routine must actually
                   1136:  *     insert this page into the given map NOW.
                   1137:  */
                   1138: int
1.20      tsutsui  1139: pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
1.1       chs      1140: {
                   1141:        pt_entry_t *pte;
                   1142:        int npte;
                   1143:        paddr_t opa;
                   1144:        boolean_t cacheable = TRUE;
                   1145:        boolean_t checkpv = TRUE;
                   1146:        boolean_t wired = (flags & PMAP_WIRED) != 0;
1.21.4.1! bouyer   1147:        boolean_t can_fail = (flags & PMAP_CANFAIL) != 0;
1.1       chs      1148:
                   1149:        PMAP_DPRINTF(PDB_FOLLOW|PDB_ENTER,
                   1150:            ("pmap_enter(%p, %lx, %lx, %x, %x)\n",
                   1151:            pmap, va, pa, prot, wired));
                   1152:
                   1153: #ifdef DIAGNOSTIC
                   1154:        /*
                   1155:         * pmap_enter() should never be used for CADDR1 and CADDR2.
                   1156:         */
                   1157:        if (pmap == pmap_kernel() &&
                   1158:            (va == (vaddr_t)CADDR1 || va == (vaddr_t)CADDR2))
                   1159:                panic("pmap_enter: used for CADDR1 or CADDR2");
                   1160: #endif
                   1161:
                   1162:        /*
                   1163:         * For user mapping, allocate kernel VM resources if necessary.
                   1164:         */
1.21.4.1! bouyer   1165:        if (pmap->pm_ptab == NULL) {
1.1       chs      1166:                pmap->pm_ptab = (pt_entry_t *)
1.14      yamt     1167:                    uvm_km_alloc(pt_map, M68K_MAX_PTSIZE, 0,
1.21.4.1! bouyer   1168:                    UVM_KMF_VAONLY |
        !          1169:                    (can_fail ? UVM_KMF_NOWAIT : UVM_KMF_WAITVA));
        !          1170:                if (pmap->pm_ptab == NULL)
        !          1171:                        return ENOMEM;
        !          1172:        }
1.1       chs      1173:
                   1174:        /*
                   1175:         * Segment table entry not valid, we need a new PT page
                   1176:         */
1.21.4.1! bouyer   1177:        if (!pmap_ste_v(pmap, va)) {
        !          1178:                int err = pmap_enter_ptpage(pmap, va, can_fail);
        !          1179:                if (err)
        !          1180:                        return err;
        !          1181:        }
1.1       chs      1182:
                   1183:        pa = m68k_trunc_page(pa);
                   1184:        pte = pmap_pte(pmap, va);
                   1185:        opa = pmap_pte_pa(pte);
                   1186:
                   1187:        PMAP_DPRINTF(PDB_ENTER, ("enter: pte %p, *pte %x\n", pte, *pte));
                   1188:
                   1189:        /*
                   1190:         * Mapping has not changed, must be protection or wiring change.
                   1191:         */
                   1192:        if (opa == pa) {
                   1193:                /*
                   1194:                 * Wiring change, just update stats.
                   1195:                 * We don't worry about wiring PT pages as they remain
                   1196:                 * resident as long as there are valid mappings in them.
                   1197:                 * Hence, if a user page is wired, the PT page will be also.
                   1198:                 */
                   1199:                if (pmap_pte_w_chg(pte, wired ? PG_W : 0)) {
                   1200:                        PMAP_DPRINTF(PDB_ENTER,
                   1201:                            ("enter: wiring change -> %x\n", wired));
                   1202:                        if (wired)
                   1203:                                pmap->pm_stats.wired_count++;
                   1204:                        else
                   1205:                                pmap->pm_stats.wired_count--;
                   1206:                }
                   1207:                /*
                   1208:                 * Retain cache inhibition status
                   1209:                 */
                   1210:                checkpv = FALSE;
                   1211:                if (pmap_pte_ci(pte))
                   1212:                        cacheable = FALSE;
                   1213:                goto validate;
                   1214:        }
                   1215:
                   1216:        /*
                   1217:         * Mapping has changed, invalidate old range and fall through to
                   1218:         * handle validating new mapping.
                   1219:         */
                   1220:        if (opa) {
                   1221:                PMAP_DPRINTF(PDB_ENTER,
                   1222:                    ("enter: removing old mapping %lx\n", va));
                   1223:                pmap_remove_mapping(pmap, va, pte,
                   1224:                    PRM_TFLUSH|PRM_CFLUSH|PRM_KEEPPTPAGE);
                   1225:        }
                   1226:
                   1227:        /*
                   1228:         * If this is a new user mapping, increment the wiring count
                   1229:         * on this PT page.  PT pages are wired down as long as there
                   1230:         * is a valid mapping in the page.
                   1231:         */
                   1232:        if (pmap != pmap_kernel())
                   1233:                pmap_ptpage_addref(trunc_page((vaddr_t)pte));
                   1234:
                   1235:        /*
                    1236:         * Enter on the PV list if part of our managed memory.
                   1237:         * Note that we raise IPL while manipulating pv_table
                   1238:         * since pmap_enter can be called at interrupt time.
                   1239:         */
                   1240:        if (PAGE_IS_MANAGED(pa)) {
                   1241:                struct pv_entry *pv, *npv;
                   1242:                int s;
                   1243:
                   1244:                pv = pa_to_pvh(pa);
                   1245:                s = splvm();
                   1246:
                   1247:                PMAP_DPRINTF(PDB_ENTER,
                   1248:                    ("enter: pv at %p: %lx/%p/%p\n",
                   1249:                    pv, pv->pv_va, pv->pv_pmap, pv->pv_next));
                   1250:                /*
                   1251:                 * No entries yet, use header as the first entry
                   1252:                 */
                   1253:                if (pv->pv_pmap == NULL) {
                   1254:                        pv->pv_va = va;
                   1255:                        pv->pv_pmap = pmap;
                   1256:                        pv->pv_next = NULL;
                   1257:                        pv->pv_ptste = NULL;
                   1258:                        pv->pv_ptpmap = NULL;
                   1259:                        pv->pv_flags = 0;
                   1260:                }
                   1261:                /*
                   1262:                 * There is at least one other VA mapping this page.
                   1263:                 * Place this entry after the header.
                   1264:                 */
                   1265:                else {
                   1266: #ifdef DEBUG
                   1267:                        for (npv = pv; npv; npv = npv->pv_next)
                   1268:                                if (pmap == npv->pv_pmap && va == npv->pv_va)
                   1269:                                        panic("pmap_enter: already in pv_tab");
                   1270: #endif
                   1271:                        npv = pmap_alloc_pv();
                   1272:                        npv->pv_va = va;
                   1273:                        npv->pv_pmap = pmap;
                   1274:                        npv->pv_next = pv->pv_next;
                   1275:                        npv->pv_ptste = NULL;
                   1276:                        npv->pv_ptpmap = NULL;
                   1277:                        npv->pv_flags = 0;
                   1278:                        pv->pv_next = npv;
                   1279:
                   1280: #ifdef M68K_MMU_HP
                   1281:
                   1282:                        /*
                   1283:                         * Since there is another logical mapping for the
                   1284:                         * same page we may need to cache-inhibit the
                   1285:                         * descriptors on those CPUs with external VACs.
                   1286:                         * We don't need to CI if:
                   1287:                         *
                    1288:                         * - No two mappings belong to the same user pmap.
                   1289:                         *   Since the cache is flushed on context switches
                   1290:                         *   there is no problem between user processes.
                   1291:                         *
                   1292:                         * - Mappings within a single pmap are a certain
                   1293:                         *   magic distance apart.  VAs at these appropriate
                   1294:                         *   boundaries map to the same cache entries or
                   1295:                         *   otherwise don't conflict.
                   1296:                         *
                   1297:                         * To keep it simple, we only check for these special
                   1298:                         * cases if there are only two mappings, otherwise we
                   1299:                         * punt and always CI.
                   1300:                         *
                   1301:                         * Note that there are no aliasing problems with the
                   1302:                         * on-chip data-cache when the WA bit is set.
                   1303:                         */
                   1304:
                   1305:                        if (pmap_aliasmask) {
                   1306:                                if (pv->pv_flags & PV_CI) {
                   1307:                                        PMAP_DPRINTF(PDB_CACHE,
                   1308:                                            ("enter: pa %lx already CI'ed\n",
                   1309:                                            pa));
                   1310:                                        checkpv = cacheable = FALSE;
                   1311:                                } else if (npv->pv_next ||
                   1312:                                           ((pmap == pv->pv_pmap ||
                   1313:                                             pmap == pmap_kernel() ||
                   1314:                                             pv->pv_pmap == pmap_kernel()) &&
                   1315:                                            ((pv->pv_va & pmap_aliasmask) !=
                   1316:                                             (va & pmap_aliasmask)))) {
                   1317:                                        PMAP_DPRINTF(PDB_CACHE,
                   1318:                                            ("enter: pa %lx CI'ing all\n",
                   1319:                                            pa));
                   1320:                                        cacheable = FALSE;
                   1321:                                        pv->pv_flags |= PV_CI;
                   1322:                                }
                   1323:                        }
                   1324: #endif
                   1325:                }
                   1326:
                   1327:                /*
                   1328:                 * Speed pmap_is_referenced() or pmap_is_modified() based
                   1329:                 * on the hint provided in access_type.
                   1330:                 */
                   1331: #ifdef DIAGNOSTIC
                   1332:                if ((flags & VM_PROT_ALL) & ~prot)
                   1333:                        panic("pmap_enter: access_type exceeds prot");
                   1334: #endif
                   1335:                if (flags & VM_PROT_WRITE)
                   1336:                        *pa_to_attribute(pa) |= (PG_U|PG_M);
                   1337:                else if (flags & VM_PROT_ALL)
                   1338:                        *pa_to_attribute(pa) |= PG_U;
                   1339:
                   1340:                splx(s);
                   1341:        }
                   1342:        /*
                   1343:         * Assumption: if it is not part of our managed memory
                    1344:         * then it must be device memory, which may be volatile.
                   1345:         */
                   1346:        else if (pmap_initialized) {
                   1347:                checkpv = cacheable = FALSE;
                   1348:        }
                   1349:
                   1350:        /*
                   1351:         * Increment counters
                   1352:         */
                   1353:        pmap->pm_stats.resident_count++;
                   1354:        if (wired)
                   1355:                pmap->pm_stats.wired_count++;
                   1356:
                   1357: validate:
                   1358: #ifdef M68K_MMU_HP
                   1359:        /*
                   1360:         * Purge kernel side of VAC to ensure we get correct state
                   1361:         * of HW bits so we don't clobber them.
                   1362:         */
                   1363:        if (pmap_aliasmask)
                   1364:                DCIS();
                   1365: #endif
                   1366:
                   1367:        /*
                   1368:         * Build the new PTE.
                   1369:         */
                   1370:
                   1371:        npte = pa | pte_prot(pmap, prot) | (*pte & (PG_M|PG_U)) | PG_V;
                   1372:        if (wired)
                   1373:                npte |= PG_W;
                   1374:        if (!checkpv && !cacheable)
                   1375: #if defined(M68040) || defined(M68060)
                   1376: #if defined(M68020) || defined(M68030)
                   1377:                npte |= (mmutype == MMU_68040 ? PG_CIN : PG_CI);
                   1378: #else
                   1379:                npte |= PG_CIN;
                   1380: #endif
                   1381: #else
                   1382:                npte |= PG_CI;
                   1383: #endif
                   1384: #if defined(M68040) || defined(M68060)
                   1385: #if defined(M68020) || defined(M68030)
                   1386:        else if (mmutype == MMU_68040 && (npte & (PG_PROT|PG_CI)) == PG_RW)
                   1387: #else
                   1388:        else if ((npte & (PG_PROT|PG_CI)) == PG_RW)
                   1389: #endif
                   1390:                npte |= PG_CCB;
                   1391: #endif
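
        /*
         * Editor's note: for example, a wired, writable, cacheable
         * mapping on a 68040 ends up as
         * npte = pa | pte_prot(pmap, prot) | PG_V | PG_W | PG_CCB,
         * while a cache-inhibited device mapping gets PG_CIN
         * (PG_CI on 68020/68030-class MMUs) instead of PG_CCB.
         */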
                   1392:
                   1393:        PMAP_DPRINTF(PDB_ENTER, ("enter: new pte value %x\n", npte));
                   1394:
                   1395:        /*
                    1396:         * Remember (reusing `wired') if this was a wiring-only change.
                   1397:         * If so, we need not flush the TLB and caches.
                   1398:         */
                   1399:
                   1400:        wired = ((*pte ^ npte) == PG_W);
                   1401: #if defined(M68040) || defined(M68060)
                   1402: #if defined(M68020) || defined(M68030)
                   1403:        if (mmutype == MMU_68040 && !wired)
                   1404: #else
                   1405:        if (!wired)
                   1406: #endif
                   1407:        {
                   1408:                DCFP(pa);
                   1409:                ICPP(pa);
                   1410:        }
                   1411: #endif
                   1412:        *pte = npte;
                   1413:        if (!wired && active_pmap(pmap))
                   1414:                TBIS(va);
                   1415: #ifdef M68K_MMU_HP
                   1416:        /*
                   1417:         * The following is executed if we are entering a second
                   1418:         * (or greater) mapping for a physical page and the mappings
                   1419:         * may create an aliasing problem.  In this case we must
                   1420:         * cache inhibit the descriptors involved and flush any
                   1421:         * external VAC.
                   1422:         */
                   1423:        if (checkpv && !cacheable) {
                   1424:                pmap_changebit(pa, PG_CI, ~0);
                   1425:                DCIA();
                   1426: #ifdef DEBUG
                   1427:                if ((pmapdebug & (PDB_CACHE|PDB_PVDUMP)) ==
                   1428:                    (PDB_CACHE|PDB_PVDUMP))
                   1429:                        pmap_pvdump(pa);
                   1430: #endif
                   1431:        }
                   1432: #endif
                   1433: #ifdef DEBUG
                   1434:        if ((pmapdebug & PDB_WIRING) && pmap != pmap_kernel())
                   1435:                pmap_check_wiring("enter", trunc_page((vaddr_t)pte));
                   1436: #endif
                   1437:
                   1438:        return 0;
                   1439: }
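
/*
 * Editor's sketch (not part of the original source): entering a wired
 * mapping that is allowed to fail.  With PMAP_CANFAIL in the flags,
 * the PT-page allocation above returns ENOMEM instead of sleeping.
 */
#if 0
        int error;

        error = pmap_enter(pmap, va, pa, VM_PROT_READ | VM_PROT_WRITE,
            VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED | PMAP_CANFAIL);
        if (error != 0)
                return error;           /* caller can retry or fail */
        pmap_update(pmap);
#endif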
                   1440:
                   1441: void
1.20      tsutsui  1442: pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot)
1.1       chs      1443: {
1.20      tsutsui  1444:        pmap_t pmap = pmap_kernel();
1.1       chs      1445:        pt_entry_t *pte;
                   1446:        int s, npte;
                   1447:
                   1448:        PMAP_DPRINTF(PDB_FOLLOW|PDB_ENTER,
                   1449:            ("pmap_kenter_pa(%lx, %lx, %x)\n", va, pa, prot));
                   1450:
                   1451:        /*
                   1452:         * Segment table entry not valid, we need a new PT page
                   1453:         */
                   1454:
                   1455:        if (!pmap_ste_v(pmap, va)) {
                   1456:                s = splvm();
1.21.4.1! bouyer   1457:                pmap_enter_ptpage(pmap, va, FALSE);
1.1       chs      1458:                splx(s);
                   1459:        }
                   1460:
                   1461:        pa = m68k_trunc_page(pa);
                   1462:        pte = pmap_pte(pmap, va);
                   1463:
                   1464:        PMAP_DPRINTF(PDB_ENTER, ("enter: pte %p, *pte %x\n", pte, *pte));
                   1465:        KASSERT(!pmap_pte_v(pte));
                   1466:
                   1467:        /*
                   1468:         * Increment counters
                   1469:         */
                   1470:
                   1471:        pmap->pm_stats.resident_count++;
                   1472:        pmap->pm_stats.wired_count++;
                   1473:
                   1474:        /*
                   1475:         * Build the new PTE.
                   1476:         */
                   1477:
                   1478:        npte = pa | pte_prot(pmap, prot) | PG_V | PG_W;
                   1479: #if defined(M68040) || defined(M68060)
                   1480: #if defined(M68020) || defined(M68030)
                   1481:        if (mmutype == MMU_68040 && (npte & PG_PROT) == PG_RW)
                   1482: #else
                   1483:        if ((npte & PG_PROT) == PG_RW)
                   1484: #endif
                   1485:                npte |= PG_CCB;
                   1486:
                   1487:        if (mmutype == MMU_68040) {
                   1488:                DCFP(pa);
                   1489:                ICPP(pa);
                   1490:        }
                   1491: #endif
                   1492:
                   1493:        *pte = npte;
                   1494:        TBIS(va);
                   1495: }
                   1496:
                   1497: void
1.20      tsutsui  1498: pmap_kremove(vaddr_t va, vsize_t size)
1.1       chs      1499: {
1.20      tsutsui  1500:        pmap_t pmap = pmap_kernel();
1.1       chs      1501:        pt_entry_t *pte;
                   1502:        vaddr_t nssva;
                   1503:        vaddr_t eva = va + size;
                   1504: #ifdef M68K_MMU_HP
                   1505:        boolean_t firstpage, needcflush;
                   1506: #endif
                   1507:
                   1508:        PMAP_DPRINTF(PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT,
                   1509:            ("pmap_kremove(%lx, %lx)\n", va, size));
                   1510:
                   1511: #ifdef M68K_MMU_HP
                   1512:        firstpage = TRUE;
                   1513:        needcflush = FALSE;
                   1514: #endif
                   1515:        while (va < eva) {
                   1516:                nssva = m68k_trunc_seg(va) + NBSEG;
                   1517:                if (nssva == 0 || nssva > eva)
                   1518:                        nssva = eva;
                   1519:
                   1520:                /*
                   1521:                 * If VA belongs to an unallocated segment,
                   1522:                 * skip to the next segment boundary.
                   1523:                 */
                   1524:
                   1525:                if (!pmap_ste_v(pmap, va)) {
                   1526:                        va = nssva;
                   1527:                        continue;
                   1528:                }
                   1529:
                   1530:                /*
                   1531:                 * Invalidate every valid mapping within this segment.
                   1532:                 */
                   1533:
                   1534:                pte = pmap_pte(pmap, va);
                   1535:                while (va < nssva) {
                   1536:                        if (!pmap_pte_v(pte)) {
                   1537:                                pte++;
1.3       thorpej  1538:                                va += PAGE_SIZE;
1.1       chs      1539:                                continue;
                   1540:                        }
                   1541: #ifdef M68K_MMU_HP
                   1542:                        if (pmap_aliasmask) {
                   1543:
                   1544:                                /*
                   1545:                                 * Purge kernel side of VAC to ensure
                   1546:                                 * we get the correct state of any
                   1547:                                 * hardware maintained bits.
                   1548:                                 */
                   1549:
                   1550:                                if (firstpage) {
                   1551:                                        DCIS();
                   1552:                                        firstpage = FALSE;
                   1553:                                }
                   1554:
                   1555:                                /*
                   1556:                                 * Remember if we may need to
                   1557:                                 * flush the VAC.
                   1558:                                 */
                   1559:
                   1560:                                needcflush = TRUE;
                   1561:                        }
                   1562: #endif
                   1563:                        pmap->pm_stats.wired_count--;
                   1564:                        pmap->pm_stats.resident_count--;
                   1565:                        *pte = PG_NV;
                   1566:                        TBIS(va);
                   1567:                        pte++;
1.3       thorpej  1568:                        va += PAGE_SIZE;
1.1       chs      1569:                }
                   1570:        }
                   1571:
                   1572: #ifdef M68K_MMU_HP
                   1573:
                   1574:        /*
                   1575:         * In a couple of cases, we don't need to worry about flushing
                   1576:         * the VAC:
                   1577:         *      1. if this is a kernel mapping,
                   1578:         *         we have already done it
                   1579:         *      2. if it is a user mapping not for the current process,
                   1580:         *         it won't be there
                   1581:         */
                   1582:
                   1583:        if (pmap_aliasmask && !active_user_pmap(pmap))
                   1584:                needcflush = FALSE;
                   1585:        if (needcflush) {
                   1586:                if (pmap == pmap_kernel()) {
                   1587:                        DCIS();
                   1588:                } else {
                   1589:                        DCIU();
                   1590:                }
                   1591:        }
                   1592: #endif
                   1593: }
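
/*
 * Editor's sketch (not part of the original source): the unmanaged
 * kenter/kremove pair is commonly used for short-lived kernel windows
 * over known physical pages.  `pa' is an assumed page-aligned address.
 */
#if 0
        vaddr_t va = uvm_km_alloc(kernel_map, PAGE_SIZE, 0, UVM_KMF_VAONLY);

        pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE);
        /* ... access the page through va ... */
        pmap_kremove(va, PAGE_SIZE);
        pmap_update(pmap_kernel());
        uvm_km_free(kernel_map, va, PAGE_SIZE, UVM_KMF_VAONLY);
#endif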
                   1594:
                   1595: /*
                   1596:  * pmap_unwire:                        [ INTERFACE ]
                   1597:  *
                   1598:  *     Clear the wired attribute for a map/virtual-address pair.
                   1599:  *
                   1600:  *     The mapping must already exist in the pmap.
                   1601:  */
                   1602: void
1.20      tsutsui  1603: pmap_unwire(pmap_t pmap, vaddr_t va)
1.1       chs      1604: {
                   1605:        pt_entry_t *pte;
                   1606:
                   1607:        PMAP_DPRINTF(PDB_FOLLOW,
                   1608:            ("pmap_unwire(%p, %lx)\n", pmap, va));
                   1609:
                   1610:        pte = pmap_pte(pmap, va);
                   1611:
                   1612:        /*
                   1613:         * If wiring actually changed (always?) clear the wire bit and
                   1614:         * update the wire count.  Note that wiring is not a hardware
                   1615:         * characteristic so there is no need to invalidate the TLB.
                   1616:         */
                   1617:
                   1618:        if (pmap_pte_w_chg(pte, 0)) {
                   1619:                pmap_pte_set_w(pte, FALSE);
                   1620:                pmap->pm_stats.wired_count--;
                   1621:        }
                   1622: }
                   1623:
                   1624: /*
                   1625:  * pmap_extract:               [ INTERFACE ]
                   1626:  *
                   1627:  *     Extract the physical address associated with the given
                   1628:  *     pmap/virtual address pair.
                   1629:  */
                   1630: boolean_t
1.20      tsutsui  1631: pmap_extract(pmap_t pmap, vaddr_t va, paddr_t *pap)
1.1       chs      1632: {
                   1633:        paddr_t pa;
                   1634:        u_int pte;
1.8       cl       1635:
1.1       chs      1636:        PMAP_DPRINTF(PDB_FOLLOW,
                   1637:            ("pmap_extract(%p, %lx) -> ", pmap, va));
                   1638:
                   1639:        if (pmap_ste_v(pmap, va)) {
                   1640:                pte = *(u_int *)pmap_pte(pmap, va);
                   1641:                if (pte) {
                   1642:                        pa = (pte & PG_FRAME) | (va & ~PG_FRAME);
                   1643:                        if (pap != NULL)
                   1644:                                *pap = pa;
1.9       mycroft  1645: #ifdef DEBUG
                   1646:                        if (pmapdebug & PDB_FOLLOW)
                   1647:                                printf("%lx\n", pa);
                   1648: #endif
1.20      tsutsui  1649:                        return TRUE;
1.1       chs      1650:                }
                   1651:        }
                   1652: #ifdef DEBUG
1.9       mycroft  1653:        if (pmapdebug & PDB_FOLLOW)
                   1654:                printf("failed\n");
1.1       chs      1655: #endif
1.20      tsutsui  1656:        return FALSE;
1.1       chs      1657: }
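
/*
 * Editor's sketch (not part of the original source): typical use of
 * pmap_extract() to translate a kernel virtual address.
 */
#if 0
        paddr_t pa;

        if (pmap_extract(pmap_kernel(), va, &pa) == FALSE)
                panic("no mapping for va 0x%lx", va);
#endif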
                   1658:
                   1659: /*
                   1660:  * pmap_copy:          [ INTERFACE ]
                   1661:  *
                   1662:  *     Copy the mapping range specified by src_addr/len
                   1663:  *     from the source map to the range dst_addr/len
                   1664:  *     in the destination map.
                   1665:  *
                   1666:  *     This routine is only advisory and need not do anything.
                   1667:  */
                   1668: void
1.20      tsutsui  1669: pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vaddr_t dst_addr, vsize_t len,
                   1670:     vaddr_t src_addr)
1.1       chs      1671: {
                   1672:
                   1673:        PMAP_DPRINTF(PDB_FOLLOW,
                   1674:            ("pmap_copy(%p, %p, %lx, %lx, %lx)\n",
                   1675:            dst_pmap, src_pmap, dst_addr, len, src_addr));
                   1676: }
                   1677:
                   1678: /*
                   1679:  * pmap_collect:               [ INTERFACE ]
                   1680:  *
                   1681:  *     Garbage collects the physical map system for pages which are no
                   1682:  *     longer used.  Success need not be guaranteed -- that is, there
                   1683:  *     may well be pages which are not referenced, but others may be
                   1684:  *     collected.
                   1685:  *
                   1686:  *     Called by the pageout daemon when pages are scarce.
                   1687:  */
                   1688: void
1.20      tsutsui  1689: pmap_collect(pmap_t pmap)
1.1       chs      1690: {
                   1691:
                   1692:        PMAP_DPRINTF(PDB_FOLLOW, ("pmap_collect(%p)\n", pmap));
                   1693:
                   1694:        if (pmap == pmap_kernel()) {
                   1695:                int bank, s;
                   1696:
                   1697:                /*
                   1698:                 * XXX This is very bogus.  We should handle kernel PT
                   1699:                 * XXX pages much differently.
                   1700:                 */
                   1701:
                   1702:                s = splvm();
                   1703:                for (bank = 0; bank < vm_nphysseg; bank++)
                   1704:                        pmap_collect1(pmap, ptoa(vm_physmem[bank].start),
                   1705:                            ptoa(vm_physmem[bank].end));
                   1706:                splx(s);
                   1707:        } else {
                   1708:                /*
                   1709:                 * This process is about to be swapped out; free all of
                   1710:                 * the PT pages by removing the physical mappings for its
                   1711:                 * entire address space.  Note: pmap_remove() performs
                   1712:                 * all necessary locking.
                   1713:                 */
                   1714:                pmap_do_remove(pmap, VM_MIN_ADDRESS, VM_MAX_ADDRESS, 0);
                   1715:                pmap_update(pmap);
                   1716:        }
                   1717:
                   1718: #ifdef notyet
                   1719:        /* Go compact and garbage-collect the pv_table. */
                   1720:        pmap_collect_pv();
                   1721: #endif
                   1722: }
                   1723:
                   1724: /*
                   1725:  * pmap_collect1():
                   1726:  *
                   1727:  *     Garbage-collect KPT pages.  Helper for the above (bogus)
                   1728:  *     pmap_collect().
                   1729:  *
                   1730:  *     Note: THIS SHOULD GO AWAY, AND BE REPLACED WITH A BETTER
                   1731:  *     WAY OF HANDLING PT PAGES!
                   1732:  */
                   1733: void
1.20      tsutsui  1734: pmap_collect1(pmap_t pmap, paddr_t startpa, paddr_t endpa)
1.1       chs      1735: {
                   1736:        paddr_t pa;
                   1737:        struct pv_entry *pv;
                   1738:        pt_entry_t *pte;
                   1739:        paddr_t kpa;
                   1740: #ifdef DEBUG
                   1741:        st_entry_t *ste;
                   1742:        int opmapdebug = 0;
                   1743: #endif
                   1744:
1.3       thorpej  1745:        for (pa = startpa; pa < endpa; pa += PAGE_SIZE) {
1.1       chs      1746:                struct kpt_page *kpt, **pkpt;
                   1747:
                   1748:                /*
                   1749:                 * Locate physical pages which are being used as kernel
                   1750:                 * page table pages.
                   1751:                 */
                   1752:
                   1753:                pv = pa_to_pvh(pa);
                   1754:                if (pv->pv_pmap != pmap_kernel() || !(pv->pv_flags & PV_PTPAGE))
                   1755:                        continue;
                   1756:                do {
                   1757:                        if (pv->pv_ptste && pv->pv_ptpmap == pmap_kernel())
                   1758:                                break;
                   1759:                } while ((pv = pv->pv_next));
                   1760:                if (pv == NULL)
                   1761:                        continue;
                   1762: #ifdef DEBUG
                   1763:                if (pv->pv_va < (vaddr_t)Sysmap ||
                   1764:                    pv->pv_va >= (vaddr_t)Sysmap + M68K_MAX_PTSIZE) {
                   1765:                        printf("collect: kernel PT VA out of range\n");
                   1766:                        pmap_pvdump(pa);
                   1767:                        continue;
                   1768:                }
                   1769: #endif
1.3       thorpej  1770:                pte = (pt_entry_t *)(pv->pv_va + PAGE_SIZE);
1.1       chs      1771:                while (--pte >= (pt_entry_t *)pv->pv_va && *pte == PG_NV)
                   1772:                        ;
                   1773:                if (pte >= (pt_entry_t *)pv->pv_va)
                   1774:                        continue;
                   1775:
                   1776: #ifdef DEBUG
                   1777:                if (pmapdebug & (PDB_PTPAGE|PDB_COLLECT)) {
                   1778:                        printf("collect: freeing KPT page at %lx (ste %x@%p)\n",
1.20      tsutsui  1779:                            pv->pv_va, *pv->pv_ptste, pv->pv_ptste);
1.1       chs      1780:                        opmapdebug = pmapdebug;
                   1781:                        pmapdebug |= PDB_PTPAGE;
                   1782:                }
                   1783:
                   1784:                ste = pv->pv_ptste;
                   1785: #endif
                   1786:                /*
                   1787:                 * If all entries were invalid we can remove the page.
                    1788:                 * We call pmap_remove_mapping() to take care of invalidating
                   1789:                 * ST and Sysptmap entries.
                   1790:                 */
                   1791:
                   1792:                (void) pmap_extract(pmap, pv->pv_va, &kpa);
                   1793:                pmap_remove_mapping(pmap, pv->pv_va, NULL,
                   1794:                    PRM_TFLUSH|PRM_CFLUSH);
                   1795:
                   1796:                /*
                   1797:                 * Use the physical address to locate the original
                   1798:                 * (kmem_alloc assigned) address for the page and put
                   1799:                 * that page back on the free list.
                   1800:                 */
                   1801:
                   1802:                for (pkpt = &kpt_used_list, kpt = *pkpt;
                   1803:                     kpt != NULL;
                   1804:                     pkpt = &kpt->kpt_next, kpt = *pkpt)
                   1805:                        if (kpt->kpt_pa == kpa)
                   1806:                                break;
                   1807: #ifdef DEBUG
                   1808:                if (kpt == NULL)
                   1809:                        panic("pmap_collect: lost a KPT page");
                   1810:                if (pmapdebug & (PDB_PTPAGE|PDB_COLLECT))
                   1811:                        printf("collect: %lx (%lx) to free list\n",
1.20      tsutsui  1812:                            kpt->kpt_va, kpa);
1.1       chs      1813: #endif
                   1814:                *pkpt = kpt->kpt_next;
                   1815:                kpt->kpt_next = kpt_free_list;
                   1816:                kpt_free_list = kpt;
                   1817: #ifdef DEBUG
                   1818:                if (pmapdebug & (PDB_PTPAGE|PDB_COLLECT))
                   1819:                        pmapdebug = opmapdebug;
                   1820:
                   1821:                if (*ste != SG_NV)
                   1822:                        printf("collect: kernel STE at %p still valid (%x)\n",
1.20      tsutsui  1823:                            ste, *ste);
1.1       chs      1824:                ste = &Sysptmap[ste - pmap_ste(pmap_kernel(), 0)];
                   1825:                if (*ste != SG_NV)
                   1826:                        printf("collect: kernel PTmap at %p still valid (%x)\n",
1.20      tsutsui  1827:                            ste, *ste);
1.1       chs      1828: #endif
                   1829:        }
                   1830: }
                   1831:
                   1832: /*
                   1833:  * pmap_zero_page:             [ INTERFACE ]
                   1834:  *
                   1835:  *     Zero the specified (machine independent) page by mapping the page
                    1836:  *     into virtual memory and using zeropage() to clear its contents, one
                   1837:  *     machine dependent page at a time.
                   1838:  *
                   1839:  *     Note: WE DO NOT CURRENTLY LOCK THE TEMPORARY ADDRESSES!
                   1840:  *           (Actually, we go to splvm(), and since we don't
                   1841:  *           support multiple processors, this is sufficient.)
                   1842:  */
                   1843: void
1.20      tsutsui  1844: pmap_zero_page(paddr_t phys)
1.1       chs      1845: {
                   1846:        int npte;
                   1847:
                   1848:        PMAP_DPRINTF(PDB_FOLLOW, ("pmap_zero_page(%lx)\n", phys));
                   1849:
                   1850:        npte = phys | PG_V;
                   1851: #ifdef M68K_MMU_HP
                   1852:        if (pmap_aliasmask) {
                   1853:
                   1854:                /*
                   1855:                 * Cache-inhibit the mapping on VAC machines, as we would
                   1856:                 * be wasting the cache load.
                   1857:                 */
                   1858:
                   1859:                npte |= PG_CI;
                   1860:        }
                   1861: #endif
                   1862:
                   1863: #if defined(M68040) || defined(M68060)
                   1864: #if defined(M68020) || defined(M68030)
                   1865:        if (mmutype == MMU_68040)
                   1866: #endif
                   1867:        {
                   1868:                /*
                   1869:                 * Set copyback caching on the page; this is required
                   1870:                 * for cache consistency (since regular mappings are
                   1871:                 * copyback as well).
                   1872:                 */
                   1873:
                   1874:                npte |= PG_CCB;
                   1875:        }
                   1876: #endif
                   1877:
                   1878:        *caddr1_pte = npte;
                   1879:        TBIS((vaddr_t)CADDR1);
                   1880:
                   1881:        zeropage(CADDR1);
                   1882:
                   1883: #ifdef DEBUG
                   1884:        *caddr1_pte = PG_NV;
                   1885:        TBIS((vaddr_t)CADDR1);
                   1886: #endif
                   1887: }
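
/*
 * Editor's sketch (not part of the original source): the MI VM calls
 * pmap_zero_page()/pmap_copy_page() with physical addresses, e.g. when
 * zero-filling a freshly allocated page.
 */
#if 0
        struct vm_page *pg = uvm_pagealloc(NULL, 0, NULL, 0);

        if (pg != NULL)
                pmap_zero_page(VM_PAGE_TO_PHYS(pg));
#endif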
                   1888:
                   1889: /*
                   1890:  * pmap_copy_page:             [ INTERFACE ]
                   1891:  *
                   1892:  *     Copy the specified (machine independent) page by mapping the page
                    1893:  *     into virtual memory and using copypage() to copy the page, one machine
                   1894:  *     dependent page at a time.
                   1895:  *
                   1896:  *     Note: WE DO NOT CURRENTLY LOCK THE TEMPORARY ADDRESSES!
                   1897:  *           (Actually, we go to splvm(), and since we don't
                   1898:  *           support multiple processors, this is sufficient.)
                   1899:  */
                   1900: void
1.20      tsutsui  1901: pmap_copy_page(paddr_t src, paddr_t dst)
1.1       chs      1902: {
                   1903:        int npte1, npte2;
                   1904:
                   1905:        PMAP_DPRINTF(PDB_FOLLOW, ("pmap_copy_page(%lx, %lx)\n", src, dst));
                   1906:
                   1907:        npte1 = src | PG_RO | PG_V;
                   1908:        npte2 = dst | PG_V;
                   1909: #ifdef M68K_MMU_HP
                   1910:        if (pmap_aliasmask) {
                   1911:
                   1912:                /*
                   1913:                 * Cache-inhibit the mapping on VAC machines, as we would
                   1914:                 * be wasting the cache load.
                   1915:                 */
                   1916:
                   1917:                npte1 |= PG_CI;
                   1918:                npte2 |= PG_CI;
                   1919:        }
                   1920: #endif
                   1921:
                   1922: #if defined(M68040) || defined(M68060)
                   1923: #if defined(M68020) || defined(M68030)
                   1924:        if (mmutype == MMU_68040)
                   1925: #endif
                   1926:        {
                   1927:                /*
                   1928:                 * Set copyback caching on the pages; this is required
                   1929:                 * for cache consistency (since regular mappings are
                   1930:                 * copyback as well).
                   1931:                 */
                   1932:
                   1933:                npte1 |= PG_CCB;
                   1934:                npte2 |= PG_CCB;
                   1935:        }
                   1936: #endif
                   1937:
                   1938:        *caddr1_pte = npte1;
                   1939:        TBIS((vaddr_t)CADDR1);
                   1940:
                   1941:        *caddr2_pte = npte2;
                   1942:        TBIS((vaddr_t)CADDR2);
                   1943:
                   1944:        copypage(CADDR1, CADDR2);
                   1945:
                   1946: #ifdef DEBUG
                   1947:        *caddr1_pte = PG_NV;
                   1948:        TBIS((vaddr_t)CADDR1);
                   1949:
                   1950:        *caddr2_pte = PG_NV;
                   1951:        TBIS((vaddr_t)CADDR2);
                   1952: #endif
                   1953: }
                   1954:
                   1955: /*
                   1956:  * pmap_clear_modify:          [ INTERFACE ]
                   1957:  *
                   1958:  *     Clear the modify bits on the specified physical page.
                   1959:  */
                   1960: boolean_t
1.20      tsutsui  1961: pmap_clear_modify(struct vm_page *pg)
1.1       chs      1962: {
                   1963:        paddr_t pa = VM_PAGE_TO_PHYS(pg);
                   1964:
                   1965:        PMAP_DPRINTF(PDB_FOLLOW, ("pmap_clear_modify(%p)\n", pg));
                   1966:
                   1967:        return pmap_changebit(pa, 0, ~PG_M);
                   1968: }
                   1969:
                   1970: /*
                   1971:  * pmap_clear_reference:       [ INTERFACE ]
                   1972:  *
                   1973:  *     Clear the reference bit on the specified physical page.
                   1974:  */
                   1975: boolean_t
1.20      tsutsui  1976: pmap_clear_reference(struct vm_page *pg)
1.1       chs      1977: {
                   1978:        paddr_t pa = VM_PAGE_TO_PHYS(pg);
                   1979:
                   1980:        PMAP_DPRINTF(PDB_FOLLOW, ("pmap_clear_reference(%p)\n", pg));
                   1981:
                   1982:        return pmap_changebit(pa, 0, ~PG_U);
                   1983: }
                   1984:
                   1985: /*
                   1986:  * pmap_is_referenced:         [ INTERFACE ]
                   1987:  *
                   1988:  *     Return whether or not the specified physical page is referenced
                   1989:  *     by any physical maps.
                   1990:  */
                   1991: boolean_t
1.20      tsutsui  1992: pmap_is_referenced(struct vm_page *pg)
1.1       chs      1993: {
                   1994:        paddr_t pa = VM_PAGE_TO_PHYS(pg);
                   1995:
1.20      tsutsui  1996:        return pmap_testbit(pa, PG_U);
1.1       chs      1997: }
                   1998:
                   1999: /*
                   2000:  * pmap_is_modified:           [ INTERFACE ]
                   2001:  *
                   2002:  *     Return whether or not the specified physical page is modified
                   2003:  *     by any physical maps.
                   2004:  */
                   2005: boolean_t
1.20      tsutsui  2006: pmap_is_modified(struct vm_page *pg)
1.1       chs      2007: {
                   2008:        paddr_t pa = VM_PAGE_TO_PHYS(pg);
                   2009:
1.20      tsutsui  2010:        return pmap_testbit(pa, PG_M);
1.1       chs      2011: }
                   2012:
                   2013: /*
                   2014:  * pmap_phys_address:          [ INTERFACE ]
                   2015:  *
                   2016:  *     Return the physical address corresponding to the specified
                   2017:  *     cookie.  Used by the device pager to decode a device driver's
                   2018:  *     mmap entry point return value.
                   2019:  *
                   2020:  *     Note: no locking is necessary in this function.
                   2021:  */
                   2022: paddr_t
1.20      tsutsui  2023: pmap_phys_address(int ppn)
1.1       chs      2024: {
1.20      tsutsui  2025:        return m68k_ptob(ppn);
1.1       chs      2026: }
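/*
 * Editor's note, a worked example: m68k_ptob() converts a page frame
 * number to a byte address (ppn << PGSHIFT), so with 4KB pages a
 * cookie of 0x1234 decodes to physical address 0x01234000.
 */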
                   2027:
                   2028: #ifdef M68K_MMU_HP
                   2029: /*
                   2030:  * pmap_prefer:                        [ INTERFACE ]
                   2031:  *
                   2032:  *     Find the first virtual address >= *vap that does not
                   2033:  *     cause a virtually-addressed cache alias problem.
                   2034:  */
                   2035: void
1.20      tsutsui  2036: pmap_prefer(vaddr_t foff, vaddr_t *vap)
1.1       chs      2037: {
                   2038:        vaddr_t va;
                   2039:        vsize_t d;
                   2040:
                   2041: #ifdef M68K_MMU_MOTOROLA
                   2042:        if (pmap_aliasmask)
                   2043: #endif
                   2044:        {
                   2045:                va = *vap;
                   2046:                d = foff - va;
                   2047:                d &= pmap_aliasmask;
                   2048:                *vap = va + d;
                   2049:        }
                   2050: }
                   2051: #endif /* M68K_MMU_HP */
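/*
 * Editor's sketch (not part of the original source): the arithmetic in
 * pmap_prefer() rounds the candidate address up until it agrees with
 * the object offset `foff' in the bits covered by pmap_aliasmask, so
 * both land on the same virtual-cache line.  The constants below are
 * hypothetical; the real aliasmask is derived from the VAC size.
 */
#if 0	/* stand-alone demonstration, buildable with a host compiler */
#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	uint32_t aliasmask = 0x7fff;	/* hypothetical 32KB VAC */
	uint32_t foff = 0x00123456;	/* object offset */
	uint32_t va   = 0x00200000;	/* candidate virtual address */
	uint32_t d    = (foff - va) & aliasmask;

	printf("preferred va = 0x%x\n", (unsigned)(va + d));	/* 0x203456 */
	/* (va + d) & aliasmask == foff & aliasmask now holds. */
	return 0;
}
#endif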
                   2052:
                   2053: #ifdef COMPAT_HPUX
                   2054: /*
                   2055:  * pmap_mapmulti:
                   2056:  *
                   2057:  *     'PUX hack for dealing with the so-called multi-mapped address space.
                   2058:  *     The first 256MB is mapped in at every 256MB region from 0x10000000
                   2059:  *     up to 0xF0000000, i.e. 15 aliases (about four bits of tag information).
                   2060:  *
                   2061:  *     We implement this at the segment table level, the machine independent
                   2062:  *     VM knows nothing about it.
                   2063:  */
                   2064: int
1.20      tsutsui  2065: pmap_mapmulti(pmap_t pmap, vaddr_t va)
1.1       chs      2066: {
                   2067:        st_entry_t *ste, *bste;
                   2068:
                   2069: #ifdef DEBUG
                   2070:        if (pmapdebug & PDB_MULTIMAP) {
                   2071:                ste = pmap_ste(pmap, HPMMBASEADDR(va));
                   2072:                printf("pmap_mapmulti(%p, %lx): bste %p(%x)",
1.20      tsutsui  2073:                    pmap, va, ste, *ste);
1.1       chs      2074:                ste = pmap_ste(pmap, va);
                   2075:                printf(" ste %p(%x)\n", ste, *ste);
                   2076:        }
                   2077: #endif
                   2078:        bste = pmap_ste(pmap, HPMMBASEADDR(va));
                   2079:        ste = pmap_ste(pmap, va);
                   2080:        if (*ste == SG_NV && (*bste & SG_V)) {
                   2081:                *ste = *bste;
                   2082:                TBIAU();
                   2083:                return 0;
                   2084:        }
                   2085:        return EFAULT;
                   2086: }
                   2087: #endif /* COMPAT_HPUX */
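/*
 * Editor's sketch (not part of the original source): how a
 * multi-mapped address decomposes.  The mask below mirrors what an
 * HPMMBASEADDR()-style macro is assumed to do (strip the top nibble);
 * the exact macro lives in the hp300 headers.
 */
#if 0	/* stand-alone demonstration */
#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	uint32_t va   = 0x3001a000;		/* alias in the 0x30000000 copy */
	uint32_t base = va & 0x0fffffff;	/* same page in the base 256MB */
	uint32_t tag  = va >> 28;		/* 1..15: the tag nibble */

	printf("va 0x%x -> base 0x%x, tag %u\n",
	    (unsigned)va, (unsigned)base, (unsigned)tag);
	return 0;
}
#endif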
                   2088:
                   2089: /*
                   2090:  * Miscellaneous support routines follow
                   2091:  */
                   2092:
                   2093: /*
                   2094:  * pmap_remove_mapping:
                   2095:  *
                   2096:  *     Invalidate the single page mapping denoted by pmap/va.
                   2097:  *
                   2098:  *     If (pte != NULL), it is the already computed PTE for the page.
                   2099:  *
                   2100:  *     If (flags & PRM_TFLUSH), we must invalidate any TLB information.
                   2101:  *
                   2102:  *     If (flags & PRM_CFLUSH), we must flush/invalidate any cache
                   2103:  *     information.
                   2104:  *
                   2105:  *     If (flags & PRM_KEEPPTPAGE), we don't free the page table page
                   2106:  *     if the reference drops to zero.
                   2107:  */
                   2108: /* static */
                   2109: void
1.20      tsutsui  2110: pmap_remove_mapping(pmap_t pmap, vaddr_t va, pt_entry_t *pte, int flags)
1.1       chs      2111: {
                   2112:        paddr_t pa;
                   2113:        struct pv_entry *pv, *npv;
                   2114:        struct pmap *ptpmap;
                   2115:        st_entry_t *ste;
                   2116:        int s, bits;
                   2117: #ifdef DEBUG
                   2118:        pt_entry_t opte;
                   2119: #endif
                   2120:
                   2121:        PMAP_DPRINTF(PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT,
                   2122:            ("pmap_remove_mapping(%p, %lx, %p, %x)\n",
                   2123:            pmap, va, pte, flags));
                   2124:
                   2125:        /*
                   2126:         * PTE not provided, compute it from pmap and va.
                   2127:         */
                   2128:
                   2129:        if (pte == NULL) {
                   2130:                pte = pmap_pte(pmap, va);
                   2131:                if (*pte == PG_NV)
                   2132:                        return;
                   2133:        }
                   2134:
                   2135: #ifdef M68K_MMU_HP
                   2136:        if (pmap_aliasmask && (flags & PRM_CFLUSH)) {
                   2137:
                   2138:                /*
                   2139:                 * Purge kernel side of VAC to ensure we get the correct
                   2140:                 * state of any hardware-maintained bits.
                   2141:                 */
                   2142:
                   2143:                DCIS();
                   2144:
                   2145:                /*
                   2146:                 * If this is a non-CI user mapping for the current process,
                   2147:                 * flush the VAC.  Note that the kernel side was flushed
                   2148:                 * above so we don't worry about non-CI kernel mappings.
                   2149:                 */
                   2150:
                   2151:                if (active_user_pmap(pmap) && !pmap_pte_ci(pte)) {
                   2152:                        DCIU();
                   2153:                }
                   2154:        }
                   2155: #endif
                   2156:
                   2157:        pa = pmap_pte_pa(pte);
                   2158: #ifdef DEBUG
                   2159:        opte = *pte;
                   2160: #endif
                   2161:
                   2162:        /*
                   2163:         * Update statistics
                   2164:         */
                   2165:
                   2166:        if (pmap_pte_w(pte))
                   2167:                pmap->pm_stats.wired_count--;
                   2168:        pmap->pm_stats.resident_count--;
                   2169:
                   2170: #if defined(M68040) || defined(M68060)
                   2171: #if defined(M68020) || defined(M68030)
                   2172:        if (mmutype == MMU_68040)
                   2173: #endif
                   2174:        if ((flags & PRM_CFLUSH)) {
                   2175:                DCFP(pa);
                   2176:                ICPP(pa);
                   2177:        }
                   2178: #endif
                   2179:
                   2180:        /*
                   2181:         * Invalidate the PTE after saving the reference/modify info.
                   2182:         */
                   2183:
                   2184:        PMAP_DPRINTF(PDB_REMOVE, ("remove: invalidating pte at %p\n", pte));
                   2185:        bits = *pte & (PG_U|PG_M);
                   2186:        *pte = PG_NV;
                   2187:        if ((flags & PRM_TFLUSH) && active_pmap(pmap))
                   2188:                TBIS(va);
                   2189:
                   2190:        /*
                   2191:         * For user mappings decrement the wiring count on
                   2192:         * the PT page.
                   2193:         */
                   2194:
                   2195:        if (pmap != pmap_kernel()) {
                   2196:                vaddr_t ptpva = trunc_page((vaddr_t)pte);
                   2197:                int refs = pmap_ptpage_delref(ptpva);
                   2198: #ifdef DEBUG
                   2199:                if (pmapdebug & PDB_WIRING)
                   2200:                        pmap_check_wiring("remove", ptpva);
                   2201: #endif
                   2202:
                   2203:                /*
                   2204:                 * If reference count drops to 0, and we're not instructed
                   2205:                 * to keep it around, free the PT page.
                   2206:                 */
                   2207:
                   2208:                if (refs == 0 && (flags & PRM_KEEPPTPAGE) == 0) {
                   2209: #ifdef DIAGNOSTIC
1.16      tsutsui  2210:                        struct pv_entry *ptppv;
1.1       chs      2211: #endif
1.15      tsutsui  2212:                        paddr_t ptppa;
1.1       chs      2213:
1.15      tsutsui  2214:                        ptppa = pmap_pte_pa(pmap_pte(pmap_kernel(), ptpva));
1.1       chs      2215: #ifdef DIAGNOSTIC
1.15      tsutsui  2216:                        if (PAGE_IS_MANAGED(ptppa) == 0)
1.1       chs      2217:                                panic("pmap_remove_mapping: unmanaged PT page");
1.16      tsutsui  2218:                        ptppv = pa_to_pvh(ptppa);
                   2219:                        if (ptppv->pv_ptste == NULL)
1.1       chs      2220:                                panic("pmap_remove_mapping: ptste == NULL");
1.16      tsutsui  2221:                        if (ptppv->pv_pmap != pmap_kernel() ||
                   2222:                            ptppv->pv_va != ptpva ||
                   2223:                            ptppv->pv_next != NULL)
1.1       chs      2224:                                panic("pmap_remove_mapping: "
                   2225:                                    "bad PT page pmap %p, va 0x%lx, next %p",
1.16      tsutsui  2226:                                    ptppv->pv_pmap, ptppv->pv_va,
                   2227:                                    ptppv->pv_next);
1.1       chs      2228: #endif
                   2229:                        pmap_remove_mapping(pmap_kernel(), ptpva,
                   2230:                            NULL, PRM_TFLUSH|PRM_CFLUSH);
1.21.4.1! bouyer   2231:                        simple_lock(&uvm.kernel_object->vmobjlock);
1.15      tsutsui  2232:                        uvm_pagefree(PHYS_TO_VM_PAGE(ptppa));
1.21.4.1! bouyer   2233:                        simple_unlock(&uvm.kernel_object->vmobjlock);
1.1       chs      2234:                        PMAP_DPRINTF(PDB_REMOVE|PDB_PTPAGE,
                   2235:                            ("remove: PT page 0x%lx (0x%lx) freed\n",
1.15      tsutsui  2236:                            ptpva, ptppa));
1.1       chs      2237:                }
                   2238:        }
                   2239:
                   2240:        /*
                   2241:         * If this isn't a managed page, we are all done.
                   2242:         */
                   2243:
                   2244:        if (PAGE_IS_MANAGED(pa) == 0)
                   2245:                return;
                   2246:
                   2247:        /*
                   2248:         * Otherwise remove it from the PV table
                   2249:         * (raise IPL since we may be called at interrupt time).
                   2250:         */
                   2251:
                   2252:        pv = pa_to_pvh(pa);
                   2253:        ste = NULL;
                   2254:        s = splvm();
                   2255:
                   2256:        /*
                   2257:         * If it is the first entry on the list, it is actually
                   2258:         * in the header and we must copy the following entry up
                   2259:         * to the header.  Otherwise we must search the list for
                   2260:         * the entry.  In either case we free the now unused entry.
                   2261:         */
                   2262:
                   2263:        if (pmap == pv->pv_pmap && va == pv->pv_va) {
                   2264:                ste = pv->pv_ptste;
                   2265:                ptpmap = pv->pv_ptpmap;
                   2266:                npv = pv->pv_next;
                   2267:                if (npv) {
                   2268:                        npv->pv_flags = pv->pv_flags;
                   2269:                        *pv = *npv;
                   2270:                        pmap_free_pv(npv);
                   2271:                } else
                   2272:                        pv->pv_pmap = NULL;
                   2273:        } else {
                   2274:                for (npv = pv->pv_next; npv; npv = npv->pv_next) {
                   2275:                        if (pmap == npv->pv_pmap && va == npv->pv_va)
                   2276:                                break;
                   2277:                        pv = npv;
                   2278:                }
                   2279: #ifdef DEBUG
                   2280:                if (npv == NULL)
                   2281:                        panic("pmap_remove: PA not in pv_tab");
                   2282: #endif
                   2283:                ste = npv->pv_ptste;
                   2284:                ptpmap = npv->pv_ptpmap;
                   2285:                pv->pv_next = npv->pv_next;
                   2286:                pmap_free_pv(npv);
                   2287:                pv = pa_to_pvh(pa);
                   2288:        }
                   2289:
                   2290: #ifdef M68K_MMU_HP
                   2291:
                   2292:        /*
                   2293:         * If only one mapping remains, we no longer need to cache-inhibit it.
                   2294:         */
                   2295:
                   2296:        if (pmap_aliasmask &&
                   2297:            pv->pv_pmap && pv->pv_next == NULL && (pv->pv_flags & PV_CI)) {
                   2298:                PMAP_DPRINTF(PDB_CACHE,
                   2299:                    ("remove: clearing CI for pa %lx\n", pa));
                   2300:                pv->pv_flags &= ~PV_CI;
                   2301:                pmap_changebit(pa, 0, ~PG_CI);
                   2302: #ifdef DEBUG
                   2303:                if ((pmapdebug & (PDB_CACHE|PDB_PVDUMP)) ==
                   2304:                    (PDB_CACHE|PDB_PVDUMP))
                   2305:                        pmap_pvdump(pa);
                   2306: #endif
                   2307:        }
                   2308: #endif
                   2309:
                   2310:        /*
                   2311:         * If this was a PT page we must also remove the
                   2312:         * mapping from the associated segment table.
                   2313:         */
                   2314:
                   2315:        if (ste) {
                   2316:                PMAP_DPRINTF(PDB_REMOVE|PDB_PTPAGE,
                   2317:                    ("remove: ste was %x@%p pte was %x@%p\n",
                   2318:                    *ste, ste, opte, pmap_pte(pmap, va)));
                   2319: #if defined(M68040) || defined(M68060)
                   2320: #if defined(M68020) || defined(M68030)
                   2321:                if (mmutype == MMU_68040)
                   2322: #endif
                   2323:                {
                   2324:                        st_entry_t *este = &ste[NPTEPG/SG4_LEV3SIZE];
                   2325:
                   2326:                        while (ste < este)
                   2327:                                *ste++ = SG_NV;
                   2328: #ifdef DEBUG
                   2329:                        ste -= NPTEPG/SG4_LEV3SIZE;
                   2330: #endif
                   2331:                }
                   2332: #if defined(M68020) || defined(M68030)
                   2333:                else
                   2334: #endif
                   2335: #endif
                   2336: #if defined(M68020) || defined(M68030)
                   2337:                *ste = SG_NV;
                   2338: #endif
                   2339:
                   2340:                /*
                   2341:                 * If it was a user PT page, we decrement the
                   2342:                 * reference count on the segment table as well,
                   2343:                 * freeing it if it is now empty.
                   2344:                 */
                   2345:
                   2346:                if (ptpmap != pmap_kernel()) {
                   2347:                        PMAP_DPRINTF(PDB_REMOVE|PDB_SEGTAB,
                   2348:                            ("remove: stab %p, refcnt %d\n",
                   2349:                            ptpmap->pm_stab, ptpmap->pm_sref - 1));
                   2350: #ifdef DEBUG
                   2351:                        if ((pmapdebug & PDB_PARANOIA) &&
                   2352:                            ptpmap->pm_stab !=
                   2353:                             (st_entry_t *)trunc_page((vaddr_t)ste))
                   2354:                                panic("remove: bogus ste");
                   2355: #endif
                   2356:                        if (--(ptpmap->pm_sref) == 0) {
                   2357:                                PMAP_DPRINTF(PDB_REMOVE|PDB_SEGTAB,
                   2358:                                    ("remove: free stab %p\n",
                   2359:                                    ptpmap->pm_stab));
1.14      yamt     2360:                                uvm_km_free(st_map, (vaddr_t)ptpmap->pm_stab,
                   2361:                                    M68K_STSIZE, UVM_KMF_WIRED);
1.1       chs      2362:                                ptpmap->pm_stab = Segtabzero;
                   2363:                                ptpmap->pm_stpa = Segtabzeropa;
                   2364: #if defined(M68040) || defined(M68060)
                   2365: #if defined(M68020) || defined(M68030)
                   2366:                                if (mmutype == MMU_68040)
                   2367: #endif
                   2368:                                        ptpmap->pm_stfree = protostfree;
                   2369: #endif
                   2370:
                   2371:                                /*
                   2372:                                 * XXX may have changed segment table
                   2373:                                 * pointer for current process so
                   2374:                                 * update now to reload hardware.
                   2375:                                 */
                   2376:
                   2377:                                if (active_user_pmap(ptpmap))
                   2378:                                        PMAP_ACTIVATE(ptpmap, 1);
                   2379:                        }
                   2380:                }
                   2381:                pv->pv_flags &= ~PV_PTPAGE;
                   2382:                ptpmap->pm_ptpages--;
                   2383:        }
                   2384:
                   2385:        /*
                   2386:         * Update saved attributes for managed page
                   2387:         */
                   2388:
                   2389:        *pa_to_attribute(pa) |= bits;
                   2390:        splx(s);
                   2391: }
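/*
 * Editor's usage sketch: a typical caller tearing down one user
 * mapping, flushing both TLB and cache state; `upmap' and `uva' are
 * hypothetical.
 */
#if 0
	pmap_remove_mapping(upmap, uva, NULL, PRM_TFLUSH|PRM_CFLUSH);
#endif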
                   2392:
                   2393: /*
                   2394:  * pmap_testbit:
                   2395:  *
                   2396:  *     Test the modified/referenced bits of a physical page.
                   2397:  */
                   2398: /* static */
                   2399: boolean_t
1.20      tsutsui  2400: pmap_testbit(paddr_t pa, int bit)
1.1       chs      2401: {
                   2402:        struct pv_entry *pv;
                   2403:        pt_entry_t *pte;
                   2404:        int s;
                   2405:
                   2406:        pv = pa_to_pvh(pa);
                   2407:        s = splvm();
                   2408:
                   2409:        /*
                   2410:         * Check saved info first
                   2411:         */
                   2412:
                   2413:        if (*pa_to_attribute(pa) & bit) {
                   2414:                splx(s);
1.20      tsutsui  2415:                return TRUE;
1.1       chs      2416:        }
                   2417:
                   2418: #ifdef M68K_MMU_HP
                   2419:
                   2420:        /*
                   2421:         * Flush VAC to get correct state of any hardware-maintained bits.
                   2422:         */
                   2423:
                   2424:        if (pmap_aliasmask && (bit & (PG_U|PG_M)))
                   2425:                DCIS();
                   2426: #endif
                   2427:
                   2428:        /*
                   2429:         * Not found.  Check current mappings, returning immediately if
                   2430:         * found.  Cache a hit to speed future lookups.
                   2431:         */
                   2432:
                   2433:        if (pv->pv_pmap != NULL) {
                   2434:                for (; pv; pv = pv->pv_next) {
                   2435:                        pte = pmap_pte(pv->pv_pmap, pv->pv_va);
                   2436:                        if (*pte & bit) {
                   2437:                                *pa_to_attribute(pa) |= bit;
                   2438:                                splx(s);
1.20      tsutsui  2439:                                return TRUE;
1.1       chs      2440:                        }
                   2441:                }
                   2442:        }
                   2443:        splx(s);
1.20      tsutsui  2444:        return FALSE;
1.1       chs      2445: }
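/*
 * Editor's note: the saved-attribute check above is what lets
 * pmap_is_modified()/pmap_is_referenced() answer cheaply once a bit
 * has been seen: pmap_remove_mapping() folds PG_U/PG_M into
 * *pa_to_attribute(pa) before destroying a PTE, and pmap_testbit()
 * caches fresh hits the same way.
 */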
                   2446:
                   2447: /*
                   2448:  * pmap_changebit:
                   2449:  *
                   2450:  *     Change the modified/referenced bits, or other PTE bits,
                   2451:  *     for a physical page.
                   2452:  */
                   2453: /* static */
                   2454: boolean_t
1.20      tsutsui  2455: pmap_changebit(paddr_t pa, int set, int mask)
1.1       chs      2456: {
                   2457:        struct pv_entry *pv;
                   2458:        pt_entry_t *pte, npte;
                   2459:        vaddr_t va;
                   2460:        char *attrp;
                   2461:        int s;
                   2462: #if defined(M68K_MMU_HP) || defined(M68040) || defined(M68060)
                   2463:        boolean_t firstpage = TRUE;
                   2464: #endif
                   2465:        boolean_t r;
                   2466:
                   2467:        PMAP_DPRINTF(PDB_BITS,
                   2468:            ("pmap_changebit(%lx, %x, %x)\n", pa, set, mask));
                   2469:
                   2470:        pv = pa_to_pvh(pa);
                   2471:        s = splvm();
                   2472:
                   2473:        /*
                   2474:         * Clear saved attributes (modify, reference)
                   2475:         */
                   2476:
                   2477:        attrp = pa_to_attribute(pa);
                   2478:        r = *attrp & ~mask;
                   2479:        *attrp &= mask;
                   2480:
                   2481:        /*
                   2482:         * Loop over all current mappings, setting/clearing as appropriate.
                   2483:         * If setting RO, do we need to clear the VAC?
                   2484:         */
                   2485:
                   2486:        if (pv->pv_pmap != NULL) {
                   2487: #ifdef DEBUG
                   2488:                int toflush = 0;
                   2489: #endif
                   2490:                for (; pv; pv = pv->pv_next) {
                   2491: #ifdef DEBUG
                   2492:                        toflush |= (pv->pv_pmap == pmap_kernel()) ? 2 : 1;
                   2493: #endif
                   2494:                        va = pv->pv_va;
                   2495:                        pte = pmap_pte(pv->pv_pmap, va);
                   2496: #ifdef M68K_MMU_HP
                   2497:
                   2498:                        /*
                   2499:                         * Flush VAC to ensure we get correct state of HW bits
                   2500:                         * so we don't clobber them.
                   2501:                         */
                   2502:
                   2503:                        if (firstpage && pmap_aliasmask) {
                   2504:                                firstpage = FALSE;
                   2505:                                DCIS();
                   2506:                        }
                   2507: #endif
                   2508:                        npte = (*pte | set) & mask;
                   2509:                        if (*pte != npte) {
                   2510:                                r = TRUE;
                   2511: #if defined(M68040) || defined(M68060)
                   2512:                                /*
                   2513:                                 * If we are changing caching status or
                   2514:                                 * protection make sure the caches are
                   2515:                                 * flushed (but only once).
                   2516:                                 */
                   2517:                                if (firstpage &&
                   2518: #if defined(M68020) || defined(M68030)
                   2519:                                    (mmutype == MMU_68040) &&
                   2520: #endif
                   2521:                                    ((set == PG_RO) ||
                   2522:                                     (set & PG_CMASK) ||
                   2523:                                     (mask & PG_CMASK) == 0)) {
                   2524:                                        firstpage = FALSE;
                   2525:                                        DCFP(pa);
                   2526:                                        ICPP(pa);
                   2527:                                }
                   2528: #endif
                   2529:                                *pte = npte;
                   2530:                                if (active_pmap(pv->pv_pmap))
                   2531:                                        TBIS(va);
                   2532:                        }
                   2533:                }
                   2534:        }
                   2535:        splx(s);
1.20      tsutsui  2536:        return r;
1.1       chs      2537: }
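/*
 * Editor's worked example: every PTE is rewritten as
 * (*pte | set) & mask, so pmap_clear_modify() above, which passes
 * set = 0 and mask = ~PG_M, preserves everything except the modify
 * bit.  The bit values below are hypothetical stand-ins for the real
 * PG_* definitions.
 */
#if 0	/* stand-alone demonstration */
#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	uint32_t PG_M  = 0x00000010;		/* hypothetical modify bit */
	uint32_t pte   = 0x12345f1d;		/* hypothetical PTE value */
	uint32_t npte  = (pte | 0) & ~PG_M;	/* set = 0, mask = ~PG_M */

	printf("pte 0x%08x -> 0x%08x\n", (unsigned)pte, (unsigned)npte);
	return 0;
}
#endif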
                   2538:
                   2539: /*
                   2540:  * pmap_enter_ptpage:
                   2541:  *
                   2542:  *     Allocate and map a PT page for the specified pmap/va pair.
                   2543:  */
                   2544: /* static */
1.21.4.1! bouyer   2545: int
        !          2546: pmap_enter_ptpage(pmap_t pmap, vaddr_t va, boolean_t can_fail)
1.1       chs      2547: {
                   2548:        paddr_t ptpa;
                   2549:        struct vm_page *pg;
                   2550:        struct pv_entry *pv;
                   2551:        st_entry_t *ste;
                   2552:        int s;
                   2553:
                   2554:        PMAP_DPRINTF(PDB_FOLLOW|PDB_ENTER|PDB_PTPAGE,
                   2555:            ("pmap_enter_ptpage: pmap %p, va %lx\n", pmap, va));
                   2556:
                   2557:        /*
                   2558:         * Allocate a segment table if necessary.  Note that it is allocated
                   2559:         * from a private map and not pt_map.  This keeps user page tables
                   2560:         * aligned on segment boundaries in the kernel address space.
                   2561:         * The segment table is wired down.  It will be freed whenever the
                   2562:         * reference count drops to zero.
                   2563:         */
                   2564:        if (pmap->pm_stab == Segtabzero) {
                   2565:                pmap->pm_stab = (st_entry_t *)
1.14      yamt     2566:                    uvm_km_alloc(st_map, M68K_STSIZE, 0,
1.21.4.1! bouyer   2567:                    UVM_KMF_WIRED | UVM_KMF_ZERO |
        !          2568:                    (can_fail ? UVM_KMF_NOWAIT : 0));
        !          2569:                if (pmap->pm_stab == NULL) {
        !          2570:                        pmap->pm_stab = Segtabzero;
        !          2571:                        return ENOMEM;
        !          2572:                }
1.1       chs      2573:                (void) pmap_extract(pmap_kernel(), (vaddr_t)pmap->pm_stab,
                   2574:                    (paddr_t *)&pmap->pm_stpa);
                   2575: #if defined(M68040) || defined(M68060)
                   2576: #if defined(M68020) || defined(M68030)
                   2577:                if (mmutype == MMU_68040)
                   2578: #endif
                   2579:                {
1.21      mhitch   2580:                        pt_entry_t      *pte;
                   2581:
                   2582:                        pte = pmap_pte(pmap_kernel(), pmap->pm_stab);
                   2583:                        *pte = (*pte & ~PG_CMASK) | PG_CI;
1.1       chs      2584:                        pmap->pm_stfree = protostfree;
                   2585:                }
                   2586: #endif
                   2587:                /*
                   2588:                 * XXX may have changed segment table pointer for current
                   2589:                 * process so update now to reload hardware.
                   2590:                 */
                   2591:                if (active_user_pmap(pmap))
                   2592:                        PMAP_ACTIVATE(pmap, 1);
                   2593:
                   2594:                PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB,
                   2595:                    ("enter: pmap %p stab %p(%p)\n",
                   2596:                    pmap, pmap->pm_stab, pmap->pm_stpa));
                   2597:        }
                   2598:
                   2599:        ste = pmap_ste(pmap, va);
                   2600: #if defined(M68040) || defined(M68060)
                   2601:        /*
                   2602:         * Allocate level 2 descriptor block if necessary
                   2603:         */
                   2604: #if defined(M68020) || defined(M68030)
                   2605:        if (mmutype == MMU_68040)
                   2606: #endif
                   2607:        {
                   2608:                if (*ste == SG_NV) {
                   2609:                        int ix;
                   2610:                        caddr_t addr;
                   2611:
                   2612:                        ix = bmtol2(pmap->pm_stfree);
                   2613:                        if (ix == -1)
                   2614:                                panic("enter: out of address space"); /* XXX */
                   2615:                        pmap->pm_stfree &= ~l2tobm(ix);
                   2616:                        addr = (caddr_t)&pmap->pm_stab[ix*SG4_LEV2SIZE];
                   2617:                        memset(addr, 0, SG4_LEV2SIZE*sizeof(st_entry_t));
                   2618:                        addr = (caddr_t)&pmap->pm_stpa[ix*SG4_LEV2SIZE];
                   2619:                        *ste = (u_int)addr | SG_RW | SG_U | SG_V;
                   2620:
                   2621:                        PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB,
                   2622:                            ("enter: alloc ste2 %d(%p)\n", ix, addr));
                   2623:                }
                   2624:                ste = pmap_ste2(pmap, va);
                   2625:                /*
                   2626:                 * Since a level 2 descriptor maps a block of SG4_LEV3SIZE
                   2627:                 * level 3 descriptors, we need a chunk of NPTEPG/SG4_LEV3SIZE
1.3       thorpej  2628:                 * (16) such descriptors (PAGE_SIZE/SG4_LEV3SIZE bytes) to map a
1.1       chs      2629:                 * PT page--the unit of allocation.  We set `ste' to point
                   2630:                 * to the first entry of that chunk which is validated in its
                   2631:                 * entirety below.
                   2632:                 */
1.3       thorpej  2633:                ste = (st_entry_t *)((int)ste & ~(PAGE_SIZE/SG4_LEV3SIZE-1));
1.1       chs      2634:
                   2635:                PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB,
                   2636:                    ("enter: ste2 %p (%p)\n", pmap_ste2(pmap, va), ste));
                   2637:        }
                   2638: #endif
                   2639:        va = trunc_page((vaddr_t)pmap_pte(pmap, va));
                   2640:
                   2641:        /*
                   2642:         * In the kernel we allocate a page from the kernel PT page
                   2643:         * free list and map it into the kernel page table map (via
                   2644:         * pmap_enter).
                   2645:         */
                   2646:        if (pmap == pmap_kernel()) {
                   2647:                struct kpt_page *kpt;
                   2648:
                   2649:                s = splvm();
                   2650:                if ((kpt = kpt_free_list) == NULL) {
                   2651:                        /*
                   2652:                         * No PT pages available.
                   2653:                         * Try once to free up unused ones.
                   2654:                         */
                   2655:                        PMAP_DPRINTF(PDB_COLLECT,
                   2656:                            ("enter: no KPT pages, collecting...\n"));
                   2657:                        pmap_collect(pmap_kernel());
                   2658:                        if ((kpt = kpt_free_list) == NULL)
                   2659:                                panic("pmap_enter_ptpage: can't get KPT page");
                   2660:                }
                   2661:                kpt_free_list = kpt->kpt_next;
                   2662:                kpt->kpt_next = kpt_used_list;
                   2663:                kpt_used_list = kpt;
                   2664:                ptpa = kpt->kpt_pa;
1.3       thorpej  2665:                memset((caddr_t)kpt->kpt_va, 0, PAGE_SIZE);
1.1       chs      2666:                pmap_enter(pmap, va, ptpa, VM_PROT_READ | VM_PROT_WRITE,
                   2667:                    VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED);
                   2668:                pmap_update(pmap);
                   2669: #ifdef DEBUG
                   2670:                if (pmapdebug & (PDB_ENTER|PDB_PTPAGE)) {
                   2671:                        int ix = pmap_ste(pmap, va) - pmap_ste(pmap, 0);
                   2672:
                   2673:                        printf("enter: add &Sysptmap[%d]: %x (KPT page %lx)\n",
1.20      tsutsui  2674:                            ix, Sysptmap[ix], kpt->kpt_va);
1.1       chs      2675:                }
                   2676: #endif
                   2677:                splx(s);
                   2678:        } else {
                   2679:
                   2680:                /*
                   2681:                 * For user processes we just allocate a page from the
                   2682:                 * VM system.  Note that we set the page "wired" count to 1,
                   2683:                 * which is what we use to check if the page can be freed.
                   2684:                 * See pmap_remove_mapping().
                   2685:                 *
                   2686:                 * Count the segment table reference first so that we won't
                   2687:                 * lose the segment table when low on memory.
                   2688:                 */
                   2689:
                   2690:                pmap->pm_sref++;
                   2691:                PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE,
                   2692:                    ("enter: about to alloc UPT pg at %lx\n", va));
1.21.4.1! bouyer   2693:                simple_lock(&uvm.kernel_object->vmobjlock);
1.1       chs      2694:                while ((pg = uvm_pagealloc(uvm.kernel_object,
                   2695:                                           va - vm_map_min(kernel_map),
                   2696:                                           NULL, UVM_PGA_ZERO)) == NULL) {
1.21.4.1! bouyer   2697:                        simple_unlock(&uvm.kernel_object->vmobjlock);
1.1       chs      2698:                        uvm_wait("ptpage");
1.21.4.1! bouyer   2699:                        simple_lock(&uvm.kernel_object->vmobjlock);
1.1       chs      2700:                }
1.21.4.1! bouyer   2701:                simple_unlock(&uvm.kernel_object->vmobjlock);
1.1       chs      2702:                pg->flags &= ~(PG_BUSY|PG_FAKE);
                   2703:                UVM_PAGE_OWN(pg, NULL);
                   2704:                ptpa = VM_PAGE_TO_PHYS(pg);
                   2705:                pmap_enter(pmap_kernel(), va, ptpa,
                   2706:                    VM_PROT_READ | VM_PROT_WRITE,
                   2707:                    VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED);
                   2708:                pmap_update(pmap_kernel());
                   2709:        }
                   2710: #if defined(M68040) || defined(M68060)
                   2711:        /*
                   2712:         * Turn off copyback caching of page table pages;
                   2713:         * it could get ugly otherwise.
                   2714:         */
                   2715: #if defined(M68020) || defined(M68030)
                   2716:        if (mmutype == MMU_68040)
                   2717: #endif
                   2718:        {
                   2719: #ifdef DEBUG
                   2720:                pt_entry_t *pte = pmap_pte(pmap_kernel(), va);
                   2721:                if ((pmapdebug & PDB_PARANOIA) && (*pte & PG_CCB) == 0)
                   2722:                        printf("%s PT no CCB: kva=%lx ptpa=%lx pte@%p=%x\n",
1.20      tsutsui  2723:                            pmap == pmap_kernel() ? "Kernel" : "User",
                   2724:                            va, ptpa, pte, *pte);
1.1       chs      2725: #endif
                   2726:                if (pmap_changebit(ptpa, PG_CI, ~PG_CCB))
                   2727:                        DCIS();
                   2728:        }
                   2729: #endif
                   2730:        /*
                   2731:         * Locate the PV entry in the kernel for this PT page and
                   2732:         * record the STE address.  This is so that we can invalidate
                   2733:         * the STE when we remove the mapping for the page.
                   2734:         */
                   2735:        pv = pa_to_pvh(ptpa);
                   2736:        s = splvm();
                   2737:        if (pv) {
                   2738:                pv->pv_flags |= PV_PTPAGE;
                   2739:                do {
                   2740:                        if (pv->pv_pmap == pmap_kernel() && pv->pv_va == va)
                   2741:                                break;
                   2742:                } while ((pv = pv->pv_next));
                   2743:        }
                   2744: #ifdef DEBUG
                   2745:        if (pv == NULL)
                   2746:                panic("pmap_enter_ptpage: PT page not entered");
                   2747: #endif
                   2748:        pv->pv_ptste = ste;
                   2749:        pv->pv_ptpmap = pmap;
                   2750:
                   2751:        PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE,
                   2752:            ("enter: new PT page at PA %lx, ste at %p\n", ptpa, ste));
                   2753:
                   2754:        /*
                   2755:         * Map the new PT page into the segment table.
                   2756:         * Also increment the reference count on the segment table if this
                   2757:         * was a user page table page.  Note that we don't use vm_map_pageable
                   2758:         * to keep the count like we do for PT pages; this is mostly because
                   2759:         * it would be difficult to identify ST pages in pmap_pageable to
                   2760:         * release them.  We also avoid the overhead of vm_map_pageable.
                   2761:         */
                   2762: #if defined(M68040) || defined(M68060)
                   2763: #if defined(M68020) || defined(M68030)
                   2764:        if (mmutype == MMU_68040)
                   2765: #endif
                   2766:        {
                   2767:                st_entry_t *este;
                   2768:
                   2769:                for (este = &ste[NPTEPG/SG4_LEV3SIZE]; ste < este; ste++) {
                   2770:                        *ste = ptpa | SG_U | SG_RW | SG_V;
                   2771:                        ptpa += SG4_LEV3SIZE * sizeof(st_entry_t);
                   2772:                }
                   2773:        }
                   2774: #if defined(M68020) || defined(M68030)
                   2775:        else
                   2776:                *ste = (ptpa & SG_FRAME) | SG_RW | SG_V;
                   2777: #endif
                   2778: #else
                   2779:        *ste = (ptpa & SG_FRAME) | SG_RW | SG_V;
                   2780: #endif
                   2781:        if (pmap != pmap_kernel()) {
                   2782:                PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB,
                   2783:                    ("enter: stab %p refcnt %d\n",
                   2784:                    pmap->pm_stab, pmap->pm_sref));
                   2785:        }
                   2786:        /*
                   2787:         * Flush stale TLB info.
                   2788:         */
                   2789:        if (pmap == pmap_kernel())
                   2790:                TBIAS();
                   2791:        else
                   2792:                TBIAU();
                   2793:        pmap->pm_ptpages++;
                   2794:        splx(s);
1.21.4.1! bouyer   2795:
        !          2796:        return 0;
1.1       chs      2797: }
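/*
 * Editor's usage sketch: with can_fail = TRUE the segment-table
 * allocation above uses UVM_KMF_NOWAIT and the routine returns ENOMEM
 * rather than sleeping, so a caller can unwind and retry; `pm' and
 * `uva' are hypothetical.
 */
#if 0
	if (pmap_enter_ptpage(pm, uva, TRUE) == ENOMEM)
		return ENOMEM;
#endif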
                   2798:
                   2799: /*
                   2800:  * pmap_ptpage_addref:
                   2801:  *
                   2802:  *     Add a reference to the specified PT page.
                   2803:  */
                   2804: void
1.20      tsutsui  2805: pmap_ptpage_addref(vaddr_t ptpva)
1.1       chs      2806: {
                   2807:        struct vm_page *pg;
                   2808:
                   2809:        simple_lock(&uvm.kernel_object->vmobjlock);
                   2810:        pg = uvm_pagelookup(uvm.kernel_object, ptpva - vm_map_min(kernel_map));
                   2811:        pg->wire_count++;
                   2812:        PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB,
                   2813:            ("ptpage addref: pg %p now %d\n",
                   2814:             pg, pg->wire_count));
                   2815:        simple_unlock(&uvm.kernel_object->vmobjlock);
                   2816: }
                   2817:
                   2818: /*
                   2819:  * pmap_ptpage_delref:
                   2820:  *
                   2821:  *     Delete a reference to the specified PT page.
                   2822:  */
                   2823: int
1.20      tsutsui  2824: pmap_ptpage_delref(vaddr_t ptpva)
1.1       chs      2825: {
                   2826:        struct vm_page *pg;
                   2827:        int rv;
                   2828:
                   2829:        simple_lock(&uvm.kernel_object->vmobjlock);
                   2830:        pg = uvm_pagelookup(uvm.kernel_object, ptpva - vm_map_min(kernel_map));
                   2831:        rv = --pg->wire_count;
                   2832:        PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB,
                   2833:            ("ptpage delref: pg %p now %d\n",
                   2834:             pg, pg->wire_count));
                   2835:        simple_unlock(&uvm.kernel_object->vmobjlock);
1.20      tsutsui  2836:        return rv;
1.1       chs      2837: }
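/*
 * Editor's note: the wire count of a PT page doubles as its mapping
 * reference count.  When pmap_ptpage_delref() returns 0,
 * pmap_remove_mapping() frees the page unless PRM_KEEPPTPAGE was
 * passed (see above).
 */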
                   2838:
                   2839: /*
                   2840:  *     Routine:        pmap_procwr
                   2841:  *
                   2842:  *     Function:
                   2843:  *             Synchronize caches corresponding to [va, va + len) in p.
                   2844:  */
                   2845: void
1.20      tsutsui  2846: pmap_procwr(struct proc *p, vaddr_t va, size_t len)
1.1       chs      2847: {
1.20      tsutsui  2848:
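        /*
         * Editor's note: 0x80000004 is believed to be
         * CC_EXTPURGE|CC_IPURGE from the m68k cachectl definitions,
         * i.e. push dirty data and invalidate the instruction cache
         * for the range.
         */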
1.1       chs      2849:        (void)cachectl1(0x80000004, va, len, p);
                   2850: }
                   2851:
                   2852: #ifdef mvme68k
                   2853:
                   2854: void
1.20      tsutsui  2855: _pmap_set_page_cacheable(pmap_t pmap, vaddr_t va)
1.1       chs      2856: {
                   2857:
1.20      tsutsui  2858:        if (!pmap_ste_v(pmap, va))
1.1       chs      2859:                return;
                   2860:
                   2861: #if defined(M68040) || defined(M68060)
                   2862: #if defined(M68020) || defined(M68030)
1.20      tsutsui  2863:        if (mmutype == MMU_68040) {
1.1       chs      2864: #endif
1.20      tsutsui  2865:        if (pmap_changebit(pmap_pte_pa(pmap_pte(pmap, va)), PG_CCB, ~PG_CI))
1.1       chs      2866:                DCIS();
                   2867:
                   2868: #if defined(M68020) || defined(M68030)
                   2869:        } else
1.20      tsutsui  2870:                pmap_changebit(pmap_pte_pa(pmap_pte(pmap, va)), 0, ~PG_CI);
1.1       chs      2871: #endif
                   2872: #else
1.20      tsutsui  2873:        pmap_changebit(pmap_pte_pa(pmap_pte(pmap, va)), 0, ~PG_CI);
1.1       chs      2874: #endif
                   2875: }
                   2876:
                   2877: void
1.20      tsutsui  2878: _pmap_set_page_cacheinhibit(pmap_t pmap, vaddr_t va)
1.1       chs      2879: {
                   2880:
1.20      tsutsui  2881:        if (!pmap_ste_v(pmap, va))
1.1       chs      2882:                return;
                   2883:
                   2884: #if defined(M68040) || defined(M68060)
                   2885: #if defined(M68020) || defined(M68030)
1.20      tsutsui  2886:        if (mmutype == MMU_68040) {
1.1       chs      2887: #endif
1.20      tsutsui  2888:        if (pmap_changebit(pmap_pte_pa(pmap_pte(pmap, va)), PG_CI, ~PG_CCB))
1.1       chs      2889:                DCIS();
                   2890: #if defined(M68020) || defined(M68030)
                   2891:        } else
1.20      tsutsui  2892:                pmap_changebit(pmap_pte_pa(pmap_pte(pmap, va)), PG_CI, ~0);
1.1       chs      2893: #endif
                   2894: #else
1.20      tsutsui  2895:        pmap_changebit(pmap_pte_pa(pmap_pte(pmap, va)), PG_CI, ~0);
1.1       chs      2896: #endif
                   2897: }
                   2898:
                   2899: int
1.20      tsutsui  2900: _pmap_page_is_cacheable(pmap_t pmap, vaddr_t va)
1.1       chs      2901: {
                   2902:
1.20      tsutsui  2903:        if (!pmap_ste_v(pmap, va))
                   2904:                return 0;
1.1       chs      2905:
1.20      tsutsui  2906:        return (pmap_pte_ci(pmap_pte(pmap, va)) == 0) ? 1 : 0;
1.1       chs      2907: }
                   2908:
                   2909: #endif /* mvme68k */
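/*
 * Editor's usage sketch: a driver flipping a kernel page to
 * cache-inhibited around device DMA and restoring it afterwards;
 * `kva' is hypothetical.
 */
#if 0
	_pmap_set_page_cacheinhibit(pmap_kernel(), kva);
	/* ... program the device, let the DMA complete ... */
	_pmap_set_page_cacheable(pmap_kernel(), kva);
#endif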
                   2910:
                   2911: #ifdef DEBUG
                   2912: /*
                   2913:  * pmap_pvdump:
                   2914:  *
                   2915:  *     Dump the contents of the PV list for the specified physical page.
                   2916:  */
                   2917: void
1.20      tsutsui  2918: pmap_pvdump(paddr_t pa)
1.1       chs      2919: {
                   2920:        struct pv_entry *pv;
                   2921:
                   2922:        printf("pa %lx", pa);
                   2923:        for (pv = pa_to_pvh(pa); pv; pv = pv->pv_next)
                   2924:                printf(" -> pmap %p, va %lx, ptste %p, ptpmap %p, flags %x",
1.20      tsutsui  2925:                    pv->pv_pmap, pv->pv_va, pv->pv_ptste, pv->pv_ptpmap,
                   2926:                    pv->pv_flags);
1.1       chs      2927:        printf("\n");
                   2928: }
                   2929:
                   2930: /*
                   2931:  * pmap_check_wiring:
                   2932:  *
                   2933:  *     Count the number of valid mappings in the specified PT page,
                   2934:  *     and ensure that it is consistent with the number of wirings
                   2935:  *     to that page that the VM system has.
                   2936:  */
                   2937: void
1.20      tsutsui  2938: pmap_check_wiring(const char *str, vaddr_t va)
1.1       chs      2939: {
                   2940:        pt_entry_t *pte;
                   2941:        paddr_t pa;
                   2942:        struct vm_page *pg;
                   2943:        int count;
                   2944:
                   2945:        if (!pmap_ste_v(pmap_kernel(), va) ||
                   2946:            !pmap_pte_v(pmap_pte(pmap_kernel(), va)))
                   2947:                return;
                   2948:
                   2949:        pa = pmap_pte_pa(pmap_pte(pmap_kernel(), va));
                   2950:        pg = PHYS_TO_VM_PAGE(pa);
1.13      chs      2951:        if (pg->wire_count > PAGE_SIZE / sizeof(pt_entry_t)) {
1.1       chs      2952:                panic("*%s*: 0x%lx: wire count %d", str, va, pg->wire_count);
                   2953:        }
                   2954:
                   2955:        count = 0;
1.3       thorpej  2956:        for (pte = (pt_entry_t *)va; pte < (pt_entry_t *)(va + PAGE_SIZE);
                   2957:             pte++)
1.1       chs      2958:                if (*pte)
                   2959:                        count++;
                   2960:        if (pg->wire_count != count)
                   2961:                panic("*%s*: 0x%lx: w%d/a%d",
                   2962:                       str, va, pg->wire_count, count);
                   2963: }
                   2964: #endif /* DEBUG */
