
Annotation of src/sys/uvm/pmap/pmap.c, Revision 1.39

1.39    ! pgoyette    1: /*     $NetBSD: pmap.c,v 1.38 2017/10/30 00:55:42 kre Exp $    */
1.1       christos    2:
                      3: /*-
                      4:  * Copyright (c) 1998, 2001 The NetBSD Foundation, Inc.
                      5:  * All rights reserved.
                      6:  *
                      7:  * This code is derived from software contributed to The NetBSD Foundation
                      8:  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
                      9:  * NASA Ames Research Center and by Chris G. Demetriou.
                     10:  *
                     11:  * Redistribution and use in source and binary forms, with or without
                     12:  * modification, are permitted provided that the following conditions
                     13:  * are met:
                     14:  * 1. Redistributions of source code must retain the above copyright
                     15:  *    notice, this list of conditions and the following disclaimer.
                     16:  * 2. Redistributions in binary form must reproduce the above copyright
                     17:  *    notice, this list of conditions and the following disclaimer in the
                     18:  *    documentation and/or other materials provided with the distribution.
                     19:  *
                     20:  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
                     21:  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
                     22:  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
                     23:  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
                     24:  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
                     25:  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
                     26:  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
                     27:  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
                     28:  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
                     29:  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
                     30:  * POSSIBILITY OF SUCH DAMAGE.
                     31:  */
                     32:
                     33: /*
                     34:  * Copyright (c) 1992, 1993
                     35:  *     The Regents of the University of California.  All rights reserved.
                     36:  *
                     37:  * This code is derived from software contributed to Berkeley by
                     38:  * the Systems Programming Group of the University of Utah Computer
                     39:  * Science Department and Ralph Campbell.
                     40:  *
                     41:  * Redistribution and use in source and binary forms, with or without
                     42:  * modification, are permitted provided that the following conditions
                     43:  * are met:
                     44:  * 1. Redistributions of source code must retain the above copyright
                     45:  *    notice, this list of conditions and the following disclaimer.
                     46:  * 2. Redistributions in binary form must reproduce the above copyright
                     47:  *    notice, this list of conditions and the following disclaimer in the
                     48:  *    documentation and/or other materials provided with the distribution.
                     49:  * 3. Neither the name of the University nor the names of its contributors
                     50:  *    may be used to endorse or promote products derived from this software
                     51:  *    without specific prior written permission.
                     52:  *
                     53:  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
                     54:  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
                     55:  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
                     56:  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
                     57:  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
                     58:  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
                     59:  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
                     60:  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
                     61:  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
                     62:  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
                     63:  * SUCH DAMAGE.
                     64:  *
                     65:  *     @(#)pmap.c      8.4 (Berkeley) 1/26/94
                     66:  */
                     67:
                     68: #include <sys/cdefs.h>
                     69:
1.39    ! pgoyette   70: __KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.38 2017/10/30 00:55:42 kre Exp $");
1.1       christos   71:
                     72: /*
                     73:  *     Manages physical address maps.
                     74:  *
                     75:  *     In addition to hardware address maps, this
                     76:  *     module is called upon to provide software-use-only
                     77:  *     maps which may or may not be stored in the same
                     78:  *     form as hardware maps.  These pseudo-maps are
                     79:  *     used to store intermediate results from copy
                     80:  *     operations to and from address spaces.
                     81:  *
                     82:  *     Since the information managed by this module is
                     83:  *     also stored by the logical address mapping module,
                     84:  *     this module may throw away valid virtual-to-physical
                     85:  *     mappings at almost any time.  However, invalidations
                     86:  *     of virtual-to-physical mappings must be done as
                     87:  *     requested.
                     88:  *
                     89:  *     In order to cope with hardware architectures which
                     90:  *     make virtual-to-physical map invalidates expensive,
                      91:  * this module may delay invalidation or protection-reduction
                     92:  *     operations until such time as they are actually
                     93:  *     necessary.  This module is given full information as
                     94:  *     to which processors are currently using which maps,
                     95:  *     and to when physical maps must be made correct.
                     96:  */
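
/*
 * An illustrative, non-authoritative sketch of the machine-independent
 * call sequence this module serves, per pmap(9).  The values example_va
 * and example_pa are hypothetical; the real callers are uvm_fault() and
 * the rest of UVM.
 *
 *	pmap_t pm = pmap_create();
 *	int error = pmap_enter(pm, example_va, example_pa,
 *	    VM_PROT_READ | VM_PROT_WRITE, PMAP_CANFAIL);
 *	if (error == 0)
 *		pmap_update(pm);	(publish the batched change)
 *	pmap_remove(pm, example_va, example_va + PAGE_SIZE);
 *	pmap_update(pm);
 *	pmap_destroy(pm);
 */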
                     97:
                     98: #include "opt_modular.h"
                     99: #include "opt_multiprocessor.h"
                    100: #include "opt_sysv.h"
                    101:
                    102: #define __PMAP_PRIVATE
                    103:
                    104: #include <sys/param.h>
1.15      matt      105: #include <sys/atomic.h>
1.1       christos  106: #include <sys/buf.h>
1.15      matt      107: #include <sys/cpu.h>
                    108: #include <sys/mutex.h>
1.1       christos  109: #include <sys/pool.h>
                    113:
                    114: #include <uvm/uvm.h>
1.26      cherry    115: #include <uvm/uvm_physseg.h>
1.1       christos  116:
1.15      matt      117: #if defined(MULTIPROCESSOR) && defined(PMAP_VIRTUAL_CACHE_ALIASES) \
                    118:     && !defined(PMAP_NO_PV_UNCACHED)
                    119: #error PMAP_VIRTUAL_CACHE_ALIASES with MULTIPROCESSOR requires \
                    120:  PMAP_NO_PV_UNCACHED to be defined
                    121: #endif
1.1       christos  122:
                    123: PMAP_COUNTER(remove_kernel_calls, "remove kernel calls");
                    124: PMAP_COUNTER(remove_kernel_pages, "kernel pages unmapped");
                    125: PMAP_COUNTER(remove_user_calls, "remove user calls");
                    126: PMAP_COUNTER(remove_user_pages, "user pages unmapped");
                    127: PMAP_COUNTER(remove_flushes, "remove cache flushes");
                    128: PMAP_COUNTER(remove_tlb_ops, "remove tlb ops");
                    129: PMAP_COUNTER(remove_pvfirst, "remove pv first");
                    130: PMAP_COUNTER(remove_pvsearch, "remove pv search");
                    131:
                    132: PMAP_COUNTER(prefer_requests, "prefer requests");
                    133: PMAP_COUNTER(prefer_adjustments, "prefer adjustments");
                    134:
                    135: PMAP_COUNTER(idlezeroed_pages, "pages idle zeroed");
                    136:
                    137: PMAP_COUNTER(kenter_pa, "kernel fast mapped pages");
                    138: PMAP_COUNTER(kenter_pa_bad, "kernel fast mapped pages (bad color)");
                    139: PMAP_COUNTER(kenter_pa_unmanaged, "kernel fast mapped unmanaged pages");
                    140: PMAP_COUNTER(kremove_pages, "kernel fast unmapped pages");
                    141:
                    142: PMAP_COUNTER(page_cache_evictions, "pages changed to uncacheable");
                    143: PMAP_COUNTER(page_cache_restorations, "pages changed to cacheable");
                    144:
                    145: PMAP_COUNTER(kernel_mappings_bad, "kernel pages mapped (bad color)");
                    146: PMAP_COUNTER(user_mappings_bad, "user pages mapped (bad color)");
                    147: PMAP_COUNTER(kernel_mappings, "kernel pages mapped");
                    148: PMAP_COUNTER(user_mappings, "user pages mapped");
                    149: PMAP_COUNTER(user_mappings_changed, "user mapping changed");
                    150: PMAP_COUNTER(kernel_mappings_changed, "kernel mapping changed");
                    151: PMAP_COUNTER(uncached_mappings, "uncached pages mapped");
                    152: PMAP_COUNTER(unmanaged_mappings, "unmanaged pages mapped");
                    153: PMAP_COUNTER(managed_mappings, "managed pages mapped");
                    154: PMAP_COUNTER(mappings, "pages mapped");
                    155: PMAP_COUNTER(remappings, "pages remapped");
                    156: PMAP_COUNTER(unmappings, "pages unmapped");
                    157: PMAP_COUNTER(primary_mappings, "page initial mappings");
                    158: PMAP_COUNTER(primary_unmappings, "page final unmappings");
                    159: PMAP_COUNTER(tlb_hit, "page mapping");
                    160:
                    161: PMAP_COUNTER(exec_mappings, "exec pages mapped");
                    162: PMAP_COUNTER(exec_synced_mappings, "exec pages synced");
                    163: PMAP_COUNTER(exec_synced_remove, "exec pages synced (PR)");
                    164: PMAP_COUNTER(exec_synced_clear_modify, "exec pages synced (CM)");
                    165: PMAP_COUNTER(exec_synced_page_protect, "exec pages synced (PP)");
                    166: PMAP_COUNTER(exec_synced_protect, "exec pages synced (P)");
                    167: PMAP_COUNTER(exec_uncached_page_protect, "exec pages uncached (PP)");
                    168: PMAP_COUNTER(exec_uncached_clear_modify, "exec pages uncached (CM)");
                    169: PMAP_COUNTER(exec_uncached_zero_page, "exec pages uncached (ZP)");
                    170: PMAP_COUNTER(exec_uncached_copy_page, "exec pages uncached (CP)");
                    171: PMAP_COUNTER(exec_uncached_remove, "exec pages uncached (PR)");
                    172:
                    173: PMAP_COUNTER(create, "creates");
                    174: PMAP_COUNTER(reference, "references");
                    175: PMAP_COUNTER(dereference, "dereferences");
                    176: PMAP_COUNTER(destroy, "destroyed");
                    177: PMAP_COUNTER(activate, "activations");
                    178: PMAP_COUNTER(deactivate, "deactivations");
                    179: PMAP_COUNTER(update, "updates");
                    180: #ifdef MULTIPROCESSOR
                    181: PMAP_COUNTER(shootdown_ipis, "shootdown IPIs");
                    182: #endif
                    183: PMAP_COUNTER(unwire, "unwires");
                    184: PMAP_COUNTER(copy, "copies");
                    185: PMAP_COUNTER(clear_modify, "clear_modifies");
                    186: PMAP_COUNTER(protect, "protects");
                    187: PMAP_COUNTER(page_protect, "page_protects");
                    188:
                    189: #define PMAP_ASID_RESERVED 0
                    190: CTASSERT(PMAP_ASID_RESERVED == 0);
                    191:
1.15      matt      192: #ifndef PMAP_SEGTAB_ALIGN
                    193: #define PMAP_SEGTAB_ALIGN      /* nothing */
                    194: #endif
                    195: #ifdef _LP64
                    196: pmap_segtab_t  pmap_kstart_segtab PMAP_SEGTAB_ALIGN; /* first mid-level segtab for kernel */
                    197: #endif
                    198: pmap_segtab_t  pmap_kern_segtab PMAP_SEGTAB_ALIGN = { /* top level segtab for kernel */
                    199: #ifdef _LP64
                    200:        .seg_seg[(VM_MIN_KERNEL_ADDRESS & XSEGOFSET) >> SEGSHIFT] = &pmap_kstart_segtab,
1.1       christos  201: #endif
1.15      matt      202: };
1.1       christos  203:
                    204: struct pmap_kernel kernel_pmap_store = {
                    205:        .kernel_pmap = {
                    206:                .pm_count = 1,
1.15      matt      207:                .pm_segtab = &pmap_kern_segtab,
1.1       christos  208:                .pm_minaddr = VM_MIN_KERNEL_ADDRESS,
                    209:                .pm_maxaddr = VM_MAX_KERNEL_ADDRESS,
                    210:        },
                    211: };
                    212:
                    213: struct pmap * const kernel_pmap_ptr = &kernel_pmap_store.kernel_pmap;
                    214:
1.15      matt      215: struct pmap_limits pmap_limits = {     /* VA and PA limits */
1.12      matt      216:        .virtual_start = VM_MIN_KERNEL_ADDRESS,
                    217: };
1.1       christos  218:
                    219: #ifdef UVMHIST
                    220: static struct kern_history_ent pmapexechistbuf[10000];
                    221: static struct kern_history_ent pmaphistbuf[10000];
1.8       nonaka    222: UVMHIST_DEFINE(pmapexechist);
                    223: UVMHIST_DEFINE(pmaphist);
1.1       christos  224: #endif
                    225:
                    226: /*
                    227:  * The pools from which pmap structures and sub-structures are allocated.
                    228:  */
                    229: struct pool pmap_pmap_pool;
                    230: struct pool pmap_pv_pool;
                    231:
                    232: #ifndef PMAP_PV_LOWAT
                    233: #define        PMAP_PV_LOWAT   16
                    234: #endif
1.15      matt      235: int    pmap_pv_lowat = PMAP_PV_LOWAT;
1.1       christos  236:
1.15      matt      237: bool   pmap_initialized = false;
1.1       christos  238: #define        PMAP_PAGE_COLOROK_P(a, b) \
                    239:                ((((int)(a) ^ (int)(b)) & pmap_page_colormask) == 0)
1.15      matt      240: u_int  pmap_page_colormask;
1.1       christos  241:
1.15      matt      242: #define PAGE_IS_MANAGED(pa)    (pmap_initialized && uvm_pageismanaged(pa))
1.1       christos  243:
                    244: #define PMAP_IS_ACTIVE(pm)                                             \
                    245:        ((pm) == pmap_kernel() ||                                       \
                    246:         (pm) == curlwp->l_proc->p_vmspace->vm_map.pmap)
                    247:
                    248: /* Forward function declarations */
1.15      matt      249: void pmap_page_remove(struct vm_page *);
                    250: static void pmap_pvlist_check(struct vm_page_md *);
1.1       christos  251: void pmap_remove_pv(pmap_t, vaddr_t, struct vm_page *, bool);
1.15      matt      252: void pmap_enter_pv(pmap_t, vaddr_t, struct vm_page *, pt_entry_t *, u_int);
1.1       christos  253:
                    254: /*
                    255:  * PV table management functions.
                    256:  */
                    257: void   *pmap_pv_page_alloc(struct pool *, int);
                    258: void   pmap_pv_page_free(struct pool *, void *);
                    259:
                    260: struct pool_allocator pmap_pv_page_allocator = {
                    261:        pmap_pv_page_alloc, pmap_pv_page_free, 0,
                    262: };
                    263:
                    264: #define        pmap_pv_alloc()         pool_get(&pmap_pv_pool, PR_NOWAIT)
                    265: #define        pmap_pv_free(pv)        pool_put(&pmap_pv_pool, (pv))
                    266:
1.10      nonaka    267: #if !defined(MULTIPROCESSOR) || !defined(PMAP_MD_NEED_TLB_MISS_LOCK)
                    268: #define        pmap_md_tlb_miss_lock_enter()   do { } while(/*CONSTCOND*/0)
                    269: #define        pmap_md_tlb_miss_lock_exit()    do { } while(/*CONSTCOND*/0)
1.15      matt      270: #endif /* !MULTIPROCESSOR || !PMAP_MD_NEED_TLB_MISS_LOCK */
                    271:
                    272: #ifndef MULTIPROCESSOR
                    273: kmutex_t pmap_pvlist_mutex     __cacheline_aligned;
                    274: #endif
                    275:
                    276: /*
                    277:  * Debug functions.
                    278:  */
                    279:
1.19      jakllsch  280: #ifdef DEBUG
1.15      matt      281: static inline void
                    282: pmap_asid_check(pmap_t pm, const char *func)
                    283: {
                    284:        if (!PMAP_IS_ACTIVE(pm))
                    285:                return;
                    286:
                    287:        struct pmap_asid_info * const pai = PMAP_PAI(pm, cpu_tlb_info(curcpu()));
                    288:        tlb_asid_t asid = tlb_get_asid();
                    289:        if (asid != pai->pai_asid)
                    290:                panic("%s: inconsistency for active TLB update: %u <-> %u",
                    291:                    func, asid, pai->pai_asid);
1.19      jakllsch  292: }
1.15      matt      293: #endif
                    294:
                    295: static void
                    296: pmap_addr_range_check(pmap_t pmap, vaddr_t sva, vaddr_t eva, const char *func)
                    297: {
                    298: #ifdef DEBUG
                    299:        if (pmap == pmap_kernel()) {
                    300:                if (sva < VM_MIN_KERNEL_ADDRESS)
                    301:                        panic("%s: kva %#"PRIxVADDR" not in range",
                    302:                            func, sva);
                    303:                if (eva >= pmap_limits.virtual_end)
                    304:                        panic("%s: kva %#"PRIxVADDR" not in range",
                    305:                            func, eva);
                    306:        } else {
                    307:                if (eva > VM_MAXUSER_ADDRESS)
                    308:                        panic("%s: uva %#"PRIxVADDR" not in range",
                    309:                            func, eva);
                    310:                pmap_asid_check(pmap, func);
                    311:        }
                    312: #endif
                    313: }
1.10      nonaka    314:
1.1       christos  315: /*
                    316:  * Misc. functions.
                    317:  */
                    318:
                    319: bool
                    320: pmap_page_clear_attributes(struct vm_page_md *mdpg, u_int clear_attributes)
                    321: {
1.15      matt      322:        volatile unsigned long * const attrp = &mdpg->mdpg_attrs;
1.1       christos  323: #ifdef MULTIPROCESSOR
                    324:        for (;;) {
                     325:                unsigned long old_attr = *attrp;
                     326:                if ((old_attr & clear_attributes) == 0)
                     327:                        return false;
                     328:                unsigned long new_attr = old_attr & ~clear_attributes;
1.15      matt      329:                if (old_attr == atomic_cas_ulong(attrp, old_attr, new_attr))
1.1       christos  330:                        return true;
                    331:        }
                    332: #else
1.15      matt      333:        unsigned long old_attr = *attrp;
1.1       christos  334:        if ((old_attr & clear_attributes) == 0)
                    335:                return false;
                    336:        *attrp &= ~clear_attributes;
                    337:        return true;
                    338: #endif
                    339: }
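
/*
 * The MULTIPROCESSOR branch above is the usual lock-free compare-and-swap
 * retry idiom: re-read the word, compute the new value, and retry if some
 * other CPU changed the word in the meantime.  A minimal hedged sketch of
 * the same idiom (the variables word and mask are hypothetical):
 *
 *	volatile unsigned long word;
 *	unsigned long mask;
 *
 *	for (;;) {
 *		unsigned long old = word;
 *		unsigned long new = old & ~mask;
 *		if (old == atomic_cas_ulong(&word, old, new))
 *			break;
 *	}
 */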
                    340:
                    341: void
                    342: pmap_page_set_attributes(struct vm_page_md *mdpg, u_int set_attributes)
                    343: {
                    344: #ifdef MULTIPROCESSOR
1.15      matt      345:        atomic_or_ulong(&mdpg->mdpg_attrs, set_attributes);
1.1       christos  346: #else
                    347:        mdpg->mdpg_attrs |= set_attributes;
                    348: #endif
                    349: }
                    350:
                    351: static void
                    352: pmap_page_syncicache(struct vm_page *pg)
                    353: {
                    354: #ifndef MULTIPROCESSOR
1.15      matt      355:        struct pmap * const curpmap = curlwp->l_proc->p_vmspace->vm_map.pmap;
1.1       christos  356: #endif
                    357:        struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
                    358:        pv_entry_t pv = &mdpg->mdpg_first;
1.2       matt      359:        kcpuset_t *onproc;
                    360: #ifdef MULTIPROCESSOR
                    361:        kcpuset_create(&onproc, true);
1.15      matt      362:        KASSERT(onproc != NULL);
1.3       matt      363: #else
                    364:        onproc = NULL;
1.2       matt      365: #endif
1.15      matt      366:        VM_PAGEMD_PVLIST_READLOCK(mdpg);
                    367:        pmap_pvlist_check(mdpg);
1.2       matt      368:
1.1       christos  369:        if (pv->pv_pmap != NULL) {
                    370:                for (; pv != NULL; pv = pv->pv_next) {
                    371: #ifdef MULTIPROCESSOR
1.2       matt      372:                        kcpuset_merge(onproc, pv->pv_pmap->pm_onproc);
                    373:                        if (kcpuset_match(onproc, kcpuset_running)) {
1.1       christos  374:                                break;
                    375:                        }
                    376: #else
                    377:                        if (pv->pv_pmap == curpmap) {
1.2       matt      378:                                onproc = curcpu()->ci_data.cpu_kcpuset;
1.1       christos  379:                                break;
                    380:                        }
                    381: #endif
                    382:                }
                    383:        }
1.15      matt      384:        pmap_pvlist_check(mdpg);
1.1       christos  385:        VM_PAGEMD_PVLIST_UNLOCK(mdpg);
                    386:        kpreempt_disable();
                    387:        pmap_md_page_syncicache(pg, onproc);
1.15      matt      388:        kpreempt_enable();
1.2       matt      389: #ifdef MULTIPROCESSOR
                    390:        kcpuset_destroy(onproc);
                    391: #endif
1.1       christos  392: }
                    393:
                    394: /*
                    395:  * Define the initial bounds of the kernel virtual address space.
                    396:  */
                    397: void
                    398: pmap_virtual_space(vaddr_t *vstartp, vaddr_t *vendp)
                    399: {
                    400:
1.12      matt      401:        *vstartp = pmap_limits.virtual_start;
                    402:        *vendp = pmap_limits.virtual_end;
1.1       christos  403: }
                    404:
                    405: vaddr_t
                    406: pmap_growkernel(vaddr_t maxkvaddr)
                    407: {
1.14      msaitoh   408:        vaddr_t virtual_end = pmap_limits.virtual_end;
1.1       christos  409:        maxkvaddr = pmap_round_seg(maxkvaddr) - 1;
                    410:
                    411:        /*
                    412:         * Reserve PTEs for the new KVA space.
                    413:         */
                    414:        for (; virtual_end < maxkvaddr; virtual_end += NBSEG) {
                    415:                pmap_pte_reserve(pmap_kernel(), virtual_end, 0);
                    416:        }
                    417:
                    418:        /*
                    419:         * Don't exceed VM_MAX_KERNEL_ADDRESS!
                    420:         */
                    421:        if (virtual_end == 0 || virtual_end > VM_MAX_KERNEL_ADDRESS)
                    422:                virtual_end = VM_MAX_KERNEL_ADDRESS;
                    423:
                    424:        /*
                    425:         * Update new end.
                    426:         */
                    427:        pmap_limits.virtual_end = virtual_end;
                    428:        return virtual_end;
                    429: }
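
/*
 * A hedged sketch of how the two routines above are typically consumed
 * by machine-independent code (the variable names are illustrative only):
 * pmap_virtual_space() reports the initial KVA bounds at bootstrap, and
 * pmap_growkernel() is called later when the kernel map must grow beyond
 * the range already backed by PTEs.
 *
 *	vaddr_t kva_start, kva_end;
 *	pmap_virtual_space(&kva_start, &kva_end);
 *	...
 *	vaddr_t new_limit = pmap_growkernel(wanted_kva);
 */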
                    430:
                    431: /*
                    432:  * Bootstrap memory allocator (alternative to vm_bootstrap_steal_memory()).
                    433:  * This function allows for early dynamic memory allocation until the virtual
                    434:  * memory system has been bootstrapped.  After that point, either kmem_alloc
                    435:  * or malloc should be used.  This function works by stealing pages from the
                    436:  * (to be) managed page pool, then implicitly mapping the pages (by using
                    437:  * their k0seg addresses) and zeroing them.
                    438:  *
                    439:  * It may be used once the physical memory segments have been pre-loaded
                    440:  * into the vm_physmem[] array.  Early memory allocation MUST use this
                    441:  * interface!  This cannot be used after vm_page_startup(), and will
                    442:  * generate a panic if tried.
                    443:  *
                    444:  * Note that this memory will never be freed, and in essence it is wired
                    445:  * down.
                    446:  *
                    447:  * We must adjust *vstartp and/or *vendp iff we use address space
                    448:  * from the kernel virtual address range defined by pmap_virtual_space().
                    449:  */
                    450: vaddr_t
                    451: pmap_steal_memory(vsize_t size, vaddr_t *vstartp, vaddr_t *vendp)
                    452: {
1.15      matt      453:        size_t npgs;
1.1       christos  454:        paddr_t pa;
                    455:        vaddr_t va;
1.26      cherry    456:
1.27      skrll     457:        uvm_physseg_t maybe_bank = UVM_PHYSSEG_TYPE_INVALID;
1.1       christos  458:
                    459:        size = round_page(size);
                    460:        npgs = atop(size);
                    461:
1.15      matt      462:        aprint_debug("%s: need %zu pages\n", __func__, npgs);
                    463:
1.26      cherry    464:        for (uvm_physseg_t bank = uvm_physseg_get_first();
                    465:             uvm_physseg_valid_p(bank);
                    466:             bank = uvm_physseg_get_next(bank)) {
                    467:
1.1       christos  468:                if (uvm.page_init_done == true)
                    469:                        panic("pmap_steal_memory: called _after_ bootstrap");
                    470:
1.27      skrll     471:                aprint_debug("%s: seg %"PRIxPHYSSEG": %#"PRIxPADDR" %#"PRIxPADDR" %#"PRIxPADDR" %#"PRIxPADDR"\n",
1.15      matt      472:                    __func__, bank,
1.26      cherry    473:                    uvm_physseg_get_avail_start(bank), uvm_physseg_get_start(bank),
                    474:                    uvm_physseg_get_avail_end(bank), uvm_physseg_get_end(bank));
1.15      matt      475:
1.26      cherry    476:                if (uvm_physseg_get_avail_start(bank) != uvm_physseg_get_start(bank)
                    477:                    || uvm_physseg_get_avail_start(bank) >= uvm_physseg_get_avail_end(bank)) {
1.27      skrll     478:                        aprint_debug("%s: seg %"PRIxPHYSSEG": bad start\n", __func__, bank);
1.1       christos  479:                        continue;
1.15      matt      480:                }
1.1       christos  481:
1.26      cherry    482:                if (uvm_physseg_get_avail_end(bank) - uvm_physseg_get_avail_start(bank) < npgs) {
1.27      skrll     483:                        aprint_debug("%s: seg %"PRIxPHYSSEG": too small for %zu pages\n",
1.15      matt      484:                            __func__, bank, npgs);
1.1       christos  485:                        continue;
1.15      matt      486:                }
                    487:
1.26      cherry    488:                if (!pmap_md_ok_to_steal_p(bank, npgs)) {
1.15      matt      489:                        continue;
                    490:                }
                    491:
                    492:                /*
                    493:                 * Always try to allocate from the segment with the least
                    494:                 * amount of space left.
                    495:                 */
1.26      cherry    496: #define VM_PHYSMEM_SPACE(b)    ((uvm_physseg_get_avail_end(b)) - (uvm_physseg_get_avail_start(b)))
                    497:                if (uvm_physseg_valid_p(maybe_bank) == false
                    498:                    || VM_PHYSMEM_SPACE(bank) < VM_PHYSMEM_SPACE(maybe_bank)) {
1.15      matt      499:                        maybe_bank = bank;
                    500:                }
                    501:        }
                    502:
1.26      cherry    503:        if (uvm_physseg_valid_p(maybe_bank)) {
                    504:                const uvm_physseg_t bank = maybe_bank;
1.29      skrll     505:
1.1       christos  506:                /*
                    507:                 * There are enough pages here; steal them!
                    508:                 */
1.26      cherry    509:                pa = ptoa(uvm_physseg_get_start(bank));
                    510:                uvm_physseg_unplug(atop(pa), npgs);
1.1       christos  511:
1.27      skrll     512:                aprint_debug("%s: seg %"PRIxPHYSSEG": %zu pages stolen (%#"PRIxPADDR" left)\n",
1.26      cherry    513:                    __func__, bank, npgs, VM_PHYSMEM_SPACE(bank));
1.1       christos  514:
                    515:                va = pmap_md_map_poolpage(pa, size);
                    516:                memset((void *)va, 0, size);
                    517:                return va;
                    518:        }
                    519:
                    520:        /*
                    521:         * If we got here, there was no memory left.
                    522:         */
1.15      matt      523:        panic("pmap_steal_memory: no memory to steal %zu pages", npgs);
1.1       christos  524: }
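
/*
 * A hedged example of an early-bootstrap caller of pmap_steal_memory();
 * the sizes and names (pv_table, pv_table_bytes) are hypothetical, and
 * the real callers run during bootstrap, before uvm_page_init():
 *
 *	vaddr_t vstart, vend;
 *	void *pv_table;
 *
 *	pmap_virtual_space(&vstart, &vend);
 *	pv_table = (void *)pmap_steal_memory(round_page(pv_table_bytes),
 *	    &vstart, &vend);
 *
 * The stolen pages come back zeroed, directly mapped and permanently
 * wired; they can never be freed.
 */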
                    525:
                    526: /*
                    527:  *     Initialize the pmap module.
                    528:  *     Called by vm_init, to initialize any structures that the pmap
                    529:  *     system needs to map virtual memory.
                    530:  */
                    531: void
                    532: pmap_init(void)
                    533: {
                    534:        UVMHIST_INIT_STATIC(pmapexechist, pmapexechistbuf);
                    535:        UVMHIST_INIT_STATIC(pmaphist, pmaphistbuf);
                    536:
                    537:        UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
                    538:
                    539:        /*
                    540:         * Initialize the segtab lock.
                    541:         */
                    542:        mutex_init(&pmap_segtab_lock, MUTEX_DEFAULT, IPL_HIGH);
                    543:
                    544:        /*
                    545:         * Set a low water mark on the pv_entry pool, so that we are
                    546:         * more likely to have these around even in extreme memory
                    547:         * starvation.
                    548:         */
                    549:        pool_setlowat(&pmap_pv_pool, pmap_pv_lowat);
                    550:
1.15      matt      551:        /*
                    552:         * Set the page colormask but allow pmap_md_init to override it.
                    553:         */
                    554:        pmap_page_colormask = ptoa(uvmexp.colormask);
                    555:
1.1       christos  556:        pmap_md_init();
                    557:
                    558:        /*
                    559:         * Now it is safe to enable pv entry recording.
                    560:         */
                    561:        pmap_initialized = true;
                    562: }
                    563:
                    564: /*
                    565:  *     Create and return a physical map.
                    566:  *
                     567:  *     This implementation takes no size argument; the
                     568:  *     map returned is always an actual physical map
                     569:  *     that may be referenced by the hardware.
                    575:  */
                    576: pmap_t
                    577: pmap_create(void)
                    578: {
                    579:        UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
                    580:        PMAP_COUNT(create);
                    581:
1.15      matt      582:        pmap_t pmap = pool_get(&pmap_pmap_pool, PR_WAITOK);
1.1       christos  583:        memset(pmap, 0, PMAP_SIZE);
                    584:
                    585:        KASSERT(pmap->pm_pai[0].pai_link.le_prev == NULL);
                    586:
                    587:        pmap->pm_count = 1;
                    588:        pmap->pm_minaddr = VM_MIN_ADDRESS;
                    589:        pmap->pm_maxaddr = VM_MAXUSER_ADDRESS;
                    590:
                    591:        pmap_segtab_init(pmap);
                    592:
1.5       nonaka    593: #ifdef MULTIPROCESSOR
                    594:        kcpuset_create(&pmap->pm_active, true);
                    595:        kcpuset_create(&pmap->pm_onproc, true);
1.15      matt      596:        KASSERT(pmap->pm_active != NULL);
                    597:        KASSERT(pmap->pm_onproc != NULL);
1.5       nonaka    598: #endif
                    599:
1.37      pgoyette  600:        UVMHIST_LOG(pmaphist, " <-- done (pmap=%#jx)", (uintptr_t)pmap,
                    601:            0, 0, 0);
1.15      matt      602:
1.1       christos  603:        return pmap;
                    604: }
                    605:
                    606: /*
                    607:  *     Retire the given physical map from service.
                    608:  *     Should only be called if the map contains
                    609:  *     no valid mappings.
                    610:  */
                    611: void
                    612: pmap_destroy(pmap_t pmap)
                    613: {
                    614:        UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
1.37      pgoyette  615:        UVMHIST_LOG(pmaphist, "(pmap=%#jx)", (uintptr_t)pmap, 0, 0, 0);
1.1       christos  616:
                    617:        if (atomic_dec_uint_nv(&pmap->pm_count) > 0) {
                    618:                PMAP_COUNT(dereference);
1.15      matt      619:                UVMHIST_LOG(pmaphist, " <-- done (deref)", 0, 0, 0, 0);
1.1       christos  620:                return;
                    621:        }
                    622:
1.15      matt      623:        PMAP_COUNT(destroy);
1.1       christos  624:        KASSERT(pmap->pm_count == 0);
                    625:        kpreempt_disable();
1.10      nonaka    626:        pmap_md_tlb_miss_lock_enter();
1.1       christos  627:        pmap_tlb_asid_release_all(pmap);
                    628:        pmap_segtab_destroy(pmap, NULL, 0);
1.10      nonaka    629:        pmap_md_tlb_miss_lock_exit();
1.1       christos  630:
1.6       nonaka    631: #ifdef MULTIPROCESSOR
1.7       nonaka    632:        kcpuset_destroy(pmap->pm_active);
                    633:        kcpuset_destroy(pmap->pm_onproc);
1.15      matt      634:        pmap->pm_active = NULL;
                    635:        pmap->pm_onproc = NULL;
1.6       nonaka    636: #endif
                    637:
1.1       christos  638:        pool_put(&pmap_pmap_pool, pmap);
                    639:        kpreempt_enable();
                    640:
1.15      matt      641:        UVMHIST_LOG(pmaphist, " <-- done (freed)", 0, 0, 0, 0);
1.1       christos  642: }
                    643:
                    644: /*
                    645:  *     Add a reference to the specified pmap.
                    646:  */
                    647: void
                    648: pmap_reference(pmap_t pmap)
                    649: {
                    650:        UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
1.37      pgoyette  651:        UVMHIST_LOG(pmaphist, "(pmap=%#jx)", (uintptr_t)pmap, 0, 0, 0);
1.1       christos  652:        PMAP_COUNT(reference);
                    653:
                    654:        if (pmap != NULL) {
                    655:                atomic_inc_uint(&pmap->pm_count);
                    656:        }
                    657:
1.15      matt      658:        UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
1.1       christos  659: }
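
/*
 * Reference counting, sketched (hedged; "pm" is hypothetical): every
 * pmap_reference() must eventually be balanced by a pmap_destroy(), and
 * the map is only freed once pm_count drops to zero.
 *
 *	pmap_reference(pm);		pm_count: n   -> n+1
 *	...
 *	pmap_destroy(pm);		pm_count: n+1 -> n  (freed at 0)
 */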
                    660:
                    661: /*
                    662:  *     Make a new pmap (vmspace) active for the given process.
                    663:  */
                    664: void
                    665: pmap_activate(struct lwp *l)
                    666: {
                    667:        pmap_t pmap = l->l_proc->p_vmspace->vm_map.pmap;
                    668:
                    669:        UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
1.37      pgoyette  670:        UVMHIST_LOG(pmaphist, "(l=%#jx pmap=%#jx)", (uintptr_t)l,
                    671:            (uintptr_t)pmap, 0, 0);
1.1       christos  672:        PMAP_COUNT(activate);
                    673:
                    674:        kpreempt_disable();
1.10      nonaka    675:        pmap_md_tlb_miss_lock_enter();
1.1       christos  676:        pmap_tlb_asid_acquire(pmap, l);
                    677:        if (l == curlwp) {
                    678:                pmap_segtab_activate(pmap, l);
                    679:        }
1.10      nonaka    680:        pmap_md_tlb_miss_lock_exit();
1.1       christos  681:        kpreempt_enable();
                    682:
1.37      pgoyette  683:        UVMHIST_LOG(pmaphist, " <-- done (%ju:%ju)", l->l_proc->p_pid,
                    684:            l->l_lid, 0, 0);
1.15      matt      685: }
                    686:
                    687: /*
                    688:  * Remove this page from all physical maps in which it resides.
                    689:  * Reflects back modify bits to the pager.
                    690:  */
                    691: void
                    692: pmap_page_remove(struct vm_page *pg)
                    693: {
                    694:        struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
                    695:
                    696:        kpreempt_disable();
                    697:        VM_PAGEMD_PVLIST_LOCK(mdpg);
                    698:        pmap_pvlist_check(mdpg);
                    699:
                    700:        UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
                    701:
1.37      pgoyette  702:        UVMHIST_LOG(pmapexechist, "pg %#jx (pa %#jx) [page removed]: "
                    703:            "execpage cleared", (uintptr_t)pg, VM_PAGE_TO_PHYS(pg), 0, 0);
1.22      matt      704: #ifdef PMAP_VIRTUAL_CACHE_ALIASES
                    705:        pmap_page_clear_attributes(mdpg, VM_PAGEMD_EXECPAGE|VM_PAGEMD_UNCACHED);
                    706: #else
                    707:        pmap_page_clear_attributes(mdpg, VM_PAGEMD_EXECPAGE);
                    708: #endif
                    709:        PMAP_COUNT(exec_uncached_remove);
                    710:
1.15      matt      711:        pv_entry_t pv = &mdpg->mdpg_first;
                    712:        if (pv->pv_pmap == NULL) {
                    713:                VM_PAGEMD_PVLIST_UNLOCK(mdpg);
                    714:                kpreempt_enable();
                    715:                UVMHIST_LOG(pmaphist, " <-- done (empty)", 0, 0, 0, 0);
                    716:                return;
                    717:        }
                    718:
                    719:        pv_entry_t npv;
                    720:        pv_entry_t pvp = NULL;
                    721:
                    722:        for (; pv != NULL; pv = npv) {
                    723:                npv = pv->pv_next;
                    724: #ifdef PMAP_VIRTUAL_CACHE_ALIASES
                    725:                if (pv->pv_va & PV_KENTER) {
1.37      pgoyette  726:                        UVMHIST_LOG(pmaphist, " pv %#jx pmap %#jx va %jx"
                    727:                            " skip", (uintptr_t)pv, (uintptr_t)pv->pv_pmap,
                    728:                            pv->pv_va, 0);
1.15      matt      729:
                    730:                        KASSERT(pv->pv_pmap == pmap_kernel());
                    731:
                    732:                        /* Assume no more - it'll get fixed if there are */
                    733:                        pv->pv_next = NULL;
                    734:
                    735:                        /*
                    736:                         * pvp is non-null when we already have a PV_KENTER
                    737:                         * pv in pvh_first; otherwise we haven't seen a
                    738:                         * PV_KENTER pv and we need to copy this one to
                    739:                         * pvh_first
                    740:                         */
                    741:                        if (pvp) {
                    742:                                /*
                    743:                                 * The previous PV_KENTER pv needs to point to
                    744:                                 * this PV_KENTER pv
                    745:                                 */
                    746:                                pvp->pv_next = pv;
                    747:                        } else {
                    748:                                pv_entry_t fpv = &mdpg->mdpg_first;
                    749:                                *fpv = *pv;
                    750:                                KASSERT(fpv->pv_pmap == pmap_kernel());
                    751:                        }
                    752:                        pvp = pv;
                    753:                        continue;
                    754:                }
                    755: #endif
                    756:                const pmap_t pmap = pv->pv_pmap;
                    757:                vaddr_t va = trunc_page(pv->pv_va);
                    758:                pt_entry_t * const ptep = pmap_pte_lookup(pmap, va);
                    759:                KASSERTMSG(ptep != NULL, "%#"PRIxVADDR " %#"PRIxVADDR, va,
                    760:                    pmap_limits.virtual_end);
                    761:                pt_entry_t pte = *ptep;
1.37      pgoyette  762:                UVMHIST_LOG(pmaphist, " pv %#jx pmap %#jx va %jx"
                    763:                    " pte %jx", (uintptr_t)pv, (uintptr_t)pmap, va,
                    764:                    pte_value(pte));
1.15      matt      765:                if (!pte_valid_p(pte))
                    766:                        continue;
                    767:                const bool is_kernel_pmap_p = (pmap == pmap_kernel());
                    768:                if (is_kernel_pmap_p) {
                    769:                        PMAP_COUNT(remove_kernel_pages);
                    770:                } else {
                    771:                        PMAP_COUNT(remove_user_pages);
                    772:                }
                    773:                if (pte_wired_p(pte))
                    774:                        pmap->pm_stats.wired_count--;
                    775:                pmap->pm_stats.resident_count--;
                    776:
                    777:                pmap_md_tlb_miss_lock_enter();
                    778:                const pt_entry_t npte = pte_nv_entry(is_kernel_pmap_p);
1.35      skrll     779:                pte_set(ptep, npte);
1.36      skrll     780:                if (__predict_true(!(pmap->pm_flags & PMAP_DEFERRED_ACTIVATE))) {
                    781:                        /*
                    782:                         * Flush the TLB for the given address.
                    783:                         */
                    784:                        pmap_tlb_invalidate_addr(pmap, va);
                    785:                }
1.15      matt      786:                pmap_md_tlb_miss_lock_exit();
                    787:
                    788:                /*
                    789:                 * non-null means this is a non-pvh_first pv, so we should
                    790:                 * free it.
                    791:                 */
                    792:                if (pvp) {
                    793:                        KASSERT(pvp->pv_pmap == pmap_kernel());
                    794:                        KASSERT(pvp->pv_next == NULL);
                    795:                        pmap_pv_free(pv);
                    796:                } else {
                    797:                        pv->pv_pmap = NULL;
                    798:                        pv->pv_next = NULL;
                    799:                }
                    800:        }
                    801:
                    802:        pmap_pvlist_check(mdpg);
                    803:        VM_PAGEMD_PVLIST_UNLOCK(mdpg);
                    804:        kpreempt_enable();
                    805:
                    806:        UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
1.1       christos  807: }
                    808:
1.15      matt      809:
1.1       christos  810: /*
                    811:  *     Make a previously active pmap (vmspace) inactive.
                    812:  */
                    813: void
                    814: pmap_deactivate(struct lwp *l)
                    815: {
                    816:        pmap_t pmap = l->l_proc->p_vmspace->vm_map.pmap;
                    817:
                    818:        UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
1.37      pgoyette  819:        UVMHIST_LOG(pmaphist, "(l=%#jx pmap=%#jx)", (uintptr_t)l,
                    820:            (uintptr_t)pmap, 0, 0);
1.1       christos  821:        PMAP_COUNT(deactivate);
                    822:
                    823:        kpreempt_disable();
1.15      matt      824:        KASSERT(l == curlwp || l->l_cpu == curlwp->l_cpu);
1.10      nonaka    825:        pmap_md_tlb_miss_lock_enter();
1.1       christos  826:        curcpu()->ci_pmap_user_segtab = PMAP_INVALID_SEGTAB_ADDRESS;
1.15      matt      827: #ifdef _LP64
                    828:        curcpu()->ci_pmap_user_seg0tab = NULL;
                    829: #endif
1.1       christos  830:        pmap_tlb_asid_deactivate(pmap);
1.10      nonaka    831:        pmap_md_tlb_miss_lock_exit();
1.1       christos  832:        kpreempt_enable();
                    833:
1.37      pgoyette  834:        UVMHIST_LOG(pmaphist, " <-- done (%ju:%ju)", l->l_proc->p_pid,
                    835:            l->l_lid, 0, 0);
1.1       christos  836: }
                    837:
                    838: void
                    839: pmap_update(struct pmap *pmap)
                    840: {
                    841:        UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
1.37      pgoyette  842:        UVMHIST_LOG(pmaphist, "(pmap=%#jx)", (uintptr_t)pmap, 0, 0, 0);
1.1       christos  843:        PMAP_COUNT(update);
                    844:
                    845:        kpreempt_disable();
1.18      skrll     846: #if defined(MULTIPROCESSOR) && defined(PMAP_TLB_NEED_SHOOTDOWN)
1.1       christos  847:        u_int pending = atomic_swap_uint(&pmap->pm_shootdown_pending, 0);
                    848:        if (pending && pmap_tlb_shootdown_bystanders(pmap))
                    849:                PMAP_COUNT(shootdown_ipis);
                    850: #endif
1.10      nonaka    851:        pmap_md_tlb_miss_lock_enter();
1.11      nonaka    852: #if defined(DEBUG) && !defined(MULTIPROCESSOR)
1.1       christos  853:        pmap_tlb_check(pmap, pmap_md_tlb_check_entry);
                    854: #endif /* DEBUG */
                    855:
                    856:        /*
                    857:         * If pmap_remove_all was called, we deactivated ourselves and nuked
                    858:         * our ASID.  Now we have to reactivate ourselves.
                    859:         */
                    860:        if (__predict_false(pmap->pm_flags & PMAP_DEFERRED_ACTIVATE)) {
                    861:                pmap->pm_flags ^= PMAP_DEFERRED_ACTIVATE;
                    862:                pmap_tlb_asid_acquire(pmap, curlwp);
                    863:                pmap_segtab_activate(pmap, curlwp);
                    864:        }
1.10      nonaka    865:        pmap_md_tlb_miss_lock_exit();
1.1       christos  866:        kpreempt_enable();
                    867:
1.37      pgoyette  868:        UVMHIST_LOG(pmaphist, " <-- done (kernel=%#jx)",
                    869:                    (pmap == pmap_kernel() ? 1 : 0), 0, 0, 0);
1.1       christos  870: }
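
/*
 * Per pmap(9), callers may batch several mapping operations but must then
 * call pmap_update() on the affected pmap before depending on the new
 * state.  A hedged example for a kernel mapping (va and pa hypothetical):
 *
 *	pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE, 0);
 *	pmap_update(pmap_kernel());
 */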
                    871:
                    872: /*
                    873:  *     Remove the given range of addresses from the specified map.
                    874:  *
                    875:  *     It is assumed that the start and end are properly
                    876:  *     rounded to the page size.
                    877:  */
                    878:
                    879: static bool
                    880: pmap_pte_remove(pmap_t pmap, vaddr_t sva, vaddr_t eva, pt_entry_t *ptep,
                    881:        uintptr_t flags)
                    882: {
                    883:        const pt_entry_t npte = flags;
                    884:        const bool is_kernel_pmap_p = (pmap == pmap_kernel());
                    885:
                    886:        UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
1.39    ! pgoyette  887:        UVMHIST_LOG(pmaphist, "(pmap=%#jx kernel=%c va=%#jx..%#jx)",
1.38      kre       888:            (uintmax_t)(uintptr_t)pmap, (is_kernel_pmap_p ? 1 : 0), sva, eva);
1.37      pgoyette  889:        UVMHIST_LOG(pmaphist, "ptep=%#jx, flags(npte)=%#jx",
                    890:            (uintptr_t)ptep, flags, 0, 0);
1.1       christos  891:
                    892:        KASSERT(kpreempt_disabled());
                    893:
                    894:        for (; sva < eva; sva += NBPG, ptep++) {
1.15      matt      895:                const pt_entry_t pte = *ptep;
                    896:                if (!pte_valid_p(pte))
1.1       christos  897:                        continue;
1.15      matt      898:                if (is_kernel_pmap_p) {
                    899:                        PMAP_COUNT(remove_kernel_pages);
                    900:                } else {
1.1       christos  901:                        PMAP_COUNT(remove_user_pages);
1.15      matt      902:                }
                    903:                if (pte_wired_p(pte))
1.1       christos  904:                        pmap->pm_stats.wired_count--;
                    905:                pmap->pm_stats.resident_count--;
1.15      matt      906:                struct vm_page * const pg = PHYS_TO_VM_PAGE(pte_to_paddr(pte));
1.1       christos  907:                if (__predict_true(pg != NULL)) {
1.15      matt      908:                        pmap_remove_pv(pmap, sva, pg, pte_modified_p(pte));
1.1       christos  909:                }
1.10      nonaka    910:                pmap_md_tlb_miss_lock_enter();
1.35      skrll     911:                pte_set(ptep, npte);
1.36      skrll     912:                if (__predict_true(!(pmap->pm_flags & PMAP_DEFERRED_ACTIVATE))) {
                    913:
                    914:                        /*
                    915:                         * Flush the TLB for the given address.
                    916:                         */
                    917:                        pmap_tlb_invalidate_addr(pmap, sva);
                    918:                }
1.10      nonaka    919:                pmap_md_tlb_miss_lock_exit();
1.1       christos  920:        }
1.15      matt      921:
                    922:        UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
                    923:
1.1       christos  924:        return false;
                    925: }
                    926:
                    927: void
                    928: pmap_remove(pmap_t pmap, vaddr_t sva, vaddr_t eva)
                    929: {
                    930:        const bool is_kernel_pmap_p = (pmap == pmap_kernel());
                    931:        const pt_entry_t npte = pte_nv_entry(is_kernel_pmap_p);
                    932:
                    933:        UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
1.37      pgoyette  934:        UVMHIST_LOG(pmaphist, "(pmap=%#jx, va=%#jx..%#jx)",
                    935:            (uintptr_t)pmap, sva, eva, 0);
1.1       christos  936:
1.15      matt      937:        if (is_kernel_pmap_p) {
1.1       christos  938:                PMAP_COUNT(remove_kernel_calls);
1.15      matt      939:        } else {
1.1       christos  940:                PMAP_COUNT(remove_user_calls);
                    941:        }
1.15      matt      942: #ifdef PMAP_FAULTINFO
                    943:        curpcb->pcb_faultinfo.pfi_faultaddr = 0;
                    944:        curpcb->pcb_faultinfo.pfi_repeats = 0;
                    945:        curpcb->pcb_faultinfo.pfi_faultpte = NULL;
1.1       christos  946: #endif
                    947:        kpreempt_disable();
1.15      matt      948:        pmap_addr_range_check(pmap, sva, eva, __func__);
1.1       christos  949:        pmap_pte_process(pmap, sva, eva, pmap_pte_remove, npte);
                    950:        kpreempt_enable();
                    951:
1.15      matt      952:        UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
1.1       christos  953: }
                    954:
                    955: /*
                    956:  *     pmap_page_protect:
                    957:  *
                    958:  *     Lower the permission for all mappings to a given page.
                    959:  */
                    960: void
                    961: pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
                    962: {
                    963:        struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
                    964:        pv_entry_t pv;
                    965:        vaddr_t va;
                    966:
                    967:        UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
1.37      pgoyette  968:        UVMHIST_LOG(pmaphist, "(pg=%#jx (pa %#jx) prot=%#jx)",
                    969:            (uintptr_t)pg, VM_PAGE_TO_PHYS(pg), prot, 0);
1.1       christos  970:        PMAP_COUNT(page_protect);
                    971:
                    972:        switch (prot) {
                    973:        case VM_PROT_READ|VM_PROT_WRITE:
                    974:        case VM_PROT_ALL:
                    975:                break;
                    976:
                    977:        /* copy_on_write */
                    978:        case VM_PROT_READ:
                    979:        case VM_PROT_READ|VM_PROT_EXECUTE:
                    980:                pv = &mdpg->mdpg_first;
1.15      matt      981:                kpreempt_disable();
                    982:                VM_PAGEMD_PVLIST_READLOCK(mdpg);
                    983:                pmap_pvlist_check(mdpg);
1.1       christos  984:                /*
1.33      skrll     985:                 * Loop over all current mappings setting/clearing as
                    986:                 * appropriate.
1.1       christos  987:                 */
                    988:                if (pv->pv_pmap != NULL) {
                    989:                        while (pv != NULL) {
1.15      matt      990: #ifdef PMAP_VIRTUAL_CACHE_ALIASES
                    991:                                if (pv->pv_va & PV_KENTER) {
                    992:                                        pv = pv->pv_next;
                    993:                                        continue;
                    994:                                }
                    995: #endif
1.1       christos  996:                                const pmap_t pmap = pv->pv_pmap;
1.15      matt      997:                                va = trunc_page(pv->pv_va);
                    998:                                const uintptr_t gen =
                    999:                                    VM_PAGEMD_PVLIST_UNLOCK(mdpg);
1.1       christos 1000:                                pmap_protect(pmap, va, va + PAGE_SIZE, prot);
                   1001:                                KASSERT(pv->pv_pmap == pmap);
                   1002:                                pmap_update(pmap);
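                                                        /*
                                                         * If the pv list changed while it
                                                         * was unlocked, restart from the
                                                         * head of the list.
                                                         */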
1.15      matt     1003:                                if (gen != VM_PAGEMD_PVLIST_READLOCK(mdpg)) {
1.1       christos 1004:                                        pv = &mdpg->mdpg_first;
                   1005:                                } else {
                   1006:                                        pv = pv->pv_next;
                   1007:                                }
1.15      matt     1008:                                pmap_pvlist_check(mdpg);
1.1       christos 1009:                        }
                   1010:                }
1.15      matt     1011:                pmap_pvlist_check(mdpg);
1.1       christos 1012:                VM_PAGEMD_PVLIST_UNLOCK(mdpg);
1.15      matt     1013:                kpreempt_enable();
1.1       christos 1014:                break;
                   1015:
                   1016:        /* remove_all */
                   1017:        default:
1.15      matt     1018:                pmap_page_remove(pg);
1.1       christos 1019:        }
                   1020:
1.15      matt     1021:        UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
1.1       christos 1022: }
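                        /*
                         * Usage sketch (illustrative only): the remove_all case above is
                         * what runs for
                         *
                         *      pmap_page_protect(pg, VM_PROT_NONE);
                         *
                         * which strips every mapping of the page, while
                         *
                         *      pmap_page_protect(pg, VM_PROT_READ);
                         *
                         * keeps the mappings but downgrades them to read-only.
                         */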
                   1023:
                   1024: static bool
                   1025: pmap_pte_protect(pmap_t pmap, vaddr_t sva, vaddr_t eva, pt_entry_t *ptep,
                   1026:        uintptr_t flags)
                   1027: {
                   1028:        const vm_prot_t prot = (flags & VM_PROT_ALL);
                   1029:
                   1030:        UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
1.37      pgoyette 1031:        UVMHIST_LOG(pmaphist, "(pmap=%#jx kernel=%jx va=%#jx..%#jx)",
1.38      kre      1032:            (uintmax_t)(uintptr_t)pmap, (pmap == pmap_kernel() ? 1 : 0),
                   1033:            sva, eva);
1.37      pgoyette 1034:        UVMHIST_LOG(pmaphist, "ptep=%#jx, flags(npte)=%#jx)",
                   1035:            (uintptr_t)ptep, flags, 0, 0);
1.1       christos 1036:
                   1037:        KASSERT(kpreempt_disabled());
                   1038:        /*
                   1039:         * Change protection on every valid mapping within this segment.
                   1040:         */
                   1041:        for (; sva < eva; sva += NBPG, ptep++) {
1.15      matt     1042:                pt_entry_t pte = *ptep;
                   1043:                if (!pte_valid_p(pte))
1.1       christos 1044:                        continue;
1.15      matt     1045:                struct vm_page * const pg = PHYS_TO_VM_PAGE(pte_to_paddr(pte));
                   1046:                if (pg != NULL && pte_modified_p(pte)) {
1.1       christos 1047:                        struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
                   1048:                        if (VM_PAGEMD_EXECPAGE_P(mdpg)) {
                   1049:                                KASSERT(mdpg->mdpg_first.pv_pmap != NULL);
1.15      matt     1050: #ifdef PMAP_VIRTUAL_CACHE_ALIASES
                   1051:                                if (VM_PAGEMD_CACHED_P(mdpg)) {
                   1052: #endif
1.1       christos 1053:                                        UVMHIST_LOG(pmapexechist,
1.37      pgoyette 1054:                                            "pg %#jx (pa %#jx): "
1.28      mrg      1055:                                            "syncicached performed",
1.37      pgoyette 1056:                                            (uintptr_t)pg, VM_PAGE_TO_PHYS(pg),
                   1057:                                            0, 0);
1.1       christos 1058:                                        pmap_page_syncicache(pg);
                   1059:                                        PMAP_COUNT(exec_synced_protect);
1.15      matt     1060: #ifdef PMAP_VIRTUAL_CACHE_ALIASES
1.1       christos 1061:                                }
1.15      matt     1062: #endif
1.1       christos 1063:                        }
                   1064:                }
1.15      matt     1065:                pte = pte_prot_downgrade(pte, prot);
                   1066:                if (*ptep != pte) {
1.10      nonaka   1067:                        pmap_md_tlb_miss_lock_enter();
1.35      skrll    1068:                        pte_set(ptep, pte);
1.1       christos 1069:                        /*
                   1070:                         * Update the TLB if needed.
                   1071:                         */
1.15      matt     1072:                        pmap_tlb_update_addr(pmap, sva, pte, PMAP_TLB_NEED_IPI);
1.10      nonaka   1073:                        pmap_md_tlb_miss_lock_exit();
1.1       christos 1074:                }
                   1075:        }
1.15      matt     1076:
                   1077:        UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
                   1078:
1.1       christos 1079:        return false;
                   1080: }
                   1081:
                   1082: /*
                   1083:  *     Set the physical protection on the
                   1084:  *     specified range of this map as requested.
                   1085:  */
                   1086: void
                   1087: pmap_protect(pmap_t pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
                   1088: {
                   1089:        UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
1.37      pgoyette 1090:        UVMHIST_LOG(pmaphist, "(pmap=%#jx, va=%#jx..%#jx, prot=%ju)",
                   1091:            (uintptr_t)pmap, sva, eva, prot);
1.1       christos 1092:        PMAP_COUNT(protect);
                   1093:
                   1094:        if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
                   1095:                pmap_remove(pmap, sva, eva);
1.15      matt     1096:                UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
1.1       christos 1097:                return;
                   1098:        }
                   1099:
                   1100:        /*
                   1101:         * Change protection on every valid mapping within this segment.
                   1102:         */
                   1103:        kpreempt_disable();
1.15      matt     1104:        pmap_addr_range_check(pmap, sva, eva, __func__);
1.1       christos 1105:        pmap_pte_process(pmap, sva, eva, pmap_pte_protect, prot);
                   1106:        kpreempt_enable();
                   1107:
1.15      matt     1108:        UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
1.1       christos 1109: }
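                        /*
                         * Usage sketch (illustrative only): write-protecting a range, e.g.
                         * for copy-on-write, and then completing any deferred TLB work:
                         *
                         *      pmap_protect(pmap, sva, eva, VM_PROT_READ);
                         *      pmap_update(pmap);
                         */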
                   1110:
1.15      matt     1111: #if defined(PMAP_VIRTUAL_CACHE_ALIASES) && !defined(PMAP_NO_PV_UNCACHED)
1.1       christos 1112: /*
                   1113:  *     pmap_page_cache:
                   1114:  *
                   1115:  *     Change all mappings of a managed page to cached/uncached.
                   1116:  */
1.15      matt     1117: void
1.1       christos 1118: pmap_page_cache(struct vm_page *pg, bool cached)
                   1119: {
                   1120:        struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
1.15      matt     1121:
1.1       christos 1122:        UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
1.37      pgoyette 1123:        UVMHIST_LOG(pmaphist, "(pg=%#jx (pa %#jx) cached=%jd)",
                   1124:            (uintptr_t)pg, VM_PAGE_TO_PHYS(pg), cached, 0);
1.15      matt     1125:
1.1       christos 1126:        KASSERT(kpreempt_disabled());
1.15      matt     1127:        KASSERT(VM_PAGEMD_PVLIST_LOCKED_P(mdpg));
1.1       christos 1128:
                   1129:        if (cached) {
                   1130:                pmap_page_clear_attributes(mdpg, VM_PAGEMD_UNCACHED);
                   1131:                PMAP_COUNT(page_cache_restorations);
                   1132:        } else {
                   1133:                pmap_page_set_attributes(mdpg, VM_PAGEMD_UNCACHED);
                   1134:                PMAP_COUNT(page_cache_evictions);
                   1135:        }
                   1136:
1.15      matt     1137:        for (pv_entry_t pv = &mdpg->mdpg_first; pv != NULL; pv = pv->pv_next) {
1.1       christos 1138:                pmap_t pmap = pv->pv_pmap;
1.15      matt     1139:                vaddr_t va = trunc_page(pv->pv_va);
1.1       christos 1140:
                   1141:                KASSERT(pmap != NULL);
                   1142:                KASSERT(pmap != pmap_kernel() || !pmap_md_direct_mapped_vaddr_p(va));
                   1143:                pt_entry_t * const ptep = pmap_pte_lookup(pmap, va);
                   1144:                if (ptep == NULL)
                   1145:                        continue;
1.15      matt     1146:                pt_entry_t pte = *ptep;
                   1147:                if (pte_valid_p(pte)) {
                   1148:                        pte = pte_cached_change(pte, cached);
1.10      nonaka   1149:                        pmap_md_tlb_miss_lock_enter();
1.35      skrll    1150:                        pte_set(ptep, pte);
1.15      matt     1151:                        pmap_tlb_update_addr(pmap, va, pte, PMAP_TLB_NEED_IPI);
1.10      nonaka   1152:                        pmap_md_tlb_miss_lock_exit();
1.1       christos 1153:                }
                   1154:        }
1.15      matt     1155:
                   1156:        UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
1.1       christos 1157: }
1.15      matt     1158: #endif /* PMAP_VIRTUAL_CACHE_ALIASES && !PMAP_NO_PV_UNCACHED */
1.1       christos 1159:
                   1160: /*
                   1161:  *     Insert the given physical page (pa) at
                   1162:  *     the specified virtual address (va) in the
                   1163:  *     target physical map with the protection requested.
                   1164:  *
                   1165:  *     If specified, the page will be wired down, meaning
                   1166:  *     that the related pte cannot be reclaimed.
                   1167:  *
                   1168:  *     NB:  This is the only routine which MAY NOT lazy-evaluate
                   1169:  *     or lose information.  That is, this routine must actually
                   1170:  *     insert this page into the given map NOW.
                   1171:  */
                   1172: int
                   1173: pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
                   1174: {
                   1175:        const bool wired = (flags & PMAP_WIRED) != 0;
                   1176:        const bool is_kernel_pmap_p = (pmap == pmap_kernel());
1.15      matt     1177:        u_int update_flags = (flags & VM_PROT_ALL) != 0 ? PMAP_TLB_INSERT : 0;
1.1       christos 1178: #ifdef UVMHIST
1.15      matt     1179:        struct kern_history * const histp =
1.1       christos 1180:            ((prot & VM_PROT_EXECUTE) ? &pmapexechist : &pmaphist);
                   1181: #endif
                   1182:
1.15      matt     1183:        UVMHIST_FUNC(__func__); UVMHIST_CALLED(*histp);
1.37      pgoyette 1184:        UVMHIST_LOG(*histp, "(pmap=%#jx, va=%#jx, pa=%#jx",
                   1185:            (uintptr_t)pmap, va, pa, 0);
                   1186:        UVMHIST_LOG(*histp, "prot=%#jx flags=%#jx)", prot, flags, 0, 0);
1.1       christos 1187:
                   1188:        const bool good_color = PMAP_PAGE_COLOROK_P(pa, va);
                   1189:        if (is_kernel_pmap_p) {
                   1190:                PMAP_COUNT(kernel_mappings);
                   1191:                if (!good_color)
                   1192:                        PMAP_COUNT(kernel_mappings_bad);
                   1193:        } else {
                   1194:                PMAP_COUNT(user_mappings);
                   1195:                if (!good_color)
                   1196:                        PMAP_COUNT(user_mappings_bad);
                   1197:        }
1.15      matt     1198:        pmap_addr_range_check(pmap, va, va, __func__);
1.1       christos 1199:
1.15      matt     1200:        KASSERTMSG(prot & VM_PROT_READ, "no READ (%#x) in prot %#x",
                   1201:            VM_PROT_READ, prot);
1.1       christos 1202:
                   1203:        struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
1.15      matt     1204:        struct vm_page_md * const mdpg = (pg ? VM_PAGE_TO_MD(pg) : NULL);
1.1       christos 1205:
                   1206:        if (pg) {
                   1207:                /* Set page referenced/modified status based on flags */
1.15      matt     1208:                if (flags & VM_PROT_WRITE) {
1.1       christos 1209:                        pmap_page_set_attributes(mdpg, VM_PAGEMD_MODIFIED|VM_PAGEMD_REFERENCED);
1.15      matt     1210:                } else if (flags & VM_PROT_ALL) {
1.1       christos 1211:                        pmap_page_set_attributes(mdpg, VM_PAGEMD_REFERENCED);
1.15      matt     1212:                }
1.1       christos 1213:
1.15      matt     1214: #ifdef PMAP_VIRTUAL_CACHE_ALIASES
                   1215:                if (!VM_PAGEMD_CACHED_P(mdpg)) {
1.1       christos 1216:                        flags |= PMAP_NOCACHE;
1.15      matt     1217:                        PMAP_COUNT(uncached_mappings);
                   1218:                }
1.1       christos 1219: #endif
                   1220:
                   1221:                PMAP_COUNT(managed_mappings);
                   1222:        } else {
                   1223:                /*
                   1224:                 * Assumption: if it is not part of our managed memory
                   1225:                 * then it must be device memory which may be volatile.
                   1226:                 */
1.15      matt     1227:                if ((flags & PMAP_CACHE_MASK) == 0)
                   1228:                        flags |= PMAP_NOCACHE;
1.1       christos 1229:                PMAP_COUNT(unmanaged_mappings);
                   1230:        }
                   1231:
1.15      matt     1232:        pt_entry_t npte = pte_make_enter(pa, mdpg, prot, flags,
                   1233:            is_kernel_pmap_p);
1.1       christos 1234:
                   1235:        kpreempt_disable();
1.15      matt     1236:
1.1       christos 1237:        pt_entry_t * const ptep = pmap_pte_reserve(pmap, va, flags);
                   1238:        if (__predict_false(ptep == NULL)) {
                   1239:                kpreempt_enable();
1.15      matt     1240:                UVMHIST_LOG(*histp, " <-- ENOMEM", 0, 0, 0, 0);
1.1       christos 1241:                return ENOMEM;
                   1242:        }
1.15      matt     1243:        const pt_entry_t opte = *ptep;
1.24      skrll    1244:        const bool resident = pte_valid_p(opte);
                   1245:        bool remap = false;
                   1246:        if (resident) {
                   1247:                if (pte_to_paddr(opte) != pa) {
                   1248:                        KASSERT(!is_kernel_pmap_p);
                   1249:                        const pt_entry_t rpte = pte_nv_entry(false);
                   1250:
                   1251:                        pmap_addr_range_check(pmap, va, va + NBPG, __func__);
                   1252:                        pmap_pte_process(pmap, va, va + NBPG, pmap_pte_remove,
                   1253:                            rpte);
                   1254:                        PMAP_COUNT(user_mappings_changed);
                   1255:                        remap = true;
                   1256:                }
                   1257:                update_flags |= PMAP_TLB_NEED_IPI;
                   1258:        }
                   1259:
                   1260:        if (!resident || remap) {
                   1261:                pmap->pm_stats.resident_count++;
                   1262:        }
1.1       christos 1263:
                   1264:        /* Done after the case above that may sleep/return. */
                   1265:        if (pg)
1.15      matt     1266:                pmap_enter_pv(pmap, va, pg, &npte, 0);
1.1       christos 1267:
                   1268:        /*
                   1269:         * Now validate mapping with desired protection/wiring.
                   1270:         * Assume uniform modified and referenced status for all
                   1271:         * MIPS pages in a MACH page.
                   1272:         */
                   1273:        if (wired) {
                   1274:                pmap->pm_stats.wired_count++;
                   1275:                npte = pte_wire_entry(npte);
                   1276:        }
                   1277:
1.37      pgoyette 1278:        UVMHIST_LOG(*histp, "new pte %#jx (pa %#jx)",
1.15      matt     1279:            pte_value(npte), pa, 0, 0);
1.1       christos 1280:
                   1281:        KASSERT(pte_valid_p(npte));
1.15      matt     1282:
1.10      nonaka   1283:        pmap_md_tlb_miss_lock_enter();
1.35      skrll    1284:        pte_set(ptep, npte);
1.15      matt     1285:        pmap_tlb_update_addr(pmap, va, npte, update_flags);
1.10      nonaka   1286:        pmap_md_tlb_miss_lock_exit();
1.1       christos 1287:        kpreempt_enable();
                   1288:
                   1289:        if (pg != NULL && (prot == (VM_PROT_READ | VM_PROT_EXECUTE))) {
                   1290:                KASSERT(mdpg != NULL);
                   1291:                PMAP_COUNT(exec_mappings);
                   1292:                if (!VM_PAGEMD_EXECPAGE_P(mdpg) && pte_cached_p(npte)) {
                   1293:                        if (!pte_deferred_exec_p(npte)) {
1.37      pgoyette 1294:                                UVMHIST_LOG(*histp, "va=%#jx pg %#jx: "
                   1295:                                    "immediate syncicache",
                   1296:                                    va, (uintptr_t)pg, 0, 0);
1.1       christos 1297:                                pmap_page_syncicache(pg);
                   1298:                                pmap_page_set_attributes(mdpg,
                   1299:                                    VM_PAGEMD_EXECPAGE);
                   1300:                                PMAP_COUNT(exec_synced_mappings);
                   1301:                        } else {
1.37      pgoyette 1302:                                UVMHIST_LOG(*histp, "va=%#jx pg %#jx: defer "
                   1303:                                    "syncicache: pte %#jx",
                   1304:                                    va, (uintptr_t)pg, npte, 0);
1.1       christos 1305:                        }
                   1306:                } else {
                   1307:                        UVMHIST_LOG(*histp,
1.37      pgoyette 1308:                            "va=%#jx pg %#jx: no syncicache cached %jd",
                   1309:                            va, (uintptr_t)pg, pte_cached_p(npte), 0);
1.1       christos 1310:                }
                   1311:        } else if (pg != NULL && (prot & VM_PROT_EXECUTE)) {
                   1312:                KASSERT(mdpg != NULL);
                   1313:                KASSERT(prot & VM_PROT_WRITE);
                   1314:                PMAP_COUNT(exec_mappings);
                   1315:                pmap_page_syncicache(pg);
                   1316:                pmap_page_clear_attributes(mdpg, VM_PAGEMD_EXECPAGE);
1.15      matt     1317:                UVMHIST_LOG(*histp,
1.37      pgoyette 1318:                    "va=%#jx pg %#jx: immediate syncicache (writeable)",
                   1319:                    va, (uintptr_t)pg, 0, 0);
1.1       christos 1320:        }
                   1321:
1.15      matt     1322:        UVMHIST_LOG(*histp, " <-- 0 (OK)", 0, 0, 0, 0);
1.1       christos 1323:        return 0;
                   1324: }
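                        /*
                         * Usage sketch (illustrative only): entering a wired, writable
                         * mapping and handling the ENOMEM case above (no PTE page could
                         * be reserved):
                         *
                         *      int error = pmap_enter(pmap, va, pa,
                         *          VM_PROT_READ | VM_PROT_WRITE,
                         *          VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED);
                         *      if (error != 0)
                         *              ... unwind and retry later ...
                         */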
                   1325:
                   1326: void
                   1327: pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
                   1328: {
1.15      matt     1329:        pmap_t pmap = pmap_kernel();
1.1       christos 1330:        struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
1.15      matt     1331:        struct vm_page_md * const mdpg = (pg ? VM_PAGE_TO_MD(pg) : NULL);
1.1       christos 1332:
                   1333:        UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
1.37      pgoyette 1334:        UVMHIST_LOG(pmaphist, "(va=%#jx pa=%#jx prot=%ju, flags=%#jx)",
1.15      matt     1335:            va, pa, prot, flags);
1.1       christos 1336:        PMAP_COUNT(kenter_pa);
                   1337:
1.15      matt     1338:        if (mdpg == NULL) {
1.1       christos 1339:                PMAP_COUNT(kenter_pa_unmanaged);
1.15      matt     1340:                if ((flags & PMAP_CACHE_MASK) == 0)
                   1341:                        flags |= PMAP_NOCACHE;
1.1       christos 1342:        } else {
1.15      matt     1343:                if ((flags & PMAP_NOCACHE) == 0 && !PMAP_PAGE_COLOROK_P(pa, va))
                   1344:                        PMAP_COUNT(kenter_pa_bad);
1.1       christos 1345:        }
                   1346:
1.15      matt     1347:        pt_entry_t npte = pte_make_kenter_pa(pa, mdpg, prot, flags);
1.1       christos 1348:        kpreempt_disable();
1.15      matt     1349:        pt_entry_t * const ptep = pmap_pte_lookup(pmap, va);
                   1350:        KASSERTMSG(ptep != NULL, "%#"PRIxVADDR " %#"PRIxVADDR, va,
                   1351:            pmap_limits.virtual_end);
1.1       christos 1352:        KASSERT(!pte_valid_p(*ptep));
1.15      matt     1353:
                   1354:        /*
                   1355:         * No need to track non-managed pages or PMAP_KMPAGE pages for aliases.
                   1356:         */
                   1357: #ifdef PMAP_VIRTUAL_CACHE_ALIASES
1.20      matt     1358:        if (pg != NULL && (flags & PMAP_KMPAGE) == 0
                   1359:            && pmap_md_virtual_cache_aliasing_p()) {
1.15      matt     1360:                pmap_enter_pv(pmap, va, pg, &npte, PV_KENTER);
                   1361:        }
                   1362: #endif
                   1363:
1.1       christos 1364:        /*
                   1365:         * We have the option to force this mapping into the TLB but we
                   1366:         * don't.  Instead let the next reference to the page do it.
                   1367:         */
1.15      matt     1368:        pmap_md_tlb_miss_lock_enter();
1.35      skrll    1369:        pte_set(ptep, npte);
1.1       christos 1370:        pmap_tlb_update_addr(pmap_kernel(), va, npte, 0);
1.10      nonaka   1371:        pmap_md_tlb_miss_lock_exit();
1.1       christos 1372:        kpreempt_enable();
                   1373: #if DEBUG > 1
                   1374:        for (u_int i = 0; i < PAGE_SIZE / sizeof(long); i++) {
                   1375:                if (((long *)va)[i] != ((long *)pa)[i])
                   1376:                        panic("%s: contents (%lx) of va %#"PRIxVADDR
                   1377:                            " != contents (%lx) of pa %#"PRIxPADDR, __func__,
                   1378:                            ((long *)va)[i], va, ((long *)pa)[i], pa);
                   1379:        }
                   1380: #endif
1.15      matt     1381:
1.37      pgoyette 1382:        UVMHIST_LOG(pmaphist, " <-- done (ptep=%#jx)", (uintptr_t)ptep, 0, 0,
                   1383:            0);
1.1       christos 1384: }
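                        /*
                         * Usage sketch (illustrative only): a mapping made with
                         * pmap_kenter_pa() is later removed over the same page-aligned
                         * range with pmap_kremove():
                         *
                         *      pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE, 0);
                         *      ...
                         *      pmap_kremove(va, PAGE_SIZE);
                         */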
                   1385:
1.15      matt     1386: /*
                   1387:  *     Remove the given range of addresses from the kernel map.
                   1388:  *
                   1389:  *     It is assumed that the start and end are properly
                   1390:  *     rounded to the page size.
                   1391:  */
                   1392:
1.1       christos 1393: static bool
                   1394: pmap_pte_kremove(pmap_t pmap, vaddr_t sva, vaddr_t eva, pt_entry_t *ptep,
                   1395:        uintptr_t flags)
                   1396: {
1.15      matt     1397:        const pt_entry_t new_pte = pte_nv_entry(true);
                   1398:
                   1399:        UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
                   1400:        UVMHIST_LOG(pmaphist,
1.37      pgoyette 1401:            "(pmap=%#jx, sva=%#jx eva=%#jx ptep=%#jx)",
                   1402:            (uintptr_t)pmap, sva, eva, (uintptr_t)ptep);
1.1       christos 1403:
                   1404:        KASSERT(kpreempt_disabled());
                   1405:
                   1406:        for (; sva < eva; sva += NBPG, ptep++) {
1.15      matt     1407:                pt_entry_t pte = *ptep;
                   1408:                if (!pte_valid_p(pte))
1.1       christos 1409:                        continue;
                   1410:
                   1411:                PMAP_COUNT(kremove_pages);
1.21      mrg      1412: #ifdef PMAP_VIRTUAL_CACHE_ALIASES
1.15      matt     1413:                struct vm_page * const pg = PHYS_TO_VM_PAGE(pte_to_paddr(pte));
1.20      matt     1414:                if (pg != NULL && pmap_md_virtual_cache_aliasing_p()) {
1.15      matt     1415:                        pmap_remove_pv(pmap, sva, pg, !pte_readonly_p(pte));
                   1416:                }
1.20      matt     1417: #endif
1.1       christos 1418:
1.10      nonaka   1419:                pmap_md_tlb_miss_lock_enter();
1.35      skrll    1420:                pte_set(ptep, new_pte);
1.15      matt     1421:                pmap_tlb_invalidate_addr(pmap, sva);
1.10      nonaka   1422:                pmap_md_tlb_miss_lock_exit();
1.1       christos 1423:        }
                   1424:
1.15      matt     1425:        UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
                   1426:
1.1       christos 1427:        return false;
                   1428: }
                   1429:
                   1430: void
                   1431: pmap_kremove(vaddr_t va, vsize_t len)
                   1432: {
                   1433:        const vaddr_t sva = trunc_page(va);
                   1434:        const vaddr_t eva = round_page(va + len);
                   1435:
                   1436:        UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
1.37      pgoyette 1437:        UVMHIST_LOG(pmaphist, "(va=%#jx len=%#jx)", va, len, 0, 0);
1.1       christos 1438:
                   1439:        kpreempt_disable();
                   1440:        pmap_pte_process(pmap_kernel(), sva, eva, pmap_pte_kremove, 0);
                   1441:        kpreempt_enable();
                   1442:
1.15      matt     1443:        UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
1.1       christos 1444: }
                   1445:
                   1446: void
                   1447: pmap_remove_all(struct pmap *pmap)
                   1448: {
1.15      matt     1449:        UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
1.37      pgoyette 1450:        UVMHIST_LOG(pmaphist, "(pm=%#jx)", (uintptr_t)pmap, 0, 0, 0);
1.15      matt     1451:
1.1       christos 1452:        KASSERT(pmap != pmap_kernel());
                   1453:
                   1454:        kpreempt_disable();
                   1455:        /*
                   1456:         * Free all of our ASIDs which means we can skip doing all the
                   1457:         * tlb_invalidate_addrs().
                   1458:         */
1.10      nonaka   1459:        pmap_md_tlb_miss_lock_enter();
1.15      matt     1460: #ifdef MULTIPROCESSOR
                   1461:        // This should be the last CPU with this pmap onproc
                   1462:        KASSERT(!kcpuset_isotherset(pmap->pm_onproc, cpu_index(curcpu())));
                   1463:        if (kcpuset_isset(pmap->pm_onproc, cpu_index(curcpu())))
                   1464: #endif
                   1465:                pmap_tlb_asid_deactivate(pmap);
                   1466: #ifdef MULTIPROCESSOR
                   1467:        KASSERT(kcpuset_iszero(pmap->pm_onproc));
                   1468: #endif
1.1       christos 1469:        pmap_tlb_asid_release_all(pmap);
1.10      nonaka   1470:        pmap_md_tlb_miss_lock_exit();
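                                /*
                                 * Mark the pmap so that pmap_pte_remove() knows it may skip
                                 * the per-address TLB invalidations until this pmap is
                                 * activated again.
                                 */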
1.1       christos 1471:        pmap->pm_flags |= PMAP_DEFERRED_ACTIVATE;
                   1472:
1.15      matt     1473: #ifdef PMAP_FAULTINFO
                   1474:        curpcb->pcb_faultinfo.pfi_faultaddr = 0;
                   1475:        curpcb->pcb_faultinfo.pfi_repeats = 0;
                   1476:        curpcb->pcb_faultinfo.pfi_faultpte = NULL;
                   1477: #endif
1.1       christos 1478:        kpreempt_enable();
1.15      matt     1479:
                   1480:        UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
1.1       christos 1481: }
                   1482:
                   1483: /*
                   1484:  *     Routine:        pmap_unwire
                   1485:  *     Function:       Clear the wired attribute for a map/virtual-address
                   1486:  *                     pair.
                   1487:  *     In/out conditions:
                   1488:  *                     The mapping must already exist in the pmap.
                   1489:  */
                   1490: void
                   1491: pmap_unwire(pmap_t pmap, vaddr_t va)
                   1492: {
                   1493:        UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
1.37      pgoyette 1494:        UVMHIST_LOG(pmaphist, "(pmap=%#jx, va=%#jx)", (uintptr_t)pmap, va,
                   1495:            0, 0);
1.1       christos 1496:        PMAP_COUNT(unwire);
                   1497:
                   1498:        /*
                   1499:         * Don't need to flush the TLB since PG_WIRED is only in software.
                   1500:         */
                   1501:        kpreempt_disable();
1.15      matt     1502:        pmap_addr_range_check(pmap, va, va, __func__);
1.1       christos 1503:        pt_entry_t * const ptep = pmap_pte_lookup(pmap, va);
1.15      matt     1504:        KASSERTMSG(ptep != NULL, "pmap %p va %#"PRIxVADDR" invalid STE",
                   1505:            pmap, va);
                   1506:        pt_entry_t pte = *ptep;
                   1507:        KASSERTMSG(pte_valid_p(pte),
                   1508:            "pmap %p va %#"PRIxVADDR" invalid PTE %#"PRIxPTE" @ %p",
                   1509:            pmap, va, pte_value(pte), ptep);
1.1       christos 1510:
1.15      matt     1511:        if (pte_wired_p(pte)) {
1.10      nonaka   1512:                pmap_md_tlb_miss_lock_enter();
1.35      skrll    1513:                pte_set(ptep, pte_unwire_entry(pte));
1.10      nonaka   1514:                pmap_md_tlb_miss_lock_exit();
1.1       christos 1515:                pmap->pm_stats.wired_count--;
                   1516:        }
                   1517: #ifdef DIAGNOSTIC
                   1518:        else {
                   1519:                printf("%s: wiring for pmap %p va %#"PRIxVADDR" unchanged!\n",
                   1520:                    __func__, pmap, va);
                   1521:        }
                   1522: #endif
                   1523:        kpreempt_enable();
1.15      matt     1524:
                   1525:        UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
1.1       christos 1526: }
                   1527:
                   1528: /*
                   1529:  *     Routine:        pmap_extract
                   1530:  *     Function:
                   1531:  *             Extract the physical page address associated
                   1532:  *             with the given map/virtual_address pair.
                   1533:  */
                   1534: bool
                   1535: pmap_extract(pmap_t pmap, vaddr_t va, paddr_t *pap)
                   1536: {
                   1537:        paddr_t pa;
                   1538:
                   1539:        if (pmap == pmap_kernel()) {
                   1540:                if (pmap_md_direct_mapped_vaddr_p(va)) {
                   1541:                        pa = pmap_md_direct_mapped_vaddr_to_paddr(va);
                   1542:                        goto done;
                   1543:                }
                   1544:                if (pmap_md_io_vaddr_p(va))
                   1545:                        panic("pmap_extract: io address %#"PRIxVADDR"", va);
1.15      matt     1546:
                   1547:                if (va >= pmap_limits.virtual_end)
                   1548:                        panic("%s: illegal kernel mapped address %#"PRIxVADDR,
                   1549:                            __func__, va);
1.1       christos 1550:        }
                   1551:        kpreempt_disable();
1.15      matt     1552:        const pt_entry_t * const ptep = pmap_pte_lookup(pmap, va);
                   1553:        if (ptep == NULL || !pte_valid_p(*ptep)) {
1.1       christos 1554:                kpreempt_enable();
                   1555:                return false;
                   1556:        }
                   1557:        pa = pte_to_paddr(*ptep) | (va & PGOFSET);
                   1558:        kpreempt_enable();
                   1559: done:
                   1560:        if (pap != NULL) {
                   1561:                *pap = pa;
                   1562:        }
                   1563:        return true;
                   1564: }
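                        /*
                         * Usage sketch (illustrative only): the page offset of va is folded
                         * into the returned physical address, so a caller can do
                         *
                         *      paddr_t pa;
                         *      if (!pmap_extract(pmap, va, &pa))
                         *              ... va is not mapped ...
                         */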
                   1565:
                   1566: /*
                   1567:  *     Copy the range specified by src_addr/len
                   1568:  *     from the source map to the range dst_addr/len
                   1569:  *     in the destination map.
                   1570:  *
                   1571:  *     This routine is only advisory and need not do anything.
                   1572:  */
                   1573: void
                   1574: pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vaddr_t dst_addr, vsize_t len,
                   1575:     vaddr_t src_addr)
                   1576: {
                   1577:        UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
                   1578:        PMAP_COUNT(copy);
                   1579: }
                   1580:
                   1581: /*
                   1582:  *     pmap_clear_reference:
                   1583:  *
                   1584:  *     Clear the reference bit on the specified physical page.
                   1585:  */
                   1586: bool
                   1587: pmap_clear_reference(struct vm_page *pg)
                   1588: {
                   1589:        struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
                   1590:
                   1591:        UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
1.37      pgoyette 1592:        UVMHIST_LOG(pmaphist, "(pg=%#jx (pa %#jx))",
                   1593:           (uintptr_t)pg, VM_PAGE_TO_PHYS(pg), 0,0);
1.1       christos 1594:
                   1595:        bool rv = pmap_page_clear_attributes(mdpg, VM_PAGEMD_REFERENCED);
                   1596:
1.37      pgoyette 1597:        UVMHIST_LOG(pmaphist, " <-- wasref %ju", rv, 0, 0, 0);
1.1       christos 1598:
                   1599:        return rv;
                   1600: }
                   1601:
                   1602: /*
                   1603:  *     pmap_is_referenced:
                   1604:  *
                   1605:  *     Return whether or not the specified physical page is referenced
                   1606:  *     by any physical maps.
                   1607:  */
                   1608: bool
                   1609: pmap_is_referenced(struct vm_page *pg)
                   1610: {
                   1611:        return VM_PAGEMD_REFERENCED_P(VM_PAGE_TO_MD(pg));
                   1612: }
                   1613:
                   1614: /*
                   1615:  *     Clear the modify bits on the specified physical page.
                   1616:  */
                   1617: bool
                   1618: pmap_clear_modify(struct vm_page *pg)
                   1619: {
                   1620:        struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
                   1621:        pv_entry_t pv = &mdpg->mdpg_first;
                   1622:        pv_entry_t pv_next;
                   1623:
                   1624:        UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
1.37      pgoyette 1625:        UVMHIST_LOG(pmaphist, "(pg=%#jx (%#jx))",
                   1626:            (uintptr_t)pg, VM_PAGE_TO_PHYS(pg), 0,0);
1.1       christos 1627:        PMAP_COUNT(clear_modify);
                   1628:
                   1629:        if (VM_PAGEMD_EXECPAGE_P(mdpg)) {
                   1630:                if (pv->pv_pmap == NULL) {
                   1631:                        UVMHIST_LOG(pmapexechist,
1.37      pgoyette 1632:                            "pg %#jx (pa %#jx): execpage cleared",
                   1633:                            (uintptr_t)pg, VM_PAGE_TO_PHYS(pg), 0, 0);
1.1       christos 1634:                        pmap_page_clear_attributes(mdpg, VM_PAGEMD_EXECPAGE);
                   1635:                        PMAP_COUNT(exec_uncached_clear_modify);
                   1636:                } else {
                   1637:                        UVMHIST_LOG(pmapexechist,
1.37      pgoyette 1638:                            "pg %#jx (pa %#jx): syncicache performed",
                   1639:                            (uintptr_t)pg, VM_PAGE_TO_PHYS(pg), 0, 0);
1.1       christos 1640:                        pmap_page_syncicache(pg);
                   1641:                        PMAP_COUNT(exec_synced_clear_modify);
                   1642:                }
                   1643:        }
                   1644:        if (!pmap_page_clear_attributes(mdpg, VM_PAGEMD_MODIFIED)) {
1.15      matt     1645:                UVMHIST_LOG(pmaphist, " <-- false", 0, 0, 0, 0);
1.1       christos 1646:                return false;
                   1647:        }
                   1648:        if (pv->pv_pmap == NULL) {
1.15      matt     1649:                UVMHIST_LOG(pmaphist, " <-- true (no mappings)", 0, 0, 0, 0);
1.1       christos 1650:                return true;
                   1651:        }
                   1652:
                   1653:        /*
                   1654:         * remove write access from any pages that are dirty
                   1655:         * so we can tell if they are written to again later.
                   1656:         * flush the VAC first if there is one.
                   1657:         */
                   1658:        kpreempt_disable();
1.15      matt     1659:        KASSERT(!VM_PAGEMD_PVLIST_LOCKED_P(mdpg));
                   1660:        VM_PAGEMD_PVLIST_READLOCK(mdpg);
                   1661:        pmap_pvlist_check(mdpg);
1.1       christos 1662:        for (; pv != NULL; pv = pv_next) {
                   1663:                pmap_t pmap = pv->pv_pmap;
1.15      matt     1664:                vaddr_t va = trunc_page(pv->pv_va);
                   1665:
                   1666:                pv_next = pv->pv_next;
                   1667: #ifdef PMAP_VIRTUAL_CACHE_ALIASES
                   1668:                if (pv->pv_va & PV_KENTER)
                   1669:                        continue;
                   1670: #endif
1.1       christos 1671:                pt_entry_t * const ptep = pmap_pte_lookup(pmap, va);
                   1672:                KASSERT(ptep);
1.15      matt     1673:                pt_entry_t pte = pte_prot_nowrite(*ptep);
                   1674:                if (*ptep == pte) {
1.1       christos 1675:                        continue;
                   1676:                }
1.15      matt     1677:                KASSERT(pte_valid_p(pte));
                   1678:                const uintptr_t gen = VM_PAGEMD_PVLIST_UNLOCK(mdpg);
1.10      nonaka   1679:                pmap_md_tlb_miss_lock_enter();
1.35      skrll    1680:                pte_set(ptep, pte);
1.1       christos 1681:                pmap_tlb_invalidate_addr(pmap, va);
1.10      nonaka   1682:                pmap_md_tlb_miss_lock_exit();
1.1       christos 1683:                pmap_update(pmap);
1.15      matt     1684:                if (__predict_false(gen != VM_PAGEMD_PVLIST_READLOCK(mdpg))) {
1.1       christos 1685:                        /*
                   1686:                         * The list changed!  So restart from the beginning.
                   1687:                         */
                   1688:                        pv_next = &mdpg->mdpg_first;
1.15      matt     1689:                        pmap_pvlist_check(mdpg);
1.1       christos 1690:                }
                   1691:        }
1.15      matt     1692:        pmap_pvlist_check(mdpg);
1.1       christos 1693:        VM_PAGEMD_PVLIST_UNLOCK(mdpg);
                   1694:        kpreempt_enable();
                   1695:
1.15      matt     1696:        UVMHIST_LOG(pmaphist, " <-- true (mappings changed)", 0, 0, 0, 0);
1.1       christos 1697:        return true;
                   1698: }
                   1699:
                   1700: /*
                   1701:  *     pmap_is_modified:
                   1702:  *
                   1703:  *     Return whether or not the specified physical page is modified
                   1704:  *     by any physical maps.
                   1705:  */
                   1706: bool
                   1707: pmap_is_modified(struct vm_page *pg)
                   1708: {
                   1709:        return VM_PAGEMD_MODIFIED_P(VM_PAGE_TO_MD(pg));
                   1710: }
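                        /*
                         * Usage sketch (illustrative only): a pageout-style caller clears
                         * the reference bit and later checks whether the page was dirtied
                         * in the meantime:
                         *
                         *      (void)pmap_clear_reference(pg);
                         *      ...
                         *      if (pmap_is_modified(pg))
                         *              ... clean the page before reusing it ...
                         */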
                   1711:
                   1712: /*
                   1713:  *     pmap_set_modified:
                   1714:  *
                   1715:  *     Sets the page modified reference bit for the specified page.
                   1716:  */
                   1717: void
                   1718: pmap_set_modified(paddr_t pa)
                   1719: {
                   1720:        struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
                   1721:        struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
                   1722:        pmap_page_set_attributes(mdpg, VM_PAGEMD_MODIFIED|VM_PAGEMD_REFERENCED);
                   1723: }
                   1724:
                   1725: /******************** pv_entry management ********************/
                   1726:
                   1727: static void
1.15      matt     1728: pmap_pvlist_check(struct vm_page_md *mdpg)
1.1       christos 1729: {
1.15      matt     1730: #ifdef DEBUG
                   1731:        pv_entry_t pv = &mdpg->mdpg_first;
1.1       christos 1732:        if (pv->pv_pmap != NULL) {
1.15      matt     1733: #ifdef PMAP_VIRTUAL_CACHE_ALIASES
                   1734:                const u_int colormask = uvmexp.colormask;
                   1735:                u_int colors = 0;
                   1736: #endif
1.1       christos 1737:                for (; pv != NULL; pv = pv->pv_next) {
1.15      matt     1738:                        KASSERT(pv->pv_pmap != pmap_kernel() || !pmap_md_direct_mapped_vaddr_p(pv->pv_va));
                   1739: #ifdef PMAP_VIRTUAL_CACHE_ALIASES
                   1740:                        colors |= __BIT(atop(pv->pv_va) & colormask);
                   1741: #endif
1.1       christos 1742:                }
1.15      matt     1743: #ifdef PMAP_VIRTUAL_CACHE_ALIASES
1.30      skrll    1744:                // Assert that if there is more than 1 color mapped, that the
                   1745:                // page is uncached.
1.15      matt     1746:                KASSERTMSG(!pmap_md_virtual_cache_aliasing_p()
                   1747:                    || colors == 0 || (colors & (colors-1)) == 0
                   1748:                    || VM_PAGEMD_UNCACHED_P(mdpg), "colors=%#x uncached=%u",
                   1749:                    colors, VM_PAGEMD_UNCACHED_P(mdpg));
                   1750: #endif
1.34      skrll    1751:        } else {
                   1752:                KASSERT(pv->pv_next == NULL);
1.1       christos 1753:        }
1.15      matt     1754: #endif /* DEBUG */
1.1       christos 1755: }
                   1756:
                   1757: /*
                   1758:  * Enter the pmap and virtual address into the
                   1759:  * physical to virtual map table.
                   1760:  */
                   1761: void
1.15      matt     1762: pmap_enter_pv(pmap_t pmap, vaddr_t va, struct vm_page *pg, pt_entry_t *nptep,
                   1763:     u_int flags)
1.1       christos 1764: {
                   1765:        struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
                   1766:        pv_entry_t pv, npv, apv;
1.15      matt     1767: #ifdef UVMHIST
                   1768:        bool first = false;
                   1769: #endif
1.1       christos 1770:
                   1771:        UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
                   1772:        UVMHIST_LOG(pmaphist,
1.37      pgoyette 1773:            "(pmap=%#jx va=%#jx pg=%#jx (%#jx)",
                   1774:            (uintptr_t)pmap, va, (uintptr_t)pg, VM_PAGE_TO_PHYS(pg));
                   1775:        UVMHIST_LOG(pmaphist, "nptep=%#jx (%#jx))",
                   1776:            (uintptr_t)nptep, pte_value(*nptep), 0, 0);
1.1       christos 1777:
                   1778:        KASSERT(kpreempt_disabled());
                   1779:        KASSERT(pmap != pmap_kernel() || !pmap_md_direct_mapped_vaddr_p(va));
1.15      matt     1780:        KASSERTMSG(pmap != pmap_kernel() || !pmap_md_io_vaddr_p(va),
                   1781:            "va %#"PRIxVADDR, va);
1.1       christos 1782:
                   1783:        apv = NULL;
1.15      matt     1784:        VM_PAGEMD_PVLIST_LOCK(mdpg);
                   1785: again:
1.1       christos 1786:        pv = &mdpg->mdpg_first;
1.15      matt     1787:        pmap_pvlist_check(mdpg);
1.1       christos 1788:        if (pv->pv_pmap == NULL) {
                   1789:                KASSERT(pv->pv_next == NULL);
                   1790:                /*
                   1791:                 * No entries yet, use header as the first entry
                   1792:                 */
                   1793:                PMAP_COUNT(primary_mappings);
                   1794:                PMAP_COUNT(mappings);
1.15      matt     1795: #ifdef UVMHIST
1.1       christos 1796:                first = true;
1.15      matt     1797: #endif
                   1798: #ifdef PMAP_VIRTUAL_CACHE_ALIASES
                   1799:                KASSERT(VM_PAGEMD_CACHED_P(mdpg));
                   1800:                // If the new mapping has an incompatible color the last
                   1801:                // mapping of this page, clean the page before using it.
                   1802:                if (!PMAP_PAGE_COLOROK_P(va, pv->pv_va)) {
                   1803:                        pmap_md_vca_clean(pg, PMAP_WBINV);
                   1804:                }
1.1       christos 1805: #endif
                   1806:                pv->pv_pmap = pmap;
1.15      matt     1807:                pv->pv_va = va | flags;
1.1       christos 1808:        } else {
1.15      matt     1809: #ifdef PMAP_VIRTUAL_CACHE_ALIASES
                   1810:                if (pmap_md_vca_add(pg, va, nptep)) {
1.1       christos 1811:                        goto again;
1.15      matt     1812:                }
                   1813: #endif
1.1       christos 1814:
                   1815:                /*
                   1816:                 * There is at least one other VA mapping this page.
                   1817:                 * Place this entry after the header.
                   1818:                 *
                   1819:                 * Note: the entry may already be in the table if
                   1820:                 * we are only changing the protection bits.
                   1821:                 */
                   1822:
                   1823: #ifdef PARANOIADIAG
                   1824:                const paddr_t pa = VM_PAGE_TO_PHYS(pg);
                   1825: #endif
                   1826:                for (npv = pv; npv; npv = npv->pv_next) {
1.15      matt     1827:                        if (pmap == npv->pv_pmap
                   1828:                            && va == trunc_page(npv->pv_va)) {
1.1       christos 1829: #ifdef PARANOIADIAG
                   1830:                                pt_entry_t *ptep = pmap_pte_lookup(pmap, va);
1.15      matt     1831:                                pt_entry_t pte = (ptep != NULL) ? *ptep : 0;
                   1832:                                if (!pte_valid_p(pte) || pte_to_paddr(pte) != pa)
                   1833:                                        printf("%s: found va %#"PRIxVADDR
                   1834:                                            " pa %#"PRIxPADDR
                   1835:                                            " in pv_table but != %#"PRIxPTE"\n",
                   1836:                                            __func__, va, pa, pte_value(pte));
1.1       christos 1837: #endif
                   1838:                                PMAP_COUNT(remappings);
                   1839:                                VM_PAGEMD_PVLIST_UNLOCK(mdpg);
                   1840:                                if (__predict_false(apv != NULL))
                   1841:                                        pmap_pv_free(apv);
1.15      matt     1842:
1.37      pgoyette 1843:                                UVMHIST_LOG(pmaphist,
                   1844:                                    " <-- done pv=%#jx (reused)",
                   1845:                                    (uintptr_t)pv, 0, 0, 0);
1.1       christos 1846:                                return;
                   1847:                        }
                   1848:                }
                   1849:                if (__predict_true(apv == NULL)) {
                   1850:                        /*
                   1851:                         * To allocate a PV, we have to release the PVLIST lock,
                   1852:                         * so record the page generation first.  We allocate the PV, and
1.15      matt     1853:                         * then reacquire the lock.
1.1       christos 1854:                         */
1.15      matt     1855:                        pmap_pvlist_check(mdpg);
                   1856:                        const uintptr_t gen = VM_PAGEMD_PVLIST_UNLOCK(mdpg);
1.1       christos 1857:
                   1858:                        apv = (pv_entry_t)pmap_pv_alloc();
                   1859:                        if (apv == NULL)
                   1860:                                panic("pmap_enter_pv: pmap_pv_alloc() failed");
                   1861:
                   1862:                        /*
                   1863:                         * If the generation has changed, then someone else
1.15      matt     1864:                         * tinkered with this page so we should start over.
1.1       christos 1865:                         */
1.15      matt     1866:                        if (gen != VM_PAGEMD_PVLIST_LOCK(mdpg))
1.1       christos 1867:                                goto again;
                   1868:                }
                   1869:                npv = apv;
                   1870:                apv = NULL;
1.15      matt     1871: #ifdef PMAP_VIRTUAL_CACHE_ALIASES
                   1872:                /*
                   1873:                 * If need to deal with virtual cache aliases, keep mappings
                   1874:                 * If we need to deal with virtual cache aliases, keep mappings
                   1875:                 * the VCA code to easily use them for cache operations if
                   1876:                 * present.
                   1877:                 */
                   1878:                pmap_t kpmap = pmap_kernel();
                   1879:                if (pmap != kpmap) {
                   1880:                        while (pv->pv_pmap == kpmap && pv->pv_next != NULL) {
                   1881:                                pv = pv->pv_next;
                   1882:                        }
                   1883:                }
                   1884: #endif
                   1885:                npv->pv_va = va | flags;
1.1       christos 1886:                npv->pv_pmap = pmap;
                   1887:                npv->pv_next = pv->pv_next;
                   1888:                pv->pv_next = npv;
                   1889:                PMAP_COUNT(mappings);
                   1890:        }
1.15      matt     1891:        pmap_pvlist_check(mdpg);
1.1       christos 1892:        VM_PAGEMD_PVLIST_UNLOCK(mdpg);
                   1893:        if (__predict_false(apv != NULL))
                   1894:                pmap_pv_free(apv);
                   1895:
1.37      pgoyette 1896:        UVMHIST_LOG(pmaphist, " <-- done pv=%#jx (first %ju)", (uintptr_t)pv,
                   1897:            first, 0, 0);
1.1       christos 1898: }
                   1899:
                   1900: /*
                   1901:  * Remove a physical to virtual address translation.
                   1902:  * If cache was inhibited on this page, and there are no more cache
                   1903:  * conflicts, restore caching.
                   1904:  * Flush the cache if the last mapping is removed (should always be cached
                   1905:  * at this point).
                   1906:  */
                   1907: void
                   1908: pmap_remove_pv(pmap_t pmap, vaddr_t va, struct vm_page *pg, bool dirty)
                   1909: {
                   1910:        struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
                   1911:        pv_entry_t pv, npv;
                   1912:        bool last;
                   1913:
                   1914:        UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
                   1915:        UVMHIST_LOG(pmaphist,
1.37      pgoyette 1916:            "(pmap=%#jx, va=%#jx, pg=%#jx (pa %#jx)",
                   1917:            (uintptr_t)pmap, va, (uintptr_t)pg, VM_PAGE_TO_PHYS(pg));
                   1918:        UVMHIST_LOG(pmaphist, "dirty=%ju)", dirty, 0, 0, 0);
1.1       christos 1919:
                   1920:        KASSERT(kpreempt_disabled());
1.15      matt     1921:        KASSERT((va & PAGE_MASK) == 0);
1.1       christos 1922:        pv = &mdpg->mdpg_first;
                   1923:
1.15      matt     1924:        VM_PAGEMD_PVLIST_LOCK(mdpg);
                   1925:        pmap_pvlist_check(mdpg);
1.1       christos 1926:
                   1927:        /*
                   1928:         * If it is the first entry on the list, it is actually
                   1929:         * in the header and we must copy the following entry up
                   1930:         * to the header.  Otherwise we must search the list for
                   1931:         * the entry.  In either case we free the now unused entry.
                   1932:         */
                   1933:
                   1934:        last = false;
1.15      matt     1935:        if (pmap == pv->pv_pmap && va == trunc_page(pv->pv_va)) {
1.1       christos 1936:                npv = pv->pv_next;
                   1937:                if (npv) {
                   1938:                        *pv = *npv;
                   1939:                        KASSERT(pv->pv_pmap != NULL);
                   1940:                } else {
1.15      matt     1941: #ifdef PMAP_VIRTUAL_CACHE_ALIASES
                   1942:                        pmap_page_clear_attributes(mdpg, VM_PAGEMD_UNCACHED);
1.1       christos 1943: #endif
                   1944:                        pv->pv_pmap = NULL;
                   1945:                        last = true;    /* Last mapping removed */
                   1946:                }
                   1947:                PMAP_COUNT(remove_pvfirst);
                   1948:        } else {
                   1949:                for (npv = pv->pv_next; npv; pv = npv, npv = npv->pv_next) {
                   1950:                        PMAP_COUNT(remove_pvsearch);
1.15      matt     1951:                        if (pmap == npv->pv_pmap && va == trunc_page(npv->pv_va))
1.1       christos 1952:                                break;
                   1953:                }
                   1954:                if (npv) {
                   1955:                        pv->pv_next = npv->pv_next;
                   1956:                }
                   1957:        }
                   1958:
1.15      matt     1959:        pmap_pvlist_check(mdpg);
1.1       christos 1960:        VM_PAGEMD_PVLIST_UNLOCK(mdpg);
                   1961:
1.15      matt     1962: #ifdef PMAP_VIRTUAL_CACHE_ALIASES
                   1963:        pmap_md_vca_remove(pg, va, dirty, last);
                   1964: #endif
                   1965:
1.1       christos 1966:        /*
                   1967:         * Free the pv_entry if needed.
                   1968:         */
                   1969:        if (npv)
                   1970:                pmap_pv_free(npv);
                   1971:        if (VM_PAGEMD_EXECPAGE_P(mdpg) && dirty) {
                   1972:                if (last) {
                   1973:                        /*
                   1974:                         * If this was the page's last mapping, we no longer
                   1975:                         * care about its execness.
                   1976:                         */
                   1977:                        UVMHIST_LOG(pmapexechist,
1.37      pgoyette 1978:                            "pg %#jx (pa %#jx) last %ju: execpage cleared",
                   1979:                            (uintptr_t)pg, VM_PAGE_TO_PHYS(pg), last, 0);
1.1       christos 1980:                        pmap_page_clear_attributes(mdpg, VM_PAGEMD_EXECPAGE);
                   1981:                        PMAP_COUNT(exec_uncached_remove);
                   1982:                } else {
                   1983:                        /*
                   1984:                         * Someone still has it mapped as an executable page
                   1985:                         * so we must sync it.
                   1986:                         */
                   1987:                        UVMHIST_LOG(pmapexechist,
1.37      pgoyette 1988:                            "pg %#jx (pa %#jx) last %ju: performed syncicache",
                   1989:                            (uintptr_t)pg, VM_PAGE_TO_PHYS(pg), last, 0);
1.1       christos 1990:                        pmap_page_syncicache(pg);
                   1991:                        PMAP_COUNT(exec_synced_remove);
                   1992:                }
                   1993:        }
1.15      matt     1994:
                   1995:        UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
1.1       christos 1996: }
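
/*
 * Editor's note: the "first entry lives in the page header" removal logic
 * in pmap_remove_pv() above, reduced to a self-contained sketch with
 * hypothetical types; the caller frees the returned node, mirroring
 * pmap_pv_free().  Illustrative only, not part of pmap.c.
 */
#if 0
#include <stddef.h>

struct node {
	struct node *next;
	int key;			/* 0 means "header is empty" */
};

/* Returns the node that became unused, or NULL if nothing was removed. */
static struct node *
header_list_remove(struct node *head, int key)
{
	struct node *pv = head, *npv;

	if (pv->key == key) {
		npv = pv->next;
		if (npv != NULL)
			*pv = *npv;	/* copy the second entry up into the header */
		else
			pv->key = 0;	/* last entry removed: mark header empty */
		return npv;
	}
	for (npv = pv->next; npv != NULL; pv = npv, npv = npv->next) {
		if (npv->key == key) {
			pv->next = npv->next;	/* unlink from the middle or end */
			return npv;
		}
	}
	return NULL;
}
#endif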
                   1997:
                   1998: #if defined(MULTIPROCESSOR)
                   1999: struct pmap_pvlist_info {
                   2000:        kmutex_t *pli_locks[PAGE_SIZE / 32];
                   2001:        volatile u_int pli_lock_refs[PAGE_SIZE / 32];
                   2002:        volatile u_int pli_lock_index;
                   2003:        u_int pli_lock_mask;
                   2004: } pmap_pvlist_info;
                   2005:
                   2006: void
                   2007: pmap_pvlist_lock_init(size_t cache_line_size)
                   2008: {
                   2009:        struct pmap_pvlist_info * const pli = &pmap_pvlist_info;
                   2010:        const vaddr_t lock_page = uvm_pageboot_alloc(PAGE_SIZE);
                   2011:        vaddr_t lock_va = lock_page;
                   2012:        if (sizeof(kmutex_t) > cache_line_size) {
                   2013:                cache_line_size = roundup2(sizeof(kmutex_t), cache_line_size);
                   2014:        }
                   2015:        const size_t nlocks = PAGE_SIZE / cache_line_size;
                   2016:        KASSERT((nlocks & (nlocks - 1)) == 0);
                   2017:        /*
                   2018:         * Now divide the page into a number of mutexes, one per cacheline.
                   2019:         */
                   2020:        for (size_t i = 0; i < nlocks; lock_va += cache_line_size, i++) {
                   2021:                kmutex_t * const lock = (kmutex_t *)lock_va;
1.15      matt     2022:                mutex_init(lock, MUTEX_DEFAULT, IPL_HIGH);
1.1       christos 2023:                pli->pli_locks[i] = lock;
                   2024:        }
                   2025:        pli->pli_lock_mask = nlocks - 1;
                   2026: }
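
/*
 * Editor's note: a worked example of the sizing in pmap_pvlist_lock_init()
 * above, assuming a 4 KiB page, 64-byte cache lines and
 * sizeof(kmutex_t) <= 64; the numbers are illustrative, not part of pmap.c.
 */
#if 0
#include <assert.h>
#include <stddef.h>

int
main(void)
{
	const size_t page_size = 4096;		/* assumed PAGE_SIZE */
	const size_t cache_line_size = 64;	/* assumed, already >= lock size */

	const size_t nlocks = page_size / cache_line_size;	/* 64 mutexes */
	const size_t mask = nlocks - 1;				/* 0x3f */

	/* nlocks must be a power of two for "index & mask" to be a modulus. */
	assert((nlocks & (nlocks - 1)) == 0);
	/* Any growing counter folds into the lock table with a single AND. */
	assert((1000037 & mask) < nlocks);
	return 0;
}
#endif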
                   2027:
1.15      matt     2028: kmutex_t *
                   2029: pmap_pvlist_lock_addr(struct vm_page_md *mdpg)
1.1       christos 2030: {
                   2031:        struct pmap_pvlist_info * const pli = &pmap_pvlist_info;
                   2032:        kmutex_t *lock = mdpg->mdpg_lock;
                   2033:
                   2034:        /*
                   2035:         * Allocate a lock on an as-needed basis.  This will hopefully give us
                   2036:         * semi-random distribution not based on page color.
                   2037:         */
                   2038:        if (__predict_false(lock == NULL)) {
                   2039:                size_t locknum = atomic_add_int_nv(&pli->pli_lock_index, 37);
                   2040:                size_t lockid = locknum & pli->pli_lock_mask;
                   2041:                kmutex_t * const new_lock = pli->pli_locks[lockid];
                   2042:                /*
                   2043:                 * Set the lock.  If some other thread already did, just use
                   2044:                 * the one they assigned.
                   2045:                 */
                   2046:                lock = atomic_cas_ptr(&mdpg->mdpg_lock, NULL, new_lock);
                   2047:                if (lock == NULL) {
                   2048:                        lock = new_lock;
                   2049:                        atomic_inc_uint(&pli->pli_lock_refs[lockid]);
                   2050:                }
                   2051:        }
                   2052:
                   2053:        /*
1.15      matt     2054:         * Now finally provide the lock.
1.1       christos 2055:         */
1.15      matt     2056:        return lock;
1.1       christos 2057: }
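
/*
 * Editor's note: the lazy, race-tolerant lock assignment in
 * pmap_pvlist_lock_addr() above, re-expressed with C11 atomics; pick_lock()
 * is a hypothetical stand-in for indexing pli_locks[].  Illustrative only,
 * not part of pmap.c.
 */
#if 0
#include <stdatomic.h>
#include <pthread.h>

extern pthread_mutex_t *pick_lock(void);	/* e.g. strided round-robin */

struct page_md {
	_Atomic(pthread_mutex_t *) lock;	/* NULL until first use */
};

static pthread_mutex_t *
page_lock_addr(struct page_md *md)
{
	pthread_mutex_t *lock = atomic_load(&md->lock);

	if (lock == NULL) {
		pthread_mutex_t *new_lock = pick_lock();
		pthread_mutex_t *expected = NULL;

		/* Publish our choice; if we lose the race, use the winner's. */
		if (atomic_compare_exchange_strong(&md->lock, &expected, new_lock))
			lock = new_lock;
		else
			lock = expected;	/* filled in by the winning CAS */
	}
	return lock;
}
#endif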
                   2058: #else /* !MULTIPROCESSOR */
                   2059: void
                   2060: pmap_pvlist_lock_init(size_t cache_line_size)
                   2061: {
1.15      matt     2062:        mutex_init(&pmap_pvlist_mutex, MUTEX_DEFAULT, IPL_HIGH);
1.1       christos 2063: }
                   2064:
                   2065: #ifdef MODULAR
1.15      matt     2066: kmutex_t *
                   2067: pmap_pvlist_lock_addr(struct vm_page_md *mdpg)
1.1       christos 2068: {
                   2069:        /*
                   2070:         * We just use a global lock.
                   2071:         */
                   2072:        if (__predict_false(mdpg->mdpg_lock == NULL)) {
                   2073:                mdpg->mdpg_lock = &pmap_pvlist_mutex;
                   2074:        }
                   2075:
                   2076:        /*
1.15      matt     2077:         * Now finally provide the lock.
1.1       christos 2078:         */
1.15      matt     2079:        return mdpg->mdpg_lock;
1.1       christos 2080: }
                   2081: #endif /* MODULAR */
                   2082: #endif /* !MULTIPROCESSOR */
                   2083:
                   2084: /*
                   2085:  * pmap_pv_page_alloc:
                   2086:  *
                   2087:  *     Allocate a page for the pv_entry pool.
                   2088:  */
                   2089: void *
                   2090: pmap_pv_page_alloc(struct pool *pp, int flags)
                   2091: {
1.15      matt     2092:        struct vm_page * const pg = PMAP_ALLOC_POOLPAGE(UVM_PGA_USERESERVE);
1.1       christos 2093:        if (pg == NULL)
                   2094:                return NULL;
                   2095:
                   2096:        return (void *)pmap_map_poolpage(VM_PAGE_TO_PHYS(pg));
                   2097: }
                   2098:
                   2099: /*
                   2100:  * pmap_pv_page_free:
                   2101:  *
                   2102:  *     Free a pv_entry pool page.
                   2103:  */
                   2104: void
                   2105: pmap_pv_page_free(struct pool *pp, void *v)
                   2106: {
                   2107:        vaddr_t va = (vaddr_t)v;
                   2108:
                   2109:        KASSERT(pmap_md_direct_mapped_vaddr_p(va));
                   2110:        const paddr_t pa = pmap_md_direct_mapped_vaddr_to_paddr(va);
                   2111:        struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
1.15      matt     2112:        KASSERT(pg != NULL);
                   2113: #ifdef PMAP_VIRTUAL_CACHE_ALIASES
                   2114:        kpreempt_disable();
                   2115:        pmap_md_vca_remove(pg, va, true, true);
                   2116:        kpreempt_enable();
                   2117: #endif
                   2118:        pmap_page_clear_attributes(VM_PAGE_TO_MD(pg), VM_PAGEMD_POOLPAGE);
1.34      skrll    2119:        KASSERT(!VM_PAGEMD_EXECPAGE_P(VM_PAGE_TO_MD(pg)));
1.1       christos 2120:        uvm_pagefree(pg);
                   2121: }
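
/*
 * Editor's note: pmap_pv_page_alloc() and pmap_pv_page_free() are back-end
 * page hooks for a pool(9) of pv_entry structures.  The sketch below shows
 * how such hooks are typically wired up, assuming the pool_allocator and
 * pool_init() interfaces from <sys/pool.h>; the pool name, variable names
 * and IPL are illustrative and not necessarily those used by this pmap.
 */
#if 0
static struct pool pv_pool;

static struct pool_allocator pv_page_allocator = {
	.pa_alloc  = pmap_pv_page_alloc,
	.pa_free   = pmap_pv_page_free,
	.pa_pagesz = 0,			/* 0: use the default page size */
};

static void
pv_pool_init(void)
{
	pool_init(&pv_pool, sizeof(struct pv_entry), 0, 0, 0, "pvpl",
	    &pv_page_allocator, IPL_NONE);
}
#endif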
                   2122:
                   2123: #ifdef PMAP_PREFER
                   2124: /*
                   2125:  * Find first virtual address >= *vap that doesn't cause
                   2126:  * a cache alias conflict.
                   2127:  */
                   2128: void
                   2129: pmap_prefer(vaddr_t foff, vaddr_t *vap, vsize_t sz, int td)
                   2130: {
                   2131:        vsize_t prefer_mask = ptoa(uvmexp.colormask);
                   2132:
                   2133:        PMAP_COUNT(prefer_requests);
                   2134:
                   2135:        prefer_mask |= pmap_md_cache_prefer_mask();
                   2136:
                   2137:        if (prefer_mask) {
1.15      matt     2138:                vaddr_t va = *vap;
                   2139:                vsize_t d = (foff - va) & prefer_mask;
1.1       christos 2140:                if (d) {
                   2141:                        if (td)
1.15      matt     2142:                                *vap = trunc_page(va - ((-d) & prefer_mask));
1.1       christos 2143:                        else
                   2144:                                *vap = round_page(va + d);
                   2145:                        PMAP_COUNT(prefer_adjustments);
                   2146:                }
                   2147:        }
                   2148: }
                   2149: #endif /* PMAP_PREFER */
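
/*
 * Editor's note: a worked example of the pmap_prefer() adjustment above
 * with made-up numbers (a 16 KiB alias span, i.e. prefer_mask == 0x3fff);
 * illustrative only, not part of pmap.c.
 */
#if 0
#include <assert.h>
#include <stdint.h>

int
main(void)
{
	const uintptr_t prefer_mask = 0x3fff;	/* assumed alias span - 1 */
	const uintptr_t foff = 0x5000;		/* object offset of the mapping */
	uintptr_t va = 0x20000;			/* caller's proposed address */

	const uintptr_t d = (foff - va) & prefer_mask;	/* 0x1000 */
	va += d;					/* round up: 0x21000 */

	/* va and foff now fall in the same alias color. */
	assert(((foff - va) & prefer_mask) == 0);
	return 0;
}
#endif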
                   2150:
                   2151: #ifdef PMAP_MAP_POOLPAGE
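/*
 * pmap_map_poolpage:
 *
 *	Mark the page as a pool page and return a machine-dependent,
 *	direct-mapped virtual address for it.
 */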
                   2152: vaddr_t
                   2153: pmap_map_poolpage(paddr_t pa)
                   2154: {
                   2155:        struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
                   2156:        KASSERT(pg);
1.34      skrll    2157:
1.1       christos 2158:        struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
1.34      skrll    2159:        KASSERT(!VM_PAGEMD_EXECPAGE_P(mdpg));
                   2160:
1.1       christos 2161:        pmap_page_set_attributes(mdpg, VM_PAGEMD_POOLPAGE);
                   2162:
1.15      matt     2163:        return pmap_md_map_poolpage(pa, NBPG);
1.1       christos 2164: }
                   2165:
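/*
 * pmap_unmap_poolpage:
 *
 *	Undo pmap_map_poolpage(): clear the pool-page attribute and
 *	return the page's physical address.
 */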
                   2166: paddr_t
                   2167: pmap_unmap_poolpage(vaddr_t va)
                   2168: {
                   2169:        KASSERT(pmap_md_direct_mapped_vaddr_p(va));
                   2170:        paddr_t pa = pmap_md_direct_mapped_vaddr_to_paddr(va);
                   2171:
                   2172:        struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
1.15      matt     2173:        KASSERT(pg != NULL);
1.34      skrll    2174:        KASSERT(!VM_PAGEMD_EXECPAGE_P(VM_PAGE_TO_MD(pg)));
                   2175:
1.15      matt     2176:        pmap_page_clear_attributes(VM_PAGE_TO_MD(pg), VM_PAGEMD_POOLPAGE);
1.1       christos 2177:        pmap_md_unmap_poolpage(va, NBPG);
                   2178:
                   2179:        return pa;
                   2180: }
                   2181: #endif /* PMAP_MAP_POOLPAGE */
