
Annotation of src/sys/arch/powerpc/oea/pmap.c, Revision 1.110

1.110   ! martin      1: /*     $NetBSD: pmap.c,v 1.109 2022/02/16 23:49:27 riastradh Exp $     */
1.1       matt        2: /*-
                      3:  * Copyright (c) 2001 The NetBSD Foundation, Inc.
                      4:  * All rights reserved.
                      5:  *
                      6:  * This code is derived from software contributed to The NetBSD Foundation
                      7:  * by Matt Thomas <matt@3am-software.com> of Allegro Networks, Inc.
                      8:  *
1.38      sanjayl     9:  * Support for PPC64 Bridge mode added by Sanjay Lal <sanjayl@kymasys.com>
                     10:  * of Kyma Systems LLC.
                     11:  *
1.1       matt       12:  * Redistribution and use in source and binary forms, with or without
                     13:  * modification, are permitted provided that the following conditions
                     14:  * are met:
                     15:  * 1. Redistributions of source code must retain the above copyright
                     16:  *    notice, this list of conditions and the following disclaimer.
                     17:  * 2. Redistributions in binary form must reproduce the above copyright
                     18:  *    notice, this list of conditions and the following disclaimer in the
                     19:  *    documentation and/or other materials provided with the distribution.
                     20:  *
                     21:  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
                     22:  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
                     23:  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
                     24:  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
                     25:  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
                     26:  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
                     27:  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
                     28:  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
                     29:  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
                     30:  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
                     31:  * POSSIBILITY OF SUCH DAMAGE.
                     32:  */
                     33:
                     34: /*
                     35:  * Copyright (C) 1995, 1996 Wolfgang Solfrank.
                     36:  * Copyright (C) 1995, 1996 TooLs GmbH.
                     37:  * All rights reserved.
                     38:  *
                     39:  * Redistribution and use in source and binary forms, with or without
                     40:  * modification, are permitted provided that the following conditions
                     41:  * are met:
                     42:  * 1. Redistributions of source code must retain the above copyright
                     43:  *    notice, this list of conditions and the following disclaimer.
                     44:  * 2. Redistributions in binary form must reproduce the above copyright
                     45:  *    notice, this list of conditions and the following disclaimer in the
                     46:  *    documentation and/or other materials provided with the distribution.
                     47:  * 3. All advertising materials mentioning features or use of this software
                     48:  *    must display the following acknowledgement:
                     49:  *     This product includes software developed by TooLs GmbH.
                     50:  * 4. The name of TooLs GmbH may not be used to endorse or promote products
                     51:  *    derived from this software without specific prior written permission.
                     52:  *
                     53:  * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
                     54:  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
                     55:  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
                     56:  * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
                     57:  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
                     58:  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
                     59:  * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
                     60:  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
                     61:  * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
                     62:  * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
                     63:  */
1.11      lukem      64:
                     65: #include <sys/cdefs.h>
1.110   ! martin     66: __KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.109 2022/02/16 23:49:27 riastradh Exp $");
1.53      garbled    67:
                     68: #define        PMAP_NOOPNAMES
1.1       matt       69:
1.98      rin        70: #ifdef _KERNEL_OPT
1.1       matt       71: #include "opt_altivec.h"
1.57      matt       72: #include "opt_multiprocessor.h"
1.1       matt       73: #include "opt_pmap.h"
1.98      rin        74: #include "opt_ppcarch.h"
                     75: #endif
1.57      matt       76:
1.1       matt       77: #include <sys/param.h>
                     78: #include <sys/proc.h>
                     79: #include <sys/pool.h>
                     80: #include <sys/queue.h>
                     81: #include <sys/device.h>                /* for evcnt */
                     82: #include <sys/systm.h>
1.50      ad         83: #include <sys/atomic.h>
1.1       matt       84:
                     85: #include <uvm/uvm.h>
1.94      cherry     86: #include <uvm/uvm_physseg.h>
1.1       matt       87:
                     88: #include <machine/powerpc.h>
1.80      matt       89: #include <powerpc/bat.h>
                     90: #include <powerpc/pcb.h>
                     91: #include <powerpc/psl.h>
1.1       matt       92: #include <powerpc/spr.h>
1.71      matt       93: #include <powerpc/oea/spr.h>
                     94: #include <powerpc/oea/sr_601.h>
1.1       matt       95:
                     96: #ifdef ALTIVEC
1.86      matt       97: extern int pmap_use_altivec;
1.1       matt       98: #endif
                     99:
1.21      aymeric   100: #ifdef PMAP_MEMLIMIT
1.53      garbled   101: static paddr_t pmap_memlimit = PMAP_MEMLIMIT;
1.21      aymeric   102: #else
1.53      garbled   103: static paddr_t pmap_memlimit = -PAGE_SIZE;             /* there is no limit */
1.21      aymeric   104: #endif
1.1       matt      105:
1.86      matt      106: extern struct pmap kernel_pmap_;
                    107: static unsigned int pmap_pages_stolen;
                    108: static u_long pmap_pte_valid;
1.1       matt      109: #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
1.86      matt      110: static u_long pmap_pvo_enter_depth;
                    111: static u_long pmap_pvo_remove_depth;
1.1       matt      112: #endif
                    113:
                    114: #ifndef MSGBUFADDR
                    115: extern paddr_t msgbuf_paddr;
                    116: #endif
                    117:
                    118: static struct mem_region *mem, *avail;
                    119: static u_int mem_cnt, avail_cnt;
                    120:
1.53      garbled   121: #if !defined(PMAP_OEA64) && !defined(PMAP_OEA64_BRIDGE)
                    122: # define       PMAP_OEA 1
                    123: #endif
                    124:
                    125: #if defined(PMAP_OEA)
                    126: #define        _PRIxpte        "lx"
                    127: #else
                    128: #define        _PRIxpte        PRIx64
                    129: #endif
                    130: #define        _PRIxpa         "lx"
                    131: #define        _PRIxva         "lx"
1.54      mlelstv   132: #define        _PRIsr          "lx"
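                         /*
                          * These printf(3) format macros match the width of a PTE word, a
                          * physical address, a virtual address and a segment register for
                          * the pmap variant being built, e.g.:
                          *
                          *	printf("pte %#" _PRIxpte "/%#" _PRIxpte "\n",
                          *	    pt->pte_hi, pt->pte_lo);
                          */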
1.53      garbled   133:
1.76      matt      134: #ifdef PMAP_NEEDS_FIXUP
1.53      garbled   135: #if defined(PMAP_OEA)
                    136: #define        PMAPNAME(name)  pmap32_##name
                    137: #elif defined(PMAP_OEA64)
                    138: #define        PMAPNAME(name)  pmap64_##name
                    139: #elif defined(PMAP_OEA64_BRIDGE)
                    140: #define        PMAPNAME(name)  pmap64bridge_##name
                    141: #else
                    142: #error unknown variant for pmap
                    143: #endif
1.76      matt      144: #endif /* PMAP_NEEDS_FIXUP */
1.53      garbled   145:
1.76      matt      146: #ifdef PMAPNAME
1.53      garbled   147: #define        STATIC                  static
                    148: #define pmap_pte_spill         PMAPNAME(pte_spill)
                    149: #define pmap_real_memory       PMAPNAME(real_memory)
                    150: #define pmap_init              PMAPNAME(init)
                    151: #define pmap_virtual_space     PMAPNAME(virtual_space)
                    152: #define pmap_create            PMAPNAME(create)
                    153: #define pmap_reference         PMAPNAME(reference)
                    154: #define pmap_destroy           PMAPNAME(destroy)
                    155: #define pmap_copy              PMAPNAME(copy)
                    156: #define pmap_update            PMAPNAME(update)
                    157: #define pmap_enter             PMAPNAME(enter)
                    158: #define pmap_remove            PMAPNAME(remove)
                    159: #define pmap_kenter_pa         PMAPNAME(kenter_pa)
                    160: #define pmap_kremove           PMAPNAME(kremove)
                    161: #define pmap_extract           PMAPNAME(extract)
                    162: #define pmap_protect           PMAPNAME(protect)
                    163: #define pmap_unwire            PMAPNAME(unwire)
                    164: #define pmap_page_protect      PMAPNAME(page_protect)
                    165: #define pmap_query_bit         PMAPNAME(query_bit)
                    166: #define pmap_clear_bit         PMAPNAME(clear_bit)
                    167:
                    168: #define pmap_activate          PMAPNAME(activate)
                    169: #define pmap_deactivate                PMAPNAME(deactivate)
                    170:
                    171: #define pmap_pinit             PMAPNAME(pinit)
                    172: #define pmap_procwr            PMAPNAME(procwr)
                    173:
1.86      matt      174: #define pmap_pool              PMAPNAME(pool)
1.106     martin    175: #define pmap_pvo_pool          PMAPNAME(pvo_pool)
1.86      matt      176: #define pmap_pvo_table         PMAPNAME(pvo_table)
1.53      garbled   177: #if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB)
                    178: #define pmap_pte_print         PMAPNAME(pte_print)
                    179: #define pmap_pteg_check                PMAPNAME(pteg_check)
                    180: #define pmap_print_mmuregs     PMAPNAME(print_mmuregs)
                    181: #define pmap_print_pte         PMAPNAME(print_pte)
                    182: #define pmap_pteg_dist         PMAPNAME(pteg_dist)
                    183: #endif
                    184: #if defined(DEBUG) || defined(PMAPCHECK)
                    185: #define        pmap_pvo_verify         PMAPNAME(pvo_verify)
1.56      phx       186: #define pmapcheck              PMAPNAME(check)
                    187: #endif
                    188: #if defined(DEBUG) || defined(PMAPDEBUG)
                    189: #define pmapdebug              PMAPNAME(debug)
1.53      garbled   190: #endif
                    191: #define pmap_steal_memory      PMAPNAME(steal_memory)
                    192: #define pmap_bootstrap         PMAPNAME(bootstrap)
1.100     rin       193: #define pmap_bootstrap1                PMAPNAME(bootstrap1)
                    194: #define pmap_bootstrap2                PMAPNAME(bootstrap2)
1.53      garbled   195: #else
                    196: #define        STATIC                  /* nothing */
                    197: #endif /* PMAPNAME */
                    198:
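                         /*
                          * Thus each pmap variant can be compiled into the same kernel with
                          * its entry points renamed, e.g. with PMAP_OEA64_BRIDGE defined the
                          * pmap_enter() in this file becomes pmap64bridge_enter(), and
                          * callers reach it through the pmap_ops structure defined below.
                          */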
                    199: STATIC int pmap_pte_spill(struct pmap *, vaddr_t, bool);
                    200: STATIC void pmap_real_memory(paddr_t *, psize_t *);
                    201: STATIC void pmap_init(void);
                    202: STATIC void pmap_virtual_space(vaddr_t *, vaddr_t *);
                    203: STATIC pmap_t pmap_create(void);
                    204: STATIC void pmap_reference(pmap_t);
                    205: STATIC void pmap_destroy(pmap_t);
                    206: STATIC void pmap_copy(pmap_t, pmap_t, vaddr_t, vsize_t, vaddr_t);
                    207: STATIC void pmap_update(pmap_t);
1.65      cegger    208: STATIC int pmap_enter(pmap_t, vaddr_t, paddr_t, vm_prot_t, u_int);
1.53      garbled   209: STATIC void pmap_remove(pmap_t, vaddr_t, vaddr_t);
1.68      cegger    210: STATIC void pmap_kenter_pa(vaddr_t, paddr_t, vm_prot_t, u_int);
1.53      garbled   211: STATIC void pmap_kremove(vaddr_t, vsize_t);
                    212: STATIC bool pmap_extract(pmap_t, vaddr_t, paddr_t *);
                    213:
                    214: STATIC void pmap_protect(pmap_t, vaddr_t, vaddr_t, vm_prot_t);
                    215: STATIC void pmap_unwire(pmap_t, vaddr_t);
                    216: STATIC void pmap_page_protect(struct vm_page *, vm_prot_t);
1.110   ! martin    217: void pmap_pv_protect(paddr_t, vm_prot_t);
1.53      garbled   218: STATIC bool pmap_query_bit(struct vm_page *, int);
                    219: STATIC bool pmap_clear_bit(struct vm_page *, int);
                    220:
                    221: STATIC void pmap_activate(struct lwp *);
                    222: STATIC void pmap_deactivate(struct lwp *);
                    223:
                    224: STATIC void pmap_pinit(pmap_t pm);
                    225: STATIC void pmap_procwr(struct proc *, vaddr_t, size_t);
                    226:
                    227: #if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB)
                    228: STATIC void pmap_pte_print(volatile struct pte *);
                    229: STATIC void pmap_pteg_check(void);
                    230: STATIC void pmap_print_mmuregs(void);
                    231: STATIC void pmap_print_pte(pmap_t, vaddr_t);
                    232: STATIC void pmap_pteg_dist(void);
                    233: #endif
                    234: #if defined(DEBUG) || defined(PMAPCHECK)
                    235: STATIC void pmap_pvo_verify(void);
                    236: #endif
                    237: STATIC vaddr_t pmap_steal_memory(vsize_t, vaddr_t *, vaddr_t *);
                    238: STATIC void pmap_bootstrap(paddr_t, paddr_t);
1.100     rin       239: STATIC void pmap_bootstrap1(paddr_t, paddr_t);
                    240: STATIC void pmap_bootstrap2(void);
1.53      garbled   241:
                    242: #ifdef PMAPNAME
                    243: const struct pmap_ops PMAPNAME(ops) = {
                    244:        .pmapop_pte_spill = pmap_pte_spill,
                    245:        .pmapop_real_memory = pmap_real_memory,
                    246:        .pmapop_init = pmap_init,
                    247:        .pmapop_virtual_space = pmap_virtual_space,
                    248:        .pmapop_create = pmap_create,
                    249:        .pmapop_reference = pmap_reference,
                    250:        .pmapop_destroy = pmap_destroy,
                    251:        .pmapop_copy = pmap_copy,
                    252:        .pmapop_update = pmap_update,
                    253:        .pmapop_enter = pmap_enter,
                    254:        .pmapop_remove = pmap_remove,
                    255:        .pmapop_kenter_pa = pmap_kenter_pa,
                    256:        .pmapop_kremove = pmap_kremove,
                    257:        .pmapop_extract = pmap_extract,
                    258:        .pmapop_protect = pmap_protect,
                    259:        .pmapop_unwire = pmap_unwire,
                    260:        .pmapop_page_protect = pmap_page_protect,
                    261:        .pmapop_query_bit = pmap_query_bit,
                    262:        .pmapop_clear_bit = pmap_clear_bit,
                    263:        .pmapop_activate = pmap_activate,
                    264:        .pmapop_deactivate = pmap_deactivate,
                    265:        .pmapop_pinit = pmap_pinit,
                    266:        .pmapop_procwr = pmap_procwr,
                    267: #if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB)
                    268:        .pmapop_pte_print = pmap_pte_print,
                    269:        .pmapop_pteg_check = pmap_pteg_check,
                    270:        .pmapop_print_mmuregs = pmap_print_mmuregs,
                    271:        .pmapop_print_pte = pmap_print_pte,
                    272:        .pmapop_pteg_dist = pmap_pteg_dist,
                    273: #else
                    274:        .pmapop_pte_print = NULL,
                    275:        .pmapop_pteg_check = NULL,
                    276:        .pmapop_print_mmuregs = NULL,
                    277:        .pmapop_print_pte = NULL,
                    278:        .pmapop_pteg_dist = NULL,
                    279: #endif
                    280: #if defined(DEBUG) || defined(PMAPCHECK)
                    281:        .pmapop_pvo_verify = pmap_pvo_verify,
                    282: #else
                    283:        .pmapop_pvo_verify = NULL,
1.1       matt      284: #endif
1.53      garbled   285:        .pmapop_steal_memory = pmap_steal_memory,
                    286:        .pmapop_bootstrap = pmap_bootstrap,
1.101     thorpej   287:        .pmapop_bootstrap1 = pmap_bootstrap1,
                    288:        .pmapop_bootstrap2 = pmap_bootstrap2,
1.53      garbled   289: };
                    290: #endif /* PMAPNAME */
1.1       matt      291:
                    292: /*
1.38      sanjayl   293:  * The following structure is aligned to 32 bytes
1.1       matt      294:  */
                    295: struct pvo_entry {
                    296:        LIST_ENTRY(pvo_entry) pvo_vlink;        /* Link to common virt page */
                    297:        TAILQ_ENTRY(pvo_entry) pvo_olink;       /* Link to overflow entry */
                    298:        struct pte pvo_pte;                     /* Prebuilt PTE */
                    299:        pmap_t pvo_pmap;                        /* ptr to owning pmap */
                    300:        vaddr_t pvo_vaddr;                      /* VA of entry */
                    301: #define        PVO_PTEGIDX_MASK        0x0007          /* which PTEG slot */
                    302: #define        PVO_PTEGIDX_VALID       0x0008          /* slot is valid */
                    303: #define        PVO_WIRED               0x0010          /* PVO entry is wired */
                    304: #define        PVO_MANAGED             0x0020          /* PVO e. for managed page */
                    305: #define        PVO_EXECUTABLE          0x0040          /* PVO e. for executable page */
1.39      matt      306: #define        PVO_WIRED_P(pvo)        ((pvo)->pvo_vaddr & PVO_WIRED)
                    307: #define        PVO_MANAGED_P(pvo)      ((pvo)->pvo_vaddr & PVO_MANAGED)
                    308: #define        PVO_EXECUTABLE_P(pvo)   ((pvo)->pvo_vaddr & PVO_EXECUTABLE)
1.12      matt      309: #define        PVO_ENTER_INSERT        0               /* PVO has been inserted */
                    310: #define        PVO_SPILL_UNSET         1               /* PVO has been evicted */
                    311: #define        PVO_SPILL_SET           2               /* PVO has been spilled */
                    312: #define        PVO_SPILL_INSERT        3               /* PVO has been inserted */
                    313: #define        PVO_PMAP_PAGE_PROTECT   4               /* PVO changed by page_protect */
                    314: #define        PVO_PMAP_PROTECT        5               /* PVO changed by protect */
                    315: #define        PVO_REMOVE              6               /* PVO has been removed */
                    316: #define        PVO_WHERE_MASK          15
                    317: #define        PVO_WHERE_SHFT          8
1.38      sanjayl   318: } __attribute__ ((aligned (32)));
1.1       matt      319: #define        PVO_VADDR(pvo)          ((pvo)->pvo_vaddr & ~ADDR_POFF)
                    320: #define        PVO_PTEGIDX_GET(pvo)    ((pvo)->pvo_vaddr & PVO_PTEGIDX_MASK)
                    321: #define        PVO_PTEGIDX_ISSET(pvo)  ((pvo)->pvo_vaddr & PVO_PTEGIDX_VALID)
                    322: #define        PVO_PTEGIDX_CLR(pvo)    \
                    323:        ((void)((pvo)->pvo_vaddr &= ~(PVO_PTEGIDX_VALID|PVO_PTEGIDX_MASK)))
                    324: #define        PVO_PTEGIDX_SET(pvo,i)  \
                    325:        ((void)((pvo)->pvo_vaddr |= (i)|PVO_PTEGIDX_VALID))
1.12      matt      326: #define        PVO_WHERE(pvo,w)        \
                    327:        ((pvo)->pvo_vaddr &= ~(PVO_WHERE_MASK << PVO_WHERE_SHFT), \
                    328:         (pvo)->pvo_vaddr |= ((PVO_ ## w) << PVO_WHERE_SHFT))
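                         /*
                          * PVO_WHERE() records the last operation that touched a PVO for
                          * debugging, e.g. PVO_WHERE(pvo, SPILL_INSERT) stores
                          * PVO_SPILL_INSERT in bits 8-11 of pvo_vaddr, which lie in the
                          * (otherwise unused) page offset portion of the address.
                          */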
1.1       matt      329:
                    330: TAILQ_HEAD(pvo_tqhead, pvo_entry);
                    331: struct pvo_tqhead *pmap_pvo_table;     /* pvo entries by ptegroup index */
                    332:
                    333: struct pool pmap_pool;         /* pool for pmap structures */
1.106     martin    334: struct pool pmap_pvo_pool;     /* pool for pvo entries */
1.1       matt      335:
                    336: /*
                    337:  * We keep a cache of unmanaged pages to be used for pvo entries that
                    338:  * map unmanaged pages.
                    339:  */
                    340: struct pvo_page {
                    341:        SIMPLEQ_ENTRY(pvo_page) pvop_link;
                    342: };
                    343: SIMPLEQ_HEAD(pvop_head, pvo_page);
1.106     martin    344: static struct pvop_head pmap_pvop_head = SIMPLEQ_HEAD_INITIALIZER(pmap_pvop_head);
                    345: static u_long pmap_pvop_free;
                    346: static u_long pmap_pvop_maxfree;
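                         /*
                          * pmap_pool_alloc() (defined later in this file) is expected to try
                          * this free list before falling back to allocating a fresh page;
                          * pmap_pool_free() returns pages here, with pmap_pvop_free tracking
                          * the cache depth and pmap_pvop_maxfree its high-water mark.
                          */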
                    347:
                    348: static void *pmap_pool_alloc(struct pool *, int);
                    349: static void pmap_pool_free(struct pool *, void *);
                    350:
                    351: static struct pool_allocator pmap_pool_allocator = {
                    352:        .pa_alloc = pmap_pool_alloc,
                    353:        .pa_free = pmap_pool_free,
1.43      garbled   354:        .pa_pagesz = 0,
1.1       matt      355: };
                    356:
                    357: #if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB)
1.2       matt      358: void pmap_pte_print(volatile struct pte *);
1.1       matt      359: void pmap_pteg_check(void);
                    360: void pmap_pteg_dist(void);
                    361: void pmap_print_pte(pmap_t, vaddr_t);
                    362: void pmap_print_mmuregs(void);
                    363: #endif
                    364:
                    365: #if defined(DEBUG) || defined(PMAPCHECK)
                    366: #ifdef PMAPCHECK
                    367: int pmapcheck = 1;
                    368: #else
                    369: int pmapcheck = 0;
                    370: #endif
                    371: void pmap_pvo_verify(void);
1.53      garbled   372: static void pmap_pvo_check(const struct pvo_entry *);
1.1       matt      373: #define        PMAP_PVO_CHECK(pvo)                     \
                    374:        do {                                    \
                    375:                if (pmapcheck)                  \
                    376:                        pmap_pvo_check(pvo);    \
                    377:        } while (0)
                    378: #else
                    379: #define        PMAP_PVO_CHECK(pvo)     do { } while (/*CONSTCOND*/0)
                    380: #endif
1.53      garbled   381: static int pmap_pte_insert(int, struct pte *);
                    382: static int pmap_pvo_enter(pmap_t, struct pool *, struct pvo_head *,
1.2       matt      383:        vaddr_t, paddr_t, register_t, int);
1.53      garbled   384: static void pmap_pvo_remove(struct pvo_entry *, int, struct pvo_head *);
                    385: static void pmap_pvo_free(struct pvo_entry *);
                    386: static void pmap_pvo_free_list(struct pvo_head *);
                    387: static struct pvo_entry *pmap_pvo_find_va(pmap_t, vaddr_t, int *);
                    388: static volatile struct pte *pmap_pvo_to_pte(const struct pvo_entry *, int);
                    389: static struct pvo_entry *pmap_pvo_reclaim(struct pmap *);
                    390: static void pvo_set_exec(struct pvo_entry *);
                    391: static void pvo_clear_exec(struct pvo_entry *);
1.1       matt      392:
1.53      garbled   393: static void tlbia(void);
1.1       matt      394:
1.53      garbled   395: static void pmap_release(pmap_t);
                    396: static paddr_t pmap_boot_find_memory(psize_t, psize_t, int);
1.1       matt      397:
1.25      chs       398: static uint32_t pmap_pvo_reclaim_nextidx;
                    399: #ifdef DEBUG
                    400: static int pmap_pvo_reclaim_debugctr;
                    401: #endif
                    402:
1.1       matt      403: #define        VSID_NBPW       (sizeof(uint32_t) * 8)
                    404: static uint32_t pmap_vsid_bitmap[NPMAPS / VSID_NBPW];
                    405:
                    406: static int pmap_initialized;
                    407:
                    408: #if defined(DEBUG) || defined(PMAPDEBUG)
                    409: #define        PMAPDEBUG_BOOT          0x0001
                    410: #define        PMAPDEBUG_PTE           0x0002
                    411: #define        PMAPDEBUG_EXEC          0x0008
                    412: #define        PMAPDEBUG_PVOENTER      0x0010
                    413: #define        PMAPDEBUG_PVOREMOVE     0x0020
                    414: #define        PMAPDEBUG_ACTIVATE      0x0100
                    415: #define        PMAPDEBUG_CREATE        0x0200
                    416: #define        PMAPDEBUG_ENTER         0x1000
                    417: #define        PMAPDEBUG_KENTER        0x2000
                    418: #define        PMAPDEBUG_KREMOVE       0x4000
                    419: #define        PMAPDEBUG_REMOVE        0x8000
1.38      sanjayl   420:
1.1       matt      421: unsigned int pmapdebug = 0;
1.38      sanjayl   422:
1.85      matt      423: # define DPRINTF(x, ...)       printf(x, __VA_ARGS__)
                    424: # define DPRINTFN(n, x, ...)   do if (pmapdebug & PMAPDEBUG_ ## n) printf(x, __VA_ARGS__); while (0)
1.1       matt      425: #else
1.85      matt      426: # define DPRINTF(x, ...)       do { } while (0)
                    427: # define DPRINTFN(n, x, ...)   do { } while (0)
1.1       matt      428: #endif
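                         /*
                          * Typical use: DPRINTFN(ENTER, "pmap_enter: va %#" _PRIxva "\n", va)
                          * prints only when PMAPDEBUG_ENTER is set in pmapdebug, while
                          * DPRINTF() prints unconditionally on DEBUG/PMAPDEBUG kernels.
                          */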
                    429:
                    430:
                    431: #ifdef PMAPCOUNTERS
                    432: /*
                    433:  * From pmap_subr.c
                    434:  */
1.53      garbled   435: extern struct evcnt pmap_evcnt_mappings;
                    436: extern struct evcnt pmap_evcnt_unmappings;
                    437:
                    438: extern struct evcnt pmap_evcnt_kernel_mappings;
                    439: extern struct evcnt pmap_evcnt_kernel_unmappings;
                    440:
                    441: extern struct evcnt pmap_evcnt_mappings_replaced;
                    442:
                    443: extern struct evcnt pmap_evcnt_exec_mappings;
                    444: extern struct evcnt pmap_evcnt_exec_cached;
                    445:
                    446: extern struct evcnt pmap_evcnt_exec_synced;
                    447: extern struct evcnt pmap_evcnt_exec_synced_clear_modify;
                    448: extern struct evcnt pmap_evcnt_exec_synced_pvo_remove;
                    449:
                    450: extern struct evcnt pmap_evcnt_exec_uncached_page_protect;
                    451: extern struct evcnt pmap_evcnt_exec_uncached_clear_modify;
                    452: extern struct evcnt pmap_evcnt_exec_uncached_zero_page;
                    453: extern struct evcnt pmap_evcnt_exec_uncached_copy_page;
                    454: extern struct evcnt pmap_evcnt_exec_uncached_pvo_remove;
                    455:
                    456: extern struct evcnt pmap_evcnt_updates;
                    457: extern struct evcnt pmap_evcnt_collects;
                    458: extern struct evcnt pmap_evcnt_copies;
                    459:
                    460: extern struct evcnt pmap_evcnt_ptes_spilled;
                    461: extern struct evcnt pmap_evcnt_ptes_unspilled;
                    462: extern struct evcnt pmap_evcnt_ptes_evicted;
                    463:
                    464: extern struct evcnt pmap_evcnt_ptes_primary[8];
                    465: extern struct evcnt pmap_evcnt_ptes_secondary[8];
                    466: extern struct evcnt pmap_evcnt_ptes_removed;
                    467: extern struct evcnt pmap_evcnt_ptes_changed;
                    468: extern struct evcnt pmap_evcnt_pvos_reclaimed;
                    469: extern struct evcnt pmap_evcnt_pvos_failed;
                    470:
1.1       matt      471: extern struct evcnt pmap_evcnt_zeroed_pages;
                    472: extern struct evcnt pmap_evcnt_copied_pages;
                    473: extern struct evcnt pmap_evcnt_idlezeroed_pages;
1.26      matt      474:
1.53      garbled   475: #define        PMAPCOUNT(ev)   ((pmap_evcnt_ ## ev).ev_count++)
                    476: #define        PMAPCOUNT2(ev)  ((ev).ev_count++)
1.1       matt      477: #else
                    478: #define        PMAPCOUNT(ev)   ((void) 0)
                    479: #define        PMAPCOUNT2(ev)  ((void) 0)
                    480: #endif
                    481:
1.109     riastrad  482: #define        TLBIE(va)       __asm volatile("tlbie %0" :: "r"(va) : "memory")
1.38      sanjayl   483:
                    484: /* XXXSL: this needs to be moved to assembler */
1.109     riastrad  485: #define        TLBIEL(va)      __asm volatile("tlbie %0" :: "r"(va) : "memory")
1.38      sanjayl   486:
1.87      kiyohara  487: #ifdef MD_TLBSYNC
                    488: #define TLBSYNC()      MD_TLBSYNC()
                    489: #else
1.109     riastrad  490: #define        TLBSYNC()       __asm volatile("tlbsync" ::: "memory")
1.87      kiyohara  491: #endif
1.109     riastrad  492: #define        SYNC()          __asm volatile("sync" ::: "memory")
                    493: #define        EIEIO()         __asm volatile("eieio" ::: "memory")
                    494: #define        DCBST(va)       __asm volatile("dcbst 0,%0" :: "r"(va) : "memory")
1.1       matt      495: #define        MFMSR()         mfmsr()
                    496: #define        MTMSR(psl)      mtmsr(psl)
                    497: #define        MFPVR()         mfpvr()
                    498: #define        MFSRIN(va)      mfsrin(va)
                    499: #define        MFTB()          mfrtcltbl()
                    500:
1.92      joerg     501: #if defined(DDB) && !defined(PMAP_OEA64)
1.35      perry     502: static inline register_t
1.1       matt      503: mfsrin(vaddr_t va)
                    504: {
1.2       matt      505:        register_t sr;
1.35      perry     506:        __asm volatile ("mfsrin %0,%1" : "=r"(sr) : "r"(va));
1.1       matt      507:        return sr;
                    508: }
1.92      joerg     509: #endif /* DDB && !PMAP_OEA64 */
1.38      sanjayl   510:
1.53      garbled   511: #if defined (PMAP_OEA64_BRIDGE)
1.38      sanjayl   512: extern void mfmsr64 (register64_t *result);
1.53      garbled   513: #endif /* PMAP_OEA64_BRIDGE */
1.38      sanjayl   514:
1.50      ad        515: #define        PMAP_LOCK()             KERNEL_LOCK(1, NULL)
                    516: #define        PMAP_UNLOCK()           KERNEL_UNLOCK_ONE(NULL)
1.1       matt      517:
1.35      perry     518: static inline register_t
1.1       matt      519: pmap_interrupts_off(void)
                    520: {
1.2       matt      521:        register_t msr = MFMSR();
1.1       matt      522:        if (msr & PSL_EE)
                    523:                MTMSR(msr & ~PSL_EE);
                    524:        return msr;
                    525: }
                    526:
                    527: static void
1.2       matt      528: pmap_interrupts_restore(register_t msr)
1.1       matt      529: {
                    530:        if (msr & PSL_EE)
                    531:                MTMSR(msr);
                    532: }
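                         /*
                          * The usual pattern brackets page table updates, roughly:
                          *
                          *	register_t msr = pmap_interrupts_off();
                          *	... update the PTE/PVO state ...
                          *	pmap_interrupts_restore(msr);
                          */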
                    533:
1.35      perry     534: static inline u_int32_t
1.1       matt      535: mfrtcltbl(void)
                    536: {
1.55      garbled   537: #ifdef PPC_OEA601
1.1       matt      538:        if ((MFPVR() >> 16) == MPC601)
                    539:                return (mfrtcl() >> 7);
                    540:        else
1.55      garbled   541: #endif
1.1       matt      542:                return (mftbl());
                    543: }
                    544:
                    545: /*
                    546:  * These small routines may have to be replaced,
                    547:  * if/when we support processors other than the 604.
                    548:  */
                    549:
                    550: void
                    551: tlbia(void)
                    552: {
1.47      macallan  553:        char *i;
1.1       matt      554:
                    555:        SYNC();
1.53      garbled   556: #if defined(PMAP_OEA)
1.1       matt      557:        /*
                    558:         * Why not use "tlbia"?  Because not all processors implement it.
                    559:         *
1.20      wiz       560:         * This needs to be a per-CPU callback to do the appropriate thing
1.1       matt      561:         * for the CPU. XXX
                    562:         */
1.47      macallan  563:        for (i = 0; i < (char *)0x00040000; i += 0x00001000) {
1.1       matt      564:                TLBIE(i);
                    565:                EIEIO();
                    566:                SYNC();
                    567:        }
1.53      garbled   568: #elif defined (PMAP_OEA64) || defined (PMAP_OEA64_BRIDGE)
1.38      sanjayl   569:        /* This is specific to the 970; see 970UM v1.6, p. 140. */
1.51      garbled   570:        for (i = 0; i <= (char *)0xFF000; i += 0x00001000) {
1.38      sanjayl   571:                TLBIEL(i);
                    572:                EIEIO();
                    573:                SYNC();
                    574:        }
                    575: #endif
1.1       matt      576:        TLBSYNC();
                    577:        SYNC();
                    578: }
                    579:
1.35      perry     580: static inline register_t
1.2       matt      581: va_to_vsid(const struct pmap *pm, vaddr_t addr)
1.1       matt      582: {
1.18      matt      583:        /*
1.102     thorpej   584:         * Rather than searching the STE groups for the VSID or extracting
                    585:         * it from the SR, we know how we generate that from the ESID and
                    586:         * so do that.
                    587:         *
                    588:         * This makes the code the same for OEA and OEA64, and also allows
                    589:         * us to generate a correct-for-that-address-space VSID even if the
                    590:         * pmap contains a different SR value at any given moment (e.g.
                    591:         * kernel pmap on a 601 that is using I/O segments).
1.18      matt      592:         */
                    593:        return VSID_MAKE(addr >> ADDR_SR_SHFT, pm->pm_vsid) >> SR_VSID_SHFT;
1.1       matt      594: }
                    595:
1.35      perry     596: static inline register_t
1.2       matt      597: va_to_pteg(const struct pmap *pm, vaddr_t addr)
1.1       matt      598: {
1.2       matt      599:        register_t hash;
                    600:
                    601:        hash = va_to_vsid(pm, addr) ^ ((addr & ADDR_PIDX) >> ADDR_PIDX_SHFT);
1.1       matt      602:        return hash & pmap_pteg_mask;
                    603: }
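                         /*
                          * This is the architected primary hash: the low-order VSID bits
                          * XORed with the page index within the segment, truncated to the
                          * hash table size.  The secondary hash used elsewhere is simply
                          * (primary ^ pmap_pteg_mask).
                          */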
                    604:
                    605: #if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB)
                    606: /*
                    607:  * Given a PTE in the page table, calculate the VADDR that hashes to it.
                    608:  * The only bit of magic is that the top 4 bits of the address don't
                    609:  * technically exist in the PTE.  But we know we reserved 4 bits of the
                    610:  * VSID for it so that's how we get it.
                    611:  */
                    612: static vaddr_t
1.2       matt      613: pmap_pte_to_va(volatile const struct pte *pt)
1.1       matt      614: {
                    615:        vaddr_t va;
                    616:        uintptr_t ptaddr = (uintptr_t) pt;
                    617:
                    618:        if (pt->pte_hi & PTE_HID)
1.2       matt      619:                ptaddr ^= (pmap_pteg_mask * sizeof(struct pteg));
1.1       matt      620:
1.18      matt      621:        /* PPC Bits 10-19  PPC64 Bits 42-51 */
1.53      garbled   622: #if defined(PMAP_OEA)
1.4       matt      623:        va = ((pt->pte_hi >> PTE_VSID_SHFT) ^ (ptaddr / sizeof(struct pteg))) & 0x3ff;
1.53      garbled   624: #elif defined (PMAP_OEA64) || defined (PMAP_OEA64_BRIDGE)
1.38      sanjayl   625:        va = ((pt->pte_hi >> PTE_VSID_SHFT) ^ (ptaddr / sizeof(struct pteg))) & 0x7ff;
                    626: #endif
1.1       matt      627:        va <<= ADDR_PIDX_SHFT;
                    628:
1.18      matt      629:        /* PPC Bits 4-9  PPC64 Bits 36-41 */
1.1       matt      630:        va |= (pt->pte_hi & PTE_API) << ADDR_API_SHFT;
                    631:
1.53      garbled   632: #if defined(PMAP_OEA64)
1.18      matt      633:        /* PPC64 Bits 0-35 */
                    634:        /* va |= VSID_TO_SR(pt->pte_hi >> PTE_VSID_SHFT) << ADDR_SR_SHFT; */
1.53      garbled   635: #elif defined(PMAP_OEA) || defined(PMAP_OEA64_BRIDGE)
1.1       matt      636:        /* PPC Bits 0-3 */
                    637:        va |= VSID_TO_SR(pt->pte_hi >> PTE_VSID_SHFT) << ADDR_SR_SHFT;
1.18      matt      638: #endif
1.1       matt      639:
                    640:        return va;
                    641: }
                    642: #endif
                    643:
1.35      perry     644: static inline struct pvo_head *
1.1       matt      645: pa_to_pvoh(paddr_t pa, struct vm_page **pg_p)
                    646: {
                    647:        struct vm_page *pg;
1.72      uebayasi  648:        struct vm_page_md *md;
1.108     riastrad  649:        struct pmap_page *pp;
1.1       matt      650:
                    651:        pg = PHYS_TO_VM_PAGE(pa);
                    652:        if (pg_p != NULL)
                    653:                *pg_p = pg;
1.108     riastrad  654:        if (pg == NULL) {
                    655:                if ((pp = pmap_pv_tracked(pa)) != NULL)
                    656:                        return &pp->pp_pvoh;
1.107     chs       657:                return NULL;
1.108     riastrad  658:        }
1.72      uebayasi  659:        md = VM_PAGE_TO_MD(pg);
                    660:        return &md->mdpg_pvoh;
1.1       matt      661: }
                    662:
1.35      perry     663: static inline struct pvo_head *
1.1       matt      664: vm_page_to_pvoh(struct vm_page *pg)
                    665: {
1.72      uebayasi  666:        struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
                    667:
                    668:        return &md->mdpg_pvoh;
1.1       matt      669: }
                    670:
1.108     riastrad  671: static inline void
                    672: pmap_pp_attr_clear(struct pmap_page *pp, int ptebit)
                    673: {
                    674:
                    675:        pp->pp_attrs &= ~ptebit;
                    676: }
1.1       matt      677:
1.35      perry     678: static inline void
1.1       matt      679: pmap_attr_clear(struct vm_page *pg, int ptebit)
                    680: {
1.72      uebayasi  681:        struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
                    682:
1.108     riastrad  683:        pmap_pp_attr_clear(&md->mdpg_pp, ptebit);
                    684: }
                    685:
                    686: static inline int
                    687: pmap_pp_attr_fetch(struct pmap_page *pp)
                    688: {
                    689:
                    690:        return pp->pp_attrs;
1.1       matt      691: }
                    692:
1.35      perry     693: static inline int
1.1       matt      694: pmap_attr_fetch(struct vm_page *pg)
                    695: {
1.72      uebayasi  696:        struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
                    697:
1.108     riastrad  698:        return pmap_pp_attr_fetch(&md->mdpg_pp);
1.1       matt      699: }
                    700:
1.35      perry     701: static inline void
1.1       matt      702: pmap_attr_save(struct vm_page *pg, int ptebit)
                    703: {
1.72      uebayasi  704:        struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
                    705:
                    706:        md->mdpg_attrs |= ptebit;
1.1       matt      707: }
                    708:
1.35      perry     709: static inline int
1.2       matt      710: pmap_pte_compare(const volatile struct pte *pt, const struct pte *pvo_pt)
1.1       matt      711: {
                    712:        if (pt->pte_hi == pvo_pt->pte_hi
                    713: #if 0
                    714:            && ((pt->pte_lo ^ pvo_pt->pte_lo) &
                    715:                ~(PTE_REF|PTE_CHG)) == 0
                    716: #endif
                    717:            )
                    718:                return 1;
                    719:        return 0;
                    720: }
                    721:
1.35      perry     722: static inline void
1.2       matt      723: pmap_pte_create(struct pte *pt, const struct pmap *pm, vaddr_t va, register_t pte_lo)
1.1       matt      724: {
                    725:        /*
                    726:         * Construct the PTE.  Default to IMB initially.  Valid bit
                    727:         * only gets set when the real pte is set in memory.
                    728:         *
                    729:         * Note: Don't set the valid bit for correct operation of tlb update.
                    730:         */
1.53      garbled   731: #if defined(PMAP_OEA)
1.2       matt      732:        pt->pte_hi = (va_to_vsid(pm, va) << PTE_VSID_SHFT)
                    733:            | (((va & ADDR_PIDX) >> (ADDR_API_SHFT - PTE_API_SHFT)) & PTE_API);
1.1       matt      734:        pt->pte_lo = pte_lo;
1.79      matt      735: #elif defined (PMAP_OEA64_BRIDGE) || defined (PMAP_OEA64)
1.38      sanjayl   736:        pt->pte_hi = ((u_int64_t)va_to_vsid(pm, va) << PTE_VSID_SHFT)
                    737:            | (((va & ADDR_PIDX) >> (ADDR_API_SHFT - PTE_API_SHFT)) & PTE_API);
                    738:        pt->pte_lo = (u_int64_t) pte_lo;
1.53      garbled   739: #endif /* PMAP_OEA */
1.1       matt      740: }
                    741:
1.35      perry     742: static inline void
1.2       matt      743: pmap_pte_synch(volatile struct pte *pt, struct pte *pvo_pt)
1.1       matt      744: {
                    745:        pvo_pt->pte_lo |= pt->pte_lo & (PTE_REF|PTE_CHG);
                    746: }
                    747:
1.35      perry     748: static inline void
1.2       matt      749: pmap_pte_clear(volatile struct pte *pt, vaddr_t va, int ptebit)
1.1       matt      750: {
                    751:        /*
                    752:         * As shown in Section 7.6.3.2.3
                    753:         */
                    754:        pt->pte_lo &= ~ptebit;
                    755:        TLBIE(va);
                    756:        SYNC();
                    757:        EIEIO();
                    758:        TLBSYNC();
                    759:        SYNC();
1.57      matt      760: #ifdef MULTIPROCESSOR
                    761:        DCBST(pt);
                    762: #endif
1.1       matt      763: }
                    764:
1.35      perry     765: static inline void
1.2       matt      766: pmap_pte_set(volatile struct pte *pt, struct pte *pvo_pt)
1.1       matt      767: {
                    768: #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
                    769:        if (pvo_pt->pte_hi & PTE_VALID)
                    770:                panic("pte_set: setting an already valid pte %p", pvo_pt);
                    771: #endif
                    772:        pvo_pt->pte_hi |= PTE_VALID;
1.38      sanjayl   773:
1.1       matt      774:        /*
                    775:         * Update the PTE as defined in section 7.6.3.1
                    776:         * Note that the REF/CHG bits are from pvo_pt and thus should
                    777:         * have been saved so this routine can restore them (if desired).
                    778:         */
                    779:        pt->pte_lo = pvo_pt->pte_lo;
                    780:        EIEIO();
                    781:        pt->pte_hi = pvo_pt->pte_hi;
1.38      sanjayl   782:        TLBSYNC();
1.1       matt      783:        SYNC();
1.57      matt      784: #ifdef MULTIPROCESSOR
                    785:        DCBST(pt);
                    786: #endif
1.1       matt      787:        pmap_pte_valid++;
                    788: }
                    789:
1.35      perry     790: static inline void
1.2       matt      791: pmap_pte_unset(volatile struct pte *pt, struct pte *pvo_pt, vaddr_t va)
1.1       matt      792: {
                    793: #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
                    794:        if ((pvo_pt->pte_hi & PTE_VALID) == 0)
                    795:                panic("pte_unset: attempt to unset an inactive pte#1 %p/%p", pvo_pt, pt);
                    796:        if ((pt->pte_hi & PTE_VALID) == 0)
                    797:                panic("pte_unset: attempt to unset an inactive pte#2 %p/%p", pvo_pt, pt);
                    798: #endif
                    799:
                    800:        pvo_pt->pte_hi &= ~PTE_VALID;
                    801:        /*
                    802:         * Force the ref & chg bits back into the PTEs.
                    803:         */
                    804:        SYNC();
                    805:        /*
                    806:         * Invalidate the pte ... (Section 7.6.3.3)
                    807:         */
                    808:        pt->pte_hi &= ~PTE_VALID;
                    809:        SYNC();
                    810:        TLBIE(va);
                    811:        SYNC();
                    812:        EIEIO();
                    813:        TLBSYNC();
                    814:        SYNC();
                    815:        /*
                    816:         * Save the ref & chg bits ...
                    817:         */
                    818:        pmap_pte_synch(pt, pvo_pt);
                    819:        pmap_pte_valid--;
                    820: }
                    821:
1.35      perry     822: static inline void
1.2       matt      823: pmap_pte_change(volatile struct pte *pt, struct pte *pvo_pt, vaddr_t va)
1.1       matt      824: {
                    825:        /*
                    826:         * Invalidate the PTE
                    827:         */
                    828:        pmap_pte_unset(pt, pvo_pt, va);
                    829:        pmap_pte_set(pt, pvo_pt);
                    830: }
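                         /*
                          * Together, pmap_pte_set/unset/change implement the update recipes
                          * from the 7.6.3 sections cited above: write pte_lo first, eieio,
                          * then pte_hi with PTE_VALID; to modify a live PTE, clear PTE_VALID,
                          * sync, invalidate the TLB entry, and only then write the new
                          * contents.
                          */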
                    831:
                    832: /*
                    833:  * Try to insert the PTE @ *pvo_pt into the pmap_pteg_table at ptegidx
                    834:  * (either primary or secondary location).
                    835:  *
                    836:  * Note: both the destination and source PTEs must not have PTE_VALID set.
                    837:  */
                    838:
1.53      garbled   839: static int
1.2       matt      840: pmap_pte_insert(int ptegidx, struct pte *pvo_pt)
1.1       matt      841: {
1.2       matt      842:        volatile struct pte *pt;
1.1       matt      843:        int i;
                    844:
                    845: #if defined(DEBUG)
1.85      matt      846:        DPRINTFN(PTE, "pmap_pte_insert: idx %#x, pte %#" _PRIxpte " %#" _PRIxpte "\n",
                    847:                ptegidx, pvo_pt->pte_hi, pvo_pt->pte_lo);
1.1       matt      848: #endif
                    849:        /*
                    850:         * First try primary hash.
                    851:         */
                    852:        for (pt = pmap_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) {
                    853:                if ((pt->pte_hi & PTE_VALID) == 0) {
                    854:                        pvo_pt->pte_hi &= ~PTE_HID;
                    855:                        pmap_pte_set(pt, pvo_pt);
                    856:                        return i;
                    857:                }
                    858:        }
                    859:
                    860:        /*
                    861:         * Now try secondary hash.
                    862:         */
                    863:        ptegidx ^= pmap_pteg_mask;
                    864:        for (pt = pmap_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) {
                    865:                if ((pt->pte_hi & PTE_VALID) == 0) {
                    866:                        pvo_pt->pte_hi |= PTE_HID;
                    867:                        pmap_pte_set(pt, pvo_pt);
                    868:                        return i;
                    869:                }
                    870:        }
                    871:        return -1;
                    872: }
                    873:
                    874: /*
                    875:  * Spill handler.
                    876:  *
                    877:  * Tries to spill a page table entry from the overflow area.
                    878:  * This runs in either real mode (if dealing with an exception spill)
                    879:  * or virtual mode when dealing with manually spilling one of the
                    880:  * kernel's pte entries.  In either case, interrupts are already
                    881:  * disabled.
                    882:  */
1.14      chs       883:
1.1       matt      884: int
1.44      thorpej   885: pmap_pte_spill(struct pmap *pm, vaddr_t addr, bool exec)
1.1       matt      886: {
                    887:        struct pvo_entry *source_pvo, *victim_pvo, *next_pvo;
                    888:        struct pvo_entry *pvo;
1.15      dyoung    889:        /* XXX: gcc -- vpvoh is always set at either *1* or *2* */
                    890:        struct pvo_tqhead *pvoh, *vpvoh = NULL;
1.1       matt      891:        int ptegidx, i, j;
1.2       matt      892:        volatile struct pteg *pteg;
                    893:        volatile struct pte *pt;
1.1       matt      894:
1.50      ad        895:        PMAP_LOCK();
                    896:
1.2       matt      897:        ptegidx = va_to_pteg(pm, addr);
1.1       matt      898:
                    899:        /*
                    900:         * Have to substitute some entry. Use the primary hash for this.
1.12      matt      901:         * Use the low bits of the timebase as a random generator.  Make
                    902:         * sure we are not picking a kernel pte for replacement.
1.1       matt      903:         */
                    904:        pteg = &pmap_pteg_table[ptegidx];
                    905:        i = MFTB() & 7;
1.12      matt      906:        for (j = 0; j < 8; j++) {
                    907:                pt = &pteg->pt[i];
1.53      garbled   908:                if ((pt->pte_hi & PTE_VALID) == 0)
                    909:                        break;
                    910:                if (VSID_TO_HASH((pt->pte_hi & PTE_VSID) >> PTE_VSID_SHFT)
                    911:                                < PHYSMAP_VSIDBITS)
1.12      matt      912:                        break;
                    913:                i = (i + 1) & 7;
                    914:        }
                    915:        KASSERT(j < 8);
1.1       matt      916:
                    917:        source_pvo = NULL;
                    918:        victim_pvo = NULL;
                    919:        pvoh = &pmap_pvo_table[ptegidx];
                    920:        TAILQ_FOREACH(pvo, pvoh, pvo_olink) {
                    921:
                    922:                /*
                    923:                 * We need to find the pvo entry for this address...
                    924:                 */
                    925:                PMAP_PVO_CHECK(pvo);            /* sanity check */
                    926:
                    927:                /*
                    928:                 * If we haven't found the source and we come to a PVO with
                    929:                 * a valid PTE, then we know we can't find it because all
                    930:                 * evicted PVOs always are first in the list.
                    931:                 */
                    932:                if (source_pvo == NULL && (pvo->pvo_pte.pte_hi & PTE_VALID))
                    933:                        break;
1.2       matt      934:                if (source_pvo == NULL && pm == pvo->pvo_pmap &&
                    935:                    addr == PVO_VADDR(pvo)) {
1.1       matt      936:
                    937:                        /*
                    938:                         * Now we have found the entry to be spilled into the
                    939:                         * pteg.  Attempt to insert it into the page table.
                    940:                         */
                    941:                        j = pmap_pte_insert(ptegidx, &pvo->pvo_pte);
                    942:                        if (j >= 0) {
                    943:                                PVO_PTEGIDX_SET(pvo, j);
                    944:                                PMAP_PVO_CHECK(pvo);    /* sanity check */
1.12      matt      945:                                PVO_WHERE(pvo, SPILL_INSERT);
1.1       matt      946:                                pvo->pvo_pmap->pm_evictions--;
                    947:                                PMAPCOUNT(ptes_spilled);
                    948:                                PMAPCOUNT2(((pvo->pvo_pte.pte_hi & PTE_HID)
                    949:                                    ? pmap_evcnt_ptes_secondary
                    950:                                    : pmap_evcnt_ptes_primary)[j]);
                    951:
                    952:                                /*
                    953:                                 * Since we keep the evicted entries at the
                    954:                                 * front of the PVO list, we need to move this
                    955:                                 * (now resident) PVO after the evicted
                    956:                                 * entries.
                    957:                                 */
                    958:                                next_pvo = TAILQ_NEXT(pvo, pvo_olink);
                    959:
                    960:                                /*
1.5       matt      961:                                 * If we don't have to move (either we were the
                    962:                                 * last entry or the next entry was valid),
1.1       matt      963:                                 * don't change our position.  Otherwise
                    964:                                 * move ourselves to the tail of the queue.
                    965:                                 */
                    966:                                if (next_pvo != NULL &&
                    967:                                    !(next_pvo->pvo_pte.pte_hi & PTE_VALID)) {
                    968:                                        TAILQ_REMOVE(pvoh, pvo, pvo_olink);
                    969:                                        TAILQ_INSERT_TAIL(pvoh, pvo, pvo_olink);
                    970:                                }
1.50      ad        971:                                PMAP_UNLOCK();
1.1       matt      972:                                return 1;
                    973:                        }
                    974:                        source_pvo = pvo;
1.39      matt      975:                        if (exec && !PVO_EXECUTABLE_P(source_pvo)) {
1.96      rin       976:                                PMAP_UNLOCK();
1.14      chs       977:                                return 0;
                    978:                        }
1.1       matt      979:                        if (victim_pvo != NULL)
                    980:                                break;
                    981:                }
                    982:
                    983:                /*
                    984:                 * We also need the pvo entry of the victim we are replacing
                    985:                 * so save the R & C bits of the PTE.
                    986:                 */
                    987:                if ((pt->pte_hi & PTE_HID) == 0 && victim_pvo == NULL &&
                    988:                    pmap_pte_compare(pt, &pvo->pvo_pte)) {
1.15      dyoung    989:                        vpvoh = pvoh;                   /* *1* */
1.1       matt      990:                        victim_pvo = pvo;
                    991:                        if (source_pvo != NULL)
                    992:                                break;
                    993:                }
                    994:        }
                    995:
                    996:        if (source_pvo == NULL) {
                    997:                PMAPCOUNT(ptes_unspilled);
1.50      ad        998:                PMAP_UNLOCK();
1.1       matt      999:                return 0;
                   1000:        }
                   1001:
                   1002:        if (victim_pvo == NULL) {
                   1003:                if ((pt->pte_hi & PTE_HID) == 0)
                   1004:                        panic("pmap_pte_spill: victim p-pte (%p) has "
                   1005:                            "no pvo entry!", pt);
                   1006:
                   1007:                /*
                   1008:                 * If this is a secondary PTE, we need to search
                   1009:                 * its primary pvo bucket for the matching PVO.
                   1010:                 */
1.15      dyoung   1011:                vpvoh = &pmap_pvo_table[ptegidx ^ pmap_pteg_mask]; /* *2* */
1.1       matt     1012:                TAILQ_FOREACH(pvo, vpvoh, pvo_olink) {
                   1013:                        PMAP_PVO_CHECK(pvo);            /* sanity check */
                   1014:
                   1015:                        /*
                   1016:                         * We also need the pvo entry of the victim we are
                   1017:                         * replacing so save the R & C bits of the PTE.
                   1018:                         */
                   1019:                        if (pmap_pte_compare(pt, &pvo->pvo_pte)) {
                   1020:                                victim_pvo = pvo;
                   1021:                                break;
                   1022:                        }
                   1023:                }
                   1024:                if (victim_pvo == NULL)
                   1025:                        panic("pmap_pte_spill: victim s-pte (%p) has "
                   1026:                            "no pvo entry!", pt);
                   1027:        }
                   1028:
                   1029:        /*
1.12      matt     1030:         * The victim should not be a kernel PVO/PTE entry.
                   1031:         */
                   1032:        KASSERT(victim_pvo->pvo_pmap != pmap_kernel());
                   1033:        KASSERT(PVO_PTEGIDX_ISSET(victim_pvo));
                   1034:        KASSERT(PVO_PTEGIDX_GET(victim_pvo) == i);
                   1035:
                   1036:        /*
1.1       matt     1037:         * We are invalidating the TLB entry for the EA we
                    1038:         * are replacing even though it's valid; if we don't,
                   1039:         * we lose any ref/chg bit changes contained in the TLB
                   1040:         * entry.
                   1041:         */
                   1042:        source_pvo->pvo_pte.pte_hi &= ~PTE_HID;
                   1043:
                   1044:        /*
                   1045:         * To enforce the PVO list ordering constraint that all
                   1046:         * evicted entries should come before all valid entries,
                   1047:         * move the source PVO to the tail of its list and the
                   1048:         * victim PVO to the head of its list (which might not be
                   1049:         * the same list, if the victim was using the secondary hash).
                   1050:         */
                   1051:        TAILQ_REMOVE(pvoh, source_pvo, pvo_olink);
                   1052:        TAILQ_INSERT_TAIL(pvoh, source_pvo, pvo_olink);
                   1053:        TAILQ_REMOVE(vpvoh, victim_pvo, pvo_olink);
                   1054:        TAILQ_INSERT_HEAD(vpvoh, victim_pvo, pvo_olink);
                   1055:        pmap_pte_unset(pt, &victim_pvo->pvo_pte, victim_pvo->pvo_vaddr);
                   1056:        pmap_pte_set(pt, &source_pvo->pvo_pte);
                   1057:        victim_pvo->pvo_pmap->pm_evictions++;
                   1058:        source_pvo->pvo_pmap->pm_evictions--;
1.12      matt     1059:        PVO_WHERE(victim_pvo, SPILL_UNSET);
                   1060:        PVO_WHERE(source_pvo, SPILL_SET);
1.1       matt     1061:
                   1062:        PVO_PTEGIDX_CLR(victim_pvo);
                   1063:        PVO_PTEGIDX_SET(source_pvo, i);
                   1064:        PMAPCOUNT2(pmap_evcnt_ptes_primary[i]);
                   1065:        PMAPCOUNT(ptes_spilled);
                   1066:        PMAPCOUNT(ptes_evicted);
                   1067:        PMAPCOUNT(ptes_removed);
                   1068:
                   1069:        PMAP_PVO_CHECK(victim_pvo);
                   1070:        PMAP_PVO_CHECK(source_pvo);
1.50      ad       1071:
                   1072:        PMAP_UNLOCK();
1.1       matt     1073:        return 1;
                   1074: }
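                          /*
                           * Illustrative note (a sketch only, not driver logic): the
                           * primary and secondary PTEGs of a mapping are related by
                           * XOR with pmap_pteg_mask.  Assuming a hypothetical table
                           * of 1024 groups (pmap_pteg_mask == 0x3ff):
                           *
                           *	int pri = 0x123;
                           *	int sec = pri ^ 0x3ff;		(== 0x2dc)
                           *
                           * which is why the victim search above probes
                           * pmap_pvo_table[ptegidx ^ pmap_pteg_mask] when PTE_HID
                           * is set.
                           */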
                   1075:
                   1076: /*
                   1077:  * Restrict given range to physical memory
                   1078:  */
                   1079: void
                   1080: pmap_real_memory(paddr_t *start, psize_t *size)
                   1081: {
                   1082:        struct mem_region *mp;
                   1083:
                   1084:        for (mp = mem; mp->size; mp++) {
                   1085:                if (*start + *size > mp->start
                   1086:                    && *start < mp->start + mp->size) {
                   1087:                        if (*start < mp->start) {
                   1088:                                *size -= mp->start - *start;
                   1089:                                *start = mp->start;
                   1090:                        }
                   1091:                        if (*start + *size > mp->start + mp->size)
                   1092:                                *size = mp->start + mp->size - *start;
                   1093:                        return;
                   1094:                }
                   1095:        }
                   1096:        *size = 0;
                   1097: }
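                          /*
                           * A hedged usage sketch (hypothetical values): on a machine
                           * whose only mem[] region is [0, 0x08000000) (128MB), a
                           * range straddling the end of RAM is clipped in place:
                           *
                           *	paddr_t start = 0x07ff0000;
                           *	psize_t size = 0x00020000;
                           *	pmap_real_memory(&start, &size);
                           *	(start is unchanged, size becomes 0x00010000)
                           *
                           * A range entirely outside physical memory comes back
                           * with size == 0.
                           */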
                   1098:
                   1099: /*
                   1100:  * Initialize anything else for pmap handling.
                   1101:  * Called during vm_init().
                   1102:  */
                   1103: void
                   1104: pmap_init(void)
                   1105: {
                   1106:
                   1107:        pmap_initialized = 1;
                   1108: }
                   1109:
                   1110: /*
1.10      thorpej  1111:  * How much virtual space does the kernel get?
                   1112:  */
                   1113: void
                   1114: pmap_virtual_space(vaddr_t *start, vaddr_t *end)
                   1115: {
                   1116:        /*
                   1117:         * For now, reserve one segment (minus some overhead) for kernel
                   1118:         * virtual memory
                   1119:         */
                   1120:        *start = VM_MIN_KERNEL_ADDRESS;
                   1121:        *end = VM_MAX_KERNEL_ADDRESS;
                   1122: }
                   1123:
                   1124: /*
1.1       matt     1125:  * Allocate, initialize, and return a new physical map.
                   1126:  */
                   1127: pmap_t
                   1128: pmap_create(void)
                   1129: {
                   1130:        pmap_t pm;
1.38      sanjayl  1131:
1.1       matt     1132:        pm = pool_get(&pmap_pool, PR_WAITOK);
1.84      matt     1133:        KASSERT((vaddr_t)pm < VM_MIN_KERNEL_ADDRESS);
1.46      christos 1134:        memset((void *)pm, 0, sizeof *pm);
1.1       matt     1135:        pmap_pinit(pm);
                   1136:
1.85      matt     1137:        DPRINTFN(CREATE, "pmap_create: pm %p:\n"
1.54      mlelstv  1138:            "\t%#" _PRIsr " %#" _PRIsr " %#" _PRIsr " %#" _PRIsr
                   1139:            "    %#" _PRIsr " %#" _PRIsr " %#" _PRIsr " %#" _PRIsr "\n"
                   1140:            "\t%#" _PRIsr " %#" _PRIsr " %#" _PRIsr " %#" _PRIsr
                   1141:            "    %#" _PRIsr " %#" _PRIsr " %#" _PRIsr " %#" _PRIsr "\n",
                   1142:            pm,
                   1143:            pm->pm_sr[0], pm->pm_sr[1],
                   1144:            pm->pm_sr[2], pm->pm_sr[3],
                   1145:            pm->pm_sr[4], pm->pm_sr[5],
                   1146:            pm->pm_sr[6], pm->pm_sr[7],
                   1147:            pm->pm_sr[8], pm->pm_sr[9],
                   1148:            pm->pm_sr[10], pm->pm_sr[11],
                   1149:            pm->pm_sr[12], pm->pm_sr[13],
1.85      matt     1150:            pm->pm_sr[14], pm->pm_sr[15]);
1.1       matt     1151:        return pm;
                   1152: }
                   1153:
                   1154: /*
                   1155:  * Initialize a preallocated and zeroed pmap structure.
                   1156:  */
                   1157: void
                   1158: pmap_pinit(pmap_t pm)
                   1159: {
1.2       matt     1160:        register_t entropy = MFTB();
                   1161:        register_t mask;
                   1162:        int i;
1.1       matt     1163:
                   1164:        /*
                   1165:         * Allocate some segment registers for this pmap.
                   1166:         */
                   1167:        pm->pm_refs = 1;
1.50      ad       1168:        PMAP_LOCK();
1.2       matt     1169:        for (i = 0; i < NPMAPS; i += VSID_NBPW) {
                   1170:                static register_t pmap_vsidcontext;
                   1171:                register_t hash;
                   1172:                unsigned int n;
1.1       matt     1173:
                    1174:                /* Create a new value by multiplying by a prime and adding in
                   1175:                 * entropy from the timebase register.  This is to make the
                   1176:                 * VSID more random so that the PT Hash function collides
                   1177:                 * less often. (note that the prime causes gcc to do shifts
                   1178:                 * instead of a multiply)
                   1179:                 */
                   1180:                pmap_vsidcontext = (pmap_vsidcontext * 0x1105) + entropy;
                   1181:                hash = pmap_vsidcontext & (NPMAPS - 1);
1.23      aymeric  1182:                if (hash == 0) {                /* 0 is special, avoid it */
                   1183:                        entropy += 0xbadf00d;
1.1       matt     1184:                        continue;
1.23      aymeric  1185:                }
1.1       matt     1186:                n = hash >> 5;
1.2       matt     1187:                mask = 1L << (hash & (VSID_NBPW-1));
                   1188:                hash = pmap_vsidcontext;
1.1       matt     1189:                if (pmap_vsid_bitmap[n] & mask) {       /* collision? */
                   1190:                        /* anything free in this bucket? */
1.2       matt     1191:                        if (~pmap_vsid_bitmap[n] == 0) {
1.23      aymeric  1192:                                entropy = hash ^ (hash >> 16);
1.1       matt     1193:                                continue;
                   1194:                        }
                   1195:                        i = ffs(~pmap_vsid_bitmap[n]) - 1;
1.2       matt     1196:                        mask = 1L << i;
                   1197:                        hash &= ~(VSID_NBPW-1);
1.1       matt     1198:                        hash |= i;
                   1199:                }
1.18      matt     1200:                hash &= PTE_VSID >> PTE_VSID_SHFT;
1.1       matt     1201:                pmap_vsid_bitmap[n] |= mask;
1.18      matt     1202:                pm->pm_vsid = hash;
1.53      garbled  1203: #if defined (PMAP_OEA) || defined (PMAP_OEA64_BRIDGE)
1.1       matt     1204:                for (i = 0; i < 16; i++)
1.14      chs      1205:                        pm->pm_sr[i] = VSID_MAKE(i, hash) | SR_PRKEY |
                   1206:                            SR_NOEXEC;
1.18      matt     1207: #endif
1.50      ad       1208:                PMAP_UNLOCK();
1.1       matt     1209:                return;
                   1210:        }
1.50      ad       1211:        PMAP_UNLOCK();
1.1       matt     1212:        panic("pmap_pinit: out of segments");
                   1213: }
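                          /*
                           * Worked example of the VSID bookkeeping above, assuming
                           * VSID_NBPW is 32 (one bitmap word per 32 VSIDs): a
                           * candidate hash of 0x2345 selects bitmap word
                           * n = 0x2345 >> 5 = 0x11a and bit mask = 1 << (0x2345 & 31)
                           * = 1 << 5.  If that bit is already set, the loop either
                           * claims a free bit in the same word via ffs() or retries
                           * with fresh entropy.
                           */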
                   1214:
                   1215: /*
                   1216:  * Add a reference to the given pmap.
                   1217:  */
                   1218: void
                   1219: pmap_reference(pmap_t pm)
                   1220: {
1.50      ad       1221:        atomic_inc_uint(&pm->pm_refs);
1.1       matt     1222: }
                   1223:
                   1224: /*
                   1225:  * Retire the given pmap from service.
                   1226:  * Should only be called if the map contains no valid mappings.
                   1227:  */
                   1228: void
                   1229: pmap_destroy(pmap_t pm)
                   1230: {
1.50      ad       1231:        if (atomic_dec_uint_nv(&pm->pm_refs) == 0) {
1.1       matt     1232:                pmap_release(pm);
                   1233:                pool_put(&pmap_pool, pm);
                   1234:        }
                   1235: }
                   1236:
                   1237: /*
                   1238:  * Release any resources held by the given physical map.
                   1239:  * Called when a pmap initialized by pmap_pinit is being released.
                   1240:  */
                   1241: void
                   1242: pmap_release(pmap_t pm)
                   1243: {
                   1244:        int idx, mask;
1.39      matt     1245:
                   1246:        KASSERT(pm->pm_stats.resident_count == 0);
                   1247:        KASSERT(pm->pm_stats.wired_count == 0);
1.1       matt     1248:
1.50      ad       1249:        PMAP_LOCK();
1.1       matt     1250:        if (pm->pm_sr[0] == 0)
                   1251:                panic("pmap_release");
1.22      aymeric  1252:        idx = pm->pm_vsid & (NPMAPS-1);
1.1       matt     1253:        mask = 1 << (idx % VSID_NBPW);
                   1254:        idx /= VSID_NBPW;
1.22      aymeric  1255:
                   1256:        KASSERT(pmap_vsid_bitmap[idx] & mask);
1.1       matt     1257:        pmap_vsid_bitmap[idx] &= ~mask;
1.50      ad       1258:        PMAP_UNLOCK();
1.1       matt     1259: }
                   1260:
                   1261: /*
                   1262:  * Copy the range specified by src_addr/len
                   1263:  * from the source map to the range dst_addr/len
                   1264:  * in the destination map.
                   1265:  *
                   1266:  * This routine is only advisory and need not do anything.
                   1267:  */
                   1268: void
                   1269: pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vaddr_t dst_addr,
                   1270:        vsize_t len, vaddr_t src_addr)
                   1271: {
                   1272:        PMAPCOUNT(copies);
                   1273: }
                   1274:
                   1275: /*
                   1276:  * Require that all active physical maps contain no
                   1277:  * incorrect entries NOW.
                   1278:  */
                   1279: void
                   1280: pmap_update(struct pmap *pmap)
                   1281: {
                   1282:        PMAPCOUNT(updates);
                   1283:        TLBSYNC();
                   1284: }
                   1285:
1.35      perry    1286: static inline int
1.1       matt     1287: pmap_pvo_pte_index(const struct pvo_entry *pvo, int ptegidx)
                   1288: {
                   1289:        int pteidx;
                   1290:        /*
                   1291:         * We can find the actual pte entry without searching by
                   1292:         * grabbing the PTEG index from 3 unused bits in pte_lo[11:9]
                   1293:         * and by noticing the HID bit.
                   1294:         */
                   1295:        pteidx = ptegidx * 8 + PVO_PTEGIDX_GET(pvo);
                   1296:        if (pvo->pvo_pte.pte_hi & PTE_HID)
                   1297:                pteidx ^= pmap_pteg_mask * 8;
                   1298:        return pteidx;
                   1299: }
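                          /*
                           * Index arithmetic sketch (illustrative values only): with
                           * 8 PTEs per group, a PVO cached in slot 3 of primary group
                           * 0x40 yields pteidx = 0x40 * 8 + 3 = 0x203.  When PTE_HID
                           * is set the entry lives in the alternate group, so the
                           * index is XORed with pmap_pteg_mask * 8 to land in the
                           * secondary PTEG's slots instead.
                           */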
                   1300:
1.2       matt     1301: volatile struct pte *
1.1       matt     1302: pmap_pvo_to_pte(const struct pvo_entry *pvo, int pteidx)
                   1303: {
1.2       matt     1304:        volatile struct pte *pt;
1.1       matt     1305:
                   1306: #if !defined(DIAGNOSTIC) && !defined(DEBUG) && !defined(PMAPCHECK)
                   1307:        if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0)
                   1308:                return NULL;
                   1309: #endif
                   1310:
                   1311:        /*
                    1312:         * If we haven't been supplied the pteidx, calculate it.
                   1313:         */
                   1314:        if (pteidx == -1) {
                   1315:                int ptegidx;
1.2       matt     1316:                ptegidx = va_to_pteg(pvo->pvo_pmap, pvo->pvo_vaddr);
1.1       matt     1317:                pteidx = pmap_pvo_pte_index(pvo, ptegidx);
                   1318:        }
                   1319:
                   1320:        pt = &pmap_pteg_table[pteidx >> 3].pt[pteidx & 7];
                   1321:
                   1322: #if !defined(DIAGNOSTIC) && !defined(DEBUG) && !defined(PMAPCHECK)
                   1323:        return pt;
                   1324: #else
                   1325:        if ((pvo->pvo_pte.pte_hi & PTE_VALID) && !PVO_PTEGIDX_ISSET(pvo)) {
                   1326:                panic("pmap_pvo_to_pte: pvo %p: has valid pte in "
                   1327:                    "pvo but no valid pte index", pvo);
                   1328:        }
                   1329:        if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0 && PVO_PTEGIDX_ISSET(pvo)) {
                   1330:                panic("pmap_pvo_to_pte: pvo %p: has valid pte index in "
                   1331:                    "pvo but no valid pte", pvo);
                   1332:        }
                   1333:
                   1334:        if ((pt->pte_hi ^ (pvo->pvo_pte.pte_hi & ~PTE_VALID)) == PTE_VALID) {
                   1335:                if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0) {
                   1336: #if defined(DEBUG) || defined(PMAPCHECK)
                   1337:                        pmap_pte_print(pt);
                   1338: #endif
                   1339:                        panic("pmap_pvo_to_pte: pvo %p: has valid pte in "
                   1340:                            "pmap_pteg_table %p but invalid in pvo",
                   1341:                            pvo, pt);
                   1342:                }
                   1343:                if (((pt->pte_lo ^ pvo->pvo_pte.pte_lo) & ~(PTE_CHG|PTE_REF)) != 0) {
                   1344: #if defined(DEBUG) || defined(PMAPCHECK)
                   1345:                        pmap_pte_print(pt);
                   1346: #endif
                   1347:                        panic("pmap_pvo_to_pte: pvo %p: pvo pte does "
                   1348:                            "not match pte %p in pmap_pteg_table",
                   1349:                            pvo, pt);
                   1350:                }
                   1351:                return pt;
                   1352:        }
                   1353:
                   1354:        if (pvo->pvo_pte.pte_hi & PTE_VALID) {
                   1355: #if defined(DEBUG) || defined(PMAPCHECK)
                   1356:                pmap_pte_print(pt);
                   1357: #endif
1.12      matt     1358:                panic("pmap_pvo_to_pte: pvo %p: has no matching pte %p in "
1.1       matt     1359:                    "pmap_pteg_table but valid in pvo", pvo, pt);
                   1360:        }
                   1361:        return NULL;
                   1362: #endif /* !(!DIAGNOSTIC && !DEBUG && !PMAPCHECK) */
                   1363: }
                   1364:
                   1365: struct pvo_entry *
                   1366: pmap_pvo_find_va(pmap_t pm, vaddr_t va, int *pteidx_p)
                   1367: {
                   1368:        struct pvo_entry *pvo;
                   1369:        int ptegidx;
                   1370:
                   1371:        va &= ~ADDR_POFF;
1.2       matt     1372:        ptegidx = va_to_pteg(pm, va);
1.1       matt     1373:
                   1374:        TAILQ_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) {
                   1375: #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
                   1376:                if ((uintptr_t) pvo >= SEGMENT_LENGTH)
                   1377:                        panic("pmap_pvo_find_va: invalid pvo %p on "
                   1378:                            "list %#x (%p)", pvo, ptegidx,
                   1379:                             &pmap_pvo_table[ptegidx]);
                   1380: #endif
                   1381:                if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) {
                   1382:                        if (pteidx_p)
                   1383:                                *pteidx_p = pmap_pvo_pte_index(pvo, ptegidx);
                   1384:                        return pvo;
                   1385:                }
                   1386:        }
1.38      sanjayl  1387:        if ((pm == pmap_kernel()) && (va < SEGMENT_LENGTH))
1.54      mlelstv  1388:                panic("%s: returning NULL for %s pmap, va: %#" _PRIxva "\n",
1.53      garbled  1389:                    __func__, (pm == pmap_kernel() ? "kernel" : "user"), va);
1.1       matt     1390:        return NULL;
                   1391: }
                   1392:
                   1393: #if defined(DEBUG) || defined(PMAPCHECK)
                   1394: void
                   1395: pmap_pvo_check(const struct pvo_entry *pvo)
                   1396: {
                   1397:        struct pvo_head *pvo_head;
                   1398:        struct pvo_entry *pvo0;
1.2       matt     1399:        volatile struct pte *pt;
1.1       matt     1400:        int failed = 0;
                   1401:
1.50      ad       1402:        PMAP_LOCK();
                   1403:
1.1       matt     1404:        if ((uintptr_t)(pvo+1) >= SEGMENT_LENGTH)
                   1405:                panic("pmap_pvo_check: pvo %p: invalid address", pvo);
                   1406:
                   1407:        if ((uintptr_t)(pvo->pvo_pmap+1) >= SEGMENT_LENGTH) {
                   1408:                printf("pmap_pvo_check: pvo %p: invalid pmap address %p\n",
                   1409:                    pvo, pvo->pvo_pmap);
                   1410:                failed = 1;
                   1411:        }
                   1412:
                   1413:        if ((uintptr_t)TAILQ_NEXT(pvo, pvo_olink) >= SEGMENT_LENGTH ||
                   1414:            (((uintptr_t)TAILQ_NEXT(pvo, pvo_olink)) & 0x1f) != 0) {
                   1415:                printf("pmap_pvo_check: pvo %p: invalid ovlink address %p\n",
                   1416:                    pvo, TAILQ_NEXT(pvo, pvo_olink));
                   1417:                failed = 1;
                   1418:        }
                   1419:
                   1420:        if ((uintptr_t)LIST_NEXT(pvo, pvo_vlink) >= SEGMENT_LENGTH ||
                   1421:            (((uintptr_t)LIST_NEXT(pvo, pvo_vlink)) & 0x1f) != 0) {
                    1422:                printf("pmap_pvo_check: pvo %p: invalid vlink address %p\n",
                   1423:                    pvo, LIST_NEXT(pvo, pvo_vlink));
                   1424:                failed = 1;
                   1425:        }
                   1426:
1.39      matt     1427:        if (PVO_MANAGED_P(pvo)) {
1.1       matt     1428:                pvo_head = pa_to_pvoh(pvo->pvo_pte.pte_lo & PTE_RPGN, NULL);
1.107     chs      1429:                LIST_FOREACH(pvo0, pvo_head, pvo_vlink) {
                   1430:                        if (pvo0 == pvo)
                   1431:                                break;
                   1432:                }
                   1433:                if (pvo0 == NULL) {
                   1434:                        printf("pmap_pvo_check: pvo %p: not present "
                   1435:                               "on its vlist head %p\n", pvo, pvo_head);
                   1436:                        failed = 1;
                   1437:                }
1.1       matt     1438:        } else {
1.107     chs      1439:                KASSERT(pvo->pvo_vaddr >= VM_MIN_KERNEL_ADDRESS);
                   1440:                if (__predict_false(pvo->pvo_vaddr < VM_MIN_KERNEL_ADDRESS))
1.1       matt     1441:                        failed = 1;
                   1442:        }
                   1443:        if (pvo != pmap_pvo_find_va(pvo->pvo_pmap, pvo->pvo_vaddr, NULL)) {
                   1444:                printf("pmap_pvo_check: pvo %p: not present "
                   1445:                    "on its olist head\n", pvo);
                   1446:                failed = 1;
                   1447:        }
                   1448:        pt = pmap_pvo_to_pte(pvo, -1);
                   1449:        if (pt == NULL) {
                   1450:                if (pvo->pvo_pte.pte_hi & PTE_VALID) {
                   1451:                        printf("pmap_pvo_check: pvo %p: pte_hi VALID but "
                   1452:                            "no PTE\n", pvo);
                   1453:                        failed = 1;
                   1454:                }
                   1455:        } else {
                   1456:                if ((uintptr_t) pt < (uintptr_t) &pmap_pteg_table[0] ||
                   1457:                    (uintptr_t) pt >=
                   1458:                    (uintptr_t) &pmap_pteg_table[pmap_pteg_cnt]) {
                   1459:                        printf("pmap_pvo_check: pvo %p: pte %p not in "
                   1460:                            "pteg table\n", pvo, pt);
                   1461:                        failed = 1;
                   1462:                }
                   1463:                if (((((uintptr_t) pt) >> 3) & 7) != PVO_PTEGIDX_GET(pvo)) {
                    1464:                        printf("pmap_pvo_check: pvo %p: PVO_PTEGIDX does "
                    1465:                            "not match PTE slot\n", pvo);
                   1466:                        failed = 1;
                   1467:                }
                   1468:                if (pvo->pvo_pte.pte_hi != pt->pte_hi) {
                   1469:                        printf("pmap_pvo_check: pvo %p: pte_hi differ: "
1.54      mlelstv  1470:                            "%#" _PRIxpte "/%#" _PRIxpte "\n", pvo,
                   1471:                            pvo->pvo_pte.pte_hi,
                   1472:                            pt->pte_hi);
1.1       matt     1473:                        failed = 1;
                   1474:                }
                   1475:                if (((pvo->pvo_pte.pte_lo ^ pt->pte_lo) &
                   1476:                    (PTE_PP|PTE_WIMG|PTE_RPGN)) != 0) {
                   1477:                        printf("pmap_pvo_check: pvo %p: pte_lo differ: "
1.54      mlelstv  1478:                            "%#" _PRIxpte "/%#" _PRIxpte "\n", pvo,
                   1479:                            (pvo->pvo_pte.pte_lo & (PTE_PP|PTE_WIMG|PTE_RPGN)),
                   1480:                            (pt->pte_lo & (PTE_PP|PTE_WIMG|PTE_RPGN)));
1.1       matt     1481:                        failed = 1;
                   1482:                }
                   1483:                if ((pmap_pte_to_va(pt) ^ PVO_VADDR(pvo)) & 0x0fffffff) {
1.53      garbled  1484:                        printf("pmap_pvo_check: pvo %p: PTE %p derived VA %#" _PRIxva ""
                    1485:                            " doesn't match PVO's VA %#" _PRIxva "\n",
1.1       matt     1486:                            pvo, pt, pmap_pte_to_va(pt), PVO_VADDR(pvo));
                   1487:                        failed = 1;
                   1488:                }
                   1489:                if (failed)
                   1490:                        pmap_pte_print(pt);
                   1491:        }
                   1492:        if (failed)
                   1493:                panic("pmap_pvo_check: pvo %p, pm %p: bugcheck!", pvo,
                   1494:                    pvo->pvo_pmap);
1.50      ad       1495:
                   1496:        PMAP_UNLOCK();
1.1       matt     1497: }
                   1498: #endif /* DEBUG || PMAPCHECK */
                   1499:
                   1500: /*
1.25      chs      1501:  * Search the PVO table looking for a non-wired entry.
                   1502:  * If we find one, remove it and return it.
                   1503:  */
                   1504:
                   1505: struct pvo_entry *
                   1506: pmap_pvo_reclaim(struct pmap *pm)
                   1507: {
                   1508:        struct pvo_tqhead *pvoh;
                   1509:        struct pvo_entry *pvo;
                   1510:        uint32_t idx, endidx;
                   1511:
                   1512:        endidx = pmap_pvo_reclaim_nextidx;
                   1513:        for (idx = (endidx + 1) & pmap_pteg_mask; idx != endidx;
                   1514:             idx = (idx + 1) & pmap_pteg_mask) {
                   1515:                pvoh = &pmap_pvo_table[idx];
                   1516:                TAILQ_FOREACH(pvo, pvoh, pvo_olink) {
1.39      matt     1517:                        if (!PVO_WIRED_P(pvo)) {
1.33      chs      1518:                                pmap_pvo_remove(pvo, -1, NULL);
1.25      chs      1519:                                pmap_pvo_reclaim_nextidx = idx;
1.26      matt     1520:                                PMAPCOUNT(pvos_reclaimed);
1.25      chs      1521:                                return pvo;
                   1522:                        }
                   1523:                }
                   1524:        }
                   1525:        return NULL;
                   1526: }
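                          /*
                           * The reclaim scan above is a clock-style sweep: it resumes
                           * one bucket past where the previous call stopped and wraps
                           * modulo the table size, i.e.
                           *
                           *	idx = (pmap_pvo_reclaim_nextidx + 1) & pmap_pteg_mask;
                           *
                           * so repeated reclaims spread evictions over all buckets
                           * instead of repeatedly raiding the low-numbered ones.
                           */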
                   1527:
                   1528: /*
1.1       matt     1529:  * Enter a mapping; returns 0 on success or ENOMEM on failure.
                   1530:  */
                   1531: int
                   1532: pmap_pvo_enter(pmap_t pm, struct pool *pl, struct pvo_head *pvo_head,
1.2       matt     1533:        vaddr_t va, paddr_t pa, register_t pte_lo, int flags)
1.1       matt     1534: {
                   1535:        struct pvo_entry *pvo;
                   1536:        struct pvo_tqhead *pvoh;
1.2       matt     1537:        register_t msr;
1.1       matt     1538:        int ptegidx;
                   1539:        int i;
                   1540:        int poolflags = PR_NOWAIT;
                   1541:
1.28      chs      1542:        /*
                   1543:         * Compute the PTE Group index.
                   1544:         */
                   1545:        va &= ~ADDR_POFF;
                   1546:        ptegidx = va_to_pteg(pm, va);
                   1547:
                   1548:        msr = pmap_interrupts_off();
                   1549:
1.1       matt     1550: #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
                   1551:        if (pmap_pvo_remove_depth > 0)
                   1552:                panic("pmap_pvo_enter: called while pmap_pvo_remove active!");
                   1553:        if (++pmap_pvo_enter_depth > 1)
                   1554:                panic("pmap_pvo_enter: called recursively!");
                   1555: #endif
                   1556:
                   1557:        /*
                   1558:         * Remove any existing mapping for this page.  Reuse the
                    1559:         * pvo entry if there is a mapping.
                   1560:         */
                   1561:        TAILQ_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) {
                   1562:                if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) {
                   1563: #ifdef DEBUG
                   1564:                        if ((pmapdebug & PMAPDEBUG_PVOENTER) &&
                   1565:                            ((pvo->pvo_pte.pte_lo ^ (pa|pte_lo)) &
                   1566:                            ~(PTE_REF|PTE_CHG)) == 0 &&
                   1567:                           va < VM_MIN_KERNEL_ADDRESS) {
1.56      phx      1568:                                printf("pmap_pvo_enter: pvo %p: dup %#" _PRIxpte "/%#" _PRIxpa "\n",
1.54      mlelstv  1569:                                    pvo, pvo->pvo_pte.pte_lo, pte_lo|pa);
1.56      phx      1570:                                printf("pmap_pvo_enter: pte_hi=%#" _PRIxpte " sr=%#" _PRIsr "\n",
1.54      mlelstv  1571:                                    pvo->pvo_pte.pte_hi,
                   1572:                                    pm->pm_sr[va >> ADDR_SR_SHFT]);
1.1       matt     1573:                                pmap_pte_print(pmap_pvo_to_pte(pvo, -1));
                   1574: #ifdef DDBX
                   1575:                                Debugger();
                   1576: #endif
                   1577:                        }
                   1578: #endif
                   1579:                        PMAPCOUNT(mappings_replaced);
1.33      chs      1580:                        pmap_pvo_remove(pvo, -1, NULL);
1.1       matt     1581:                        break;
                   1582:                }
                   1583:        }
                   1584:
                   1585:        /*
                    1586:         * If we aren't overwriting a mapping, try to allocate a new entry.
                   1587:         */
1.26      matt     1588: #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
                   1589:        --pmap_pvo_enter_depth;
                   1590: #endif
1.1       matt     1591:        pmap_interrupts_restore(msr);
1.106     martin   1592:        if (pvo == NULL) {
1.95      chs      1593:                pvo = pool_get(pl, poolflags);
1.33      chs      1594:        }
1.84      matt     1595:        KASSERT((vaddr_t)pvo < VM_MIN_KERNEL_ADDRESS);
1.25      chs      1596:
                   1597: #ifdef DEBUG
                   1598:        /*
                   1599:         * Exercise pmap_pvo_reclaim() a little.
                   1600:         */
                   1601:        if (pvo && (flags & PMAP_CANFAIL) != 0 &&
                   1602:            pmap_pvo_reclaim_debugctr++ > 0x1000 &&
                   1603:            (pmap_pvo_reclaim_debugctr & 0xff) == 0) {
                   1604:                pool_put(pl, pvo);
                   1605:                pvo = NULL;
                   1606:        }
                   1607: #endif
                   1608:
1.1       matt     1609:        msr = pmap_interrupts_off();
1.26      matt     1610: #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
                   1611:        ++pmap_pvo_enter_depth;
                   1612: #endif
1.1       matt     1613:        if (pvo == NULL) {
                   1614:                pvo = pmap_pvo_reclaim(pm);
                   1615:                if (pvo == NULL) {
                   1616:                        if ((flags & PMAP_CANFAIL) == 0)
                   1617:                                panic("pmap_pvo_enter: failed");
                   1618: #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
                   1619:                        pmap_pvo_enter_depth--;
                   1620: #endif
1.26      matt     1621:                        PMAPCOUNT(pvos_failed);
1.1       matt     1622:                        pmap_interrupts_restore(msr);
                   1623:                        return ENOMEM;
                   1624:                }
                   1625:        }
1.25      chs      1626:
1.1       matt     1627:        pvo->pvo_vaddr = va;
                   1628:        pvo->pvo_pmap = pm;
                   1629:        pvo->pvo_vaddr &= ~ADDR_POFF;
                   1630:        if (flags & VM_PROT_EXECUTE) {
                   1631:                PMAPCOUNT(exec_mappings);
1.14      chs      1632:                pvo_set_exec(pvo);
1.1       matt     1633:        }
                   1634:        if (flags & PMAP_WIRED)
                   1635:                pvo->pvo_vaddr |= PVO_WIRED;
1.107     chs      1636:        if (pvo_head != NULL) {
1.1       matt     1637:                pvo->pvo_vaddr |= PVO_MANAGED;
                   1638:                PMAPCOUNT(mappings);
                   1639:        } else {
                   1640:                PMAPCOUNT(kernel_mappings);
                   1641:        }
1.2       matt     1642:        pmap_pte_create(&pvo->pvo_pte, pm, va, pa | pte_lo);
1.1       matt     1643:
1.107     chs      1644:        if (pvo_head != NULL)
                   1645:                LIST_INSERT_HEAD(pvo_head, pvo, pvo_vlink);
1.39      matt     1646:        if (PVO_WIRED_P(pvo))
1.1       matt     1647:                pvo->pvo_pmap->pm_stats.wired_count++;
                   1648:        pvo->pvo_pmap->pm_stats.resident_count++;
                   1649: #if defined(DEBUG)
1.38      sanjayl  1650: /*     if (pm != pmap_kernel() && va < VM_MIN_KERNEL_ADDRESS) */
1.1       matt     1651:                DPRINTFN(PVOENTER,
1.85      matt     1652:                    "pmap_pvo_enter: pvo %p: pm %p va %#" _PRIxva " pa %#" _PRIxpa "\n",
                   1653:                    pvo, pm, va, pa);
1.1       matt     1654: #endif
                   1655:
                   1656:        /*
                   1657:         * We hope this succeeds but it isn't required.
                   1658:         */
                   1659:        pvoh = &pmap_pvo_table[ptegidx];
                   1660:        i = pmap_pte_insert(ptegidx, &pvo->pvo_pte);
                   1661:        if (i >= 0) {
                   1662:                PVO_PTEGIDX_SET(pvo, i);
1.12      matt     1663:                PVO_WHERE(pvo, ENTER_INSERT);
1.1       matt     1664:                PMAPCOUNT2(((pvo->pvo_pte.pte_hi & PTE_HID)
                   1665:                    ? pmap_evcnt_ptes_secondary : pmap_evcnt_ptes_primary)[i]);
                   1666:                TAILQ_INSERT_TAIL(pvoh, pvo, pvo_olink);
1.38      sanjayl  1667:
1.1       matt     1668:        } else {
                   1669:                /*
                   1670:                 * Since we didn't have room for this entry (which makes it
                    1671:                 * an evicted entry), place it at the head of the list.
                   1672:                 */
                   1673:                TAILQ_INSERT_HEAD(pvoh, pvo, pvo_olink);
                   1674:                PMAPCOUNT(ptes_evicted);
                   1675:                pm->pm_evictions++;
1.12      matt     1676:                /*
                   1677:                 * If this is a kernel page, make sure it's active.
                   1678:                 */
                   1679:                if (pm == pmap_kernel()) {
1.45      thorpej  1680:                        i = pmap_pte_spill(pm, va, false);
1.12      matt     1681:                        KASSERT(i);
                   1682:                }
1.1       matt     1683:        }
                   1684:        PMAP_PVO_CHECK(pvo);            /* sanity check */
                   1685: #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
                   1686:        pmap_pvo_enter_depth--;
                   1687: #endif
                   1688:        pmap_interrupts_restore(msr);
                   1689:        return 0;
                   1690: }
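                          /*
                           * Note on the bucket discipline used above: PVOs whose PTE
                           * could not be inserted (evicted entries) are queued at the
                           * head of their pmap_pvo_table bucket, resident PVOs at the
                           * tail.  pmap_pte_spill() relies on this ordering to stop
                           * its source search at the first valid entry it meets.
                           */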
                   1691:
1.53      garbled  1692: static void
1.33      chs      1693: pmap_pvo_remove(struct pvo_entry *pvo, int pteidx, struct pvo_head *pvol)
1.1       matt     1694: {
1.2       matt     1695:        volatile struct pte *pt;
1.1       matt     1696:        int ptegidx;
                   1697:
                   1698: #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
                   1699:        if (++pmap_pvo_remove_depth > 1)
                   1700:                panic("pmap_pvo_remove: called recursively!");
                   1701: #endif
                   1702:
                   1703:        /*
                    1704:         * If we haven't been supplied the pteidx, calculate it.
                   1705:         */
                   1706:        if (pteidx == -1) {
1.2       matt     1707:                ptegidx = va_to_pteg(pvo->pvo_pmap, pvo->pvo_vaddr);
1.1       matt     1708:                pteidx = pmap_pvo_pte_index(pvo, ptegidx);
                   1709:        } else {
                   1710:                ptegidx = pteidx >> 3;
                   1711:                if (pvo->pvo_pte.pte_hi & PTE_HID)
                   1712:                        ptegidx ^= pmap_pteg_mask;
                   1713:        }
                   1714:        PMAP_PVO_CHECK(pvo);            /* sanity check */
                   1715:
                   1716:        /*
                   1717:         * If there is an active pte entry, we need to deactivate it
                   1718:         * (and save the ref & chg bits).
                   1719:         */
                   1720:        pt = pmap_pvo_to_pte(pvo, pteidx);
                   1721:        if (pt != NULL) {
                   1722:                pmap_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
1.12      matt     1723:                PVO_WHERE(pvo, REMOVE);
1.1       matt     1724:                PVO_PTEGIDX_CLR(pvo);
                   1725:                PMAPCOUNT(ptes_removed);
                   1726:        } else {
                   1727:                KASSERT(pvo->pvo_pmap->pm_evictions > 0);
                   1728:                pvo->pvo_pmap->pm_evictions--;
                   1729:        }
                   1730:
                   1731:        /*
1.14      chs      1732:         * Account for executable mappings.
                   1733:         */
1.39      matt     1734:        if (PVO_EXECUTABLE_P(pvo))
1.14      chs      1735:                pvo_clear_exec(pvo);
                   1736:
                   1737:        /*
                   1738:         * Update our statistics.
1.1       matt     1739:         */
                   1740:        pvo->pvo_pmap->pm_stats.resident_count--;
1.39      matt     1741:        if (PVO_WIRED_P(pvo))
1.1       matt     1742:                pvo->pvo_pmap->pm_stats.wired_count--;
                   1743:
                   1744:        /*
1.107     chs      1745:         * If the page is managed:
                   1746:         * Save the REF/CHG bits into their cache.
                   1747:         * Remove the PVO from the P/V list.
1.1       matt     1748:         */
1.39      matt     1749:        if (PVO_MANAGED_P(pvo)) {
1.2       matt     1750:                register_t ptelo = pvo->pvo_pte.pte_lo;
1.1       matt     1751:                struct vm_page *pg = PHYS_TO_VM_PAGE(ptelo & PTE_RPGN);
                   1752:
                   1753:                if (pg != NULL) {
1.37      matt     1754:                        /*
                   1755:                         * If this page was changed and it is mapped exec,
                   1756:                         * invalidate it.
                   1757:                         */
                   1758:                        if ((ptelo & PTE_CHG) &&
                   1759:                            (pmap_attr_fetch(pg) & PTE_EXEC)) {
                   1760:                                struct pvo_head *pvoh = vm_page_to_pvoh(pg);
                   1761:                                if (LIST_EMPTY(pvoh)) {
1.85      matt     1762:                                        DPRINTFN(EXEC, "[pmap_pvo_remove: "
1.53      garbled  1763:                                            "%#" _PRIxpa ": clear-exec]\n",
1.85      matt     1764:                                            VM_PAGE_TO_PHYS(pg));
1.37      matt     1765:                                        pmap_attr_clear(pg, PTE_EXEC);
                   1766:                                        PMAPCOUNT(exec_uncached_pvo_remove);
                   1767:                                } else {
1.85      matt     1768:                                        DPRINTFN(EXEC, "[pmap_pvo_remove: "
1.53      garbled  1769:                                            "%#" _PRIxpa ": syncicache]\n",
1.85      matt     1770:                                            VM_PAGE_TO_PHYS(pg));
1.37      matt     1771:                                        pmap_syncicache(VM_PAGE_TO_PHYS(pg),
                   1772:                                            PAGE_SIZE);
                   1773:                                        PMAPCOUNT(exec_synced_pvo_remove);
                   1774:                                }
                   1775:                        }
                   1776:
1.1       matt     1777:                        pmap_attr_save(pg, ptelo & (PTE_REF|PTE_CHG));
                   1778:                }
1.107     chs      1779:                LIST_REMOVE(pvo, pvo_vlink);
1.1       matt     1780:                PMAPCOUNT(unmappings);
                   1781:        } else {
                   1782:                PMAPCOUNT(kernel_unmappings);
                   1783:        }
                   1784:
                   1785:        /*
1.107     chs      1786:         * Remove the PVO from its list and return it to the pool.
1.1       matt     1787:         */
                   1788:        TAILQ_REMOVE(&pmap_pvo_table[ptegidx], pvo, pvo_olink);
1.33      chs      1789:        if (pvol) {
                   1790:                LIST_INSERT_HEAD(pvol, pvo, pvo_vlink);
1.25      chs      1791:        }
1.1       matt     1792: #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
                   1793:        pmap_pvo_remove_depth--;
                   1794: #endif
                   1795: }
                   1796:
1.33      chs      1797: void
                   1798: pmap_pvo_free(struct pvo_entry *pvo)
                   1799: {
                   1800:
1.106     martin   1801:        pool_put(&pmap_pvo_pool, pvo);
1.33      chs      1802: }
                   1803:
                   1804: void
                   1805: pmap_pvo_free_list(struct pvo_head *pvol)
                   1806: {
                   1807:        struct pvo_entry *pvo, *npvo;
                   1808:
                   1809:        for (pvo = LIST_FIRST(pvol); pvo != NULL; pvo = npvo) {
                   1810:                npvo = LIST_NEXT(pvo, pvo_vlink);
                   1811:                LIST_REMOVE(pvo, pvo_vlink);
                   1812:                pmap_pvo_free(pvo);
                   1813:        }
                   1814: }
                   1815:
1.1       matt     1816: /*
1.14      chs      1817:  * Mark a mapping as executable.
                   1818:  * If this is the first executable mapping in the segment,
                   1819:  * clear the noexec flag.
                   1820:  */
1.53      garbled  1821: static void
1.14      chs      1822: pvo_set_exec(struct pvo_entry *pvo)
                   1823: {
                   1824:        struct pmap *pm = pvo->pvo_pmap;
                   1825:
1.39      matt     1826:        if (pm == pmap_kernel() || PVO_EXECUTABLE_P(pvo)) {
1.14      chs      1827:                return;
                   1828:        }
                   1829:        pvo->pvo_vaddr |= PVO_EXECUTABLE;
1.53      garbled  1830: #if defined (PMAP_OEA) || defined (PMAP_OEA64_BRIDGE)
1.18      matt     1831:        {
                   1832:                int sr = PVO_VADDR(pvo) >> ADDR_SR_SHFT;
                   1833:                if (pm->pm_exec[sr]++ == 0) {
                   1834:                        pm->pm_sr[sr] &= ~SR_NOEXEC;
                   1835:                }
1.14      chs      1836:        }
1.18      matt     1837: #endif
1.14      chs      1838: }
                   1839:
                   1840: /*
                   1841:  * Mark a mapping as non-executable.
                   1842:  * If this was the last executable mapping in the segment,
                   1843:  * set the noexec flag.
                   1844:  */
1.53      garbled  1845: static void
1.14      chs      1846: pvo_clear_exec(struct pvo_entry *pvo)
                   1847: {
                   1848:        struct pmap *pm = pvo->pvo_pmap;
                   1849:
1.39      matt     1850:        if (pm == pmap_kernel() || !PVO_EXECUTABLE_P(pvo)) {
1.14      chs      1851:                return;
                   1852:        }
                   1853:        pvo->pvo_vaddr &= ~PVO_EXECUTABLE;
1.53      garbled  1854: #if defined (PMAP_OEA) || defined (PMAP_OEA64_BRIDGE)
1.18      matt     1855:        {
                   1856:                int sr = PVO_VADDR(pvo) >> ADDR_SR_SHFT;
                   1857:                if (--pm->pm_exec[sr] == 0) {
                   1858:                        pm->pm_sr[sr] |= SR_NOEXEC;
                   1859:                }
1.14      chs      1860:        }
1.18      matt     1861: #endif
1.14      chs      1862: }
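                          /*
                           * Taken together, pvo_set_exec()/pvo_clear_exec() keep a
                           * per-segment count of executable mappings for user pmaps
                           * (the kernel pmap is skipped).  A sketch of the invariant,
                           * assuming the 16 OEA segment registers:
                           *
                           *	sr = PVO_VADDR(pvo) >> ADDR_SR_SHFT;	(0..15)
                           *	pm->pm_exec[sr] counts PVO_EXECUTABLE mappings in sr;
                           *	SR_NOEXEC is set in pm->pm_sr[sr] iff that count is 0.
                           */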
                   1863:
                   1864: /*
1.1       matt     1865:  * Insert physical page at pa into the given pmap at virtual address va.
                   1866:  */
                   1867: int
1.65      cegger   1868: pmap_enter(pmap_t pm, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
1.1       matt     1869: {
                   1870:        struct mem_region *mp;
                   1871:        struct pvo_head *pvo_head;
                   1872:        struct vm_page *pg;
1.2       matt     1873:        register_t pte_lo;
1.1       matt     1874:        int error;
                   1875:        u_int was_exec = 0;
                   1876:
1.50      ad       1877:        PMAP_LOCK();
                   1878:
1.1       matt     1879:        if (__predict_false(!pmap_initialized)) {
1.107     chs      1880:                pvo_head = NULL;
1.1       matt     1881:                pg = NULL;
                   1882:                was_exec = PTE_EXEC;
1.107     chs      1883:
1.1       matt     1884:        } else {
                   1885:                pvo_head = pa_to_pvoh(pa, &pg);
                   1886:        }
                   1887:
                   1888:        DPRINTFN(ENTER,
1.85      matt     1889:            "pmap_enter(%p, %#" _PRIxva ", %#" _PRIxpa ", 0x%x, 0x%x):",
                   1890:            pm, va, pa, prot, flags);
1.1       matt     1891:
                   1892:        /*
                    1893:         * If this is a managed page and it's the first reference to the
                    1894:         * page, clear the execness of the page; otherwise fetch the execness.
                   1895:         */
                   1896:        if (pg != NULL)
                   1897:                was_exec = pmap_attr_fetch(pg) & PTE_EXEC;
                   1898:
1.85      matt     1899:        DPRINTFN(ENTER, " was_exec=%d", was_exec);
1.1       matt     1900:
                   1901:        /*
                   1902:         * Assume the page is cache inhibited and access is guarded unless
                   1903:         * it's in our available memory array.  If it is in the memory array,
                    1904:         * assume it's in memory-coherent memory.
                   1905:         */
1.77      macallan 1906:        if (flags & PMAP_MD_PREFETCHABLE) {
                   1907:                pte_lo = 0;
                   1908:        } else
                   1909:                pte_lo = PTE_G;
                   1910:
1.81      matt     1911:        if ((flags & PMAP_NOCACHE) == 0) {
1.1       matt     1912:                for (mp = mem; mp->size; mp++) {
                   1913:                        if (pa >= mp->start && pa < mp->start + mp->size) {
                   1914:                                pte_lo = PTE_M;
                   1915:                                break;
                   1916:                        }
                   1917:                }
1.87      kiyohara 1918: #ifdef MULTIPROCESSOR
                   1919:                if (((mfpvr() >> 16) & 0xffff) == MPC603e)
                   1920:                        pte_lo = PTE_M;
                   1921: #endif
1.77      macallan 1922:        } else {
                   1923:                pte_lo |= PTE_I;
1.1       matt     1924:        }
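                                  /*
                                   * In short, the WIMG policy chosen above is: PTE_M
                                   * for pages backed by the mem[] array (coherent
                                   * main memory; on MULTIPROCESSOR MPC603e PTE_M is
                                   * forced), PTE_I added under PMAP_NOCACHE, and
                                   * otherwise the initial PTE_G (omitted when
                                   * PMAP_MD_PREFETCHABLE) stands.
                                   */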
                   1925:
                   1926:        if (prot & VM_PROT_WRITE)
                   1927:                pte_lo |= PTE_BW;
                   1928:        else
                   1929:                pte_lo |= PTE_BR;
                   1930:
                   1931:        /*
                   1932:         * If this was in response to a fault, "pre-fault" the PTE's
                   1933:         * changed/referenced bit appropriately.
                   1934:         */
                   1935:        if (flags & VM_PROT_WRITE)
                   1936:                pte_lo |= PTE_CHG;
1.30      chs      1937:        if (flags & VM_PROT_ALL)
1.1       matt     1938:                pte_lo |= PTE_REF;
                   1939:
                   1940:        /*
                   1941:         * We need to know if this page can be executable
                   1942:         */
                   1943:        flags |= (prot & VM_PROT_EXECUTE);
                   1944:
                   1945:        /*
                   1946:         * Record mapping for later back-translation and pte spilling.
                   1947:         * This will overwrite any existing mapping.
                   1948:         */
1.106     martin   1949:        error = pmap_pvo_enter(pm, &pmap_pvo_pool, pvo_head, va, pa, pte_lo, flags);
1.1       matt     1950:
                   1951:        /*
                   1952:         * Flush the real page from the instruction cache if this page is
                   1953:         * mapped executable and cacheable and has not been flushed since
                   1954:         * the last time it was modified.
                   1955:         */
                   1956:        if (error == 0 &&
                   1957:             (flags & VM_PROT_EXECUTE) &&
                   1958:             (pte_lo & PTE_I) == 0 &&
                   1959:            was_exec == 0) {
1.85      matt     1960:                DPRINTFN(ENTER, " %s", "syncicache");
1.1       matt     1961:                PMAPCOUNT(exec_synced);
1.6       thorpej  1962:                pmap_syncicache(pa, PAGE_SIZE);
1.1       matt     1963:                if (pg != NULL) {
                   1964:                        pmap_attr_save(pg, PTE_EXEC);
                   1965:                        PMAPCOUNT(exec_cached);
                   1966: #if defined(DEBUG) || defined(PMAPDEBUG)
                   1967:                        if (pmapdebug & PMAPDEBUG_ENTER)
                   1968:                                printf(" marked-as-exec");
                   1969:                        else if (pmapdebug & PMAPDEBUG_EXEC)
1.53      garbled  1970:                                printf("[pmap_enter: %#" _PRIxpa ": marked-as-exec]\n",
1.34      yamt     1971:                                    VM_PAGE_TO_PHYS(pg));
1.1       matt     1972: #endif
                   1973:                }
                   1974:        }
                   1975:
1.85      matt     1976:        DPRINTFN(ENTER, ": error=%d\n", error);
1.1       matt     1977:
1.50      ad       1978:        PMAP_UNLOCK();
                   1979:
1.1       matt     1980:        return error;
                   1981: }
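
                         /*
                          * A minimal usage sketch (assumed caller, following the usual
                          * pmap_enter(9) contract):
                          *
                          *      paddr_t pa = VM_PAGE_TO_PHYS(pg);
                          *      int error = pmap_enter(pmap_kernel(), va, pa,
                          *          VM_PROT_READ | VM_PROT_WRITE, PMAP_WIRED);
                          *
                          * An error return is only expected when PMAP_CANFAIL was passed
                          * in the flags; without it the allocation path panics instead.
                          */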
                   1982:
                   1983: void
1.68      cegger   1984: pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
1.1       matt     1985: {
                   1986:        struct mem_region *mp;
1.2       matt     1987:        register_t pte_lo;
1.1       matt     1988:        int error;
                   1989:
1.85      matt     1990: #if defined (PMAP_OEA64_BRIDGE) || defined (PMAP_OEA)
1.1       matt     1991:        if (va < VM_MIN_KERNEL_ADDRESS)
                   1992:                panic("pmap_kenter_pa: attempt to enter "
1.53      garbled  1993:                    "non-kernel address %#" _PRIxva "!", va);
1.38      sanjayl  1994: #endif
1.1       matt     1995:
                   1996:        DPRINTFN(KENTER,
1.85      matt     1997:            "pmap_kenter_pa(%#" _PRIxva ",%#" _PRIxpa ",%#x)\n", va, pa, prot);
1.1       matt     1998:
1.50      ad       1999:        PMAP_LOCK();
                   2000:
1.1       matt     2001:        /*
                   2002:         * Assume the page is cache inhibited and access is guarded unless
                   2003:         * it's in our available memory array.  If it is in the memory array,
                   2004:         * assume it's in memory-coherent memory.
                   2005:         */
                   2006:        pte_lo = PTE_IG;
1.81      matt     2007:        if ((flags & PMAP_NOCACHE) == 0) {
1.4       matt     2008:                for (mp = mem; mp->size; mp++) {
                   2009:                        if (pa >= mp->start && pa < mp->start + mp->size) {
                   2010:                                pte_lo = PTE_M;
                   2011:                                break;
                   2012:                        }
1.1       matt     2013:                }
1.87      kiyohara 2014: #ifdef MULTIPROCESSOR
                   2015:                if (((mfpvr() >> 16) & 0xffff) == MPC603e)
                   2016:                        pte_lo = PTE_M;
                   2017: #endif
1.1       matt     2018:        }
                   2019:
                   2020:        if (prot & VM_PROT_WRITE)
                   2021:                pte_lo |= PTE_BW;
                   2022:        else
                   2023:                pte_lo |= PTE_BR;
                   2024:
                   2025:        /*
                   2026:         * We don't care about REF/CHG on PVOs on the unmanaged list.
                   2027:         */
1.106     martin   2028:        error = pmap_pvo_enter(pmap_kernel(), &pmap_pvo_pool,
1.107     chs      2029:            NULL, va, pa, pte_lo, prot|PMAP_WIRED);
1.1       matt     2030:
                   2031:        if (error != 0)
1.53      garbled  2032:                panic("pmap_kenter_pa: failed to enter va %#" _PRIxva " pa %#" _PRIxpa ": %d",
1.1       matt     2033:                      va, pa, error);
1.50      ad       2034:
                   2035:        PMAP_UNLOCK();
1.1       matt     2036: }
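
                         /*
                          * Unlike pmap_enter() above, mappings made here are entered with
                          * a NULL pvo_head, so they never appear on a page's PV list and
                          * no REF/CHG state is tracked for them; and since callers of
                          * pmap_kenter_pa() cannot recover from failure, this routine
                          * panics rather than returning an error.
                          */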
                   2037:
                   2038: void
                   2039: pmap_kremove(vaddr_t va, vsize_t len)
                   2040: {
                   2041:        if (va < VM_MIN_KERNEL_ADDRESS)
                   2042:                panic("pmap_kremove: attempt to remove "
1.53      garbled  2043:                    "non-kernel address %#" _PRIxva "!", va);
1.1       matt     2044:
1.85      matt     2045:        DPRINTFN(KREMOVE, "pmap_kremove(%#" _PRIxva ",%#" _PRIxva ")\n", va, len);
1.1       matt     2046:        pmap_remove(pmap_kernel(), va, va + len);
                   2047: }
                   2048:
                   2049: /*
                   2050:  * Remove the given range of mapping entries.
                   2051:  */
                   2052: void
                   2053: pmap_remove(pmap_t pm, vaddr_t va, vaddr_t endva)
                   2054: {
1.33      chs      2055:        struct pvo_head pvol;
1.1       matt     2056:        struct pvo_entry *pvo;
1.2       matt     2057:        register_t msr;
1.1       matt     2058:        int pteidx;
                   2059:
1.50      ad       2060:        PMAP_LOCK();
1.33      chs      2061:        LIST_INIT(&pvol);
1.14      chs      2062:        msr = pmap_interrupts_off();
1.1       matt     2063:        for (; va < endva; va += PAGE_SIZE) {
                   2064:                pvo = pmap_pvo_find_va(pm, va, &pteidx);
                   2065:                if (pvo != NULL) {
1.33      chs      2066:                        pmap_pvo_remove(pvo, pteidx, &pvol);
1.1       matt     2067:                }
                   2068:        }
1.14      chs      2069:        pmap_interrupts_restore(msr);
1.33      chs      2070:        pmap_pvo_free_list(&pvol);
1.50      ad       2071:        PMAP_UNLOCK();
1.1       matt     2072: }
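
                         /*
                          * The PVOs removed above are collected on the local "pvol" list
                          * and only returned to the pool by pmap_pvo_free_list() once
                          * interrupts have been restored, which keeps the interrupts-off
                          * window as short as possible.
                          */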
                   2073:
1.104     thorpej  2074: #if defined(PMAP_OEA)
                   2075: #ifdef PPC_OEA601
                   2076: bool
                   2077: pmap_extract_ioseg601(vaddr_t va, paddr_t *pap)
                   2078: {
                   2079:        if ((MFPVR() >> 16) != MPC601)
                   2080:                return false;
                   2081:
                   2082:        const register_t sr = iosrtable[va >> ADDR_SR_SHFT];
                   2083:
                   2084:        if (SR601_VALID_P(sr) && SR601_PA_MATCH_P(sr, va)) {
                   2085:                if (pap)
                   2086:                        *pap = va;
                   2087:                return true;
                   2088:        }
                   2089:        return false;
                   2090: }
                   2091:
                   2092: static bool
                   2093: pmap_extract_battable601(vaddr_t va, paddr_t *pap)
                   2094: {
                   2095:        const register_t batu = battable[va >> 23].batu;
                   2096:        const register_t batl = battable[va >> 23].batl;
                   2097:
                   2098:        if (BAT601_VALID_P(batl) && BAT601_VA_MATCH_P(batu, batl, va)) {
                   2099:                const register_t mask =
                   2100:                    (~(batl & BAT601_BSM) << 17) & ~0x1ffffL;
                   2101:                if (pap)
                   2102:                        *pap = (batl & mask) | (va & ~mask);
                   2103:                return true;
                   2104:        }
                   2105:        return false;
                   2106: }
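
                         /*
                          * Worked example of the mask arithmetic above (assuming the
                          * usual 601 BAT encoding): an 8MB block has
                          * (batl & BAT601_BSM) == 0x3f, i.e. 63 further 128KB chunks, so
                          *
                          *      mask = (~0x3fL << 17) & ~0x1ffffL == 0xff800000
                          *
                          * which keeps the block number from batl and passes the low 23
                          * bits of va through as the offset within the block.
                          */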
                   2107: #endif /* PPC_OEA601 */
                   2108:
                   2109: bool
                   2110: pmap_extract_battable(vaddr_t va, paddr_t *pap)
                   2111: {
                   2112: #ifdef PPC_OEA601
                   2113:        if ((MFPVR() >> 16) == MPC601)
                   2114:                return pmap_extract_battable601(va, pap);
                   2115: #endif /* PPC_OEA601 */
                   2116:
                   2117:        if (oeacpufeat & OEACPU_NOBAT)
                   2118:                return false;
                   2119:
                   2120:        const register_t batu = battable[BAT_VA2IDX(va)].batu;
                   2121:
                   2122:        if (BAT_VALID_P(batu, 0) && BAT_VA_MATCH_P(batu, va)) {
                   2123:                const register_t batl = battable[BAT_VA2IDX(va)].batl;
                   2124:                const register_t mask =
                   2125:                    (~(batu & (BAT_XBL|BAT_BL)) << 15) & ~0x1ffffL;
                   2126:                if (pap)
                   2127:                        *pap = (batl & mask) | (va & ~mask);
                   2128:                return true;
                   2129:        }
                   2130:        return false;
                   2131: }
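
                         /*
                          * The same arithmetic for the classic BAT format (ignoring
                          * BAT_XBL): a 256MB block has (batu & BAT_BL) == 0x1ffc, i.e.
                          * BL == 0x7ff in bits 2..12, so
                          *
                          *      mask = (~0x1ffcL << 15) & ~0x1ffffL == 0xf0000000
                          *
                          * leaving the low 28 bits of va as the offset into the block.
                          */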
                   2132: #endif /* PMAP_OEA */
                   2133:
1.1       matt     2134: /*
                   2135:  * Get the physical page address for the given pmap/virtual address.
                   2136:  */
1.44      thorpej  2137: bool
1.1       matt     2138: pmap_extract(pmap_t pm, vaddr_t va, paddr_t *pap)
                   2139: {
                   2140:        struct pvo_entry *pvo;
1.2       matt     2141:        register_t msr;
1.7       matt     2142:
1.50      ad       2143:        PMAP_LOCK();
1.38      sanjayl  2144:
1.7       matt     2145:        /*
1.104     thorpej  2146:         * If this is the kernel pmap, check the battable and I/O
                   2147:         * segments for a hit.  This is done only for regions outside
                   2148:         * VM_MIN_KERNEL_ADDRESS-VM_MAX_KERNEL_ADDRESS.
                   2149:         *
                   2150:         * Be careful when checking VM_MAX_KERNEL_ADDRESS; you don't
                   2151:         * want to wrap around to 0.
1.7       matt     2152:         */
                   2153:        if (pm == pmap_kernel() &&
                   2154:            (va < VM_MIN_KERNEL_ADDRESS ||
                   2155:             (KERNEL2_SR < 15 && VM_MAX_KERNEL_ADDRESS <= va))) {
1.8       matt     2156:                KASSERT((va >> ADDR_SR_SHFT) != USER_SR);
1.104     thorpej  2157: #if defined(PMAP_OEA)
1.55      garbled  2158: #ifdef PPC_OEA601
1.104     thorpej  2159:                if (pmap_extract_ioseg601(va, pap)) {
                   2160:                        PMAP_UNLOCK();
                   2161:                        return true;
                   2162:                }
1.55      garbled  2163: #endif /* PPC_OEA601 */
1.104     thorpej  2164:                if (pmap_extract_battable(va, pap)) {
                   2165:                        PMAP_UNLOCK();
                   2166:                        return true;
1.7       matt     2167:                }
1.104     thorpej  2168:                /*
                   2169:                 * We still check the HTAB...
                   2170:                 */
                   2171: #elif defined(PMAP_OEA64_BRIDGE)
                   2172:                if (va < SEGMENT_LENGTH) {
                   2173:                        if (pap)
                   2174:                                *pap = va;
1.52      garbled  2175:                        PMAP_UNLOCK();
                   2176:                        return true;
1.104     thorpej  2177:                }
                   2178:                /*
                   2179:                 * We still check the HTAB...
                   2180:                 */
                   2181: #elif defined(PMAP_OEA64)
1.38      sanjayl  2182: #error PPC_OEA64 not supported
                   2183: #endif /* PMAP_OEA */
1.7       matt     2184:        }
1.1       matt     2185:
                   2186:        msr = pmap_interrupts_off();
                   2187:        pvo = pmap_pvo_find_va(pm, va & ~ADDR_POFF, NULL);
                   2188:        if (pvo != NULL) {
                   2189:                PMAP_PVO_CHECK(pvo);            /* sanity check */
1.29      briggs   2190:                if (pap)
                   2191:                        *pap = (pvo->pvo_pte.pte_lo & PTE_RPGN)
                   2192:                            | (va & ADDR_POFF);
1.1       matt     2193:        }
                   2194:        pmap_interrupts_restore(msr);
1.50      ad       2195:        PMAP_UNLOCK();
1.1       matt     2196:        return pvo != NULL;
                   2197: }
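
                         /*
                          * Typical call, per the usual pmap_extract(9) pattern (sketch,
                          * not from this file):
                          *
                          *      paddr_t pa;
                          *
                          *      if (!pmap_extract(pmap_kernel(), va, &pa))
                          *              panic("va %#lx is unmapped", va);
                          */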
                   2198:
                   2199: /*
                   2200:  * Lower the protection on the specified range of this pmap.
                   2201:  */
                   2202: void
                   2203: pmap_protect(pmap_t pm, vaddr_t va, vaddr_t endva, vm_prot_t prot)
                   2204: {
                   2205:        struct pvo_entry *pvo;
1.2       matt     2206:        volatile struct pte *pt;
                   2207:        register_t msr;
1.1       matt     2208:        int pteidx;
                   2209:
                   2210:        /*
                   2211:         * Since this routine only downgrades protection, we should
1.14      chs      2212:         * always be called with at least one bit not set.
1.1       matt     2213:         */
1.14      chs      2214:        KASSERT(prot != VM_PROT_ALL);
1.1       matt     2215:
                   2216:        /*
                   2217:         * If there is no protection, this is equivalent to
                   2218:         * removing the range from the pmap.
                   2219:         */
                   2220:        if ((prot & VM_PROT_READ) == 0) {
                   2221:                pmap_remove(pm, va, endva);
                   2222:                return;
                   2223:        }
                   2224:
1.50      ad       2225:        PMAP_LOCK();
                   2226:
1.1       matt     2227:        msr = pmap_interrupts_off();
1.6       thorpej  2228:        for (; va < endva; va += PAGE_SIZE) {
1.1       matt     2229:                pvo = pmap_pvo_find_va(pm, va, &pteidx);
                   2230:                if (pvo == NULL)
                   2231:                        continue;
                   2232:                PMAP_PVO_CHECK(pvo);            /* sanity check */
                   2233:
                   2234:                /*
                   2235:                 * Revoke executable if asked to do so.
                   2236:                 */
                   2237:                if ((prot & VM_PROT_EXECUTE) == 0)
1.14      chs      2238:                        pvo_clear_exec(pvo);
1.1       matt     2239:
                   2240: #if 0
                   2241:                /*
                   2242:                 * If the page is already read-only, no change
                   2243:                 * needs to be made.
                   2244:                 */
                   2245:                if ((pvo->pvo_pte.pte_lo & PTE_PP) == PTE_BR)
                   2246:                        continue;
                   2247: #endif
                   2248:                /*
                   2249:                 * Grab the PTE pointer before we diddle with
                   2250:                 * the cached PTE copy.
                   2251:                 */
                   2252:                pt = pmap_pvo_to_pte(pvo, pteidx);
                   2253:                /*
                   2254:                 * Change the protection of the page.
                   2255:                 */
                   2256:                pvo->pvo_pte.pte_lo &= ~PTE_PP;
                   2257:                pvo->pvo_pte.pte_lo |= PTE_BR;
                   2258:
                   2259:                /*
                   2260:                 * If the PVO is in the page table, update
                   2261:                 * that pte as well.
                   2262:                 */
                   2263:                if (pt != NULL) {
                   2264:                        pmap_pte_change(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
1.12      matt     2265:                        PVO_WHERE(pvo, PMAP_PROTECT);
1.1       matt     2266:                        PMAPCOUNT(ptes_changed);
                   2267:                }
                   2268:
                   2269:                PMAP_PVO_CHECK(pvo);            /* sanity check */
                   2270:        }
                   2271:        pmap_interrupts_restore(msr);
1.50      ad       2272:        PMAP_UNLOCK();
1.1       matt     2273: }
                   2274:
                   2275: void
                   2276: pmap_unwire(pmap_t pm, vaddr_t va)
                   2277: {
                   2278:        struct pvo_entry *pvo;
1.2       matt     2279:        register_t msr;
1.1       matt     2280:
1.50      ad       2281:        PMAP_LOCK();
1.1       matt     2282:        msr = pmap_interrupts_off();
                   2283:        pvo = pmap_pvo_find_va(pm, va, NULL);
                   2284:        if (pvo != NULL) {
1.39      matt     2285:                if (PVO_WIRED_P(pvo)) {
1.1       matt     2286:                        pvo->pvo_vaddr &= ~PVO_WIRED;
                   2287:                        pm->pm_stats.wired_count--;
                   2288:                }
                   2289:                PMAP_PVO_CHECK(pvo);            /* sanity check */
                   2290:        }
                   2291:        pmap_interrupts_restore(msr);
1.50      ad       2292:        PMAP_UNLOCK();
1.1       matt     2293: }
                   2294:
1.108     riastrad 2295: static void
                   2296: pmap_pp_protect(struct pmap_page *pp, paddr_t pa, vm_prot_t prot)
1.1       matt     2297: {
1.33      chs      2298:        struct pvo_head *pvo_head, pvol;
1.1       matt     2299:        struct pvo_entry *pvo, *next_pvo;
1.2       matt     2300:        volatile struct pte *pt;
                   2301:        register_t msr;
1.1       matt     2302:
1.50      ad       2303:        PMAP_LOCK();
                   2304:
1.14      chs      2305:        KASSERT(prot != VM_PROT_ALL);
1.33      chs      2306:        LIST_INIT(&pvol);
1.1       matt     2307:        msr = pmap_interrupts_off();
                   2308:
                   2309:        /*
                   2310:         * When UVM reuses a page, it does a pmap_page_protect with
                   2311:         * VM_PROT_NONE.  At that point, we can clear the exec flag
                   2312:         * since we know the page will have different contents.
                   2313:         */
                   2314:        if ((prot & VM_PROT_READ) == 0) {
1.85      matt     2315:                DPRINTFN(EXEC, "[pmap_page_protect: %#" _PRIxpa ": clear-exec]\n",
1.108     riastrad 2316:                    pa);
                   2317:                if (pmap_pp_attr_fetch(pp) & PTE_EXEC) {
1.1       matt     2318:                        PMAPCOUNT(exec_uncached_page_protect);
1.108     riastrad 2319:                        pmap_pp_attr_clear(pp, PTE_EXEC);
1.1       matt     2320:                }
                   2321:        }
                   2322:
1.108     riastrad 2323:        pvo_head = &pp->pp_pvoh;
1.1       matt     2324:        for (pvo = LIST_FIRST(pvo_head); pvo != NULL; pvo = next_pvo) {
                   2325:                next_pvo = LIST_NEXT(pvo, pvo_vlink);
                   2326:                PMAP_PVO_CHECK(pvo);            /* sanity check */
                   2327:
                   2328:                /*
                   2329:                 * Downgrading to no mapping at all, we just remove the entry.
                   2330:                 */
                   2331:                if ((prot & VM_PROT_READ) == 0) {
1.33      chs      2332:                        pmap_pvo_remove(pvo, -1, &pvol);
1.1       matt     2333:                        continue;
                   2334:                }
                   2335:
                   2336:                /*
                   2337:                 * If EXEC permission is being revoked, just clear the
                   2338:                 * flag in the PVO.
                   2339:                 */
                   2340:                if ((prot & VM_PROT_EXECUTE) == 0)
1.14      chs      2341:                        pvo_clear_exec(pvo);
1.1       matt     2342:
                   2343:                /*
                   2344:                 * If this entry is already RO, don't diddle with the
                   2345:                 * page table.
                   2346:                 */
                   2347:                if ((pvo->pvo_pte.pte_lo & PTE_PP) == PTE_BR) {
                   2348:                        PMAP_PVO_CHECK(pvo);
                   2349:                        continue;
                   2350:                }
                   2351:
                   2352:                /*
                   2353:                 * Grab the PTE before we diddle the bits so
                   2354:                 * pvo_to_pte can verify the pte contents are as
                   2355:                 * expected.
                   2356:                 */
                   2357:                pt = pmap_pvo_to_pte(pvo, -1);
                   2358:                pvo->pvo_pte.pte_lo &= ~PTE_PP;
                   2359:                pvo->pvo_pte.pte_lo |= PTE_BR;
                   2360:                if (pt != NULL) {
                   2361:                        pmap_pte_change(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
1.12      matt     2362:                        PVO_WHERE(pvo, PMAP_PAGE_PROTECT);
1.1       matt     2363:                        PMAPCOUNT(ptes_changed);
                   2364:                }
                   2365:                PMAP_PVO_CHECK(pvo);            /* sanity check */
                   2366:        }
                   2367:        pmap_interrupts_restore(msr);
1.33      chs      2368:        pmap_pvo_free_list(&pvol);
1.50      ad       2369:
                   2370:        PMAP_UNLOCK();
1.1       matt     2371: }
                   2372:
                   2373: /*
1.108     riastrad 2374:  * Lower the protection on the specified physical page.
                   2375:  */
                   2376: void
                   2377: pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
                   2378: {
                   2379:        struct vm_page_md *md = VM_PAGE_TO_MD(pg);
                   2380:
                   2381:        pmap_pp_protect(&md->mdpg_pp, VM_PAGE_TO_PHYS(pg), prot);
                   2382: }
                   2383:
                   2384: /*
                   2385:  * Lower the protection on the physical page at the specified physical
                   2386:  * address, which may not be managed and so may not have a struct
                   2387:  * vm_page.
                   2388:  */
                   2389: void
                   2390: pmap_pv_protect(paddr_t pa, vm_prot_t prot)
                   2391: {
                   2392:        struct pmap_page *pp;
                   2393:
                   2394:        if ((pp = pmap_pv_tracked(pa)) == NULL)
                   2395:                return;
                   2396:        pmap_pp_protect(pp, pa, prot);
                   2397: }
                   2398:
                   2399: /*
1.1       matt     2400:  * Activate the address space for the specified process.  If the process
                   2401:  * is the current process, load the new MMU context.
                   2402:  */
                   2403: void
                   2404: pmap_activate(struct lwp *l)
                   2405: {
1.69      rmind    2406:        struct pcb *pcb = lwp_getpcb(l);
1.1       matt     2407:        pmap_t pmap = l->l_proc->p_vmspace->vm_map.pmap;
                   2408:
                   2409:        DPRINTFN(ACTIVATE,
1.85      matt     2410:            "pmap_activate: lwp %p (curlwp %p)\n", l, curlwp);
1.1       matt     2411:
                   2412:        /*
1.70      skrll    2413:         * XXX Normally performed in cpu_lwp_fork().
1.1       matt     2414:         */
1.13      matt     2415:        pcb->pcb_pm = pmap;
1.17      matt     2416:
                   2417:        /*
                   2418:         * In theory, the SR registers need only be valid on return
                   2419:         * to user space, so we can wait and load them there.
                   2420:         */
                   2421:        if (l == curlwp) {
                   2422:                /* Store pointer to new current pmap. */
                   2423:                curpm = pmap;
                   2424:        }
1.1       matt     2425: }
                   2426:
                   2427: /*
                   2428:  * Deactivate the specified process's address space.
                   2429:  */
                   2430: void
                   2431: pmap_deactivate(struct lwp *l)
                   2432: {
                   2433: }
                   2434:
1.44      thorpej  2435: bool
1.1       matt     2436: pmap_query_bit(struct vm_page *pg, int ptebit)
                   2437: {
                   2438:        struct pvo_entry *pvo;
1.2       matt     2439:        volatile struct pte *pt;
                   2440:        register_t msr;
1.1       matt     2441:
1.50      ad       2442:        PMAP_LOCK();
                   2443:
                   2444:        if (pmap_attr_fetch(pg) & ptebit) {
                   2445:                PMAP_UNLOCK();
1.45      thorpej  2446:                return true;
1.50      ad       2447:        }
1.14      chs      2448:
1.1       matt     2449:        msr = pmap_interrupts_off();
                   2450:        LIST_FOREACH(pvo, vm_page_to_pvoh(pg), pvo_vlink) {
                   2451:                PMAP_PVO_CHECK(pvo);            /* sanity check */
                   2452:                /*
                   2453:                 * See if we saved the bit off.  If so, cache it and
                   2454:                 * return success.
                   2455:                 */
                   2456:                if (pvo->pvo_pte.pte_lo & ptebit) {
                   2457:                        pmap_attr_save(pg, ptebit);
                   2458:                        PMAP_PVO_CHECK(pvo);            /* sanity check */
                   2459:                        pmap_interrupts_restore(msr);
1.50      ad       2460:                        PMAP_UNLOCK();
1.45      thorpej  2461:                        return true;
1.1       matt     2462:                }
                   2463:        }
                   2464:        /*
                   2465:         * No luck, now go through the hard part of looking at the ptes
                   2466:         * themselves.  Sync so any pending REF/CHG bits are flushed
                   2467:         * to the PTEs.
                   2468:         */
                   2469:        SYNC();
                   2470:        LIST_FOREACH(pvo, vm_page_to_pvoh(pg), pvo_vlink) {
                   2471:                PMAP_PVO_CHECK(pvo);            /* sanity check */
                   2472:                /*
                   2473:                 * See if this pvo has a valid PTE.  If so, fetch the
                   2474:                 * REF/CHG bits from the valid PTE.  If the appropriate
                   2475:                 * ptebit is set, cache it and return success.
                   2476:                 */
                   2477:                pt = pmap_pvo_to_pte(pvo, -1);
                   2478:                if (pt != NULL) {
                   2479:                        pmap_pte_synch(pt, &pvo->pvo_pte);
                   2480:                        if (pvo->pvo_pte.pte_lo & ptebit) {
                   2481:                                pmap_attr_save(pg, ptebit);
                   2482:                                PMAP_PVO_CHECK(pvo);            /* sanity check */
                   2483:                                pmap_interrupts_restore(msr);
1.50      ad       2484:                                PMAP_UNLOCK();
1.45      thorpej  2485:                                return true;
1.1       matt     2486:                        }
                   2487:                }
                   2488:        }
                   2489:        pmap_interrupts_restore(msr);
1.50      ad       2490:        PMAP_UNLOCK();
1.45      thorpej  2491:        return false;
1.1       matt     2492: }
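
                         /*
                          * pmap_query_bit() is the machinery behind the MI
                          * pmap_is_referenced()/pmap_is_modified() predicates (PTE_REF
                          * and PTE_CHG respectively); see the wrapper macros in the
                          * powerpc pmap header.
                          */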
                   2493:
1.44      thorpej  2494: bool
1.1       matt     2495: pmap_clear_bit(struct vm_page *pg, int ptebit)
                   2496: {
                   2497:        struct pvo_head *pvoh = vm_page_to_pvoh(pg);
                   2498:        struct pvo_entry *pvo;
1.2       matt     2499:        volatile struct pte *pt;
                   2500:        register_t msr;
1.1       matt     2501:        int rv = 0;
                   2502:
1.50      ad       2503:        PMAP_LOCK();
1.1       matt     2504:        msr = pmap_interrupts_off();
                   2505:
                   2506:        /*
                   2507:         * Fetch the cache value
                   2508:         */
                   2509:        rv |= pmap_attr_fetch(pg);
                   2510:
                   2511:        /*
                   2512:         * Clear the cached value.
                   2513:         */
                   2514:        pmap_attr_clear(pg, ptebit);
                   2515:
                   2516:        /*
                   2517:         * Sync so any pending REF/CHG bits are flushed to the PTEs (so we
                   2518:         * can reset the right ones).  Note that since the pvo entries and
                   2519:         * list heads are accessed via BAT0 and are never placed in the
                   2520:         * page table, we don't have to worry about further accesses setting
                   2521:         * the REF/CHG bits.
                   2522:         */
                   2523:        SYNC();
                   2524:
                   2525:        /*
                   2526:         * For each pvo entry, clear pvo's ptebit.  If this pvo has a
                   2527:         * valid PTE, clear the ptebit from that PTE as well.
                   2528:         */
                   2529:        LIST_FOREACH(pvo, pvoh, pvo_vlink) {
                   2530:                PMAP_PVO_CHECK(pvo);            /* sanity check */
                   2531:                pt = pmap_pvo_to_pte(pvo, -1);
                   2532:                if (pt != NULL) {
                   2533:                        /*
                   2534:                         * Only sync the PTE if the bit we are looking
                   2535:                         * for is not already set.
                   2536:                         */
                   2537:                        if ((pvo->pvo_pte.pte_lo & ptebit) == 0)
                   2538:                                pmap_pte_synch(pt, &pvo->pvo_pte);
                   2539:                        /*
                   2540:                         * If the bit we are looking for was already set,
                   2541:                         * clear that bit in the pte.
                   2542:                         */
                   2543:                        if (pvo->pvo_pte.pte_lo & ptebit)
                   2544:                                pmap_pte_clear(pt, PVO_VADDR(pvo), ptebit);
                   2545:                }
                   2546:                rv |= pvo->pvo_pte.pte_lo & (PTE_CHG|PTE_REF);
                   2547:                pvo->pvo_pte.pte_lo &= ~ptebit;
                   2548:                PMAP_PVO_CHECK(pvo);            /* sanity check */
                   2549:        }
                   2550:        pmap_interrupts_restore(msr);
1.14      chs      2551:
1.1       matt     2552:        /*
                   2553:         * If we are clearing the modify bit and this page was marked EXEC
                   2554:         * and the user of the page thinks the page was modified, then we
                   2555:         * need to clean it from the icache if it's mapped or clear the EXEC
                   2556:         * bit if it's not mapped.  The page itself might not have the CHG
                   2557:         * bit set if the modification was done via DMA to the page.
                   2558:         */
                   2559:        if ((ptebit & PTE_CHG) && (rv & PTE_EXEC)) {
                   2560:                if (LIST_EMPTY(pvoh)) {
1.85      matt     2561:                        DPRINTFN(EXEC, "[pmap_clear_bit: %#" _PRIxpa ": clear-exec]\n",
                   2562:                            VM_PAGE_TO_PHYS(pg));
1.1       matt     2563:                        pmap_attr_clear(pg, PTE_EXEC);
                   2564:                        PMAPCOUNT(exec_uncached_clear_modify);
                   2565:                } else {
1.85      matt     2566:                        DPRINTFN(EXEC, "[pmap_clear_bit: %#" _PRIxpa ": syncicache]\n",
                   2567:                            VM_PAGE_TO_PHYS(pg));
1.34      yamt     2568:                        pmap_syncicache(VM_PAGE_TO_PHYS(pg), PAGE_SIZE);
1.1       matt     2569:                        PMAPCOUNT(exec_synced_clear_modify);
                   2570:                }
                   2571:        }
1.50      ad       2572:        PMAP_UNLOCK();
1.1       matt     2573:        return (rv & ptebit) != 0;
                   2574: }
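
                         /*
                          * Likewise, pmap_clear_bit() backs pmap_clear_reference() and
                          * pmap_clear_modify(); clearing a page's modified state amounts
                          * to
                          *
                          *      (void) pmap_clear_bit(pg, PTE_CHG);
                          */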
                   2575:
                   2576: void
                   2577: pmap_procwr(struct proc *p, vaddr_t va, size_t len)
                   2578: {
                   2579:        struct pvo_entry *pvo;
                   2580:        size_t offset = va & ADDR_POFF;
                   2581:        int s;
                   2582:
1.50      ad       2583:        PMAP_LOCK();
1.1       matt     2584:        s = splvm();
                   2585:        while (len > 0) {
1.6       thorpej  2586:                size_t seglen = PAGE_SIZE - offset;
1.1       matt     2587:                if (seglen > len)
                   2588:                        seglen = len;
                   2589:                pvo = pmap_pvo_find_va(p->p_vmspace->vm_map.pmap, va, NULL);
1.39      matt     2590:                if (pvo != NULL && PVO_EXECUTABLE_P(pvo)) {
1.1       matt     2591:                        pmap_syncicache(
                   2592:                            (pvo->pvo_pte.pte_lo & PTE_RPGN) | offset, seglen);
                   2593:                        PMAP_PVO_CHECK(pvo);
                   2594:                }
                   2595:                va += seglen;
                   2596:                len -= seglen;
                   2597:                offset = 0;
                   2598:        }
                   2599:        splx(s);
1.50      ad       2600:        PMAP_UNLOCK();
1.1       matt     2601: }
                   2602:
                   2603: #if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB)
                   2604: void
1.2       matt     2605: pmap_pte_print(volatile struct pte *pt)
1.1       matt     2606: {
                   2607:        printf("PTE %p: ", pt);
1.38      sanjayl  2608:
1.1       matt     2610:        /* High word: */
1.54      mlelstv  2611:        printf("%#" _PRIxpte ": [", pt->pte_hi);
1.38      sanjayl  2615:
1.1       matt     2616:        printf("%c ", (pt->pte_hi & PTE_VALID) ? 'v' : 'i');
                   2617:        printf("%c ", (pt->pte_hi & PTE_HID) ? 'h' : '-');
1.38      sanjayl  2618:
1.54      mlelstv  2619:        printf("%#" _PRIxpte " %#" _PRIxpte "",
1.38      sanjayl  2620:            (pt->pte_hi &~ PTE_VALID)>>PTE_VSID_SHFT,
                   2621:            pt->pte_hi & PTE_API);
1.54      mlelstv  2623:        printf(" (va %#" _PRIxva ")] ", pmap_pte_to_va(pt));
1.38      sanjayl  2627:
1.1       matt     2628:        /* Low word: */
1.54      mlelstv  2630:        printf(" %#" _PRIxpte ": [", pt->pte_lo);
                   2631:        printf("%#" _PRIxpte "... ", pt->pte_lo >> 12);
1.1       matt     2636:        printf("%c ", (pt->pte_lo & PTE_REF) ? 'r' : 'u');
                   2637:        printf("%c ", (pt->pte_lo & PTE_CHG) ? 'c' : 'n');
                   2638:        printf("%c", (pt->pte_lo & PTE_W) ? 'w' : '.');
                   2639:        printf("%c", (pt->pte_lo & PTE_I) ? 'i' : '.');
                   2640:        printf("%c", (pt->pte_lo & PTE_M) ? 'm' : '.');
                   2641:        printf("%c ", (pt->pte_lo & PTE_G) ? 'g' : '.');
                   2642:        switch (pt->pte_lo & PTE_PP) {
                   2643:        case PTE_BR: printf("br]\n"); break;
                   2644:        case PTE_BW: printf("bw]\n"); break;
                   2645:        case PTE_SO: printf("so]\n"); break;
                   2646:        case PTE_SW: printf("sw]\n"); break;
                   2647:        }
                   2648: }
                   2649: #endif
                   2650:
                   2651: #if defined(DDB)
                   2652: void
                   2653: pmap_pteg_check(void)
                   2654: {
1.2       matt     2655:        volatile struct pte *pt;
1.1       matt     2656:        int i;
                   2657:        int ptegidx;
                   2658:        u_int p_valid = 0;
                   2659:        u_int s_valid = 0;
                   2660:        u_int invalid = 0;
1.38      sanjayl  2661:
1.1       matt     2662:        for (ptegidx = 0; ptegidx < pmap_pteg_cnt; ptegidx++) {
                   2663:                for (pt = pmap_pteg_table[ptegidx].pt, i = 8; --i >= 0; pt++) {
                   2664:                        if (pt->pte_hi & PTE_VALID) {
                   2665:                                if (pt->pte_hi & PTE_HID)
                   2666:                                        s_valid++;
                   2667:                                else
1.1       matt     2669:                                        p_valid++;
1.1       matt     2671:                        } else
                   2672:                                invalid++;
                   2673:                }
                   2674:        }
                   2675:        printf("pteg_check: v(p) %#x (%d), v(s) %#x (%d), i %#x (%d)\n",
                   2676:                p_valid, p_valid, s_valid, s_valid,
                   2677:                invalid, invalid);
                   2678: }
                   2679:
                   2680: void
                   2681: pmap_print_mmuregs(void)
                   2682: {
                   2683:        int i;
1.97      rin      2684: #if defined (PMAP_OEA) || defined (PMAP_OEA64_BRIDGE)
1.1       matt     2685:        u_int cpuvers;
1.90      mrg      2686: #endif
1.53      garbled  2687: #ifndef PMAP_OEA64
1.1       matt     2688:        vaddr_t addr;
1.2       matt     2689:        register_t soft_sr[16];
1.18      matt     2690: #endif
1.97      rin      2691: #if defined (PMAP_OEA) || defined (PMAP_OEA64_BRIDGE)
1.1       matt     2692:        struct bat soft_ibat[4];
                   2693:        struct bat soft_dbat[4];
1.38      sanjayl  2694: #endif
1.53      garbled  2695:        paddr_t sdr1;
1.1       matt     2696:
1.97      rin      2697: #if defined (PMAP_OEA) || defined (PMAP_OEA64_BRIDGE)
1.1       matt     2698:        cpuvers = MFPVR() >> 16;
1.90      mrg      2699: #endif
1.35      perry    2700:        __asm volatile ("mfsdr1 %0" : "=r"(sdr1));
1.53      garbled  2701: #ifndef PMAP_OEA64
1.16      kleink   2702:        addr = 0;
1.27      chs      2703:        for (i = 0; i < 16; i++) {
1.1       matt     2704:                soft_sr[i] = MFSRIN(addr);
                   2705:                addr += (1 << ADDR_SR_SHFT);
                   2706:        }
1.18      matt     2707: #endif
1.1       matt     2708:
1.97      rin      2709: #if defined (PMAP_OEA) || defined (PMAP_OEA64_BRIDGE)
1.1       matt     2710:        /* read iBAT (601: uBAT) registers */
1.35      perry    2711:        __asm volatile ("mfibatu %0,0" : "=r"(soft_ibat[0].batu));
                   2712:        __asm volatile ("mfibatl %0,0" : "=r"(soft_ibat[0].batl));
                   2713:        __asm volatile ("mfibatu %0,1" : "=r"(soft_ibat[1].batu));
                   2714:        __asm volatile ("mfibatl %0,1" : "=r"(soft_ibat[1].batl));
                   2715:        __asm volatile ("mfibatu %0,2" : "=r"(soft_ibat[2].batu));
                   2716:        __asm volatile ("mfibatl %0,2" : "=r"(soft_ibat[2].batl));
                   2717:        __asm volatile ("mfibatu %0,3" : "=r"(soft_ibat[3].batu));
                   2718:        __asm volatile ("mfibatl %0,3" : "=r"(soft_ibat[3].batl));
1.1       matt     2719:
                   2720:
                   2721:        if (cpuvers != MPC601) {
                   2722:                /* read dBAT registers */
1.35      perry    2723:                __asm volatile ("mfdbatu %0,0" : "=r"(soft_dbat[0].batu));
                   2724:                __asm volatile ("mfdbatl %0,0" : "=r"(soft_dbat[0].batl));
                   2725:                __asm volatile ("mfdbatu %0,1" : "=r"(soft_dbat[1].batu));
                   2726:                __asm volatile ("mfdbatl %0,1" : "=r"(soft_dbat[1].batl));
                   2727:                __asm volatile ("mfdbatu %0,2" : "=r"(soft_dbat[2].batu));
                   2728:                __asm volatile ("mfdbatl %0,2" : "=r"(soft_dbat[2].batl));
                   2729:                __asm volatile ("mfdbatu %0,3" : "=r"(soft_dbat[3].batu));
                   2730:                __asm volatile ("mfdbatl %0,3" : "=r"(soft_dbat[3].batl));
1.1       matt     2731:        }
1.38      sanjayl  2732: #endif
1.1       matt     2733:
1.54      mlelstv  2734:        printf("SDR1:\t%#" _PRIxpa "\n", sdr1);
1.53      garbled  2735: #ifndef PMAP_OEA64
1.1       matt     2736:        printf("SR[]:\t");
1.27      chs      2737:        for (i = 0; i < 4; i++)
1.53      garbled  2738:                printf("0x%08lx,   ", soft_sr[i]);
1.1       matt     2739:        printf("\n\t");
1.27      chs      2740:        for ( ; i < 8; i++)
1.53      garbled  2741:                printf("0x%08lx,   ", soft_sr[i]);
1.1       matt     2742:        printf("\n\t");
1.27      chs      2743:        for ( ; i < 12; i++)
1.53      garbled  2744:                printf("0x%08lx,   ", soft_sr[i]);
1.1       matt     2745:        printf("\n\t");
1.27      chs      2746:        for ( ; i < 16; i++)
1.53      garbled  2747:                printf("0x%08lx,   ", soft_sr[i]);
1.1       matt     2748:        printf("\n");
1.18      matt     2749: #endif
1.1       matt     2750:
1.97      rin      2751: #if defined(PMAP_OEA) || defined(PMAP_OEA64_BRIDGE)
1.1       matt     2752:        printf("%cBAT[]:\t", cpuvers == MPC601 ? 'u' : 'i');
1.27      chs      2753:        for (i = 0; i < 4; i++) {
1.2       matt     2754:                printf("0x%08lx 0x%08lx, ",
1.1       matt     2755:                        soft_ibat[i].batu, soft_ibat[i].batl);
                   2756:                if (i == 1)
                   2757:                        printf("\n\t");
                   2758:        }
                   2759:        if (cpuvers != MPC601) {
                   2760:                printf("\ndBAT[]:\t");
1.27      chs      2761:                for (i = 0; i < 4; i++) {
1.2       matt     2762:                        printf("0x%08lx 0x%08lx, ",
1.1       matt     2763:                                soft_dbat[i].batu, soft_dbat[i].batl);
                   2764:                        if (i == 1)
                   2765:                                printf("\n\t");
                   2766:                }
                   2767:        }
                   2768:        printf("\n");
1.53      garbled  2769: #endif /* PMAP_OEA... */
1.1       matt     2770: }
                   2771:
                   2772: void
                   2773: pmap_print_pte(pmap_t pm, vaddr_t va)
                   2774: {
                   2775:        struct pvo_entry *pvo;
1.2       matt     2776:        volatile struct pte *pt;
1.1       matt     2777:        int pteidx;
                   2778:
                   2779:        pvo = pmap_pvo_find_va(pm, va, &pteidx);
                   2780:        if (pvo != NULL) {
                   2781:                pt = pmap_pvo_to_pte(pvo, pteidx);
                   2782:                if (pt != NULL) {
1.53      garbled  2783:                        printf("VA %#" _PRIxva " -> %p -> %s %#" _PRIxpte ", %#" _PRIxpte "\n",
1.38      sanjayl  2784:                                va, pt,
                   2785:                                pt->pte_hi & PTE_HID ? "(sec)" : "(pri)",
                   2786:                                pt->pte_hi, pt->pte_lo);
1.1       matt     2787:                } else {
                   2788:                        printf("No valid PTE found\n");
                   2789:                }
                   2790:        } else {
                   2791:                printf("Address not in pmap\n");
                   2792:        }
                   2793: }
                   2794:
                   2795: void
                   2796: pmap_pteg_dist(void)
                   2797: {
                   2798:        struct pvo_entry *pvo;
                   2799:        int ptegidx;
                   2800:        int depth;
                   2801:        int max_depth = 0;
                   2802:        unsigned int depths[64];
                   2803:
                   2804:        memset(depths, 0, sizeof(depths));
                   2805:        for (ptegidx = 0; ptegidx < pmap_pteg_cnt; ptegidx++) {
                   2806:                depth = 0;
                   2807:                TAILQ_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) {
                   2808:                        depth++;
                   2809:                }
                   2810:                if (depth > max_depth)
                   2811:                        max_depth = depth;
                   2812:                if (depth > 63)
                   2813:                        depth = 63;
                   2814:                depths[depth]++;
                   2815:        }
                   2816:
                   2817:        for (depth = 0; depth < 64; depth++) {
                   2818:                printf("  [%2d]: %8u", depth, depths[depth]);
                   2819:                if ((depth & 3) == 3)
                   2820:                        printf("\n");
                   2821:                if (depth == max_depth)
                   2822:                        break;
                   2823:        }
                   2824:        if ((depth & 3) != 3)
                   2825:                printf("\n");
                   2826:        printf("Max depth found was %d\n", max_depth);
                   2827: }
                   2828: #endif /* DDB */
                   2829:
                   2830: #if defined(PMAPCHECK) || defined(DEBUG)
                   2831: void
                   2832: pmap_pvo_verify(void)
                   2833: {
                   2834:        int ptegidx;
                   2835:        int s;
                   2836:
                   2837:        s = splvm();
                   2838:        for (ptegidx = 0; ptegidx < pmap_pteg_cnt; ptegidx++) {
                   2839:                struct pvo_entry *pvo;
                   2840:                TAILQ_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) {
                   2841:                        if ((uintptr_t) pvo >= SEGMENT_LENGTH)
                   2842:                                panic("pmap_pvo_verify: invalid pvo %p "
                   2843:                                    "on list %#x", pvo, ptegidx);
                   2844:                        pmap_pvo_check(pvo);
                   2845:                }
                   2846:        }
                   2847:        splx(s);
                   2848: }
                   2849: #endif /* PMAPCHECK || DEBUG */
                   2850:
                   2851: void *
1.106     martin   2852: pmap_pool_alloc(struct pool *pp, int flags)
1.1       matt     2853: {
                   2854:        struct pvo_page *pvop;
1.106     martin   2855:        struct vm_page *pg;
1.1       matt     2856:
1.50      ad       2857:        if (uvm.page_init_done != true) {
                   2858:                return (void *) uvm_pageboot_alloc(PAGE_SIZE);
                   2859:        }
                   2860:
                   2861:        PMAP_LOCK();
1.106     martin   2862:        pvop = SIMPLEQ_FIRST(&pmap_pvop_head);
1.1       matt     2863:        if (pvop != NULL) {
1.106     martin   2864:                pmap_pvop_free--;
                   2865:                SIMPLEQ_REMOVE_HEAD(&pmap_pvop_head, pvop_link);
1.50      ad       2866:                PMAP_UNLOCK();
1.1       matt     2867:                return pvop;
                   2868:        }
1.50      ad       2869:        PMAP_UNLOCK();
1.1       matt     2870:  again:
                   2871:        pg = uvm_pagealloc_strat(NULL, 0, NULL, UVM_PGA_USERESERVE,
                   2872:            UVM_PGA_STRAT_ONLY, VM_FREELIST_FIRST256);
                   2873:        if (__predict_false(pg == NULL)) {
                   2874:                if (flags & PR_WAITOK) {
                   2875:                        uvm_wait("plpg");
                   2876:                        goto again;
                   2877:                } else {
                   2878:                        return NULL;
                   2879:                }
                   2880:        }
1.53      garbled  2881:        KDASSERT(VM_PAGE_TO_PHYS(pg) == (uintptr_t)VM_PAGE_TO_PHYS(pg));
                   2882:        return (void *)(uintptr_t) VM_PAGE_TO_PHYS(pg);
1.1       matt     2883: }
                   2884:
                   2885: void
1.106     martin   2886: pmap_pool_free(struct pool *pp, void *va)
1.1       matt     2887: {
                   2888:        struct pvo_page *pvop;
                   2889:
1.50      ad       2890:        PMAP_LOCK();
1.1       matt     2891:        pvop = va;
1.106     martin   2892:        SIMPLEQ_INSERT_HEAD(&pmap_pvop_head, pvop, pvop_link);
                   2893:        pmap_pvop_free++;
                   2894:        if (pmap_pvop_free > pmap_pvop_maxfree)
                   2895:                pmap_pvop_maxfree = pmap_pvop_free;
1.50      ad       2896:        PMAP_UNLOCK();
1.1       matt     2897: #if 0
                   2898:        uvm_pagefree(PHYS_TO_VM_PAGE((paddr_t) va));
                   2899: #endif
                   2900: }
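
                         /*
                          * Both pool backend routines above traffic in physical addresses
                          * directly: PVO pool pages are allocated from
                          * VM_FREELIST_FIRST256 so they are always reachable through the
                          * kernel's BAT mapping of the first 256MB and never need page
                          * table entries (cf. the remark in pmap_clear_bit() above).
                          */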
                   2901:
                   2902: /*
                   2903:  * This routine is used during bootstrap to steal to-be-managed memory
                   2904:  * (which will then be unmanaged).  We use it to grab memory from the
                   2905:  * first 256MB for our pmap needs, and from above 256MB for other stuff.
                   2906:  */
                   2907: vaddr_t
1.10      thorpej  2908: pmap_steal_memory(vsize_t vsize, vaddr_t *vstartp, vaddr_t *vendp)
1.1       matt     2909: {
                   2910:        vsize_t size;
                   2911:        vaddr_t va;
1.94      cherry   2912:        paddr_t start, end, pa = 0;
                   2913:        int npgs, freelist;
                   2914:        uvm_physseg_t bank;
1.1       matt     2915:
1.45      thorpej  2916:        if (uvm.page_init_done == true)
1.1       matt     2917:                panic("pmap_steal_memory: called _after_ bootstrap");
                   2918:
1.10      thorpej  2919:        *vstartp = VM_MIN_KERNEL_ADDRESS;
                   2920:        *vendp = VM_MAX_KERNEL_ADDRESS;
                   2921:
1.1       matt     2922:        size = round_page(vsize);
                   2923:        npgs = atop(size);
                   2924:
                   2925:        /*
                   2926:         * PA 0 will never be among those given to UVM so we can use it
                   2927:         * to indicate we couldn't steal any memory.
                   2928:         */
1.94      cherry   2929:
                   2930:        for (bank = uvm_physseg_get_first();
                   2931:             uvm_physseg_valid_p(bank);
                   2932:             bank = uvm_physseg_get_next(bank)) {
                   2933:
                   2934:                freelist = uvm_physseg_get_free_list(bank);
                   2935:                start = uvm_physseg_get_start(bank);
                   2936:                end = uvm_physseg_get_end(bank);
                   2937:
                   2938:                if (freelist == VM_FREELIST_FIRST256 &&
                   2939:                    (end - start) >= npgs) {
                   2940:                        pa = ptoa(start);
1.1       matt     2941:                        break;
                   2942:                }
                   2943:        }
                   2944:
                   2945:        if (pa == 0)
                   2946:                panic("pmap_steal_memory: no appropriate memory to steal!");
                   2947:
1.94      cherry   2948:        uvm_physseg_unplug(start, npgs);
1.1       matt     2949:
                   2950:        va = (vaddr_t) pa;
1.46      christos 2951:        memset((void *) va, 0, size);
1.1       matt     2952:        pmap_pages_stolen += npgs;
                   2953: #ifdef DEBUG
                   2954:        if (pmapdebug && npgs > 1) {
                   2955:                u_int cnt = 0;
1.94      cherry   2956:                for (bank = uvm_physseg_get_first();
                   2957:                     uvm_physseg_valid_p(bank);
                   2958:                     bank = uvm_physseg_get_next(bank)) {
                   2959:                        cnt += uvm_physseg_get_avail_end(bank) - uvm_physseg_get_avail_start(bank);
1.73      uebayasi 2960:                }
1.1       matt     2961:                printf("pmap_steal_memory: stole %u (total %u) pages (%u left)\n",
                   2962:                    npgs, pmap_pages_stolen, cnt);
                   2963:        }
                   2964: #endif
                   2965:
                   2966:        return va;
                   2967: }
                   2968:
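/*
 * Illustrative sketch (hypothetical caller, not from this file): the
 * typical consumer is UVM's boot-time allocation path.  Because the
 * stolen pages come from the 1:1-mapped first 256MB, the returned VA
 * equals the PA and the already-zeroed memory is usable immediately:
 *
 *	vaddr_t vstart, vend;
 *	vaddr_t va = pmap_steal_memory(len, &vstart, &vend);
 *
 * ("len" is a made-up size; both pointer arguments must be non-NULL,
 * since the routine unconditionally writes the kernel VA range to them.)
 */
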
                   2969: /*
                   2970:  * Find a chunk of memory with the right size and alignment.
                   2971:  */
1.53      garbled  2972: paddr_t
1.1       matt     2973: pmap_boot_find_memory(psize_t size, psize_t alignment, int at_end)
                   2974: {
                   2975:        struct mem_region *mp;
                   2976:        paddr_t s, e;
                   2977:        int i, j;
                   2978:
                   2979:        size = round_page(size);
                   2980:
                   2981:        DPRINTFN(BOOT,
1.85      matt     2982:            "pmap_boot_find_memory: size=%#" _PRIxpa ", alignment=%#" _PRIxpa ", at_end=%d",
                   2983:            size, alignment, at_end);
1.1       matt     2984:
1.6       thorpej  2985:        if (alignment < PAGE_SIZE || (alignment & (alignment-1)) != 0)
1.54      mlelstv  2986:                panic("pmap_boot_find_memory: invalid alignment %#" _PRIxpa,
1.1       matt     2987:                    alignment);
                   2988:
                   2989:        if (at_end) {
1.6       thorpej  2990:                if (alignment != PAGE_SIZE)
1.1       matt     2991:                        panic("pmap_boot_find_memory: invalid ending "
1.53      garbled  2992:                            "alignment %#" _PRIxpa, alignment);
1.1       matt     2993:
                   2994:                for (mp = &avail[avail_cnt-1]; mp >= avail; mp--) {
                   2995:                        s = mp->start + mp->size - size;
                   2996:                        if (s >= mp->start && mp->size >= size) {
1.85      matt     2997:                                DPRINTFN(BOOT, ": %#" _PRIxpa "\n", s);
1.1       matt     2998:                                DPRINTFN(BOOT,
1.85      matt     2999:                                    "pmap_boot_find_memory: b-avail[%d] start "
                   3000:                                    "%#" _PRIxpa " size %#" _PRIxpa "\n", mp - avail,
                   3001:                                     mp->start, mp->size);
1.1       matt     3002:                                mp->size -= size;
                   3003:                                DPRINTFN(BOOT,
1.85      matt     3004:                                    "pmap_boot_find_memory: a-avail[%d] start "
                   3005:                                    "%#" _PRIxpa " size %#" _PRIxpa "\n", mp - avail,
                   3006:                                     mp->start, mp->size);
1.53      garbled  3007:                                return s;
1.1       matt     3008:                        }
                   3009:                }
                   3010:                panic("pmap_boot_find_memory: no available memory");
                   3011:        }
                   3012:
                   3013:        for (mp = avail, i = 0; i < avail_cnt; i++, mp++) {
                   3014:                s = (mp->start + alignment - 1) & ~(alignment-1);
                   3015:                e = s + size;
                   3016:
                   3017:                /*
                   3018:                 * Does the calculated block fit entirely within the region?
                   3019:                 */
                   3020:                if (s < mp->start || e > mp->start + mp->size)
                   3021:                        continue;
                   3022:
1.85      matt     3023:                DPRINTFN(BOOT, ": %#" _PRIxpa "\n", s);
1.1       matt     3024:                if (s == mp->start) {
                   3025:                        /*
                   3026:                         * If the block starts at the beginning of the region,
                   3027:                         * adjust the size & start. (the region may now be
                   3028:                         * zero in length)
                   3029:                         */
                   3030:                        DPRINTFN(BOOT,
1.85      matt     3031:                            "pmap_boot_find_memory: b-avail[%d] start "
                   3032:                            "%#" _PRIxpa " size %#" _PRIxpa "\n", i, mp->start, mp->size);
1.1       matt     3033:                        mp->start += size;
                   3034:                        mp->size -= size;
                   3035:                        DPRINTFN(BOOT,
1.85      matt     3036:                            "pmap_boot_find_memory: a-avail[%d] start "
                   3037:                            "%#" _PRIxpa " size %#" _PRIxpa "\n", i, mp->start, mp->size);
1.1       matt     3038:                } else if (e == mp->start + mp->size) {
                   3039:                        /*
                   3040:                         * If the block ends at the end of the region,
                   3041:                         * adjust only the size.
                   3042:                         */
                   3043:                        DPRINTFN(BOOT,
1.85      matt     3044:                            "pmap_boot_find_memory: b-avail[%d] start "
                   3045:                            "%#" _PRIxpa " size %#" _PRIxpa "\n", i, mp->start, mp->size);
1.1       matt     3046:                        mp->size -= size;
                   3047:                        DPRINTFN(BOOT,
1.85      matt     3048:                            "pmap_boot_find_memory: a-avail[%d] start "
                   3049:                            "%#" _PRIxpa " size %#" _PRIxpa "\n", i, mp->start, mp->size);
1.1       matt     3050:                } else {
                   3051:                        /*
                   3052:                         * Block is in the middle of the region, so we
                   3053:                         * have to split it in two.
                   3054:                         */
                   3055:                        for (j = avail_cnt; j > i + 1; j--) {
                   3056:                                avail[j] = avail[j-1];
                   3057:                        }
                   3058:                        DPRINTFN(BOOT,
1.85      matt     3059:                            "pmap_boot_find_memory: b-avail[%d] start "
                   3060:                            "%#" _PRIxpa " size %#" _PRIxpa "\n", i, mp->start, mp->size);
1.1       matt     3061:                        mp[1].start = e;
                   3062:                        mp[1].size = mp[0].start + mp[0].size - e;
                   3063:                        mp[0].size = s - mp[0].start;
                   3064:                        avail_cnt++;
                   3065:                        for (; i < avail_cnt; i++) {
                   3066:                                DPRINTFN(BOOT,
1.85      matt     3067:                                    "pmap_boot_find_memory: a-avail[%d] "
                   3068:                                    "start %#" _PRIxpa " size %#" _PRIxpa "\n", i,
                   3069:                                     avail[i].start, avail[i].size);
1.1       matt     3070:                        }
                   3071:                }
1.53      garbled  3072:                KASSERT(s == (uintptr_t) s);
                   3073:                return s;
1.1       matt     3074:        }
                   3075:        panic("pmap_boot_find_memory: not enough memory for "
1.54      mlelstv  3076:            "%#" _PRIxpa "/%#" _PRIxpa " allocation?", size, alignment);
1.1       matt     3077: }
                   3078:
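/*
 * A short worked example (hypothetical numbers) of the power-of-two
 * alignment arithmetic used above.  Rounding a start address up to the
 * next alignment boundary is done with
 *
 *	s = (mp->start + alignment - 1) & ~(alignment - 1);
 *
 * so with mp->start = 0x3456 and alignment = 0x1000 (4KB):
 *	0x3456 + 0xfff = 0x4455, and 0x4455 & ~0xfff = 0x4000.
 * This is why the function panics if alignment is not a power of two.
 */
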
1.38      sanjayl  3079: /* XXXSL: we don't have any BATs to do this; map in Segment 0 1:1 using page tables */
1.53      garbled  3080: #if defined (PMAP_OEA64_BRIDGE)
1.38      sanjayl  3081: int
                   3082: pmap_setup_segment0_map(int use_large_pages, ...)
                   3083: {
1.88      christos 3084:     vaddr_t va, va_end;
1.38      sanjayl  3085:
                   3086:     register_t pte_lo = 0x0;
1.90      mrg      3087:     int ptegidx = 0;
1.38      sanjayl  3088:     struct pte pte;
                   3089:     va_list ap;
                   3090:
                   3091:     /* Coherent + Supervisor RW, no user access */
                   3092:     pte_lo = PTE_M;
                   3093:
                   3094:     /* XXXSL
                   3095:      * Map in the 1st segment 1:1; we'll be careful not to spill kernel
                   3096:      * entries later, since these have to take priority.
                   3097:      */
                   3098:     for (va = 0x0; va < SEGMENT_LENGTH; va += 0x1000) {
                   3099:         ptegidx = va_to_pteg(pmap_kernel(), va);
                   3100:         pmap_pte_create(&pte, pmap_kernel(), va, va | pte_lo);
1.90      mrg      3101:         (void)pmap_pte_insert(ptegidx, &pte);
1.38      sanjayl  3102:     }
                   3103:
                   3104:     va_start(ap, use_large_pages);
                   3105:     while (1) {
                   3106:         paddr_t pa;
                   3107:         size_t size;
                   3108:
                   3109:         va = va_arg(ap, vaddr_t);
                   3110:
                   3111:         if (va == 0)
                   3112:             break;
                   3113:
                   3114:         pa = va_arg(ap, paddr_t);
                   3115:         size = va_arg(ap, size_t);
                   3116:
1.88      christos 3117:         for (va_end = va + size; va < va_end; va += 0x1000, pa += 0x1000) {
1.38      sanjayl  3118: #if 0
1.54      mlelstv  3119:            printf("%s: Inserting: va: %#" _PRIxva ", pa: %#" _PRIxpa "\n", __func__,  va, pa);
1.38      sanjayl  3120: #endif
                   3121:             ptegidx = va_to_pteg(pmap_kernel(), va);
                   3122:             pmap_pte_create(&pte, pmap_kernel(), va, pa | pte_lo);
1.90      mrg      3123:             (void)pmap_pte_insert(ptegidx, &pte);
1.38      sanjayl  3124:         }
                   3125:     }
1.93      dholland 3126:     va_end(ap);
1.38      sanjayl  3127:
                   3128:     TLBSYNC();
                   3129:     SYNC();
                   3130:     return (0);
                   3131: }
1.53      garbled  3132: #endif /* PMAP_OEA64_BRIDGE */
1.38      sanjayl  3133:
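/*
 * Hypothetical caller sketch (values are illustrative only) for the
 * variadic interface above: (va, pa, size) triples follow the flags
 * argument and are consumed until a zero VA terminates the list.
 *
 *	pmap_setup_segment0_map(0,
 *	    (vaddr_t)0xf0000000, (paddr_t)0xf0000000, (size_t)0x4000,
 *	    (vaddr_t)0);
 *
 * Each triple is entered into the page table 4KB at a time, just like
 * the 1:1 segment 0 mapping.
 */
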
1.1       matt     3134: /*
1.99      thorpej  3135:  * Set up the bottom level of the data structures necessary for the kernel
                   3136:  * to manage memory.  MMU hardware is programmed in pmap_bootstrap2().
1.1       matt     3137:  */
                   3138: void
1.99      thorpej  3139: pmap_bootstrap1(paddr_t kernelstart, paddr_t kernelend)
1.1       matt     3140: {
                   3141:        struct mem_region *mp, tmp;
                   3142:        paddr_t s, e;
                   3143:        psize_t size;
                   3144:        int i, j;
                   3145:
                   3146:        /*
                   3147:         * Get memory.
                   3148:         */
                   3149:        mem_regions(&mem, &avail);
                   3150: #if defined(DEBUG)
                   3151:        if (pmapdebug & PMAPDEBUG_BOOT) {
                   3152:                printf("pmap_bootstrap: memory configuration:\n");
                   3153:                for (mp = mem; mp->size; mp++) {
1.54      mlelstv  3154:                        printf("pmap_bootstrap: mem start %#" _PRIxpa " size %#" _PRIxpa "\n",
1.1       matt     3155:                                mp->start, mp->size);
                   3156:                }
                   3157:                for (mp = avail; mp->size; mp++) {
1.54      mlelstv  3158:                        printf("pmap_bootstrap: avail start %#" _PRIxpa " size %#" _PRIxpa "\n",
1.1       matt     3159:                                mp->start, mp->size);
                   3160:                }
                   3161:        }
                   3162: #endif
                   3163:
                   3164:        /*
                   3165:         * Find out how much physical memory we have and in how many chunks.
                   3166:         */
                   3167:        for (mem_cnt = 0, mp = mem; mp->size; mp++) {
                   3168:                if (mp->start >= pmap_memlimit)
                   3169:                        continue;
                   3170:                if (mp->start + mp->size > pmap_memlimit) {
                   3171:                        size = pmap_memlimit - mp->start;
                   3172:                        physmem += btoc(size);
                   3173:                } else {
                   3174:                        physmem += btoc(mp->size);
                   3175:                }
                   3176:                mem_cnt++;
                   3177:        }
                   3178:
                   3179:        /*
                   3180:         * Count the number of available entries.
                   3181:         */
                   3182:        for (avail_cnt = 0, mp = avail; mp->size; mp++)
                   3183:                avail_cnt++;
                   3184:
                   3185:        /*
                   3186:         * Page align all regions.
                   3187:         */
                   3188:        kernelstart = trunc_page(kernelstart);
                   3189:        kernelend = round_page(kernelend);
                   3190:        for (mp = avail, i = 0; i < avail_cnt; i++, mp++) {
                   3191:                s = round_page(mp->start);
                   3192:                mp->size -= (s - mp->start);
                   3193:                mp->size = trunc_page(mp->size);
                   3194:                mp->start = s;
                   3195:                e = mp->start + mp->size;
                   3196:
                   3197:                DPRINTFN(BOOT,
1.85      matt     3198:                    "pmap_bootstrap: b-avail[%d] start %#" _PRIxpa " size %#" _PRIxpa "\n",
                   3199:                    i, mp->start, mp->size);
1.1       matt     3200:
                   3201:                /*
                   3202:                 * Don't allow the end to run beyond our artificial limit
                   3203:                 */
                   3204:                if (e > pmap_memlimit)
                   3205:                        e = pmap_memlimit;
                   3206:
                   3207:                /*
                   3208:                 * Is this region empty or strange?  Skip it.
                   3209:                 */
                   3210:                if (e <= s) {
                   3211:                        mp->start = 0;
                   3212:                        mp->size = 0;
                   3213:                        continue;
                   3214:                }
                   3215:
                   3216:                /*
                   3217:                 * Does this overlap the beginning of the kernel?
                   3218:                 *   Does it extend past the end of the kernel?
                   3219:                 */
                   3220:                else if (s < kernelstart && e > kernelstart) {
                   3221:                        if (e > kernelend) {
                   3222:                                avail[avail_cnt].start = kernelend;
                   3223:                                avail[avail_cnt].size = e - kernelend;
                   3224:                                avail_cnt++;
                   3225:                        }
                   3226:                        mp->size = kernelstart - s;
                   3227:                }
                   3228:                /*
                   3229:                 * Check whether this region overlaps the end of the kernel.
                   3230:                 */
                   3231:                else if (s < kernelend && e > kernelend) {
                   3232:                        mp->start = kernelend;
                   3233:                        mp->size = e - kernelend;
                   3234:                }
                   3235:                /*
                   3236:                 * Check whether this region is completely inside the kernel.
                   3237:                 * Nuke it if so.
                   3238:                 */
                   3239:                else if (s >= kernelstart && e <= kernelend) {
                   3240:                        mp->start = 0;
                   3241:                        mp->size = 0;
                   3242:                }
                   3243:                /*
                   3244:                 * If the user imposed a memory limit, enforce it.
                   3245:                 */
                   3246:                else if (s >= pmap_memlimit) {
1.6       thorpej  3247:                        mp->start = -PAGE_SIZE; /* mark it so we know why */
1.1       matt     3248:                        mp->size = 0;
                   3249:                }
                   3250:                else {
                   3251:                        mp->start = s;
                   3252:                        mp->size = e - s;
                   3253:                }
                   3254:                DPRINTFN(BOOT,
1.85      matt     3255:                    "pmap_bootstrap: a-avail[%d] start %#" _PRIxpa " size %#" _PRIxpa "\n",
                   3256:                    i, mp->start, mp->size);
1.1       matt     3257:        }
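
	/*
	 * Worked example of the trimming above (hypothetical numbers):
	 * an avail region 0x0-0x1000000 with the kernel occupying
	 * 0x100000-0x400000 is split in two: the entry is resized in
	 * place to 0x0-0x100000, and 0x400000-0x1000000 is appended at
	 * avail[avail_cnt].
	 */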
                   3258:
                   3259:        /*
                   3260:         * Move (and uncount) all the null entries to the end.
                   3261:         */
                   3262:        for (mp = avail, i = 0; i < avail_cnt; i++, mp++) {
                   3263:                if (mp->size == 0) {
                   3264:                        tmp = avail[i];
                   3265:                        avail[i] = avail[--avail_cnt];
                   3266:                        avail[avail_cnt] = tmp;
                   3267:                }
                   3268:        }
                   3269:
                   3270:        /*
1.61      skrll    3271:         * (Bubble)sort them into ascending order.
1.1       matt     3272:         */
                   3273:        for (i = 0; i < avail_cnt; i++) {
                   3274:                for (j = i + 1; j < avail_cnt; j++) {
                   3275:                        if (avail[i].start > avail[j].start) {
                   3276:                                tmp = avail[i];
                   3277:                                avail[i] = avail[j];
                   3278:                                avail[j] = tmp;
                   3279:                        }
                   3280:                }
                   3281:        }
                   3282:
                   3283:        /*
                   3284:         * Make sure they don't overlap.
                   3285:         */
                   3286:        for (mp = avail, i = 0; i < avail_cnt - 1; i++, mp++) {
                   3287:                if (mp[0].start + mp[0].size > mp[1].start) {
                   3288:                        mp[0].size = mp[1].start - mp[0].start;
                   3289:                }
                   3290:                DPRINTFN(BOOT,
1.85      matt     3291:                    "pmap_bootstrap: avail[%d] start %#" _PRIxpa " size %#" _PRIxpa "\n",
                   3292:                    i, mp->start, mp->size);
1.1       matt     3293:        }
                   3294:        DPRINTFN(BOOT,
1.85      matt     3295:            "pmap_bootstrap: avail[%d] start %#" _PRIxpa " size %#" _PRIxpa "\n",
                   3296:            i, mp->start, mp->size);
1.1       matt     3297:
                   3298: #ifdef PTEGCOUNT
                   3299:        pmap_pteg_cnt = PTEGCOUNT;
                   3300: #else /* PTEGCOUNT */
1.38      sanjayl  3301:
1.1       matt     3302:        pmap_pteg_cnt = 0x1000;
                   3303:
                   3304:        while (pmap_pteg_cnt < physmem)
                   3305:                pmap_pteg_cnt <<= 1;
                   3306:
                   3307:        pmap_pteg_cnt >>= 1;
                   3308: #endif /* PTEGCOUNT */
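
	/*
	 * Worked example (hypothetical value): with physmem = 0x60000
	 * pages, the loop above doubles pmap_pteg_cnt from 0x1000 to
	 * 0x80000 (the first power of two >= physmem), and the final
	 * shift leaves 0x40000 PTEGs, the largest power of two not
	 * exceeding physmem.
	 */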
                   3309:
1.38      sanjayl  3310: #ifdef DEBUG
1.85      matt     3311:        DPRINTFN(BOOT, "pmap_pteg_cnt: 0x%x\n", pmap_pteg_cnt);
1.38      sanjayl  3312: #endif
                   3313:
1.1       matt     3314:        /*
                   3315:         * Find suitably aligned memory for PTEG hash table.
                   3316:         */
1.2       matt     3317:        size = pmap_pteg_cnt * sizeof(struct pteg);
1.53      garbled  3318:        pmap_pteg_table = (void *)(uintptr_t) pmap_boot_find_memory(size, size, 0);
1.38      sanjayl  3319:
                   3320: #ifdef DEBUG
                   3321:        DPRINTFN(BOOT,
1.85      matt     3322:                "PTEG cnt: 0x%x HTAB size: 0x%08x bytes, address: %p\n", pmap_pteg_cnt, (unsigned int)size, pmap_pteg_table);
1.38      sanjayl  3323: #endif
                   3324:
                   3325:
1.1       matt     3326: #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
                   3327:        if ( (uintptr_t) pmap_pteg_table + size > SEGMENT_LENGTH)
1.54      mlelstv  3328:                panic("pmap_bootstrap: pmap_pteg_table end (%p + %#" _PRIxpa ") > 256MB",
1.1       matt     3329:                    pmap_pteg_table, size);
                   3330: #endif
                   3331:
1.32      he       3332:        memset(__UNVOLATILE(pmap_pteg_table), 0,
                   3333:                pmap_pteg_cnt * sizeof(struct pteg));
1.1       matt     3334:        pmap_pteg_mask = pmap_pteg_cnt - 1;
                   3335:
                   3336:        /*
                   3337:         * We cannot do pmap_steal_memory here since UVM hasn't been loaded
                   3338:         * with pages.  So we just steal them before giving them to UVM.
                   3339:         */
                   3340:        size = sizeof(pmap_pvo_table[0]) * pmap_pteg_cnt;
1.53      garbled  3341:        pmap_pvo_table = (void *)(uintptr_t) pmap_boot_find_memory(size, PAGE_SIZE, 0);
1.1       matt     3342: #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
                   3343:        if ( (uintptr_t) pmap_pvo_table + size > SEGMENT_LENGTH)
1.54      mlelstv  3344:                panic("pmap_bootstrap: pmap_pvo_table end (%p + %#" _PRIxpa ") > 256MB",
1.1       matt     3345:                    pmap_pvo_table, size);
                   3346: #endif
                   3347:
                   3348:        for (i = 0; i < pmap_pteg_cnt; i++)
                   3349:                TAILQ_INIT(&pmap_pvo_table[i]);
                   3350:
                   3351: #ifndef MSGBUFADDR
                   3352:        /*
                   3353:         * Allocate msgbuf in high memory.
                   3354:         */
1.53      garbled  3355:        msgbuf_paddr = pmap_boot_find_memory(MSGBUFSIZE, PAGE_SIZE, 1);
1.1       matt     3356: #endif
                   3357:
                   3358:        for (mp = avail, i = 0; i < avail_cnt; mp++, i++) {
                   3359:                paddr_t pfstart = atop(mp->start);
                   3360:                paddr_t pfend = atop(mp->start + mp->size);
                   3361:                if (mp->size == 0)
                   3362:                        continue;
                   3363:                if (mp->start + mp->size <= SEGMENT_LENGTH) {
                   3364:                        uvm_page_physload(pfstart, pfend, pfstart, pfend,
                   3365:                                VM_FREELIST_FIRST256);
                   3366:                } else if (mp->start >= SEGMENT_LENGTH) {
                   3367:                        uvm_page_physload(pfstart, pfend, pfstart, pfend,
                   3368:                                VM_FREELIST_DEFAULT);
                   3369:                } else {
                   3370:                        pfend = atop(SEGMENT_LENGTH);
                   3371:                        uvm_page_physload(pfstart, pfend, pfstart, pfend,
                   3372:                                VM_FREELIST_FIRST256);
                   3373:                        pfstart = atop(SEGMENT_LENGTH);
                   3374:                        pfend = atop(mp->start + mp->size);
                   3375:                        uvm_page_physload(pfstart, pfend, pfstart, pfend,
                   3376:                                VM_FREELIST_DEFAULT);
                   3377:                }
                   3378:        }
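
	/*
	 * Example (hypothetical addresses): a region 0xf000000-0x18000000
	 * straddles SEGMENT_LENGTH (0x10000000, i.e. 256MB) and is loaded
	 * as two segments: 0xf000000-0x10000000 on VM_FREELIST_FIRST256
	 * and 0x10000000-0x18000000 on VM_FREELIST_DEFAULT, so that
	 * allocations which must live below 256MB can be satisfied.
	 */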
                   3379:
                   3380:        /*
                   3381:         * Make sure kernel vsid is allocated as well as VSID 0.
                   3382:         */
                   3383:        pmap_vsid_bitmap[(KERNEL_VSIDBITS & (NPMAPS-1)) / VSID_NBPW]
                   3384:                |= 1 << (KERNEL_VSIDBITS % VSID_NBPW);
1.53      garbled  3385:        pmap_vsid_bitmap[(PHYSMAP_VSIDBITS & (NPMAPS-1)) / VSID_NBPW]
                   3386:                |= 1 << (PHYSMAP_VSIDBITS % VSID_NBPW);
1.1       matt     3387:        pmap_vsid_bitmap[0] |= 1;
                   3388:
                   3389:        /*
1.103     thorpej  3390:         * Initialize kernel pmap.
1.1       matt     3391:         */
1.103     thorpej  3392: #if defined(PMAP_OEA) || defined(PMAP_OEA64_BRIDGE)
1.1       matt     3393:        for (i = 0; i < 16; i++) {
1.38      sanjayl  3394:                pmap_kernel()->pm_sr[i] = KERNELN_SEGMENT(i)|SR_PRKEY;
1.1       matt     3395:        }
1.102     thorpej  3396:        pmap_kernel()->pm_vsid = KERNEL_VSIDBITS;
1.1       matt     3397:
                   3398:        pmap_kernel()->pm_sr[KERNEL_SR] = KERNEL_SEGMENT|SR_SUKEY|SR_PRKEY;
                   3399: #ifdef KERNEL2_SR
                   3400:        pmap_kernel()->pm_sr[KERNEL2_SR] = KERNEL2_SEGMENT|SR_SUKEY|SR_PRKEY;
                   3401: #endif
1.53      garbled  3402: #endif /* PMAP_OEA || PMAP_OEA64_BRIDGE */
1.103     thorpej  3403:
                   3404: #if defined(PMAP_OEA) && defined(PPC_OEA601)
1.105     thorpej  3405:        if ((MFPVR() >> 16) == MPC601) {
1.103     thorpej  3406:                for (i = 0; i < 16; i++) {
                   3407:                        if (iosrtable[i] & SR601_T) {
                   3408:                                pmap_kernel()->pm_sr[i] = iosrtable[i];
                   3409:                        }
1.1       matt     3410:                }
                   3411:        }
1.103     thorpej  3412: #endif /* PMAP_OEA && PPC_OEA601 */
1.1       matt     3413:
                   3414: #ifdef ALTIVEC
                   3415:        pmap_use_altivec = cpu_altivec;
                   3416: #endif
                   3417:
                   3418: #ifdef DEBUG
                   3419:        if (pmapdebug & PMAPDEBUG_BOOT) {
                   3420:                u_int cnt;
1.94      cherry   3421:                uvm_physseg_t bank;
1.1       matt     3422:                char pbuf[9];
1.94      cherry   3423:                for (cnt = 0, bank = uvm_physseg_get_first();
                   3424:                     uvm_physseg_valid_p(bank);
                   3425:                     bank = uvm_physseg_get_next(bank)) {
                   3426:                        cnt += uvm_physseg_get_avail_end(bank) -
                   3427:                            uvm_physseg_get_avail_start(bank);
1.53      garbled  3428:                        printf("pmap_bootstrap: vm_physmem[%d]=%#" _PRIxpa "-%#" _PRIxpa "/%#" _PRIxpa "\n",
1.1       matt     3429:                            bank,
1.94      cherry   3430:                            ptoa(uvm_physseg_get_avail_start(bank)),
                   3431:                            ptoa(uvm_physseg_get_avail_end(bank)),
                   3432:                            ptoa(uvm_physseg_get_avail_end(bank) - uvm_physseg_get_avail_start(bank)));
1.1       matt     3433:                }
                   3434:                format_bytes(pbuf, sizeof(pbuf), ptoa((u_int64_t) cnt));
                   3435:                printf("pmap_bootstrap: UVM memory = %s (%u pages)\n",
                   3436:                    pbuf, cnt);
                   3437:        }
                   3438: #endif
                   3439:
1.106     martin   3440:        pool_init(&pmap_pvo_pool, sizeof(struct pvo_entry),
                   3441:            sizeof(struct pvo_entry), 0, 0, "pmap_pvopl",
                   3442:            &pmap_pool_allocator, IPL_VM);
1.1       matt     3443:
1.106     martin   3444:        pool_setlowat(&pmap_pvo_pool, 1008);
1.1       matt     3445:
                   3446:        pool_init(&pmap_pool, sizeof(struct pmap),
1.106     martin   3447:            sizeof(void *), 0, 0, "pmap_pl", &pmap_pool_allocator,
1.48      ad       3448:            IPL_NONE);
1.41      matt     3449:
1.89      macallan 3450: #if defined(PMAP_NEED_MAPKERNEL)
1.41      matt     3451:        {
1.53      garbled  3452:                struct pmap *pm = pmap_kernel();
1.58      garbled  3453: #if defined(PMAP_NEED_FULL_MAPKERNEL)
1.41      matt     3454:                extern int etext[], kernel_text[];
                   3455:                vaddr_t va, va_etext = (paddr_t) etext;
1.53      garbled  3456: #endif
                   3457:                paddr_t pa, pa_end;
1.42      matt     3458:                register_t sr;
1.53      garbled  3459:                struct pte pt;
                   3460:                unsigned int ptegidx;
                   3461:                int bank;
1.42      matt     3462:
1.53      garbled  3463:                sr = PHYSMAPN_SEGMENT(0) | SR_SUKEY|SR_PRKEY;
                   3464:                pm->pm_sr[0] = sr;
1.41      matt     3465:
1.53      garbled  3466:                for (bank = 0; bank < vm_nphysseg; bank++) {
1.73      uebayasi 3467:                        pa_end = ptoa(VM_PHYSMEM_PTR(bank)->avail_end);
                   3468:                        pa = ptoa(VM_PHYSMEM_PTR(bank)->avail_start);
1.53      garbled  3469:                        for (; pa < pa_end; pa += PAGE_SIZE) {
                   3470:                                ptegidx = va_to_pteg(pm, pa);
                   3471:                                pmap_pte_create(&pt, pm, pa, pa | PTE_M|PTE_BW);
                   3472:                                pmap_pte_insert(ptegidx, &pt);
                   3473:                        }
                   3474:                }
                   3475:
1.58      garbled  3476: #if defined(PMAP_NEED_FULL_MAPKERNEL)
1.41      matt     3477:                va = (vaddr_t) kernel_text;
                   3478:
                   3479:                for (pa = kernelstart; va < va_etext;
1.53      garbled  3480:                     pa += PAGE_SIZE, va += PAGE_SIZE) {
                   3481:                        ptegidx = va_to_pteg(pm, va);
                   3482:                        pmap_pte_create(&pt, pm, va, pa | PTE_M|PTE_BR);
                   3483:                        pmap_pte_insert(ptegidx, &pt);
                   3484:                }
1.41      matt     3485:
                   3486:                for (; pa < kernelend;
1.53      garbled  3487:                     pa += PAGE_SIZE, va += PAGE_SIZE) {
                   3488:                        ptegidx = va_to_pteg(pm, va);
                   3489:                        pmap_pte_create(&pt, pm, va, pa | PTE_M|PTE_BW);
                   3490:                        pmap_pte_insert(ptegidx, &pt);
                   3491:                }
                   3492:
1.58      garbled  3493:                for (va = 0, pa = 0; va < kernelstart;
1.53      garbled  3494:                     pa += PAGE_SIZE, va += PAGE_SIZE) {
                   3495:                        ptegidx = va_to_pteg(pm, va);
1.58      garbled  3496:                        if (va < 0x3000)
                   3497:                                pmap_pte_create(&pt, pm, va, pa | PTE_M|PTE_BR);
                   3498:                        else
                   3499:                                pmap_pte_create(&pt, pm, va, pa | PTE_M|PTE_BW);
                   3500:                        pmap_pte_insert(ptegidx, &pt);
                   3501:                }
                   3502:                for (va = kernelend, pa = kernelend; va < SEGMENT_LENGTH;
                   3503:                    pa += PAGE_SIZE, va += PAGE_SIZE) {
                   3504:                        ptegidx = va_to_pteg(pm, va);
1.53      garbled  3505:                        pmap_pte_create(&pt, pm, va, pa | PTE_M|PTE_BW);
                   3506:                        pmap_pte_insert(ptegidx, &pt);
                   3507:                }
1.103     thorpej  3508: #endif /* PMAP_NEED_FULL_MAPKERNEL */
1.99      thorpej  3509:        }
1.103     thorpej  3510: #endif /* PMAP_NEED_MAPKERNEL */
1.99      thorpej  3511: }
1.42      matt     3512:
1.99      thorpej  3513: /*
                   3514:  * Using the data structures prepared in pmap_bootstrap1(), program
                   3515:  * the MMU hardware.
                   3516:  */
                   3517: void
                   3518: pmap_bootstrap2(void)
                   3519: {
1.103     thorpej  3520: #if defined(PMAP_OEA) || defined(PMAP_OEA64_BRIDGE)
1.99      thorpej  3521:        for (int i = 0; i < 16; i++) {
                   3522:                __asm volatile("mtsrin %0,%1"
                   3523:                        :: "r"(pmap_kernel()->pm_sr[i]),
                   3524:                           "r"(i << ADDR_SR_SHFT));
1.41      matt     3525:        }
1.99      thorpej  3526: #endif /* PMAP_OEA || PMAP_OEA64_BRIDGE */
1.103     thorpej  3527:
                   3528: #if defined(PMAP_OEA)
1.109     riastrad 3529:        __asm volatile("sync; mtsdr1 %0; isync"
                   3530:            :
                   3531:            : "r"((uintptr_t)pmap_pteg_table | (pmap_pteg_mask >> 10))
                   3532:            : "memory");
1.103     thorpej  3533: #elif defined(PMAP_OEA64) || defined(PMAP_OEA64_BRIDGE)
1.109     riastrad 3534:        __asm volatile("sync; mtsdr1 %0; isync"
                   3535:            :
                   3536:            : "r"((uintptr_t)pmap_pteg_table |
                   3537:                (32 - __builtin_clz(pmap_pteg_mask >> 11)))
                   3538:            : "memory");
1.41      matt     3539: #endif
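
	/*
	 * Worked example for the 32-bit OEA case above (hypothetical
	 * size): with pmap_pteg_cnt = 0x8000 the hash table occupies
	 * 0x8000 * 64 = 2MB, pmap_pteg_mask = 0x7fff, and
	 * pmap_pteg_mask >> 10 = 0x1f, the HTABMASK value SDR1 expects
	 * for a 2MB HTAB.
	 */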
1.99      thorpej  3540:        tlbia();
1.91      macallan 3541:
                   3542: #if defined(PMAPDEBUG)
1.103     thorpej  3543:        if (pmapdebug)
1.91      macallan 3544:            pmap_print_mmuregs();
                   3545: #endif
1.1       matt     3546: }
1.99      thorpej  3547:
                   3548: /*
                   3549:  * This is not part of the defined PMAP interface and is specific to the
                   3550:  * PowerPC architecture.  This is called during initppc, before the system
                   3551:  * is really initialized.
                   3552:  */
                   3553: void
                   3554: pmap_bootstrap(paddr_t kernelstart, paddr_t kernelend)
                   3555: {
                   3556:        pmap_bootstrap1(kernelstart, kernelend);
                   3557:        pmap_bootstrap2();
                   3558: }
