
Annotation of src/sys/arch/arm/arm32/pmap.c, Revision 1.263

1.263   ! matt        1: /*     $NetBSD: pmap.c,v 1.262 2013/07/03 21:37:35 matt Exp $  */
1.12      chris       2:
                      3: /*
1.134     thorpej     4:  * Copyright 2003 Wasabi Systems, Inc.
                      5:  * All rights reserved.
                      6:  *
                      7:  * Written by Steve C. Woodford for Wasabi Systems, Inc.
                      8:  *
                      9:  * Redistribution and use in source and binary forms, with or without
                     10:  * modification, are permitted provided that the following conditions
                     11:  * are met:
                     12:  * 1. Redistributions of source code must retain the above copyright
                     13:  *    notice, this list of conditions and the following disclaimer.
                     14:  * 2. Redistributions in binary form must reproduce the above copyright
                     15:  *    notice, this list of conditions and the following disclaimer in the
                     16:  *    documentation and/or other materials provided with the distribution.
                     17:  * 3. All advertising materials mentioning features or use of this software
                     18:  *    must display the following acknowledgement:
                     19:  *      This product includes software developed for the NetBSD Project by
                     20:  *      Wasabi Systems, Inc.
                     21:  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
                     22:  *    or promote products derived from this software without specific prior
                     23:  *    written permission.
                     24:  *
                     25:  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
                     26:  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
                     27:  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
                     28:  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
                     29:  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
                     30:  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
                     31:  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
                     32:  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
                     33:  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
                     34:  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
                     35:  * POSSIBILITY OF SUCH DAMAGE.
                     36:  */
                     37:
                     38: /*
                     39:  * Copyright (c) 2002-2003 Wasabi Systems, Inc.
1.12      chris      40:  * Copyright (c) 2001 Richard Earnshaw
1.119     chris      41:  * Copyright (c) 2001-2002 Christopher Gilbert
1.12      chris      42:  * All rights reserved.
                     43:  *
                     44:  * 1. Redistributions of source code must retain the above copyright
                     45:  *    notice, this list of conditions and the following disclaimer.
                     46:  * 2. Redistributions in binary form must reproduce the above copyright
                     47:  *    notice, this list of conditions and the following disclaimer in the
                     48:  *    documentation and/or other materials provided with the distribution.
                     49:  * 3. The name of the company nor the name of the author may be used to
                     50:  *    endorse or promote products derived from this software without specific
                     51:  *    prior written permission.
                     52:  *
                     53:  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
                     54:  * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
                     55:  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
                     56:  * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
                     57:  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
                     58:  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
                     59:  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
                     60:  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
                     61:  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
                     62:  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
                     63:  * SUCH DAMAGE.
                     64:  */
1.1       matt       65:
                     66: /*-
                     67:  * Copyright (c) 1999 The NetBSD Foundation, Inc.
                     68:  * All rights reserved.
                     69:  *
                     70:  * This code is derived from software contributed to The NetBSD Foundation
                     71:  * by Charles M. Hannum.
                     72:  *
                     73:  * Redistribution and use in source and binary forms, with or without
                     74:  * modification, are permitted provided that the following conditions
                     75:  * are met:
                     76:  * 1. Redistributions of source code must retain the above copyright
                     77:  *    notice, this list of conditions and the following disclaimer.
                     78:  * 2. Redistributions in binary form must reproduce the above copyright
                     79:  *    notice, this list of conditions and the following disclaimer in the
                     80:  *    documentation and/or other materials provided with the distribution.
                     81:  *
                     82:  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
                     83:  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
                     84:  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
                     85:  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
                     86:  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
                     87:  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
                     88:  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
                     89:  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
                     90:  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
                     91:  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
                     92:  * POSSIBILITY OF SUCH DAMAGE.
                     93:  */
                     94:
                     95: /*
                     96:  * Copyright (c) 1994-1998 Mark Brinicombe.
                     97:  * Copyright (c) 1994 Brini.
                     98:  * All rights reserved.
                     99:  *
                    100:  * This code is derived from software written for Brini by Mark Brinicombe
                    101:  *
                    102:  * Redistribution and use in source and binary forms, with or without
                    103:  * modification, are permitted provided that the following conditions
                    104:  * are met:
                    105:  * 1. Redistributions of source code must retain the above copyright
                    106:  *    notice, this list of conditions and the following disclaimer.
                    107:  * 2. Redistributions in binary form must reproduce the above copyright
                    108:  *    notice, this list of conditions and the following disclaimer in the
                    109:  *    documentation and/or other materials provided with the distribution.
                    110:  * 3. All advertising materials mentioning features or use of this software
                    111:  *    must display the following acknowledgement:
                    112:  *     This product includes software developed by Mark Brinicombe.
                    113:  * 4. The name of the author may not be used to endorse or promote products
                    114:  *    derived from this software without specific prior written permission.
                    115:  *
                    116:  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
                    117:  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
                    118:  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
                    119:  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
                    120:  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
                    121:  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
                    122:  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
                    123:  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
                    124:  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
                     125:  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
                    126:  * RiscBSD kernel project
                    127:  *
                    128:  * pmap.c
                    129:  *
1.223     wiz       130:  * Machine dependent vm stuff
1.1       matt      131:  *
                    132:  * Created      : 20/09/94
                    133:  */
                    134:
                    135: /*
1.174     matt      136:  * armv6 and VIPT cache support by 3am Software Foundry,
                    137:  * Copyright (c) 2007 Microsoft
                    138:  */
                    139:
                    140: /*
1.1       matt      141:  * Performance improvements, UVM changes, overhauls and part-rewrites
                    142:  * were contributed by Neil A. Carson <neil@causality.com>.
                    143:  */
                    144:
                    145: /*
1.134     thorpej   146:  * Overhauled again to speedup the pmap, use MMU Domains so that L1 tables
                    147:  * can be shared, and re-work the KVM layout, by Steve Woodford of Wasabi
                    148:  * Systems, Inc.
                    149:  *
                    150:  * There are still a few things outstanding at this time:
                    151:  *
                    152:  *   - There are some unresolved issues for MP systems:
                    153:  *
                    154:  *     o The L1 metadata needs a lock, or more specifically, some places
                    155:  *       need to acquire an exclusive lock when modifying L1 translation
                    156:  *       table entries.
                    157:  *
                    158:  *     o When one cpu modifies an L1 entry, and that L1 table is also
                    159:  *       being used by another cpu, then the latter will need to be told
                    160:  *       that a tlb invalidation may be necessary. (But only if the old
                    161:  *       domain number in the L1 entry being over-written is currently
                    162:  *       the active domain on that cpu). I guess there are lots more tlb
                    163:  *       shootdown issues too...
                    164:  *
1.256     matt      165:  *     o If the vector_page is at 0x00000000 instead of in kernel VA space,
                    166:  *       then MP systems will lose big-time because of the MMU domain hack.
1.134     thorpej   167:  *       The only way this can be solved (apart from moving the vector
                    168:  *       page to 0xffff0000) is to reserve the first 1MB of user address
                    169:  *       space for kernel use only. This would require re-linking all
                    170:  *       applications so that the text section starts above this 1MB
                    171:  *       boundary.
                    172:  *
                    173:  *     o Tracking which VM space is resident in the cache/tlb has not yet
                    174:  *       been implemented for MP systems.
                    175:  *
                    176:  *     o Finally, there is a pathological condition where two cpus running
                    177:  *       two separate processes (not lwps) which happen to share an L1
                    178:  *       can get into a fight over one or more L1 entries. This will result
                    179:  *       in a significant slow-down if both processes are in tight loops.
1.1       matt      180:  */
                    181:
                    182: /*
                    183:  * Special compilation symbols
                    184:  * PMAP_DEBUG          - Build in pmap_debug_level code
                    185:  */
1.134     thorpej   186:
1.1       matt      187: /* Include header files */
                    188:
1.134     thorpej   189: #include "opt_cpuoptions.h"
1.1       matt      190: #include "opt_pmap_debug.h"
                    191: #include "opt_ddb.h"
1.137     martin    192: #include "opt_lockdebug.h"
                    193: #include "opt_multiprocessor.h"
1.1       matt      194:
1.171     matt      195: #include <sys/param.h>
1.1       matt      196: #include <sys/types.h>
                    197: #include <sys/kernel.h>
                    198: #include <sys/systm.h>
                    199: #include <sys/proc.h>
1.10      chris     200: #include <sys/pool.h>
1.225     para      201: #include <sys/kmem.h>
1.16      chris     202: #include <sys/cdefs.h>
1.171     matt      203: #include <sys/cpu.h>
1.186     matt      204: #include <sys/sysctl.h>
1.263   ! matt      205: #include <sys/bus.h>
1.225     para      206:
1.1       matt      207: #include <uvm/uvm.h>
                    208:
1.263   ! matt      209: #include <arm/locore.h>
1.32      thorpej   210: #include <arm/arm32/katelib.h>
1.16      chris     211:
1.263   ! matt      212: __KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.262 2013/07/03 21:37:35 matt Exp $");
1.215     uebayasi  213:
1.1       matt      214: #ifdef PMAP_DEBUG
1.140     matt      215:
                    216: /* XXX need to get rid of all refs to this */
1.134     thorpej   217: int pmap_debug_level = 0;
1.17      chris     218:
                    219: /*
                    220:  * for switching to potentially finer grained debugging
                    221:  */
                    222: #define        PDB_FOLLOW      0x0001
                    223: #define        PDB_INIT        0x0002
                    224: #define        PDB_ENTER       0x0004
                    225: #define        PDB_REMOVE      0x0008
                    226: #define        PDB_CREATE      0x0010
                    227: #define        PDB_PTPAGE      0x0020
1.48      chris     228: #define        PDB_GROWKERN    0x0040
1.17      chris     229: #define        PDB_BITS        0x0080
                    230: #define        PDB_COLLECT     0x0100
                    231: #define        PDB_PROTECT     0x0200
1.48      chris     232: #define        PDB_MAP_L1      0x0400
1.17      chris     233: #define        PDB_BOOTSTRAP   0x1000
                    234: #define        PDB_PARANOIA    0x2000
                    235: #define        PDB_WIRING      0x4000
                    236: #define        PDB_PVDUMP      0x8000
1.134     thorpej   237: #define        PDB_VAC         0x10000
                    238: #define        PDB_KENTER      0x20000
                    239: #define        PDB_KREMOVE     0x40000
1.174     matt      240: #define        PDB_EXEC        0x80000
1.17      chris     241:
1.134     thorpej   242: int debugmap = 1;
                    243: int pmapdebug = 0;
1.17      chris     244: #define        NPDEBUG(_lev_,_stat_) \
                    245:        if (pmapdebug & (_lev_)) \
                    246:                ((_stat_))
                    247:
1.1       matt      248: #else  /* PMAP_DEBUG */
1.48      chris     249: #define NPDEBUG(_lev_,_stat_) /* Nothing */
1.1       matt      250: #endif /* PMAP_DEBUG */
                    251:
1.134     thorpej   252: /*
                    253:  * pmap_kernel() points here
                    254:  */
1.192     pooka     255: static struct pmap     kernel_pmap_store;
1.193     pooka     256: struct pmap            *const kernel_pmap_ptr = &kernel_pmap_store;
1.241     matt      257: #ifdef PMAP_NEED_ALLOC_POOLPAGE
                    258: int                    arm_poolpage_vmfreelist = VM_FREELIST_DEFAULT;
                    259: #endif
1.1       matt      260:
1.10      chris     261: /*
1.134     thorpej   262:  * Which pmap is currently 'live' in the cache
                    263:  *
                    264:  * XXXSCW: Fix for SMP ...
1.48      chris     265:  */
1.165     scw       266: static pmap_t pmap_recent_user;
1.48      chris     267:
1.134     thorpej   268: /*
1.173     scw       269:  * Pointer to last active lwp, or NULL if it exited.
                    270:  */
                    271: struct lwp *pmap_previous_active_lwp;
                    272:
                    273: /*
1.134     thorpej   274:  * Pool and cache that pmap structures are allocated from.
                    275:  * We use a cache to avoid clearing the pm_l2[] array (1KB)
                    276:  * in pmap_create().
                    277:  */
1.168     ad        278: static struct pool_cache pmap_cache;
1.134     thorpej   279: static LIST_HEAD(, pmap) pmap_pmaps;
1.48      chris     280:
                    281: /*
1.134     thorpej   282:  * Pool of PV structures
1.10      chris     283:  */
1.134     thorpej   284: static struct pool pmap_pv_pool;
                    285: static void *pmap_bootstrap_pv_page_alloc(struct pool *, int);
                    286: static void pmap_bootstrap_pv_page_free(struct pool *, void *);
                    287: static struct pool_allocator pmap_bootstrap_pv_allocator = {
                    288:        pmap_bootstrap_pv_page_alloc, pmap_bootstrap_pv_page_free
                    289: };
1.10      chris     290:
1.134     thorpej   291: /*
                    292:  * Pool and cache of l2_dtable structures.
                    293:  * We use a cache to avoid clearing the structures when they're
                    294:  * allocated. (196 bytes)
                    295:  */
                    296: static struct pool_cache pmap_l2dtable_cache;
                    297: static vaddr_t pmap_kernel_l2dtable_kva;
1.10      chris     298:
1.111     thorpej   299: /*
1.134     thorpej   300:  * Pool and cache of L2 page descriptors.
                    301:  * We use a cache to avoid clearing the descriptor table
                    302:  * when they're allocated. (1KB)
1.111     thorpej   303:  */
1.134     thorpej   304: static struct pool_cache pmap_l2ptp_cache;
                    305: static vaddr_t pmap_kernel_l2ptp_kva;
                    306: static paddr_t pmap_kernel_l2ptp_phys;
1.111     thorpej   307:
1.183     matt      308: #ifdef PMAPCOUNTERS
1.174     matt      309: #define        PMAP_EVCNT_INITIALIZER(name) \
                    310:        EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "pmap", name)
                    311:
                    312: #ifdef PMAP_CACHE_VIPT
1.194     matt      313: static struct evcnt pmap_ev_vac_clean_one =
                    314:    PMAP_EVCNT_INITIALIZER("clean page (1 color)");
                    315: static struct evcnt pmap_ev_vac_flush_one =
                    316:    PMAP_EVCNT_INITIALIZER("flush page (1 color)");
                    317: static struct evcnt pmap_ev_vac_flush_lots =
                    318:    PMAP_EVCNT_INITIALIZER("flush page (2+ colors)");
1.195     matt      319: static struct evcnt pmap_ev_vac_flush_lots2 =
                    320:    PMAP_EVCNT_INITIALIZER("flush page (2+ colors, kmpage)");
1.194     matt      321: EVCNT_ATTACH_STATIC(pmap_ev_vac_clean_one);
                    322: EVCNT_ATTACH_STATIC(pmap_ev_vac_flush_one);
                    323: EVCNT_ATTACH_STATIC(pmap_ev_vac_flush_lots);
1.195     matt      324: EVCNT_ATTACH_STATIC(pmap_ev_vac_flush_lots2);
1.194     matt      325:
1.174     matt      326: static struct evcnt pmap_ev_vac_color_new =
                    327:    PMAP_EVCNT_INITIALIZER("new page color");
                    328: static struct evcnt pmap_ev_vac_color_reuse =
                    329:    PMAP_EVCNT_INITIALIZER("ok first page color");
                    330: static struct evcnt pmap_ev_vac_color_ok =
                    331:    PMAP_EVCNT_INITIALIZER("ok page color");
1.182     matt      332: static struct evcnt pmap_ev_vac_color_blind =
                    333:    PMAP_EVCNT_INITIALIZER("blind page color");
1.174     matt      334: static struct evcnt pmap_ev_vac_color_change =
                    335:    PMAP_EVCNT_INITIALIZER("change page color");
                    336: static struct evcnt pmap_ev_vac_color_erase =
                    337:    PMAP_EVCNT_INITIALIZER("erase page color");
                    338: static struct evcnt pmap_ev_vac_color_none =
                    339:    PMAP_EVCNT_INITIALIZER("no page color");
                    340: static struct evcnt pmap_ev_vac_color_restore =
                    341:    PMAP_EVCNT_INITIALIZER("restore page color");
                    342:
                    343: EVCNT_ATTACH_STATIC(pmap_ev_vac_color_new);
                    344: EVCNT_ATTACH_STATIC(pmap_ev_vac_color_reuse);
                    345: EVCNT_ATTACH_STATIC(pmap_ev_vac_color_ok);
1.182     matt      346: EVCNT_ATTACH_STATIC(pmap_ev_vac_color_blind);
1.174     matt      347: EVCNT_ATTACH_STATIC(pmap_ev_vac_color_change);
                    348: EVCNT_ATTACH_STATIC(pmap_ev_vac_color_erase);
                    349: EVCNT_ATTACH_STATIC(pmap_ev_vac_color_none);
                    350: EVCNT_ATTACH_STATIC(pmap_ev_vac_color_restore);
                    351: #endif
                    352:
                    353: static struct evcnt pmap_ev_mappings =
                    354:    PMAP_EVCNT_INITIALIZER("pages mapped");
                    355: static struct evcnt pmap_ev_unmappings =
                    356:    PMAP_EVCNT_INITIALIZER("pages unmapped");
                    357: static struct evcnt pmap_ev_remappings =
                    358:    PMAP_EVCNT_INITIALIZER("pages remapped");
                    359:
                    360: EVCNT_ATTACH_STATIC(pmap_ev_mappings);
                    361: EVCNT_ATTACH_STATIC(pmap_ev_unmappings);
                    362: EVCNT_ATTACH_STATIC(pmap_ev_remappings);
                    363:
                    364: static struct evcnt pmap_ev_kernel_mappings =
                    365:    PMAP_EVCNT_INITIALIZER("kernel pages mapped");
                    366: static struct evcnt pmap_ev_kernel_unmappings =
                    367:    PMAP_EVCNT_INITIALIZER("kernel pages unmapped");
                    368: static struct evcnt pmap_ev_kernel_remappings =
                    369:    PMAP_EVCNT_INITIALIZER("kernel pages remapped");
                    370:
                    371: EVCNT_ATTACH_STATIC(pmap_ev_kernel_mappings);
                    372: EVCNT_ATTACH_STATIC(pmap_ev_kernel_unmappings);
                    373: EVCNT_ATTACH_STATIC(pmap_ev_kernel_remappings);
                    374:
                    375: static struct evcnt pmap_ev_kenter_mappings =
                    376:    PMAP_EVCNT_INITIALIZER("kenter pages mapped");
                    377: static struct evcnt pmap_ev_kenter_unmappings =
                    378:    PMAP_EVCNT_INITIALIZER("kenter pages unmapped");
                    379: static struct evcnt pmap_ev_kenter_remappings =
                    380:    PMAP_EVCNT_INITIALIZER("kenter pages remapped");
                    381: static struct evcnt pmap_ev_pt_mappings =
                    382:    PMAP_EVCNT_INITIALIZER("page table pages mapped");
                    383:
                    384: EVCNT_ATTACH_STATIC(pmap_ev_kenter_mappings);
                    385: EVCNT_ATTACH_STATIC(pmap_ev_kenter_unmappings);
                    386: EVCNT_ATTACH_STATIC(pmap_ev_kenter_remappings);
                    387: EVCNT_ATTACH_STATIC(pmap_ev_pt_mappings);
                    388:
                    389: #ifdef PMAP_CACHE_VIPT
                    390: static struct evcnt pmap_ev_exec_mappings =
                    391:    PMAP_EVCNT_INITIALIZER("exec pages mapped");
                    392: static struct evcnt pmap_ev_exec_cached =
                    393:    PMAP_EVCNT_INITIALIZER("exec pages cached");
                    394:
                    395: EVCNT_ATTACH_STATIC(pmap_ev_exec_mappings);
                    396: EVCNT_ATTACH_STATIC(pmap_ev_exec_cached);
                    397:
                    398: static struct evcnt pmap_ev_exec_synced =
                    399:    PMAP_EVCNT_INITIALIZER("exec pages synced");
                    400: static struct evcnt pmap_ev_exec_synced_map =
                    401:    PMAP_EVCNT_INITIALIZER("exec pages synced (MP)");
                    402: static struct evcnt pmap_ev_exec_synced_unmap =
                    403:    PMAP_EVCNT_INITIALIZER("exec pages synced (UM)");
                    404: static struct evcnt pmap_ev_exec_synced_remap =
                    405:    PMAP_EVCNT_INITIALIZER("exec pages synced (RM)");
                    406: static struct evcnt pmap_ev_exec_synced_clearbit =
                    407:    PMAP_EVCNT_INITIALIZER("exec pages synced (DG)");
                    408: static struct evcnt pmap_ev_exec_synced_kremove =
                    409:    PMAP_EVCNT_INITIALIZER("exec pages synced (KU)");
                    410:
                    411: EVCNT_ATTACH_STATIC(pmap_ev_exec_synced);
                    412: EVCNT_ATTACH_STATIC(pmap_ev_exec_synced_map);
                    413: EVCNT_ATTACH_STATIC(pmap_ev_exec_synced_unmap);
                    414: EVCNT_ATTACH_STATIC(pmap_ev_exec_synced_remap);
                    415: EVCNT_ATTACH_STATIC(pmap_ev_exec_synced_clearbit);
                    416: EVCNT_ATTACH_STATIC(pmap_ev_exec_synced_kremove);
                    417:
                    418: static struct evcnt pmap_ev_exec_discarded_unmap =
                    419:    PMAP_EVCNT_INITIALIZER("exec pages discarded (UM)");
                    420: static struct evcnt pmap_ev_exec_discarded_zero =
                    421:    PMAP_EVCNT_INITIALIZER("exec pages discarded (ZP)");
                    422: static struct evcnt pmap_ev_exec_discarded_copy =
                    423:    PMAP_EVCNT_INITIALIZER("exec pages discarded (CP)");
                    424: static struct evcnt pmap_ev_exec_discarded_page_protect =
                    425:    PMAP_EVCNT_INITIALIZER("exec pages discarded (PP)");
                    426: static struct evcnt pmap_ev_exec_discarded_clearbit =
                    427:    PMAP_EVCNT_INITIALIZER("exec pages discarded (DG)");
                    428: static struct evcnt pmap_ev_exec_discarded_kremove =
                    429:    PMAP_EVCNT_INITIALIZER("exec pages discarded (KU)");
                    430:
                    431: EVCNT_ATTACH_STATIC(pmap_ev_exec_discarded_unmap);
                    432: EVCNT_ATTACH_STATIC(pmap_ev_exec_discarded_zero);
                    433: EVCNT_ATTACH_STATIC(pmap_ev_exec_discarded_copy);
                    434: EVCNT_ATTACH_STATIC(pmap_ev_exec_discarded_page_protect);
                    435: EVCNT_ATTACH_STATIC(pmap_ev_exec_discarded_clearbit);
                    436: EVCNT_ATTACH_STATIC(pmap_ev_exec_discarded_kremove);
                    437: #endif /* PMAP_CACHE_VIPT */
                    438:
                    439: static struct evcnt pmap_ev_updates = PMAP_EVCNT_INITIALIZER("updates");
                    440: static struct evcnt pmap_ev_collects = PMAP_EVCNT_INITIALIZER("collects");
                    441: static struct evcnt pmap_ev_activations = PMAP_EVCNT_INITIALIZER("activations");
                    442:
                    443: EVCNT_ATTACH_STATIC(pmap_ev_updates);
                    444: EVCNT_ATTACH_STATIC(pmap_ev_collects);
                    445: EVCNT_ATTACH_STATIC(pmap_ev_activations);
                    446:
                    447: #define        PMAPCOUNT(x)    ((void)(pmap_ev_##x.ev_count++))
                    448: #else
                    449: #define        PMAPCOUNT(x)    ((void)0)
                    450: #endif
                    451:
1.134     thorpej   452: /*
                    453:  * pmap copy/zero page, and mem(5) hook point
                    454:  */
1.54      thorpej   455: static pt_entry_t *csrc_pte, *cdst_pte;
                    456: static vaddr_t csrcp, cdstp;
1.183     matt      457: vaddr_t memhook;                       /* used by mem.c */
1.189     matt      458: kmutex_t memlock;                      /* used by mem.c */
1.191     matt      459: void *zeropage;                                /* used by mem.c */
1.161     christos  460: extern void *msgbufaddr;
1.186     matt      461: int pmap_kmpages;
1.17      chris     462: /*
1.134     thorpej   463:  * Flag to indicate if pmap_init() has done its thing
                    464:  */
1.159     thorpej   465: bool pmap_initialized;
1.134     thorpej   466:
                    467: /*
                    468:  * Misc. locking data structures
1.17      chris     469:  */
1.1       matt      470:
1.134     thorpej   471: #define        pmap_acquire_pmap_lock(pm)                      \
                    472:        do {                                            \
                    473:                if ((pm) != pmap_kernel())              \
1.222     rmind     474:                        mutex_enter((pm)->pm_lock);     \
1.134     thorpej   475:        } while (/*CONSTCOND*/0)
                    476:
                    477: #define        pmap_release_pmap_lock(pm)                      \
                    478:        do {                                            \
                    479:                if ((pm) != pmap_kernel())              \
1.222     rmind     480:                        mutex_exit((pm)->pm_lock);      \
1.134     thorpej   481:        } while (/*CONSTCOND*/0)
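
/*
 * A minimal usage sketch for the two macros above: code that inspects or
 * modifies a user pmap's metadata brackets the work with them, while the
 * kernel pmap is deliberately left unlocked by the macros themselves.
 * The helper name and body below are illustrative only.
 */
static inline void
pmap_locked_op_sketch(pmap_t pm)
{

	pmap_acquire_pmap_lock(pm);
	/* ... inspect or modify pm's L1/L2 state here ... */
	pmap_release_pmap_lock(pm);
}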
1.1       matt      482:
1.33      chris     483:
1.69      thorpej   484: /*
1.134     thorpej   485:  * Metadata for L1 translation tables.
1.69      thorpej   486:  */
1.134     thorpej   487: struct l1_ttable {
                    488:        /* Entry on the L1 Table list */
                    489:        SLIST_ENTRY(l1_ttable) l1_link;
1.1       matt      490:
1.134     thorpej   491:        /* Entry on the L1 Least Recently Used list */
                    492:        TAILQ_ENTRY(l1_ttable) l1_lru;
1.1       matt      493:
1.134     thorpej   494:        /* Track how many domains are allocated from this L1 */
                    495:        volatile u_int l1_domain_use_count;
1.1       matt      496:
1.134     thorpej   497:        /*
                    498:         * A free-list of domain numbers for this L1.
                    499:         * We avoid using ffs() and a bitmap to track domains since ffs()
                    500:         * is slow on ARM.
                    501:         */
1.242     skrll     502:        uint8_t l1_domain_first;
                    503:        uint8_t l1_domain_free[PMAP_DOMAINS];
1.1       matt      504:
1.134     thorpej   505:        /* Physical address of this L1 page table */
                    506:        paddr_t l1_physaddr;
1.1       matt      507:
1.134     thorpej   508:        /* KVA of this L1 page table */
                    509:        pd_entry_t *l1_kva;
                    510: };
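
/*
 * A minimal sketch of how the domain free-list above is intended to be
 * used: l1_domain_first indexes the first free domain and each
 * l1_domain_free[] slot holds the index of the next free one, so taking
 * or returning a domain is a constant-time pop/push with no ffs() or
 * bitmap scan.  The helpers below are illustrative only (no locking, no
 * empty/full checks).
 */
static inline u_int
l1_domain_alloc_sketch(struct l1_ttable *l1)
{
	u_int domain = l1->l1_domain_first;

	l1->l1_domain_first = l1->l1_domain_free[domain];
	l1->l1_domain_use_count++;
	return domain;
}

static inline void
l1_domain_free_sketch(struct l1_ttable *l1, u_int domain)
{

	l1->l1_domain_free[domain] = l1->l1_domain_first;
	l1->l1_domain_first = domain;
	l1->l1_domain_use_count--;
}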
1.1       matt      511:
1.134     thorpej   512: /*
                    513:  * Convert a virtual address into its L1 table index. That is, the
                    514:  * index used to locate the L2 descriptor table pointer in an L1 table.
                    515:  * This is basically used to index l1->l1_kva[].
                    516:  *
                    517:  * Each L2 descriptor table represents 1MB of VA space.
                    518:  */
                    519: #define        L1_IDX(va)              (((vaddr_t)(va)) >> L1_S_SHIFT)
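
/*
 * Worked example, assuming the usual 1MB ARM sections (L1_S_SHIFT == 20):
 * for va == 0xc0123456, L1_IDX(va) == 0xc0123456 >> 20 == 0xc01, so
 * l1->l1_kva[0xc01] is the L1 entry covering 0xc0100000 - 0xc01fffff.
 */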
1.11      chris     520:
1.17      chris     521: /*
1.134     thorpej   522:  * L1 Page Tables are tracked using a Least Recently Used list.
                    523:  *  - New L1s are allocated from the HEAD.
                     524:  *  - Freed L1s are added to the TAIL.
                    525:  *  - Recently accessed L1s (where an 'access' is some change to one of
                    526:  *    the userland pmaps which owns this L1) are moved to the TAIL.
1.17      chris     527:  */
1.134     thorpej   528: static TAILQ_HEAD(, l1_ttable) l1_lru_list;
1.226     matt      529: static kmutex_t l1_lru_lock __cacheline_aligned;
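
/*
 * A minimal sketch of the LRU policy described above: when a userland
 * pmap that owns an L1 is touched, that L1 is moved to the tail of
 * l1_lru_list, so that new allocations from the head pick the least
 * recently used table.  Illustrative only; it assumes the L1 is already
 * on the LRU list and skips the bookkeeping the real code performs.
 */
static inline void
l1_lru_bump_sketch(struct l1_ttable *l1)
{

	mutex_enter(&l1_lru_lock);
	TAILQ_REMOVE(&l1_lru_list, l1, l1_lru);
	TAILQ_INSERT_TAIL(&l1_lru_list, l1, l1_lru);
	mutex_exit(&l1_lru_lock);
}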
1.17      chris     530:
1.134     thorpej   531: /*
                    532:  * A list of all L1 tables
                    533:  */
                    534: static SLIST_HEAD(, l1_ttable) l1_list;
1.17      chris     535:
                    536: /*
1.134     thorpej   537:  * The l2_dtable tracks L2_BUCKET_SIZE worth of L1 slots.
                    538:  *
                     539:  * This is normally 16MB worth of L2 page descriptors for any given pmap.
                    540:  * Reference counts are maintained for L2 descriptors so they can be
                    541:  * freed when empty.
1.17      chris     542:  */
1.134     thorpej   543: struct l2_dtable {
                    544:        /* The number of L2 page descriptors allocated to this l2_dtable */
                    545:        u_int l2_occupancy;
1.17      chris     546:
1.134     thorpej   547:        /* List of L2 page descriptors */
                    548:        struct l2_bucket {
                    549:                pt_entry_t *l2b_kva;    /* KVA of L2 Descriptor Table */
                    550:                paddr_t l2b_phys;       /* Physical address of same */
                    551:                u_short l2b_l1idx;      /* This L2 table's L1 index */
                    552:                u_short l2b_occupancy;  /* How many active descriptors */
                    553:        } l2_bucket[L2_BUCKET_SIZE];
1.17      chris     554: };
                    555:
                    556: /*
1.134     thorpej   557:  * Given an L1 table index, calculate the corresponding l2_dtable index
                    558:  * and bucket index within the l2_dtable.
1.17      chris     559:  */
1.134     thorpej   560: #define        L2_IDX(l1idx)           (((l1idx) >> L2_BUCKET_LOG2) & \
                    561:                                 (L2_SIZE - 1))
                    562: #define        L2_BUCKET(l1idx)        ((l1idx) & (L2_BUCKET_SIZE - 1))
1.17      chris     563:
1.134     thorpej   564: /*
                    565:  * Given a virtual address, this macro returns the
                    566:  * virtual address required to drop into the next L2 bucket.
                    567:  */
                    568: #define        L2_NEXT_BUCKET(va)      (((va) & L1_S_FRAME) + L1_S_SIZE)
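
/*
 * Putting the three macros above together, a minimal sketch of how a
 * virtual address is resolved down to its L2 bucket.  This shows only the
 * index arithmetic; the helper name is illustrative, and callers also need
 * to check the bucket's l2b_kva before dereferencing it.  Loops over a VA
 * range typically advance with va = L2_NEXT_BUCKET(va) to step to the
 * start of the next 1MB section.
 */
static inline struct l2_bucket *
pmap_l2_bucket_sketch(pmap_t pm, vaddr_t va)
{
	struct l2_dtable *l2;
	u_short l1idx;

	l1idx = L1_IDX(va);			/* which 1MB section of VA */
	l2 = pm->pm_l2[L2_IDX(l1idx)];
	if (l2 == NULL)
		return NULL;			/* no L2 metadata here yet */
	return &l2->l2_bucket[L2_BUCKET(l1idx)];
}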
1.17      chris     569:
                    570: /*
1.134     thorpej   571:  * L2 allocation.
1.17      chris     572:  */
1.134     thorpej   573: #define        pmap_alloc_l2_dtable()          \
                    574:            pool_cache_get(&pmap_l2dtable_cache, PR_NOWAIT)
                    575: #define        pmap_free_l2_dtable(l2)         \
                    576:            pool_cache_put(&pmap_l2dtable_cache, (l2))
                    577: #define pmap_alloc_l2_ptp(pap)         \
                    578:            ((pt_entry_t *)pool_cache_get_paddr(&pmap_l2ptp_cache,\
                    579:            PR_NOWAIT, (pap)))
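
/*
 * Both allocation macros above pass PR_NOWAIT, so they can return NULL
 * under memory pressure; callers must check for that and back out instead
 * of sleeping.  A minimal sketch of the pattern (names are illustrative):
 */
static inline pt_entry_t *
pmap_fill_bucket_sketch(struct l2_bucket *l2b)
{
	pt_entry_t *ptep;
	paddr_t pa;

	ptep = pmap_alloc_l2_ptp(&pa);
	if (ptep == NULL)
		return NULL;		/* no memory: caller fails the mapping */
	l2b->l2b_kva = ptep;
	l2b->l2b_phys = pa;
	return ptep;
}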
1.1       matt      580:
                    581: /*
1.134     thorpej   582:  * We try to map the page tables write-through, if possible.  However, not
                    583:  * all CPUs have a write-through cache mode, so on those we have to sync
                    584:  * the cache when we frob page tables.
1.113     thorpej   585:  *
1.134     thorpej   586:  * We try to evaluate this at compile time, if possible.  However, it's
                    587:  * not always possible to do that, hence this run-time var.
                    588:  */
                    589: int    pmap_needs_pte_sync;
1.113     thorpej   590:
                    591: /*
1.134     thorpej   592:  * Real definition of pv_entry.
1.113     thorpej   593:  */
1.134     thorpej   594: struct pv_entry {
1.183     matt      595:        SLIST_ENTRY(pv_entry) pv_link;  /* next pv_entry */
1.134     thorpej   596:        pmap_t          pv_pmap;        /* pmap where mapping lies */
                    597:        vaddr_t         pv_va;          /* virtual address for mapping */
                    598:        u_int           pv_flags;       /* flags */
                    599: };
1.113     thorpej   600:
                    601: /*
1.134     thorpej   602:  * Macro to determine if a mapping might be resident in the
                    603:  * instruction cache and/or TLB
1.17      chris     604:  */
1.253     matt      605: #if ARM_MMU_V7 > 0
                    606: /*
                    607:  * Speculative loads by Cortex cores can cause TLB entries to be filled even if
                     608:  * there are no explicit accesses, so there may always be TLB entries to
                    609:  * flush.  If we used ASIDs then this would not be a problem.
                    610:  */
                    611: #define        PV_BEEN_EXECD(f)  (((f) & PVF_EXEC) == PVF_EXEC)
                    612: #else
1.134     thorpej   613: #define        PV_BEEN_EXECD(f)  (((f) & (PVF_REF | PVF_EXEC)) == (PVF_REF | PVF_EXEC))
1.253     matt      614: #endif
1.174     matt      615: #define        PV_IS_EXEC_P(f)   (((f) & PVF_EXEC) != 0)
1.17      chris     616:
                    617: /*
1.134     thorpej   618:  * Macro to determine if a mapping might be resident in the
                    619:  * data cache and/or TLB
1.1       matt      620:  */
1.253     matt      621: #if ARM_MMU_V7 > 0
                    622: /*
                    623:  * Speculative loads by Cortex cores can cause TLB entries to be filled even if
                     624:  * there are no explicit accesses, so there may always be TLB entries to
                    625:  * flush.  If we used ASIDs then this would not be a problem.
                    626:  */
                    627: #define        PV_BEEN_REFD(f)   (1)
                    628: #else
1.134     thorpej   629: #define        PV_BEEN_REFD(f)   (((f) & PVF_REF) != 0)
1.253     matt      630: #endif
1.1       matt      631:
                    632: /*
1.134     thorpej   633:  * Local prototypes
1.1       matt      634:  */
1.134     thorpej   635: static int             pmap_set_pt_cache_mode(pd_entry_t *, vaddr_t);
                    636: static void            pmap_alloc_specials(vaddr_t *, int, vaddr_t *,
                    637:                            pt_entry_t **);
1.159     thorpej   638: static bool            pmap_is_current(pmap_t);
                    639: static bool            pmap_is_cached(pmap_t);
1.215     uebayasi  640: static void            pmap_enter_pv(struct vm_page_md *, paddr_t, struct pv_entry *,
1.134     thorpej   641:                            pmap_t, vaddr_t, u_int);
1.215     uebayasi  642: static struct pv_entry *pmap_find_pv(struct vm_page_md *, pmap_t, vaddr_t);
                    643: static struct pv_entry *pmap_remove_pv(struct vm_page_md *, paddr_t, pmap_t, vaddr_t);
                    644: static u_int           pmap_modify_pv(struct vm_page_md *, paddr_t, pmap_t, vaddr_t,
1.134     thorpej   645:                            u_int, u_int);
1.17      chris     646:
1.134     thorpej   647: static void            pmap_pinit(pmap_t);
                    648: static int             pmap_pmap_ctor(void *, void *, int);
1.17      chris     649:
1.134     thorpej   650: static void            pmap_alloc_l1(pmap_t);
                    651: static void            pmap_free_l1(pmap_t);
                    652: static void            pmap_use_l1(pmap_t);
1.17      chris     653:
1.134     thorpej   654: static struct l2_bucket *pmap_get_l2_bucket(pmap_t, vaddr_t);
                    655: static struct l2_bucket *pmap_alloc_l2_bucket(pmap_t, vaddr_t);
                    656: static void            pmap_free_l2_bucket(pmap_t, struct l2_bucket *, u_int);
                    657: static int             pmap_l2ptp_ctor(void *, void *, int);
                    658: static int             pmap_l2dtable_ctor(void *, void *, int);
1.51      chris     659:
1.215     uebayasi  660: static void            pmap_vac_me_harder(struct vm_page_md *, paddr_t, pmap_t, vaddr_t);
1.174     matt      661: #ifdef PMAP_CACHE_VIVT
1.215     uebayasi  662: static void            pmap_vac_me_kpmap(struct vm_page_md *, paddr_t, pmap_t, vaddr_t);
                    663: static void            pmap_vac_me_user(struct vm_page_md *, paddr_t, pmap_t, vaddr_t);
1.174     matt      664: #endif
1.17      chris     665:
1.215     uebayasi  666: static void            pmap_clearbit(struct vm_page_md *, paddr_t, u_int);
1.174     matt      667: #ifdef PMAP_CACHE_VIVT
1.159     thorpej   668: static int             pmap_clean_page(struct pv_entry *, bool);
1.174     matt      669: #endif
                    670: #ifdef PMAP_CACHE_VIPT
1.215     uebayasi  671: static void            pmap_syncicache_page(struct vm_page_md *, paddr_t);
1.194     matt      672: enum pmap_flush_op {
                    673:        PMAP_FLUSH_PRIMARY,
                    674:        PMAP_FLUSH_SECONDARY,
                    675:        PMAP_CLEAN_PRIMARY
                    676: };
1.215     uebayasi  677: static void            pmap_flush_page(struct vm_page_md *, paddr_t, enum pmap_flush_op);
1.174     matt      678: #endif
1.215     uebayasi  679: static void            pmap_page_remove(struct vm_page_md *, paddr_t);
1.17      chris     680:
1.134     thorpej   681: static void            pmap_init_l1(struct l1_ttable *, pd_entry_t *);
                    682: static vaddr_t         kernel_pt_lookup(paddr_t);
1.17      chris     683:
                    684:
                    685: /*
1.134     thorpej   686:  * Misc variables
                    687:  */
                    688: vaddr_t virtual_avail;
                    689: vaddr_t virtual_end;
                    690: vaddr_t pmap_curmaxkvaddr;
1.17      chris     691:
1.196     nonaka    692: paddr_t avail_start;
                    693: paddr_t avail_end;
1.17      chris     694:
1.174     matt      695: pv_addrqh_t pmap_boot_freeq = SLIST_HEAD_INITIALIZER(&pmap_boot_freeq);
                    696: pv_addr_t kernelpages;
                    697: pv_addr_t kernel_l1pt;
                    698: pv_addr_t systempage;
1.17      chris     699:
1.134     thorpej   700: /* Function to set the debug level of the pmap code */
1.17      chris     701:
1.134     thorpej   702: #ifdef PMAP_DEBUG
                    703: void
                    704: pmap_debug(int level)
                    705: {
                    706:        pmap_debug_level = level;
                    707:        printf("pmap_debug: level=%d\n", pmap_debug_level);
1.1       matt      708: }
1.134     thorpej   709: #endif /* PMAP_DEBUG */
1.1       matt      710:
1.251     matt      711: #ifdef PMAP_CACHE_VIPT
                    712: #define PMAP_VALIDATE_MD_PAGE(md)      \
                    713:        KASSERTMSG(arm_cache_prefer_mask == 0 || (((md)->pvh_attrs & PVF_WRITE) == 0) == ((md)->urw_mappings + (md)->krw_mappings == 0), \
                    714:            "(md) %p: attrs=%#x urw=%u krw=%u", (md), \
                    715:            (md)->pvh_attrs, (md)->urw_mappings, (md)->krw_mappings);
                    716: #endif /* PMAP_CACHE_VIPT */
1.1       matt      717: /*
1.134     thorpej   718:  * A bunch of routines to conditionally flush the caches/TLB depending
                    719:  * on whether the specified pmap actually needs to be flushed at any
                    720:  * given time.
1.1       matt      721:  */
1.157     perry     722: static inline void
1.259     matt      723: pmap_tlb_flush_SE(pmap_t pm, vaddr_t va, u_int flags)
1.134     thorpej   724: {
1.259     matt      725:        if (pm->pm_cstate.cs_tlb_id != 0) {
                    726:                if (PV_BEEN_EXECD(flags)) {
                    727:                        cpu_tlb_flushID_SE(va);
                    728:                } else if (PV_BEEN_REFD(flags)) {
                    729:                        cpu_tlb_flushD_SE(va);
                    730:                }
                    731:        }
1.1       matt      732: }
                    733:
1.157     perry     734: static inline void
1.134     thorpej   735: pmap_tlb_flushID(pmap_t pm)
1.1       matt      736: {
1.134     thorpej   737:        if (pm->pm_cstate.cs_tlb_id) {
                    738:                cpu_tlb_flushID();
1.253     matt      739: #if ARM_MMU_V7 == 0
                    740:                /*
                    741:                 * Speculative loads by Cortex cores can cause TLB entries to
                    742:                 * be filled even if there are no explicit accesses, so there
                     743:                 * may always be TLB entries to flush.  If we used ASIDs
                    744:                 * then it would not be a problem.
                    745:                 * This is not true for other CPUs.
                    746:                 */
1.134     thorpej   747:                pm->pm_cstate.cs_tlb = 0;
1.259     matt      748: #endif /* ARM_MMU_V7 */
1.1       matt      749:        }
1.134     thorpej   750: }
1.1       matt      751:
1.157     perry     752: static inline void
1.134     thorpej   753: pmap_tlb_flushD(pmap_t pm)
                    754: {
                    755:        if (pm->pm_cstate.cs_tlb_d) {
                    756:                cpu_tlb_flushD();
1.253     matt      757: #if ARM_MMU_V7 == 0
                    758:                /*
                    759:                 * Speculative loads by Cortex cores can cause TLB entries to
                     760:                 * may always be TLB entries to flush.  If we used ASIDs
                    761:                 * may be always be TLB entries to flush.  If we used ASIDs
                    762:                 * then it would not be a problem.
                    763:                 * This is not true for other CPUs.
                    764:                 */
1.134     thorpej   765:                pm->pm_cstate.cs_tlb_d = 0;
1.260     matt      766: #endif /* ARM_MMU_V7 */
1.1       matt      767:        }
                    768: }
                    769:
1.174     matt      770: #ifdef PMAP_CACHE_VIVT
1.157     perry     771: static inline void
1.259     matt      772: pmap_cache_wbinv_page(pmap_t pm, vaddr_t va, bool do_inv, u_int flags)
1.17      chris     773: {
1.259     matt      774:        if (PV_BEEN_EXECD(flags) && pm->pm_cstate.cs_cache_id) {
                    775:                cpu_idcache_wbinv_range(va, PAGE_SIZE);
                    776:        } else if (PV_BEEN_REFD(flags) && pm->pm_cstate.cs_cache_d) {
1.134     thorpej   777:                if (do_inv) {
1.259     matt      778:                        if (flags & PVF_WRITE)
                    779:                                cpu_dcache_wbinv_range(va, PAGE_SIZE);
1.134     thorpej   780:                        else
1.259     matt      781:                                cpu_dcache_inv_range(va, PAGE_SIZE);
                    782:                } else if (flags & PVF_WRITE) {
                    783:                        cpu_dcache_wb_range(va, PAGE_SIZE);
                    784:                }
1.1       matt      785:        }
1.134     thorpej   786: }
1.1       matt      787:
1.157     perry     788: static inline void
1.259     matt      789: pmap_cache_wbinv_all(pmap_t pm, u_int flags)
1.134     thorpej   790: {
1.259     matt      791:        if (PV_BEEN_EXECD(flags)) {
                    792:                if (pm->pm_cstate.cs_cache_id) {
                    793:                        cpu_idcache_wbinv_all();
                    794:                        pm->pm_cstate.cs_cache = 0;
                    795:                }
                    796:        } else if (pm->pm_cstate.cs_cache_d) {
1.134     thorpej   797:                cpu_dcache_wbinv_all();
                    798:                pm->pm_cstate.cs_cache_d = 0;
                    799:        }
                    800: }
1.174     matt      801: #endif /* PMAP_CACHE_VIVT */
1.1       matt      802:
1.258     matt      803: static inline uint8_t
                    804: pmap_domain(pmap_t pm)
                    805: {
                    806:        return pm->pm_domain;
                    807: }
                    808:
                    809: static inline pd_entry_t *
                    810: pmap_l1_kva(pmap_t pm)
                    811: {
                    812:        return pm->pm_l1->l1_kva;
                    813: }
                    814:
1.159     thorpej   815: static inline bool
1.134     thorpej   816: pmap_is_current(pmap_t pm)
1.1       matt      817: {
1.17      chris     818:
1.182     matt      819:        if (pm == pmap_kernel() || curproc->p_vmspace->vm_map.pmap == pm)
1.174     matt      820:                return true;
1.1       matt      821:
1.174     matt      822:        return false;
1.134     thorpej   823: }
1.1       matt      824:
1.159     thorpej   825: static inline bool
1.134     thorpej   826: pmap_is_cached(pmap_t pm)
                    827: {
1.17      chris     828:
1.165     scw       829:        if (pm == pmap_kernel() || pmap_recent_user == NULL ||
                    830:            pmap_recent_user == pm)
1.160     thorpej   831:                return (true);
1.17      chris     832:
1.174     matt      833:        return false;
1.134     thorpej   834: }
1.1       matt      835:
1.134     thorpej   836: /*
                    837:  * PTE_SYNC_CURRENT:
                    838:  *
                    839:  *     Make sure the pte is written out to RAM.
                     840:  *     We need to do this in any of the following cases:
                    841:  *       - We're dealing with the kernel pmap
                    842:  *       - There is no pmap active in the cache/tlb.
                    843:  *       - The specified pmap is 'active' in the cache/tlb.
                    844:  */
                    845: #ifdef PMAP_INCLUDE_PTE_SYNC
                    846: #define        PTE_SYNC_CURRENT(pm, ptep)      \
                    847: do {                                   \
                    848:        if (PMAP_NEEDS_PTE_SYNC &&      \
                    849:            pmap_is_cached(pm))         \
                    850:                PTE_SYNC(ptep);         \
                    851: } while (/*CONSTCOND*/0)
                    852: #else
                    853: #define        PTE_SYNC_CURRENT(pm, ptep)      /* nothing */
                    854: #endif
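
/*
 * Typical use of PTE_SYNC_CURRENT: after a PTE has been written, push it
 * out to memory, but only when the pmap is actually live in the cache/TLB
 * as decided above.  A minimal sketch; the helper name is illustrative.
 */
static inline void
pmap_store_pte_sketch(pmap_t pm, pt_entry_t *ptep, pt_entry_t npte)
{

	*ptep = npte;			/* update the descriptor in memory */
	PTE_SYNC_CURRENT(pm, ptep);	/* make it visible to table walks  */
}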
1.1       matt      855:
                    856: /*
1.17      chris     857:  * main pv_entry manipulation functions:
1.49      thorpej   858:  *   pmap_enter_pv: enter a mapping onto a vm_page list
1.249     skrll     859:  *   pmap_remove_pv: remove a mapping from a vm_page list
1.17      chris     860:  *
                    861:  * NOTE: pmap_enter_pv expects to lock the pvh itself
1.250     skrll     862:  *       pmap_remove_pv expects the caller to lock the pvh before calling
1.17      chris     863:  */
                    864:
                    865: /*
1.49      thorpej   866:  * pmap_enter_pv: enter a mapping onto a vm_page list
1.17      chris     867:  *
                    868:  * => caller should hold the proper lock on pmap_main_lock
                    869:  * => caller should have pmap locked
1.49      thorpej   870:  * => we will gain the lock on the vm_page and allocate the new pv_entry
1.17      chris     871:  * => caller should adjust ptp's wire_count before calling
                    872:  * => caller should not adjust pmap's wire_count
                    873:  */
1.134     thorpej   874: static void
1.215     uebayasi  875: pmap_enter_pv(struct vm_page_md *md, paddr_t pa, struct pv_entry *pv, pmap_t pm,
1.134     thorpej   876:     vaddr_t va, u_int flags)
                    877: {
1.182     matt      878:        struct pv_entry **pvp;
1.17      chris     879:
1.134     thorpej   880:        NPDEBUG(PDB_PVDUMP,
1.215     uebayasi  881:            printf("pmap_enter_pv: pm %p, md %p, flags 0x%x\n", pm, md, flags));
1.134     thorpej   882:
1.205     uebayasi  883:        pv->pv_pmap = pm;
                    884:        pv->pv_va = va;
                    885:        pv->pv_flags = flags;
1.134     thorpej   886:
1.215     uebayasi  887:        pvp = &SLIST_FIRST(&md->pvh_list);
1.182     matt      888: #ifdef PMAP_CACHE_VIPT
                    889:        /*
1.185     matt      890:         * Insert unmanaged entries, writeable first, at the head of
                    891:         * the pv list.
1.182     matt      892:         */
                    893:        if (__predict_true((flags & PVF_KENTRY) == 0)) {
                    894:                while (*pvp != NULL && (*pvp)->pv_flags & PVF_KENTRY)
1.183     matt      895:                        pvp = &SLIST_NEXT(*pvp, pv_link);
1.185     matt      896:        } else if ((flags & PVF_WRITE) == 0) {
                    897:                while (*pvp != NULL && (*pvp)->pv_flags & PVF_WRITE)
                    898:                        pvp = &SLIST_NEXT(*pvp, pv_link);
1.182     matt      899:        }
                    900: #endif
1.205     uebayasi  901:        SLIST_NEXT(pv, pv_link) = *pvp;         /* add to ... */
                    902:        *pvp = pv;                              /* ... locked list */
1.215     uebayasi  903:        md->pvh_attrs |= flags & (PVF_REF | PVF_MOD);
1.183     matt      904: #ifdef PMAP_CACHE_VIPT
1.205     uebayasi  905:        if ((pv->pv_flags & PVF_KWRITE) == PVF_KWRITE)
1.215     uebayasi  906:                md->pvh_attrs |= PVF_KMOD;
                    907:        if ((md->pvh_attrs & (PVF_DMOD|PVF_NC)) != PVF_NC)
                    908:                md->pvh_attrs |= PVF_DIRTY;
                    909:        KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC)));
1.183     matt      910: #endif
1.134     thorpej   911:        if (pm == pmap_kernel()) {
1.174     matt      912:                PMAPCOUNT(kernel_mappings);
1.134     thorpej   913:                if (flags & PVF_WRITE)
1.215     uebayasi  914:                        md->krw_mappings++;
1.134     thorpej   915:                else
1.215     uebayasi  916:                        md->kro_mappings++;
1.206     uebayasi  917:        } else {
                    918:                if (flags & PVF_WRITE)
1.215     uebayasi  919:                        md->urw_mappings++;
1.206     uebayasi  920:                else
1.215     uebayasi  921:                        md->uro_mappings++;
1.206     uebayasi  922:        }
1.174     matt      923:
                    924: #ifdef PMAP_CACHE_VIPT
                    925:        /*
1.251     matt      926:         * Even though pmap_vac_me_harder will set PVF_WRITE for us,
                     927:         * do it here as well to keep the mappings & PVF_WRITE consistent.
                    928:         */
                    929:        if (arm_cache_prefer_mask != 0 && (flags & PVF_WRITE) != 0) {
                    930:                md->pvh_attrs |= PVF_WRITE;
                    931:        }
                    932:        /*
1.174     matt      933:         * If this is an exec mapping and its the first exec mapping
                    934:         * for this page, make sure to sync the I-cache.
                    935:         */
                    936:        if (PV_IS_EXEC_P(flags)) {
1.215     uebayasi  937:                if (!PV_IS_EXEC_P(md->pvh_attrs)) {
                    938:                        pmap_syncicache_page(md, pa);
1.174     matt      939:                        PMAPCOUNT(exec_synced_map);
                    940:                }
                    941:                PMAPCOUNT(exec_mappings);
                    942:        }
                    943: #endif
                    944:
                    945:        PMAPCOUNT(mappings);
1.134     thorpej   946:
1.205     uebayasi  947:        if (pv->pv_flags & PVF_WIRED)
1.134     thorpej   948:                ++pm->pm_stats.wired_count;
1.17      chris     949: }
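/*
 * Example (a minimal sketch, not taken from the code above): under
 * PMAP_CACHE_VIPT the ordering established in pmap_enter_pv() keeps any
 * writable unmanaged (KENTRY) entry at the head of the pv list, so a
 * consumer only has to inspect the first element.  The helper name
 * head_is_kernel_writable() is illustrative only.
 */
#if 0
static bool
head_is_kernel_writable(struct vm_page_md *md)
{
        struct pv_entry *pv = SLIST_FIRST(&md->pvh_list);

        /* PVF_KWRITE covers both the KENTRY and WRITE bits. */
        return pv != NULL && (pv->pv_flags & PVF_KWRITE) == PVF_KWRITE;
}
#endif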
                    950:
                    951: /*
1.134     thorpej   952:  *
                    953:  * pmap_find_pv: Find a pv entry
                    954:  *
                    955:  * => caller should hold lock on vm_page
                    956:  */
1.157     perry     957: static inline struct pv_entry *
1.215     uebayasi  958: pmap_find_pv(struct vm_page_md *md, pmap_t pm, vaddr_t va)
1.134     thorpej   959: {
                    960:        struct pv_entry *pv;
                    961:
1.215     uebayasi  962:        SLIST_FOREACH(pv, &md->pvh_list, pv_link) {
1.134     thorpej   963:                if (pm == pv->pv_pmap && va == pv->pv_va)
                    964:                        break;
                    965:        }
                    966:
                    967:        return (pv);
                    968: }
                    969:
                    970: /*
                    971:  * pmap_remove_pv: try to remove a mapping from a pv_list
1.17      chris     972:  *
                    973:  * => caller should hold proper lock on pmap_main_lock
                    974:  * => pmap should be locked
1.49      thorpej   975:  * => caller should hold lock on vm_page [so that attrs can be adjusted]
1.17      chris     976:  * => caller should adjust ptp's wire_count and free PTP if needed
                    977:  * => caller should NOT adjust pmap's wire_count
1.205     uebayasi  978:  * => we return the removed pv
1.17      chris     979:  */
1.134     thorpej   980: static struct pv_entry *
1.215     uebayasi  981: pmap_remove_pv(struct vm_page_md *md, paddr_t pa, pmap_t pm, vaddr_t va)
1.17      chris     982: {
1.205     uebayasi  983:        struct pv_entry *pv, **prevptr;
1.17      chris     984:
1.134     thorpej   985:        NPDEBUG(PDB_PVDUMP,
1.215     uebayasi  986:            printf("pmap_remove_pv: pm %p, md %p, va 0x%08lx\n", pm, md, va));
1.134     thorpej   987:
1.215     uebayasi  988:        prevptr = &SLIST_FIRST(&md->pvh_list); /* prev pv_entry ptr */
1.205     uebayasi  989:        pv = *prevptr;
1.134     thorpej   990:
1.205     uebayasi  991:        while (pv) {
                    992:                if (pv->pv_pmap == pm && pv->pv_va == va) {     /* match? */
1.215     uebayasi  993:                        NPDEBUG(PDB_PVDUMP, printf("pmap_remove_pv: pm %p, md "
                    994:                            "%p, flags 0x%x\n", pm, md, pv->pv_flags));
1.205     uebayasi  995:                        if (pv->pv_flags & PVF_WIRED) {
1.156     scw       996:                                --pm->pm_stats.wired_count;
                    997:                        }
1.205     uebayasi  998:                        *prevptr = SLIST_NEXT(pv, pv_link);     /* remove it! */
1.134     thorpej   999:                        if (pm == pmap_kernel()) {
1.174     matt     1000:                                PMAPCOUNT(kernel_unmappings);
1.205     uebayasi 1001:                                if (pv->pv_flags & PVF_WRITE)
1.215     uebayasi 1002:                                        md->krw_mappings--;
1.134     thorpej  1003:                                else
1.215     uebayasi 1004:                                        md->kro_mappings--;
1.206     uebayasi 1005:                        } else {
                   1006:                                if (pv->pv_flags & PVF_WRITE)
1.215     uebayasi 1007:                                        md->urw_mappings--;
1.206     uebayasi 1008:                                else
1.215     uebayasi 1009:                                        md->uro_mappings--;
1.206     uebayasi 1010:                        }
1.174     matt     1011:
                   1012:                        PMAPCOUNT(unmappings);
                   1013: #ifdef PMAP_CACHE_VIPT
1.205     uebayasi 1014:                        if (!(pv->pv_flags & PVF_WRITE))
1.174     matt     1015:                                break;
                   1016:                        /*
                   1017:                         * If this page has had an exec mapping, then if
                   1018:                         * this was the last mapping, discard the contents,
                   1019:                         * otherwise sync the i-cache for this page.
                   1020:                         */
1.215     uebayasi 1021:                        if (PV_IS_EXEC_P(md->pvh_attrs)) {
                   1022:                                if (SLIST_EMPTY(&md->pvh_list)) {
                   1023:                                        md->pvh_attrs &= ~PVF_EXEC;
1.174     matt     1024:                                        PMAPCOUNT(exec_discarded_unmap);
                   1025:                                } else {
1.215     uebayasi 1026:                                        pmap_syncicache_page(md, pa);
1.174     matt     1027:                                        PMAPCOUNT(exec_synced_unmap);
                   1028:                                }
                   1029:                        }
                   1030: #endif /* PMAP_CACHE_VIPT */
1.17      chris    1031:                        break;
                   1032:                }
1.205     uebayasi 1033:                prevptr = &SLIST_NEXT(pv, pv_link);     /* previous pointer */
                   1034:                pv = *prevptr;                          /* advance */
1.17      chris    1035:        }
1.134     thorpej  1036:
1.182     matt     1037: #ifdef PMAP_CACHE_VIPT
                   1038:        /*
1.185     matt     1039:         * If we no longer have a WRITEABLE KENTRY at the head of the list,
                   1040:         * clear the KMOD attribute from the page.
                   1041:         */
1.215     uebayasi 1042:        if (SLIST_FIRST(&md->pvh_list) == NULL
                   1043:            || (SLIST_FIRST(&md->pvh_list)->pv_flags & PVF_KWRITE) != PVF_KWRITE)
                   1044:                md->pvh_attrs &= ~PVF_KMOD;
1.185     matt     1045:
                   1046:        /*
1.182     matt     1047:         * If this was a writeable page and there are no more writeable
1.183     matt     1048:         * mappings (ignoring KMPAGE), clear the WRITE flag and writeback
                   1049:         * the contents to memory.
1.182     matt     1050:         */
1.251     matt     1051:        if (arm_cache_prefer_mask != 0) {
                   1052:                if (md->krw_mappings + md->urw_mappings == 0)
                   1053:                        md->pvh_attrs &= ~PVF_WRITE;
                   1054:                PMAP_VALIDATE_MD_PAGE(md);
                   1055:        }
1.215     uebayasi 1056:        KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC)));
1.182     matt     1057: #endif /* PMAP_CACHE_VIPT */
                   1058:
1.205     uebayasi 1059:        return (pv);                            /* return removed pv */
1.17      chris    1060: }
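/*
 * Example (a minimal stand-alone sketch): pmap_remove_pv() above uses the
 * pointer-to-pointer idiom to unlink an entry without carrying a separate
 * "previous node" variable.  "struct node" and unlink_node() are
 * illustrative stand-ins for struct pv_entry and the real code.
 */
#if 0
struct node {
        struct node *next;
        int key;
};

static struct node *
unlink_node(struct node **headp, int key)
{
        struct node **pp = headp;       /* points at the link to patch */
        struct node *n;

        for (n = *pp; n != NULL; pp = &n->next, n = *pp) {
                if (n->key == key) {
                        *pp = n->next;  /* splice the entry out */
                        return n;       /* caller disposes of it */
                }
        }
        return NULL;
}
#endif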
                   1061:
                   1062: /*
                   1063:  *
                   1064:  * pmap_modify_pv: Update pv flags
                   1065:  *
1.49      thorpej  1066:  * => caller should hold lock on vm_page [so that attrs can be adjusted]
1.17      chris    1067:  * => caller should NOT adjust pmap's wire_count
1.29      rearnsha 1068:  * => caller must call pmap_vac_me_harder() if writable status of a page
                   1069:  *    may have changed.
1.17      chris    1070:  * => we return the old flags
                   1071:  *
1.1       matt     1072:  * Modify a physical-virtual mapping in the pv table
                   1073:  */
1.134     thorpej  1074: static u_int
1.215     uebayasi 1075: pmap_modify_pv(struct vm_page_md *md, paddr_t pa, pmap_t pm, vaddr_t va,
1.134     thorpej  1076:     u_int clr_mask, u_int set_mask)
1.1       matt     1077: {
                   1078:        struct pv_entry *npv;
                   1079:        u_int flags, oflags;
                   1080:
1.185     matt     1081:        KASSERT((clr_mask & PVF_KENTRY) == 0);
                   1082:        KASSERT((set_mask & PVF_KENTRY) == 0);
                   1083:
1.215     uebayasi 1084:        if ((npv = pmap_find_pv(md, pm, va)) == NULL)
1.134     thorpej  1085:                return (0);
                   1086:
                   1087:        NPDEBUG(PDB_PVDUMP,
1.215     uebayasi 1088:            printf("pmap_modify_pv: pm %p, md %p, clr 0x%x, set 0x%x, flags 0x%x\n", pm, md, clr_mask, set_mask, npv->pv_flags));
1.134     thorpej  1089:
1.1       matt     1090:        /*
                   1091:         * There is at least one VA mapping this page.
                   1092:         */
                   1093:
1.183     matt     1094:        if (clr_mask & (PVF_REF | PVF_MOD)) {
1.215     uebayasi 1095:                md->pvh_attrs |= set_mask & (PVF_REF | PVF_MOD);
1.183     matt     1096: #ifdef PMAP_CACHE_VIPT
1.215     uebayasi 1097:                if ((md->pvh_attrs & (PVF_DMOD|PVF_NC)) != PVF_NC)
                   1098:                        md->pvh_attrs |= PVF_DIRTY;
                   1099:                KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC)));
1.183     matt     1100: #endif
                   1101:        }
1.134     thorpej  1102:
                   1103:        oflags = npv->pv_flags;
                   1104:        npv->pv_flags = flags = (oflags & ~clr_mask) | set_mask;
                   1105:
                   1106:        if ((flags ^ oflags) & PVF_WIRED) {
                   1107:                if (flags & PVF_WIRED)
                   1108:                        ++pm->pm_stats.wired_count;
                   1109:                else
                   1110:                        --pm->pm_stats.wired_count;
                   1111:        }
                   1112:
                   1113:        if ((flags ^ oflags) & PVF_WRITE) {
                   1114:                if (pm == pmap_kernel()) {
                   1115:                        if (flags & PVF_WRITE) {
1.215     uebayasi 1116:                                md->krw_mappings++;
                   1117:                                md->kro_mappings--;
1.134     thorpej  1118:                        } else {
1.215     uebayasi 1119:                                md->kro_mappings++;
                   1120:                                md->krw_mappings--;
1.1       matt     1121:                        }
1.134     thorpej  1122:                } else {
1.206     uebayasi 1123:                        if (flags & PVF_WRITE) {
1.215     uebayasi 1124:                                md->urw_mappings++;
                   1125:                                md->uro_mappings--;
1.206     uebayasi 1126:                        } else {
1.215     uebayasi 1127:                                md->uro_mappings++;
                   1128:                                md->urw_mappings--;
1.206     uebayasi 1129:                        }
1.1       matt     1130:                }
                   1131:        }
1.174     matt     1132: #ifdef PMAP_CACHE_VIPT
1.251     matt     1133:        if (arm_cache_prefer_mask != 0) {
                   1134:                if (md->urw_mappings + md->krw_mappings == 0) {
                   1135:                        md->pvh_attrs &= ~PVF_WRITE;
                   1136:                } else {
                   1137:                        md->pvh_attrs |= PVF_WRITE;
                   1138:                }
1.247     matt     1139:        }
1.174     matt     1140:        /*
                   1141:         * We have two cases here: the first is from enter_pv (new exec
                   1142:         * page), the second is a combined pmap_remove_pv/pmap_enter_pv.
                   1143:         * Since in latter, pmap_enter_pv won't do anything, we just have
                   1144:         * to do what pmap_remove_pv would do.
                   1145:         */
1.215     uebayasi 1146:        if ((PV_IS_EXEC_P(flags) && !PV_IS_EXEC_P(md->pvh_attrs))
                   1147:            || (PV_IS_EXEC_P(md->pvh_attrs)
1.174     matt     1148:                || (!(flags & PVF_WRITE) && (oflags & PVF_WRITE)))) {
1.215     uebayasi 1149:                pmap_syncicache_page(md, pa);
1.174     matt     1150:                PMAPCOUNT(exec_synced_remap);
                   1151:        }
1.215     uebayasi 1152:        KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC)));
1.174     matt     1153: #endif
                   1154:
                   1155:        PMAPCOUNT(remappings);
1.134     thorpej  1156:
                   1157:        return (oflags);
1.1       matt     1158: }
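/*
 * Example (a minimal sketch): a typical caller clears PVF_WRITE on one
 * mapping with pmap_modify_pv() and, because the writable status may have
 * changed, follows up with pmap_vac_me_harder() as required by the comment
 * above.  downgrade_one_mapping() is an illustrative name and assumes the
 * page's metadata is already locked.
 */
#if 0
static void
downgrade_one_mapping(struct vm_page_md *md, paddr_t pa, pmap_t pm, vaddr_t va)
{
        u_int oflags;

        oflags = pmap_modify_pv(md, pa, pm, va, PVF_WRITE, 0);
        if (oflags & PVF_WRITE) {
                /* The writable status changed; recompute cacheability. */
                pmap_vac_me_harder(md, pa, pm, va);
        }
}
#endif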
                   1159:
1.134     thorpej  1160: /*
                   1161:  * Allocate an L1 translation table for the specified pmap.
                   1162:  * This is called at pmap creation time.
                   1163:  */
                   1164: static void
                   1165: pmap_alloc_l1(pmap_t pm)
1.1       matt     1166: {
1.134     thorpej  1167:        struct l1_ttable *l1;
1.242     skrll    1168:        uint8_t domain;
1.134     thorpej  1169:
                   1170:        /*
                   1171:         * Remove the L1 at the head of the LRU list
                   1172:         */
1.226     matt     1173:        mutex_spin_enter(&l1_lru_lock);
1.134     thorpej  1174:        l1 = TAILQ_FIRST(&l1_lru_list);
                   1175:        KDASSERT(l1 != NULL);
                   1176:        TAILQ_REMOVE(&l1_lru_list, l1, l1_lru);
1.1       matt     1177:
1.134     thorpej  1178:        /*
                   1179:         * Pick the first available domain number, and update
                   1180:         * the link to the next number.
                   1181:         */
                   1182:        domain = l1->l1_domain_first;
                   1183:        l1->l1_domain_first = l1->l1_domain_free[domain];
1.115     thorpej  1184:
1.134     thorpej  1185:        /*
                   1186:         * If there are still free domain numbers in this L1,
                   1187:         * put it back on the TAIL of the LRU list.
                   1188:         */
                   1189:        if (++l1->l1_domain_use_count < PMAP_DOMAINS)
                   1190:                TAILQ_INSERT_TAIL(&l1_lru_list, l1, l1_lru);
1.1       matt     1191:
1.226     matt     1192:        mutex_spin_exit(&l1_lru_lock);
1.1       matt     1193:
1.134     thorpej  1194:        /*
                   1195:         * Fix up the relevant bits in the pmap structure
                   1196:         */
                   1197:        pm->pm_l1 = l1;
1.230     matt     1198:        pm->pm_domain = domain + 1;
1.1       matt     1199: }
                   1200:
                   1201: /*
1.134     thorpej  1202:  * Free an L1 translation table.
                   1203:  * This is called at pmap destruction time.
1.1       matt     1204:  */
1.134     thorpej  1205: static void
                   1206: pmap_free_l1(pmap_t pm)
1.1       matt     1207: {
1.134     thorpej  1208:        struct l1_ttable *l1 = pm->pm_l1;
1.1       matt     1209:
1.226     matt     1210:        mutex_spin_enter(&l1_lru_lock);
1.1       matt     1211:
1.134     thorpej  1212:        /*
                   1213:         * If this L1 is currently on the LRU list, remove it.
                   1214:         */
                   1215:        if (l1->l1_domain_use_count < PMAP_DOMAINS)
                   1216:                TAILQ_REMOVE(&l1_lru_list, l1, l1_lru);
1.1       matt     1217:
                   1218:        /*
1.134     thorpej  1219:         * Free up the domain number which was allocated to the pmap
1.1       matt     1220:         */
1.258     matt     1221:        l1->l1_domain_free[pmap_domain(pm) - 1] = l1->l1_domain_first;
                   1222:        l1->l1_domain_first = pmap_domain(pm) - 1;
1.134     thorpej  1223:        l1->l1_domain_use_count--;
1.1       matt     1224:
1.134     thorpej  1225:        /*
                   1226:         * The L1 now must have at least 1 free domain, so add
                   1227:         * it back to the LRU list. If the use count is zero,
                   1228:         * put it at the head of the list, otherwise it goes
                   1229:         * to the tail.
                   1230:         */
                   1231:        if (l1->l1_domain_use_count == 0)
                   1232:                TAILQ_INSERT_HEAD(&l1_lru_list, l1, l1_lru);
                   1233:        else
                   1234:                TAILQ_INSERT_TAIL(&l1_lru_list, l1, l1_lru);
1.54      thorpej  1235:
1.226     matt     1236:        mutex_spin_exit(&l1_lru_lock);
1.134     thorpej  1237: }
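/*
 * Example (a minimal stand-alone sketch): pmap_alloc_l1()/pmap_free_l1()
 * hand out domain numbers from a free list threaded through the
 * l1_domain_free[] array itself, with l1_domain_first as the head.  The
 * sketch below shows the same index-linked free-list technique in
 * isolation; NSLOTS, slot_next[], slot_first and the slot_*() names are
 * illustrative only, and the LRU bookkeeping is omitted.
 */
#if 0
#define NSLOTS  15                      /* e.g. domains per L1 */

static uint8_t slot_next[NSLOTS];       /* successor index, threaded in place */
static uint8_t slot_first;              /* head of the free list */
static u_int slots_in_use;

static void
slot_init(void)
{
        for (u_int i = 0; i < NSLOTS; i++)
                slot_next[i] = i + 1;   /* NSLOTS terminates the list */
        slot_first = 0;
        slots_in_use = 0;
}

static int
slot_alloc(void)
{
        if (slots_in_use == NSLOTS)
                return (-1);            /* nothing free */
        const uint8_t s = slot_first;   /* take the head ... */
        slot_first = slot_next[s];      /* ... and advance to its successor */
        slots_in_use++;
        return (s);
}

static void
slot_free(uint8_t s)
{
        slot_next[s] = slot_first;      /* push back onto the head */
        slot_first = s;
        slots_in_use--;
}
#endif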
1.54      thorpej  1238:
1.157     perry    1239: static inline void
1.134     thorpej  1240: pmap_use_l1(pmap_t pm)
                   1241: {
                   1242:        struct l1_ttable *l1;
1.54      thorpej  1243:
1.134     thorpej  1244:        /*
                   1245:         * Do nothing if we're in interrupt context.
                   1246:         * Access to an L1 by the kernel pmap must not affect
                   1247:         * the LRU list.
                   1248:         */
1.171     matt     1249:        if (cpu_intr_p() || pm == pmap_kernel())
1.134     thorpej  1250:                return;
1.54      thorpej  1251:
1.134     thorpej  1252:        l1 = pm->pm_l1;
1.1       matt     1253:
1.17      chris    1254:        /*
1.134     thorpej  1255:         * If the L1 is not currently on the LRU list, just return
1.17      chris    1256:         */
1.134     thorpej  1257:        if (l1->l1_domain_use_count == PMAP_DOMAINS)
                   1258:                return;
                   1259:
1.226     matt     1260:        mutex_spin_enter(&l1_lru_lock);
1.1       matt     1261:
1.10      chris    1262:        /*
1.134     thorpej  1263:         * Check the use count again, now that we've acquired the lock
1.10      chris    1264:         */
1.134     thorpej  1265:        if (l1->l1_domain_use_count == PMAP_DOMAINS) {
1.226     matt     1266:                mutex_spin_exit(&l1_lru_lock);
1.134     thorpej  1267:                return;
                   1268:        }
1.111     thorpej  1269:
                   1270:        /*
1.134     thorpej  1271:         * Move the L1 to the back of the LRU list
1.111     thorpej  1272:         */
1.134     thorpej  1273:        TAILQ_REMOVE(&l1_lru_list, l1, l1_lru);
                   1274:        TAILQ_INSERT_TAIL(&l1_lru_list, l1, l1_lru);
1.111     thorpej  1275:
1.226     matt     1276:        mutex_spin_exit(&l1_lru_lock);
1.1       matt     1277: }
                   1278:
                   1279: /*
1.134     thorpej  1280:  * void pmap_free_l2_ptp(pt_entry_t *, paddr_t *)
1.1       matt     1281:  *
1.134     thorpej  1282:  * Free an L2 descriptor table.
1.1       matt     1283:  */
1.157     perry    1284: static inline void
1.134     thorpej  1285: #ifndef PMAP_INCLUDE_PTE_SYNC
                   1286: pmap_free_l2_ptp(pt_entry_t *l2, paddr_t pa)
                   1287: #else
1.159     thorpej  1288: pmap_free_l2_ptp(bool need_sync, pt_entry_t *l2, paddr_t pa)
1.134     thorpej  1289: #endif
1.1       matt     1290: {
1.134     thorpej  1291: #ifdef PMAP_INCLUDE_PTE_SYNC
1.174     matt     1292: #ifdef PMAP_CACHE_VIVT
1.1       matt     1293:        /*
1.134     thorpej  1294:         * Note: With a write-back cache, we may need to sync this
                   1295:         * L2 table before re-using it.
                   1296:         * This is because it may have belonged to a non-current
                   1297:         * pmap, in which case the cache syncs would have been
1.174     matt     1298:         * skipped for the pages that were being unmapped. If the
1.134     thorpej  1299:         * L2 table were then to be immediately re-allocated to
                   1300:         * the *current* pmap, it may well contain stale mappings
                   1301:         * which have not yet been cleared by a cache write-back
                   1302:         * and so would still be visible to the mmu.
1.1       matt     1303:         */
1.134     thorpej  1304:        if (need_sync)
                   1305:                PTE_SYNC_RANGE(l2, L2_TABLE_SIZE_REAL / sizeof(pt_entry_t));
1.174     matt     1306: #endif /* PMAP_CACHE_VIVT */
                   1307: #endif /* PMAP_INCLUDE_PTE_SYNC */
1.134     thorpej  1308:        pool_cache_put_paddr(&pmap_l2ptp_cache, (void *)l2, pa);
1.1       matt     1309: }
                   1310:
                   1311: /*
1.134     thorpej  1312:  * Returns a pointer to the L2 bucket associated with the specified pmap
                   1313:  * and VA, or NULL if no L2 bucket exists for the address.
1.1       matt     1314:  */
1.157     perry    1315: static inline struct l2_bucket *
1.134     thorpej  1316: pmap_get_l2_bucket(pmap_t pm, vaddr_t va)
                   1317: {
                   1318:        struct l2_dtable *l2;
                   1319:        struct l2_bucket *l2b;
                   1320:        u_short l1idx;
1.1       matt     1321:
1.134     thorpej  1322:        l1idx = L1_IDX(va);
1.1       matt     1323:
1.134     thorpej  1324:        if ((l2 = pm->pm_l2[L2_IDX(l1idx)]) == NULL ||
                   1325:            (l2b = &l2->l2_bucket[L2_BUCKET(l1idx)])->l2b_kva == NULL)
                   1326:                return (NULL);
1.1       matt     1327:
1.134     thorpej  1328:        return (l2b);
1.1       matt     1329: }
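/*
 * Example (a minimal sketch): a read-only PTE lookup built on
 * pmap_get_l2_bucket(), returning 0 when no L2 page table covers the
 * address.  lookup_pte() is an illustrative name and the caller is assumed
 * to hold the appropriate pmap lock.
 */
#if 0
static pt_entry_t
lookup_pte(pmap_t pm, vaddr_t va)
{
        struct l2_bucket *l2b;

        l2b = pmap_get_l2_bucket(pm, va);
        if (l2b == NULL)
                return (0);             /* no L2 table for this L1 slot */

        return (l2b->l2b_kva[l2pte_index(va)]);
}
#endif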
                   1330:
                   1331: /*
1.134     thorpej  1332:  * Returns a pointer to the L2 bucket associated with the specified pmap
                   1333:  * and VA.
1.1       matt     1334:  *
1.134     thorpej  1335:  * If no L2 bucket exists, perform the necessary allocations to put an L2
                   1336:  * bucket/page table in place.
1.1       matt     1337:  *
1.134     thorpej  1338:  * Note that if a new L2 bucket/page was allocated, the caller *must*
                   1339:  * increment the bucket occupancy counter appropriately *before*
                   1340:  * releasing the pmap's lock to ensure no other thread or cpu deallocates
                   1341:  * the bucket/page in the meantime.
1.1       matt     1342:  */
1.134     thorpej  1343: static struct l2_bucket *
                   1344: pmap_alloc_l2_bucket(pmap_t pm, vaddr_t va)
                   1345: {
                   1346:        struct l2_dtable *l2;
                   1347:        struct l2_bucket *l2b;
                   1348:        u_short l1idx;
                   1349:
                   1350:        l1idx = L1_IDX(va);
                   1351:
                   1352:        if ((l2 = pm->pm_l2[L2_IDX(l1idx)]) == NULL) {
                   1353:                /*
                   1354:                 * No mapping at this address, as there is
                   1355:                 * no entry in the L1 table.
                   1356:                 * Need to allocate a new l2_dtable.
                   1357:                 */
                   1358:                if ((l2 = pmap_alloc_l2_dtable()) == NULL)
                   1359:                        return (NULL);
                   1360:
                   1361:                /*
                   1362:                 * Link it into the parent pmap
                   1363:                 */
                   1364:                pm->pm_l2[L2_IDX(l1idx)] = l2;
                   1365:        }
1.1       matt     1366:
1.134     thorpej  1367:        l2b = &l2->l2_bucket[L2_BUCKET(l1idx)];
1.1       matt     1368:
1.10      chris    1369:        /*
1.134     thorpej  1370:         * Fetch pointer to the L2 page table associated with the address.
1.10      chris    1371:         */
1.134     thorpej  1372:        if (l2b->l2b_kva == NULL) {
                   1373:                pt_entry_t *ptep;
                   1374:
                   1375:                /*
                   1376:                 * No L2 page table has been allocated. Chances are, this
                   1377:                 * is because we just allocated the l2_dtable, above.
                   1378:                 */
                   1379:                if ((ptep = pmap_alloc_l2_ptp(&l2b->l2b_phys)) == NULL) {
                   1380:                        /*
                   1381:                         * Oops, no more L2 page tables available at this
                   1382:                         * time. We may need to deallocate the l2_dtable
                   1383:                         * if we allocated a new one above.
                   1384:                         */
                   1385:                        if (l2->l2_occupancy == 0) {
                   1386:                                pm->pm_l2[L2_IDX(l1idx)] = NULL;
                   1387:                                pmap_free_l2_dtable(l2);
                   1388:                        }
                   1389:                        return (NULL);
                   1390:                }
1.1       matt     1391:
1.134     thorpej  1392:                l2->l2_occupancy++;
                   1393:                l2b->l2b_kva = ptep;
                   1394:                l2b->l2b_l1idx = l1idx;
                   1395:        }
1.16      chris    1396:
1.134     thorpej  1397:        return (l2b);
1.1       matt     1398: }
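/*
 * Example (a minimal sketch): the calling convention described above.  A
 * caller that installs a new mapping must bump l2b_occupancy before the
 * pmap's lock is released.  enter_one_page() is an illustrative name; the
 * real pmap_enter() does considerably more work (pv lists, cache/TLB
 * maintenance, wiring, ...).
 */
#if 0
static int
enter_one_page(pmap_t pm, vaddr_t va, pt_entry_t npte)
{
        struct l2_bucket *l2b;
        pt_entry_t *ptep, opte;

        /* The pmap is assumed to be locked by the caller. */
        l2b = pmap_alloc_l2_bucket(pm, va);
        if (l2b == NULL)
                return (ENOMEM);

        ptep = &l2b->l2b_kva[l2pte_index(va)];
        opte = *ptep;
        if (opte == 0)
                l2b->l2b_occupancy++;   /* account for the new valid PTE */

        *ptep = npte;
        PTE_SYNC(ptep);
        return (0);
}
#endif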
                   1399:
                   1400: /*
1.134     thorpej  1401:  * One or more mappings in the specified L2 descriptor table have just been
                   1402:  * invalidated.
1.1       matt     1403:  *
1.134     thorpej  1404:  * Garbage collect the metadata and descriptor table itself if necessary.
1.1       matt     1405:  *
1.134     thorpej  1406:  * The pmap lock must be acquired when this is called (not necessary
                   1407:  * for the kernel pmap).
1.1       matt     1408:  */
1.134     thorpej  1409: static void
                   1410: pmap_free_l2_bucket(pmap_t pm, struct l2_bucket *l2b, u_int count)
1.1       matt     1411: {
1.134     thorpej  1412:        struct l2_dtable *l2;
                   1413:        pd_entry_t *pl1pd, l1pd;
                   1414:        pt_entry_t *ptep;
                   1415:        u_short l1idx;
1.1       matt     1416:
1.134     thorpej  1417:        KDASSERT(count <= l2b->l2b_occupancy);
1.1       matt     1418:
1.134     thorpej  1419:        /*
                   1420:         * Update the bucket's reference count according to how many
                   1421:         * PTEs the caller has just invalidated.
                   1422:         */
                   1423:        l2b->l2b_occupancy -= count;
1.1       matt     1424:
                   1425:        /*
1.134     thorpej  1426:         * Note:
                   1427:         *
                   1428:         * Level 2 page tables allocated to the kernel pmap are never freed
                   1429:         * as that would require checking all Level 1 page tables and
                   1430:         * removing any references to the Level 2 page table. See also the
                   1431:         * comment elsewhere about never freeing bootstrap L2 descriptors.
                   1432:         *
                   1433:         * We make do with just invalidating the mapping in the L2 table.
                   1434:         *
                   1435:         * This isn't really a big deal in practice and, in fact, leads
                   1436:         * to a performance win over time as we don't need to continually
                   1437:         * alloc/free.
1.1       matt     1438:         */
1.134     thorpej  1439:        if (l2b->l2b_occupancy > 0 || pm == pmap_kernel())
                   1440:                return;
1.1       matt     1441:
1.134     thorpej  1442:        /*
                   1443:         * There are no more valid mappings in this level 2 page table.
                   1444:         * Go ahead and NULL-out the pointer in the bucket, then
                   1445:         * free the page table.
                   1446:         */
                   1447:        l1idx = l2b->l2b_l1idx;
                   1448:        ptep = l2b->l2b_kva;
                   1449:        l2b->l2b_kva = NULL;
1.1       matt     1450:
1.258     matt     1451:        pl1pd = pmap_l1_kva(pm) + l1idx;
1.1       matt     1452:
1.134     thorpej  1453:        /*
                   1454:         * If the L1 slot matches the pmap's domain
                   1455:         * number, then invalidate it.
                   1456:         */
                   1457:        l1pd = *pl1pd & (L1_TYPE_MASK | L1_C_DOM_MASK);
1.258     matt     1458:        if (l1pd == (L1_C_DOM(pmap_domain(pm)) | L1_TYPE_C)) {
1.134     thorpej  1459:                *pl1pd = 0;
                   1460:                PTE_SYNC(pl1pd);
1.1       matt     1461:        }
                   1462:
1.134     thorpej  1463:        /*
                   1464:         * Release the L2 descriptor table back to the pool cache.
                   1465:         */
                   1466: #ifndef PMAP_INCLUDE_PTE_SYNC
                   1467:        pmap_free_l2_ptp(ptep, l2b->l2b_phys);
                   1468: #else
                   1469:        pmap_free_l2_ptp(!pmap_is_cached(pm), ptep, l2b->l2b_phys);
                   1470: #endif
                   1471:
                   1472:        /*
                   1473:         * Update the reference count in the associated l2_dtable
                   1474:         */
                   1475:        l2 = pm->pm_l2[L2_IDX(l1idx)];
                   1476:        if (--l2->l2_occupancy > 0)
                   1477:                return;
1.1       matt     1478:
1.134     thorpej  1479:        /*
                   1480:         * There are no more valid mappings in any of the Level 1
                   1481:         * slots managed by this l2_dtable. Go ahead and NULL-out
                   1482:         * the pointer in the parent pmap and free the l2_dtable.
                   1483:         */
                   1484:        pm->pm_l2[L2_IDX(l1idx)] = NULL;
                   1485:        pmap_free_l2_dtable(l2);
1.1       matt     1486: }
                   1487:
                   1488: /*
1.134     thorpej  1489:  * Pool cache constructors for L2 descriptor tables, metadata and pmap
                   1490:  * structures.
1.1       matt     1491:  */
1.134     thorpej  1492: static int
                   1493: pmap_l2ptp_ctor(void *arg, void *v, int flags)
1.1       matt     1494: {
1.134     thorpej  1495: #ifndef PMAP_INCLUDE_PTE_SYNC
                   1496:        vaddr_t va = (vaddr_t)v & ~PGOFSET;
                   1497:
                   1498:        /*
                   1499:         * The mappings for these page tables were initially made using
                   1500:         * pmap_kenter_pa() by the pool subsystem. Therefore, the cache-
                   1501:         * mode will not be right for page table mappings. To avoid
                   1502:         * polluting the pmap_kenter_pa() code with a special case for
                   1503:         * page tables, we simply fix up the cache-mode here if it's not
                   1504:         * correct.
                   1505:         */
1.262     matt     1506:        struct l2_bucket * const l2b = pmap_get_l2_bucket(pmap_kernel(), va);
1.134     thorpej  1507:        KDASSERT(l2b != NULL);
1.262     matt     1508:        pt_entry_t *ptep = &l2b->l2b_kva[l2pte_index(va)];
                   1509:        pt_entry_t opte = *ptep;
1.1       matt     1510:
1.262     matt     1511:        if ((opte & L2_S_CACHE_MASK) != pte_l2_s_cache_mode_pt) {
1.134     thorpej  1512:                /*
                   1513:                 * Page tables must have the cache-mode set to Write-Thru.
                   1514:                 */
1.262     matt     1515:                const pt_entry_t npte = (opte & ~L2_S_CACHE_MASK)
                   1516:                    | pte_l2_s_cache_mode_pt;
                   1517:                l2pte_set(ptep, npte, opte);
1.134     thorpej  1518:                PTE_SYNC(ptep);
                   1519:                cpu_tlb_flushD_SE(va);
                   1520:                cpu_cpwait();
                   1521:        }
                   1522: #endif
1.1       matt     1523:
1.134     thorpej  1524:        memset(v, 0, L2_TABLE_SIZE_REAL);
                   1525:        PTE_SYNC_RANGE(v, L2_TABLE_SIZE_REAL / sizeof(pt_entry_t));
                   1526:        return (0);
1.1       matt     1527: }
                   1528:
1.134     thorpej  1529: static int
                   1530: pmap_l2dtable_ctor(void *arg, void *v, int flags)
1.93      thorpej  1531: {
                   1532:
1.134     thorpej  1533:        memset(v, 0, sizeof(struct l2_dtable));
                   1534:        return (0);
                   1535: }
1.93      thorpej  1536:
1.134     thorpej  1537: static int
                   1538: pmap_pmap_ctor(void *arg, void *v, int flags)
                   1539: {
1.93      thorpej  1540:
1.134     thorpej  1541:        memset(v, 0, sizeof(struct pmap));
                   1542:        return (0);
1.93      thorpej  1543: }
                   1544:
1.165     scw      1545: static void
                   1546: pmap_pinit(pmap_t pm)
                   1547: {
1.257     matt     1548: #ifndef ARM_HAS_VBAR
1.165     scw      1549:        struct l2_bucket *l2b;
                   1550:
                   1551:        if (vector_page < KERNEL_BASE) {
                   1552:                /*
                   1553:                 * Map the vector page.
                   1554:                 */
                   1555:                pmap_enter(pm, vector_page, systempage.pv_pa,
1.262     matt     1556:                    VM_PROT_READ | VM_PROT_EXECUTE,
                   1557:                    VM_PROT_READ | VM_PROT_EXECUTE | PMAP_WIRED);
1.165     scw      1558:                pmap_update(pm);
                   1559:
1.258     matt     1560:                pm->pm_pl1vec = pmap_l1_kva(pm) + L1_IDX(vector_page);
1.165     scw      1561:                l2b = pmap_get_l2_bucket(pm, vector_page);
1.210     uebayasi 1562:                KDASSERT(l2b != NULL);
1.165     scw      1563:                pm->pm_l1vec = l2b->l2b_phys | L1_C_PROTO |
1.258     matt     1564:                    L1_C_DOM(pmap_domain(pm));
1.165     scw      1565:        } else
                   1566:                pm->pm_pl1vec = NULL;
1.257     matt     1567: #endif
1.165     scw      1568: }
                   1569:
1.174     matt     1570: #ifdef PMAP_CACHE_VIVT
1.93      thorpej  1571: /*
1.134     thorpej  1572:  * Since we have a virtually indexed cache, we may need to inhibit caching if
                   1573:  * there is more than one mapping and at least one of them is writable.
                   1574:  * Since we purge the cache on every context switch, we only need to check for
                   1575:  * other mappings within the same pmap, or kernel_pmap.
                   1576:  * This function is also called when a page is unmapped, to possibly reenable
                   1577:  * caching on any remaining mappings.
                   1578:  *
                   1579:  * The code implements the following logic, where:
                   1580:  *
                   1581:  * KW = # of kernel read/write pages
                   1582:  * KR = # of kernel read only pages
                   1583:  * UW = # of user read/write pages
                   1584:  * UR = # of user read only pages
                   1585:  *
                   1586:  * KC = kernel mapping is cacheable
                   1587:  * UC = user mapping is cacheable
1.93      thorpej  1588:  *
1.134     thorpej  1589:  *               KW=0,KR=0  KW=0,KR>0  KW=1,KR=0  KW>1,KR>=0
                   1590:  *             +---------------------------------------------
                   1591:  * UW=0,UR=0   | ---        KC=1       KC=1       KC=0
                   1592:  * UW=0,UR>0   | UC=1       KC=1,UC=1  KC=0,UC=0  KC=0,UC=0
                   1593:  * UW=1,UR=0   | UC=1       KC=0,UC=0  KC=0,UC=0  KC=0,UC=0
                   1594:  * UW>1,UR>=0  | UC=0       KC=0,UC=0  KC=0,UC=0  KC=0,UC=0
1.93      thorpej  1595:  */
1.111     thorpej  1596:
1.134     thorpej  1597: static const int pmap_vac_flags[4][4] = {
                   1598:        {-1,            0,              0,              PVF_KNC},
                   1599:        {0,             0,              PVF_NC,         PVF_NC},
                   1600:        {0,             PVF_NC,         PVF_NC,         PVF_NC},
                   1601:        {PVF_UNC,       PVF_NC,         PVF_NC,         PVF_NC}
                   1602: };
1.93      thorpej  1603:
1.157     perry    1604: static inline int
1.215     uebayasi 1605: pmap_get_vac_flags(const struct vm_page_md *md)
1.134     thorpej  1606: {
                   1607:        int kidx, uidx;
1.93      thorpej  1608:
1.134     thorpej  1609:        kidx = 0;
1.215     uebayasi 1610:        if (md->kro_mappings || md->krw_mappings > 1)
1.134     thorpej  1611:                kidx |= 1;
1.215     uebayasi 1612:        if (md->krw_mappings)
1.134     thorpej  1613:                kidx |= 2;
                   1614:
                   1615:        uidx = 0;
1.215     uebayasi 1616:        if (md->uro_mappings || md->urw_mappings > 1)
1.134     thorpej  1617:                uidx |= 1;
1.215     uebayasi 1618:        if (md->urw_mappings)
1.134     thorpej  1619:                uidx |= 2;
1.111     thorpej  1620:
1.134     thorpej  1621:        return (pmap_vac_flags[uidx][kidx]);
1.111     thorpej  1622: }
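/*
 * Example (a worked sketch of the table above): with one kernel read/write
 * mapping and one user read-only mapping (KW=1, KR=0, UW=0, UR=1), kidx
 * becomes 2 and uidx becomes 1, so pmap_vac_flags[1][2] == PVF_NC and every
 * mapping of the page is made non-cacheable, matching the "KC=0,UC=0" entry
 * in the table.  The counts and the vac_flags_example() name below are
 * illustrative only.
 */
#if 0
static int
vac_flags_example(void)
{
        const u_int krw = 1, kro = 0, urw = 0, uro = 1;
        int kidx = 0, uidx = 0;

        if (kro || krw > 1)
                kidx |= 1;              /* not taken: kidx stays 0 */
        if (krw)
                kidx |= 2;              /* kidx == 2 */

        if (uro || urw > 1)
                uidx |= 1;              /* uidx == 1 */
        if (urw)
                uidx |= 2;              /* not taken */

        return (pmap_vac_flags[uidx][kidx]);    /* == PVF_NC */
}
#endif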
                   1623:
1.157     perry    1624: static inline void
1.215     uebayasi 1625: pmap_vac_me_harder(struct vm_page_md *md, paddr_t pa, pmap_t pm, vaddr_t va)
1.111     thorpej  1626: {
1.134     thorpej  1627:        int nattr;
                   1628:
1.215     uebayasi 1629:        nattr = pmap_get_vac_flags(md);
1.111     thorpej  1630:
1.134     thorpej  1631:        if (nattr < 0) {
1.215     uebayasi 1632:                md->pvh_attrs &= ~PVF_NC;
1.134     thorpej  1633:                return;
                   1634:        }
1.93      thorpej  1635:
1.215     uebayasi 1636:        if (nattr == 0 && (md->pvh_attrs & PVF_NC) == 0)
1.134     thorpej  1637:                return;
1.111     thorpej  1638:
1.134     thorpej  1639:        if (pm == pmap_kernel())
1.215     uebayasi 1640:                pmap_vac_me_kpmap(md, pa, pm, va);
1.134     thorpej  1641:        else
1.215     uebayasi 1642:                pmap_vac_me_user(md, pa, pm, va);
1.134     thorpej  1643:
1.215     uebayasi 1644:        md->pvh_attrs = (md->pvh_attrs & ~PVF_NC) | nattr;
1.93      thorpej  1645: }
                   1646:
1.134     thorpej  1647: static void
1.215     uebayasi 1648: pmap_vac_me_kpmap(struct vm_page_md *md, paddr_t pa, pmap_t pm, vaddr_t va)
1.1       matt     1649: {
1.134     thorpej  1650:        u_int u_cacheable, u_entries;
                   1651:        struct pv_entry *pv;
                   1652:        pmap_t last_pmap = pm;
                   1653:
                   1654:        /*
                   1655:         * Pass one, see if there are both kernel and user pmaps for
                   1656:         * this page.  Calculate whether there are user-writable or
                   1657:         * kernel-writable pages.
                   1658:         */
                   1659:        u_cacheable = 0;
1.215     uebayasi 1660:        SLIST_FOREACH(pv, &md->pvh_list, pv_link) {
1.134     thorpej  1661:                if (pv->pv_pmap != pm && (pv->pv_flags & PVF_NC) == 0)
                   1662:                        u_cacheable++;
1.1       matt     1663:        }
                   1664:
1.215     uebayasi 1665:        u_entries = md->urw_mappings + md->uro_mappings;
1.1       matt     1666:
1.134     thorpej  1667:        /*
                   1668:         * We know we have just been updating a kernel entry, so if
                   1669:         * all user pages are already cacheable, then there is nothing
                   1670:         * further to do.
                   1671:         */
1.215     uebayasi 1672:        if (md->k_mappings == 0 && u_cacheable == u_entries)
1.134     thorpej  1673:                return;
1.1       matt     1674:
1.134     thorpej  1675:        if (u_entries) {
                   1676:                /*
                   1677:                 * Scan over the list again, for each entry, if it
                   1678:                 * might not be set correctly, call pmap_vac_me_user
                   1679:                 * to recalculate the settings.
                   1680:                 */
1.215     uebayasi 1681:                SLIST_FOREACH(pv, &md->pvh_list, pv_link) {
1.134     thorpej  1682:                        /*
                   1683:                         * We know kernel mappings will get set
                   1684:                         * correctly in other calls.  We also know
                   1685:                         * that if the pmap is the same as last_pmap
                   1686:                         * then we've just handled this entry.
                   1687:                         */
                   1688:                        if (pv->pv_pmap == pm || pv->pv_pmap == last_pmap)
                   1689:                                continue;
1.1       matt     1690:
1.134     thorpej  1691:                        /*
                   1692:                         * If there are kernel entries and this page
                   1693:                         * is writable but non-cacheable, then we can
                   1694:                         * skip this entry also.
                   1695:                         */
1.215     uebayasi 1696:                        if (md->k_mappings &&
1.134     thorpej  1697:                            (pv->pv_flags & (PVF_NC | PVF_WRITE)) ==
                   1698:                            (PVF_NC | PVF_WRITE))
                   1699:                                continue;
1.111     thorpej  1700:
1.134     thorpej  1701:                        /*
                   1702:                         * Similarly if there are no kernel-writable
                   1703:                         * entries and the page is already
                   1704:                         * read-only/cacheable.
                   1705:                         */
1.215     uebayasi 1706:                        if (md->krw_mappings == 0 &&
1.134     thorpej  1707:                            (pv->pv_flags & (PVF_NC | PVF_WRITE)) == 0)
                   1708:                                continue;
1.5       toshii   1709:
1.134     thorpej  1710:                        /*
                   1711:                         * For some of the remaining cases, we know
                   1712:                         * that we must recalculate, but for others we
                   1713:                         * can't tell if they are correct or not, so
                   1714:                         * we recalculate anyway.
                   1715:                         */
1.215     uebayasi 1716:                        pmap_vac_me_user(md, pa, (last_pmap = pv->pv_pmap), 0);
1.134     thorpej  1717:                }
1.48      chris    1718:
1.215     uebayasi 1719:                if (md->k_mappings == 0)
1.134     thorpej  1720:                        return;
1.111     thorpej  1721:        }
                   1722:
1.215     uebayasi 1723:        pmap_vac_me_user(md, pa, pm, va);
1.134     thorpej  1724: }
1.111     thorpej  1725:
1.134     thorpej  1726: static void
1.215     uebayasi 1727: pmap_vac_me_user(struct vm_page_md *md, paddr_t pa, pmap_t pm, vaddr_t va)
1.134     thorpej  1728: {
                   1729:        pmap_t kpmap = pmap_kernel();
1.184     dogcow   1730:        struct pv_entry *pv, *npv = NULL;
1.134     thorpej  1731:        u_int entries = 0;
                   1732:        u_int writable = 0;
                   1733:        u_int cacheable_entries = 0;
                   1734:        u_int kern_cacheable = 0;
                   1735:        u_int other_writable = 0;
1.48      chris    1736:
1.134     thorpej  1737:        /*
                   1738:         * Count mappings and writable mappings in this pmap.
                   1739:         * Include kernel mappings as part of our own.
                   1740:         * Keep a pointer to the first one.
                   1741:         */
1.188     matt     1742:        npv = NULL;
1.215     uebayasi 1743:        SLIST_FOREACH(pv, &md->pvh_list, pv_link) {
1.134     thorpej  1744:                /* Count mappings in the same pmap */
                   1745:                if (pm == pv->pv_pmap || kpmap == pv->pv_pmap) {
                   1746:                        if (entries++ == 0)
                   1747:                                npv = pv;
1.1       matt     1748:
1.134     thorpej  1749:                        /* Cacheable mappings */
                   1750:                        if ((pv->pv_flags & PVF_NC) == 0) {
                   1751:                                cacheable_entries++;
                   1752:                                if (kpmap == pv->pv_pmap)
                   1753:                                        kern_cacheable++;
                   1754:                        }
1.110     thorpej  1755:
1.134     thorpej  1756:                        /* Writable mappings */
                   1757:                        if (pv->pv_flags & PVF_WRITE)
                   1758:                                ++writable;
                   1759:                } else
                   1760:                if (pv->pv_flags & PVF_WRITE)
                   1761:                        other_writable = 1;
                   1762:        }
1.1       matt     1763:
1.134     thorpej  1764:        /*
                   1765:         * Enable or disable caching as necessary.
                   1766:         * Note: the first entry might be part of the kernel pmap,
                   1767:         * so we can't assume this is indicative of the state of the
                   1768:         * other (maybe non-kpmap) entries.
                   1769:         */
                   1770:        if ((entries > 1 && writable) ||
                   1771:            (entries > 0 && pm == kpmap && other_writable)) {
                   1772:                if (cacheable_entries == 0)
                   1773:                        return;
1.1       matt     1774:
1.183     matt     1775:                for (pv = npv; pv; pv = SLIST_NEXT(pv, pv_link)) {
1.134     thorpej  1776:                        if ((pm != pv->pv_pmap && kpmap != pv->pv_pmap) ||
                   1777:                            (pv->pv_flags & PVF_NC))
                   1778:                                continue;
1.1       matt     1779:
1.134     thorpej  1780:                        pv->pv_flags |= PVF_NC;
1.26      rearnsha 1781:
1.262     matt     1782:                        struct l2_bucket * const l2b
                   1783:                            = pmap_get_l2_bucket(pv->pv_pmap, pv->pv_va);
1.210     uebayasi 1784:                        KDASSERT(l2b != NULL);
1.262     matt     1785:                        pt_entry_t * const ptep
                   1786:                            = &l2b->l2b_kva[l2pte_index(pv->pv_va)];
                   1787:                        const pt_entry_t opte = *ptep;
                   1788:                        pt_entry_t npte = opte & ~L2_S_CACHE_MASK;
1.134     thorpej  1789:
1.259     matt     1790:                        if ((va != pv->pv_va || pm != pv->pv_pmap)
1.262     matt     1791:                            && l2pte_valid(npte)) {
1.174     matt     1792: #ifdef PMAP_CACHE_VIVT
1.259     matt     1793:                                pmap_cache_wbinv_page(pv->pv_pmap, pv->pv_va,
                   1794:                                    true, pv->pv_flags);
1.174     matt     1795: #endif
1.259     matt     1796:                                pmap_tlb_flush_SE(pv->pv_pmap, pv->pv_va,
                   1797:                                    pv->pv_flags);
1.134     thorpej  1798:                        }
1.1       matt     1799:
1.262     matt     1800:                        l2pte_set(ptep, npte, opte);
1.134     thorpej  1801:                        PTE_SYNC_CURRENT(pv->pv_pmap, ptep);
                   1802:                }
                   1803:                cpu_cpwait();
                   1804:        } else
                   1805:        if (entries > cacheable_entries) {
1.1       matt     1806:                /*
1.134     thorpej  1807:                 * Turn caching back on for some pages.  If it is a kernel
                   1808:                 * page, only do so if there are no other writable pages.
1.1       matt     1809:                 */
1.183     matt     1810:                for (pv = npv; pv; pv = SLIST_NEXT(pv, pv_link)) {
1.134     thorpej  1811:                        if (!(pv->pv_flags & PVF_NC) || (pm != pv->pv_pmap &&
                   1812:                            (kpmap != pv->pv_pmap || other_writable)))
                   1813:                                continue;
                   1814:
                   1815:                        pv->pv_flags &= ~PVF_NC;
1.1       matt     1816:
1.262     matt     1817:                        struct l2_bucket * const l2b
                   1818:                            = pmap_get_l2_bucket(pv->pv_pmap, pv->pv_va);
1.210     uebayasi 1819:                        KDASSERT(l2b != NULL);
1.262     matt     1820:                        pt_entry_t * const ptep
                   1821:                            = &l2b->l2b_kva[l2pte_index(pv->pv_va)];
                   1822:                        const pt_entry_t opte = *ptep;
                   1823:                        pt_entry_t npte = (opte & ~L2_S_CACHE_MASK)
                   1824:                            | pte_l2_s_cache_mode;
1.134     thorpej  1825:
1.262     matt     1826:                        if (l2pte_valid(opte)) {
1.259     matt     1827:                                pmap_tlb_flush_SE(pv->pv_pmap, pv->pv_va,
                   1828:                                    pv->pv_flags);
1.134     thorpej  1829:                        }
1.1       matt     1830:
1.262     matt     1831:                        l2pte_set(ptep, npte, opte);
1.134     thorpej  1832:                        PTE_SYNC_CURRENT(pv->pv_pmap, ptep);
                   1833:                }
1.111     thorpej  1834:        }
1.1       matt     1835: }
1.174     matt     1836: #endif
                   1837:
                   1838: #ifdef PMAP_CACHE_VIPT
                   1839: static void
1.215     uebayasi 1840: pmap_vac_me_harder(struct vm_page_md *md, paddr_t pa, pmap_t pm, vaddr_t va)
1.174     matt     1841: {
1.182     matt     1842:        struct pv_entry *pv;
1.174     matt     1843:        vaddr_t tst_mask;
                   1844:        bool bad_alias;
1.183     matt     1845:        const u_int
1.215     uebayasi 1846:            rw_mappings = md->urw_mappings + md->krw_mappings,
                   1847:            ro_mappings = md->uro_mappings + md->kro_mappings;
1.174     matt     1848:
                   1849:        /* do we need to do anything? */
                   1850:        if (arm_cache_prefer_mask == 0)
                   1851:                return;
                   1852:
1.215     uebayasi 1853:        NPDEBUG(PDB_VAC, printf("pmap_vac_me_harder: md=%p, pmap=%p va=%08lx\n",
                   1854:            md, pm, va));
1.174     matt     1855:
1.182     matt     1856:        KASSERT(!va || pm);
1.215     uebayasi 1857:        KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC)));
1.174     matt     1858:
                   1859:        /* Already a conflict? */
1.215     uebayasi 1860:        if (__predict_false(md->pvh_attrs & PVF_NC)) {
1.174     matt     1861:                /* just an add, things are already non-cached */
1.215     uebayasi 1862:                KASSERT(!(md->pvh_attrs & PVF_DIRTY));
                   1863:                KASSERT(!(md->pvh_attrs & PVF_MULTCLR));
1.174     matt     1864:                bad_alias = false;
                   1865:                if (va) {
                   1866:                        PMAPCOUNT(vac_color_none);
                   1867:                        bad_alias = true;
1.215     uebayasi 1868:                        KASSERT((rw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE));
1.174     matt     1869:                        goto fixup;
                   1870:                }
1.215     uebayasi 1871:                pv = SLIST_FIRST(&md->pvh_list);
1.174     matt     1872:                /* the list can't be empty; if it were, the page would be cacheable */
1.215     uebayasi 1873:                if (md->pvh_attrs & PVF_KMPAGE) {
                   1874:                        tst_mask = md->pvh_attrs;
1.174     matt     1875:                } else {
                   1876:                        KASSERT(pv);
                   1877:                        tst_mask = pv->pv_va;
1.183     matt     1878:                        pv = SLIST_NEXT(pv, pv_link);
1.174     matt     1879:                }
1.179     matt     1880:                /*
                   1881:                 * Only check for a bad alias if we have writable mappings.
                   1882:                 */
1.183     matt     1883:                tst_mask &= arm_cache_prefer_mask;
1.251     matt     1884:                if (rw_mappings > 0) {
1.183     matt     1885:                        for (; pv && !bad_alias; pv = SLIST_NEXT(pv, pv_link)) {
1.179     matt     1886:                                /* if there's a bad alias, stop checking. */
                   1887:                                if (tst_mask != (pv->pv_va & arm_cache_prefer_mask))
                   1888:                                        bad_alias = true;
                   1889:                        }
1.215     uebayasi 1890:                        md->pvh_attrs |= PVF_WRITE;
1.183     matt     1891:                        if (!bad_alias)
1.215     uebayasi 1892:                                md->pvh_attrs |= PVF_DIRTY;
1.183     matt     1893:                } else {
1.194     matt     1894:                        /*
                   1895:                         * We have only read-only mappings.  Let's see if there
                   1896:                         * are multiple colors in use or if we mapped a KMPAGE.
                   1897:                         * If the latter, we have a bad alias.  If the former,
                   1898:                         * we need to remember that.
                   1899:                         */
                   1900:                        for (; pv; pv = SLIST_NEXT(pv, pv_link)) {
                   1901:                                if (tst_mask != (pv->pv_va & arm_cache_prefer_mask)) {
1.215     uebayasi 1902:                                        if (md->pvh_attrs & PVF_KMPAGE)
1.194     matt     1903:                                                bad_alias = true;
                   1904:                                        break;
                   1905:                                }
                   1906:                        }
1.215     uebayasi 1907:                        md->pvh_attrs &= ~PVF_WRITE;
1.194     matt     1908:                        /*
                   1909:                         * No KMPAGE and we exited early, so we must have
                   1910:                         * multiple color mappings.
                   1911:                         */
                   1912:                        if (!bad_alias && pv != NULL)
1.215     uebayasi 1913:                                md->pvh_attrs |= PVF_MULTCLR;
1.174     matt     1914:                }
1.194     matt     1915:
1.174     matt     1916:                /* If no conflicting colors, set everything back to cached */
                   1917:                if (!bad_alias) {
1.183     matt     1918: #ifdef DEBUG
1.215     uebayasi 1919:                        if ((md->pvh_attrs & PVF_WRITE)
1.183     matt     1920:                            || ro_mappings < 2) {
1.215     uebayasi 1921:                                SLIST_FOREACH(pv, &md->pvh_list, pv_link)
1.183     matt     1922:                                        KDASSERT(((tst_mask ^ pv->pv_va) & arm_cache_prefer_mask) == 0);
                   1923:                        }
                   1924: #endif
1.215     uebayasi 1925:                        md->pvh_attrs &= (PAGE_SIZE - 1) & ~PVF_NC;
                   1926:                        md->pvh_attrs |= tst_mask | PVF_COLORED;
1.185     matt     1927:                        /*
                   1928:                         * Restore DIRTY bit if page is modified
                   1929:                         */
1.215     uebayasi 1930:                        if (md->pvh_attrs & PVF_DMOD)
                   1931:                                md->pvh_attrs |= PVF_DIRTY;
1.183     matt     1932:                        PMAPCOUNT(vac_color_restore);
1.174     matt     1933:                } else {
1.215     uebayasi 1934:                        KASSERT(SLIST_FIRST(&md->pvh_list) != NULL);
                   1935:                        KASSERT(SLIST_NEXT(SLIST_FIRST(&md->pvh_list), pv_link) != NULL);
1.174     matt     1936:                }
1.215     uebayasi 1937:                KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC)));
                   1938:                KASSERT((rw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE));
1.174     matt     1939:        } else if (!va) {
1.251     matt     1940:                KASSERT(pmap_is_page_colored_p(md));
1.215     uebayasi 1941:                KASSERT(!(md->pvh_attrs & PVF_WRITE)
                   1942:                    || (md->pvh_attrs & PVF_DIRTY));
1.194     matt     1943:                if (rw_mappings == 0) {
1.215     uebayasi 1944:                        md->pvh_attrs &= ~PVF_WRITE;
1.194     matt     1945:                        if (ro_mappings == 1
1.215     uebayasi 1946:                            && (md->pvh_attrs & PVF_MULTCLR)) {
1.194     matt     1947:                                /*
                   1948:                                 * If this is the last readonly mapping
                   1949:                                 * but it doesn't match the current color
                   1950:                                 * for the page, change the current color
                   1951:                                 * to match this last readonly mapping.
                   1952:                                 */
1.215     uebayasi 1953:                                pv = SLIST_FIRST(&md->pvh_list);
                   1954:                                tst_mask = (md->pvh_attrs ^ pv->pv_va)
1.194     matt     1955:                                    & arm_cache_prefer_mask;
                   1956:                                if (tst_mask) {
1.215     uebayasi 1957:                                        md->pvh_attrs ^= tst_mask;
1.194     matt     1958:                                        PMAPCOUNT(vac_color_change);
                   1959:                                }
                   1960:                        }
                   1961:                }
1.215     uebayasi 1962:                KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC)));
                   1963:                KASSERT((rw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE));
1.174     matt     1964:                return;
1.215     uebayasi 1965:        } else if (!pmap_is_page_colored_p(md)) {
1.174     matt     1966:                /* not colored so we just use its color */
1.215     uebayasi 1967:                KASSERT(md->pvh_attrs & (PVF_WRITE|PVF_DIRTY));
                   1968:                KASSERT(!(md->pvh_attrs & PVF_MULTCLR));
1.174     matt     1969:                PMAPCOUNT(vac_color_new);
1.215     uebayasi 1970:                md->pvh_attrs &= PAGE_SIZE - 1;
                   1971:                md->pvh_attrs |= PVF_COLORED
1.183     matt     1972:                    | (va & arm_cache_prefer_mask)
                   1973:                    | (rw_mappings > 0 ? PVF_WRITE : 0);
1.215     uebayasi 1974:                KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC)));
                   1975:                KASSERT((rw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE));
1.174     matt     1976:                return;
1.215     uebayasi 1977:        } else if (((md->pvh_attrs ^ va) & arm_cache_prefer_mask) == 0) {
1.182     matt     1978:                bad_alias = false;
1.183     matt     1979:                if (rw_mappings > 0) {
1.182     matt     1980:                        /*
1.194     matt     1981:                         * We now have writeable mappings and if we have
                   1982:                         * readonly mappings in more than once color, we have
                   1983:                         * an aliasing problem.  Regardless mark the page as
                   1984:                         * writeable.
1.182     matt     1985:                         */
1.215     uebayasi 1986:                        if (md->pvh_attrs & PVF_MULTCLR) {
1.194     matt     1987:                                if (ro_mappings < 2) {
                   1988:                                        /*
                   1989:                                         * If we only have less than two
                   1990:                                         * read-only mappings, just flush the
                   1991:                                         * non-primary colors from the cache.
                   1992:                                         */
1.215     uebayasi 1993:                                        pmap_flush_page(md, pa,
1.194     matt     1994:                                            PMAP_FLUSH_SECONDARY);
                   1995:                                } else {
                   1996:                                        bad_alias = true;
1.182     matt     1997:                                }
                   1998:                        }
1.215     uebayasi 1999:                        md->pvh_attrs |= PVF_WRITE;
1.182     matt     2000:                }
                   2001:                /* If no conflicting colors, set everything back to cached */
                   2002:                if (!bad_alias) {
1.183     matt     2003: #ifdef DEBUG
                   2004:                        if (rw_mappings > 0
1.215     uebayasi 2005:                            || (md->pvh_attrs & PVF_KMPAGE)) {
                   2006:                                tst_mask = md->pvh_attrs & arm_cache_prefer_mask;
                   2007:                                SLIST_FOREACH(pv, &md->pvh_list, pv_link)
1.183     matt     2008:                                        KDASSERT(((tst_mask ^ pv->pv_va) & arm_cache_prefer_mask) == 0);
                   2009:                        }
                   2010: #endif
1.215     uebayasi 2011:                        if (SLIST_EMPTY(&md->pvh_list))
1.182     matt     2012:                                PMAPCOUNT(vac_color_reuse);
                   2013:                        else
                   2014:                                PMAPCOUNT(vac_color_ok);
1.183     matt     2015:
1.182     matt     2016:                        /* matching color, just return */
1.215     uebayasi 2017:                        KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC)));
                   2018:                        KASSERT((rw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE));
1.182     matt     2019:                        return;
                   2020:                }
1.215     uebayasi 2021:                KASSERT(SLIST_FIRST(&md->pvh_list) != NULL);
                   2022:                KASSERT(SLIST_NEXT(SLIST_FIRST(&md->pvh_list), pv_link) != NULL);
1.182     matt     2023:
                   2024:                /* color conflict.  evict from cache. */
                   2025:
1.215     uebayasi 2026:                pmap_flush_page(md, pa, PMAP_FLUSH_PRIMARY);
                   2027:                md->pvh_attrs &= ~PVF_COLORED;
                   2028:                md->pvh_attrs |= PVF_NC;
                   2029:                KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC)));
                   2030:                KASSERT(!(md->pvh_attrs & PVF_MULTCLR));
1.183     matt     2031:                PMAPCOUNT(vac_color_erase);
                   2032:        } else if (rw_mappings == 0
1.215     uebayasi 2033:                   && (md->pvh_attrs & PVF_KMPAGE) == 0) {
                   2034:                KASSERT((md->pvh_attrs & PVF_WRITE) == 0);
1.183     matt     2035:
                   2036:                /*
                   2037:                 * If the page has dirty cache lines, clean it.
                   2038:                 */
1.215     uebayasi 2039:                if (md->pvh_attrs & PVF_DIRTY)
                   2040:                        pmap_flush_page(md, pa, PMAP_CLEAN_PRIMARY);
1.183     matt     2041:
1.179     matt     2042:                /*
1.183     matt     2043:                 * If this is the first remapping (we know that there are no
                   2044:                 * writeable mappings), then this is a simple color change.
                   2045:                 * Otherwise this is a secondary r/o mapping, which means
                   2046:                 * we don't have to do anything.
1.179     matt     2047:                 */
1.183     matt     2048:                if (ro_mappings == 1) {
1.215     uebayasi 2049:                        KASSERT(((md->pvh_attrs ^ va) & arm_cache_prefer_mask) != 0);
                   2050:                        md->pvh_attrs &= PAGE_SIZE - 1;
                   2051:                        md->pvh_attrs |= (va & arm_cache_prefer_mask);
1.183     matt     2052:                        PMAPCOUNT(vac_color_change);
                   2053:                } else {
                   2054:                        PMAPCOUNT(vac_color_blind);
                   2055:                }
1.215     uebayasi 2056:                md->pvh_attrs |= PVF_MULTCLR;
                   2057:                KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC)));
                   2058:                KASSERT((rw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE));
1.174     matt     2059:                return;
                   2060:        } else {
1.183     matt     2061:                if (rw_mappings > 0)
1.215     uebayasi 2062:                        md->pvh_attrs |= PVF_WRITE;
1.182     matt     2063:
1.174     matt     2064:                /* color conflict.  evict from cache. */
1.215     uebayasi 2065:                pmap_flush_page(md, pa, PMAP_FLUSH_PRIMARY);
1.174     matt     2066:
                   2067:                /* the list can't be empty because this was an enter/modify */
1.215     uebayasi 2068:                pv = SLIST_FIRST(&md->pvh_list);
                   2069:                if ((md->pvh_attrs & PVF_KMPAGE) == 0) {
1.183     matt     2070:                        KASSERT(pv);
                   2071:                        /*
                   2072:                         * If there's only one mapped page, change color to the
1.185     matt     2073:                         * page's new color and return.  Restore the DIRTY bit
                   2074:                         * that was erased by pmap_flush_page.
1.183     matt     2075:                         */
                   2076:                        if (SLIST_NEXT(pv, pv_link) == NULL) {
1.215     uebayasi 2077:                                md->pvh_attrs &= PAGE_SIZE - 1;
                   2078:                                md->pvh_attrs |= (va & arm_cache_prefer_mask);
                   2079:                                if (md->pvh_attrs & PVF_DMOD)
                   2080:                                        md->pvh_attrs |= PVF_DIRTY;
1.183     matt     2081:                                PMAPCOUNT(vac_color_change);
1.215     uebayasi 2082:                                KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC)));
                   2083:                                KASSERT((rw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE));
                   2084:                                KASSERT(!(md->pvh_attrs & PVF_MULTCLR));
1.183     matt     2085:                                return;
                   2086:                        }
1.174     matt     2087:                }
                   2088:                bad_alias = true;
1.215     uebayasi 2089:                md->pvh_attrs &= ~PVF_COLORED;
                   2090:                md->pvh_attrs |= PVF_NC;
1.174     matt     2091:                PMAPCOUNT(vac_color_erase);
1.215     uebayasi 2092:                KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC)));
1.174     matt     2093:        }
                   2094:
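                                 /*
                                  * Only the branches above that changed the page's colour
                                  * state fall through (or jump) to the fixup loop below;
                                  * every branch that left the state consistent has already
                                  * returned.
                                  */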
                   2095:   fixup:
1.215     uebayasi 2096:        KASSERT((rw_mappings == 0) == !(md->pvh_attrs & PVF_WRITE));
1.174     matt     2097:
                   2098:        /*
                   2099:         * Turn caching on/off for all pages.
                   2100:         */
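                                 /*
                                  * For each mapping: drop the cache bits from the PTE, mark
                                  * the pv non-cacheable (bad alias) or re-enable caching
                                  * (no alias), flush the TLB entry if the mapping is valid,
                                  * then write and sync the new PTE.
                                  */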
1.215     uebayasi 2101:        SLIST_FOREACH(pv, &md->pvh_list, pv_link) {
1.262     matt     2102:                struct l2_bucket * const l2b = pmap_get_l2_bucket(pv->pv_pmap,
                   2103:                    pv->pv_va);
1.210     uebayasi 2104:                KDASSERT(l2b != NULL);
1.262     matt     2105:                pt_entry_t * const ptep = &l2b->l2b_kva[l2pte_index(pv->pv_va)];
                   2106:                const pt_entry_t opte = *ptep;
                   2107:                pt_entry_t npte = opte & ~L2_S_CACHE_MASK;
1.174     matt     2108:                if (bad_alias) {
                   2109:                        pv->pv_flags |= PVF_NC;
                   2110:                } else {
                   2111:                        pv->pv_flags &= ~PVF_NC;
1.262     matt     2112:                        npte |= pte_l2_s_cache_mode;
1.174     matt     2113:                }
1.183     matt     2114:
1.262     matt     2115:                if (opte == npte)       /* only update if there's a change */
1.174     matt     2116:                        continue;
                   2117:
1.262     matt     2118:                if (l2pte_valid(npte)) {
                   2119:                        pmap_tlb_flush_SE(pv->pv_pmap, pv->pv_va, pv->pv_flags);
1.174     matt     2120:                }
                   2121:
1.262     matt     2122:                l2pte_set(ptep, npte, opte);
1.174     matt     2123:                PTE_SYNC_CURRENT(pv->pv_pmap, ptep);
                   2124:        }
                   2125: }
                   2126: #endif /* PMAP_CACHE_VIPT */
                   2127:
1.1       matt     2128:
                   2129: /*
1.134     thorpej  2130:  * Modify pte bits for all ptes corresponding to the given physical address.
                   2131:  * We use `maskbits' rather than `clearbits' because we're always passing
                   2132:  * constants and the latter would require an extra inversion at run-time.
1.1       matt     2133:  */
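                         /*
                          * Illustrative usage (not taken from this file): a caller revoking
                          * write access and clearing the modified attribute would pass the
                          * bits to clear directly, e.g.
                          *
                          *      pmap_clearbit(md, pa, PVF_MOD | PVF_WRITE);
                          *
                          * so that a constant mask can be applied with a single AND-NOT.
                          */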
1.134     thorpej  2134: static void
1.215     uebayasi 2135: pmap_clearbit(struct vm_page_md *md, paddr_t pa, u_int maskbits)
1.1       matt     2136: {
1.134     thorpej  2137:        struct pv_entry *pv;
                   2138:        pmap_t pm;
                   2139:        vaddr_t va;
                   2140:        u_int oflags;
1.174     matt     2141: #ifdef PMAP_CACHE_VIPT
1.215     uebayasi 2142:        const bool want_syncicache = PV_IS_EXEC_P(md->pvh_attrs);
1.262     matt     2143:        bool need_vac_me_harder = false;
1.174     matt     2144:        bool need_syncicache = false;
                   2145: #endif
1.1       matt     2146:
1.134     thorpej  2147:        NPDEBUG(PDB_BITS,
1.215     uebayasi 2148:            printf("pmap_clearbit: md %p mask 0x%x\n",
                   2149:            md, maskbits));
1.1       matt     2150:
1.174     matt     2151: #ifdef PMAP_CACHE_VIPT
                   2152:        /*
                   2153:         * If we might want to sync the I-cache and we've modified it,
                   2154:         * then we know we definitely need to sync or discard it.
                   2155:         */
1.262     matt     2156:        if (want_syncicache) {
1.215     uebayasi 2157:                need_syncicache = md->pvh_attrs & PVF_MOD;
1.262     matt     2158:        }
1.174     matt     2159: #endif
1.17      chris    2160:        /*
1.134     thorpej  2161:         * Clear saved attributes (modify, reference)
1.17      chris    2162:         */
1.215     uebayasi 2163:        md->pvh_attrs &= ~(maskbits & (PVF_MOD | PVF_REF));
1.134     thorpej  2164:
1.215     uebayasi 2165:        if (SLIST_EMPTY(&md->pvh_list)) {
1.262     matt     2166: #if defined(PMAP_CACHE_VIPT)
1.174     matt     2167:                if (need_syncicache) {
                   2168:                        /*
                   2169:                         * No one has it mapped, so just discard it.  The next
                   2170:                         * exec remapping will cause it to be synced.
                   2171:                         */
1.215     uebayasi 2172:                        md->pvh_attrs &= ~PVF_EXEC;
1.174     matt     2173:                        PMAPCOUNT(exec_discarded_clearbit);
                   2174:                }
                   2175: #endif
1.17      chris    2176:                return;
1.1       matt     2177:        }
                   2178:
1.17      chris    2179:        /*
1.134     thorpej  2180:         * Loop over all current mappings, setting/clearing as appropriate
1.17      chris    2181:         */
1.215     uebayasi 2182:        SLIST_FOREACH(pv, &md->pvh_list, pv_link) {
1.134     thorpej  2183:                va = pv->pv_va;
                   2184:                pm = pv->pv_pmap;
                   2185:                oflags = pv->pv_flags;
1.185     matt     2186:                /*
                   2187:                 * Kernel entries are unmanaged and as such not to be changed.
                   2188:                 */
                   2189:                if (oflags & PVF_KENTRY)
                   2190:                        continue;
1.134     thorpej  2191:                pv->pv_flags &= ~maskbits;
1.48      chris    2192:
1.134     thorpej  2193:                pmap_acquire_pmap_lock(pm);
1.48      chris    2194:
1.262     matt     2195:                struct l2_bucket * const l2b = pmap_get_l2_bucket(pm, va);
1.134     thorpej  2196:                KDASSERT(l2b != NULL);
1.1       matt     2197:
1.262     matt     2198:                pt_entry_t * const ptep = &l2b->l2b_kva[l2pte_index(va)];
                   2199:                const pt_entry_t opte = *ptep;
                   2200:                pt_entry_t npte = opte;
1.114     thorpej  2201:
1.134     thorpej  2202:                NPDEBUG(PDB_BITS,
                   2203:                    printf(
                   2204:                    "pmap_clearbit: pv %p, pm %p, va 0x%08lx, flag 0x%x\n",
                   2205:                    pv, pv->pv_pmap, pv->pv_va, oflags));
1.114     thorpej  2206:
1.134     thorpej  2207:                if (maskbits & (PVF_WRITE|PVF_MOD)) {
1.174     matt     2208: #ifdef PMAP_CACHE_VIVT
1.134     thorpej  2209:                        if ((pv->pv_flags & PVF_NC)) {
                   2210:                                /*
                   2211:                                 * Entry is not cacheable:
                   2212:                                 *
                   2213:                                 * Don't turn caching on again if this is a
                   2214:                                 * modified-bit emulation. This would be
                   2215:                                 * inconsistent with the settings created by
                   2216:                                 * pmap_vac_me_harder(). Otherwise, it's safe
                   2217:                                 * to re-enable caching.
                   2218:                                 *
                   2219:                                 * There's no need to call pmap_vac_me_harder()
                   2220:                                 * here: all pages are losing their write
                   2221:                                 * permission.
                   2222:                                 */
                   2223:                                if (maskbits & PVF_WRITE) {
                   2224:                                        npte |= pte_l2_s_cache_mode;
                   2225:                                        pv->pv_flags &= ~PVF_NC;
                   2226:                                }
                   2227:                        } else
1.214     jmcneill 2228:                        if (l2pte_writable_p(opte)) {
1.134     thorpej  2229:                                /*
                   2230:                                 * Entry is writable/cacheable: check if the
                   2231:                                 * pmap is current; if it is, flush the page,
                   2232:                                 * otherwise it won't be in the cache.
                   2233:                                 */
1.259     matt     2234:                                pmap_cache_wbinv_page(pm, pv->pv_va,
                   2235:                                    (maskbits & PVF_REF) != 0,
                   2236:                                    oflags|PVF_WRITE);
1.134     thorpej  2237:                        }
1.174     matt     2238: #endif
1.111     thorpej  2239:
1.134     thorpej  2240:                        /* make the pte read only */
1.214     jmcneill 2241:                        npte = l2pte_set_readonly(npte);
1.111     thorpej  2242:
1.174     matt     2243:                        if (maskbits & oflags & PVF_WRITE) {
1.134     thorpej  2244:                                /*
                   2245:                                 * Keep alias accounting up to date
                   2246:                                 */
                   2247:                                if (pv->pv_pmap == pmap_kernel()) {
1.215     uebayasi 2248:                                        md->krw_mappings--;
                   2249:                                        md->kro_mappings++;
1.174     matt     2250:                                } else {
1.215     uebayasi 2251:                                        md->urw_mappings--;
                   2252:                                        md->uro_mappings++;
1.134     thorpej  2253:                                }
1.174     matt     2254: #ifdef PMAP_CACHE_VIPT
1.251     matt     2255:                                if (arm_cache_prefer_mask != 0) {
                   2256:                                        if (md->urw_mappings + md->krw_mappings == 0) {
                   2257:                                                md->pvh_attrs &= ~PVF_WRITE;
                   2258:                                        } else {
                   2259:                                                PMAP_VALIDATE_MD_PAGE(md);
                   2260:                                        }
1.247     matt     2261:                                }
1.174     matt     2262:                                if (want_syncicache)
                   2263:                                        need_syncicache = true;
1.183     matt     2264:                                need_vac_me_harder = true;
1.174     matt     2265: #endif
1.134     thorpej  2266:                        }
                   2267:                }
1.1       matt     2268:
1.134     thorpej  2269:                if (maskbits & PVF_REF) {
1.259     matt     2270:                        if ((pv->pv_flags & PVF_NC) == 0
                   2271:                            && (maskbits & (PVF_WRITE|PVF_MOD)) == 0
                   2272:                            && l2pte_valid(npte)) {
1.183     matt     2273: #ifdef PMAP_CACHE_VIVT
1.134     thorpej  2274:                                /*
                   2275:                                 * Check npte here; we may have already
                   2276:                                 * done the wbinv above, and the validity
                   2277:                                 * of the PTE is the same for opte and
                   2278:                                 * npte.
                   2279:                                 */
1.259     matt     2280:                                pmap_cache_wbinv_page(pm, pv->pv_va, true,
                   2281:                                    oflags);
1.183     matt     2282: #endif
1.134     thorpej  2283:                        }
1.1       matt     2284:
1.134     thorpej  2285:                        /*
                   2286:                         * Make the PTE invalid so that we will take a
                   2287:                         * page fault the next time the mapping is
                   2288:                         * referenced.
                   2289:                         */
                   2290:                        npte &= ~L2_TYPE_MASK;
                   2291:                        npte |= L2_TYPE_INV;
                   2292:                }
1.1       matt     2293:
1.134     thorpej  2294:                if (npte != opte) {
1.262     matt     2295:                        l2pte_set(ptep, npte, opte);
1.134     thorpej  2296:                        PTE_SYNC(ptep);
1.262     matt     2297:
1.134     thorpej  2298:                        /* Flush the TLB entry if a current pmap. */
1.259     matt     2299:                        pmap_tlb_flush_SE(pm, pv->pv_va, oflags);
1.134     thorpej  2300:                }
1.1       matt     2301:
1.134     thorpej  2302:                pmap_release_pmap_lock(pm);
1.133     thorpej  2303:
1.134     thorpej  2304:                NPDEBUG(PDB_BITS,
                   2305:                    printf("pmap_clearbit: pm %p va 0x%lx opte 0x%08x npte 0x%08x\n",
                   2306:                    pm, va, opte, npte));
                   2307:        }
1.133     thorpej  2308:
1.174     matt     2309: #ifdef PMAP_CACHE_VIPT
                   2310:        /*
                   2311:         * If we need to sync the I-cache and we haven't done it yet, do it.
                   2312:         */
1.262     matt     2313:        if (need_syncicache) {
1.215     uebayasi 2314:                pmap_syncicache_page(md, pa);
1.174     matt     2315:                PMAPCOUNT(exec_synced_clearbit);
                   2316:        }
1.262     matt     2317:
1.183     matt     2318:        /*
1.187     skrll    2319:         * If we are changing this to read-only, we need to call vac_me_harder
1.183     matt     2320:  * so we can change all the read-only pages to cacheable.  We treat
                   2321:         * this as a page deletion.
                   2322:         */
                   2323:        if (need_vac_me_harder) {
1.215     uebayasi 2324:                if (md->pvh_attrs & PVF_NC)
                   2325:                        pmap_vac_me_harder(md, pa, NULL, 0);
1.183     matt     2326:        }
1.174     matt     2327: #endif
1.1       matt     2328: }
                   2329:
                   2330: /*
1.134     thorpej  2331:  * pmap_clean_page()
                   2332:  *
                   2333:  * This is a local function used to work out the best strategy to clean
                   2334:  * a single page referenced by its entry in the PV table. It's used by
                   2335:  * pmap_copy_page, pmap_zero_page and maybe some others later on.
                   2336:  *
                   2337:  * Its policy is effectively:
                   2338:  *  o If there are no mappings, we don't bother doing anything with the cache.
                   2339:  *  o If there is one mapping, we clean just that page.
                   2340:  *  o If there are multiple mappings, we clean the entire cache.
                   2341:  *
                   2342:  * So that some functions can be further optimised, it returns 0 if it didn't
                   2343:  * clean the entire cache, or 1 if it did.
                   2344:  *
                   2345:  * XXX One bug in this routine is that if the pv_entry has a single page
                   2346:  * mapped at 0x00000000 a whole cache clean will be performed rather than
                   2347:  * just the 1 page.  Since this should not occur in everyday use, and even if
                   2348:  * it does, it merely results in a less efficient clean for the page.
1.1       matt     2349:  */
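                         /*
                          * Concretely: a single cacheable mapping belonging to the current
                          * pmap gets a single-page write-back/invalidate; two or more such
                          * mappings force a clean of the entire data cache, in which case
                          * the function returns 1.
                          */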
1.174     matt     2350: #ifdef PMAP_CACHE_VIVT
1.134     thorpej  2351: static int
1.159     thorpej  2352: pmap_clean_page(struct pv_entry *pv, bool is_src)
1.1       matt     2353: {
1.211     he       2354:        pmap_t pm_to_clean = NULL;
1.134     thorpej  2355:        struct pv_entry *npv;
                   2356:        u_int cache_needs_cleaning = 0;
                   2357:        u_int flags = 0;
                   2358:        vaddr_t page_to_clean = 0;
1.1       matt     2359:
1.134     thorpej  2360:        if (pv == NULL) {
                   2361:                /* nothing mapped in so nothing to flush */
1.17      chris    2362:                return (0);
1.108     thorpej  2363:        }
1.17      chris    2364:
1.108     thorpej  2365:        /*
1.134     thorpej  2366:         * Since we flush the cache each time we change to a different
                   2367:         * user vmspace, we only need to flush the page if it is in the
                   2368:         * current pmap.
1.17      chris    2369:         */
                   2370:
1.183     matt     2371:        for (npv = pv; npv; npv = SLIST_NEXT(npv, pv_link)) {
1.209     uebayasi 2372:                if (pmap_is_current(npv->pv_pmap)) {
1.134     thorpej  2373:                        flags |= npv->pv_flags;
1.108     thorpej  2374:                        /*
                   2375:                         * The page is mapped non-cacheable in
1.17      chris    2376:                         * this map.  No need to flush the cache.
                   2377:                         */
1.78      thorpej  2378:                        if (npv->pv_flags & PVF_NC) {
1.17      chris    2379: #ifdef DIAGNOSTIC
                   2380:                                if (cache_needs_cleaning)
                   2381:                                        panic("pmap_clean_page: "
1.108     thorpej  2382:                                            "cache inconsistency");
1.17      chris    2383: #endif
                   2384:                                break;
1.108     thorpej  2385:                        } else if (is_src && (npv->pv_flags & PVF_WRITE) == 0)
1.17      chris    2386:                                continue;
1.108     thorpej  2387:                        if (cache_needs_cleaning) {
1.17      chris    2388:                                page_to_clean = 0;
                   2389:                                break;
1.134     thorpej  2390:                        } else {
1.17      chris    2391:                                page_to_clean = npv->pv_va;
1.134     thorpej  2392:                                pm_to_clean = npv->pv_pmap;
                   2393:                        }
                   2394:                        cache_needs_cleaning = 1;
1.17      chris    2395:                }
1.1       matt     2396:        }
                   2397:
1.108     thorpej  2398:        if (page_to_clean) {
1.259     matt     2399:                pmap_cache_wbinv_page(pm_to_clean, page_to_clean,
                   2400:                    !is_src, flags | PVF_REF);
1.108     thorpej  2401:        } else if (cache_needs_cleaning) {
1.209     uebayasi 2402:                pmap_t const pm = curproc->p_vmspace->vm_map.pmap;
                   2403:
1.259     matt     2404:                pmap_cache_wbinv_all(pm, flags);
1.1       matt     2405:                return (1);
                   2406:        }
                   2407:        return (0);
                   2408: }
1.174     matt     2409: #endif
                   2410:
                   2411: #ifdef PMAP_CACHE_VIPT
                   2412: /*
                   2413:  * Sync a page with the I-cache.  Since the cache is VIPT, we must pick the
                   2414:  * right cache alias to make sure we flush the correct lines.
                   2415:  */
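                         /*
                          * The page is mapped temporarily at the cdstp window using the
                          * colour recorded in md->pvh_attrs, so the I-cache sync operates
                          * on the same alias as the page's real mappings.
                          */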
                   2416: void
1.215     uebayasi 2417: pmap_syncicache_page(struct vm_page_md *md, paddr_t pa)
1.174     matt     2418: {
1.215     uebayasi 2419:        const vsize_t va_offset = md->pvh_attrs & arm_cache_prefer_mask;
1.174     matt     2420:        pt_entry_t * const ptep = &cdst_pte[va_offset >> PGSHIFT];
                   2421:
1.215     uebayasi 2422:        NPDEBUG(PDB_EXEC, printf("pmap_syncicache_page: md=%p (attrs=%#x)\n",
                   2423:            md, md->pvh_attrs));
1.174     matt     2424:        /*
                   2425:         * No need to clean the page if it's non-cached.
                   2426:         */
1.215     uebayasi 2427:        if (md->pvh_attrs & PVF_NC)
1.174     matt     2428:                return;
1.215     uebayasi 2429:        KASSERT(arm_cache_prefer_mask == 0 || md->pvh_attrs & PVF_COLORED);
1.174     matt     2430:
1.259     matt     2431:        pmap_tlb_flush_SE(pmap_kernel(), cdstp + va_offset, PVF_REF | PVF_EXEC);
1.174     matt     2432:        /*
                   2433:         * Set up a PTE with the right coloring to flush existing cache lines.
                   2434:         */
1.262     matt     2435:        const pt_entry_t npte = L2_S_PROTO |
1.215     uebayasi 2436:            pa
1.174     matt     2437:            | L2_S_PROT(PTE_KERNEL, VM_PROT_READ|VM_PROT_WRITE)
                   2438:            | pte_l2_s_cache_mode;
1.262     matt     2439:        l2pte_set(ptep, npte, 0);
1.174     matt     2440:        PTE_SYNC(ptep);
                   2441:
                   2442:        /*
                   2443:         * Flush it.
                   2444:         */
                   2445:        cpu_icache_sync_range(cdstp + va_offset, PAGE_SIZE);
                   2446:        /*
                   2447:         * Unmap the page.
                   2448:         */
1.262     matt     2449:        l2pte_reset(ptep);
1.174     matt     2450:        PTE_SYNC(ptep);
1.259     matt     2451:        pmap_tlb_flush_SE(pmap_kernel(), cdstp + va_offset, PVF_REF | PVF_EXEC);
1.174     matt     2452:
1.215     uebayasi 2453:        md->pvh_attrs |= PVF_EXEC;
1.174     matt     2454:        PMAPCOUNT(exec_synced);
                   2455: }
                   2456:
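                         /*
                          * Write back and/or invalidate the D-cache (and the secondary
                          * cache, if present) for the given page.  PMAP_FLUSH_PRIMARY acts
                          * on the page's current colour (or on all colours if PVF_MULTCLR
                          * is set) and invalidates; PMAP_FLUSH_SECONDARY flushes every
                          * colour except the primary one; PMAP_CLEAN_PRIMARY writes back
                          * the primary colour without invalidating it.
                          */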
                   2457: void
1.215     uebayasi 2458: pmap_flush_page(struct vm_page_md *md, paddr_t pa, enum pmap_flush_op flush)
1.174     matt     2459: {
1.194     matt     2460:        vsize_t va_offset, end_va;
1.254     matt     2461:        bool wbinv_p;
1.174     matt     2462:
1.194     matt     2463:        if (arm_cache_prefer_mask == 0)
                   2464:                return;
1.174     matt     2465:
1.194     matt     2466:        switch (flush) {
                   2467:        case PMAP_FLUSH_PRIMARY:
1.215     uebayasi 2468:                if (md->pvh_attrs & PVF_MULTCLR) {
1.194     matt     2469:                        va_offset = 0;
                   2470:                        end_va = arm_cache_prefer_mask;
1.215     uebayasi 2471:                        md->pvh_attrs &= ~PVF_MULTCLR;
1.194     matt     2472:                        PMAPCOUNT(vac_flush_lots);
                   2473:                } else {
1.215     uebayasi 2474:                        va_offset = md->pvh_attrs & arm_cache_prefer_mask;
1.194     matt     2475:                        end_va = va_offset;
                   2476:                        PMAPCOUNT(vac_flush_one);
                   2477:                }
                   2478:                /*
                   2479:                 * Mark that the page is no longer dirty.
                   2480:                 */
1.215     uebayasi 2481:                md->pvh_attrs &= ~PVF_DIRTY;
1.254     matt     2482:                wbinv_p = true;
1.194     matt     2483:                break;
                   2484:        case PMAP_FLUSH_SECONDARY:
                   2485:                va_offset = 0;
                   2486:                end_va = arm_cache_prefer_mask;
1.254     matt     2487:                wbinv_p = true;
1.215     uebayasi 2488:                md->pvh_attrs &= ~PVF_MULTCLR;
1.194     matt     2489:                PMAPCOUNT(vac_flush_lots);
                   2490:                break;
                   2491:        case PMAP_CLEAN_PRIMARY:
1.215     uebayasi 2492:                va_offset = md->pvh_attrs & arm_cache_prefer_mask;
1.194     matt     2493:                end_va = va_offset;
1.254     matt     2494:                wbinv_p = false;
1.185     matt     2495:                /*
                   2496:                 * Mark that the page is no longer dirty.
                   2497:                 */
1.215     uebayasi 2498:                if ((md->pvh_attrs & PVF_DMOD) == 0)
                   2499:                        md->pvh_attrs &= ~PVF_DIRTY;
1.194     matt     2500:                PMAPCOUNT(vac_clean_one);
                   2501:                break;
                   2502:        default:
                   2503:                return;
1.185     matt     2504:        }
1.174     matt     2505:
1.215     uebayasi 2506:        KASSERT(!(md->pvh_attrs & PVF_NC));
1.194     matt     2507:
1.215     uebayasi 2508:        NPDEBUG(PDB_VAC, printf("pmap_flush_page: md=%p (attrs=%#x)\n",
                   2509:            md, md->pvh_attrs));
1.194     matt     2510:
1.254     matt     2511:        const size_t scache_line_size = arm_scache.dcache_line_size;
                   2512:
1.194     matt     2513:        for (; va_offset <= end_va; va_offset += PAGE_SIZE) {
                   2514:                const size_t pte_offset = va_offset >> PGSHIFT;
                   2515:                pt_entry_t * const ptep = &cdst_pte[pte_offset];
1.262     matt     2516:                const pt_entry_t opte = *ptep;
1.194     matt     2517:
                   2518:                if (flush == PMAP_FLUSH_SECONDARY
1.215     uebayasi 2519:                    && va_offset == (md->pvh_attrs & arm_cache_prefer_mask))
1.194     matt     2520:                        continue;
                   2521:
1.259     matt     2522:                pmap_tlb_flush_SE(pmap_kernel(), cdstp + va_offset,
                   2523:                    PVF_REF | PVF_EXEC);
1.194     matt     2524:                /*
                   2525:                 * Set up a PTE with the right coloring to flush
                   2526:                 * existing cache entries.
                   2527:                 */
1.262     matt     2528:                const pt_entry_t npte = L2_S_PROTO
1.215     uebayasi 2529:                    | pa
1.194     matt     2530:                    | L2_S_PROT(PTE_KERNEL, VM_PROT_READ|VM_PROT_WRITE)
                   2531:                    | pte_l2_s_cache_mode;
1.262     matt     2532:                l2pte_set(ptep, npte, opte);
1.194     matt     2533:                PTE_SYNC(ptep);
                   2534:
                   2535:                /*
1.262     matt     2536:                 * Flush it.  Make sure to flush secondary cache too since
                   2537:                 * bus_dma will ignore uncached pages.
1.194     matt     2538:                 */
1.254     matt     2539:                vaddr_t va = cdstp + va_offset;
                   2540:                if (scache_line_size != 0) {
                   2541:                        cpu_dcache_wb_range(va, PAGE_SIZE);
                   2542:                        if (wbinv_p) {
                   2543:                                cpu_sdcache_wbinv_range(va, pa, PAGE_SIZE);
                   2544:                                cpu_dcache_inv_range(va, PAGE_SIZE);
                   2545:                        } else {
                   2546:                                cpu_sdcache_wb_range(va, pa, PAGE_SIZE);
                   2547:                        }
                   2548:                } else {
                   2549:                        if (wbinv_p) {
                   2550:                                cpu_dcache_wbinv_range(va, PAGE_SIZE);
                   2551:                        } else {
                   2552:                                cpu_dcache_wb_range(va, PAGE_SIZE);
                   2553:                        }
                   2554:                }
1.194     matt     2555:
                   2556:                /*
                   2557:                 * Restore the page table entry since we might have interrupted
                   2558:                 * pmap_zero_page or pmap_copy_page which was already using
                   2559:                 * this pte.
                   2560:                 */
1.262     matt     2561:                l2pte_set(ptep, opte, npte);
1.194     matt     2562:                PTE_SYNC(ptep);
1.259     matt     2563:                pmap_tlb_flush_SE(pmap_kernel(), cdstp + va_offset,
                   2564:                    PVF_REF | PVF_EXEC);
1.194     matt     2565:        }
1.174     matt     2566: }
                   2567: #endif /* PMAP_CACHE_VIPT */
1.1       matt     2568:
                   2569: /*
1.134     thorpej  2570:  * Routine:    pmap_page_remove
                   2571:  * Function:
                   2572:  *             Removes this physical page from
                   2573:  *             all physical maps in which it resides.
                   2574:  *             Reflects back modify bits to the pager.
1.1       matt     2575:  */
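                         /*
                          * On VIPT caches, unmanaged kernel mappings (PVF_KENTRY, created
                          * by pmap_kenter_pa) are preserved on the PV list, and any cached
                          * EXEC status of the page is discarded.
                          */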
1.134     thorpej  2576: static void
1.215     uebayasi 2577: pmap_page_remove(struct vm_page_md *md, paddr_t pa)
1.1       matt     2578: {
1.134     thorpej  2579:        struct l2_bucket *l2b;
1.182     matt     2580:        struct pv_entry *pv, *npv, **pvp;
1.209     uebayasi 2581:        pmap_t pm;
1.208     uebayasi 2582:        pt_entry_t *ptep;
1.159     thorpej  2583:        bool flush;
1.134     thorpej  2584:        u_int flags;
                   2585:
                   2586:        NPDEBUG(PDB_FOLLOW,
1.217     uebayasi 2587:            printf("pmap_page_remove: md %p (0x%08lx)\n", md,
1.215     uebayasi 2588:            pa));
1.71      thorpej  2589:
1.215     uebayasi 2590:        pv = SLIST_FIRST(&md->pvh_list);
1.134     thorpej  2591:        if (pv == NULL) {
1.174     matt     2592: #ifdef PMAP_CACHE_VIPT
                   2593:                /*
                   2594:                 * We *know* the page contents are about to be replaced.
                   2595:                 * Discard the exec contents
                   2596:                 */
1.215     uebayasi 2597:                if (PV_IS_EXEC_P(md->pvh_attrs))
1.174     matt     2598:                        PMAPCOUNT(exec_discarded_page_protect);
1.215     uebayasi 2599:                md->pvh_attrs &= ~PVF_EXEC;
1.251     matt     2600:                PMAP_VALIDATE_MD_PAGE(md);
1.174     matt     2601: #endif
1.134     thorpej  2602:                return;
                   2603:        }
1.174     matt     2604: #ifdef PMAP_CACHE_VIPT
1.215     uebayasi 2605:        KASSERT(arm_cache_prefer_mask == 0 || pmap_is_page_colored_p(md));
1.174     matt     2606: #endif
1.79      thorpej  2607:
1.1       matt     2608:        /*
1.134     thorpej  2609:         * Clear alias counts
1.1       matt     2610:         */
1.182     matt     2611: #ifdef PMAP_CACHE_VIVT
1.215     uebayasi 2612:        md->k_mappings = 0;
1.182     matt     2613: #endif
1.215     uebayasi 2614:        md->urw_mappings = md->uro_mappings = 0;
1.134     thorpej  2615:
1.160     thorpej  2616:        flush = false;
1.134     thorpej  2617:        flags = 0;
                   2618:
1.174     matt     2619: #ifdef PMAP_CACHE_VIVT
1.160     thorpej  2620:        pmap_clean_page(pv, false);
1.174     matt     2621: #endif
1.134     thorpej  2622:
1.215     uebayasi 2623:        pvp = &SLIST_FIRST(&md->pvh_list);
1.134     thorpej  2624:        while (pv) {
                   2625:                pm = pv->pv_pmap;
1.183     matt     2626:                npv = SLIST_NEXT(pv, pv_link);
1.209     uebayasi 2627:                if (flush == false && pmap_is_current(pm))
1.160     thorpej  2628:                        flush = true;
1.134     thorpej  2629:
1.182     matt     2630:                if (pm == pmap_kernel()) {
                   2631: #ifdef PMAP_CACHE_VIPT
                   2632:                        /*
                   2633:                         * If this was an unmanaged mapping, it must be preserved.
                   2634:                         * Move it back on the list and advance the end-of-list
                   2635:                         * pointer.
                   2636:                         */
                   2637:                        if (pv->pv_flags & PVF_KENTRY) {
                   2638:                                *pvp = pv;
1.183     matt     2639:                                pvp = &SLIST_NEXT(pv, pv_link);
1.182     matt     2640:                                pv = npv;
                   2641:                                continue;
                   2642:                        }
                   2643:                        if (pv->pv_flags & PVF_WRITE)
1.215     uebayasi 2644:                                md->krw_mappings--;
1.182     matt     2645:                        else
1.215     uebayasi 2646:                                md->kro_mappings--;
1.182     matt     2647: #endif
1.174     matt     2648:                        PMAPCOUNT(kernel_unmappings);
1.182     matt     2649:                }
1.174     matt     2650:                PMAPCOUNT(unmappings);
                   2651:
1.134     thorpej  2652:                pmap_acquire_pmap_lock(pm);
                   2653:
                   2654:                l2b = pmap_get_l2_bucket(pm, pv->pv_va);
                   2655:                KDASSERT(l2b != NULL);
                   2656:
                   2657:                ptep = &l2b->l2b_kva[l2pte_index(pv->pv_va)];
                   2658:
                   2659:                /*
                   2660:                 * Update statistics
                   2661:                 */
                   2662:                --pm->pm_stats.resident_count;
                   2663:
                   2664:                /* Wired bit */
                   2665:                if (pv->pv_flags & PVF_WIRED)
                   2666:                        --pm->pm_stats.wired_count;
1.88      thorpej  2667:
1.134     thorpej  2668:                flags |= pv->pv_flags;
1.88      thorpej  2669:
1.134     thorpej  2670:                /*
                   2671:                 * Invalidate the PTEs.
                   2672:                 */
1.262     matt     2673:                l2pte_reset(ptep);
1.134     thorpej  2674:                PTE_SYNC_CURRENT(pm, ptep);
                   2675:                pmap_free_l2_bucket(pm, l2b, 1);
1.88      thorpej  2676:
1.134     thorpej  2677:                pool_put(&pmap_pv_pool, pv);
                   2678:                pv = npv;
1.182     matt     2679:                /*
                   2680:                 * if we reach the end of the list and there are still
                   2681:                 * mappings, they might be able to be cached now.
                   2682:                 */
1.174     matt     2683:                if (pv == NULL) {
1.182     matt     2684:                        *pvp = NULL;
1.215     uebayasi 2685:                        if (!SLIST_EMPTY(&md->pvh_list))
                   2686:                                pmap_vac_me_harder(md, pa, pm, 0);
1.174     matt     2687:                }
1.134     thorpej  2688:                pmap_release_pmap_lock(pm);
                   2689:        }
1.174     matt     2690: #ifdef PMAP_CACHE_VIPT
                   2691:        /*
1.182     matt     2692:         * Its EXEC cache is now gone.
1.174     matt     2693:         */
1.215     uebayasi 2694:        if (PV_IS_EXEC_P(md->pvh_attrs))
1.174     matt     2695:                PMAPCOUNT(exec_discarded_page_protect);
1.215     uebayasi 2696:        md->pvh_attrs &= ~PVF_EXEC;
                   2697:        KASSERT(md->urw_mappings == 0);
                   2698:        KASSERT(md->uro_mappings == 0);
1.251     matt     2699:        if (arm_cache_prefer_mask != 0) {
                   2700:                if (md->krw_mappings == 0)
                   2701:                        md->pvh_attrs &= ~PVF_WRITE;
                   2702:                PMAP_VALIDATE_MD_PAGE(md);
                   2703:        }
1.174     matt     2704: #endif
1.88      thorpej  2705:
1.134     thorpej  2706:        if (flush) {
1.152     scw      2707:                /*
1.212     skrll    2708:                 * Note: We can't use pmap_tlb_flush{I,D}() here since that
1.152     scw      2709:                 * would need a subsequent call to pmap_update() to ensure
                   2710:                 * curpm->pm_cstate.cs_all is reset. Our callers are not
                   2711:                 * required to do that (see pmap(9)), so we can't modify
                   2712:                 * the current pmap's state.
                   2713:                 */
1.134     thorpej  2714:                if (PV_BEEN_EXECD(flags))
1.152     scw      2715:                        cpu_tlb_flushID();
1.134     thorpej  2716:                else
1.152     scw      2717:                        cpu_tlb_flushD();
1.134     thorpej  2718:        }
1.88      thorpej  2719:        cpu_cpwait();
                   2720: }
1.1       matt     2721:
1.134     thorpej  2722: /*
                   2723:  * pmap_t pmap_create(void)
                   2724:  *
                   2725:  *      Create a new pmap structure from scratch.
1.17      chris    2726:  */
1.134     thorpej  2727: pmap_t
                   2728: pmap_create(void)
1.17      chris    2729: {
1.134     thorpej  2730:        pmap_t pm;
                   2731:
1.168     ad       2732:        pm = pool_cache_get(&pmap_cache, PR_WAITOK);
1.79      thorpej  2733:
1.222     rmind    2734:        mutex_init(&pm->pm_obj_lock, MUTEX_DEFAULT, IPL_NONE);
                   2735:        uvm_obj_init(&pm->pm_obj, NULL, false, 1);
                   2736:        uvm_obj_setlock(&pm->pm_obj, &pm->pm_obj_lock);
                   2737:
1.134     thorpej  2738:        pm->pm_stats.wired_count = 0;
                   2739:        pm->pm_stats.resident_count = 1;
                   2740:        pm->pm_cstate.cs_all = 0;
                   2741:        pmap_alloc_l1(pm);
1.79      thorpej  2742:
1.17      chris    2743:        /*
1.134     thorpej  2744:         * Note: The pool cache ensures that the pm_l2[] array is already
                   2745:         * initialised to zero.
1.17      chris    2746:         */
1.32      thorpej  2747:
1.134     thorpej  2748:        pmap_pinit(pm);
                   2749:
                   2750:        LIST_INSERT_HEAD(&pmap_pmaps, pm, pm_list);
1.17      chris    2751:
1.134     thorpej  2752:        return (pm);
1.17      chris    2753: }
1.134     thorpej  2754:
1.220     macallan 2755: u_int
                   2756: arm32_mmap_flags(paddr_t pa)
                   2757: {
                   2758:        /*
                   2759:         * the upper 8 bits in pmap_enter()'s flags are reserved for MD stuff,
                   2760:         * and we're using the upper bits of page numbers to pass flags around,
                   2761:         * so we might as well use the same bits.
                   2762:         */
                   2763:        return (u_int)pa & PMAP_MD_MASK;
                   2764: }
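                         /*
                          * Illustrative note (an assumption, not taken from this file): the
                          * PMAP_MD_MASK bits recovered here are intended to reach the flags
                          * argument of a later pmap_enter()/pmap_kenter_pa() call so the
                          * mapping can be created with machine-dependent attributes.
                          */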
1.1       matt     2765: /*
1.198     cegger   2766:  * int pmap_enter(pmap_t pm, vaddr_t va, paddr_t pa, vm_prot_t prot,
                   2767:  *      u_int flags)
1.134     thorpej  2768:  *
                   2769:  *      Insert the given physical page (p) at
                   2770:  *      the specified virtual address (v) in the
                   2771:  *      target physical map with the protection requested.
1.1       matt     2772:  *
1.134     thorpej  2773:  *      NB:  This is the only routine which MAY NOT lazy-evaluate
                   2774:  *      or lose information.  That is, this routine must actually
                   2775:  *      insert this page into the given map NOW.
1.1       matt     2776:  */
1.134     thorpej  2777: int
1.198     cegger   2778: pmap_enter(pmap_t pm, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
1.1       matt     2779: {
1.134     thorpej  2780:        struct l2_bucket *l2b;
                   2781:        struct vm_page *pg, *opg;
1.205     uebayasi 2782:        struct pv_entry *pv;
1.134     thorpej  2783:        u_int nflags;
                   2784:        u_int oflags;
1.257     matt     2785: #ifdef ARM_HAS_VBAR
                   2786:        const bool vector_page_p = false;
                   2787: #else
                   2788:        const bool vector_page_p = (va == vector_page);
                   2789: #endif
1.71      thorpej  2790:
1.134     thorpej  2791:        NPDEBUG(PDB_ENTER, printf("pmap_enter: pm %p va 0x%lx pa 0x%lx prot %x flag %x\n", pm, va, pa, prot, flags));
1.71      thorpej  2792:
1.134     thorpej  2793:        KDASSERT((flags & PMAP_WIRED) == 0 || (flags & VM_PROT_ALL) != 0);
                   2794:        KDASSERT(((va | pa) & PGOFSET) == 0);
1.79      thorpej  2795:
1.71      thorpej  2796:        /*
1.134     thorpej  2797:         * Get a pointer to the page.  Later on in this function, we
                   2798:         * test for a managed page by checking pg != NULL.
1.71      thorpej  2799:         */
1.134     thorpej  2800:        pg = pmap_initialized ? PHYS_TO_VM_PAGE(pa) : NULL;
                   2801:
                   2802:        nflags = 0;
                   2803:        if (prot & VM_PROT_WRITE)
                   2804:                nflags |= PVF_WRITE;
                   2805:        if (prot & VM_PROT_EXECUTE)
                   2806:                nflags |= PVF_EXEC;
                   2807:        if (flags & PMAP_WIRED)
                   2808:                nflags |= PVF_WIRED;
                   2809:
                   2810:        pmap_acquire_pmap_lock(pm);
1.1       matt     2811:
                   2812:        /*
1.134     thorpej  2813:         * Fetch the L2 bucket which maps this page, allocating one if
                   2814:         * necessary for user pmaps.
1.1       matt     2815:         */
1.134     thorpej  2816:        if (pm == pmap_kernel())
                   2817:                l2b = pmap_get_l2_bucket(pm, va);
                   2818:        else
                   2819:                l2b = pmap_alloc_l2_bucket(pm, va);
                   2820:        if (l2b == NULL) {
                   2821:                if (flags & PMAP_CANFAIL) {
                   2822:                        pmap_release_pmap_lock(pm);
                   2823:                        return (ENOMEM);
                   2824:                }
                   2825:                panic("pmap_enter: failed to allocate L2 bucket");
                   2826:        }
1.262     matt     2827:        pt_entry_t *ptep = &l2b->l2b_kva[l2pte_index(va)];
                   2828:        const pt_entry_t opte = *ptep;
                   2829:        pt_entry_t npte = pa;
1.134     thorpej  2830:        oflags = 0;
1.88      thorpej  2831:
1.134     thorpej  2832:        if (opte) {
                   2833:                /*
                   2834:                 * There is already a mapping at this address.
                   2835:                 * If the physical address is different, look up the
                   2836:                 * vm_page.
                   2837:                 */
                   2838:                if (l2pte_pa(opte) != pa)
                   2839:                        opg = PHYS_TO_VM_PAGE(l2pte_pa(opte));
                   2840:                else
                   2841:                        opg = pg;
                   2842:        } else
                   2843:                opg = NULL;
1.88      thorpej  2844:
1.134     thorpej  2845:        if (pg) {
1.215     uebayasi 2846:                struct vm_page_md *md = VM_PAGE_TO_MD(pg);
                   2847:
1.134     thorpej  2848:                /*
                   2849:                 * This is to be a managed mapping.
                   2850:                 */
1.251     matt     2851:                if ((flags & VM_PROT_ALL) || (md->pvh_attrs & PVF_REF)) {
1.134     thorpej  2852:                        /*
                   2853:                         * - The access type indicates that we don't need
                   2854:                         *   to do referenced emulation.
                   2855:                         * OR
                   2856:                         * - The physical page has already been referenced
                   2857:                         *   so no need to re-do referenced emulation here.
                   2858:                         */
1.214     jmcneill 2859:                        npte |= l2pte_set_readonly(L2_S_PROTO);
1.88      thorpej  2860:
1.134     thorpej  2861:                        nflags |= PVF_REF;
1.88      thorpej  2862:
1.134     thorpej  2863:                        if ((prot & VM_PROT_WRITE) != 0 &&
                   2864:                            ((flags & VM_PROT_WRITE) != 0 ||
1.215     uebayasi 2865:                             (md->pvh_attrs & PVF_MOD) != 0)) {
1.134     thorpej  2866:                                /*
                   2867:                                 * This is a writable mapping, and the
                   2868:                                 * page's mod state indicates it has
                   2869:                                 * already been modified. Make it
                   2870:                                 * writable from the outset.
                   2871:                                 */
1.214     jmcneill 2872:                                npte = l2pte_set_writable(npte);
1.134     thorpej  2873:                                nflags |= PVF_MOD;
                   2874:                        }
                   2875:                } else {
                   2876:                        /*
                   2877:                         * Need to do page referenced emulation.
                   2878:                         */
                   2879:                        npte |= L2_TYPE_INV;
                   2880:                }
1.88      thorpej  2881:
1.252     macallan 2882:                if (flags & ARM32_MMAP_WRITECOMBINE) {
                   2883:                        npte |= pte_l2_s_wc_mode;
                   2884:                } else
                   2885:                        npte |= pte_l2_s_cache_mode;
1.1       matt     2886:
1.134     thorpej  2887:                if (pg == opg) {
                   2888:                        /*
                   2889:                         * We're changing the attrs of an existing mapping.
                   2890:                         */
1.227     matt     2891: #ifdef MULTIPROCESSOR
1.226     matt     2892:                        KASSERT(uvm_page_locked_p(pg));
1.227     matt     2893: #endif
1.215     uebayasi 2894:                        oflags = pmap_modify_pv(md, pa, pm, va,
1.134     thorpej  2895:                            PVF_WRITE | PVF_EXEC | PVF_WIRED |
                   2896:                            PVF_MOD | PVF_REF, nflags);
1.1       matt     2897:
1.174     matt     2898: #ifdef PMAP_CACHE_VIVT
1.134     thorpej  2899:                        /*
                   2900:                         * We may need to flush the cache if we're
                   2901:                         * doing rw-ro...
                   2902:                         */
                   2903:                        if (pm->pm_cstate.cs_cache_d &&
                   2904:                            (oflags & PVF_NC) == 0 &&
1.214     jmcneill 2905:                            l2pte_writable_p(opte) &&
1.134     thorpej  2906:                            (prot & VM_PROT_WRITE) == 0)
                   2907:                                cpu_dcache_wb_range(va, PAGE_SIZE);
1.174     matt     2908: #endif
1.134     thorpej  2909:                } else {
                   2910:                        /*
                   2911:                         * New mapping, or changing the backing page
                   2912:                         * of an existing mapping.
                   2913:                         */
                   2914:                        if (opg) {
1.215     uebayasi 2915:                                struct vm_page_md *omd = VM_PAGE_TO_MD(opg);
                   2916:                                paddr_t opa = VM_PAGE_TO_PHYS(opg);
                   2917:
1.134     thorpej  2918:                                /*
                   2919:                                 * Replacing an existing mapping with a new one.
                   2920:                                 * It is part of our managed memory so we
                   2921:                                 * must remove it from the PV list
                   2922:                                 */
1.227     matt     2923: #ifdef MULTIPROCESSOR
1.226     matt     2924:                                KASSERT(uvm_page_locked_p(opg));
1.227     matt     2925: #endif
1.215     uebayasi 2926:                                pv = pmap_remove_pv(omd, opa, pm, va);
                   2927:                                pmap_vac_me_harder(omd, opa, pm, 0);
1.205     uebayasi 2928:                                oflags = pv->pv_flags;
1.1       matt     2929:
1.174     matt     2930: #ifdef PMAP_CACHE_VIVT
1.134     thorpej  2931:                                /*
                   2932:                                 * If the old mapping was valid (ref/mod
                   2933:                                 * emulation creates 'invalid' mappings
                   2934:                                 * initially) then make sure to frob
                   2935:                                 * the cache.
                   2936:                                 */
1.259     matt     2937:                                if (!(oflags & PVF_NC) && l2pte_valid(opte)) {
                   2938:                                        pmap_cache_wbinv_page(pm, va, true,
                   2939:                                            oflags);
1.134     thorpej  2940:                                }
1.174     matt     2941: #endif
1.134     thorpej  2942:                        } else
1.205     uebayasi 2943:                        if ((pv = pool_get(&pmap_pv_pool, PR_NOWAIT)) == NULL){
1.134     thorpej  2944:                                if ((flags & PMAP_CANFAIL) == 0)
                   2945:                                        panic("pmap_enter: no pv entries");
                   2946:
                   2947:                                if (pm != pmap_kernel())
                   2948:                                        pmap_free_l2_bucket(pm, l2b, 0);
                   2949:                                pmap_release_pmap_lock(pm);
                   2950:                                NPDEBUG(PDB_ENTER,
                   2951:                                    printf("pmap_enter: ENOMEM\n"));
                   2952:                                return (ENOMEM);
                   2953:                        }
1.25      rearnsha 2954:
1.227     matt     2955: #ifdef MULTIPROCESSOR
1.226     matt     2956:                        KASSERT(uvm_page_locked_p(pg));
1.227     matt     2957: #endif
1.215     uebayasi 2958:                        pmap_enter_pv(md, pa, pv, pm, va, nflags);
1.25      rearnsha 2959:                }
1.134     thorpej  2960:        } else {
                   2961:                /*
                   2962:                 * We're mapping an unmanaged page.
                   2963:                 * These are always readable, and possibly writable, from
                   2964:                 * the get go as we don't need to track ref/mod status.
                   2965:                 */
1.214     jmcneill 2966:                npte |= l2pte_set_readonly(L2_S_PROTO);
1.134     thorpej  2967:                if (prot & VM_PROT_WRITE)
1.214     jmcneill 2968:                        npte = l2pte_set_writable(npte);
1.25      rearnsha 2969:
1.134     thorpej  2970:                /*
                   2971:                 * Make sure the vector table is mapped cacheable
                   2972:                 */
1.257     matt     2973:                if ((vector_page_p && pm != pmap_kernel())
                   2974:                    || (flags & ARM32_MMAP_CACHEABLE)) {
1.134     thorpej  2975:                        npte |= pte_l2_s_cache_mode;
1.220     macallan 2976:                } else if (flags & ARM32_MMAP_WRITECOMBINE) {
                   2977:                        npte |= pte_l2_s_wc_mode;
                   2978:                }
1.134     thorpej  2979:                if (opg) {
                   2980:                        /*
                   2981:                         * Looks like there's an existing 'managed' mapping
                   2982:                         * at this address.
1.25      rearnsha 2983:                         */
1.215     uebayasi 2984:                        struct vm_page_md *omd = VM_PAGE_TO_MD(opg);
                   2985:                        paddr_t opa = VM_PAGE_TO_PHYS(opg);
                   2986:
1.227     matt     2987: #ifdef MULTIPROCESSOR
1.226     matt     2988:                        KASSERT(uvm_page_locked_p(opg));
1.227     matt     2989: #endif
1.215     uebayasi 2990:                        pv = pmap_remove_pv(omd, opa, pm, va);
                   2991:                        pmap_vac_me_harder(omd, opa, pm, 0);
1.205     uebayasi 2992:                        oflags = pv->pv_flags;
1.134     thorpej  2993:
1.174     matt     2994: #ifdef PMAP_CACHE_VIVT
1.259     matt     2995:                        if (!(oflags & PVF_NC) && l2pte_valid(opte)) {
                   2996:                                pmap_cache_wbinv_page(pm, va, true, oflags);
1.134     thorpej  2997:                        }
1.174     matt     2998: #endif
1.205     uebayasi 2999:                        pool_put(&pmap_pv_pool, pv);
1.25      rearnsha 3000:                }
                   3001:        }
                   3002:
1.134     thorpej  3003:        /*
                   3004:         * Make sure userland mappings get the right permissions
                   3005:         */
1.257     matt     3006:        if (!vector_page_p && pm != pmap_kernel()) {
1.134     thorpej  3007:                npte |= L2_S_PROT_U;
1.257     matt     3008:        }
1.25      rearnsha 3009:
1.134     thorpej  3010:        /*
                   3011:         * Keep the stats up to date
                   3012:         */
                   3013:        if (opte == 0) {
                   3014:                l2b->l2b_occupancy++;
                   3015:                pm->pm_stats.resident_count++;
                   3016:        }
1.1       matt     3017:
1.134     thorpej  3018:        NPDEBUG(PDB_ENTER,
                   3019:            printf("pmap_enter: opte 0x%08x npte 0x%08x\n", opte, npte));
1.1       matt     3020:
                   3021:        /*
1.134     thorpej  3022:         * If this is just a wiring change, the two PTEs will be
                   3023:         * identical, so there's no need to update the page table.
1.1       matt     3024:         */
1.134     thorpej  3025:        if (npte != opte) {
1.159     thorpej  3026:                bool is_cached = pmap_is_cached(pm);
1.1       matt     3027:
1.262     matt     3028:                l2pte_set(ptep, npte, opte);
1.237     matt     3029:                PTE_SYNC(ptep);
1.134     thorpej  3030:                if (is_cached) {
                   3031:                        /*
                   3032:                         * We only need to frob the cache/tlb if this pmap
                   3033:                         * is current
                   3034:                         */
1.257     matt     3035:                        if (!vector_page_p && l2pte_valid(npte)) {
1.25      rearnsha 3036:                                /*
1.134     thorpej  3037:                                 * This mapping is likely to be accessed as
                   3038:                                 * soon as we return to userland. Fix up the
                   3039:                                 * L1 entry to avoid taking another
                   3040:                                 * page/domain fault.
1.25      rearnsha 3041:                                 */
1.134     thorpej  3042:                                pd_entry_t *pl1pd, l1pd;
                   3043:
1.258     matt     3044:                                pl1pd = pmap_l1_kva(pm) + L1_IDX(va);
                   3045:                                l1pd = l2b->l2b_phys | L1_C_DOM(pmap_domain(pm)) |
1.134     thorpej  3046:                                    L1_C_PROTO;
                   3047:                                if (*pl1pd != l1pd) {
                   3048:                                        *pl1pd = l1pd;
                   3049:                                        PTE_SYNC(pl1pd);
1.12      chris    3050:                                }
1.1       matt     3051:                        }
                   3052:                }
1.134     thorpej  3053:
1.259     matt     3054:                pmap_tlb_flush_SE(pm, va, oflags);
1.134     thorpej  3055:
                   3056:                NPDEBUG(PDB_ENTER,
                   3057:                    printf("pmap_enter: is_cached %d cs 0x%08x\n",
                   3058:                    is_cached, pm->pm_cstate.cs_all));
                   3059:
                   3060:                if (pg != NULL) {
1.215     uebayasi 3061:                        struct vm_page_md *md = VM_PAGE_TO_MD(pg);
                   3062:
1.227     matt     3063: #ifdef MULTIPROCESSOR
1.226     matt     3064:                        KASSERT(uvm_page_locked_p(pg));
1.227     matt     3065: #endif
1.215     uebayasi 3066:                        pmap_vac_me_harder(md, pa, pm, va);
1.1       matt     3067:                }
                   3068:        }
1.185     matt     3069: #if defined(PMAP_CACHE_VIPT) && defined(DIAGNOSTIC)
1.188     matt     3070:        if (pg) {
1.215     uebayasi 3071:                struct vm_page_md *md = VM_PAGE_TO_MD(pg);
                   3072:
1.227     matt     3073: #ifdef MULTIPROCESSOR
1.226     matt     3074:                KASSERT(uvm_page_locked_p(pg));
1.227     matt     3075: #endif
1.215     uebayasi 3076:                KASSERT((md->pvh_attrs & PVF_DMOD) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC)));
1.251     matt     3077:                PMAP_VALIDATE_MD_PAGE(md);
1.188     matt     3078:        }
1.183     matt     3079: #endif
1.134     thorpej  3080:
                   3081:        pmap_release_pmap_lock(pm);
                   3082:
                   3083:        return (0);
1.1       matt     3084: }
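
/*
 * Illustrative usage sketch (hypothetical caller, not part of this
 * file): enter a wired kernel mapping, retrying if PMAP_CANFAIL makes
 * pmap_enter() return ENOMEM, and finish with pmap_update().
 */
static void
example_enter_wired(vaddr_t va, paddr_t pa)
{

        while (pmap_enter(pmap_kernel(), va, pa,
            VM_PROT_READ | VM_PROT_WRITE,
            VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED | PMAP_CANFAIL) != 0) {
                /* Out of pv entries or L2 tables; wait for memory. */
                uvm_wait("pmapenter");
        }
        pmap_update(pmap_kernel());
}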
                   3085:
                   3086: /*
                   3087:  * pmap_remove()
                   3088:  *
                   3089:  * pmap_remove is responsible for nuking a number of mappings for a range
                   3090:  * of virtual address space in the given pmap. Doing this efficiently
                   3091:  * is interesting, because in a number of cases a wide virtual address
                   3092:  * range may be supplied that contains few actual mappings. So, the
                   3093:  * optimisations are:
1.134     thorpej  3094:  *  1. Skip over hunks of address space for which no L1 or L2 entry exists.
1.1       matt     3095:  *  2. Build up a list of pages we've hit, up to a maximum, so we can
                   3096:  *     maybe do just a partial cache clean. This path of execution is
                   3097:  *     complicated by the fact that the cache must be flushed _before_
                   3098:  *     the PTE is nuked, being a VAC :-)
1.134     thorpej  3099:  *  3. If we're called after UVM calls pmap_remove_all(), we can defer
                   3100:  *     all invalidations until pmap_update(), since pmap_remove_all() has
                   3101:  *     already flushed the cache.
                   3102:  *  4. Maybe later fast-case a single page, but I don't think this is
1.1       matt     3103:  *     going to make _that_ much difference overall.
                   3104:  */
                   3105:
1.134     thorpej  3106: #define        PMAP_REMOVE_CLEAN_LIST_SIZE     3
1.1       matt     3107:
                   3108: void
1.200     rmind    3109: pmap_remove(pmap_t pm, vaddr_t sva, vaddr_t eva)
1.1       matt     3110: {
1.134     thorpej  3111:        vaddr_t next_bucket;
                   3112:        u_int cleanlist_idx, total, cnt;
                   3113:        struct {
1.1       matt     3114:                vaddr_t va;
1.174     matt     3115:                pt_entry_t *ptep;
1.1       matt     3116:        } cleanlist[PMAP_REMOVE_CLEAN_LIST_SIZE];
1.259     matt     3117:        u_int mappings;
1.1       matt     3118:
1.156     scw      3119:        NPDEBUG(PDB_REMOVE, printf("pmap_remove: pmap=%p sva=%08lx "
                   3120:            "eva=%08lx\n", pm, sva, eva));
1.1       matt     3121:
1.17      chris    3122:        /*
1.134     thorpej  3123:         * we lock in the pmap => pv_head direction
1.17      chris    3124:         */
1.134     thorpej  3125:        pmap_acquire_pmap_lock(pm);
                   3126:
                   3127:        if (pm->pm_remove_all || !pmap_is_cached(pm)) {
                   3128:                cleanlist_idx = PMAP_REMOVE_CLEAN_LIST_SIZE + 1;
                   3129:                if (pm->pm_cstate.cs_tlb == 0)
1.160     thorpej  3130:                        pm->pm_remove_all = true;
1.134     thorpej  3131:        } else
                   3132:                cleanlist_idx = 0;
                   3133:
                   3134:        total = 0;
                   3135:
1.1       matt     3136:        while (sva < eva) {
1.134     thorpej  3137:                /*
                   3138:                 * Do one L2 bucket's worth at a time.
                   3139:                 */
                   3140:                next_bucket = L2_NEXT_BUCKET(sva);
                   3141:                if (next_bucket > eva)
                   3142:                        next_bucket = eva;
                   3143:
1.262     matt     3144:                struct l2_bucket * const l2b = pmap_get_l2_bucket(pm, sva);
1.134     thorpej  3145:                if (l2b == NULL) {
                   3146:                        sva = next_bucket;
                   3147:                        continue;
                   3148:                }
                   3149:
1.262     matt     3150:                pt_entry_t *ptep = &l2b->l2b_kva[l2pte_index(sva)];
1.134     thorpej  3151:
1.262     matt     3152:                for (mappings = 0;
                   3153:                     sva < next_bucket;
                   3154:                     sva += PAGE_SIZE, ptep += PAGE_SIZE / L2_S_SIZE) {
                   3155:                        pt_entry_t opte = *ptep;
1.134     thorpej  3156:
1.262     matt     3157:                        if (opte == 0) {
1.156     scw      3158:                                /* Nothing here, move along */
1.1       matt     3159:                                continue;
                   3160:                        }
                   3161:
1.259     matt     3162:                        u_int flags = PVF_REF;
1.262     matt     3163:                        paddr_t pa = l2pte_pa(opte);
                   3164:                        struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
1.1       matt     3165:
                   3166:                        /*
1.134     thorpej  3167:                         * Update flags. In a number of circumstances,
                   3168:                         * we could cluster a lot of these and do a
                   3169:                         * number of sequential pages in one go.
1.1       matt     3170:                         */
1.262     matt     3171:                        if (pg != NULL) {
1.215     uebayasi 3172:                                struct vm_page_md *md = VM_PAGE_TO_MD(pg);
1.205     uebayasi 3173:                                struct pv_entry *pv;
1.215     uebayasi 3174:
1.227     matt     3175: #ifdef MULTIPROCESSOR
1.226     matt     3176:                                KASSERT(uvm_page_locked_p(pg));
1.227     matt     3177: #endif
1.215     uebayasi 3178:                                pv = pmap_remove_pv(md, pa, pm, sva);
                   3179:                                pmap_vac_me_harder(md, pa, pm, 0);
1.205     uebayasi 3180:                                if (pv != NULL) {
1.261     matt     3181:                                        if (pm->pm_remove_all == false) {
                   3182:                                                flags = pv->pv_flags;
                   3183:                                        }
1.205     uebayasi 3184:                                        pool_put(&pmap_pv_pool, pv);
1.134     thorpej  3185:                                }
                   3186:                        }
1.156     scw      3187:                        mappings++;
                   3188:
1.262     matt     3189:                        if (!l2pte_valid(opte)) {
1.156     scw      3190:                                /*
                   3191:                                 * Ref/Mod emulation is still active for this
                   3192:                                 * mapping, therefore it has not yet been
                   3193:                                 * accessed. No need to frob the cache/tlb.
                   3194:                                 */
1.262     matt     3195:                                l2pte_reset(ptep);
1.134     thorpej  3196:                                PTE_SYNC_CURRENT(pm, ptep);
                   3197:                                continue;
                   3198:                        }
1.1       matt     3199:
                   3200:                        if (cleanlist_idx < PMAP_REMOVE_CLEAN_LIST_SIZE) {
                   3201:                                /* Add to the clean list. */
1.174     matt     3202:                                cleanlist[cleanlist_idx].ptep = ptep;
1.134     thorpej  3203:                                cleanlist[cleanlist_idx].va =
1.259     matt     3204:                                    sva | (flags & PVF_EXEC);
1.1       matt     3205:                                cleanlist_idx++;
1.134     thorpej  3206:                        } else
                   3207:                        if (cleanlist_idx == PMAP_REMOVE_CLEAN_LIST_SIZE) {
1.1       matt     3208:                                /* Nuke everything if needed. */
1.174     matt     3209: #ifdef PMAP_CACHE_VIVT
1.259     matt     3210:                                pmap_cache_wbinv_all(pm, PVF_EXEC);
1.174     matt     3211: #endif
1.134     thorpej  3212:                                pmap_tlb_flushID(pm);
1.1       matt     3213:
                   3214:                                /*
                   3215:                                 * Roll back the previous PTE list,
                   3216:                                 * and zero out the current PTE.
                   3217:                                 */
1.113     thorpej  3218:                                for (cnt = 0;
1.134     thorpej  3219:                                     cnt < PMAP_REMOVE_CLEAN_LIST_SIZE; cnt++) {
1.262     matt     3220:                                        l2pte_reset(cleanlist[cnt].ptep);
1.181     scw      3221:                                        PTE_SYNC(cleanlist[cnt].ptep);
1.1       matt     3222:                                }
1.262     matt     3223:                                l2pte_reset(ptep);
1.134     thorpej  3224:                                PTE_SYNC(ptep);
1.1       matt     3225:                                cleanlist_idx++;
1.160     thorpej  3226:                                pm->pm_remove_all = true;
1.1       matt     3227:                        } else {
1.262     matt     3228:                                l2pte_reset(ptep);
1.134     thorpej  3229:                                PTE_SYNC(ptep);
1.160     thorpej  3230:                                if (pm->pm_remove_all == false) {
1.259     matt     3231:                                        pmap_tlb_flush_SE(pm, sva, flags);
1.134     thorpej  3232:                                }
                   3233:                        }
                   3234:                }
                   3235:
                   3236:                /*
                   3237:                 * Deal with any left overs
                   3238:                 */
                   3239:                if (cleanlist_idx <= PMAP_REMOVE_CLEAN_LIST_SIZE) {
                   3240:                        total += cleanlist_idx;
                   3241:                        for (cnt = 0; cnt < cleanlist_idx; cnt++) {
1.259     matt     3242:                                vaddr_t va = cleanlist[cnt].va;
1.134     thorpej  3243:                                if (pm->pm_cstate.cs_all != 0) {
1.259     matt     3244:                                        vaddr_t clva = va & ~PAGE_MASK;
                   3245:                                        u_int flags = va & PVF_EXEC;
1.174     matt     3246: #ifdef PMAP_CACHE_VIVT
1.259     matt     3247:                                        pmap_cache_wbinv_page(pm, clva, true,
                   3248:                                            PVF_REF | PVF_WRITE | flags);
1.174     matt     3249: #endif
1.259     matt     3250:                                        pmap_tlb_flush_SE(pm, clva,
                   3251:                                            PVF_REF | flags);
1.134     thorpej  3252:                                }
1.262     matt     3253:                                l2pte_reset(cleanlist[cnt].ptep);
1.174     matt     3254:                                PTE_SYNC_CURRENT(pm, cleanlist[cnt].ptep);
1.1       matt     3255:                        }
                   3256:
                   3257:                        /*
1.134     thorpej  3258:                         * If it looks like we're removing a whole bunch
                   3259:                         * of mappings, it's faster to just write-back
                   3260:                         * the whole cache now and defer TLB flushes until
                   3261:                         * pmap_update() is called.
1.1       matt     3262:                         */
1.134     thorpej  3263:                        if (total <= PMAP_REMOVE_CLEAN_LIST_SIZE)
                   3264:                                cleanlist_idx = 0;
                   3265:                        else {
                   3266:                                cleanlist_idx = PMAP_REMOVE_CLEAN_LIST_SIZE + 1;
1.174     matt     3267: #ifdef PMAP_CACHE_VIVT
1.259     matt     3268:                                pmap_cache_wbinv_all(pm, PVF_EXEC);
1.174     matt     3269: #endif
1.160     thorpej  3270:                                pm->pm_remove_all = true;
1.134     thorpej  3271:                        }
                   3272:                }
                   3273:
                   3274:                pmap_free_l2_bucket(pm, l2b, mappings);
1.156     scw      3275:                pm->pm_stats.resident_count -= mappings;
1.134     thorpej  3276:        }
                   3277:
                   3278:        pmap_release_pmap_lock(pm);
                   3279: }
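
/*
 * Illustrative sketch (hypothetical caller): tear down a VA range.  As
 * noted above, TLB invalidations may be deferred once pm_remove_all is
 * set, so the mappings are only guaranteed gone after pmap_update().
 */
static void
example_remove_range(pmap_t pm, vaddr_t sva, vaddr_t eva)
{

        pmap_remove(pm, sva, eva);
        pmap_update(pm);        /* flush any deferred TLB invalidations */
}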
                   3280:
1.182     matt     3281: #ifdef PMAP_CACHE_VIPT
                   3282: static struct pv_entry *
                   3283: pmap_kremove_pg(struct vm_page *pg, vaddr_t va)
                   3284: {
1.215     uebayasi 3285:        struct vm_page_md *md = VM_PAGE_TO_MD(pg);
                   3286:        paddr_t pa = VM_PAGE_TO_PHYS(pg);
1.182     matt     3287:        struct pv_entry *pv;
                   3288:
1.215     uebayasi 3289:        KASSERT(arm_cache_prefer_mask == 0 || md->pvh_attrs & (PVF_COLORED|PVF_NC));
                   3290:        KASSERT((md->pvh_attrs & PVF_KMPAGE) == 0);
1.182     matt     3291:
1.215     uebayasi 3292:        pv = pmap_remove_pv(md, pa, pmap_kernel(), va);
1.182     matt     3293:        KASSERT(pv);
                   3294:        KASSERT(pv->pv_flags & PVF_KENTRY);
                   3295:
                   3296:        /*
                   3297:         * If we are removing a writable mapping to a cached exec page:
                   3298:         * if it's the last mapping, clear its exec-ness; otherwise sync
                   3299:         * the page to the icache.
                   3300:         */
1.215     uebayasi 3301:        if ((md->pvh_attrs & (PVF_NC|PVF_EXEC)) == PVF_EXEC
1.182     matt     3302:            && (pv->pv_flags & PVF_WRITE) != 0) {
1.215     uebayasi 3303:                if (SLIST_EMPTY(&md->pvh_list)) {
                   3304:                        md->pvh_attrs &= ~PVF_EXEC;
1.182     matt     3305:                        PMAPCOUNT(exec_discarded_kremove);
                   3306:                } else {
1.215     uebayasi 3307:                        pmap_syncicache_page(md, pa);
1.182     matt     3308:                        PMAPCOUNT(exec_synced_kremove);
                   3309:                }
                   3310:        }
1.215     uebayasi 3311:        pmap_vac_me_harder(md, pa, pmap_kernel(), 0);
1.182     matt     3312:
                   3313:        return pv;
                   3314: }
                   3315: #endif /* PMAP_CACHE_VIPT */
                   3316:
1.134     thorpej  3317: /*
                   3318:  * pmap_kenter_pa: enter an unmanaged, wired kernel mapping
                   3319:  *
                   3320:  * We assume there is already sufficient KVM space available
                   3321:  * to do this, as we can't allocate L2 descriptor tables/metadata
                   3322:  * from here.
                   3323:  */
                   3324: void
1.201     cegger   3325: pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
1.134     thorpej  3326: {
1.186     matt     3327: #ifdef PMAP_CACHE_VIVT
1.213     cegger   3328:        struct vm_page *pg = (flags & PMAP_KMPAGE) ? PHYS_TO_VM_PAGE(pa) : NULL;
1.186     matt     3329: #endif
1.174     matt     3330: #ifdef PMAP_CACHE_VIPT
                   3331:        struct vm_page *pg = PHYS_TO_VM_PAGE(pa);
                   3332:        struct vm_page *opg;
1.182     matt     3333:        struct pv_entry *pv = NULL;
1.174     matt     3334: #endif
1.215     uebayasi 3335:        struct vm_page_md *md = VM_PAGE_TO_MD(pg);
1.174     matt     3336:
1.134     thorpej  3337:        NPDEBUG(PDB_KENTER,
                   3338:            printf("pmap_kenter_pa: va 0x%08lx, pa 0x%08lx, prot 0x%x\n",
                   3339:            va, pa, prot));
                   3340:
1.262     matt     3341:        struct l2_bucket * const l2b = pmap_get_l2_bucket(pmap_kernel(), va);
1.134     thorpej  3342:        KDASSERT(l2b != NULL);
                   3343:
1.262     matt     3344:        pt_entry_t * const ptep = &l2b->l2b_kva[l2pte_index(va)];
                   3345:        const pt_entry_t opte = *ptep;
1.134     thorpej  3346:
1.174     matt     3347:        if (opte == 0) {
                   3348:                PMAPCOUNT(kenter_mappings);
1.134     thorpej  3349:                l2b->l2b_occupancy++;
1.174     matt     3350:        } else {
                   3351:                PMAPCOUNT(kenter_remappings);
                   3352: #ifdef PMAP_CACHE_VIPT
                   3353:                opg = PHYS_TO_VM_PAGE(l2pte_pa(opte));
1.228     he       3354: #ifdef DIAGNOSTIC
1.215     uebayasi 3355:                struct vm_page_md *omd = VM_PAGE_TO_MD(opg);
1.228     he       3356: #endif
1.174     matt     3357:                if (opg) {
                   3358:                        KASSERT(opg != pg);
1.215     uebayasi 3359:                        KASSERT((omd->pvh_attrs & PVF_KMPAGE) == 0);
1.213     cegger   3360:                        KASSERT((flags & PMAP_KMPAGE) == 0);
1.182     matt     3361:                        pv = pmap_kremove_pg(opg, va);
1.174     matt     3362:                }
                   3363: #endif
                   3364:                if (l2pte_valid(opte)) {
                   3365: #ifdef PMAP_CACHE_VIVT
                   3366:                        cpu_dcache_wbinv_range(va, PAGE_SIZE);
                   3367: #endif
                   3368:                        cpu_tlb_flushD_SE(va);
                   3369:                        cpu_cpwait();
                   3370:                }
                   3371:        }
1.134     thorpej  3372:
1.262     matt     3373:        const pt_entry_t npte = L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, prot)
1.233     matt     3374:            | ((flags & PMAP_NOCACHE) ? 0 : pte_l2_s_cache_mode);
1.262     matt     3375:        l2pte_set(ptep, npte, opte);
1.134     thorpej  3376:        PTE_SYNC(ptep);
1.174     matt     3377:
                   3378:        if (pg) {
1.227     matt     3379: #ifdef MULTIPROCESSOR
1.226     matt     3380:                KASSERT(uvm_page_locked_p(pg));
1.227     matt     3381: #endif
1.213     cegger   3382:                if (flags & PMAP_KMPAGE) {
1.215     uebayasi 3383:                        KASSERT(md->urw_mappings == 0);
                   3384:                        KASSERT(md->uro_mappings == 0);
                   3385:                        KASSERT(md->krw_mappings == 0);
                   3386:                        KASSERT(md->kro_mappings == 0);
1.186     matt     3387: #ifdef PMAP_CACHE_VIPT
                   3388:                        KASSERT(pv == NULL);
1.207     uebayasi 3389:                        KASSERT(arm_cache_prefer_mask == 0 || (va & PVF_COLORED) == 0);
1.215     uebayasi 3390:                        KASSERT((md->pvh_attrs & PVF_NC) == 0);
1.182     matt     3391:                        /* if there is a color conflict, evict from cache. */
1.215     uebayasi 3392:                        if (pmap_is_page_colored_p(md)
                   3393:                            && ((va ^ md->pvh_attrs) & arm_cache_prefer_mask)) {
1.183     matt     3394:                                PMAPCOUNT(vac_color_change);
1.215     uebayasi 3395:                                pmap_flush_page(md, pa, PMAP_FLUSH_PRIMARY);
                   3396:                        } else if (md->pvh_attrs & PVF_MULTCLR) {
1.195     matt     3397:                                /*
                   3398:                                 * If this page has multiple colors, expunge
                   3399:                                 * them.
                   3400:                                 */
                   3401:                                PMAPCOUNT(vac_flush_lots2);
1.215     uebayasi 3402:                                pmap_flush_page(md, pa, PMAP_FLUSH_SECONDARY);
1.183     matt     3403:                        }
1.215     uebayasi 3404:                        md->pvh_attrs &= PAGE_SIZE - 1;
                   3405:                        md->pvh_attrs |= PVF_KMPAGE
1.183     matt     3406:                            | PVF_COLORED | PVF_DIRTY
                   3407:                            | (va & arm_cache_prefer_mask);
1.186     matt     3408: #endif
                   3409: #ifdef PMAP_CACHE_VIVT
1.215     uebayasi 3410:                        md->pvh_attrs |= PVF_KMPAGE;
1.186     matt     3411: #endif
                   3412:                        pmap_kmpages++;
                   3413: #ifdef PMAP_CACHE_VIPT
1.179     matt     3414:                } else {
1.182     matt     3415:                        if (pv == NULL) {
                   3416:                                pv = pool_get(&pmap_pv_pool, PR_NOWAIT);
                   3417:                                KASSERT(pv != NULL);
                   3418:                        }
1.215     uebayasi 3419:                        pmap_enter_pv(md, pa, pv, pmap_kernel(), va,
1.182     matt     3420:                            PVF_WIRED | PVF_KENTRY
1.183     matt     3421:                            | (prot & VM_PROT_WRITE ? PVF_WRITE : 0));
                   3422:                        if ((prot & VM_PROT_WRITE)
1.215     uebayasi 3423:                            && !(md->pvh_attrs & PVF_NC))
                   3424:                                md->pvh_attrs |= PVF_DIRTY;
                   3425:                        KASSERT((prot & VM_PROT_WRITE) == 0 || (md->pvh_attrs & (PVF_DIRTY|PVF_NC)));
                   3426:                        pmap_vac_me_harder(md, pa, pmap_kernel(), va);
1.186     matt     3427: #endif
1.179     matt     3428:                }
1.186     matt     3429: #ifdef PMAP_CACHE_VIPT
1.182     matt     3430:        } else {
                   3431:                if (pv != NULL)
                   3432:                        pool_put(&pmap_pv_pool, pv);
1.186     matt     3433: #endif
1.174     matt     3434:        }
1.134     thorpej  3435: }
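
/*
 * Illustrative sketch (hypothetical caller): temporarily map a physical
 * page at a pre-reserved kernel VA.  pmap_kenter_pa() cannot allocate
 * L2 tables, so "kva" is assumed to come from already-backed KVA (for
 * example a uvm_km_alloc(..., UVM_KMF_VAONLY) reservation).
 */
static void
example_kenter_tmp(vaddr_t kva, paddr_t pa)
{

        pmap_kenter_pa(kva, pa, VM_PROT_READ | VM_PROT_WRITE, 0);
        pmap_update(pmap_kernel());

        /* ... access the page through kva ... */

        pmap_kremove(kva, PAGE_SIZE);
        pmap_update(pmap_kernel());
}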
                   3436:
                   3437: void
                   3438: pmap_kremove(vaddr_t va, vsize_t len)
                   3439: {
                   3440:        vaddr_t next_bucket, eva;
                   3441:        u_int mappings;
1.174     matt     3442:
                   3443:        PMAPCOUNT(kenter_unmappings);
1.134     thorpej  3444:
                   3445:        NPDEBUG(PDB_KREMOVE, printf("pmap_kremove: va 0x%08lx, len 0x%08lx\n",
                   3446:            va, len));
                   3447:
                   3448:        eva = va + len;
                   3449:
                   3450:        while (va < eva) {
                   3451:                next_bucket = L2_NEXT_BUCKET(va);
                   3452:                if (next_bucket > eva)
                   3453:                        next_bucket = eva;
                   3454:
1.262     matt     3455:                struct l2_bucket * const l2b = pmap_get_l2_bucket(pmap_kernel(), va);
1.134     thorpej  3456:                KDASSERT(l2b != NULL);
                   3457:
1.262     matt     3458:                pt_entry_t * const sptep = &l2b->l2b_kva[l2pte_index(va)];
                   3459:                pt_entry_t *ptep = sptep;
1.134     thorpej  3460:                mappings = 0;
                   3461:
                   3462:                while (va < next_bucket) {
1.262     matt     3463:                        const pt_entry_t opte = *ptep;
                   3464:                        struct vm_page *opg = PHYS_TO_VM_PAGE(l2pte_pa(opte));
                   3465:                        if (opg != NULL) {
1.215     uebayasi 3466:                                struct vm_page_md *omd = VM_PAGE_TO_MD(opg);
                   3467:
                   3468:                                if (omd->pvh_attrs & PVF_KMPAGE) {
                   3469:                                        KASSERT(omd->urw_mappings == 0);
                   3470:                                        KASSERT(omd->uro_mappings == 0);
                   3471:                                        KASSERT(omd->krw_mappings == 0);
                   3472:                                        KASSERT(omd->kro_mappings == 0);
                   3473:                                        omd->pvh_attrs &= ~PVF_KMPAGE;
1.186     matt     3474: #ifdef PMAP_CACHE_VIPT
1.251     matt     3475:                                        if (arm_cache_prefer_mask != 0) {
                   3476:                                                omd->pvh_attrs &= ~PVF_WRITE;
                   3477:                                        }
1.186     matt     3478: #endif
                   3479:                                        pmap_kmpages--;
                   3480: #ifdef PMAP_CACHE_VIPT
1.179     matt     3481:                                } else {
1.182     matt     3482:                                        pool_put(&pmap_pv_pool,
                   3483:                                            pmap_kremove_pg(opg, va));
1.186     matt     3484: #endif
1.179     matt     3485:                                }
1.174     matt     3486:                        }
1.134     thorpej  3487:                        if (l2pte_valid(opte)) {
1.174     matt     3488: #ifdef PMAP_CACHE_VIVT
1.134     thorpej  3489:                                cpu_dcache_wbinv_range(va, PAGE_SIZE);
1.174     matt     3490: #endif
1.134     thorpej  3491:                                cpu_tlb_flushD_SE(va);
                   3492:                        }
                   3493:                        if (opte) {
1.262     matt     3494:                                l2pte_reset(ptep);
1.134     thorpej  3495:                                mappings++;
                   3496:                        }
                   3497:                        va += PAGE_SIZE;
1.262     matt     3498:                        ptep += PAGE_SIZE / L2_S_SIZE;
1.134     thorpej  3499:                }
                   3500:                KDASSERT(mappings <= l2b->l2b_occupancy);
                   3501:                l2b->l2b_occupancy -= mappings;
                   3502:                PTE_SYNC_RANGE(sptep, (u_int)(ptep - sptep));
                   3503:        }
                   3504:        cpu_cpwait();
                   3505: }
                   3506:
1.159     thorpej  3507: bool
1.134     thorpej  3508: pmap_extract(pmap_t pm, vaddr_t va, paddr_t *pap)
                   3509: {
                   3510:        struct l2_dtable *l2;
                   3511:        pd_entry_t *pl1pd, l1pd;
                   3512:        pt_entry_t *ptep, pte;
                   3513:        paddr_t pa;
                   3514:        u_int l1idx;
                   3515:
                   3516:        pmap_acquire_pmap_lock(pm);
                   3517:
                   3518:        l1idx = L1_IDX(va);
1.258     matt     3519:        pl1pd = pmap_l1_kva(pm) + l1idx;
1.134     thorpej  3520:        l1pd = *pl1pd;
                   3521:
                   3522:        if (l1pte_section_p(l1pd)) {
                   3523:                /*
                   3524:                 * These should only happen for pmap_kernel()
                   3525:                 */
                   3526:                KDASSERT(pm == pmap_kernel());
                   3527:                pmap_release_pmap_lock(pm);
1.235     matt     3528: #if (ARM_MMU_V6 + ARM_MMU_V7) > 0
                   3529:                if (l1pte_supersection_p(l1pd)) {
                   3530:                        pa = (l1pd & L1_SS_FRAME) | (va & L1_SS_OFFSET);
                   3531:                } else
                   3532: #endif
                   3533:                        pa = (l1pd & L1_S_FRAME) | (va & L1_S_OFFSET);
1.134     thorpej  3534:        } else {
                   3535:                /*
                   3536:                 * Note that we can't rely on the validity of the L1
                   3537:                 * descriptor as an indication that a mapping exists.
                   3538:                 * We have to look it up in the L2 dtable.
                   3539:                 */
                   3540:                l2 = pm->pm_l2[L2_IDX(l1idx)];
                   3541:
                   3542:                if (l2 == NULL ||
                   3543:                    (ptep = l2->l2_bucket[L2_BUCKET(l1idx)].l2b_kva) == NULL) {
                   3544:                        pmap_release_pmap_lock(pm);
1.174     matt     3545:                        return false;
1.134     thorpej  3546:                }
                   3547:
                   3548:                ptep = &ptep[l2pte_index(va)];
                   3549:                pte = *ptep;
                   3550:                pmap_release_pmap_lock(pm);
                   3551:
                   3552:                if (pte == 0)
1.174     matt     3553:                        return false;
1.134     thorpej  3554:
                   3555:                switch (pte & L2_TYPE_MASK) {
                   3556:                case L2_TYPE_L:
                   3557:                        pa = (pte & L2_L_FRAME) | (va & L2_L_OFFSET);
                   3558:                        break;
                   3559:
                   3560:                default:
                   3561:                        pa = (pte & L2_S_FRAME) | (va & L2_S_OFFSET);
                   3562:                        break;
                   3563:                }
                   3564:        }
                   3565:
                   3566:        if (pap != NULL)
                   3567:                *pap = pa;
                   3568:
1.174     matt     3569:        return true;
1.134     thorpej  3570: }
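
/*
 * Illustrative sketch (hypothetical caller): translate a kernel VA to
 * its physical address, checking the return value since the VA may not
 * be backed by any mapping at all.
 */
static paddr_t
example_kvtophys(vaddr_t va)
{
        paddr_t pa;

        if (!pmap_extract(pmap_kernel(), va, &pa))
                panic("example_kvtophys: no mapping for va %#lx", va);
        return pa;
}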
                   3571:
                   3572: void
                   3573: pmap_protect(pmap_t pm, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
                   3574: {
                   3575:        struct l2_bucket *l2b;
                   3576:        pt_entry_t *ptep, pte;
                   3577:        vaddr_t next_bucket;
                   3578:
                   3579:        NPDEBUG(PDB_PROTECT,
                   3580:            printf("pmap_protect: pm %p sva 0x%lx eva 0x%lx prot 0x%x\n",
                   3581:            pm, sva, eva, prot));
                   3582:
                   3583:        if ((prot & VM_PROT_READ) == 0) {
                   3584:                pmap_remove(pm, sva, eva);
                   3585:                return;
                   3586:        }
                   3587:
                   3588:        if (prot & VM_PROT_WRITE) {
                   3589:                /*
                   3590:                 * If this is a read->write transition, just ignore it and let
                   3591:                 * uvm_fault() take care of it later.
                   3592:                 */
                   3593:                return;
                   3594:        }
                   3595:
                   3596:        pmap_acquire_pmap_lock(pm);
                   3597:
1.262     matt     3598:        const bool flush = eva - sva >= PAGE_SIZE * 4;
                   3599:        u_int clr_mask = PVF_WRITE | ((prot & VM_PROT_EXECUTE) ? 0 : PVF_EXEC);
                   3600:        u_int flags = 0;
1.134     thorpej  3601:
                   3602:        while (sva < eva) {
                   3603:                next_bucket = L2_NEXT_BUCKET(sva);
                   3604:                if (next_bucket > eva)
                   3605:                        next_bucket = eva;
                   3606:
                   3607:                l2b = pmap_get_l2_bucket(pm, sva);
                   3608:                if (l2b == NULL) {
                   3609:                        sva = next_bucket;
                   3610:                        continue;
                   3611:                }
                   3612:
                   3613:                ptep = &l2b->l2b_kva[l2pte_index(sva)];
                   3614:
                   3615:                while (sva < next_bucket) {
1.174     matt     3616:                        pte = *ptep;
1.214     jmcneill 3617:                        if (l2pte_valid(pte) != 0 && l2pte_writable_p(pte)) {
1.134     thorpej  3618:                                struct vm_page *pg;
                   3619:                                u_int f;
                   3620:
1.174     matt     3621: #ifdef PMAP_CACHE_VIVT
                   3622:                                /*
                   3623:                                 * OK, at this point, we know we're doing
                   3624:                                 * write-protect operation.  If the pmap is
                   3625:                                 * active, write-back the page.
                   3626:                                 */
1.259     matt     3627:                                pmap_cache_wbinv_page(pm, sva, false, PVF_REF);
1.174     matt     3628: #endif
                   3629:
1.134     thorpej  3630:                                pg = PHYS_TO_VM_PAGE(l2pte_pa(pte));
1.214     jmcneill 3631:                                pte = l2pte_set_readonly(pte);
1.134     thorpej  3632:                                *ptep = pte;
                   3633:                                PTE_SYNC(ptep);
                   3634:
                   3635:                                if (pg != NULL) {
1.215     uebayasi 3636:                                        struct vm_page_md *md = VM_PAGE_TO_MD(pg);
                   3637:                                        paddr_t pa = VM_PAGE_TO_PHYS(pg);
                   3638:
1.227     matt     3639: #ifdef MULTIPROCESSOR
1.226     matt     3640:                                        KASSERT(uvm_page_locked_p(pg));
1.227     matt     3641: #endif
1.215     uebayasi 3642:                                        f = pmap_modify_pv(md, pa, pm, sva,
1.174     matt     3643:                                            clr_mask, 0);
1.215     uebayasi 3644:                                        pmap_vac_me_harder(md, pa, pm, sva);
1.226     matt     3645:                                } else {
1.134     thorpej  3646:                                        f = PVF_REF | PVF_EXEC;
1.226     matt     3647:                                }
1.134     thorpej  3648:
1.262     matt     3649:                                if (flush) {
1.134     thorpej  3650:                                        flags |= f;
1.259     matt     3651:                                } else {
                   3652:                                        pmap_tlb_flush_SE(pm, sva, f);
                   3653:                                }
1.1       matt     3654:                        }
1.134     thorpej  3655:
                   3656:                        sva += PAGE_SIZE;
                   3657:                        ptep++;
                   3658:                }
1.1       matt     3659:        }
                   3660:
1.134     thorpej  3661:        if (flush) {
1.262     matt     3662:                if (PV_BEEN_EXECD(flags)) {
1.134     thorpej  3663:                        pmap_tlb_flushID(pm);
1.262     matt     3664:                } else if (PV_BEEN_REFD(flags)) {
1.134     thorpej  3665:                        pmap_tlb_flushD(pm);
1.262     matt     3666:                }
1.134     thorpej  3667:        }
1.262     matt     3668:
                   3669:        pmap_release_pmap_lock(pm);
1.134     thorpej  3670: }
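
/*
 * Illustrative sketch (hypothetical caller): downgrade a range to
 * read-only.  Upgrades (adding VM_PROT_WRITE) are ignored above and
 * left to uvm_fault(); dropping VM_PROT_READ turns into pmap_remove().
 */
static void
example_write_protect(pmap_t pm, vaddr_t sva, vaddr_t eva)
{

        pmap_protect(pm, sva, eva, VM_PROT_READ);
        pmap_update(pm);
}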
                   3671:
                   3672: void
1.174     matt     3673: pmap_icache_sync_range(pmap_t pm, vaddr_t sva, vaddr_t eva)
                   3674: {
                   3675:        struct l2_bucket *l2b;
                   3676:        pt_entry_t *ptep;
                   3677:        vaddr_t next_bucket;
                   3678:        vsize_t page_size = trunc_page(sva) + PAGE_SIZE - sva;
                   3679:
                   3680:        NPDEBUG(PDB_EXEC,
                   3681:            printf("pmap_icache_sync_range: pm %p sva 0x%lx eva 0x%lx\n",
                   3682:            pm, sva, eva));
                   3683:
                   3684:        pmap_acquire_pmap_lock(pm);
                   3685:
                   3686:        while (sva < eva) {
                   3687:                next_bucket = L2_NEXT_BUCKET(sva);
                   3688:                if (next_bucket > eva)
                   3689:                        next_bucket = eva;
                   3690:
                   3691:                l2b = pmap_get_l2_bucket(pm, sva);
                   3692:                if (l2b == NULL) {
                   3693:                        sva = next_bucket;
                   3694:                        continue;
                   3695:                }
                   3696:
                   3697:                for (ptep = &l2b->l2b_kva[l2pte_index(sva)];
                   3698:                     sva < next_bucket;
                   3699:                     sva += page_size, ptep++, page_size = PAGE_SIZE) {
                   3700:                        if (l2pte_valid(*ptep)) {
                   3701:                                cpu_icache_sync_range(sva,
                   3702:                                    min(page_size, eva - sva));
                   3703:                        }
                   3704:                }
                   3705:        }
                   3706:
                   3707:        pmap_release_pmap_lock(pm);
                   3708: }
                   3709:
                   3710: void
1.134     thorpej  3711: pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
                   3712: {
1.215     uebayasi 3713:        struct vm_page_md *md = VM_PAGE_TO_MD(pg);
                   3714:        paddr_t pa = VM_PAGE_TO_PHYS(pg);
1.134     thorpej  3715:
                   3716:        NPDEBUG(PDB_PROTECT,
1.215     uebayasi 3717:            printf("pmap_page_protect: md %p (0x%08lx), prot 0x%x\n",
                   3718:            md, pa, prot));
1.134     thorpej  3719:
1.227     matt     3720: #ifdef MULTIPROCESSOR
1.226     matt     3721:        KASSERT(uvm_page_locked_p(pg));
1.227     matt     3722: #endif
1.226     matt     3723:
1.134     thorpej  3724:        switch(prot) {
1.174     matt     3725:        case VM_PROT_READ|VM_PROT_WRITE:
                   3726: #if defined(PMAP_CHECK_VIPT) && defined(PMAP_APX)
1.215     uebayasi 3727:                pmap_clearbit(md, pa, PVF_EXEC);
1.174     matt     3728:                break;
                   3729: #endif
1.134     thorpej  3730:        case VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE:
1.174     matt     3731:                break;
1.134     thorpej  3732:
                   3733:        case VM_PROT_READ:
1.174     matt     3734: #if defined(PMAP_CHECK_VIPT) && defined(PMAP_APX)
1.215     uebayasi 3735:                pmap_clearbit(md, pa, PVF_WRITE|PVF_EXEC);
1.174     matt     3736:                break;
                   3737: #endif
1.134     thorpej  3738:        case VM_PROT_READ|VM_PROT_EXECUTE:
1.215     uebayasi 3739:                pmap_clearbit(md, pa, PVF_WRITE);
1.134     thorpej  3740:                break;
                   3741:
                   3742:        default:
1.215     uebayasi 3743:                pmap_page_remove(md, pa);
1.134     thorpej  3744:                break;
                   3745:        }
                   3746: }
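/*
 * Editorial note (not part of the original source): the switch above
 * relies on deliberate fallthrough.  Unless both PMAP_CHECK_VIPT and
 * PMAP_APX are defined (i.e. execute permission can actually be
 * revoked), VM_PROT_READ|VM_PROT_WRITE falls through to the RWX case
 * and becomes a no-op, and VM_PROT_READ falls through to the
 * READ|EXECUTE case, which only clears PVF_WRITE.  Any request that
 * does not include VM_PROT_READ removes the mappings entirely via
 * pmap_page_remove().
 */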
                   3747:
                   3748: /*
                   3749:  * pmap_clear_modify:
                   3750:  *
                   3751:  *     Clear the "modified" attribute for a page.
                   3752:  */
1.159     thorpej  3753: bool
1.134     thorpej  3754: pmap_clear_modify(struct vm_page *pg)
                   3755: {
1.215     uebayasi 3756:        struct vm_page_md *md = VM_PAGE_TO_MD(pg);
                   3757:        paddr_t pa = VM_PAGE_TO_PHYS(pg);
1.159     thorpej  3758:        bool rv;
1.134     thorpej  3759:
1.227     matt     3760: #ifdef MULTIPROCESSOR
1.226     matt     3761:        KASSERT(uvm_page_locked_p(pg));
1.227     matt     3762: #endif
1.226     matt     3763:
1.215     uebayasi 3764:        if (md->pvh_attrs & PVF_MOD) {
1.160     thorpej  3765:                rv = true;
1.194     matt     3766: #ifdef PMAP_CACHE_VIPT
                   3767:                /*
                   3768:                 * If we are going to clear the modified bit and there are
                   3769:                 * no other modified bits set, flush the page to memory and
                   3770:                 * mark it clean.
                   3771:                 */
1.215     uebayasi 3772:                if ((md->pvh_attrs & (PVF_DMOD|PVF_NC)) == PVF_MOD)
                   3773:                        pmap_flush_page(md, pa, PMAP_CLEAN_PRIMARY);
1.194     matt     3774: #endif
1.215     uebayasi 3775:                pmap_clearbit(md, pa, PVF_MOD);
1.134     thorpej  3776:        } else
1.160     thorpej  3777:                rv = false;
1.134     thorpej  3778:
                   3779:        return (rv);
                   3780: }
                   3781:
                   3782: /*
                   3783:  * pmap_clear_reference:
                   3784:  *
                   3785:  *     Clear the "referenced" attribute for a page.
                   3786:  */
1.159     thorpej  3787: bool
1.134     thorpej  3788: pmap_clear_reference(struct vm_page *pg)
                   3789: {
1.215     uebayasi 3790:        struct vm_page_md *md = VM_PAGE_TO_MD(pg);
                   3791:        paddr_t pa = VM_PAGE_TO_PHYS(pg);
1.159     thorpej  3792:        bool rv;
1.134     thorpej  3793:
1.227     matt     3794: #ifdef MULTIPROCESSOR
1.226     matt     3795:        KASSERT(uvm_page_locked_p(pg));
1.227     matt     3796: #endif
1.226     matt     3797:
1.215     uebayasi 3798:        if (md->pvh_attrs & PVF_REF) {
1.160     thorpej  3799:                rv = true;
1.215     uebayasi 3800:                pmap_clearbit(md, pa, PVF_REF);
1.134     thorpej  3801:        } else
1.160     thorpej  3802:                rv = false;
1.134     thorpej  3803:
                   3804:        return (rv);
                   3805: }
                   3806:
                   3807: /*
                   3808:  * pmap_is_modified:
                   3809:  *
                   3810:  *     Test if a page has the "modified" attribute.
                   3811:  */
                   3812: /* See <arm/arm32/pmap.h> */
                   3813:
                   3814: /*
                   3815:  * pmap_is_referenced:
                   3816:  *
                   3817:  *     Test if a page has the "referenced" attribute.
                   3818:  */
                   3819: /* See <arm/arm32/pmap.h> */
                   3820:
                   3821: int
                   3822: pmap_fault_fixup(pmap_t pm, vaddr_t va, vm_prot_t ftype, int user)
                   3823: {
                   3824:        struct l2_dtable *l2;
                   3825:        struct l2_bucket *l2b;
                   3826:        pd_entry_t *pl1pd, l1pd;
                   3827:        pt_entry_t *ptep, pte;
                   3828:        paddr_t pa;
                   3829:        u_int l1idx;
                   3830:        int rv = 0;
                   3831:
                   3832:        pmap_acquire_pmap_lock(pm);
                   3833:
                   3834:        l1idx = L1_IDX(va);
                   3835:
                   3836:        /*
                   3837:         * If there is no l2_dtable for this address, then the process
                   3838:         * has no business accessing it.
                   3839:         *
                   3840:         * Note: This will catch userland processes trying to access
                   3841:         * kernel addresses.
                   3842:         */
                   3843:        l2 = pm->pm_l2[L2_IDX(l1idx)];
                   3844:        if (l2 == NULL)
                   3845:                goto out;
                   3846:
1.1       matt     3847:        /*
1.134     thorpej  3848:         * Likewise if there is no L2 descriptor table
1.1       matt     3849:         */
1.134     thorpej  3850:        l2b = &l2->l2_bucket[L2_BUCKET(l1idx)];
                   3851:        if (l2b->l2b_kva == NULL)
                   3852:                goto out;
                   3853:
                   3854:        /*
                   3855:         * Check the PTE itself.
                   3856:         */
                   3857:        ptep = &l2b->l2b_kva[l2pte_index(va)];
                   3858:        pte = *ptep;
                   3859:        if (pte == 0)
                   3860:                goto out;
                   3861:
                   3862:        /*
                   3863:         * Catch a userland access to the vector page mapped at 0x0
                   3864:         */
                   3865:        if (user && (pte & L2_S_PROT_U) == 0)
                   3866:                goto out;
                   3867:
                   3868:        pa = l2pte_pa(pte);
                   3869:
1.214     jmcneill 3870:        if ((ftype & VM_PROT_WRITE) && !l2pte_writable_p(pte)) {
1.134     thorpej  3871:                /*
                   3872:                 * This looks like a good candidate for "page modified"
                   3873:                 * emulation...
                   3874:                 */
                   3875:                struct pv_entry *pv;
                   3876:                struct vm_page *pg;
                   3877:
                   3878:                /* Extract the physical address of the page */
                   3879:                if ((pg = PHYS_TO_VM_PAGE(pa)) == NULL)
                   3880:                        goto out;
                   3881:
1.215     uebayasi 3882:                struct vm_page_md *md = VM_PAGE_TO_MD(pg);
                   3883:
1.134     thorpej  3884:                /* Get the current flags for this page. */
1.227     matt     3885: #ifdef MULTIPROCESSOR
1.226     matt     3886:                KASSERT(uvm_page_locked_p(pg));
1.227     matt     3887: #endif
1.134     thorpej  3888:
1.215     uebayasi 3889:                pv = pmap_find_pv(md, pm, va);
1.134     thorpej  3890:                if (pv == NULL) {
                   3891:                        goto out;
                   3892:                }
                   3893:
                   3894:                /*
                   3895:                 * Do the flags say this page is writable? If not then it
                   3896:                 * is a genuine write fault. If yes then the write fault is
                   3897:                 * our fault as we did not reflect the write access in the
                   3898:         * PTE. Now that we know a write has occurred, we can correct this
                   3899:         * and also set the modified bit.
                   3900:                 */
                   3901:                if ((pv->pv_flags & PVF_WRITE) == 0) {
                   3902:                        goto out;
                   3903:                }
                   3904:
                   3905:                NPDEBUG(PDB_FOLLOW,
                   3906:                    printf("pmap_fault_fixup: mod emul. pm %p, va 0x%08lx, pa 0x%08lx\n",
1.215     uebayasi 3907:                    pm, va, pa));
1.134     thorpej  3908:
1.215     uebayasi 3909:                md->pvh_attrs |= PVF_REF | PVF_MOD;
1.134     thorpej  3910:                pv->pv_flags |= PVF_REF | PVF_MOD;
1.185     matt     3911: #ifdef PMAP_CACHE_VIPT
                   3912:                /*
                   3913:                 * If there are cacheable mappings for this page, mark it dirty.
                   3914:                 */
1.215     uebayasi 3915:                if ((md->pvh_attrs & PVF_NC) == 0)
                   3916:                        md->pvh_attrs |= PVF_DIRTY;
1.185     matt     3917: #endif
1.134     thorpej  3918:
                   3919:                /*
                   3920:                 * Re-enable write permissions for the page.  No need to call
                   3921:                 * pmap_vac_me_harder(), since this is just a
                   3922:                 * modified-emulation fault, and the PVF_WRITE bit isn't
                   3923:                 * changing. We've already set the cacheable bits based on
                   3924:                 * the assumption that we can write to this page.
                   3925:                 */
1.214     jmcneill 3926:                *ptep = l2pte_set_writable((pte & ~L2_TYPE_MASK) | L2_S_PROTO);
1.134     thorpej  3927:                PTE_SYNC(ptep);
                   3928:                rv = 1;
                   3929:        } else
                   3930:        if ((pte & L2_TYPE_MASK) == L2_TYPE_INV) {
                   3931:                /*
                   3932:                 * This looks like a good candidate for "page referenced"
                   3933:                 * emulation.
                   3934:                 */
                   3935:                struct pv_entry *pv;
                   3936:                struct vm_page *pg;
                   3937:
                   3938:                /* Extract the physical address of the page */
                   3939:                if ((pg = PHYS_TO_VM_PAGE(pa)) == NULL)
                   3940:                        goto out;
                   3941:
1.215     uebayasi 3942:                struct vm_page_md *md = VM_PAGE_TO_MD(pg);
                   3943:
1.134     thorpej  3944:                /* Get the current flags for this page. */
1.227     matt     3945: #ifdef MULTIPROCESSOR
1.226     matt     3946:                KASSERT(uvm_page_locked_p(pg));
1.227     matt     3947: #endif
1.134     thorpej  3948:
1.215     uebayasi 3949:                pv = pmap_find_pv(md, pm, va);
1.134     thorpej  3950:                if (pv == NULL) {
                   3951:                        goto out;
                   3952:                }
                   3953:
1.215     uebayasi 3954:                md->pvh_attrs |= PVF_REF;
1.134     thorpej  3955:                pv->pv_flags |= PVF_REF;
1.1       matt     3956:
1.134     thorpej  3957:                NPDEBUG(PDB_FOLLOW,
                   3958:                    printf("pmap_fault_fixup: ref emul. pm %p, va 0x%08lx, pa 0x%08lx\n",
1.215     uebayasi 3959:                    pm, va, pa));
1.134     thorpej  3960:
1.214     jmcneill 3961:                *ptep = l2pte_set_readonly((pte & ~L2_TYPE_MASK) | L2_S_PROTO);
1.134     thorpej  3962:                PTE_SYNC(ptep);
                   3963:                rv = 1;
                   3964:        }
                   3965:
                   3966:        /*
                   3967:         * We know there is a valid mapping here, so simply
                   3968:         * fix up the L1 if necessary.
                   3969:         */
1.258     matt     3970:        pl1pd = pmap_l1_kva(pm) + l1idx;
                   3971:        l1pd = l2b->l2b_phys | L1_C_DOM(pmap_domain(pm)) | L1_C_PROTO;
1.134     thorpej  3972:        if (*pl1pd != l1pd) {
                   3973:                *pl1pd = l1pd;
                   3974:                PTE_SYNC(pl1pd);
                   3975:                rv = 1;
                   3976:        }
                   3977:
                   3978: #ifdef CPU_SA110
                   3979:        /*
                   3980:         * There are bugs in the rev K SA110.  This is a check for one
                   3981:         * of them.
                   3982:         */
                   3983:        if (rv == 0 && curcpu()->ci_arm_cputype == CPU_ID_SA110 &&
                   3984:            curcpu()->ci_arm_cpurev < 3) {
                   3985:                /* Always current pmap */
                   3986:                if (l2pte_valid(pte)) {
                   3987:                        extern int kernel_debug;
                   3988:                        if (kernel_debug & 1) {
                   3989:                                struct proc *p = curlwp->l_proc;
                   3990:                                printf("prefetch_abort: page is already "
                   3991:                                    "mapped - pte=%p *pte=%08x\n", ptep, pte);
                   3992:                                printf("prefetch_abort: pc=%08lx proc=%p "
                   3993:                                    "process=%s\n", va, p, p->p_comm);
                   3994:                                printf("prefetch_abort: far=%08x fs=%x\n",
                   3995:                                    cpu_faultaddress(), cpu_faultstatus());
1.113     thorpej  3996:                        }
1.134     thorpej  3997: #ifdef DDB
                   3998:                        if (kernel_debug & 2)
                   3999:                                Debugger();
                   4000: #endif
                   4001:                        rv = 1;
1.1       matt     4002:                }
                   4003:        }
1.134     thorpej  4004: #endif /* CPU_SA110 */
1.104     thorpej  4005:
1.238     matt     4006:        /*
                   4007:         * If 'rv == 0' at this point, it generally indicates that there is a
                   4008:         * stale TLB entry for the faulting address.  That might be due to a
                   4009:         * wrong setting of pmap_needs_pte_sync.  So set it and retry.
                   4010:         */
                   4011:        if (rv == 0 && pm->pm_l1->l1_domain_use_count == 1
                   4012:            && pmap_needs_pte_sync == 0) {
1.240     matt     4013:                pmap_needs_pte_sync = 1;
1.239     matt     4014:                PTE_SYNC(ptep);
1.238     matt     4015:                rv = 1;
                   4016:        }
                   4017:
1.134     thorpej  4018: #ifdef DEBUG
                   4019:        /*
                   4020:         * If 'rv == 0' at this point, it generally indicates that there is a
                   4021:         * stale TLB entry for the faulting address. This happens when two or
                   4022:         * more processes are sharing an L1. Since we don't flush the TLB on
                   4023:         * a context switch between such processes, we can take domain faults
                   4024:         * for mappings which exist at the same VA in both processes. EVEN IF
                   4025:         * WE'VE RECENTLY FIXED UP THE CORRESPONDING L1 in pmap_enter(), for
                   4026:         * example.
                   4027:         *
                   4028:         * This is extremely likely to happen if pmap_enter() updated the L1
                   4029:         * entry for a recently entered mapping. In this case, the TLB is
                   4030:         * flushed for the new mapping, but there may still be TLB entries for
                   4031:         * other mappings belonging to other processes in the 1MB range
                   4032:         * covered by the L1 entry.
                   4033:         *
                   4034:         * Since 'rv == 0', we know that the L1 already contains the correct
                   4035:         * value, so the fault must be due to a stale TLB entry.
                   4036:         *
                   4037:         * Since we always need to flush the TLB anyway in the case where we
                   4038:         * fixed up the L1, or frobbed the L2 PTE, we effectively deal with
                   4039:         * stale TLB entries dynamically.
                   4040:         *
                   4041:         * However, the above condition can ONLY happen if the current L1 is
                   4042:         * being shared. If it happens when the L1 is unshared, it indicates
                   4043:         * that other parts of the pmap are not doing their job WRT managing
                   4044:         * the TLB.
                   4045:         */
                   4046:        if (rv == 0 && pm->pm_l1->l1_domain_use_count == 1) {
                   4047:                extern int last_fault_code;
                   4048:                printf("fixup: pm %p, va 0x%lx, ftype %d - nothing to do!\n",
                   4049:                    pm, va, ftype);
                   4050:                printf("fixup: l2 %p, l2b %p, ptep %p, pl1pd %p\n",
                   4051:                    l2, l2b, ptep, pl1pd);
                   4052:                printf("fixup: pte 0x%x, l1pd 0x%x, last code 0x%x\n",
                   4053:                    pte, l1pd, last_fault_code);
                   4054: #ifdef DDB
1.255     skrll    4055:                extern int kernel_debug;
                   4056:
1.247     matt     4057:                if (kernel_debug & 2)
                   4058:                        Debugger();
1.134     thorpej  4059: #endif
                   4060:        }
                   4061: #endif
                   4062:
                   4063:        cpu_tlb_flushID_SE(va);
                   4064:        cpu_cpwait();
                   4065:
                   4066:        rv = 1;
1.104     thorpej  4067:
1.134     thorpej  4068: out:
                   4069:        pmap_release_pmap_lock(pm);
                   4070:
                   4071:        return (rv);
                   4072: }
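/*
 * Editorial sketch (not part of the original source): the abort
 * handlers are expected to give pmap_fault_fixup() first crack at a
 * fault and only fall back to uvm_fault() when it returns 0.  The
 * surrounding names (map, far, ftype, user, error) are hypothetical.
 */
#if 0
	if (pmap_fault_fixup(map->pmap, trunc_page(far), ftype, user))
		return;		/* ref/mod emulation or L1 fixup resolved it */
	error = uvm_fault(map, trunc_page(far), ftype);
#endif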
                   4073:
                   4074: /*
                   4075:  * Routine:    pmap_procwr
                   4076:  *
1.1       matt     4077:  * Function:
1.134     thorpej  4078:  *     Synchronize caches corresponding to [addr, addr+len) in p.
                   4079:  *
                   4080:  */
                   4081: void
                   4082: pmap_procwr(struct proc *p, vaddr_t va, int len)
                   4083: {
                   4084:        /* We only need to do anything if it is the current process. */
                   4085:        if (p == curproc)
                   4086:                cpu_icache_sync_range(va, len);
                   4087: }
                   4088:
                   4089: /*
                   4090:  * Routine:    pmap_unwire
                   4091:  * Function:   Clear the wired attribute for a map/virtual-address pair.
                   4092:  *
                   4093:  * In/out conditions:
                   4094:  *             The mapping must already exist in the pmap.
1.1       matt     4095:  */
1.134     thorpej  4096: void
                   4097: pmap_unwire(pmap_t pm, vaddr_t va)
                   4098: {
                   4099:        struct l2_bucket *l2b;
                   4100:        pt_entry_t *ptep, pte;
                   4101:        struct vm_page *pg;
                   4102:        paddr_t pa;
                   4103:
                   4104:        NPDEBUG(PDB_WIRING, printf("pmap_unwire: pm %p, va 0x%08lx\n", pm, va));
                   4105:
                   4106:        pmap_acquire_pmap_lock(pm);
                   4107:
                   4108:        l2b = pmap_get_l2_bucket(pm, va);
                   4109:        KDASSERT(l2b != NULL);
                   4110:
                   4111:        ptep = &l2b->l2b_kva[l2pte_index(va)];
                   4112:        pte = *ptep;
                   4113:
                   4114:        /* Extract the physical address of the page */
                   4115:        pa = l2pte_pa(pte);
1.1       matt     4116:
1.134     thorpej  4117:        if ((pg = PHYS_TO_VM_PAGE(pa)) != NULL) {
                   4118:                /* Update the wired bit in the pv entry for this page. */
1.215     uebayasi 4119:                struct vm_page_md *md = VM_PAGE_TO_MD(pg);
                   4120:
1.227     matt     4121: #ifdef MULTIPROCESSOR
1.226     matt     4122:                KASSERT(uvm_page_locked_p(pg));
1.227     matt     4123: #endif
1.215     uebayasi 4124:                (void) pmap_modify_pv(md, pa, pm, va, PVF_WIRED, 0);
1.134     thorpej  4125:        }
                   4126:
                   4127:        pmap_release_pmap_lock(pm);
                   4128: }
                   4129:
                   4130: void
1.173     scw      4131: pmap_activate(struct lwp *l)
1.1       matt     4132: {
1.165     scw      4133:        extern int block_userspace_access;
                   4134:        pmap_t opm, npm, rpm;
                   4135:        uint32_t odacr, ndacr;
                   4136:        int oldirqstate;
                   4137:
1.173     scw      4138:        /*
                   4139:         * If activating a non-current lwp or the current lwp is
                   4140:         * already active, just return.
                   4141:         */
                   4142:        if (l != curlwp ||
                   4143:            l->l_proc->p_vmspace->vm_map.pmap->pm_activated == true)
                   4144:                return;
                   4145:
                   4146:        npm = l->l_proc->p_vmspace->vm_map.pmap;
1.165     scw      4147:        ndacr = (DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2)) |
1.258     matt     4148:            (DOMAIN_CLIENT << (pmap_domain(npm) * 2));
1.134     thorpej  4149:
1.165     scw      4150:        /*
                   4151:         * If TTB and DACR are unchanged, short-circuit all the
                   4152:         * TLB/cache management stuff.
                   4153:         */
1.173     scw      4154:        if (pmap_previous_active_lwp != NULL) {
                   4155:                opm = pmap_previous_active_lwp->l_proc->p_vmspace->vm_map.pmap;
1.165     scw      4156:                odacr = (DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2)) |
1.258     matt     4157:                    (DOMAIN_CLIENT << (pmap_domain(opm) * 2));
1.134     thorpej  4158:
1.165     scw      4159:                if (opm->pm_l1 == npm->pm_l1 && odacr == ndacr)
                   4160:                        goto all_done;
                   4161:        } else
                   4162:                opm = NULL;
1.134     thorpej  4163:
1.174     matt     4164:        PMAPCOUNT(activations);
1.165     scw      4165:        block_userspace_access = 1;
1.134     thorpej  4166:
1.165     scw      4167:        /*
                   4168:         * If switching to a user vmspace which is different to the
                   4169:         * most recent one, and the most recent one is potentially
                   4170:         * live in the cache, we must write-back and invalidate the
                   4171:         * entire cache.
                   4172:         */
                   4173:        rpm = pmap_recent_user;
1.203     scw      4174:
                   4175: /*
                   4176:  * XXXSCW: There's a corner case here which can leave turds in the cache as
                   4177:  * reported in kern/41058. They're probably left over during tear-down and
                   4178:  * switching away from an exiting process. Until the root cause is identified
                   4179:  * and fixed, zap the cache when switching pmaps. This will result in a few
                   4180:  * unnecessary cache flushes, but that's better than silently corrupting data.
                   4181:  */
                   4182: #if 0
1.165     scw      4183:        if (npm != pmap_kernel() && rpm && npm != rpm &&
                   4184:            rpm->pm_cstate.cs_cache) {
                   4185:                rpm->pm_cstate.cs_cache = 0;
1.174     matt     4186: #ifdef PMAP_CACHE_VIVT
1.165     scw      4187:                cpu_idcache_wbinv_all();
1.174     matt     4188: #endif
1.165     scw      4189:        }
1.203     scw      4190: #else
                   4191:        if (rpm) {
                   4192:                rpm->pm_cstate.cs_cache = 0;
                   4193:                if (npm == pmap_kernel())
                   4194:                        pmap_recent_user = NULL;
                   4195: #ifdef PMAP_CACHE_VIVT
                   4196:                cpu_idcache_wbinv_all();
                   4197: #endif
                   4198:        }
                   4199: #endif
1.134     thorpej  4200:
1.165     scw      4201:        /* No interrupts while we frob the TTB/DACR */
1.183     matt     4202:        oldirqstate = disable_interrupts(IF32_bits);
1.1       matt     4203:
1.257     matt     4204: #ifndef ARM_HAS_VBAR
1.165     scw      4205:        /*
                   4206:         * For ARM_VECTORS_LOW, we MUST, I repeat, MUST fix up the L1
                   4207:         * entry corresponding to 'vector_page' in the incoming L1 table
                   4208:         * before switching to it; otherwise subsequent interrupts/exceptions
                   4209:         * (including domain faults!) will jump into hyperspace.
                   4210:         */
                   4211:        if (npm->pm_pl1vec != NULL) {
                   4212:                cpu_tlb_flushID_SE((u_int)vector_page);
                   4213:                cpu_cpwait();
                   4214:                *npm->pm_pl1vec = npm->pm_l1vec;
                   4215:                PTE_SYNC(npm->pm_pl1vec);
                   4216:        }
1.257     matt     4217: #endif
1.1       matt     4218:
1.165     scw      4219:        cpu_domains(ndacr);
1.1       matt     4220:
1.165     scw      4221:        if (npm == pmap_kernel() || npm == rpm) {
1.134     thorpej  4222:                /*
1.165     scw      4223:                 * Switching to a kernel thread, or back to the
                   4224:                 * same user vmspace as before... Simply update
                   4225:                 * the TTB (no TLB flush required)
1.134     thorpej  4226:                 */
1.237     matt     4227:                cpu_setttb(npm->pm_l1->l1_physaddr, false);
1.165     scw      4228:                cpu_cpwait();
                   4229:        } else {
                   4230:                /*
                   4231:                 * Otherwise, update TTB and flush TLB
                   4232:                 */
                   4233:                cpu_context_switch(npm->pm_l1->l1_physaddr);
                   4234:                if (rpm != NULL)
                   4235:                        rpm->pm_cstate.cs_tlb = 0;
                   4236:        }
                   4237:
                   4238:        restore_interrupts(oldirqstate);
                   4239:
                   4240:        block_userspace_access = 0;
                   4241:
                   4242:  all_done:
                   4243:        /*
                   4244:         * The new pmap is resident. Make sure it's marked
                   4245:         * as resident in the cache/TLB.
                   4246:         */
                   4247:        npm->pm_cstate.cs_all = PMAP_CACHE_STATE_ALL;
                   4248:        if (npm != pmap_kernel())
                   4249:                pmap_recent_user = npm;
1.1       matt     4250:
1.165     scw      4251:        /* The old pmap is no longer active */
                   4252:        if (opm != NULL)
                   4253:                opm->pm_activated = false;
1.1       matt     4254:
1.165     scw      4255:        /* But the new one is */
                   4256:        npm->pm_activated = true;
                   4257: }
1.1       matt     4258:
1.165     scw      4259: void
1.134     thorpej  4260: pmap_deactivate(struct lwp *l)
                   4261: {
1.165     scw      4262:
1.178     scw      4263:        /*
                   4264:         * If the process is exiting, make sure pmap_activate() does
                   4265:         * a full MMU context-switch and cache flush, which we might
                   4266:         * otherwise skip. See PR port-arm/38950.
                   4267:         */
                   4268:        if (l->l_proc->p_sflag & PS_WEXIT)
                   4269:                pmap_previous_active_lwp = NULL;
                   4270:
1.165     scw      4271:        l->l_proc->p_vmspace->vm_map.pmap->pm_activated = false;
1.1       matt     4272: }
                   4273:
                   4274: void
1.134     thorpej  4275: pmap_update(pmap_t pm)
1.1       matt     4276: {
                   4277:
1.134     thorpej  4278:        if (pm->pm_remove_all) {
                   4279:                /*
                   4280:                 * Finish up the pmap_remove_all() optimisation by flushing
                   4281:                 * the TLB.
                   4282:                 */
                   4283:                pmap_tlb_flushID(pm);
1.160     thorpej  4284:                pm->pm_remove_all = false;
1.134     thorpej  4285:        }
1.1       matt     4286:
1.134     thorpej  4287:        if (pmap_is_current(pm)) {
1.107     thorpej  4288:                /*
1.134     thorpej  4289:                 * If we're dealing with a current userland pmap, move its L1
                   4290:                 * to the end of the LRU.
1.107     thorpej  4291:                 */
1.134     thorpej  4292:                if (pm != pmap_kernel())
                   4293:                        pmap_use_l1(pm);
                   4294:
1.1       matt     4295:                /*
1.134     thorpej  4296:                 * We can assume we're done with frobbing the cache/tlb for
                   4297:                 * now. Make sure any future pmap ops don't skip cache/tlb
                   4298:                 * flushes.
1.1       matt     4299:                 */
1.134     thorpej  4300:                pm->pm_cstate.cs_all = PMAP_CACHE_STATE_ALL;
1.1       matt     4301:        }
                   4302:
1.174     matt     4303:        PMAPCOUNT(updates);
                   4304:
1.96      thorpej  4305:        /*
1.134     thorpej  4306:         * make sure TLB/cache operations have completed.
1.96      thorpej  4307:         */
1.134     thorpej  4308:        cpu_cpwait();
                   4309: }
                   4310:
                   4311: void
                   4312: pmap_remove_all(pmap_t pm)
                   4313: {
1.96      thorpej  4314:
1.1       matt     4315:        /*
1.134     thorpej  4316:         * The vmspace described by this pmap is about to be torn down.
                   4317:         * Until pmap_update() is called, UVM will only make calls
                   4318:         * to pmap_remove(). We can make life much simpler by flushing
                   4319:         * the cache now, and deferring TLB invalidation to pmap_update().
1.1       matt     4320:         */
1.174     matt     4321: #ifdef PMAP_CACHE_VIVT
1.259     matt     4322:        pmap_cache_wbinv_all(pm, PVF_EXEC);
1.174     matt     4323: #endif
1.160     thorpej  4324:        pm->pm_remove_all = true;
1.1       matt     4325: }
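/*
 * Editorial sketch (not part of the original source): the teardown
 * sequence this optimisation assumes.  The "map" below is hypothetical.
 */
#if 0
	pmap_remove_all(map->pmap);	/* flush cache once, set pm_remove_all */
	/* ... UVM tears down each map entry via pmap_remove() ... */
	pmap_update(map->pmap);		/* deferred TLB flush happens here */
#endif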
                   4326:
                   4327: /*
1.134     thorpej  4328:  * Retire the given physical map from service.
                   4329:  * Should only be called if the map contains no valid mappings.
1.1       matt     4330:  */
1.134     thorpej  4331: void
                   4332: pmap_destroy(pmap_t pm)
1.1       matt     4333: {
1.134     thorpej  4334:        u_int count;
1.1       matt     4335:
1.134     thorpej  4336:        if (pm == NULL)
                   4337:                return;
1.1       matt     4338:
1.134     thorpej  4339:        if (pm->pm_remove_all) {
                   4340:                pmap_tlb_flushID(pm);
1.160     thorpej  4341:                pm->pm_remove_all = false;
1.1       matt     4342:        }
1.79      thorpej  4343:
1.49      thorpej  4344:        /*
1.134     thorpej  4345:         * Drop reference count
1.49      thorpej  4346:         */
1.222     rmind    4347:        mutex_enter(pm->pm_lock);
1.134     thorpej  4348:        count = --pm->pm_obj.uo_refs;
1.222     rmind    4349:        mutex_exit(pm->pm_lock);
1.134     thorpej  4350:        if (count > 0) {
                   4351:                if (pmap_is_current(pm)) {
                   4352:                        if (pm != pmap_kernel())
                   4353:                                pmap_use_l1(pm);
                   4354:                        pm->pm_cstate.cs_all = PMAP_CACHE_STATE_ALL;
                   4355:                }
                   4356:                return;
                   4357:        }
1.66      thorpej  4358:
1.1       matt     4359:        /*
1.134     thorpej  4360:         * reference count is zero, free pmap resources and then free pmap.
1.1       matt     4361:         */
1.134     thorpej  4362:
1.257     matt     4363: #ifndef ARM_HAS_VBAR
1.134     thorpej  4364:        if (vector_page < KERNEL_BASE) {
1.165     scw      4365:                KDASSERT(!pmap_is_current(pm));
1.147     scw      4366:
1.134     thorpej  4367:                /* Remove the vector page mapping */
                   4368:                pmap_remove(pm, vector_page, vector_page + PAGE_SIZE);
                   4369:                pmap_update(pm);
1.1       matt     4370:        }
1.257     matt     4371: #endif
1.1       matt     4372:
1.134     thorpej  4373:        LIST_REMOVE(pm, pm_list);
                   4374:
                   4375:        pmap_free_l1(pm);
                   4376:
1.165     scw      4377:        if (pmap_recent_user == pm)
                   4378:                pmap_recent_user = NULL;
                   4379:
1.222     rmind    4380:        uvm_obj_destroy(&pm->pm_obj, false);
                   4381:        mutex_destroy(&pm->pm_obj_lock);
1.168     ad       4382:        pool_cache_put(&pmap_cache, pm);
1.134     thorpej  4383: }
                   4384:
                   4385:
                   4386: /*
                   4387:  * void pmap_reference(pmap_t pm)
                   4388:  *
                   4389:  * Add a reference to the specified pmap.
                   4390:  */
                   4391: void
                   4392: pmap_reference(pmap_t pm)
                   4393: {
1.1       matt     4394:
1.134     thorpej  4395:        if (pm == NULL)
                   4396:                return;
1.1       matt     4397:
1.134     thorpej  4398:        pmap_use_l1(pm);
1.104     thorpej  4399:
1.222     rmind    4400:        mutex_enter(pm->pm_lock);
1.134     thorpej  4401:        pm->pm_obj.uo_refs++;
1.222     rmind    4402:        mutex_exit(pm->pm_lock);
1.134     thorpej  4403: }
1.49      thorpej  4404:
1.214     jmcneill 4405: #if (ARM_MMU_V6 + ARM_MMU_V7) > 0
1.174     matt     4406:
                   4407: static struct evcnt pmap_prefer_nochange_ev =
                   4408:     EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "pmap prefer", "nochange");
                   4409: static struct evcnt pmap_prefer_change_ev =
                   4410:     EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "pmap prefer", "change");
                   4411:
                   4412: EVCNT_ATTACH_STATIC(pmap_prefer_change_ev);
                   4413: EVCNT_ATTACH_STATIC(pmap_prefer_nochange_ev);
                   4414:
                   4415: void
                   4416: pmap_prefer(vaddr_t hint, vaddr_t *vap, int td)
                   4417: {
                   4418:        vsize_t mask = arm_cache_prefer_mask | (PAGE_SIZE - 1);
                   4419:        vaddr_t va = *vap;
                   4420:        vaddr_t diff = (hint - va) & mask;
                   4421:        if (diff == 0) {
                   4422:                pmap_prefer_nochange_ev.ev_count++;
                   4423:        } else {
                   4424:                pmap_prefer_change_ev.ev_count++;
                   4425:                if (__predict_false(td))
                   4426:                        va -= mask + 1;
                   4427:                *vap = va + diff;
                   4428:        }
                   4429: }
1.214     jmcneill 4430: #endif /* ARM_MMU_V6 | ARM_MMU_V7 */
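/*
 * Editorial note (not part of the original source): a worked example of
 * the colour adjustment above, assuming (for illustration only) a
 * 16 KiB cache way, i.e. arm_cache_prefer_mask == 0x3000, and 4 KiB
 * pages, so mask == 0x3fff:
 *
 *	hint = 0x0000a000, *vap = 0x00013000
 *	diff = (0xa000 - 0x13000) & 0x3fff = 0x3000
 *	*vap becomes 0x00016000, and 0x16000 & 0x3fff == 0xa000 & 0x3fff,
 *	so the returned address has the same cache colour as the hint.
 */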
1.174     matt     4431:
1.134     thorpej  4432: /*
                   4433:  * pmap_zero_page()
                   4434:  *
                   4435:  * Zero a given physical page by mapping it at a page hook point.
                   4436:  * In doing the zero page op, the page we zero is mapped cachable, as with
                   4437:  * In doing the zero page op, the page we zero is mapped cacheable, since
                   4438:  * on StrongARM accesses to non-cached pages are non-burst, making writing
                   4439:  * _any_ bulk data very slow.
1.214     jmcneill 4440: #if (ARM_MMU_GENERIC + ARM_MMU_SA1 + ARM_MMU_V6 + ARM_MMU_V7) != 0
1.134     thorpej  4441: void
                   4442: pmap_zero_page_generic(paddr_t phys)
                   4443: {
1.174     matt     4444: #if defined(PMAP_CACHE_VIPT) || defined(DEBUG)
1.134     thorpej  4445:        struct vm_page *pg = PHYS_TO_VM_PAGE(phys);
1.215     uebayasi 4446:        struct vm_page_md *md = VM_PAGE_TO_MD(pg);
1.174     matt     4447: #endif
1.244     matt     4448: #if defined(PMAP_CACHE_VIPT)
1.174     matt     4449:        /* Choose the last page color it had, if any */
1.215     uebayasi 4450:        const vsize_t va_offset = md->pvh_attrs & arm_cache_prefer_mask;
1.174     matt     4451: #else
                   4452:        const vsize_t va_offset = 0;
                   4453: #endif
1.244     matt     4454: #if defined(__HAVE_MM_MD_DIRECT_MAPPED_PHYS)
                   4455:        /*
                   4456:         * Is this page mapped at its natural color?
                   4457:         * If we have all of memory mapped, then just convert PA to VA.
                   4458:         */
                   4459:        const bool okcolor = va_offset == (phys & arm_cache_prefer_mask);
                   4460:        const vaddr_t vdstp = KERNEL_BASE + (phys - physical_start);
                   4461: #else
                   4462:        const bool okcolor = false;
                   4463:        const vaddr_t vdstp = cdstp + va_offset;
                   4464: #endif
1.174     matt     4465:        pt_entry_t * const ptep = &cdst_pte[va_offset >> PGSHIFT];
1.1       matt     4466:
1.244     matt     4467:
1.174     matt     4468: #ifdef DEBUG
1.215     uebayasi 4469:        if (!SLIST_EMPTY(&md->pvh_list))
1.134     thorpej  4470:                panic("pmap_zero_page: page has mappings");
                   4471: #endif
1.1       matt     4472:
1.134     thorpej  4473:        KDASSERT((phys & PGOFSET) == 0);
1.120     chris    4474:
1.244     matt     4475:        if (!okcolor) {
                   4476:                /*
                   4477:                 * Hook in the page, zero it, and purge the cache for that
                   4478:                 * zeroed page. Invalidate the TLB as needed.
                   4479:                 */
                   4480:                *ptep = L2_S_PROTO | phys |
                   4481:                    L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode;
                   4482:                PTE_SYNC(ptep);
                   4483:                cpu_tlb_flushD_SE(cdstp + va_offset);
                   4484:                cpu_cpwait();
                   4485: #if defined(__HAVE_MM_MD_DIRECT_MAPPED_PHYS) && defined(PMAP_CACHE_VIPT)
                   4486:                /*
                   4487:                 * If we are direct-mapped and our color isn't ok, then before
                   4488:                 * we bzero the page invalidate its contents from the cache and
                   4489:                 * reset the color to its natural color.
                   4490:                 */
                   4491:                cpu_dcache_inv_range(cdstp + va_offset, PAGE_SIZE);
                   4492:                md->pvh_attrs &= ~arm_cache_prefer_mask;
                   4493:                md->pvh_attrs |= (phys & arm_cache_prefer_mask);
                   4494: #endif
                   4495:        }
                   4496:        bzero_page(vdstp);
                   4497:        if (!okcolor) {
                   4498:                /*
                   4499:                 * Unmap the page.
                   4500:                 */
                   4501:                *ptep = 0;
                   4502:                PTE_SYNC(ptep);
                   4503:                cpu_tlb_flushD_SE(cdstp + va_offset);
1.174     matt     4504: #ifdef PMAP_CACHE_VIVT
1.244     matt     4505:                cpu_dcache_wbinv_range(cdstp + va_offset, PAGE_SIZE);
1.174     matt     4506: #endif
1.244     matt     4507:        }
1.174     matt     4508: #ifdef PMAP_CACHE_VIPT
                   4509:        /*
                   4510:         * This page is now cache resident so it now has a page color.
                   4511:         * Any contents have been obliterated so clear the EXEC flag.
                   4512:         */
1.215     uebayasi 4513:        if (!pmap_is_page_colored_p(md)) {
1.174     matt     4514:                PMAPCOUNT(vac_color_new);
1.215     uebayasi 4515:                md->pvh_attrs |= PVF_COLORED;
1.174     matt     4516:        }
1.215     uebayasi 4517:        if (PV_IS_EXEC_P(md->pvh_attrs)) {
                   4518:                md->pvh_attrs &= ~PVF_EXEC;
1.174     matt     4519:                PMAPCOUNT(exec_discarded_zero);
                   4520:        }
1.215     uebayasi 4521:        md->pvh_attrs |= PVF_DIRTY;
1.174     matt     4522: #endif
1.134     thorpej  4523: }
1.174     matt     4524: #endif /* (ARM_MMU_GENERIC + ARM_MMU_SA1 + ARM_MMU_V6 + ARM_MMU_V7) != 0 */
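/*
 * Editorial note (not part of the original source): with
 * __HAVE_MM_MD_DIRECT_MAPPED_PHYS, "okcolor" above simply checks that
 * the page's last cache colour (va_offset) matches the colour its
 * physical address gives it in the direct map; when they match, the
 * direct-mapped alias KERNEL_BASE + (phys - physical_start) can be
 * zeroed without setting up and tearing down a temporary mapping at
 * cdstp.
 */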
1.1       matt     4525:
1.134     thorpej  4526: #if ARM_MMU_XSCALE == 1
                   4527: void
                   4528: pmap_zero_page_xscale(paddr_t phys)
                   4529: {
                   4530: #ifdef DEBUG
                   4531:        struct vm_page *pg = PHYS_TO_VM_PAGE(phys);
1.215     uebayasi 4532:        struct vm_page_md *md = VM_PAGE_TO_MD(pg);
1.1       matt     4533:
1.215     uebayasi 4534:        if (!SLIST_EMPTY(&md->pvh_list))
1.134     thorpej  4535:                panic("pmap_zero_page: page has mappings");
                   4536: #endif
1.1       matt     4537:
1.134     thorpej  4538:        KDASSERT((phys & PGOFSET) == 0);
1.1       matt     4539:
1.134     thorpej  4540:        /*
                   4541:         * Hook in the page, zero it, and purge the cache for that
                   4542:         * zeroed page. Invalidate the TLB as needed.
                   4543:         */
                   4544:        *cdst_pte = L2_S_PROTO | phys |
                   4545:            L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) |
1.174     matt     4546:            L2_C | L2_XS_T_TEX(TEX_XSCALE_X);   /* mini-data */
1.134     thorpej  4547:        PTE_SYNC(cdst_pte);
                   4548:        cpu_tlb_flushD_SE(cdstp);
                   4549:        cpu_cpwait();
                   4550:        bzero_page(cdstp);
                   4551:        xscale_cache_clean_minidata();
                   4552: }
                   4553: #endif /* ARM_MMU_XSCALE == 1 */
1.1       matt     4554:
1.134     thorpej  4555: /* pmap_pageidlezero()
                   4556:  *
                   4557:  * The same as above, except that we assume that the page is not
                   4558:  * mapped.  This means we never have to flush the cache first.  Called
                   4559:  * from the idle loop.
                   4560:  */
1.159     thorpej  4561: bool
1.134     thorpej  4562: pmap_pageidlezero(paddr_t phys)
                   4563: {
                   4564:        unsigned int i;
                   4565:        int *ptr;
1.160     thorpej  4566:        bool rv = true;
1.174     matt     4567: #if defined(PMAP_CACHE_VIPT) || defined(DEBUG)
                   4568:        struct vm_page * const pg = PHYS_TO_VM_PAGE(phys);
1.215     uebayasi 4569:        struct vm_page_md *md = VM_PAGE_TO_MD(pg);
1.174     matt     4570: #endif
                   4571: #ifdef PMAP_CACHE_VIPT
                   4572:        /* Choose the last page color it had, if any */
1.215     uebayasi 4573:        const vsize_t va_offset = md->pvh_attrs & arm_cache_prefer_mask;
1.174     matt     4574: #else
                   4575:        const vsize_t va_offset = 0;
                   4576: #endif
                   4577:        pt_entry_t * const ptep = &csrc_pte[va_offset >> PGSHIFT];
                   4578:
                   4579:
1.134     thorpej  4580: #ifdef DEBUG
1.215     uebayasi 4581:        if (!SLIST_EMPTY(&md->pvh_list))
1.134     thorpej  4582:                panic("pmap_pageidlezero: page has mappings");
1.1       matt     4583: #endif
                   4584:
1.134     thorpej  4585:        KDASSERT((phys & PGOFSET) == 0);
                   4586:
1.109     thorpej  4587:        /*
1.134     thorpej  4588:         * Hook in the page, zero it, and purge the cache for that
                   4589:         * zeroed page. Invalidate the TLB as needed.
1.109     thorpej  4590:         */
1.174     matt     4591:        *ptep = L2_S_PROTO | phys |
1.134     thorpej  4592:            L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode;
1.174     matt     4593:        PTE_SYNC(ptep);
                   4594:        cpu_tlb_flushD_SE(cdstp + va_offset);
1.134     thorpej  4595:        cpu_cpwait();
1.1       matt     4596:
1.174     matt     4597:        for (i = 0, ptr = (int *)(cdstp + va_offset);
1.134     thorpej  4598:                        i < (PAGE_SIZE / sizeof(int)); i++) {
1.174     matt     4599:                if (sched_curcpu_runnable_p() != 0) {
1.134     thorpej  4600:                        /*
                   4601:                         * A process has become ready.  Abort now,
                   4602:                         * so we don't keep it waiting while we
                   4603:                         * do slow memory access to finish this
                   4604:                         * page.
                   4605:                         */
1.160     thorpej  4606:                        rv = false;
1.134     thorpej  4607:                        break;
                   4608:                }
                   4609:                *ptr++ = 0;
1.11      chris    4610:        }
1.1       matt     4611:
1.174     matt     4612: #ifdef PMAP_CACHE_VIVT
1.134     thorpej  4613:        if (rv)
                   4614:                /*
                   4615:                 * If we aborted, we'll rezero this page again later, so
                   4616:                 * don't purge it unless we finished it.
                   4617:                 */
                   4618:                cpu_dcache_wbinv_range(cdstp, PAGE_SIZE);
1.174     matt     4619: #elif defined(PMAP_CACHE_VIPT)
                   4620:        /*
                   4621:         * This page is now cache resident so it now has a page color.
                   4622:         * Any contents have been obliterated so clear the EXEC flag.
                   4623:         */
1.215     uebayasi 4624:        if (!pmap_is_page_colored_p(md)) {
1.174     matt     4625:                PMAPCOUNT(vac_color_new);
1.215     uebayasi 4626:                md->pvh_attrs |= PVF_COLORED;
1.174     matt     4627:        }
1.215     uebayasi 4628:        if (PV_IS_EXEC_P(md->pvh_attrs)) {
                   4629:                md->pvh_attrs &= ~PVF_EXEC;
1.174     matt     4630:                PMAPCOUNT(exec_discarded_zero);
                   4631:        }
                   4632: #endif
                   4633:        /*
                   4634:         * Unmap the page.
                   4635:         */
                   4636:        *ptep = 0;
                   4637:        PTE_SYNC(ptep);
                   4638:        cpu_tlb_flushD_SE(cdstp + va_offset);
1.1       matt     4639:
1.134     thorpej  4640:        return (rv);
1.1       matt     4641: }
1.134     thorpej  4642:
1.48      chris    4643: /*
1.134     thorpej  4644:  * pmap_copy_page()
1.48      chris    4645:  *
1.134     thorpej  4646:  * Copy one physical page into another, by mapping the pages into
                   4647:  * hook points. The same comment regarding cacheability as in
                   4648:  * pmap_zero_page also applies here.
1.48      chris    4649:  */
1.214     jmcneill 4650: #if (ARM_MMU_GENERIC + ARM_MMU_SA1 + ARM_MMU_V6 + ARM_MMU_V7) != 0
1.1       matt     4651: void
1.134     thorpej  4652: pmap_copy_page_generic(paddr_t src, paddr_t dst)
1.1       matt     4653: {
1.174     matt     4654:        struct vm_page * const src_pg = PHYS_TO_VM_PAGE(src);
1.215     uebayasi 4655:        struct vm_page_md *src_md = VM_PAGE_TO_MD(src_pg);
1.174     matt     4656: #if defined(PMAP_CACHE_VIPT) || defined(DEBUG)
                   4657:        struct vm_page * const dst_pg = PHYS_TO_VM_PAGE(dst);
1.215     uebayasi 4658:        struct vm_page_md *dst_md = VM_PAGE_TO_MD(dst_pg);
1.174     matt     4659: #endif
                   4660: #ifdef PMAP_CACHE_VIPT
1.215     uebayasi 4661:        const vsize_t src_va_offset = src_md->pvh_attrs & arm_cache_prefer_mask;
                   4662:        const vsize_t dst_va_offset = dst_md->pvh_attrs & arm_cache_prefer_mask;
1.174     matt     4663: #else
                   4664:        const vsize_t src_va_offset = 0;
                   4665:        const vsize_t dst_va_offset = 0;
                   4666: #endif
1.244     matt     4667: #if defined(__HAVE_MM_MD_DIRECT_MAPPED_PHYS)
                   4668:        /*
                   4669:         * Is this page mapped at its natural color?
                   4670:         * If we have all of memory mapped, then just convert PA to VA.
                   4671:         */
                   4672:        const bool src_okcolor = src_va_offset == (src & arm_cache_prefer_mask);
                   4673:        const bool dst_okcolor = dst_va_offset == (dst & arm_cache_prefer_mask);
                   4674:        const vaddr_t vsrcp = src_okcolor
                   4675:            ? KERNEL_BASE + (src - physical_start)
                   4676:            : csrcp + src_va_offset;
                   4677:        const vaddr_t vdstp = KERNEL_BASE + (dst - physical_start);
                   4678: #else
                   4679:        const bool src_okcolor = false;
                   4680:        const bool dst_okcolor = false;
1.245     matt     4681:        const vaddr_t vsrcp = csrcp + src_va_offset;
1.246     matt     4682:        const vaddr_t vdstp = cdstp + dst_va_offset;
1.244     matt     4683: #endif
1.174     matt     4684:        pt_entry_t * const src_ptep = &csrc_pte[src_va_offset >> PGSHIFT];
                   4685:        pt_entry_t * const dst_ptep = &cdst_pte[dst_va_offset >> PGSHIFT];
                   4686:
1.134     thorpej  4687: #ifdef DEBUG
1.215     uebayasi 4688:        if (!SLIST_EMPTY(&dst_md->pvh_list))
1.134     thorpej  4689:                panic("pmap_copy_page: dst page has mappings");
                   4690: #endif
1.83      thorpej  4691:
1.174     matt     4692: #ifdef PMAP_CACHE_VIPT
1.215     uebayasi 4693:        KASSERT(arm_cache_prefer_mask == 0 || src_md->pvh_attrs & (PVF_COLORED|PVF_NC));
1.174     matt     4694: #endif
1.134     thorpej  4695:        KDASSERT((src & PGOFSET) == 0);
                   4696:        KDASSERT((dst & PGOFSET) == 0);
1.105     thorpej  4697:
1.134     thorpej  4698:        /*
                   4699:         * Clean the source page.  Hold the source page's lock for
                   4700:         * the duration of the copy so that no other mappings can
                   4701:         * be created while we have a potentially aliased mapping.
                   4702:         */
1.227     matt     4703: #ifdef MULTIPROCESSOR
1.226     matt     4704:        KASSERT(uvm_page_locked_p(src_pg));
1.227     matt     4705: #endif
1.174     matt     4706: #ifdef PMAP_CACHE_VIVT
1.215     uebayasi 4707:        (void) pmap_clean_page(SLIST_FIRST(&src_md->pvh_list), true);
1.174     matt     4708: #endif
1.105     thorpej  4709:
1.134     thorpej  4710:        /*
                   4711:         * Map the pages into the page hook points, copy them, and purge
                   4712:         * the cache for the appropriate page. Invalidate the TLB
                   4713:         * as required.
                   4714:         */
1.244     matt     4715:        if (!src_okcolor) {
                   4716:                *src_ptep = L2_S_PROTO
                   4717:                    | src
1.174     matt     4718: #ifdef PMAP_CACHE_VIPT
1.244     matt     4719:                    | ((src_md->pvh_attrs & PVF_NC) ? 0 : pte_l2_s_cache_mode)
1.174     matt     4720: #endif
                   4721: #ifdef PMAP_CACHE_VIVT
1.244     matt     4722:                    | pte_l2_s_cache_mode
1.174     matt     4723: #endif
1.244     matt     4724:                    | L2_S_PROT(PTE_KERNEL, VM_PROT_READ);
                   4725:                PTE_SYNC(src_ptep);
                   4726:                cpu_tlb_flushD_SE(csrcp + src_va_offset);
                   4727:                cpu_cpwait();
                   4728:        }
                   4729:        if (!dst_okcolor) {
                   4730:                *dst_ptep = L2_S_PROTO | dst |
                   4731:                    L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode;
                   4732:                PTE_SYNC(dst_ptep);
                   4733:                cpu_tlb_flushD_SE(cdstp + dst_va_offset);
                   4734:                cpu_cpwait();
                   4735: #if defined(__HAVE_MM_MD_DIRECT_MAPPED_PHYS) && defined(PMAP_CACHE_VIPT)
                   4736:                /*
                   4737:                 * If we are direct-mapped and our color isn't ok, then before
                   4738:                 * we bcopy to the new page invalidate its contents from the
                   4739:                 * cache and reset its color to its natural color.
                   4740:                 */
                   4741:                cpu_dcache_inv_range(cdstp + dst_va_offset, PAGE_SIZE);
                   4742:                dst_md->pvh_attrs &= ~arm_cache_prefer_mask;
                   4743:                dst_md->pvh_attrs |= (dst & arm_cache_prefer_mask);
1.174     matt     4744: #endif
1.244     matt     4745:        }
                   4746:        bcopy_page(vsrcp, vdstp);
1.174     matt     4747: #ifdef PMAP_CACHE_VIVT
1.244     matt     4748:        cpu_dcache_inv_range(vsrcp, PAGE_SIZE);
                   4749:        cpu_dcache_wbinv_range(vdstp, PAGE_SIZE);
1.174     matt     4750: #endif
                   4751:        /*
                   4752:         * Unmap the pages.
                   4753:         */
1.244     matt     4754:        if (!src_okcolor) {
                   4755:                *src_ptep = 0;
                   4756:                PTE_SYNC(src_ptep);
                   4757:                cpu_tlb_flushD_SE(csrcp + src_va_offset);
                   4758:                cpu_cpwait();
                   4759:        }
                   4760:        if (!dst_okcolor) {
                   4761:                *dst_ptep = 0;
                   4762:                PTE_SYNC(dst_ptep);
                   4763:                cpu_tlb_flushD_SE(cdstp + dst_va_offset);
                   4764:                cpu_cpwait();
                   4765:        }
1.174     matt     4766: #ifdef PMAP_CACHE_VIPT
                   4767:        /*
                   4768:         * Now that the destination page is in the cache, mark it as colored.
                   4769:         * If this was an exec page, discard it.
                   4770:         */
1.215     uebayasi 4771:        if (!pmap_is_page_colored_p(dst_md)) {
1.174     matt     4772:                PMAPCOUNT(vac_color_new);
1.215     uebayasi 4773:                dst_md->pvh_attrs |= PVF_COLORED;
1.174     matt     4774:        }
1.215     uebayasi 4775:        if (PV_IS_EXEC_P(dst_md->pvh_attrs)) {
                   4776:                dst_md->pvh_attrs &= ~PVF_EXEC;
1.174     matt     4777:                PMAPCOUNT(exec_discarded_copy);
                   4778:        }
1.215     uebayasi 4779:        dst_md->pvh_attrs |= PVF_DIRTY;
1.174     matt     4780: #endif
1.1       matt     4781: }
1.174     matt     4782: #endif /* (ARM_MMU_GENERIC + ARM_MMU_SA1 + ARM_MMU_V6) != 0 */
1.1       matt     4783:
1.134     thorpej  4784: #if ARM_MMU_XSCALE == 1
1.1       matt     4785: void
1.134     thorpej  4786: pmap_copy_page_xscale(paddr_t src, paddr_t dst)
1.1       matt     4787: {
1.226     matt     4788:        struct vm_page *src_pg = PHYS_TO_VM_PAGE(src);
                   4789:        struct vm_page_md *src_md = VM_PAGE_TO_MD(src_pg);
1.134     thorpej  4790: #ifdef DEBUG
1.216     uebayasi 4791:        struct vm_page_md *dst_md = VM_PAGE_TO_MD(PHYS_TO_VM_PAGE(dst));
1.14      chs      4792:
1.215     uebayasi 4793:        if (!SLIST_EMPTY(&dst_md->pvh_list))
1.134     thorpej  4794:                panic("pmap_copy_page: dst page has mappings");
                   4795: #endif
1.13      chris    4796:
1.134     thorpej  4797:        KDASSERT((src & PGOFSET) == 0);
                   4798:        KDASSERT((dst & PGOFSET) == 0);
1.14      chs      4799:
1.134     thorpej  4800:        /*
                   4801:         * Clean the source page.  Hold the source page's lock for
                   4802:         * the duration of the copy so that no other mappings can
                   4803:         * be created while we have a potentially aliased mapping.
                   4804:         */
1.227     matt     4805: #ifdef MULTIPROCESSOR
1.226     matt     4806:        KASSERT(uvm_page_locked_p(src_pg));
1.227     matt     4807: #endif
1.174     matt     4808: #ifdef PMAP_CACHE_VIVT
1.215     uebayasi 4809:        (void) pmap_clean_page(SLIST_FIRST(&src_md->pvh_list), true);
1.174     matt     4810: #endif
1.105     thorpej  4811:
1.134     thorpej  4812:        /*
                   4813:         * Map the pages into the page hook points, copy them, and purge
                   4814:         * the cache for the appropriate page. Invalidate the TLB
                   4815:         * as required.
                   4816:         */
                   4817:        *csrc_pte = L2_S_PROTO | src |
                   4818:            L2_S_PROT(PTE_KERNEL, VM_PROT_READ) |
1.174     matt     4819:            L2_C | L2_XS_T_TEX(TEX_XSCALE_X);   /* mini-data */
1.134     thorpej  4820:        PTE_SYNC(csrc_pte);
                   4821:        *cdst_pte = L2_S_PROTO | dst |
                   4822:            L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) |
1.174     matt     4823:            L2_C | L2_XS_T_TEX(TEX_XSCALE_X);   /* mini-data */
1.134     thorpej  4824:        PTE_SYNC(cdst_pte);
                   4825:        cpu_tlb_flushD_SE(csrcp);
                   4826:        cpu_tlb_flushD_SE(cdstp);
                   4827:        cpu_cpwait();
                   4828:        bcopy_page(csrcp, cdstp);
                   4829:        xscale_cache_clean_minidata();
1.1       matt     4830: }
1.134     thorpej  4831: #endif /* ARM_MMU_XSCALE == 1 */
1.1       matt     4832:
                   4833: /*
1.134     thorpej  4834:  * void pmap_virtual_space(vaddr_t *start, vaddr_t *end)
1.1       matt     4835:  *
1.134     thorpej  4836:  * Return the start and end addresses of the kernel's virtual space.
                   4837:  * These values are setup in pmap_bootstrap and are updated as pages
                   4838:  * are allocated.
1.1       matt     4839:  */
                   4840: void
1.134     thorpej  4841: pmap_virtual_space(vaddr_t *start, vaddr_t *end)
1.1       matt     4842: {
1.134     thorpej  4843:        *start = virtual_avail;
                   4844:        *end = virtual_end;
1.1       matt     4845: }
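
                         /*
                          * A minimal sketch (not part of pmap.c) of how machine-independent
                          * startup code consumes this interface; the local variable names
                          * below are hypothetical.
                          */
                         #if 0
                                 vaddr_t kva_start, kva_end;

                                 pmap_virtual_space(&kva_start, &kva_end);
                                 /* [kva_start, kva_end) is then handed to the kernel VM map. */
                         #endif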
                   4846:
                   4847: /*
1.134     thorpej  4848:  * Helper function for pmap_grow_l2_bucket()
1.1       matt     4849:  */
1.157     perry    4850: static inline int
1.134     thorpej  4851: pmap_grow_map(vaddr_t va, pt_entry_t cache_mode, paddr_t *pap)
1.1       matt     4852: {
1.134     thorpej  4853:        struct l2_bucket *l2b;
                   4854:        pt_entry_t *ptep;
1.2       matt     4855:        paddr_t pa;
1.1       matt     4856:
1.160     thorpej  4857:        if (uvm.page_init_done == false) {
1.174     matt     4858: #ifdef PMAP_STEAL_MEMORY
                   4859:                pv_addr_t pv;
                   4860:                pmap_boot_pagealloc(PAGE_SIZE,
                   4861: #ifdef PMAP_CACHE_VIPT
                   4862:                    arm_cache_prefer_mask,
                   4863:                    va & arm_cache_prefer_mask,
                   4864: #else
                   4865:                    0, 0,
                   4866: #endif
                   4867:                    &pv);
                   4868:                pa = pv.pv_pa;
                   4869: #else
1.160     thorpej  4870:                if (uvm_page_physget(&pa) == false)
1.134     thorpej  4871:                        return (1);
1.174     matt     4872: #endif /* PMAP_STEAL_MEMORY */
1.134     thorpej  4873:        } else {
                   4874:                struct vm_page *pg;
                   4875:                pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE);
                   4876:                if (pg == NULL)
                   4877:                        return (1);
                   4878:                pa = VM_PAGE_TO_PHYS(pg);
1.174     matt     4879: #ifdef PMAP_CACHE_VIPT
1.215     uebayasi 4880: #ifdef DIAGNOSTIC
                   4881:                struct vm_page_md *md = VM_PAGE_TO_MD(pg);
                   4882: #endif
1.174     matt     4883:                /*
1.182     matt     4884:                 * This new page must not have any mappings.  Enter it via
                   4885:                 * pmap_kenter_pa and let that routine do the hard work.
1.174     matt     4886:                 */
1.215     uebayasi 4887:                KASSERT(SLIST_EMPTY(&md->pvh_list));
1.201     cegger   4888:                pmap_kenter_pa(va, pa,
1.213     cegger   4889:                    VM_PROT_READ|VM_PROT_WRITE, PMAP_KMPAGE);
1.174     matt     4890: #endif
1.134     thorpej  4891:        }
1.1       matt     4892:
1.134     thorpej  4893:        if (pap)
                   4894:                *pap = pa;
1.1       matt     4895:
1.174     matt     4896:        PMAPCOUNT(pt_mappings);
1.134     thorpej  4897:        l2b = pmap_get_l2_bucket(pmap_kernel(), va);
                   4898:        KDASSERT(l2b != NULL);
1.1       matt     4899:
1.134     thorpej  4900:        ptep = &l2b->l2b_kva[l2pte_index(va)];
                   4901:        *ptep = L2_S_PROTO | pa | cache_mode |
                   4902:            L2_S_PROT(PTE_KERNEL, VM_PROT_READ | VM_PROT_WRITE);
                   4903:        PTE_SYNC(ptep);
                   4904:        memset((void *)va, 0, PAGE_SIZE);
                   4905:        return (0);
1.1       matt     4906: }
                   4907:
                   4908: /*
1.134     thorpej  4909:  * This is the same as pmap_alloc_l2_bucket(), except that it is only
                   4910:  * used by pmap_growkernel().
1.1       matt     4911:  */
1.157     perry    4912: static inline struct l2_bucket *
1.134     thorpej  4913: pmap_grow_l2_bucket(pmap_t pm, vaddr_t va)
1.1       matt     4914: {
1.134     thorpej  4915:        struct l2_dtable *l2;
                   4916:        struct l2_bucket *l2b;
                   4917:        u_short l1idx;
                   4918:        vaddr_t nva;
                   4919:
                   4920:        l1idx = L1_IDX(va);
                   4921:
                   4922:        if ((l2 = pm->pm_l2[L2_IDX(l1idx)]) == NULL) {
                   4923:                /*
                   4924:                 * No mapping at this address, as there is
                   4925:                 * no entry in the L1 table.
                   4926:                 * Need to allocate a new l2_dtable.
                   4927:                 */
                   4928:                nva = pmap_kernel_l2dtable_kva;
                   4929:                if ((nva & PGOFSET) == 0) {
                   4930:                        /*
                   4931:                         * Need to allocate a backing page
                   4932:                         */
                   4933:                        if (pmap_grow_map(nva, pte_l2_s_cache_mode, NULL))
                   4934:                                return (NULL);
                   4935:                }
1.1       matt     4936:
1.134     thorpej  4937:                l2 = (struct l2_dtable *)nva;
                   4938:                nva += sizeof(struct l2_dtable);
1.82      thorpej  4939:
1.134     thorpej  4940:                if ((nva & PGOFSET) < (pmap_kernel_l2dtable_kva & PGOFSET)) {
                   4941:                        /*
                   4942:                         * The new l2_dtable straddles a page boundary.
                   4943:                         * Map in another page to cover it.
                   4944:                         */
                   4945:                        if (pmap_grow_map(nva, pte_l2_s_cache_mode, NULL))
                   4946:                                return (NULL);
                   4947:                }
1.1       matt     4948:
1.134     thorpej  4949:                pmap_kernel_l2dtable_kva = nva;
1.1       matt     4950:
1.134     thorpej  4951:                /*
                   4952:                 * Link it into the parent pmap
                   4953:                 */
                   4954:                pm->pm_l2[L2_IDX(l1idx)] = l2;
1.82      thorpej  4955:        }
1.75      reinoud  4956:
1.134     thorpej  4957:        l2b = &l2->l2_bucket[L2_BUCKET(l1idx)];
                   4958:
                   4959:        /*
                   4960:         * Fetch pointer to the L2 page table associated with the address.
                   4961:         */
                   4962:        if (l2b->l2b_kva == NULL) {
                   4963:                pt_entry_t *ptep;
                   4964:
                   4965:                /*
                   4966:                 * No L2 page table has been allocated. Chances are, this
                   4967:                 * is because we just allocated the l2_dtable, above.
                   4968:                 */
                   4969:                nva = pmap_kernel_l2ptp_kva;
                   4970:                ptep = (pt_entry_t *)nva;
                   4971:                if ((nva & PGOFSET) == 0) {
                   4972:                        /*
                   4973:                         * Need to allocate a backing page
                   4974:                         */
                   4975:                        if (pmap_grow_map(nva, pte_l2_s_cache_mode_pt,
                   4976:                            &pmap_kernel_l2ptp_phys))
                   4977:                                return (NULL);
                   4978:                        PTE_SYNC_RANGE(ptep, PAGE_SIZE / sizeof(pt_entry_t));
                   4979:                }
                   4980:
                   4981:                l2->l2_occupancy++;
                   4982:                l2b->l2b_kva = ptep;
                   4983:                l2b->l2b_l1idx = l1idx;
                   4984:                l2b->l2b_phys = pmap_kernel_l2ptp_phys;
                   4985:
                   4986:                pmap_kernel_l2ptp_kva += L2_TABLE_SIZE_REAL;
                   4987:                pmap_kernel_l2ptp_phys += L2_TABLE_SIZE_REAL;
1.82      thorpej  4988:        }
1.1       matt     4989:
1.134     thorpej  4990:        return (l2b);
                   4991: }
                   4992:
                   4993: vaddr_t
                   4994: pmap_growkernel(vaddr_t maxkvaddr)
                   4995: {
                   4996:        pmap_t kpm = pmap_kernel();
                   4997:        struct l1_ttable *l1;
                   4998:        struct l2_bucket *l2b;
                   4999:        pd_entry_t *pl1pd;
                   5000:        int s;
                   5001:
                   5002:        if (maxkvaddr <= pmap_curmaxkvaddr)
                   5003:                goto out;               /* we are OK */
1.1       matt     5004:
1.134     thorpej  5005:        NPDEBUG(PDB_GROWKERN,
                   5006:            printf("pmap_growkernel: growing kernel from 0x%lx to 0x%lx\n",
                   5007:            pmap_curmaxkvaddr, maxkvaddr));
1.1       matt     5008:
1.134     thorpej  5009:        KDASSERT(maxkvaddr <= virtual_end);
1.34      thorpej  5010:
1.134     thorpej  5011:        /*
                   5012:         * whoops!   we need to add kernel PTPs
                   5013:         */
1.1       matt     5014:
1.134     thorpej  5015:        s = splhigh();  /* to be safe */
1.222     rmind    5016:        mutex_enter(kpm->pm_lock);
1.1       matt     5017:
1.134     thorpej  5018:        /* Map 1MB at a time */
                   5019:        for (; pmap_curmaxkvaddr < maxkvaddr; pmap_curmaxkvaddr += L1_S_SIZE) {
1.1       matt     5020:
1.134     thorpej  5021:                l2b = pmap_grow_l2_bucket(kpm, pmap_curmaxkvaddr);
                   5022:                KDASSERT(l2b != NULL);
1.1       matt     5023:
1.134     thorpej  5024:                /* Distribute new L1 entry to all other L1s */
                   5025:                SLIST_FOREACH(l1, &l1_list, l1_link) {
                   5026:                        pl1pd = &l1->l1_kva[L1_IDX(pmap_curmaxkvaddr)];
                   5027:                        *pl1pd = l2b->l2b_phys | L1_C_DOM(PMAP_DOMAIN_KERNEL) |
                   5028:                            L1_C_PROTO;
                   5029:                        PTE_SYNC(pl1pd);
                   5030:                }
1.1       matt     5031:        }
                   5032:
1.134     thorpej  5033:        /*
                   5034:         * flush out the cache; this is expensive, but pmap_growkernel()
                   5035:         * happens rarely enough that it does not matter
                   5036:         */
                   5037:        cpu_dcache_wbinv_all();
                   5038:        cpu_tlb_flushD();
                   5039:        cpu_cpwait();
                   5040:
1.222     rmind    5041:        mutex_exit(kpm->pm_lock);
1.134     thorpej  5042:        splx(s);
1.1       matt     5043:
1.134     thorpej  5044: out:
                   5045:        return (pmap_curmaxkvaddr);
1.1       matt     5046: }
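
                         /*
                          * A minimal caller-side sketch, assuming a hypothetical address
                          * "new_kva" that the kernel map must now cover: the VM system asks
                          * the pmap to provide page tables up to that address.
                          */
                         #if 0
                                 vaddr_t new_kva, new_limit;     /* hypothetical, set elsewhere */

                                 if (new_kva > pmap_curmaxkvaddr)
                                         new_limit = pmap_growkernel(new_kva);
                         #endif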
                   5047:
1.134     thorpej  5048: /************************ Utility routines ****************************/
1.1       matt     5049:
1.257     matt     5050: #ifndef ARM_HAS_VBAR
1.134     thorpej  5051: /*
                   5052:  * vector_page_setprot:
                   5053:  *
                   5054:  *     Manipulate the protection of the vector page.
                   5055:  */
                   5056: void
                   5057: vector_page_setprot(int prot)
1.11      chris    5058: {
1.134     thorpej  5059:        struct l2_bucket *l2b;
                   5060:        pt_entry_t *ptep;
                   5061:
1.256     matt     5062: #if defined(CPU_ARMV7) || defined(CPU_ARM11)
                   5063:        /*
                   5064:         * If we are using VBAR so the vectors live in the kernel text, the
                   5065:         * vector page is already mapped, so there is no need to do anything here.
                   5066:         */
                   5067:        if (vector_page != ARM_VECTORS_LOW && vector_page != ARM_VECTORS_HIGH) {
                   5068:                KASSERT((armreg_pfr1_read() & ARM_PFR1_SEC_MASK) != 0);
                   5069:                return;
                   5070:        }
                   5071: #endif
                   5072:
1.134     thorpej  5073:        l2b = pmap_get_l2_bucket(pmap_kernel(), vector_page);
                   5074:        KDASSERT(l2b != NULL);
1.17      chris    5075:
1.134     thorpej  5076:        ptep = &l2b->l2b_kva[l2pte_index(vector_page)];
1.72      thorpej  5077:
1.232     matt     5078:        *ptep = (*ptep & ~L2_S_PROT_MASK) | L2_S_PROT(PTE_KERNEL, prot);
1.134     thorpej  5079:        PTE_SYNC(ptep);
                   5080:        cpu_tlb_flushD_SE(vector_page);
1.32      thorpej  5081:        cpu_cpwait();
1.17      chris    5082: }
1.257     matt     5083: #endif
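
                         /*
                          * A minimal usage sketch, assuming machine-dependent startup code
                          * that populates the vector page: make it writable while the stubs
                          * are copied in, then lock it down again.
                          */
                         #if 0
                                 vector_page_setprot(VM_PROT_READ | VM_PROT_WRITE);
                                 /* ... copy the exception vector stubs into the page ... */
                                 vector_page_setprot(VM_PROT_READ);
                         #endif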
1.17      chris    5084:
                   5085: /*
1.134     thorpej  5086:  * Fetch pointers to the PDE/PTE for the given pmap/VA pair.
1.160     thorpej  5087:  * Returns true if the mapping exists, else false.
1.134     thorpej  5088:  *
                   5089:  * NOTE: This function is only used by a couple of arm-specific modules.
                   5090:  * It is not safe to take any pmap locks here, since we could be right
                   5091:  * in the middle of debugging the pmap anyway...
                   5092:  *
1.160     thorpej  5093:  * It is possible for this routine to return false even though a valid
1.134     thorpej  5094:  * mapping does exist. This is because we don't lock, so the metadata
                   5095:  * state may be inconsistent.
                   5096:  *
                   5097:  * NOTE: We can return a NULL *ptp in the case where the L1 pde is
                   5098:  * a "section" mapping.
1.1       matt     5099:  */
1.159     thorpej  5100: bool
1.134     thorpej  5101: pmap_get_pde_pte(pmap_t pm, vaddr_t va, pd_entry_t **pdp, pt_entry_t **ptp)
1.1       matt     5102: {
1.134     thorpej  5103:        struct l2_dtable *l2;
                   5104:        pd_entry_t *pl1pd, l1pd;
                   5105:        pt_entry_t *ptep;
                   5106:        u_short l1idx;
                   5107:
                   5108:        if (pm->pm_l1 == NULL)
1.174     matt     5109:                return false;
1.134     thorpej  5110:
                   5111:        l1idx = L1_IDX(va);
1.258     matt     5112:        *pdp = pl1pd = pmap_l1_kva(pm) + l1idx;
1.134     thorpej  5113:        l1pd = *pl1pd;
1.1       matt     5114:
1.134     thorpej  5115:        if (l1pte_section_p(l1pd)) {
                   5116:                *ptp = NULL;
1.174     matt     5117:                return true;
1.1       matt     5118:        }
                   5119:
1.134     thorpej  5120:        if (pm->pm_l2 == NULL)
1.174     matt     5121:                return false;
1.21      chris    5122:
1.134     thorpej  5123:        l2 = pm->pm_l2[L2_IDX(l1idx)];
1.104     thorpej  5124:
1.134     thorpej  5125:        if (l2 == NULL ||
                   5126:            (ptep = l2->l2_bucket[L2_BUCKET(l1idx)].l2b_kva) == NULL) {
1.174     matt     5127:                return false;
1.29      rearnsha 5128:        }
1.21      chris    5129:
1.134     thorpej  5130:        *ptp = &ptep[l2pte_index(va)];
1.174     matt     5131:        return true;
1.1       matt     5132: }
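
                         /*
                          * A minimal debugger-style sketch of this interface; "va" is a
                          * hypothetical lookup address, and the printf formats assume the
                          * 32-bit arm32 pd_entry_t/pt_entry_t used by this pmap.
                          */
                         #if 0
                                 pd_entry_t *pdep;
                                 pt_entry_t *ptep;

                                 if (pmap_get_pde_pte(pmap_kernel(), va, &pdep, &ptep)) {
                                         if (ptep == NULL)
                                                 printf("section: pde=0x%08x\n", *pdep);
                                         else
                                                 printf("pde=0x%08x pte=0x%08x\n", *pdep, *ptep);
                                 }
                         #endif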
                   5133:
1.159     thorpej  5134: bool
1.134     thorpej  5135: pmap_get_pde(pmap_t pm, vaddr_t va, pd_entry_t **pdp)
1.1       matt     5136: {
                   5137:
1.134     thorpej  5138:        if (pm->pm_l1 == NULL)
1.174     matt     5139:                return false;
1.50      thorpej  5140:
1.258     matt     5141:        *pdp = pmap_l1_kva(pm) + L1_IDX(va);
1.50      thorpej  5142:
1.174     matt     5143:        return true;
1.1       matt     5144: }
                   5145:
1.134     thorpej  5146: /************************ Bootstrapping routines ****************************/
                   5147:
                   5148: static void
                   5149: pmap_init_l1(struct l1_ttable *l1, pd_entry_t *l1pt)
1.1       matt     5150: {
1.134     thorpej  5151:        int i;
                   5152:
                   5153:        l1->l1_kva = l1pt;
                   5154:        l1->l1_domain_use_count = 0;
                   5155:        l1->l1_domain_first = 0;
                   5156:
                   5157:        for (i = 0; i < PMAP_DOMAINS; i++)
                   5158:                l1->l1_domain_free[i] = i + 1;
1.1       matt     5159:
1.134     thorpej  5160:        /*
                   5161:         * Copy the kernel's L1 entries to each new L1.
                   5162:         */
                   5163:        if (pmap_initialized)
1.258     matt     5164:                memcpy(l1pt, pmap_l1_kva(pmap_kernel()), L1_TABLE_SIZE);
1.50      thorpej  5165:
1.134     thorpej  5166:        if (pmap_extract(pmap_kernel(), (vaddr_t)l1pt,
1.160     thorpej  5167:            &l1->l1_physaddr) == false)
1.134     thorpej  5168:                panic("pmap_init_l1: can't get PA of L1 at %p", l1pt);
1.50      thorpej  5169:
1.134     thorpej  5170:        SLIST_INSERT_HEAD(&l1_list, l1, l1_link);
                   5171:        TAILQ_INSERT_TAIL(&l1_lru_list, l1, l1_lru);
1.1       matt     5172: }
                   5173:
1.50      thorpej  5174: /*
1.134     thorpej  5175:  * pmap_bootstrap() is called from the board-specific initarm() routine
                   5176:  * once the kernel L1/L2 descriptor tables have been set up.
                   5177:  *
                   5178:  * This is a somewhat convoluted process since pmap bootstrap is, effectively,
                   5179:  * spread over a number of disparate files/functions.
1.50      thorpej  5180:  *
1.134     thorpej  5181:  * We are passed the following parameters
                   5182:  *  - vstart
                   5183:  *    1MB-aligned start of managed kernel virtual memory.
                   5184:  *  - vend
                   5185:  *    1MB-aligned end of managed kernel virtual memory.
                   5186:  *
                   5187:  * The kernel's L1 translation table is no longer passed in; it is taken
1.50      thorpej  5188:  * from the global kernel_l1pt set up by initarm(), and is used to build
1.134     thorpej  5189:  * the metadata (struct l1_ttable and struct l2_dtable) necessary to
                   5190:  * track kernel mappings.
1.50      thorpej  5191:  */
1.134     thorpej  5192: #define        PMAP_STATIC_L2_SIZE 16
                   5193: void
1.174     matt     5194: pmap_bootstrap(vaddr_t vstart, vaddr_t vend)
1.1       matt     5195: {
1.134     thorpej  5196:        static struct l1_ttable static_l1;
                   5197:        static struct l2_dtable static_l2[PMAP_STATIC_L2_SIZE];
                   5198:        struct l1_ttable *l1 = &static_l1;
                   5199:        struct l2_dtable *l2;
                   5200:        struct l2_bucket *l2b;
1.174     matt     5201:        pd_entry_t *l1pt = (pd_entry_t *) kernel_l1pt.pv_va;
1.134     thorpej  5202:        pmap_t pm = pmap_kernel();
                   5203:        pd_entry_t pde;
                   5204:        pt_entry_t *ptep;
1.2       matt     5205:        paddr_t pa;
1.134     thorpej  5206:        vaddr_t va;
                   5207:        vsize_t size;
1.174     matt     5208:        int nptes, l1idx, l2idx, l2next = 0;
1.134     thorpej  5209:
                   5210:        /*
                   5211:         * Initialise the kernel pmap object
                   5212:         */
                   5213:        pm->pm_l1 = l1;
                   5214:        pm->pm_domain = PMAP_DOMAIN_KERNEL;
1.165     scw      5215:        pm->pm_activated = true;
1.134     thorpej  5216:        pm->pm_cstate.cs_all = PMAP_CACHE_STATE_ALL;
1.222     rmind    5217:
                   5218:        mutex_init(&pm->pm_obj_lock, MUTEX_DEFAULT, IPL_NONE);
                   5219:        uvm_obj_init(&pm->pm_obj, NULL, false, 1);
                   5220:        uvm_obj_setlock(&pm->pm_obj, &pm->pm_obj_lock);
1.134     thorpej  5221:
                   5222:        /*
                   5223:         * Scan the L1 translation table created by initarm() and create
                   5224:         * the required metadata for all valid mappings found in it.
                   5225:         */
                   5226:        for (l1idx = 0; l1idx < (L1_TABLE_SIZE / sizeof(pd_entry_t)); l1idx++) {
1.174     matt     5227:                pde = l1pt[l1idx];
1.134     thorpej  5228:
                   5229:                /*
                   5230:                 * We're only interested in Coarse mappings.
                   5231:                 * pmap_extract() can deal with section mappings without
                   5232:                 * recourse to checking L2 metadata.
                   5233:                 */
                   5234:                if ((pde & L1_TYPE_MASK) != L1_TYPE_C)
                   5235:                        continue;
                   5236:
                   5237:                /*
                   5238:                 * Lookup the KVA of this L2 descriptor table
                   5239:                 */
                   5240:                pa = (paddr_t)(pde & L1_C_ADDR_MASK);
                   5241:                ptep = (pt_entry_t *)kernel_pt_lookup(pa);
                   5242:                if (ptep == NULL) {
                   5243:                        panic("pmap_bootstrap: No L2 for va 0x%x, pa 0x%lx",
                   5244:                            (u_int)l1idx << L1_S_SHIFT, pa);
                   5245:                }
                   5246:
                   5247:                /*
                   5248:                 * Fetch the associated L2 metadata structure.
                   5249:                 * Allocate a new one if necessary.
                   5250:                 */
                   5251:                if ((l2 = pm->pm_l2[L2_IDX(l1idx)]) == NULL) {
                   5252:                        if (l2next == PMAP_STATIC_L2_SIZE)
                   5253:                                panic("pmap_bootstrap: out of static L2s");
                   5254:                        pm->pm_l2[L2_IDX(l1idx)] = l2 = &static_l2[l2next++];
                   5255:                }
                   5256:
                   5257:                /*
                   5258:                 * One more L1 slot tracked...
                   5259:                 */
                   5260:                l2->l2_occupancy++;
                   5261:
                   5262:                /*
                   5263:                 * Fill in the details of the L2 descriptor in the
                   5264:                 * appropriate bucket.
                   5265:                 */
                   5266:                l2b = &l2->l2_bucket[L2_BUCKET(l1idx)];
                   5267:                l2b->l2b_kva = ptep;
                   5268:                l2b->l2b_phys = pa;
                   5269:                l2b->l2b_l1idx = l1idx;
1.1       matt     5270:
1.134     thorpej  5271:                /*
                   5272:                 * Establish an initial occupancy count for this descriptor
                   5273:                 */
                   5274:                for (l2idx = 0;
                   5275:                    l2idx < (L2_TABLE_SIZE_REAL / sizeof(pt_entry_t));
                   5276:                    l2idx++) {
                   5277:                        if ((ptep[l2idx] & L2_TYPE_MASK) != L2_TYPE_INV) {
                   5278:                                l2b->l2b_occupancy++;
                   5279:                        }
                   5280:                }
1.1       matt     5281:
1.134     thorpej  5282:                /*
                   5283:                 * Make sure the descriptor itself has the correct cache mode.
1.146     jdolecek 5284:                 * If not, fix it, but whine about the problem. Port-meisters
1.134     thorpej  5285:                 * should consider this a clue to fix up their initarm()
                   5286:                 * function. :)
                   5287:                 */
1.174     matt     5288:                if (pmap_set_pt_cache_mode(l1pt, (vaddr_t)ptep)) {
1.134     thorpej  5289:                        printf("pmap_bootstrap: WARNING! wrong cache mode for "
                   5290:                            "L2 pte @ %p\n", ptep);
                   5291:                }
                   5292:        }
1.61      thorpej  5293:
1.134     thorpej  5294:        /*
                   5295:         * Ensure the primary (kernel) L1 has the correct cache mode for
                   5296:         * a page table. Bitch if it is not correctly set.
                   5297:         */
1.174     matt     5298:        for (va = (vaddr_t)l1pt;
                   5299:            va < ((vaddr_t)l1pt + L1_TABLE_SIZE); va += PAGE_SIZE) {
                   5300:                if (pmap_set_pt_cache_mode(l1pt, va))
1.134     thorpej  5301:                        printf("pmap_bootstrap: WARNING! wrong cache mode for "
                   5302:                            "primary L1 @ 0x%lx\n", va);
1.1       matt     5303:        }
                   5304:
1.134     thorpej  5305:        cpu_dcache_wbinv_all();
                   5306:        cpu_tlb_flushID();
                   5307:        cpu_cpwait();
1.1       matt     5308:
1.113     thorpej  5309:        /*
1.134     thorpej  5310:         * now we allocate the "special" VAs which are used for tmp mappings
                   5311:         * by the pmap (and other modules).  we allocate the VAs by advancing
                   5312:         * virtual_avail (note that there are no pages mapped at these VAs).
                   5313:         *
                   5314:         * Managed KVM space start from wherever initarm() tells us.
1.113     thorpej  5315:         */
1.134     thorpej  5316:        virtual_avail = vstart;
                   5317:        virtual_end = vend;
1.113     thorpej  5318:
1.174     matt     5319: #ifdef PMAP_CACHE_VIPT
                   5320:        /*
                   5321:         * If we have a VIPT cache, we need one page/pte per possible alias
                   5322:         * page so we won't violate cache aliasing rules.
                   5323:         */
                   5324:        virtual_avail = (virtual_avail + arm_cache_prefer_mask) & ~arm_cache_prefer_mask;
                   5325:        nptes = (arm_cache_prefer_mask >> PGSHIFT) + 1;
                   5326: #else
                   5327:        nptes = 1;
                   5328: #endif
                   5329:        pmap_alloc_specials(&virtual_avail, nptes, &csrcp, &csrc_pte);
                   5330:        pmap_set_pt_cache_mode(l1pt, (vaddr_t)csrc_pte);
                   5331:        pmap_alloc_specials(&virtual_avail, nptes, &cdstp, &cdst_pte);
                   5332:        pmap_set_pt_cache_mode(l1pt, (vaddr_t)cdst_pte);
1.183     matt     5333:        pmap_alloc_specials(&virtual_avail, nptes, &memhook, NULL);
1.134     thorpej  5334:        pmap_alloc_specials(&virtual_avail, round_page(MSGBUFSIZE) / PAGE_SIZE,
1.139     matt     5335:            (void *)&msgbufaddr, NULL);
1.134     thorpej  5336:
                   5337:        /*
                   5338:         * Allocate a range of kernel virtual address space to be used
                   5339:         * for L2 descriptor tables and metadata allocation in
                   5340:         * pmap_growkernel().
                   5341:         */
                   5342:        size = ((virtual_end - pmap_curmaxkvaddr) + L1_S_OFFSET) / L1_S_SIZE;
                   5343:        pmap_alloc_specials(&virtual_avail,
                   5344:            round_page(size * L2_TABLE_SIZE_REAL) / PAGE_SIZE,
                   5345:            &pmap_kernel_l2ptp_kva, NULL);
1.1       matt     5346:
1.134     thorpej  5347:        size = (size + (L2_BUCKET_SIZE - 1)) / L2_BUCKET_SIZE;
                   5348:        pmap_alloc_specials(&virtual_avail,
                   5349:            round_page(size * sizeof(struct l2_dtable)) / PAGE_SIZE,
                   5350:            &pmap_kernel_l2dtable_kva, NULL);
1.1       matt     5351:
1.134     thorpej  5352:        /*
                   5353:         * init the static-global locks and global pmap list.
                   5354:         */
1.226     matt     5355:        mutex_init(&l1_lru_lock, MUTEX_DEFAULT, IPL_VM);
1.1       matt     5356:
1.134     thorpej  5357:        /*
                   5358:         * We can now initialise the first L1's metadata.
                   5359:         */
                   5360:        SLIST_INIT(&l1_list);
                   5361:        TAILQ_INIT(&l1_lru_list);
1.174     matt     5362:        pmap_init_l1(l1, l1pt);
1.1       matt     5363:
1.257     matt     5364: #ifndef ARM_HAS_VBAR
1.165     scw      5365:        /* Set up vector page L1 details, if necessary */
                   5366:        if (vector_page < KERNEL_BASE) {
1.258     matt     5367:                pm->pm_pl1vec = pmap_l1_kva(pm) + L1_IDX(vector_page);
1.165     scw      5368:                l2b = pmap_get_l2_bucket(pm, vector_page);
1.210     uebayasi 5369:                KDASSERT(l2b != NULL);
1.165     scw      5370:                pm->pm_l1vec = l2b->l2b_phys | L1_C_PROTO |
1.258     matt     5371:                    L1_C_DOM(pmap_domain(pm));
1.165     scw      5372:        } else
                   5373:                pm->pm_pl1vec = NULL;
1.257     matt     5374: #endif
1.165     scw      5375:
1.1       matt     5376:        /*
1.168     ad       5377:         * Initialize the pmap cache
1.1       matt     5378:         */
1.168     ad       5379:        pool_cache_bootstrap(&pmap_cache, sizeof(struct pmap), 0, 0, 0,
                   5380:            "pmappl", NULL, IPL_NONE, pmap_pmap_ctor, NULL, NULL);
1.134     thorpej  5381:        LIST_INIT(&pmap_pmaps);
                   5382:        LIST_INSERT_HEAD(&pmap_pmaps, pm, pm_list);
1.1       matt     5383:
1.134     thorpej  5384:        /*
                   5385:         * Initialize the pv pool.
                   5386:         */
                   5387:        pool_init(&pmap_pv_pool, sizeof(struct pv_entry), 0, 0, 0, "pvepl",
1.162     ad       5388:            &pmap_bootstrap_pv_allocator, IPL_NONE);
1.29      rearnsha 5389:
1.134     thorpej  5390:        /*
                   5391:         * Initialize the L2 dtable pool and cache.
                   5392:         */
1.168     ad       5393:        pool_cache_bootstrap(&pmap_l2dtable_cache, sizeof(struct l2_dtable), 0,
                   5394:            0, 0, "l2dtblpl", NULL, IPL_NONE, pmap_l2dtable_ctor, NULL, NULL);
1.1       matt     5395:
1.134     thorpej  5396:        /*
                   5397:         * Initialise the L2 descriptor table pool and cache
                   5398:         */
1.168     ad       5399:        pool_cache_bootstrap(&pmap_l2ptp_cache, L2_TABLE_SIZE_REAL, 0,
                   5400:            L2_TABLE_SIZE_REAL, 0, "l2ptppl", NULL, IPL_NONE,
1.134     thorpej  5401:            pmap_l2ptp_ctor, NULL, NULL);
1.61      thorpej  5402:
1.134     thorpej  5403:        cpu_dcache_wbinv_all();
1.1       matt     5404: }
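
                         /*
                          * A minimal sketch of the board-side call, assuming the common
                          * KERNEL_VM_BASE/KERNEL_VM_SIZE style macros a port's initarm()
                          * uses to describe its managed KVA window.
                          */
                         #if 0
                                 pmap_bootstrap(KERNEL_VM_BASE, KERNEL_VM_BASE + KERNEL_VM_SIZE);
                         #endif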
                   5405:
1.134     thorpej  5406: static int
                   5407: pmap_set_pt_cache_mode(pd_entry_t *kl1, vaddr_t va)
1.1       matt     5408: {
1.134     thorpej  5409:        pd_entry_t *pdep, pde;
                   5410:        pt_entry_t *ptep, pte;
                   5411:        paddr_t pa;
                   5412:        int rv = 0;
                   5413:
                   5414:        /*
                   5415:         * Make sure the descriptor itself has the correct cache mode
                   5416:         */
                   5417:        pdep = &kl1[L1_IDX(va)];
                   5418:        pde = *pdep;
                   5419:
                   5420:        if (l1pte_section_p(pde)) {
1.235     matt     5421:                __CTASSERT((L1_S_CACHE_MASK & L1_S_V6_SUPER) == 0);
1.134     thorpej  5422:                if ((pde & L1_S_CACHE_MASK) != pte_l1_s_cache_mode_pt) {
                   5423:                        *pdep = (pde & ~L1_S_CACHE_MASK) |
                   5424:                            pte_l1_s_cache_mode_pt;
                   5425:                        PTE_SYNC(pdep);
                   5426:                        cpu_dcache_wbinv_range((vaddr_t)pdep, sizeof(*pdep));
                   5427:                        rv = 1;
                   5428:                }
                   5429:        } else {
                   5430:                pa = (paddr_t)(pde & L1_C_ADDR_MASK);
                   5431:                ptep = (pt_entry_t *)kernel_pt_lookup(pa);
                   5432:                if (ptep == NULL)
                   5433:                        panic("pmap_bootstrap: No L2 for L2 @ va 0x%lx", va);
                   5434:
                   5435:                ptep = &ptep[l2pte_index(va)];
                   5436:                pte = *ptep;
                   5437:                if ((pte & L2_S_CACHE_MASK) != pte_l2_s_cache_mode_pt) {
                   5438:                        *ptep = (pte & ~L2_S_CACHE_MASK) |
                   5439:                            pte_l2_s_cache_mode_pt;
                   5440:                        PTE_SYNC(ptep);
                   5441:                        cpu_dcache_wbinv_range((vaddr_t)ptep, sizeof(*ptep));
                   5442:                        rv = 1;
                   5443:                }
                   5444:        }
                   5445:
                   5446:        return (rv);
                   5447: }
1.1       matt     5448:
1.134     thorpej  5449: static void
                   5450: pmap_alloc_specials(vaddr_t *availp, int pages, vaddr_t *vap, pt_entry_t **ptep)
                   5451: {
                   5452:        vaddr_t va = *availp;
                   5453:        struct l2_bucket *l2b;
1.1       matt     5454:
1.134     thorpej  5455:        if (ptep) {
                   5456:                l2b = pmap_get_l2_bucket(pmap_kernel(), va);
                   5457:                if (l2b == NULL)
                   5458:                        panic("pmap_alloc_specials: no l2b for 0x%lx", va);
1.62      thorpej  5459:
1.134     thorpej  5460:                *ptep = &l2b->l2b_kva[l2pte_index(va)];
1.1       matt     5462:        }
                   5463:
1.134     thorpej  5464:        *vap = va;
                   5465:        *availp = va + (PAGE_SIZE * pages);
                   5466: }
                   5467:
                   5468: void
                   5469: pmap_init(void)
                   5470: {
1.1       matt     5471:
1.113     thorpej  5472:        /*
1.134     thorpej  5473:         * Set the available memory vars.  These do not map to real memory
                   5474:         * addresses, and cannot, since the physical memory may be fragmented.
                   5475:         * They are used by ps for %mem calculations.
                   5476:         * One could argue whether this should be the entire memory or just
                   5477:         * the memory that is useable in a user process.
1.113     thorpej  5478:         */
1.218     uebayasi 5479:        avail_start = ptoa(VM_PHYSMEM_PTR(0)->start);
                   5480:        avail_end = ptoa(VM_PHYSMEM_PTR(vm_nphysseg - 1)->end);
1.63      thorpej  5481:
1.1       matt     5482:        /*
1.134     thorpej  5483:         * Now we need to free enough pv_entry structures to allow us to get
                   5484:         * the kmem_map/kmem_object allocated and inited (done after this
                   5485:         * function is finished).  to do this we allocate one bootstrap page out
                   5486:         * of kernel_map and use it to provide an initial pool of pv_entry
                   5487:         * structures.   we never free this page.
1.1       matt     5488:         */
1.134     thorpej  5489:        pool_setlowat(&pmap_pv_pool,
                   5490:            (PAGE_SIZE / sizeof(struct pv_entry)) * 2);
1.62      thorpej  5491:
1.191     matt     5492:        mutex_init(&memlock, MUTEX_DEFAULT, IPL_NONE);
                   5493:        zeropage = (void *)uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
                   5494:            UVM_KMF_WIRED|UVM_KMF_ZERO);
                   5495:
1.160     thorpej  5496:        pmap_initialized = true;
1.1       matt     5497: }
1.17      chris    5498:
1.134     thorpej  5499: static vaddr_t last_bootstrap_page = 0;
                   5500: static void *free_bootstrap_pages = NULL;
1.1       matt     5501:
1.134     thorpej  5502: static void *
                   5503: pmap_bootstrap_pv_page_alloc(struct pool *pp, int flags)
1.1       matt     5504: {
1.134     thorpej  5505:        extern void *pool_page_alloc(struct pool *, int);
                   5506:        vaddr_t new_page;
                   5507:        void *rv;
                   5508:
                   5509:        if (pmap_initialized)
                   5510:                return (pool_page_alloc(pp, flags));
                   5511:
                   5512:        if (free_bootstrap_pages) {
                   5513:                rv = free_bootstrap_pages;
                   5514:                free_bootstrap_pages = *((void **)rv);
                   5515:                return (rv);
                   5516:        }
                   5517:
1.151     yamt     5518:        new_page = uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
                   5519:            UVM_KMF_WIRED | ((flags & PR_WAITOK) ? 0 : UVM_KMF_NOWAIT));
1.1       matt     5520:
1.134     thorpej  5521:        KASSERT(new_page > last_bootstrap_page);
                   5522:        last_bootstrap_page = new_page;
                   5523:        return ((void *)new_page);
1.17      chris    5524: }
                   5525:
1.134     thorpej  5526: static void
                   5527: pmap_bootstrap_pv_page_free(struct pool *pp, void *v)
1.17      chris    5528: {
1.134     thorpej  5529:        extern void pool_page_free(struct pool *, void *);
1.17      chris    5530:
1.150     joff     5531:        if ((vaddr_t)v <= last_bootstrap_page) {
                   5532:                *((void **)v) = free_bootstrap_pages;
                   5533:                free_bootstrap_pages = v;
1.134     thorpej  5534:                return;
                   5535:        }
1.114     thorpej  5536:
1.150     joff     5537:        if (pmap_initialized) {
                   5538:                pool_page_free(pp, v);
1.134     thorpej  5539:                return;
1.57      thorpej  5540:        }
1.17      chris    5541: }
                   5542:
                   5543: /*
1.134     thorpej  5544:  * pmap_postinit()
1.17      chris    5545:  *
1.134     thorpej  5546:  * This routine is called after the vm and kmem subsystems have been
                   5547:  * initialised. This allows the pmap code to perform any initialisation
                   5548:  * that can only be done once the memory allocation is in place.
1.17      chris    5549:  */
1.134     thorpej  5550: void
                   5551: pmap_postinit(void)
1.17      chris    5552: {
1.134     thorpej  5553:        extern paddr_t physical_start, physical_end;
                   5554:        struct l2_bucket *l2b;
                   5555:        struct l1_ttable *l1;
                   5556:        struct pglist plist;
                   5557:        struct vm_page *m;
                   5558:        pd_entry_t *pl1pt;
                   5559:        pt_entry_t *ptep, pte;
                   5560:        vaddr_t va, eva;
                   5561:        u_int loop, needed;
                   5562:        int error;
1.114     thorpej  5563:
1.169     matt     5564:        pool_cache_setlowat(&pmap_l2ptp_cache,
1.134     thorpej  5565:            (PAGE_SIZE / L2_TABLE_SIZE_REAL) * 4);
1.169     matt     5566:        pool_cache_setlowat(&pmap_l2dtable_cache,
1.134     thorpej  5567:            (PAGE_SIZE / sizeof(struct l2_dtable)) * 2);
1.17      chris    5568:
1.134     thorpej  5569:        needed = (maxproc / PMAP_DOMAINS) + ((maxproc % PMAP_DOMAINS) ? 1 : 0);
                   5570:        needed -= 1;
1.48      chris    5571:
1.225     para     5572:        l1 = kmem_alloc(sizeof(*l1) * needed, KM_SLEEP);
1.48      chris    5573:
1.134     thorpej  5574:        for (loop = 0; loop < needed; loop++, l1++) {
                   5575:                /* Allocate a L1 page table */
1.151     yamt     5576:                va = uvm_km_alloc(kernel_map, L1_TABLE_SIZE, 0, UVM_KMF_VAONLY);
1.134     thorpej  5577:                if (va == 0)
                   5578:                        panic("Cannot allocate L1 KVM");
                   5579:
                   5580:                error = uvm_pglistalloc(L1_TABLE_SIZE, physical_start,
1.225     para     5581:                    physical_end, L1_TABLE_SIZE, 0, &plist, 1, 1);
1.134     thorpej  5582:                if (error)
                   5583:                        panic("Cannot allocate L1 physical pages");
                   5584:
                   5585:                m = TAILQ_FIRST(&plist);
                   5586:                eva = va + L1_TABLE_SIZE;
                   5587:                pl1pt = (pd_entry_t *)va;
1.48      chris    5588:
1.134     thorpej  5589:                while (m && va < eva) {
                   5590:                        paddr_t pa = VM_PAGE_TO_PHYS(m);
1.48      chris    5591:
1.182     matt     5592:                        pmap_kenter_pa(va, pa,
1.213     cegger   5593:                            VM_PROT_READ|VM_PROT_WRITE, PMAP_KMPAGE);
1.48      chris    5594:
                   5595:                        /*
1.134     thorpej  5596:                         * Make sure the L1 descriptor table is mapped
                   5597:                         * with the cache-mode set to write-through.
1.48      chris    5598:                         */
1.134     thorpej  5599:                        l2b = pmap_get_l2_bucket(pmap_kernel(), va);
1.210     uebayasi 5600:                        KDASSERT(l2b != NULL);
1.134     thorpej  5601:                        ptep = &l2b->l2b_kva[l2pte_index(va)];
                   5602:                        pte = *ptep;
                   5603:                        pte = (pte & ~L2_S_CACHE_MASK) | pte_l2_s_cache_mode_pt;
                   5604:                        *ptep = pte;
                   5605:                        PTE_SYNC(ptep);
                   5606:                        cpu_tlb_flushD_SE(va);
1.48      chris    5607:
1.134     thorpej  5608:                        va += PAGE_SIZE;
1.176     ad       5609:                        m = TAILQ_NEXT(m, pageq.queue);
1.48      chris    5610:                }
                   5611:
1.134     thorpej  5612: #ifdef DIAGNOSTIC
                   5613:                if (m)
                   5614:                        panic("pmap_postinit: pglist not empty");
                   5615: #endif /* DIAGNOSTIC */
1.48      chris    5616:
1.134     thorpej  5617:                pmap_init_l1(l1, pl1pt);
1.48      chris    5618:        }
                   5619:
1.134     thorpej  5620: #ifdef DEBUG
                   5621:        printf("pmap_postinit: Allocated %d static L1 descriptor tables\n",
                   5622:            needed);
                   5623: #endif
1.48      chris    5624: }
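
                         /*
                          * A worked example of the "needed" calculation above, assuming
                          * PMAP_DOMAINS is 15 and maxproc is 160: 160 / 15 is 10 with a
                          * non-zero remainder, so needed = 10 + 1 - 1 = 10 extra L1 tables
                          * on top of the static one created in pmap_bootstrap().
                          */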
                   5625:
1.76      thorpej  5626: /*
1.134     thorpej  5627:  * Note that the following routines are used by board-specific initialisation
                   5628:  * code to configure the initial kernel page tables.
                   5629:  *
                   5630:  * If ARM32_NEW_VM_LAYOUT is *not* defined, they operate on the assumption that
                   5631:  * L2 page-table pages are 4KB in size and use 4 L1 slots. This mimics the
                   5632:  * behaviour of the old pmap, and provides an easy migration path for
                   5633:  * initial bring-up of the new pmap on existing ports. Fortunately,
                   5634:  * pmap_bootstrap() compensates for this hackery. This is only a stop-gap and
                   5635:  * will be deprecated.
1.76      thorpej  5636:  *
1.134     thorpej  5637:  * If ARM32_NEW_VM_LAYOUT *is* defined, these functions deal with 1KB L2 page
                   5638:  * tables.
1.76      thorpej  5639:  */
1.40      thorpej  5640:
                   5641: /*
1.46      thorpej  5642:  * This list exists for the benefit of pmap_map_chunk().  It keeps track
                   5643:  * of the kernel L2 tables during bootstrap, so that pmap_map_chunk() can
                   5644:  * find them as necessary.
                   5645:  *
1.134     thorpej  5646:  * Note that the data on this list MUST remain valid after initarm() returns,
                   5647:  * as pmap_bootstrap() uses it to construct L2 table metadata.
1.46      thorpej  5648:  */
                   5649: SLIST_HEAD(, pv_addr) kernel_pt_list = SLIST_HEAD_INITIALIZER(kernel_pt_list);
                   5650:
                   5651: static vaddr_t
                   5652: kernel_pt_lookup(paddr_t pa)
                   5653: {
                   5654:        pv_addr_t *pv;
                   5655:
                   5656:        SLIST_FOREACH(pv, &kernel_pt_list, pv_list) {
1.134     thorpej  5657: #ifndef ARM32_NEW_VM_LAYOUT
                   5658:                if (pv->pv_pa == (pa & ~PGOFSET))
                   5659:                        return (pv->pv_va | (pa & PGOFSET));
                   5660: #else
1.46      thorpej  5661:                if (pv->pv_pa == pa)
                   5662:                        return (pv->pv_va);
1.134     thorpej  5663: #endif
1.46      thorpej  5664:        }
                   5665:        return (0);
                   5666: }
                   5667:
                   5668: /*
1.40      thorpej  5669:  * pmap_map_section:
                   5670:  *
                   5671:  *     Create a single section mapping.
                   5672:  */
                   5673: void
                   5674: pmap_map_section(vaddr_t l1pt, vaddr_t va, paddr_t pa, int prot, int cache)
                   5675: {
                   5676:        pd_entry_t *pde = (pd_entry_t *) l1pt;
1.134     thorpej  5677:        pd_entry_t fl;
1.40      thorpej  5678:
1.81      thorpej  5679:        KASSERT(((va | pa) & L1_S_OFFSET) == 0);
1.40      thorpej  5680:
1.134     thorpej  5681:        switch (cache) {
                   5682:        case PTE_NOCACHE:
                   5683:        default:
                   5684:                fl = 0;
                   5685:                break;
                   5686:
                   5687:        case PTE_CACHE:
                   5688:                fl = pte_l1_s_cache_mode;
                   5689:                break;
                   5690:
                   5691:        case PTE_PAGETABLE:
                   5692:                fl = pte_l1_s_cache_mode_pt;
                   5693:                break;
                   5694:        }
                   5695:
1.262     matt     5696:        pde[L1_IDX(va)] = L1_S_PROTO | pa |
1.134     thorpej  5697:            L1_S_PROT(PTE_KERNEL, prot) | fl | L1_S_DOM(PMAP_DOMAIN_KERNEL);
1.262     matt     5698:        PTE_SYNC(&pde[L1_IDX(va)]);
1.41      thorpej  5699: }
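
                         /*
                          * A minimal initarm()-style sketch with hypothetical addresses and
                          * a hypothetical l1pt_va: map one cacheable, read/write 1MB section.
                          */
                         #if 0
                                 pmap_map_section(l1pt_va, 0xc0100000, 0x80100000,
                                     VM_PROT_READ | VM_PROT_WRITE, PTE_CACHE);
                         #endif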
                   5700:
                   5701: /*
                   5702:  * pmap_map_entry:
                   5703:  *
                   5704:  *     Create a single page mapping.
                   5705:  */
                   5706: void
1.47      thorpej  5707: pmap_map_entry(vaddr_t l1pt, vaddr_t va, paddr_t pa, int prot, int cache)
1.41      thorpej  5708: {
1.47      thorpej  5709:        pd_entry_t *pde = (pd_entry_t *) l1pt;
1.262     matt     5710:        pt_entry_t npte;
                   5711:        pt_entry_t *ptep;
1.41      thorpej  5712:
                   5713:        KASSERT(((va | pa) & PGOFSET) == 0);
                   5714:
1.134     thorpej  5715:        switch (cache) {
                   5716:        case PTE_NOCACHE:
                   5717:        default:
1.262     matt     5718:                npte = 0;
1.134     thorpej  5719:                break;
                   5720:
                   5721:        case PTE_CACHE:
1.262     matt     5722:                npte = pte_l2_s_cache_mode;
1.134     thorpej  5723:                break;
                   5724:
                   5725:        case PTE_PAGETABLE:
1.262     matt     5726:                npte = pte_l2_s_cache_mode_pt;
1.134     thorpej  5727:                break;
                   5728:        }
                   5729:
1.262     matt     5730:        if ((pde[L1_IDX(va)] & L1_TYPE_MASK) != L1_TYPE_C)
1.47      thorpej  5731:                panic("pmap_map_entry: no L2 table for VA 0x%08lx", va);
                   5732:
1.134     thorpej  5733: #ifndef ARM32_NEW_VM_LAYOUT
1.262     matt     5734:        ptep = (pt_entry_t *)
                   5735:            kernel_pt_lookup(pde[L1_IDX(va)] & L2_S_FRAME);
1.134     thorpej  5736: #else
1.262     matt     5737:        ptep = (pt_entry_t *) kernel_pt_lookup(pde[L1_IDX(va)] & L1_C_ADDR_MASK);
1.134     thorpej  5738: #endif
1.262     matt     5739:        if (ptep == NULL)
1.47      thorpej  5740:                panic("pmap_map_entry: can't find L2 table for VA 0x%08lx", va);
                   5741:
1.262     matt     5742:        npte |= L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, prot);
1.134     thorpej  5743: #ifndef ARM32_NEW_VM_LAYOUT
1.262     matt     5744:        ptep += (va >> PGSHIFT) & 0x3ff;
1.134     thorpej  5745: #else
1.262     matt     5746:        ptep += l2pte_index(va);
1.134     thorpej  5747: #endif
1.262     matt     5748:        l2pte_set(ptep, npte, 0);
                   5749:        PTE_SYNC(ptep);
1.42      thorpej  5750: }
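
                         /*
                          * A minimal sketch with hypothetical CONSOLE_VA/CONSOLE_PA constants
                          * and a hypothetical l1pt_va: map a single uncached page for an
                          * early console device.
                          */
                         #if 0
                                 pmap_map_entry(l1pt_va, CONSOLE_VA, CONSOLE_PA,
                                     VM_PROT_READ | VM_PROT_WRITE, PTE_NOCACHE);
                         #endif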
                   5751:
                   5752: /*
                   5753:  * pmap_link_l2pt:
                   5754:  *
1.134     thorpej  5755:  *     Link the L2 page table specified by "l2pv" into the L1
1.42      thorpej  5756:  *     page table at the slot for "va".
                   5757:  */
                   5758: void
1.46      thorpej  5759: pmap_link_l2pt(vaddr_t l1pt, vaddr_t va, pv_addr_t *l2pv)
1.42      thorpej  5760: {
1.134     thorpej  5761:        pd_entry_t *pde = (pd_entry_t *) l1pt, proto;
1.262     matt     5762:        u_int slot = L1_IDX(va);
1.42      thorpej  5763:
1.134     thorpej  5764: #ifndef ARM32_NEW_VM_LAYOUT
                   5765:        KASSERT((va & ((L1_S_SIZE * 4) - 1)) == 0);
1.46      thorpej  5766:        KASSERT((l2pv->pv_pa & PGOFSET) == 0);
1.134     thorpej  5767: #endif
1.46      thorpej  5768:
1.134     thorpej  5769:        proto = L1_S_DOM(PMAP_DOMAIN_KERNEL) | L1_C_PROTO;
                   5770:
                   5771:        pde[slot + 0] = proto | (l2pv->pv_pa + 0x000);
                   5772: #ifdef ARM32_NEW_VM_LAYOUT
                   5773:        PTE_SYNC(&pde[slot]);
                   5774: #else
1.262     matt     5775:        for (u_int off = 0, i = 0; off < PAGE_SIZE; off += L2_T_SIZE, i++) {
                   5776:                pde[slot + i] = proto | (l2pv->pv_pa + off);
                   5777:        }
                   5778:        PTE_SYNC_RANGE(&pde[slot + 0], PAGE_SIZE / L2_T_SIZE);
1.134     thorpej  5779: #endif
1.42      thorpej  5780:
1.46      thorpej  5781:        SLIST_INSERT_HEAD(&kernel_pt_list, l2pv, pv_list);
1.43      thorpej  5782: }
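
/*
 * Illustrative sketch (not part of the original file): during initarm(),
 * bootstrap L2 tables are usually carved out of physical memory into
 * pv_addr_t descriptors and linked into the L1 with pmap_link_l2pt().
 * The helper "valloc_pages()" and the variable names are hypothetical:
 *
 *	pv_addr_t kernel_pt;
 *
 *	valloc_pages(kernel_pt, L2_TABLE_SIZE / PAGE_SIZE);
 *	pmap_link_l2pt(kernel_l1pt.pv_va, KERNEL_VM_BASE, &kernel_pt);
 *
 * Each linked table is also recorded on kernel_pt_list (see the
 * SLIST_INSERT_HEAD above) so that pmap_map_entry() and pmap_map_chunk()
 * can find it later via kernel_pt_lookup().
 */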
                   5783:
                   5784: /*
                   5785:  * pmap_map_chunk:
                   5786:  *
                   5787:  *     Map a chunk of memory using the most efficient mappings
                   5788:  *     possible (section, large page, small page) into the
                   5789:  *     provided L1 and L2 tables at the specified virtual address.
                   5790:  */
                   5791: vsize_t
1.46      thorpej  5792: pmap_map_chunk(vaddr_t l1pt, vaddr_t va, paddr_t pa, vsize_t size,
                   5793:     int prot, int cache)
1.43      thorpej  5794: {
1.230     matt     5795:        pd_entry_t *pdep = (pd_entry_t *) l1pt;
1.134     thorpej  5796:        pt_entry_t *pte, f1, f2s, f2l;
1.43      thorpej  5797:        vsize_t resid;
1.134     thorpej  5798:        int i;
1.43      thorpej  5799:
1.130     thorpej  5800:        resid = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
1.43      thorpej  5801:
1.44      thorpej  5802:        if (l1pt == 0)
                   5803:                panic("pmap_map_chunk: no L1 table provided");
                   5804:
1.43      thorpej  5805: #ifdef VERBOSE_INIT_ARM
                   5806:        printf("pmap_map_chunk: pa=0x%lx va=0x%lx size=0x%lx resid=0x%lx "
                   5807:            "prot=0x%x cache=%d\n", pa, va, size, resid, prot, cache);
                   5808: #endif
                   5809:
1.134     thorpej  5810:        switch (cache) {
                   5811:        case PTE_NOCACHE:
                   5812:        default:
                   5813:                f1 = 0;
                   5814:                f2l = 0;
                   5815:                f2s = 0;
                   5816:                break;
                   5817:
                   5818:        case PTE_CACHE:
                   5819:                f1 = pte_l1_s_cache_mode;
                   5820:                f2l = pte_l2_l_cache_mode;
                   5821:                f2s = pte_l2_s_cache_mode;
                   5822:                break;
                   5823:
                   5824:        case PTE_PAGETABLE:
                   5825:                f1 = pte_l1_s_cache_mode_pt;
                   5826:                f2l = pte_l2_l_cache_mode_pt;
                   5827:                f2s = pte_l2_s_cache_mode_pt;
                   5828:                break;
                   5829:        }
                   5830:
1.43      thorpej  5831:        size = resid;
                   5832:
                   5833:        while (resid > 0) {
1.262     matt     5834:                size_t l1idx = L1_IDX(va);
1.236     matt     5835: #if (ARM_MMU_V6 + ARM_MMU_V7) > 0
1.230     matt     5836:                /* See if we can use a supersection mapping. */
                   5837:                if (L1_SS_PROTO && L1_SS_MAPPABLE_P(va, pa, resid)) {
                    5838:                        /* Supersections are always domain 0 */
                   5839:                        pd_entry_t pde = L1_SS_PROTO | pa |
                   5840:                            L1_S_PROT(PTE_KERNEL, prot) | f1;
                   5841: #ifdef VERBOSE_INIT_ARM
                   5842:                        printf("sS");
                   5843: #endif
1.262     matt     5844:                        for (size_t s = l1idx,
1.230     matt     5845:                             e = s + L1_SS_SIZE / L1_S_SIZE;
                   5846:                             s < e;
                   5847:                             s++) {
                   5848:                                pdep[s] = pde;
                   5849:                                PTE_SYNC(&pdep[s]);
                   5850:                        }
                   5851:                        va += L1_SS_SIZE;
                   5852:                        pa += L1_SS_SIZE;
                   5853:                        resid -= L1_SS_SIZE;
                   5854:                        continue;
                   5855:                }
                   5856: #endif
1.43      thorpej  5857:                /* See if we can use a section mapping. */
1.134     thorpej  5858:                if (L1_S_MAPPABLE_P(va, pa, resid)) {
1.43      thorpej  5859: #ifdef VERBOSE_INIT_ARM
                   5860:                        printf("S");
                   5861: #endif
1.262     matt     5862:                        pdep[l1idx] = L1_S_PROTO | pa |
1.134     thorpej  5863:                            L1_S_PROT(PTE_KERNEL, prot) | f1 |
                   5864:                            L1_S_DOM(PMAP_DOMAIN_KERNEL);
1.262     matt     5865:                        PTE_SYNC(&pdep[l1idx]);
1.81      thorpej  5866:                        va += L1_S_SIZE;
                   5867:                        pa += L1_S_SIZE;
                   5868:                        resid -= L1_S_SIZE;
1.43      thorpej  5869:                        continue;
                   5870:                }
1.45      thorpej  5871:
                   5872:                /*
                   5873:                 * Ok, we're going to use an L2 table.  Make sure
                   5874:                 * one is actually in the corresponding L1 slot
                   5875:                 * for the current VA.
                   5876:                 */
1.262     matt     5877:                if ((pdep[l1idx] & L1_TYPE_MASK) != L1_TYPE_C)
1.46      thorpej  5878:                        panic("pmap_map_chunk: no L2 table for VA 0x%08lx", va);
                   5879:
1.134     thorpej  5880: #ifndef ARM32_NEW_VM_LAYOUT
1.46      thorpej  5881:                pte = (pt_entry_t *)
1.262     matt     5882:                    kernel_pt_lookup(pdep[l1idx] & L2_S_FRAME);
1.134     thorpej  5883: #else
                   5884:                pte = (pt_entry_t *) kernel_pt_lookup(
1.262     matt     5885:                    pdep[l1idx] & L1_C_ADDR_MASK);
1.134     thorpej  5886: #endif
1.46      thorpej  5887:                if (pte == NULL)
                    5888:                        panic("pmap_map_chunk: can't find L2 table for VA "
                    5889:                            "0x%08lx", va);
1.43      thorpej  5890:
                   5891:                /* See if we can use a L2 large page mapping. */
1.134     thorpej  5892:                if (L2_L_MAPPABLE_P(va, pa, resid)) {
1.43      thorpej  5893: #ifdef VERBOSE_INIT_ARM
                   5894:                        printf("L");
                   5895: #endif
                   5896:                        for (i = 0; i < 16; i++) {
1.134     thorpej  5897: #ifndef ARM32_NEW_VM_LAYOUT
1.43      thorpej  5898:                                pte[((va >> PGSHIFT) & 0x3f0) + i] =
1.83      thorpej  5899:                                    L2_L_PROTO | pa |
1.134     thorpej  5900:                                    L2_L_PROT(PTE_KERNEL, prot) | f2l;
                   5901:                                PTE_SYNC(&pte[((va >> PGSHIFT) & 0x3f0) + i]);
                   5902: #else
                   5903:                                pte[l2pte_index(va) + i] =
                   5904:                                    L2_L_PROTO | pa |
                   5905:                                    L2_L_PROT(PTE_KERNEL, prot) | f2l;
                   5906:                                PTE_SYNC(&pte[l2pte_index(va) + i]);
                   5907: #endif
1.43      thorpej  5908:                        }
1.81      thorpej  5909:                        va += L2_L_SIZE;
                   5910:                        pa += L2_L_SIZE;
                   5911:                        resid -= L2_L_SIZE;
1.43      thorpej  5912:                        continue;
                   5913:                }
                   5914:
                   5915:                /* Use a small page mapping. */
                   5916: #ifdef VERBOSE_INIT_ARM
                   5917:                printf("P");
                   5918: #endif
1.262     matt     5919:                pt_entry_t npte =
                   5920:                    L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, prot) | f2s;
1.134     thorpej  5921: #ifndef ARM32_NEW_VM_LAYOUT
1.262     matt     5922:                pt_entry_t *ptep = &pte[(va >> PGSHIFT) & 0x3ff];
1.134     thorpej  5923: #else
1.262     matt     5924:                pt_entry_t *ptep = &pte[l2pte_index(va)];
1.134     thorpej  5925: #endif
1.262     matt     5926:                l2pte_set(ptep, npte, 0);
                   5927:                PTE_SYNC(ptep);
1.130     thorpej  5928:                va += PAGE_SIZE;
                   5929:                pa += PAGE_SIZE;
                   5930:                resid -= PAGE_SIZE;
1.43      thorpej  5931:        }
                   5932: #ifdef VERBOSE_INIT_ARM
                   5933:        printf("\n");
                   5934: #endif
                   5935:        return (size);
1.135     thorpej  5936: }
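
/*
 * Illustrative sketch (not part of the original file): mapping the kernel
 * image during bootstrap.  "kernel_l1pt", "KERNEL_BASE", "physical_start"
 * and "textsize" are hypothetical bootstrap variables:
 *
 *	pmap_map_chunk(kernel_l1pt.pv_va, KERNEL_BASE, physical_start,
 *	    textsize, VM_PROT_READ | VM_PROT_WRITE, PTE_CACHE);
 *
 * The routine picks supersections, sections, large pages or small pages as
 * alignment and the remaining size allow, and returns the page-rounded
 * size it actually mapped.
 */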
                   5937:
                   5938: /********************** Static device map routines ***************************/
                   5939:
                   5940: static const struct pmap_devmap *pmap_devmap_table;
                   5941:
                   5942: /*
1.136     thorpej  5943:  * Register the devmap table.  This is provided in case early console
                   5944:  * initialization needs to register mappings created by bootstrap code
                   5945:  * before pmap_devmap_bootstrap() is called.
                   5946:  */
                   5947: void
                   5948: pmap_devmap_register(const struct pmap_devmap *table)
                   5949: {
                   5950:
                   5951:        pmap_devmap_table = table;
                   5952: }
                   5953:
                   5954: /*
1.135     thorpej  5955:  * Map all of the static regions in the devmap table, and remember
                   5956:  * the devmap table so other parts of the kernel can look up entries
                   5957:  * later.
                   5958:  */
                   5959: void
                   5960: pmap_devmap_bootstrap(vaddr_t l1pt, const struct pmap_devmap *table)
                   5961: {
                   5962:        int i;
                   5963:
                   5964:        pmap_devmap_table = table;
                   5965:
                   5966:        for (i = 0; pmap_devmap_table[i].pd_size != 0; i++) {
                   5967: #ifdef VERBOSE_INIT_ARM
                   5968:                printf("devmap: %08lx -> %08lx @ %08lx\n",
                   5969:                    pmap_devmap_table[i].pd_pa,
                   5970:                    pmap_devmap_table[i].pd_pa +
                   5971:                        pmap_devmap_table[i].pd_size - 1,
                   5972:                    pmap_devmap_table[i].pd_va);
                   5973: #endif
                   5974:                pmap_map_chunk(l1pt, pmap_devmap_table[i].pd_va,
                   5975:                    pmap_devmap_table[i].pd_pa,
                   5976:                    pmap_devmap_table[i].pd_size,
                   5977:                    pmap_devmap_table[i].pd_prot,
                   5978:                    pmap_devmap_table[i].pd_cache);
                   5979:        }
                   5980: }
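
/*
 * Illustrative sketch (not part of the original file): a port typically
 * provides a statically initialized, zero-terminated table and hands it to
 * pmap_devmap_bootstrap() from initarm().  The addresses and sizes below
 * are hypothetical:
 *
 *	static const struct pmap_devmap mydev_devmap[] = {
 *		{
 *			.pd_va    = 0xfd000000,
 *			.pd_pa    = 0x48000000,
 *			.pd_size  = 0x00100000,
 *			.pd_prot  = VM_PROT_READ | VM_PROT_WRITE,
 *			.pd_cache = PTE_NOCACHE,
 *		},
 *		{ 0 }
 *	};
 *
 *	pmap_devmap_bootstrap(kernel_l1pt.pv_va, mydev_devmap);
 *
 * The table is terminated by an entry with pd_size == 0, which is what the
 * loops above and below test for.
 */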
                   5981:
                   5982: const struct pmap_devmap *
                   5983: pmap_devmap_find_pa(paddr_t pa, psize_t size)
                   5984: {
1.153     scw      5985:        uint64_t endpa;
1.135     thorpej  5986:        int i;
                   5987:
                   5988:        if (pmap_devmap_table == NULL)
                   5989:                return (NULL);
                   5990:
1.158     christos 5991:        endpa = (uint64_t)pa + (uint64_t)(size - 1);
1.153     scw      5992:
1.135     thorpej  5993:        for (i = 0; pmap_devmap_table[i].pd_size != 0; i++) {
                   5994:                if (pa >= pmap_devmap_table[i].pd_pa &&
1.153     scw      5995:                    endpa <= (uint64_t)pmap_devmap_table[i].pd_pa +
1.158     christos 5996:                             (uint64_t)(pmap_devmap_table[i].pd_size - 1))
1.135     thorpej  5997:                        return (&pmap_devmap_table[i]);
                   5998:        }
                   5999:
                   6000:        return (NULL);
                   6001: }
                   6002:
                   6003: const struct pmap_devmap *
                   6004: pmap_devmap_find_va(vaddr_t va, vsize_t size)
                   6005: {
                   6006:        int i;
                   6007:
                   6008:        if (pmap_devmap_table == NULL)
                   6009:                return (NULL);
                   6010:
                   6011:        for (i = 0; pmap_devmap_table[i].pd_size != 0; i++) {
                   6012:                if (va >= pmap_devmap_table[i].pd_va &&
1.158     christos 6013:                    va + size - 1 <= pmap_devmap_table[i].pd_va +
                   6014:                                     pmap_devmap_table[i].pd_size - 1)
1.135     thorpej  6015:                        return (&pmap_devmap_table[i]);
                   6016:        }
                   6017:
                   6018:        return (NULL);
1.40      thorpej  6019: }
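
/*
 * Illustrative sketch (not part of the original file): bus-space or early
 * console code can ask whether a physical range is already covered by a
 * devmap entry and reuse that mapping instead of creating a new one:
 *
 *	const struct pmap_devmap *pd;
 *
 *	pd = pmap_devmap_find_pa(pa, size);
 *	if (pd != NULL)
 *		va = pd->pd_va + (pa - pd->pd_pa);
 *
 * pmap_devmap_find_va() answers the reverse question for a virtual range.
 */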
1.85      thorpej  6020:
                   6021: /********************** PTE initialization routines **************************/
                   6022:
                   6023: /*
                   6024:  * These routines are called when the CPU type is identified to set up
                   6025:  * the PTE prototypes, cache modes, etc.
                   6026:  *
1.190     ad       6027:  * The variables are always here, just in case modules need to reference
1.85      thorpej  6028:  * them (though they shouldn't).
                   6029:  */
                   6030:
1.86      thorpej  6031: pt_entry_t     pte_l1_s_cache_mode;
1.220     macallan 6032: pt_entry_t     pte_l1_s_wc_mode;
1.134     thorpej  6033: pt_entry_t     pte_l1_s_cache_mode_pt;
1.86      thorpej  6034: pt_entry_t     pte_l1_s_cache_mask;
                   6035:
                   6036: pt_entry_t     pte_l2_l_cache_mode;
1.220     macallan 6037: pt_entry_t     pte_l2_l_wc_mode;
1.134     thorpej  6038: pt_entry_t     pte_l2_l_cache_mode_pt;
1.86      thorpej  6039: pt_entry_t     pte_l2_l_cache_mask;
                   6040:
                   6041: pt_entry_t     pte_l2_s_cache_mode;
1.220     macallan 6042: pt_entry_t     pte_l2_s_wc_mode;
1.134     thorpej  6043: pt_entry_t     pte_l2_s_cache_mode_pt;
1.86      thorpej  6044: pt_entry_t     pte_l2_s_cache_mask;
1.85      thorpej  6045:
1.214     jmcneill 6046: pt_entry_t     pte_l1_s_prot_u;
                   6047: pt_entry_t     pte_l1_s_prot_w;
                   6048: pt_entry_t     pte_l1_s_prot_ro;
                   6049: pt_entry_t     pte_l1_s_prot_mask;
                   6050:
1.85      thorpej  6051: pt_entry_t     pte_l2_s_prot_u;
                   6052: pt_entry_t     pte_l2_s_prot_w;
1.214     jmcneill 6053: pt_entry_t     pte_l2_s_prot_ro;
1.85      thorpej  6054: pt_entry_t     pte_l2_s_prot_mask;
                   6055:
1.214     jmcneill 6056: pt_entry_t     pte_l2_l_prot_u;
                   6057: pt_entry_t     pte_l2_l_prot_w;
                   6058: pt_entry_t     pte_l2_l_prot_ro;
                   6059: pt_entry_t     pte_l2_l_prot_mask;
                   6060:
1.230     matt     6061: pt_entry_t     pte_l1_ss_proto;
1.85      thorpej  6062: pt_entry_t     pte_l1_s_proto;
                   6063: pt_entry_t     pte_l1_c_proto;
                   6064: pt_entry_t     pte_l2_s_proto;
                   6065:
1.88      thorpej  6066: void           (*pmap_copy_page_func)(paddr_t, paddr_t);
                   6067: void           (*pmap_zero_page_func)(paddr_t);
                   6068:
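/*
 * Illustration (not from the original source): once the prototypes above
 * have been filled in by one of the pmap_pte_init_*() routines below, a
 * kernel small-page PTE is composed from them, e.g.
 *
 *	pt_entry_t npte = L2_S_PROTO | pa
 *	    | L2_S_PROT(PTE_KERNEL, VM_PROT_READ | VM_PROT_WRITE)
 *	    | pte_l2_s_cache_mode;
 *
 * exactly as done in pmap_map_entry() and pmap_map_chunk() above.  On
 * kernels built for more than one MMU class, the L2_S_PROTO and L2_S_PROT
 * macros expand to these variables rather than to compile-time constants.
 */
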
1.214     jmcneill 6069: #if (ARM_MMU_GENERIC + ARM_MMU_SA1 + ARM_MMU_V6 + ARM_MMU_V7) != 0
1.85      thorpej  6070: void
                   6071: pmap_pte_init_generic(void)
                   6072: {
                   6073:
1.86      thorpej  6074:        pte_l1_s_cache_mode = L1_S_B|L1_S_C;
1.220     macallan 6075:        pte_l1_s_wc_mode = L1_S_B;
1.86      thorpej  6076:        pte_l1_s_cache_mask = L1_S_CACHE_MASK_generic;
                   6077:
                   6078:        pte_l2_l_cache_mode = L2_B|L2_C;
1.220     macallan 6079:        pte_l2_l_wc_mode = L2_B;
1.86      thorpej  6080:        pte_l2_l_cache_mask = L2_L_CACHE_MASK_generic;
                   6081:
                   6082:        pte_l2_s_cache_mode = L2_B|L2_C;
1.220     macallan 6083:        pte_l2_s_wc_mode = L2_B;
1.86      thorpej  6084:        pte_l2_s_cache_mask = L2_S_CACHE_MASK_generic;
1.85      thorpej  6085:
1.134     thorpej  6086:        /*
                   6087:         * If we have a write-through cache, set B and C.  If
                   6088:         * we have a write-back cache, then we assume setting
1.230     matt     6089:         * only C will make those pages write-through (except for those
                    6090:         * Cortex CPUs whose MMU table walker can read the L1 caches).
1.134     thorpej  6091:         */
1.230     matt     6092:        if (cpufuncs.cf_dcache_wb_range == (void *) cpufunc_nullop
1.234     matt     6093: #if ARM_MMU_V7 > 0
                   6094:            || CPU_ID_CORTEX_P(curcpu()->ci_arm_cpuid)
                   6095: #endif
                   6096: #if ARM_MMU_V6 > 0
                    6097:            || CPU_ID_ARM11_P(curcpu()->ci_arm_cpuid) /* arm1136 errata 399234 */
1.230     matt     6098: #endif
                   6099:            || false) {
1.134     thorpej  6100:                pte_l1_s_cache_mode_pt = L1_S_B|L1_S_C;
                   6101:                pte_l2_l_cache_mode_pt = L2_B|L2_C;
                   6102:                pte_l2_s_cache_mode_pt = L2_B|L2_C;
1.230     matt     6103:        } else {
                   6104:                pte_l1_s_cache_mode_pt = L1_S_C;        /* write through */
                   6105:                pte_l2_l_cache_mode_pt = L2_C;          /* write through */
                   6106:                pte_l2_s_cache_mode_pt = L2_C;          /* write through */
1.134     thorpej  6107:        }
                   6108:
1.214     jmcneill 6109:        pte_l1_s_prot_u = L1_S_PROT_U_generic;
                   6110:        pte_l1_s_prot_w = L1_S_PROT_W_generic;
                   6111:        pte_l1_s_prot_ro = L1_S_PROT_RO_generic;
                   6112:        pte_l1_s_prot_mask = L1_S_PROT_MASK_generic;
                   6113:
1.85      thorpej  6114:        pte_l2_s_prot_u = L2_S_PROT_U_generic;
                   6115:        pte_l2_s_prot_w = L2_S_PROT_W_generic;
1.214     jmcneill 6116:        pte_l2_s_prot_ro = L2_S_PROT_RO_generic;
1.85      thorpej  6117:        pte_l2_s_prot_mask = L2_S_PROT_MASK_generic;
                   6118:
1.214     jmcneill 6119:        pte_l2_l_prot_u = L2_L_PROT_U_generic;
                   6120:        pte_l2_l_prot_w = L2_L_PROT_W_generic;
                   6121:        pte_l2_l_prot_ro = L2_L_PROT_RO_generic;
                   6122:        pte_l2_l_prot_mask = L2_L_PROT_MASK_generic;
                   6123:
1.230     matt     6124:        pte_l1_ss_proto = L1_SS_PROTO_generic;
1.85      thorpej  6125:        pte_l1_s_proto = L1_S_PROTO_generic;
                   6126:        pte_l1_c_proto = L1_C_PROTO_generic;
                   6127:        pte_l2_s_proto = L2_S_PROTO_generic;
1.88      thorpej  6128:
                   6129:        pmap_copy_page_func = pmap_copy_page_generic;
                   6130:        pmap_zero_page_func = pmap_zero_page_generic;
1.85      thorpej  6131: }
                   6132:
1.131     thorpej  6133: #if defined(CPU_ARM8)
                   6134: void
                   6135: pmap_pte_init_arm8(void)
                   6136: {
                   6137:
1.134     thorpej  6138:        /*
                   6139:         * ARM8 is compatible with generic, but we need to use
                   6140:         * the page tables uncached.
                   6141:         */
1.131     thorpej  6142:        pmap_pte_init_generic();
1.134     thorpej  6143:
                   6144:        pte_l1_s_cache_mode_pt = 0;
                   6145:        pte_l2_l_cache_mode_pt = 0;
                   6146:        pte_l2_s_cache_mode_pt = 0;
1.131     thorpej  6147: }
                   6148: #endif /* CPU_ARM8 */
                   6149:
1.148     bsh      6150: #if defined(CPU_ARM9) && defined(ARM9_CACHE_WRITE_THROUGH)
1.85      thorpej  6151: void
                   6152: pmap_pte_init_arm9(void)
                   6153: {
                   6154:
                   6155:        /*
                   6156:         * ARM9 is compatible with generic, but we want to use
                   6157:         * write-through caching for now.
                   6158:         */
                   6159:        pmap_pte_init_generic();
1.86      thorpej  6160:
                   6161:        pte_l1_s_cache_mode = L1_S_C;
                   6162:        pte_l2_l_cache_mode = L2_C;
                   6163:        pte_l2_s_cache_mode = L2_C;
1.134     thorpej  6164:
1.220     macallan 6165:        pte_l1_s_wc_mode = L1_S_B;
                   6166:        pte_l2_l_wc_mode = L2_B;
                   6167:        pte_l2_s_wc_mode = L2_B;
                   6168:
1.134     thorpej  6169:        pte_l1_s_cache_mode_pt = L1_S_C;
                   6170:        pte_l2_l_cache_mode_pt = L2_C;
                   6171:        pte_l2_s_cache_mode_pt = L2_C;
1.85      thorpej  6172: }
1.204     uebayasi 6173: #endif /* CPU_ARM9 && ARM9_CACHE_WRITE_THROUGH */
1.174     matt     6174: #endif /* (ARM_MMU_GENERIC + ARM_MMU_SA1 + ARM_MMU_V6 + ARM_MMU_V7) != 0 */
1.138     rearnsha 6175:
                   6176: #if defined(CPU_ARM10)
                   6177: void
                   6178: pmap_pte_init_arm10(void)
                   6179: {
                   6180:
                   6181:        /*
                   6182:         * ARM10 is compatible with generic, but we want to use
                   6183:         * write-through caching for now.
                   6184:         */
                   6185:        pmap_pte_init_generic();
                   6186:
                   6187:        pte_l1_s_cache_mode = L1_S_B | L1_S_C;
                   6188:        pte_l2_l_cache_mode = L2_B | L2_C;
                   6189:        pte_l2_s_cache_mode = L2_B | L2_C;
                   6190:
1.220     macallan 6191:        pte_l1_s_wc_mode = L1_S_B;
                    6192:        pte_l2_l_wc_mode = L2_B;
                    6193:        pte_l2_s_wc_mode = L2_B;
                   6194:
1.138     rearnsha 6195:        pte_l1_s_cache_mode_pt = L1_S_C;
                   6196:        pte_l2_l_cache_mode_pt = L2_C;
                   6197:        pte_l2_s_cache_mode_pt = L2_C;
                   6198:
                   6199: }
                   6200: #endif /* CPU_ARM10 */
1.131     thorpej  6201:
1.204     uebayasi 6202: #if defined(CPU_ARM11) && defined(ARM11_CACHE_WRITE_THROUGH)
                   6203: void
                   6204: pmap_pte_init_arm11(void)
                   6205: {
                   6206:
                   6207:        /*
                   6208:         * ARM11 is compatible with generic, but we want to use
                   6209:         * write-through caching for now.
                   6210:         */
                   6211:        pmap_pte_init_generic();
                   6212:
                   6213:        pte_l1_s_cache_mode = L1_S_C;
                   6214:        pte_l2_l_cache_mode = L2_C;
                   6215:        pte_l2_s_cache_mode = L2_C;
                   6216:
1.220     macallan 6217:        pte_l1_s_wc_mode = L1_S_B;
                   6218:        pte_l2_l_wc_mode = L2_B;
                   6219:        pte_l2_s_wc_mode = L2_B;
                   6220:
1.204     uebayasi 6221:        pte_l1_s_cache_mode_pt = L1_S_C;
                   6222:        pte_l2_l_cache_mode_pt = L2_C;
                   6223:        pte_l2_s_cache_mode_pt = L2_C;
                   6224: }
                   6225: #endif /* CPU_ARM11 && ARM11_CACHE_WRITE_THROUGH */
                   6226:
1.131     thorpej  6227: #if ARM_MMU_SA1 == 1
                   6228: void
                   6229: pmap_pte_init_sa1(void)
                   6230: {
                   6231:
1.134     thorpej  6232:        /*
                   6233:         * The StrongARM SA-1 cache does not have a write-through
                   6234:         * mode.  So, do the generic initialization, then reset
                   6235:         * the page table cache mode to B=1,C=1, and note that
                   6236:         * the PTEs need to be sync'd.
                   6237:         */
1.131     thorpej  6238:        pmap_pte_init_generic();
1.134     thorpej  6239:
                   6240:        pte_l1_s_cache_mode_pt = L1_S_B|L1_S_C;
                   6241:        pte_l2_l_cache_mode_pt = L2_B|L2_C;
                   6242:        pte_l2_s_cache_mode_pt = L2_B|L2_C;
                   6243:
                   6244:        pmap_needs_pte_sync = 1;
1.131     thorpej  6245: }
1.134     thorpej  6246: #endif /* ARM_MMU_SA1 == 1*/
1.85      thorpej  6247:
                   6248: #if ARM_MMU_XSCALE == 1
1.141     scw      6249: #if (ARM_NMMUS > 1)
                   6250: static u_int xscale_use_minidata;
                   6251: #endif
                   6252:
1.85      thorpej  6253: void
                   6254: pmap_pte_init_xscale(void)
                   6255: {
1.96      thorpej  6256:        uint32_t auxctl;
1.134     thorpej  6257:        int write_through = 0;
1.85      thorpej  6258:
1.96      thorpej  6259:        pte_l1_s_cache_mode = L1_S_B|L1_S_C;
1.220     macallan 6260:        pte_l1_s_wc_mode = L1_S_B;
1.86      thorpej  6261:        pte_l1_s_cache_mask = L1_S_CACHE_MASK_xscale;
                   6262:
1.96      thorpej  6263:        pte_l2_l_cache_mode = L2_B|L2_C;
1.220     macallan 6264:        pte_l2_l_wc_mode = L2_B;
1.86      thorpej  6265:        pte_l2_l_cache_mask = L2_L_CACHE_MASK_xscale;
                   6266:
1.96      thorpej  6267:        pte_l2_s_cache_mode = L2_B|L2_C;
1.220     macallan 6268:        pte_l2_s_wc_mode = L2_B;
1.86      thorpej  6269:        pte_l2_s_cache_mask = L2_S_CACHE_MASK_xscale;
1.106     thorpej  6270:
1.134     thorpej  6271:        pte_l1_s_cache_mode_pt = L1_S_C;
                   6272:        pte_l2_l_cache_mode_pt = L2_C;
                   6273:        pte_l2_s_cache_mode_pt = L2_C;
                   6274:
1.106     thorpej  6275: #ifdef XSCALE_CACHE_READ_WRITE_ALLOCATE
                   6276:        /*
                   6277:         * The XScale core has an enhanced mode where writes that
                   6278:         * miss the cache cause a cache line to be allocated.  This
                   6279:         * is significantly faster than the traditional, write-through
                    6280:         * is significantly faster than the traditional write-through
                    6281:         * behavior in this case.
1.174     matt     6282:        pte_l1_s_cache_mode |= L1_S_XS_TEX(TEX_XSCALE_X);
                   6283:        pte_l2_l_cache_mode |= L2_XS_L_TEX(TEX_XSCALE_X);
                   6284:        pte_l2_s_cache_mode |= L2_XS_T_TEX(TEX_XSCALE_X);
1.106     thorpej  6285: #endif /* XSCALE_CACHE_READ_WRITE_ALLOCATE */
1.85      thorpej  6286:
1.95      thorpej  6287: #ifdef XSCALE_CACHE_WRITE_THROUGH
                   6288:        /*
                   6289:         * Some versions of the XScale core have various bugs in
                   6290:         * their cache units, the work-around for which is to run
                   6291:         * the cache in write-through mode.  Unfortunately, this
                   6292:         * has a major (negative) impact on performance.  So, we
                   6293:         * go ahead and run fast-and-loose, in the hopes that we
                   6294:         * don't line up the planets in a way that will trip the
                   6295:         * bugs.
                   6296:         *
                   6297:         * However, we give you the option to be slow-but-correct.
                   6298:         */
1.129     bsh      6299:        write_through = 1;
                   6300: #elif defined(XSCALE_CACHE_WRITE_BACK)
1.134     thorpej  6301:        /* force write back cache mode */
1.129     bsh      6302:        write_through = 0;
1.154     bsh      6303: #elif defined(CPU_XSCALE_PXA250) || defined(CPU_XSCALE_PXA270)
1.129     bsh      6304:        /*
                   6305:         * Intel PXA2[15]0 processors are known to have a bug in
                   6306:         * write-back cache on revision 4 and earlier (stepping
                   6307:         * A[01] and B[012]).  Fixed for C0 and later.
                   6308:         */
                   6309:        {
1.134     thorpej  6310:                uint32_t id, type;
1.129     bsh      6311:
                   6312:                id = cpufunc_id();
                   6313:                type = id & ~(CPU_ID_XSCALE_COREREV_MASK|CPU_ID_REVISION_MASK);
                   6314:
                   6315:                if (type == CPU_ID_PXA250 || type == CPU_ID_PXA210) {
                   6316:                        if ((id & CPU_ID_REVISION_MASK) < 5) {
                   6317:                                /* write through for stepping A0-1 and B0-2 */
                   6318:                                write_through = 1;
                   6319:                        }
                   6320:                }
                   6321:        }
1.95      thorpej  6322: #endif /* XSCALE_CACHE_WRITE_THROUGH */
1.129     bsh      6323:
                   6324:        if (write_through) {
                   6325:                pte_l1_s_cache_mode = L1_S_C;
                   6326:                pte_l2_l_cache_mode = L2_C;
                   6327:                pte_l2_s_cache_mode = L2_C;
                   6328:        }
1.95      thorpej  6329:
1.141     scw      6330: #if (ARM_NMMUS > 1)
                   6331:        xscale_use_minidata = 1;
                   6332: #endif
                   6333:
1.214     jmcneill 6334:        pte_l1_s_prot_u = L1_S_PROT_U_xscale;
                   6335:        pte_l1_s_prot_w = L1_S_PROT_W_xscale;
                   6336:        pte_l1_s_prot_ro = L1_S_PROT_RO_xscale;
                   6337:        pte_l1_s_prot_mask = L1_S_PROT_MASK_xscale;
                   6338:
1.85      thorpej  6339:        pte_l2_s_prot_u = L2_S_PROT_U_xscale;
                   6340:        pte_l2_s_prot_w = L2_S_PROT_W_xscale;
1.214     jmcneill 6341:        pte_l2_s_prot_ro = L2_S_PROT_RO_xscale;
1.85      thorpej  6342:        pte_l2_s_prot_mask = L2_S_PROT_MASK_xscale;
                   6343:
1.214     jmcneill 6344:        pte_l2_l_prot_u = L2_L_PROT_U_xscale;
                   6345:        pte_l2_l_prot_w = L2_L_PROT_W_xscale;
                   6346:        pte_l2_l_prot_ro = L2_L_PROT_RO_xscale;
                   6347:        pte_l2_l_prot_mask = L2_L_PROT_MASK_xscale;
                   6348:
1.230     matt     6349:        pte_l1_ss_proto = L1_SS_PROTO_xscale;
1.85      thorpej  6350:        pte_l1_s_proto = L1_S_PROTO_xscale;
                   6351:        pte_l1_c_proto = L1_C_PROTO_xscale;
                   6352:        pte_l2_s_proto = L2_S_PROTO_xscale;
1.88      thorpej  6353:
                   6354:        pmap_copy_page_func = pmap_copy_page_xscale;
                   6355:        pmap_zero_page_func = pmap_zero_page_xscale;
1.96      thorpej  6356:
                   6357:        /*
                   6358:         * Disable ECC protection of page table access, for now.
                   6359:         */
1.157     perry    6360:        __asm volatile("mrc p15, 0, %0, c1, c0, 1" : "=r" (auxctl));
1.96      thorpej  6361:        auxctl &= ~XSCALE_AUXCTL_P;
1.157     perry    6362:        __asm volatile("mcr p15, 0, %0, c1, c0, 1" : : "r" (auxctl));
1.85      thorpej  6363: }
1.87      thorpej  6364:
                   6365: /*
                   6366:  * xscale_setup_minidata:
                   6367:  *
                   6368:  *     Set up the mini-data cache clean area.  We require the
                   6369:  *     caller to allocate the right amount of physically and
                   6370:  *     virtually contiguous space.
                   6371:  */
                   6372: void
                   6373: xscale_setup_minidata(vaddr_t l1pt, vaddr_t va, paddr_t pa)
                   6374: {
                   6375:        extern vaddr_t xscale_minidata_clean_addr;
                   6376:        extern vsize_t xscale_minidata_clean_size; /* already initialized */
                   6377:        pd_entry_t *pde = (pd_entry_t *) l1pt;
                   6378:        vsize_t size;
1.96      thorpej  6379:        uint32_t auxctl;
1.87      thorpej  6380:
                   6381:        xscale_minidata_clean_addr = va;
                   6382:
                   6383:        /* Round it to page size. */
                   6384:        size = (xscale_minidata_clean_size + L2_S_OFFSET) & L2_S_FRAME;
                   6385:
                   6386:        for (; size != 0;
                   6387:             va += L2_S_SIZE, pa += L2_S_SIZE, size -= L2_S_SIZE) {
1.262     matt     6388:                const size_t l1idx = L1_IDX(va);
1.134     thorpej  6389: #ifndef ARM32_NEW_VM_LAYOUT
1.262     matt     6390:                pt_entry_t *ptep = (pt_entry_t *)
                   6391:                    kernel_pt_lookup(pde[l1idx] & L2_S_FRAME);
1.134     thorpej  6392: #else
1.262     matt     6393:                pt_entry_t *ptep = (pt_entry_t *) kernel_pt_lookup(
                   6394:                    pde[l1idx] & L1_C_ADDR_MASK);
1.134     thorpej  6395: #endif
1.262     matt     6396:                if (ptep == NULL)
1.87      thorpej  6397:                        panic("xscale_setup_minidata: can't find L2 table for "
                   6398:                            "VA 0x%08lx", va);
1.262     matt     6399:
1.134     thorpej  6400: #ifndef ARM32_NEW_VM_LAYOUT
1.262     matt     6401:                ptep += (va >> PGSHIFT) & 0x3ff;
1.134     thorpej  6402: #else
1.262     matt     6403:                ptep += l2pte_index(va);
1.134     thorpej  6404: #endif
1.262     matt     6405:                pt_entry_t opte = *ptep;
                   6406:                l2pte_set(ptep,
                   6407:                    L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, VM_PROT_READ)
                   6408:                    | L2_C | L2_XS_T_TEX(TEX_XSCALE_X), opte);
1.87      thorpej  6409:        }
1.96      thorpej  6410:
                   6411:        /*
                   6412:         * Configure the mini-data cache for write-back with
                   6413:         * read/write-allocate.
                   6414:         *
                   6415:         * NOTE: In order to reconfigure the mini-data cache, we must
                   6416:         * make sure it contains no valid data!  In order to do that,
                   6417:         * we must issue a global data cache invalidate command!
                   6418:         *
                   6419:         * WE ASSUME WE ARE RUNNING UN-CACHED WHEN THIS ROUTINE IS CALLED!
                   6420:         * THIS IS VERY IMPORTANT!
                   6421:         */
1.134     thorpej  6422:
1.96      thorpej  6423:        /* Invalidate data and mini-data. */
1.157     perry    6424:        __asm volatile("mcr p15, 0, %0, c7, c6, 0" : : "r" (0));
                   6425:        __asm volatile("mrc p15, 0, %0, c1, c0, 1" : "=r" (auxctl));
1.96      thorpej  6426:        auxctl = (auxctl & ~XSCALE_AUXCTL_MD_MASK) | XSCALE_AUXCTL_MD_WB_RWA;
1.157     perry    6427:        __asm volatile("mcr p15, 0, %0, c1, c0, 1" : : "r" (auxctl));
1.87      thorpej  6428: }
1.141     scw      6429:
                   6430: /*
                   6431:  * Change the PTEs for the specified kernel mappings such that they
                   6432:  * will use the mini data cache instead of the main data cache.
                   6433:  */
                   6434: void
                   6435: pmap_uarea(vaddr_t va)
                   6436: {
                   6437:        vaddr_t next_bucket, eva;
                   6438:
                   6439: #if (ARM_NMMUS > 1)
                   6440:        if (xscale_use_minidata == 0)
                   6441:                return;
                   6442: #endif
                   6443:
                   6444:        eva = va + USPACE;
                   6445:
                   6446:        while (va < eva) {
                   6447:                next_bucket = L2_NEXT_BUCKET(va);
                   6448:                if (next_bucket > eva)
                   6449:                        next_bucket = eva;
                   6450:
1.262     matt     6451:                struct l2_bucket *l2b = pmap_get_l2_bucket(pmap_kernel(), va);
1.141     scw      6452:                KDASSERT(l2b != NULL);
                   6453:
1.262     matt     6454:                pt_entry_t * const sptep = &l2b->l2b_kva[l2pte_index(va)];
                   6455:                pt_entry_t *ptep = sptep;
1.141     scw      6456:
                   6457:                while (va < next_bucket) {
1.262     matt     6458:                        const pt_entry_t opte = *ptep;
                   6459:                        if (!l2pte_minidata(opte)) {
1.141     scw      6460:                                cpu_dcache_wbinv_range(va, PAGE_SIZE);
                   6461:                                cpu_tlb_flushD_SE(va);
1.262     matt     6462:                                l2pte_set(ptep, opte & ~L2_B, opte);
1.141     scw      6463:                        }
1.262     matt     6464:                        ptep += PAGE_SIZE / L2_S_SIZE;
1.141     scw      6465:                        va += PAGE_SIZE;
                   6466:                }
                   6467:                PTE_SYNC_RANGE(sptep, (u_int)(ptep - sptep));
                   6468:        }
                   6469:        cpu_cpwait();
                   6470: }
1.85      thorpej  6471: #endif /* ARM_MMU_XSCALE == 1 */
1.134     thorpej  6472:
1.221     bsh      6473:
                   6474: #if defined(CPU_ARM11MPCORE)
                   6475:
                   6476: void
                   6477: pmap_pte_init_arm11mpcore(void)
                   6478: {
                   6479:
                   6480:        /* cache mode is controlled by 5 bits (B, C, TEX) */
                   6481:        pte_l1_s_cache_mask = L1_S_CACHE_MASK_armv6;
                   6482:        pte_l2_l_cache_mask = L2_L_CACHE_MASK_armv6;
                   6483: #if defined(ARM11MPCORE_COMPAT_MMU) || defined(ARMV6_EXTENDED_SMALL_PAGE)
                   6484:        /* use extended small page (without APn, with TEX) */
                   6485:        pte_l2_s_cache_mask = L2_XS_CACHE_MASK_armv6;
                   6486: #else
                   6487:        pte_l2_s_cache_mask = L2_S_CACHE_MASK_armv6c;
                   6488: #endif
                   6489:
                   6490:        /* write-back, write-allocate */
                   6491:        pte_l1_s_cache_mode = L1_S_C | L1_S_B | L1_S_V6_TEX(0x01);
                   6492:        pte_l2_l_cache_mode = L2_C | L2_B | L2_V6_L_TEX(0x01);
                   6493: #if defined(ARM11MPCORE_COMPAT_MMU) || defined(ARMV6_EXTENDED_SMALL_PAGE)
                   6494:        pte_l2_s_cache_mode = L2_C | L2_B | L2_V6_XS_TEX(0x01);
                   6495: #else
                   6496:        /* no TEX. read-allocate */
                   6497:        pte_l2_s_cache_mode = L2_C | L2_B;
                   6498: #endif
                   6499:        /*
                   6500:         * write-back, write-allocate for page tables.
                   6501:         */
                   6502:        pte_l1_s_cache_mode_pt = L1_S_C | L1_S_B | L1_S_V6_TEX(0x01);
                   6503:        pte_l2_l_cache_mode_pt = L2_C | L2_B | L2_V6_L_TEX(0x01);
                   6504: #if defined(ARM11MPCORE_COMPAT_MMU) || defined(ARMV6_EXTENDED_SMALL_PAGE)
                   6505:        pte_l2_s_cache_mode_pt = L2_C | L2_B | L2_V6_XS_TEX(0x01);
                   6506: #else
                   6507:        pte_l2_s_cache_mode_pt = L2_C | L2_B;
                   6508: #endif
                   6509:
                   6510:        pte_l1_s_prot_u = L1_S_PROT_U_armv6;
                   6511:        pte_l1_s_prot_w = L1_S_PROT_W_armv6;
                   6512:        pte_l1_s_prot_ro = L1_S_PROT_RO_armv6;
                   6513:        pte_l1_s_prot_mask = L1_S_PROT_MASK_armv6;
                   6514:
                   6515: #if defined(ARM11MPCORE_COMPAT_MMU) || defined(ARMV6_EXTENDED_SMALL_PAGE)
                   6516:        pte_l2_s_prot_u = L2_S_PROT_U_armv6n;
                   6517:        pte_l2_s_prot_w = L2_S_PROT_W_armv6n;
                   6518:        pte_l2_s_prot_ro = L2_S_PROT_RO_armv6n;
                   6519:        pte_l2_s_prot_mask = L2_S_PROT_MASK_armv6n;
                   6520:
                   6521: #else
                   6522:        /* with AP[0..3] */
                   6523:        pte_l2_s_prot_u = L2_S_PROT_U_generic;
                   6524:        pte_l2_s_prot_w = L2_S_PROT_W_generic;
                   6525:        pte_l2_s_prot_ro = L2_S_PROT_RO_generic;
                   6526:        pte_l2_s_prot_mask = L2_S_PROT_MASK_generic;
                   6527: #endif
                   6528:
                   6529: #ifdef ARM11MPCORE_COMPAT_MMU
                   6530:        /* with AP[0..3] */
                   6531:        pte_l2_l_prot_u = L2_L_PROT_U_generic;
                   6532:        pte_l2_l_prot_w = L2_L_PROT_W_generic;
                   6533:        pte_l2_l_prot_ro = L2_L_PROT_RO_generic;
                   6534:        pte_l2_l_prot_mask = L2_L_PROT_MASK_generic;
                   6535:
1.230     matt     6536:        pte_l1_ss_proto = L1_SS_PROTO_armv6;
1.221     bsh      6537:        pte_l1_s_proto = L1_S_PROTO_armv6;
                   6538:        pte_l1_c_proto = L1_C_PROTO_armv6;
                   6539:        pte_l2_s_proto = L2_S_PROTO_armv6c;
                   6540: #else
                   6541:        pte_l2_l_prot_u = L2_L_PROT_U_armv6n;
                   6542:        pte_l2_l_prot_w = L2_L_PROT_W_armv6n;
                   6543:        pte_l2_l_prot_ro = L2_L_PROT_RO_armv6n;
                   6544:        pte_l2_l_prot_mask = L2_L_PROT_MASK_armv6n;
                   6545:
1.230     matt     6546:        pte_l1_ss_proto = L1_SS_PROTO_armv6;
1.221     bsh      6547:        pte_l1_s_proto = L1_S_PROTO_armv6;
                   6548:        pte_l1_c_proto = L1_C_PROTO_armv6;
                   6549:        pte_l2_s_proto = L2_S_PROTO_armv6n;
                   6550: #endif
                   6551:
                   6552:        pmap_copy_page_func = pmap_copy_page_generic;
                   6553:        pmap_zero_page_func = pmap_zero_page_generic;
                   6554:        pmap_needs_pte_sync = 1;
                   6555: }
                   6556: #endif /* CPU_ARM11MPCORE */
                   6557:
                   6558:
1.214     jmcneill 6559: #if ARM_MMU_V7 == 1
                   6560: void
                   6561: pmap_pte_init_armv7(void)
                   6562: {
                   6563:        /*
                   6564:         * The ARMv7-A MMU is mostly compatible with generic. If the
                   6565:         * AP field is zero, that now means "no access" rather than
                   6566:         * read-only. The prototypes are a little different because of
                   6567:         * the XN bit.
                   6568:         */
                   6569:        pmap_pte_init_generic();
                   6570:
                   6571:        pte_l1_s_cache_mask = L1_S_CACHE_MASK_armv7;
                   6572:        pte_l2_l_cache_mask = L2_L_CACHE_MASK_armv7;
                   6573:        pte_l2_s_cache_mask = L2_S_CACHE_MASK_armv7;
                   6574:
1.237     matt     6575:        if (CPU_ID_CORTEX_A9_P(curcpu()->ci_arm_cpuid)) {
                   6576:                /*
                   6577:                 * write-back, no write-allocate, shareable for normal pages.
                   6578:                 */
                   6579:                pte_l1_s_cache_mode = L1_S_C | L1_S_B | L1_S_V6_S;
                   6580:                pte_l2_l_cache_mode = L2_C | L2_B | L2_XS_S;
                   6581:                pte_l2_s_cache_mode = L2_C | L2_B | L2_XS_S;
                   6582:
                   6583:                /*
                   6584:                 * write-back, no write-allocate, shareable for page tables.
                   6585:                 */
                   6586:                pte_l1_s_cache_mode_pt = L1_S_C | L1_S_B | L1_S_V6_S;
                   6587:                pte_l2_l_cache_mode_pt = L2_C | L2_B | L2_XS_S;
                   6588:                pte_l2_s_cache_mode_pt = L2_C | L2_B | L2_XS_S;
                   6589:        }
                   6590:
1.214     jmcneill 6591:        pte_l1_s_prot_u = L1_S_PROT_U_armv7;
                   6592:        pte_l1_s_prot_w = L1_S_PROT_W_armv7;
                   6593:        pte_l1_s_prot_ro = L1_S_PROT_RO_armv7;
                   6594:        pte_l1_s_prot_mask = L1_S_PROT_MASK_armv7;
                   6595:
                   6596:        pte_l2_s_prot_u = L2_S_PROT_U_armv7;
                   6597:        pte_l2_s_prot_w = L2_S_PROT_W_armv7;
                   6598:        pte_l2_s_prot_ro = L2_S_PROT_RO_armv7;
                   6599:        pte_l2_s_prot_mask = L2_S_PROT_MASK_armv7;
                   6600:
                   6601:        pte_l2_l_prot_u = L2_L_PROT_U_armv7;
                   6602:        pte_l2_l_prot_w = L2_L_PROT_W_armv7;
                   6603:        pte_l2_l_prot_ro = L2_L_PROT_RO_armv7;
                   6604:        pte_l2_l_prot_mask = L2_L_PROT_MASK_armv7;
                   6605:
1.230     matt     6606:        pte_l1_ss_proto = L1_SS_PROTO_armv7;
1.214     jmcneill 6607:        pte_l1_s_proto = L1_S_PROTO_armv7;
                   6608:        pte_l1_c_proto = L1_C_PROTO_armv7;
                   6609:        pte_l2_s_proto = L2_S_PROTO_armv7;
1.237     matt     6610:
                   6611:        pmap_needs_pte_sync = 1;
1.214     jmcneill 6612: }
                   6613: #endif /* ARM_MMU_V7 */
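
/*
 * Illustrative sketch (not part of the original file): the CPU
 * identification code (set_cpufuncs() in arm/arm/cpufunc.c) invokes the
 * matching routine above once, early in bootstrap, e.g.
 *
 *	#if defined(CPU_CORTEX)
 *		pmap_pte_init_armv7();
 *	#endif
 *
 * (the exact option names vary by port), after which the pte_l*_*
 * variables and function pointers stay fixed for the life of the system.
 */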
                   6614:
1.170     chris    6615: /*
                   6616:  * return the PA of the current L1 table, for use when handling a crash dump
                   6617:  */
1.197     cegger   6618: uint32_t pmap_kernel_L1_addr(void)
1.170     chris    6619: {
                   6620:        return pmap_kernel()->pm_l1->l1_physaddr;
                   6621: }
                   6622:
1.134     thorpej  6623: #if defined(DDB)
                   6624: /*
                   6625:  * A couple of ddb-callable functions for dumping pmaps
                   6626:  */
                   6627: void pmap_dump_all(void);
                   6628: void pmap_dump(pmap_t);
                   6629:
                   6630: void
                   6631: pmap_dump_all(void)
                   6632: {
                   6633:        pmap_t pm;
                   6634:
                   6635:        LIST_FOREACH(pm, &pmap_pmaps, pm_list) {
                   6636:                if (pm == pmap_kernel())
                   6637:                        continue;
                   6638:                pmap_dump(pm);
                   6639:                printf("\n");
                   6640:        }
                   6641: }
                   6642:
                   6643: static pt_entry_t ncptes[64];
                   6644: static void pmap_dump_ncpg(pmap_t);
                   6645:
                   6646: void
                   6647: pmap_dump(pmap_t pm)
                   6648: {
                   6649:        struct l2_dtable *l2;
                   6650:        struct l2_bucket *l2b;
                   6651:        pt_entry_t *ptep, pte;
                   6652:        vaddr_t l2_va, l2b_va, va;
                   6653:        int i, j, k, occ, rows = 0;
                   6654:
                   6655:        if (pm == pmap_kernel())
                   6656:                printf("pmap_kernel (%p): ", pm);
                   6657:        else
                   6658:                printf("user pmap (%p): ", pm);
                   6659:
1.258     matt     6660:        printf("domain %d, l1 at %p\n", pmap_domain(pm), pmap_l1_kva(pm));
1.134     thorpej  6661:
                   6662:        l2_va = 0;
                   6663:        for (i = 0; i < L2_SIZE; i++, l2_va += 0x01000000) {
                   6664:                l2 = pm->pm_l2[i];
                   6665:
                   6666:                if (l2 == NULL || l2->l2_occupancy == 0)
                   6667:                        continue;
                   6668:
                   6669:                l2b_va = l2_va;
                   6670:                for (j = 0; j < L2_BUCKET_SIZE; j++, l2b_va += 0x00100000) {
                   6671:                        l2b = &l2->l2_bucket[j];
                   6672:
                   6673:                        if (l2b->l2b_occupancy == 0 || l2b->l2b_kva == NULL)
                   6674:                                continue;
                   6675:
                   6676:                        ptep = l2b->l2b_kva;
                   6677:
                   6678:                        for (k = 0; k < 256 && ptep[k] == 0; k++)
                   6679:                                ;
                   6680:
                   6681:                        k &= ~63;
                   6682:                        occ = l2b->l2b_occupancy;
                   6683:                        va = l2b_va + (k * 4096);
                   6684:                        for (; k < 256; k++, va += 0x1000) {
1.142     chris    6685:                                char ch = ' ';
1.134     thorpej  6686:                                if ((k % 64) == 0) {
                   6687:                                        if ((rows % 8) == 0) {
                   6688:                                                printf(
                   6689: "          |0000   |8000   |10000  |18000  |20000  |28000  |30000  |38000\n");
                   6690:                                        }
                   6691:                                        printf("%08lx: ", va);
                   6692:                                }
                   6693:
                   6694:                                ncptes[k & 63] = 0;
                   6695:                                pte = ptep[k];
                   6696:                                if (pte == 0) {
                   6697:                                        ch = '.';
                   6698:                                } else {
                   6699:                                        occ--;
                   6700:                                        switch (pte & 0x0c) {
                   6701:                                        case 0x00:
                   6702:                                                ch = 'D'; /* No cache No buff */
                   6703:                                                break;
                   6704:                                        case 0x04:
                   6705:                                                ch = 'B'; /* No cache buff */
                   6706:                                                break;
                   6707:                                        case 0x08:
1.141     scw      6708:                                                if (pte & 0x40)
                   6709:                                                        ch = 'm';
                   6710:                                                else
                    6711:                                                        ch = 'C'; /* Cache No buff */
1.134     thorpej  6712:                                                break;
                   6713:                                        case 0x0c:
                   6714:                                                ch = 'F'; /* Cache Buff */
                   6715:                                                break;
                   6716:                                        }
                   6717:
                   6718:                                        if ((pte & L2_S_PROT_U) == L2_S_PROT_U)
                   6719:                                                ch += 0x20;
                   6720:
                   6721:                                        if ((pte & 0xc) == 0)
                   6722:                                                ncptes[k & 63] = pte;
                   6723:                                }
                   6724:
                   6725:                                if ((k % 64) == 63) {
                   6726:                                        rows++;
                   6727:                                        printf("%c\n", ch);
                   6728:                                        pmap_dump_ncpg(pm);
                   6729:                                        if (occ == 0)
                   6730:                                                break;
                   6731:                                } else
                   6732:                                        printf("%c", ch);
                   6733:                        }
                   6734:                }
                   6735:        }
                   6736: }
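
/*
 * Key to the per-PTE characters printed by pmap_dump() above, derived from
 * the switch on the B/C bits: '.' invalid, 'D' no-cache/no-buffer,
 * 'B' bufferable only, 'C' cacheable only ('m' if the XScale mini-data bit
 * is also set), 'F' cacheable and bufferable.  0x20 is added to the letter
 * (lower-casing it) when L2_S_PROT_U is set, i.e. for user-accessible
 * mappings.
 */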
                   6737:
                   6738: static void
                   6739: pmap_dump_ncpg(pmap_t pm)
                   6740: {
                   6741:        struct vm_page *pg;
1.215     uebayasi 6742:        struct vm_page_md *md;
1.134     thorpej  6743:        struct pv_entry *pv;
                   6744:        int i;
                   6745:
                    6746:        for (i = 0; i < 64; i++) {
                   6747:                if (ncptes[i] == 0)
                   6748:                        continue;
                   6749:
                   6750:                pg = PHYS_TO_VM_PAGE(l2pte_pa(ncptes[i]));
                   6751:                if (pg == NULL)
                   6752:                        continue;
1.215     uebayasi 6753:                md = VM_PAGE_TO_MD(pg);
1.134     thorpej  6754:
                   6755:                printf(" pa 0x%08lx: krw %d kro %d urw %d uro %d\n",
1.155     yamt     6756:                    VM_PAGE_TO_PHYS(pg),
1.215     uebayasi 6757:                    md->krw_mappings, md->kro_mappings,
                   6758:                    md->urw_mappings, md->uro_mappings);
1.134     thorpej  6759:
1.215     uebayasi 6760:                SLIST_FOREACH(pv, &md->pvh_list, pv_link) {
1.134     thorpej  6761:                        printf("   %c va 0x%08lx, flags 0x%x\n",
                   6762:                            (pm == pv->pv_pmap) ? '*' : ' ',
                   6763:                            pv->pv_va, pv->pv_flags);
                   6764:                }
                   6765:        }
                   6766: }
                   6767: #endif
1.174     matt     6768:
                   6769: #ifdef PMAP_STEAL_MEMORY
                   6770: void
                   6771: pmap_boot_pageadd(pv_addr_t *newpv)
                   6772: {
                   6773:        pv_addr_t *pv, *npv;
                   6774:
                   6775:        if ((pv = SLIST_FIRST(&pmap_boot_freeq)) != NULL) {
                   6776:                if (newpv->pv_pa < pv->pv_va) {
                   6777:                        KASSERT(newpv->pv_pa + newpv->pv_size <= pv->pv_pa);
                   6778:                        if (newpv->pv_pa + newpv->pv_size == pv->pv_pa) {
                   6779:                                newpv->pv_size += pv->pv_size;
                   6780:                                SLIST_REMOVE_HEAD(&pmap_boot_freeq, pv_list);
                   6781:                        }
                   6782:                        pv = NULL;
                   6783:                } else {
                   6784:                        for (; (npv = SLIST_NEXT(pv, pv_list)) != NULL;
                   6785:                             pv = npv) {
                   6786:                                KASSERT(pv->pv_pa + pv->pv_size < npv->pv_pa);
                   6787:                                KASSERT(pv->pv_pa < newpv->pv_pa);
                   6788:                                if (newpv->pv_pa > npv->pv_pa)
                   6789:                                        continue;
                   6790:                                if (pv->pv_pa + pv->pv_size == newpv->pv_pa) {
                   6791:                                        pv->pv_size += newpv->pv_size;
                   6792:                                        return;
                   6793:                                }
                   6794:                                if (newpv->pv_pa + newpv->pv_size < npv->pv_pa)
                   6795:                                        break;
                   6796:                                newpv->pv_size += npv->pv_size;
                   6797:                                SLIST_INSERT_AFTER(pv, newpv, pv_list);
                   6798:                                SLIST_REMOVE_AFTER(newpv, pv_list);
                   6799:                                return;
                   6800:                        }
                   6801:                }
                   6802:        }
                   6803:
                   6804:        if (pv) {
                   6805:                SLIST_INSERT_AFTER(pv, newpv, pv_list);
                   6806:        } else {
                   6807:                SLIST_INSERT_HEAD(&pmap_boot_freeq, newpv, pv_list);
                   6808:        }
                   6809: }
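
/*
 * Illustrative sketch (not part of the original file): how early
 * bootstrap code might donate a leftover range to the free queue above.
 * The function and variable names are hypothetical.  The KASSERTs in
 * pmap_boot_pagealloc() suggest that each descriptor is expected to
 * live at the start of the chunk it describes, so the sketch places it
 * there.
 */
#if 0	/* example only */
static void
example_donate_boot_range(paddr_t pa, vaddr_t va, psize_t size)
{
	pv_addr_t *pv = (pv_addr_t *)va;	/* descriptor at chunk start */

	pv->pv_pa = pa;
	pv->pv_va = va;
	pv->pv_size = size;
	pmap_boot_pageadd(pv);
}
#endif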
                   6810:
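/*
 * pmap_boot_pagealloc:
 *
 *	Carve "amount" bytes out of the boot-time free queue and return
 *	the range in *rpv, zeroed.  "mask"/"match" constrain placement:
 *	the chosen physical address, stepped at page granularity, must
 *	satisfy (pa & mask) == match.  If no queue entry can satisfy the
 *	request, the final entry is grown into an adjacent physseg (only
 *	legal when mask is zero); failing that, panic.
 */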
                   6811: void
                   6812: pmap_boot_pagealloc(psize_t amount, psize_t mask, psize_t match,
                   6813:        pv_addr_t *rpv)
                   6814: {
                   6815:        pv_addr_t *pv, **pvp;
                   6816:        struct vm_physseg *ps;
                   6817:        size_t i;
                   6818:
                   6819:        KASSERT((amount & PGOFSET) == 0);
                   6820:        KASSERT((mask & PGOFSET) == 0);
                   6821:        KASSERT((match & PGOFSET) == 0);
                   6822:        KASSERT(amount != 0);
                   6823:
                   6824:        for (pvp = &SLIST_FIRST(&pmap_boot_freeq);
                   6825:             (pv = *pvp) != NULL;
                   6826:             pvp = &SLIST_NEXT(pv, pv_list)) {
                   6827:                pv_addr_t *newpv;
                   6828:                psize_t off;
                   6829:                /*
                   6830:                 * If this entry is too small to satisfy the request...
                   6831:                 */
                   6832:                KASSERT(pv->pv_size > 0);
                   6833:                if (pv->pv_size < amount)
                   6834:                        continue;
                   6835:
                   6836:                for (off = 0; off <= mask; off += PAGE_SIZE) {
                   6837:                        if (((pv->pv_pa + off) & mask) == match
                   6838:                            && off + amount <= pv->pv_size)
                   6839:                                break;
                   6840:                }
                   6841:                if (off > mask)
                   6842:                        continue;
                   6843:
                   6844:                rpv->pv_va = pv->pv_va + off;
                   6845:                rpv->pv_pa = pv->pv_pa + off;
                   6846:                rpv->pv_size = amount;
                   6847:                pv->pv_size -= amount;
                   6848:                if (pv->pv_size == 0) {
                   6849:                        KASSERT(off == 0);
                   6850:                        KASSERT((vaddr_t) pv == rpv->pv_va);
                   6851:                        *pvp = SLIST_NEXT(pv, pv_list);
                   6852:                } else if (off == 0) {
                   6853:                        KASSERT((vaddr_t) pv == rpv->pv_va);
                   6854:                        newpv = (pv_addr_t *) (rpv->pv_va + amount);
                   6855:                        *newpv = *pv;
                   6856:                        newpv->pv_pa += amount;
                   6857:                        newpv->pv_va += amount;
                   6858:                        *pvp = newpv;
                   6859:                } else if (off < pv->pv_size) {
                   6860:                        newpv = (pv_addr_t *) (rpv->pv_va + amount);
                   6861:                        *newpv = *pv;
                   6862:                        newpv->pv_size -= off;
                   6863:                        newpv->pv_pa += off + amount;
                   6864:                        newpv->pv_va += off + amount;
                   6865:
                   6866:                        SLIST_NEXT(pv, pv_list) = newpv;
                   6867:                        pv->pv_size = off;
                   6868:                } else {
                   6869:                        KASSERT((vaddr_t) pv != rpv->pv_va);
                   6870:                }
                   6871:                memset((void *)rpv->pv_va, 0, amount);
                   6872:                return;
                   6873:        }
                   6874:
                   6875:        if (vm_nphysseg == 0)
                   6876:                panic("pmap_boot_pagealloc: couldn't allocate memory");
                   6877:
                   6878:        for (pvp = &SLIST_FIRST(&pmap_boot_freeq);
                   6879:             (pv = *pvp) != NULL;
                   6880:             pvp = &SLIST_NEXT(pv, pv_list)) {
                   6881:                if (SLIST_NEXT(pv, pv_list) == NULL)
                   6882:                        break;
                   6883:        }
                   6884:        KASSERT(mask == 0);
1.218     uebayasi 6885:        for (i = 0; i < vm_nphysseg; i++) {
                   6886:                ps = VM_PHYSMEM_PTR(i);
1.174     matt     6887:                if (ps->avail_start == atop(pv->pv_pa + pv->pv_size)
                   6888:                    && pv->pv_va + pv->pv_size <= ptoa(ps->avail_end)) {
                   6889:                        rpv->pv_va = pv->pv_va;
                   6890:                        rpv->pv_pa = pv->pv_pa;
                   6891:                        rpv->pv_size = amount;
                   6892:                        *pvp = NULL;
                   6893:                        pmap_map_chunk(kernel_l1pt.pv_va,
                   6894:                             ptoa(ps->avail_start) + (pv->pv_va - pv->pv_pa),
                   6895:                             ptoa(ps->avail_start),
                   6896:                             amount - pv->pv_size,
                   6897:                             VM_PROT_READ|VM_PROT_WRITE,
                   6898:                             PTE_CACHE);
                   6899:                        ps->avail_start += atop(amount - pv->pv_size);
                   6900:                        /*
                   6901:                         * If we consumed the entire physseg, remove it.
                   6902:                         */
                   6903:                        if (ps->avail_start == ps->avail_end) {
1.218     uebayasi 6904:                                for (--vm_nphysseg; i < vm_nphysseg; i++)
                   6905:                                        VM_PHYSMEM_PTR_SWAP(i, i + 1);
1.174     matt     6906:                        }
                   6907:                        memset((void *)rpv->pv_va, 0, rpv->pv_size);
                   6908:                        return;
                   6909:                }
                   6910:        }
                   6911:
                   6912:        panic("pmap_boot_pagealloc: couldn't allocate memory");
                   6913: }
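
/*
 * Hypothetical call site (not in the original file): allocate a 16 KiB
 * region whose physical address is 16 KiB aligned, as an ARM L1
 * translation table requires.  The mask has its page-offset bits
 * cleared so the KASSERTs above hold; combined with page-granular
 * placement this still yields L1_TABLE_SIZE alignment.
 */
#if 0	/* example only */
static void
example_alloc_l1_table(void)
{
	pv_addr_t example_l1pt;

	pmap_boot_pagealloc(L1_TABLE_SIZE,
	    (L1_TABLE_SIZE - 1) & ~PGOFSET, 0, &example_l1pt);
	/* example_l1pt.pv_pa is now 16 KiB aligned; pv_va maps it. */
}
#endif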
                   6914:
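/*
 * pmap_steal_memory:
 *
 *	UVM's PMAP_STEAL_MEMORY back-end (called from uvm_pageboot_alloc()):
 *	hand out wired, zeroed memory from the boot free queue before the
 *	VM system is up.  The *vstartp/*vendp hints are left untouched by
 *	this implementation.
 */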
                   6915: vaddr_t
                   6916: pmap_steal_memory(vsize_t size, vaddr_t *vstartp, vaddr_t *vendp)
                   6917: {
                   6918:        pv_addr_t pv;
                   6919:
                   6920:        pmap_boot_pagealloc(size, 0, 0, &pv);
                   6921:
                   6922:        return pv.pv_va;
                   6923: }
                   6924: #endif /* PMAP_STEAL_MEMORY */
1.186     matt     6925:
                   6926: SYSCTL_SETUP(sysctl_machdep_pmap_setup, "sysctl machdep.kmpages setup")
                   6927: {
                   6928:        sysctl_createv(clog, 0, NULL, NULL,
                   6929:                        CTLFLAG_PERMANENT,
                   6930:                        CTLTYPE_NODE, "machdep", NULL,
                   6931:                        NULL, 0, NULL, 0,
                   6932:                        CTL_MACHDEP, CTL_EOL);
                   6933:
                   6934:        sysctl_createv(clog, 0, NULL, NULL,
                   6935:                        CTLFLAG_PERMANENT,
                   6936:                        CTLTYPE_INT, "kmpages",
                   6937:                        SYSCTL_DESCR("count of pages allocated to kernel memory allocators"),
                   6938:                        NULL, 0, &pmap_kmpages, 0,
                   6939:                        CTL_MACHDEP, CTL_CREATE, CTL_EOL);
                   6940: }
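/*
 * The node created above is readable from userland, e.g. with
 * "sysctl machdep.kmpages"; it reports the count of pages this pmap has
 * handed to the kernel memory allocators.
 */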
1.241     matt     6941:
                   6942: #ifdef PMAP_NEED_ALLOC_POOLPAGE
                   6943: struct vm_page *
                   6944: arm_pmap_alloc_poolpage(int flags)
                   6945: {
                   6946:        /*
                   6947:         * On some systems, only some pages may be "coherent" for DMA and we
 1.248     matt     6948:         * want to prefer those for pool pages (think mbufs) but fall back to
                   6949:         * any page if none is available.
1.241     matt     6950:         */
1.248     matt     6951:        if (arm_poolpage_vmfreelist != VM_FREELIST_DEFAULT) {
1.241     matt     6952:                return uvm_pagealloc_strat(NULL, 0, NULL, flags,
1.248     matt     6953:                    UVM_PGA_STRAT_FALLBACK, arm_poolpage_vmfreelist);
                   6954:        }
1.241     matt     6955:
                   6956:        return uvm_pagealloc(NULL, 0, NULL, flags);
                   6957: }
                   6958: #endif
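
/*
 * Hypothetical platform hook (not in the original file): a board whose
 * DMA-coherent RAM sits on its own page freelist could steer pool pages
 * toward it by setting arm_poolpage_vmfreelist during early MD setup.
 * The freelist name VM_FREELIST_COHERENT is made up for this sketch.
 */
#if 0	/* example only */
static void
example_prefer_coherent_poolpages(void)
{
	arm_poolpage_vmfreelist = VM_FREELIST_COHERENT;
}
#endif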
