Annotation of src/sys/arch/aarch64/aarch64/pmap.c, Revision 1.74
1.74 ! tnn 1: /* $NetBSD: pmap.c,v 1.73 2020/05/14 07:59:03 skrll Exp $ */
1.1 matt 2:
1.2 ryo 3: /*
4: * Copyright (c) 2017 Ryo Shimizu <ryo@nerv.org>
1.1 matt 5: * All rights reserved.
6: *
7: * Redistribution and use in source and binary forms, with or without
8: * modification, are permitted provided that the following conditions
9: * are met:
10: * 1. Redistributions of source code must retain the above copyright
11: * notice, this list of conditions and the following disclaimer.
12: * 2. Redistributions in binary form must reproduce the above copyright
13: * notice, this list of conditions and the following disclaimer in the
14: * documentation and/or other materials provided with the distribution.
15: *
1.2 ryo 16: * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17: * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
18: * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
19: * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
20: * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
21: * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
22: * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23: * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
24: * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
25: * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
1.1 matt 26: * POSSIBILITY OF SUCH DAMAGE.
27: */
28:
29: #include <sys/cdefs.h>
1.74 ! tnn 30: __KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.73 2020/05/14 07:59:03 skrll Exp $");
1.1 matt 31:
1.2 ryo 32: #include "opt_arm_debug.h"
33: #include "opt_ddb.h"
1.26 ryo 34: #include "opt_multiprocessor.h"
35: #include "opt_pmap.h"
1.2 ryo 36: #include "opt_uvmhist.h"
1.1 matt 37:
38: #include <sys/param.h>
39: #include <sys/types.h>
40: #include <sys/kmem.h>
41: #include <sys/vmem.h>
1.2 ryo 42: #include <sys/atomic.h>
1.33 maxv 43: #include <sys/asan.h>
1.1 matt 44:
45: #include <uvm/uvm.h>
1.63 ryo 46: #include <uvm/pmap/pmap_pvt.h>
1.1 matt 47:
1.2 ryo 48: #include <aarch64/pmap.h>
49: #include <aarch64/pte.h>
50: #include <aarch64/armreg.h>
51: #include <aarch64/cpufunc.h>
1.66 ryo 52: #include <aarch64/locore.h>
1.21 ryo 53: #include <aarch64/machdep.h>
1.37 ryo 54: #ifdef DDB
55: #include <aarch64/db_machdep.h>
56: #include <ddb/db_access.h>
57: #endif
1.2 ryo 58:
59: //#define PMAP_PV_DEBUG
60:
1.16 skrll 61: #ifdef VERBOSE_INIT_ARM
62: #define VPRINTF(...) printf(__VA_ARGS__)
63: #else
1.30 skrll 64: #define VPRINTF(...) __nothing
1.16 skrll 65: #endif
1.2 ryo 66:
67: UVMHIST_DEFINE(pmaphist);
68: #ifdef UVMHIST
69:
70: #ifndef UVMHIST_PMAPHIST_SIZE
71: #define UVMHIST_PMAPHIST_SIZE (1024 * 4)
72: #endif
73:
74: struct kern_history_ent pmaphistbuf[UVMHIST_PMAPHIST_SIZE];
75:
76: static void
77: pmap_hist_init(void)
78: {
79: static bool inited = false;
80: if (inited == false) {
81: UVMHIST_INIT_STATIC(pmaphist, pmaphistbuf);
82: inited = true;
83: }
84: }
85: #define PMAP_HIST_INIT() pmap_hist_init()
86:
87: #else /* UVMHIST */
88:
89: #define PMAP_HIST_INIT() ((void)0)
90:
91: #endif /* UVMHIST */
92:
93:
94: #ifdef PMAPCOUNTERS
95: #define PMAP_COUNT(name) (pmap_evcnt_##name.ev_count++ + 0)
96: #define PMAP_COUNTER(name, desc) \
97: struct evcnt pmap_evcnt_##name = \
98: EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "pmap", desc); \
99: EVCNT_ATTACH_STATIC(pmap_evcnt_##name)
100:
101: PMAP_COUNTER(pdp_alloc_boot, "page table page allocate (uvm_pageboot_alloc)");
102: PMAP_COUNTER(pdp_alloc, "page table page allocate (uvm_pagealloc)");
103: PMAP_COUNTER(pdp_free, "page table page free (uvm_pagefree)");
104:
105: PMAP_COUNTER(pv_enter, "pv_entry allocate and link");
106: PMAP_COUNTER(pv_remove, "pv_entry free and unlink");
1.5 ryo 107: PMAP_COUNTER(pv_remove_nopv, "no pv_entry found when removing pv");
1.2 ryo 108:
109: PMAP_COUNTER(activate, "pmap_activate call");
110: PMAP_COUNTER(deactivate, "pmap_deactivate call");
111: PMAP_COUNTER(create, "pmap_create call");
112: PMAP_COUNTER(destroy, "pmap_destroy call");
113:
114: PMAP_COUNTER(page_protect, "pmap_page_protect call");
115: PMAP_COUNTER(protect, "pmap_protect call");
116: PMAP_COUNTER(protect_remove_fallback, "pmap_protect with no-read");
   117: PMAP_COUNTER(protect_none, "pmap_protect non-existent pages");
118: PMAP_COUNTER(protect_managed, "pmap_protect managed pages");
119: PMAP_COUNTER(protect_unmanaged, "pmap_protect unmanaged pages");
1.63 ryo 120: PMAP_COUNTER(protect_pvmanaged, "pmap_protect pv-tracked unmanaged pages");
1.2 ryo 121:
122: PMAP_COUNTER(clear_modify, "pmap_clear_modify call");
123: PMAP_COUNTER(clear_modify_pages, "pmap_clear_modify pages");
124: PMAP_COUNTER(clear_reference, "pmap_clear_reference call");
125: PMAP_COUNTER(clear_reference_pages, "pmap_clear_reference pages");
126:
127: PMAP_COUNTER(fixup_referenced, "page reference emulations");
128: PMAP_COUNTER(fixup_modified, "page modification emulations");
129:
130: PMAP_COUNTER(kern_mappings_bad, "kernel pages mapped (bad color)");
131: PMAP_COUNTER(kern_mappings_bad_wired, "kernel pages mapped (wired bad color)");
132: PMAP_COUNTER(user_mappings_bad, "user pages mapped (bad color, not wired)");
   133: PMAP_COUNTER(user_mappings_bad_wired, "user pages mapped (bad color, wired)");
134: PMAP_COUNTER(kern_mappings, "kernel pages mapped");
135: PMAP_COUNTER(user_mappings, "user pages mapped");
136: PMAP_COUNTER(user_mappings_changed, "user mapping changed");
137: PMAP_COUNTER(kern_mappings_changed, "kernel mapping changed");
138: PMAP_COUNTER(uncached_mappings, "uncached pages mapped");
139: PMAP_COUNTER(unmanaged_mappings, "unmanaged pages mapped");
1.63 ryo 140: PMAP_COUNTER(pvmanaged_mappings, "pv-tracked unmanaged pages mapped");
1.2 ryo 141: PMAP_COUNTER(managed_mappings, "managed pages mapped");
142: PMAP_COUNTER(mappings, "pages mapped (including remapped)");
143: PMAP_COUNTER(remappings, "pages remapped");
144:
145: PMAP_COUNTER(pv_entry_cannotalloc, "pv_entry allocation failure");
146:
147: PMAP_COUNTER(unwire, "pmap_unwire call");
1.3 ryo 148: PMAP_COUNTER(unwire_failure, "pmap_unwire failure");
1.2 ryo 149:
150: #else /* PMAPCOUNTERS */
151: #define PMAP_COUNT(name) __nothing
152: #endif /* PMAPCOUNTERS */
153:
1.19 ryo 154: /*
   155:  * invalidate the TLB entry for the given ASID and VA. if `ll' is
   156:  * true, only the last-level (usually L3) entry is invalidated.
157: */
158: #define AARCH64_TLBI_BY_ASID_VA(asid, va, ll) \
159: do { \
160: if ((ll)) { \
161: if ((asid) == 0) \
162: aarch64_tlbi_by_va_ll((va)); \
163: else \
164: aarch64_tlbi_by_asid_va_ll((asid), (va)); \
165: } else { \
166: if ((asid) == 0) \
167: aarch64_tlbi_by_va((va)); \
168: else \
169: aarch64_tlbi_by_asid_va((asid), (va)); \
170: } \
171: } while (0/*CONSTCOND*/)
172:
173: /*
   174:  * aarch64 requires write permission in the pte to invalidate the
   175:  * instruction cache, so the pte is temporarily made writable before
   176:  * cpu_icache_sync_range(). this macro modifies *ptep; update the PTE after this.
177: */
178: #define PTE_ICACHE_SYNC_PAGE(pte, ptep, pm, va, ll) \
179: do { \
180: pt_entry_t tpte; \
181: tpte = (pte) & ~(LX_BLKPAG_AF|LX_BLKPAG_AP); \
182: tpte |= (LX_BLKPAG_AF|LX_BLKPAG_AP_RW); \
183: tpte |= (LX_BLKPAG_UXN|LX_BLKPAG_PXN); \
184: atomic_swap_64((ptep), tpte); \
185: AARCH64_TLBI_BY_ASID_VA((pm)->pm_asid, (va), (ll)); \
186: cpu_icache_sync_range((va), PAGE_SIZE); \
187: } while (0/*CONSTCOND*/)
188:
1.62 ryo 189: #define VM_PAGE_TO_PP(pg) (&(pg)->mdpage.mdpg_pp)
190:
1.2 ryo 191: struct pv_entry {
1.64 ryo 192: LIST_ENTRY(pv_entry) pv_link;
1.2 ryo 193: struct pmap *pv_pmap;
194: vaddr_t pv_va;
195: paddr_t pv_pa; /* debug */
196: pt_entry_t *pv_ptep; /* for fast pte lookup */
197: };
1.64 ryo 198: #define pv_next pv_link.le_next
1.35 ryo 199:
200: #define L3INDEXMASK (L3_SIZE * Ln_ENTRIES - 1)
1.40 ryo 201: #define PDPSWEEP_TRIGGER 512
1.2 ryo 202:
1.27 ryo 203: static pt_entry_t *_pmap_pte_lookup_l3(struct pmap *, vaddr_t);
204: static pt_entry_t *_pmap_pte_lookup_bs(struct pmap *, vaddr_t, vsize_t *);
1.18 ryo 205: static pt_entry_t _pmap_pte_adjust_prot(pt_entry_t, vm_prot_t, vm_prot_t, bool);
1.2 ryo 206: static pt_entry_t _pmap_pte_adjust_cacheflags(pt_entry_t, u_int);
1.36 ryo 207: static void _pmap_remove(struct pmap *, vaddr_t, vaddr_t, bool,
208: struct pv_entry **);
1.2 ryo 209: static int _pmap_enter(struct pmap *, vaddr_t, paddr_t, vm_prot_t, u_int, bool);
210:
1.1 matt 211: static struct pmap kernel_pmap;
212:
213: struct pmap * const kernel_pmap_ptr = &kernel_pmap;
1.2 ryo 214: static vaddr_t pmap_maxkvaddr;
215:
216: vaddr_t virtual_avail, virtual_end;
217: vaddr_t virtual_devmap_addr;
1.45 ryo 218: bool pmap_devmap_bootstrap_done = false;
1.2 ryo 219:
220: static struct pool_cache _pmap_cache;
221: static struct pool_cache _pmap_pv_pool;
222:
1.70 maxv 223: /* Set to LX_BLKPAG_GP if supported. */
224: uint64_t pmap_attr_gp = 0;
1.2 ryo 225:
226: static inline void
1.62 ryo 227: pmap_pv_lock(struct pmap_page *pp)
1.2 ryo 228: {
229:
1.62 ryo 230: mutex_enter(&pp->pp_pvlock);
1.2 ryo 231: }
232:
233: static inline void
1.62 ryo 234: pmap_pv_unlock(struct pmap_page *pp)
1.2 ryo 235: {
236:
1.62 ryo 237: mutex_exit(&pp->pp_pvlock);
1.2 ryo 238: }
239:
1.11 ryo 240:
241: static inline void
242: pm_lock(struct pmap *pm)
243: {
244: mutex_enter(&pm->pm_lock);
245: }
246:
247: static inline void
248: pm_unlock(struct pmap *pm)
249: {
250: mutex_exit(&pm->pm_lock);
251: }
1.2 ryo 252:
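/*
 * look up the pmap_page for a physical address: the one embedded in
 * the vm_page for managed memory, or, with __HAVE_PMAP_PV_TRACK, the
 * pv-tracked entry for unmanaged (e.g. device) memory.
 * returns NULL if the address is not tracked at all.
 */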
1.62 ryo 253: static inline struct pmap_page *
254: phys_to_pp(paddr_t pa)
255: {
256: struct vm_page *pg;
257:
258: pg = PHYS_TO_VM_PAGE(pa);
259: if (pg != NULL)
260: return VM_PAGE_TO_PP(pg);
261:
1.63 ryo 262: #ifdef __HAVE_PMAP_PV_TRACK
1.69 ryo 263: return pmap_pv_tracked(pa);
1.63 ryo 264: #else
1.62 ryo 265: return NULL;
1.63 ryo 266: #endif /* __HAVE_PMAP_PV_TRACK */
1.62 ryo 267: }
268:
1.28 ryo 269: #define IN_RANGE(va,sta,end) (((sta) <= (va)) && ((va) < (end)))
270:
1.2 ryo 271: #define IN_KSEG_ADDR(va) \
1.28 ryo 272: IN_RANGE((va), AARCH64_KSEG_START, AARCH64_KSEG_END)
273:
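/*
 * KASSERT_PM_ADDR(pm, va): assert that va lies in the address space
 * served by pm: the upper (TTBR1) kernel range for pmap_kernel(),
 * or the lower (TTBR0) user range for any other pmap.
 */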
1.66 ryo 274: #ifdef DIAGNOSTIC
275: #define KASSERT_PM_ADDR(pm,va) \
1.28 ryo 276: do { \
1.66 ryo 277: int space = aarch64_addressspace(va); \
1.28 ryo 278: if ((pm) == pmap_kernel()) { \
1.66 ryo 279: KASSERTMSG(space == AARCH64_ADDRSPACE_UPPER, \
280: "%s: kernel pm %p: va=%016lx" \
281: " is out of upper address space\n", \
282: __func__, (pm), (va)); \
1.28 ryo 283: KASSERTMSG(IN_RANGE((va), VM_MIN_KERNEL_ADDRESS, \
284: VM_MAX_KERNEL_ADDRESS), \
285: "%s: kernel pm %p: va=%016lx" \
286: " is not kernel address\n", \
287: __func__, (pm), (va)); \
288: } else { \
1.66 ryo 289: KASSERTMSG(space == AARCH64_ADDRSPACE_LOWER, \
290: "%s: user pm %p: va=%016lx" \
291: " is out of lower address space\n", \
292: __func__, (pm), (va)); \
1.28 ryo 293: KASSERTMSG(IN_RANGE((va), \
294: VM_MIN_ADDRESS, VM_MAX_ADDRESS), \
295: "%s: user pm %p: va=%016lx" \
296: " is not user address\n", \
297: __func__, (pm), (va)); \
298: } \
299: } while (0 /* CONSTCOND */)
1.66 ryo 300: #else /* DIAGNOSTIC */
301: #define KASSERT_PM_ADDR(pm,va)
302: #endif /* DIAGNOSTIC */
1.2 ryo 303:
304:
305: static const struct pmap_devmap *pmap_devmap_table;
306:
307: static vsize_t
1.26 ryo 308: pmap_map_chunk(vaddr_t va, paddr_t pa, vsize_t size,
1.2 ryo 309: vm_prot_t prot, u_int flags)
310: {
311: pt_entry_t attr;
1.60 skrll 312: vsize_t resid = round_page(size);
313:
1.65 ryo 314: attr = _pmap_pte_adjust_prot(0, prot, VM_PROT_ALL, false);
315: attr = _pmap_pte_adjust_cacheflags(attr, flags);
316: pmapboot_enter_range(va, pa, resid, attr,
317: PMAPBOOT_ENTER_NOOVERWRITE, bootpage_alloc, printf);
318: aarch64_tlbi_all();
1.2 ryo 319:
1.65 ryo 320: return resid;
1.2 ryo 321: }
1.1 matt 322:
323: void
1.2 ryo 324: pmap_devmap_register(const struct pmap_devmap *table)
1.1 matt 325: {
1.2 ryo 326: pmap_devmap_table = table;
1.1 matt 327: }
328:
329: void
1.31 skrll 330: pmap_devmap_bootstrap(vaddr_t l0pt, const struct pmap_devmap *table)
1.2 ryo 331: {
332: vaddr_t va;
333: int i;
334:
335: pmap_devmap_register(table);
336:
1.16 skrll 337: VPRINTF("%s:\n", __func__);
1.2 ryo 338: for (i = 0; table[i].pd_size != 0; i++) {
1.16 skrll 339: VPRINTF(" devmap: pa %08lx-%08lx = va %016lx\n",
1.2 ryo 340: table[i].pd_pa,
341: table[i].pd_pa + table[i].pd_size - 1,
342: table[i].pd_va);
343: va = table[i].pd_va;
344:
1.26 ryo 345: KASSERT((VM_KERNEL_IO_ADDRESS <= va) &&
346: (va < (VM_KERNEL_IO_ADDRESS + VM_KERNEL_IO_SIZE)));
347:
1.2 ryo 348: /* update and check virtual_devmap_addr */
1.59 skrll 349: if (virtual_devmap_addr == 0 || virtual_devmap_addr > va) {
1.2 ryo 350: virtual_devmap_addr = va;
351: }
352:
1.26 ryo 353: pmap_map_chunk(
1.2 ryo 354: table[i].pd_va,
355: table[i].pd_pa,
356: table[i].pd_size,
357: table[i].pd_prot,
358: table[i].pd_flags);
359: }
1.45 ryo 360:
361: pmap_devmap_bootstrap_done = true;
1.2 ryo 362: }
363:
364: const struct pmap_devmap *
365: pmap_devmap_find_va(vaddr_t va, vsize_t size)
366: {
   367: 	vaddr_t endva;
368: int i;
369:
370: if (pmap_devmap_table == NULL)
371: return NULL;
372:
373: endva = va + size;
374: for (i = 0; pmap_devmap_table[i].pd_size != 0; i++) {
375: if ((va >= pmap_devmap_table[i].pd_va) &&
376: (endva <= pmap_devmap_table[i].pd_va +
377: pmap_devmap_table[i].pd_size))
378: return &pmap_devmap_table[i];
379: }
380: return NULL;
381: }
382:
383: const struct pmap_devmap *
384: pmap_devmap_find_pa(paddr_t pa, psize_t size)
385: {
386: paddr_t endpa;
387: int i;
388:
389: if (pmap_devmap_table == NULL)
390: return NULL;
391:
392: endpa = pa + size;
393: for (i = 0; pmap_devmap_table[i].pd_size != 0; i++) {
394: if (pa >= pmap_devmap_table[i].pd_pa &&
395: (endpa <= pmap_devmap_table[i].pd_pa +
396: pmap_devmap_table[i].pd_size))
397: return (&pmap_devmap_table[i]);
398: }
399: return NULL;
400: }
401:
402: vaddr_t
403: pmap_devmap_phystov(paddr_t pa)
1.1 matt 404: {
1.2 ryo 405: const struct pmap_devmap *table;
406: paddr_t offset;
407:
408: table = pmap_devmap_find_pa(pa, 0);
409: if (table == NULL)
410: return 0;
411:
412: offset = pa - table->pd_pa;
413: return table->pd_va + offset;
1.1 matt 414: }
415:
   416: paddr_t
1.2       ryo       417: pmap_devmap_vtophys(vaddr_t va)
418: {
419: const struct pmap_devmap *table;
420: vaddr_t offset;
421:
422: table = pmap_devmap_find_va(va, 0);
423: if (table == NULL)
424: return 0;
425:
426: offset = va - table->pd_va;
427: return table->pd_pa + offset;
428: }
429:
430: void
431: pmap_bootstrap(vaddr_t vstart, vaddr_t vend)
432: {
433: struct pmap *kpm;
434: pd_entry_t *l0;
435: paddr_t l0pa;
436:
437: PMAP_HIST_INIT(); /* init once */
438:
439: UVMHIST_FUNC(__func__);
440: UVMHIST_CALLED(pmaphist);
441:
442: #if 0
443: /* uvmexp.ncolors = icachesize / icacheways / PAGE_SIZE; */
444: uvmexp.ncolors = aarch64_cache_vindexsize / PAGE_SIZE;
445: #endif
446:
   447: 	/* has devmap already taken the top of the va range? */
1.59 skrll 448: if (virtual_devmap_addr != 0 && virtual_devmap_addr < vend)
1.2 ryo 449: vend = virtual_devmap_addr;
450:
451: virtual_avail = vstart;
452: virtual_end = vend;
453: pmap_maxkvaddr = vstart;
454:
455: aarch64_tlbi_all();
456:
457: l0pa = reg_ttbr1_el1_read();
1.9 christos 458: l0 = (void *)AARCH64_PA_TO_KVA(l0pa);
1.2 ryo 459:
460: memset(&kernel_pmap, 0, sizeof(kernel_pmap));
461: kpm = pmap_kernel();
462: kpm->pm_asid = 0;
463: kpm->pm_refcnt = 1;
1.40 ryo 464: kpm->pm_idlepdp = 0;
1.2 ryo 465: kpm->pm_l0table = l0;
466: kpm->pm_l0table_pa = l0pa;
467: kpm->pm_activated = true;
1.64 ryo 468: LIST_INIT(&kpm->pm_vmlist);
1.2 ryo 469: mutex_init(&kpm->pm_lock, MUTEX_DEFAULT, IPL_VM);
1.36 ryo 470:
471: CTASSERT(sizeof(kpm->pm_stats.wired_count) == sizeof(long));
472: CTASSERT(sizeof(kpm->pm_stats.resident_count) == sizeof(long));
473: #define PMSTAT_INC_WIRED_COUNT(pm) \
474: atomic_inc_ulong(&(pm)->pm_stats.wired_count)
475: #define PMSTAT_DEC_WIRED_COUNT(pm) \
476: atomic_dec_ulong(&(pm)->pm_stats.wired_count)
477: #define PMSTAT_INC_RESIDENT_COUNT(pm) \
478: atomic_inc_ulong(&(pm)->pm_stats.resident_count)
479: #define PMSTAT_DEC_RESIDENT_COUNT(pm) \
480: atomic_dec_ulong(&(pm)->pm_stats.resident_count)
1.2 ryo 481: }
482:
483: inline static int
484: _pmap_color(vaddr_t addr) /* or paddr_t */
485: {
486: return (addr >> PGSHIFT) & (uvmexp.ncolors - 1);
487: }
488:
489: static int
490: _pmap_pmap_ctor(void *arg, void *v, int flags)
1.1 matt 491: {
1.2 ryo 492: memset(v, 0, sizeof(struct pmap));
1.1 matt 493: return 0;
494: }
495:
1.2 ryo 496: static int
497: _pmap_pv_ctor(void *arg, void *v, int flags)
1.1 matt 498: {
1.2 ryo 499: memset(v, 0, sizeof(struct pv_entry));
500: return 0;
1.1 matt 501: }
502:
503: void
1.2 ryo 504: pmap_init(void)
1.1 matt 505: {
1.2 ryo 506:
507: pool_cache_bootstrap(&_pmap_cache, sizeof(struct pmap),
508: 0, 0, 0, "pmappl", NULL, IPL_NONE, _pmap_pmap_ctor, NULL, NULL);
509: pool_cache_bootstrap(&_pmap_pv_pool, sizeof(struct pv_entry),
510: 0, 0, 0, "pvpl", NULL, IPL_VM, _pmap_pv_ctor, NULL, NULL);
511:
512: }
513:
514: void
515: pmap_virtual_space(vaddr_t *vstartp, vaddr_t *vendp)
516: {
517: *vstartp = virtual_avail;
518: *vendp = virtual_end;
519: }
520:
521: vaddr_t
522: pmap_steal_memory(vsize_t size, vaddr_t *vstartp, vaddr_t *vendp)
523: {
524: int npage;
525: paddr_t pa;
526: vaddr_t va;
527: psize_t bank_npage;
528: uvm_physseg_t bank;
529:
530: UVMHIST_FUNC(__func__);
531: UVMHIST_CALLED(pmaphist);
532:
533: UVMHIST_LOG(pmaphist, "size=%llu, *vstartp=%llx, *vendp=%llx",
534: size, *vstartp, *vendp, 0);
535:
536: size = round_page(size);
537: npage = atop(size);
538:
539: for (bank = uvm_physseg_get_first(); uvm_physseg_valid_p(bank);
540: bank = uvm_physseg_get_next(bank)) {
541:
542: bank_npage = uvm_physseg_get_avail_end(bank) -
543: uvm_physseg_get_avail_start(bank);
544: if (npage <= bank_npage)
545: break;
546: }
547:
1.23 maxv 548: if (!uvm_physseg_valid_p(bank)) {
549: panic("%s: no memory", __func__);
550: }
1.2 ryo 551:
552: /* Steal pages */
553: pa = ptoa(uvm_physseg_get_avail_start(bank));
554: va = AARCH64_PA_TO_KVA(pa);
555: uvm_physseg_unplug(atop(pa), npage);
556:
557: for (; npage > 0; npage--, pa += PAGE_SIZE)
558: pmap_zero_page(pa);
559:
560: return va;
1.1 matt 561: }
562:
563: void
564: pmap_reference(struct pmap *pm)
565: {
1.2 ryo 566: atomic_inc_uint(&pm->pm_refcnt);
1.1 matt 567: }
568:
1.36 ryo 569: paddr_t
1.40 ryo 570: pmap_alloc_pdp(struct pmap *pm, struct vm_page **pgp, int flags, bool waitok)
1.1 matt 571: {
1.2 ryo 572: paddr_t pa;
1.36 ryo 573: struct vm_page *pg;
1.2 ryo 574:
575: UVMHIST_FUNC(__func__);
576: UVMHIST_CALLED(pmaphist);
577:
578: if (uvm.page_init_done) {
1.40 ryo 579: int aflags = ((flags & PMAP_CANFAIL) ? 0 : UVM_PGA_USERESERVE) |
580: UVM_PGA_ZERO;
1.36 ryo 581: retry:
1.40 ryo 582: pg = uvm_pagealloc(NULL, 0, NULL, aflags);
1.36 ryo 583: if (pg == NULL) {
584: if (waitok) {
585: uvm_wait("pmap_alloc_pdp");
586: goto retry;
587: }
588: return POOL_PADDR_INVALID;
589: }
590:
1.64 ryo 591: LIST_INSERT_HEAD(&pm->pm_vmlist, pg, mdpage.mdpg_vmlist);
1.36 ryo 592: pg->flags &= ~PG_BUSY; /* never busy */
593: pg->wire_count = 1; /* max = 1 + Ln_ENTRIES = 513 */
1.2 ryo 594: pa = VM_PAGE_TO_PHYS(pg);
1.36 ryo 595: PMAP_COUNT(pdp_alloc);
1.2 ryo 596:
1.36 ryo 597: VM_PAGE_TO_MD(pg)->mdpg_ptep_parent = NULL;
1.2 ryo 598:
1.73 skrll 599: struct pmap_page *pp = VM_PAGE_TO_PP(pg);
600: pp->pp_flags = 0;
601:
1.2 ryo 602: } else {
603: /* uvm_pageboot_alloc() returns AARCH64 KSEG address */
1.36 ryo 604: pg = NULL;
1.2 ryo 605: pa = AARCH64_KVA_TO_PA(
606: uvm_pageboot_alloc(Ln_TABLE_SIZE));
607: PMAP_COUNT(pdp_alloc_boot);
608: }
1.36 ryo 609: if (pgp != NULL)
610: *pgp = pg;
611:
612: UVMHIST_LOG(pmaphist, "pa=%llx, pg=%llx",
613: pa, pg, 0, 0);
614:
615: return pa;
616: }
1.2 ryo 617:
1.36 ryo 618: static void
619: pmap_free_pdp(struct pmap *pm, struct vm_page *pg)
620: {
1.64 ryo 621: LIST_REMOVE(pg, mdpage.mdpg_vmlist);
1.36 ryo 622: pg->flags |= PG_BUSY;
623: pg->wire_count = 0;
1.73 skrll 624:
1.74 ! tnn 625: #ifdef DIAGNOSTIC
1.73 skrll 626: struct pmap_page *pp = VM_PAGE_TO_PP(pg);
1.74 ! tnn 627: #endif
1.73 skrll 628: KASSERT(LIST_EMPTY(&pp->pp_pvhead));
1.2 ryo 629:
1.36 ryo 630: uvm_pagefree(pg);
631: PMAP_COUNT(pdp_free);
1.1 matt 632: }
633:
1.40 ryo 634: /* free empty page table pages */
635: static int
636: _pmap_sweep_pdp(struct pmap *pm)
637: {
638: struct vm_page *pg, *tmp;
1.41 mrg 639: pd_entry_t *ptep_in_parent, opte __diagused;
1.40 ryo 640: paddr_t pa, pdppa;
641: int nsweep;
1.41 mrg 642: uint16_t wirecount __diagused;
1.40 ryo 643:
644: nsweep = 0;
1.64 ryo 645: LIST_FOREACH_SAFE(pg, &pm->pm_vmlist, mdpage.mdpg_vmlist, tmp) {
1.40 ryo 646: if (pg->wire_count != 1)
647: continue;
648:
649: pa = VM_PAGE_TO_PHYS(pg);
650: if (pa == pm->pm_l0table_pa)
651: continue;
652:
653: ptep_in_parent = VM_PAGE_TO_MD(pg)->mdpg_ptep_parent;
654: if (ptep_in_parent == NULL) {
655: /* no parent */
656: pmap_free_pdp(pm, pg);
657: nsweep++;
658: continue;
659: }
660:
661: /* unlink from parent */
662: opte = atomic_swap_64(ptep_in_parent, 0);
663: KASSERT(lxpde_valid(opte));
1.53 skrll 664: wirecount = atomic_add_32_nv(&pg->wire_count, -1); /* 1 -> 0 */
1.40 ryo 665: KASSERT(wirecount == 0);
666: pmap_free_pdp(pm, pg);
667: nsweep++;
668:
669: /* L3->L2->L1. no need for L0 */
670: pdppa = AARCH64_KVA_TO_PA(trunc_page((vaddr_t)ptep_in_parent));
671: if (pdppa == pm->pm_l0table_pa)
672: continue;
673:
674: pg = PHYS_TO_VM_PAGE(pdppa);
675: KASSERT(pg != NULL);
676: KASSERTMSG(pg->wire_count >= 1,
677: "wire_count=%d", pg->wire_count);
678: /* decrement wire_count of parent */
1.53 skrll 679: wirecount = atomic_add_32_nv(&pg->wire_count, -1);
1.40 ryo 680: KASSERTMSG(pg->wire_count <= (Ln_ENTRIES + 1),
681: "pm=%p[%d], pg=%p, wire_count=%d",
682: pm, pm->pm_asid, pg, pg->wire_count);
683: }
684: atomic_swap_uint(&pm->pm_idlepdp, 0);
685:
686: return nsweep;
687: }
688:
1.2 ryo 689: static void
690: _pmap_free_pdp_all(struct pmap *pm)
1.1 matt 691: {
1.2 ryo 692: struct vm_page *pg, *tmp;
693:
1.64 ryo 694: LIST_FOREACH_SAFE(pg, &pm->pm_vmlist, mdpage.mdpg_vmlist, tmp) {
1.36 ryo 695: pmap_free_pdp(pm, pg);
1.2 ryo 696: }
1.1 matt 697: }
698:
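/*
 * kernel page table pages are allocated on demand by _pmap_enter(),
 * so growing the kernel VA only needs to extend the KASAN shadow
 * (if enabled) and record the new high-water mark.
 */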
699: vaddr_t
700: pmap_growkernel(vaddr_t maxkvaddr)
701: {
1.2 ryo 702: UVMHIST_FUNC(__func__);
703: UVMHIST_CALLED(pmaphist);
704:
705: UVMHIST_LOG(pmaphist, "maxkvaddr=%llx, pmap_maxkvaddr=%llx",
706: maxkvaddr, pmap_maxkvaddr, 0, 0);
707:
1.54 ryo 708: kasan_shadow_map((void *)pmap_maxkvaddr,
709: (size_t)(maxkvaddr - pmap_maxkvaddr));
1.52 skrll 710:
1.2 ryo 711: pmap_maxkvaddr = maxkvaddr;
712:
1.1 matt 713: return maxkvaddr;
714: }
715:
1.2 ryo 716: bool
1.72 jmcneill 717: pmap_extract(struct pmap *pm, vaddr_t va, paddr_t *pap)
1.2 ryo 718: {
719:
1.72 jmcneill 720: return pmap_extract_coherency(pm, va, pap, NULL);
1.2 ryo 721: }
722:
723: bool
1.72 jmcneill 724: pmap_extract_coherency(struct pmap *pm, vaddr_t va, paddr_t *pap,
725: bool *coherencyp)
1.2 ryo 726: {
1.36 ryo 727: pt_entry_t *ptep, pte;
1.2 ryo 728: paddr_t pa;
1.27 ryo 729: vsize_t blocksize = 0;
1.66 ryo 730: int space;
1.72 jmcneill 731: bool coherency;
1.28 ryo 732: extern char __kernel_text[];
733: extern char _end[];
1.2 ryo 734:
1.72 jmcneill 735: coherency = false;
736:
1.66 ryo 737: space = aarch64_addressspace(va);
738: if (pm == pmap_kernel()) {
739: if (space != AARCH64_ADDRSPACE_UPPER)
740: return false;
741:
742: if (IN_RANGE(va, (vaddr_t)__kernel_text, (vaddr_t)_end)) {
743: /* kernel text/data/bss are definitely linear mapped */
744: pa = KERN_VTOPHYS(va);
745: goto mapped;
746: } else if (IN_KSEG_ADDR(va)) {
747: /*
   748: 			 * KSEG is also linearly mapped, but areas with no
   749: 			 * physical memory behind them are left unmapped.
   750: 			 * do a fast lookup using the S1E1R/PAR_EL1 registers.
751: */
752: register_t s = daif_disable(DAIF_I|DAIF_F);
753: reg_s1e1r_write(va);
754: __asm __volatile ("isb");
755: uint64_t par = reg_par_el1_read();
1.67 ryo 756: reg_daif_write(s);
1.66 ryo 757:
758: if (par & PAR_F)
759: return false;
760: pa = (__SHIFTOUT(par, PAR_PA) << PAR_PA_SHIFT) +
761: (va & __BITS(PAR_PA_SHIFT - 1, 0));
762: goto mapped;
763: }
1.28 ryo 764: } else {
1.66 ryo 765: if (space != AARCH64_ADDRSPACE_LOWER)
1.35 ryo 766: return false;
1.28 ryo 767: }
1.2 ryo 768:
1.66 ryo 769: /*
   770: 	 * other areas cannot be examined using the PAR_EL1 register,
   771: 	 * because the page may be in an access-fault state due to
   772: 	 * reference-bit emulation.
773: */
774: ptep = _pmap_pte_lookup_bs(pm, va, &blocksize);
775: if (ptep == NULL)
776: return false;
777: pte = *ptep;
778: if (!lxpde_valid(pte))
779: return false;
780: pa = lxpde_pa(pte) + (va & (blocksize - 1));
781:
1.72 jmcneill 782: switch (pte & LX_BLKPAG_ATTR_MASK) {
783: case LX_BLKPAG_ATTR_NORMAL_NC:
784: case LX_BLKPAG_ATTR_DEVICE_MEM:
785: case LX_BLKPAG_ATTR_DEVICE_MEM_SO:
786: coherency = true;
787: break;
788: }
789:
1.66 ryo 790: mapped:
1.27 ryo 791: if (pap != NULL)
1.2 ryo 792: *pap = pa;
1.72 jmcneill 793: if (coherencyp != NULL)
794: *coherencyp = coherency;
1.27 ryo 795: return true;
1.2 ryo 796: }
797:
798: paddr_t
799: vtophys(vaddr_t va)
800: {
801: struct pmap *pm;
802: paddr_t pa;
803:
1.66 ryo 804: /* even if TBI is disabled, AARCH64_ADDRTOP_TAG means KVA */
805: if ((uint64_t)va & AARCH64_ADDRTOP_TAG)
1.28 ryo 806: pm = pmap_kernel();
807: else
1.2 ryo 808: pm = curlwp->l_proc->p_vmspace->vm_map.pmap;
1.28 ryo 809:
810: if (pmap_extract(pm, va, &pa) == false)
1.2 ryo 811: return VTOPHYS_FAILED;
812: return pa;
813: }
814:
1.36 ryo 815: /*
   816:  * return a pointer to the pte, regardless of whether the entry is valid or not.
817: */
1.2 ryo 818: static pt_entry_t *
1.27 ryo 819: _pmap_pte_lookup_bs(struct pmap *pm, vaddr_t va, vsize_t *bs)
1.2 ryo 820: {
1.27 ryo 821: pt_entry_t *ptep;
822: pd_entry_t *l0, *l1, *l2, *l3;
823: pd_entry_t pde;
824: vsize_t blocksize;
825: unsigned int idx;
826:
827: /*
828: * traverse L0 -> L1 -> L2 -> L3
829: */
830: blocksize = L0_SIZE;
831: l0 = pm->pm_l0table;
832: idx = l0pde_index(va);
1.35 ryo 833: ptep = &l0[idx];
834: pde = *ptep;
835: if (!l0pde_valid(pde))
1.27 ryo 836: goto done;
837:
838: blocksize = L1_SIZE;
839: l1 = (pd_entry_t *)AARCH64_PA_TO_KVA(l0pde_pa(pde));
840: idx = l1pde_index(va);
1.35 ryo 841: ptep = &l1[idx];
842: pde = *ptep;
843: if (!l1pde_valid(pde) || l1pde_is_block(pde))
1.27 ryo 844: goto done;
845:
846: blocksize = L2_SIZE;
847: l2 = (pd_entry_t *)AARCH64_PA_TO_KVA(l1pde_pa(pde));
848: idx = l2pde_index(va);
1.35 ryo 849: ptep = &l2[idx];
850: pde = *ptep;
851: if (!l2pde_valid(pde) || l2pde_is_block(pde))
1.27 ryo 852: goto done;
1.2 ryo 853:
1.27 ryo 854: blocksize = L3_SIZE;
855: l3 = (pd_entry_t *)AARCH64_PA_TO_KVA(l2pde_pa(pde));
856: idx = l3pte_index(va);
857: ptep = &l3[idx];
1.2 ryo 858:
1.27 ryo 859: done:
860: if (bs != NULL)
861: *bs = blocksize;
862: return ptep;
863: }
1.2 ryo 864:
1.27 ryo 865: static pt_entry_t *
866: _pmap_pte_lookup_l3(struct pmap *pm, vaddr_t va)
867: {
868: pt_entry_t *ptep;
869: vsize_t blocksize = 0;
870:
871: ptep = _pmap_pte_lookup_bs(pm, va, &blocksize);
872: if ((ptep != NULL) && (blocksize == L3_SIZE))
1.2 ryo 873: return ptep;
874:
875: return NULL;
876: }
877:
1.29 ryo 878: void
879: pmap_icache_sync_range(pmap_t pm, vaddr_t sva, vaddr_t eva)
880: {
1.35 ryo 881: pt_entry_t *ptep = NULL, pte;
1.29 ryo 882: vaddr_t va;
883: vsize_t blocksize = 0;
884:
1.66 ryo 885: KASSERT_PM_ADDR(pm, sva);
886:
1.29 ryo 887: pm_lock(pm);
888:
1.35 ryo 889: for (va = sva; va < eva; va = (va + blocksize) & ~(blocksize - 1)) {
   890: 		/* does va belong to the same L3 table as before? */
891: if ((blocksize == L3_SIZE) && ((va & L3INDEXMASK) != 0)) {
892: ptep++;
893: } else {
894: ptep = _pmap_pte_lookup_bs(pm, va, &blocksize);
895: if (ptep == NULL)
896: break;
897: }
898:
899: pte = *ptep;
900: if (lxpde_valid(pte)) {
1.29 ryo 901: vaddr_t eob = (va + blocksize) & ~(blocksize - 1);
   902: 			vsize_t len = ulmin(eva, eob) - va;
903:
904: if (l3pte_writable(pte)) {
905: cpu_icache_sync_range(va, len);
906: } else {
907: /*
   908: 				 * change to writable temporarily
   909: 				 * to do cpu_icache_sync_range()
910: */
911: pt_entry_t opte = pte;
912: pte = pte & ~(LX_BLKPAG_AF|LX_BLKPAG_AP);
913: pte |= (LX_BLKPAG_AF|LX_BLKPAG_AP_RW);
914: atomic_swap_64(ptep, pte);
915: AARCH64_TLBI_BY_ASID_VA(pm->pm_asid, va, true);
916: cpu_icache_sync_range(va, len);
917: atomic_swap_64(ptep, opte);
918: AARCH64_TLBI_BY_ASID_VA(pm->pm_asid, va, true);
919: }
920: }
921: }
922:
923: pm_unlock(pm);
924: }
925:
1.48 maya 926: /*
927: * Routine: pmap_procwr
928: *
929: * Function:
930: * Synchronize caches corresponding to [addr, addr+len) in p.
931: *
932: */
933: void
934: pmap_procwr(struct proc *p, vaddr_t va, int len)
935: {
936:
937: /* We only need to do anything if it is the current process. */
938: if (p == curproc)
939: cpu_icache_sync_range(va, len);
940: }
941:
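/*
 * build the protection bits of a PTE. the protections requested by
 * UVM are remembered in the software bits LX_BLKPAG_OS_READ/OS_WRITE
 * for referenced/modified emulation, while the hardware AF/AP bits
 * only grant what `protmask' allows:
 *
 *	granted		AF	AP
 *	-------		--	--
 *	(none)		0	RO	(any access faults)
 *	read		1	RO
 *	write, r/w	1	RW
 *
 * a fault on a not-yet-granted access is later resolved by the
 * referenced/modified emulation fixup (cf. the fixup_referenced and
 * fixup_modified counters above).
 */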
1.2 ryo 942: static pt_entry_t
1.18 ryo 943: _pmap_pte_adjust_prot(pt_entry_t pte, vm_prot_t prot, vm_prot_t protmask,
944: bool user)
1.1 matt 945: {
1.2 ryo 946: vm_prot_t masked;
1.18 ryo 947: pt_entry_t xn;
1.2 ryo 948:
949: masked = prot & protmask;
950: pte &= ~(LX_BLKPAG_OS_RWMASK|LX_BLKPAG_AF|LX_BLKPAG_AP);
951:
952: /* keep prot for ref/mod emulation */
953: switch (prot & (VM_PROT_READ|VM_PROT_WRITE)) {
954: case 0:
955: default:
956: break;
957: case VM_PROT_READ:
958: pte |= LX_BLKPAG_OS_READ;
959: break;
960: case VM_PROT_WRITE:
961: case VM_PROT_READ|VM_PROT_WRITE:
962: pte |= (LX_BLKPAG_OS_READ|LX_BLKPAG_OS_WRITE);
963: break;
964: }
965:
966: switch (masked & (VM_PROT_READ|VM_PROT_WRITE)) {
967: case 0:
968: default:
   969: 		/* not accessible: LX_BLKPAG_AF is not set */
970: pte |= LX_BLKPAG_AP_RO;
971: break;
972: case VM_PROT_READ:
973: /* actual permission of pte */
974: pte |= LX_BLKPAG_AF;
975: pte |= LX_BLKPAG_AP_RO;
976: break;
977: case VM_PROT_WRITE:
978: case VM_PROT_READ|VM_PROT_WRITE:
979: /* actual permission of pte */
980: pte |= LX_BLKPAG_AF;
981: pte |= LX_BLKPAG_AP_RW;
982: break;
983: }
984:
1.18      ryo       985: 	/* executable for kernel or user? first mark both never-exec */
986: pte |= (LX_BLKPAG_UXN|LX_BLKPAG_PXN);
987: /* and either to executable */
988: xn = user ? LX_BLKPAG_UXN : LX_BLKPAG_PXN;
989: if (prot & VM_PROT_EXECUTE)
1.35 ryo 990: pte &= ~xn;
1.2 ryo 991:
992: return pte;
993: }
994:
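/*
 * select the memory attributes of a PTE from the PMAP_* flags:
 *
 *	PMAP_DEV_SO			-> Device-nGnRnE
 *	PMAP_DEV			-> Device-nGnRE
 *	PMAP_NOCACHE, PMAP_NOCACHE_OVR,
 *	PMAP_WRITE_COMBINE		-> Normal, non-cacheable
 *	PMAP_WRITE_BACK, default	-> Normal, write-back cacheable
 */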
995: static pt_entry_t
996: _pmap_pte_adjust_cacheflags(pt_entry_t pte, u_int flags)
997: {
998:
999: pte &= ~LX_BLKPAG_ATTR_MASK;
1000:
1.58 jmcneill 1001: switch (flags & (PMAP_CACHE_MASK|PMAP_DEV_MASK)) {
1002: case PMAP_DEV_SO ... PMAP_DEV_SO | PMAP_CACHE_MASK:
1003: pte |= LX_BLKPAG_ATTR_DEVICE_MEM_SO; /* Device-nGnRnE */
1004: break;
1.2 ryo 1005: case PMAP_DEV ... PMAP_DEV | PMAP_CACHE_MASK:
1.57 jmcneill 1006: pte |= LX_BLKPAG_ATTR_DEVICE_MEM; /* Device-nGnRE */
1.2 ryo 1007: break;
1008: case PMAP_NOCACHE:
1009: case PMAP_NOCACHE_OVR:
1010: case PMAP_WRITE_COMBINE:
1011: pte |= LX_BLKPAG_ATTR_NORMAL_NC; /* only no-cache */
1012: break;
1013: case PMAP_WRITE_BACK:
1014: case 0:
1015: default:
1016: pte |= LX_BLKPAG_ATTR_NORMAL_WB;
1017: break;
1018: }
1019:
1020: return pte;
1021: }
1022:
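/*
 * unlink the pv_entry of (pm, va) from pp's pv list and return it.
 * the caller must hold the pv lock of pp and is responsible for
 * freeing the returned pv_entry. returns NULL if no entry was found.
 */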
1.14 ryo 1023: static struct pv_entry *
1.62 ryo 1024: _pmap_remove_pv(struct pmap_page *pp, struct pmap *pm, vaddr_t va, pt_entry_t pte)
1.2 ryo 1025: {
1026: struct pv_entry *pv;
1027:
1028: UVMHIST_FUNC(__func__);
1029: UVMHIST_CALLED(pmaphist);
1030:
1.62 ryo 1031: UVMHIST_LOG(pmaphist, "pp=%p, pm=%p, va=%llx, pte=%llx",
1032: pp, pm, va, pte);
1.2 ryo 1033:
1.64 ryo 1034: LIST_FOREACH(pv, &pp->pp_pvhead, pv_link) {
1.2 ryo 1035: if ((pm == pv->pv_pmap) && (va == pv->pv_va)) {
1.64 ryo 1036: LIST_REMOVE(pv, pv_link);
1.5 ryo 1037: PMAP_COUNT(pv_remove);
1.2 ryo 1038: break;
1039: }
1040: }
1.5 ryo 1041: #ifdef PMAPCOUNTERS
1042: if (pv == NULL) {
1043: PMAP_COUNT(pv_remove_nopv);
1044: }
1045: #endif
1.2 ryo 1046:
1.14 ryo 1047: return pv;
1.2 ryo 1048: }
1049:
1050: #if defined(PMAP_PV_DEBUG) || defined(DDB)
1051:
1052: static char *
1053: str_vmflags(uint32_t flags)
1054: {
1055: static int idx = 0;
1056: static char buf[4][32]; /* XXX */
1057: char *p;
1058:
1059: p = buf[idx];
1060: idx = (idx + 1) & 3;
1061:
1062: p[0] = (flags & VM_PROT_READ) ? 'R' : '-';
1063: p[1] = (flags & VM_PROT_WRITE) ? 'W' : '-';
1064: p[2] = (flags & VM_PROT_EXECUTE) ? 'X' : '-';
1065: if (flags & PMAP_WIRED)
1066: memcpy(&p[3], ",WIRED\0", 7);
1067: else
1068: p[3] = '\0';
1069:
1070: return p;
1071: }
1072:
1073: static void
1.38 ryo 1074: pg_dump(struct vm_page *pg, void (*pr)(const char *, ...) __printflike(1, 2))
1.2 ryo 1075: {
1076: pr("pg=%p\n", pg);
1077: pr(" pg->uanon = %p\n", pg->uanon);
1078: pr(" pg->uobject = %p\n", pg->uobject);
1079: pr(" pg->offset = %zu\n", pg->offset);
1080: pr(" pg->flags = %u\n", pg->flags);
1081: pr(" pg->loan_count = %u\n", pg->loan_count);
1082: pr(" pg->wire_count = %u\n", pg->wire_count);
1083: pr(" pg->pqflags = %u\n", pg->pqflags);
1.51 ad 1084: pr(" pg->phys_addr = %016lx\n", VM_PAGE_TO_PHYS(pg));
1.2 ryo 1085: }
1086:
1087: static void
1.62 ryo 1088: pv_dump(struct pmap_page *pp, void (*pr)(const char *, ...) __printflike(1, 2))
1.2 ryo 1089: {
1090: struct pv_entry *pv;
1091: int i;
1092:
1093: i = 0;
1094:
1.62 ryo 1095: pr("pp=%p\n", pp);
1096: pr(" pp->pp_flags=%08x %s\n", pp->pp_flags,
1097: str_vmflags(pp->pp_flags));
1.2 ryo 1098:
1.64 ryo 1099: LIST_FOREACH(pv, &pp->pp_pvhead, pv_link) {
1.2 ryo 1100: pr(" pv[%d] pv=%p\n",
1101: i, pv);
1.35 ryo 1102: pr(" pv[%d].pv_pmap = %p (asid=%d)\n",
1.2 ryo 1103: i, pv->pv_pmap, pv->pv_pmap->pm_asid);
1.35 ryo 1104: pr(" pv[%d].pv_va = %016lx (color=%d)\n",
1.2 ryo 1105: i, pv->pv_va, _pmap_color(pv->pv_va));
1.35 ryo 1106: pr(" pv[%d].pv_pa = %016lx (color=%d)\n",
1.2 ryo 1107: i, pv->pv_pa, _pmap_color(pv->pv_pa));
1.35 ryo 1108: pr(" pv[%d].pv_ptep = %p\n",
1109: i, pv->pv_ptep);
1.2 ryo 1110: i++;
1111: }
1112: }
1113: #endif /* PMAP_PV_DEBUG & DDB */
1114:
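/*
 * register the mapping (pm, va) in pp's pv list, unless it is
 * already there. *pvp is a pv_entry preallocated by the caller; it
 * is consumed (and *pvp set to NULL) when a new entry is linked.
 * returns ENOMEM if a new entry is needed but *pvp is NULL.
 */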
1115: static int
1.62 ryo 1116: _pmap_enter_pv(struct pmap_page *pp, struct pmap *pm, struct pv_entry **pvp,
1.35 ryo 1117: vaddr_t va, pt_entry_t *ptep, paddr_t pa, u_int flags)
1.2 ryo 1118: {
1119: struct pv_entry *pv;
1120:
1121: UVMHIST_FUNC(__func__);
1122: UVMHIST_CALLED(pmaphist);
1123:
1.62 ryo 1124: UVMHIST_LOG(pmaphist, "pp=%p, pm=%p, va=%llx, pa=%llx", pp, pm, va, pa);
1.2 ryo 1125: UVMHIST_LOG(pmaphist, "ptep=%p, flags=%08x", ptep, flags, 0, 0);
1126:
1127: /* pv is already registered? */
1.64 ryo 1128: LIST_FOREACH(pv, &pp->pp_pvhead, pv_link) {
1.2 ryo 1129: if ((pm == pv->pv_pmap) && (va == pv->pv_va)) {
1130: break;
1131: }
1132: }
1133:
1.3 ryo 1134: if (pv == NULL) {
1.12 ryo 1135: /*
  1136: 		 * create and link a new pv.
  1137: 		 * the pv was already allocated at the beginning of _pmap_enter().
1138: */
1139: pv = *pvp;
1.2 ryo 1140: if (pv == NULL)
1141: return ENOMEM;
1.12 ryo 1142: *pvp = NULL;
1.2 ryo 1143:
1144: pv->pv_pmap = pm;
1145: pv->pv_va = va;
1146: pv->pv_pa = pa;
1147: pv->pv_ptep = ptep;
1148:
1.64 ryo 1149: LIST_INSERT_HEAD(&pp->pp_pvhead, pv, pv_link);
1.2 ryo 1150: PMAP_COUNT(pv_enter);
1151:
1152: #ifdef PMAP_PV_DEBUG
1.64 ryo 1153: if (!LIST_EMPTY(&pp->pp_pvhead)){
1.2 ryo 1154: printf("pv %p alias added va=%016lx -> pa=%016lx\n",
1155: pv, va, pa);
1.62 ryo 1156: pv_dump(pp, printf);
1.2 ryo 1157: }
1158: #endif
1159: }
1.35 ryo 1160:
1.1 matt 1161: return 0;
1162: }
1163:
1164: void
1.2 ryo 1165: pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
1.1 matt 1166: {
1.2 ryo 1167: int s;
1168:
1169: s = splvm();
1170: _pmap_enter(pmap_kernel(), va, pa, prot, flags | PMAP_WIRED, true);
1171: splx(s);
1.1 matt 1172: }
1173:
1174: void
1.2 ryo 1175: pmap_kremove(vaddr_t va, vsize_t size)
1.1 matt 1176: {
1.2 ryo 1177: struct pmap *kpm = pmap_kernel();
1.3 ryo 1178: int s;
1.2 ryo 1179:
1180: UVMHIST_FUNC(__func__);
1181: UVMHIST_CALLED(pmaphist);
1182:
1183: UVMHIST_LOG(pmaphist, "va=%llx, size=%llx", va, size, 0, 0);
1184:
1185: KDASSERT((va & PGOFSET) == 0);
1186: KDASSERT((size & PGOFSET) == 0);
1187:
1.28 ryo 1188: KDASSERT(!IN_KSEG_ADDR(va));
1189: KDASSERT(IN_RANGE(va, VM_MIN_KERNEL_ADDRESS, VM_MAX_KERNEL_ADDRESS));
1.2 ryo 1190:
1.3 ryo 1191: s = splvm();
1.35 ryo 1192: pm_lock(kpm);
1193: _pmap_remove(kpm, va, va + size, true, NULL);
1194: pm_unlock(kpm);
1.3 ryo 1195: splx(s);
1.2 ryo 1196: }
1197:
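/*
 * re-apply protection to one existing mapping of pp. the new PTE
 * permission is the intersection of the requested prot, the
 * permission currently encoded in the PTE, and the page's
 * referenced/modified attributes, keeping ref/mod emulation intact.
 */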
1198: static void
1.62 ryo 1199: _pmap_protect_pv(struct pmap_page *pp, struct pv_entry *pv, vm_prot_t prot)
1.2 ryo 1200: {
1201: pt_entry_t *ptep, pte;
1202: vm_prot_t pteprot;
1203: uint32_t mdattr;
1.18 ryo 1204: const bool user = (pv->pv_pmap != pmap_kernel());
1.2 ryo 1205:
1206: UVMHIST_FUNC(__func__);
1207: UVMHIST_CALLED(pmaphist);
1208:
1.62 ryo 1209: UVMHIST_LOG(pmaphist, "pp=%p, pv=%p, prot=%08x", pp, pv, prot, 0);
1.2 ryo 1210:
1211: /* get prot mask from referenced/modified */
1.62 ryo 1212: mdattr = pp->pp_flags &
1.2 ryo 1213: (VM_PROT_READ | VM_PROT_WRITE);
1214:
1.11 ryo 1215: pm_lock(pv->pv_pmap);
1.2 ryo 1216:
1217: ptep = pv->pv_ptep;
1218: pte = *ptep;
1219:
1220: /* get prot mask from pte */
1221: pteprot = 0;
1.3 ryo 1222: if (pte & LX_BLKPAG_AF)
1.2 ryo 1223: pteprot |= VM_PROT_READ;
1.3 ryo 1224: if ((pte & LX_BLKPAG_AP) == LX_BLKPAG_AP_RW)
1.2 ryo 1225: pteprot |= VM_PROT_WRITE;
1.18 ryo 1226: if (l3pte_executable(pte, user))
1.2 ryo 1227: pteprot |= VM_PROT_EXECUTE;
1228:
1229: /* new prot = prot & pteprot & mdattr */
1.18 ryo 1230: pte = _pmap_pte_adjust_prot(pte, prot & pteprot, mdattr, user);
1.2 ryo 1231: atomic_swap_64(ptep, pte);
1.19 ryo 1232: AARCH64_TLBI_BY_ASID_VA(pv->pv_pmap->pm_asid, pv->pv_va, true);
1.2 ryo 1233:
1.11 ryo 1234: pm_unlock(pv->pv_pmap);
1.1 matt 1235: }
1236:
1237: void
1238: pmap_protect(struct pmap *pm, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
1239: {
1.35 ryo 1240: pt_entry_t *ptep = NULL, pte;
1.2 ryo 1241: vaddr_t va;
1.35 ryo 1242: vsize_t blocksize = 0;
1.18 ryo 1243: const bool user = (pm != pmap_kernel());
1.2 ryo 1244:
1245: KASSERT((prot & VM_PROT_READ) || !(prot & VM_PROT_WRITE));
1246:
1247: UVMHIST_FUNC(__func__);
1248: UVMHIST_CALLED(pmaphist);
1249:
1250: UVMHIST_LOG(pmaphist, "pm=%p, sva=%016lx, eva=%016lx, prot=%08x",
1251: pm, sva, eva, prot);
1252:
1.28 ryo 1253: KASSERT_PM_ADDR(pm, sva);
1.2 ryo 1254: KASSERT(!IN_KSEG_ADDR(sva));
1255:
1.61 ryo 1256: /* PROT_EXEC requires implicit PROT_READ */
1257: if (prot & VM_PROT_EXECUTE)
1258: prot |= VM_PROT_READ;
1259:
1.2 ryo 1260: if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
1261: PMAP_COUNT(protect_remove_fallback);
1262: pmap_remove(pm, sva, eva);
1263: return;
1264: }
1265: PMAP_COUNT(protect);
1266:
1267: KDASSERT((sva & PAGE_MASK) == 0);
1268: KDASSERT((eva & PAGE_MASK) == 0);
1269:
1.11 ryo 1270: pm_lock(pm);
1.2 ryo 1271:
1.35 ryo 1272: for (va = sva; va < eva; va = (va + blocksize) & ~(blocksize - 1)) {
1.13 ryo 1273: #ifdef UVMHIST
1274: pt_entry_t opte;
1275: #endif
1.2 ryo 1276: struct vm_page *pg;
1.62 ryo 1277: struct pmap_page *pp;
1.2 ryo 1278: paddr_t pa;
1279: uint32_t mdattr;
1280: bool executable;
1281:
1.35      ryo      1282: 		/* does va belong to the same L3 table as before? */
1283: if ((blocksize == L3_SIZE) && ((va & L3INDEXMASK) != 0))
1284: ptep++;
1285: else
1286: ptep = _pmap_pte_lookup_bs(pm, va, &blocksize);
1.2 ryo 1287:
1288: pte = *ptep;
1.35 ryo 1289: if (!lxpde_valid(pte)) {
1.2 ryo 1290: PMAP_COUNT(protect_none);
1291: continue;
1292: }
1293:
1.35 ryo 1294: pa = lxpde_pa(pte);
1.2 ryo 1295: pg = PHYS_TO_VM_PAGE(pa);
1.62 ryo 1296: if (pg != NULL) {
1297: pp = VM_PAGE_TO_PP(pg);
1298: PMAP_COUNT(protect_managed);
1299: } else {
1.63 ryo 1300: #ifdef __HAVE_PMAP_PV_TRACK
1301: pp = pmap_pv_tracked(pa);
1302: #ifdef PMAPCOUNTERS
1303: if (pp != NULL)
1304: PMAP_COUNT(protect_pvmanaged);
1305: else
1306: PMAP_COUNT(protect_unmanaged);
1307: #endif
1308: #else
1.62 ryo 1309: pp = NULL;
1310: PMAP_COUNT(protect_unmanaged);
1.63 ryo 1311: #endif /* __HAVE_PMAP_PV_TRACK */
1.62 ryo 1312: }
1.2 ryo 1313:
1.62 ryo 1314: if (pp != NULL) {
1.3 ryo 1315: /* get prot mask from referenced/modified */
1.62 ryo 1316: mdattr = pp->pp_flags & (VM_PROT_READ | VM_PROT_WRITE);
1.2 ryo 1317: } else {
1318: /* unmanaged page */
1319: mdattr = VM_PROT_ALL;
1320: }
1321:
1.13 ryo 1322: #ifdef UVMHIST
1323: opte = pte;
1324: #endif
1.18 ryo 1325: executable = l3pte_executable(pte, user);
1326: pte = _pmap_pte_adjust_prot(pte, prot, mdattr, user);
1.13 ryo 1327:
1328: if (!executable && (prot & VM_PROT_EXECUTE)) {
1329: /* non-exec -> exec */
1.15 ryo 1330: UVMHIST_LOG(pmaphist, "icache_sync: "
1331: "pm=%p, va=%016lx, pte: %016lx -> %016lx",
1.13 ryo 1332: pm, va, opte, pte);
1.15 ryo 1333: if (!l3pte_writable(pte)) {
1.19 ryo 1334: PTE_ICACHE_SYNC_PAGE(pte, ptep, pm, va, true);
1.15 ryo 1335: atomic_swap_64(ptep, pte);
1.19 ryo 1336: AARCH64_TLBI_BY_ASID_VA(pm->pm_asid, va, true);
1.15 ryo 1337: } else {
1338: atomic_swap_64(ptep, pte);
1.19 ryo 1339: AARCH64_TLBI_BY_ASID_VA(pm->pm_asid, va, true);
1.15 ryo 1340: cpu_icache_sync_range(va, PAGE_SIZE);
1341: }
1342: } else {
1343: atomic_swap_64(ptep, pte);
1.19 ryo 1344: AARCH64_TLBI_BY_ASID_VA(pm->pm_asid, va, true);
1.15 ryo 1345: }
1.2 ryo 1346: }
1347:
1.11 ryo 1348: pm_unlock(pm);
1.1 matt 1349: }
1350:
1351: void
1.2 ryo 1352: pmap_activate(struct lwp *l)
1353: {
1354: struct pmap *pm = l->l_proc->p_vmspace->vm_map.pmap;
1.47 jmcneill 1355: uint64_t ttbr0, tcr;
1.2 ryo 1356:
1357: UVMHIST_FUNC(__func__);
1358: UVMHIST_CALLED(pmaphist);
1359:
1360: if (pm == pmap_kernel())
1361: return;
1362: if (l != curlwp)
1363: return;
1364:
1365: KASSERT(pm->pm_l0table != NULL);
1366:
1367: UVMHIST_LOG(pmaphist, "lwp=%p (pid=%d)", l, l->l_proc->p_pid, 0, 0);
1368:
1.47 jmcneill 1369: /* Disable translation table walks using TTBR0 */
1370: tcr = reg_tcr_el1_read();
1371: reg_tcr_el1_write(tcr | TCR_EPD0);
1372: __asm __volatile("isb" ::: "memory");
1373:
1.2 ryo 1374: /* XXX */
1375: CTASSERT(PID_MAX <= 65535); /* 16bit ASID */
1376: if (pm->pm_asid == -1)
1377: pm->pm_asid = l->l_proc->p_pid;
1378:
1379: ttbr0 = ((uint64_t)pm->pm_asid << 48) | pm->pm_l0table_pa;
1.34 ryo 1380: cpu_set_ttbr0(ttbr0);
1.2 ryo 1381:
1.47 jmcneill 1382: /* Re-enable translation table walks using TTBR0 */
1383: tcr = reg_tcr_el1_read();
1384: reg_tcr_el1_write(tcr & ~TCR_EPD0);
1385: __asm __volatile("isb" ::: "memory");
1386:
1.2 ryo 1387: pm->pm_activated = true;
1388:
1389: PMAP_COUNT(activate);
1390: }
1391:
1392: void
1393: pmap_deactivate(struct lwp *l)
1.1 matt 1394: {
1.2 ryo 1395: struct pmap *pm = l->l_proc->p_vmspace->vm_map.pmap;
1.47 jmcneill 1396: uint64_t tcr;
1.2 ryo 1397:
1398: UVMHIST_FUNC(__func__);
1399: UVMHIST_CALLED(pmaphist);
1400:
1401: if (pm == pmap_kernel())
1402: return;
1403:
1404: UVMHIST_LOG(pmaphist, "lwp=%p, asid=%d", l, pm->pm_asid, 0, 0);
1405:
1.47 jmcneill 1406: /* Disable translation table walks using TTBR0 */
1407: tcr = reg_tcr_el1_read();
1408: reg_tcr_el1_write(tcr | TCR_EPD0);
1409: __asm __volatile("isb" ::: "memory");
1410:
1.2 ryo 1411: /* XXX */
1412: pm->pm_activated = false;
1413:
1414: PMAP_COUNT(deactivate);
1.1 matt 1415: }
1416:
1.2 ryo 1417: struct pmap *
1418: pmap_create(void)
1.1 matt 1419: {
1.2 ryo 1420: struct pmap *pm;
1421:
1422: UVMHIST_FUNC(__func__);
1423: UVMHIST_CALLED(pmaphist);
1424:
1425: pm = pool_cache_get(&_pmap_cache, PR_WAITOK);
1426: memset(pm, 0, sizeof(*pm));
1427: pm->pm_refcnt = 1;
1.40 ryo 1428: pm->pm_idlepdp = 0;
1.2 ryo 1429: pm->pm_asid = -1;
1.64 ryo 1430: LIST_INIT(&pm->pm_vmlist);
1.6 ryo 1431: mutex_init(&pm->pm_lock, MUTEX_DEFAULT, IPL_VM);
1.39 ryo 1432:
1.40 ryo 1433: pm->pm_l0table_pa = pmap_alloc_pdp(pm, NULL, 0, true);
1.36 ryo 1434: KASSERT(pm->pm_l0table_pa != POOL_PADDR_INVALID);
1435: pm->pm_l0table = (pd_entry_t *)AARCH64_PA_TO_KVA(pm->pm_l0table_pa);
1.2 ryo 1436: KASSERT(((vaddr_t)pm->pm_l0table & (PAGE_SIZE - 1)) == 0);
1437:
1438: UVMHIST_LOG(pmaphist, "pm=%p, pm_l0table=%016lx, pm_l0table_pa=%016lx",
1439: pm, pm->pm_l0table, pm->pm_l0table_pa, 0);
1440:
1441: PMAP_COUNT(create);
1442: return pm;
1.1 matt 1443: }
1444:
1445: void
1.2 ryo 1446: pmap_destroy(struct pmap *pm)
1447: {
1448: unsigned int refcnt;
1449:
1450: UVMHIST_FUNC(__func__);
1451: UVMHIST_CALLED(pmaphist);
1452:
1453: UVMHIST_LOG(pmaphist,
1454: "pm=%p, pm_l0table=%016lx, pm_l0table_pa=%016lx, refcnt=%d",
1455: pm, pm->pm_l0table, pm->pm_l0table_pa, pm->pm_refcnt);
1456:
1457: if (pm == NULL)
1458: return;
1459:
1460: if (pm == pmap_kernel())
1461: panic("cannot destroy kernel pmap");
1462:
1463: refcnt = atomic_dec_uint_nv(&pm->pm_refcnt);
1464: if (refcnt > 0)
1465: return;
1466:
1467: aarch64_tlbi_by_asid(pm->pm_asid);
1468:
1469: _pmap_free_pdp_all(pm);
1470: mutex_destroy(&pm->pm_lock);
1.35 ryo 1471:
1.2 ryo 1472: pool_cache_put(&_pmap_cache, pm);
1473:
1474: PMAP_COUNT(destroy);
1475: }
1476:
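/*
 * remember, in the pdp's vm_page, which PTE in the parent table
 * points at this page descriptor page, so that an empty table can
 * later be unlinked by _pmap_pdp_delref() or _pmap_sweep_pdp().
 */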
1.36 ryo 1477: static inline void
1478: _pmap_pdp_setparent(struct pmap *pm, struct vm_page *pg, pt_entry_t *ptep)
1479: {
1480: if ((pm != pmap_kernel()) && (pg != NULL))
1481: VM_PAGE_TO_MD(pg)->mdpg_ptep_parent = ptep;
1482: }
1483:
1484: /*
  1485:  * increment the reference counter of the page descriptor page.
  1486:  * the reference counter should be equal to
  1487:  * 1 + the number of valid entries the page has.
1488: */
1489: static inline void
1490: _pmap_pdp_addref(struct pmap *pm, paddr_t pdppa, struct vm_page *pdppg_hint)
1491: {
1492: struct vm_page *pg;
1493:
  1494: 	/* kernel L0-L3 pages are never freed */
1495: if (pm == pmap_kernel())
1496: return;
1497: /* no need for L0 page */
1498: if (pm->pm_l0table_pa == pdppa)
1499: return;
1500:
1501: pg = pdppg_hint;
1502: if (pg == NULL)
1503: pg = PHYS_TO_VM_PAGE(pdppa);
1504: KASSERT(pg != NULL);
1505:
1.53 skrll 1506: CTASSERT(sizeof(pg->wire_count) == sizeof(uint32_t));
1507: atomic_add_32(&pg->wire_count, 1);
1.36 ryo 1508:
1509: KASSERTMSG(pg->wire_count <= (Ln_ENTRIES + 1),
1510: "pg=%p, wire_count=%d", pg, pg->wire_count);
1511: }
1512:
1513: /*
  1514:  * decrement the reference counter of the page descriptor page.
  1515:  * if the reference counter becomes 1 (=empty), the page is freed
  1516:  * and true is returned; otherwise false is returned.
  1517:  * kernel pages and the L0 page descriptor page are never freed.
1518: */
1519: static bool
1520: _pmap_pdp_delref(struct pmap *pm, paddr_t pdppa, bool do_free_pdp)
1521: {
1522: struct vm_page *pg;
1523: bool removed;
1524: uint16_t wirecount;
1525:
  1526: 	/* kernel L0-L3 pages are never freed */
1527: if (pm == pmap_kernel())
1528: return false;
1529: /* no need for L0 page */
1530: if (pm->pm_l0table_pa == pdppa)
1531: return false;
1532:
1533: pg = PHYS_TO_VM_PAGE(pdppa);
1534: KASSERT(pg != NULL);
1535:
1.53 skrll 1536: wirecount = atomic_add_32_nv(&pg->wire_count, -1);
1.36 ryo 1537:
1.40 ryo 1538: if (!do_free_pdp) {
1539: /*
  1540: 		 * pm_idlepdp is only counted by pmap_page_protect() with
  1541: 		 * VM_PROT_NONE. it is not exact, since pmap_enter() is not
  1542: 		 * taken into account, but it is a useful hint for sweeping.
1543: */
1544: if (wirecount == 1)
1545: atomic_inc_uint(&pm->pm_idlepdp);
1.36 ryo 1546: return false;
1.40 ryo 1547: }
1.36 ryo 1548:
1549: /* if no reference, free pdp */
1550: removed = false;
1551: while (wirecount == 1) {
1.41 mrg 1552: pd_entry_t *ptep_in_parent, opte __diagused;
1.36 ryo 1553: ptep_in_parent = VM_PAGE_TO_MD(pg)->mdpg_ptep_parent;
1554: if (ptep_in_parent == NULL) {
1555: /* no parent */
1556: pmap_free_pdp(pm, pg);
1557: removed = true;
1558: break;
1559: }
1560:
1561: /* unlink from parent */
1562: opte = atomic_swap_64(ptep_in_parent, 0);
1563: KASSERT(lxpde_valid(opte));
1.53 skrll 1564: wirecount = atomic_add_32_nv(&pg->wire_count, -1); /* 1 -> 0 */
1.36 ryo 1565: KASSERT(wirecount == 0);
1566: pmap_free_pdp(pm, pg);
1567: removed = true;
1568:
1569: /* L3->L2->L1. no need for L0 */
1570: pdppa = AARCH64_KVA_TO_PA(trunc_page((vaddr_t)ptep_in_parent));
1571: if (pdppa == pm->pm_l0table_pa)
1572: break;
1573:
1574: pg = PHYS_TO_VM_PAGE(pdppa);
1575: KASSERT(pg != NULL);
1576: KASSERTMSG(pg->wire_count >= 1,
1577: "wire_count=%d", pg->wire_count);
1578: /* decrement wire_count of parent */
1.53 skrll 1579: wirecount = atomic_add_32_nv(&pg->wire_count, -1);
1.36 ryo 1580: KASSERTMSG(pg->wire_count <= (Ln_ENTRIES + 1),
1581: "pm=%p[%d], pg=%p, wire_count=%d",
1582: pm, pm->pm_asid, pg, pg->wire_count);
1583: }
1584:
1585: return removed;
1586: }
1587:
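/*
 * common body of pmap_enter() and pmap_kenter_pa().
 * with kenter=true (pmap_kenter_pa), the mapping is entered without
 * pv tracking and must not overwrite an existing valid mapping.
 */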
1.2 ryo 1588: static int
1589: _pmap_enter(struct pmap *pm, vaddr_t va, paddr_t pa, vm_prot_t prot,
1590: u_int flags, bool kenter)
1591: {
1.62 ryo 1592: struct vm_page *pdppg, *pdppg0;
1593: struct pmap_page *pp, *opp, *pps[2];
1.14 ryo 1594: struct pv_entry *spv, *opv = NULL;
1.2 ryo 1595: pd_entry_t pde;
1.39 ryo 1596: pt_entry_t attr, pte, opte, *ptep;
1.2 ryo 1597: pd_entry_t *l0, *l1, *l2, *l3;
1.36 ryo 1598: paddr_t pdppa, pdppa0;
1.2 ryo 1599: uint32_t mdattr;
1600: unsigned int idx;
1601: int error = 0;
1602: const bool user = (pm != pmap_kernel());
1.39 ryo 1603: bool need_sync_icache, need_update_pv;
1.19 ryo 1604: bool l3only = true;
1.2 ryo 1605:
1606: UVMHIST_FUNC(__func__);
1607: UVMHIST_CALLED(pmaphist);
1608:
1609: UVMHIST_LOG(pmaphist, "pm=%p, kentermode=%d", pm, kenter, 0, 0);
1610: UVMHIST_LOG(pmaphist, "va=%016lx, pa=%016lx, prot=%08x, flags=%08x",
1611: va, pa, prot, flags);
1612:
1.28 ryo 1613: KASSERT_PM_ADDR(pm, va);
1614: KASSERT(!IN_KSEG_ADDR(va));
1.2 ryo 1615:
1616: #ifdef PMAPCOUNTERS
1617: PMAP_COUNT(mappings);
1618: if (_pmap_color(va) == _pmap_color(pa)) {
1619: if (user) {
1620: PMAP_COUNT(user_mappings);
1621: } else {
1622: PMAP_COUNT(kern_mappings);
1623: }
1624: } else if (flags & PMAP_WIRED) {
1625: if (user) {
1626: PMAP_COUNT(user_mappings_bad_wired);
1627: } else {
1628: PMAP_COUNT(kern_mappings_bad_wired);
1629: }
1630: } else {
1631: if (user) {
1632: PMAP_COUNT(user_mappings_bad);
1633: } else {
1634: PMAP_COUNT(kern_mappings_bad);
1635: }
1636: }
1637: #endif
1638:
1.62 ryo 1639: if (kenter) {
1640: pp = NULL;
1641: } else {
1642: struct vm_page *pg = PHYS_TO_VM_PAGE(pa);
1643: if (pg != NULL) {
1644: pp = VM_PAGE_TO_PP(pg);
1645: PMAP_COUNT(managed_mappings);
1646: } else {
1.63 ryo 1647: #ifdef __HAVE_PMAP_PV_TRACK
1648: pp = pmap_pv_tracked(pa);
1649: #ifdef PMAPCOUNTERS
1650: if (pp != NULL)
1651: PMAP_COUNT(pvmanaged_mappings);
1652: else
1653: PMAP_COUNT(unmanaged_mappings);
1654: #endif
1655: #else
1.62 ryo 1656: pp = NULL;
1657: PMAP_COUNT(unmanaged_mappings);
1.63 ryo 1658: #endif /* __HAVE_PMAP_PV_TRACK */
1.62 ryo 1659: }
1660: }
1.3 ryo 1661:
1.62 ryo 1662: if (pp != NULL) {
1.12 ryo 1663: /*
  1664: 		 * allocate the pv in advance of pm_lock() to avoid self-deadlock,
  1665: 		 * since pool_cache_get() may call pmap_kenter() internally.
1666: */
1667: spv = pool_cache_get(&_pmap_pv_pool, PR_NOWAIT);
1.39 ryo 1668: need_update_pv = true;
1.12 ryo 1669: } else {
1670: spv = NULL;
1.39 ryo 1671: need_update_pv = false;
1.2 ryo 1672: }
1673:
1.12 ryo 1674: pm_lock(pm);
1.2 ryo 1675:
1.40 ryo 1676: if (pm->pm_idlepdp >= PDPSWEEP_TRIGGER) {
1677: if (_pmap_sweep_pdp(pm) != 0) {
1678: /* several L1-L3 page table pages have been freed */
1679: aarch64_tlbi_by_asid(pm->pm_asid);
1680: }
1681: }
1682:
1.2 ryo 1683: /*
1684: * traverse L0 -> L1 -> L2 -> L3 table with growing pdp if needed.
1685: */
1686: l0 = pm->pm_l0table;
1687:
1688: idx = l0pde_index(va);
1689: pde = l0[idx];
1690: if (!l0pde_valid(pde)) {
1.36 ryo 1691: /* no need to increment L0 occupancy. L0 page never freed */
1.40 ryo 1692: pdppa = pmap_alloc_pdp(pm, &pdppg, flags, false); /* L1 pdp */
1.36 ryo 1693: if (pdppa == POOL_PADDR_INVALID) {
1694: if (flags & PMAP_CANFAIL) {
1695: error = ENOMEM;
1.39 ryo 1696: goto fail0;
1.36 ryo 1697: }
1.39 ryo 1698: pm_unlock(pm);
1.36 ryo 1699: panic("%s: cannot allocate L1 table", __func__);
1700: }
1.2 ryo 1701: atomic_swap_64(&l0[idx], pdppa | L0_TABLE);
1.36 ryo 1702: _pmap_pdp_setparent(pm, pdppg, &l0[idx]);
1.19 ryo 1703: l3only = false;
1.2 ryo 1704: } else {
1705: pdppa = l0pde_pa(pde);
1.36 ryo 1706: pdppg = NULL;
1.2 ryo 1707: }
1.9 christos 1708: l1 = (void *)AARCH64_PA_TO_KVA(pdppa);
1.2 ryo 1709:
1710: idx = l1pde_index(va);
1711: pde = l1[idx];
1712: if (!l1pde_valid(pde)) {
1.36 ryo 1713: pdppa0 = pdppa;
1714: pdppg0 = pdppg;
1.40 ryo 1715: pdppa = pmap_alloc_pdp(pm, &pdppg, flags, false); /* L2 pdp */
1.36 ryo 1716: if (pdppa == POOL_PADDR_INVALID) {
1717: if (flags & PMAP_CANFAIL) {
1718: error = ENOMEM;
1.39 ryo 1719: goto fail0;
1.36 ryo 1720: }
1.39 ryo 1721: pm_unlock(pm);
1.36 ryo 1722: panic("%s: cannot allocate L2 table", __func__);
1723: }
1.2 ryo 1724: atomic_swap_64(&l1[idx], pdppa | L1_TABLE);
1.36 ryo 1725: _pmap_pdp_addref(pm, pdppa0, pdppg0); /* L1 occupancy++ */
1726: _pmap_pdp_setparent(pm, pdppg, &l1[idx]);
1.19 ryo 1727: l3only = false;
1.2 ryo 1728: } else {
1729: pdppa = l1pde_pa(pde);
1.36 ryo 1730: pdppg = NULL;
1.2 ryo 1731: }
1.9 christos 1732: l2 = (void *)AARCH64_PA_TO_KVA(pdppa);
1.2 ryo 1733:
1734: idx = l2pde_index(va);
1735: pde = l2[idx];
1736: if (!l2pde_valid(pde)) {
1.36 ryo 1737: pdppa0 = pdppa;
1738: pdppg0 = pdppg;
1.40 ryo 1739: pdppa = pmap_alloc_pdp(pm, &pdppg, flags, false); /* L3 pdp */
1.36 ryo 1740: if (pdppa == POOL_PADDR_INVALID) {
1741: if (flags & PMAP_CANFAIL) {
1742: error = ENOMEM;
1.39 ryo 1743: goto fail0;
1.36 ryo 1744: }
1.39 ryo 1745: pm_unlock(pm);
1.36 ryo 1746: panic("%s: cannot allocate L3 table", __func__);
1747: }
1.2 ryo 1748: atomic_swap_64(&l2[idx], pdppa | L2_TABLE);
1.36 ryo 1749: _pmap_pdp_addref(pm, pdppa0, pdppg0); /* L2 occupancy++ */
1750: _pmap_pdp_setparent(pm, pdppg, &l2[idx]);
1.19 ryo 1751: l3only = false;
1.2 ryo 1752: } else {
1753: pdppa = l2pde_pa(pde);
1.36 ryo 1754: pdppg = NULL;
1.2 ryo 1755: }
1.9 christos 1756: l3 = (void *)AARCH64_PA_TO_KVA(pdppa);
1.2 ryo 1757:
1758: idx = l3pte_index(va);
1759: ptep = &l3[idx]; /* as PTE */
1760:
1.39 ryo 1761: opte = atomic_swap_64(ptep, 0);
1.32 ryo 1762: need_sync_icache = (prot & VM_PROT_EXECUTE);
1.2 ryo 1763:
1.62      ryo      1764: 	/* for lock ordering between the old page and the new page */
1765: pps[0] = pp;
1766: pps[1] = NULL;
1.39 ryo 1767:
1768: /* remap? */
1769: if (l3pte_valid(opte)) {
1770: bool need_remove_pv;
1771:
1.2 ryo 1772: KASSERT(!kenter); /* pmap_kenter_pa() cannot override */
1.39 ryo 1773: #ifdef PMAPCOUNTERS
1.2 ryo 1774: PMAP_COUNT(remappings);
1.39 ryo 1775: if (opte & LX_BLKPAG_OS_WIRED) {
1776: PMSTAT_DEC_WIRED_COUNT(pm);
1777: }
1778: PMSTAT_DEC_RESIDENT_COUNT(pm);
1779: if (user) {
1780: PMAP_COUNT(user_mappings_changed);
1781: } else {
1782: PMAP_COUNT(kern_mappings_changed);
1783: }
1784: #endif
1785: UVMHIST_LOG(pmaphist,
  1786: 		    "va=%016lx is already mapped."
1787: " old-pa=%016lx new-pa=%016lx, old-pte=%016llx\n",
1788: va, l3pte_pa(opte), pa, opte);
1789:
1790: if (pa == l3pte_pa(opte)) {
1791: /* old and new pte have same pa, no need to update pv */
1.62 ryo 1792: need_remove_pv = (pp == NULL);
1.39 ryo 1793: need_update_pv = false;
1794: if (need_sync_icache && l3pte_executable(opte, user))
1.32 ryo 1795: need_sync_icache = false;
1796: } else {
1.39 ryo 1797: need_remove_pv = true;
1798: }
1.40 ryo 1799:
1800: if (need_remove_pv &&
1.62 ryo 1801: ((opp = phys_to_pp(l3pte_pa(opte))) != NULL)) {
1802: /*
  1803: 			 * need to lock both pp and opp (the old pp) in a fixed
  1804: 			 * order to avoid deadlock; note that 'pp' may be NULL.
1805: */
1806: if (pp < opp) {
1807: pps[0] = pp;
1808: pps[1] = opp;
1.40 ryo 1809: } else {
1.62 ryo 1810: pps[0] = opp;
1811: pps[1] = pp;
1.2 ryo 1812: }
1.62 ryo 1813: if (pps[0] != NULL)
1814: pmap_pv_lock(pps[0]);
1815: if (pps[1] != NULL)
1816: pmap_pv_lock(pps[1]);
1817: opv = _pmap_remove_pv(opp, pm, va, opte);
1.40 ryo 1818: } else {
1.62 ryo 1819: if (pp != NULL)
1820: pmap_pv_lock(pp);
1.36 ryo 1821: }
1.40 ryo 1822: } else {
1.62 ryo 1823: if (pp != NULL)
1824: pmap_pv_lock(pp);
1.2 ryo 1825: }
1826:
1.40 ryo 1827: if (!l3pte_valid(opte))
1828: _pmap_pdp_addref(pm, pdppa, pdppg); /* L3 occupancy++ */
1829:
1.20 ryo 1830: /*
  1831: 	 * read permission is treated as an access permission internally.
  1832: 	 * PROT_READ must also be added even if only PROT_WRITE or PROT_EXEC was requested.
1833: */
1.44 ryo 1834: if (prot & (VM_PROT_WRITE|VM_PROT_EXECUTE))
1.20 ryo 1835: prot |= VM_PROT_READ;
1.46 ryo 1836: if (flags & (VM_PROT_WRITE|VM_PROT_EXECUTE))
1837: flags |= VM_PROT_READ;
1.20 ryo 1838:
1.2 ryo 1839: mdattr = VM_PROT_READ | VM_PROT_WRITE;
1.39 ryo 1840: if (need_update_pv) {
1.62 ryo 1841: error = _pmap_enter_pv(pp, pm, &spv, va, ptep, pa, flags);
1.3 ryo 1842: if (error != 0) {
1.11 ryo 1843: /*
1844: * If pmap_enter() fails,
1845: * it must not leave behind an existing pmap entry.
1846: */
1.39 ryo 1847: if (lxpde_valid(opte)) {
1848: bool pdpremoved = _pmap_pdp_delref(pm,
1849: AARCH64_KVA_TO_PA(trunc_page(
1850: (vaddr_t)ptep)), true);
1851: AARCH64_TLBI_BY_ASID_VA(pm->pm_asid,
1852: va, !pdpremoved);
1853: }
1.3 ryo 1854: PMAP_COUNT(pv_entry_cannotalloc);
1855: if (flags & PMAP_CANFAIL)
1.39 ryo 1856: goto fail1;
1.11 ryo 1857: panic("pmap_enter: failed to allocate pv_entry");
1.3 ryo 1858: }
1.39 ryo 1859: }
1.2 ryo 1860:
1.62 ryo 1861: if (pp != NULL) {
1.7 ryo 1862: /* update referenced/modified flags */
1.62 ryo 1863: pp->pp_flags |=
1.46 ryo 1864: (flags & (VM_PROT_READ | VM_PROT_WRITE));
1.62 ryo 1865: mdattr &= pp->pp_flags;
1.2 ryo 1866: }
1867:
1868: #ifdef PMAPCOUNTERS
1869: switch (flags & PMAP_CACHE_MASK) {
1870: case PMAP_NOCACHE:
1871: case PMAP_NOCACHE_OVR:
1872: PMAP_COUNT(uncached_mappings);
1873: break;
1874: }
1875: #endif
1876:
1.18 ryo 1877: attr = _pmap_pte_adjust_prot(L3_PAGE, prot, mdattr, user);
1.2 ryo 1878: attr = _pmap_pte_adjust_cacheflags(attr, flags);
1879: if (VM_MAXUSER_ADDRESS > va)
1880: attr |= LX_BLKPAG_APUSER;
1.3 ryo 1881: if (flags & PMAP_WIRED)
1882: attr |= LX_BLKPAG_OS_WIRED;
1.2 ryo 1883: #ifdef MULTIPROCESSOR
1884: attr |= LX_BLKPAG_SH_IS;
1885: #endif
1886:
1887: pte = pa | attr;
1.13 ryo 1888:
1.32 ryo 1889: if (need_sync_icache) {
1.15 ryo 1890: /* non-exec -> exec */
1891: UVMHIST_LOG(pmaphist,
1892: "icache_sync: pm=%p, va=%016lx, pte: %016lx -> %016lx",
1.13 ryo 1893: pm, va, opte, pte);
1.15 ryo 1894: if (!l3pte_writable(pte)) {
1.19 ryo 1895: PTE_ICACHE_SYNC_PAGE(pte, ptep, pm, va, l3only);
1.15 ryo 1896: atomic_swap_64(ptep, pte);
1.19 ryo 1897: AARCH64_TLBI_BY_ASID_VA(pm->pm_asid, va, true);
1.15 ryo 1898: } else {
1899: atomic_swap_64(ptep, pte);
1.19 ryo 1900: AARCH64_TLBI_BY_ASID_VA(pm->pm_asid, va, l3only);
1.15 ryo 1901: cpu_icache_sync_range(va, PAGE_SIZE);
1902: }
1903: } else {
1904: atomic_swap_64(ptep, pte);
1.19 ryo 1905: AARCH64_TLBI_BY_ASID_VA(pm->pm_asid, va, l3only);
1.15 ryo 1906: }
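	/*
	 * Ordering note for the icache sync above: if the new mapping is
	 * not writable, the page is synced with PTE_ICACHE_SYNC_PAGE()
	 * before the final pte is installed; if it is writable, the pte
	 * is installed and the TLB flushed first, and the icache is then
	 * synced through the new mapping with cpu_icache_sync_range().
	 */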
1.39 ryo 1907:
1.36 ryo 1908: if (pte & LX_BLKPAG_OS_WIRED) {
1909: PMSTAT_INC_WIRED_COUNT(pm);
1910: }
1911: PMSTAT_INC_RESIDENT_COUNT(pm);
1.2 ryo 1912:
1.39 ryo 1913: fail1:
1.62 ryo 1914: if (pps[1] != NULL)
1915: pmap_pv_unlock(pps[1]);
1916: if (pps[0] != NULL)
1917: pmap_pv_unlock(pps[0]);
1.39 ryo 1918: fail0:
1.12 ryo 1919: pm_unlock(pm);
1920:
1921: /* spare pv was not used. discard */
1922: if (spv != NULL)
1923: pool_cache_put(&_pmap_pv_pool, spv);
1924:
1.14 ryo 1925: if (opv != NULL)
1926: pool_cache_put(&_pmap_pv_pool, opv);
1927:
1.2 ryo 1928: return error;
1929: }
1930:
1931: int
1932: pmap_enter(struct pmap *pm, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
1.1 matt 1933: {
1.2 ryo 1934: return _pmap_enter(pm, va, pa, prot, flags, false);
1.1 matt 1935: }
1936:
1.68 ad 1937: bool
1.2 ryo 1938: pmap_remove_all(struct pmap *pm)
1.1 matt 1939: {
1.2 ryo 1940: /* nothing to do; return false so the caller removes mappings itself */
1.68 ad 1941: return false;
1.1 matt 1942: }
1943:
1.2 ryo 1944: static void
1.35 ryo 1945: _pmap_remove(struct pmap *pm, vaddr_t sva, vaddr_t eva, bool kremove,
1946: struct pv_entry **pvtofree)
1.1 matt 1947: {
1.35 ryo 1948: pt_entry_t pte, *ptep = NULL;
1.62 ryo 1949: struct pmap_page *pp;
1.35 ryo 1950: struct pv_entry *opv;
1.2 ryo 1951: paddr_t pa;
1.35 ryo 1952: vaddr_t va;
1953: vsize_t blocksize = 0;
1.36 ryo 1954: bool pdpremoved;
1.2 ryo 1955:
1956: UVMHIST_FUNC(__func__);
1957: UVMHIST_CALLED(pmaphist);
1958:
1.35 ryo 1959: UVMHIST_LOG(pmaphist, "pm=%p, sva=%016lx, eva=%016lx, kremovemode=%d",
1960: pm, sva, eva, kremove);
1.2 ryo 1961:
1.35 ryo 1962: for (va = sva; (va < eva) && (pm->pm_stats.resident_count != 0);
1963: va = (va + blocksize) & ~(blocksize - 1)) {
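		/*
		 * The step expression rounds va up to the next blocksize
		 * boundary.  For example, with 4KB granules: if
		 * blocksize == L3_SIZE (0x1000), va 0x201000 advances to
		 * 0x202000; if a whole L3 table is removed below and
		 * blocksize becomes L2_SIZE (0x200000), va 0x201000
		 * advances to 0x400000, skipping the rest of that L2
		 * block.
		 */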
1964:
1965: /* does va belong to the same L3 table as before? */
1966: if ((blocksize == L3_SIZE) && ((va & L3INDEXMASK) != 0))
1967: ptep++;
1968: else
1969: ptep = _pmap_pte_lookup_bs(pm, va, &blocksize);
1.8 ryo 1970:
1.2 ryo 1971: pte = *ptep;
1.35 ryo 1972: if (!lxpde_valid(pte))
1973: continue;
1.2 ryo 1974:
1.35 ryo 1975: if (!kremove) {
1976: pa = lxpde_pa(pte);
1.62 ryo 1977: pp = phys_to_pp(pa);
1978: if (pp != NULL) {
1.39 ryo 1979:
1.62 ryo 1980: pmap_pv_lock(pp);
1981: opv = _pmap_remove_pv(pp, pm, va, pte);
1982: pmap_pv_unlock(pp);
1.35 ryo 1983: if (opv != NULL) {
1984: opv->pv_next = *pvtofree;
1985: *pvtofree = opv;
1986: }
1987: }
1988: }
1.2 ryo 1989:
1.36 ryo 1990: pte = atomic_swap_64(ptep, 0);
1991: if (!lxpde_valid(pte))
1992: continue;
1993:
1994: pdpremoved = _pmap_pdp_delref(pm,
1995: AARCH64_KVA_TO_PA(trunc_page((vaddr_t)ptep)), true);
1996: AARCH64_TLBI_BY_ASID_VA(pm->pm_asid, va, !pdpremoved);
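		/*
		 * The last argument selects a last-level-only (L3) TLB
		 * invalidation, like "l3only" in _pmap_enter().  When
		 * the page table page itself has just been freed
		 * (pdpremoved), cached intermediate walk entries must
		 * be invalidated as well, hence "!pdpremoved".
		 */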
1.3 ryo 1997:
1.36 ryo 1998: if (pdpremoved) {
1999: /*
2000: * this Ln page table page has been removed.
2001: * skip to the next Ln table
2002: */
2003: blocksize *= Ln_ENTRIES;
2004: }
2005:
2006: if ((pte & LX_BLKPAG_OS_WIRED) != 0) {
2007: PMSTAT_DEC_WIRED_COUNT(pm);
2008: }
2009: PMSTAT_DEC_RESIDENT_COUNT(pm);
1.2 ryo 2010: }
1.1 matt 2011: }
2012:
2013: void
1.2 ryo 2014: pmap_remove(struct pmap *pm, vaddr_t sva, vaddr_t eva)
1.1 matt 2015: {
1.35 ryo 2016: struct pv_entry *pvtofree = NULL;
2017: struct pv_entry *pv, *pvtmp;
1.2 ryo 2018:
1.28 ryo 2019: KASSERT_PM_ADDR(pm, sva);
1.2 ryo 2020: KASSERT(!IN_KSEG_ADDR(sva));
2021:
1.35 ryo 2022: pm_lock(pm);
2023: _pmap_remove(pm, sva, eva, false, &pvtofree);
2024: pm_unlock(pm);
2025:
2026: for (pv = pvtofree; pv != NULL; pv = pvtmp) {
2027: pvtmp = pv->pv_next;
2028: pool_cache_put(&_pmap_pv_pool, pv);
2029: }
1.1 matt 2030: }
2031:
1.63 ryo 2032: static void
2033: pmap_page_remove(struct pmap_page *pp, vm_prot_t prot)
1.1 matt 2034: {
1.2 ryo 2035: struct pv_entry *pv, *pvtmp;
1.63 ryo 2036: struct pv_entry *pvtofree = NULL;
1.3 ryo 2037: pt_entry_t opte;
1.2 ryo 2038:
2039: /* remove all mappings that reference this physical page */
1.62 ryo 2040: pmap_pv_lock(pp);
1.64 ryo 2041: LIST_FOREACH_SAFE(pv, &pp->pp_pvhead, pv_link, pvtmp) {
1.2 ryo 2042:
1.3 ryo 2043: opte = atomic_swap_64(pv->pv_ptep, 0);
1.36 ryo 2044: if (lxpde_valid(opte)) {
2045: _pmap_pdp_delref(pv->pv_pmap,
2046: AARCH64_KVA_TO_PA(trunc_page(
2047: (vaddr_t)pv->pv_ptep)), false);
2048: AARCH64_TLBI_BY_ASID_VA(pv->pv_pmap->pm_asid,
2049: pv->pv_va, true);
1.2 ryo 2050:
1.36 ryo 2051: if ((opte & LX_BLKPAG_OS_WIRED) != 0) {
2052: PMSTAT_DEC_WIRED_COUNT(pv->pv_pmap);
2053: }
2054: PMSTAT_DEC_RESIDENT_COUNT(pv->pv_pmap);
2055: }
1.64 ryo 2056: LIST_REMOVE(pv, pv_link);
1.2 ryo 2057: PMAP_COUNT(pv_remove);
1.35 ryo 2058:
2059: pv->pv_next = pvtofree;
2060: pvtofree = pv;
1.2 ryo 2061: }
1.62 ryo 2062: pmap_pv_unlock(pp);
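	/*
	 * The pv entries unlinked above are freed only now that the pv
	 * lock has been dropped, presumably so that pool_cache_put() is
	 * never called with the pv lock held.
	 */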
1.2 ryo 2063:
1.35 ryo 2064: for (pv = pvtofree; pv != NULL; pv = pvtmp) {
2065: pvtmp = pv->pv_next;
2066: pool_cache_put(&_pmap_pv_pool, pv);
2067: }
1.63 ryo 2068: }
2069:
2070: #ifdef __HAVE_PMAP_PV_TRACK
2071: void
2072: pmap_pv_protect(paddr_t pa, vm_prot_t prot)
2073: {
2074: struct pmap_page *pp;
2075:
2076: UVMHIST_FUNC(__func__);
2077: UVMHIST_CALLED(pmaphist);
2078:
2079: UVMHIST_LOG(pmaphist, "pa=%016lx, prot=%08x",
2080: pa, prot, 0, 0);
2081:
2082: pp = pmap_pv_tracked(pa);
2083: if (pp == NULL)
2084: panic("pmap_pv_protect: page not pv-tracked: %#" PRIxPADDR, pa);
2085:
2086: KASSERT(prot == VM_PROT_NONE);
2087: pmap_page_remove(pp, prot);
2088: }
2089: #endif
2090:
2091: void
2092: pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
2093: {
2094: struct pv_entry *pv;
2095: struct pmap_page *pp;
2096:
2097: KASSERT((prot & VM_PROT_READ) || !(prot & VM_PROT_WRITE));
2098:
2099: UVMHIST_FUNC(__func__);
2100: UVMHIST_CALLED(pmaphist);
2101:
2102: pp = VM_PAGE_TO_PP(pg);
2103:
2104: UVMHIST_LOG(pmaphist, "pg=%p, pp=%p, pa=%016lx, prot=%08x",
2105: pg, pp, VM_PAGE_TO_PHYS(pg), prot);
2106:
2107: if ((prot & (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE)) ==
2108: VM_PROT_NONE) {
2109: pmap_page_remove(pp, prot);
1.2 ryo 2110: } else {
1.62 ryo 2111: pmap_pv_lock(pp);
1.64 ryo 2112: LIST_FOREACH(pv, &pp->pp_pvhead, pv_link) {
1.62 ryo 2113: _pmap_protect_pv(pp, pv, prot);
1.2 ryo 2114: }
1.62 ryo 2115: pmap_pv_unlock(pp);
1.2 ryo 2116: }
1.1 matt 2117: }
2118:
2119: void
1.2 ryo 2120: pmap_unwire(struct pmap *pm, vaddr_t va)
1.1 matt 2121: {
1.2 ryo 2122: pt_entry_t pte, *ptep;
2123:
2124: UVMHIST_FUNC(__func__);
2125: UVMHIST_CALLED(pmaphist);
2126:
2127: UVMHIST_LOG(pmaphist, "pm=%p, va=%016lx", pm, va, 0, 0);
2128:
2129: PMAP_COUNT(unwire);
2130:
1.28 ryo 2131: KASSERT_PM_ADDR(pm, va);
2132: KASSERT(!IN_KSEG_ADDR(va));
1.2 ryo 2133:
1.11 ryo 2134: pm_lock(pm);
1.27 ryo 2135: ptep = _pmap_pte_lookup_l3(pm, va);
1.2 ryo 2136: if (ptep != NULL) {
2137: pte = *ptep;
1.3 ryo 2138: if (!l3pte_valid(pte) ||
2139: ((pte & LX_BLKPAG_OS_WIRED) == 0)) {
2140: /* invalid pte, or pte is not wired */
2141: PMAP_COUNT(unwire_failure);
1.11 ryo 2142: pm_unlock(pm);
1.2 ryo 2143: return;
2144: }
2145:
1.3 ryo 2146: pte &= ~LX_BLKPAG_OS_WIRED;
2147: atomic_swap_64(ptep, pte);
1.2 ryo 2148:
1.36 ryo 2149: PMSTAT_DEC_WIRED_COUNT(pm);
1.2 ryo 2150: }
1.11 ryo 2151: pm_unlock(pm);
1.1 matt 2152: }
2153:
1.2 ryo 2154: bool
2155: pmap_fault_fixup(struct pmap *pm, vaddr_t va, vm_prot_t accessprot, bool user)
1.1 matt 2156: {
1.62 ryo 2157: struct pmap_page *pp;
1.2 ryo 2158: pt_entry_t *ptep, pte;
2159: vm_prot_t pmap_prot;
2160: paddr_t pa;
1.8 ryo 2161: bool fixed = false;
1.2 ryo 2162:
2163: UVMHIST_FUNC(__func__);
2164: UVMHIST_CALLED(pmaphist);
2165:
2166: UVMHIST_LOG(pmaphist, "pm=%p, va=%016lx, accessprot=%08x",
2167: pm, va, accessprot, 0);
2168:
2169:
2170: #if 0
1.28 ryo 2171: KASSERT_PM_ADDR(pm, va);
1.2 ryo 2172: #else
2173: if (((pm == pmap_kernel()) &&
1.28 ryo 2174: !(IN_RANGE(va, VM_MIN_KERNEL_ADDRESS, VM_MAX_KERNEL_ADDRESS))) ||
1.2 ryo 2175: ((pm != pmap_kernel()) &&
1.28 ryo 2176: !(IN_RANGE(va, VM_MIN_ADDRESS, VM_MAX_ADDRESS)))) {
1.2 ryo 2177:
2178: UVMHIST_LOG(pmaphist,
2179: "pmap space and va mismatch: pm=%s, va=%016lx",
2180: (pm == pmap_kernel()) ? "kernel" : "user", va, 0, 0);
2181: return false;
2182: }
2183: #endif
2184:
1.11 ryo 2185: pm_lock(pm);
1.8 ryo 2186:
1.27 ryo 2187: ptep = _pmap_pte_lookup_l3(pm, va);
1.2 ryo 2188: if (ptep == NULL) {
2189: UVMHIST_LOG(pmaphist, "pte_lookup failure: va=%016lx",
2190: va, 0, 0, 0);
1.8 ryo 2191: goto done;
1.2 ryo 2192: }
2193:
2194: pte = *ptep;
2195: if (!l3pte_valid(pte)) {
2196: UVMHIST_LOG(pmaphist, "invalid pte: %016llx: va=%016lx",
2197: pte, va, 0, 0);
1.8 ryo 2198: goto done;
1.2 ryo 2199: }
2200:
2201: pa = l3pte_pa(*ptep);
1.62 ryo 2202: pp = phys_to_pp(pa);
2203: if (pp == NULL) {
2204: UVMHIST_LOG(pmaphist, "pmap_page not found: va=%016lx", va, 0, 0, 0);
1.8 ryo 2205: goto done;
1.2 ryo 2206: }
2207:
2208: /* get the prot set by pmap_enter() (stored in software-use bits in the pte) */
2209: switch (pte & (LX_BLKPAG_OS_READ|LX_BLKPAG_OS_WRITE)) {
2210: case 0:
2211: default:
2212: pmap_prot = 0;
2213: break;
2214: case LX_BLKPAG_OS_READ:
2215: pmap_prot = VM_PROT_READ;
2216: break;
2217: case LX_BLKPAG_OS_WRITE:
2218: case LX_BLKPAG_OS_READ|LX_BLKPAG_OS_WRITE:
2219: pmap_prot = (VM_PROT_READ|VM_PROT_WRITE);
2220: break;
2221: }
1.18 ryo 2222: if (l3pte_executable(pte, pm != pmap_kernel()))
1.2 ryo 2223: pmap_prot |= VM_PROT_EXECUTE;
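	/*
	 * Summary of the software-use encoding decoded above:
	 *
	 *	OS_READ  OS_WRITE  ->  pmap_prot
	 *	   0        0          0
	 *	   1        0          VM_PROT_READ
	 *	   0        1          VM_PROT_READ|VM_PROT_WRITE
	 *	   1        1          VM_PROT_READ|VM_PROT_WRITE
	 *
	 * plus VM_PROT_EXECUTE if the pte is executable at this EL.
	 */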
2224:
2225: UVMHIST_LOG(pmaphist, "va=%016lx, pmapprot=%08x, accessprot=%08x",
2226: va, pmap_prot, accessprot, 0);
2227:
2228: /* keep only the read/write/execute bits */
2229: accessprot &= (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE);
2230:
1.61 ryo 2231: /* PROT_EXEC requires implicit PROT_READ */
2232: if (accessprot & VM_PROT_EXECUTE)
2233: accessprot |= VM_PROT_READ;
2234:
1.2 ryo 2235: /* no permission to read/write/execute for this page */
2236: if ((pmap_prot & accessprot) != accessprot) {
2237: UVMHIST_LOG(pmaphist, "no permission to access", 0, 0, 0, 0);
1.8 ryo 2238: goto done;
1.2 ryo 2239: }
2240:
1.24 ryo 2241: /* pte is readable and writable, but a fault occurred? probably copy(9) */
2242: if ((pte & LX_BLKPAG_AF) && ((pte & LX_BLKPAG_AP) == LX_BLKPAG_AP_RW))
1.8 ryo 2243: goto done;
1.2 ryo 2244:
1.62 ryo 2245: pmap_pv_lock(pp);
1.2 ryo 2246: if ((pte & LX_BLKPAG_AF) == 0) {
2247: /* pte has no AF bit, set referenced and AF bit */
2248: UVMHIST_LOG(pmaphist,
2249: "REFERENCED:"
2250: " va=%016lx, pa=%016lx, pte_prot=%08x, accessprot=%08x",
2251: va, pa, pmap_prot, accessprot);
1.62 ryo 2252: pp->pp_flags |= VM_PROT_READ; /* set referenced */
1.2 ryo 2253: pte |= LX_BLKPAG_AF;
2254:
2255: PMAP_COUNT(fixup_referenced);
2256: }
2257: if ((accessprot & VM_PROT_WRITE) &&
2258: ((pte & LX_BLKPAG_AP) == LX_BLKPAG_AP_RO)) {
2259: /* pte is not RW. set modified and RW */
2260:
2261: UVMHIST_LOG(pmaphist, "MODIFIED:"
2262: " va=%016lx, pa=%016lx, pte_prot=%08x, accessprot=%08x",
2263: va, pa, pmap_prot, accessprot);
1.62 ryo 2264: pp->pp_flags |= VM_PROT_WRITE; /* set modified */
1.2 ryo 2265: pte &= ~LX_BLKPAG_AP;
2266: pte |= LX_BLKPAG_AP_RW;
2267:
2268: PMAP_COUNT(fixup_modified);
2269: }
1.62 ryo 2270: pmap_pv_unlock(pp);
1.2 ryo 2271:
2272: atomic_swap_64(ptep, pte);
1.19 ryo 2273: AARCH64_TLBI_BY_ASID_VA(pm->pm_asid, va, true);
2274:
1.8 ryo 2275: fixed = true;
1.2 ryo 2276:
1.8 ryo 2277: done:
1.11 ryo 2278: pm_unlock(pm);
1.8 ryo 2279: return fixed;
1.1 matt 2280: }
2281:
2282: bool
2283: pmap_clear_modify(struct vm_page *pg)
2284: {
1.2 ryo 2285: struct pv_entry *pv;
1.62 ryo 2286: struct pmap_page * const pp = VM_PAGE_TO_PP(pg);
1.2 ryo 2287: pt_entry_t *ptep, pte, opte;
2288: vaddr_t va;
2289:
2290: UVMHIST_FUNC(__func__);
2291: UVMHIST_CALLED(pmaphist);
2292:
1.62 ryo 2293: UVMHIST_LOG(pmaphist, "pg=%p, pp_flags=%08x",
2294: pg, pp->pp_flags, 0, 0);
1.2 ryo 2295:
1.62 ryo 2296: pmap_pv_lock(pp);
1.2 ryo 2297:
1.62 ryo 2298: if ((pp->pp_flags & VM_PROT_WRITE) == 0) {
2299: pmap_pv_unlock(pp);
1.2 ryo 2300: return false;
2301: }
2302:
1.62 ryo 2303: pp->pp_flags &= ~VM_PROT_WRITE;
1.2 ryo 2304:
2305: PMAP_COUNT(clear_modify);
1.64 ryo 2306: LIST_FOREACH(pv, &pp->pp_pvhead, pv_link) {
1.2 ryo 2307: PMAP_COUNT(clear_modify_pages);
2308:
2309: va = pv->pv_va;
2310:
2311: ptep = pv->pv_ptep;
2312: opte = pte = *ptep;
2313: tryagain:
2314: if (!l3pte_valid(pte))
2315: continue;
2316:
2317: /* clear write permission */
2318: pte &= ~LX_BLKPAG_AP;
2319: pte |= LX_BLKPAG_AP_RO;
2320:
2321: /* XXX: taking PM_LOCK() here could deadlock, so update racily with CAS */
2322: if ((pte = atomic_cas_64(ptep, opte, pte)) != opte) {
2323: opte = pte;
2324: goto tryagain;
2325: }
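		/*
		 * A minimal sketch of the lockless retry pattern used
		 * above, assuming only that atomic_cas_64() returns the
		 * value actually found in *ptep (the real loop also
		 * rechecks l3pte_valid() on every retry):
		 *
		 *	uint64_t cur, exp, new;
		 *
		 *	cur = *ptep;
		 *	do {
		 *		exp = cur;
		 *		new = (exp & ~LX_BLKPAG_AP) | LX_BLKPAG_AP_RO;
		 *		cur = atomic_cas_64(ptep, exp, new);
		 *	} while (cur != exp);	// lost a race; retry
		 */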
2326:
1.19 ryo 2327: AARCH64_TLBI_BY_ASID_VA(pv->pv_pmap->pm_asid, va, true);
1.2 ryo 2328:
2329: UVMHIST_LOG(pmaphist,
2330: "va=%016llx, ptep=%p, pa=%016lx, RW -> RO",
2331: va, ptep, l3pte_pa(pte), 0);
2332: }
2333:
1.62 ryo 2334: pmap_pv_unlock(pp);
1.2 ryo 2335:
2336: return true;
1.1 matt 2337: }
2338:
2339: bool
2340: pmap_clear_reference(struct vm_page *pg)
2341: {
1.2 ryo 2342: struct pv_entry *pv;
1.62 ryo 2343: struct pmap_page * const pp = VM_PAGE_TO_PP(pg);
1.2 ryo 2344: pt_entry_t *ptep, pte, opte;
2345: vaddr_t va;
2346:
2347: UVMHIST_FUNC(__func__);
2348: UVMHIST_CALLED(pmaphist);
2349:
1.62 ryo 2350: UVMHIST_LOG(pmaphist, "pg=%p, pp=%p, pp_flags=%08x",
2351: pg, pp, pp->pp_flags, 0);
1.2 ryo 2352:
1.62 ryo 2353: pmap_pv_lock(pp);
1.2 ryo 2354:
1.62 ryo 2355: if ((pp->pp_flags & VM_PROT_READ) == 0) {
2356: pmap_pv_unlock(pp);
1.2 ryo 2357: return false;
2358: }
1.62 ryo 2359: pp->pp_flags &= ~VM_PROT_READ;
1.2 ryo 2360:
2361: PMAP_COUNT(clear_reference);
1.64 ryo 2362: LIST_FOREACH(pv, &pp->pp_pvhead, pv_link) {
1.2 ryo 2363: PMAP_COUNT(clear_reference_pages);
2364:
2365: va = pv->pv_va;
2366:
2367: ptep = pv->pv_ptep;
2368: opte = pte = *ptep;
2369: tryagain:
2370: if (!l3pte_valid(pte))
2371: continue;
2372:
2373: /* clear access permission */
2374: pte &= ~LX_BLKPAG_AF;
2375:
2376: /* XXX: taking PM_LOCK() here could deadlock, so update racily with CAS */
2377: if ((pte = atomic_cas_64(ptep, opte, pte)) != opte) {
2378: opte = pte;
2379: goto tryagain;
2380: }
2381:
1.19 ryo 2382: AARCH64_TLBI_BY_ASID_VA(pv->pv_pmap->pm_asid, va, true);
1.2 ryo 2383:
2384: UVMHIST_LOG(pmaphist, "va=%016llx, ptep=%p, pa=%016lx, unse AF",
2385: va, ptep, l3pte_pa(pte), 0);
2386: }
2387:
1.62 ryo 2388: pmap_pv_unlock(pp);
1.2 ryo 2389:
2390: return true;
1.1 matt 2391: }
2392:
2393: bool
2394: pmap_is_modified(struct vm_page *pg)
2395: {
1.62 ryo 2396: struct pmap_page * const pp = VM_PAGE_TO_PP(pg);
1.3 ryo 2397:
1.62 ryo 2398: return (pp->pp_flags & VM_PROT_WRITE);
1.1 matt 2399: }
2400:
2401: bool
2402: pmap_is_referenced(struct vm_page *pg)
2403: {
1.62 ryo 2404: struct pmap_page * const pp = VM_PAGE_TO_PP(pg);
1.3 ryo 2405:
1.62 ryo 2406: return (pp->pp_flags & VM_PROT_READ);
1.2 ryo 2407: }
2408:
2409: #ifdef DDB
1.37 ryo 2410:
1.17 ryo 2411: /* get pointer to kernel segment L2 or L3 table entry */
2412: pt_entry_t *
2413: kvtopte(vaddr_t va)
2414: {
1.28 ryo 2415: KASSERT(IN_RANGE(va, VM_MIN_KERNEL_ADDRESS, VM_MAX_KERNEL_ADDRESS));
1.17 ryo 2416:
1.27 ryo 2417: return _pmap_pte_lookup_bs(pmap_kernel(), va, NULL);
1.17 ryo 2418: }
2419:
2420: /* change attribute of kernel segment */
2421: pt_entry_t
2422: pmap_kvattr(vaddr_t va, vm_prot_t prot)
2423: {
2424: pt_entry_t *ptep, pte, opte;
2425:
1.28 ryo 2426: KASSERT(IN_RANGE(va, VM_MIN_KERNEL_ADDRESS, VM_MAX_KERNEL_ADDRESS));
1.17 ryo 2427:
2428: ptep = kvtopte(va);
2429: if (ptep == NULL)
2430: panic("%s: %016lx is not mapped\n", __func__, va);
2431:
2432: opte = pte = *ptep;
2433:
2434: pte &= ~(LX_BLKPAG_AF|LX_BLKPAG_AP);
2435: switch (prot & (VM_PROT_READ|VM_PROT_WRITE)) {
2436: case 0:
2437: break;
2438: case VM_PROT_READ:
2439: pte |= (LX_BLKPAG_AF|LX_BLKPAG_AP_RO);
2440: break;
2441: case VM_PROT_WRITE:
2442: case VM_PROT_READ|VM_PROT_WRITE:
2443: pte |= (LX_BLKPAG_AF|LX_BLKPAG_AP_RW);
2444: break;
2445: }
2446:
2447: if ((prot & VM_PROT_EXECUTE) == 0) {
1.18 ryo 2448: pte |= LX_BLKPAG_PXN;
1.17 ryo 2449: } else {
2450: pte |= LX_BLKPAG_AF;
1.18 ryo 2451: pte &= ~LX_BLKPAG_PXN;
1.17 ryo 2452: }
2453:
2454: *ptep = pte;
2455:
2456: return opte;
2457: }
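/*
 * Example use of pmap_kvattr() (a hypothetical sketch; pmap_kvattr()
 * only rewrites the pte, so the caller must invalidate the TLB entry
 * itself):
 *
 *	pt_entry_t opte;
 *
 *	opte = pmap_kvattr(va, VM_PROT_READ | VM_PROT_WRITE);
 *	// ... modify the page at va ...
 *	*kvtopte(va) = opte;		// restore previous attributes
 *	aarch64_tlbi_by_va(va);		// flush the stale translation
 */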
2458:
1.26 ryo 2459: void
2460: pmap_db_pte_print(pt_entry_t pte, int level,
2461: void (*pr)(const char *, ...) __printflike(1, 2))
1.2 ryo 2462: {
2463: if (pte == 0) {
1.8 ryo 2464: pr(" UNUSED\n");
1.26 ryo 2465: return;
2466: }
2467:
2468: pr(" %s", (pte & LX_VALID) ? "VALID" : "**INVALID**");
2469:
2470: if ((level == 0) ||
2471: ((level == 1) && l1pde_is_table(pte)) ||
2472: ((level == 2) && l2pde_is_table(pte))) {
2473:
2474: /* L0/L1/L2 TABLE */
2475: if ((level == 0) && ((pte & LX_TYPE) != LX_TYPE_TBL))
2476: pr(" **ILLEGAL TYPE**"); /* L0 doesn't support block */
2477: else
1.37 ryo 2478: pr(" L%d-TABLE", level);
1.2 ryo 2479:
1.26 ryo 2480: pr(", PA=%lx", l0pde_pa(pte));
1.2 ryo 2481:
1.26 ryo 2482: if (pte & LX_TBL_NSTABLE)
2483: pr(", NSTABLE");
2484: if (pte & LX_TBL_APTABLE)
2485: pr(", APTABLE");
2486: if (pte & LX_TBL_UXNTABLE)
2487: pr(", UXNTABLE");
2488: if (pte & LX_TBL_PXNTABLE)
2489: pr(", PXNTABLE");
1.2 ryo 2490:
2491: } else if (((level == 1) && l1pde_is_block(pte)) ||
2492: ((level == 2) && l2pde_is_block(pte)) ||
1.26 ryo 2493: (level == 3)) {
1.2 ryo 2494:
1.26 ryo 2495: /* L1/L2 BLOCK or L3 PAGE */
1.37 ryo 2496: switch (level) {
2497: case 1:
2498: pr(" L1(1G)-BLOCK");
2499: break;
2500: case 2:
2501: pr(" L2(2M)-BLOCK");
2502: break;
2503: case 3:
1.26 ryo 2504: pr(" %s", l3pte_is_page(pte) ?
1.37 ryo 2505: "L3(4K)-PAGE" : "**ILLEGAL TYPE**");
2506: break;
2507: }
1.2 ryo 2508:
1.26 ryo 2509: pr(", PA=%lx", l3pte_pa(pte));
1.2 ryo 2510:
1.37 ryo 2511: pr(", %s", (pte & LX_BLKPAG_UXN) ?
1.43 skrll 2512: "UXN" : "UX ");
1.37 ryo 2513: pr(", %s", (pte & LX_BLKPAG_PXN) ?
1.43 skrll 2514: "PXN" : "PX ");
1.2 ryo 2515:
2516: if (pte & LX_BLKPAG_CONTIG)
1.26 ryo 2517: pr(", CONTIG");
1.2 ryo 2518:
1.26 ryo 2519: pr(", %s", (pte & LX_BLKPAG_NG) ? "NG" : "global");
1.37 ryo 2520: pr(", %s", (pte & LX_BLKPAG_AF) ?
2521: "accessible" :
2522: "**fault** ");
1.2 ryo 2523:
2524: switch (pte & LX_BLKPAG_SH) {
2525: case LX_BLKPAG_SH_NS:
2526: pr(", SH_NS");
2527: break;
2528: case LX_BLKPAG_SH_OS:
2529: pr(", SH_OS");
2530: break;
2531: case LX_BLKPAG_SH_IS:
2532: pr(", SH_IS");
2533: break;
2534: default:
2535: pr(", SH_??");
2536: break;
2537: }
2538:
2539: pr(", %s", (pte & LX_BLKPAG_AP_RO) ? "RO" : "RW");
2540: pr(", %s", (pte & LX_BLKPAG_APUSER) ? "EL0" : "EL1");
1.26 ryo 2541: pr(", %s", (pte & LX_BLKPAG_NS) ? "NS" : "secure");
1.2 ryo 2542:
2543: switch (pte & LX_BLKPAG_ATTR_MASK) {
2544: case LX_BLKPAG_ATTR_NORMAL_WB:
1.43 skrll 2545: pr(", WB");
1.2 ryo 2546: break;
2547: case LX_BLKPAG_ATTR_NORMAL_NC:
1.43 skrll 2548: pr(", NC");
1.2 ryo 2549: break;
2550: case LX_BLKPAG_ATTR_NORMAL_WT:
1.43 skrll 2551: pr(", WT");
1.2 ryo 2552: break;
2553: case LX_BLKPAG_ATTR_DEVICE_MEM:
2554: pr(", DEVICE");
2555: break;
2556: }
2557:
1.26 ryo 2558: if (pte & LX_BLKPAG_OS_BOOT)
2559: pr(", boot");
1.2 ryo 2560: if (pte & LX_BLKPAG_OS_READ)
2561: pr(", pmap_read");
2562: if (pte & LX_BLKPAG_OS_WRITE)
2563: pr(", pmap_write");
1.26 ryo 2564: if (pte & LX_BLKPAG_OS_WIRED)
1.43 skrll 2565: pr(", wired");
1.2 ryo 2566: } else {
1.26 ryo 2567: pr(" **ILLEGAL TYPE**");
1.2 ryo 2568: }
2569: pr("\n");
1.1 matt 2570: }
2571:
1.2 ryo 2572: void
1.38 ryo 2573: pmap_db_pteinfo(vaddr_t va, void (*pr)(const char *, ...) __printflike(1, 2))
1.1 matt 2574: {
1.2 ryo 2575: struct vm_page *pg;
1.62 ryo 2576: struct pmap_page *pp;
1.2 ryo 2577: bool user;
2578: pd_entry_t *l0, *l1, *l2, *l3;
2579: pd_entry_t pde;
2580: pt_entry_t pte;
1.26 ryo 2581: uint64_t ttbr;
1.2 ryo 2582: paddr_t pa;
2583: unsigned int idx;
2584:
1.66 ryo 2585: switch (aarch64_addressspace(va)) {
2586: case AARCH64_ADDRSPACE_UPPER:
1.26 ryo 2587: user = false;
2588: ttbr = reg_ttbr1_el1_read();
1.66 ryo 2589: break;
2590: case AARCH64_ADDRSPACE_LOWER:
1.26 ryo 2591: user = true;
2592: ttbr = reg_ttbr0_el1_read();
1.66 ryo 2593: break;
2594: default:
2595: pr("illegal address space\n");
2596: return;
1.26 ryo 2597: }
2598: pa = ttbr & TTBR_BADDR;
2599: l0 = (pd_entry_t *)AARCH64_PA_TO_KVA(pa);
2600:
1.2 ryo 2601: /*
2602: * traverse L0 -> L1 -> L2 -> L3 table
2603: */
1.38 ryo 2604: pr("TTBR%d=%016"PRIx64", pa=%016"PRIxPADDR", va=%p",
2605: user ? 0 : 1, ttbr, pa, l0);
2606: pr(", input-va=%016"PRIxVADDR
2607: ", L0-index=%ld, L1-index=%ld, L2-index=%ld, L3-index=%ld\n",
1.2 ryo 2608: va,
2609: (va & L0_ADDR_BITS) >> L0_SHIFT,
2610: (va & L1_ADDR_BITS) >> L1_SHIFT,
2611: (va & L2_ADDR_BITS) >> L2_SHIFT,
2612: (va & L3_ADDR_BITS) >> L3_SHIFT);
2613:
2614: idx = l0pde_index(va);
2615: pde = l0[idx];
2616:
1.38 ryo 2617: pr("L0[%3d]=%016"PRIx64":", idx, pde);
1.2 ryo 2618: pmap_db_pte_print(pde, 0, pr);
2619:
2620: if (!l0pde_valid(pde))
2621: return;
2622:
1.26 ryo 2623: l1 = (pd_entry_t *)AARCH64_PA_TO_KVA(l0pde_pa(pde));
1.2 ryo 2624: idx = l1pde_index(va);
2625: pde = l1[idx];
2626:
1.38 ryo 2627: pr(" L1[%3d]=%016"PRIx64":", idx, pde);
1.2 ryo 2628: pmap_db_pte_print(pde, 1, pr);
2629:
2630: if (!l1pde_valid(pde) || l1pde_is_block(pde))
2631: return;
2632:
1.26 ryo 2633: l2 = (pd_entry_t *)AARCH64_PA_TO_KVA(l1pde_pa(pde));
1.2 ryo 2634: idx = l2pde_index(va);
2635: pde = l2[idx];
2636:
1.38 ryo 2637: pr(" L2[%3d]=%016"PRIx64":", idx, pde);
1.2 ryo 2638: pmap_db_pte_print(pde, 2, pr);
2639:
2640: if (!l2pde_valid(pde) || l2pde_is_block(pde))
2641: return;
2642:
1.26 ryo 2643: l3 = (pd_entry_t *)AARCH64_PA_TO_KVA(l2pde_pa(pde));
1.2 ryo 2644: idx = l3pte_index(va);
2645: pte = l3[idx];
2646:
1.38 ryo 2647: pr(" L3[%3d]=%016"PRIx64":", idx, pte);
1.2 ryo 2648: pmap_db_pte_print(pte, 3, pr);
2649:
2650: pa = l3pte_pa(pte);
2651: pg = PHYS_TO_VM_PAGE(pa);
1.62 ryo 2652: pp = phys_to_pp(pa);
2653: if (pp == NULL) {
2654: pr("No VM_PAGE nor PMAP_PAGE\n");
1.8 ryo 2655: } else {
1.62 ryo 2656: if (pg != NULL)
2657: pg_dump(pg, pr);
2658: else
2659: pr("no VM_PAGE. pv-tracked page?\n");
2660: pv_dump(pp, pr);
1.2 ryo 2661: }
1.1 matt 2662: }
1.37 ryo 2663:
2664: static void
2665: dump_ln_table(bool countmode, pd_entry_t *pdp, int level, int lnindex,
1.38 ryo 2666: vaddr_t va, void (*pr)(const char *, ...) __printflike(1, 2))
1.37 ryo 2667: {
2668: struct vm_page *pg;
2669: struct vm_page_md *md;
2670: pd_entry_t pde;
2671: paddr_t pa;
2672: int i, n;
2673: const char *spaces[4] = { " ", "  ", "   ", "    " };
2674: const char *spc = spaces[level];
2675:
2676: pa = AARCH64_KVA_TO_PA((vaddr_t)pdp);
2677: pg = PHYS_TO_VM_PAGE(pa);
2678: md = VM_PAGE_TO_MD(pg);
2679:
2680: if (pg == NULL) {
2681: pr("%sL%d: pa=%lx pg=NULL\n", spc, level, pa);
2682: } else {
2683: pr("%sL%d: pa=%lx pg=%p, wire_count=%d, mdpg_ptep_parent=%p\n",
2684: spc, level, pa, pg, pg->wire_count, md->mdpg_ptep_parent);
2685: }
2686:
2687: for (i = n = 0; i < Ln_ENTRIES; i++) {
2688: db_read_bytes((db_addr_t)&pdp[i], sizeof(pdp[i]), (char *)&pde);
2689: if (lxpde_valid(pde)) {
2690: if (!countmode)
2691: pr("%sL%d[%3d] %3dth, va=%016lx, pte=%016lx:",
2692: spc, level, i, n, va, pde);
2693: n++;
2694:
2695: if (((level != 0) && (level != 3) &&
2696: l1pde_is_block(pde)) ||
2697: ((level == 3) && l3pte_is_page(pde))) {
2698: if (!countmode)
2699: pmap_db_pte_print(pde, level, pr);
2700: } else if ((level != 3) && l1pde_is_table(pde)) {
2701: if (!countmode)
2702: pmap_db_pte_print(pde, level, pr);
2703: pa = l0pde_pa(pde);
2704: dump_ln_table(countmode,
2705: (pd_entry_t *)AARCH64_PA_TO_KVA(pa),
2706: level + 1, i, va, pr);
2707: } else {
2708: if (!countmode)
2709: pmap_db_pte_print(pde, level, pr);
2710: }
2711: }
2712:
2713: switch (level) {
2714: case 0:
2715: va += L0_SIZE;
2716: break;
2717: case 1:
2718: va += L1_SIZE;
2719: break;
2720: case 2:
2721: va += L2_SIZE;
2722: break;
2723: case 3:
2724: va += L3_SIZE;
2725: break;
2726: }
2727: }
2728:
2729: if (level == 0)
2730: pr("L0 has %d entries\n", n);
2731: else
2732: pr("%sL%d[%3d] has %d L%d entries\n", spaces[level - 1],
2733: level - 1, lnindex, n, level);
2734:
2735: }
2736:
2737: static void
2738: pmap_db_dump_l0_table(bool countmode, pd_entry_t *pdp, vaddr_t va_base,
1.38 ryo 2739: void (*pr)(const char *, ...) __printflike(1, 2))
1.37 ryo 2740: {
2741: dump_ln_table(countmode, pdp, 0, 0, va_base, pr);
2742: }
2743:
2744: void
1.38 ryo 2745: pmap_db_ttbrdump(bool countmode, vaddr_t va,
2746: void (*pr)(const char *, ...) __printflike(1, 2))
1.37 ryo 2747: {
2748: struct pmap *pm, _pm;
2749:
2750: pm = (struct pmap *)va;
2751: db_read_bytes((db_addr_t)va, sizeof(_pm), (char *)&_pm);
2752:
2753: pr("pmap=%p\n", pm);
2754: pr(" pm_asid = %d\n", _pm.pm_asid);
2755: pr(" pm_l0table = %p\n", _pm.pm_l0table);
2756: pr(" pm_l0table_pa = %lx\n", _pm.pm_l0table_pa);
2757: pr(" pm_activated = %d\n\n", _pm.pm_activated);
2758:
2759: pmap_db_dump_l0_table(countmode, _pm.pm_l0table,
2760: (pm == pmap_kernel()) ? 0xffff000000000000UL : 0, pr);
2761: }
2762:
1.2 ryo 2763: #endif /* DDB */