Annotation of src/sys/arch/aarch64/aarch64/pmap.c, Revision 1.29
1.29 ! ryo 1: /* $NetBSD: pmap.c,v 1.26 2018/10/04 23:53:13 ryo Exp $ */
1.1 matt 2:
1.2 ryo 3: /*
4: * Copyright (c) 2017 Ryo Shimizu <ryo@nerv.org>
1.1 matt 5: * All rights reserved.
6: *
7: * Redistribution and use in source and binary forms, with or without
8: * modification, are permitted provided that the following conditions
9: * are met:
10: * 1. Redistributions of source code must retain the above copyright
11: * notice, this list of conditions and the following disclaimer.
12: * 2. Redistributions in binary form must reproduce the above copyright
13: * notice, this list of conditions and the following disclaimer in the
14: * documentation and/or other materials provided with the distribution.
15: *
1.2 ryo 16: * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17: * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
18: * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
19: * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
20: * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
21: * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
22: * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23: * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
24: * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
25: * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
1.1 matt 26: * POSSIBILITY OF SUCH DAMAGE.
27: */
28:
29: #include <sys/cdefs.h>
1.29 ! ryo 30: __KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.26 2018/10/04 23:53:13 ryo Exp $");
1.1 matt 31:
1.2 ryo 32: #include "opt_arm_debug.h"
33: #include "opt_ddb.h"
1.26 ryo 34: #include "opt_multiprocessor.h"
35: #include "opt_pmap.h"
1.2 ryo 36: #include "opt_uvmhist.h"
1.1 matt 37:
38: #include <sys/param.h>
39: #include <sys/types.h>
40: #include <sys/kmem.h>
41: #include <sys/vmem.h>
1.2 ryo 42: #include <sys/atomic.h>
1.1 matt 43:
44: #include <uvm/uvm.h>
45:
1.2 ryo 46: #include <aarch64/pmap.h>
47: #include <aarch64/pte.h>
48: #include <aarch64/armreg.h>
49: #include <aarch64/cpufunc.h>
1.21 ryo 50: #include <aarch64/machdep.h>
1.2 ryo 51:
52: //#define PMAP_DEBUG
53: //#define PMAP_PV_DEBUG
54:
1.16 skrll 55: #ifdef VERBOSE_INIT_ARM
56: #define VPRINTF(...) printf(__VA_ARGS__)
57: #else
58: #define VPRINTF(...) do { } while (/* CONSTCOND */ 0)
59: #endif
1.2 ryo 60:
61: UVMHIST_DEFINE(pmaphist);
62: #ifdef UVMHIST
63:
64: #ifndef UVMHIST_PMAPHIST_SIZE
65: #define UVMHIST_PMAPHIST_SIZE (1024 * 4)
66: #endif
67:
68: struct kern_history_ent pmaphistbuf[UVMHIST_PMAPHIST_SIZE];
69:
70: static void
71: pmap_hist_init(void)
72: {
73: static bool inited = false;
74: if (inited == false) {
75: UVMHIST_INIT_STATIC(pmaphist, pmaphistbuf);
76: inited = true;
77: }
78: }
79: #define PMAP_HIST_INIT() pmap_hist_init()
80:
81: #else /* UVMHIST */
82:
83: #define PMAP_HIST_INIT() ((void)0)
84:
85: #endif /* UVMHIST */
86:
87:
88: #ifdef PMAPCOUNTERS
89: #define PMAP_COUNT(name) (pmap_evcnt_##name.ev_count++ + 0)
90: #define PMAP_COUNTER(name, desc) \
91: struct evcnt pmap_evcnt_##name = \
92: EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "pmap", desc); \
93: EVCNT_ATTACH_STATIC(pmap_evcnt_##name)
94:
95: PMAP_COUNTER(pdp_alloc_boot, "page table page allocate (uvm_pageboot_alloc)");
96: PMAP_COUNTER(pdp_alloc, "page table page allocate (uvm_pagealloc)");
97: PMAP_COUNTER(pdp_free, "page table page free (uvm_pagefree)");
98:
99: PMAP_COUNTER(pv_enter, "pv_entry allocate and link");
100: PMAP_COUNTER(pv_remove, "pv_entry free and unlink");
1.5 ryo 101: PMAP_COUNTER(pv_remove_nopv, "no pv_entry found when removing pv");
1.2 ryo 102:
103: PMAP_COUNTER(activate, "pmap_activate call");
104: PMAP_COUNTER(deactivate, "pmap_deactivate call");
105: PMAP_COUNTER(create, "pmap_create call");
106: PMAP_COUNTER(destroy, "pmap_destroy call");
107:
108: PMAP_COUNTER(page_protect, "pmap_page_protect call");
109: PMAP_COUNTER(protect, "pmap_protect call");
110: PMAP_COUNTER(protect_remove_fallback, "pmap_protect with no-read");
111: PMAP_COUNTER(protect_none, "pmap_protect non-existent pages");
112: PMAP_COUNTER(protect_managed, "pmap_protect managed pages");
113: PMAP_COUNTER(protect_unmanaged, "pmap_protect unmanaged pages");
114:
115: PMAP_COUNTER(clear_modify, "pmap_clear_modify call");
116: PMAP_COUNTER(clear_modify_pages, "pmap_clear_modify pages");
117: PMAP_COUNTER(clear_reference, "pmap_clear_reference call");
118: PMAP_COUNTER(clear_reference_pages, "pmap_clear_reference pages");
119:
120: PMAP_COUNTER(fixup_referenced, "page reference emulations");
121: PMAP_COUNTER(fixup_modified, "page modification emulations");
122:
123: PMAP_COUNTER(kern_mappings_bad, "kernel pages mapped (bad color)");
124: PMAP_COUNTER(kern_mappings_bad_wired, "kernel pages mapped (bad color, wired)");
125: PMAP_COUNTER(user_mappings_bad, "user pages mapped (bad color, not wired)");
126: PMAP_COUNTER(user_mappings_bad_wired, "user pages mapped (bad color, wired)");
127: PMAP_COUNTER(kern_mappings, "kernel pages mapped");
128: PMAP_COUNTER(user_mappings, "user pages mapped");
129: PMAP_COUNTER(user_mappings_changed, "user mapping changed");
130: PMAP_COUNTER(kern_mappings_changed, "kernel mapping changed");
131: PMAP_COUNTER(uncached_mappings, "uncached pages mapped");
132: PMAP_COUNTER(unmanaged_mappings, "unmanaged pages mapped");
133: PMAP_COUNTER(managed_mappings, "managed pages mapped");
134: PMAP_COUNTER(mappings, "pages mapped (including remapped)");
135: PMAP_COUNTER(remappings, "pages remapped");
136:
137: PMAP_COUNTER(pv_entry_cannotalloc, "pv_entry allocation failure");
138:
139: PMAP_COUNTER(unwire, "pmap_unwire call");
1.3 ryo 140: PMAP_COUNTER(unwire_failure, "pmap_unwire failure");
1.2 ryo 141:
142: #else /* PMAPCOUNTERS */
143: #define PMAP_COUNT(name) __nothing
144: #endif /* PMAPCOUNTERS */
145:
1.19 ryo 146: /*
147: * invalidate TLB entry for ASID and VA.
148: * `ll' invalidates only the last level (usually L3) of the TLB entry
149: */
150: #define AARCH64_TLBI_BY_ASID_VA(asid, va, ll) \
151: do { \
152: if ((ll)) { \
153: if ((asid) == 0) \
154: aarch64_tlbi_by_va_ll((va)); \
155: else \
156: aarch64_tlbi_by_asid_va_ll((asid), (va)); \
157: } else { \
158: if ((asid) == 0) \
159: aarch64_tlbi_by_va((va)); \
160: else \
161: aarch64_tlbi_by_asid_va((asid), (va)); \
162: } \
163: } while (0/*CONSTCOND*/)
164:
165: /*
166: * aarch64 requires write permission in the pte to invalidate the instruction cache.
167: * change the pte to writable temporarily before cpu_icache_sync_range().
168: * this macro modifies the PTE (*ptep); the caller must update the PTE afterwards.
169: */
170: #define PTE_ICACHE_SYNC_PAGE(pte, ptep, pm, va, ll) \
171: do { \
172: pt_entry_t tpte; \
173: tpte = (pte) & ~(LX_BLKPAG_AF|LX_BLKPAG_AP); \
174: tpte |= (LX_BLKPAG_AF|LX_BLKPAG_AP_RW); \
175: tpte |= (LX_BLKPAG_UXN|LX_BLKPAG_PXN); \
176: atomic_swap_64((ptep), tpte); \
177: AARCH64_TLBI_BY_ASID_VA((pm)->pm_asid, (va), (ll)); \
178: cpu_icache_sync_range((va), PAGE_SIZE); \
179: } while (0/*CONSTCOND*/)
180:
1.2 ryo 181: struct pv_entry {
182: TAILQ_ENTRY(pv_entry) pv_link;
183: struct pmap *pv_pmap;
184: vaddr_t pv_va;
185: paddr_t pv_pa; /* debug */
186: pt_entry_t *pv_ptep; /* for fast pte lookup */
187: };
188:
1.27 ryo 189: static pt_entry_t *_pmap_pte_lookup_l3(struct pmap *, vaddr_t);
190: static pt_entry_t *_pmap_pte_lookup_bs(struct pmap *, vaddr_t, vsize_t *);
1.18 ryo 191: static pt_entry_t _pmap_pte_adjust_prot(pt_entry_t, vm_prot_t, vm_prot_t, bool);
1.2 ryo 192: static pt_entry_t _pmap_pte_adjust_cacheflags(pt_entry_t, u_int);
193: static void _pmap_remove(struct pmap *, vaddr_t, bool);
194: static int _pmap_enter(struct pmap *, vaddr_t, paddr_t, vm_prot_t, u_int, bool);
195:
1.1 matt 196: static struct pmap kernel_pmap;
197:
198: struct pmap * const kernel_pmap_ptr = &kernel_pmap;
1.2 ryo 199: static vaddr_t pmap_maxkvaddr;
200:
201: vaddr_t virtual_avail, virtual_end;
202: vaddr_t virtual_devmap_addr;
203:
204: static struct pool_cache _pmap_cache;
205: static struct pool_cache _pmap_pv_pool;
206:
207:
208: static inline void
209: pmap_pv_lock(struct vm_page_md *md)
210: {
211:
212: mutex_enter(&md->mdpg_pvlock);
213: }
214:
215: static inline void
216: pmap_pv_unlock(struct vm_page_md *md)
217: {
218:
219: mutex_exit(&md->mdpg_pvlock);
220: }
221:
1.11 ryo 222:
223: static inline void
224: pm_lock(struct pmap *pm)
225: {
226: mutex_enter(&pm->pm_lock);
227: }
228:
229: static inline void
230: pm_unlock(struct pmap *pm)
231: {
232: mutex_exit(&pm->pm_lock);
233: }
1.2 ryo 234:
1.28 ryo 235: #define IN_RANGE(va,sta,end) (((sta) <= (va)) && ((va) < (end)))
236:
1.2 ryo 237: #define IN_KSEG_ADDR(va) \
1.28 ryo 238: IN_RANGE((va), AARCH64_KSEG_START, AARCH64_KSEG_END)
239:
240: #define KASSERT_PM_ADDR(pm, va) \
241: do { \
242: if ((pm) == pmap_kernel()) { \
243: KASSERTMSG(IN_RANGE((va), VM_MIN_KERNEL_ADDRESS, \
244: VM_MAX_KERNEL_ADDRESS), \
245: "%s: kernel pm %p: va=%016lx" \
246: " is not kernel address\n", \
247: __func__, (pm), (va)); \
248: } else { \
249: KASSERTMSG(IN_RANGE((va), \
250: VM_MIN_ADDRESS, VM_MAX_ADDRESS), \
251: "%s: user pm %p: va=%016lx" \
252: " is not user address\n", \
253: __func__, (pm), (va)); \
254: } \
255: } while (0 /* CONSTCOND */)
1.2 ryo 256:
257:
258: static const struct pmap_devmap *pmap_devmap_table;
259:
260: static vsize_t
1.26 ryo 261: pmap_map_chunk(vaddr_t va, paddr_t pa, vsize_t size,
1.2 ryo 262: vm_prot_t prot, u_int flags)
263: {
264: pt_entry_t attr;
1.26 ryo 265: psize_t blocksize;
266: int rc;
1.2 ryo 267:
1.26 ryo 268: /* devmap always uses L2 mappings */
269: blocksize = L2_SIZE;
1.2 ryo 270:
1.18 ryo 271: attr = _pmap_pte_adjust_prot(L2_BLOCK, prot, VM_PROT_ALL, false);
1.2 ryo 272: attr = _pmap_pte_adjust_cacheflags(attr, flags | PMAP_DEV);
1.18 ryo 273: /* user cannot execute, and kernel follows the prot */
274: attr |= (LX_BLKPAG_UXN|LX_BLKPAG_PXN);
275: if (prot & VM_PROT_EXECUTE)
276: attr &= ~LX_BLKPAG_PXN;
1.2 ryo 277:
1.26 ryo 278: rc = pmapboot_enter(va, pa, size, blocksize, attr,
279: PMAPBOOT_ENTER_NOOVERWRITE, bootpage_alloc, NULL);
280: if (rc != 0)
281: panic("%s: pmapboot_enter failed. %lx is already mapped?\n",
282: __func__, va);
1.2 ryo 283:
1.26 ryo 284: aarch64_tlbi_by_va(va);
1.2 ryo 285:
1.26 ryo 286: return ((va + size + blocksize - 1) & ~(blocksize - 1)) - va;
1.2 ryo 287: }
1.1 matt 288:
289: void
1.2 ryo 290: pmap_devmap_register(const struct pmap_devmap *table)
1.1 matt 291: {
1.2 ryo 292: pmap_devmap_table = table;
1.1 matt 293: }
294:
295: void
1.2 ryo 296: pmap_devmap_bootstrap(const struct pmap_devmap *table)
297: {
298: vaddr_t va;
299: int i;
300:
301: pmap_devmap_register(table);
302:
1.16 skrll 303: VPRINTF("%s:\n", __func__);
1.2 ryo 304: for (i = 0; table[i].pd_size != 0; i++) {
1.16 skrll 305: VPRINTF(" devmap: pa %08lx-%08lx = va %016lx\n",
1.2 ryo 306: table[i].pd_pa,
307: table[i].pd_pa + table[i].pd_size - 1,
308: table[i].pd_va);
309: va = table[i].pd_va;
310:
1.26 ryo 311: KASSERT((VM_KERNEL_IO_ADDRESS <= va) &&
312: (va < (VM_KERNEL_IO_ADDRESS + VM_KERNEL_IO_SIZE)));
313:
1.2 ryo 314: /* update and check virtual_devmap_addr */
315: if ((virtual_devmap_addr == 0) ||
316: (virtual_devmap_addr > va)) {
317: virtual_devmap_addr = va;
318: }
319:
1.26 ryo 320: pmap_map_chunk(
1.2 ryo 321: table[i].pd_va,
322: table[i].pd_pa,
323: table[i].pd_size,
324: table[i].pd_prot,
325: table[i].pd_flags);
326: }
327: }
328:
329: const struct pmap_devmap *
330: pmap_devmap_find_va(vaddr_t va, vsize_t size)
331: {
332: vaddr_t endva;
333: int i;
334:
335: if (pmap_devmap_table == NULL)
336: return NULL;
337:
338: endva = va + size;
339: for (i = 0; pmap_devmap_table[i].pd_size != 0; i++) {
340: if ((va >= pmap_devmap_table[i].pd_va) &&
341: (endva <= pmap_devmap_table[i].pd_va +
342: pmap_devmap_table[i].pd_size))
343: return &pmap_devmap_table[i];
344: }
345: return NULL;
346: }
347:
348: const struct pmap_devmap *
349: pmap_devmap_find_pa(paddr_t pa, psize_t size)
350: {
351: paddr_t endpa;
352: int i;
353:
354: if (pmap_devmap_table == NULL)
355: return NULL;
356:
357: endpa = pa + size;
358: for (i = 0; pmap_devmap_table[i].pd_size != 0; i++) {
359: if (pa >= pmap_devmap_table[i].pd_pa &&
360: (endpa <= pmap_devmap_table[i].pd_pa +
361: pmap_devmap_table[i].pd_size))
362: return (&pmap_devmap_table[i]);
363: }
364: return NULL;
365: }
366:
367: vaddr_t
368: pmap_devmap_phystov(paddr_t pa)
1.1 matt 369: {
1.2 ryo 370: const struct pmap_devmap *table;
371: paddr_t offset;
372:
373: table = pmap_devmap_find_pa(pa, 0);
374: if (table == NULL)
375: return 0;
376:
377: offset = pa - table->pd_pa;
378: return table->pd_va + offset;
1.1 matt 379: }
380:
381: vaddr_t
1.2 ryo 382: pmap_devmap_vtophys(paddr_t va)
383: {
384: const struct pmap_devmap *table;
385: vaddr_t offset;
386:
387: table = pmap_devmap_find_va(va, 0);
388: if (table == NULL)
389: return 0;
390:
391: offset = va - table->pd_va;
392: return table->pd_pa + offset;
393: }
394:
395: void
396: pmap_bootstrap(vaddr_t vstart, vaddr_t vend)
397: {
398: struct pmap *kpm;
399: pd_entry_t *l0;
400: paddr_t l0pa;
401:
402: PMAP_HIST_INIT(); /* init once */
403:
404: UVMHIST_FUNC(__func__);
405: UVMHIST_CALLED(pmaphist);
406:
407: #if 0
408: /* uvmexp.ncolors = icachesize / icacheways / PAGE_SIZE; */
409: uvmexp.ncolors = aarch64_cache_vindexsize / PAGE_SIZE;
410: #endif
411:
412: /* does devmap already use the top of the va range? */
413: if ((virtual_devmap_addr != 0) && (virtual_devmap_addr < vend))
414: vend = virtual_devmap_addr;
415:
416: virtual_avail = vstart;
417: virtual_end = vend;
418: pmap_maxkvaddr = vstart;
419:
420: aarch64_tlbi_all();
421:
422: l0pa = reg_ttbr1_el1_read();
1.9 christos 423: l0 = (void *)AARCH64_PA_TO_KVA(l0pa);
1.2 ryo 424:
425: memset(&kernel_pmap, 0, sizeof(kernel_pmap));
426: kpm = pmap_kernel();
427: kpm->pm_asid = 0;
428: kpm->pm_refcnt = 1;
429: kpm->pm_l0table = l0;
430: kpm->pm_l0table_pa = l0pa;
431: kpm->pm_activated = true;
432: SLIST_INIT(&kpm->pm_vmlist);
433: mutex_init(&kpm->pm_lock, MUTEX_DEFAULT, IPL_VM);
434: }
435:
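/* cache color of the page containing addr; used for VIPT cache alias accounting */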
436: inline static int
437: _pmap_color(vaddr_t addr) /* or paddr_t */
438: {
439: return (addr >> PGSHIFT) & (uvmexp.ncolors - 1);
440: }
441:
442: static int
443: _pmap_pmap_ctor(void *arg, void *v, int flags)
1.1 matt 444: {
1.2 ryo 445: memset(v, 0, sizeof(struct pmap));
1.1 matt 446: return 0;
447: }
448:
1.2 ryo 449: static int
450: _pmap_pv_ctor(void *arg, void *v, int flags)
1.1 matt 451: {
1.2 ryo 452: memset(v, 0, sizeof(struct pv_entry));
453: return 0;
1.1 matt 454: }
455:
456: void
1.2 ryo 457: pmap_init(void)
1.1 matt 458: {
1.2 ryo 459: struct vm_page *pg;
460: struct vm_page_md *md;
461: uvm_physseg_t i;
462: paddr_t pfn;
463:
464: pool_cache_bootstrap(&_pmap_cache, sizeof(struct pmap),
465: 0, 0, 0, "pmappl", NULL, IPL_NONE, _pmap_pmap_ctor, NULL, NULL);
466: pool_cache_bootstrap(&_pmap_pv_pool, sizeof(struct pv_entry),
467: 0, 0, 0, "pvpl", NULL, IPL_VM, _pmap_pv_ctor, NULL, NULL);
468:
469: /*
470: * initialize vm_page_md:mdpg_pvlock at this time.
471: * When LOCKDEBUG is enabled, mutex_init() calls km_alloc,
472: * but VM_MDPAGE_INIT() is called before kmem_vm_arena is initialized.
473: */
474: for (i = uvm_physseg_get_first();
475: uvm_physseg_valid_p(i);
476: i = uvm_physseg_get_next(i)) {
477: for (pfn = uvm_physseg_get_start(i);
478: pfn < uvm_physseg_get_end(i);
479: pfn++) {
480: pg = PHYS_TO_VM_PAGE(ptoa(pfn));
481: md = VM_PAGE_TO_MD(pg);
482: mutex_init(&md->mdpg_pvlock, MUTEX_SPIN, IPL_VM);
483: }
484: }
485: }
486:
487: void
488: pmap_virtual_space(vaddr_t *vstartp, vaddr_t *vendp)
489: {
490: *vstartp = virtual_avail;
491: *vendp = virtual_end;
492: }
493:
494: vaddr_t
495: pmap_steal_memory(vsize_t size, vaddr_t *vstartp, vaddr_t *vendp)
496: {
497: int npage;
498: paddr_t pa;
499: vaddr_t va;
500: psize_t bank_npage;
501: uvm_physseg_t bank;
502:
503: UVMHIST_FUNC(__func__);
504: UVMHIST_CALLED(pmaphist);
505:
506: UVMHIST_LOG(pmaphist, "size=%llu, *vstartp=%llx, *vendp=%llx",
507: size, *vstartp, *vendp, 0);
508:
509: size = round_page(size);
510: npage = atop(size);
511:
512: for (bank = uvm_physseg_get_first(); uvm_physseg_valid_p(bank);
513: bank = uvm_physseg_get_next(bank)) {
514:
515: bank_npage = uvm_physseg_get_avail_end(bank) -
516: uvm_physseg_get_avail_start(bank);
517: if (npage <= bank_npage)
518: break;
519: }
520:
1.23 maxv 521: if (!uvm_physseg_valid_p(bank)) {
522: panic("%s: no memory", __func__);
523: }
1.2 ryo 524:
525: /* Steal pages */
526: pa = ptoa(uvm_physseg_get_avail_start(bank));
527: va = AARCH64_PA_TO_KVA(pa);
528: uvm_physseg_unplug(atop(pa), npage);
529:
530: for (; npage > 0; npage--, pa += PAGE_SIZE)
531: pmap_zero_page(pa);
532:
533: return va;
1.1 matt 534: }
535:
536: void
537: pmap_reference(struct pmap *pm)
538: {
1.2 ryo 539: atomic_inc_uint(&pm->pm_refcnt);
1.1 matt 540: }
541:
1.22 maxv 542: pd_entry_t *
543: pmap_alloc_pdp(struct pmap *pm, paddr_t *pap)
1.1 matt 544: {
1.2 ryo 545: paddr_t pa;
546:
547: UVMHIST_FUNC(__func__);
548: UVMHIST_CALLED(pmaphist);
549:
550: if (uvm.page_init_done) {
551: struct vm_page *pg;
552:
553: pg = uvm_pagealloc(NULL, 0, NULL,
554: UVM_PGA_USERESERVE | UVM_PGA_ZERO);
555: if (pg == NULL)
556: panic("%s: cannot allocate L3 table", __func__);
557: pa = VM_PAGE_TO_PHYS(pg);
558:
559: SLIST_INSERT_HEAD(&pm->pm_vmlist, pg, mdpage.mdpg_vmlist);
560: PMAP_COUNT(pdp_alloc);
561:
562: } else {
563: /* uvm_pageboot_alloc() returns AARCH64 KSEG address */
564: pa = AARCH64_KVA_TO_PA(
565: uvm_pageboot_alloc(Ln_TABLE_SIZE));
566: PMAP_COUNT(pdp_alloc_boot);
567: }
568: if (pap != NULL)
569: *pap = pa;
570:
571: UVMHIST_LOG(pmaphist, "pa=%llx, va=%llx",
572: pa, AARCH64_PA_TO_KVA(pa), 0, 0);
573:
1.9 christos 574: return (void *)AARCH64_PA_TO_KVA(pa);
1.1 matt 575: }
576:
1.2 ryo 577: static void
578: _pmap_free_pdp_all(struct pmap *pm)
1.1 matt 579: {
1.2 ryo 580: struct vm_page *pg, *tmp;
581:
582: SLIST_FOREACH_SAFE(pg, &pm->pm_vmlist, mdpage.mdpg_vmlist, tmp) {
583: uvm_pagefree(pg);
584: PMAP_COUNT(pdp_free);
585: }
1.1 matt 586: }
587:
588: vaddr_t
589: pmap_growkernel(vaddr_t maxkvaddr)
590: {
1.2 ryo 591: UVMHIST_FUNC(__func__);
592: UVMHIST_CALLED(pmaphist);
593:
594: UVMHIST_LOG(pmaphist, "maxkvaddr=%llx, pmap_maxkvaddr=%llx",
595: maxkvaddr, pmap_maxkvaddr, 0, 0);
596:
597: pmap_maxkvaddr = maxkvaddr;
598:
1.1 matt 599: return maxkvaddr;
600: }
601:
1.2 ryo 602: bool
603: pmap_extract_coherency(struct pmap *pm, vaddr_t va, paddr_t *pap,
604: bool *coherencyp)
605: {
606: if (coherencyp)
607: *coherencyp = false;
608:
609: return pmap_extract(pm, va, pap);
610: }
611:
612: bool
613: pmap_extract(struct pmap *pm, vaddr_t va, paddr_t *pap)
614: {
615: pt_entry_t *ptep;
616: paddr_t pa;
1.27 ryo 617: vsize_t blocksize = 0;
1.28 ryo 618: extern char __kernel_text[];
619: extern char _end[];
1.2 ryo 620:
1.28 ryo 621: if (IN_RANGE(va, (vaddr_t)__kernel_text, (vaddr_t)_end)) {
622: /* fast lookup */
623: pa = KERN_VTOPHYS(va);
624: } else if (IN_KSEG_ADDR(va)) {
625: /* fast lookup. should be used only if actually mapped? */
626: pa = AARCH64_KVA_TO_PA(va);
627: } else {
628: ptep = _pmap_pte_lookup_bs(pm, va, &blocksize);
629: if (ptep == NULL)
630: return false;
631: pa = lxpde_pa(*ptep) + (va & (blocksize - 1));
632: }
1.2 ryo 633:
1.27 ryo 634: if (pap != NULL)
1.2 ryo 635: *pap = pa;
1.27 ryo 636: return true;
1.2 ryo 637: }
638:
639: paddr_t
640: vtophys(vaddr_t va)
641: {
642: struct pmap *pm;
643: paddr_t pa;
644:
1.28 ryo 645: if (va & TTBR_SEL_VA)
646: pm = pmap_kernel();
647: else
1.2 ryo 648: pm = curlwp->l_proc->p_vmspace->vm_map.pmap;
1.28 ryo 649:
650: if (pmap_extract(pm, va, &pa) == false)
1.2 ryo 651: return VTOPHYS_FAILED;
652: return pa;
653: }
654:
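/*
 * lookup the page table entry mapping va in pm, walking L0 -> L1 -> L2 -> L3.
 * returns a pointer to the L1/L2 block entry or L3 page entry, or NULL if va
 * is not mapped or does not belong to pm's address space.  if bs is not NULL,
 * *bs is set to the mapping size at the level where the walk stopped
 * (0 if va is not in pm's address space).
 */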
655: static pt_entry_t *
1.27 ryo 656: _pmap_pte_lookup_bs(struct pmap *pm, vaddr_t va, vsize_t *bs)
1.2 ryo 657: {
1.27 ryo 658: pt_entry_t *ptep;
659: pd_entry_t *l0, *l1, *l2, *l3;
660: pd_entry_t pde;
661: vsize_t blocksize;
662: unsigned int idx;
663:
664: if (((pm == pmap_kernel()) && ((va & TTBR_SEL_VA) == 0)) ||
665: ((pm != pmap_kernel()) && ((va & TTBR_SEL_VA) != 0))) {
666: blocksize = 0;
667: ptep = NULL;
668: goto done;
669: }
670:
671: /*
672: * traverse L0 -> L1 -> L2 -> L3
673: */
674: blocksize = L0_SIZE;
675: l0 = pm->pm_l0table;
676: idx = l0pde_index(va);
677: pde = l0[idx];
678: if (!l0pde_valid(pde)) {
679: ptep = NULL;
680: goto done;
681: }
682:
683: blocksize = L1_SIZE;
684: l1 = (pd_entry_t *)AARCH64_PA_TO_KVA(l0pde_pa(pde));
685: idx = l1pde_index(va);
686: pde = l1[idx];
687: if (!l1pde_valid(pde)) {
688: ptep = NULL;
689: goto done;
690: }
691: if (l1pde_is_block(pde)) {
692: ptep = &l1[idx];
693: goto done;
694: }
695:
696: blocksize = L2_SIZE;
697: l2 = (pd_entry_t *)AARCH64_PA_TO_KVA(l1pde_pa(pde));
698: idx = l2pde_index(va);
699: pde = l2[idx];
700: if (!l2pde_valid(pde)) {
701: ptep = NULL;
702: goto done;
703: }
704: if (l2pde_is_block(pde)) {
705: ptep = &l2[idx];
706: goto done;
707: }
1.2 ryo 708:
1.27 ryo 709: blocksize = L3_SIZE;
710: l3 = (pd_entry_t *)AARCH64_PA_TO_KVA(l2pde_pa(pde));
711: idx = l3pte_index(va);
712: pde = l3[idx];
713: if (!l3pte_valid(pde)) {
714: ptep = NULL;
715: goto done;
716: }
717: ptep = &l3[idx];
1.2 ryo 718:
1.27 ryo 719: done:
720: if (bs != NULL)
721: *bs = blocksize;
722: return ptep;
723: }
1.2 ryo 724:
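/*
 * as _pmap_pte_lookup_bs(), but return the pte only if va is mapped by an
 * L3 page entry; NULL for unmapped va or L1/L2 block mappings.
 */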
1.27 ryo 725: static pt_entry_t *
726: _pmap_pte_lookup_l3(struct pmap *pm, vaddr_t va)
727: {
728: pt_entry_t *ptep;
729: vsize_t blocksize = 0;
730:
731: ptep = _pmap_pte_lookup_bs(pm, va, &blocksize);
732: if ((ptep != NULL) && (blocksize == L3_SIZE))
1.2 ryo 733: return ptep;
734:
735: return NULL;
736: }
737:
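/*
 * synchronize the instruction cache for [sva, eva) in pm.  read-only pages
 * are temporarily switched to writable, since write permission is required
 * to sync the icache (see the comment above PTE_ICACHE_SYNC_PAGE).
 */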
1.29 ! ryo 738: void
! 739: pmap_icache_sync_range(pmap_t pm, vaddr_t sva, vaddr_t eva)
! 740: {
! 741: pt_entry_t *ptep, pte;
! 742: vaddr_t va;
! 743: vsize_t blocksize = 0;
! 744:
! 745: pm_lock(pm);
! 746:
! 747: for (va = sva; va < eva; va += blocksize) {
! 748: ptep = _pmap_pte_lookup_bs(pm, va, &blocksize);
! 749: if (blocksize == 0)
! 750: break;
! 751: if (ptep != NULL) {
! 752: vaddr_t eob = (va + blocksize) & ~(blocksize - 1);
! 753: vsize_t len = ulmin(eva, eob) - va;
! 754:
! 755: pte = *ptep;
! 756: if (l3pte_writable(pte)) {
! 757: cpu_icache_sync_range(va, len);
! 758: } else {
! 759: /*
! 760: * change to writable temporarily
! 761: * to do cpu_icache_sync_range()
! 762: */
! 763: pt_entry_t opte = pte;
! 764: pte = pte & ~(LX_BLKPAG_AF|LX_BLKPAG_AP);
! 765: pte |= (LX_BLKPAG_AF|LX_BLKPAG_AP_RW);
! 766: atomic_swap_64(ptep, pte);
! 767: AARCH64_TLBI_BY_ASID_VA(pm->pm_asid, va, true);
! 768: cpu_icache_sync_range(va, len);
! 769: atomic_swap_64(ptep, opte);
! 770: AARCH64_TLBI_BY_ASID_VA(pm->pm_asid, va, true);
! 771: }
! 772: va &= ~(blocksize - 1);
! 773: }
! 774: }
! 775:
! 776: pm_unlock(pm);
! 777: }
! 778:
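/*
 * build the protection bits of a pte.  the protection requested by the caller
 * is recorded in the software bits (LX_BLKPAG_OS_READ/WRITE) for
 * referenced/modified emulation, while the real AF/AP bits are derived from
 * (prot & protmask).  UXN/PXN are both set first, then the XN bit of the
 * requesting side (user or kernel) is cleared if VM_PROT_EXECUTE is requested.
 */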
1.2 ryo 779: static pt_entry_t
1.18 ryo 780: _pmap_pte_adjust_prot(pt_entry_t pte, vm_prot_t prot, vm_prot_t protmask,
781: bool user)
1.1 matt 782: {
1.2 ryo 783: vm_prot_t masked;
1.18 ryo 784: pt_entry_t xn;
1.2 ryo 785:
786: masked = prot & protmask;
787: pte &= ~(LX_BLKPAG_OS_RWMASK|LX_BLKPAG_AF|LX_BLKPAG_AP);
788:
789: /* keep prot for ref/mod emulation */
790: switch (prot & (VM_PROT_READ|VM_PROT_WRITE)) {
791: case 0:
792: default:
793: break;
794: case VM_PROT_READ:
795: pte |= LX_BLKPAG_OS_READ;
796: break;
797: case VM_PROT_WRITE:
798: case VM_PROT_READ|VM_PROT_WRITE:
799: pte |= (LX_BLKPAG_OS_READ|LX_BLKPAG_OS_WRITE);
800: break;
801: }
802:
803: switch (masked & (VM_PROT_READ|VM_PROT_WRITE)) {
804: case 0:
805: default:
806: /* cannot be accessed because LX_BLKPAG_AF is not set */
807: pte |= LX_BLKPAG_AP_RO;
808: break;
809: case VM_PROT_READ:
810: /* actual permission of pte */
811: pte |= LX_BLKPAG_AF;
812: pte |= LX_BLKPAG_AP_RO;
813: break;
814: case VM_PROT_WRITE:
815: case VM_PROT_READ|VM_PROT_WRITE:
816: /* actual permission of pte */
817: pte |= LX_BLKPAG_AF;
818: pte |= LX_BLKPAG_AP_RW;
819: break;
820: }
821:
1.18 ryo 822: /* executable for kernel or user? first mark both as never-execute */
 823: pte |= (LX_BLKPAG_UXN|LX_BLKPAG_PXN);
 824: /* then clear the XN bit only for the requested side */
825: xn = user ? LX_BLKPAG_UXN : LX_BLKPAG_PXN;
826: if (prot & VM_PROT_EXECUTE)
827: pte &= ~xn;
1.2 ryo 828:
829: return pte;
830: }
831:
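/*
 * select the memory attribute of a pte from the PMAP_* cache flags:
 * device memory, normal non-cacheable, or normal write-back.
 */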
832: static pt_entry_t
833: _pmap_pte_adjust_cacheflags(pt_entry_t pte, u_int flags)
834: {
835:
836: pte &= ~LX_BLKPAG_ATTR_MASK;
837:
838: switch (flags & (PMAP_CACHE_MASK|PMAP_DEV)) {
839: case PMAP_DEV ... PMAP_DEV | PMAP_CACHE_MASK:
840: pte |= LX_BLKPAG_ATTR_DEVICE_MEM; /* nGnRnE */
841: break;
842: case PMAP_NOCACHE:
843: case PMAP_NOCACHE_OVR:
844: case PMAP_WRITE_COMBINE:
845: pte |= LX_BLKPAG_ATTR_NORMAL_NC; /* only no-cache */
846: break;
847: case PMAP_WRITE_BACK:
848: case 0:
849: default:
850: pte |= LX_BLKPAG_ATTR_NORMAL_WB;
851: break;
852: }
853:
854: return pte;
855: }
856:
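/*
 * unlink the pv_entry of (pm, va) from pg's pv list and return it, or NULL
 * if no matching entry is found.  the caller frees the returned pv_entry
 * after releasing its locks.
 */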
1.14 ryo 857: static struct pv_entry *
1.2 ryo 858: _pmap_remove_pv(struct vm_page *pg, struct pmap *pm, vaddr_t va, pt_entry_t pte)
859: {
860: struct vm_page_md *md;
861: struct pv_entry *pv;
862:
863: UVMHIST_FUNC(__func__);
864: UVMHIST_CALLED(pmaphist);
865:
866: UVMHIST_LOG(pmaphist, "pg=%p, pm=%p, va=%llx, pte=%llx",
867: pg, pm, va, pte);
868:
869: md = VM_PAGE_TO_MD(pg);
870:
871: pmap_pv_lock(md);
872:
873: TAILQ_FOREACH(pv, &md->mdpg_pvhead, pv_link) {
874: if ((pm == pv->pv_pmap) && (va == pv->pv_va)) {
1.5 ryo 875: TAILQ_REMOVE(&md->mdpg_pvhead, pv, pv_link);
876: PMAP_COUNT(pv_remove);
1.2 ryo 877: break;
878: }
879: }
1.5 ryo 880: #ifdef PMAPCOUNTERS
881: if (pv == NULL) {
882: PMAP_COUNT(pv_remove_nopv);
883: }
884: #endif
1.2 ryo 885:
886: pmap_pv_unlock(md);
887:
1.14 ryo 888: return pv;
1.2 ryo 889: }
890:
891: #if defined(PMAP_PV_DEBUG) || defined(DDB)
892:
893: static char *
894: str_vmflags(uint32_t flags)
895: {
896: static int idx = 0;
897: static char buf[4][32]; /* XXX */
898: char *p;
899:
900: p = buf[idx];
901: idx = (idx + 1) & 3;
902:
903: p[0] = (flags & VM_PROT_READ) ? 'R' : '-';
904: p[1] = (flags & VM_PROT_WRITE) ? 'W' : '-';
905: p[2] = (flags & VM_PROT_EXECUTE) ? 'X' : '-';
906: if (flags & PMAP_WIRED)
907: memcpy(&p[3], ",WIRED\0", 7);
908: else
909: p[3] = '\0';
910:
911: return p;
912: }
913:
914: static void
915: pg_dump(struct vm_page *pg, void (*pr)(const char *, ...))
916: {
917: pr("pg=%p\n", pg);
918: pr(" pg->uanon = %p\n", pg->uanon);
919: pr(" pg->uobject = %p\n", pg->uobject);
920: pr(" pg->offset = %zu\n", pg->offset);
921: pr(" pg->flags = %u\n", pg->flags);
922: pr(" pg->loan_count = %u\n", pg->loan_count);
923: pr(" pg->wire_count = %u\n", pg->wire_count);
924: pr(" pg->pqflags = %u\n", pg->pqflags);
925: pr(" pg->phys_addr = %016lx\n", pg->phys_addr);
926: }
927:
928: static void
929: pv_dump(struct vm_page_md *md, void (*pr)(const char *, ...))
930: {
931: struct pv_entry *pv;
932: int i;
933:
934: i = 0;
935:
936: pr("md=%p\n", md);
937: pr(" md->mdpg_flags=%08x %s\n", md->mdpg_flags,
938: str_vmflags(md->mdpg_flags));
939:
940: TAILQ_FOREACH(pv, &md->mdpg_pvhead, pv_link) {
941: pr(" pv[%d] pv=%p\n",
942: i, pv);
943: pr(" pv[%d].pv_pmap =%p (asid=%d)\n",
944: i, pv->pv_pmap, pv->pv_pmap->pm_asid);
945: pr(" pv[%d].pv_va =%016lx (color=%d)\n",
946: i, pv->pv_va, _pmap_color(pv->pv_va));
947: pr(" pv[%d].pv_pa =%016lx (color=%d)\n",
948: i, pv->pv_pa, _pmap_color(pv->pv_pa));
949: i++;
950: }
951: }
952: #endif /* PMAP_PV_DEBUG & DDB */
953:
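/*
 * register the mapping (pm, va) in pg's pv list.  the caller preallocates a
 * pv_entry and passes it via *pvp; it is consumed (*pvp set to NULL) only if
 * a new entry is actually linked.  returns ENOMEM if a new entry is needed
 * but *pvp is NULL.
 */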
954: static int
1.12 ryo 955: _pmap_enter_pv(struct vm_page *pg, struct pmap *pm, struct pv_entry **pvp, vaddr_t va,
1.2 ryo 956: pt_entry_t *ptep, paddr_t pa, u_int flags)
957: {
958: struct vm_page_md *md;
959: struct pv_entry *pv;
960:
961: UVMHIST_FUNC(__func__);
962: UVMHIST_CALLED(pmaphist);
963:
964: UVMHIST_LOG(pmaphist, "pg=%p, pm=%p, va=%llx, pa=%llx", pg, pm, va, pa);
965: UVMHIST_LOG(pmaphist, "ptep=%p, flags=%08x", ptep, flags, 0, 0);
966:
967: md = VM_PAGE_TO_MD(pg);
968:
969: pmap_pv_lock(md);
970:
971: /* pv is already registered? */
972: TAILQ_FOREACH(pv, &md->mdpg_pvhead, pv_link) {
973: if ((pm == pv->pv_pmap) && (va == pv->pv_va)) {
974: break;
975: }
976: }
977:
1.3 ryo 978: if (pv == NULL) {
1.2 ryo 979: pmap_pv_unlock(md);
980:
1.12 ryo 981: /*
982: * create and link a new pv.
983: * the pv was already allocated at the beginning of _pmap_enter().
984: */
985: pv = *pvp;
1.2 ryo 986: if (pv == NULL)
987: return ENOMEM;
1.12 ryo 988: *pvp = NULL;
1.2 ryo 989:
990: pv->pv_pmap = pm;
991: pv->pv_va = va;
992: pv->pv_pa = pa;
993: pv->pv_ptep = ptep;
994:
995: pmap_pv_lock(md);
996: TAILQ_INSERT_HEAD(&md->mdpg_pvhead, pv, pv_link);
997: PMAP_COUNT(pv_enter);
998:
999: #ifdef PMAP_PV_DEBUG
1000: if (!TAILQ_EMPTY(&md->mdpg_pvhead)){
1001: printf("pv %p alias added va=%016lx -> pa=%016lx\n",
1002: pv, va, pa);
1003: pv_dump(md, printf);
1004: }
1005: #endif
1006: }
1007: pmap_pv_unlock(md);
1.1 matt 1008: return 0;
1009: }
1010:
1011: void
1.2 ryo 1012: pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
1.1 matt 1013: {
1.2 ryo 1014: int s;
1015:
1016: s = splvm();
1017: _pmap_enter(pmap_kernel(), va, pa, prot, flags | PMAP_WIRED, true);
1018: splx(s);
1.1 matt 1019: }
1020:
1021: void
1.2 ryo 1022: pmap_kremove(vaddr_t va, vsize_t size)
1.1 matt 1023: {
1.2 ryo 1024: struct pmap *kpm = pmap_kernel();
1025: vaddr_t eva;
1.3 ryo 1026: int s;
1.2 ryo 1027:
1028: UVMHIST_FUNC(__func__);
1029: UVMHIST_CALLED(pmaphist);
1030:
1031: UVMHIST_LOG(pmaphist, "va=%llx, size=%llx", va, size, 0, 0);
1032:
1033: KDASSERT((va & PGOFSET) == 0);
1034: KDASSERT((size & PGOFSET) == 0);
1035:
1.28 ryo 1036: KDASSERT(!IN_KSEG_ADDR(va));
1.2 ryo 1037:
1038: eva = va + size;
1.28 ryo 1039: KDASSERT(IN_RANGE(va, VM_MIN_KERNEL_ADDRESS, VM_MAX_KERNEL_ADDRESS));
1.2 ryo 1040:
1.3 ryo 1041: s = splvm();
1.2 ryo 1042: for (; va < eva; va += PAGE_SIZE) {
1043: _pmap_remove(kpm, va, true);
1044: }
1.3 ryo 1045: splx(s);
1.2 ryo 1046: }
1047:
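/*
 * restrict the protection of the single mapping described by pv to prot,
 * masked by the page's referenced/modified state (mdpg_flags), and flush
 * the corresponding TLB entry.
 */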
1048: static void
1049: _pmap_protect_pv(struct vm_page *pg, struct pv_entry *pv, vm_prot_t prot)
1050: {
1051: pt_entry_t *ptep, pte;
1052: vm_prot_t pteprot;
1053: uint32_t mdattr;
1.18 ryo 1054: const bool user = (pv->pv_pmap != pmap_kernel());
1.2 ryo 1055:
1056: UVMHIST_FUNC(__func__);
1057: UVMHIST_CALLED(pmaphist);
1058:
1059: UVMHIST_LOG(pmaphist, "pg=%p, pv=%p, prot=%08x", pg, pv, prot, 0);
1060:
1061: /* get prot mask from referenced/modified */
1062: mdattr = VM_PAGE_TO_MD(pg)->mdpg_flags &
1063: (VM_PROT_READ | VM_PROT_WRITE);
1064:
1.11 ryo 1065: pm_lock(pv->pv_pmap);
1.2 ryo 1066:
1067: ptep = pv->pv_ptep;
1068: pte = *ptep;
1069:
1070: /* get prot mask from pte */
1071: pteprot = 0;
1.3 ryo 1072: if (pte & LX_BLKPAG_AF)
1.2 ryo 1073: pteprot |= VM_PROT_READ;
1.3 ryo 1074: if ((pte & LX_BLKPAG_AP) == LX_BLKPAG_AP_RW)
1.2 ryo 1075: pteprot |= VM_PROT_WRITE;
1.18 ryo 1076: if (l3pte_executable(pte, user))
1.2 ryo 1077: pteprot |= VM_PROT_EXECUTE;
1078:
1079: /* new prot = prot & pteprot & mdattr */
1.18 ryo 1080: pte = _pmap_pte_adjust_prot(pte, prot & pteprot, mdattr, user);
1.2 ryo 1081: atomic_swap_64(ptep, pte);
1.19 ryo 1082: AARCH64_TLBI_BY_ASID_VA(pv->pv_pmap->pm_asid, pv->pv_va, true);
1.2 ryo 1083:
1.11 ryo 1084: pm_unlock(pv->pv_pmap);
1.1 matt 1085: }
1086:
1087: void
1088: pmap_protect(struct pmap *pm, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
1089: {
1.2 ryo 1090: vaddr_t va;
1.18 ryo 1091: const bool user = (pm != pmap_kernel());
1.2 ryo 1092:
1093: KASSERT((prot & VM_PROT_READ) || !(prot & VM_PROT_WRITE));
1094:
1095: UVMHIST_FUNC(__func__);
1096: UVMHIST_CALLED(pmaphist);
1097:
1098: UVMHIST_LOG(pmaphist, "pm=%p, sva=%016lx, eva=%016lx, prot=%08x",
1099: pm, sva, eva, prot);
1100:
1.28 ryo 1101: KASSERT_PM_ADDR(pm, sva);
1.2 ryo 1102: KASSERT(!IN_KSEG_ADDR(sva));
1103:
1104: if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
1105: PMAP_COUNT(protect_remove_fallback);
1106: pmap_remove(pm, sva, eva);
1107: return;
1108: }
1109: PMAP_COUNT(protect);
1110:
1111: KDASSERT((sva & PAGE_MASK) == 0);
1112: KDASSERT((eva & PAGE_MASK) == 0);
1113:
1.11 ryo 1114: pm_lock(pm);
1.2 ryo 1115:
1116: for (va = sva; va < eva; va += PAGE_SIZE) {
1117: pt_entry_t *ptep, pte;
1.13 ryo 1118: #ifdef UVMHIST
1119: pt_entry_t opte;
1120: #endif
1.2 ryo 1121: struct vm_page *pg;
1122: paddr_t pa;
1123: uint32_t mdattr;
1124: bool executable;
1125:
1.27 ryo 1126: ptep = _pmap_pte_lookup_l3(pm, va);
1.2 ryo 1127: if (ptep == NULL) {
1128: PMAP_COUNT(protect_none);
1129: continue;
1130: }
1131:
1132: pte = *ptep;
1133:
1134: if (!l3pte_valid(pte)) {
1135: PMAP_COUNT(protect_none);
1136: continue;
1137: }
1138:
1139: pa = l3pte_pa(pte);
1140: pg = PHYS_TO_VM_PAGE(pa);
1141:
1142: if (pg != NULL) {
1.3 ryo 1143: /* get prot mask from referenced/modified */
1.2 ryo 1144: mdattr = VM_PAGE_TO_MD(pg)->mdpg_flags &
1145: (VM_PROT_READ | VM_PROT_WRITE);
1146: PMAP_COUNT(protect_managed);
1147: } else {
1148: /* unmanaged page */
1149: mdattr = VM_PROT_ALL;
1150: PMAP_COUNT(protect_unmanaged);
1151: }
1152:
1153: pte = *ptep;
1.13 ryo 1154: #ifdef UVMHIST
1155: opte = pte;
1156: #endif
1.18 ryo 1157: executable = l3pte_executable(pte, user);
1158: pte = _pmap_pte_adjust_prot(pte, prot, mdattr, user);
1.13 ryo 1159:
1160: if (!executable && (prot & VM_PROT_EXECUTE)) {
1161: /* non-exec -> exec */
1.15 ryo 1162: UVMHIST_LOG(pmaphist, "icache_sync: "
1163: "pm=%p, va=%016lx, pte: %016lx -> %016lx",
1.13 ryo 1164: pm, va, opte, pte);
1.15 ryo 1165: if (!l3pte_writable(pte)) {
1.19 ryo 1166: PTE_ICACHE_SYNC_PAGE(pte, ptep, pm, va, true);
1.15 ryo 1167: atomic_swap_64(ptep, pte);
1.19 ryo 1168: AARCH64_TLBI_BY_ASID_VA(pm->pm_asid, va, true);
1.15 ryo 1169: } else {
1170: atomic_swap_64(ptep, pte);
1.19 ryo 1171: AARCH64_TLBI_BY_ASID_VA(pm->pm_asid, va, true);
1.15 ryo 1172: cpu_icache_sync_range(va, PAGE_SIZE);
1173: }
1174: } else {
1175: atomic_swap_64(ptep, pte);
1.19 ryo 1176: AARCH64_TLBI_BY_ASID_VA(pm->pm_asid, va, true);
1.15 ryo 1177: }
1.2 ryo 1178: }
1179:
1.11 ryo 1180: pm_unlock(pm);
1.1 matt 1181: }
1182:
1183: void
1.2 ryo 1184: pmap_activate(struct lwp *l)
1185: {
1186: struct pmap *pm = l->l_proc->p_vmspace->vm_map.pmap;
1187: uint64_t ttbr0;
1188:
1189: UVMHIST_FUNC(__func__);
1190: UVMHIST_CALLED(pmaphist);
1191:
1192: if (pm == pmap_kernel())
1193: return;
1194: if (l != curlwp)
1195: return;
1196:
1197: KASSERT(pm->pm_l0table != NULL);
1198:
1199: UVMHIST_LOG(pmaphist, "lwp=%p (pid=%d)", l, l->l_proc->p_pid, 0, 0);
1200:
1201: /* XXX */
1202: CTASSERT(PID_MAX <= 65535); /* 16bit ASID */
1203: if (pm->pm_asid == -1)
1204: pm->pm_asid = l->l_proc->p_pid;
1205:
1206: ttbr0 = ((uint64_t)pm->pm_asid << 48) | pm->pm_l0table_pa;
1207: aarch64_set_ttbr0(ttbr0);
1208:
1209: pm->pm_activated = true;
1210:
1211: PMAP_COUNT(activate);
1212: }
1213:
1214: void
1215: pmap_deactivate(struct lwp *l)
1.1 matt 1216: {
1.2 ryo 1217: struct pmap *pm = l->l_proc->p_vmspace->vm_map.pmap;
1218:
1219: UVMHIST_FUNC(__func__);
1220: UVMHIST_CALLED(pmaphist);
1221:
1222: if (pm == pmap_kernel())
1223: return;
1224:
1225: UVMHIST_LOG(pmaphist, "lwp=%p, asid=%d", l, pm->pm_asid, 0, 0);
1226:
1227: /* XXX */
1228: pm->pm_activated = false;
1229:
1230: PMAP_COUNT(deactivate);
1.1 matt 1231: }
1232:
1.2 ryo 1233: struct pmap *
1234: pmap_create(void)
1.1 matt 1235: {
1.2 ryo 1236: struct pmap *pm;
1237:
1238: UVMHIST_FUNC(__func__);
1239: UVMHIST_CALLED(pmaphist);
1240:
1241: pm = pool_cache_get(&_pmap_cache, PR_WAITOK);
1242: memset(pm, 0, sizeof(*pm));
1243: pm->pm_refcnt = 1;
1244: pm->pm_asid = -1;
1.6 ryo 1245: SLIST_INIT(&pm->pm_vmlist);
1246: mutex_init(&pm->pm_lock, MUTEX_DEFAULT, IPL_VM);
1.22 maxv 1247: pm->pm_l0table = pmap_alloc_pdp(pm, &pm->pm_l0table_pa);
1.2 ryo 1248: KASSERT(((vaddr_t)pm->pm_l0table & (PAGE_SIZE - 1)) == 0);
1249:
1250: UVMHIST_LOG(pmaphist, "pm=%p, pm_l0table=%016lx, pm_l0table_pa=%016lx",
1251: pm, pm->pm_l0table, pm->pm_l0table_pa, 0);
1252:
1253: PMAP_COUNT(create);
1254: return pm;
1.1 matt 1255: }
1256:
1257: void
1.2 ryo 1258: pmap_destroy(struct pmap *pm)
1259: {
1260: unsigned int refcnt;
1261:
1262: UVMHIST_FUNC(__func__);
1263: UVMHIST_CALLED(pmaphist);
1264:
1265: UVMHIST_LOG(pmaphist,
1266: "pm=%p, pm_l0table=%016lx, pm_l0table_pa=%016lx, refcnt=%d",
1267: pm, pm->pm_l0table, pm->pm_l0table_pa, pm->pm_refcnt);
1268:
1269: if (pm == NULL)
1270: return;
1271:
1272: if (pm == pmap_kernel())
1273: panic("cannot destroy kernel pmap");
1274:
1275: refcnt = atomic_dec_uint_nv(&pm->pm_refcnt);
1276: if (refcnt > 0)
1277: return;
1278:
1279: aarch64_tlbi_by_asid(pm->pm_asid);
1280:
1281: _pmap_free_pdp_all(pm);
1282: mutex_destroy(&pm->pm_lock);
1283: pool_cache_put(&_pmap_cache, pm);
1284:
1285: PMAP_COUNT(destroy);
1286: }
1287:
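/*
 * common body of pmap_enter() and pmap_kenter_pa().  grows the L0..L3 page
 * tables as needed, links a pv_entry for managed pages (not for kenter),
 * installs the pte, and synchronizes the icache when a new executable
 * mapping is created.
 */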
1288: static int
1289: _pmap_enter(struct pmap *pm, vaddr_t va, paddr_t pa, vm_prot_t prot,
1290: u_int flags, bool kenter)
1291: {
1292: struct vm_page *pg;
1.14 ryo 1293: struct pv_entry *spv, *opv = NULL;
1.2 ryo 1294: pd_entry_t pde;
1295: pt_entry_t attr, pte, *ptep;
1.13 ryo 1296: #ifdef UVMHIST
1297: pt_entry_t opte;
1298: #endif
1.2 ryo 1299: pd_entry_t *l0, *l1, *l2, *l3;
1300: paddr_t pdppa;
1301: uint32_t mdattr;
1302: unsigned int idx;
1303: int error = 0;
1304: const bool user = (pm != pmap_kernel());
1305: bool executable;
1.19 ryo 1306: bool l3only = true;
1.2 ryo 1307:
1308: UVMHIST_FUNC(__func__);
1309: UVMHIST_CALLED(pmaphist);
1310:
1311: UVMHIST_LOG(pmaphist, "pm=%p, kentermode=%d", pm, kenter, 0, 0);
1312: UVMHIST_LOG(pmaphist, "va=%016lx, pa=%016lx, prot=%08x, flags=%08x",
1313: va, pa, prot, flags);
1314:
1.28 ryo 1315: KASSERT_PM_ADDR(pm, va);
1316: KASSERT(!IN_KSEG_ADDR(va));
1.2 ryo 1317:
1318: #ifdef PMAPCOUNTERS
1319: PMAP_COUNT(mappings);
1320: if (_pmap_color(va) == _pmap_color(pa)) {
1321: if (user) {
1322: PMAP_COUNT(user_mappings);
1323: } else {
1324: PMAP_COUNT(kern_mappings);
1325: }
1326: } else if (flags & PMAP_WIRED) {
1327: if (user) {
1328: PMAP_COUNT(user_mappings_bad_wired);
1329: } else {
1330: PMAP_COUNT(kern_mappings_bad_wired);
1331: }
1332: } else {
1333: if (user) {
1334: PMAP_COUNT(user_mappings_bad);
1335: } else {
1336: PMAP_COUNT(kern_mappings_bad);
1337: }
1338: }
1339: #endif
1340:
1.3 ryo 1341: if (kenter)
1342: pg = NULL;
1343: else
1344: pg = PHYS_TO_VM_PAGE(pa);
1345:
1.12 ryo 1346: if (pg != NULL) {
1347: PMAP_COUNT(managed_mappings);
1348: /*
1349: * allocate the pv in advance of pm_lock() to avoid locking against ourselves.
1350: * pool_cache_get() may call pmap_kenter() internally.
1351: */
1352: spv = pool_cache_get(&_pmap_pv_pool, PR_NOWAIT);
1353: } else {
1.2 ryo 1354: PMAP_COUNT(unmanaged_mappings);
1.12 ryo 1355: spv = NULL;
1.2 ryo 1356: }
1357:
1.12 ryo 1358: pm_lock(pm);
1.2 ryo 1359:
1360: /*
1361: * traverse L0 -> L1 -> L2 -> L3 table with growing pdp if needed.
1362: */
1363: l0 = pm->pm_l0table;
1364:
1365: idx = l0pde_index(va);
1366: pde = l0[idx];
1367: if (!l0pde_valid(pde)) {
1.22 maxv 1368: pmap_alloc_pdp(pm, &pdppa);
1.2 ryo 1369: KASSERT(pdppa != POOL_PADDR_INVALID);
1370: atomic_swap_64(&l0[idx], pdppa | L0_TABLE);
1.19 ryo 1371: l3only = false;
1.2 ryo 1372: } else {
1373: pdppa = l0pde_pa(pde);
1374: }
1.9 christos 1375: l1 = (void *)AARCH64_PA_TO_KVA(pdppa);
1.2 ryo 1376:
1377: idx = l1pde_index(va);
1378: pde = l1[idx];
1379: if (!l1pde_valid(pde)) {
1.22 maxv 1380: pmap_alloc_pdp(pm, &pdppa);
1.2 ryo 1381: KASSERT(pdppa != POOL_PADDR_INVALID);
1382: atomic_swap_64(&l1[idx], pdppa | L1_TABLE);
1.19 ryo 1383: l3only = false;
1.2 ryo 1384: } else {
1385: pdppa = l1pde_pa(pde);
1386: }
1.9 christos 1387: l2 = (void *)AARCH64_PA_TO_KVA(pdppa);
1.2 ryo 1388:
1389: idx = l2pde_index(va);
1390: pde = l2[idx];
1391: if (!l2pde_valid(pde)) {
1.22 maxv 1392: pmap_alloc_pdp(pm, &pdppa);
1.2 ryo 1393: KASSERT(pdppa != POOL_PADDR_INVALID);
1394: atomic_swap_64(&l2[idx], pdppa | L2_TABLE);
1.19 ryo 1395: l3only = false;
1.2 ryo 1396: } else {
1397: pdppa = l2pde_pa(pde);
1398: }
1.9 christos 1399: l3 = (void *)AARCH64_PA_TO_KVA(pdppa);
1.2 ryo 1400:
1401: idx = l3pte_index(va);
1402: ptep = &l3[idx]; /* as PTE */
1403:
1404: pte = *ptep;
1.13 ryo 1405: #ifdef UVMHIST
1406: opte = pte;
1407: #endif
1.18 ryo 1408: executable = l3pte_executable(pte, user);
1.2 ryo 1409:
1410: if (l3pte_valid(pte)) {
1411: KASSERT(!kenter); /* pmap_kenter_pa() cannot override */
1412:
1413: PMAP_COUNT(remappings);
1414:
1415: /* the pte already holds a valid mapping */
1416: if (l3pte_pa(pte) != pa) {
1417: struct vm_page *opg;
1418:
1419: #ifdef PMAPCOUNTERS
1420: if (user) {
1421: PMAP_COUNT(user_mappings_changed);
1422: } else {
1423: PMAP_COUNT(kern_mappings_changed);
1424: }
1425: #endif
1426:
1427: UVMHIST_LOG(pmaphist,
1428: "va=%016lx has already mapped."
1429: " old-pa=%016lx new-pa=%016lx, pte=%016llx\n",
1430: va, l3pte_pa(pte), pa, pte);
1431:
1432: opg = PHYS_TO_VM_PAGE(l3pte_pa(pte));
1.3 ryo 1433: if (opg != NULL)
1.14 ryo 1434: opv = _pmap_remove_pv(opg, pm, va, pte);
1.3 ryo 1435: }
1.2 ryo 1436:
1.3 ryo 1437: if (pte & LX_BLKPAG_OS_WIRED)
1438: pm->pm_stats.wired_count--;
1439: pm->pm_stats.resident_count--;
1.2 ryo 1440: }
1441:
1.20 ryo 1442: /*
1443: * read permission is treated as an access permission internally.
1444: * we must add VM_PROT_READ even if only PROT_WRITE or PROT_EXEC
1445: * was requested for a wired mapping.
1446: */
1447: if ((flags & PMAP_WIRED) && (prot & (VM_PROT_WRITE|VM_PROT_EXECUTE)))
1448: prot |= VM_PROT_READ;
1449:
1.2 ryo 1450: mdattr = VM_PROT_READ | VM_PROT_WRITE;
1.3 ryo 1451: if (pg != NULL) {
1.12 ryo 1452: error = _pmap_enter_pv(pg, pm, &spv, va, ptep, pa, flags);
1.11 ryo 1453:
1.3 ryo 1454: if (error != 0) {
1.11 ryo 1455: /*
1456: * If pmap_enter() fails,
1457: * it must not leave behind an existing pmap entry.
1458: */
1459: if (!kenter && ((pte & LX_BLKPAG_OS_WIRED) == 0))
1460: atomic_swap_64(ptep, 0);
1461:
1.3 ryo 1462: PMAP_COUNT(pv_entry_cannotalloc);
1463: if (flags & PMAP_CANFAIL)
1464: goto done;
1.11 ryo 1465: panic("pmap_enter: failed to allocate pv_entry");
1.3 ryo 1466: }
1.2 ryo 1467:
1.7 ryo 1468: /* update referenced/modified flags */
1469: VM_PAGE_TO_MD(pg)->mdpg_flags |=
1470: (flags & (VM_PROT_READ | VM_PROT_WRITE));
1471: mdattr &= VM_PAGE_TO_MD(pg)->mdpg_flags;
1.2 ryo 1472: }
1473:
1474: #ifdef PMAPCOUNTERS
1475: switch (flags & PMAP_CACHE_MASK) {
1476: case PMAP_NOCACHE:
1477: case PMAP_NOCACHE_OVR:
1478: PMAP_COUNT(uncached_mappings);
1479: break;
1480: }
1481: #endif
1482:
1.18 ryo 1483: attr = _pmap_pte_adjust_prot(L3_PAGE, prot, mdattr, user);
1.2 ryo 1484: attr = _pmap_pte_adjust_cacheflags(attr, flags);
1485: if (VM_MAXUSER_ADDRESS > va)
1486: attr |= LX_BLKPAG_APUSER;
1.3 ryo 1487: if (flags & PMAP_WIRED)
1488: attr |= LX_BLKPAG_OS_WIRED;
1.2 ryo 1489: #ifdef MULTIPROCESSOR
1490: attr |= LX_BLKPAG_SH_IS;
1491: #endif
1492:
1493: pte = pa | attr;
1.13 ryo 1494:
1495: if (!executable && (prot & VM_PROT_EXECUTE)) {
1.15 ryo 1496: /* non-exec -> exec */
1497: UVMHIST_LOG(pmaphist,
1498: "icache_sync: pm=%p, va=%016lx, pte: %016lx -> %016lx",
1.13 ryo 1499: pm, va, opte, pte);
1.15 ryo 1500: if (!l3pte_writable(pte)) {
1.19 ryo 1501: PTE_ICACHE_SYNC_PAGE(pte, ptep, pm, va, l3only);
1.15 ryo 1502: atomic_swap_64(ptep, pte);
1.19 ryo 1503: AARCH64_TLBI_BY_ASID_VA(pm->pm_asid, va, true);
1.15 ryo 1504: } else {
1505: atomic_swap_64(ptep, pte);
1.19 ryo 1506: AARCH64_TLBI_BY_ASID_VA(pm->pm_asid, va, l3only);
1.15 ryo 1507: cpu_icache_sync_range(va, PAGE_SIZE);
1508: }
1509: } else {
1510: atomic_swap_64(ptep, pte);
1.19 ryo 1511: AARCH64_TLBI_BY_ASID_VA(pm->pm_asid, va, l3only);
1.15 ryo 1512: }
1.2 ryo 1513:
1.3 ryo 1514: if (pte & LX_BLKPAG_OS_WIRED)
1515: pm->pm_stats.wired_count++;
1.2 ryo 1516: pm->pm_stats.resident_count++;
1517:
1518: done:
1.12 ryo 1519: pm_unlock(pm);
1520:
1521: /* spare pv was not used. discard */
1522: if (spv != NULL)
1523: pool_cache_put(&_pmap_pv_pool, spv);
1524:
1.14 ryo 1525: if (opv != NULL)
1526: pool_cache_put(&_pmap_pv_pool, opv);
1527:
1.2 ryo 1528: return error;
1529: }
1530:
1531: int
1532: pmap_enter(struct pmap *pm, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
1.1 matt 1533: {
1.2 ryo 1534: KASSERT((prot & VM_PROT_READ) || !(prot & VM_PROT_WRITE));
1535:
1536: return _pmap_enter(pm, va, pa, prot, flags, false);
1.1 matt 1537: }
1538:
1539: void
1.2 ryo 1540: pmap_remove_all(struct pmap *pm)
1.1 matt 1541: {
1.2 ryo 1542: /* nothing to do */
1.1 matt 1543: }
1544:
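/*
 * remove the mapping at va from pm.  for kremove (pmap_kremove) the page is
 * treated as unmanaged and the pv list is left untouched.
 */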
1.2 ryo 1545: static void
1546: _pmap_remove(struct pmap *pm, vaddr_t va, bool kremove)
1.1 matt 1547: {
1.2 ryo 1548: pt_entry_t pte, *ptep;
1549: struct vm_page *pg;
1.14 ryo 1550: struct pv_entry *opv = NULL;
1.2 ryo 1551: paddr_t pa;
1552:
1553:
1554: UVMHIST_FUNC(__func__);
1555: UVMHIST_CALLED(pmaphist);
1556:
1557: UVMHIST_LOG(pmaphist, "pm=%p, va=%016lx, kremovemode=%d",
1558: pm, va, kremove, 0);
1559:
1.11 ryo 1560: pm_lock(pm);
1.8 ryo 1561:
1.27 ryo 1562: ptep = _pmap_pte_lookup_l3(pm, va);
1.2 ryo 1563: if (ptep != NULL) {
1564: pte = *ptep;
1565: if (!l3pte_valid(pte))
1.8 ryo 1566: goto done;
1.2 ryo 1567:
1568: pa = l3pte_pa(pte);
1569:
1.3 ryo 1570: if (kremove)
1571: pg = NULL;
1572: else
1573: pg = PHYS_TO_VM_PAGE(pa);
1.2 ryo 1574:
1.3 ryo 1575: if (pg != NULL)
1.14 ryo 1576: opv = _pmap_remove_pv(pg, pm, va, pte);
1.2 ryo 1577:
1578: atomic_swap_64(ptep, 0);
1.19 ryo 1579: AARCH64_TLBI_BY_ASID_VA(pm->pm_asid, va, true);
1.3 ryo 1580:
1581: if ((pte & LX_BLKPAG_OS_WIRED) != 0)
1582: pm->pm_stats.wired_count--;
1583: pm->pm_stats.resident_count--;
1.2 ryo 1584: }
1.8 ryo 1585: done:
1.11 ryo 1586: pm_unlock(pm);
1.14 ryo 1587:
1588: if (opv != NULL)
1589: pool_cache_put(&_pmap_pv_pool, opv);
1.1 matt 1590: }
1591:
1592: void
1.2 ryo 1593: pmap_remove(struct pmap *pm, vaddr_t sva, vaddr_t eva)
1.1 matt 1594: {
1.2 ryo 1595: vaddr_t va;
1596:
1.28 ryo 1597: KASSERT_PM_ADDR(pm, sva);
1.2 ryo 1598: KASSERT(!IN_KSEG_ADDR(sva));
1599:
1600: for (va = sva; va < eva; va += PAGE_SIZE)
1601: _pmap_remove(pm, va, false);
1.1 matt 1602: }
1603:
1604: void
1.2 ryo 1605: pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
1.1 matt 1606: {
1.2 ryo 1607: struct vm_page_md *md = VM_PAGE_TO_MD(pg);
1608: struct pv_entry *pv, *pvtmp;
1.3 ryo 1609: pt_entry_t opte;
1.2 ryo 1610:
1611: KASSERT((prot & VM_PROT_READ) || !(prot & VM_PROT_WRITE));
1612:
1613: UVMHIST_FUNC(__func__);
1614: UVMHIST_CALLED(pmaphist);
1615:
1616: UVMHIST_LOG(pmaphist, "pg=%p, md=%p, pa=%016lx, prot=%08x",
1617: pg, md, VM_PAGE_TO_PHYS(pg), prot);
1618:
1619:
1620: if ((prot & (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE)) ==
1621: VM_PROT_NONE) {
1622:
1623: /* remove all pages reference to this physical page */
1624: pmap_pv_lock(md);
1625: TAILQ_FOREACH_SAFE(pv, &md->mdpg_pvhead, pv_link, pvtmp) {
1626:
1.3 ryo 1627: opte = atomic_swap_64(pv->pv_ptep, 0);
1.19 ryo 1628: AARCH64_TLBI_BY_ASID_VA(pv->pv_pmap->pm_asid,
1629: pv->pv_va, true);
1630:
1.3 ryo 1631: if ((opte & LX_BLKPAG_OS_WIRED) != 0)
1632: pv->pv_pmap->pm_stats.wired_count--;
1.2 ryo 1633: pv->pv_pmap->pm_stats.resident_count--;
1634:
1635: TAILQ_REMOVE(&md->mdpg_pvhead, pv, pv_link);
1636: PMAP_COUNT(pv_remove);
1637: pool_cache_put(&_pmap_pv_pool, pv);
1638: }
1639: pmap_pv_unlock(md);
1640:
1641: } else {
1642: pmap_pv_lock(md);
1643: TAILQ_FOREACH(pv, &md->mdpg_pvhead, pv_link) {
1.3 ryo 1644: _pmap_protect_pv(pg, pv, prot);
1.2 ryo 1645: }
1646: pmap_pv_unlock(md);
1647: }
1.1 matt 1648: }
1649:
1650: void
1.2 ryo 1651: pmap_unwire(struct pmap *pm, vaddr_t va)
1.1 matt 1652: {
1.2 ryo 1653: pt_entry_t pte, *ptep;
1654:
1655: UVMHIST_FUNC(__func__);
1656: UVMHIST_CALLED(pmaphist);
1657:
1658: UVMHIST_LOG(pmaphist, "pm=%p, va=%016lx", pm, va, 0, 0);
1659:
1660: PMAP_COUNT(unwire);
1661:
1.28 ryo 1662: KASSERT_PM_ADDR(pm, va);
1663: KASSERT(!IN_KSEG_ADDR(va));
1.2 ryo 1664:
1.11 ryo 1665: pm_lock(pm);
1.27 ryo 1666: ptep = _pmap_pte_lookup_l3(pm, va);
1.2 ryo 1667: if (ptep != NULL) {
1668: pte = *ptep;
1.3 ryo 1669: if (!l3pte_valid(pte) ||
1670: ((pte & LX_BLKPAG_OS_WIRED) == 0)) {
1671: /* invalid pte, or pte is not wired */
1672: PMAP_COUNT(unwire_failure);
1.11 ryo 1673: pm_unlock(pm);
1.2 ryo 1674: return;
1675: }
1676:
1.3 ryo 1677: pte &= ~LX_BLKPAG_OS_WIRED;
1678: atomic_swap_64(ptep, pte);
1.2 ryo 1679:
1.3 ryo 1680: pm->pm_stats.wired_count--;
1.2 ryo 1681: }
1.11 ryo 1682: pm_unlock(pm);
1.1 matt 1683: }
1684:
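/*
 * referenced/modified emulation.  called from the fault handler; if the fault
 * was caused only by a cleared AF bit or a read-only pte on a writable
 * mapping, set the referenced/modified flags, upgrade the pte, and return
 * true.  otherwise return false and let uvm_fault() handle it.
 */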
1.2 ryo 1685: bool
1686: pmap_fault_fixup(struct pmap *pm, vaddr_t va, vm_prot_t accessprot, bool user)
1.1 matt 1687: {
1.2 ryo 1688: struct vm_page *pg;
1689: struct vm_page_md *md;
1690: pt_entry_t *ptep, pte;
1691: vm_prot_t pmap_prot;
1692: paddr_t pa;
1.8 ryo 1693: bool fixed = false;
1.2 ryo 1694:
1695: UVMHIST_FUNC(__func__);
1696: UVMHIST_CALLED(pmaphist);
1697:
1698: UVMHIST_LOG(pmaphist, "pm=%p, va=%016lx, accessprot=%08x",
1699: pm, va, accessprot, 0);
1700:
1701:
1702: #if 0
1.28 ryo 1703: KASSERT_PM_ADDR(pm, va);
1.2 ryo 1704: #else
1705: if (((pm == pmap_kernel()) &&
1.28 ryo 1706: !(IN_RANGE(va, VM_MIN_KERNEL_ADDRESS, VM_MAX_KERNEL_ADDRESS))) ||
1.2 ryo 1707: ((pm != pmap_kernel()) &&
1.28 ryo 1708: !(IN_RANGE(va, VM_MIN_ADDRESS, VM_MAX_ADDRESS)))) {
1.2 ryo 1709:
1710: UVMHIST_LOG(pmaphist,
1711: "pmap space and va mismatch: pm=%s, va=%016lx",
1712: (pm == pmap_kernel()) ? "kernel" : "user", va, 0, 0);
1713: return false;
1714: }
1715: #endif
1716:
1.11 ryo 1717: pm_lock(pm);
1.8 ryo 1718:
1.27 ryo 1719: ptep = _pmap_pte_lookup_l3(pm, va);
1.2 ryo 1720: if (ptep == NULL) {
1721: UVMHIST_LOG(pmaphist, "pte_lookup failure: va=%016lx",
1722: va, 0, 0, 0);
1.8 ryo 1723: goto done;
1.2 ryo 1724: }
1725:
1726: pte = *ptep;
1727: if (!l3pte_valid(pte)) {
1728: UVMHIST_LOG(pmaphist, "invalid pte: %016llx: va=%016lx",
1729: pte, va, 0, 0);
1.8 ryo 1730: goto done;
1.2 ryo 1731: }
1732:
1733: pa = l3pte_pa(*ptep);
1734: pg = PHYS_TO_VM_PAGE(pa);
1735: if (pg == NULL) {
1736: UVMHIST_LOG(pmaphist, "pg not found: va=%016lx", va, 0, 0, 0);
1.8 ryo 1737: goto done;
1.2 ryo 1738: }
1739: md = VM_PAGE_TO_MD(pg);
1740:
1741: /* get prot by pmap_enter() (stored in software use bit in pte) */
1742: switch (pte & (LX_BLKPAG_OS_READ|LX_BLKPAG_OS_WRITE)) {
1743: case 0:
1744: default:
1745: pmap_prot = 0;
1746: break;
1747: case LX_BLKPAG_OS_READ:
1748: pmap_prot = VM_PROT_READ;
1749: break;
1750: case LX_BLKPAG_OS_WRITE:
1751: case LX_BLKPAG_OS_READ|LX_BLKPAG_OS_WRITE:
1752: pmap_prot = (VM_PROT_READ|VM_PROT_WRITE);
1753: break;
1754: }
1.18 ryo 1755: if (l3pte_executable(pte, pm != pmap_kernel()))
1.2 ryo 1756: pmap_prot |= VM_PROT_EXECUTE;
1757:
1758: UVMHIST_LOG(pmaphist, "va=%016lx, pmapprot=%08x, accessprot=%08x",
1759: va, pmap_prot, accessprot, 0);
1760:
1761: /* ignore except read/write */
1762: accessprot &= (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE);
1763:
1764: /* no permission to read/write/execute for this page */
1765: if ((pmap_prot & accessprot) != accessprot) {
1766: UVMHIST_LOG(pmaphist, "no permission to access", 0, 0, 0, 0);
1.8 ryo 1767: goto done;
1.2 ryo 1768: }
1769:
1.24 ryo 1770: /* pte is readable and writable, but a fault occurred? probably copy(9) */
1771: if ((pte & LX_BLKPAG_AF) && ((pte & LX_BLKPAG_AP) == LX_BLKPAG_AP_RW))
1.8 ryo 1772: goto done;
1.2 ryo 1773:
1774: pmap_pv_lock(md);
1775: if ((pte & LX_BLKPAG_AF) == 0) {
1776: /* pte has no AF bit, set referenced and AF bit */
1777: UVMHIST_LOG(pmaphist,
1778: "REFERENCED:"
1779: " va=%016lx, pa=%016lx, pte_prot=%08x, accessprot=%08x",
1780: va, pa, pmap_prot, accessprot);
1781: md->mdpg_flags |= VM_PROT_READ; /* set referenced */
1782: pte |= LX_BLKPAG_AF;
1783:
1784: PMAP_COUNT(fixup_referenced);
1785: }
1786: if ((accessprot & VM_PROT_WRITE) &&
1787: ((pte & LX_BLKPAG_AP) == LX_BLKPAG_AP_RO)) {
1788: /* pte is not RW. set modified and RW */
1789:
1790: UVMHIST_LOG(pmaphist, "MODIFIED:"
1791: " va=%016lx, pa=%016lx, pte_prot=%08x, accessprot=%08x",
1792: va, pa, pmap_prot, accessprot);
1793: md->mdpg_flags |= VM_PROT_WRITE; /* set modified */
1794: pte &= ~LX_BLKPAG_AP;
1795: pte |= LX_BLKPAG_AP_RW;
1796:
1797: PMAP_COUNT(fixup_modified);
1798: }
1799: pmap_pv_unlock(md);
1800:
1801: atomic_swap_64(ptep, pte);
1.19 ryo 1802: AARCH64_TLBI_BY_ASID_VA(pm->pm_asid, va, true);
1803:
1.8 ryo 1804: fixed = true;
1.2 ryo 1805:
1.8 ryo 1806: done:
1.11 ryo 1807: pm_unlock(pm);
1.8 ryo 1808: return fixed;
1.1 matt 1809: }
1810:
1811: bool
1812: pmap_clear_modify(struct vm_page *pg)
1813: {
1.2 ryo 1814: struct pv_entry *pv;
1815: struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
1816: pt_entry_t *ptep, pte, opte;
1817: vaddr_t va;
1818:
1819: UVMHIST_FUNC(__func__);
1820: UVMHIST_CALLED(pmaphist);
1821:
1822: UVMHIST_LOG(pmaphist, "pg=%p, mdpg_flags=%08x",
1823: pg, md->mdpg_flags, 0, 0);
1824:
1825: pmap_pv_lock(md);
1826:
1827: if ((md->mdpg_flags & VM_PROT_WRITE) == 0) {
1828: pmap_pv_unlock(md);
1829: return false;
1830: }
1831:
1832: md->mdpg_flags &= ~VM_PROT_WRITE;
1833:
1834: PMAP_COUNT(clear_modify);
1835: TAILQ_FOREACH(pv, &md->mdpg_pvhead, pv_link) {
1836: PMAP_COUNT(clear_modify_pages);
1837:
1838: va = pv->pv_va;
1839:
1840: ptep = pv->pv_ptep;
1841: opte = pte = *ptep;
1842: tryagain:
1843: if (!l3pte_valid(pte))
1844: continue;
1845:
1846: /* clear write permission */
1847: pte &= ~LX_BLKPAG_AP;
1848: pte |= LX_BLKPAG_AP_RO;
1849:
1850: /* XXX: possible deadlock if using PM_LOCK(). this is racy */
1851: if ((pte = atomic_cas_64(ptep, opte, pte)) != opte) {
1852: opte = pte;
1853: goto tryagain;
1854: }
1855:
1.19 ryo 1856: AARCH64_TLBI_BY_ASID_VA(pv->pv_pmap->pm_asid, va, true);
1.2 ryo 1857:
1858: UVMHIST_LOG(pmaphist,
1859: "va=%016llx, ptep=%p, pa=%016lx, RW -> RO",
1860: va, ptep, l3pte_pa(pte), 0);
1861: }
1862:
1863: pmap_pv_unlock(md);
1864:
1865: return true;
1.1 matt 1866: }
1867:
1868: bool
1869: pmap_clear_reference(struct vm_page *pg)
1870: {
1.2 ryo 1871: struct pv_entry *pv;
1872: struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
1873: pt_entry_t *ptep, pte, opte;
1874: vaddr_t va;
1875:
1876: UVMHIST_FUNC(__func__);
1877: UVMHIST_CALLED(pmaphist);
1878:
1879: UVMHIST_LOG(pmaphist, "pg=%p, mdpg_flags=%08x",
1880: pg, md->mdpg_flags, 0, 0);
1881:
1882: pmap_pv_lock(md);
1883:
1884: if ((md->mdpg_flags & VM_PROT_READ) == 0) {
1885: pmap_pv_unlock(md);
1886: return false;
1887: }
1888: md->mdpg_flags &= ~VM_PROT_READ;
1889:
1890: PMAP_COUNT(clear_reference);
1891: TAILQ_FOREACH(pv, &md->mdpg_pvhead, pv_link) {
1892: PMAP_COUNT(clear_reference_pages);
1893:
1894: va = pv->pv_va;
1895:
1896: ptep = pv->pv_ptep;
1897: opte = pte = *ptep;
1898: tryagain:
1899: if (!l3pte_valid(pte))
1900: continue;
1901:
1902: /* clear access permission */
1903: pte &= ~LX_BLKPAG_AF;
1904:
1905: /* XXX: possible deadlock if using PM_LOCK(). this is racy */
1906: if ((pte = atomic_cas_64(ptep, opte, pte)) != opte) {
1907: opte = pte;
1908: goto tryagain;
1909: }
1910:
1.19 ryo 1911: AARCH64_TLBI_BY_ASID_VA(pv->pv_pmap->pm_asid, va, true);
1.2 ryo 1912:
1913: UVMHIST_LOG(pmaphist, "va=%016llx, ptep=%p, pa=%016lx, unset AF",
1914: va, ptep, l3pte_pa(pte), 0);
1915: }
1916:
1917: pmap_pv_unlock(md);
1918:
1919: return true;
1.1 matt 1920: }
1921:
1922: bool
1923: pmap_is_modified(struct vm_page *pg)
1924: {
1.2 ryo 1925: struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
1.3 ryo 1926:
1.2 ryo 1927: return (md->mdpg_flags & VM_PROT_WRITE);
1.1 matt 1928: }
1929:
1930: bool
1931: pmap_is_referenced(struct vm_page *pg)
1932: {
1.3 ryo 1933: struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
1934:
1935: return (md->mdpg_flags & VM_PROT_READ);
1.2 ryo 1936: }
1937:
1938: #ifdef DDB
1.17 ryo 1939: /* get pointer to kernel segment L2 or L3 table entry */
1940: pt_entry_t *
1941: kvtopte(vaddr_t va)
1942: {
1.28 ryo 1943: KASSERT(IN_RANGE(va, VM_MIN_KERNEL_ADDRESS, VM_MAX_KERNEL_ADDRESS));
1.17 ryo 1944:
1.27 ryo 1945: return _pmap_pte_lookup_bs(pmap_kernel(), va, NULL);
1.17 ryo 1946: }
1947:
1948: /* change attribute of kernel segment */
1949: pt_entry_t
1950: pmap_kvattr(vaddr_t va, vm_prot_t prot)
1951: {
1952: pt_entry_t *ptep, pte, opte;
1953:
1.28 ryo 1954: KASSERT(IN_RANGE(va, VM_MIN_KERNEL_ADDRESS, VM_MAX_KERNEL_ADDRESS));
1.17 ryo 1955:
1956: ptep = kvtopte(va);
1957: if (ptep == NULL)
1958: panic("%s: %016lx is not mapped\n", __func__, va);
1959:
1960: opte = pte = *ptep;
1961:
1962: pte &= ~(LX_BLKPAG_AF|LX_BLKPAG_AP);
1963: switch (prot & (VM_PROT_READ|VM_PROT_WRITE)) {
1964: case 0:
1965: break;
1966: case VM_PROT_READ:
1967: pte |= (LX_BLKPAG_AF|LX_BLKPAG_AP_RO);
1968: break;
1969: case VM_PROT_WRITE:
1970: case VM_PROT_READ|VM_PROT_WRITE:
1971: pte |= (LX_BLKPAG_AF|LX_BLKPAG_AP_RW);
1972: break;
1973: }
1974:
1975: if ((prot & VM_PROT_EXECUTE) == 0) {
1.18 ryo 1976: pte |= LX_BLKPAG_PXN;
1.17 ryo 1977: } else {
1978: pte |= LX_BLKPAG_AF;
1.18 ryo 1979: pte &= ~LX_BLKPAG_PXN;
1.17 ryo 1980: }
1981:
1982: *ptep = pte;
1983:
1984: return opte;
1985: }
1986:
1.26 ryo 1987: void
1988: pmap_db_pte_print(pt_entry_t pte, int level,
1989: void (*pr)(const char *, ...) __printflike(1, 2))
1.2 ryo 1990: {
1991: if (pte == 0) {
1.8 ryo 1992: pr(" UNUSED\n");
1.26 ryo 1993: return;
1994: }
1995:
1996: pr(" %s", (pte & LX_VALID) ? "VALID" : "**INVALID**");
1997:
1998: if ((level == 0) ||
1999: ((level == 1) && l1pde_is_table(pte)) ||
2000: ((level == 2) && l2pde_is_table(pte))) {
2001:
2002: /* L0/L1/L2 TABLE */
2003: if ((level == 0) && ((pte & LX_TYPE) != LX_TYPE_TBL))
2004: pr(" **ILLEGAL TYPE**"); /* L0 doesn't support block */
2005: else
2006: pr(" TABLE");
1.2 ryo 2007:
1.26 ryo 2008: pr(", PA=%lx", l0pde_pa(pte));
1.2 ryo 2009:
1.26 ryo 2010: if (pte & LX_TBL_NSTABLE)
2011: pr(", NSTABLE");
2012: if (pte & LX_TBL_APTABLE)
2013: pr(", APTABLE");
2014: if (pte & LX_TBL_UXNTABLE)
2015: pr(", UXNTABLE");
2016: if (pte & LX_TBL_PXNTABLE)
2017: pr(", PXNTABLE");
1.2 ryo 2018:
2019: } else if (((level == 1) && l1pde_is_block(pte)) ||
2020: ((level == 2) && l2pde_is_block(pte)) ||
1.26 ryo 2021: (level == 3)) {
1.2 ryo 2022:
1.26 ryo 2023: /* L1/L2 BLOCK or L3 PAGE */
1.2 ryo 2024: if (level == 3) {
1.26 ryo 2025: pr(" %s", l3pte_is_page(pte) ?
2026: "PAGE" : "**ILLEGAL TYPE**");
2027: } else
2028: pr(" BLOCK");
1.2 ryo 2029:
1.26 ryo 2030: pr(", PA=%lx", l3pte_pa(pte));
1.2 ryo 2031:
1.26 ryo 2032: pr(", %s", (pte & LX_BLKPAG_UXN) ? "UXN" : "user-exec");
2033: pr(", %s", (pte & LX_BLKPAG_PXN) ? "PXN" : "kernel-exec");
1.2 ryo 2034:
2035: if (pte & LX_BLKPAG_CONTIG)
1.26 ryo 2036: pr(", CONTIG");
1.2 ryo 2037:
1.26 ryo 2038: pr(", %s", (pte & LX_BLKPAG_NG) ? "NG" : "global");
2039: pr(", %s", (pte & LX_BLKPAG_AF) ? "AF" : "*cannot-access*");
1.2 ryo 2040:
2041: switch (pte & LX_BLKPAG_SH) {
2042: case LX_BLKPAG_SH_NS:
2043: pr(", SH_NS");
2044: break;
2045: case LX_BLKPAG_SH_OS:
2046: pr(", SH_OS");
2047: break;
2048: case LX_BLKPAG_SH_IS:
2049: pr(", SH_IS");
2050: break;
2051: default:
2052: pr(", SH_??");
2053: break;
2054: }
2055:
2056: pr(", %s", (pte & LX_BLKPAG_AP_RO) ? "RO" : "RW");
2057: pr(", %s", (pte & LX_BLKPAG_APUSER) ? "EL0" : "EL1");
1.26 ryo 2058: pr(", %s", (pte & LX_BLKPAG_NS) ? "NS" : "secure");
1.2 ryo 2059:
2060: switch (pte & LX_BLKPAG_ATTR_MASK) {
2061: case LX_BLKPAG_ATTR_NORMAL_WB:
2062: pr(", WRITEBACK");
2063: break;
2064: case LX_BLKPAG_ATTR_NORMAL_NC:
2065: pr(", NOCACHE");
2066: break;
2067: case LX_BLKPAG_ATTR_NORMAL_WT:
2068: pr(", WHITETHRU");
2069: break;
2070: case LX_BLKPAG_ATTR_DEVICE_MEM:
2071: pr(", DEVICE");
2072: break;
2073: }
2074:
1.26 ryo 2075: if (pte & LX_BLKPAG_OS_BOOT)
2076: pr(", boot");
1.2 ryo 2077: if (pte & LX_BLKPAG_OS_READ)
2078: pr(", pmap_read");
2079: if (pte & LX_BLKPAG_OS_WRITE)
2080: pr(", pmap_write");
1.26 ryo 2081: if (pte & LX_BLKPAG_OS_WIRED)
2082: pr(", pmap_wired");
1.2 ryo 2083: } else {
1.26 ryo 2084: pr(" **ILLEGAL TYPE**");
1.2 ryo 2085: }
2086: pr("\n");
1.1 matt 2087: }
2088:
1.2 ryo 2089: void
2090: pmap_db_pteinfo(vaddr_t va, void (*pr)(const char *, ...))
1.1 matt 2091: {
1.2 ryo 2092: struct vm_page *pg;
2093: bool user;
2094: pd_entry_t *l0, *l1, *l2, *l3;
2095: pd_entry_t pde;
2096: pt_entry_t pte;
2097: struct vm_page_md *md;
1.26 ryo 2098: uint64_t ttbr;
1.2 ryo 2099: paddr_t pa;
2100: unsigned int idx;
2101:
1.26 ryo 2102: if (va & TTBR_SEL_VA) {
2103: user = false;
2104: ttbr = reg_ttbr1_el1_read();
2105: } else {
2106: user = true;
2107: ttbr = reg_ttbr0_el1_read();
2108: }
2109: pa = ttbr & TTBR_BADDR;
2110: l0 = (pd_entry_t *)AARCH64_PA_TO_KVA(pa);
2111:
1.2 ryo 2112: /*
2113: * traverse L0 -> L1 -> L2 -> L3 table
2114: */
1.26 ryo 2115: pr("TTBR%d=%016llx, pa=%016lx, va=%016lx", user ? 0 : 1, ttbr, l0);
1.2 ryo 2116: pr(", input-va=%016llx,"
2117: " L0-index=%d, L1-index=%d, L2-index=%d, L3-index=%d\n",
2118: va,
2119: (va & L0_ADDR_BITS) >> L0_SHIFT,
2120: (va & L1_ADDR_BITS) >> L1_SHIFT,
2121: (va & L2_ADDR_BITS) >> L2_SHIFT,
2122: (va & L3_ADDR_BITS) >> L3_SHIFT);
2123:
2124: idx = l0pde_index(va);
2125: pde = l0[idx];
2126:
1.26 ryo 2127: pr("L0[%3d]=%016llx:", idx, pde);
1.2 ryo 2128: pmap_db_pte_print(pde, 0, pr);
2129:
2130: if (!l0pde_valid(pde))
2131: return;
2132:
1.26 ryo 2133: l1 = (pd_entry_t *)AARCH64_PA_TO_KVA(l0pde_pa(pde));
1.2 ryo 2134: idx = l1pde_index(va);
2135: pde = l1[idx];
2136:
1.26 ryo 2137: pr(" L1[%3d]=%016llx:", idx, pde);
1.2 ryo 2138: pmap_db_pte_print(pde, 1, pr);
2139:
2140: if (!l1pde_valid(pde) || l1pde_is_block(pde))
2141: return;
2142:
1.26 ryo 2143: l2 = (pd_entry_t *)AARCH64_PA_TO_KVA(l1pde_pa(pde));
1.2 ryo 2144: idx = l2pde_index(va);
2145: pde = l2[idx];
2146:
1.26 ryo 2147: pr(" L2[%3d]=%016llx:", idx, pde);
1.2 ryo 2148: pmap_db_pte_print(pde, 2, pr);
2149:
2150: if (!l2pde_valid(pde) || l2pde_is_block(pde))
2151: return;
2152:
1.26 ryo 2153: l3 = (pd_entry_t *)AARCH64_PA_TO_KVA(l2pde_pa(pde));
1.2 ryo 2154: idx = l3pte_index(va);
2155: pte = l3[idx];
2156:
1.26 ryo 2157: pr(" L3[%3d]=%016llx:", idx, pte);
1.2 ryo 2158: pmap_db_pte_print(pte, 3, pr);
2159:
2160: pa = l3pte_pa(pte);
2161: pg = PHYS_TO_VM_PAGE(pa);
1.8 ryo 2162: if (pg == NULL) {
2163: pr("No VM_PAGE\n");
2164: } else {
1.2 ryo 2165: pg_dump(pg, pr);
2166: md = VM_PAGE_TO_MD(pg);
2167: pv_dump(md, pr);
2168: }
1.1 matt 2169: }
1.2 ryo 2170: #endif /* DDB */