--- version 1.19, 2016/08/05 20:54:28
+++ version 1.18.2.5, 2017/04/26 02:53:32
@@ 1.19:112 / 1.18.2.5:112 @@ __KERNEL_RCSID(0, "$NetBSD$");

 #include <sys/atomic.h>

 #include <uvm/uvm.h>
+#include <uvm/uvm_physseg.h>

 #if defined(MULTIPROCESSOR) && defined(PMAP_VIRTUAL_CACHE_ALIASES) \
     && !defined(PMAP_NO_PV_UNCACHED)
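
Note: the hunks below carry one logical change plus a logging cleanup.
The physseg hunks convert pmap_steal_memory() from the old vm_physseg
array to the opaque uvm_physseg_t handles declared in
<uvm/uvm_physseg.h>.  A minimal sketch of the two iteration idioms,
built only from names that appear in the hunks that follow:

        /* 1.19: physical segments are slots in a global array. */
        for (u_int bank = 0; bank < vm_nphysseg; bank++) {
                struct vm_physseg * const seg = VM_PHYSMEM_PTR(bank);
                paddr_t first = seg->avail_start; /* direct field access */
        }

        /* 1.18.2.5: segments are opaque handles, read via accessors. */
        for (uvm_physseg_t bank = uvm_physseg_get_first();
             uvm_physseg_valid_p(bank);
             bank = uvm_physseg_get_next(bank)) {
                paddr_t first = uvm_physseg_get_avail_start(bank);
        }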

@@ 1.19:452 / 1.18.2.5:453 @@ pmap_steal_memory(vsize_t size, vaddr_t
        size_t npgs;
        paddr_t pa;
        vaddr_t va;
-       struct vm_physseg *maybe_seg = NULL;
-       u_int maybe_bank = vm_nphysseg;
+       uvm_physseg_t maybe_bank = UVM_PHYSSEG_TYPE_INVALID;

        size = round_page(size);
        npgs = atop(size);

        aprint_debug("%s: need %zu pages\n", __func__, npgs);

-       for (u_int bank = 0; bank < vm_nphysseg; bank++) {
-               struct vm_physseg * const seg = VM_PHYSMEM_PTR(bank);
+       for (uvm_physseg_t bank = uvm_physseg_get_first();
+            uvm_physseg_valid_p(bank);
+            bank = uvm_physseg_get_next(bank)) {

                if (uvm.page_init_done == true)
                        panic("pmap_steal_memory: called _after_ bootstrap");

-               aprint_debug("%s: seg %u: %#"PRIxPADDR" %#"PRIxPADDR" %#"PRIxPADDR" %#"PRIxPADDR"\n",
+               aprint_debug("%s: seg %"PRIxPHYSSEG": %#"PRIxPADDR" %#"PRIxPADDR" %#"PRIxPADDR" %#"PRIxPADDR"\n",
                    __func__, bank,
-                   seg->avail_start, seg->start,
-                   seg->avail_end, seg->end);
+                   uvm_physseg_get_avail_start(bank), uvm_physseg_get_start(bank),
+                   uvm_physseg_get_avail_end(bank), uvm_physseg_get_end(bank));

-               if (seg->avail_start != seg->start
-                   || seg->avail_start >= seg->avail_end) {
-                       aprint_debug("%s: seg %u: bad start\n", __func__, bank);
+               if (uvm_physseg_get_avail_start(bank) != uvm_physseg_get_start(bank)
+                   || uvm_physseg_get_avail_start(bank) >= uvm_physseg_get_avail_end(bank)) {
+                       aprint_debug("%s: seg %"PRIxPHYSSEG": bad start\n", __func__, bank);
                        continue;
                }

-               if (seg->avail_end - seg->avail_start < npgs) {
-                       aprint_debug("%s: seg %u: too small for %zu pages\n",
+               if (uvm_physseg_get_avail_end(bank) - uvm_physseg_get_avail_start(bank) < npgs) {
+                       aprint_debug("%s: seg %"PRIxPHYSSEG": too small for %zu pages\n",
                            __func__, bank, npgs);
                        continue;
                }

-               if (!pmap_md_ok_to_steal_p(seg, npgs)) {
+               if (!pmap_md_ok_to_steal_p(bank, npgs)) {
                        continue;
                }

@@ 1.19:490 / 1.18.2.5:493 @@ pmap_steal_memory(vsize_t size, vaddr_t
                 * Always try to allocate from the segment with the least
                 * amount of space left.
                 */
-#define VM_PHYSMEM_SPACE(s)     ((s)->avail_end - (s)->avail_start)
-               if (maybe_seg == NULL
-                   || VM_PHYSMEM_SPACE(seg) < VM_PHYSMEM_SPACE(maybe_seg)) {
-                       maybe_seg = seg;
+#define VM_PHYSMEM_SPACE(b)     ((uvm_physseg_get_avail_end(b)) - (uvm_physseg_get_avail_start(b)))
+               if (uvm_physseg_valid_p(maybe_bank) == false
+                   || VM_PHYSMEM_SPACE(bank) < VM_PHYSMEM_SPACE(maybe_bank)) {
                        maybe_bank = bank;
                }
        }

-       if (maybe_seg) {
-               struct vm_physseg * const seg = maybe_seg;
-               u_int bank = maybe_bank;
+       if (uvm_physseg_valid_p(maybe_bank)) {
+               const uvm_physseg_t bank = maybe_bank;

                /*
                 * There are enough pages here; steal them!
                 */
-               pa = ptoa(seg->avail_start);
-               seg->avail_start += npgs;
-               seg->start += npgs;
-
-               /*
-                * Have we used up this segment?
-                */
-               if (seg->avail_start == seg->end) {
-                       if (vm_nphysseg == 1)
-                               panic("pmap_steal_memory: out of memory!");
-
-                       aprint_debug("%s: seg %u: %zu pages stolen (removed)\n",
-                           __func__, bank, npgs);
-                       /* Remove this segment from the list. */
-                       vm_nphysseg--;
-                       for (u_int x = bank; x < vm_nphysseg; x++) {
-                               /* structure copy */
-                               VM_PHYSMEM_PTR_SWAP(x, x + 1);
-                       }
-               } else {
-                       aprint_debug("%s: seg %u: %zu pages stolen (%#"PRIxPADDR" left)\n",
-                           __func__, bank, npgs, VM_PHYSMEM_SPACE(seg));
-               }
+               pa = ptoa(uvm_physseg_get_start(bank));
+               uvm_physseg_unplug(atop(pa), npgs);
+
+               aprint_debug("%s: seg %"PRIxPHYSSEG": %zu pages stolen (%#"PRIxPADDR" left)\n",
+                   __func__, bank, npgs, VM_PHYSMEM_SPACE(bank));

                va = pmap_md_map_poolpage(pa, size);
                memset((void *)va, 0, size);
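
Note: this hunk is more than an accessor conversion.  1.19 shrank the
chosen segment by hand and, once a segment was exhausted, deleted it
from the array with VM_PHYSMEM_PTR_SWAP(); 1.18.2.5 hands all of that
bookkeeping to uvm_physseg_unplug(), which takes a starting page frame
number and a page count.  The new sequence, distilled:

        /*
         * Steal npgs pages: take the segment's first PFN, convert it
         * to a physical address, then unplug that PFN range.  The
         * unplug call adjusts or deletes the segment internally, so
         * the manual avail_start/start arithmetic, the removal loop,
         * and the "out of memory" panic all disappear from here.
         */
        pa = ptoa(uvm_physseg_get_start(bank));
        uvm_physseg_unplug(atop(pa), npgs);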
|
|
@@ ... @@
 pmap_activate(struct lwp *l)
 {
        pmap_t pmap = l->l_proc->p_vmspace->vm_map.pmap;
+#define LNAME(l) \
+       ((l)->l_name ? (l)->l_name : (l)->l_proc->p_comm)

        UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
-       UVMHIST_LOG(pmaphist, "(l=%p (pmap=%p))", l, pmap, 0, 0);
+       UVMHIST_LOG(pmaphist, "(l=%p pmap=%p)", l, pmap, 0, 0);
        PMAP_COUNT(activate);

        kpreempt_disable();
@@ 1.19:695 / 1.18.2.5:680 @@ pmap_activate(struct lwp *l)
        pmap_md_tlb_miss_lock_exit();
        kpreempt_enable();

-       UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
+       UVMHIST_LOG(pmaphist, " <-- done (%u:%u)", l->l_proc->p_pid, l->l_lid,
+           0, 0);
 }
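
Note: the second theme of this diff starts here and repeats in nearly
every remaining hunk: UVMHIST_LOG() calls stop passing strings through
%s and log only integers, with any fixed text folded into the format
string.  Presumably this is because the history buffer records just
the argument values, so a stored string pointer may be stale, or
unreadable by the tools that later format the history.  Both shapes,
as they appear in the pmap_update() hunk below:

        /* 1.19: text chosen at run time, logged through %s. */
        UVMHIST_LOG(pmaphist, " <-- done%s",
            (pmap == pmap_kernel()) ? " (kernel)" : "", 0, 0, 0);

        /* 1.18.2.5: static format string, integer-only arguments. */
        UVMHIST_LOG(pmaphist, " <-- done (%c)",
            (pmap == pmap_kernel() ? 'k' : 'u'), 0, 0, 0);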
|
|
 /*
@@ 1.19:713 / 1.18.2.5:699 @@ pmap_page_remove(struct vm_page *pg)

        UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);

+       UVMHIST_LOG(pmapexechist, "pg %p (pa %#"PRIxPADDR") [page removed]: "
+           "execpage cleared", pg, VM_PAGE_TO_PHYS(pg),
+           0, 0);
+#ifdef PMAP_VIRTUAL_CACHE_ALIASES
+       pmap_page_clear_attributes(mdpg, VM_PAGEMD_EXECPAGE|VM_PAGEMD_UNCACHED);
+#else
+       pmap_page_clear_attributes(mdpg, VM_PAGEMD_EXECPAGE);
+#endif
+       PMAP_COUNT(exec_uncached_remove);

        pv_entry_t pv = &mdpg->mdpg_first;
        if (pv->pv_pmap == NULL) {
                VM_PAGEMD_PVLIST_UNLOCK(mdpg);
@@ 1.19:800 / 1.18.2.5:796 @@ pmap_page_remove(struct vm_page *pg)
                }
        }

-#ifdef PMAP_VIRTUAL_CACHE_ALIASES
-       pmap_page_clear_attributes(mdpg, VM_PAGEMD_UNCACHED);
-#endif
        pmap_pvlist_check(mdpg);
        VM_PAGEMD_PVLIST_UNLOCK(mdpg);
        kpreempt_enable();

@@ 1.19:820 / 1.18.2.5:813 @@ pmap_deactivate(struct lwp *l)
        pmap_t pmap = l->l_proc->p_vmspace->vm_map.pmap;

        UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
-       UVMHIST_LOG(pmaphist, "(l=%p (pmap=%p))", l, pmap, 0, 0);
+       UVMHIST_LOG(pmaphist, "(l=%p pmap=%p)", l, pmap, 0, 0);
        PMAP_COUNT(deactivate);

        kpreempt_disable();
@@ 1.19:834 / 1.18.2.5:827 @@ pmap_deactivate(struct lwp *l)
        pmap_md_tlb_miss_lock_exit();
        kpreempt_enable();

-       UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
+       UVMHIST_LOG(pmaphist, " <-- done (%u:%u)", l->l_proc->p_pid, l->l_lid,
+           0, 0);
 }

 void

@@ 1.19:867 / 1.18.2.5:861 @@ pmap_update(struct pmap *pmap)
        pmap_md_tlb_miss_lock_exit();
        kpreempt_enable();

-       UVMHIST_LOG(pmaphist, " <-- done%s",
-           (pmap == pmap_kernel()) ? " (kernel)" : "", 0, 0, 0);
+       UVMHIST_LOG(pmaphist, " <-- done (%c)",
+           (pmap == pmap_kernel() ? 'k' : 'u'), 0, 0, 0);
 }

 /*

@@ 1.19:886 / 1.18.2.5:880 @@ pmap_pte_remove(pmap_t pmap, vaddr_t sva
        const bool is_kernel_pmap_p = (pmap == pmap_kernel());

        UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
-       UVMHIST_LOG(pmaphist, "(pmap=%p %sva=%#"PRIxVADDR"..%#"PRIxVADDR,
-           pmap, (is_kernel_pmap_p ? "(kernel) " : ""), sva, eva);
+       UVMHIST_LOG(pmaphist, "(pmap=%p %cva=%#"PRIxVADDR"..%#"PRIxVADDR,
+           pmap, (is_kernel_pmap_p ? 'k' : 'u'), sva, eva);
        UVMHIST_LOG(pmaphist, "ptep=%p, flags(npte)=%#"PRIxPTR")",
            ptep, flags, 0, 0);

@@ 1.19:1026 / 1.18.2.5:1020 @@ pmap_pte_protect(pmap_t pmap, vaddr_t sv
        const vm_prot_t prot = (flags & VM_PROT_ALL);

        UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
-       UVMHIST_LOG(pmaphist, "(pmap=%p %sva=%#"PRIxVADDR"..%#"PRIxVADDR,
-           pmap, (pmap == pmap_kernel() ? "(kernel) " : ""), sva, eva);
+       UVMHIST_LOG(pmaphist, "(pmap=%p %cva=%#"PRIxVADDR"..%#"PRIxVADDR")",
+           pmap, (pmap == pmap_kernel() ? 'k' : 'u'), sva, eva);
        UVMHIST_LOG(pmaphist, "ptep=%p, flags(npte)=%#"PRIxPTR")",
            ptep, flags, 0, 0);

@@ 1.19:1048 / 1.18.2.5:1042 @@ pmap_pte_protect(pmap_t pmap, vaddr_t sv
                if (VM_PAGEMD_CACHED_P(mdpg)) {
 #endif
-                       UVMHIST_LOG(pmapexechist,
-                           "pg %p (pa %#"PRIxPADDR"): %s",
-                           pg, VM_PAGE_TO_PHYS(pg),
-                           "syncicached performed", 0);
+                       UVMHIST_LOG(pmapexechist,
+                           "pg %p (pa %#"PRIxPADDR"): "
+                           "syncicached performed",
+                           pg, VM_PAGE_TO_PHYS(pg), 0, 0);
                        pmap_page_syncicache(pg);
                        PMAP_COUNT(exec_synced_protect);
 #ifdef PMAP_VIRTUAL_CACHE_ALIASES

@@ 1.19:1117 / 1.18.2.5:1111 @@ pmap_page_cache(struct vm_page *pg, bool
        struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);

        UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
-       UVMHIST_LOG(pmaphist, "(pg=%p (pa %#"PRIxPADDR") cached=%s)",
-           pg, VM_PAGE_TO_PHYS(pg), cached ? "true" : "false", 0);
+       UVMHIST_LOG(pmaphist, "(pg=%p (pa %#"PRIxPADDR") cached=%d)",
+           pg, VM_PAGE_TO_PHYS(pg), cached, 0);

        KASSERT(kpreempt_disabled());
        KASSERT(VM_PAGEMD_PVLIST_LOCKED_P(mdpg));

@@ 1.19:1178 / 1.18.2.5:1172 @@ pmap_enter(pmap_t pmap, vaddr_t va, padd
 #endif

        UVMHIST_FUNC(__func__); UVMHIST_CALLED(*histp);
-#define VM_PROT_STRING(prot) \
-       &"\0     " \
-        "(R)\0  " \
-        "(W)\0  " \
-        "(RW)\0 " \
-        "(X)\0  " \
-        "(RX)\0 " \
-        "(WX)\0 " \
-        "(RWX)\0"[UVM_PROTECTION(prot)*6]
        UVMHIST_LOG(*histp, "(pmap=%p, va=%#"PRIxVADDR", pa=%#"PRIxPADDR,
            pmap, va, pa, 0);
-       UVMHIST_LOG(*histp, "prot=%#x%s flags=%#x%s)",
-           prot, VM_PROT_STRING(prot), flags, VM_PROT_STRING(flags));
+       UVMHIST_LOG(*histp, "prot=%#x flags=%#x)", prot, flags, 0, 0);

        const bool good_color = PMAP_PAGE_COLOROK_P(pa, va);
        if (is_kernel_pmap_p) {
if (is_kernel_pmap_p) { |
if (is_kernel_pmap_p) { |
@@ 1.19:1248 / 1.18.2.5:1232 @@ pmap_enter(pmap_t pmap, vaddr_t va, padd
                return ENOMEM;
        }
        const pt_entry_t opte = *ptep;
+       const bool resident = pte_valid_p(opte);
+       bool remap = false;
+       if (resident) {
+               if (pte_to_paddr(opte) != pa) {
+                       KASSERT(!is_kernel_pmap_p);
+                       const pt_entry_t rpte = pte_nv_entry(false);
+
+                       pmap_addr_range_check(pmap, va, va + NBPG, __func__);
+                       pmap_pte_process(pmap, va, va + NBPG, pmap_pte_remove,
+                           rpte);
+                       PMAP_COUNT(user_mappings_changed);
+                       remap = true;
+               }
+               update_flags |= PMAP_TLB_NEED_IPI;
+       }
+
+       if (!resident || remap) {
+               pmap->pm_stats.resident_count++;
+       }

        /* Done after case that may sleep/return. */
        if (pg)

@@ 1.19:1266 / 1.18.2.5:1269 @@ pmap_enter(pmap_t pmap, vaddr_t va, padd
        UVMHIST_LOG(*histp, "new pte %#"PRIxPTE" (pa %#"PRIxPADDR")",
            pte_value(npte), pa, 0, 0);

-       if (pte_valid_p(opte) && pte_to_paddr(opte) != pa) {
-               pmap_remove(pmap, va, va + NBPG);
-               PMAP_COUNT(user_mappings_changed);
-       }
-
        KASSERT(pte_valid_p(npte));
-       const bool resident = pte_valid_p(opte);
-       if (resident) {
-               update_flags |= PMAP_TLB_NEED_IPI;
-       } else {
-               pmap->pm_stats.resident_count++;
-       }

        pmap_md_tlb_miss_lock_enter();
        *ptep = npte;
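
Note: the two pmap_enter() hunks above reorder how an existing mapping
is handled.  1.19 called pmap_remove() for a conflicting mapping (same
va, different pa) only after building npte, and bumped resident_count
only for previously invalid PTEs; 1.18.2.5 tears the old mapping down
first via pmap_pte_process(..., pmap_pte_remove, ...), asserts this
can only happen in a user pmap, and counts a remap as a new resident
mapping.  The accounting, distilled from the new side:

        /*
         * A valid old PTE always forces a TLB shootdown; the resident
         * count grows when the va was unmapped, or was unmapped just
         * now because the physical address changed.
         */
        const bool resident = pte_valid_p(opte);
        bool remap = false;
        if (resident) {
                if (pte_to_paddr(opte) != pa)
                        remap = true;   /* old mapping torn down above */
                update_flags |= PMAP_TLB_NEED_IPI;
        }
        if (!resident || remap)
                pmap->pm_stats.resident_count++;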

@@ 1.19:1290 / 1.18.2.5:1282 @@ pmap_enter(pmap_t pmap, vaddr_t va, padd
                PMAP_COUNT(exec_mappings);
                if (!VM_PAGEMD_EXECPAGE_P(mdpg) && pte_cached_p(npte)) {
                        if (!pte_deferred_exec_p(npte)) {
-                               UVMHIST_LOG(*histp,
-                                   "va=%#"PRIxVADDR" pg %p: %s syncicache%s",
-                                   va, pg, "immediate", "");
+                               UVMHIST_LOG(*histp, "va=%#"PRIxVADDR
+                                   " pg %p: immediate syncicache",
+                                   va, pg, 0, 0);
                                pmap_page_syncicache(pg);
                                pmap_page_set_attributes(mdpg,
                                    VM_PAGEMD_EXECPAGE);
                                PMAP_COUNT(exec_synced_mappings);
                        } else {
-                               UVMHIST_LOG(*histp, "va=%#"PRIxVADDR
-                                   " pg %p: %s syncicache: pte %#x",
-                                   va, pg, "defer", npte);
+                               UVMHIST_LOG(*histp, "va=%#"PRIxVADDR
+                                   " pg %p: defer syncicache: pte %#x",
+                                   va, pg, npte, 0);
                        }
                } else {
-                       UVMHIST_LOG(*histp,
-                           "va=%#"PRIxVADDR" pg %p: %s syncicache%s",
-                           va, pg, "no",
-                           (pte_cached_p(npte)
-                           ? " (already exec)"
-                           : " (uncached)"));
+                       UVMHIST_LOG(*histp,
+                           "va=%#"PRIxVADDR" pg %p: no syncicache cached %d",
+                           va, pg, pte_cached_p(npte), 0);
                }
        } else if (pg != NULL && (prot & VM_PROT_EXECUTE)) {
                KASSERT(mdpg != NULL);
@@ 1.19:1317 / 1.18.2.5:1306 @@ pmap_enter(pmap_t pmap, vaddr_t va, padd
                pmap_page_syncicache(pg);
                pmap_page_clear_attributes(mdpg, VM_PAGEMD_EXECPAGE);
-               UVMHIST_LOG(*histp,
-                   "va=%#"PRIxVADDR" pg %p: %s syncicache%s",
-                   va, pg, "immediate", " (writeable)");
+               UVMHIST_LOG(*histp,
+                   "va=%#"PRIxVADDR" pg %p: immediate syncicache (writeable)",
+                   va, pg, 0, 0);
        }

        UVMHIST_LOG(*histp, " <-- 0 (OK)", 0, 0, 0, 0);
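
Note: apart from the log calls, the icache handling above is the same
in both revisions.  A distilled sketch; "exec" is shorthand here for
the mapping-is-executable test, and the deferred branch relies on the
MD layer syncing later:

        if (exec && !VM_PAGEMD_EXECPAGE_P(mdpg) && pte_cached_p(npte)) {
                if (!pte_deferred_exec_p(npte)) {
                        /* Sync the icache now and remember we did. */
                        pmap_page_syncicache(pg);
                        pmap_page_set_attributes(mdpg, VM_PAGEMD_EXECPAGE);
                }
                /* else: defer; presumably synced on first exec fault */
        }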

@@ 1.19:1593 / 1.18.2.5:1582 @@ pmap_clear_reference(struct vm_page *pg)

        bool rv = pmap_page_clear_attributes(mdpg, VM_PAGEMD_REFERENCED);

-       UVMHIST_LOG(pmaphist, " <-- %s", rv ? "true" : "false", 0, 0, 0);
+       UVMHIST_LOG(pmaphist, " <-- wasref %u", rv, 0, 0, 0);

        return rv;
 }

@@ 1.19:1628 / 1.18.2.5:1617 @@ pmap_clear_modify(struct vm_page *pg)
        if (VM_PAGEMD_EXECPAGE_P(mdpg)) {
                if (pv->pv_pmap == NULL) {
-                       UVMHIST_LOG(pmapexechist,
-                           "pg %p (pa %#"PRIxPADDR"): %s",
-                           pg, VM_PAGE_TO_PHYS(pg), "execpage cleared", 0);
+                       UVMHIST_LOG(pmapexechist,
+                           "pg %p (pa %#"PRIxPADDR"): execpage cleared",
+                           pg, VM_PAGE_TO_PHYS(pg), 0, 0);
                        pmap_page_clear_attributes(mdpg, VM_PAGEMD_EXECPAGE);
                        PMAP_COUNT(exec_uncached_clear_modify);
                } else {
-                       UVMHIST_LOG(pmapexechist,
-                           "pg %p (pa %#"PRIxPADDR"): %s",
-                           pg, VM_PAGE_TO_PHYS(pg), "syncicache performed", 0);
+                       UVMHIST_LOG(pmapexechist,
+                           "pg %p (pa %#"PRIxPADDR"): syncicache performed",
+                           pg, VM_PAGE_TO_PHYS(pg), 0, 0);
                        pmap_page_syncicache(pg);
                        PMAP_COUNT(exec_synced_clear_modify);
                }

@@ 1.19:1740 / 1.18.2.5:1729 @@ pmap_pvlist_check(struct vm_page_md *mdp
 #endif
        }
 #ifdef PMAP_VIRTUAL_CACHE_ALIASES
-       // Assert there if there more than 1 color mapped, that they
-       // are uncached.
+       // Assert that if there is more than 1 color mapped, that the
+       // page is uncached.
        KASSERTMSG(!pmap_md_virtual_cache_aliasing_p()
            || colors == 0 || (colors & (colors-1)) == 0
            || VM_PAGEMD_UNCACHED_P(mdpg), "colors=%#x uncached=%u",
|
|
@@ ... @@
                        if (__predict_false(apv != NULL))
                                pmap_pv_free(apv);

-                       UVMHIST_LOG(pmaphist, " <-- done pv=%p%s",
-                           pv, " (reused)", 0, 0);
+                       UVMHIST_LOG(pmaphist, " <-- done pv=%p (reused)",
+                           pv, 0, 0, 0);
                        return;
                }
        }

        if (__predict_false(apv != NULL))
                pmap_pv_free(apv);

-       UVMHIST_LOG(pmaphist, " <-- done pv=%p%s",
-           pv, first ? " (first pv)" : "", 0, 0);
+       UVMHIST_LOG(pmaphist, " <-- done pv=%p (first %u)", pv, first, 0, 0);
 }

 /*

@@ 1.19:1911 / 1.18.2.5:1899 @@ pmap_remove_pv(pmap_t pmap, vaddr_t va,
        UVMHIST_LOG(pmaphist,
            "(pmap=%p, va=%#"PRIxVADDR", pg=%p (pa %#"PRIxPADDR")",
            pmap, va, pg, VM_PAGE_TO_PHYS(pg));
-       UVMHIST_LOG(pmaphist, "dirty=%s)", dirty ? "true" : "false", 0, 0, 0);
+       UVMHIST_LOG(pmaphist, "dirty=%u)", dirty, 0, 0, 0);

        KASSERT(kpreempt_disabled());
        KASSERT((va & PAGE_MASK) == 0);
@@ 1.19:1971 / 1.18.2.5:1959 @@ pmap_remove_pv(pmap_t pmap, vaddr_t va,
                 * care about its execness.
                 */
-               UVMHIST_LOG(pmapexechist,
-                   "pg %p (pa %#"PRIxPADDR")%s: %s",
-                   pg, VM_PAGE_TO_PHYS(pg),
-                   last ? " [last mapping]" : "",
-                   "execpage cleared");
+               UVMHIST_LOG(pmapexechist,
+                   "pg %p (pa %#"PRIxPADDR")last %u: execpage cleared",
+                   pg, VM_PAGE_TO_PHYS(pg), last, 0);
                pmap_page_clear_attributes(mdpg, VM_PAGEMD_EXECPAGE);
                PMAP_COUNT(exec_uncached_remove);
        } else {
@@ 1.19:1983 / 1.18.2.5:1969 @@ pmap_remove_pv(pmap_t pmap, vaddr_t va,
                 * so we must sync it.
                 */
-               UVMHIST_LOG(pmapexechist,
-                   "pg %p (pa %#"PRIxPADDR")%s: %s",
-                   pg, VM_PAGE_TO_PHYS(pg),
-                   last ? " [last mapping]" : "",
-                   "performed syncicache");
+               UVMHIST_LOG(pmapexechist,
+                   "pg %p (pa %#"PRIxPADDR")last %u: performed syncicache",
+                   pg, VM_PAGE_TO_PHYS(pg), last, 0);
                pmap_page_syncicache(pg);
                PMAP_COUNT(exec_synced_remove);
        }