Please note that diffs are not public domain; they are subject to the copyright notices on the relevant files.

===================================================================
RCS file: /ftp/cvs/cvsroot/src/sys/arch/hppa/hppa/pmap.c,v
rcsdiff: /ftp/cvs/cvsroot/src/sys/arch/hppa/hppa/pmap.c,v: warning: Unknown phrases like `commitid ...;' are present.
retrieving revision 1.71.2.6
retrieving revision 1.72
diff -u -p -r1.71.2.6 -r1.72
--- src/sys/arch/hppa/hppa/pmap.c	2011/05/19 03:42:59	1.71.2.6
+++ src/sys/arch/hppa/hppa/pmap.c	2010/03/16 16:20:19	1.72
@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap.c,v 1.71.2.6 2011/05/19 03:42:59 rmind Exp $	*/
+/*	$NetBSD: pmap.c,v 1.72 2010/03/16 16:20:19 skrll Exp $	*/
 
 /*-
  * Copyright (c) 2001, 2002 The NetBSD Foundation, Inc.
@@ -65,7 +65,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.71.2.6 2011/05/19 03:42:59 rmind Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.72 2010/03/16 16:20:19 skrll Exp $");
 
 #include "opt_cputype.h"
@@ -73,14 +73,12 @@ __KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.7
 #include
 #include
 #include
-#include
 #include
 #include
 #include
 #include
-#include
 #include
 #include
 #include
 #include
@@ -227,23 +225,24 @@ void pmap_dump_table(pa_space_t, vaddr_t
 void pmap_dump_pv(paddr_t);
 #endif
 
-void pmap_page_remove_locked(struct vm_page *);
-int pmap_check_alias(struct vm_page *, vaddr_t, pt_entry_t);
+void pmap_check_alias(struct vm_page *, struct pv_entry *, vaddr_t,
+    pt_entry_t *);
+static bool __changebit(struct vm_page *, u_int, u_int);
 
 /* un-invert PVF_REF */
 #define pmap_pvh_attrs(a) \
-        (((a) & (PVF_MOD|PVF_REF)) ^ PVF_REF)
+        (((a) & (PVF_MOD|PVF_REF|PVF_WRITE|PVF_UNCACHEABLE)) ^ PVF_REF)
 
 #define PMAP_LOCK(pm) \
         do { \
                 if ((pm) != pmap_kernel()) \
-                        mutex_enter((pm)->pm_lock); \
+                        mutex_enter(&(pm)->pm_lock); \
        } while (/*CONSTCOND*/0)
 
 #define PMAP_UNLOCK(pm) \
         do { \
                 if ((pm) != pmap_kernel()) \
-                        mutex_exit((pm)->pm_lock); \
+                        mutex_exit(&(pm)->pm_lock); \
        } while (/*CONSTCOND*/0)
 
 struct vm_page *
@@ -343,7 +342,7 @@ pmap_pde_alloc(pmap_t pm, vaddr_t va, st
             ("%s(%p, 0x%lx, %p)\n", __func__, pm, va, pdep));
 
         KASSERT(pm != pmap_kernel());
-        KASSERT(mutex_owned(pm->pm_lock));
+        KASSERT(mutex_owned(&pm->pm_lock));
 
         pg = pmap_pagealloc(&pm->pm_obj, va);
@@ -427,11 +426,11 @@ pmap_pte_flush(pmap_t pmap, vaddr_t va,
 {
         fdcache(pmap->pm_space, va, PAGE_SIZE);
+        pdtlb(pmap->pm_space, va);
         if (pte & PTE_PROT(TLB_EXECUTE)) {
                 ficache(pmap->pm_space, va, PAGE_SIZE);
                 pitlb(pmap->pm_space, va);
         }
-        pdtlb(pmap->pm_space, va);
 
 #ifdef USE_HPT
         if (pmap_hpt) {
                 struct hpt_entry *hpt;
@@ -499,42 +498,125 @@ void
 pmap_dump_pv(paddr_t pa)
 {
         struct vm_page *pg;
-        struct vm_page_md *md;
         struct pv_entry *pve;
 
         pg = PHYS_TO_VM_PAGE(pa);
-        md = VM_PAGE_TO_MD(pg);
-        db_printf("pg %p attr 0x%08x aliases %d\n", pg, md->pvh_attrs,
-            md->pvh_aliases);
-        for (pve = md->pvh_list; pve; pve = pve->pv_next)
+        mutex_enter(&pg->mdpage.pvh_lock);
+        db_printf("pg %p attr 0x%08x aliases %d\n", pg, pg->mdpage.pvh_attrs,
+            pg->mdpage.pvh_aliases);
+        for (pve = pg->mdpage.pvh_list; pve; pve = pve->pv_next)
                 db_printf("%x:%lx\n", pve->pv_pmap->pm_space,
                     pve->pv_va & PV_VAMASK);
+        mutex_exit(&pg->mdpage.pvh_lock);
 }
 #endif
 
-int
-pmap_check_alias(struct vm_page *pg, vaddr_t va, pt_entry_t pte)
+/*
+ * Check for non-equiv aliases for this page and the mapping being added or
+ * removed. If, when adding, we find a new non-equiv alias then mark all PTEs
+ * as uncacheable including the one we're checking. If, when removing, there
+ * are no non-equiv aliases left then we mark PTEs as cacheable.
+ *
+ * - Shouldn't be called for pages that have been marked uncacheable by
+ *   pmap_kenter_pa.
+ * - Must be called with pg->mdpage.pvh_lock held.
+ */
+void
+pmap_check_alias(struct vm_page *pg, struct pv_entry *pve, vaddr_t va,
+    pt_entry_t *ptep)
 {
-        struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
-        struct pv_entry *pve;
-        int ret = 0;
+        bool nonequiv = false;
+        struct pv_entry *tpve;
+        u_int attrs;
+
+        DPRINTF(PDB_FOLLOW|PDB_ALIAS,
+            ("%s(%p, %p, 0x%lx, %p)\n", __func__, pg, pve, va, ptep));
+
+        /* we should only be looking if we're not PVF_NC */
+        KASSERT((pg->mdpage.pvh_attrs & PVF_NC) == 0);
+        KASSERT(mutex_owned(&pg->mdpage.pvh_lock));
+
+        if (ptep) {
+                attrs = pmap_pvh_attrs(*ptep);
+
+                DPRINTF(PDB_FOLLOW|PDB_ALIAS,
+                    ("%s: va 0x%08lx attrs 0x%08x (new)\n", __func__, va,
+                    attrs));
+        } else {
+                attrs = 0;
 
-        /* check for non-equ aliased mappings */
-        for (pve = md->pvh_list; pve; pve = pve->pv_next) {
-                vaddr_t pva = pve->pv_va & PV_VAMASK;
-
-                pte |= pmap_vp_find(pve->pv_pmap, pva);
-                if ((va & HPPA_PGAOFF) != (pva & HPPA_PGAOFF) &&
-                    (pte & PTE_PROT(TLB_WRITE))) {
+                DPRINTF(PDB_FOLLOW|PDB_ALIAS,
+                    ("%s: va 0x%08lx (removed)\n", __func__, va));
+        }
+
+        /*
+         * Add in flags for existing mappings and check if the mapping we're
+         * adding/removing is a non-equiv alias of the other mappings.
+         */
+        for (tpve = pve; tpve; tpve = tpve->pv_next) {
+                pt_entry_t pte;
+                vaddr_t tva = tpve->pv_va & PV_VAMASK;
+
+                /* XXX LOCK */
+                pte = pmap_vp_find(tpve->pv_pmap, tva);
+                attrs |= pmap_pvh_attrs(pte);
+
+                if (((va ^ tva) & HPPA_PGAOFF) != 0)
+                        nonequiv = true;
+
+                DPRINTF(PDB_FOLLOW|PDB_ALIAS,
+                    ("%s: va 0x%08x:0x%08lx attrs 0x%08x %s\n", __func__,
+                    tpve->pv_pmap->pm_space, tpve->pv_va & PV_VAMASK,
+                    pmap_pvh_attrs(pte), nonequiv ? "alias" : ""));
+        }
+
+        if (!nonequiv) {
+                /*
+                 * Inherit uncacheable attribute if set as it means we already
+                 * have non-equiv aliases.
+                 */
+                if (ptep && (attrs & PVF_UNCACHEABLE) != 0)
+                        *ptep |= PTE_PROT(TLB_UNCACHEABLE);
+
+                /* No more to be done. */
+                return;
+        }
+
+        if (ptep) {
+                if ((attrs & (PVF_WRITE|PVF_MOD)) != 0) {
+                        /*
+                         * We have non-equiv aliases and the new/some
+                         * mapping(s) is/are writable (or modified). We must
+                         * mark all mappings as uncacheable (if they're not
+                         * already marked as such).
+                         */
+                        pg->mdpage.pvh_aliases++;
+
+                        if ((attrs & PVF_UNCACHEABLE) == 0)
+                                __changebit(pg, PVF_UNCACHEABLE, 0);
+
+                        *ptep |= PTE_PROT(TLB_UNCACHEABLE);
                         DPRINTF(PDB_FOLLOW|PDB_ALIAS,
-                            ("%s: aliased writable mapping 0x%x:0x%lx\n",
-                            __func__, pve->pv_pmap->pm_space, pve->pv_va));
-                        ret++;
+                            ("%s: page marked uncacheable\n", __func__));
                 }
-        }
+        } else {
+                if ((attrs & PVF_UNCACHEABLE) != 0) {
+                        /*
+                         * We've removed a non-equiv alias. We can now mark
+                         * it cacheable if all non-equiv aliases are gone.
+                         */
-        return (ret);
+                        pg->mdpage.pvh_aliases--;
+                        if (pg->mdpage.pvh_aliases == 0) {
+                                __changebit(pg, 0, PVF_UNCACHEABLE);
+
+                                DPRINTF(PDB_FOLLOW|PDB_ALIAS,
+                                    ("%s: page re-marked cacheable\n",
+                                    __func__));
+                        }
+                }
+        }
 }
 
 /*
@@ -569,29 +651,26 @@ static inline void
 pmap_pv_enter(struct vm_page *pg, struct pv_entry *pve, pmap_t pm,
     vaddr_t va, struct vm_page *pdep, u_int flags)
 {
-        struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
-
         DPRINTF(PDB_FOLLOW|PDB_PV,
             ("%s(%p, %p, %p, 0x%lx, %p, 0x%x)\n", __func__, pg, pve, pm, va, pdep, flags));
 
-        KASSERT(pm == pmap_kernel() || uvm_page_locked_p(pg));
+        KASSERT(mutex_owned(&pg->mdpage.pvh_lock));
 
         pve->pv_pmap = pm;
         pve->pv_va = va | flags;
         pve->pv_ptp = pdep;
-        pve->pv_next = md->pvh_list;
-        md->pvh_list = pve;
+        pve->pv_next = pg->mdpage.pvh_list;
+        pg->mdpage.pvh_list = pve;
 }
 
 static inline struct pv_entry *
 pmap_pv_remove(struct vm_page *pg, pmap_t pmap, vaddr_t va)
 {
-        struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
         struct pv_entry **pve, *pv;
 
-        KASSERT(pmap == pmap_kernel() || uvm_page_locked_p(pg));
+        KASSERT(mutex_owned(&pg->mdpage.pvh_lock));
 
-        for (pv = *(pve = &md->pvh_list);
+        for (pv = *(pve = &pg->mdpage.pvh_list);
             pv; pv = *(pve = &(*pve)->pv_next))
                 if (pv->pv_pmap == pmap && (pv->pv_va & PV_VAMASK) == va) {
                         *pve = pv->pv_next;
@@ -679,10 +758,7 @@ pmap_bootstrap(vaddr_t vstart)
 
         kpm = pmap_kernel();
         memset(kpm, 0, sizeof(*kpm));
-        mutex_init(&kpm->pm_obj_lock, MUTEX_DEFAULT, IPL_NONE);
-        uvm_obj_init(&kpm->pm_obj, NULL, false, 1);
-        uvm_obj_setlock(&kpm->pm_obj, &kpm->pm_obj_lock);
-
+        UVM_OBJ_INIT(&kpm->pm_obj, NULL, 1);
         kpm->pm_space = HPPA_SID_KERNEL;
         kpm->pm_pid = HPPA_PID_KERNEL;
         kpm->pm_pdir_pg = NULL;
@@ -1042,9 +1118,7 @@ pmap_create(void)
 
         DPRINTF(PDB_FOLLOW|PDB_PMAP, ("%s: pmap = %p\n", __func__, pmap));
 
-        mutex_init(&pmap->pm_obj_lock, MUTEX_DEFAULT, IPL_NONE);
-        uvm_obj_init(&pmap->pm_obj, NULL, false, 1);
-        uvm_obj_setlock(&pmap->pm_obj, &pmap->pm_obj_lock);
+        UVM_OBJ_INIT(&pmap->pm_obj, NULL, 1);
 
         mutex_enter(&pmaps_lock);
@@ -1093,9 +1167,9 @@ pmap_destroy(pmap_t pmap)
 
         DPRINTF(PDB_FOLLOW|PDB_PMAP, ("%s(%p)\n", __func__, pmap));
 
-        mutex_enter(pmap->pm_lock);
+        mutex_enter(&pmap->pm_lock);
         refs = --pmap->pm_obj.uo_refs;
-        mutex_exit(pmap->pm_lock);
+        mutex_exit(&pmap->pm_lock);
 
         if (refs > 0)
                 return;
@@ -1120,8 +1194,7 @@ pmap_destroy(pmap_t pmap)
                                 continue;
 
                         sheep = PHYS_TO_VM_PAGE(PTE_PAGE(*pde));
-                        struct vm_page_md * const md = VM_PAGE_TO_MD(sheep);
-                        for (haggis = md->pvh_list; haggis != NULL; )
+                        for (haggis = sheep->mdpage.pvh_list; haggis != NULL; )
                                 if (haggis->pv_pmap == pmap) {
 
                                         DPRINTF(PDB_FOLLOW, (" 0x%lx",
@@ -1135,7 +1208,7 @@ pmap_destroy(pmap_t pmap)
                                          * exploit the sacred knowledge of
                                          * lambeous ozzmosis
                                          */
-                                        haggis = md->pvh_list;
+                                        haggis = sheep->mdpage.pvh_list;
                                 } else
                                         haggis = haggis->pv_next;
                 }
@@ -1143,12 +1216,11 @@ pmap_destroy(pmap_t pmap)
         }
 #endif
         pmap_sdir_set(pmap->pm_space, 0);
-        mutex_enter(pmap->pm_lock);
+        mutex_enter(&pmap->pm_lock);
         pmap_pagefree(pmap->pm_pdir_pg);
-        mutex_exit(pmap->pm_lock);
-
-        uvm_obj_destroy(&pmap->pm_obj, false);
-        mutex_destroy(&pmap->pm_obj_lock);
+        mutex_exit(&pmap->pm_lock);
+        mutex_destroy(&pmap->pm_lock);
+
         pmap->pm_pdir_pg = NULL;
         pool_put(&pmap_pool, pmap);
 }
@@ -1161,9 +1233,9 @@ pmap_reference(pmap_t pmap)
 
         DPRINTF(PDB_FOLLOW|PDB_PMAP, ("%s(%p)\n", __func__, pmap));
 
-        mutex_enter(pmap->pm_lock);
+        mutex_enter(&pmap->pm_lock);
         pmap->pm_obj.uo_refs++;
-        mutex_exit(pmap->pm_lock);
+        mutex_exit(&pmap->pm_lock);
 }
 
 /*
@@ -1219,10 +1291,10 @@ pmap_enter(pmap_t pmap, vaddr_t va, padd
                 }
 
                 pg = PHYS_TO_VM_PAGE(PTE_PAGE(pte));
+                mutex_enter(&pg->mdpage.pvh_lock);
                 pve = pmap_pv_remove(pg, pmap, va);
-
-                struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
-                md->pvh_attrs |= pmap_pvh_attrs(pte);
+                pg->mdpage.pvh_attrs |= pmap_pvh_attrs(pte);
+                mutex_exit(&pg->mdpage.pvh_lock);
         } else {
                 DPRINTF(PDB_ENTER,
                     ("%s: new mapping 0x%lx -> 0x%lx\n", __func__, va, pa));
@@ -1236,18 +1308,20 @@ pmap_enter(pmap_t pmap, vaddr_t va, padd
         }
 
         if (pmap_initialized && (pg = PHYS_TO_VM_PAGE(pa))) {
+                mutex_enter(&pg->mdpage.pvh_lock);
+
                 if (!pve && !(pve = pmap_pv_alloc())) {
                         if (flags & PMAP_CANFAIL) {
+                                mutex_exit(&pg->mdpage.pvh_lock);
                                 PMAP_UNLOCK(pmap);
                                 return (ENOMEM);
                         }
                         panic("%s: no pv entries available", __func__);
                 }
-                pte |= PTE_PROT(pmap_prot(pmap, prot));
-                if (pmap_check_alias(pg, va, pte))
-                        pmap_page_remove(pg);
                 pmap_pv_enter(pg, pve, pmap, va, ptp, 0);
+                pmap_check_alias(pg, pve, va, &pte);
+                mutex_exit(&pg->mdpage.pvh_lock);
         } else if (pve) {
                 pmap_pv_free(pve);
         }
@@ -1289,13 +1363,15 @@ pmap_remove(pmap_t pmap, vaddr_t sva, va
 
         PMAP_LOCK(pmap);
 
-        for (batch = 0; sva < eva; sva += PAGE_SIZE) {
-                pdemask = sva & PDE_MASK;
-                if (!(pde = pmap_pde_get(pmap->pm_pdir, sva))) {
-                        sva = pdemask + PDE_SIZE - PAGE_SIZE;
-                        continue;
+        for (batch = 0, pdemask = 1; sva < eva; sva += PAGE_SIZE) {
+                if (pdemask != (sva & PDE_MASK)) {
+                        pdemask = sva & PDE_MASK;
+                        if (!(pde = pmap_pde_get(pmap->pm_pdir, sva))) {
+                                sva = pdemask + PDE_SIZE - PAGE_SIZE;
+                                continue;
+                        }
+                        batch = pdemask == sva && sva + PDE_SIZE <= eva;
                 }
-                batch = pdemask == sva && sva + PDE_SIZE <= eva;
 
                 if ((pte = pmap_pte_get(pde, sva))) {
@@ -1314,11 +1390,17 @@ pmap_remove(pmap_t pmap, vaddr_t sva, va
 
                         if (pmap_initialized &&
                             (pg = PHYS_TO_VM_PAGE(PTE_PAGE(pte)))) {
-                                struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
+                                mutex_enter(&pg->mdpage.pvh_lock);
 
                                 pve = pmap_pv_remove(pg, pmap, sva);
-                                md->pvh_attrs |= pmap_pvh_attrs(pte);
+
+                                pmap_check_alias(pg, pg->mdpage.pvh_list,
+                                    sva, NULL);
+
+                                pg->mdpage.pvh_attrs |= pmap_pvh_attrs(pte);
+
+                                mutex_exit(&pg->mdpage.pvh_lock);
 
                                 if (pve != NULL)
                                         pmap_pv_free(pve);
@@ -1368,8 +1450,9 @@ pmap_write_protect(pmap_t pmap, vaddr_t
                         continue;
 
                 pg = PHYS_TO_VM_PAGE(PTE_PAGE(pte));
-                struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
-                md->pvh_attrs |= pmap_pvh_attrs(pte);
+                mutex_enter(&pg->mdpage.pvh_lock);
+                pg->mdpage.pvh_attrs |= pmap_pvh_attrs(pte);
+                mutex_exit(&pg->mdpage.pvh_lock);
 
                 pmap_pte_flush(pmap, sva, pte);
                 pte &= ~PTE_PROT(TLB_AR_MASK);
@@ -1384,16 +1467,16 @@ pmap_write_protect(pmap_t pmap, vaddr_t
 void
 pmap_page_remove(struct vm_page *pg)
 {
-        struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
         struct pv_entry *pve, *npve, **pvp;
 
         DPRINTF(PDB_FOLLOW|PDB_PV, ("%s(%p)\n", __func__, pg));
 
-        if (md->pvh_list == NULL)
+        if (pg->mdpage.pvh_list == NULL)
                 return;
 
-        pvp = &md->pvh_list;
-        for (pve = md->pvh_list; pve; pve = npve) {
+        mutex_enter(&pg->mdpage.pvh_lock);
+        pvp = &pg->mdpage.pvh_list;
+        for (pve = pg->mdpage.pvh_list; pve; pve = npve) {
                 pmap_t pmap = pve->pv_pmap;
                 vaddr_t va = pve->pv_va & PV_VAMASK;
                 volatile pt_entry_t *pde;
@@ -1412,21 +1495,22 @@ pmap_page_remove(struct vm_page *pg)
 
                 if (pve->pv_va & PV_KENTER) {
                         *pvp = pve;
                         pvp = &pve->pv_next;
-                } else
-                        md->pvh_attrs |= pmap_pvh_attrs(pte);
+                        continue;
+                }
+
+                pg->mdpage.pvh_attrs |= pmap_pvh_attrs(pte);
 
                 pmap_pte_flush(pmap, va, pte);
                 if (pte & PTE_PROT(TLB_WIRED))
                         pmap->pm_stats.wired_count--;
                 pmap->pm_stats.resident_count--;
 
-                if (!(pve->pv_va & PV_KENTER)) {
-                        pmap_pte_set(pde, va, 0);
-                        pmap_pv_free(pve);
-                }
+                pmap_pte_set(pde, va, 0);
+                pmap_pv_free(pve);
 
                 PMAP_UNLOCK(pmap);
         }
         *pvp = NULL;
+        mutex_exit(&pg->mdpage.pvh_lock);
 
         DPRINTF(PDB_FOLLOW|PDB_PV, ("%s: leaving\n", __func__));
 }
@@ -1469,21 +1553,36 @@ pmap_unwire(pmap_t pmap, vaddr_t va)
 bool
 pmap_changebit(struct vm_page *pg, u_int set, u_int clear)
 {
-        struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
-        struct pv_entry *pve;
-        int res;
+        bool rv;
 
         DPRINTF(PDB_FOLLOW|PDB_BITS,
             ("%s(%p, %x, %x)\n", __func__, pg, set, clear));
 
-        KASSERT((set & ~(PVF_REF|PVF_UNCACHEABLE)) == 0);
-        KASSERT((clear & ~(PVF_MOD|PVF_WRITE|PVF_UNCACHEABLE)) == 0);
+        mutex_enter(&pg->mdpage.pvh_lock);
+        rv = __changebit(pg, set, clear);
+        mutex_exit(&pg->mdpage.pvh_lock);
+
+        return rv;
+}
+
+/*
+ * Must be called with pg->mdpage.pvh_lock held.
+ */
+static bool
+__changebit(struct vm_page *pg, u_int set, u_int clear)
+{
+        struct pv_entry *pve;
+        int res;
+
+        KASSERT(mutex_owned(&pg->mdpage.pvh_lock));
+        KASSERT(((set | clear) &
+            ~(PVF_MOD|PVF_REF|PVF_UNCACHEABLE|PVF_WRITE)) == 0);
 
         /* preserve other bits */
-        res = md->pvh_attrs & (set | clear);
-        md->pvh_attrs ^= res;
+        res = pg->mdpage.pvh_attrs & (set | clear);
+        pg->mdpage.pvh_attrs ^= res;
 
-        for (pve = md->pvh_list; pve; pve = pve->pv_next) {
+        for (pve = pg->mdpage.pvh_list; pve; pve = pve->pv_next) {
                 pmap_t pmap = pve->pv_pmap;
                 vaddr_t va = pve->pv_va & PV_VAMASK;
                 volatile pt_entry_t *pde;
@@ -1503,7 +1602,7 @@ pmap_changebit(struct vm_page *pg, u_int
                 pte |= set;
 
                 if (!(pve->pv_va & PV_KENTER)) {
-                        md->pvh_attrs |= pmap_pvh_attrs(pte);
+                        pg->mdpage.pvh_attrs |= pmap_pvh_attrs(pte);
                         res |= pmap_pvh_attrs(opte);
                 }
@@ -1520,14 +1619,15 @@ pmap_changebit(struct vm_page *pg, u_int
 bool
 pmap_testbit(struct vm_page *pg, u_int bit)
 {
-        struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
         struct pv_entry *pve;
         pt_entry_t pte;
         int ret;
 
         DPRINTF(PDB_FOLLOW|PDB_BITS, ("%s(%p, %x)\n", __func__, pg, bit));
 
-        for (pve = md->pvh_list; !(md->pvh_attrs & bit) && pve;
+        mutex_enter(&pg->mdpage.pvh_lock);
+
+        for (pve = pg->mdpage.pvh_list; !(pg->mdpage.pvh_attrs & bit) && pve;
             pve = pve->pv_next) {
                 pmap_t pm = pve->pv_pmap;
@@ -1535,9 +1635,10 @@ pmap_testbit(struct vm_page *pg, u_int b
                 if (pve->pv_va & PV_KENTER)
                         continue;
 
-                md->pvh_attrs |= pmap_pvh_attrs(pte);
+                pg->mdpage.pvh_attrs |= pmap_pvh_attrs(pte);
         }
-        ret = ((md->pvh_attrs & bit) != 0);
+        ret = ((pg->mdpage.pvh_attrs & bit) != 0);
+        mutex_exit(&pg->mdpage.pvh_lock);
 
         return ret;
 }
@@ -1596,13 +1697,14 @@ pmap_activate(struct lwp *l)
 static inline void
 pmap_flush_page(struct vm_page *pg, bool purge)
 {
-        struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
         struct pv_entry *pve;
 
         DPRINTF(PDB_FOLLOW|PDB_CACHE, ("%s(%p, %d)\n", __func__, pg, purge));
 
+        KASSERT(!(pg->mdpage.pvh_attrs & PVF_NC));
+
         /* purge cache for all possible mappings for the pa */
-        for (pve = md->pvh_list; pve; pve = pve->pv_next) {
+        for (pve = pg->mdpage.pvh_list; pve; pve = pve->pv_next) {
                 vaddr_t va = pve->pv_va & PV_VAMASK;
                 pa_space_t sp = pve->pv_pmap->pm_space;
@@ -1612,8 +1714,8 @@ pmap_flush_page(struct vm_page *pg, bool
                 fdcache(sp, va, PAGE_SIZE);
 #if defined(HP8000_CPU) || defined(HP8200_CPU) || \
     defined(HP8500_CPU) || defined(HP8600_CPU)
-                ficache(sp, va, PAGE_SIZE);
                 pdtlb(sp, va);
+                ficache(sp, va, PAGE_SIZE);
                 pitlb(sp, va);
 #endif
         }
@@ -1630,15 +1732,14 @@ pmap_zero_page(paddr_t pa)
 
         DPRINTF(PDB_FOLLOW|PDB_PHYS, ("%s(%lx)\n", __func__, pa));
 
-        KASSERT(VM_PAGE_TO_MD(PHYS_TO_VM_PAGE(pa))->pvh_list == NULL);
+        KASSERT(PHYS_TO_VM_PAGE(pa)->mdpage.pvh_list == NULL);
 
         memset((void *)pa, 0, PAGE_SIZE);
 
         fdcache(HPPA_SID_KERNEL, pa, PAGE_SIZE);
-
 #if defined(HP8000_CPU) || defined(HP8200_CPU) || \
     defined(HP8500_CPU) || defined(HP8600_CPU)
-        ficache(HPPA_SID_KERNEL, pa, PAGE_SIZE);
         pdtlb(HPPA_SID_KERNEL, pa);
+        ficache(HPPA_SID_KERNEL, pa, PAGE_SIZE);
         pitlb(HPPA_SID_KERNEL, pa);
 #endif
 }
@@ -1655,7 +1756,7 @@ pmap_copy_page(paddr_t spa, paddr_t dpa)
 
         DPRINTF(PDB_FOLLOW|PDB_PHYS, ("%s(%lx, %lx)\n", __func__, spa, dpa));
 
-        KASSERT(VM_PAGE_TO_MD(PHYS_TO_VM_PAGE(dpa))->pvh_list == NULL);
+        KASSERT(PHYS_TO_VM_PAGE(dpa)->mdpage.pvh_list == NULL);
 
         pmap_flush_page(srcpg, false);
@@ -1665,10 +1766,10 @@ pmap_copy_page(paddr_t spa, paddr_t dpa)
         fdcache(HPPA_SID_KERNEL, dpa, PAGE_SIZE);
#if defined(HP8000_CPU) || defined(HP8200_CPU) || \
     defined(HP8500_CPU) || defined(HP8600_CPU)
-        ficache(HPPA_SID_KERNEL, spa, PAGE_SIZE);
-        ficache(HPPA_SID_KERNEL, dpa, PAGE_SIZE);
         pdtlb(HPPA_SID_KERNEL, spa);
         pdtlb(HPPA_SID_KERNEL, dpa);
+        ficache(HPPA_SID_KERNEL, spa, PAGE_SIZE);
+        ficache(HPPA_SID_KERNEL, dpa, PAGE_SIZE);
         pitlb(HPPA_SID_KERNEL, spa);
         pitlb(HPPA_SID_KERNEL, dpa);
 #endif
@@ -1713,22 +1814,30 @@ pmap_kenter_pa(vaddr_t va, paddr_t pa, v
 
                 pg = PHYS_TO_VM_PAGE(PTE_PAGE(pte));
                 if (pg != NULL) {
+                        KASSERT(pa < HPPA_IOBEGIN);
-                        struct pv_entry *pve;
+                        mutex_enter(&pg->mdpage.pvh_lock);
+
+                        if (flags & PMAP_NOCACHE)
+                                pg->mdpage.pvh_attrs |= PVF_NC;
+                        else {
+                                struct pv_entry *pve;
 
-                        pve = pmap_pv_alloc();
-                        if (!pve)
-                                panic("%s: no pv entries available",
-                                    __func__);
-                        DPRINTF(PDB_FOLLOW|PDB_ENTER,
-                            ("%s(%lx, %lx, %x) TLB_KENTER\n", __func__,
-                            va, pa, pte));
+                                pve = pmap_pv_alloc();
+                                if (!pve)
+                                        panic("%s: no pv entries available",
+                                            __func__);
+                                DPRINTF(PDB_FOLLOW|PDB_ENTER,
+                                    ("%s(%lx, %lx, %x) TLB_KENTER\n", __func__,
+                                    va, pa, pte));
+
+                                pmap_pv_enter(pg, pve, pmap_kernel(), va, NULL,
+                                    PV_KENTER);
+                                pmap_check_alias(pg, pve, va, &pte);
+                        }
 
-                        if (pmap_check_alias(pg, va, pte))
-                                pmap_page_remove(pg);
-                        pmap_pv_enter(pg, pve, pmap_kernel(), va, NULL,
-                            PV_KENTER);
+                        mutex_exit(&pg->mdpage.pvh_lock);
                 }
         }
         pmap_pte_set(pde, va, pte);
@@ -1797,8 +1906,18 @@ pmap_kremove(vaddr_t va, vsize_t size)
 
                 pmap_pte_flush(pmap, va, pte);
                 pmap_pte_set(pde, va, 0);
                 if (pmap_initialized && (pg = PHYS_TO_VM_PAGE(PTE_PAGE(pte)))) {
+
+                        mutex_enter(&pg->mdpage.pvh_lock);
+
                         pve = pmap_pv_remove(pg, pmap, va);
+
+                        if ((pg->mdpage.pvh_attrs & PVF_NC) == 0)
+                                pmap_check_alias(pg, pg->mdpage.pvh_list, va,
+                                    NULL);
+
+                        pg->mdpage.pvh_attrs &= ~PVF_NC;
+
+                        mutex_exit(&pg->mdpage.pvh_lock);
                         if (pve != NULL)
                                 pmap_pv_free(pve);
                 }
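
Editor's note on the pmap_check_alias() change above: the hppa data cache is virtually indexed, so two mappings of the same physical page only conflict ("non-equivalent aliases") when their virtual addresses differ in the cache-index bits covered by HPPA_PGAOFF, and the new code only degrades mappings to uncacheable when such an alias is also writable or modified. The fragment below is an illustrative, userland-only sketch of that rule, not NetBSD code; the names mapping, CACHE_ALIAS_MASK and must_uncache() are invented for this example, and CACHE_ALIAS_MASK is an arbitrary stand-in for HPPA_PGAOFF.

/*
 * Sketch of the non-equivalent alias rule applied by pmap_check_alias().
 * Build with any C99 compiler; this is not kernel code.
 */
#include <stdbool.h>
#include <stdio.h>

#define CACHE_ALIAS_MASK 0x000ff000UL   /* stand-in for HPPA_PGAOFF */

struct mapping {
        unsigned long va;       /* virtual address of the mapping */
        bool writable;          /* stands in for PVF_WRITE/PVF_MOD */
};

/*
 * Return true when the mappings must be made uncacheable: the first entry
 * (playing the role of the mapping being added or removed) lands on a
 * different cache index than some other mapping, and at least one mapping
 * is writable or modified.
 */
static bool
must_uncache(const struct mapping *m, int n)
{
        bool nonequiv = false, writable = false;
        int i;

        for (i = 0; i < n; i++) {
                if (((m[0].va ^ m[i].va) & CACHE_ALIAS_MASK) != 0)
                        nonequiv = true;
                if (m[i].writable)
                        writable = true;
        }
        return nonequiv && writable;
}

int
main(void)
{
        struct mapping m[] = {
                { 0x00041000UL, true },         /* writable mapping */
                { 0x00082000UL, false },        /* different cache colour */
        };

        printf("uncacheable: %s\n", must_uncache(m, 2) ? "yes" : "no");
        return 0;
}

Running the sketch prints "uncacheable: yes"; making both mappings read-only or giving them the same cache colour flips the answer to "no", which mirrors the cacheable/uncacheable transitions the diff performs in pmap_enter(), pmap_remove(), pmap_kenter_pa() and pmap_kremove().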