Please note that diffs are not public domain; they are subject to the copyright notices on the relevant files. =================================================================== RCS file: /ftp/cvs/cvsroot/src/sys/arch/hppa/hppa/pmap.c,v rcsdiff: /ftp/cvs/cvsroot/src/sys/arch/hppa/hppa/pmap.c,v: warning: Unknown phrases like `commitid ...;' are present. retrieving revision 1.63.2.10 retrieving revision 1.64 diff -u -p -r1.63.2.10 -r1.64 --- src/sys/arch/hppa/hppa/pmap.c 2010/11/06 08:08:17 1.63.2.10 +++ src/sys/arch/hppa/hppa/pmap.c 2010/02/16 16:56:30 1.64 @@ -1,4 +1,4 @@ -/* $NetBSD: pmap.c,v 1.63.2.10 2010/11/06 08:08:17 uebayasi Exp $ */ +/* $NetBSD: pmap.c,v 1.64 2010/02/16 16:56:30 skrll Exp $ */ /*- * Copyright (c) 2001, 2002 The NetBSD Foundation, Inc. @@ -65,11 +65,7 @@ */ #include -__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.63.2.10 2010/11/06 08:08:17 uebayasi Exp $"); - -#include "opt_xip.h" - -#include "opt_cputype.h" +__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.64 2010/02/16 16:56:30 skrll Exp $"); #include #include @@ -93,8 +89,6 @@ __KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.6 #include #endif -#define VM_PAGE_TO_MD(pg) (&(pg)->mdpage) - #ifdef PMAPDEBUG #define static /**/ @@ -213,8 +207,6 @@ static inline void pmap_flush_page(struc void pmap_copy_page(paddr_t, paddr_t); -static void pmap_page_physload(paddr_t, paddr_t); - #ifdef USE_HPT static inline struct hpt_entry *pmap_hash(pmap_t, vaddr_t); static inline uint32_t pmap_vtag(pmap_t, vaddr_t); @@ -229,12 +221,12 @@ void pmap_dump_table(pa_space_t, vaddr_t void pmap_dump_pv(paddr_t); #endif -void pmap_page_remove_locked(struct vm_page *); -int pmap_check_alias(struct vm_page *, vaddr_t, pt_entry_t); +void pmap_check_alias(struct vm_page *, struct pv_entry *, vaddr_t, + pt_entry_t *); +static bool __changebit(struct vm_page *, u_int, u_int); -/* un-invert PVF_REF */ #define pmap_pvh_attrs(a) \ - (((a) & (PVF_MOD|PVF_REF)) ^ PVF_REF) + (((a) & (PVF_MOD|PVF_REF|PVF_WRITE|PVF_UNCACHEABLE)) ^ PVF_REF) #define PMAP_LOCK(pm) \ do { \ @@ -263,14 +255,7 @@ pmap_pagealloc(struct uvm_object *obj, v void pmap_pagefree(struct vm_page *pg) { - paddr_t pa = VM_PAGE_TO_PHYS(pg); - pdcache(HPPA_SID_KERNEL, pa, PAGE_SIZE); - -#if defined(HP8000_CPU) || defined(HP8200_CPU) || \ - defined(HP8500_CPU) || defined(HP8600_CPU) - pdtlb(HPPA_SID_KERNEL, pa); - pitlb(HPPA_SID_KERNEL, pa); -#endif + fdcache(HPPA_SID_KERNEL, VM_PAGE_TO_PHYS(pg), PAGE_SIZE); uvm_pagefree(pg); } @@ -428,11 +413,11 @@ void pmap_pte_flush(pmap_t pmap, vaddr_t va, pt_entry_t pte) { - fdcache(pmap->pm_space, va, PAGE_SIZE); if (pte & PTE_PROT(TLB_EXECUTE)) { ficache(pmap->pm_space, va, PAGE_SIZE); pitlb(pmap->pm_space, va); } + fdcache(pmap->pm_space, va, PAGE_SIZE); pdtlb(pmap->pm_space, va); #ifdef USE_HPT if (pmap_hpt) { @@ -483,14 +468,14 @@ pmap_dump_table(pa_space_t space, vaddr_ va = pdemask + PDE_SIZE; continue; } - db_printf("%x:%8p:\n", space, pde); + printf("%x:%8p:\n", space, pde); } pte = pmap_pte_get(pde, va); if (pte) { snprintb(buf, sizeof(buf), TLB_BITS, TLB_PROT(pte & PAGE_MASK)); - db_printf("0x%08lx-0x%08x:%s\n", va, pte & ~PAGE_MASK, + printf("0x%08lx-0x%08x:%s\n", va, pte & ~PAGE_MASK, buf); } va += PAGE_SIZE; @@ -501,44 +486,125 @@ void pmap_dump_pv(paddr_t pa) { struct vm_page *pg; - struct vm_page_md *md; struct pv_entry *pve; pg = PHYS_TO_VM_PAGE(pa); - md = VM_PAGE_TO_MD(pg); - mutex_enter(&md->pvh_lock); - db_printf("pg %p attr 0x%08x aliases %d\n", pg, md->pvh_attrs, - md->pvh_aliases); - for (pve = md->pvh_list; pve; pve = pve->pv_next) - 
db_printf("%x:%lx\n", pve->pv_pmap->pm_space, + mutex_enter(&pg->mdpage.pvh_lock); + printf("pg %p attr 0x%08x aliases %d\n", pg, pg->mdpage.pvh_attrs, + pg->mdpage.pvh_aliases); + for (pve = pg->mdpage.pvh_list; pve; pve = pve->pv_next) + printf("%x:%lx\n", pve->pv_pmap->pm_space, pve->pv_va & PV_VAMASK); - mutex_exit(&md->pvh_lock); + mutex_exit(&pg->mdpage.pvh_lock); } #endif -int -pmap_check_alias(struct vm_page *pg, vaddr_t va, pt_entry_t pte) +/* + * Check for non-equiv aliases for this page and the mapping being added or + * removed. If, when adding, we find a new non-equiv alias then mark all PTEs + * as uncacheable including the one we're checking. If, when removing, there + * are no non-equiv aliases left then we mark PTEs as cacheable. + * + * - Shouldn't be called for pages that have been marked uncacheable by + * pmap_kenter_pa. + * - Must be called with pg->mdpage.pvh_lock held. + */ +void +pmap_check_alias(struct vm_page *pg, struct pv_entry *pve, vaddr_t va, + pt_entry_t *ptep) { - struct vm_page_md * const md = VM_PAGE_TO_MD(pg); - struct pv_entry *pve; - int ret = 0; + bool nonequiv = false; + struct pv_entry *tpve; + u_int attrs; + + DPRINTF(PDB_FOLLOW|PDB_ALIAS, + ("%s(%p, %p, 0x%lx, %p)\n", __func__, pg, pve, va, ptep)); + + /* we should only be looking if we're not PVF_NC */ + KASSERT((pg->mdpage.pvh_attrs & PVF_NC) == 0); + KASSERT(mutex_owned(&pg->mdpage.pvh_lock)); + + if (ptep) { + attrs = pmap_pvh_attrs(*ptep); + + DPRINTF(PDB_FOLLOW|PDB_ALIAS, + ("%s: va 0x%08lx attrs 0x%08x (new)\n", __func__, va, + attrs)); + } else { + attrs = 0; + + DPRINTF(PDB_FOLLOW|PDB_ALIAS, + ("%s: va 0x%08lx (removed)\n", __func__, va)); + } + + /* + * Add in flags for existing mappings and check if mapping we're + * adding/removing is an non-equiv aliases of the other mappings. + */ + for (tpve = pve; tpve; tpve = tpve->pv_next) { + pt_entry_t pte; + vaddr_t tva = tpve->pv_va & PV_VAMASK; - /* check for non-equ aliased mappings */ - for (pve = md->pvh_list; pve; pve = pve->pv_next) { - vaddr_t pva = pve->pv_va & PV_VAMASK; - - pte |= pmap_vp_find(pve->pv_pmap, pva); - if ((va & HPPA_PGAOFF) != (pva & HPPA_PGAOFF) && - (pte & PTE_PROT(TLB_WRITE))) { + /* XXX LOCK */ + pte = pmap_vp_find(tpve->pv_pmap, tva); + attrs |= pmap_pvh_attrs(pte); + + if (((va ^ tva) & HPPA_PGAOFF) != 0) + nonequiv = true; + + DPRINTF(PDB_FOLLOW|PDB_ALIAS, + ("%s: va 0x%08x:0x%08lx attrs 0x%08x %s\n", __func__, + tpve->pv_pmap->pm_space, tpve->pv_va & PV_VAMASK, + pmap_pvh_attrs(pte), nonequiv ? "alias" : "")); + } + + if (!nonequiv) { + /* + * Inherit uncacheable attribute if set as it means we already + * have non-equiv aliases. + */ + if (ptep && (attrs & PVF_UNCACHEABLE) != 0) + *ptep |= PTE_PROT(TLB_UNCACHEABLE); + + /* No more to be done. */ + return; + } + + if (ptep) { + if ((attrs & (PVF_WRITE|PVF_MOD)) != 0) { + /* + * We have non-equiv aliases and the new/some + * mapping(s) is/are writable (or modified). We must + * mark all mappings as uncacheable (if they're not + * already marked as such). + */ + pg->mdpage.pvh_aliases++; + + if ((attrs & PVF_UNCACHEABLE) == 0) + __changebit(pg, PVF_UNCACHEABLE, 0); + + *ptep |= PTE_PROT(TLB_UNCACHEABLE); DPRINTF(PDB_FOLLOW|PDB_ALIAS, - ("%s: aliased writable mapping 0x%x:0x%lx\n", - __func__, pve->pv_pmap->pm_space, pve->pv_va)); - ret++; + ("%s: page marked uncacheable\n", __func__)); } - } + } else { + if ((attrs & PVF_UNCACHEABLE) != 0) { + /* + * We've removed a non-equiv aliases. We can now mark + * it cacheable if all non-equiv aliases are gone. 
+ */ - return (ret); + pg->mdpage.pvh_aliases--; + if (pg->mdpage.pvh_aliases == 0) { + __changebit(pg, 0, PVF_UNCACHEABLE); + + DPRINTF(PDB_FOLLOW|PDB_ALIAS, + ("%s: page re-marked cacheable\n", + __func__)); + } + } + } } /* @@ -573,29 +639,26 @@ static inline void pmap_pv_enter(struct vm_page *pg, struct pv_entry *pve, pmap_t pm, vaddr_t va, struct vm_page *pdep, u_int flags) { - struct vm_page_md * const md = VM_PAGE_TO_MD(pg); - DPRINTF(PDB_FOLLOW|PDB_PV, ("%s(%p, %p, %p, 0x%lx, %p, 0x%x)\n", __func__, pg, pve, pm, va, pdep, flags)); - KASSERT(mutex_owned(&md->pvh_lock)); + KASSERT(mutex_owned(&pg->mdpage.pvh_lock)); pve->pv_pmap = pm; pve->pv_va = va | flags; pve->pv_ptp = pdep; - pve->pv_next = md->pvh_list; - md->pvh_list = pve; + pve->pv_next = pg->mdpage.pvh_list; + pg->mdpage.pvh_list = pve; } static inline struct pv_entry * pmap_pv_remove(struct vm_page *pg, pmap_t pmap, vaddr_t va) { - struct vm_page_md * const md = VM_PAGE_TO_MD(pg); struct pv_entry **pve, *pv; - KASSERT(mutex_owned(&md->pvh_lock)); + KASSERT(mutex_owned(&pg->mdpage.pvh_lock)); - for (pv = *(pve = &md->pvh_list); + for (pv = *(pve = &pg->mdpage.pvh_list); pv; pv = *(pve = &(*pve)->pv_next)) if (pv->pv_pmap == pmap && (pv->pv_va & PV_VAMASK) == va) { *pve = pv->pv_next; @@ -942,7 +1005,7 @@ pmap_bootstrap(vaddr_t vstart) availphysmem = 0; - pmap_page_physload(resvmem, atop(ksrx)); + pmap_page_physload(resvmem, atop(ksro)); pmap_page_physload(atop(kero), atop(ksrw)); pmap_page_physload(atop(kerw), physmem); @@ -1119,8 +1182,7 @@ pmap_destroy(pmap_t pmap) continue; sheep = PHYS_TO_VM_PAGE(PTE_PAGE(*pde)); - struct vm_page_md * const md = VM_PAGE_TO_MD(sheep); - for (haggis = md->pvh_list; haggis != NULL; ) + for (haggis = sheep->mdpage.pvh_list; haggis != NULL; ) if (haggis->pv_pmap == pmap) { DPRINTF(PDB_FOLLOW, (" 0x%lx", @@ -1134,7 +1196,7 @@ pmap_destroy(pmap_t pmap) * exploit the sacred knowledge of * lambeous ozzmosis */ - haggis = md->pvh_list; + haggis = sheep->mdpage.pvh_list; } else haggis = haggis->pv_next; } @@ -1217,11 +1279,10 @@ pmap_enter(pmap_t pmap, vaddr_t va, padd } pg = PHYS_TO_VM_PAGE(PTE_PAGE(pte)); - struct vm_page_md * const md = VM_PAGE_TO_MD(pg); - mutex_enter(&md->pvh_lock); + mutex_enter(&pg->mdpage.pvh_lock); pve = pmap_pv_remove(pg, pmap, va); - md->pvh_attrs |= pmap_pvh_attrs(pte); - mutex_exit(&md->pvh_lock); + pg->mdpage.pvh_attrs |= pmap_pvh_attrs(pte); + mutex_exit(&pg->mdpage.pvh_lock); } else { DPRINTF(PDB_ENTER, ("%s: new mapping 0x%lx -> 0x%lx\n", __func__, va, pa)); @@ -1234,24 +1295,21 @@ pmap_enter(pmap_t pmap, vaddr_t va, padd ptp->wire_count++; } - if (pmap_initialized && ((flags & PMAP_NOCACHE) == 0) && - (pg = PHYS_TO_VM_PAGE(pa))) { - struct vm_page_md * const md = VM_PAGE_TO_MD(pg); + if (pmap_initialized && (pg = PHYS_TO_VM_PAGE(pa))) { + mutex_enter(&pg->mdpage.pvh_lock); if (!pve && !(pve = pmap_pv_alloc())) { if (flags & PMAP_CANFAIL) { - mutex_exit(&md->pvh_lock); + mutex_exit(&pg->mdpage.pvh_lock); PMAP_UNLOCK(pmap); return (ENOMEM); } panic("%s: no pv entries available", __func__); } - pte |= PTE_PROT(pmap_prot(pmap, prot)); - mutex_enter(&md->pvh_lock); - if (pmap_check_alias(pg, va, pte)) - pmap_page_remove_locked(pg); pmap_pv_enter(pg, pve, pmap, va, ptp, 0); - mutex_exit(&md->pvh_lock); + pmap_check_alias(pg, pve, va, &pte); + + mutex_exit(&pg->mdpage.pvh_lock); } else if (pve) { pmap_pv_free(pve); } @@ -1293,13 +1351,15 @@ pmap_remove(pmap_t pmap, vaddr_t sva, va PMAP_LOCK(pmap); - for (batch = 0; sva < eva; sva += PAGE_SIZE) { - pdemask = sva & 
PDE_MASK; - if (!(pde = pmap_pde_get(pmap->pm_pdir, sva))) { - sva = pdemask + PDE_SIZE - PAGE_SIZE; - continue; + for (batch = 0, pdemask = 1; sva < eva; sva += PAGE_SIZE) { + if (pdemask != (sva & PDE_MASK)) { + pdemask = sva & PDE_MASK; + if (!(pde = pmap_pde_get(pmap->pm_pdir, sva))) { + sva = pdemask + PDE_SIZE - PAGE_SIZE; + continue; + } + batch = pdemask == sva && sva + PDE_SIZE <= eva; } - batch = pdemask == sva && sva + PDE_SIZE <= eva; if ((pte = pmap_pte_get(pde, sva))) { @@ -1318,14 +1378,17 @@ pmap_remove(pmap_t pmap, vaddr_t sva, va if (pmap_initialized && (pg = PHYS_TO_VM_PAGE(PTE_PAGE(pte)))) { - struct vm_page_md * const md = VM_PAGE_TO_MD(pg); - mutex_enter(&md->pvh_lock); + mutex_enter(&pg->mdpage.pvh_lock); pve = pmap_pv_remove(pg, pmap, sva); - md->pvh_attrs |= pmap_pvh_attrs(pte); - mutex_exit(&md->pvh_lock); + pmap_check_alias(pg, pg->mdpage.pvh_list, + sva, NULL); + + pg->mdpage.pvh_attrs |= pmap_pvh_attrs(pte); + + mutex_exit(&pg->mdpage.pvh_lock); if (pve != NULL) pmap_pv_free(pve); @@ -1375,10 +1438,9 @@ pmap_write_protect(pmap_t pmap, vaddr_t continue; pg = PHYS_TO_VM_PAGE(PTE_PAGE(pte)); - struct vm_page_md * const md = VM_PAGE_TO_MD(pg); - mutex_enter(&md->pvh_lock); - md->pvh_attrs |= pmap_pvh_attrs(pte); - mutex_exit(&md->pvh_lock); + mutex_enter(&pg->mdpage.pvh_lock); + pg->mdpage.pvh_attrs |= pmap_pvh_attrs(pte); + mutex_exit(&pg->mdpage.pvh_lock); pmap_pte_flush(pmap, sva, pte); pte &= ~PTE_PROT(TLB_AR_MASK); @@ -1393,26 +1455,16 @@ pmap_write_protect(pmap_t pmap, vaddr_t void pmap_page_remove(struct vm_page *pg) { - struct vm_page_md * const md = VM_PAGE_TO_MD(pg); - - mutex_enter(&md->pvh_lock); - pmap_page_remove_locked(pg); - mutex_exit(&md->pvh_lock); -} - -void -pmap_page_remove_locked(struct vm_page *pg) -{ - struct vm_page_md * const md = VM_PAGE_TO_MD(pg); struct pv_entry *pve, *npve, **pvp; DPRINTF(PDB_FOLLOW|PDB_PV, ("%s(%p)\n", __func__, pg)); - if (md->pvh_list == NULL) + if (pg->mdpage.pvh_list == NULL) return; - pvp = &md->pvh_list; - for (pve = md->pvh_list; pve; pve = npve) { + mutex_enter(&pg->mdpage.pvh_lock); + pvp = &pg->mdpage.pvh_list; + for (pve = pg->mdpage.pvh_list; pve; pve = npve) { pmap_t pmap = pve->pv_pmap; vaddr_t va = pve->pv_va & PV_VAMASK; volatile pt_entry_t *pde; @@ -1431,21 +1483,22 @@ pmap_page_remove_locked(struct vm_page * if (pve->pv_va & PV_KENTER) { *pvp = pve; pvp = &pve->pv_next; - } else - md->pvh_attrs |= pmap_pvh_attrs(pte); + continue; + } + + pg->mdpage.pvh_attrs |= pmap_pvh_attrs(pte); pmap_pte_flush(pmap, va, pte); if (pte & PTE_PROT(TLB_WIRED)) pmap->pm_stats.wired_count--; pmap->pm_stats.resident_count--; - if (!(pve->pv_va & PV_KENTER)) { - pmap_pte_set(pde, va, 0); - pmap_pv_free(pve); - } + pmap_pte_set(pde, va, 0); + pmap_pv_free(pve); PMAP_UNLOCK(pmap); } *pvp = NULL; + mutex_exit(&pg->mdpage.pvh_lock); DPRINTF(PDB_FOLLOW|PDB_PV, ("%s: leaving\n", __func__)); } @@ -1488,23 +1541,36 @@ pmap_unwire(pmap_t pmap, vaddr_t va) bool pmap_changebit(struct vm_page *pg, u_int set, u_int clear) { - struct vm_page_md * const md = VM_PAGE_TO_MD(pg); - struct pv_entry *pve; - int res; + bool rv; DPRINTF(PDB_FOLLOW|PDB_BITS, ("%s(%p, %x, %x)\n", __func__, pg, set, clear)); - KASSERT((set & ~(PVF_REF|PVF_UNCACHEABLE)) == 0); - KASSERT((clear & ~(PVF_MOD|PVF_WRITE|PVF_UNCACHEABLE)) == 0); + mutex_enter(&pg->mdpage.pvh_lock); + rv = __changebit(pg, set, clear); + mutex_exit(&pg->mdpage.pvh_lock); + + return rv; +} + +/* + * Must be called with pg->mdpage.pvh_lock held. 
+ */ +static bool +__changebit(struct vm_page *pg, u_int set, u_int clear) +{ + struct pv_entry *pve; + int res; - mutex_enter(&md->pvh_lock); + KASSERT(mutex_owned(&pg->mdpage.pvh_lock)); + KASSERT(((set | clear) & + ~(PVF_MOD|PVF_REF|PVF_UNCACHEABLE|PVF_WRITE)) == 0); /* preserve other bits */ - res = md->pvh_attrs & (set | clear); - md->pvh_attrs ^= res; + res = pg->mdpage.pvh_attrs & (set | clear); + pg->mdpage.pvh_attrs ^= res; - for (pve = md->pvh_list; pve; pve = pve->pv_next) { + for (pve = pg->mdpage.pvh_list; pve; pve = pve->pv_next) { pmap_t pmap = pve->pv_pmap; vaddr_t va = pve->pv_va & PV_VAMASK; volatile pt_entry_t *pde; @@ -1524,7 +1590,7 @@ pmap_changebit(struct vm_page *pg, u_int pte |= set; if (!(pve->pv_va & PV_KENTER)) { - md->pvh_attrs |= pmap_pvh_attrs(pte); + pg->mdpage.pvh_attrs |= pmap_pvh_attrs(pte); res |= pmap_pvh_attrs(opte); } @@ -1534,7 +1600,6 @@ pmap_changebit(struct vm_page *pg, u_int } } } - mutex_exit(&md->pvh_lock); return ((res & (clear | set)) != 0); } @@ -1542,16 +1607,15 @@ pmap_changebit(struct vm_page *pg, u_int bool pmap_testbit(struct vm_page *pg, u_int bit) { - struct vm_page_md * const md = VM_PAGE_TO_MD(pg); struct pv_entry *pve; pt_entry_t pte; int ret; DPRINTF(PDB_FOLLOW|PDB_BITS, ("%s(%p, %x)\n", __func__, pg, bit)); - mutex_enter(&md->pvh_lock); + mutex_enter(&pg->mdpage.pvh_lock); - for (pve = md->pvh_list; !(md->pvh_attrs & bit) && pve; + for (pve = pg->mdpage.pvh_list; !(pg->mdpage.pvh_attrs & bit) && pve; pve = pve->pv_next) { pmap_t pm = pve->pv_pmap; @@ -1559,27 +1623,15 @@ pmap_testbit(struct vm_page *pg, u_int b if (pve->pv_va & PV_KENTER) continue; - md->pvh_attrs |= pmap_pvh_attrs(pte); + pg->mdpage.pvh_attrs |= pmap_pvh_attrs(pte); } - ret = ((md->pvh_attrs & bit) != 0); - mutex_exit(&md->pvh_lock); + ret = ((pg->mdpage.pvh_attrs & bit) != 0); + mutex_exit(&pg->mdpage.pvh_lock); return ret; } /* - * pmap_mmap(addr, off) - * Convert the given kernel virtual address to the page frame - * number (mmap cookie). 
- */ -paddr_t -pmap_mmap(vaddr_t addr, off_t off) -{ - - return btop(addr + off); -} - -/* * pmap_extract(pmap, va, pap) * fills in the physical address corresponding to the * virtual address specified by pmap and va into the @@ -1621,6 +1673,8 @@ pmap_activate(struct lwp *l) pa_space_t space = pmap->pm_space; struct pcb *pcb = lwp_getpcb(l); + KASSERT(pcb->pcb_uva == uvm_lwp_getuarea(l)); + /* space is cached for the copy{in,out}'s pleasure */ pcb->pcb_space = space; fdcache(HPPA_SID_KERNEL, (vaddr_t)pcb, sizeof(struct pcb)); @@ -1633,27 +1687,23 @@ pmap_activate(struct lwp *l) static inline void pmap_flush_page(struct vm_page *pg, bool purge) { - struct vm_page_md * const md = VM_PAGE_TO_MD(pg); struct pv_entry *pve; DPRINTF(PDB_FOLLOW|PDB_CACHE, ("%s(%p, %d)\n", __func__, pg, purge)); + KASSERT(!(pg->mdpage.pvh_attrs & PVF_NC)); + /* purge cache for all possible mappings for the pa */ - for (pve = md->pvh_list; pve; pve = pve->pv_next) { + mutex_enter(&pg->mdpage.pvh_lock); + for (pve = pg->mdpage.pvh_list; pve; pve = pve->pv_next) { vaddr_t va = pve->pv_va & PV_VAMASK; - pa_space_t sp = pve->pv_pmap->pm_space; if (purge) - pdcache(sp, va, PAGE_SIZE); + pdcache(pve->pv_pmap->pm_space, va, PAGE_SIZE); else - fdcache(sp, va, PAGE_SIZE); -#if defined(HP8000_CPU) || defined(HP8200_CPU) || \ - defined(HP8500_CPU) || defined(HP8600_CPU) - ficache(sp, va, PAGE_SIZE); - pdtlb(sp, va); - pitlb(sp, va); -#endif + fdcache(pve->pv_pmap->pm_space, va, PAGE_SIZE); } + mutex_exit(&pg->mdpage.pvh_lock); } /* @@ -1667,17 +1717,10 @@ pmap_zero_page(paddr_t pa) DPRINTF(PDB_FOLLOW|PDB_PHYS, ("%s(%lx)\n", __func__, pa)); - KASSERT(VM_PAGE_TO_MD(PHYS_TO_VM_PAGE(pa))->pvh_list == NULL); + KASSERT(PHYS_TO_VM_PAGE(pa)->mdpage.pvh_list == NULL); memset((void *)pa, 0, PAGE_SIZE); fdcache(HPPA_SID_KERNEL, pa, PAGE_SIZE); - -#if defined(HP8000_CPU) || defined(HP8200_CPU) || \ - defined(HP8500_CPU) || defined(HP8600_CPU) - ficache(HPPA_SID_KERNEL, pa, PAGE_SIZE); - pdtlb(HPPA_SID_KERNEL, pa); - pitlb(HPPA_SID_KERNEL, pa); -#endif } /* @@ -1692,7 +1735,7 @@ pmap_copy_page(paddr_t spa, paddr_t dpa) DPRINTF(PDB_FOLLOW|PDB_PHYS, ("%s(%lx, %lx)\n", __func__, spa, dpa)); - KASSERT(VM_PAGE_TO_MD(PHYS_TO_VM_PAGE(dpa))->pvh_list == NULL); + KASSERT(PHYS_TO_VM_PAGE(dpa)->mdpage.pvh_list == NULL); pmap_flush_page(srcpg, false); @@ -1700,15 +1743,6 @@ pmap_copy_page(paddr_t spa, paddr_t dpa) pdcache(HPPA_SID_KERNEL, spa, PAGE_SIZE); fdcache(HPPA_SID_KERNEL, dpa, PAGE_SIZE); -#if defined(HP8000_CPU) || defined(HP8200_CPU) || \ - defined(HP8500_CPU) || defined(HP8600_CPU) - ficache(HPPA_SID_KERNEL, spa, PAGE_SIZE); - ficache(HPPA_SID_KERNEL, dpa, PAGE_SIZE); - pdtlb(HPPA_SID_KERNEL, spa); - pdtlb(HPPA_SID_KERNEL, dpa); - pitlb(HPPA_SID_KERNEL, spa); - pitlb(HPPA_SID_KERNEL, dpa); -#endif } void @@ -1750,26 +1784,30 @@ pmap_kenter_pa(vaddr_t va, paddr_t pa, v pg = PHYS_TO_VM_PAGE(PTE_PAGE(pte)); if (pg != NULL) { - struct vm_page_md * const md = VM_PAGE_TO_MD(pg); KASSERT(pa < HPPA_IOBEGIN); - struct pv_entry *pve; + mutex_enter(&pg->mdpage.pvh_lock); + + if (flags & PMAP_NOCACHE) + pg->mdpage.pvh_attrs |= PVF_NC; + else { + struct pv_entry *pve; - pve = pmap_pv_alloc(); - if (!pve) - panic("%s: no pv entries available", - __func__); - DPRINTF(PDB_FOLLOW|PDB_ENTER, - ("%s(%lx, %lx, %x) TLB_KENTER\n", __func__, - va, pa, pte)); + pve = pmap_pv_alloc(); + if (!pve) + panic("%s: no pv entries available", + __func__); + DPRINTF(PDB_FOLLOW|PDB_ENTER, + ("%s(%lx, %lx, %x) TLB_KENTER\n", __func__, + va, pa, pte)); + + pmap_pv_enter(pg, 
pve, pmap_kernel(), va, NULL, + PV_KENTER); + pmap_check_alias(pg, pve, va, &pte); + } - mutex_enter(&md->pvh_lock); - if (pmap_check_alias(pg, va, pte)) - pmap_page_remove_locked(pg); - pmap_pv_enter(pg, pve, pmap_kernel(), va, NULL, - PV_KENTER); - mutex_exit(&md->pvh_lock); + mutex_exit(&pg->mdpage.pvh_lock); } } pmap_pte_set(pde, va, pte); @@ -1838,13 +1876,18 @@ pmap_kremove(vaddr_t va, vsize_t size) pmap_pte_flush(pmap, va, pte); pmap_pte_set(pde, va, 0); if (pmap_initialized && (pg = PHYS_TO_VM_PAGE(PTE_PAGE(pte)))) { - struct vm_page_md * const md = VM_PAGE_TO_MD(pg); - mutex_enter(&md->pvh_lock); + mutex_enter(&pg->mdpage.pvh_lock); pve = pmap_pv_remove(pg, pmap, va); - mutex_exit(&md->pvh_lock); + if ((pg->mdpage.pvh_attrs & PVF_NC) == 0) + pmap_check_alias(pg, pg->mdpage.pvh_list, va, + NULL); + + pg->mdpage.pvh_attrs &= ~PVF_NC; + + mutex_exit(&pg->mdpage.pvh_lock); if (pve != NULL) pmap_pv_free(pve); }
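
For readers following the pmap_check_alias() rework in this diff: the heart of the new logic is (1) the test for a non-equivalent alias, i.e. two virtual mappings of the same physical page that differ in the cache-index bits, and (2) the decision to mark every mapping of the page uncacheable once such an alias exists and some mapping is writable or already modified. The stand-alone sketch below mirrors only that decision and is not the kernel code itself; the HPPA_PGAOFF value and the PVF_* flag values are illustrative stand-ins, and the helper names (is_nonequiv_alias, must_uncache) are hypothetical.

#include <stdio.h>
#include <stdbool.h>

/* Assumed, illustrative values; the real definitions live in the hppa headers. */
#define HPPA_PGAOFF	0x000fffffUL	/* bits that select the cache colour (assumed) */
#define PVF_WRITE	0x01		/* stand-in flag values */
#define PVF_MOD		0x02
#define PVF_UNCACHEABLE	0x04

/*
 * Two virtual mappings of the same physical page are non-equivalent
 * aliases when they differ in the cache-index bits and can therefore
 * land in different lines of a virtually indexed cache.
 */
static bool
is_nonequiv_alias(unsigned long va, unsigned long tva)
{
	return ((va ^ tva) & HPPA_PGAOFF) != 0;
}

/*
 * Mirrors the decision made when a mapping is added: once a
 * non-equivalent alias exists and any mapping of the page is writable
 * (or the page is already modified), all mappings must be made
 * uncacheable.
 */
static bool
must_uncache(bool nonequiv, unsigned int attrs)
{
	return nonequiv && (attrs & (PVF_WRITE | PVF_MOD)) != 0;
}

int
main(void)
{
	/* Two hypothetical virtual addresses mapping the same physical page. */
	unsigned long va1 = 0x00001000UL;
	unsigned long va2 = 0x00041000UL;
	bool alias = is_nonequiv_alias(va1, va2);

	printf("non-equivalent alias: %s\n", alias ? "yes" : "no");
	printf("mark uncacheable:     %s\n",
	    must_uncache(alias, PVF_WRITE) ? "yes" : "no");
	return 0;
}

In the diff itself this corresponds to the loop over the pv list that ORs each mapping's pmap_pvh_attrs() into attrs and sets nonequiv when ((va ^ tva) & HPPA_PGAOFF) != 0, after which the page either inherits PTE_PROT(TLB_UNCACHEABLE) or is re-marked cacheable once the last non-equivalent alias is removed.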