Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

===================================================================
RCS file: /ftp/cvs/cvsroot/src/sys/arch/hppa/hppa/pmap.c,v
rcsdiff: /ftp/cvs/cvsroot/src/sys/arch/hppa/hppa/pmap.c,v: warning: Unknown phrases like `commitid ...;' are present.
retrieving revision 1.63.2.2
retrieving revision 1.63.2.3
diff -u -p -r1.63.2.2 -r1.63.2.3
--- src/sys/arch/hppa/hppa/pmap.c	2010/04/27 07:19:28	1.63.2.2
+++ src/sys/arch/hppa/hppa/pmap.c	2010/04/30 14:39:27	1.63.2.3
@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap.c,v 1.63.2.2 2010/04/27 07:19:28 uebayasi Exp $	*/
+/*	$NetBSD: pmap.c,v 1.63.2.3 2010/04/30 14:39:27 uebayasi Exp $	*/
 
 /*-
  * Copyright (c) 2001, 2002 The NetBSD Foundation, Inc.
@@ -65,11 +65,13 @@
  */
 
 #include 
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.63.2.2 2010/04/27 07:19:28 uebayasi Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.63.2.3 2010/04/30 14:39:27 uebayasi Exp $");
 
 #include "opt_device_page.h"
 #include "opt_xip.h"
 
+#include "opt_cputype.h"
+
 #include 
 #include 
 #include 
@@ -210,6 +212,8 @@ static inline void pmap_flush_page(struc
 
 void pmap_copy_page(paddr_t, paddr_t);
 
+static void pmap_page_physload(paddr_t, paddr_t);
+
 #ifdef USE_HPT
 static inline struct hpt_entry *pmap_hash(pmap_t, vaddr_t);
 static inline uint32_t pmap_vtag(pmap_t, vaddr_t);
@@ -224,12 +228,12 @@ void pmap_dump_table(pa_space_t, vaddr_t
 void pmap_dump_pv(paddr_t);
 #endif
 
-void pmap_check_alias(struct vm_page *, struct pv_entry *, vaddr_t,
-    pt_entry_t *);
-static bool __changebit(struct vm_page *, u_int, u_int);
+void pmap_page_remove_locked(struct vm_page *);
+int pmap_check_alias(struct vm_page *, vaddr_t, pt_entry_t);
 
+/* un-invert PVF_REF */
 #define pmap_pvh_attrs(a) \
-	(((a) & (PVF_MOD|PVF_REF|PVF_WRITE|PVF_UNCACHEABLE)) ^ PVF_REF)
+	(((a) & (PVF_MOD|PVF_REF)) ^ PVF_REF)
 
 #define PMAP_LOCK(pm) \
 	do { \
@@ -258,7 +262,14 @@ pmap_pagealloc(struct uvm_object *obj, v
 void
 pmap_pagefree(struct vm_page *pg)
 {
-	fdcache(HPPA_SID_KERNEL, VM_PAGE_TO_PHYS(pg), PAGE_SIZE);
+	paddr_t pa = VM_PAGE_TO_PHYS(pg);
+	pdcache(HPPA_SID_KERNEL, pa, PAGE_SIZE);
+
+#if defined(HP8000_CPU) || defined(HP8200_CPU) || \
+    defined(HP8500_CPU) || defined(HP8600_CPU)
+	pdtlb(HPPA_SID_KERNEL, pa);
+	pitlb(HPPA_SID_KERNEL, pa);
+#endif
 
 	uvm_pagefree(pg);
 }
@@ -416,11 +427,11 @@ void
 pmap_pte_flush(pmap_t pmap, vaddr_t va, pt_entry_t pte)
 {
 
+	fdcache(pmap->pm_space, va, PAGE_SIZE);
 	if (pte & PTE_PROT(TLB_EXECUTE)) {
 		ficache(pmap->pm_space, va, PAGE_SIZE);
 		pitlb(pmap->pm_space, va);
 	}
-	fdcache(pmap->pm_space, va, PAGE_SIZE);
 	pdtlb(pmap->pm_space, va);
 #ifdef USE_HPT
 	if (pmap_hpt) {
@@ -471,14 +482,14 @@ pmap_dump_table(pa_space_t space, vaddr_
 				va = pdemask + PDE_SIZE;
 				continue;
 			}
-			printf("%x:%8p:\n", space, pde);
+			db_printf("%x:%8p:\n", space, pde);
 		}
 
 		pte = pmap_pte_get(pde, va);
 		if (pte) {
 			snprintb(buf, sizeof(buf), TLB_BITS,
 			    TLB_PROT(pte & PAGE_MASK));
-			printf("0x%08lx-0x%08x:%s\n", va, pte & ~PAGE_MASK,
+			db_printf("0x%08lx-0x%08x:%s\n", va, pte & ~PAGE_MASK,
 			    buf);
 		}
 		va += PAGE_SIZE;
@@ -495,122 +506,38 @@ pmap_dump_pv(paddr_t pa)
 	pg = PHYS_TO_VM_PAGE(pa);
 	md = VM_PAGE_TO_MD(pg);
 	mutex_enter(&md->pvh_lock);
-	printf("pg %p attr 0x%08x aliases %d\n", pg, md->pvh_attrs,
+	db_printf("pg %p attr 0x%08x aliases %d\n", pg, md->pvh_attrs,
 	    md->pvh_aliases);
 	for (pve = md->pvh_list; pve; pve = pve->pv_next)
-		printf("%x:%lx\n", pve->pv_pmap->pm_space,
+		db_printf("%x:%lx\n", pve->pv_pmap->pm_space,
 		    pve->pv_va & PV_VAMASK);
 	mutex_exit(&md->pvh_lock);
 }
 #endif
 
-/*
- * Check for non-equiv aliases for this page and the mapping being added or
- * removed. If, when adding, we find a new non-equiv alias then mark all PTEs
- * as uncacheable including the one we're checking. If, when removing, there
- * are no non-equiv aliases left then we mark PTEs as cacheable.
- *
- * - Shouldn't be called for pages that have been marked uncacheable by
- *   pmap_kenter_pa.
- * - Must be called with md->pvh_lock held.
- */
-void
-pmap_check_alias(struct vm_page *pg, struct pv_entry *pve, vaddr_t va,
-    pt_entry_t *ptep)
+int
+pmap_check_alias(struct vm_page *pg, vaddr_t va, pt_entry_t pte)
 {
 	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
-	bool nonequiv = false;
-	struct pv_entry *tpve;
-	u_int attrs;
-
-	DPRINTF(PDB_FOLLOW|PDB_ALIAS,
-	    ("%s(%p, %p, 0x%lx, %p)\n", __func__, pg, pve, va, ptep));
-
-	/* we should only be looking if we're not PVF_NC */
-	KASSERT((md->pvh_attrs & PVF_NC) == 0);
-	KASSERT(mutex_owned(&md->pvh_lock));
-
-	if (ptep) {
-		attrs = pmap_pvh_attrs(*ptep);
-
-		DPRINTF(PDB_FOLLOW|PDB_ALIAS,
-		    ("%s: va 0x%08lx attrs 0x%08x (new)\n", __func__, va,
-		    attrs));
-	} else {
-		attrs = 0;
-
-		DPRINTF(PDB_FOLLOW|PDB_ALIAS,
-		    ("%s: va 0x%08lx (removed)\n", __func__, va));
-	}
-
-	/*
-	 * Add in flags for existing mappings and check if mapping we're
-	 * adding/removing is an non-equiv aliases of the other mappings.
-	 */
-	for (tpve = pve; tpve; tpve = tpve->pv_next) {
-		pt_entry_t pte;
-		vaddr_t tva = tpve->pv_va & PV_VAMASK;
-
-		/* XXX LOCK */
-		pte = pmap_vp_find(tpve->pv_pmap, tva);
-		attrs |= pmap_pvh_attrs(pte);
-
-		if (((va ^ tva) & HPPA_PGAOFF) != 0)
-			nonequiv = true;
-
-		DPRINTF(PDB_FOLLOW|PDB_ALIAS,
-		    ("%s: va 0x%08x:0x%08lx attrs 0x%08x %s\n", __func__,
-		    tpve->pv_pmap->pm_space, tpve->pv_va & PV_VAMASK,
-		    pmap_pvh_attrs(pte), nonequiv ? "alias" : ""));
-	}
-
-	if (!nonequiv) {
-		/*
-		 * Inherit uncacheable attribute if set as it means we already
-		 * have non-equiv aliases.
-		 */
-		if (ptep && (attrs & PVF_UNCACHEABLE) != 0)
-			*ptep |= PTE_PROT(TLB_UNCACHEABLE);
-
-		/* No more to be done. */
-		return;
-	}
-
-	if (ptep) {
-		if ((attrs & (PVF_WRITE|PVF_MOD)) != 0) {
-			/*
-			 * We have non-equiv aliases and the new/some
-			 * mapping(s) is/are writable (or modified). We must
-			 * mark all mappings as uncacheable (if they're not
-			 * already marked as such).
-			 */
-			md->pvh_aliases++;
+	struct pv_entry *pve;
+	int ret = 0;
 
-			if ((attrs & PVF_UNCACHEABLE) == 0)
-				__changebit(pg, PVF_UNCACHEABLE, 0);
+	/* check for non-equ aliased mappings */
+	for (pve = md->pvh_list; pve; pve = pve->pv_next) {
+		vaddr_t pva = pve->pv_va & PV_VAMASK;
 
-			*ptep |= PTE_PROT(TLB_UNCACHEABLE);
+		pte |= pmap_vp_find(pve->pv_pmap, pva);
+		if ((va & HPPA_PGAOFF) != (pva & HPPA_PGAOFF) &&
+		    (pte & PTE_PROT(TLB_WRITE))) {
 
 			DPRINTF(PDB_FOLLOW|PDB_ALIAS,
-			    ("%s: page marked uncacheable\n", __func__));
-		}
-	} else {
-		if ((attrs & PVF_UNCACHEABLE) != 0) {
-			/*
-			 * We've removed a non-equiv aliases. We can now mark
-			 * it cacheable if all non-equiv aliases are gone.
-			 */
-
-			md->pvh_aliases--;
-			if (md->pvh_aliases == 0) {
-				__changebit(pg, 0, PVF_UNCACHEABLE);
-
-				DPRINTF(PDB_FOLLOW|PDB_ALIAS,
-				    ("%s: page re-marked cacheable\n",
-				    __func__));
-			}
+			    ("%s: aliased writable mapping 0x%x:0x%lx\n",
+			    __func__, pve->pv_pmap->pm_space, pve->pv_va));
+			ret++;
 		}
 	}
+
+	return (ret);
 }
 
 /*
@@ -676,6 +603,39 @@ pmap_pv_remove(struct vm_page *pg, pmap_
 	return (pv);
 }
 
+#define FIRST_16M atop(16 * 1024 * 1024)
+
+static void
+pmap_page_physload(paddr_t spa, paddr_t epa)
+{
+
+	if (spa < FIRST_16M && epa <= FIRST_16M) {
+		DPRINTF(PDB_INIT, ("%s: phys segment 0x%05lx 0x%05lx\n",
+		    __func__, spa, epa));
+
+		uvm_page_physload(spa, epa, spa, epa, VM_FREELIST_ISADMA);
+	} else if (spa < FIRST_16M && epa > FIRST_16M) {
+		DPRINTF(PDB_INIT, ("%s: phys segment 0x%05lx 0x%05lx\n",
+		    __func__, spa, FIRST_16M));
+
+		uvm_page_physload(spa, FIRST_16M, spa, FIRST_16M,
+		    VM_FREELIST_ISADMA);
+
+		DPRINTF(PDB_INIT, ("%s: phys segment 0x%05lx 0x%05lx\n",
+		    __func__, FIRST_16M, epa));
+
+		uvm_page_physload(FIRST_16M, epa, FIRST_16M, epa,
+		    VM_FREELIST_DEFAULT);
+	} else {
+		DPRINTF(PDB_INIT, ("%s: phys segment 0x%05lx 0x%05lx\n",
+		    __func__, spa, epa));
+
+		uvm_page_physload(spa, epa, spa, epa, VM_FREELIST_DEFAULT);
+	}
+
+	availphysmem += epa - spa;
+}
+
 /*
  * Bootstrap the system enough to run with virtual memory.
  * Map the kernel's code, data and bss, and allocate the system page table.
@@ -695,7 +655,6 @@ pmap_bootstrap(vaddr_t vstart)
 	extern int resvphysmem;
 	vsize_t btlb_entry_min, btlb_entry_max, btlb_entry_got;
 	paddr_t ksrx, kerx, ksro, kero, ksrw, kerw;
-	paddr_t phys_start, phys_end;
 	extern int usebtlb;
 
 	/* Provided by the linker script */
@@ -982,41 +941,9 @@ pmap_bootstrap(vaddr_t vstart)
 
 	availphysmem = 0;
 
-	/* The first segment runs from [resvmem..ksrx). */
-	phys_start = resvmem;
-	phys_end = atop(ksrx);
-
-	DPRINTF(PDB_INIT, ("%s: phys segment 0x%05x 0x%05x\n", __func__,
-	    (u_int)phys_start, (u_int)phys_end));
-	if (phys_end > phys_start) {
-		uvm_page_physload(phys_start, phys_end,
-		    phys_start, phys_end, VM_FREELIST_DEFAULT);
-		availphysmem += phys_end - phys_start;
-	}
-
-	/* The second segment runs from [kero..ksrw). */
-	phys_start = atop(kero);
-	phys_end = atop(ksrw);
-
-	DPRINTF(PDB_INIT, ("%s: phys segment 0x%05x 0x%05x\n", __func__,
-	    (u_int)phys_start, (u_int)phys_end));
-	if (phys_end > phys_start) {
-		uvm_page_physload(phys_start, phys_end,
-		    phys_start, phys_end, VM_FREELIST_DEFAULT);
-		availphysmem += phys_end - phys_start;
-	}
-
-	/* The third segment runs from [kerw..physmem). */
-	phys_start = atop(kerw);
-	phys_end = physmem;
-
-	DPRINTF(PDB_INIT, ("%s: phys segment 0x%05x 0x%05x\n", __func__,
-	    (u_int)phys_start, (u_int)phys_end));
-	if (phys_end > phys_start) {
-		uvm_page_physload(phys_start, phys_end,
-		    phys_start, phys_end, VM_FREELIST_DEFAULT);
-		availphysmem += phys_end - phys_start;
-	}
+	pmap_page_physload(resvmem, atop(ksrx));
+	pmap_page_physload(atop(kero), atop(ksrw));
+	pmap_page_physload(atop(kerw), physmem);
 
 	mutex_init(&pmaps_lock, MUTEX_DEFAULT, IPL_NONE);
 
@@ -1309,7 +1236,6 @@ pmap_enter(pmap_t pmap, vaddr_t va, padd
 	if (pmap_initialized && ((flags & PMAP_UNMANAGED) == 0) &&
 	    (pg = PHYS_TO_VM_PAGE(pa))) {
 		struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
-		mutex_enter(&md->pvh_lock);
 
 		if (!pve && !(pve = pmap_pv_alloc())) {
 			if (flags & PMAP_CANFAIL) {
@@ -1319,9 +1245,11 @@ pmap_enter(pmap_t pmap, vaddr_t va, padd
 			}
 			panic("%s: no pv entries available", __func__);
 		}
+		pte |= PTE_PROT(pmap_prot(pmap, prot));
+		mutex_enter(&md->pvh_lock);
+		if (pmap_check_alias(pg, va, pte))
+			pmap_page_remove_locked(pg);
 		pmap_pv_enter(pg, pve, pmap, va, ptp, 0);
-		pmap_check_alias(pg, pve, va, &pte);
-		mutex_exit(&md->pvh_lock);
 	} else if (pve) {
 		pmap_pv_free(pve);
@@ -1364,15 +1292,13 @@ pmap_remove(pmap_t pmap, vaddr_t sva, va
 
 	PMAP_LOCK(pmap);
 
-	for (batch = 0, pdemask = 1; sva < eva; sva += PAGE_SIZE) {
-		if (pdemask != (sva & PDE_MASK)) {
-			pdemask = sva & PDE_MASK;
-			if (!(pde = pmap_pde_get(pmap->pm_pdir, sva))) {
-				sva = pdemask + PDE_SIZE - PAGE_SIZE;
-				continue;
-			}
-			batch = pdemask == sva && sva + PDE_SIZE <= eva;
+	for (batch = 0; sva < eva; sva += PAGE_SIZE) {
+		pdemask = sva & PDE_MASK;
+		if (!(pde = pmap_pde_get(pmap->pm_pdir, sva))) {
+			sva = pdemask + PDE_SIZE - PAGE_SIZE;
+			continue;
 		}
+		batch = pdemask == sva && sva + PDE_SIZE <= eva;
 
 		if ((pte = pmap_pte_get(pde, sva))) {
 
@@ -1396,10 +1322,6 @@ pmap_remove(pmap_t pmap, vaddr_t sva, va
 
 				mutex_enter(&md->pvh_lock);
 				pve = pmap_pv_remove(pg, pmap, sva);
-
-				pmap_check_alias(pg, md->pvh_list,
-				    sva, NULL);
-
 				md->pvh_attrs |= pmap_pvh_attrs(pte);
 				mutex_exit(&md->pvh_lock);
 
@@ -1471,6 +1393,16 @@ void
 pmap_page_remove(struct vm_page *pg)
 {
 	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
+
+	mutex_enter(&md->pvh_lock);
+	pmap_page_remove_locked(pg);
+	mutex_exit(&md->pvh_lock);
+}
+
+void
+pmap_page_remove_locked(struct vm_page *pg)
+{
+	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
 	struct pv_entry *pve, *npve, **pvp;
 
 	DPRINTF(PDB_FOLLOW|PDB_PV, ("%s(%p)\n", __func__, pg));
@@ -1478,7 +1410,6 @@ pmap_page_remove(struct vm_page *pg)
 	if (md->pvh_list == NULL)
 		return;
 
-	mutex_enter(&md->pvh_lock);
 	pvp = &md->pvh_list;
 	for (pve = md->pvh_list; pve; pve = npve) {
 		pmap_t pmap = pve->pv_pmap;
@@ -1499,22 +1430,21 @@ pmap_page_remove(struct vm_page *pg)
 		if (pve->pv_va & PV_KENTER) {
 			*pvp = pve;
 			pvp = &pve->pv_next;
-			continue;
-		}
-
-		md->pvh_attrs |= pmap_pvh_attrs(pte);
+		} else
+			md->pvh_attrs |= pmap_pvh_attrs(pte);
 
 		pmap_pte_flush(pmap, va, pte);
 		if (pte & PTE_PROT(TLB_WIRED))
 			pmap->pm_stats.wired_count--;
 		pmap->pm_stats.resident_count--;
 
-		pmap_pte_set(pde, va, 0);
-		pmap_pv_free(pve);
+		if (!(pve->pv_va & PV_KENTER)) {
+			pmap_pte_set(pde, va, 0);
+			pmap_pv_free(pve);
+		}
 		PMAP_UNLOCK(pmap);
 	}
 	*pvp = NULL;
-	mutex_exit(&md->pvh_lock);
 
 	DPRINTF(PDB_FOLLOW|PDB_PV, ("%s: leaving\n", __func__));
 }
@@ -1558,31 +1488,16 @@ bool
 pmap_changebit(struct vm_page *pg, u_int set, u_int clear)
 {
 	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
-	bool rv;
+	struct pv_entry *pve;
+	int res;
 
 	DPRINTF(PDB_FOLLOW|PDB_BITS,
("%s(%p, %x, %x)\n", __func__, pg, set, clear)); - mutex_enter(&md->pvh_lock); - rv = __changebit(pg, set, clear); - mutex_exit(&md->pvh_lock); - - return rv; -} + KASSERT((set & ~(PVF_REF)) == 0); + KASSERT((clear & ~(PVF_MOD|PVF_WRITE)) == 0); -/* - * Must be called with md->pvh_lock held. - */ -static bool -__changebit(struct vm_page *pg, u_int set, u_int clear) -{ - struct vm_page_md * const md = VM_PAGE_TO_MD(pg); - struct pv_entry *pve; - int res; - - KASSERT(mutex_owned(&md->pvh_lock)); - KASSERT(((set | clear) & - ~(PVF_MOD|PVF_REF|PVF_UNCACHEABLE|PVF_WRITE)) == 0); + mutex_enter(&md->pvh_lock); /* preserve other bits */ res = md->pvh_attrs & (set | clear); @@ -1618,6 +1533,7 @@ __changebit(struct vm_page *pg, u_int se } } } + mutex_exit(&md->pvh_lock); return ((res & (clear | set)) != 0); } @@ -1692,8 +1608,6 @@ pmap_activate(struct lwp *l) pa_space_t space = pmap->pm_space; struct pcb *pcb = lwp_getpcb(l); - KASSERT(pcb->pcb_uva == uvm_lwp_getuarea(l)); - /* space is cached for the copy{in,out}'s pleasure */ pcb->pcb_space = space; fdcache(HPPA_SID_KERNEL, (vaddr_t)pcb, sizeof(struct pcb)); @@ -1711,19 +1625,22 @@ pmap_flush_page(struct vm_page *pg, bool DPRINTF(PDB_FOLLOW|PDB_CACHE, ("%s(%p, %d)\n", __func__, pg, purge)); - KASSERT(!(md->pvh_attrs & PVF_NC)); - /* purge cache for all possible mappings for the pa */ - mutex_enter(&md->pvh_lock); for (pve = md->pvh_list; pve; pve = pve->pv_next) { vaddr_t va = pve->pv_va & PV_VAMASK; + pa_space_t sp = pve->pv_pmap->pm_space; if (purge) - pdcache(pve->pv_pmap->pm_space, va, PAGE_SIZE); + pdcache(sp, va, PAGE_SIZE); else - fdcache(pve->pv_pmap->pm_space, va, PAGE_SIZE); + fdcache(sp, va, PAGE_SIZE); +#if defined(HP8000_CPU) || defined(HP8200_CPU) || \ + defined(HP8500_CPU) || defined(HP8600_CPU) + ficache(sp, va, PAGE_SIZE); + pdtlb(sp, va); + pitlb(sp, va); +#endif } - mutex_exit(&md->pvh_lock); } /* @@ -1741,6 +1658,13 @@ pmap_zero_page(paddr_t pa) memset((void *)pa, 0, PAGE_SIZE); fdcache(HPPA_SID_KERNEL, pa, PAGE_SIZE); + +#if defined(HP8000_CPU) || defined(HP8200_CPU) || \ + defined(HP8500_CPU) || defined(HP8600_CPU) + ficache(HPPA_SID_KERNEL, pa, PAGE_SIZE); + pdtlb(HPPA_SID_KERNEL, pa); + pitlb(HPPA_SID_KERNEL, pa); +#endif } /* @@ -1763,6 +1687,15 @@ pmap_copy_page(paddr_t spa, paddr_t dpa) pdcache(HPPA_SID_KERNEL, spa, PAGE_SIZE); fdcache(HPPA_SID_KERNEL, dpa, PAGE_SIZE); +#if defined(HP8000_CPU) || defined(HP8200_CPU) || \ + defined(HP8500_CPU) || defined(HP8600_CPU) + ficache(HPPA_SID_KERNEL, spa, PAGE_SIZE); + ficache(HPPA_SID_KERNEL, dpa, PAGE_SIZE); + pdtlb(HPPA_SID_KERNEL, spa); + pdtlb(HPPA_SID_KERNEL, dpa); + pitlb(HPPA_SID_KERNEL, spa); + pitlb(HPPA_SID_KERNEL, dpa); +#endif } void @@ -1808,26 +1741,21 @@ pmap_kenter_pa(vaddr_t va, paddr_t pa, v KASSERT(pa < HPPA_IOBEGIN); - mutex_enter(&md->pvh_lock); - - if (flags & PMAP_NOCACHE) - md->pvh_attrs |= PVF_NC; - else { - struct pv_entry *pve; + struct pv_entry *pve; - pve = pmap_pv_alloc(); - if (!pve) - panic("%s: no pv entries available", - __func__); - DPRINTF(PDB_FOLLOW|PDB_ENTER, - ("%s(%lx, %lx, %x) TLB_KENTER\n", __func__, - va, pa, pte)); - - pmap_pv_enter(pg, pve, pmap_kernel(), va, NULL, - PV_KENTER); - pmap_check_alias(pg, pve, va, &pte); - } + pve = pmap_pv_alloc(); + if (!pve) + panic("%s: no pv entries available", + __func__); + DPRINTF(PDB_FOLLOW|PDB_ENTER, + ("%s(%lx, %lx, %x) TLB_KENTER\n", __func__, + va, pa, pte)); + mutex_enter(&md->pvh_lock); + if (pmap_check_alias(pg, va, pte)) + pmap_page_remove_locked(pg); + pmap_pv_enter(pg, pve, 
+		    PV_KENTER);
 
 		mutex_exit(&md->pvh_lock);
 	}
 }
@@ -1903,12 +1831,6 @@ pmap_kremove(vaddr_t va, vsize_t size)
 
 			pve = pmap_pv_remove(pg, pmap, va);
 
-			if ((md->pvh_attrs & PVF_NC) == 0)
-				pmap_check_alias(pg, md->pvh_list, va,
-				    NULL);
-
-			md->pvh_attrs &= ~PVF_NC;
-
 			mutex_exit(&md->pvh_lock);
 			if (pve != NULL)
 				pmap_pv_free(pve);
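
As a reading aid (not part of the commit above), the following minimal standalone C sketch restates the aliasing test that the reworked pmap_check_alias() applies in the hunks: two mappings of the same physical page only force a pmap_page_remove_locked() when they land on different cache colours (their virtual addresses differ in the bits covered by HPPA_PGAOFF) and the combined protection is writable (TLB_WRITE). The mask and bit values below are assumed placeholders, not the real hppa header definitions.

/*
 * Illustrative sketch only -- not from pmap.c.  HPPA_PGAOFF and TLB_WRITE
 * are assumed placeholder values; the kernel takes the real ones from the
 * hppa machine headers.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define HPPA_PGAOFF	0x003ff000u	/* assumed: VA bits selecting the cache colour */
#define TLB_WRITE	0x00000001u	/* assumed: write-permission bit in the PTE */

static bool
nonequiv_writable_alias(uintptr_t va, uintptr_t pva, uint32_t prot)
{
	/* equivalent aliases share the colour bits and stay cacheable */
	if ((va & HPPA_PGAOFF) == (pva & HPPA_PGAOFF))
		return false;
	/* a read-only alias pair is tolerated; a writable one is not */
	return (prot & TLB_WRITE) != 0;
}

int
main(void)
{
	/* different colour and writable -> 1 (mapping would be torn down) */
	printf("%d\n", nonequiv_writable_alias(0x00401000, 0x00403000, TLB_WRITE));
	/* same colour -> 0 (harmless equivalent alias) */
	printf("%d\n", nonequiv_writable_alias(0x00401000, 0x00801000, TLB_WRITE));
	return 0;
}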