Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

===================================================================
RCS file: /ftp/cvs/cvsroot/src/sys/arch/arm/arm32/pmap.c,v
rcsdiff: /ftp/cvs/cvsroot/src/sys/arch/arm/arm32/pmap.c,v: warning: Unknown phrases like `commitid ...;' are present.
retrieving revision 1.97
retrieving revision 1.97.4.4
diff -u -p -r1.97 -r1.97.4.4
--- src/sys/arch/arm/arm32/pmap.c	2002/05/14 19:22:34	1.97
+++ src/sys/arch/arm/arm32/pmap.c	2002/12/07 20:43:02	1.97.4.4
@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap.c,v 1.97 2002/05/14 19:22:34 chris Exp $	*/
+/*	$NetBSD: pmap.c,v 1.97.4.4 2002/12/07 20:43:02 he Exp $	*/
 
 /*
  * Copyright (c) 2002 Wasabi Systems, Inc.
@@ -143,7 +143,7 @@
 #include <machine/param.h>
 #include <arm/arm32/katelib.h>
 
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.97 2002/05/14 19:22:34 chris Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.97.4.4 2002/12/07 20:43:02 he Exp $");
 #ifdef PMAP_DEBUG
 #define	PDEBUG(_lev_,_stat_) \
 	if (pmap_debug_level >= (_lev_)) \
@@ -813,6 +813,22 @@ pmap_enter_pv(struct vm_page *pg, struct
 	simple_unlock(&pg->mdpage.pvh_slock);	/* unlock, done! */
 	if (pve->pv_flags & PVF_WIRED)
 		++pmap->pm_stats.wired_count;
+#ifdef PMAP_ALIAS_DEBUG
+    {
+	int s = splhigh();
+	if (pve->pv_flags & PVF_WRITE)
+		pg->mdpage.rw_mappings++;
+	else
+		pg->mdpage.ro_mappings++;
+	if (pg->mdpage.rw_mappings != 0 &&
+	    (pg->mdpage.kro_mappings != 0 || pg->mdpage.krw_mappings != 0)) {
+		printf("pmap_enter_pv: rw %u, kro %u, krw %u\n",
+		    pg->mdpage.rw_mappings, pg->mdpage.kro_mappings,
+		    pg->mdpage.krw_mappings);
+	}
+	splx(s);
+    }
+#endif /* PMAP_ALIAS_DEBUG */
 }
 
 /*
@@ -838,6 +854,19 @@ pmap_remove_pv(struct vm_page *pg, struc
 			*prevptr = pve->pv_next;		/* remove it! */
 			if (pve->pv_flags & PVF_WIRED)
 			    --pmap->pm_stats.wired_count;
+#ifdef PMAP_ALIAS_DEBUG
+    {
+			int s = splhigh();
+			if (pve->pv_flags & PVF_WRITE) {
+				KASSERT(pg->mdpage.rw_mappings != 0);
+				pg->mdpage.rw_mappings--;
+			} else {
+				KASSERT(pg->mdpage.ro_mappings != 0);
+				pg->mdpage.ro_mappings--;
+			}
+			splx(s);
+    }
+#endif /* PMAP_ALIAS_DEBUG */
 			break;
 		}
 		prevptr = &pve->pv_next;		/* previous pointer */
@@ -881,6 +910,31 @@ pmap_modify_pv(struct pmap *pmap, vaddr_
 				else
 					--pmap->pm_stats.wired_count;
 			}
+#ifdef PMAP_ALIAS_DEBUG
+    {
+			int s = splhigh();
+			if ((flags ^ oflags) & PVF_WRITE) {
+				if (flags & PVF_WRITE) {
+					pg->mdpage.rw_mappings++;
+					pg->mdpage.ro_mappings--;
+					if (pg->mdpage.rw_mappings != 0 &&
+					    (pg->mdpage.kro_mappings != 0 ||
+					     pg->mdpage.krw_mappings != 0)) {
+						printf("pmap_modify_pv: rw %u, "
+						    "kro %u, krw %u\n",
+						    pg->mdpage.rw_mappings,
+						    pg->mdpage.kro_mappings,
+						    pg->mdpage.krw_mappings);
+					}
+				} else {
+					KASSERT(pg->mdpage.rw_mappings != 0);
+					pg->mdpage.rw_mappings--;
+					pg->mdpage.ro_mappings++;
+				}
+			}
+			splx(s);
+    }
+#endif /* PMAP_ALIAS_DEBUG */
 			return (oflags);
 		}
 	}
@@ -905,6 +959,7 @@ pmap_map_in_l1(struct pmap *pmap, vaddr_
 	pmap->pm_pdir[ptva + 1] = L1_C_PROTO | (l2pa + 0x400);
 	pmap->pm_pdir[ptva + 2] = L1_C_PROTO | (l2pa + 0x800);
 	pmap->pm_pdir[ptva + 3] = L1_C_PROTO | (l2pa + 0xc00);
+	cpu_dcache_wb_range((vaddr_t) &pmap->pm_pdir[ptva + 0], 16);
 
 	/* Map the page table into the page table area. */
 	if (selfref)
@@ -926,6 +981,7 @@ pmap_unmap_in_l1(struct pmap *pmap, vadd
 	pmap->pm_pdir[ptva + 1] = 0;
 	pmap->pm_pdir[ptva + 2] = 0;
 	pmap->pm_pdir[ptva + 3] = 0;
+	cpu_dcache_wb_range((vaddr_t) &pmap->pm_pdir[ptva + 0], 16);
 
 	/* Unmap the page table from the page table area. */
 	*((pt_entry_t *)(pmap->pm_vptpt + ptva)) = 0;
@@ -938,12 +994,21 @@ pmap_unmap_in_l1(struct pmap *pmap, vadd
  *
  * For now, VM is already on, we only need to map the
  * specified memory.
+ *
+ * XXX This routine should eventually go away; it's only used
+ * XXX by machine-dependent crash dump code.
  */
 vaddr_t
 pmap_map(vaddr_t va, paddr_t spa, paddr_t epa, vm_prot_t prot)
 {
+	pt_entry_t *pte;
+
 	while (spa < epa) {
-		pmap_kenter_pa(va, spa, prot);
+		pte = vtopte(va);
+
+		*pte = L2_S_PROTO | spa |
+		    L2_S_PROT(PTE_KERNEL, prot) | pte_l2_s_cache_mode;
+		cpu_tlb_flushID_SE(va);
 		va += NBPG;
 		spa += NBPG;
 	}
@@ -988,7 +1053,7 @@ pmap_bootstrap(pd_entry_t *kernel_l1pt,
 	TAILQ_INIT(&(pmap_kernel()->pm_obj.memq));
 	pmap_kernel()->pm_obj.uo_npages = 0;
 	pmap_kernel()->pm_obj.uo_refs = 1;
-	
+
 	/*
 	 * Initialize PAGE_SIZE-dependent variables.
 	 */
@@ -1248,7 +1313,6 @@ pmap_alloc_l1pt(void)
 	struct l1pt *pt;
 	int error;
 	struct vm_page *m;
-	pt_entry_t *pte;
 
 	/* Allocate virtual address space for the L1 page table */
 	va = uvm_km_valloc(kernel_map, L1_TABLE_SIZE);
@@ -1287,17 +1351,7 @@ pmap_alloc_l1pt(void)
 	while (m && va < (pt->pt_va + L1_TABLE_SIZE)) {
 		pa = VM_PAGE_TO_PHYS(m);
 
-		pte = vtopte(va);
-
-		/*
-		 * Assert that the PTE is invalid.  If it's invalid,
-		 * then we are guaranteed that there won't be an entry
-		 * for this VA in the TLB.
-		 */
-		KDASSERT(pmap_pte_v(pte) == 0);
-
-		*pte = L2_S_PROTO | VM_PAGE_TO_PHYS(m) |
-		    L2_S_PROT(PTE_KERNEL, VM_PROT_READ|VM_PROT_WRITE);
+		pmap_kenter_pa(va, pa, VM_PROT_READ|VM_PROT_WRITE);
 
 		va += NBPG;
 		m = m->pageq.tqe_next;
@@ -1367,6 +1421,13 @@ pmap_alloc_ptpt(struct pmap *pmap)
 
 	*pte = L2_S_PROTO | pmap->pm_pptpt |
 	    L2_S_PROT(PTE_KERNEL, VM_PROT_READ|VM_PROT_WRITE);
+#ifdef PMAP_ALIAS_DEBUG
+    {
+	int s = splhigh();
+	pg->mdpage.krw_mappings++;
+	splx(s);
+    }
+#endif /* PMAP_ALIAS_DEBUG */
 
 	return (0);
 }
@@ -1431,8 +1492,11 @@ pmap_allocpagedir(struct pmap *pmap)
 	pmap->pm_pdir = (pd_entry_t *)pt->pt_va;
 
 	/* Clean the L1 if it is dirty */
-	if (!(pt->pt_flags & PTFLAG_CLEAN))
+	if (!(pt->pt_flags & PTFLAG_CLEAN)) {
 		bzero((void *)pmap->pm_pdir, (L1_TABLE_SIZE - KERNEL_PD_SIZE));
+		cpu_dcache_wb_range((vaddr_t) pmap->pm_pdir,
+		    (L1_TABLE_SIZE - KERNEL_PD_SIZE));
+	}
 
 	/* Allocate a page table to map all the page tables for this pmap */
 	if ((error = pmap_alloc_ptpt(pmap)) != 0) {
@@ -1447,12 +1511,14 @@ pmap_allocpagedir(struct pmap *pmap)
 	bcopy((char *)pmap_kernel()->pm_pdir + (L1_TABLE_SIZE - KERNEL_PD_SIZE),
 	    (char *)pmap->pm_pdir + (L1_TABLE_SIZE - KERNEL_PD_SIZE),
 	    KERNEL_PD_SIZE);
+	cpu_dcache_wb_range((vaddr_t)pmap->pm_pdir +
+	    (L1_TABLE_SIZE - KERNEL_PD_SIZE), KERNEL_PD_SIZE);
 
 	/* Wire in this page table */
 	pmap_map_in_l1(pmap, PTE_BASE, pmap->pm_pptpt, TRUE);
 
 	pt->pt_flags &= ~PTFLAG_CLEAN;	/* L1 is dirty now */
-	
+
 	/*
 	 * Map the kernel page tables into the new PT map.
 	 */
@@ -2525,18 +2591,17 @@ pmap_protect(struct pmap *pmap, vaddr_t
 {
 	pt_entry_t *pte = NULL, *ptes;
 	struct vm_page *pg;
-	int armprot;
 	int flush = 0;
-	paddr_t pa;
 
 	PDEBUG(0, printf("pmap_protect: pmap=%p %08lx->%08lx %x\n",
 	    pmap, sva, eva, prot));
 
 	if (~prot & VM_PROT_READ) {
-		/* Just remove the mappings. */
+		/*
+		 * Just remove the mappings.  pmap_update() is not required
+		 * here since the caller should do it.
+		 */
 		pmap_remove(pmap, sva, eva);
-		/* pmap_update not needed as it should be called by the caller
-		 * of pmap_protect */
 		return;
 	}
 	if (prot & VM_PROT_WRITE) {
@@ -2587,26 +2652,17 @@ pmap_protect(struct pmap *pmap, vaddr_t
 
 			flush = 1;
 
-			armprot = 0;
-			if (sva < VM_MAXUSER_ADDRESS)
-				armprot |= L2_S_PROT_U;
-			else if (sva < VM_MAX_ADDRESS)
-				armprot |= L2_S_PROT_W;  /* XXX Ekk what is this ? */
-			*pte = (*pte & 0xfffff00f) | armprot;
-
-			pa = pmap_pte_pa(pte);
-
-			/* Get the physical page index */
+			*pte &= ~L2_S_PROT_W;		/* clear write bit */
 
 			/* Clear write flag */
-			if ((pg = PHYS_TO_VM_PAGE(pa)) != NULL) {
+			if ((pg = PHYS_TO_VM_PAGE(pmap_pte_pa(pte))) != NULL) {
 				simple_lock(&pg->mdpage.pvh_slock);
 				(void) pmap_modify_pv(pmap, sva, pg, PVF_WRITE, 0);
 				pmap_vac_me_harder(pmap, pg, ptes, FALSE);
 				simple_unlock(&pg->mdpage.pvh_slock);
 			}
 
-next:
+ next:
 			sva += NBPG;
 			pte++;
 		}
@@ -2825,10 +2881,44 @@ void
 pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot)
 {
 	pt_entry_t *pte;
-	
+
 	pte = vtopte(va);
 	KASSERT(!pmap_pte_v(pte));
 
+#ifdef PMAP_ALIAS_DEBUG
+    {
+	struct vm_page *pg;
+	int s;
+
+	pg = PHYS_TO_VM_PAGE(pa);
+	if (pg != NULL) {
+		s = splhigh();
+		if (pg->mdpage.ro_mappings == 0 &&
+		    pg->mdpage.rw_mappings == 0 &&
+		    pg->mdpage.kro_mappings == 0 &&
+		    pg->mdpage.krw_mappings == 0) {
+			/* This case is okay. */
+		} else if (pg->mdpage.rw_mappings == 0 &&
+			   pg->mdpage.krw_mappings == 0 &&
+			   (prot & VM_PROT_WRITE) == 0) {
+			/* This case is okay. */
+		} else {
+			/* Something is awry. */
+			printf("pmap_kenter_pa: ro %u, rw %u, kro %u, krw %u "
+			    "prot 0x%x\n", pg->mdpage.ro_mappings,
+			    pg->mdpage.rw_mappings, pg->mdpage.kro_mappings,
+			    pg->mdpage.krw_mappings, prot);
+			Debugger();
+		}
+		if (prot & VM_PROT_WRITE)
+			pg->mdpage.krw_mappings++;
+		else
+			pg->mdpage.kro_mappings++;
+		splx(s);
+	}
+    }
+#endif /* PMAP_ALIAS_DEBUG */
+
 	*pte = L2_S_PROTO | pa |
 	    L2_S_PROT(PTE_KERNEL, prot) | pte_l2_s_cache_mode;
 }
@@ -2847,6 +2937,25 @@ pmap_kremove(vaddr_t va, vsize_t len)
 		KASSERT(pmap_pde_page(pmap_pde(pmap_kernel(), va)));
 
 		pte = vtopte(va);
+#ifdef PMAP_ALIAS_DEBUG
+    {
+		struct vm_page *pg;
+		int s;
+
+		if ((*pte & L2_TYPE_MASK) != L2_TYPE_INV &&
+		    (pg = PHYS_TO_VM_PAGE(*pte & L2_S_FRAME)) != NULL) {
+			s = splhigh();
+			if (*pte & L2_S_PROT_W) {
+				KASSERT(pg->mdpage.krw_mappings != 0);
+				pg->mdpage.krw_mappings--;
+			} else {
+				KASSERT(pg->mdpage.kro_mappings != 0);
+				pg->mdpage.kro_mappings--;
+			}
+			splx(s);
+		}
+    }
+#endif /* PMAP_ALIAS_DEBUG */
 		cpu_idcache_wbinv_range(va, PAGE_SIZE);
 		*pte = 0;
 		cpu_tlb_flushID_SE(va);
@@ -3111,6 +3220,18 @@ pmap_clearbit(struct vm_page *pg, u_int
 	 * Loop over all current mappings setting/clearing as appropos
 	 */
 	for (pv = pg->mdpage.pvh_list; pv; pv = pv->pv_next) {
+#ifdef PMAP_ALIAS_DEBUG
+    {
+		int s = splhigh();
+		if ((maskbits & PVF_WRITE) != 0 &&
+		    (pv->pv_flags & PVF_WRITE) != 0) {
+			KASSERT(pg->mdpage.rw_mappings != 0);
+			pg->mdpage.rw_mappings--;
+			pg->mdpage.ro_mappings++;
+		}
+		splx(s);
+    }
+#endif /* PMAP_ALIAS_DEBUG */
 		va = pv->pv_va;
 		pv->pv_flags &= ~maskbits;
 		ptes = pmap_map_ptes(pv->pv_pmap);	/* locks pmap */