Please note that diffs are not public domain; they are subject to the copyright notices on the relevant files.

===================================================================
RCS file: /ftp/cvs/cvsroot/src/sys/arch/arm/arm32/pmap.c,v
rcsdiff: /ftp/cvs/cvsroot/src/sys/arch/arm/arm32/pmap.c,v: warning: Unknown phrases like `commitid ...;' are present.
retrieving revision 1.187.2.2
retrieving revision 1.188
diff -u -p -r1.187.2.2 -r1.188
--- src/sys/arch/arm/arm32/pmap.c	2009/04/28 07:33:43	1.187.2.2
+++ src/sys/arch/arm/arm32/pmap.c	2008/11/04 07:10:01	1.188
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.c,v 1.187.2.2 2009/04/28 07:33:43 skrll Exp $ */
+/* $NetBSD: pmap.c,v 1.188 2008/11/04 07:10:01 matt Exp $ */
 
 /*
  * Copyright 2003 Wasabi Systems, Inc.
@@ -212,7 +212,7 @@
 #include 
 #include 
 
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.187.2.2 2009/04/28 07:33:43 skrll Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.188 2008/11/04 07:10:01 matt Exp $");
 
 #ifdef PMAP_DEBUG
 
@@ -255,8 +255,7 @@ int pmapdebug = 0;
 /*
  * pmap_kernel() points here
  */
-static struct pmap kernel_pmap_store;
-struct pmap *const kernel_pmap_ptr = &kernel_pmap_store;
+struct pmap kernel_pmap_store;
 
 /*
  * Which pmap is currently 'live' in the cache
@@ -310,19 +309,6 @@ static paddr_t pmap_kernel_l2ptp_phys;
     EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "pmap", name)
 
 #ifdef PMAP_CACHE_VIPT
-static struct evcnt pmap_ev_vac_clean_one =
-    PMAP_EVCNT_INITIALIZER("clean page (1 color)");
-static struct evcnt pmap_ev_vac_flush_one =
-    PMAP_EVCNT_INITIALIZER("flush page (1 color)");
-static struct evcnt pmap_ev_vac_flush_lots =
-    PMAP_EVCNT_INITIALIZER("flush page (2+ colors)");
-static struct evcnt pmap_ev_vac_flush_lots2 =
-    PMAP_EVCNT_INITIALIZER("flush page (2+ colors, kmpage)");
-EVCNT_ATTACH_STATIC(pmap_ev_vac_clean_one);
-EVCNT_ATTACH_STATIC(pmap_ev_vac_flush_one);
-EVCNT_ATTACH_STATIC(pmap_ev_vac_flush_lots);
-EVCNT_ATTACH_STATIC(pmap_ev_vac_flush_lots2);
-
 static struct evcnt pmap_ev_vac_color_new =
     PMAP_EVCNT_INITIALIZER("new page color");
 static struct evcnt pmap_ev_vac_color_reuse =
@@ -455,8 +441,6 @@ EVCNT_ATTACH_STATIC(pmap_ev_activations)
 static pt_entry_t *csrc_pte, *cdst_pte;
 static vaddr_t csrcp, cdstp;
 vaddr_t memhook;			/* used by mem.c */
-kmutex_t memlock;			/* used by mem.c */
-void *zeropage;				/* used by mem.c */
 extern void *msgbufaddr;
 int pmap_kmpages;
 /*
@@ -669,12 +653,7 @@ static int pmap_clean_page(struct pv_en
 #endif
 #ifdef PMAP_CACHE_VIPT
 static void pmap_syncicache_page(struct vm_page *);
-enum pmap_flush_op {
-    PMAP_FLUSH_PRIMARY,
-    PMAP_FLUSH_SECONDARY,
-    PMAP_CLEAN_PRIMARY
-};
-static void pmap_flush_page(struct vm_page *, enum pmap_flush_op);
+static void pmap_flush_page(struct vm_page *, bool);
 #endif
 
 static void pmap_page_remove(struct vm_page *);
@@ -695,8 +674,8 @@ vaddr_t virtual_avail;
 vaddr_t virtual_end;
 vaddr_t pmap_curmaxkvaddr;
 
-paddr_t avail_start;
-paddr_t avail_end;
+vaddr_t avail_start;
+vaddr_t avail_end;
 
 pv_addrqh_t pmap_boot_freeq = SLIST_HEAD_INITIALIZER(&pmap_boot_freeq);
 pv_addr_t kernelpages;
@@ -1023,7 +1002,7 @@ pmap_remove_pv(struct vm_page *pg, pmap_
     * clear the KMOD attribute from the page.
     */
    if (SLIST_FIRST(&pg->mdpage.pvh_list) == NULL
-       || (SLIST_FIRST(&pg->mdpage.pvh_list)->pv_flags & PVF_KWRITE) != PVF_KWRITE)
+       || (SLIST_FIRST(&pg->mdpage.pvh_list)->pv_flags & PVF_KWRITE) == PVF_KWRITE)
        pg->mdpage.pvh_attrs &= ~PVF_KMOD;
 
    /*
@@ -1816,6 +1795,36 @@ pmap_vac_me_user(struct vm_page *pg, pma
 #endif
 
 #ifdef PMAP_CACHE_VIPT
+/*
+ * For virtually indexed / physically tagged caches, what we have to worry
+ * about is illegal cache aliases.  To prevent this, we must ensure that
+ * virtual addresses that map the physical page use the same bits for those
+ * bits masked by "arm_cache_prefer_mask" (bits 12+).  If there is a conflict,
+ * all mappings of the page must be non-cached.
+ */
+#if 0
+static inline vaddr_t
+pmap_check_sets(paddr_t pa)
+{
+    extern int arm_dcache_l2_nsets;
+    int set, way;
+    vaddr_t mask = 0;
+    int v;
+    pa |= 1;
+    for (set = 0; set < (1 << arm_dcache_l2_nsets); set++) {
+        for (way = 0; way < 4; way++) {
+            v = (way << 30) | (set << 5);
+            asm("mcr p15, 3, %0, c15, c2, 0" :: "r"(v));
+            asm("mrc p15, 3, %0, c15, c0, 0" : "=r"(v));
+
+            if ((v & (1 | ~(PAGE_SIZE-1))) == pa) {
+                mask |= 1 << (set >> 7);
+            }
+        }
+    }
+    return mask;
+}
+#endif
 static void
 pmap_vac_me_harder(struct vm_page *pg, pmap_t pm, vaddr_t va)
 {
@@ -1835,6 +1844,13 @@ pmap_vac_me_harder(struct vm_page *pg, p
    NPDEBUG(PDB_VAC, printf("pmap_vac_me_harder: pg=%p, pmap=%p va=%08lx\n",
        pg, pm, va));
 
+#define popc4(x) \
+    (((0x94 >> ((x & 3) << 1)) & 3) + ((0x94 >> ((x & 12) >> 1)) & 3))
+#if 0
+    tst_mask = pmap_check_sets(pg->phys_addr);
+    KASSERT(popc4(tst_mask) < 2);
+#endif
+
    KASSERT(!va || pm);
    KASSERT((pg->mdpage.pvh_attrs & PVF_DMOD) == 0 || (pg->mdpage.pvh_attrs & (PVF_DIRTY|PVF_NC)));
 
@@ -1842,7 +1858,6 @@ pmap_vac_me_harder(struct vm_page *pg, p
    if (__predict_false(pg->mdpage.pvh_attrs & PVF_NC)) {
        /* just an add, things are already non-cached */
        KASSERT(!(pg->mdpage.pvh_attrs & PVF_DIRTY));
-       KASSERT(!(pg->mdpage.pvh_attrs & PVF_MULTCLR));
        bad_alias = false;
        if (va) {
            PMAPCOUNT(vac_color_none);
@@ -1873,28 +1888,8 @@ pmap_vac_me_harder(struct vm_page *pg, p
            if (!bad_alias)
                pg->mdpage.pvh_attrs |= PVF_DIRTY;
        } else {
-           /*
-            * We have only read-only mappings.  Let's see if there
-            * are multiple colors in use or if we mapped a KMPAGE.
-            * If the latter, we have a bad alias.  If the former,
-            * we need to remember that.
-            */
-           for (; pv; pv = SLIST_NEXT(pv, pv_link)) {
-               if (tst_mask != (pv->pv_va & arm_cache_prefer_mask)) {
-                   if (pg->mdpage.pvh_attrs & PVF_KMPAGE)
-                       bad_alias = true;
-                   break;
-               }
-           }
            pg->mdpage.pvh_attrs &= ~PVF_WRITE;
-           /*
-            * No KMPAGE and we exited early, so we must have
-            * multiple color mappings.
-            */
-           if (!bad_alias && pv != NULL)
-               pg->mdpage.pvh_attrs |= PVF_MULTCLR;
        }
-
        /* If no conflicting colors, set everything back to cached */
        if (!bad_alias) {
 #ifdef DEBUG
@@ -1903,6 +1898,7 @@ pmap_vac_me_harder(struct vm_page *pg, p
                SLIST_FOREACH(pv, &pg->mdpage.pvh_list, pv_link)
                    KDASSERT(((tst_mask ^ pv->pv_va) & arm_cache_prefer_mask) == 0);
            }
+
 #endif
            pg->mdpage.pvh_attrs &= (PAGE_SIZE - 1) & ~PVF_NC;
            pg->mdpage.pvh_attrs |= tst_mask | PVF_COLORED;
@@ -1922,32 +1918,14 @@ pmap_vac_me_harder(struct vm_page *pg, p
        KASSERT(pmap_is_page_colored_p(pg));
        KASSERT(!(pg->mdpage.pvh_attrs & PVF_WRITE)
            || (pg->mdpage.pvh_attrs & PVF_DIRTY));
-       if (rw_mappings == 0) {
+       if (rw_mappings == 0)
            pg->mdpage.pvh_attrs &= ~PVF_WRITE;
-           if (ro_mappings == 1
-               && (pg->mdpage.pvh_attrs & PVF_MULTCLR)) {
-               /*
-                * If this is the last readonly mapping
-                * but it doesn't match the current color
-                * for the page, change the current color
-                * to match this last readonly mapping.
-                */
-               pv = SLIST_FIRST(&pg->mdpage.pvh_list);
-               tst_mask = (pg->mdpage.pvh_attrs ^ pv->pv_va)
-                   & arm_cache_prefer_mask;
-               if (tst_mask) {
-                   pg->mdpage.pvh_attrs ^= tst_mask;
-                   PMAPCOUNT(vac_color_change);
-               }
-           }
-       }
        KASSERT((pg->mdpage.pvh_attrs & PVF_DMOD) == 0 || (pg->mdpage.pvh_attrs & (PVF_DIRTY|PVF_NC)));
        KASSERT((rw_mappings == 0) == !(pg->mdpage.pvh_attrs & PVF_WRITE));
        return;
    } else if (!pmap_is_page_colored_p(pg)) {
        /* not colored so we just use its color */
        KASSERT(pg->mdpage.pvh_attrs & (PVF_WRITE|PVF_DIRTY));
-       KASSERT(!(pg->mdpage.pvh_attrs & PVF_MULTCLR));
        PMAPCOUNT(vac_color_new);
        pg->mdpage.pvh_attrs &= PAGE_SIZE - 1;
        pg->mdpage.pvh_attrs |= PVF_COLORED
@@ -1960,22 +1938,20 @@ pmap_vac_me_harder(struct vm_page *pg, p
        bad_alias = false;
        if (rw_mappings > 0) {
            /*
-            * We now have writeable mappings and if we have
-            * readonly mappings in more than once color, we have
-            * an aliasing problem.  Regardless mark the page as
-            * writeable.
+            * We now have writeable mappings and more than one
+            * readonly mapping, verify the colors don't clash
+            * and mark the page as writeable.
             */
-           if (pg->mdpage.pvh_attrs & PVF_MULTCLR) {
-               if (ro_mappings < 2) {
-                   /*
-                    * If we only have less than two
-                    * read-only mappings, just flush the
-                    * non-primary colors from the cache.
-                    */
-                   pmap_flush_page(pg,
-                       PMAP_FLUSH_SECONDARY);
-               } else {
-                   bad_alias = true;
+           if (ro_mappings > 1
+               && (pg->mdpage.pvh_attrs & PVF_WRITE) == 0
+               && arm_cache_prefer_mask) {
+               tst_mask = pg->mdpage.pvh_attrs & arm_cache_prefer_mask;
+               SLIST_FOREACH(pv, &pg->mdpage.pvh_list, pv_link) {
+                   /* if there's a bad alias, stop checking. */
+                   if (((tst_mask ^ pv->pv_va) & arm_cache_prefer_mask) == 0) {
+                       bad_alias = true;
+                       break;
+                   }
                }
            }
            pg->mdpage.pvh_attrs |= PVF_WRITE;
@@ -2005,11 +1981,10 @@ pmap_vac_me_harder(struct vm_page *pg, p
 
        /* color conflict.  evict from cache. */
-       pmap_flush_page(pg, PMAP_FLUSH_PRIMARY);
+       pmap_flush_page(pg, true);
        pg->mdpage.pvh_attrs &= ~PVF_COLORED;
        pg->mdpage.pvh_attrs |= PVF_NC;
        KASSERT((pg->mdpage.pvh_attrs & PVF_DMOD) == 0 || (pg->mdpage.pvh_attrs & (PVF_DIRTY|PVF_NC)));
-       KASSERT(!(pg->mdpage.pvh_attrs & PVF_MULTCLR));
        PMAPCOUNT(vac_color_erase);
    } else if (rw_mappings == 0
        && (pg->mdpage.pvh_attrs & PVF_KMPAGE) == 0) {
@@ -2019,7 +1994,7 @@ pmap_vac_me_harder(struct vm_page *pg, p
         * If the page has dirty cache lines, clean it.
         */
        if (pg->mdpage.pvh_attrs & PVF_DIRTY)
-           pmap_flush_page(pg, PMAP_CLEAN_PRIMARY);
+           pmap_flush_page(pg, false);
 
        /*
         * If this is the first remapping (we know that there are no
@@ -2035,7 +2010,6 @@ pmap_vac_me_harder(struct vm_page *pg, p
        } else {
            PMAPCOUNT(vac_color_blind);
        }
-       pg->mdpage.pvh_attrs |= PVF_MULTCLR;
        KASSERT((pg->mdpage.pvh_attrs & PVF_DMOD) == 0 || (pg->mdpage.pvh_attrs & (PVF_DIRTY|PVF_NC)));
        KASSERT((rw_mappings == 0) == !(pg->mdpage.pvh_attrs & PVF_WRITE));
        return;
@@ -2044,7 +2018,7 @@ pmap_vac_me_harder(struct vm_page *pg, p
        pg->mdpage.pvh_attrs |= PVF_WRITE;
 
    /* color conflict.  evict from cache. */
-   pmap_flush_page(pg, PMAP_FLUSH_PRIMARY);
+   pmap_flush_page(pg, true);
 
    /* the list can't be empty because this was a enter/modify */
    pv = SLIST_FIRST(&pg->mdpage.pvh_list);
@@ -2063,7 +2037,6 @@ pmap_vac_me_harder(struct vm_page *pg, p
    PMAPCOUNT(vac_color_change);
    KASSERT((pg->mdpage.pvh_attrs & PVF_DMOD) == 0 || (pg->mdpage.pvh_attrs & (PVF_DIRTY|PVF_NC)));
    KASSERT((rw_mappings == 0) == !(pg->mdpage.pvh_attrs & PVF_WRITE));
-   KASSERT(!(pg->mdpage.pvh_attrs & PVF_MULTCLR));
    return;
    }
 }
@@ -2462,93 +2435,60 @@ pmap_syncicache_page(struct vm_page *pg)
 }
 
 void
-pmap_flush_page(struct vm_page *pg, enum pmap_flush_op flush)
+pmap_flush_page(struct vm_page *pg, bool flush)
 {
-   vsize_t va_offset, end_va;
-   void (*cf)(vaddr_t, vsize_t);
+   const vsize_t va_offset = pg->mdpage.pvh_attrs & arm_cache_prefer_mask;
+   const size_t pte_offset = va_offset >> PGSHIFT;
+   pt_entry_t * const ptep = &cdst_pte[pte_offset];
+   const pt_entry_t oldpte = *ptep;
+#if 0
+   vaddr_t mask;
+#endif
 
-   if (arm_cache_prefer_mask == 0)
-       return;
+   KASSERT(!(pg->mdpage.pvh_attrs & PVF_NC));
+#if 0
+   mask = pmap_check_sets(pg->phys_addr);
+   KASSERT(popc4(mask) < 2);
+#endif
 
-   switch (flush) {
-   case PMAP_FLUSH_PRIMARY:
-       if (pg->mdpage.pvh_attrs & PVF_MULTCLR) {
-           va_offset = 0;
-           end_va = arm_cache_prefer_mask;
-           pg->mdpage.pvh_attrs &= ~PVF_MULTCLR;
-           PMAPCOUNT(vac_flush_lots);
-       } else {
-           va_offset = pg->mdpage.pvh_attrs & arm_cache_prefer_mask;
-           end_va = va_offset;
-           PMAPCOUNT(vac_flush_one);
-       }
-       /*
-        * Mark that the page is no longer dirty.
-        */
+   NPDEBUG(PDB_VAC, printf("pmap_flush_page: pg=%p (attrs=%#x)\n",
+       pg, pg->mdpage.pvh_attrs));
+   pmap_tlb_flushID_SE(pmap_kernel(), cdstp + va_offset);
+   /*
+    * Set up a PTE with the right coloring to flush existing cache entries.
+    */
+   *ptep = L2_S_PROTO
+       | VM_PAGE_TO_PHYS(pg)
+       | L2_S_PROT(PTE_KERNEL, VM_PROT_READ|VM_PROT_WRITE)
+       | pte_l2_s_cache_mode;
+   PTE_SYNC(ptep);
+
+   /*
+    * Flush it.
+    */
+   if (flush) {
+       cpu_idcache_wbinv_range(cdstp + va_offset, PAGE_SIZE);
        pg->mdpage.pvh_attrs &= ~PVF_DIRTY;
-       cf = cpufuncs.cf_idcache_wbinv_range;
-       break;
-   case PMAP_FLUSH_SECONDARY:
-       va_offset = 0;
-       end_va = arm_cache_prefer_mask;
-       cf = cpufuncs.cf_idcache_wbinv_range;
-       pg->mdpage.pvh_attrs &= ~PVF_MULTCLR;
-       PMAPCOUNT(vac_flush_lots);
-       break;
-   case PMAP_CLEAN_PRIMARY:
-       va_offset = pg->mdpage.pvh_attrs & arm_cache_prefer_mask;
-       end_va = va_offset;
-       cf = cpufuncs.cf_dcache_wb_range;
+   } else {
+       cpu_dcache_wb_range(cdstp + va_offset, PAGE_SIZE);
        /*
        * Mark that the page is no longer dirty.
        */
       if ((pg->mdpage.pvh_attrs & PVF_DMOD) == 0)
           pg->mdpage.pvh_attrs &= ~PVF_DIRTY;
-       PMAPCOUNT(vac_clean_one);
-       break;
-   default:
-       return;
    }
 
-   KASSERT(!(pg->mdpage.pvh_attrs & PVF_NC));
-
-   NPDEBUG(PDB_VAC, printf("pmap_flush_page: pg=%p (attrs=%#x)\n",
-       pg, pg->mdpage.pvh_attrs));
-
-   for (; va_offset <= end_va; va_offset += PAGE_SIZE) {
-       const size_t pte_offset = va_offset >> PGSHIFT;
-       pt_entry_t * const ptep = &cdst_pte[pte_offset];
-       const pt_entry_t oldpte = *ptep;
-
-       if (flush == PMAP_FLUSH_SECONDARY
-           && va_offset == (pg->mdpage.pvh_attrs & arm_cache_prefer_mask))
-           continue;
-
-       pmap_tlb_flushID_SE(pmap_kernel(), cdstp + va_offset);
-       /*
-        * Set up a PTE with the right coloring to flush
-        * existing cache entries.
-        */
-       *ptep = L2_S_PROTO
-           | VM_PAGE_TO_PHYS(pg)
-           | L2_S_PROT(PTE_KERNEL, VM_PROT_READ|VM_PROT_WRITE)
-           | pte_l2_s_cache_mode;
-       PTE_SYNC(ptep);
-
-       /*
-        * Flush it.
-        */
-       (*cf)(cdstp + va_offset, PAGE_SIZE);
-
-       /*
-        * Restore the page table entry since we might have interrupted
-        * pmap_zero_page or pmap_copy_page which was already using
-        * this pte.
-        */
-       *ptep = oldpte;
-       PTE_SYNC(ptep);
-       pmap_tlb_flushID_SE(pmap_kernel(), cdstp + va_offset);
-   }
+   /*
+    * Restore the page table entry since we might have interrupted
+    * pmap_zero_page or pmap_copy_page which was already using this pte.
+    */
+   *ptep = oldpte;
+   PTE_SYNC(ptep);
+   pmap_tlb_flushID_SE(pmap_kernel(), cdstp + va_offset);
+#if 0
+   mask = pmap_check_sets(pg->phys_addr);
+   KASSERT(mask == 0);
+#endif
 }
 
 #endif /* PMAP_CACHE_VIPT */
@@ -2743,8 +2683,8 @@ pmap_create(void)
 }
 
 /*
- * int pmap_enter(pmap_t pm, vaddr_t va, paddr_t pa, vm_prot_t prot,
- *     u_int flags)
+ * void pmap_enter(pmap_t pm, vaddr_t va, paddr_t pa, vm_prot_t prot,
+ *     int flags)
 *
 * Insert the given physical page (p) at
 * the specified virtual address (v) in the
@@ -2755,7 +2695,7 @@ pmap_create(void)
 * insert this page into the given map NOW.
 */
 int
-pmap_enter(pmap_t pm, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
+pmap_enter(pmap_t pm, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
 {
    struct l2_bucket *l2b;
    struct vm_page *pg, *opg;
@@ -3386,14 +3326,7 @@ pmap_kenter_pa(vaddr_t va, paddr_t pa, v
            if (pmap_is_page_colored_p(pg)
                && ((va ^ pg->mdpage.pvh_attrs) & arm_cache_prefer_mask)) {
                PMAPCOUNT(vac_color_change);
-               pmap_flush_page(pg, PMAP_FLUSH_PRIMARY);
-           } else if (pg->mdpage.pvh_attrs & PVF_MULTCLR) {
-               /*
-                * If this page has multiple colors, expunge
-                * them.
-                */
-               PMAPCOUNT(vac_flush_lots2);
-               pmap_flush_page(pg, PMAP_FLUSH_SECONDARY);
+               pmap_flush_page(pg, true);
            }
            pg->mdpage.pvh_attrs &= PAGE_SIZE - 1;
            pg->mdpage.pvh_attrs |= PVF_KMPAGE
@@ -3751,15 +3684,6 @@ pmap_clear_modify(struct vm_page *pg)
 
    if (pg->mdpage.pvh_attrs & PVF_MOD) {
        rv = true;
-#ifdef PMAP_CACHE_VIPT
-       /*
-        * If we are going to clear the modified bit and there are
-        * no other modified bits set, flush the page to memory and
-        * mark it clean.
-        */
-       if ((pg->mdpage.pvh_attrs & (PVF_DMOD|PVF_NC)) == PVF_MOD)
-           pmap_flush_page(pg, PMAP_CLEAN_PRIMARY);
-#endif
        pmap_clearbit(pg, PVF_MOD);
    } else
        rv = false;
@@ -5340,6 +5264,7 @@ pmap_alloc_specials(vaddr_t *availp, int
 void
 pmap_init(void)
 {
+   extern int physmem;
 
    /*
    * Set the available memory vars - These do not map to real memory
@@ -5348,8 +5273,8 @@ pmap_init(void)
    * One could argue whether this should be the entire memory or just
    * the memory that is useable in a user process.
    */
-   avail_start = ptoa(vm_physmem[0].start);
-   avail_end = ptoa(vm_physmem[vm_nphysseg - 1].end);
+   avail_start = 0;
+   avail_end = physmem * PAGE_SIZE;
 
    /*
    * Now we need to free enough pv_entry structures to allow us to get
@@ -5361,10 +5286,6 @@ pmap_init(void)
    pool_setlowat(&pmap_pv_pool,
        (PAGE_SIZE / sizeof(struct pv_entry)) * 2);
 
-   mutex_init(&memlock, MUTEX_DEFAULT, IPL_NONE);
-   zeropage = (void *)uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
-       UVM_KMF_WIRED|UVM_KMF_ZERO);
-
    pmap_initialized = true;
 }
@@ -5873,7 +5794,7 @@ pmap_devmap_find_va(vaddr_t va, vsize_t
 * These routines are called when the CPU type is identified to set up
 * the PTE prototypes, cache modes, etc.
 *
- * The variables are always here, just in case modules need to reference
+ * The variables are always here, just in case LKMs need to reference
 * them (though, they shouldn't).
@@ -6240,7 +6161,7 @@ pmap_uarea(vaddr_t va)
 /*
  * return the PA of the current L1 table, for use when handling a crash dump
  */
-uint32_t pmap_kernel_L1_addr(void)
+uint32_t pmap_kernel_L1_addr()
 {
    return pmap_kernel()->pm_l1->l1_physaddr;
 }
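
The key to this change is the comment the diff carries above pmap_vac_me_harder(): on a VIPT cache, every virtual mapping of a physical page must agree in the bits covered by arm_cache_prefer_mask (the page "colour"), otherwise the mappings have to be made non-cacheable. The standalone sketch below is not part of the patch; it only illustrates the two bits of arithmetic the patched code leans on -- comparing colours by XOR-ing virtual addresses against arm_cache_prefer_mask, and the popc4() bit-count macro used in the #if 0 debug assertions. The mask value and the virtual addresses here are made-up examples, not values taken from the kernel.

/* vipt_colour_demo.c -- illustrative only; not part of pmap.c */
#include <stdio.h>
#include <stdint.h>

/* Example colour mask: two index bits above a 4KB page offset (assumption). */
static const uintptr_t arm_cache_prefer_mask = 0x3000;

/* Bit count of a 4-bit value, copied from the popc4() macro in the diff. */
#define popc4(x) \
	(((0x94 >> ((x & 3) << 1)) & 3) + ((0x94 >> ((x & 12) >> 1)) & 3))

int
main(void)
{
	/* Two hypothetical virtual addresses mapping the same physical page. */
	uintptr_t va1 = 0xc0011000;	/* colour 0x1000 */
	uintptr_t va2 = 0xc0082000;	/* colour 0x2000 */

	printf("colour(va1) = %#lx, colour(va2) = %#lx\n",
	    (unsigned long)(va1 & arm_cache_prefer_mask),
	    (unsigned long)(va2 & arm_cache_prefer_mask));

	/* Colours are compared by XOR-and-mask, as in pmap_vac_me_harder(). */
	if ((va1 ^ va2) & arm_cache_prefer_mask)
		printf("different colours: illegal VIPT alias unless uncached\n");
	else
		printf("same colour: both mappings may stay cached\n");

	/* popc4() counts how many of the 4 low bits of a set mask are set. */
	printf("popc4(0x5) = %d\n", popc4(0x5));	/* two bits set */
	return 0;
}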