Please note that diffs are not public domain; they are subject to the copyright notices on the relevant files.

===================================================================
RCS file: /ftp/cvs/cvsroot/src/sys/arch/sun3/sun3/pmap.c,v
rcsdiff: /ftp/cvs/cvsroot/src/sys/arch/sun3/sun3/pmap.c,v: warning: Unknown phrases like `commitid ...;' are present.
retrieving revision 1.166.2.3
retrieving revision 1.167
diff -u -p -r1.166.2.3 -r1.167
--- src/sys/arch/sun3/sun3/pmap.c	2014/05/22 11:40:10	1.166.2.3
+++ src/sys/arch/sun3/sun3/pmap.c	2012/01/29 16:24:51	1.167
@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap.c,v 1.166.2.3 2014/05/22 11:40:10 yamt Exp $	*/
+/*	$NetBSD: pmap.c,v 1.167 2012/01/29 16:24:51 para Exp $	*/
 
 /*-
  * Copyright (c) 1996 The NetBSD Foundation, Inc.
@@ -80,7 +80,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.166.2.3 2014/05/22 11:40:10 yamt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.167 2012/01/29 16:24:51 para Exp $");
 
 #include "opt_ddb.h"
 #include "opt_pmap_debug.h"
@@ -479,7 +479,7 @@ pmeg_p(int sme)
 
 #define is_pmeg_wired(pmegp) (pmegp->pmeg_wired != 0)
 
-static void
+static void 
 pmeg_set_wiring(pmeg_t pmegp, vaddr_t va, int flag)
 {
 	int idx, mask;
@@ -498,7 +498,7 @@ pmeg_set_wiring(pmeg_t pmegp, vaddr_t va
  */
 
 /* part of pmap_bootstrap */
-static void
+static void 
 context_init(void)
 {
 	int i;
@@ -523,7 +523,7 @@ context_init(void)
 }
 
 /* Get us a context (steal one if necessary). */
-static void
+static void 
 context_allocate(pmap_t pmap)
 {
 	context_t context;
@@ -575,7 +575,7 @@ context_allocate(pmap_t pmap)
 /*
  * Unload the context and put it on the free queue.
  */
-static void
+static void 
 context_free(pmap_t pmap)
 {
 	int saved_ctxnum, ctxnum;
@@ -646,7 +646,7 @@ context_free(pmap_t pmap)
  * PMEG management functions.
  */
 
-static void
+static void 
 pmeg_init(void)
 {
	int x;
@@ -673,7 +673,7 @@ pmeg_init(void)
  * Reserve a pmeg (forever) for use by PROM, etc.
  * Contents are left as-is.  Called very early...
  */
-void
+void 
 pmeg_reserve(int sme)
 {
 	pmeg_t pmegp;
@@ -701,7 +701,7 @@ pmeg_reserve(int sme)
  * Examine PMEGs used by the monitor, and either
  * reserve them (keep=1) or clear them (keep=0)
  */
-static void
+static void 
 pmeg_mon_init(vaddr_t sva, vaddr_t eva, int keep)
 {
 	vaddr_t pgva, endseg;
@@ -745,7 +745,7 @@ pmeg_mon_init(vaddr_t sva, vaddr_t eva, 
  * This is used only during pmap_bootstrap, so we can
  * get away with borrowing a slot in the segmap.
  */
-static void
+static void 
 pmeg_clean(pmeg_t pmegp)
 {
 	int sme;
@@ -770,7 +770,7 @@ pmeg_clean(pmeg_t pmegp)
  * puts them at the end.  The ending condition is finding the first
  * queue element at the head of the queue again.
  */
-static void
+static void 
 pmeg_clean_free(void)
 {
 	pmeg_t pmegp, pmegp_first;
@@ -802,7 +802,7 @@ pmeg_clean_free(void)
  * Allocate a PMEG by whatever means necessary.
  * (May invalidate some mappings!)
  */
-static pmeg_t
+static pmeg_t 
 pmeg_allocate(pmap_t pmap, vaddr_t va)
 {
 	pmeg_t pmegp;
@@ -880,7 +880,7 @@ pmeg_allocate(pmap_t pmap, vaddr_t va)
  * This happens when we loose our context.  We may reclaim
  * this pmeg later if it is still in the inactive queue.
  */
-static void
+static void 
 pmeg_release(pmeg_t pmegp)
 {
 
@@ -901,7 +901,7 @@ pmeg_release(pmeg_t pmegp)
  * Move the pmeg to the free queue from wherever it is.
  * The pmeg will be clean.  It might be in kernel_pmap.
  */
-static void
+static void 
 pmeg_free(pmeg_t pmegp)
 {
 
@@ -945,7 +945,7 @@ pmeg_free(pmeg_t pmegp)
  * Find a PMEG that was put on the inactive queue when we
  * had our context stolen.  If found, move to active queue.
  */
-static pmeg_t
+static pmeg_t 
 pmeg_cache(pmap_t pmap, vaddr_t va)
 {
 	int sme, segnum;
@@ -1012,7 +1012,7 @@ pmeg_cache(pmap_t pmap, vaddr_t va)
 }
 
 #ifdef PMAP_DEBUG
-static void
+static void 
 pmeg_verify_empty(vaddr_t va)
 {
 	vaddr_t eva;
@@ -1035,7 +1035,7 @@ pmeg_verify_empty(vaddr_t va)
  * there is one list head for each physical page, and
  * at most one element for each PMEG slot.
  */
-static void
+static void 
 pv_init(void)
 {
 	int npp, nvp, sz;
@@ -1089,7 +1089,7 @@ pv_init(void)
  * Set or clear bits in all PTEs mapping a page.
  * Also does syncflags work while we are there...
  */
-static void
+static void 
 pv_changepte(paddr_t pa, int set_bits, int clear_bits)
 {
 	pv_entry_t *head, pv;
@@ -1193,7 +1193,7 @@ pv_changepte(paddr_t pa, int set_bits, i
  * Return ref and mod bits from pvlist,
  * and turns off same in hardware PTEs.
  */
-static u_int
+static u_int 
 pv_syncflags(pv_entry_t pv)
 {
 	pmap_t pmap;
@@ -1285,7 +1285,7 @@ pv_syncflags(pv_entry_t pv)
 }
 
 /* Remove all mappings for the physical page. */
-static void
+static void 
 pv_remove_all(paddr_t pa)
 {
 	pv_entry_t *head, pv;
@@ -1322,7 +1322,7 @@ pv_remove_all(paddr_t pa)
  * Returns PV_NC if the (new) pvlist says that the address cannot
  * be cached.
  */
-static int
+static int 
 pv_link(pmap_t pmap, int pte, vaddr_t va)
 {
 	paddr_t pa;
@@ -1401,7 +1401,7 @@ pv_link(pmap_t pmap, int pte, vaddr_t va
  * definition nonempty, since it must have at least two elements
  * in it to have PV_NC set, and we only remove one here.)
  */
-static void
+static void 
 pv_unlink(pmap_t pmap, int pte, vaddr_t va)
 {
 	paddr_t pa;
@@ -1478,7 +1478,7 @@ pv_unlink(pmap_t pmap, int pte, vaddr_t 
  * Bootstrap and Initialization, etc.
  */
 
-void
+void 
 pmap_common_init(pmap_t pmap)
 {
 	memset(pmap, 0, sizeof(struct pmap));
@@ -1493,7 +1493,7 @@ pmap_common_init(pmap_t pmap)
  * after the "start/end" globals are set.
  * This function must NOT leave context zero.
  */
-void
+void 
 pmap_bootstrap(vaddr_t nextva)
 {
 	struct sunromvec *rvec;
@@ -1750,7 +1750,7 @@ pmap_bootstrap(vaddr_t nextva)
  * because this lets pmap_remove() and pmap_protect()
  * use a S/W copy of the segmap to avoid function calls.
  */
-void
+void 
 pmap_kernel_init(pmap_t pmap)
 {
 	vaddr_t va;
@@ -1776,7 +1776,7 @@ pmap_kernel_init(pmap_t pmap)
  * How much virtual space does this kernel have?
  * (After mapping kernel text, data, etc.)
  */
-void
+void 
 pmap_virtual_space(vaddr_t *v_start, vaddr_t *v_end)
 {
 	*v_start = virtual_avail;
@@ -1784,7 +1784,7 @@ pmap_virtual_space(vaddr_t *v_start, vad
 }
 
 /* Provide memory to the VM system. */
-static void
+static void 
 pmap_page_upload(void)
 {
 	int a, b, c, d;
@@ -1813,7 +1813,7 @@ pmap_page_upload(void)
  * Called by vm_init, to initialize any structures that the pmap
  * system needs to map virtual memory.
  */
-void
+void 
 pmap_init(void)
 {
 	pv_init();
@@ -1829,7 +1829,7 @@ pmap_init(void)
  * record the mapping for kernel text/data/bss.
  * Return VA following the mapped range.
  */
-vaddr_t
+vaddr_t 
 pmap_map(vaddr_t va, paddr_t pa, paddr_t endpa, int prot)
 {
 	int sz;
@@ -1845,7 +1845,7 @@ pmap_map(vaddr_t va, paddr_t pa, paddr_t
 	return(va);
 }
 
-void
+void 
 pmap_user_init(pmap_t pmap)
 {
 	int i;
@@ -1867,7 +1867,7 @@ pmap_user_init(pmap_t pmap)
  * the map will be used in software only, and
  * is bounded by that size.
  */
-pmap_t
+pmap_t 
 pmap_create(void)
 {
 	pmap_t pmap;
@@ -1882,7 +1882,7 @@ pmap_create(void)
  * Called when a pmap initialized by pmap_pinit is being released.
  * Should only be called if the map contains no valid mappings.
  */
-void
+void 
 pmap_release(struct pmap *pmap)
 {
 	int s;
@@ -1912,7 +1912,7 @@ pmap_release(struct pmap *pmap)
  * Should only be called if the map contains
  * no valid mappings.
  */
-void
+void 
 pmap_destroy(pmap_t pmap)
 {
 	int count;
@@ -1933,7 +1933,7 @@ pmap_destroy(pmap_t pmap)
 /*
  * Add a reference to the specified pmap.
  */
-void
+void 
 pmap_reference(pmap_t pmap)
 {
 
@@ -1957,7 +1957,7 @@ pmap_reference(pmap_t pmap)
  * or lose information.  That is, this routine must actually
  * insert this page into the given map NOW.
 */
-int
+int 
 pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
 {
 	int new_pte, s;
@@ -2005,7 +2005,7 @@ pmap_enter(pmap_t pmap, vaddr_t va, padd
 	return 0;
 }
 
-static void
+static void 
 pmap_enter_kernel(vaddr_t pgva, int new_pte, bool wired)
 {
 	pmap_t pmap = kernel_pmap;
@@ -2148,7 +2148,7 @@ add_pte:	/* can be destructive */
 }
 
 
-static void
+static void 
 pmap_enter_user(pmap_t pmap, vaddr_t pgva, int new_pte, bool wired)
 {
 	int do_pv, old_pte, sme;
@@ -2323,7 +2323,7 @@ pmap_enter_user(pmap_t pmap, vaddr_t pgv
 	pmegp->pmeg_vpages++;
 }
 
-void
+void 
 pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
 {
 	int new_pte, s;
@@ -2432,7 +2432,7 @@ pmap_kenter_pa(vaddr_t va, paddr_t pa, v
 	splx(s);
 }
 
-void
+void 
 pmap_kremove(vaddr_t va, vsize_t len)
 {
 	pmap_t pmap = kernel_pmap;
@@ -2541,7 +2541,7 @@ pmap_kremove(vaddr_t va, vsize_t len)
  *
  * XXX: Merge this with the next function?
 */
-int
+int 
 _pmap_fault(struct vm_map *map, vaddr_t va, vm_prot_t ftype)
 {
 	pmap_t pmap;
@@ -2582,7 +2582,7 @@ _pmap_fault(struct vm_map *map, vaddr_t 
  * it has a valid PTE at va.
  * Otherwise return zero and let VM code handle it.
  */
-int
+int 
 pmap_fault_reload(pmap_t pmap, vaddr_t pgva, vm_prot_t ftype)
 {
 	int rv, s, pte, chkpte, sme;
@@ -2761,7 +2761,7 @@ pmap_is_referenced(struct vm_page *pg)
  * need our own context for user-space mappings in
  * pmap_enter_user().
 */
-void
+void 
 _pmap_switch(pmap_t pmap)
 {
 	set_context(pmap->pm_ctxnum);
@@ -2774,7 +2774,7 @@ _pmap_switch(pmap_t pmap)
  * If (p == curlwp) do like cpu_switch would do; otherwise just
  * take this as notification that the process has a new pmap.
  */
-void
+void 
 pmap_activate(struct lwp *l)
 {
 	pmap_t pmap = l->l_proc->p_vmspace->vm_map.pmap;
@@ -2787,7 +2787,7 @@ pmap_activate(struct lwp *l)
 /*
  * Deactivate the address space of the specified process.
  */
-void
+void 
 pmap_deactivate(struct lwp *l)
 {
 	/* Nothing to do. */
@@ -2800,7 +2800,7 @@
  * In/out conditions:
  *	The mapping must already exist in the pmap.
 */
-void
+void 
 pmap_unwire(pmap_t pmap, vaddr_t va)
 {
 	int s, sme;
@@ -2857,7 +2857,7 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pm
  * with the given map/virtual_address pair.
  * Returns zero if VA not valid.
  */
-bool
+bool 
 pmap_extract(pmap_t pmap, vaddr_t va, paddr_t *pap)
 {
 	int s, sme, segnum, ptenum, pte;
@@ -2904,7 +2904,7 @@ pmap_extract(pmap_t pmap, vaddr_t va, pa
 *
 *	Lower the permission for all mappings to a given page.
 */
-void
+void 
 pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
 {
 	paddr_t pa = VM_PAGE_TO_PHYS(pg);
@@ -2934,7 +2934,7 @@ pmap_page_protect(struct vm_page *pg, vm
  * Initialize a preallocated and zeroed pmap structure,
  * such as one in a vmspace structure.
 */
-void
+void 
 pmap_pinit(pmap_t pmap)
 {
 	pmap_common_init(pmap);
@@ -2946,7 +2946,7 @@ pmap_pinit(pmap_t pmap)
  * range of this map as requested.
  * (Make pages read-only.)
  */
-void
+void 
 pmap_protect(pmap_t pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
 {
 	vaddr_t va, neva;
@@ -2989,7 +2989,7 @@ pmap_protect(pmap_t pmap, vaddr_t sva, v
  * (guaranteed to be within one segment)
  * similar to pmap_remove1()
 */
-void
+void 
 pmap_protect1(pmap_t pmap, vaddr_t sva, vaddr_t eva)
 {
 	int old_ctx, s, sme;
@@ -3041,11 +3041,12 @@ out:
  * where that PMEG is currently in the MMU.
  * The current context is already correct.
 */
-void
+void 
 pmap_protect_mmu(pmap_t pmap, vaddr_t sva, vaddr_t eva)
 {
+	pmeg_t pmegp;
 	vaddr_t pgva, segva;
-	int pte;
+	int pte, sme;
 #ifdef HAVECACHE
 	int flush_by_page = 0;
 #endif
@@ -3060,9 +3061,9 @@ pmap_protect_mmu(pmap_t pmap, vaddr_t sv
 #endif
 
 	segva = sun3_trunc_seg(sva);
+	sme = get_segmap(segva);
 
 #ifdef DIAGNOSTIC
-	int sme = get_segmap(segva);
 	/* Make sure it is valid and known. */
 	if (sme == SEGINV)
 		panic("pmap_protect_mmu: SEGINV");
@@ -3070,10 +3071,10 @@ pmap_protect_mmu(pmap_t pmap, vaddr_t sv
 		panic("pmap_protect_mmu: incorrect sme, va=0x%lx", segva);
 #endif
 
+	pmegp = pmeg_p(sme);
+	/* have pmeg, will travel */
 
 #ifdef DIAGNOSTIC
-	/* have pmeg, will travel */
-	pmeg_t pmegp = pmeg_p(sme);
 	/* Make sure we own the pmeg, right va, etc. */
 	if ((pmegp->pmeg_va != segva) ||
 	    (pmegp->pmeg_owner != pmap) ||
@@ -3126,7 +3127,7 @@ pmap_protect_mmu(pmap_t pmap, vaddr_t sv
  * Remove write permissions, all in one PMEG,
  * where it is not currently in any context.
 */
-void
+void 
 pmap_protect_noctx(pmap_t pmap, vaddr_t sva, vaddr_t eva)
 {
 	int old_ctx, pte, sme, segnum;
@@ -3182,7 +3183,7 @@ pmap_protect_noctx(pmap_t pmap, vaddr_t 
  * It is assumed that the start and end are properly
  * rounded to the page size.
 */
-void
+void 
 pmap_remove(pmap_t pmap, vaddr_t sva, vaddr_t eva)
 {
 	vaddr_t va, neva;
@@ -3194,7 +3195,7 @@ pmap_remove(pmap_t pmap, vaddr_t sva, va
 		printf("pmap_remove(%p, 0x%lx, 0x%lx)\n", pmap, sva, eva);
 #endif
 
-
+	
 	KASSERT((pmap == kernel_pmap) ?
 		sva >= virtual_avail && eva < DVMA_MAP_END :
 		eva <= VM_MAXUSER_ADDRESS);
@@ -3214,7 +3215,7 @@ pmap_remove(pmap_t pmap, vaddr_t sva, va
 /*
  * Remove user mappings, all within one segment
 */
-void
+void 
 pmap_remove1(pmap_t pmap, vaddr_t sva, vaddr_t eva)
 {
 	int old_ctx, s, sme;
@@ -3267,7 +3268,7 @@ out:
  * The current context is already correct.
  * If no PTEs remain valid in the PMEG, free it.
 */
-void
+void 
 pmap_remove_mmu(pmap_t pmap, vaddr_t sva, vaddr_t eva)
 {
 	pmeg_t pmegp;
@@ -3398,7 +3399,7 @@ pmap_remove_mmu(pmap_t pmap, vaddr_t sva
  * Remove some mappings, all in one PMEG,
  * where it is not currently in any context.
 */
-void
+void 
 pmap_remove_noctx(pmap_t pmap, vaddr_t sva, vaddr_t eva)
 {
 	pmeg_t pmegp;
@@ -3483,7 +3484,7 @@ pmap_remove_noctx(pmap_t pmap, vaddr_t s
  * Count resident pages in this pmap.
  * See: kern_sysctl.c:pmap_resident_count
 */
-segsz_t
+segsz_t 
 pmap_resident_pages(pmap_t pmap)
 {
 	int i, sme, pages;
@@ -3507,7 +3508,7 @@ pmap_resident_pages(pmap_t pmap)
  * Count wired pages in this pmap.
  * See vm_mmap.c:pmap_wired_count
 */
-segsz_t
+segsz_t 
 pmap_wired_pages(pmap_t pmap)
 {
 	int i, mask, sme, pages;
@@ -3539,7 +3540,7 @@ pmap_wired_pages(pmap_t pmap)
  * bcopy to copy the page, one machine dependent page at a
  * time.
 */
-void
+void 
 pmap_copy_page(paddr_t src, paddr_t dst)
 {
 	int pte;
@@ -3581,7 +3582,7 @@ pmap_copy_page(paddr_t src, paddr_t dst)
  * bzero to clear its contents, one machine dependent page
 * at a time.
 */
-void
+void 
 pmap_zero_page(paddr_t pa)
 {
 	int pte;
@@ -3619,19 +3620,13 @@
  * least likely to cause cache aliases.
  * (This will just seg-align mappings.)
  */
-void
-pmap_prefer(vaddr_t fo, vaddr_t *va, int td)
+void 
+pmap_prefer(vaddr_t fo, vaddr_t *va)
 {
 	long d;
 
 	d = fo - *va;
 	d &= SEGOFSET;
-	if (d == 0) {
-		return;
-	}
-	if (td) {
-		*va -= SEGOFSET + 1;
-	}
 	*va += d;
 }
 
@@ -3639,7 +3634,7 @@
  * Fill in the sun3x-specific part of the kernel core header
 * for dumpsys().  (See machdep.c for the rest.)
 */
-void
+void 
 pmap_kcore_hdr(struct sun3_kcore_hdr *sh)
 {
 	vaddr_t va;
@@ -3664,7 +3659,7 @@ pmap_kcore_hdr(struct sun3_kcore_hdr *sh
 * Copy the pagemap RAM into the passed buffer (one page)
 * starting at OFF in the pagemap RAM.
 */
-void
+void 
 pmap_get_pagemap(int *pt, int off)
 {
 	vaddr_t va, va_end;
@@ -3747,11 +3742,11 @@ set_pte_pmeg(int pmeg_num, int page_num,
 
 /*
 * Routine:	pmap_procwr
 *
 * Function:
 *	Synchronize caches corresponding to [addr, addr+len) in p.
- */
-void
+ */ 
+void 
 pmap_procwr(struct proc *p, vaddr_t va, size_t len)
 {
 	(void)cachectl1(0x80000004, va, len, p);
@@ -3761,7 +3756,7 @@ pmap_procwr(struct proc *p, vaddr_t va, 
 
 #ifdef PMAP_DEBUG
 /* Things to call from the debugger. */
-void
+void 
 pmap_print(pmap_t pmap)
 {
 	db_printf(" pm_ctxnum=%d\n", pmap->pm_ctxnum);
@@ -3769,7 +3764,7 @@ pmap_print(pmap_t pmap)
 	db_printf(" pm_segmap=%p\n", pmap->pm_segmap);
 }
 
-void
+void 
 pmeg_print(pmeg_t pmegp)
 {
 	db_printf("link_next=%p link_prev=%p\n",
@@ -3783,7 +3778,7 @@ pmeg_print(pmeg_t pmegp)
 	    pmegp->pmeg_qstate);
 }
 
-void
+void 
 pv_print(paddr_t pa)
 {
 	pv_entry_t pv;