Please note that diffs are not public domain; they are subject to the copyright notices on the relevant files.

===================================================================
RCS file: /ftp/cvs/cvsroot/src/sys/uvm/uvm_loan.c,v
rcsdiff: /ftp/cvs/cvsroot/src/sys/uvm/uvm_loan.c,v: warning: Unknown phrases like `commitid ...;' are present.
retrieving revision 1.55
retrieving revision 1.55.2.5
diff -u -p -r1.55 -r1.55.2.5
--- src/sys/uvm/uvm_loan.c	2005/06/28 04:06:52	1.55
+++ src/sys/uvm/uvm_loan.c	2007/10/27 11:36:54	1.55.2.5
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_loan.c,v 1.55 2005/06/28 04:06:52 thorpej Exp $	*/
+/*	$NetBSD: uvm_loan.c,v 1.55.2.5 2007/10/27 11:36:54 yamt Exp $	*/
 
 /*
  *
@@ -39,7 +39,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_loan.c,v 1.55 2005/06/28 04:06:52 thorpej Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_loan.c,v 1.55.2.5 2007/10/27 11:36:54 yamt Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -138,7 +138,7 @@ static int uvm_loanpage(struct vm_page *
  * from one place.
  */
 
-static __inline int
+static inline int
 uvm_loanentry(struct uvm_faultinfo *ufi, void ***output, int flags)
 {
 	vaddr_t curaddr = ufi->orig_rvaddr;
@@ -155,8 +155,6 @@ uvm_loanentry(struct uvm_faultinfo *ufi,
 	 */
 	if (aref->ar_amap)
 		amap_lock(aref->ar_amap);
-	if (uobj)
-		simple_lock(&uobj->vmobjlock);
 
 	/*
 	 * loop until done
@@ -185,10 +183,8 @@ uvm_loanentry(struct uvm_faultinfo *ufi,
 			rv = -1;
 		}
 		/* locked: if (rv > 0) => map, amap, uobj [o.w. unlocked] */
-		LOCK_ASSERT(rv > 0 || aref->ar_amap == NULL ||
-		    !simple_lock_held(&aref->ar_amap->am_l));
-		LOCK_ASSERT(rv > 0 || uobj == NULL ||
-		    !simple_lock_held(&uobj->vmobjlock));
+		KASSERT(rv > 0 || aref->ar_amap == NULL ||
+		    !mutex_owned(&aref->ar_amap->am_l));
 
 		/* total failure */
 		if (rv < 0) {
@@ -218,9 +214,7 @@ uvm_loanentry(struct uvm_faultinfo *ufi,
 
 	if (aref->ar_amap)
 		amap_unlock(aref->ar_amap);
-	if (uobj)
-		simple_unlock(&uobj->vmobjlock);
-	uvmfault_unlockmaps(ufi, FALSE);
+	uvmfault_unlockmaps(ufi, false);
 	UVMHIST_LOG(loanhist, "done %d", result, 0,0,0);
 	return (result);
 }
@@ -284,7 +278,7 @@ uvm_loan(struct vm_map *map, vaddr_t sta
 		 * an unmapped region (an error)
 		 */
 
-		if (!uvmfault_lookup(&ufi, FALSE)) {
+		if (!uvmfault_lookup(&ufi, false)) {
 			error = ENOENT;
 			goto fail;
 		}
@@ -426,20 +420,20 @@ uvm_loananon(struct uvm_faultinfo *ufi,
 		UVMHIST_LOG(loanhist, "->K wired %p", pg,0,0,0);
 		KASSERT(pg->uobject == NULL);
 		uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap,
-		    ufi->entry->object.uvm_obj, anon);
+		    NULL, anon);
 		return (-1);
 	}
 	if (pg->loan_count == 0) {
 		pmap_page_protect(pg, VM_PROT_READ);
 	}
 	pg->loan_count++;
-	uvm_pagedequeue(pg);
+	uvm_pageactivate(pg);
 	uvm_unlock_pageq();
 	**output = pg;
 	(*output)++;
 
 	/* unlock anon and return success */
-	if (pg->uobject)	/* XXXCDC: what if this is our uobj? bad */
+	if (pg->uobject)
 		simple_unlock(&pg->uobject->vmobjlock);
 	simple_unlock(&anon->an_lock);
 	UVMHIST_LOG(loanhist, "->K done", 0,0,0,0);
@@ -483,7 +477,7 @@ uvm_loanpage(struct vm_page **pgpp, int
 			pmap_page_protect(pg, VM_PROT_READ);
 		}
 		pg->loan_count++;
-		uvm_pagedequeue(pg);
+		uvm_pageactivate(pg);
 		uvm_unlock_pageq();
 	}
 
@@ -632,7 +626,7 @@ uvm_loanuobj(struct uvm_faultinfo *ufi,
 	struct vm_page *pg;
 	struct vm_anon *anon;
 	int error, npages;
-	boolean_t locked;
+	bool locked;
 
 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(loanhist);
 
@@ -642,6 +636,7 @@ uvm_loanuobj(struct uvm_faultinfo *ufi,
 	 * XXXCDC: duplicate code with uvm_fault().
 	 */
 
+	simple_lock(&uobj->vmobjlock);
 	if (uobj->pgops->pgo_get) {	/* try locked pgo_get */
 		npages = 1;
 		pg = NULL;
@@ -691,6 +686,7 @@ uvm_loanuobj(struct uvm_faultinfo *ufi,
 		locked = uvmfault_relock(ufi);
 		if (locked && amap)
 			amap_lock(amap);
+		uobj = pg->uobject;
 		simple_lock(&uobj->vmobjlock);
 
 		/*
@@ -704,14 +700,14 @@ uvm_loanuobj(struct uvm_faultinfo *ufi,
 		    ufi->orig_rvaddr - ufi->entry->start))) {
 			if (locked)
 				uvmfault_unlockall(ufi, amap, NULL, NULL);
-			locked = FALSE;
+			locked = false;
 		}
 
 		/*
		 * didn't get the lock? release the page and retry.
 		 */
 
-		if (locked == FALSE) {
+		if (locked == false) {
 			if (pg->flags & PG_WANTED) {
 				wakeup(pg);
 			}
@@ -719,6 +715,7 @@ uvm_loanuobj(struct uvm_faultinfo *ufi,
 				uvm_lock_pageq();
 				uvm_pagefree(pg);
 				uvm_unlock_pageq();
+				simple_unlock(&uobj->vmobjlock);
 				return (0);
 			}
 			uvm_lock_pageq();
@@ -731,6 +728,8 @@ uvm_loanuobj(struct uvm_faultinfo *ufi,
 		}
 	}
 
+	KASSERT(uobj == pg->uobject);
+
 	/*
 	 * at this point we have the page we want ("pg") marked PG_BUSY for us
 	 * and we have all data structures locked. do the loanout. page can
@@ -742,6 +741,7 @@ uvm_loanuobj(struct uvm_faultinfo *ufi,
 			uvmfault_unlockall(ufi, amap, uobj, NULL);
 			return (-1);
 		}
+		simple_unlock(&uobj->vmobjlock);
 		**output = pg;
 		(*output)++;
 		return (1);
@@ -764,6 +764,7 @@ uvm_loanuobj(struct uvm_faultinfo *ufi,
 		}
 		pg->flags &= ~(PG_WANTED|PG_BUSY);
 		UVM_PAGE_OWN(pg, NULL);
+		simple_unlock(&uobj->vmobjlock);
 		**output = anon;
 		(*output)++;
 		return (1);
@@ -801,6 +802,7 @@ uvm_loanuobj(struct uvm_faultinfo *ufi,
 	}
 	pg->flags &= ~(PG_WANTED|PG_BUSY);
 	UVM_PAGE_OWN(pg, NULL);
+	simple_unlock(&uobj->vmobjlock);
 	simple_unlock(&anon->an_lock);
 	**output = anon;
 	(*output)++;
@@ -838,7 +840,6 @@ uvm_loanzero(struct uvm_faultinfo *ufi,
 {
 	struct vm_anon *anon;
 	struct vm_page *pg;
-	struct uvm_object *uobj = ufi->entry->object.uvm_obj;
 	struct vm_amap *amap = ufi->entry->aref.ar_amap;
 
 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(loanhist);
@@ -854,7 +855,7 @@ again:
 		while ((pg = uvm_pagealloc(&uvm_loanzero_object, 0, NULL,
 		    UVM_PGA_ZERO)) == NULL) {
 			simple_unlock(&uvm_loanzero_object.vmobjlock);
-			uvmfault_unlockall(ufi, amap, uobj, NULL);
+			uvmfault_unlockall(ufi, amap, NULL, NULL);
 			uvm_wait("loanzero");
 			if (!uvmfault_relock(ufi)) {
 				return (0);
@@ -862,9 +863,6 @@ again:
 			if (amap) {
 				amap_lock(amap);
 			}
-			if (uobj) {
-				simple_lock(&uobj->vmobjlock);
-			}
 			goto again;
 		}
 
@@ -880,7 +878,6 @@ again:
 	if ((flags & UVM_LOAN_TOANON) == 0) {	/* loaning to kernel-page */
 		uvm_lock_pageq();
 		pg->loan_count++;
-		uvm_pagedequeue(pg);
 		uvm_unlock_pageq();
 		simple_unlock(&uvm_loanzero_object.vmobjlock);
 		**output = pg;
@@ -913,7 +910,7 @@ again:
 	if (anon == NULL) {
 		/* out of swap causes us to fail */
 		simple_unlock(&uvm_loanzero_object.vmobjlock);
-		uvmfault_unlockall(ufi, amap, uobj, NULL);
+		uvmfault_unlockall(ufi, amap, NULL, NULL);
 		return (-1);
 	}
 	anon->an_page = pg;
@@ -922,6 +919,7 @@ again:
 	pg->loan_count++;
 	uvm_pageactivate(pg);
 	uvm_unlock_pageq();
+	simple_unlock(&anon->an_lock);
 	simple_unlock(&uvm_loanzero_object.vmobjlock);
 	**output = anon;
 	(*output)++;
@@ -1010,16 +1008,10 @@ uvm_unloanpage(struct vm_page **ploans,
 			pg->loan_count--;
 			pg->pqflags |= PQ_ANON;
 		}
-		if (pg->loan_count == 0) {
-			if (pg->uobject == NULL && pg->uanon == NULL) {
-				KASSERT((pg->flags & PG_BUSY) == 0);
-				uvm_pagefree(pg);
-			} else {
-				uvm_pageactivate(pg);
-			}
-		} else if (pg->loan_count == 1 && pg->uobject != NULL &&
-		    pg->uanon != NULL) {
-			uvm_pageactivate(pg);
+		if (pg->loan_count == 0 && pg->uobject == NULL &&
+		    pg->uanon == NULL) {
+			KASSERT((pg->flags & PG_BUSY) == 0);
+			uvm_pagefree(pg);
 		}
 		if (slock != NULL) {
 			simple_unlock(slock);
@@ -1136,16 +1128,25 @@ uvm_loanbreak(struct vm_page *uobjpage)
 
 	/*
 	 * copy the data from the old page to the new
-	 * one and clear the fake/clean flags on the
-	 * new page (keep it busy). force a reload
-	 * of the old page by clearing it from all
-	 * pmaps. then lock the page queues to
-	 * rename the pages.
+	 * one and clear the fake flags on the new page (keep it busy).
+	 * force a reload of the old page by clearing it from all
+	 * pmaps.
+	 * transfer dirtiness of the old page to the new page.
+	 * then lock the page queues to rename the pages.
 	 */
 
 	uvm_pagecopy(uobjpage, pg);	/* old -> new */
-	pg->flags &= ~(PG_FAKE|PG_CLEAN);
+	pg->flags &= ~PG_FAKE;
 	pmap_page_protect(uobjpage, VM_PROT_NONE);
+	if ((uobjpage->flags & PG_CLEAN) != 0 && !pmap_clear_modify(uobjpage)) {
+		pmap_clear_modify(pg);
+		pg->flags |= PG_CLEAN;
+	} else {
+		/* uvm_pagecopy marked it dirty */
+		KASSERT((pg->flags & PG_CLEAN) == 0);
+		/* a object with a dirty page should be dirty. */
+		KASSERT(!UVM_OBJ_IS_CLEAN(uobj));
+	}
 	if (uobjpage->flags & PG_WANTED)
 		wakeup(uobjpage);
 	/* uobj still locked */