Annotation of src/sys/uvm/uvm_loan.c, Revision 1.65.4.1
1.65.4.1! ad 1: /* $NetBSD: uvm_loan.c,v 1.65 2007/02/22 06:05:01 thorpej Exp $ */
1.1 mrg 2:
3: /*
4: *
5: * Copyright (c) 1997 Charles D. Cranor and Washington University.
6: * All rights reserved.
7: *
8: * Redistribution and use in source and binary forms, with or without
9: * modification, are permitted provided that the following conditions
10: * are met:
11: * 1. Redistributions of source code must retain the above copyright
12: * notice, this list of conditions and the following disclaimer.
13: * 2. Redistributions in binary form must reproduce the above copyright
14: * notice, this list of conditions and the following disclaimer in the
15: * documentation and/or other materials provided with the distribution.
16: * 3. All advertising materials mentioning features or use of this software
17: * must display the following acknowledgement:
18: * This product includes software developed by Charles D. Cranor and
19: * Washington University.
20: * 4. The name of the author may not be used to endorse or promote products
21: * derived from this software without specific prior written permission.
22: *
23: * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
24: * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
25: * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
26: * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
27: * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
28: * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
29: * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
30: * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31: * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
32: * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1.4 mrg 33: *
34: * from: Id: uvm_loan.c,v 1.1.6.4 1998/02/06 05:08:43 chs Exp
1.1 mrg 35: */
36:
37: /*
38: * uvm_loan.c: page loanout handler
39: */
1.35 lukem 40:
41: #include <sys/cdefs.h>
1.65.4.1! ad 42: __KERNEL_RCSID(0, "$NetBSD: uvm_loan.c,v 1.65 2007/02/22 06:05:01 thorpej Exp $");
1.1 mrg 43:
44: #include <sys/param.h>
45: #include <sys/systm.h>
46: #include <sys/kernel.h>
47: #include <sys/proc.h>
48: #include <sys/malloc.h>
49: #include <sys/mman.h>
50:
51: #include <uvm/uvm.h>
52:
53: /*
1.29 chs 54: * "loaned" pages are pages which are (read-only, copy-on-write) loaned
1.1 mrg 55: * from the VM system to other parts of the kernel. this allows page
56: * copying to be avoided (e.g. you can loan pages from objs/anons to
57: * the mbuf system).
58: *
59: * there are 3 types of loans possible:
60: * O->K uvm_object page to wired kernel page (e.g. mbuf data area)
1.16 thorpej 61: * A->K anon page to wired kernel page (e.g. mbuf data area)
1.1 mrg 62: * O->A uvm_object to anon loan (e.g. vnode page to an anon)
63: * note that it possible to have an O page loaned to both an A and K
64: * at the same time.
65: *
66: * loans are tracked by pg->loan_count. an O->A page will have both
67: * a uvm_object and a vm_anon, but PQ_ANON will not be set. this sort
68: * of page is considered "owned" by the uvm_object (not the anon).
69: *
1.16 thorpej 70: * each loan of a page to the kernel bumps the pg->wire_count. the
71: * kernel mappings for these pages will be read-only and wired. since
72: * the page will also be wired, it will not be a candidate for pageout,
73: * and thus will never be pmap_page_protect()'d with VM_PROT_NONE. a
74: * write fault in the kernel to one of these pages will not cause
75: * copy-on-write. instead, the page fault is considered fatal. this
76: * is because the kernel mapping will have no way to look up the
77: * object/anon which the page is owned by. this is a good side-effect,
78: * since a kernel write to a loaned page is an error.
1.1 mrg 79: *
1.29 chs 80: * owners that want to free their pages and discover that they are
1.1 mrg 81: * loaned out simply "disown" them (the page becomes an orphan). these
82: * pages should be freed when the last loan is dropped. in some cases
83: * an anon may "adopt" an orphaned page.
84: *
85: * locking: to read pg->loan_count either the owner or the page queues
86: * must be locked. to modify pg->loan_count, both the owner of the page
87: * and the PQs must be locked. pg->flags is (as always) locked by
88: * the owner of the page.
89: *
90: * note that locking from the "loaned" side is tricky since the object
91: * getting the loaned page has no reference to the page's owner and thus
92: * the owner could "die" at any time. in order to prevent the owner
93: * from dying the page queues should be locked. this forces us to sometimes
94: * use "try" locking.
95: *
96: * loans are typically broken by the following events:
1.29 chs 97: * 1. user-level xwrite fault to a loaned page
1.1 mrg 98: * 2. pageout of clean+inactive O->A loaned page
99: * 3. owner frees page (e.g. pager flush)
100: *
101: * note that loaning a page causes all mappings of the page to become
102: * read-only (via pmap_page_protect). this could have an unexpected
1.16 thorpej 103: * effect on normal "wired" pages if one is not careful (XXX).
1.1 mrg 104: */
105:
106: /*
107: * local prototypes
108: */
109:
1.50 junyoung 110: static int uvm_loananon(struct uvm_faultinfo *, void ***,
111: int, struct vm_anon *);
112: static int uvm_loanuobj(struct uvm_faultinfo *, void ***,
113: int, vaddr_t);
114: static int uvm_loanzero(struct uvm_faultinfo *, void ***, int);
115: static void uvm_unloananon(struct vm_anon **, int);
116: static void uvm_unloanpage(struct vm_page **, int);
1.51 yamt 117: static int uvm_loanpage(struct vm_page **, int);
1.33 jdolecek 118:
1.1 mrg 119:
120: /*
121: * inlines
122: */
123:
124: /*
125: * uvm_loanentry: loan out pages in a map entry (helper fn for uvm_loan())
126: *
127: * => "ufi" is the result of a successful map lookup (meaning that
1.31 chuck 128: * on entry the map is locked by the caller)
1.28 chuck 129: * => we may unlock and then relock the map if needed (for I/O)
1.1 mrg 130: * => we put our output result in "output"
1.31 chuck 131: * => we always return with the map unlocked
1.28 chuck 132: * => possible return values:
133: * -1 == error, map is unlocked
134: * 0 == map relock error (try again!), map is unlocked
1.31 chuck 135: * >0 == number of pages we loaned, map is unlocked
1.55 thorpej 136: *
137: * NOTE: We can live with this being an inline, because it is only called
138: * from one place.
1.1 mrg 139: */
140:
1.57 perry 141: static inline int
1.54 thorpej 142: uvm_loanentry(struct uvm_faultinfo *ufi, void ***output, int flags)
1.1 mrg 143: {
1.10 chuck 144: vaddr_t curaddr = ufi->orig_rvaddr;
1.9 eeh 145: vsize_t togo = ufi->size;
1.6 mrg 146: struct vm_aref *aref = &ufi->entry->aref;
147: struct uvm_object *uobj = ufi->entry->object.uvm_obj;
148: struct vm_anon *anon;
149: int rv, result = 0;
150:
1.52 yamt 151: UVMHIST_FUNC(__func__); UVMHIST_CALLED(loanhist);
152:
1.6 mrg 153: /*
1.28 chuck 154: * lock us the rest of the way down (we unlock before return)
1.6 mrg 155: */
156: if (aref->ar_amap)
1.13 chuck 157: amap_lock(aref->ar_amap);
1.6 mrg 158:
159: /*
160: * loop until done
161: */
162: while (togo) {
163:
164: /*
165: * find the page we want. check the anon layer first.
166: */
167:
168: if (aref->ar_amap) {
169: anon = amap_lookup(aref, curaddr - ufi->entry->start);
170: } else {
171: anon = NULL;
172: }
173:
1.28 chuck 174: /* locked: map, amap, uobj */
1.6 mrg 175: if (anon) {
176: rv = uvm_loananon(ufi, output, flags, anon);
177: } else if (uobj) {
178: rv = uvm_loanuobj(ufi, output, flags, curaddr);
179: } else if (UVM_ET_ISCOPYONWRITE(ufi->entry)) {
180: rv = uvm_loanzero(ufi, output, flags);
181: } else {
1.47 yamt 182: uvmfault_unlockall(ufi, aref->ar_amap, uobj, NULL);
1.34 chs 183: rv = -1;
1.6 mrg 184: }
1.31 chuck 185: /* locked: if (rv > 0) => map, amap, uobj [o.w. unlocked] */
1.65.4.1! ad 186: KASSERT(rv > 0 || aref->ar_amap == NULL ||
! 187: !mutex_owned(&aref->ar_amap->am_l));
! 188: KASSERT(rv > 0 || uobj == NULL ||
! 189: !mutex_owned(&uobj->vmobjlock));
1.6 mrg 190:
191: /* total failure */
1.52 yamt 192: if (rv < 0) {
193: UVMHIST_LOG(loanhist, "failure %d", rv, 0,0,0);
1.34 chs 194: return (-1);
1.52 yamt 195: }
1.6 mrg 196:
197: /* relock failed, need to do another lookup */
1.52 yamt 198: if (rv == 0) {
199: UVMHIST_LOG(loanhist, "relock failure %d", result
200: ,0,0,0);
1.34 chs 201: return (result);
1.52 yamt 202: }
1.6 mrg 203:
204: /*
205: * got it... advance to next page
206: */
1.34 chs 207:
1.6 mrg 208: result++;
209: togo -= PAGE_SIZE;
210: curaddr += PAGE_SIZE;
211: }
212:
213: /*
1.31 chuck 214: * unlock what we locked, unlock the maps and return
1.6 mrg 215: */
1.34 chs 216:
1.28 chuck 217: if (aref->ar_amap)
218: amap_unlock(aref->ar_amap);
1.65 thorpej 219: uvmfault_unlockmaps(ufi, false);
1.52 yamt 220: UVMHIST_LOG(loanhist, "done %d", result, 0,0,0);
1.34 chs 221: return (result);
1.1 mrg 222: }
223:
224: /*
225: * normal functions
226: */
227:
228: /*
1.28 chuck 229: * uvm_loan: loan pages in a map out to anons or to the kernel
1.29 chs 230: *
1.1 mrg 231: * => map should be unlocked
232: * => start and len should be multiples of PAGE_SIZE
233: * => result is either an array of anon's or vm_pages (depending on flags)
234: * => flag values: UVM_LOAN_TOANON - loan to anons
235: * UVM_LOAN_TOPAGE - loan to wired kernel page
236: * one and only one of these flags must be set!
1.28 chuck 237: * => returns 0 (success), or an appropriate error number
1.1 mrg 238: */
239:
1.6 mrg 240: int
1.54 thorpej 241: uvm_loan(struct vm_map *map, vaddr_t start, vsize_t len, void *v, int flags)
1.1 mrg 242: {
1.6 mrg 243: struct uvm_faultinfo ufi;
1.34 chs 244: void **result, **output;
1.25 chs 245: int rv, error;
1.6 mrg 246:
1.52 yamt 247: UVMHIST_FUNC(__func__); UVMHIST_CALLED(loanhist);
248:
1.6 mrg 249: /*
250: * ensure that one and only one of the flags is set
251: */
252:
1.25 chs 253: KASSERT(((flags & UVM_LOAN_TOANON) == 0) ^
254: ((flags & UVM_LOAN_TOPAGE) == 0));
255: KASSERT((map->flags & VM_MAP_INTRSAFE) == 0);
1.6 mrg 256:
257: /*
1.34 chs 258: * "output" is a pointer to the current place to put the loaned page.
1.6 mrg 259: */
260:
1.34 chs 261: result = v;
1.6 mrg 262: output = &result[0]; /* start at the beginning ... */
263:
264: /*
265: * while we've got pages to do
266: */
267:
268: while (len > 0) {
269:
270: /*
271: * fill in params for a call to uvmfault_lookup
272: */
273:
274: ufi.orig_map = map;
275: ufi.orig_rvaddr = start;
276: ufi.orig_size = len;
1.29 chs 277:
1.6 mrg 278: /*
279: * do the lookup, the only time this will fail is if we hit on
280: * an unmapped region (an error)
281: */
282:
1.65 thorpej 283: if (!uvmfault_lookup(&ufi, false)) {
1.25 chs 284: error = ENOENT;
1.6 mrg 285: goto fail;
1.25 chs 286: }
1.6 mrg 287:
288: /*
1.28 chuck 289: * map now locked. now do the loanout...
1.6 mrg 290: */
1.34 chs 291:
1.6 mrg 292: rv = uvm_loanentry(&ufi, &output, flags);
1.25 chs 293: if (rv < 0) {
1.28 chuck 294: /* all unlocked due to error */
1.25 chs 295: error = EINVAL;
1.6 mrg 296: goto fail;
1.25 chs 297: }
1.6 mrg 298:
299: /*
1.31 chuck 300: * done! the map is unlocked. advance, if possible.
1.28 chuck 301: *
1.50 junyoung 302: * XXXCDC: could be recoded to hold the map lock with
303: * smarter code (but it only happens on map entry
1.31 chuck 304: * boundaries, so it isn't that bad).
1.6 mrg 305: */
1.34 chs 306:
1.28 chuck 307: if (rv) {
308: rv <<= PAGE_SHIFT;
309: len -= rv;
310: start += rv;
311: }
1.6 mrg 312: }
1.52 yamt 313: UVMHIST_LOG(loanhist, "success", 0,0,0,0);
1.25 chs 314: return 0;
1.1 mrg 315:
316: fail:
1.6 mrg 317: /*
1.34 chs 318: * failed to complete loans. drop any loans and return failure code.
1.28 chuck 319: * map is already unlocked.
1.6 mrg 320: */
1.34 chs 321:
1.6 mrg 322: if (output - result) {
1.34 chs 323: if (flags & UVM_LOAN_TOANON) {
1.6 mrg 324: uvm_unloananon((struct vm_anon **)result,
1.34 chs 325: output - result);
326: } else {
1.6 mrg 327: uvm_unloanpage((struct vm_page **)result,
1.34 chs 328: output - result);
329: }
1.6 mrg 330: }
1.52 yamt 331: UVMHIST_LOG(loanhist, "error %d", error,0,0,0);
1.26 jdolecek 332: return (error);
1.1 mrg 333: }
334:
335: /*
336: * uvm_loananon: loan a page from an anon out
1.29 chs 337: *
1.28 chuck 338: * => called with map, amap, uobj locked
1.1 mrg 339: * => return value:
340: * -1 = fatal error, everything is unlocked, abort.
341: * 0 = lookup in ufi went stale, everything unlocked, relookup and
342: * try again
343: * 1 = got it, everything still locked
344: */
345:
1.6 mrg 346: int
1.54 thorpej 347: uvm_loananon(struct uvm_faultinfo *ufi, void ***output, int flags,
348: struct vm_anon *anon)
1.1 mrg 349: {
1.6 mrg 350: struct vm_page *pg;
1.34 chs 351: int error;
1.1 mrg 352:
1.52 yamt 353: UVMHIST_FUNC(__func__); UVMHIST_CALLED(loanhist);
354:
1.6 mrg 355: /*
1.28 chuck 356: * if we are loaning to "another" anon then it is easy, we just
1.6 mrg 357: * bump the reference count on the current anon and return a
1.28 chuck 358: * pointer to it (it becomes copy-on-write shared).
1.6 mrg 359: */
1.34 chs 360:
1.6 mrg 361: if (flags & UVM_LOAN_TOANON) {
1.65.4.1! ad 362: mutex_enter(&anon->an_lock);
1.53 yamt 363: pg = anon->an_page;
1.34 chs 364: if (pg && (pg->pqflags & PQ_ANON) != 0 && anon->an_ref == 1) {
1.51 yamt 365: if (pg->wire_count > 0) {
1.52 yamt 366: UVMHIST_LOG(loanhist, "->A wired %p", pg,0,0,0);
1.51 yamt 367: uvmfault_unlockall(ufi,
368: ufi->entry->aref.ar_amap,
369: ufi->entry->object.uvm_obj, anon);
370: return (-1);
371: }
1.19 chs 372: pmap_page_protect(pg, VM_PROT_READ);
1.34 chs 373: }
1.6 mrg 374: anon->an_ref++;
375: **output = anon;
1.34 chs 376: (*output)++;
1.65.4.1! ad 377: mutex_exit(&anon->an_lock);
1.52 yamt 378: UVMHIST_LOG(loanhist, "->A done", 0,0,0,0);
1.34 chs 379: return (1);
1.6 mrg 380: }
381:
382: /*
383: * we are loaning to a kernel-page. we need to get the page
384: * resident so we can wire it. uvmfault_anonget will handle
385: * this for us.
386: */
387:
1.65.4.1! ad 388: mutex_enter(&anon->an_lock);
1.34 chs 389: error = uvmfault_anonget(ufi, ufi->entry->aref.ar_amap, anon);
1.6 mrg 390:
391: /*
392: * if we were unable to get the anon, then uvmfault_anonget has
393: * unlocked everything and returned an error code.
394: */
1.34 chs 395:
396: if (error) {
1.52 yamt 397: UVMHIST_LOG(loanhist, "error %d", error,0,0,0);
1.6 mrg 398:
399: /* need to refault (i.e. refresh our lookup) ? */
1.34 chs 400: if (error == ERESTART) {
401: return (0);
402: }
1.6 mrg 403:
404: /* "try again"? sleep a bit and retry ... */
1.34 chs 405: if (error == EAGAIN) {
1.24 chs 406: tsleep(&lbolt, PVM, "loanagain", 0);
1.34 chs 407: return (0);
1.6 mrg 408: }
409:
410: /* otherwise flag it as an error */
1.34 chs 411: return (-1);
1.6 mrg 412: }
413:
414: /*
415: * we have the page and its owner locked: do the loan now.
416: */
417:
1.53 yamt 418: pg = anon->an_page;
1.65.4.1! ad 419: mutex_enter(&uvm_pageqlock);
1.51 yamt 420: if (pg->wire_count > 0) {
1.65.4.1! ad 421: mutex_exit(&uvm_pageqlock);
1.52 yamt 422: UVMHIST_LOG(loanhist, "->K wired %p", pg,0,0,0);
1.51 yamt 423: KASSERT(pg->uobject == NULL);
424: uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap,
1.58 yamt 425: NULL, anon);
1.51 yamt 426: return (-1);
427: }
1.34 chs 428: if (pg->loan_count == 0) {
1.19 chs 429: pmap_page_protect(pg, VM_PROT_READ);
1.34 chs 430: }
1.6 mrg 431: pg->loan_count++;
1.63 yamt 432: uvm_pageactivate(pg);
1.65.4.1! ad 433: mutex_exit(&uvm_pageqlock);
1.6 mrg 434: **output = pg;
1.34 chs 435: (*output)++;
1.6 mrg 436:
437: /* unlock anon and return success */
1.58 yamt 438: if (pg->uobject)
1.65.4.1! ad 439: mutex_exit(&pg->uobject->vmobjlock);
! 440: mutex_exit(&anon->an_lock);
1.52 yamt 441: UVMHIST_LOG(loanhist, "->K done", 0,0,0,0);
1.34 chs 442: return (1);
1.1 mrg 443: }
444:
445: /*
1.45 yamt 446: * uvm_loanpage: loan out pages to kernel (->K)
1.42 yamt 447: *
1.51 yamt 448: * => pages should be object-owned and the object should be locked.
449: * => in the case of error, the object might be unlocked and relocked.
450: * => caller should busy the pages beforehand.
451: * => pages will be unbusied.
452: * => fail with EBUSY if meet a wired page.
1.42 yamt 453: */
1.51 yamt 454: static int
1.54 thorpej 455: uvm_loanpage(struct vm_page **pgpp, int npages)
1.42 yamt 456: {
457: int i;
1.51 yamt 458: int error = 0;
1.42 yamt 459:
1.52 yamt 460: UVMHIST_FUNC(__func__); UVMHIST_CALLED(loanhist);
461:
1.42 yamt 462: for (i = 0; i < npages; i++) {
463: struct vm_page *pg = pgpp[i];
464:
465: KASSERT(pg->uobject != NULL);
1.51 yamt 466: KASSERT(pg->uobject == pgpp[0]->uobject);
1.42 yamt 467: KASSERT(!(pg->flags & (PG_RELEASED|PG_PAGEOUT)));
1.65.4.1! ad 468: KASSERT(mutex_owned(&pg->uobject->vmobjlock));
1.42 yamt 469: KASSERT(pg->flags & PG_BUSY);
470:
1.65.4.1! ad 471: mutex_enter(&uvm_pageqlock);
1.51 yamt 472: if (pg->wire_count > 0) {
1.65.4.1! ad 473: mutex_exit(&uvm_pageqlock);
1.52 yamt 474: UVMHIST_LOG(loanhist, "wired %p", pg,0,0,0);
1.51 yamt 475: error = EBUSY;
476: break;
477: }
1.42 yamt 478: if (pg->loan_count == 0) {
479: pmap_page_protect(pg, VM_PROT_READ);
480: }
481: pg->loan_count++;
1.63 yamt 482: uvm_pageactivate(pg);
1.65.4.1! ad 483: mutex_exit(&uvm_pageqlock);
1.42 yamt 484: }
1.51 yamt 485:
486: uvm_page_unbusy(pgpp, npages);
487:
488: if (error) {
489: /*
490: * backout what we've done
491: */
1.65.4.1! ad 492: kmutex_t *slock = &pgpp[0]->uobject->vmobjlock;
1.51 yamt 493:
1.65.4.1! ad 494: mutex_exit(slock);
1.51 yamt 495: uvm_unloan(pgpp, i, UVM_LOAN_TOPAGE);
1.65.4.1! ad 496: mutex_enter(slock);
1.51 yamt 497: }
498:
1.52 yamt 499: UVMHIST_LOG(loanhist, "done %d", error,0,0,0);
1.51 yamt 500: return error;
1.42 yamt 501: }
502:
503: /*
1.45 yamt 504: * XXX UBC temp limit
505: * number of pages to get at once.
506: * should be <= MAX_READ_AHEAD in genfs_vnops.c
507: */
508: #define UVM_LOAN_GET_CHUNK 16
509:
510: /*
1.46 yamt 511: * uvm_loanuobjpages: loan pages from a uobj out (O->K)
1.45 yamt 512: *
1.46 yamt 513: * => uobj shouldn't be locked. (we'll lock it)
1.51 yamt 514: * => fail with EBUSY if we meet a wired page.
1.45 yamt 515: */
516: int
1.54 thorpej 517: uvm_loanuobjpages(struct uvm_object *uobj, voff_t pgoff, int orignpages,
518: struct vm_page **origpgpp)
1.45 yamt 519: {
1.51 yamt 520: int ndone; /* # of pages loaned out */
1.45 yamt 521: struct vm_page **pgpp;
522: int error;
523: int i;
1.65.4.1! ad 524: kmutex_t *slock;
1.45 yamt 525:
526: pgpp = origpgpp;
527: for (ndone = 0; ndone < orignpages; ) {
528: int npages;
1.51 yamt 529: /* npendloan: # of pages busied but not loand out yet. */
1.45 yamt 530: int npendloan = 0xdead; /* XXX gcc */
531: reget:
532: npages = MIN(UVM_LOAN_GET_CHUNK, orignpages - ndone);
1.65.4.1! ad 533: mutex_enter(&uobj->vmobjlock);
1.45 yamt 534: error = (*uobj->pgops->pgo_get)(uobj,
535: pgoff + (ndone << PAGE_SHIFT), pgpp, &npages, 0,
536: VM_PROT_READ, 0, PGO_SYNCIO);
537: if (error == EAGAIN) {
538: tsleep(&lbolt, PVM, "nfsread", 0);
539: continue;
540: }
1.51 yamt 541: if (error)
542: goto fail;
1.45 yamt 543:
544: KASSERT(npages > 0);
1.50 junyoung 545:
1.45 yamt 546: /* loan and unbusy pages */
547: slock = NULL;
548: for (i = 0; i < npages; i++) {
1.65.4.1! ad 549: kmutex_t *nextslock; /* slock for next page */
1.45 yamt 550: struct vm_page *pg = *pgpp;
551:
552: /* XXX assuming that the page is owned by uobj */
553: KASSERT(pg->uobject != NULL);
554: nextslock = &pg->uobject->vmobjlock;
555:
556: if (slock != nextslock) {
557: if (slock) {
558: KASSERT(npendloan > 0);
1.51 yamt 559: error = uvm_loanpage(pgpp - npendloan,
1.45 yamt 560: npendloan);
1.65.4.1! ad 561: mutex_exit(slock);
1.51 yamt 562: if (error)
563: goto fail;
564: ndone += npendloan;
565: KASSERT(origpgpp + ndone == pgpp);
1.45 yamt 566: }
567: slock = nextslock;
1.51 yamt 568: npendloan = 0;
1.65.4.1! ad 569: mutex_enter(slock);
1.45 yamt 570: }
571:
1.51 yamt 572: if ((pg->flags & PG_RELEASED) != 0) {
1.45 yamt 573: /*
574: * release pages and try again.
575: */
1.65.4.1! ad 576: mutex_exit(slock);
1.45 yamt 577: for (; i < npages; i++) {
578: pg = pgpp[i];
579: slock = &pg->uobject->vmobjlock;
580:
1.65.4.1! ad 581: mutex_enter(slock);
! 582: mutex_enter(&uvm_pageqlock);
1.45 yamt 583: uvm_page_unbusy(&pg, 1);
1.65.4.1! ad 584: mutex_exit(&uvm_pageqlock);
! 585: mutex_exit(slock);
1.45 yamt 586: }
587: goto reget;
588: }
589:
590: npendloan++;
591: pgpp++;
1.51 yamt 592: KASSERT(origpgpp + ndone + npendloan == pgpp);
1.45 yamt 593: }
594: KASSERT(slock != NULL);
595: KASSERT(npendloan > 0);
1.51 yamt 596: error = uvm_loanpage(pgpp - npendloan, npendloan);
1.65.4.1! ad 597: mutex_exit(slock);
1.51 yamt 598: if (error)
599: goto fail;
600: ndone += npendloan;
601: KASSERT(origpgpp + ndone == pgpp);
1.45 yamt 602: }
603:
604: return 0;
1.51 yamt 605:
606: fail:
607: uvm_unloan(origpgpp, ndone, UVM_LOAN_TOPAGE);
608:
609: return error;
1.45 yamt 610: }
611:
612: /*
1.1 mrg 613: * uvm_loanuobj: loan a page from a uobj out
614: *
1.28 chuck 615: * => called with map, amap, uobj locked
1.1 mrg 616: * => return value:
617: * -1 = fatal error, everything is unlocked, abort.
618: * 0 = lookup in ufi went stale, everything unlocked, relookup and
619: * try again
620: * 1 = got it, everything still locked
621: */
622:
1.33 jdolecek 623: static int
1.54 thorpej 624: uvm_loanuobj(struct uvm_faultinfo *ufi, void ***output, int flags, vaddr_t va)
1.1 mrg 625: {
1.6 mrg 626: struct vm_amap *amap = ufi->entry->aref.ar_amap;
627: struct uvm_object *uobj = ufi->entry->object.uvm_obj;
628: struct vm_page *pg;
629: struct vm_anon *anon;
1.34 chs 630: int error, npages;
1.64 thorpej 631: bool locked;
1.6 mrg 632:
1.52 yamt 633: UVMHIST_FUNC(__func__); UVMHIST_CALLED(loanhist);
634:
1.6 mrg 635: /*
636: * first we must make sure the page is resident.
637: *
638: * XXXCDC: duplicate code with uvm_fault().
639: */
640:
1.65.4.1! ad 641: mutex_enter(&uobj->vmobjlock);
1.28 chuck 642: if (uobj->pgops->pgo_get) { /* try locked pgo_get */
1.6 mrg 643: npages = 1;
644: pg = NULL;
1.37 enami 645: error = (*uobj->pgops->pgo_get)(uobj,
646: va - ufi->entry->start + ufi->entry->offset,
1.6 mrg 647: &pg, &npages, 0, VM_PROT_READ, MADV_NORMAL, PGO_LOCKED);
648: } else {
1.34 chs 649: error = EIO; /* must have pgo_get op */
1.6 mrg 650: }
651:
652: /*
653: * check the result of the locked pgo_get. if there is a problem,
654: * then we fail the loan.
655: */
656:
1.34 chs 657: if (error && error != EBUSY) {
1.6 mrg 658: uvmfault_unlockall(ufi, amap, uobj, NULL);
1.34 chs 659: return (-1);
1.6 mrg 660: }
661:
662: /*
663: * if we need to unlock for I/O, do so now.
664: */
665:
1.34 chs 666: if (error == EBUSY) {
1.6 mrg 667: uvmfault_unlockall(ufi, amap, NULL, NULL);
1.34 chs 668:
669: /* locked: uobj */
1.6 mrg 670: npages = 1;
1.37 enami 671: error = (*uobj->pgops->pgo_get)(uobj,
672: va - ufi->entry->start + ufi->entry->offset,
1.30 chs 673: &pg, &npages, 0, VM_PROT_READ, MADV_NORMAL, PGO_SYNCIO);
1.6 mrg 674: /* locked: <nothing> */
1.29 chs 675:
1.34 chs 676: if (error) {
1.43 yamt 677: if (error == EAGAIN) {
1.24 chs 678: tsleep(&lbolt, PVM, "fltagain2", 0);
1.34 chs 679: return (0);
1.29 chs 680: }
1.34 chs 681: return (-1);
1.6 mrg 682: }
683:
684: /*
685: * pgo_get was a success. attempt to relock everything.
686: */
687:
688: locked = uvmfault_relock(ufi);
689: if (locked && amap)
1.13 chuck 690: amap_lock(amap);
1.58 yamt 691: uobj = pg->uobject;
1.65.4.1! ad 692: mutex_enter(&uobj->vmobjlock);
1.6 mrg 693:
694: /*
695: * verify that the page has not be released and re-verify
696: * that amap slot is still free. if there is a problem we
697: * drop our lock (thus force a lookup refresh/retry).
698: */
1.29 chs 699:
1.6 mrg 700: if ((pg->flags & PG_RELEASED) != 0 ||
701: (locked && amap && amap_lookup(&ufi->entry->aref,
1.10 chuck 702: ufi->orig_rvaddr - ufi->entry->start))) {
1.6 mrg 703: if (locked)
704: uvmfault_unlockall(ufi, amap, NULL, NULL);
1.65 thorpej 705: locked = false;
1.29 chs 706: }
1.6 mrg 707:
708: /*
709: * didn't get the lock? release the page and retry.
710: */
711:
1.65 thorpej 712: if (locked == false) {
1.32 chs 713: if (pg->flags & PG_WANTED) {
1.18 thorpej 714: wakeup(pg);
1.32 chs 715: }
1.6 mrg 716: if (pg->flags & PG_RELEASED) {
1.65.4.1! ad 717: mutex_enter(&uvm_pageqlock);
1.32 chs 718: uvm_pagefree(pg);
1.65.4.1! ad 719: mutex_exit(&uvm_pageqlock);
! 720: mutex_exit(&uobj->vmobjlock);
1.6 mrg 721: return (0);
722: }
1.65.4.1! ad 723: mutex_enter(&uvm_pageqlock);
1.32 chs 724: uvm_pageactivate(pg);
1.65.4.1! ad 725: mutex_exit(&uvm_pageqlock);
1.6 mrg 726: pg->flags &= ~(PG_BUSY|PG_WANTED);
727: UVM_PAGE_OWN(pg, NULL);
1.65.4.1! ad 728: mutex_exit(&uobj->vmobjlock);
1.6 mrg 729: return (0);
730: }
731: }
732:
1.58 yamt 733: KASSERT(uobj == pg->uobject);
734:
1.6 mrg 735: /*
736: * at this point we have the page we want ("pg") marked PG_BUSY for us
1.34 chs 737: * and we have all data structures locked. do the loanout. page can
1.6 mrg 738: * not be PG_RELEASED (we caught this above).
739: */
740:
1.34 chs 741: if ((flags & UVM_LOAN_TOANON) == 0) {
1.51 yamt 742: if (uvm_loanpage(&pg, 1)) {
743: uvmfault_unlockall(ufi, amap, uobj, NULL);
744: return (-1);
745: }
1.65.4.1! ad 746: mutex_exit(&uobj->vmobjlock);
1.34 chs 747: **output = pg;
748: (*output)++;
749: return (1);
1.6 mrg 750: }
751:
752: /*
753: * must be a loan to an anon. check to see if there is already
754: * an anon associated with this page. if so, then just return
1.29 chs 755: * a reference to this object. the page should already be
1.6 mrg 756: * mapped read-only because it is already on loan.
757: */
758:
759: if (pg->uanon) {
760: anon = pg->uanon;
1.65.4.1! ad 761: mutex_enter(&anon->an_lock);
1.6 mrg 762: anon->an_ref++;
1.65.4.1! ad 763: mutex_exit(&anon->an_lock);
1.34 chs 764: if (pg->flags & PG_WANTED) {
1.18 thorpej 765: wakeup(pg);
1.34 chs 766: }
1.6 mrg 767: pg->flags &= ~(PG_WANTED|PG_BUSY);
768: UVM_PAGE_OWN(pg, NULL);
1.65.4.1! ad 769: mutex_exit(&uobj->vmobjlock);
1.34 chs 770: **output = anon;
771: (*output)++;
772: return (1);
1.6 mrg 773: }
1.29 chs 774:
1.6 mrg 775: /*
776: * need to allocate a new anon
777: */
778:
779: anon = uvm_analloc();
1.34 chs 780: if (anon == NULL) {
1.51 yamt 781: goto fail;
1.6 mrg 782: }
1.53 yamt 783: anon->an_page = pg;
1.6 mrg 784: pg->uanon = anon;
1.65.4.1! ad 785: mutex_enter(&uvm_pageqlock);
1.51 yamt 786: if (pg->wire_count > 0) {
1.65.4.1! ad 787: mutex_exit(&uvm_pageqlock);
1.52 yamt 788: UVMHIST_LOG(loanhist, "wired %p", pg,0,0,0);
1.51 yamt 789: pg->uanon = NULL;
1.53 yamt 790: anon->an_page = NULL;
1.51 yamt 791: anon->an_ref--;
1.65.4.1! ad 792: mutex_exit(&anon->an_lock);
1.51 yamt 793: uvm_anfree(anon);
794: goto fail;
795: }
1.34 chs 796: if (pg->loan_count == 0) {
1.19 chs 797: pmap_page_protect(pg, VM_PROT_READ);
1.34 chs 798: }
1.6 mrg 799: pg->loan_count++;
800: uvm_pageactivate(pg);
1.65.4.1! ad 801: mutex_exit(&uvm_pageqlock);
1.34 chs 802: if (pg->flags & PG_WANTED) {
1.18 thorpej 803: wakeup(pg);
1.34 chs 804: }
1.6 mrg 805: pg->flags &= ~(PG_WANTED|PG_BUSY);
806: UVM_PAGE_OWN(pg, NULL);
1.65.4.1! ad 807: mutex_exit(&uobj->vmobjlock);
! 808: mutex_exit(&anon->an_lock);
1.34 chs 809: **output = anon;
810: (*output)++;
811: return (1);
1.51 yamt 812:
813: fail:
1.52 yamt 814: UVMHIST_LOG(loanhist, "fail", 0,0,0,0);
1.51 yamt 815: /*
816: * unlock everything and bail out.
817: */
818: if (pg->flags & PG_WANTED) {
819: wakeup(pg);
820: }
821: pg->flags &= ~(PG_WANTED|PG_BUSY);
822: UVM_PAGE_OWN(pg, NULL);
823: uvmfault_unlockall(ufi, amap, uobj, NULL);
824: return (-1);
1.1 mrg 825: }
826:
827: /*
1.40 thorpej 828: * uvm_loanzero: loan a zero-fill page out
1.1 mrg 829: *
1.28 chuck 830: * => called with map, amap, uobj locked
1.1 mrg 831: * => return value:
832: * -1 = fatal error, everything is unlocked, abort.
833: * 0 = lookup in ufi went stale, everything unlocked, relookup and
834: * try again
835: * 1 = got it, everything still locked
836: */
837:
1.40 thorpej 838: static struct uvm_object uvm_loanzero_object;
839:
1.33 jdolecek 840: static int
1.54 thorpej 841: uvm_loanzero(struct uvm_faultinfo *ufi, void ***output, int flags)
1.1 mrg 842: {
1.6 mrg 843: struct vm_anon *anon;
844: struct vm_page *pg;
1.34 chs 845: struct vm_amap *amap = ufi->entry->aref.ar_amap;
1.1 mrg 846:
1.52 yamt 847: UVMHIST_FUNC(__func__); UVMHIST_CALLED(loanhist);
1.44 yamt 848: again:
1.65.4.1! ad 849: mutex_enter(&uvm_loanzero_object.vmobjlock);
1.40 thorpej 850:
851: /*
852: * first, get ahold of our single zero page.
853: */
854:
855: if (__predict_false((pg =
856: TAILQ_FIRST(&uvm_loanzero_object.memq)) == NULL)) {
857: while ((pg = uvm_pagealloc(&uvm_loanzero_object, 0, NULL,
858: UVM_PGA_ZERO)) == NULL) {
1.65.4.1! ad 859: mutex_exit(&uvm_loanzero_object.vmobjlock);
1.58 yamt 860: uvmfault_unlockall(ufi, amap, NULL, NULL);
1.40 thorpej 861: uvm_wait("loanzero");
1.34 chs 862: if (!uvmfault_relock(ufi)) {
863: return (0);
864: }
865: if (amap) {
866: amap_lock(amap);
867: }
1.44 yamt 868: goto again;
1.6 mrg 869: }
1.29 chs 870:
1.40 thorpej 871: /* got a zero'd page. */
872: pg->flags &= ~(PG_WANTED|PG_BUSY|PG_FAKE);
873: pg->flags |= PG_RDONLY;
1.65.4.1! ad 874: mutex_enter(&uvm_pageqlock);
1.44 yamt 875: uvm_pageactivate(pg);
1.65.4.1! ad 876: mutex_exit(&uvm_pageqlock);
1.6 mrg 877: UVM_PAGE_OWN(pg, NULL);
1.40 thorpej 878: }
879:
880: if ((flags & UVM_LOAN_TOANON) == 0) { /* loaning to kernel-page */
1.65.4.1! ad 881: mutex_enter(&uvm_pageqlock);
1.40 thorpej 882: pg->loan_count++;
1.65.4.1! ad 883: mutex_exit(&uvm_pageqlock);
! 884: mutex_exit(&uvm_loanzero_object.vmobjlock);
1.6 mrg 885: **output = pg;
1.34 chs 886: (*output)++;
887: return (1);
1.6 mrg 888: }
889:
1.40 thorpej 890: /*
891: * loaning to an anon. check to see if there is already an anon
892: * associated with this page. if so, then just return a reference
893: * to this object.
894: */
1.23 thorpej 895:
1.40 thorpej 896: if (pg->uanon) {
897: anon = pg->uanon;
1.65.4.1! ad 898: mutex_enter(&anon->an_lock);
1.40 thorpej 899: anon->an_ref++;
1.65.4.1! ad 900: mutex_exit(&anon->an_lock);
! 901: mutex_exit(&uvm_loanzero_object.vmobjlock);
1.40 thorpej 902: **output = anon;
903: (*output)++;
904: return (1);
905: }
1.23 thorpej 906:
1.40 thorpej 907: /*
908: * need to allocate a new anon
909: */
1.34 chs 910:
1.40 thorpej 911: anon = uvm_analloc();
912: if (anon == NULL) {
913: /* out of swap causes us to fail */
1.65.4.1! ad 914: mutex_exit(&uvm_loanzero_object.vmobjlock);
1.58 yamt 915: uvmfault_unlockall(ufi, amap, NULL, NULL);
1.40 thorpej 916: return (-1);
1.6 mrg 917: }
1.53 yamt 918: anon->an_page = pg;
1.40 thorpej 919: pg->uanon = anon;
1.65.4.1! ad 920: mutex_enter(&uvm_pageqlock);
1.40 thorpej 921: pg->loan_count++;
1.6 mrg 922: uvm_pageactivate(pg);
1.65.4.1! ad 923: mutex_exit(&uvm_pageqlock);
! 924: mutex_exit(&anon->an_lock);
! 925: mutex_exit(&uvm_loanzero_object.vmobjlock);
1.6 mrg 926: **output = anon;
1.34 chs 927: (*output)++;
928: return (1);
1.1 mrg 929: }
930:
931:
932: /*
933: * uvm_unloananon: kill loans on anons (basically a normal ref drop)
934: *
935: * => we expect all our resources to be unlocked
936: */
937:
1.33 jdolecek 938: static void
1.54 thorpej 939: uvm_unloananon(struct vm_anon **aloans, int nanons)
1.1 mrg 940: {
1.6 mrg 941: struct vm_anon *anon;
1.1 mrg 942:
1.6 mrg 943: while (nanons-- > 0) {
1.12 chs 944: int refs;
1.1 mrg 945:
1.6 mrg 946: anon = *aloans++;
1.65.4.1! ad 947: mutex_enter(&anon->an_lock);
1.12 chs 948: refs = --anon->an_ref;
1.65.4.1! ad 949: mutex_exit(&anon->an_lock);
1.1 mrg 950:
1.12 chs 951: if (refs == 0) {
1.34 chs 952: uvm_anfree(anon);
1.6 mrg 953: }
954: }
1.1 mrg 955: }
956:
/*
 * uvm_unloanpage: kill loans on pages loaned out to the kernel
 *
 * => we expect all our resources to be unlocked
 */

static void
uvm_unloanpage(struct vm_page **ploans, int npages)
{
	struct vm_page *pg;
	kmutex_t *slock;

	mutex_enter(&uvm_pageqlock);
	while (npages-- > 0) {
		pg = *ploans++;

		/*
		 * do a little dance to acquire the object or anon lock
		 * as appropriate.  we are locking in the wrong order
		 * (the page owner's lock normally comes before
		 * uvm_pageqlock), so we have to do a try-lock here.
		 * on failure, briefly yield uvm_pageqlock so the thread
		 * holding the owner's lock can make progress, then retry.
		 * the page's owner may change while the queue lock is
		 * dropped, so re-read uobject/uanon on every iteration.
		 * a page with no owner at all needs no owner lock
		 * (slock stays NULL).
		 */

		slock = NULL;
		while (pg->uobject != NULL || pg->uanon != NULL) {
			if (pg->uobject != NULL) {
				slock = &pg->uobject->vmobjlock;
			} else {
				slock = &pg->uanon->an_lock;
			}
			if (mutex_tryenter(slock)) {
				break;
			}
			/* yield the queue lock, then try again. */
			mutex_exit(&uvm_pageqlock);
			mutex_enter(&uvm_pageqlock);
			slock = NULL;
		}

		/*
		 * drop our loan.  if page is owned by an anon but
		 * PQ_ANON is not set, the page was loaned to the anon
		 * from an object which dropped ownership, so resolve
		 * this by turning the anon's loan into real ownership
		 * (ie. decrement loan_count again and set PQ_ANON).
		 * after all this, if there are no loans left, put the
		 * page back on a paging queue (if the page is owned by
		 * an anon) or free it (if the page is now unowned).
		 */

		KASSERT(pg->loan_count > 0);
		pg->loan_count--;
		if (pg->uobject == NULL && pg->uanon != NULL &&
		    (pg->pqflags & PQ_ANON) == 0) {
			KASSERT(pg->loan_count > 0);
			pg->loan_count--;
			pg->pqflags |= PQ_ANON;
		}
		if (pg->loan_count == 0 && pg->uobject == NULL &&
		    pg->uanon == NULL) {
			/* unowned and unloaned: nobody can hold it busy. */
			KASSERT((pg->flags & PG_BUSY) == 0);
			uvm_pagefree(pg);
		}
		if (slock != NULL) {
			mutex_exit(slock);
		}
	}
	mutex_exit(&uvm_pageqlock);
}
1024:
1.33 jdolecek 1025: /*
1.34 chs 1026: * uvm_unloan: kill loans on pages or anons.
1.33 jdolecek 1027: */
1.34 chs 1028:
1.33 jdolecek 1029: void
1.34 chs 1030: uvm_unloan(void *v, int npages, int flags)
1.33 jdolecek 1031: {
1.34 chs 1032: if (flags & UVM_LOAN_TOANON) {
1033: uvm_unloananon(v, npages);
1034: } else {
1035: uvm_unloanpage(v, npages);
1036: }
1.40 thorpej 1037: }
1038:
/*
 * Minimal pager for uvm_loanzero_object.  We need to provide a "put"
 * method, because the page can end up on a paging queue, and the
 * page daemon will want to call pgo_put when it encounters the page
 * on the inactive list.
 *
 * => called with the object locked; the lock is consumed (released
 *    on every return path).
 * => always succeeds (returns 0); the zero page is never freed here.
 */

static int
ulz_put(struct uvm_object *uobj, voff_t start, voff_t stop, int flags)
{
	struct vm_page *pg;

	KDASSERT(uobj == &uvm_loanzero_object);

	/*
	 * Don't need to do any work here if we're not freeing pages.
	 */

	if ((flags & PGO_FREE) == 0) {
		mutex_exit(&uobj->vmobjlock);
		return 0;
	}

	/*
	 * we don't actually want to ever free the uvm_loanzero_page, so
	 * just reactivate or dequeue it.  the object holds exactly one
	 * page (asserted below), so TAILQ_FIRST is that page.
	 */

	pg = TAILQ_FIRST(&uobj->memq);
	KASSERT(pg != NULL);
	KASSERT(TAILQ_NEXT(pg, listq) == NULL);

	mutex_enter(&uvm_pageqlock);
	if (pg->uanon)
		/* still referenced by an anon: keep it circulating. */
		uvm_pageactivate(pg);
	else
		/* no anon reference left: take it off the paging queues. */
		uvm_pagedequeue(pg);
	mutex_exit(&uvm_pageqlock);

	mutex_exit(&uobj->vmobjlock);
	return 0;
}
1081:
/*
 * Pager operations vector for uvm_loanzero_object.  Only the "put"
 * entry point is provided (ulz_put above); the remaining operations
 * are left NULL and must never be invoked for this object.
 */
static struct uvm_pagerops ulz_pager = {
	NULL,		/* init */
	NULL,		/* reference */
	NULL,		/* detach */
	NULL,		/* fault */
	NULL,		/* get */
	ulz_put,	/* put */
};
1090:
/*
 * uvm_loan_init(): initialize the uvm_loan() facility.
 *
 * => sets up uvm_loanzero_object (the object that hands out the
 *    shared zero page) and the UVMHIST trace buffer for this module.
 */

void
uvm_loan_init(void)
{

	/* initialize the zero-loan object: lock, empty page list, pager. */
	mutex_init(&uvm_loanzero_object.vmobjlock, MUTEX_DEFAULT, IPL_NONE);
	TAILQ_INIT(&uvm_loanzero_object.memq);
	uvm_loanzero_object.pgops = &ulz_pager;

	/* 300-entry UVMHIST buffer for tracing loan activity. */
	UVMHIST_INIT(loanhist, 300);
}
1105:
/*
 * uvm_loanbreak: break loan on a uobj page
 *
 * => called with uobj locked
 * => the page should be busy
 * => return value:
 *	newly allocated page (PG_BUSY, installed in the object in
 *	place of uobjpage) if succeeded
 *	NULL if a replacement page could not be allocated (the
 *	original page is left untouched)
 */
struct vm_page *
uvm_loanbreak(struct vm_page *uobjpage)
{
	struct vm_page *pg;
#ifdef DIAGNOSTIC
	/* used only by the KASSERTs below, which are DIAGNOSTIC-only. */
	struct uvm_object *uobj = uobjpage->uobject;
#endif

	KASSERT(uobj != NULL);
	KASSERT(mutex_owned(&uobj->vmobjlock));
	KASSERT(uobjpage->flags & PG_BUSY);

	/* alloc new un-owned page */
	pg = uvm_pagealloc(NULL, 0, NULL, 0);
	if (pg == NULL)
		return NULL;

	/*
	 * copy the data from the old page to the new
	 * one and clear the fake flags on the new page (keep it busy).
	 * force a reload of the old page by clearing it from all
	 * pmaps.
	 * transfer dirtiness of the old page to the new page.
	 * then lock the page queues to rename the pages.
	 */

	uvm_pagecopy(uobjpage, pg);	/* old -> new */
	pg->flags &= ~PG_FAKE;
	pmap_page_protect(uobjpage, VM_PROT_NONE);
	/*
	 * the copy may stay clean only if the old page was clean both
	 * at the UVM level (PG_CLEAN) and in the pmap (pmap_clear_modify
	 * reporting no modification).  otherwise the new page inherits
	 * the dirtiness that uvm_pagecopy already recorded.
	 */
	if ((uobjpage->flags & PG_CLEAN) != 0 && !pmap_clear_modify(uobjpage)) {
		pmap_clear_modify(pg);
		pg->flags |= PG_CLEAN;
	} else {
		/* uvm_pagecopy marked it dirty */
		KASSERT((pg->flags & PG_CLEAN) == 0);
		/* an object with a dirty page should be dirty. */
		KASSERT(!UVM_OBJ_IS_CLEAN(uobj));
	}
	if (uobjpage->flags & PG_WANTED)
		wakeup(uobjpage);
	/* uobj still locked */
	uobjpage->flags &= ~(PG_WANTED|PG_BUSY);
	UVM_PAGE_OWN(uobjpage, NULL);

	mutex_enter(&uvm_pageqlock);

	/*
	 * replace uobjpage with new page.
	 */

	uvm_pagereplace(uobjpage, pg);

	/*
	 * if the page is no longer referenced by
	 * an anon (i.e. we are breaking an O->K
	 * loan), then remove it from any pageq's.
	 */
	if (uobjpage->uanon == NULL)
		uvm_pagedequeue(uobjpage);

	/*
	 * at this point we have absolutely no
	 * control over uobjpage
	 */

	/* install new page */
	uvm_pageactivate(pg);
	mutex_exit(&uvm_pageqlock);

	/*
	 * done!  loan is broken and "pg" is
	 * PG_BUSY.  it can now replace uobjpage.
	 */

	return pg;
}
CVSweb <webmaster@jp.NetBSD.org>