Annotation of src/sys/uvm/uvm_loan.c, Revision 1.81.2.12
1.81.2.12! yamt 1: /* $NetBSD: uvm_loan.c,v 1.81.2.11 2012/01/11 00:09:51 yamt Exp $ */
1.1 mrg 2:
3: /*
4: * Copyright (c) 1997 Charles D. Cranor and Washington University.
5: * All rights reserved.
6: *
7: * Redistribution and use in source and binary forms, with or without
8: * modification, are permitted provided that the following conditions
9: * are met:
10: * 1. Redistributions of source code must retain the above copyright
11: * notice, this list of conditions and the following disclaimer.
12: * 2. Redistributions in binary form must reproduce the above copyright
13: * notice, this list of conditions and the following disclaimer in the
14: * documentation and/or other materials provided with the distribution.
15: *
16: * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17: * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18: * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19: * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20: * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21: * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22: * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23: * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24: * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25: * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1.4 mrg 26: *
27: * from: Id: uvm_loan.c,v 1.1.6.4 1998/02/06 05:08:43 chs Exp
1.1 mrg 28: */
29:
30: /*
31: * uvm_loan.c: page loanout handler
32: */
1.35 lukem 33:
34: #include <sys/cdefs.h>
1.81.2.12! yamt 35: __KERNEL_RCSID(0, "$NetBSD: uvm_loan.c,v 1.81.2.11 2012/01/11 00:09:51 yamt Exp $");
1.1 mrg 36:
37: #include <sys/param.h>
38: #include <sys/systm.h>
39: #include <sys/kernel.h>
1.81.2.6 yamt 40: #include <sys/atomic.h>
1.1 mrg 41: #include <sys/mman.h>
42:
43: #include <uvm/uvm.h>
44:
1.81.2.10 yamt 45: bool vm_loan_read = true;
1.81.2.5 yamt 46:
1.1 mrg 47: /*
1.29 chs 48: * "loaned" pages are pages which are (read-only, copy-on-write) loaned
1.1 mrg 49: * from the VM system to other parts of the kernel. this allows page
50: * copying to be avoided (e.g. you can loan pages from objs/anons to
51: * the mbuf system).
52: *
53: * there are 3 types of loans possible:
54: * O->K uvm_object page to wired kernel page (e.g. mbuf data area)
1.16 thorpej 55: * A->K anon page to wired kernel page (e.g. mbuf data area)
1.1 mrg 56: * O->A uvm_object to anon loan (e.g. vnode page to an anon)
              57: * note that it is possible to have an O page loaned to both an A and K
58: * at the same time.
59: *
60: * loans are tracked by pg->loan_count. an O->A page will have both
61: * a uvm_object and a vm_anon, but PQ_ANON will not be set. this sort
62: * of page is considered "owned" by the uvm_object (not the anon).
63: *
1.16 thorpej 64: * each loan of a page to the kernel bumps the pg->wire_count. the
65: * kernel mappings for these pages will be read-only and wired. since
66: * the page will also be wired, it will not be a candidate for pageout,
67: * and thus will never be pmap_page_protect()'d with VM_PROT_NONE. a
68: * write fault in the kernel to one of these pages will not cause
69: * copy-on-write. instead, the page fault is considered fatal. this
70: * is because the kernel mapping will have no way to look up the
71: * object/anon which the page is owned by. this is a good side-effect,
72: * since a kernel write to a loaned page is an error.
1.1 mrg 73: *
1.29 chs 74: * owners that want to free their pages and discover that they are
1.1 mrg 75: * loaned out simply "disown" them (the page becomes an orphan). these
76: * pages should be freed when the last loan is dropped. in some cases
77: * an anon may "adopt" an orphaned page.
78: *
79: * locking: to read pg->loan_count either the owner or the page queues
80: * must be locked. to modify pg->loan_count, both the owner of the page
81: * and the PQs must be locked. pg->flags is (as always) locked by
82: * the owner of the page.
83: *
84: * note that locking from the "loaned" side is tricky since the object
85: * getting the loaned page has no reference to the page's owner and thus
86: * the owner could "die" at any time. in order to prevent the owner
87: * from dying the page queues should be locked. this forces us to sometimes
88: * use "try" locking.
89: *
90: * loans are typically broken by the following events:
1.29      chs        91:  * 1. user-level write fault to a loaned page
1.1 mrg 92: * 2. pageout of clean+inactive O->A loaned page
93: * 3. owner frees page (e.g. pager flush)
94: *
95: * note that loaning a page causes all mappings of the page to become
96: * read-only (via pmap_page_protect). this could have an unexpected
1.16 thorpej 97: * effect on normal "wired" pages if one is not careful (XXX).
1.1 mrg 98: */
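
/*
 * Editorial sketch, not part of this file: a minimal ->K loan/unloan
 * cycle as a caller of the interface below might perform it.  Only
 * uvm_loan(), uvm_unloan() and UVM_LOAN_TOPAGE come from UVM; the
 * function example_loan_user_buf() and its arguments are hypothetical.
 */
static int
example_loan_user_buf(struct vm_map *map, vaddr_t va, vsize_t len,
    struct vm_page **pgs)
{
        int error;

        /* va and len must be multiples of PAGE_SIZE; map must be unlocked */
        error = uvm_loan(map, va, len, pgs, UVM_LOAN_TOPAGE);
        if (error)
                return error;

        /*
         * the loaned pages are now wired and all existing mappings of
         * them have been downgraded to read-only (see the comment above).
         */

        /* drop the loans once the data has been consumed */
        uvm_unloan(pgs, len >> PAGE_SHIFT, UVM_LOAN_TOPAGE);
        return 0;
}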
99:
100: /*
101: * local prototypes
102: */
103:
1.50 junyoung 104: static int uvm_loananon(struct uvm_faultinfo *, void ***,
105: int, struct vm_anon *);
106: static int uvm_loanuobj(struct uvm_faultinfo *, void ***,
107: int, vaddr_t);
108: static int uvm_loanzero(struct uvm_faultinfo *, void ***, int);
109: static void uvm_unloananon(struct vm_anon **, int);
110: static void uvm_unloanpage(struct vm_page **, int);
1.51 yamt 111: static int uvm_loanpage(struct vm_page **, int);
1.81.2.5 yamt 112: static int uvm_loanobj_read(struct vm_map *, vaddr_t, size_t,
113: struct uvm_object *, off_t);
1.33 jdolecek 114:
1.1 mrg 115:
116: /*
117: * inlines
118: */
119:
120: /*
121: * uvm_loanentry: loan out pages in a map entry (helper fn for uvm_loan())
122: *
123: * => "ufi" is the result of a successful map lookup (meaning that
1.31 chuck 124: * on entry the map is locked by the caller)
1.28 chuck 125: * => we may unlock and then relock the map if needed (for I/O)
1.1 mrg 126: * => we put our output result in "output"
1.31 chuck 127: * => we always return with the map unlocked
1.28 chuck 128: * => possible return values:
129: * -1 == error, map is unlocked
130: * 0 == map relock error (try again!), map is unlocked
1.31 chuck 131: * >0 == number of pages we loaned, map is unlocked
1.55 thorpej 132: *
133: * NOTE: We can live with this being an inline, because it is only called
134: * from one place.
1.1 mrg 135: */
136:
1.57 perry 137: static inline int
1.54 thorpej 138: uvm_loanentry(struct uvm_faultinfo *ufi, void ***output, int flags)
1.1 mrg 139: {
1.10 chuck 140: vaddr_t curaddr = ufi->orig_rvaddr;
1.9 eeh 141: vsize_t togo = ufi->size;
1.6 mrg 142: struct vm_aref *aref = &ufi->entry->aref;
143: struct uvm_object *uobj = ufi->entry->object.uvm_obj;
144: struct vm_anon *anon;
145: int rv, result = 0;
146:
1.52 yamt 147: UVMHIST_FUNC(__func__); UVMHIST_CALLED(loanhist);
148:
1.6 mrg 149: /*
1.28 chuck 150: * lock us the rest of the way down (we unlock before return)
1.6 mrg 151: */
1.80 rmind 152: if (aref->ar_amap) {
1.13 chuck 153: amap_lock(aref->ar_amap);
1.80 rmind 154: }
1.6 mrg 155:
156: /*
157: * loop until done
158: */
159: while (togo) {
160:
161: /*
162: * find the page we want. check the anon layer first.
163: */
164:
165: if (aref->ar_amap) {
166: anon = amap_lookup(aref, curaddr - ufi->entry->start);
167: } else {
168: anon = NULL;
169: }
170:
1.28 chuck 171: /* locked: map, amap, uobj */
1.6 mrg 172: if (anon) {
173: rv = uvm_loananon(ufi, output, flags, anon);
174: } else if (uobj) {
175: rv = uvm_loanuobj(ufi, output, flags, curaddr);
176: } else if (UVM_ET_ISCOPYONWRITE(ufi->entry)) {
177: rv = uvm_loanzero(ufi, output, flags);
178: } else {
1.80 rmind 179: uvmfault_unlockall(ufi, aref->ar_amap, uobj);
1.34 chs 180: rv = -1;
1.6 mrg 181: }
1.31 chuck 182: /* locked: if (rv > 0) => map, amap, uobj [o.w. unlocked] */
1.66 ad 183: KASSERT(rv > 0 || aref->ar_amap == NULL ||
1.80 rmind 184: !mutex_owned(aref->ar_amap->am_lock));
1.70 ad 185: KASSERT(rv > 0 || uobj == NULL ||
1.80 rmind 186: !mutex_owned(uobj->vmobjlock));
1.6 mrg 187:
188: /* total failure */
1.52 yamt 189: if (rv < 0) {
190: UVMHIST_LOG(loanhist, "failure %d", rv, 0,0,0);
1.34 chs 191: return (-1);
1.52 yamt 192: }
1.6 mrg 193:
194: /* relock failed, need to do another lookup */
1.52 yamt 195: if (rv == 0) {
196: UVMHIST_LOG(loanhist, "relock failure %d", result
197: ,0,0,0);
1.34 chs 198: return (result);
1.52 yamt 199: }
1.6 mrg 200:
201: /*
202: * got it... advance to next page
203: */
1.34 chs 204:
1.6 mrg 205: result++;
206: togo -= PAGE_SIZE;
207: curaddr += PAGE_SIZE;
208: }
209:
210: /*
1.31 chuck 211: * unlock what we locked, unlock the maps and return
1.6 mrg 212: */
1.34 chs 213:
1.80 rmind 214: if (aref->ar_amap) {
1.28 chuck 215: amap_unlock(aref->ar_amap);
1.80 rmind 216: }
1.65 thorpej 217: uvmfault_unlockmaps(ufi, false);
1.52 yamt 218: UVMHIST_LOG(loanhist, "done %d", result, 0,0,0);
1.34 chs 219: return (result);
1.1 mrg 220: }
221:
222: /*
223: * normal functions
224: */
225:
226: /*
1.28 chuck 227: * uvm_loan: loan pages in a map out to anons or to the kernel
1.29 chs 228: *
1.1 mrg 229: * => map should be unlocked
230: * => start and len should be multiples of PAGE_SIZE
              231:  * => result is either an array of anons or vm_pages (depending on flags)
232: * => flag values: UVM_LOAN_TOANON - loan to anons
233: * UVM_LOAN_TOPAGE - loan to wired kernel page
234: * one and only one of these flags must be set!
1.28 chuck 235: * => returns 0 (success), or an appropriate error number
1.1 mrg 236: */
237:
1.6 mrg 238: int
1.54 thorpej 239: uvm_loan(struct vm_map *map, vaddr_t start, vsize_t len, void *v, int flags)
1.1 mrg 240: {
1.6 mrg 241: struct uvm_faultinfo ufi;
1.34 chs 242: void **result, **output;
1.25 chs 243: int rv, error;
1.6 mrg 244:
1.52 yamt 245: UVMHIST_FUNC(__func__); UVMHIST_CALLED(loanhist);
246:
1.6 mrg 247: /*
248: * ensure that one and only one of the flags is set
249: */
250:
1.25 chs 251: KASSERT(((flags & UVM_LOAN_TOANON) == 0) ^
252: ((flags & UVM_LOAN_TOPAGE) == 0));
253: KASSERT((map->flags & VM_MAP_INTRSAFE) == 0);
1.6 mrg 254:
255: /*
1.34 chs 256: * "output" is a pointer to the current place to put the loaned page.
1.6 mrg 257: */
258:
1.34 chs 259: result = v;
1.6 mrg 260: output = &result[0]; /* start at the beginning ... */
261:
262: /*
263: * while we've got pages to do
264: */
265:
266: while (len > 0) {
267:
268: /*
269: * fill in params for a call to uvmfault_lookup
270: */
271:
272: ufi.orig_map = map;
273: ufi.orig_rvaddr = start;
274: ufi.orig_size = len;
1.29 chs 275:
1.6 mrg 276: /*
277: * do the lookup, the only time this will fail is if we hit on
278: * an unmapped region (an error)
279: */
280:
1.65 thorpej 281: if (!uvmfault_lookup(&ufi, false)) {
1.25 chs 282: error = ENOENT;
1.6 mrg 283: goto fail;
1.25 chs 284: }
1.6 mrg 285:
286: /*
1.28 chuck 287: * map now locked. now do the loanout...
1.6 mrg 288: */
1.34 chs 289:
1.6 mrg 290: rv = uvm_loanentry(&ufi, &output, flags);
1.25 chs 291: if (rv < 0) {
1.28 chuck 292: /* all unlocked due to error */
1.25 chs 293: error = EINVAL;
1.6 mrg 294: goto fail;
1.25 chs 295: }
1.6 mrg 296:
297: /*
1.31 chuck 298: * done! the map is unlocked. advance, if possible.
1.28 chuck 299: *
1.50 junyoung 300: * XXXCDC: could be recoded to hold the map lock with
301: * smarter code (but it only happens on map entry
1.31 chuck 302: * boundaries, so it isn't that bad).
1.6 mrg 303: */
1.34 chs 304:
1.28 chuck 305: if (rv) {
306: rv <<= PAGE_SHIFT;
307: len -= rv;
308: start += rv;
309: }
1.6 mrg 310: }
1.52 yamt 311: UVMHIST_LOG(loanhist, "success", 0,0,0,0);
1.25 chs 312: return 0;
1.1 mrg 313:
314: fail:
1.6 mrg 315: /*
1.34 chs 316: * failed to complete loans. drop any loans and return failure code.
1.28 chuck 317: * map is already unlocked.
1.6 mrg 318: */
1.34 chs 319:
1.6 mrg 320: if (output - result) {
1.34 chs 321: if (flags & UVM_LOAN_TOANON) {
1.6 mrg 322: uvm_unloananon((struct vm_anon **)result,
1.34 chs 323: output - result);
324: } else {
1.6 mrg 325: uvm_unloanpage((struct vm_page **)result,
1.34 chs 326: output - result);
327: }
1.6 mrg 328: }
1.52 yamt 329: UVMHIST_LOG(loanhist, "error %d", error,0,0,0);
1.26 jdolecek 330: return (error);
1.1 mrg 331: }
332:
333: /*
334: * uvm_loananon: loan a page from an anon out
1.29 chs 335: *
1.81.2.5 yamt 336: * => called with map, amap, anon locked
1.1 mrg 337: * => return value:
338: * -1 = fatal error, everything is unlocked, abort.
339: * 0 = lookup in ufi went stale, everything unlocked, relookup and
340: * try again
341: * 1 = got it, everything still locked
342: */
343:
1.6 mrg 344: int
1.54 thorpej 345: uvm_loananon(struct uvm_faultinfo *ufi, void ***output, int flags,
346: struct vm_anon *anon)
1.1 mrg 347: {
1.81.2.4 yamt 348: struct uvm_cpu *ucpu;
1.6 mrg 349: struct vm_page *pg;
1.34 chs 350: int error;
1.1 mrg 351:
1.52 yamt 352: UVMHIST_FUNC(__func__); UVMHIST_CALLED(loanhist);
353:
1.6 mrg 354: /*
1.28 chuck 355: * if we are loaning to "another" anon then it is easy, we just
1.6 mrg 356: * bump the reference count on the current anon and return a
1.28 chuck 357: * pointer to it (it becomes copy-on-write shared).
1.6 mrg 358: */
1.34 chs 359:
1.6 mrg 360: if (flags & UVM_LOAN_TOANON) {
1.80 rmind 361: KASSERT(mutex_owned(anon->an_lock));
1.53 yamt 362: pg = anon->an_page;
1.34 chs 363: if (pg && (pg->pqflags & PQ_ANON) != 0 && anon->an_ref == 1) {
1.51 yamt 364: if (pg->wire_count > 0) {
1.52 yamt 365: UVMHIST_LOG(loanhist, "->A wired %p", pg,0,0,0);
1.51 yamt 366: uvmfault_unlockall(ufi,
367: ufi->entry->aref.ar_amap,
1.80 rmind 368: ufi->entry->object.uvm_obj);
1.51 yamt 369: return (-1);
370: }
1.19 chs 371: pmap_page_protect(pg, VM_PROT_READ);
1.34 chs 372: }
1.6 mrg 373: anon->an_ref++;
374: **output = anon;
1.34 chs 375: (*output)++;
1.52 yamt 376: UVMHIST_LOG(loanhist, "->A done", 0,0,0,0);
1.34 chs 377: return (1);
1.6 mrg 378: }
379:
380: /*
381: * we are loaning to a kernel-page. we need to get the page
382: * resident so we can wire it. uvmfault_anonget will handle
383: * this for us.
384: */
385:
1.80 rmind 386: KASSERT(mutex_owned(anon->an_lock));
1.34 chs 387: error = uvmfault_anonget(ufi, ufi->entry->aref.ar_amap, anon);
1.6 mrg 388:
389: /*
390: * if we were unable to get the anon, then uvmfault_anonget has
391: * unlocked everything and returned an error code.
392: */
1.34 chs 393:
394: if (error) {
1.52 yamt 395: UVMHIST_LOG(loanhist, "error %d", error,0,0,0);
1.6 mrg 396:
397: /* need to refault (i.e. refresh our lookup) ? */
1.34 chs 398: if (error == ERESTART) {
399: return (0);
400: }
1.6 mrg 401:
402: /* "try again"? sleep a bit and retry ... */
1.34 chs 403: if (error == EAGAIN) {
1.74 pooka 404: kpause("loanagain", false, hz/2, NULL);
1.34 chs 405: return (0);
1.6 mrg 406: }
407:
408: /* otherwise flag it as an error */
1.34 chs 409: return (-1);
1.6 mrg 410: }
411:
412: /*
413: * we have the page and its owner locked: do the loan now.
414: */
415:
1.53 yamt 416: pg = anon->an_page;
1.70 ad 417: mutex_enter(&uvm_pageqlock);
1.51 yamt 418: if (pg->wire_count > 0) {
1.70 ad 419: mutex_exit(&uvm_pageqlock);
1.52 yamt 420: UVMHIST_LOG(loanhist, "->K wired %p", pg,0,0,0);
1.51 yamt 421: KASSERT(pg->uobject == NULL);
1.80 rmind 422: uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, NULL);
1.51 yamt 423: return (-1);
424: }
1.34 chs 425: if (pg->loan_count == 0) {
1.19 chs 426: pmap_page_protect(pg, VM_PROT_READ);
1.34 chs 427: }
1.6 mrg 428: pg->loan_count++;
1.63 yamt 429: uvm_pageactivate(pg);
1.70 ad 430: mutex_exit(&uvm_pageqlock);
1.6 mrg 431: **output = pg;
1.34 chs 432: (*output)++;
1.6 mrg 433:
1.81.2.4 yamt 434: ucpu = uvm_cpu_get();
1.81.2.5 yamt 435: if (pg->uobject != NULL) {
436: ucpu->loan_oa++;
437: } else {
438: ucpu->loan_anon++;
439: }
1.81.2.4 yamt 440: uvm_cpu_put(ucpu);
441:
1.52 yamt 442: UVMHIST_LOG(loanhist, "->K done", 0,0,0,0);
1.34 chs 443: return (1);
1.1 mrg 444: }
445:
446: /*
1.45 yamt 447: * uvm_loanpage: loan out pages to kernel (->K)
1.42 yamt 448: *
1.51 yamt 449: * => pages should be object-owned and the object should be locked.
450: * => in the case of error, the object might be unlocked and relocked.
451: * => caller should busy the pages beforehand.
452: * => pages will be unbusied.
              453:  * => fail with EBUSY if we meet a wired page.
1.42 yamt 454: */
1.51 yamt 455: static int
1.54 thorpej 456: uvm_loanpage(struct vm_page **pgpp, int npages)
1.42 yamt 457: {
1.81.2.4 yamt 458: struct uvm_cpu *ucpu;
1.42 yamt 459: int i;
1.51 yamt 460: int error = 0;
1.42 yamt 461:
1.52 yamt 462: UVMHIST_FUNC(__func__); UVMHIST_CALLED(loanhist);
463:
1.42 yamt 464: for (i = 0; i < npages; i++) {
465: struct vm_page *pg = pgpp[i];
466:
467: KASSERT(pg->uobject != NULL);
1.51 yamt 468: KASSERT(pg->uobject == pgpp[0]->uobject);
1.42 yamt 469: KASSERT(!(pg->flags & (PG_RELEASED|PG_PAGEOUT)));
1.80 rmind 470: KASSERT(mutex_owned(pg->uobject->vmobjlock));
1.42 yamt 471: KASSERT(pg->flags & PG_BUSY);
472:
1.70 ad 473: mutex_enter(&uvm_pageqlock);
1.51 yamt 474: if (pg->wire_count > 0) {
1.70 ad 475: mutex_exit(&uvm_pageqlock);
1.52 yamt 476: UVMHIST_LOG(loanhist, "wired %p", pg,0,0,0);
1.51 yamt 477: error = EBUSY;
478: break;
479: }
1.42 yamt 480: if (pg->loan_count == 0) {
481: pmap_page_protect(pg, VM_PROT_READ);
482: }
483: pg->loan_count++;
1.63 yamt 484: uvm_pageactivate(pg);
1.70 ad 485: mutex_exit(&uvm_pageqlock);
1.42 yamt 486: }
1.51 yamt 487:
488: uvm_page_unbusy(pgpp, npages);
489:
1.81.2.4 yamt 490: if (i > 0) {
491: ucpu = uvm_cpu_get();
492: ucpu->loan_obj += i;
493: uvm_cpu_put(ucpu);
494: if (error) {
495: /*
496: * backout what we've done
497: */
498: kmutex_t *slock = pgpp[0]->uobject->vmobjlock;
1.51 yamt 499:
1.81.2.4 yamt 500: mutex_exit(slock);
501: uvm_unloan(pgpp, i, UVM_LOAN_TOPAGE);
502: mutex_enter(slock);
503: }
1.51 yamt 504: }
505:
1.52 yamt 506: UVMHIST_LOG(loanhist, "done %d", error,0,0,0);
1.51 yamt 507: return error;
1.42 yamt 508: }
509:
510: /*
1.45 yamt 511: * XXX UBC temp limit
512: * number of pages to get at once.
513: * should be <= MAX_READ_AHEAD in genfs_vnops.c
514: */
515: #define UVM_LOAN_GET_CHUNK 16
516:
517: /*
1.46 yamt 518: * uvm_loanuobjpages: loan pages from a uobj out (O->K)
1.45 yamt 519: *
1.46 yamt 520: * => uobj shouldn't be locked. (we'll lock it)
1.51 yamt 521: * => fail with EBUSY if we meet a wired page.
1.45 yamt 522: */
523: int
1.54 thorpej 524: uvm_loanuobjpages(struct uvm_object *uobj, voff_t pgoff, int orignpages,
525: struct vm_page **origpgpp)
1.45 yamt 526: {
1.51 yamt 527: int ndone; /* # of pages loaned out */
1.45 yamt 528: struct vm_page **pgpp;
529: int error;
530: int i;
1.70 ad 531: kmutex_t *slock;
1.45 yamt 532:
533: pgpp = origpgpp;
534: for (ndone = 0; ndone < orignpages; ) {
535: int npages;
1.51      yamt      536: 		/* npendloan: # of pages busied but not loaned out yet. */
1.45 yamt 537: int npendloan = 0xdead; /* XXX gcc */
538: reget:
539: npages = MIN(UVM_LOAN_GET_CHUNK, orignpages - ndone);
1.80 rmind 540: mutex_enter(uobj->vmobjlock);
1.45 yamt 541: error = (*uobj->pgops->pgo_get)(uobj,
542: pgoff + (ndone << PAGE_SHIFT), pgpp, &npages, 0,
543: VM_PROT_READ, 0, PGO_SYNCIO);
544: if (error == EAGAIN) {
1.74 pooka 545: kpause("loanuopg", false, hz/2, NULL);
1.45 yamt 546: continue;
547: }
1.51 yamt 548: if (error)
549: goto fail;
1.45 yamt 550:
551: KASSERT(npages > 0);
1.50 junyoung 552:
1.45 yamt 553: /* loan and unbusy pages */
554: slock = NULL;
555: for (i = 0; i < npages; i++) {
1.70 ad 556: kmutex_t *nextslock; /* slock for next page */
1.45 yamt 557: struct vm_page *pg = *pgpp;
558:
559: /* XXX assuming that the page is owned by uobj */
560: KASSERT(pg->uobject != NULL);
1.80 rmind 561: nextslock = pg->uobject->vmobjlock;
1.45 yamt 562:
563: if (slock != nextslock) {
564: if (slock) {
565: KASSERT(npendloan > 0);
1.51 yamt 566: error = uvm_loanpage(pgpp - npendloan,
1.45 yamt 567: npendloan);
1.70 ad 568: mutex_exit(slock);
1.51 yamt 569: if (error)
570: goto fail;
571: ndone += npendloan;
572: KASSERT(origpgpp + ndone == pgpp);
1.45 yamt 573: }
574: slock = nextslock;
1.51 yamt 575: npendloan = 0;
1.70 ad 576: mutex_enter(slock);
1.45 yamt 577: }
578:
1.51 yamt 579: if ((pg->flags & PG_RELEASED) != 0) {
1.45 yamt 580: /*
581: * release pages and try again.
582: */
1.70 ad 583: mutex_exit(slock);
1.45 yamt 584: for (; i < npages; i++) {
585: pg = pgpp[i];
1.80 rmind 586: slock = pg->uobject->vmobjlock;
1.45 yamt 587:
1.70 ad 588: mutex_enter(slock);
589: mutex_enter(&uvm_pageqlock);
1.45 yamt 590: uvm_page_unbusy(&pg, 1);
1.70 ad 591: mutex_exit(&uvm_pageqlock);
592: mutex_exit(slock);
1.45 yamt 593: }
594: goto reget;
595: }
596:
597: npendloan++;
598: pgpp++;
1.51 yamt 599: KASSERT(origpgpp + ndone + npendloan == pgpp);
1.45 yamt 600: }
601: KASSERT(slock != NULL);
602: KASSERT(npendloan > 0);
1.51 yamt 603: error = uvm_loanpage(pgpp - npendloan, npendloan);
1.70 ad 604: mutex_exit(slock);
1.51 yamt 605: if (error)
606: goto fail;
607: ndone += npendloan;
608: KASSERT(origpgpp + ndone == pgpp);
1.45 yamt 609: }
610:
611: return 0;
1.51 yamt 612:
613: fail:
614: uvm_unloan(origpgpp, ndone, UVM_LOAN_TOPAGE);
615:
616: return error;
1.45 yamt 617: }
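
/*
 * Editorial sketch, not part of this file: loaning a range of pages
 * straight out of an object (e.g. a vnode's uobj) with
 * uvm_loanuobjpages() and dropping the loans with uvm_unloan().
 * example_loan_obj_range() is hypothetical; as noted above, the uobj
 * must be unlocked and a wired page makes the call fail with EBUSY.
 */
static int
example_loan_obj_range(struct uvm_object *uobj, voff_t off, int npages,
    struct vm_page **pgs)
{
        int error;

        error = uvm_loanuobjpages(uobj, off, npages, pgs);
        if (error)
                return error;           /* e.g. EBUSY for a wired page */

        /* ... hand the read-only pages to the consumer (e.g. mbufs) ... */

        uvm_unloan(pgs, npages, UVM_LOAN_TOPAGE);
        return 0;
}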
618:
619: /*
1.1 mrg 620: * uvm_loanuobj: loan a page from a uobj out
621: *
1.28 chuck 622: * => called with map, amap, uobj locked
1.1 mrg 623: * => return value:
624: * -1 = fatal error, everything is unlocked, abort.
625: * 0 = lookup in ufi went stale, everything unlocked, relookup and
626: * try again
627: * 1 = got it, everything still locked
628: */
629:
1.33 jdolecek 630: static int
1.54 thorpej 631: uvm_loanuobj(struct uvm_faultinfo *ufi, void ***output, int flags, vaddr_t va)
1.1 mrg 632: {
1.6 mrg 633: struct vm_amap *amap = ufi->entry->aref.ar_amap;
634: struct uvm_object *uobj = ufi->entry->object.uvm_obj;
635: struct vm_page *pg;
1.34 chs 636: int error, npages;
1.64 thorpej 637: bool locked;
1.6 mrg 638:
1.52 yamt 639: UVMHIST_FUNC(__func__); UVMHIST_CALLED(loanhist);
640:
1.6 mrg 641: /*
642: * first we must make sure the page is resident.
643: *
644: * XXXCDC: duplicate code with uvm_fault().
645: */
646:
1.77 uebayasi 647: /* locked: maps(read), amap(if there) */
1.80 rmind 648: mutex_enter(uobj->vmobjlock);
1.77 uebayasi 649: /* locked: maps(read), amap(if there), uobj */
650:
1.28 chuck 651: if (uobj->pgops->pgo_get) { /* try locked pgo_get */
1.6 mrg 652: npages = 1;
653: pg = NULL;
1.37 enami 654: error = (*uobj->pgops->pgo_get)(uobj,
655: va - ufi->entry->start + ufi->entry->offset,
1.6 mrg 656: &pg, &npages, 0, VM_PROT_READ, MADV_NORMAL, PGO_LOCKED);
657: } else {
1.34 chs 658: error = EIO; /* must have pgo_get op */
1.6 mrg 659: }
660:
661: /*
662: * check the result of the locked pgo_get. if there is a problem,
663: * then we fail the loan.
664: */
665:
1.34 chs 666: if (error && error != EBUSY) {
1.80 rmind 667: uvmfault_unlockall(ufi, amap, uobj);
1.34 chs 668: return (-1);
1.6 mrg 669: }
670:
671: /*
672: * if we need to unlock for I/O, do so now.
673: */
674:
1.34 chs 675: if (error == EBUSY) {
1.80 rmind 676: uvmfault_unlockall(ufi, amap, NULL);
1.34 chs 677:
678: /* locked: uobj */
1.6 mrg 679: npages = 1;
1.37 enami 680: error = (*uobj->pgops->pgo_get)(uobj,
681: va - ufi->entry->start + ufi->entry->offset,
1.30 chs 682: &pg, &npages, 0, VM_PROT_READ, MADV_NORMAL, PGO_SYNCIO);
1.6 mrg 683: /* locked: <nothing> */
1.29 chs 684:
1.34 chs 685: if (error) {
1.43 yamt 686: if (error == EAGAIN) {
1.74 pooka 687: kpause("fltagain2", false, hz/2, NULL);
1.34 chs 688: return (0);
1.29 chs 689: }
1.34 chs 690: return (-1);
1.6 mrg 691: }
692:
693: /*
694: * pgo_get was a success. attempt to relock everything.
695: */
696:
697: locked = uvmfault_relock(ufi);
698: if (locked && amap)
1.13 chuck 699: amap_lock(amap);
1.58 yamt 700: uobj = pg->uobject;
1.80 rmind 701: mutex_enter(uobj->vmobjlock);
1.6 mrg 702:
703: /*
              704: 		 * verify that the page has not been released and re-verify
705: * that amap slot is still free. if there is a problem we
706: * drop our lock (thus force a lookup refresh/retry).
707: */
1.29 chs 708:
1.6 mrg 709: if ((pg->flags & PG_RELEASED) != 0 ||
710: (locked && amap && amap_lookup(&ufi->entry->aref,
1.10 chuck 711: ufi->orig_rvaddr - ufi->entry->start))) {
1.6 mrg 712: if (locked)
1.80 rmind 713: uvmfault_unlockall(ufi, amap, NULL);
1.65 thorpej 714: locked = false;
1.29 chs 715: }
1.6 mrg 716:
717: /*
718: * didn't get the lock? release the page and retry.
719: */
720:
1.65 thorpej 721: if (locked == false) {
1.32 chs 722: if (pg->flags & PG_WANTED) {
1.18 thorpej 723: wakeup(pg);
1.32 chs 724: }
1.6 mrg 725: if (pg->flags & PG_RELEASED) {
1.70 ad 726: mutex_enter(&uvm_pageqlock);
1.32 chs 727: uvm_pagefree(pg);
1.70 ad 728: mutex_exit(&uvm_pageqlock);
1.80 rmind 729: mutex_exit(uobj->vmobjlock);
1.6 mrg 730: return (0);
731: }
1.70 ad 732: mutex_enter(&uvm_pageqlock);
1.32 chs 733: uvm_pageactivate(pg);
1.70 ad 734: mutex_exit(&uvm_pageqlock);
1.6 mrg 735: pg->flags &= ~(PG_BUSY|PG_WANTED);
736: UVM_PAGE_OWN(pg, NULL);
1.80 rmind 737: mutex_exit(uobj->vmobjlock);
1.6 mrg 738: return (0);
739: }
740: }
741:
1.58 yamt 742: KASSERT(uobj == pg->uobject);
743:
1.6 mrg 744: /*
745: * at this point we have the page we want ("pg") marked PG_BUSY for us
1.34 chs 746: * and we have all data structures locked. do the loanout. page can
1.6 mrg 747: * not be PG_RELEASED (we caught this above).
748: */
749:
1.34 chs 750: if ((flags & UVM_LOAN_TOANON) == 0) {
1.51 yamt 751: if (uvm_loanpage(&pg, 1)) {
1.80 rmind 752: uvmfault_unlockall(ufi, amap, uobj);
1.51 yamt 753: return (-1);
754: }
1.80 rmind 755: mutex_exit(uobj->vmobjlock);
1.34 chs 756: **output = pg;
757: (*output)++;
758: return (1);
1.6 mrg 759: }
760:
1.80 rmind 761: #ifdef notdef
1.6 mrg 762: /*
763: * must be a loan to an anon. check to see if there is already
764: * an anon associated with this page. if so, then just return
1.29 chs 765: * a reference to this object. the page should already be
1.6 mrg 766: * mapped read-only because it is already on loan.
767: */
768:
769: if (pg->uanon) {
1.81 rmind 770: /* XXX: locking */
1.6 mrg 771: anon = pg->uanon;
772: anon->an_ref++;
1.34 chs 773: if (pg->flags & PG_WANTED) {
1.18 thorpej 774: wakeup(pg);
1.34 chs 775: }
1.6 mrg 776: pg->flags &= ~(PG_WANTED|PG_BUSY);
777: UVM_PAGE_OWN(pg, NULL);
1.80 rmind 778: mutex_exit(uobj->vmobjlock);
1.34 chs 779: **output = anon;
780: (*output)++;
781: return (1);
1.6 mrg 782: }
1.29 chs 783:
1.6 mrg 784: /*
785: * need to allocate a new anon
786: */
787:
788: anon = uvm_analloc();
1.34 chs 789: if (anon == NULL) {
1.51 yamt 790: goto fail;
1.6 mrg 791: }
1.70 ad 792: mutex_enter(&uvm_pageqlock);
1.51 yamt 793: if (pg->wire_count > 0) {
1.70 ad 794: mutex_exit(&uvm_pageqlock);
1.52 yamt 795: UVMHIST_LOG(loanhist, "wired %p", pg,0,0,0);
1.51 yamt 796: goto fail;
797: }
1.34 chs 798: if (pg->loan_count == 0) {
1.19 chs 799: pmap_page_protect(pg, VM_PROT_READ);
1.34 chs 800: }
1.6 mrg 801: pg->loan_count++;
1.81 rmind 802: pg->uanon = anon;
803: anon->an_page = pg;
804: anon->an_lock = /* TODO: share amap lock */
1.6 mrg 805: uvm_pageactivate(pg);
1.70 ad 806: mutex_exit(&uvm_pageqlock);
1.34 chs 807: if (pg->flags & PG_WANTED) {
1.18 thorpej 808: wakeup(pg);
1.34 chs 809: }
1.6 mrg 810: pg->flags &= ~(PG_WANTED|PG_BUSY);
811: UVM_PAGE_OWN(pg, NULL);
1.80 rmind 812: mutex_exit(uobj->vmobjlock);
1.70 ad 813: mutex_exit(&anon->an_lock);
1.34 chs 814: **output = anon;
815: (*output)++;
816: return (1);
1.51 yamt 817:
818: fail:
1.52 yamt 819: UVMHIST_LOG(loanhist, "fail", 0,0,0,0);
1.51 yamt 820: /*
821: * unlock everything and bail out.
822: */
823: if (pg->flags & PG_WANTED) {
824: wakeup(pg);
825: }
826: pg->flags &= ~(PG_WANTED|PG_BUSY);
827: UVM_PAGE_OWN(pg, NULL);
828: uvmfault_unlockall(ufi, amap, uobj, NULL);
1.81 rmind 829: if (anon) {
830: anon->an_ref--;
831: uvm_anon_free(anon);
832: }
1.80 rmind 833: #endif /* notdef */
1.51 yamt 834: return (-1);
1.1 mrg 835: }
836:
837: /*
1.40 thorpej 838: * uvm_loanzero: loan a zero-fill page out
1.1 mrg 839: *
1.28 chuck 840: * => called with map, amap, uobj locked
1.1 mrg 841: * => return value:
842: * -1 = fatal error, everything is unlocked, abort.
843: * 0 = lookup in ufi went stale, everything unlocked, relookup and
844: * try again
845: * 1 = got it, everything still locked
846: */
847:
1.40 thorpej 848: static struct uvm_object uvm_loanzero_object;
849:
1.33 jdolecek 850: static int
1.54 thorpej 851: uvm_loanzero(struct uvm_faultinfo *ufi, void ***output, int flags)
1.1 mrg 852: {
1.6 mrg 853: struct vm_page *pg;
1.34 chs 854: struct vm_amap *amap = ufi->entry->aref.ar_amap;
1.1 mrg 855:
1.52 yamt 856: UVMHIST_FUNC(__func__); UVMHIST_CALLED(loanhist);
1.44 yamt 857: again:
1.80 rmind 858: mutex_enter(uvm_loanzero_object.vmobjlock);
1.40 thorpej 859:
860: /*
861: * first, get ahold of our single zero page.
862: */
863:
1.81.2.2 yamt 864: pg = uvm_pagelookup(&uvm_loanzero_object, 0);
865: if (__predict_false(pg == NULL)) {
1.40 thorpej 866: while ((pg = uvm_pagealloc(&uvm_loanzero_object, 0, NULL,
867: UVM_PGA_ZERO)) == NULL) {
1.80 rmind 868: mutex_exit(uvm_loanzero_object.vmobjlock);
869: uvmfault_unlockall(ufi, amap, NULL);
1.40 thorpej 870: uvm_wait("loanzero");
1.34 chs 871: if (!uvmfault_relock(ufi)) {
872: return (0);
873: }
874: if (amap) {
875: amap_lock(amap);
876: }
1.44 yamt 877: goto again;
1.6 mrg 878: }
1.29 chs 879:
1.40 thorpej 880: /* got a zero'd page. */
881: pg->flags &= ~(PG_WANTED|PG_BUSY|PG_FAKE);
882: pg->flags |= PG_RDONLY;
1.70 ad 883: mutex_enter(&uvm_pageqlock);
1.44 yamt 884: uvm_pageactivate(pg);
1.70 ad 885: mutex_exit(&uvm_pageqlock);
1.6 mrg 886: UVM_PAGE_OWN(pg, NULL);
1.40 thorpej 887: }
888:
889: if ((flags & UVM_LOAN_TOANON) == 0) { /* loaning to kernel-page */
1.81.2.4 yamt 890: struct uvm_cpu *ucpu;
891:
1.70 ad 892: mutex_enter(&uvm_pageqlock);
1.40 thorpej 893: pg->loan_count++;
1.70 ad 894: mutex_exit(&uvm_pageqlock);
1.80 rmind 895: mutex_exit(uvm_loanzero_object.vmobjlock);
1.6 mrg 896: **output = pg;
1.34 chs 897: (*output)++;
1.81.2.4 yamt 898: ucpu = uvm_cpu_get();
899: ucpu->loan_zero++;
900: uvm_cpu_put(ucpu);
1.34 chs 901: return (1);
1.6 mrg 902: }
903:
1.80 rmind 904: #ifdef notdef
1.40 thorpej 905: /*
906: * loaning to an anon. check to see if there is already an anon
907: * associated with this page. if so, then just return a reference
908: * to this object.
909: */
1.23 thorpej 910:
1.40 thorpej 911: if (pg->uanon) {
912: anon = pg->uanon;
1.70 ad 913: mutex_enter(&anon->an_lock);
1.40 thorpej 914: anon->an_ref++;
1.70 ad 915: mutex_exit(&anon->an_lock);
1.80 rmind 916: mutex_exit(uvm_loanzero_object.vmobjlock);
1.40 thorpej 917: **output = anon;
918: (*output)++;
919: return (1);
920: }
1.23 thorpej 921:
1.40 thorpej 922: /*
923: * need to allocate a new anon
924: */
1.34 chs 925:
1.40 thorpej 926: anon = uvm_analloc();
927: if (anon == NULL) {
928: /* out of swap causes us to fail */
1.80 rmind 929: mutex_exit(uvm_loanzero_object.vmobjlock);
1.58 yamt 930: uvmfault_unlockall(ufi, amap, NULL, NULL);
1.40 thorpej 931: return (-1);
1.6 mrg 932: }
1.53 yamt 933: anon->an_page = pg;
1.40 thorpej 934: pg->uanon = anon;
1.70 ad 935: mutex_enter(&uvm_pageqlock);
1.40 thorpej 936: pg->loan_count++;
1.6 mrg 937: uvm_pageactivate(pg);
1.70 ad 938: mutex_exit(&uvm_pageqlock);
939: mutex_exit(&anon->an_lock);
1.80 rmind 940: mutex_exit(uvm_loanzero_object.vmobjlock);
1.6 mrg 941: **output = anon;
1.34 chs 942: (*output)++;
943: return (1);
1.80 rmind 944: #else
945: return (-1);
946: #endif
1.1 mrg 947: }
948:
949:
950: /*
951: * uvm_unloananon: kill loans on anons (basically a normal ref drop)
952: *
953: * => we expect all our resources to be unlocked
954: */
955:
1.33 jdolecek 956: static void
1.54 thorpej 957: uvm_unloananon(struct vm_anon **aloans, int nanons)
1.1 mrg 958: {
1.80 rmind 959: #ifdef notdef
1.81 rmind 960: struct vm_anon *anon, *to_free = NULL;
1.1 mrg 961:
1.81 rmind 962: /* TODO: locking */
963: amap_lock(amap);
1.6 mrg 964: while (nanons-- > 0) {
965: anon = *aloans++;
1.81 rmind 966: if (--anon->an_ref == 0) {
967: anon->an_link = to_free;
968: to_free = anon;
1.6 mrg 969: }
970: }
1.81 rmind 971: uvm_anon_freelst(amap, to_free);
1.80 rmind 972: #endif /* notdef */
1.1 mrg 973: }
974:
975: /*
976: * uvm_unloanpage: kill loans on pages loaned out to the kernel
977: *
978: * => we expect all our resources to be unlocked
979: */
980:
1.33 jdolecek 981: static void
1.54 thorpej 982: uvm_unloanpage(struct vm_page **ploans, int npages)
1.1 mrg 983: {
1.6 mrg 984: struct vm_page *pg;
1.70 ad 985: kmutex_t *slock;
1.1 mrg 986:
1.70 ad 987: mutex_enter(&uvm_pageqlock);
1.6 mrg 988: while (npages-- > 0) {
1.81.2.4 yamt 989: struct uvm_object *obj;
990: struct vm_anon *anon;
991: struct uvm_cpu *ucpu;
992:
1.6 mrg 993: pg = *ploans++;
1.1 mrg 994:
1.6 mrg 995: /*
1.36 chs 996: * do a little dance to acquire the object or anon lock
997: * as appropriate. we are locking in the wrong order,
998: * so we have to do a try-lock here.
999: */
1000:
1001: slock = NULL;
1002: while (pg->uobject != NULL || pg->uanon != NULL) {
1003: if (pg->uobject != NULL) {
1.80 rmind 1004: slock = pg->uobject->vmobjlock;
1.36 chs 1005: } else {
1.80 rmind 1006: slock = pg->uanon->an_lock;
1.36 chs 1007: }
1.70 ad 1008: if (mutex_tryenter(slock)) {
1.36 chs 1009: break;
1010: }
1.81.2.3 yamt 1011: mutex_obj_pause(slock, &uvm_pageqlock);
1.36 chs 1012: slock = NULL;
1013: }
1014:
1.81.2.4 yamt 1015: obj = pg->uobject;
1016: anon = pg->uanon;
1.81.2.5 yamt 1017: /*
1018: * drop our loan. (->K)
1019: */
1.34 chs 1020: KASSERT(pg->loan_count > 0);
1021: pg->loan_count--;
1.81.2.5 yamt 1022: /*
             1023: 		 * if there are no loans left, put the page back on a paging queue
1024: * (if the page is owned by an anon) or free it (if the page
1025: * is now unowned).
1026: */
1027: uvm_loan_resolve_orphan(pg, true);
1028: if (pg->loan_count == 0) {
1029: if (obj == NULL && anon == NULL) {
1030: KASSERT((pg->flags & PG_BUSY) == 0);
1031: uvm_pagefree(pg);
1032: }
1033: if (anon != NULL) {
1034: uvm_pageactivate(pg);
1035: }
1.36 chs 1036: }
1037: if (slock != NULL) {
1.70 ad 1038: mutex_exit(slock);
1.6 mrg 1039: }
1.81.2.4 yamt 1040: ucpu = uvm_cpu_get();
1041: if (obj != NULL) {
1.81.2.5 yamt 1042: if (anon != NULL) {
1043: ucpu->unloan_oa++;
1044: } else if (obj == &uvm_loanzero_object) {
1.81.2.4 yamt 1045: ucpu->unloan_zero++;
1046: } else {
1047: ucpu->unloan_obj++;
1048: }
1049: } else if (anon != NULL) {
1050: ucpu->unloan_anon++;
1051: }
1052: uvm_cpu_put(ucpu);
1.6 mrg 1053: }
1.70 ad 1054: mutex_exit(&uvm_pageqlock);
1.1 mrg 1055: }
1056:
1.33 jdolecek 1057: /*
1.34 chs 1058: * uvm_unloan: kill loans on pages or anons.
1.33 jdolecek 1059: */
1.34 chs 1060:
1.33 jdolecek 1061: void
1.34 chs 1062: uvm_unloan(void *v, int npages, int flags)
1.33 jdolecek 1063: {
1.34 chs 1064: if (flags & UVM_LOAN_TOANON) {
1065: uvm_unloananon(v, npages);
1066: } else {
1067: uvm_unloanpage(v, npages);
1068: }
1.40 thorpej 1069: }
1070:
1071: /*
1.41 thorpej 1072: * Minimal pager for uvm_loanzero_object. We need to provide a "put"
1073: * method, because the page can end up on a paging queue, and the
1074: * page daemon will want to call pgo_put when it encounters the page
1075: * on the inactive list.
1076: */
1077:
1078: static int
1.62 yamt 1079: ulz_put(struct uvm_object *uobj, voff_t start, voff_t stop, int flags)
1.41 thorpej 1080: {
1081: struct vm_page *pg;
1082:
1083: KDASSERT(uobj == &uvm_loanzero_object);
1084:
1085: /*
1086: * Don't need to do any work here if we're not freeing pages.
1087: */
1088:
1089: if ((flags & PGO_FREE) == 0) {
1.80 rmind 1090: mutex_exit(uobj->vmobjlock);
1.41 thorpej 1091: return 0;
1092: }
1093:
1094: /*
1095: * we don't actually want to ever free the uvm_loanzero_page, so
1096: * just reactivate or dequeue it.
1097: */
1098:
1.81.2.1 yamt 1099: pg = uvm_pagelookup(uobj, 0);
1.41 thorpej 1100: KASSERT(pg != NULL);
1.81.2.1 yamt 1101: KASSERT(uobj->uo_npages == 1);
1.41 thorpej 1102:
1.70 ad 1103: mutex_enter(&uvm_pageqlock);
1.41 thorpej 1104: if (pg->uanon)
1105: uvm_pageactivate(pg);
1106: else
1107: uvm_pagedequeue(pg);
1.70 ad 1108: mutex_exit(&uvm_pageqlock);
1.41 thorpej 1109:
1.80 rmind 1110: mutex_exit(uobj->vmobjlock);
1.41 thorpej 1111: return 0;
1112: }
1113:
1.69 yamt 1114: static const struct uvm_pagerops ulz_pager = {
1.68 yamt 1115: .pgo_put = ulz_put,
1.41 thorpej 1116: };
1117:
1118: /*
1.40 thorpej 1119: * uvm_loan_init(): initialize the uvm_loan() facility.
1120: */
1121:
1122: void
1123: uvm_loan_init(void)
1124: {
1125:
1.81.2.4 yamt 1126: uvm_obj_init(&uvm_loanzero_object, &ulz_pager, true, 0);
1.52 yamt 1127: UVMHIST_INIT(loanhist, 300);
1.42 yamt 1128: }
1129:
1130: /*
1131: * uvm_loanbreak: break loan on a uobj page
1132: *
1133: * => called with uobj locked
1134: * => the page should be busy
1135: * => return value:
             1136:  *	newly allocated page on success, NULL if a replacement page cannot be allocated
1137: */
1138: struct vm_page *
1139: uvm_loanbreak(struct vm_page *uobjpage)
1140: {
1.81.2.4 yamt 1141: struct uvm_cpu *ucpu;
1.42 yamt 1142: struct vm_page *pg;
1.49 drochner 1143: #ifdef DIAGNOSTIC
1.42 yamt 1144: struct uvm_object *uobj = uobjpage->uobject;
1.49 drochner 1145: #endif
1.81.2.5 yamt 1146: struct vm_anon * const anon = uobjpage->uanon;
1.81.2.4 yamt 1147: const unsigned int count = uobjpage->loan_count;
1.42 yamt 1148:
1149: KASSERT(uobj != NULL);
1.80 rmind 1150: KASSERT(mutex_owned(uobj->vmobjlock));
1.42 yamt 1151: KASSERT(uobjpage->flags & PG_BUSY);
1.81.2.4 yamt 1152: KASSERT(count > 0);
1.42 yamt 1153:
1154: /* alloc new un-owned page */
1155: pg = uvm_pagealloc(NULL, 0, NULL, 0);
1156: if (pg == NULL)
1157: return NULL;
1158:
1159: /*
1160: * copy the data from the old page to the new
1.61 yamt 1161: * one and clear the fake flags on the new page (keep it busy).
1162: * force a reload of the old page by clearing it from all
1163: * pmaps.
1164: * then lock the page queues to rename the pages.
1.42 yamt 1165: */
1166:
1167: uvm_pagecopy(uobjpage, pg); /* old -> new */
1.61 yamt 1168: pg->flags &= ~PG_FAKE;
1.81.2.4 yamt 1169: KASSERT(uvm_pagegetdirty(pg) == UVM_PAGE_STATUS_DIRTY);
1.42 yamt 1170: pmap_page_protect(uobjpage, VM_PROT_NONE);
1171: if (uobjpage->flags & PG_WANTED)
1172: wakeup(uobjpage);
1173: /* uobj still locked */
1174: uobjpage->flags &= ~(PG_WANTED|PG_BUSY);
1175: UVM_PAGE_OWN(uobjpage, NULL);
1176:
1.81.2.5 yamt 1177: mutex_enter(&uvm_pageqlock);
1178:
1179: /*
1180: * if the page is no longer referenced by an anon (i.e. we are breaking
1181: * O->K loans), then remove it from any pageq's.
1182: */
1183: if (anon == NULL)
1184: uvm_pagedequeue(uobjpage);
1185:
1.48 yamt 1186: /*
1187: * replace uobjpage with new page.
1.81.2.4 yamt 1188: *
1189: * this will update the page dirtiness statistics.
1.48 yamt 1190: */
1191:
1192: uvm_pagereplace(uobjpage, pg);
1.42 yamt 1193:
1194: /*
1.81.2.5 yamt 1195: * at this point we have absolutely no control over uobjpage
1.42 yamt 1196: */
1197:
1198: /* install new page */
1199: uvm_pageactivate(pg);
1.70 ad 1200: mutex_exit(&uvm_pageqlock);
1.42 yamt 1201:
1202: /*
1.81.2.5 yamt 1203: * update statistics.
1.42 yamt 1204: */
1.81.2.4 yamt 1205: ucpu = uvm_cpu_get();
1.81.2.5 yamt 1206: if (anon != NULL) {
1207: ucpu->loanbreak_oa_obj++;
1208: ucpu->loanbreak_orphaned += count - 1;
1209: } else {
1210: ucpu->loanbreak_obj += count;
1211: }
1.81.2.4 yamt 1212: uvm_cpu_put(ucpu);
1.81.2.5 yamt 1213:
1214: /*
1215: * done! loan is broken and "pg" is PG_BUSY.
1216: * it can now replace uobjpage.
1217: */
1.42 yamt 1218: return pg;
1.33 jdolecek 1219: }
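
/*
 * Editorial sketch, not part of this file: the caller pattern for
 * uvm_loanbreak(), e.g. on a write fault against a loaned object page.
 * example_break_obj_loan() is hypothetical; the real caller is the
 * fault handler.
 */
static struct vm_page *
example_break_obj_loan(struct uvm_object *uobj, struct vm_page *pg)
{
        struct vm_page *npg;

        KASSERT(mutex_owned(uobj->vmobjlock));
        KASSERT(pg->flags & PG_BUSY);
        KASSERT(pg->loan_count > 0);

        npg = uvm_loanbreak(pg);
        if (npg == NULL) {
                /* no memory for a replacement page; caller must wait and retry */
                return NULL;
        }
        /* npg is a fresh, unloaned PG_BUSY page that has replaced pg in uobj */
        return npg;
}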
1.75 uebayasi 1220:
1.81.2.5 yamt 1221: /*
             1222:  * uvm_loanbreak_anon: break the loan on an anon's page by installing a private copy; returns 0 on success, ENOMEM on failure.
1223: */
1224:
1.75 uebayasi 1225: int
1.81.2.5 yamt 1226: uvm_loanbreak_anon(struct vm_anon *anon)
1.75 uebayasi 1227: {
1.81.2.4 yamt 1228: struct uvm_cpu *ucpu;
1.75 uebayasi 1229: struct vm_page *pg;
1.81.2.4 yamt 1230: unsigned int oldstatus;
1.81.2.5 yamt 1231: struct uvm_object * const uobj = anon->an_page->uobject;
1.81.2.4 yamt 1232: const unsigned int count = anon->an_page->loan_count;
1.75 uebayasi 1233:
1.80 rmind 1234: KASSERT(mutex_owned(anon->an_lock));
1235: KASSERT(uobj == NULL || mutex_owned(uobj->vmobjlock));
1.81.2.4 yamt 1236: KASSERT(count > 0);
1.77 uebayasi 1237:
1.75 uebayasi 1238: /* get new un-owned replacement page */
1239: pg = uvm_pagealloc(NULL, 0, NULL, 0);
1240: if (pg == NULL) {
1241: return ENOMEM;
1242: }
1243:
1244: /* copy old -> new */
1245: uvm_pagecopy(anon->an_page, pg);
1.81.2.4 yamt 1246: KASSERT(uvm_pagegetdirty(pg) == UVM_PAGE_STATUS_DIRTY);
1.75 uebayasi 1247:
1248: /* force reload */
1249: pmap_page_protect(anon->an_page, VM_PROT_NONE);
1.81.2.4 yamt 1250: oldstatus = uvm_pagegetdirty(anon->an_page);
1.75 uebayasi 1251: mutex_enter(&uvm_pageqlock); /* KILL loan */
1252:
1253: anon->an_page->uanon = NULL;
1.81.2.5 yamt 1254: if (uobj != NULL) {
1255: /*
1256: * if we were receiver of loan (O->A)
1257: */
1258: KASSERT((anon->an_page->pqflags & PQ_ANON) == 0);
1.75 uebayasi 1259: anon->an_page->loan_count--;
1260: } else {
1261: /*
1.77 uebayasi 1262: * we were the lender (A->K); need to remove the page from
1263: * pageq's.
1.81.2.5 yamt 1264: *
1265: * PQ_ANON is updated by the caller.
1.75 uebayasi 1266: */
1.81.2.5 yamt 1267: KASSERT((anon->an_page->pqflags & PQ_ANON) != 0);
1268: anon->an_page->pqflags &= ~PQ_ANON;
1.75 uebayasi 1269: uvm_pagedequeue(anon->an_page);
1270: }
1271:
1272: /* install new page in anon */
1273: anon->an_page = pg;
1274: pg->uanon = anon;
1275: pg->pqflags |= PQ_ANON;
1276:
1277: uvm_pageactivate(pg);
1278: mutex_exit(&uvm_pageqlock);
1279:
1280: pg->flags &= ~(PG_BUSY|PG_FAKE);
1281: UVM_PAGE_OWN(pg, NULL);
1282:
1283: /* done! */
1.81.2.5 yamt 1284: ucpu = uvm_cpu_get();
1285: if (uobj != NULL) {
1286: ucpu->loanbreak_oa_anon++;
1287: ucpu->loanbreak_orphaned_anon += count - 1;
1288: atomic_inc_uint(&uvmexp.anonpages);
1289: } else {
1.81.2.4 yamt 1290: ucpu->loanbreak_anon += count;
1291: ucpu->pagestate[1][oldstatus]--;
1.81.2.5 yamt 1292: }
1293: ucpu->pagestate[1][UVM_PAGE_STATUS_DIRTY]++;
1294: uvm_cpu_put(ucpu);
1295: return 0;
1296: }
1297:
1298: int
1299: uvm_loanobj(struct uvm_object *uobj, struct uio *uio)
1300: {
1301: struct iovec *iov;
1302: struct vm_map *map;
1303: vaddr_t va;
1304: size_t len;
1305: int i, error = 0;
1306:
1.81.2.10 yamt 1307: if (!vm_loan_read) {
1.81.2.5 yamt 1308: return ENOSYS;
1309: }
1310:
1311: /*
1312: * This interface is only for loaning to user space.
1313: * Loans to the kernel should be done with the kernel-specific
1314: * loaning interfaces.
1315: */
1316:
1317: if (VMSPACE_IS_KERNEL_P(uio->uio_vmspace)) {
1318: return ENOSYS;
1319: }
1320:
1321: if (uio->uio_rw != UIO_READ) {
1322: return ENOSYS;
1323: }
1324:
1325: /*
1326: * Check that the uio is aligned properly for loaning.
1327: */
1328:
1329: if (uio->uio_offset & PAGE_MASK || uio->uio_resid & PAGE_MASK) {
1330: return EINVAL;
1331: }
1332: for (i = 0; i < uio->uio_iovcnt; i++) {
1333: if (((vaddr_t)uio->uio_iov[i].iov_base & PAGE_MASK) ||
1334: (uio->uio_iov[i].iov_len & PAGE_MASK)) {
1335: return EINVAL;
1336: }
1337: }
1338:
1339: /*
1340: * Process the uio.
1341: */
1342:
1343: map = &uio->uio_vmspace->vm_map;
1344: while (uio->uio_resid) {
1345: iov = uio->uio_iov;
1346: while (iov->iov_len) {
1347: va = (vaddr_t)iov->iov_base;
1348: len = MIN(iov->iov_len, MAXPHYS);
1349: error = uvm_loanobj_read(map, va, len, uobj,
1350: uio->uio_offset);
1351: if (error) {
1352: goto out;
1353: }
1354: iov->iov_base = (char *)iov->iov_base + len;
1355: iov->iov_len -= len;
1356: uio->uio_offset += len;
1357: uio->uio_resid -= len;
1358: }
1359: uio->uio_iov++;
1360: uio->uio_iovcnt--;
1361: }
1362:
1363: out:
1364: pmap_update(map->pmap);
1365: return error;
1366: }
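
/*
 * Editorial sketch, not part of this file: how a read path might try
 * the zero-copy loan above and fall back to an ordinary copying read
 * when the request does not qualify.  example_read_with_loan() and
 * example_copy_read() are hypothetical.
 */
static int example_copy_read(struct uvm_object *, struct uio *);

static int
example_read_with_loan(struct uvm_object *uobj, struct uio *uio)
{
        int error;

        error = uvm_loanobj(uobj, uio);
        if (error == 0)
                return 0;               /* everything was loaned */

        /*
         * uvm_loanobj() consumes the uio as it goes, so only the
         * remaining, unconsumed part needs the copying fallback.
         */
        return example_copy_read(uobj, uio);
}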
1367:
1368: /*
1369: * Loan object pages to a user process.
1370: */
1371:
1372: /* XXX an arbitrary number larger than MAXPHYS/PAGE_SIZE */
1373: #define MAXPAGES 16
1374:
1375: static int
1376: uvm_loanobj_read(struct vm_map *map, vaddr_t va, size_t len,
1377: struct uvm_object *uobj, off_t off)
1378: {
1379: unsigned int npages = len >> PAGE_SHIFT;
1380: struct vm_page *pgs[MAXPAGES];
1381: struct vm_amap *amap;
1382: struct vm_anon *anon, *oanons[MAXPAGES], *nanons[MAXPAGES];
1383: struct vm_map_entry *entry;
1384: struct vm_anon *anon_tofree;
1385: unsigned int maptime;
1386: unsigned int i, refs, aoff, pgoff;
1387: unsigned int loaned; /* # of newly created O->A loan */
1388: int error;
1.81.2.12! yamt 1389: UVMHIST_FUNC("uvm_loanobj_read"); UVMHIST_CALLED(ubchist);
1.81.2.5 yamt 1390:
1391: UVMHIST_LOG(ubchist, "map %p va 0x%x npages %d", map, va, npages, 0);
1392: UVMHIST_LOG(ubchist, "uobj %p off 0x%x", uobj, off, 0, 0);
1393:
1.81.2.11 yamt 1394: KASSERT(npages <= MAXPAGES);
1.81.2.8 yamt 1395: #if defined(PMAP_PREFER)
1396: /*
1397: * avoid creating VAC aliases.
1398: */
1399: {
1400: const vaddr_t origva = va;
1401:
1402: PMAP_PREFER(off, &va, len, 0);
1403: if (va != origva) {
1404: /*
1405: * pmap's suggestion was different from the requested
1406: * address. punt.
1407: */
1408: return EINVAL;
1409: }
1410: }
1411: #endif /* defined(PMAP_PREFER) */
1.81.2.5 yamt 1412: retry:
1413: vm_map_lock_read(map);
1414: if (!uvm_map_lookup_entry(map, va, &entry)) {
1415: vm_map_unlock_read(map);
1416: UVMHIST_LOG(ubchist, "no entry", 0,0,0,0);
1417: return EINVAL;
1418: }
1419: if ((entry->protection & VM_PROT_WRITE) == 0) {
1420: vm_map_unlock_read(map);
1421: UVMHIST_LOG(ubchist, "no write access", 0,0,0,0);
1422: return EACCES;
1423: }
1424: if (VM_MAPENT_ISWIRED(entry)) {
1425: vm_map_unlock_read(map);
1426: UVMHIST_LOG(ubchist, "entry is wired", 0,0,0,0);
1427: return EBUSY;
1428: }
1429: if (!UVM_ET_ISCOPYONWRITE(entry)) {
1430: vm_map_unlock_read(map);
1431: UVMHIST_LOG(ubchist, "entry is not COW", 0,0,0,0);
1432: return EINVAL;
1433: }
1434: if (UVM_ET_ISOBJ(entry)) {
1435: /*
1436: * avoid locking order difficulty between
1437: * am_obj_lock and backing object's lock.
1438: */
1439: vm_map_unlock_read(map);
1440: UVMHIST_LOG(ubchist, "entry is obj backed", 0,0,0,0);
1441: return EINVAL;
1442: }
1443: if (entry->end < va + len) {
1444: vm_map_unlock_read(map);
1445: UVMHIST_LOG(ubchist, "chunk longer than entry", 0,0,0,0);
1446: return EINVAL;
1447: }
1448: amap = entry->aref.ar_amap;
1449: if (amap != NULL && (amap->am_flags & AMAP_SHARED) != 0) {
1450: vm_map_unlock_read(map);
1451: UVMHIST_LOG(ubchist, "amap is shared", 0,0,0,0);
1452: return EINVAL;
1453: }
1454:
1455: /*
1456: * None of the trivial reasons why we might not be able to do the loan
1457: * are true. If we need to COW the amap, try to do it now.
1458: */
1459:
1460: KASSERT(amap || UVM_ET_ISNEEDSCOPY(entry));
1461: if (UVM_ET_ISNEEDSCOPY(entry)) {
1462: maptime = map->timestamp;
1463: vm_map_unlock_read(map);
1464: vm_map_lock(map);
1465: if (maptime + 1 != map->timestamp) {
1466: vm_map_unlock(map);
1467: goto retry;
1468: }
1469: amap_copy(map, entry, 0, va, va + len);
1470: if (UVM_ET_ISNEEDSCOPY(entry)) {
1471: vm_map_unlock(map);
1472: UVMHIST_LOG(ubchist, "amap COW failed", 0,0,0,0);
1473: return ENOMEM;
1474: }
1475: UVMHIST_LOG(ubchist, "amap has been COWed", 0,0,0,0);
1476: aoff = va - entry->start;
1477: maptime = map->timestamp;
1478: vm_map_unlock(map);
1479: } else {
1480: aoff = va - entry->start;
1481: maptime = map->timestamp;
1482: vm_map_unlock_read(map);
1483: }
1484:
1485: /*
1486: * The map is all ready for us, now fetch the obj pages.
1487: * If the map changes out from under us, start over.
1488: *
1489: * XXX worth trying PGO_LOCKED?
1490: */
1491:
1492: memset(pgs, 0, sizeof(pgs));
1493: mutex_enter(uobj->vmobjlock);
1494: error = (*uobj->pgops->pgo_get)(uobj, off, pgs, &npages, 0,
1495: VM_PROT_READ, 0, PGO_SYNCIO);
1496: if (error) {
1497: UVMHIST_LOG(ubchist, "getpages -> %d", error,0,0,0);
1498: return error;
1499: }
1500: vm_map_lock_read(map);
1501: if (map->timestamp != maptime) {
1502: vm_map_unlock_read(map);
1503: mutex_enter(uobj->vmobjlock);
1504: mutex_enter(&uvm_pageqlock);
1505: for (i = 0; i < npages; i++) {
1506: uvm_pageactivate(pgs[i]);
1507: }
1508: uvm_page_unbusy(pgs, npages);
1509: mutex_exit(&uvm_pageqlock);
1510: mutex_exit(uobj->vmobjlock);
1511: goto retry;
1512: }
1513: amap = entry->aref.ar_amap;
1514: KASSERT(amap != NULL);
1515:
1516: /*
1517: * Prepare each object page for loaning. Allocate an anon for each page
1518: * that doesn't already have one. If any of the pages are wired,
1519: * undo everything and fail.
1520: */
1521:
1522: memset(nanons, 0, sizeof(nanons));
1523: amap_lock(amap);
1524: if (amap->am_obj_lock != NULL) {
1525: if (amap->am_obj_lock != uobj->vmobjlock) {
1526: /*
1527: * the amap might already have pages loaned from
1528: * another object. give up.
1529: *
1530: * XXX worth clipping amap?
1531: */
1532: error = EBUSY;
1533: amap_unlock(amap);
1534: amap = NULL;
1535: mutex_enter(uobj->vmobjlock);
1536: goto fail_amap_unlocked;
1537: }
1538: } else {
1539: mutex_enter(uobj->vmobjlock);
1540: }
1541: KASSERT(mutex_owned(amap->am_lock));
1542: KASSERT(mutex_owned(uobj->vmobjlock));
1543: loaned = 0;
1544: for (i = 0; i < npages; i++) {
1545: struct vm_page * const pg = pgs[i];
1546: KASSERT(uvm_page_locked_p(pg));
1547: if (pg->wire_count) {
1548: error = EBUSY;
1549: goto fail;
1550: }
1551: pmap_page_protect(pg, VM_PROT_READ);
1552: mutex_enter(&uvm_pageqlock);
1553: uvm_pageactivate(pgs[i]);
1554: mutex_exit(&uvm_pageqlock);
1555: if (pg->uanon != NULL) {
1556: KASSERTMSG(pg->loan_count > 0, "pg %p loan_count %u",
1557: pg, (unsigned int)pg->loan_count);
1558: anon = pg->uanon;
1559: if (anon->an_lock != amap->am_lock) {
1560: /*
1561: * the page is already loaned to another amap
1562: * whose lock is incompatible with ours.
1563: * give up.
1564: */
1565: error = EBUSY;
1566: goto fail;
1567: }
1568: anon->an_ref++;
1569: } else {
1570: anon = uvm_analloc();
1571: if (anon == NULL) {
1572: error = ENOMEM;
1573: goto fail;
1574: }
1575: mutex_enter(&uvm_pageqlock);
1576: anon->an_page = pg;
1577: pg->uanon = anon;
1578: pg->loan_count++;
1579: mutex_exit(&uvm_pageqlock);
1580: loaned++;
1581: }
1582: nanons[i] = anon;
1583: }
1584:
1585: /*
1586: * Look for any existing anons in the amap. These will be replaced
1587: * by the new loan anons we just set up. If any of these anon pages
1588: * are wired then we can't replace them.
1589: */
1590:
1591: memset(oanons, 0, sizeof(oanons));
1592: for (i = 0; i < npages; i++) {
1593: UVMHIST_LOG(ubchist, "pgs[%d] %p", i, pgs[i], 0,0);
1594: anon = amap_lookup(&entry->aref, aoff + (i << PAGE_SHIFT));
1595: oanons[i] = anon;
1596: if (anon && anon->an_page && anon->an_page->wire_count) {
1597: error = EBUSY;
1598: goto fail;
1599: }
1600: }
1601:
1602: /*
1603: * Everything is good to go. Remove any existing anons and insert
1604: * the loaned object anons.
1605: */
1606:
1607: anon_tofree = NULL;
1608: for (i = 0; i < npages; i++) {
1609: pgoff = i << PAGE_SHIFT;
1610: anon = oanons[i];
1611: if (anon != NULL) {
1612: amap_unadd(&entry->aref, aoff + pgoff);
1613: refs = --anon->an_ref;
1614: if (refs == 0) {
1615: anon->an_link = anon_tofree;
1616: anon_tofree = anon;
1617: }
1618: }
1619: anon = nanons[i];
1620: if (anon->an_lock == NULL) {
1621: anon->an_lock = amap->am_lock;
1622: }
             1623: 		amap_add(&entry->aref, aoff + pgoff, anon, false);
1624: }
1625:
1626: /*
1627: * The map has all the new information now.
1628: * Enter the pages into the pmap to save likely faults later.
1629: */
1630:
1631: for (i = 0; i < npages; i++) {
1632: error = pmap_enter(map->pmap, va + (i << PAGE_SHIFT),
1633: VM_PAGE_TO_PHYS(pgs[i]), VM_PROT_READ, PMAP_CANFAIL);
1634: if (error != 0) {
1635: /*
1636: * while the failure of pmap_enter here is not critical,
             1637: 			 * we must not leave a stale mapping of the old anon's page.
1638: */
1639: pmap_remove(map->pmap, va + (i << PAGE_SHIFT),
1640: va + (i << PAGE_SHIFT) + PAGE_SIZE);
1641: }
1642: }
1643:
1644: /*
1645: * At this point we're done with the pages, unlock them now.
1646: */
1647:
1648: mutex_enter(&uvm_pageqlock);
1649: uvm_page_unbusy(pgs, npages);
1650: mutex_exit(&uvm_pageqlock);
1651: if (amap->am_obj_lock == NULL) {
1652: mutex_obj_hold(uobj->vmobjlock);
1653: amap->am_obj_lock = uobj->vmobjlock;
1654: } else {
1655: KASSERT(amap->am_obj_lock == uobj->vmobjlock);
1656: }
1657: uvm_anon_freelst(amap, anon_tofree);
1658: vm_map_unlock_read(map);
1659:
1660: /*
1661: * update statistics
1662: */
1663: if (loaned) {
1664: struct uvm_cpu *ucpu;
1665:
1666: ucpu = uvm_cpu_get();
1667: ucpu->loan_obj_read += loaned;
1.81.2.4 yamt 1668: uvm_cpu_put(ucpu);
1669: }
1.75 uebayasi 1670: return 0;
1.81.2.5 yamt 1671:
1672: /*
1673: * We couldn't complete the loan for some reason.
1674: * Undo any work we did so far.
1675: */
1676:
1677: fail:
1678: KASSERT(mutex_owned(amap->am_lock));
1679: fail_amap_unlocked:
1680: KASSERT(mutex_owned(uobj->vmobjlock));
1681: for (i = 0; i < npages; i++) {
1682: anon = nanons[i];
1683: if (anon != NULL) {
1684: KASSERT(amap != NULL);
1685: KASSERT(uvm_page_locked_p(anon->an_page));
1686: if (anon->an_lock == NULL) {
1687: struct vm_page * const pg = anon->an_page;
1688:
1689: KASSERT(anon->an_ref == 1);
1690: KASSERT(pg != NULL);
1691: KASSERT(pg->loan_count > 0);
1692: KASSERT(pg->uanon == anon);
1693: mutex_enter(&uvm_pageqlock);
1694: pg->loan_count--;
1695: pg->uanon = NULL;
1696: anon->an_page = NULL;
1697: mutex_exit(&uvm_pageqlock);
1698: anon->an_ref--;
1699: uvm_anon_free(anon);
1700: } else {
1701: KASSERT(anon->an_lock == amap->am_lock);
1702: KASSERT(anon->an_page->loan_count > 0);
1703: KASSERT(anon->an_ref > 1);
1704: anon->an_ref--;
1705: }
1706: } else {
1707: mutex_enter(&uvm_pageqlock);
1708: uvm_pageenqueue(pgs[i]);
1709: mutex_exit(&uvm_pageqlock);
1710: }
1711: }
1712: mutex_enter(&uvm_pageqlock);
1713: uvm_page_unbusy(pgs, npages);
1714: mutex_exit(&uvm_pageqlock);
1715: if (amap != NULL) {
1716: if (amap->am_obj_lock == NULL) {
1717: mutex_exit(uobj->vmobjlock);
1718: }
1719: amap_unlock(amap);
1720: } else {
1721: mutex_exit(uobj->vmobjlock);
1722: }
1723: vm_map_unlock_read(map);
1724: return error;
1725: }
1726:
1727: /*
1728: * uvm_loan_resolve_orphan: update the state of the page after a possible
1729: * ownership change
1730: *
1731: * if page is owned by an anon but PQ_ANON is not set, the page was loaned
1732: * to the anon from an object which dropped ownership, so resolve this by
1733: * turning the anon's loan into real ownership (ie. decrement loan_count
1734: * again and set PQ_ANON).
1735: */
1736:
1737: void
1738: uvm_loan_resolve_orphan(struct vm_page *pg, bool pageqlocked)
1739: {
1740: struct uvm_object * const uobj = pg->uobject;
1741: struct vm_anon * const anon = pg->uanon;
1742: struct uvm_cpu *ucpu;
1743:
1744: KASSERT(!pageqlocked || mutex_owned(&uvm_pageqlock));
1745: KASSERT(uvm_page_locked_p(pg));
1746: if (uobj != NULL) {
1747: return;
1748: }
1749: if (anon == NULL) {
1750: return;
1751: }
1752: if ((pg->pqflags & PQ_ANON) != 0) {
1753: return;
1754: }
1755: KASSERT(pg->loan_count > 0);
1.81.2.9 yamt 1756: uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_DIRTY);
1.81.2.5 yamt 1757: if (!pageqlocked) {
1758: mutex_enter(&uvm_pageqlock);
1759: }
1760: pg->loan_count--;
1761: pg->pqflags |= PQ_ANON;
1762: if (!pageqlocked) {
1763: mutex_exit(&uvm_pageqlock);
1764: }
1765:
1766: /*
1767: * adjust statistics after the owner change.
1768: *
1769: * the pagestate should have been decremented when uobj dropped the
1770: * ownership.
1771: */
1772: ucpu = uvm_cpu_get();
1773: ucpu->loan_resolve_orphan++;
1774: ucpu->pagestate[1][UVM_PAGE_STATUS_DIRTY]++;
1775: uvm_cpu_put(ucpu);
1776: atomic_inc_uint(&uvmexp.anonpages);
1.75 uebayasi 1777: }