Annotation of src/sys/uvm/uvm_fault.c, Revision 1.169
1.167 uebayasi 1: /* $NetBSD$ */
1.1 mrg 2:
3: /*
4: *
5: * Copyright (c) 1997 Charles D. Cranor and Washington University.
6: * All rights reserved.
7: *
8: * Redistribution and use in source and binary forms, with or without
9: * modification, are permitted provided that the following conditions
10: * are met:
11: * 1. Redistributions of source code must retain the above copyright
12: * notice, this list of conditions and the following disclaimer.
13: * 2. Redistributions in binary form must reproduce the above copyright
14: * notice, this list of conditions and the following disclaimer in the
15: * documentation and/or other materials provided with the distribution.
16: * 3. All advertising materials mentioning features or use of this software
17: * must display the following acknowledgement:
18: * This product includes software developed by Charles D. Cranor and
19: * Washington University.
20: * 4. The name of the author may not be used to endorse or promote products
21: * derived from this software without specific prior written permission.
22: *
23: * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
24: * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
25: * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
26: * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
27: * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
28: * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
29: * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
30: * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31: * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
32: * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1.4 mrg 33: *
34: * from: Id: uvm_fault.c,v 1.1.2.23 1998/02/06 05:29:05 chs Exp
1.1 mrg 35: */
36:
37: /*
38: * uvm_fault.c: fault handler
39: */
1.71 lukem 40:
41: #include <sys/cdefs.h>
1.167 uebayasi 42: __KERNEL_RCSID(0, "$NetBSD$");
1.71 lukem 43:
44: #include "opt_uvmhist.h"
1.1 mrg 45:
46: #include <sys/param.h>
47: #include <sys/systm.h>
48: #include <sys/kernel.h>
49: #include <sys/proc.h>
50: #include <sys/malloc.h>
51: #include <sys/mman.h>
52:
53: #include <uvm/uvm.h>
54:
55: /*
56: *
57: * a word on page faults:
58: *
59: * types of page faults we handle:
60: *
61: * CASE 1: upper layer faults CASE 2: lower layer faults
62: *
63: * CASE 1A CASE 1B CASE 2A CASE 2B
64: * read/write1 write>1 read/write +-cow_write/zero
1.63 chs 65: * | | | |
1.1 mrg 66: * +--|--+ +--|--+ +-----+ + | + | +-----+
1.127 uebayasi 67: * amap | V | | ---------> new | | | | ^ |
1.1 mrg 68: * +-----+ +-----+ +-----+ + | + | +--|--+
69: * | | |
70: * +-----+ +-----+ +--|--+ | +--|--+
1.127 uebayasi 71: * uobj | d/c | | d/c | | V | +----+ |
1.1 mrg 72: * +-----+ +-----+ +-----+ +-----+
73: *
74: * d/c = don't care
1.63 chs 75: *
1.1 mrg 76: * case [0]: layerless fault
77: * no amap or uobj is present. this is an error.
78: *
79: * case [1]: upper layer fault [anon active]
80: * 1A: [read] or [write with anon->an_ref == 1]
1.127 uebayasi 81: * I/O takes place in upper level anon and uobj is not touched.
1.1 mrg 82: * 1B: [write with anon->an_ref > 1]
83: * new anon is alloc'd and data is copied off ["COW"]
84: *
85: * case [2]: lower layer fault [uobj]
86: * 2A: [read on non-NULL uobj] or [write to non-copy_on_write area]
87: * I/O takes place directly in object.
88: * 2B: [write to copy_on_write] or [read on NULL uobj]
1.63 chs 89: * data is "promoted" from uobj to a new anon.
1.1 mrg 90: * if uobj is null, then we zero fill.
91: *
92: * we follow the standard UVM locking protocol ordering:
93: *
1.63 chs 94: * MAPS => AMAP => UOBJ => ANON => PAGE QUEUES (PQ)
1.1 mrg 95: * we hold a PG_BUSY page if we unlock for I/O
96: *
97: *
98: * the code is structured as follows:
1.63 chs 99: *
1.1 mrg 100: * - init the "IN" params in the ufi structure
101: * ReFault:
102: * - do lookups [locks maps], check protection, handle needs_copy
103: * - check for case 0 fault (error)
104: * - establish "range" of fault
105: * - if we have an amap lock it and extract the anons
106: * - if sequential advice deactivate pages behind us
107: * - at the same time check pmap for unmapped areas and anon for pages
 108:  *	  that we could map in (and map them in if found)
109: * - check object for resident pages that we could map in
110: * - if (case 2) goto Case2
111: * - >>> handle case 1
112: * - ensure source anon is resident in RAM
113: * - if case 1B alloc new anon and copy from source
114: * - map the correct page in
115: * Case2:
116: * - >>> handle case 2
117: * - ensure source page is resident (if uobj)
118: * - if case 2B alloc new anon and copy from source (could be zero
119: * fill if uobj == NULL)
120: * - map the correct page in
121: * - done!
122: *
123: * note on paging:
124: * if we have to do I/O we place a PG_BUSY page in the correct object,
125: * unlock everything, and do the I/O. when I/O is done we must reverify
126: * the state of the world before assuming that our data structures are
127: * valid. [because mappings could change while the map is unlocked]
128: *
129: * alternative 1: unbusy the page in question and restart the page fault
130: * from the top (ReFault). this is easy but does not take advantage
1.63 chs 131: * of the information that we already have from our previous lookup,
1.1 mrg 132: * although it is possible that the "hints" in the vm_map will help here.
133: *
134: * alternative 2: the system already keeps track of a "version" number of
135: * a map. [i.e. every time you write-lock a map (e.g. to change a
136: * mapping) you bump the version number up by one...] so, we can save
137: * the version number of the map before we release the lock and start I/O.
138: * then when I/O is done we can relock and check the version numbers
 139:  *	to see if anything changed.  this might save us some work over alternative 1
 140:  *	because we don't have to unbusy the page and may need fewer compares(?).
141: *
142: * alternative 3: put in backpointers or a way to "hold" part of a map
143: * in place while I/O is in progress. this could be complex to
144: * implement (especially with structures like amap that can be referenced
145: * by multiple map entries, and figuring out what should wait could be
146: * complex as well...).
147: *
1.125 ad 148: * we use alternative 2. given that we are multi-threaded now we may want
149: * to reconsider the choice.
1.1 mrg 150: */
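/*
 * illustrative sketch of the alternative 2 pattern described above (not
 * part of the build; "map_version" and the helpers are generic
 * placeholders, not the exact uvm_map API):
 *
 *	saved = map_version(map);	(before unlocking for I/O)
 *	unlock everything, perform the I/O, then relock the maps
 *	if (map_version(map) != saved)
 *		restart the fault	(mappings may have changed)
 *	otherwise the results of the earlier lookup are still valid
 */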
151:
152: /*
153: * local data structures
154: */
155:
156: struct uvm_advice {
1.7 mrg 157: int advice;
158: int nback;
159: int nforw;
1.1 mrg 160: };
161:
162: /*
163: * page range array:
1.63 chs 164: * note: index in array must match "advice" value
1.1 mrg 165: * XXX: borrowed numbers from freebsd. do they work well for us?
166: */
167:
1.95 thorpej 168: static const struct uvm_advice uvmadvice[] = {
1.7 mrg 169: { MADV_NORMAL, 3, 4 },
170: { MADV_RANDOM, 0, 0 },
 171: 	{ MADV_SEQUENTIAL, 8, 7 },
1.1 mrg 172: };
173:
1.69 chs 174: #define UVM_MAXRANGE 16 /* must be MAX() of nback+nforw+1 */
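/*
 * worked example: with MADV_NORMAL the table above gives nback = 3 and
 * nforw = 4, so a single fault considers up to 3 + 4 + 1 = 8 pages
 * around the faulting address; MADV_SEQUENTIAL gives 8 + 7 + 1 = 16,
 * which is where UVM_MAXRANGE comes from.
 */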
1.1 mrg 175:
176: /*
177: * private prototypes
178: */
179:
180: /*
181: * inline functions
182: */
183:
184: /*
185: * uvmfault_anonflush: try and deactivate pages in specified anons
186: *
187: * => does not have to deactivate page if it is busy
188: */
189:
1.103 perry 190: static inline void
1.95 thorpej 191: uvmfault_anonflush(struct vm_anon **anons, int n)
1.1 mrg 192: {
1.7 mrg 193: int lcv;
194: struct vm_page *pg;
1.63 chs 195:
1.163 uebayasi 196: for (lcv = 0; lcv < n; lcv++) {
1.7 mrg 197: if (anons[lcv] == NULL)
198: continue;
1.122 ad 199: mutex_enter(&anons[lcv]->an_lock);
1.94 yamt 200: pg = anons[lcv]->an_page;
1.117 yamt 201: if (pg && (pg->flags & PG_BUSY) == 0) {
1.122 ad 202: mutex_enter(&uvm_pageqlock);
1.7 mrg 203: if (pg->wire_count == 0) {
204: uvm_pagedeactivate(pg);
205: }
1.122 ad 206: mutex_exit(&uvm_pageqlock);
1.7 mrg 207: }
1.122 ad 208: mutex_exit(&anons[lcv]->an_lock);
1.7 mrg 209: }
1.1 mrg 210: }
211:
212: /*
213: * normal functions
214: */
215:
216: /*
217: * uvmfault_amapcopy: clear "needs_copy" in a map.
218: *
219: * => called with VM data structures unlocked (usually, see below)
220: * => we get a write lock on the maps and clear needs_copy for a VA
221: * => if we are out of RAM we sleep (waiting for more)
222: */
223:
1.7 mrg 224: static void
1.95 thorpej 225: uvmfault_amapcopy(struct uvm_faultinfo *ufi)
1.1 mrg 226: {
1.69 chs 227: for (;;) {
1.1 mrg 228:
1.7 mrg 229: /*
230: * no mapping? give up.
231: */
1.1 mrg 232:
1.119 thorpej 233: if (uvmfault_lookup(ufi, true) == false)
1.7 mrg 234: return;
1.1 mrg 235:
1.7 mrg 236: /*
237: * copy if needed.
238: */
1.1 mrg 239:
1.7 mrg 240: if (UVM_ET_ISNEEDSCOPY(ufi->entry))
1.108 yamt 241: amap_copy(ufi->map, ufi->entry, AMAP_COPY_NOWAIT,
1.13 chuck 242: ufi->orig_rvaddr, ufi->orig_rvaddr + 1);
1.1 mrg 243:
1.7 mrg 244: /*
245: * didn't work? must be out of RAM. unlock and sleep.
246: */
247:
248: if (UVM_ET_ISNEEDSCOPY(ufi->entry)) {
1.119 thorpej 249: uvmfault_unlockmaps(ufi, true);
1.7 mrg 250: uvm_wait("fltamapcopy");
251: continue;
252: }
253:
254: /*
255: * got it! unlock and return.
256: */
1.63 chs 257:
1.119 thorpej 258: uvmfault_unlockmaps(ufi, true);
1.7 mrg 259: return;
260: }
261: /*NOTREACHED*/
1.1 mrg 262: }
263:
264: /*
265: * uvmfault_anonget: get data in an anon into a non-busy, non-released
266: * page in that anon.
267: *
268: * => maps, amap, and anon locked by caller.
1.57 chs 269: * => if we fail (result != 0) we unlock everything.
1.1 mrg 270: * => if we are successful, we return with everything still locked.
271: * => we don't move the page on the queues [gets moved later]
272: * => if we allocate a new page [we_own], it gets put on the queues.
273: * either way, the result is that the page is on the queues at return time
274: * => for pages which are on loan from a uvm_object (and thus are not
275: * owned by the anon): if successful, we return with the owning object
276: * locked. the caller must unlock this object when it unlocks everything
277: * else.
278: */
279:
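/*
 * a minimal caller sketch (mirroring the switch in uvm_fault_upper()
 * below): 0 means locks are still held and the anon's page is resident;
 * ERESTART means everything was unlocked and the fault must be retried;
 * any other error fails the fault.
 *
 *	error = uvmfault_anonget(ufi, amap, anon);
 *	switch (error) {
 *	case 0:
 *		break;			(locks still held, page resident)
 *	case ERESTART:
 *		return ERESTART;	(caller refaults)
 *	default:
 *		return error;		(fault fails)
 *	}
 */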
1.47 chs 280: int
1.95 thorpej 281: uvmfault_anonget(struct uvm_faultinfo *ufi, struct vm_amap *amap,
282: struct vm_anon *anon)
1.7 mrg 283: {
1.118 thorpej 284: bool we_own; /* we own anon's page? */
285: bool locked; /* did we relock? */
1.7 mrg 286: struct vm_page *pg;
1.58 chs 287: int error;
1.7 mrg 288: UVMHIST_FUNC("uvmfault_anonget"); UVMHIST_CALLED(maphist);
289:
1.122 ad 290: KASSERT(mutex_owned(&anon->an_lock));
1.53 thorpej 291:
1.58 chs 292: error = 0;
1.9 chuck 293: uvmexp.fltanget++;
294: /* bump rusage counters */
1.94 yamt 295: if (anon->an_page)
1.124 ad 296: curlwp->l_ru.ru_minflt++;
1.9 chuck 297: else
1.124 ad 298: curlwp->l_ru.ru_majflt++;
1.7 mrg 299:
1.63 chs 300: /*
1.7 mrg 301: * loop until we get it, or fail.
302: */
303:
1.69 chs 304: for (;;) {
1.119 thorpej 305: we_own = false; /* true if we set PG_BUSY on a page */
1.94 yamt 306: pg = anon->an_page;
1.1 mrg 307:
1.7 mrg 308: /*
309: * if there is a resident page and it is loaned, then anon
 310: 		 * may not own it.  call out to uvm_anon_lockloanpg() to ensure
311: * the real owner of the page has been identified and locked.
312: */
313:
314: if (pg && pg->loan_count)
1.13 chuck 315: pg = uvm_anon_lockloanpg(anon);
1.7 mrg 316:
317: /*
318: * page there? make sure it is not busy/released.
319: */
320:
321: if (pg) {
322:
323: /*
324: * at this point, if the page has a uobject [meaning
325: * we have it on loan], then that uobject is locked
326: * by us! if the page is busy, we drop all the
327: * locks (including uobject) and try again.
328: */
329:
1.69 chs 330: if ((pg->flags & PG_BUSY) == 0) {
1.7 mrg 331: UVMHIST_LOG(maphist, "<- OK",0,0,0,0);
1.57 chs 332: return (0);
1.7 mrg 333: }
334: pg->flags |= PG_WANTED;
335: uvmexp.fltpgwait++;
336:
337: /*
338: * the last unlock must be an atomic unlock+wait on
339: * the owner of page
340: */
1.69 chs 341:
1.7 mrg 342: if (pg->uobject) { /* owner is uobject ? */
343: uvmfault_unlockall(ufi, amap, NULL, anon);
344: UVMHIST_LOG(maphist, " unlock+wait on uobj",0,
345: 0,0,0);
346: UVM_UNLOCK_AND_WAIT(pg,
347: &pg->uobject->vmobjlock,
1.119 thorpej 348: 				    false, "anonget1", 0);
1.7 mrg 349: } else {
350: /* anon owns page */
351: uvmfault_unlockall(ufi, amap, NULL, NULL);
352: UVMHIST_LOG(maphist, " unlock+wait on anon",0,
353: 0,0,0);
 354: 				UVM_UNLOCK_AND_WAIT(pg, &anon->an_lock, false,
 355: 				    "anonget2", 0);
356: }
357: } else {
1.101 yamt 358: #if defined(VMSWAP)
1.63 chs 359:
1.7 mrg 360: /*
361: * no page, we must try and bring it in.
362: */
1.69 chs 363:
1.28 chs 364: pg = uvm_pagealloc(NULL, 0, anon, 0);
1.7 mrg 365: if (pg == NULL) { /* out of RAM. */
366: uvmfault_unlockall(ufi, amap, NULL, anon);
367: uvmexp.fltnoram++;
368: UVMHIST_LOG(maphist, " noram -- UVM_WAIT",0,
369: 0,0,0);
1.93 yamt 370: if (!uvm_reclaimable()) {
371: return ENOMEM;
372: }
1.7 mrg 373: uvm_wait("flt_noram1");
374: } else {
375: /* we set the PG_BUSY bit */
1.119 thorpej 376: we_own = true;
1.7 mrg 377: uvmfault_unlockall(ufi, amap, NULL, anon);
378:
379: /*
380: * we are passing a PG_BUSY+PG_FAKE+PG_CLEAN
381: * page into the uvm_swap_get function with
1.18 chuck 382: * all data structures unlocked. note that
383: * it is ok to read an_swslot here because
384: * we hold PG_BUSY on the page.
1.7 mrg 385: */
386: uvmexp.pageins++;
1.58 chs 387: error = uvm_swap_get(pg, anon->an_swslot,
1.7 mrg 388: PGO_SYNCIO);
389:
390: /*
391: * we clean up after the i/o below in the
392: * "we_own" case
393: */
394: }
1.101 yamt 395: #else /* defined(VMSWAP) */
396: panic("%s: no page", __func__);
397: #endif /* defined(VMSWAP) */
1.7 mrg 398: }
399:
400: /*
401: * now relock and try again
402: */
403:
404: locked = uvmfault_relock(ufi);
1.47 chs 405: if (locked && amap != NULL) {
1.19 chuck 406: amap_lock(amap);
1.7 mrg 407: }
408: if (locked || we_own)
1.122 ad 409: mutex_enter(&anon->an_lock);
1.7 mrg 410:
411: /*
412: * if we own the page (i.e. we set PG_BUSY), then we need
413: * to clean up after the I/O. there are three cases to
414: * consider:
415: * [1] page released during I/O: free anon and ReFault.
1.63 chs 416: * [2] I/O not OK. free the page and cause the fault
1.7 mrg 417: * to fail.
418: * [3] I/O OK! activate the page and sync with the
419: * non-we_own case (i.e. drop anon lock if not locked).
420: */
1.63 chs 421:
1.7 mrg 422: if (we_own) {
1.101 yamt 423: #if defined(VMSWAP)
1.7 mrg 424: if (pg->flags & PG_WANTED) {
1.63 chs 425: wakeup(pg);
1.7 mrg 426: }
1.58 chs 427: if (error) {
1.1 mrg 428:
1.47 chs 429: /*
430: * remove the swap slot from the anon
431: * and mark the anon as having no real slot.
432: * don't free the swap slot, thus preventing
433: * it from being used again.
434: */
1.69 chs 435:
1.84 pk 436: if (anon->an_swslot > 0)
437: uvm_swap_markbad(anon->an_swslot, 1);
1.47 chs 438: anon->an_swslot = SWSLOT_BAD;
439:
1.88 yamt 440: if ((pg->flags & PG_RELEASED) != 0)
441: goto released;
442:
1.47 chs 443: /*
1.7 mrg 444: * note: page was never !PG_BUSY, so it
445: * can't be mapped and thus no need to
446: * pmap_page_protect it...
447: */
1.69 chs 448:
1.122 ad 449: mutex_enter(&uvm_pageqlock);
1.7 mrg 450: uvm_pagefree(pg);
1.122 ad 451: mutex_exit(&uvm_pageqlock);
1.7 mrg 452:
453: if (locked)
454: uvmfault_unlockall(ufi, amap, NULL,
455: anon);
456: else
1.122 ad 457: mutex_exit(&anon->an_lock);
1.7 mrg 458: UVMHIST_LOG(maphist, "<- ERROR", 0,0,0,0);
1.58 chs 459: return error;
1.7 mrg 460: }
1.63 chs 461:
1.88 yamt 462: if ((pg->flags & PG_RELEASED) != 0) {
463: released:
464: KASSERT(anon->an_ref == 0);
465:
466: /*
467: * released while we unlocked amap.
468: */
469:
470: if (locked)
471: uvmfault_unlockall(ufi, amap, NULL,
472: NULL);
473:
474: uvm_anon_release(anon);
475:
476: if (error) {
477: UVMHIST_LOG(maphist,
478: "<- ERROR/RELEASED", 0,0,0,0);
479: return error;
480: }
481:
482: UVMHIST_LOG(maphist, "<- RELEASED", 0,0,0,0);
483: return ERESTART;
484: }
485:
1.7 mrg 486: /*
1.69 chs 487: * we've successfully read the page, activate it.
1.7 mrg 488: */
1.69 chs 489:
1.122 ad 490: mutex_enter(&uvm_pageqlock);
1.7 mrg 491: uvm_pageactivate(pg);
1.122 ad 492: mutex_exit(&uvm_pageqlock);
1.69 chs 493: pg->flags &= ~(PG_WANTED|PG_BUSY|PG_FAKE);
494: UVM_PAGE_OWN(pg, NULL);
1.7 mrg 495: if (!locked)
1.122 ad 496: mutex_exit(&anon->an_lock);
1.101 yamt 497: #else /* defined(VMSWAP) */
498: panic("%s: we_own", __func__);
499: #endif /* defined(VMSWAP) */
1.7 mrg 500: }
501:
502: /*
503: * we were not able to relock. restart fault.
504: */
505:
506: if (!locked) {
507: UVMHIST_LOG(maphist, "<- REFAULT", 0,0,0,0);
1.57 chs 508: return (ERESTART);
1.7 mrg 509: }
510:
511: /*
512: * verify no one has touched the amap and moved the anon on us.
513: */
1.1 mrg 514:
1.47 chs 515: if (ufi != NULL &&
1.63 chs 516: amap_lookup(&ufi->entry->aref,
1.47 chs 517: ufi->orig_rvaddr - ufi->entry->start) != anon) {
1.63 chs 518:
1.7 mrg 519: uvmfault_unlockall(ufi, amap, NULL, anon);
520: UVMHIST_LOG(maphist, "<- REFAULT", 0,0,0,0);
1.57 chs 521: return (ERESTART);
1.7 mrg 522: }
1.63 chs 523:
1.7 mrg 524: /*
1.63 chs 525: * try it again!
1.7 mrg 526: */
1.1 mrg 527:
1.7 mrg 528: uvmexp.fltanretry++;
529: continue;
1.69 chs 530: }
1.7 mrg 531: /*NOTREACHED*/
1.1 mrg 532: }
533:
534: /*
1.106 yamt 535: * uvmfault_promote: promote data to a new anon. used for 1B and 2B.
536: *
537: * 1. allocate an anon and a page.
538: * 2. fill its contents.
539: * 3. put it into amap.
540: *
541: * => if we fail (result != 0) we unlock everything.
542: * => on success, return a new locked anon via 'nanon'.
543: * (*nanon)->an_page will be a resident, locked, dirty page.
544: */
545:
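/*
 * note on the 'spare' argument: when the fault is against kernel_map we
 * cannot allocate an anon while the map is locked, so we unlock,
 * allocate into *spare, and return ERESTART; the retried fault then
 * consumes *spare instead of allocating.  the same slot also saves an
 * already-allocated anon when uvm_pagealloc() fails.
 */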
546: static int
547: uvmfault_promote(struct uvm_faultinfo *ufi,
548: struct vm_anon *oanon,
549: struct vm_page *uobjpage,
550: struct vm_anon **nanon, /* OUT: allocated anon */
551: struct vm_anon **spare)
552: {
553: struct vm_amap *amap = ufi->entry->aref.ar_amap;
554: struct uvm_object *uobj;
555: struct vm_anon *anon;
556: struct vm_page *pg;
557: struct vm_page *opg;
558: int error;
559: UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
560:
561: if (oanon) {
562: /* anon COW */
563: opg = oanon->an_page;
564: KASSERT(opg != NULL);
565: KASSERT(opg->uobject == NULL || opg->loan_count > 0);
566: } else if (uobjpage != PGO_DONTCARE) {
567: /* object-backed COW */
568: opg = uobjpage;
569: } else {
570: /* ZFOD */
571: opg = NULL;
572: }
573: if (opg != NULL) {
574: uobj = opg->uobject;
575: } else {
576: uobj = NULL;
577: }
578:
579: KASSERT(amap != NULL);
580: KASSERT(uobjpage != NULL);
581: KASSERT(uobjpage == PGO_DONTCARE || (uobjpage->flags & PG_BUSY) != 0);
1.120 ad 582: KASSERT(mutex_owned(&amap->am_l));
1.122 ad 583: KASSERT(oanon == NULL || mutex_owned(&oanon->an_lock));
584: KASSERT(uobj == NULL || mutex_owned(&uobj->vmobjlock));
585: #if 0
586: KASSERT(*spare == NULL || !mutex_owned(&(*spare)->an_lock));
587: #endif
1.106 yamt 588:
589: if (*spare != NULL) {
590: anon = *spare;
591: *spare = NULL;
1.122 ad 592: mutex_enter(&anon->an_lock);
1.106 yamt 593: } else if (ufi->map != kernel_map) {
594: anon = uvm_analloc();
595: } else {
596: UVMHIST_LOG(maphist, "kernel_map, unlock and retry", 0,0,0,0);
597:
598: /*
599: * we can't allocate anons with kernel_map locked.
600: */
601:
602: uvm_page_unbusy(&uobjpage, 1);
603: uvmfault_unlockall(ufi, amap, uobj, oanon);
604:
605: *spare = uvm_analloc();
606: if (*spare == NULL) {
607: goto nomem;
608: }
1.122 ad 609: mutex_exit(&(*spare)->an_lock);
1.106 yamt 610: error = ERESTART;
611: goto done;
612: }
613: if (anon) {
614:
615: /*
616: * The new anon is locked.
617: *
618: * if opg == NULL, we want a zero'd, dirty page,
619: * so have uvm_pagealloc() do that for us.
620: */
621:
622: pg = uvm_pagealloc(NULL, 0, anon,
623: (opg == NULL) ? UVM_PGA_ZERO : 0);
624: } else {
625: pg = NULL;
626: }
627:
628: /*
629: * out of memory resources?
630: */
631:
632: if (pg == NULL) {
633: /* save anon for the next try. */
634: if (anon != NULL) {
1.122 ad 635: mutex_exit(&anon->an_lock);
1.106 yamt 636: *spare = anon;
637: }
638:
639: /* unlock and fail ... */
640: uvm_page_unbusy(&uobjpage, 1);
641: uvmfault_unlockall(ufi, amap, uobj, oanon);
642: nomem:
643: if (!uvm_reclaimable()) {
644: UVMHIST_LOG(maphist, "out of VM", 0,0,0,0);
645: uvmexp.fltnoanon++;
646: error = ENOMEM;
647: goto done;
648: }
649:
650: UVMHIST_LOG(maphist, "out of RAM, waiting for more", 0,0,0,0);
651: uvmexp.fltnoram++;
652: uvm_wait("flt_noram5");
653: error = ERESTART;
654: goto done;
655: }
656:
657: /* copy page [pg now dirty] */
658: if (opg) {
659: uvm_pagecopy(opg, pg);
660: }
661:
662: amap_add(&ufi->entry->aref, ufi->orig_rvaddr - ufi->entry->start, anon,
663: oanon != NULL);
664:
665: *nanon = anon;
666: error = 0;
667: done:
668: return error;
669: }
670:
671:
672: /*
1.1 mrg 673: * F A U L T - m a i n e n t r y p o i n t
674: */
675:
676: /*
677: * uvm_fault: page fault handler
678: *
679: * => called from MD code to resolve a page fault
1.63 chs 680: * => VM data structures usually should be unlocked. however, it is
1.1 mrg 681: * possible to call here with the main map locked if the caller
682: * gets a write lock, sets it recusive, and then calls us (c.f.
683: * uvm_map_pageable). this should be avoided because it keeps
684: * the map locked off during I/O.
1.66 thorpej 685: * => MUST NEVER BE CALLED IN INTERRUPT CONTEXT
1.1 mrg 686: */
687:
1.24 mycroft 688: #define MASK(entry) (UVM_ET_ISCOPYONWRITE(entry) ? \
689: ~VM_PROT_WRITE : VM_PROT_ALL)
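/*
 * MASK(entry) gives the protection bits that may safely be handed to
 * pmap_enter() for a lower-layer (uobj) page: on a copy-on-write entry
 * the write bit is masked off, so the first write faults again and
 * promotes the data to an anon; otherwise all bits pass through.
 * e.g. with enter_prot == VM_PROT_READ|VM_PROT_WRITE on a COW entry,
 * enter_prot & MASK(entry) == VM_PROT_READ.
 */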
690:
1.110 drochner 691: /* fault_flag values passed from uvm_fault_wire to uvm_fault_internal */
1.130 uebayasi 692: #define UVM_FAULT_WIRE (1 << 0)
693: #define UVM_FAULT_MAXPROT (1 << 1)
1.110 drochner 694:
1.140 uebayasi 695: struct uvm_faultctx {
696: vm_prot_t access_type;
697: vm_prot_t enter_prot;
1.150 uebayasi 698: vaddr_t startva;
699: int npages;
700: int centeridx;
701: struct vm_anon *anon_spare;
1.146 uebayasi 702: bool wire_mapping;
1.140 uebayasi 703: bool narrow;
1.146 uebayasi 704: bool wire_paging;
1.140 uebayasi 705: bool maxprot;
706: bool cow_now;
1.168 uebayasi 707: bool promote;
1.140 uebayasi 708: };
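/*
 * field notes (derived from the initializers and uses below):
 *
 *	access_type	access type the faulting code asked for
 *	enter_prot	protection we will pass to pmap_enter()
 *	startva, npages, centeridx
 *			range of interest; centeridx indexes the actual
 *			faulting page within that range
 *	anon_spare	anon carried over across ERESTART retries
 *	wire_mapping	enter mappings wired (UVM_FAULT_WIRE or a wired
 *			map entry)
 *	narrow		don't fault in neighborhood pages
 *	wire_paging	wire the faulted page itself (UVM_FAULT_WIRE)
 *	maxprot		check access against max_protection
 *	cow_now		this access requires a private copy now
 *	promote		data must be promoted to a new anon
 */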
709:
1.163 uebayasi 710: static inline int uvm_fault_check(
711: struct uvm_faultinfo *, struct uvm_faultctx *,
712: struct vm_anon ***, struct vm_page ***);
713:
714: static int uvm_fault_upper(
715: struct uvm_faultinfo *, struct uvm_faultctx *,
716: struct vm_anon **);
717: static inline int uvm_fault_upper_lookup(
718: struct uvm_faultinfo *, struct uvm_faultctx *,
719: struct vm_anon **, struct vm_page **);
720: static inline void uvm_fault_upper_neighbor(
721: struct uvm_faultinfo *, struct uvm_faultctx *,
722: vaddr_t, struct vm_page *, bool);
723: static inline int uvm_fault_upper_loan(
724: struct uvm_faultinfo *, struct uvm_faultctx *,
725: struct vm_anon *, struct uvm_object **);
726: static inline int uvm_fault_upper_promote(
727: struct uvm_faultinfo *, struct uvm_faultctx *,
728: struct uvm_object *, struct vm_anon *);
729: static inline int uvm_fault_upper_direct(
730: struct uvm_faultinfo *, struct uvm_faultctx *,
731: struct uvm_object *, struct vm_anon *);
732: static int uvm_fault_upper_enter(
733: struct uvm_faultinfo *, struct uvm_faultctx *,
734: struct uvm_object *, struct vm_anon *,
735: struct vm_page *, struct vm_anon *);
1.169 ! uebayasi 736: static inline void uvm_fault_upper_done(
1.163 uebayasi 737: struct uvm_faultinfo *, struct uvm_faultctx *,
738: struct uvm_object *, struct vm_anon *,
739: struct vm_page *, struct vm_anon *);
740:
741: static int uvm_fault_lower(
742: struct uvm_faultinfo *, struct uvm_faultctx *,
743: struct vm_page **);
744: static inline int uvm_fault_lower_lookup(
745: struct uvm_faultinfo *, struct uvm_faultctx *,
746: struct vm_page **);
747: static inline void uvm_fault_lower_neighbor(
748: struct uvm_faultinfo *, struct uvm_faultctx *,
749: vaddr_t, struct vm_page *, bool);
750: static inline int uvm_fault_lower1(
751: struct uvm_faultinfo *, struct uvm_faultctx *,
752: struct uvm_object *, struct vm_page *);
753: static inline int uvm_fault_lower_io(
754: struct uvm_faultinfo *, struct uvm_faultctx *,
755: struct uvm_object **, struct vm_page **);
756: static inline int uvm_fault_lower_direct(
757: struct uvm_faultinfo *, struct uvm_faultctx *,
758: struct uvm_object *, struct vm_page *);
759: static inline int uvm_fault_lower_direct_loan(
760: struct uvm_faultinfo *, struct uvm_faultctx *,
761: struct uvm_object *, struct vm_page **,
762: struct vm_page **);
763: static inline int uvm_fault_lower_promote(
764: struct uvm_faultinfo *, struct uvm_faultctx *,
765: struct uvm_object *, struct vm_page *);
766: static int uvm_fault_lower_enter(
767: struct uvm_faultinfo *, struct uvm_faultctx *,
768: struct uvm_object *,
769: struct vm_anon *, struct vm_page *,
770: struct vm_page *);
1.169 ! uebayasi 771: static inline void uvm_fault_lower_done(
1.163 uebayasi 772: struct uvm_faultinfo *, struct uvm_faultctx *,
773: struct uvm_object *,
774: struct vm_anon *, struct vm_page *);
1.138 uebayasi 775:
1.7 mrg 776: int
1.110 drochner 777: uvm_fault_internal(struct vm_map *orig_map, vaddr_t vaddr,
778: vm_prot_t access_type, int fault_flag)
1.1 mrg 779: {
1.7 mrg 780: struct uvm_faultinfo ufi;
1.140 uebayasi 781: struct uvm_faultctx flt = {
782: .access_type = access_type,
1.146 uebayasi 783:
 784: 		/* don't look for neighborhood pages on "wire" fault */
785: .narrow = (fault_flag & UVM_FAULT_WIRE) != 0,
786:
787: /* "wire" fault causes wiring of both mapping and paging */
788: .wire_mapping = (fault_flag & UVM_FAULT_WIRE) != 0,
789: .wire_paging = (fault_flag & UVM_FAULT_WIRE) != 0,
790:
1.140 uebayasi 791: .maxprot = (fault_flag & UVM_FAULT_MAXPROT) != 0,
792: };
1.137 uebayasi 793: struct vm_anon *anons_store[UVM_MAXRANGE], **anons;
1.141 uebayasi 794: struct vm_page *pages_store[UVM_MAXRANGE], **pages;
1.140 uebayasi 795: int error;
1.7 mrg 796: UVMHIST_FUNC("uvm_fault"); UVMHIST_CALLED(maphist);
1.1 mrg 797:
1.110 drochner 798: UVMHIST_LOG(maphist, "(map=0x%x, vaddr=0x%x, at=%d, ff=%d)",
799: orig_map, vaddr, access_type, fault_flag);
1.1 mrg 800:
1.7 mrg 801: uvmexp.faults++; /* XXX: locking? */
802:
803: /*
804: * init the IN parameters in the ufi
805: */
1.1 mrg 806:
1.7 mrg 807: ufi.orig_map = orig_map;
808: ufi.orig_rvaddr = trunc_page(vaddr);
809: ufi.orig_size = PAGE_SIZE; /* can't get any smaller than this */
810:
1.142 uebayasi 811: error = ERESTART;
812: while (error == ERESTART) {
1.143 uebayasi 813: anons = anons_store;
814: pages = pages_store;
1.1 mrg 815:
1.143 uebayasi 816: error = uvm_fault_check(&ufi, &flt, &anons, &pages);
817: if (error != 0)
818: continue;
1.141 uebayasi 819:
1.143 uebayasi 820: error = uvm_fault_upper_lookup(&ufi, &flt, anons, pages);
821: if (error != 0)
822: continue;
1.138 uebayasi 823:
1.144 uebayasi 824: 		if (pages[flt.centeridx] == PGO_DONTCARE) {
1.148 uebayasi 825: 			error = uvm_fault_upper(&ufi, &flt, anons);
1.167 uebayasi 826: 		} else {
827: struct uvm_object * const uobj = ufi.entry->object.uvm_obj;
828:
829: if (uobj && uobj->pgops->pgo_fault != NULL) {
830: mutex_enter(&uobj->vmobjlock);
831: /* locked: maps(read), amap (if there), uobj */
832: error = uobj->pgops->pgo_fault(&ufi, flt.startva, pages, flt.npages,
833: flt.centeridx, flt.access_type, PGO_LOCKED|PGO_SYNCIO);
834:
835: /* locked: nothing, pgo_fault has unlocked everything */
836:
837: /*
838: * object fault routine responsible for pmap_update().
839: */
840: } else {
841: error = uvm_fault_lower(&ufi, &flt, pages);
842: }
843: }
1.142 uebayasi 844: }
1.138 uebayasi 845:
1.140 uebayasi 846: if (flt.anon_spare != NULL) {
847: flt.anon_spare->an_ref--;
848: uvm_anfree(flt.anon_spare);
1.138 uebayasi 849: }
850: return error;
1.141 uebayasi 851: }
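/*
 * note: MD code normally reaches uvm_fault_internal() through thin
 * wrappers that supply fault_flag: in this tree the uvm_fault() macro
 * passes 0 and uvm_fault_wire() passes UVM_FAULT_WIRE (and possibly
 * UVM_FAULT_MAXPROT); see uvm_extern.h and the wiring code later in
 * this file.
 */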
1.138 uebayasi 852:
1.144 uebayasi 853: static int
1.141 uebayasi 854: uvm_fault_check(
855: struct uvm_faultinfo *ufi, struct uvm_faultctx *flt,
856: struct vm_anon ***ranons, struct vm_page ***rpages)
857: {
858: struct vm_amap *amap;
859: struct uvm_object *uobj;
1.137 uebayasi 860: vm_prot_t check_prot;
861: int nback, nforw;
1.164 mlelstv 862: UVMHIST_FUNC("uvm_fault_check"); UVMHIST_CALLED(maphist);
1.137 uebayasi 863:
1.7 mrg 864: /*
865: * lookup and lock the maps
866: */
867:
1.141 uebayasi 868: if (uvmfault_lookup(ufi, false) == false) {
1.164 mlelstv 869: UVMHIST_LOG(maphist, "<- no mapping @ 0x%x", ufi->orig_rvaddr, 0,0,0);
1.141 uebayasi 870: return EFAULT;
1.7 mrg 871: }
872: /* locked: maps(read) */
873:
1.61 thorpej 874: #ifdef DIAGNOSTIC
1.141 uebayasi 875: if ((ufi->map->flags & VM_MAP_PAGEABLE) == 0) {
1.61 thorpej 876: printf("Page fault on non-pageable map:\n");
1.141 uebayasi 877: printf("ufi->map = %p\n", ufi->map);
878: printf("ufi->orig_map = %p\n", ufi->orig_map);
879: printf("ufi->orig_rvaddr = 0x%lx\n", (u_long) ufi->orig_rvaddr);
880: panic("uvm_fault: (ufi->map->flags & VM_MAP_PAGEABLE) == 0");
1.61 thorpej 881: }
882: #endif
1.58 chs 883:
1.7 mrg 884: /*
885: * check protection
886: */
887:
1.141 uebayasi 888: check_prot = flt->maxprot ?
889: ufi->entry->max_protection : ufi->entry->protection;
890: if ((check_prot & flt->access_type) != flt->access_type) {
1.7 mrg 891: UVMHIST_LOG(maphist,
892: "<- protection failure (prot=0x%x, access=0x%x)",
1.141 uebayasi 893: ufi->entry->protection, flt->access_type, 0, 0);
894: uvmfault_unlockmaps(ufi, false);
895: return EACCES;
1.7 mrg 896: }
897:
898: /*
899: * "enter_prot" is the protection we want to enter the page in at.
900: * for certain pages (e.g. copy-on-write pages) this protection can
1.141 uebayasi 901: * be more strict than ufi->entry->protection. "wired" means either
1.7 mrg 902: * the entry is wired or we are fault-wiring the pg.
903: */
904:
1.141 uebayasi 905: flt->enter_prot = ufi->entry->protection;
1.146 uebayasi 906: if (VM_MAPENT_ISWIRED(ufi->entry))
907: flt->wire_mapping = true;
908:
909: if (flt->wire_mapping) {
1.141 uebayasi 910: flt->access_type = flt->enter_prot; /* full access for wired */
911: flt->cow_now = (check_prot & VM_PROT_WRITE) != 0;
1.73 chs 912: } else {
1.141 uebayasi 913: flt->cow_now = (flt->access_type & VM_PROT_WRITE) != 0;
1.73 chs 914: }
1.7 mrg 915:
1.168 uebayasi 916: flt->promote = false;
917:
1.7 mrg 918: /*
919: * handle "needs_copy" case. if we need to copy the amap we will
920: * have to drop our readlock and relock it with a write lock. (we
921: * need a write lock to change anything in a map entry [e.g.
922: * needs_copy]).
923: */
924:
1.141 uebayasi 925: if (UVM_ET_ISNEEDSCOPY(ufi->entry)) {
926: if (flt->cow_now || (ufi->entry->object.uvm_obj == NULL)) {
927: KASSERT(!flt->maxprot);
1.7 mrg 928: /* need to clear */
929: UVMHIST_LOG(maphist,
930: " need to clear needs_copy and refault",0,0,0,0);
1.141 uebayasi 931: uvmfault_unlockmaps(ufi, false);
932: uvmfault_amapcopy(ufi);
1.7 mrg 933: uvmexp.fltamcopy++;
1.141 uebayasi 934: return ERESTART;
1.7 mrg 935:
936: } else {
937:
938: /*
939: * ensure that we pmap_enter page R/O since
940: * needs_copy is still true
941: */
1.72 chs 942:
1.141 uebayasi 943: flt->enter_prot &= ~VM_PROT_WRITE;
1.7 mrg 944: }
945: }
946:
947: /*
948: * identify the players
949: */
950:
1.141 uebayasi 951: amap = ufi->entry->aref.ar_amap; /* upper layer */
952: uobj = ufi->entry->object.uvm_obj; /* lower layer */
1.7 mrg 953:
954: /*
955: * check for a case 0 fault. if nothing backing the entry then
956: * error now.
957: */
958:
959: if (amap == NULL && uobj == NULL) {
1.141 uebayasi 960: uvmfault_unlockmaps(ufi, false);
1.7 mrg 961: UVMHIST_LOG(maphist,"<- no backing store, no overlay",0,0,0,0);
1.141 uebayasi 962: return EFAULT;
1.7 mrg 963: }
1.1 mrg 964:
1.7 mrg 965: /*
966: * establish range of interest based on advice from mapper
967: * and then clip to fit map entry. note that we only want
1.63 chs 968: * to do this the first time through the fault. if we
1.7 mrg 969: * ReFault we will disable this by setting "narrow" to true.
970: */
1.1 mrg 971:
1.141 uebayasi 972: if (flt->narrow == false) {
1.7 mrg 973:
974: /* wide fault (!narrow) */
1.141 uebayasi 975: KASSERT(uvmadvice[ufi->entry->advice].advice ==
976: ufi->entry->advice);
977: nback = MIN(uvmadvice[ufi->entry->advice].nback,
978: (ufi->orig_rvaddr - ufi->entry->start) >> PAGE_SHIFT);
979: flt->startva = ufi->orig_rvaddr - (nback << PAGE_SHIFT);
980: nforw = MIN(uvmadvice[ufi->entry->advice].nforw,
981: ((ufi->entry->end - ufi->orig_rvaddr) >>
1.15 chs 982: PAGE_SHIFT) - 1);
1.7 mrg 983: /*
984: * note: "-1" because we don't want to count the
985: * faulting page as forw
986: */
1.141 uebayasi 987: flt->npages = nback + nforw + 1;
988: flt->centeridx = nback;
1.7 mrg 989:
1.141 uebayasi 990: flt->narrow = true; /* ensure only once per-fault */
1.7 mrg 991:
992: } else {
1.63 chs 993:
1.7 mrg 994: /* narrow fault! */
995: nback = nforw = 0;
1.141 uebayasi 996: flt->startva = ufi->orig_rvaddr;
997: flt->npages = 1;
998: flt->centeridx = 0;
1.1 mrg 999:
1.7 mrg 1000: }
1.131 uebayasi 1001: /* offset from entry's start to pgs' start */
1.141 uebayasi 1002: const voff_t eoff = flt->startva - ufi->entry->start;
1.1 mrg 1003:
1.7 mrg 1004: /* locked: maps(read) */
1.13 chuck 1005: UVMHIST_LOG(maphist, " narrow=%d, back=%d, forw=%d, startva=0x%x",
1.141 uebayasi 1006: flt->narrow, nback, nforw, flt->startva);
1007: UVMHIST_LOG(maphist, " entry=0x%x, amap=0x%x, obj=0x%x", ufi->entry,
1.16 chs 1008: amap, uobj, 0);
1.1 mrg 1009:
1.7 mrg 1010: /*
1011: * if we've got an amap, lock it and extract current anons.
1012: */
1013:
1014: if (amap) {
1.19 chuck 1015: amap_lock(amap);
1.141 uebayasi 1016: amap_lookups(&ufi->entry->aref, eoff, *ranons, flt->npages);
1.7 mrg 1017: } else {
1.141 uebayasi 1018: *ranons = NULL; /* to be safe */
1.7 mrg 1019: }
1020:
1021: /* locked: maps(read), amap(if there) */
1.120 ad 1022: KASSERT(amap == NULL || mutex_owned(&amap->am_l));
1.7 mrg 1023:
1024: /*
1025: * for MADV_SEQUENTIAL mappings we want to deactivate the back pages
1026: * now and then forget about them (for the rest of the fault).
1027: */
1028:
1.141 uebayasi 1029: if (ufi->entry->advice == MADV_SEQUENTIAL && nback != 0) {
1.7 mrg 1030:
1031: UVMHIST_LOG(maphist, " MADV_SEQUENTIAL: flushing backpages",
1032: 0,0,0,0);
1033: /* flush back-page anons? */
1.63 chs 1034: if (amap)
1.141 uebayasi 1035: uvmfault_anonflush(*ranons, nback);
1.7 mrg 1036:
1037: /* flush object? */
1038: if (uobj) {
1.137 uebayasi 1039: voff_t uoff;
1040:
1.141 uebayasi 1041: uoff = ufi->entry->offset + eoff;
1.122 ad 1042: mutex_enter(&uobj->vmobjlock);
1.90 yamt 1043: (void) (uobj->pgops->pgo_put)(uobj, uoff, uoff +
1.15 chs 1044: (nback << PAGE_SHIFT), PGO_DEACTIVATE);
1.7 mrg 1045: }
1046:
1047: /* now forget about the backpages */
1048: if (amap)
1.141 uebayasi 1049: *ranons += nback;
1050: #if 0
1051: /* XXXUEBS */
1052: if (uobj)
1053: *rpages += nback;
1054: #endif
1055: flt->startva += (nback << PAGE_SHIFT);
1056: flt->npages -= nback;
1057: flt->centeridx = 0;
1.7 mrg 1058: }
1.137 uebayasi 1059: /*
1060: * => startva is fixed
1061: * => npages is fixed
1062: */
1063:
1.141 uebayasi 1064: return 0;
1065: }
1066:
1.144 uebayasi 1067: static int
1.141 uebayasi 1068: uvm_fault_upper_lookup(
1069: struct uvm_faultinfo *ufi, struct uvm_faultctx *flt,
1070: struct vm_anon **anons, struct vm_page **pages)
1071: {
1072: struct vm_amap *amap = ufi->entry->aref.ar_amap;
1.137 uebayasi 1073: int lcv;
1074: vaddr_t currva;
1.144 uebayasi 1075: bool shadowed;
1.164 mlelstv 1076: UVMHIST_FUNC("uvm_fault_upper_lookup"); UVMHIST_CALLED(maphist);
1.7 mrg 1077:
1078: /* locked: maps(read), amap(if there) */
1.120 ad 1079: KASSERT(amap == NULL || mutex_owned(&amap->am_l));
1.1 mrg 1080:
1.7 mrg 1081: /*
1082: * map in the backpages and frontpages we found in the amap in hopes
1083: * of preventing future faults. we also init the pages[] array as
1084: * we go.
1085: */
1086:
1.141 uebayasi 1087: currva = flt->startva;
1.144 uebayasi 1088: shadowed = false;
1.163 uebayasi 1089: for (lcv = 0; lcv < flt->npages; lcv++, currva += PAGE_SIZE) {
1.7 mrg 1090: /*
 1091: 		 * don't play with VAs that are already mapped
1.13 chuck 1092: 		 * (except for the center page)
1.7 mrg 1093: */
1.141 uebayasi 1094: if (lcv != flt->centeridx &&
1095: pmap_extract(ufi->orig_map->pmap, currva, NULL)) {
1.52 chs 1096: pages[lcv] = PGO_DONTCARE;
1097: continue;
1.7 mrg 1098: }
1099:
1100: /*
1101: * unmapped or center page. check if any anon at this level.
1102: */
1103: if (amap == NULL || anons[lcv] == NULL) {
1104: pages[lcv] = NULL;
1105: continue;
1106: }
1107:
1108: /*
1109: * check for present page and map if possible. re-activate it.
1110: */
1111:
1112: pages[lcv] = PGO_DONTCARE;
1.141 uebayasi 1113: if (lcv == flt->centeridx) { /* save center for later! */
1.144 uebayasi 1114: shadowed = true;
1.151 uebayasi 1115: } else {
1.161 uebayasi 1116: struct vm_anon *anon = anons[lcv];
1117:
1118: mutex_enter(&anon->an_lock);
1.163 uebayasi 1119: uvm_fault_upper_neighbor(ufi, flt, currva,
1.161 uebayasi 1120: anon->an_page, anon->an_ref > 1);
1121: mutex_exit(&anon->an_lock);
1.7 mrg 1122: }
1.151 uebayasi 1123: }
1124:
1.160 uebayasi 1125: /* locked: maps(read), amap(if there) */
1126: KASSERT(amap == NULL || mutex_owned(&amap->am_l));
1127: /* (shadowed == true) if there is an anon at the faulting address */
1128: UVMHIST_LOG(maphist, " shadowed=%d, will_get=%d", shadowed,
1.164 mlelstv 1129: (ufi->entry->object.uvm_obj && shadowed != false),0,0);
1.160 uebayasi 1130:
1131: /*
1132: * note that if we are really short of RAM we could sleep in the above
1133: * call to pmap_enter with everything locked. bad?
1134: *
1135: * XXX Actually, that is bad; pmap_enter() should just fail in that
1136: * XXX case. --thorpej
1137: */
1.151 uebayasi 1138:
1139: return 0;
1140: }
1141:
1142: static void
1.163 uebayasi 1143: uvm_fault_upper_neighbor(
1.151 uebayasi 1144: struct uvm_faultinfo *ufi, struct uvm_faultctx *flt,
1.161 uebayasi 1145: vaddr_t currva, struct vm_page *pg, bool readonly)
1.151 uebayasi 1146: {
1.164 mlelstv 1147: UVMHIST_FUNC("uvm_fault_upper_neighbor"); UVMHIST_CALLED(maphist);
1.151 uebayasi 1148:
1.152 uebayasi 1149: /* ignore loaned and busy pages */
1.161 uebayasi 1150: if (pg == NULL || pg->loan_count != 0 ||
1151: (pg->flags & PG_BUSY) != 0)
1.152 uebayasi 1152: 		goto uvm_fault_upper_neighbor_done;
1.145 uebayasi 1153:
1.152 uebayasi 1154: mutex_enter(&uvm_pageqlock);
1.161 uebayasi 1155: uvm_pageenqueue(pg);
1.152 uebayasi 1156: mutex_exit(&uvm_pageqlock);
1157: UVMHIST_LOG(maphist,
1158: " MAPPING: n anon: pm=0x%x, va=0x%x, pg=0x%x",
1.161 uebayasi 1159: ufi->orig_map->pmap, currva, pg, 0);
1.152 uebayasi 1160: uvmexp.fltnamap++;
1161:
1162: /*
1.161 uebayasi 1163: * Since this page isn't the page that's actually faulting,
1164: * ignore pmap_enter() failures; it's not critical that we
1165: * enter these right now.
1.152 uebayasi 1166: */
1167:
1168: (void) pmap_enter(ufi->orig_map->pmap, currva,
1.161 uebayasi 1169: VM_PAGE_TO_PHYS(pg),
1170: readonly ? (flt->enter_prot & ~VM_PROT_WRITE) :
1.152 uebayasi 1171: flt->enter_prot,
1.154 uebayasi 1172: PMAP_CANFAIL | (flt->wire_mapping ? PMAP_WIRED : 0));
1.52 chs 1173:
1.145 uebayasi 1174: uvm_fault_upper_neighbor_done:
1.152 uebayasi 1175: pmap_update(ufi->orig_map->pmap);
1.151 uebayasi 1176: }
1177:
1.138 uebayasi 1178: static int
1179: uvm_fault_lower(
1.140 uebayasi 1180: struct uvm_faultinfo *ufi, struct uvm_faultctx *flt,
1.144 uebayasi 1181: struct vm_page **pages)
1.138 uebayasi 1182: {
1.167 uebayasi 1183: #ifdef DIAGNOSTIC
1184: struct vm_amap *amap = ufi->entry->aref.ar_amap;
1185: #endif
1.141 uebayasi 1186: struct uvm_object *uobj = ufi->entry->object.uvm_obj;
1.167 uebayasi 1187: struct vm_page *uobjpage;
1.133 uebayasi 1188:
1.7 mrg 1189: 	/*
 1190: 	 * if the desired page is not shadowed by the amap and the backing
 1191: 	 * object provides its own pgo_fault routine, the fault has already
 1192: 	 * been dispatched to that routine by our caller (uvm_fault_internal)
 1193: 	 * and we never get here.  so at this point we know we will service
 1194: 	 * the fault ourselves, with the usual pgo_get hook.
 1195: 	 */
1.1 mrg 1196:
1.7 mrg 1197: /*
1198: * now, if the desired page is not shadowed by the amap and we have
1199: * a backing object that does not have a special fault routine, then
1200: * we ask (with pgo_get) the object for resident pages that we care
1201: * about and attempt to map them in. we do not let pgo_get block
1202: * (PGO_LOCKED).
1203: */
1204:
1.135 uebayasi 1205: if (uobj == NULL) {
 1206: 		/* zero fill; we don't care about neighboring pages */
1.160 uebayasi 1207: uobjpage = NULL;
1.138 uebayasi 1208: } else {
1.163 uebayasi 1209: uvm_fault_lower_lookup(ufi, flt, pages);
1.160 uebayasi 1210: uobjpage = pages[flt->centeridx];
1.141 uebayasi 1211: }
1.160 uebayasi 1212:
1213: /* locked: maps(read), amap(if there), uobj(if !null), uobjpage(if !null) */
1214: KASSERT(amap == NULL || mutex_owned(&amap->am_l));
1215: KASSERT(uobj == NULL || mutex_owned(&uobj->vmobjlock));
1216: KASSERT(uobjpage == NULL || (uobjpage->flags & PG_BUSY) != 0);
1217:
1218: /*
1219: * note that at this point we are done with any front or back pages.
1220: * we are now going to focus on the center page (i.e. the one we've
1221: * faulted on). if we have faulted on the upper (anon) layer
1222: * [i.e. case 1], then the anon we want is anons[centeridx] (we have
1223: * not touched it yet). if we have faulted on the bottom (uobj)
1224: * layer [i.e. case 2] and the page was both present and available,
1225: * then we've got a pointer to it as "uobjpage" and we've already
1226: * made it BUSY.
1227: */
1228:
1229: /*
1230: * there are four possible cases we must address: 1A, 1B, 2A, and 2B
1231: */
1232:
1233: /*
1234: * redirect case 2: if we are not shadowed, go to case 2.
1235: */
1236:
1.163 uebayasi 1237: return uvm_fault_lower1(ufi, flt, uobj, uobjpage);
1.138 uebayasi 1238: }
1239:
1.141 uebayasi 1240: static int
1.163 uebayasi 1241: uvm_fault_lower_lookup(
1.140 uebayasi 1242: struct uvm_faultinfo *ufi, struct uvm_faultctx *flt,
1.144 uebayasi 1243: struct vm_page **pages)
1.138 uebayasi 1244: {
1.141 uebayasi 1245: struct uvm_object *uobj = ufi->entry->object.uvm_obj;
1.138 uebayasi 1246: int lcv, gotpages;
1247: vaddr_t currva;
1.164 mlelstv 1248: UVMHIST_FUNC("uvm_fault_lower_lookup"); UVMHIST_CALLED(maphist);
1.135 uebayasi 1249:
1.136 uebayasi 1250: mutex_enter(&uobj->vmobjlock);
1251: /* locked (!shadowed): maps(read), amap (if there), uobj */
1252: /*
1253: * the following call to pgo_get does _not_ change locking state
1254: */
1.7 mrg 1255:
1.136 uebayasi 1256: uvmexp.fltlget++;
1.140 uebayasi 1257: gotpages = flt->npages;
1.143 uebayasi 1258: (void) uobj->pgops->pgo_get(uobj,
1259: ufi->entry->offset + flt->startva - ufi->entry->start,
1260: pages, &gotpages, flt->centeridx,
1261: flt->access_type & MASK(ufi->entry), ufi->entry->advice, PGO_LOCKED);
1.1 mrg 1262:
1.136 uebayasi 1263: /*
1264: * check for pages to map, if we got any
1265: */
1.7 mrg 1266:
1.141 uebayasi 1267: if (gotpages == 0) {
1268: pages[flt->centeridx] = NULL;
1269: return 0;
1270: }
1.134 uebayasi 1271:
1.140 uebayasi 1272: currva = flt->startva;
1.143 uebayasi 1273: for (lcv = 0; lcv < flt->npages; lcv++, currva += PAGE_SIZE) {
1.136 uebayasi 1274: struct vm_page *curpg;
1.86 yamt 1275:
1.136 uebayasi 1276: curpg = pages[lcv];
1277: if (curpg == NULL || curpg == PGO_DONTCARE) {
1278: continue;
1279: }
1280: KASSERT(curpg->uobject == uobj);
1.1 mrg 1281:
1.136 uebayasi 1282: /*
1.143 uebayasi 1283: * if center page is resident and not PG_BUSY|PG_RELEASED
1284: * then pgo_get made it PG_BUSY for us and gave us a handle
1285: * to it. remember this page as "uobjpage." (for later use).
1.136 uebayasi 1286: */
1.63 chs 1287:
1.140 uebayasi 1288: if (lcv == flt->centeridx) {
1.136 uebayasi 1289: UVMHIST_LOG(maphist, " got uobjpage "
1290: "(0x%x) with locked get",
1.141 uebayasi 1291: curpg, 0,0,0);
1.161 uebayasi 1292: } else {
1293: bool readonly = (curpg->flags & PG_RDONLY)
1294: || (curpg->loan_count > 0)
1295: || UVM_OBJ_NEEDS_WRITEFAULT(curpg->uobject);
1296:
1.163 uebayasi 1297: uvm_fault_lower_neighbor(ufi, flt,
1.161 uebayasi 1298: currva, curpg, readonly);
1299: }
1.151 uebayasi 1300: }
1301: pmap_update(ufi->orig_map->pmap);
1302: return 0;
1303: }
1304:
1305: static void
1.163 uebayasi 1306: uvm_fault_lower_neighbor(
1.151 uebayasi 1307: struct uvm_faultinfo *ufi, struct uvm_faultctx *flt,
1.161 uebayasi 1308: vaddr_t currva, struct vm_page *pg, bool readonly)
1.151 uebayasi 1309: {
1.164 mlelstv 1310: 	UVMHIST_FUNC("uvm_fault_lower_neighbor"); UVMHIST_CALLED(maphist);
1.63 chs 1311:
1.152 uebayasi 1312: /*
1313: * calling pgo_get with PGO_LOCKED returns us pages which
1314: * are neither busy nor released, so we don't need to check
1315: * for this. we can just directly enter the pages.
1316: */
1.7 mrg 1317:
1.152 uebayasi 1318: mutex_enter(&uvm_pageqlock);
1.161 uebayasi 1319: uvm_pageenqueue(pg);
1.152 uebayasi 1320: mutex_exit(&uvm_pageqlock);
1321: UVMHIST_LOG(maphist,
1.161 uebayasi 1322: " MAPPING: n obj: pm=0x%x, va=0x%x, pg=0x%x",
1323: ufi->orig_map->pmap, currva, pg, 0);
1.152 uebayasi 1324: uvmexp.fltnomap++;
1325:
1326: /*
1327: * Since this page isn't the page that's actually faulting,
1328: * ignore pmap_enter() failures; it's not critical that we
1329: * enter these right now.
1330: */
1.161 uebayasi 1331: KASSERT((pg->flags & PG_PAGEOUT) == 0);
1332: KASSERT((pg->flags & PG_RELEASED) == 0);
1333: KASSERT(!UVM_OBJ_IS_CLEAN(pg->uobject) ||
1334: (pg->flags & PG_CLEAN) != 0);
1.152 uebayasi 1335:
1336: (void) pmap_enter(ufi->orig_map->pmap, currva,
1.161 uebayasi 1337: VM_PAGE_TO_PHYS(pg),
1338: readonly ? (flt->enter_prot & ~VM_PROT_WRITE) :
1.152 uebayasi 1339: flt->enter_prot & MASK(ufi->entry),
1340: PMAP_CANFAIL | (flt->wire_mapping ? PMAP_WIRED : 0));
1.136 uebayasi 1341:
1.152 uebayasi 1342: /*
1343: * NOTE: page can't be PG_WANTED or PG_RELEASED because we've
1344: * held the lock the whole time we've had the handle.
1345: */
1.161 uebayasi 1346: KASSERT((pg->flags & PG_WANTED) == 0);
1347: KASSERT((pg->flags & PG_RELEASED) == 0);
1.52 chs 1348:
1.161 uebayasi 1349: pg->flags &= ~(PG_BUSY);
1350: UVM_PAGE_OWN(pg, NULL);
1.138 uebayasi 1351: }
1.134 uebayasi 1352:
1.138 uebayasi 1353: static int
1354: uvm_fault_upper(
1.140 uebayasi 1355: struct uvm_faultinfo *ufi, struct uvm_faultctx *flt,
1.148 uebayasi 1356: struct vm_anon **anons)
1.138 uebayasi 1357: {
1.148 uebayasi 1358: struct vm_amap * const amap = ufi->entry->aref.ar_amap;
1359: struct vm_anon * const anon = anons[flt->centeridx];
1360: struct uvm_object *uobj;
1.138 uebayasi 1361: int error;
1.164 mlelstv 1362: UVMHIST_FUNC("uvm_fault_upper"); UVMHIST_CALLED(maphist);
1.137 uebayasi 1363:
1.7 mrg 1364: /* locked: maps(read), amap */
1.133 uebayasi 1365: KASSERT(mutex_owned(&amap->am_l));
1.7 mrg 1366:
1367: /*
1368: * handle case 1: fault on an anon in our amap
1369: */
1370:
1371: UVMHIST_LOG(maphist, " case 1 fault: anon=0x%x", anon, 0,0,0);
1.122 ad 1372: mutex_enter(&anon->an_lock);
1.7 mrg 1373:
1374: /* locked: maps(read), amap, anon */
1.120 ad 1375: KASSERT(mutex_owned(&amap->am_l));
1.122 ad 1376: KASSERT(mutex_owned(&anon->an_lock));
1.7 mrg 1377:
1378: /*
1379: * no matter if we have case 1A or case 1B we are going to need to
1380: * have the anon's memory resident. ensure that now.
1381: */
1382:
1383: /*
1.47 chs 1384: * let uvmfault_anonget do the dirty work.
1.51 thorpej 1385: * if it fails (!OK) it will unlock everything for us.
1.47 chs 1386: * if it succeeds, locks are still valid and locked.
1.7 mrg 1387: * also, if it is OK, then the anon's page is on the queues.
1388: * if the page is on loan from a uvm_object, then anonget will
1389: * lock that object for us if it does not fail.
1390: */
1391:
1.138 uebayasi 1392: error = uvmfault_anonget(ufi, amap, anon);
1.58 chs 1393: switch (error) {
1.57 chs 1394: case 0:
1.63 chs 1395: break;
1.7 mrg 1396:
1.57 chs 1397: case ERESTART:
1.139 uebayasi 1398: return ERESTART;
1.7 mrg 1399:
1.57 chs 1400: case EAGAIN:
1.128 pooka 1401: kpause("fltagain1", false, hz/2, NULL);
1.139 uebayasi 1402: return ERESTART;
1.51 thorpej 1403:
1404: default:
1.138 uebayasi 1405: return error;
1.1 mrg 1406: }
1.7 mrg 1407:
1408: /*
1409: * uobj is non null if the page is on loan from an object (i.e. uobj)
1410: */
1411:
1.94 yamt 1412: uobj = anon->an_page->uobject; /* locked by anonget if !NULL */
1.7 mrg 1413:
1414: /* locked: maps(read), amap, anon, uobj(if one) */
1.120 ad 1415: KASSERT(mutex_owned(&amap->am_l));
1.122 ad 1416: KASSERT(mutex_owned(&anon->an_lock));
1417: KASSERT(uobj == NULL || mutex_owned(&uobj->vmobjlock));
1.7 mrg 1418:
1419: /*
1.63 chs 1420: * special handling for loaned pages
1.7 mrg 1421: */
1.52 chs 1422:
1.94 yamt 1423: if (anon->an_page->loan_count) {
1.148 uebayasi 1424: error = uvm_fault_upper_loan(ufi, flt, anon, &uobj);
1425: if (error != 0)
1426: return error;
1427: }
1.160 uebayasi 1428:
1429: /*
1430: * if we are case 1B then we will need to allocate a new blank
1431: * anon to transfer the data into. note that we have a lock
1432: * on anon, so no one can busy or release the page until we are done.
1433: * also note that the ref count can't drop to zero here because
1434: * it is > 1 and we are only dropping one ref.
1435: *
1436: * in the (hopefully very rare) case that we are out of RAM we
1437: * will unlock, wait for more RAM, and refault.
1438: *
1439: * if we are out of anon VM we kill the process (XXX: could wait?).
1440: */
1441:
1442: if (flt->cow_now && anon->an_ref > 1) {
1.168 uebayasi 1443: flt->promote = true;
1.160 uebayasi 1444: error = uvm_fault_upper_promote(ufi, flt, uobj, anon);
1445: } else {
1446: error = uvm_fault_upper_direct(ufi, flt, uobj, anon);
1447: }
1448: return error;
1.148 uebayasi 1449: }
1450:
1451: static int
1452: uvm_fault_upper_loan(
1453: struct uvm_faultinfo *ufi, struct uvm_faultctx *flt,
1454: struct vm_anon *anon, struct uvm_object **ruobj)
1455: {
1.149 uebayasi 1456: struct vm_amap * const amap = ufi->entry->aref.ar_amap;
1.151 uebayasi 1457: int error = 0;
1.149 uebayasi 1458:
1459: if (!flt->cow_now) {
1.7 mrg 1460:
1.149 uebayasi 1461: /*
1462: * for read faults on loaned pages we just cap the
1463: * protection at read-only.
1464: */
1.63 chs 1465:
1.149 uebayasi 1466: flt->enter_prot = flt->enter_prot & ~VM_PROT_WRITE;
1.7 mrg 1467:
1.149 uebayasi 1468: } else {
1469: /*
1470: * note that we can't allow writes into a loaned page!
1471: *
1472: * if we have a write fault on a loaned page in an
1473: * anon then we need to look at the anon's ref count.
1474: * if it is greater than one then we are going to do
1475: * a normal copy-on-write fault into a new anon (this
1476: * is not a problem). however, if the reference count
1477: * is one (a case where we would normally allow a
1478: * write directly to the page) then we need to kill
1479: * the loan before we continue.
1480: */
1481:
1482: /* >1 case is already ok */
1483: if (anon->an_ref == 1) {
1.155 uebayasi 1484: error = uvm_loanbreak_anon(anon, *ruobj);
1.151 uebayasi 1485: if (error != 0) {
1486: uvmfault_unlockall(ufi, amap, *ruobj, anon);
1487: uvm_wait("flt_noram2");
1488: return ERESTART;
1489: }
1.155 uebayasi 1490: 			/* if we were a loan receiver, uobj is gone */
1491: if (*ruobj)
1492: *ruobj = NULL;
1.151 uebayasi 1493: }
1494: }
1495: return error;
1496: }
1497:
1.148 uebayasi 1498: static int
1499: uvm_fault_upper_promote(
1500: struct uvm_faultinfo *ufi, struct uvm_faultctx *flt,
1501: struct uvm_object *uobj, struct vm_anon *anon)
1502: {
1.149 uebayasi 1503: struct vm_anon * const oanon = anon;
1504: struct vm_page *pg;
1505: int error;
1.164 mlelstv 1506: UVMHIST_FUNC("uvm_fault_upper_promote"); UVMHIST_CALLED(maphist);
1.149 uebayasi 1507:
1508: UVMHIST_LOG(maphist, " case 1B: COW fault",0,0,0,0);
1509: uvmexp.flt_acow++;
1510:
1511: error = uvmfault_promote(ufi, oanon, PGO_DONTCARE,
1512: &anon, &flt->anon_spare);
1513: switch (error) {
1514: case 0:
1515: break;
1516: case ERESTART:
1517: return ERESTART;
1518: default:
1519: return error;
1520: }
1.7 mrg 1521:
1.149 uebayasi 1522: pg = anon->an_page;
1523: mutex_enter(&uvm_pageqlock);
1524: uvm_pageactivate(pg);
1525: mutex_exit(&uvm_pageqlock);
1526: pg->flags &= ~(PG_BUSY|PG_FAKE);
1527: UVM_PAGE_OWN(pg, NULL);
1.7 mrg 1528:
1.149 uebayasi 1529: /* deref: can not drop to zero here by defn! */
1530: oanon->an_ref--;
1.53 thorpej 1531:
1.149 uebayasi 1532: /*
1533: * note: oanon is still locked, as is the new anon. we
1534: * need to check for this later when we unlock oanon; if
1535: * oanon != anon, we'll have to unlock anon, too.
1536: */
1.7 mrg 1537:
1.149 uebayasi 1538: return uvm_fault_upper_enter(ufi, flt, uobj, anon, pg, oanon);
1.148 uebayasi 1539: }
1540:
1541: static int
1542: uvm_fault_upper_direct(
1543: struct uvm_faultinfo *ufi, struct uvm_faultctx *flt,
1544: struct uvm_object *uobj, struct vm_anon *anon)
1545: {
1.149 uebayasi 1546: struct vm_anon * const oanon = anon;
1547: struct vm_page *pg;
1.52 chs 1548:
1.149 uebayasi 1549: uvmexp.flt_anon++;
1550: pg = anon->an_page;
1551: if (anon->an_ref > 1) /* disallow writes to ref > 1 anons */
1552: flt->enter_prot = flt->enter_prot & ~VM_PROT_WRITE;
1.7 mrg 1553:
1.149 uebayasi 1554: return uvm_fault_upper_enter(ufi, flt, uobj, anon, pg, oanon);
1.148 uebayasi 1555: }
1556:
1557: static int
1558: uvm_fault_upper_enter(
1559: struct uvm_faultinfo *ufi, struct uvm_faultctx *flt,
1560: struct uvm_object *uobj, struct vm_anon *anon, struct vm_page *pg,
1561: struct vm_anon *oanon)
1562: {
1563: struct vm_amap * const amap = ufi->entry->aref.ar_amap;
1.164 mlelstv 1564: UVMHIST_FUNC("uvm_fault_upper_enter"); UVMHIST_CALLED(maphist);
1.7 mrg 1565:
1.53 thorpej 1566: /* locked: maps(read), amap, oanon, anon (if different from oanon) */
1.120 ad 1567: KASSERT(mutex_owned(&amap->am_l));
1.122 ad 1568: KASSERT(mutex_owned(&anon->an_lock));
1569: KASSERT(mutex_owned(&oanon->an_lock));
1.7 mrg 1570:
1571: /*
1.69 chs 1572: * now map the page in.
1.7 mrg 1573: */
1574:
1.168 uebayasi 1575: UVMHIST_LOG(maphist, " MAPPING: anon: pm=0x%x, va=0x%x, pg=0x%x, promote=%d",
1576: ufi->orig_map->pmap, ufi->orig_rvaddr, pg, flt->promote);
1.138 uebayasi 1577: if (pmap_enter(ufi->orig_map->pmap, ufi->orig_rvaddr, VM_PAGE_TO_PHYS(pg),
1.146 uebayasi 1578: flt->enter_prot, flt->access_type | PMAP_CANFAIL | (flt->wire_mapping ? PMAP_WIRED : 0))
1.58 chs 1579: != 0) {
1.69 chs 1580:
1.46 thorpej 1581: /*
1582: * No need to undo what we did; we can simply think of
1583: * this as the pmap throwing away the mapping information.
1584: *
1585: * We do, however, have to go through the ReFault path,
1586: * as the map may change while we're asleep.
1587: */
1.69 chs 1588:
1.53 thorpej 1589: if (anon != oanon)
1.122 ad 1590: mutex_exit(&anon->an_lock);
1.138 uebayasi 1591: uvmfault_unlockall(ufi, amap, uobj, oanon);
1.92 yamt 1592: if (!uvm_reclaimable()) {
1.46 thorpej 1593: UVMHIST_LOG(maphist,
1594: "<- failed. out of VM",0,0,0,0);
1595: /* XXX instrumentation */
1.148 uebayasi 1596: return ENOMEM;
1.46 thorpej 1597: }
1598: /* XXX instrumentation */
1599: uvm_wait("flt_pmfail1");
1.139 uebayasi 1600: return ERESTART;
1.46 thorpej 1601: }
1.7 mrg 1602:
1.169 ! uebayasi 1603: uvm_fault_upper_done(ufi, flt, uobj, anon, pg, oanon);
! 1604:
! 1605: /*
! 1606: * done case 1! finish up by unlocking everything and returning success
! 1607: */
! 1608:
! 1609: if (anon != oanon)
! 1610: mutex_exit(&anon->an_lock);
! 1611: uvmfault_unlockall(ufi, amap, uobj, oanon);
! 1612: pmap_update(ufi->orig_map->pmap);
! 1613: return 0;
1.148 uebayasi 1614: }
1615:
1.169 ! uebayasi 1616: static void
1.148 uebayasi 1617: uvm_fault_upper_done(
1618: struct uvm_faultinfo *ufi, struct uvm_faultctx *flt,
1619: struct uvm_object *uobj, struct vm_anon *anon,
1620: struct vm_page *pg, struct vm_anon *oanon)
1621: {
1622:
1.7 mrg 1623: /*
1.46 thorpej 1624: * ... update the page queues.
1.7 mrg 1625: */
1626:
1.122 ad 1627: mutex_enter(&uvm_pageqlock);
1.146 uebayasi 1628: if (flt->wire_paging) {
1.8 chuck 1629: uvm_pagewire(pg);
1.29 chs 1630:
1631: /*
1632: * since the now-wired page cannot be paged out,
1633: * release its swap resources for others to use.
1634: * since an anon with no swap cannot be PG_CLEAN,
1635: * clear its clean flag now.
1636: */
1637:
1638: pg->flags &= ~(PG_CLEAN);
1.22 chs 1639: uvm_anon_dropswap(anon);
1.7 mrg 1640: } else {
1641: uvm_pageactivate(pg);
1642: }
1.122 ad 1643: mutex_exit(&uvm_pageqlock);
1.138 uebayasi 1644: }
1.1 mrg 1645:
1.138 uebayasi 1646: static int
1.163 uebayasi 1647: uvm_fault_lower1(
1.140 uebayasi 1648: struct uvm_faultinfo *ufi, struct uvm_faultctx *flt,
1.156 uebayasi 1649: struct uvm_object *uobj, struct vm_page *uobjpage)
1.138 uebayasi 1650: {
1.148 uebayasi 1651: #ifdef DIAGNOSTIC
1652: struct vm_amap * const amap = ufi->entry->aref.ar_amap;
1653: #endif
1.138 uebayasi 1654: int error;
1.164 mlelstv 1655: UVMHIST_FUNC("uvm_fault_lower1"); UVMHIST_CALLED(maphist);
1.137 uebayasi 1656:
1.7 mrg 1657: /*
1658: * handle case 2: faulting on backing object or zero fill
1659: */
1660:
1661: /*
1662: * locked:
1663: * maps(read), amap(if there), uobj(if !null), uobjpage(if !null)
1664: */
1.120 ad 1665: KASSERT(amap == NULL || mutex_owned(&amap->am_l));
1.122 ad 1666: KASSERT(uobj == NULL || mutex_owned(&uobj->vmobjlock));
1.120 ad 1667: KASSERT(uobjpage == NULL || (uobjpage->flags & PG_BUSY) != 0);
1.7 mrg 1668:
1669: /*
1670: * note that uobjpage can not be PGO_DONTCARE at this point. we now
1671: * set uobjpage to PGO_DONTCARE if we are doing a zero fill. if we
1672: * have a backing object, check and see if we are going to promote
1673: * the data up to an anon during the fault.
1674: */
1675:
1676: if (uobj == NULL) {
1.63 chs 1677: uobjpage = PGO_DONTCARE;
1.168 uebayasi 1678: flt->promote = true; /* always need anon here */
1.7 mrg 1679: } else {
1.52 chs 1680: KASSERT(uobjpage != PGO_DONTCARE);
1.168 uebayasi 1681: flt->promote = flt->cow_now && UVM_ET_ISCOPYONWRITE(ufi->entry);
1.7 mrg 1682: }
1683: UVMHIST_LOG(maphist, " case 2 fault: promote=%d, zfill=%d",
1.168 uebayasi 1684: flt->promote, (uobj == NULL), 0,0);
1.1 mrg 1685:
1.7 mrg 1686: /*
1.9 chuck 1687: * if uobjpage is not null then we do not need to do I/O to get the
1688: * uobjpage.
1689: *
1.63 chs 1690: * if uobjpage is null, then we need to unlock and ask the pager to
1.7 mrg 1691: * get the data for us. once we have the data, we need to reverify
  1692: 	 * the state of the world. we are currently not holding any resources.
1693: */
1.1 mrg 1694:
1.9 chuck 1695: if (uobjpage) {
1696: /* update rusage counters */
1.124 ad 1697: curlwp->l_ru.ru_minflt++;
1.9 chuck 1698: } else {
1.163 uebayasi 1699: error = uvm_fault_lower_io(ufi, flt, &uobj, &uobjpage);
1.148 uebayasi 1700: if (error != 0)
1701: return error;
1702: }
1.160 uebayasi 1703:
1704: /*
1705: * locked:
1706: * maps(read), amap(if !null), uobj(if !null), uobjpage(if uobj)
1707: */
1708: KASSERT(amap == NULL || mutex_owned(&amap->am_l));
1709: KASSERT(uobj == NULL || mutex_owned(&uobj->vmobjlock));
1710: KASSERT(uobj == NULL || (uobjpage->flags & PG_BUSY) != 0);
1711:
1712: /*
1713: * notes:
1714: * - at this point uobjpage can not be NULL
1715: * - at this point uobjpage can not be PG_RELEASED (since we checked
1716: * for it above)
1717: * - at this point uobjpage could be PG_WANTED (handle later)
1718: */
1719:
1720: KASSERT(uobj == NULL || uobj == uobjpage->uobject);
1721: KASSERT(uobj == NULL || !UVM_OBJ_IS_CLEAN(uobjpage->uobject) ||
1722: (uobjpage->flags & PG_CLEAN) != 0);
1723:
1.168 uebayasi 1724: if (flt->promote == false) {
1.163 uebayasi 1725: error = uvm_fault_lower_direct(ufi, flt, uobj, uobjpage);
1.160 uebayasi 1726: } else {
1.163 uebayasi 1727: error = uvm_fault_lower_promote(ufi, flt, uobj, uobjpage);
1.160 uebayasi 1728: }
1729: return error;
1.148 uebayasi 1730: }
1731:
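/*
 * uvm_fault_lower_io: fetch the lower page from the pager (pgo_get
 * with PGO_SYNCIO), then relock and re-verify the world, since the
 * map, amap and object may all have changed while we slept in I/O.
 */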
1732: static int
1.163 uebayasi 1733: uvm_fault_lower_io(
1.148 uebayasi 1734: struct uvm_faultinfo *ufi, struct uvm_faultctx *flt,
1.156 uebayasi 1735: struct uvm_object **ruobj, struct vm_page **ruobjpage)
1.148 uebayasi 1736: {
1.149 uebayasi 1737: struct vm_amap * const amap = ufi->entry->aref.ar_amap;
1.156 uebayasi 1738: struct uvm_object *uobj = *ruobj;
1.158 uebayasi 1739: struct vm_page *pg;
1.149 uebayasi 1740: bool locked;
1741: int gotpages;
1742: int error;
1743: voff_t uoff;
1.164 mlelstv 1744: UVMHIST_FUNC("uvm_fault_lower_io"); UVMHIST_CALLED(maphist);
1.149 uebayasi 1745:
1746: /* update rusage counters */
1747: curlwp->l_ru.ru_majflt++;
1.137 uebayasi 1748:
1.149 uebayasi 1749: /* locked: maps(read), amap(if there), uobj */
1750: uvmfault_unlockall(ufi, amap, NULL, NULL);
1751: /* locked: uobj */
1.63 chs 1752:
1.149 uebayasi 1753: uvmexp.fltget++;
1754: gotpages = 1;
1.166 mlelstv 1755: pg = NULL;
1.149 uebayasi 1756: uoff = (ufi->orig_rvaddr - ufi->entry->start) + ufi->entry->offset;
1.158 uebayasi 1757: error = uobj->pgops->pgo_get(uobj, uoff, &pg, &gotpages,
1.149 uebayasi 1758: 0, flt->access_type & MASK(ufi->entry), ufi->entry->advice,
1759: PGO_SYNCIO);
1.158 uebayasi 1760: /* locked: pg(if no error) */
1.52 chs 1761:
1.149 uebayasi 1762: /*
1763: * recover from I/O
1764: */
1.1 mrg 1765:
1.149 uebayasi 1766: if (error) {
1767: if (error == EAGAIN) {
1768: UVMHIST_LOG(maphist,
1769: " pgo_get says TRY AGAIN!",0,0,0,0);
1770: kpause("fltagain2", false, hz/2, NULL);
1771: return ERESTART;
1772: }
1.1 mrg 1773:
1.139 uebayasi 1774: #if 0
1.149 uebayasi 1775: KASSERT(error != ERESTART);
1.139 uebayasi 1776: #else
1.149 uebayasi 1777: /* XXXUEBS don't re-fault? */
1778: if (error == ERESTART)
1779: error = EIO;
1.139 uebayasi 1780: #endif
1781:
1.149 uebayasi 1782: UVMHIST_LOG(maphist, "<- pgo_get failed (code %d)",
1783: error, 0,0,0);
1784: return error;
1785: }
1.7 mrg 1786:
1.158 uebayasi 1787: /* locked: pg */
1.7 mrg 1788:
1.165 mlelstv 1789: KASSERT((pg->flags & PG_BUSY) != 0);
1790:
1.149 uebayasi 1791: mutex_enter(&uvm_pageqlock);
1.158 uebayasi 1792: uvm_pageactivate(pg);
1.149 uebayasi 1793: mutex_exit(&uvm_pageqlock);
1.69 chs 1794:
1.149 uebayasi 1795: /*
1796: * re-verify the state of the world by first trying to relock
1797: * the maps. always relock the object.
1798: */
1.7 mrg 1799:
1.149 uebayasi 1800: locked = uvmfault_relock(ufi);
1801: if (locked && amap)
1802: amap_lock(amap);
1.156 uebayasi 1803:
  1804: 	/* the object might have changed while we slept; reload it */
1.158 uebayasi 1805: uobj = pg->uobject;
1.156 uebayasi 1806:
1.149 uebayasi 1807: mutex_enter(&uobj->vmobjlock);
1.63 chs 1808:
1.158 uebayasi 1809: /* locked(locked): maps(read), amap(if !null), uobj, pg */
1810: /* locked(!locked): uobj, pg */
1.7 mrg 1811:
1.149 uebayasi 1812: /*
  1813: 	 * verify that the page has not been released and re-verify
  1814: 	 * that the amap slot is still free. if there is a problem,
1815: * we unlock and clean up.
1816: */
1.7 mrg 1817:
1.158 uebayasi 1818: if ((pg->flags & PG_RELEASED) != 0 ||
1819: (locked && amap && amap_lookup(&ufi->entry->aref,
1.149 uebayasi 1820: ufi->orig_rvaddr - ufi->entry->start))) {
1821: if (locked)
1822: uvmfault_unlockall(ufi, amap, NULL, NULL);
1823: locked = false;
1824: }
1.7 mrg 1825:
1.149 uebayasi 1826: /*
1827: * didn't get the lock? release the page and retry.
1828: */
1.7 mrg 1829:
1.149 uebayasi 1830: if (locked == false) {
1831: UVMHIST_LOG(maphist,
1832: " wasn't able to relock after fault: retry",
1833: 0,0,0,0);
1.158 uebayasi 1834: if (pg->flags & PG_WANTED) {
1835: wakeup(pg);
1836: }
1837: if (pg->flags & PG_RELEASED) {
1.149 uebayasi 1838: uvmexp.fltpgrele++;
1.158 uebayasi 1839: uvm_pagefree(pg);
1.157 uebayasi 1840: mutex_exit(&uobj->vmobjlock);
1.139 uebayasi 1841: return ERESTART;
1.7 mrg 1842: }
1.158 uebayasi 1843: pg->flags &= ~(PG_BUSY|PG_WANTED);
1844: UVM_PAGE_OWN(pg, NULL);
1.149 uebayasi 1845: mutex_exit(&uobj->vmobjlock);
1846: return ERESTART;
1847: }
1.7 mrg 1848:
1.149 uebayasi 1849: /*
1.158 uebayasi 1850: * we have the data in pg which is busy and
1.149 uebayasi  1851: 	 * not released. we are holding the object lock (so the page
1852: * can't be released on us).
1853: */
1.7 mrg 1854:
1.158 uebayasi 1855: /* locked: maps(read), amap(if !null), uobj, pg */
1.148 uebayasi 1856:
1.156 uebayasi 1857: *ruobj = uobj;
1.158 uebayasi 1858: *ruobjpage = pg;
1.148 uebayasi 1859: return 0;
1860: }
1861:
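/*
 * uvm_fault_lower_direct: fault on the object's page without
 * promoting it to an anon; loaned pages need special care on write
 * faults (see uvm_fault_lower_direct_loan()).
 */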
1862: int
1.163 uebayasi 1863: uvm_fault_lower_direct(
1.148 uebayasi 1864: struct uvm_faultinfo *ufi, struct uvm_faultctx *flt,
1.156 uebayasi 1865: struct uvm_object *uobj, struct vm_page *uobjpage)
1.148 uebayasi 1866: {
1.149 uebayasi 1867: struct vm_page *pg;
1868:
1869: /*
1870: * we are not promoting. if the mapping is COW ensure that we
1871: * don't give more access than we should (e.g. when doing a read
1872: * fault on a COPYONWRITE mapping we want to map the COW page in
1873: * R/O even though the entry protection could be R/W).
1874: *
1875: * set "pg" to the page we want to map in (uobjpage, usually)
1876: */
1.1 mrg 1877:
1.149 uebayasi 1878: uvmexp.flt_obj++;
1879: if (UVM_ET_ISCOPYONWRITE(ufi->entry) ||
1880: UVM_OBJ_NEEDS_WRITEFAULT(uobjpage->uobject))
1881: flt->enter_prot &= ~VM_PROT_WRITE;
1882: pg = uobjpage; /* map in the actual object */
1.7 mrg 1883:
1.149 uebayasi 1884: KASSERT(uobjpage != PGO_DONTCARE);
1.7 mrg 1885:
1.149 uebayasi 1886: /*
1887: * we are faulting directly on the page. be careful
1888: * about writing to loaned pages...
1889: */
1890:
  1891: 	if (uobjpage->loan_count) {
		int error;

  1892: 		error = uvm_fault_lower_direct_loan(ufi, flt, uobj,
		    &pg, &uobjpage);
		if (error != 0)
			return error;
  1893: 	}
1894: KASSERT(pg == uobjpage);
1895:
1.163 uebayasi 1896: return uvm_fault_lower_enter(ufi, flt, uobj, NULL, pg, uobjpage);
1.151 uebayasi 1897: }
1898:
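/*
 * uvm_fault_lower_direct_loan: handle a direct fault on a loaned page.
 *
 * a read fault is simply mapped read-only; a write fault must break
 * the loan with uvm_loanbreak(), which can fail for lack of memory,
 * in which case we unlock, wait, and re-fault (ERESTART).
 */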
1899: static int
1.163 uebayasi 1900: uvm_fault_lower_direct_loan(
1.151 uebayasi 1901: struct uvm_faultinfo *ufi, struct uvm_faultctx *flt,
1.156 uebayasi 1902: struct uvm_object *uobj, struct vm_page **rpg, struct vm_page **ruobjpage)
1.151 uebayasi 1903: {
1.152 uebayasi 1904: struct vm_amap * const amap = ufi->entry->aref.ar_amap;
1905: struct vm_page *pg;
1906: struct vm_page *uobjpage = *ruobjpage;
1.164 mlelstv 1907: UVMHIST_FUNC("uvm_fault_lower_direct_loan"); UVMHIST_CALLED(maphist);
1.152 uebayasi 1908:
1909: if (!flt->cow_now) {
  1910: 		/* read fault: cap the protection at readonly */
1912: flt->enter_prot = flt->enter_prot & ~VM_PROT_WRITE;
1913: } else {
1914: /* write fault: must break the loan here */
1915:
1916: pg = uvm_loanbreak(uobjpage);
1917: if (pg == NULL) {
1918:
1919: /*
1920: * drop ownership of page, it can't be released
1921: */
1922:
1923: if (uobjpage->flags & PG_WANTED)
1924: wakeup(uobjpage);
1925: uobjpage->flags &= ~(PG_BUSY|PG_WANTED);
1926: UVM_PAGE_OWN(uobjpage, NULL);
1927:
1928: uvmfault_unlockall(ufi, amap, uobj, NULL);
1929: UVMHIST_LOG(maphist,
1930: " out of RAM breaking loan, waiting",
1931: 0,0,0,0);
1932: uvmexp.fltnoram++;
1933: uvm_wait("flt_noram4");
1934: return ERESTART;
1.69 chs 1935: }
1.152 uebayasi 1936: *rpg = pg;
1937: *ruobjpage = pg;
1938: }
1939: return 0;
1.148 uebayasi 1940: }
1941:
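/*
 * uvm_fault_lower_promote: promote the lower page's data to a new
 * anon ["COW", case 2B], either copying from uobjpage or zero
 * filling if there is no backing object.
 */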
1942: int
1.163 uebayasi 1943: uvm_fault_lower_promote(
1.148 uebayasi 1944: struct uvm_faultinfo *ufi, struct uvm_faultctx *flt,
1.156 uebayasi 1945: struct uvm_object *uobj, struct vm_page *uobjpage)
1.148 uebayasi 1946: {
1.149 uebayasi 1947: struct vm_amap * const amap = ufi->entry->aref.ar_amap;
1948: struct vm_anon *anon;
1949: struct vm_page *pg;
1950: int error;
1.164 mlelstv 1951: UVMHIST_FUNC("uvm_fault_lower_promote"); UVMHIST_CALLED(maphist);
1.63 chs 1952:
1.149 uebayasi 1953: /*
1954: * if we are going to promote the data to an anon we
1955: * allocate a blank anon here and plug it into our amap.
1956: */
  1957: 	KASSERT(amap != NULL);
1.149 uebayasi 1961: error = uvmfault_promote(ufi, NULL, uobjpage,
1962: &anon, &flt->anon_spare);
1963: switch (error) {
1964: case 0:
1965: break;
1966: case ERESTART:
1967: return ERESTART;
1968: default:
1969: return error;
1970: }
1971:
1972: pg = anon->an_page;
1973:
1974: /*
1975: * fill in the data
1976: */
1.105 yamt 1977:
1.149 uebayasi 1978: if (uobjpage != PGO_DONTCARE) {
1979: uvmexp.flt_prcopy++;
1.1 mrg 1980:
1.7 mrg 1981: /*
1.149 uebayasi 1982: * promote to shared amap? make sure all sharing
1983: * procs see it
1.7 mrg 1984: */
1985:
1.149 uebayasi 1986: if ((amap_flags(amap) & AMAP_SHARED) != 0) {
1987: pmap_page_protect(uobjpage, VM_PROT_NONE);
1.7 mrg 1988: /*
1.149 uebayasi 1989: * XXX: PAGE MIGHT BE WIRED!
1.7 mrg 1990: */
1.149 uebayasi 1991: }
1.69 chs 1992:
1.149 uebayasi 1993: /*
1994: * dispose of uobjpage. it can't be PG_RELEASED
1995: * since we still hold the object lock.
1996: * drop handle to uobj as well.
1997: */
1998:
1999: if (uobjpage->flags & PG_WANTED)
2000: /* still have the obj lock */
2001: wakeup(uobjpage);
2002: uobjpage->flags &= ~(PG_BUSY|PG_WANTED);
2003: UVM_PAGE_OWN(uobjpage, NULL);
2004: mutex_exit(&uobj->vmobjlock);
2005: uobj = NULL;
2006:
2007: UVMHIST_LOG(maphist,
2008: " promote uobjpage 0x%x to anon/page 0x%x/0x%x",
2009: uobjpage, anon, pg, 0);
1.63 chs 2010:
1.149 uebayasi 2011: } else {
2012: uvmexp.flt_przero++;
1.7 mrg 2013:
1.149 uebayasi 2014: /*
2015: * Page is zero'd and marked dirty by
2016: * uvmfault_promote().
2017: */
1.52 chs 2018:
1.149 uebayasi  2019: 		UVMHIST_LOG(maphist,"  zero fill anon/page 0x%x/0x%x",
2020: anon, pg, 0, 0);
2021: }
1.148 uebayasi 2022:
1.163 uebayasi 2023: return uvm_fault_lower_enter(ufi, flt, uobj, anon, pg, uobjpage);
1.148 uebayasi 2024: }
2025:
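/*
 * uvm_fault_lower_enter: enter the page (uobjpage or the freshly
 * promoted anon page) into the pmap, handling pmap_enter() failure
 * the same way as the upper layer: unlock everything and ERESTART.
 */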
2026: int
1.163 uebayasi 2027: uvm_fault_lower_enter(
1.148 uebayasi 2028: struct uvm_faultinfo *ufi, struct uvm_faultctx *flt,
2029: struct uvm_object *uobj,
2030: struct vm_anon *anon, struct vm_page *pg, struct vm_page *uobjpage)
2031: {
2032: struct vm_amap * const amap = ufi->entry->aref.ar_amap;
2033: int error;
1.164 mlelstv 2034: UVMHIST_FUNC("uvm_fault_lower_enter"); UVMHIST_CALLED(maphist);
1.7 mrg 2035:
2036: /*
2037: * locked:
1.53 thorpej 2038: * maps(read), amap(if !null), uobj(if !null), uobjpage(if uobj),
2039: * anon(if !null), pg(if anon)
1.7 mrg 2040: *
2041: * note: pg is either the uobjpage or the new page in the new anon
2042: */
1.120 ad 2043: KASSERT(amap == NULL || mutex_owned(&amap->am_l));
1.122 ad 2044: KASSERT(uobj == NULL || mutex_owned(&uobj->vmobjlock));
1.120 ad 2045: KASSERT(uobj == NULL || (uobjpage->flags & PG_BUSY) != 0);
1.122 ad 2046: KASSERT(anon == NULL || mutex_owned(&anon->an_lock));
1.120 ad 2047: KASSERT((pg->flags & PG_BUSY) != 0);
1.7 mrg 2048:
2049: /*
2050: * all resources are present. we can now map it in and free our
2051: * resources.
2052: */
2053:
2054: UVMHIST_LOG(maphist,
1.168 uebayasi 2055: " MAPPING: case2: pm=0x%x, va=0x%x, pg=0x%x, promote=%d",
2056: ufi->orig_map->pmap, ufi->orig_rvaddr, pg, flt->promote);
1.140 uebayasi 2057: KASSERT((flt->access_type & VM_PROT_WRITE) == 0 ||
1.75 chs 2058: (pg->flags & PG_RDONLY) == 0);
1.138 uebayasi 2059: if (pmap_enter(ufi->orig_map->pmap, ufi->orig_rvaddr, VM_PAGE_TO_PHYS(pg),
1.140 uebayasi 2060: pg->flags & PG_RDONLY ? flt->enter_prot & ~VM_PROT_WRITE : flt->enter_prot,
1.146 uebayasi 2061: flt->access_type | PMAP_CANFAIL | (flt->wire_mapping ? PMAP_WIRED : 0)) != 0) {
1.52 chs 2062:
1.46 thorpej 2063: /*
2064: * No need to undo what we did; we can simply think of
2065: * this as the pmap throwing away the mapping information.
2066: *
2067: * We do, however, have to go through the ReFault path,
2068: * as the map may change while we're asleep.
2069: */
1.52 chs 2070:
1.46 thorpej 2071: if (pg->flags & PG_WANTED)
1.69 chs 2072: wakeup(pg);
1.46 thorpej 2073:
1.63 chs 2074: /*
1.46 thorpej 2075: * note that pg can't be PG_RELEASED since we did not drop
2076: * the object lock since the last time we checked.
2077: */
1.111 yamt 2078: KASSERT((pg->flags & PG_RELEASED) == 0);
1.63 chs 2079:
1.46 thorpej 2080: pg->flags &= ~(PG_BUSY|PG_FAKE|PG_WANTED);
2081: UVM_PAGE_OWN(pg, NULL);
1.138 uebayasi 2082: uvmfault_unlockall(ufi, amap, uobj, anon);
1.92 yamt 2083: if (!uvm_reclaimable()) {
1.46 thorpej 2084: UVMHIST_LOG(maphist,
2085: "<- failed. out of VM",0,0,0,0);
2086: /* XXX instrumentation */
1.106 yamt 2087: error = ENOMEM;
1.138 uebayasi 2088: return error;
1.46 thorpej 2089: }
2090: /* XXX instrumentation */
2091: uvm_wait("flt_pmfail2");
1.139 uebayasi 2092: return ERESTART;
1.46 thorpej 2093: }
1.1 mrg 2094:
1.169 ! uebayasi 2095: uvm_fault_lower_done(ufi, flt, uobj, anon, pg);
! 2096:
! 2097: uvmfault_unlockall(ufi, amap, uobj, anon);
! 2098: pmap_update(ufi->orig_map->pmap);
! 2099: UVMHIST_LOG(maphist, "<- done (SUCCESS!)",0,0,0,0);
! 2100: return 0;
1.148 uebayasi 2101: }
2102:
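/*
 * uvm_fault_lower_done: queue-maintenance tail of the lower fault
 * path: wire or activate the page and release our PG_BUSY hold on it.
 */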
1.169 ! uebayasi 2103: void
1.163 uebayasi 2104: uvm_fault_lower_done(
1.148 uebayasi 2105: struct uvm_faultinfo *ufi, struct uvm_faultctx *flt,
2106: struct uvm_object *uobj, struct vm_anon *anon, struct vm_page *pg)
2107: {
1.164 mlelstv 2108: UVMHIST_FUNC("uvm_fault_lower_done"); UVMHIST_CALLED(maphist);
1.148 uebayasi 2109:
1.122 ad 2110: mutex_enter(&uvm_pageqlock);
1.146 uebayasi 2111: if (flt->wire_paging) {
1.8 chuck 2112: uvm_pagewire(pg);
1.22 chs 2113: if (pg->pqflags & PQ_AOBJ) {
1.29 chs 2114:
2115: /*
2116: * since the now-wired page cannot be paged out,
2117: * release its swap resources for others to use.
2118: * since an aobj page with no swap cannot be PG_CLEAN,
2119: * clear its clean flag now.
2120: */
2121:
1.113 christos 2122: KASSERT(uobj != NULL);
1.29 chs 2123: pg->flags &= ~(PG_CLEAN);
1.22 chs 2124: uao_dropswap(uobj, pg->offset >> PAGE_SHIFT);
2125: }
1.7 mrg 2126: } else {
2127: uvm_pageactivate(pg);
2128: }
1.122 ad 2129: mutex_exit(&uvm_pageqlock);
1.7 mrg 2130: if (pg->flags & PG_WANTED)
1.69 chs 2131: wakeup(pg);
1.7 mrg 2132:
1.63 chs 2133: /*
2134: * note that pg can't be PG_RELEASED since we did not drop the object
1.7 mrg 2135: * lock since the last time we checked.
2136: */
1.111 yamt 2137: KASSERT((pg->flags & PG_RELEASED) == 0);
1.63 chs 2138:
1.7 mrg 2139: pg->flags &= ~(PG_BUSY|PG_FAKE|PG_WANTED);
2140: UVM_PAGE_OWN(pg, NULL);
1.1 mrg 2141: }
2142:
1.110 drochner 2143:
1.1 mrg 2144: /*
2145: * uvm_fault_wire: wire down a range of virtual addresses in a map.
2146: *
1.36 thorpej 2147: * => map may be read-locked by caller, but MUST NOT be write-locked.
2148: * => if map is read-locked, any operations which may cause map to
2149: * be write-locked in uvm_fault() must be taken care of by
2150: * the caller. See uvm_map_pageable().
1.1 mrg 2151: */
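/*
 * typical use (a sketch only; see uvm_map_pageable() for the real
 * consumer):
 *
 *	error = uvm_fault_wire(map, start, end, VM_PROT_READ, 0);
 *	if (error)
 *		return error;
 *	...
 *	uvm_fault_unwire(map, start, end);
 *
 * on failure uvm_fault_wire() itself unwires whatever pages it had
 * managed to wire before the error.
 */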
2152:
1.7 mrg 2153: int
1.95 thorpej 2154: uvm_fault_wire(struct vm_map *map, vaddr_t start, vaddr_t end,
1.130 uebayasi 2155: vm_prot_t access_type, int maxprot)
1.7 mrg 2156: {
1.12 eeh 2157: vaddr_t va;
1.58 chs 2158: int error;
1.7 mrg 2159:
2160: /*
1.47 chs 2161: * now fault it in a page at a time. if the fault fails then we have
  2162: 	 * to undo what we have done. note that when UVM_FAULT_MAXPROT is
  2163: 	 * passed, the fault is checked against the entry's maximum
	 * protection rather than its current protection.
1.7 mrg 2164: */
1.1 mrg 2165:
1.65 chs 2166: /*
2167: * XXX work around overflowing a vaddr_t. this prevents us from
2168: * wiring the last page in the address space, though.
2169: */
2170: if (start > end) {
2171: return EFAULT;
2172: }
2173:
1.163 uebayasi 2174: for (va = start; va < end; va += PAGE_SIZE) {
1.110 drochner 2175: error = uvm_fault_internal(map, va, access_type,
1.130 uebayasi 2176: (maxprot ? UVM_FAULT_MAXPROT : 0) | UVM_FAULT_WIRE);
1.58 chs 2177: if (error) {
1.7 mrg 2178: if (va != start) {
1.31 thorpej 2179: uvm_fault_unwire(map, start, va);
1.7 mrg 2180: }
1.58 chs 2181: return error;
1.7 mrg 2182: }
2183: }
1.58 chs 2184: return 0;
1.1 mrg 2185: }
2186:
2187: /*
2188: * uvm_fault_unwire(): unwire range of virtual space.
2189: */
2190:
1.7 mrg 2191: void
1.95 thorpej 2192: uvm_fault_unwire(struct vm_map *map, vaddr_t start, vaddr_t end)
1.36 thorpej 2193: {
2194: vm_map_lock_read(map);
2195: uvm_fault_unwire_locked(map, start, end);
2196: vm_map_unlock_read(map);
2197: }
2198:
2199: /*
2200: * uvm_fault_unwire_locked(): the guts of uvm_fault_unwire().
2201: *
2202: * => map must be at least read-locked.
2203: */
2204:
2205: void
1.95 thorpej 2206: uvm_fault_unwire_locked(struct vm_map *map, vaddr_t start, vaddr_t end)
1.7 mrg 2207: {
1.64 chs 2208: struct vm_map_entry *entry;
1.31 thorpej 2209: pmap_t pmap = vm_map_pmap(map);
1.42 thorpej 2210: vaddr_t va;
1.12 eeh 2211: paddr_t pa;
1.42 thorpej 2212: struct vm_page *pg;
1.31 thorpej 2213:
1.52 chs 2214: KASSERT((map->flags & VM_MAP_INTRSAFE) == 0);
1.7 mrg 2215:
2216: /*
2217: * we assume that the area we are unwiring has actually been wired
2218: * in the first place. this means that we should be able to extract
2219: * the PAs from the pmap. we also lock out the page daemon so that
2220: * we can call uvm_pageunwire.
2221: */
1.37 thorpej 2222:
1.122 ad 2223: mutex_enter(&uvm_pageqlock);
1.7 mrg 2224:
1.37 thorpej 2225: /*
2226: * find the beginning map entry for the region.
2227: */
1.74 chs 2228:
1.56 chs 2229: KASSERT(start >= vm_map_min(map) && end <= vm_map_max(map));
1.119 thorpej 2230: if (uvm_map_lookup_entry(map, start, &entry) == false)
1.37 thorpej 2231: panic("uvm_fault_unwire_locked: address not in map");
2232:
1.69 chs 2233: for (va = start; va < end; va += PAGE_SIZE) {
1.119 thorpej 2234: if (pmap_extract(pmap, va, &pa) == false)
1.74 chs 2235: continue;
1.42 thorpej 2236:
2237: /*
1.74 chs 2238: * find the map entry for the current address.
1.42 thorpej 2239: */
1.56 chs 2240:
2241: KASSERT(va >= entry->start);
1.74 chs 2242: while (va >= entry->end) {
1.56 chs 2243: KASSERT(entry->next != &map->header &&
2244: entry->next->start <= entry->end);
1.42 thorpej 2245: entry = entry->next;
2246: }
1.37 thorpej 2247:
1.42 thorpej 2248: /*
2249: * if the entry is no longer wired, tell the pmap.
2250: */
1.74 chs 2251:
1.42 thorpej 2252: if (VM_MAPENT_ISWIRED(entry) == 0)
2253: pmap_unwire(pmap, va);
2254:
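		/*
		 * unwire the page.  PHYS_TO_VM_PAGE() can return NULL
		 * for physical addresses with no vm_page (e.g. device
		 * memory), in which case there is nothing to unwire.
		 */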
2255: pg = PHYS_TO_VM_PAGE(pa);
2256: if (pg)
2257: uvm_pageunwire(pg);
1.7 mrg 2258: }
1.1 mrg 2259:
1.122 ad 2260: mutex_exit(&uvm_pageqlock);
1.1 mrg 2261: }