CVS annotated listing of src/sys/uvm/uvm_loan.c (NetBSD source tree, cvs.NetBSD.org: src / sys / uvm)

Annotation of src/sys/uvm/uvm_loan.c, Revision 1.102

1.102   ! ad          1: /*     $NetBSD: uvm_loan.c,v 1.101 2020/05/17 19:38:17 ad Exp $        */
1.1       mrg         2:
                      3: /*
                      4:  * Copyright (c) 1997 Charles D. Cranor and Washington University.
                      5:  * All rights reserved.
                      6:  *
                      7:  * Redistribution and use in source and binary forms, with or without
                      8:  * modification, are permitted provided that the following conditions
                      9:  * are met:
                     10:  * 1. Redistributions of source code must retain the above copyright
                     11:  *    notice, this list of conditions and the following disclaimer.
                     12:  * 2. Redistributions in binary form must reproduce the above copyright
                     13:  *    notice, this list of conditions and the following disclaimer in the
                     14:  *    documentation and/or other materials provided with the distribution.
                     15:  *
                     16:  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
                     17:  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
                     18:  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
                     19:  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
                     20:  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
                     21:  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
                     22:  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
                     23:  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
                     24:  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
                     25:  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1.4       mrg        26:  *
                     27:  * from: Id: uvm_loan.c,v 1.1.6.4 1998/02/06 05:08:43 chs Exp
1.1       mrg        28:  */
                     29:
                     30: /*
                     31:  * uvm_loan.c: page loanout handler
                     32:  */
1.35      lukem      33:
                     34: #include <sys/cdefs.h>
1.102   ! ad         35: __KERNEL_RCSID(0, "$NetBSD: uvm_loan.c,v 1.101 2020/05/17 19:38:17 ad Exp $");
1.1       mrg        36:
                     37: #include <sys/param.h>
                     38: #include <sys/systm.h>
                     39: #include <sys/kernel.h>
                     40: #include <sys/mman.h>
                     41:
                     42: #include <uvm/uvm.h>
                     43:
1.83      matt       44: #ifdef UVMHIST
                     45: UVMHIST_DEFINE(loanhist);
                     46: #endif
                     47:
1.1       mrg        48: /*
1.29      chs        49:  * "loaned" pages are pages which are (read-only, copy-on-write) loaned
1.1       mrg        50:  * from the VM system to other parts of the kernel.   this allows page
                     51:  * copying to be avoided (e.g. you can loan pages from objs/anons to
                     52:  * the mbuf system).
                     53:  *
                     54:  * there are 3 types of loans possible:
                     55:  *  O->K  uvm_object page to wired kernel page (e.g. mbuf data area)
1.16      thorpej    56:  *  A->K  anon page to wired kernel page (e.g. mbuf data area)
1.1       mrg        57:  *  O->A  uvm_object to anon loan (e.g. vnode page to an anon)
                     58:  * note that it possible to have an O page loaned to both an A and K
                     59:  * at the same time.
                     60:  *
                     61:  * loans are tracked by pg->loan_count.  an O->A page will have both
1.89      ad         62:  * a uvm_object and a vm_anon, but PG_ANON will not be set.   this sort
1.1       mrg        63:  * of page is considered "owned" by the uvm_object (not the anon).
                     64:  *
1.16      thorpej    65:  * each loan of a page to the kernel bumps the pg->wire_count.  the
                     66:  * kernel mappings for these pages will be read-only and wired.  since
                     67:  * the page will also be wired, it will not be a candidate for pageout,
                     68:  * and thus will never be pmap_page_protect()'d with VM_PROT_NONE.  a
                     69:  * write fault in the kernel to one of these pages will not cause
                     70:  * copy-on-write.  instead, the page fault is considered fatal.  this
                     71:  * is because the kernel mapping will have no way to look up the
                     72:  * object/anon which the page is owned by.  this is a good side-effect,
                     73:  * since a kernel write to a loaned page is an error.
1.1       mrg        74:  *
1.29      chs        75:  * owners that want to free their pages and discover that they are
1.1       mrg        76:  * loaned out simply "disown" them (the page becomes an orphan).  these
                     77:  * pages should be freed when the last loan is dropped.   in some cases
                     78:  * an anon may "adopt" an orphaned page.
                     79:  *
1.89      ad         80:  * locking: to read pg->loan_count either the owner or pg->interlock
1.1       mrg        81:  * must be locked.   to modify pg->loan_count, both the owner of the page
1.89      ad         82:  * and pg->interlock must be locked.   pg->flags is (as always) locked by
1.1       mrg        83:  * the owner of the page.
                     84:  *
                     85:  * note that locking from the "loaned" side is tricky since the object
                     86:  * getting the loaned page has no reference to the page's owner and thus
                     87:  * the owner could "die" at any time.   in order to prevent the owner
1.89      ad         88:  * from dying pg->interlock should be locked.   this forces us to sometimes
1.1       mrg        89:  * use "try" locking.
                     90:  *
                     91:  * loans are typically broken by the following events:
1.29      chs        92:  *  1. user-level xwrite fault to a loaned page
1.1       mrg        93:  *  2. pageout of clean+inactive O->A loaned page
                     94:  *  3. owner frees page (e.g. pager flush)
                     95:  *
                     96:  * note that loaning a page causes all mappings of the page to become
                     97:  * read-only (via pmap_page_protect).   this could have an unexpected
1.16      thorpej    98:  * effect on normal "wired" pages if one is not careful (XXX).
1.1       mrg        99:  */
                    100:
                    101: /*
                    102:  * local prototypes
                    103:  */
                    104:
1.50      junyoung  105: static int     uvm_loananon(struct uvm_faultinfo *, void ***,
                    106:                             int, struct vm_anon *);
                    107: static int     uvm_loanuobj(struct uvm_faultinfo *, void ***,
                    108:                             int, vaddr_t);
                    109: static int     uvm_loanzero(struct uvm_faultinfo *, void ***, int);
                    110: static void    uvm_unloananon(struct vm_anon **, int);
                    111: static void    uvm_unloanpage(struct vm_page **, int);
1.101     ad        112: static int     uvm_loanpage(struct vm_page **, int, bool);
1.33      jdolecek  113:
1.1       mrg       114:
                    115: /*
                    116:  * inlines
                    117:  */
                    118:
/*
 * uvm_loanentry: loan out pages in a map entry (helper fn for uvm_loan())
 *
 * => "ufi" is the result of a successful map lookup (meaning that
 *	on entry the map is locked by the caller)
 * => we may unlock and then relock the map if needed (for I/O)
 * => we put our output result in "output"
 * => we always return with the map unlocked
 * => possible return values:
 *	-1 == error, map is unlocked
 *	 0 == map relock error (try again!), map is unlocked
 *	>0 == number of pages we loaned, map is unlocked
 *
 * NOTE: We can live with this being an inline, because it is only called
 * from one place.
 */

static inline int
uvm_loanentry(struct uvm_faultinfo *ufi, void ***output, int flags)
{
	vaddr_t curaddr = ufi->orig_rvaddr;
	vsize_t togo = ufi->size;
	struct vm_aref *aref = &ufi->entry->aref;
	struct uvm_object *uobj = ufi->entry->object.uvm_obj;
	struct vm_anon *anon;
	int rv, result = 0;

	UVMHIST_FUNC(__func__); UVMHIST_CALLED(loanhist);

	/*
	 * lock us the rest of the way down (we unlock before return)
	 */
	if (aref->ar_amap) {
		amap_lock(aref->ar_amap, RW_WRITER);
	}

	/*
	 * loop until done: one loaned page per iteration.
	 */
	while (togo) {

		/*
		 * find the page we want.   check the anon layer first.
		 */

		if (aref->ar_amap) {
			anon = amap_lookup(aref, curaddr - ufi->entry->start);
		} else {
			anon = NULL;
		}

		/* locked: map, amap, uobj */
		if (anon) {
			rv = uvm_loananon(ufi, output, flags, anon);
		} else if (uobj) {
			rv = uvm_loanuobj(ufi, output, flags, curaddr);
		} else if (UVM_ET_ISCOPYONWRITE(ufi->entry)) {
			/* no anon and no object: a zero-fill entry */
			rv = uvm_loanzero(ufi, output, flags);
		} else {
			/* nothing to loan from: fatal error */
			uvmfault_unlockall(ufi, aref->ar_amap, uobj);
			rv = -1;
		}
		/* locked: if (rv > 0) => map, amap, uobj  [o.w. unlocked] */
		KASSERT(rv > 0 || aref->ar_amap == NULL ||
		    !rw_write_held(aref->ar_amap->am_lock));
		KASSERT(rv > 0 || uobj == NULL ||
		    !rw_write_held(uobj->vmobjlock));

		/* total failure: helper already unlocked everything */
		if (rv < 0) {
			UVMHIST_LOG(loanhist, "failure %jd", rv, 0,0,0);
			return (-1);
		}

		/*
		 * relock failed, need to do another lookup.  return the
		 * count loaned so far so the caller can advance past it.
		 */
		if (rv == 0) {
			UVMHIST_LOG(loanhist, "relock failure %jd", result
			    ,0,0,0);
			return (result);
		}

		/*
		 * got it... advance to next page
		 */

		result++;
		togo -= PAGE_SIZE;
		curaddr += PAGE_SIZE;
	}

	/*
	 * unlock what we locked, unlock the maps and return
	 */

	if (aref->ar_amap) {
		amap_unlock(aref->ar_amap);
	}
	uvmfault_unlockmaps(ufi, false);
	UVMHIST_LOG(loanhist, "done %jd", result, 0,0,0);
	return (result);
}
                    220:
                    221: /*
                    222:  * normal functions
                    223:  */
                    224:
                    225: /*
1.28      chuck     226:  * uvm_loan: loan pages in a map out to anons or to the kernel
1.29      chs       227:  *
1.1       mrg       228:  * => map should be unlocked
                    229:  * => start and len should be multiples of PAGE_SIZE
                    230:  * => result is either an array of anon's or vm_pages (depending on flags)
                    231:  * => flag values: UVM_LOAN_TOANON - loan to anons
                    232:  *                 UVM_LOAN_TOPAGE - loan to wired kernel page
                    233:  *    one and only one of these flags must be set!
1.28      chuck     234:  * => returns 0 (success), or an appropriate error number
1.1       mrg       235:  */
                    236:
1.6       mrg       237: int
1.54      thorpej   238: uvm_loan(struct vm_map *map, vaddr_t start, vsize_t len, void *v, int flags)
1.1       mrg       239: {
1.6       mrg       240:        struct uvm_faultinfo ufi;
1.34      chs       241:        void **result, **output;
1.25      chs       242:        int rv, error;
1.6       mrg       243:
1.52      yamt      244:        UVMHIST_FUNC(__func__); UVMHIST_CALLED(loanhist);
                    245:
1.6       mrg       246:        /*
                    247:         * ensure that one and only one of the flags is set
                    248:         */
                    249:
1.25      chs       250:        KASSERT(((flags & UVM_LOAN_TOANON) == 0) ^
                    251:                ((flags & UVM_LOAN_TOPAGE) == 0));
1.6       mrg       252:
                    253:        /*
1.34      chs       254:         * "output" is a pointer to the current place to put the loaned page.
1.6       mrg       255:         */
                    256:
1.34      chs       257:        result = v;
1.6       mrg       258:        output = &result[0];    /* start at the beginning ... */
                    259:
                    260:        /*
                    261:         * while we've got pages to do
                    262:         */
                    263:
                    264:        while (len > 0) {
                    265:
                    266:                /*
                    267:                 * fill in params for a call to uvmfault_lookup
                    268:                 */
                    269:
                    270:                ufi.orig_map = map;
                    271:                ufi.orig_rvaddr = start;
                    272:                ufi.orig_size = len;
1.29      chs       273:
1.6       mrg       274:                /*
                    275:                 * do the lookup, the only time this will fail is if we hit on
                    276:                 * an unmapped region (an error)
                    277:                 */
                    278:
1.65      thorpej   279:                if (!uvmfault_lookup(&ufi, false)) {
1.25      chs       280:                        error = ENOENT;
1.6       mrg       281:                        goto fail;
1.25      chs       282:                }
1.6       mrg       283:
                    284:                /*
1.28      chuck     285:                 * map now locked.  now do the loanout...
1.6       mrg       286:                 */
1.34      chs       287:
1.6       mrg       288:                rv = uvm_loanentry(&ufi, &output, flags);
1.25      chs       289:                if (rv < 0) {
1.28      chuck     290:                        /* all unlocked due to error */
1.25      chs       291:                        error = EINVAL;
1.6       mrg       292:                        goto fail;
1.25      chs       293:                }
1.6       mrg       294:
                    295:                /*
1.31      chuck     296:                 * done!  the map is unlocked.  advance, if possible.
1.28      chuck     297:                 *
1.50      junyoung  298:                 * XXXCDC: could be recoded to hold the map lock with
                    299:                 *         smarter code (but it only happens on map entry
1.31      chuck     300:                 *         boundaries, so it isn't that bad).
1.6       mrg       301:                 */
1.34      chs       302:
1.28      chuck     303:                if (rv) {
                    304:                        rv <<= PAGE_SHIFT;
                    305:                        len -= rv;
                    306:                        start += rv;
                    307:                }
1.6       mrg       308:        }
1.52      yamt      309:        UVMHIST_LOG(loanhist, "success", 0,0,0,0);
1.25      chs       310:        return 0;
1.1       mrg       311:
                    312: fail:
1.6       mrg       313:        /*
1.34      chs       314:         * failed to complete loans.  drop any loans and return failure code.
1.28      chuck     315:         * map is already unlocked.
1.6       mrg       316:         */
1.34      chs       317:
1.6       mrg       318:        if (output - result) {
1.34      chs       319:                if (flags & UVM_LOAN_TOANON) {
1.6       mrg       320:                        uvm_unloananon((struct vm_anon **)result,
1.34      chs       321:                            output - result);
                    322:                } else {
1.6       mrg       323:                        uvm_unloanpage((struct vm_page **)result,
1.34      chs       324:                            output - result);
                    325:                }
1.6       mrg       326:        }
1.85      pgoyette  327:        UVMHIST_LOG(loanhist, "error %jd", error,0,0,0);
1.26      jdolecek  328:        return (error);
1.1       mrg       329: }
                    330:
/*
 * uvm_loananon: loan a page from an anon out
 *
 * => called with map, amap, uobj locked
 * => return value:
 *	-1 = fatal error, everything is unlocked, abort.
 *	 0 = lookup in ufi went stale, everything unlocked, relookup and
 *		try again
 *	 1 = got it, everything still locked
 */

int
uvm_loananon(struct uvm_faultinfo *ufi, void ***output, int flags,
    struct vm_anon *anon)
{
	struct vm_page *pg;
	int error;

	UVMHIST_FUNC(__func__); UVMHIST_CALLED(loanhist);

	/*
	 * if we are loaning to "another" anon then it is easy, we just
	 * bump the reference count on the current anon and return a
	 * pointer to it (it becomes copy-on-write shared).
	 */

	if (flags & UVM_LOAN_TOANON) {
		KASSERT(rw_write_held(anon->an_lock));
		pg = anon->an_page;
		/*
		 * if the anon solely owns a resident anon-page, write
		 * access must be revoked so sharing is copy-on-write.
		 * a wired page cannot be write-protected this way, so
		 * that case is a fatal error.
		 */
		if (pg && (pg->flags & PG_ANON) != 0 && anon->an_ref == 1) {
			if (pg->wire_count > 0) {
				UVMHIST_LOG(loanhist, "->A wired %#jx",
				    (uintptr_t)pg, 0, 0, 0);
				uvmfault_unlockall(ufi,
				    ufi->entry->aref.ar_amap,
				    ufi->entry->object.uvm_obj);
				return (-1);
			}
			pmap_page_protect(pg, VM_PROT_READ);
		}
		anon->an_ref++;
		**output = anon;
		(*output)++;
		UVMHIST_LOG(loanhist, "->A done", 0,0,0,0);
		return (1);
	}

	/*
	 * we are loaning to a kernel-page.   we need to get the page
	 * resident so we can wire it.   uvmfault_anonget will handle
	 * this for us.
	 */

	KASSERT(rw_write_held(anon->an_lock));
	error = uvmfault_anonget(ufi, ufi->entry->aref.ar_amap, anon);

	/*
	 * if we were unable to get the anon, then uvmfault_anonget has
	 * unlocked everything and returned an error code.
	 */

	if (error) {
		UVMHIST_LOG(loanhist, "error %jd", error,0,0,0);
		KASSERT(error != ENOLCK);

		/* need to refault (i.e. refresh our lookup) ? */
		if (error == ERESTART) {
			return (0);
		}

		/* "try again"?   sleep a bit and retry ... */
		if (error == EAGAIN) {
			kpause("loanagain", false, hz/2, NULL);
			return (0);
		}

		/* otherwise flag it as an error */
		return (-1);
	}

	/*
	 * we have the page and its owner locked: do the loan now.
	 */

	pg = anon->an_page;
	if (pg->wire_count > 0) {
		/* wired pages cannot be loaned to the kernel */
		UVMHIST_LOG(loanhist, "->K wired %#jx", (uintptr_t)pg, 0, 0, 0);
		KASSERT(pg->uobject == NULL);
		uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, NULL);
		return (-1);
	}
	if (pg->loan_count == 0) {
		/* first loan of this page: downgrade all mappings to R/O */
		pmap_page_protect(pg, VM_PROT_READ);
	}
	uvm_pagelock(pg);
	pg->loan_count++;
	KASSERT(pg->loan_count > 0);	/* detect wrap-around */
	uvm_pageactivate(pg);
	uvm_pageunlock(pg);
	**output = pg;
	(*output)++;

	/* unlock and return success */
	if (pg->uobject)
		rw_exit(pg->uobject->vmobjlock);
	UVMHIST_LOG(loanhist, "->K done", 0,0,0,0);
	return (1);
}
                    439:
/*
 * uvm_loanpage: loan out pages to kernel (->K)
 *
 * => pages should be object-owned and the object should be locked.
 * => in the case of error, the object might be unlocked and relocked.
 * => pages will be unbusied (if busied is true).
 * => fail with EBUSY if meet a wired page.
 */
static int
uvm_loanpage(struct vm_page **pgpp, int npages, bool busied)
{
	int i;
	int error = 0;

	UVMHIST_FUNC(__func__); UVMHIST_CALLED(loanhist);

	for (i = 0; i < npages; i++) {
		struct vm_page *pg = pgpp[i];

		/* all pages must belong to the same (locked) object */
		KASSERT(pg->uobject != NULL);
		KASSERT(pg->uobject == pgpp[0]->uobject);
		KASSERT(!(pg->flags & (PG_RELEASED|PG_PAGEOUT)));
		KASSERT(rw_write_held(pg->uobject->vmobjlock));
		KASSERT(busied == ((pg->flags & PG_BUSY) != 0));

		if (pg->wire_count > 0) {
			/* wired pages cannot be loaned */
			UVMHIST_LOG(loanhist, "wired %#jx", (uintptr_t)pg,
			    0, 0, 0);
			error = EBUSY;
			break;
		}
		if (pg->loan_count == 0) {
			/* first loan of this page: make all mappings R/O */
			pmap_page_protect(pg, VM_PROT_READ);
		}
		uvm_pagelock(pg);
		pg->loan_count++;
		KASSERT(pg->loan_count > 0);	/* detect wrap-around */
		uvm_pageactivate(pg);
		uvm_pageunlock(pg);
	}

	if (busied) {
		uvm_page_unbusy(pgpp, npages);
	}

	if (error) {
		/*
		 * backout what we've done: drop the loans taken on the
		 * first "i" pages.  uvm_unloan() needs the object
		 * unlocked, so drop and re-take vmobjlock around it.
		 */
		krwlock_t *slock = pgpp[0]->uobject->vmobjlock;

		rw_exit(slock);
		uvm_unloan(pgpp, i, UVM_LOAN_TOPAGE);
		rw_enter(slock, RW_WRITER);
	}

	UVMHIST_LOG(loanhist, "done %jd", error, 0, 0, 0);
	return error;
}
                    499:
                    500: /*
1.45      yamt      501:  * XXX UBC temp limit
                    502:  * number of pages to get at once.
                    503:  * should be <= MAX_READ_AHEAD in genfs_vnops.c
                    504:  */
                    505: #define        UVM_LOAN_GET_CHUNK      16
                    506:
/*
 * uvm_loanuobjchunk: helper for uvm_loanuobjpages()
 *
 * => gets up to "orignpages" resident+busy pages from uobj at pgoff via
 *    the pager, then loans them to the kernel with uvm_loanpage().
 * => returns 0 on success, or an error number (EBUSY for a wired page,
 *    or whatever pgo_get/uvm_loanpage failed with).
 *
 * NOTE(review): the lock dance below assumes pgo_get (PGO_SYNCIO) drops
 * vmobjlock before returning — each path re-takes the lock before
 * touching the pages; confirm against the pager contract.
 */
static int
uvm_loanuobjchunk(struct uvm_object *uobj, voff_t pgoff, int orignpages,
    struct vm_page **pgpp)
{
	int error, npages;

	rw_enter(uobj->vmobjlock, RW_WRITER);
 reget:
	/* entered with vmobjlock held */
	npages = orignpages;
	error = (*uobj->pgops->pgo_get)(uobj, pgoff, pgpp, &npages, 0,
	    VM_PROT_READ, 0, PGO_SYNCIO);
	switch (error) {
	case 0:
		KASSERT(npages == orignpages);

		/* check for released pages */
		rw_enter(uobj->vmobjlock, RW_WRITER);
		for (int i = 0; i < npages; i++) {
			KASSERT(pgpp[i]->uobject->vmobjlock == uobj->vmobjlock);
			if ((pgpp[i]->flags & PG_RELEASED) != 0) {
				/*
				 * release pages and try again.
				 */
				uvm_page_unbusy(pgpp, npages);
				goto reget;
			}
		}

		/* loan out pages.  they will be unbusied whatever happens. */
		error = uvm_loanpage(pgpp, npages, true);
		rw_exit(uobj->vmobjlock);
		return error;

	case EAGAIN:
		/* transient pager failure: sleep briefly and retry */
		kpause("loanuopg", false, hz/2, NULL);
		rw_enter(uobj->vmobjlock, RW_WRITER);
		goto reget;

	default:
		/* hard error: unbusy any pages the pager did return */
		if (npages > 0) {
			rw_enter(uobj->vmobjlock, RW_WRITER);
			uvm_page_unbusy(pgpp, npages);
			rw_exit(uobj->vmobjlock);
		}
		return error;
	}
}
1.45      yamt      557:
1.102   ! ad        558: /*
        !           559:  * uvm_loanuobjpages: loan pages from a uobj out (O->K)
        !           560:  *
        !           561:  * => uobj shouldn't be locked.  (we'll lock it)
        !           562:  * => fail with EBUSY if we meet a wired page.
        !           563:  */
        !           564: int
        !           565: uvm_loanuobjpages(struct uvm_object *uobj, voff_t pgoff, int npages,
        !           566:     struct vm_page **pgpp)
        !           567: {
        !           568:        int ndone, error, chunk;
        !           569:
        !           570:        KASSERT(npages > 0);
1.51      yamt      571:
1.102   ! ad        572:        for (ndone = 0; ndone < npages; ndone += chunk) {
        !           573:                chunk = MIN(UVM_LOAN_GET_CHUNK, npages - ndone);
        !           574:                error = uvm_loanuobjchunk(uobj, pgoff + (ndone << PAGE_SHIFT),
        !           575:                    chunk, pgpp + ndone);
        !           576:                if (error != 0) {
        !           577:                        if (ndone != 0) {
        !           578:                                uvm_unloan(pgpp, ndone, UVM_LOAN_TOPAGE);
        !           579:                        }
        !           580:                        break;
        !           581:                }
        !           582:        }
1.51      yamt      583:
                    584:        return error;
1.45      yamt      585: }
                    586:
                    587: /*
1.1       mrg       588:  * uvm_loanuobj: loan a page from a uobj out
                    589:  *
1.28      chuck     590:  * => called with map, amap, uobj locked
1.1       mrg       591:  * => return value:
                    592:  *     -1 = fatal error, everything is unlocked, abort.
                    593:  *      0 = lookup in ufi went stale, everything unlocked, relookup and
                    594:  *             try again
                    595:  *      1 = got it, everything still locked
                    596:  */
                    597:
static int
uvm_loanuobj(struct uvm_faultinfo *ufi, void ***output, int flags, vaddr_t va)
{
	struct vm_amap *amap = ufi->entry->aref.ar_amap;
	struct uvm_object *uobj = ufi->entry->object.uvm_obj;
	struct vm_page *pg;
	int error, npages;
	bool locked;

	UVMHIST_FUNC(__func__); UVMHIST_CALLED(loanhist);

	/*
	 * first we must make sure the page is resident.
	 *
	 * XXXCDC: duplicate code with uvm_fault().
	 */

	/* locked: maps(read), amap(if there) */
	rw_enter(uobj->vmobjlock, RW_WRITER);
	/* locked: maps(read), amap(if there), uobj */

	if (uobj->pgops->pgo_get) {	/* try locked pgo_get */
		npages = 1;
		pg = NULL;
		error = (*uobj->pgops->pgo_get)(uobj,
		    va - ufi->entry->start + ufi->entry->offset,
		    &pg, &npages, 0, VM_PROT_READ, MADV_NORMAL, PGO_LOCKED);
	} else {
		error = EIO;		/* must have pgo_get op */
	}

	/*
	 * check the result of the locked pgo_get.  if there is a problem,
	 * then we fail the loan.
	 */

	if (error && error != EBUSY) {
		uvmfault_unlockall(ufi, amap, uobj);
		return (-1);
	}

	/*
	 * if we need to unlock for I/O, do so now.
	 * EBUSY from the PGO_LOCKED pgo_get means the page was not
	 * immediately available; retry with PGO_SYNCIO, which requires
	 * dropping all our locks around the I/O.
	 */

	if (error == EBUSY) {
		uvmfault_unlockall(ufi, amap, NULL);

		/* locked: uobj */
		npages = 1;
		error = (*uobj->pgops->pgo_get)(uobj,
		    va - ufi->entry->start + ufi->entry->offset,
		    &pg, &npages, 0, VM_PROT_READ, MADV_NORMAL, PGO_SYNCIO);
		/* locked: <nothing> */

		if (error) {
			if (error == EAGAIN) {
				/* transient shortage: nap, then have the
				 * caller redo the lookup from scratch */
				kpause("fltagain2", false, hz/2, NULL);
				return (0);
			}
			return (-1);
		}

		/*
		 * pgo_get was a success.   attempt to relock everything.
		 */

		locked = uvmfault_relock(ufi);
		if (locked && amap)
			amap_lock(amap, RW_WRITER);
		/* re-fetch the object: the page may have migrated (tmpfs) */
		uobj = pg->uobject;
		rw_enter(uobj->vmobjlock, RW_WRITER);

		/*
		 * verify that the page has not be released and re-verify
		 * that amap slot is still free.   if there is a problem we
		 * drop our lock (thus force a lookup refresh/retry).
		 */

		if ((pg->flags & PG_RELEASED) != 0 ||
		    (locked && amap && amap_lookup(&ufi->entry->aref,
		    ufi->orig_rvaddr - ufi->entry->start))) {
			if (locked)
				uvmfault_unlockall(ufi, amap, NULL);
			locked = false;
		}

		/*
		 * unbusy the page.
		 */

		if ((pg->flags & PG_RELEASED) == 0) {
			uvm_pagelock(pg);
			uvm_pagewakeup(pg);
			uvm_pageunlock(pg);
			pg->flags &= ~PG_BUSY;
			UVM_PAGE_OWN(pg, NULL);
		}

		/*
		 * didn't get the lock?   release the page and retry.
		 */

		if (locked == false) {
			if (pg->flags & PG_RELEASED) {
				uvm_pagefree(pg);
			}
			rw_exit(uobj->vmobjlock);
			return (0);
		}
	}

	/*
	 * for tmpfs vnodes, the page will be from a UAO rather than
	 * the vnode.  just check the locks match.
	 */

	KASSERT(uobj->vmobjlock == pg->uobject->vmobjlock);

	/*
	 * at this point we have the page we want ("pg") and we have
	 * all data structures locked.  do the loanout.  page can not
	 * be PG_RELEASED (we caught this above).
	 */

	if ((flags & UVM_LOAN_TOANON) == 0) {
		if (uvm_loanpage(&pg, 1, false)) {
			uvmfault_unlockall(ufi, amap, uobj);
			return (-1);
		}
		rw_exit(uobj->vmobjlock);
		**output = pg;
		(*output)++;
		return (1);
	}

	/*
	 * NOTE(review): the O->A (object to anon) loan path below is
	 * compiled out and intentionally incomplete (see the bare
	 * "anon->an_lock =" assignment); with UVM_LOAN_TOANON set we
	 * always fail with -1 at the bottom.
	 */
#ifdef notdef
	/*
	 * must be a loan to an anon.   check to see if there is already
	 * an anon associated with this page.  if so, then just return
	 * a reference to this object.   the page should already be
	 * mapped read-only because it is already on loan.
	 */

	if (pg->uanon) {
		/* XXX: locking */
		anon = pg->uanon;
		anon->an_ref++;
		uvm_pagelock(pg);
		uvm_pagewakeup(pg);
		uvm_pageunlock(pg);
		pg->flags &= ~PG_BUSY;
		UVM_PAGE_OWN(pg, NULL);
		rw_exit(uobj->vmobjlock);
		**output = anon;
		(*output)++;
		return (1);
	}

	/*
	 * need to allocate a new anon
	 */

	anon = uvm_analloc();
	if (anon == NULL) {
		goto fail;
	}
	if (pg->wire_count > 0) {
		UVMHIST_LOG(loanhist, "wired %#jx", (uintptr_t)pg, 0, 0, 0);
		goto fail;
	}
	if (pg->loan_count == 0) {
		pmap_page_protect(pg, VM_PROT_READ);
	}
	uvm_pagelock(pg);
	pg->loan_count++;
	KASSERT(pg->loan_count > 0);	/* detect wrap-around */
	pg->uanon = anon;
	anon->an_page = pg;
	anon->an_lock = /* TODO: share amap lock */
	uvm_pageactivate(pg);
	uvm_pagewakeup(pg);
	uvm_pageunlock(pg);
	pg->flags &= ~PG_BUSY;
	UVM_PAGE_OWN(pg, NULL);
	rw_exit(uobj->vmobjlock);
	rw_exit(&anon->an_lock);
	**output = anon;
	(*output)++;
	return (1);

fail:
	UVMHIST_LOG(loanhist, "fail", 0,0,0,0);
	/*
	 * unlock everything and bail out.
	 */
	uvm_pagelock(pg);
	uvm_pagewakeup(pg);
	uvm_pageunlock(pg);
	pg->flags &= ~PG_BUSY;
	UVM_PAGE_OWN(pg, NULL);
	uvmfault_unlockall(ufi, amap, uobj, NULL);
	if (anon) {
		anon->an_ref--;
		uvm_anfree(anon);
	}
#endif /* notdef */
	return (-1);
}
                    807:
                    808: /*
1.40      thorpej   809:  * uvm_loanzero: loan a zero-fill page out
1.1       mrg       810:  *
1.28      chuck     811:  * => called with map, amap, uobj locked
1.1       mrg       812:  * => return value:
                    813:  *     -1 = fatal error, everything is unlocked, abort.
                    814:  *      0 = lookup in ufi went stale, everything unlocked, relookup and
                    815:  *             try again
                    816:  *      1 = got it, everything still locked
                    817:  */
                    818:
/* singleton object holding the shared zero page; set up in uvm_loan_init() */
static struct uvm_object uvm_loanzero_object;
/* lock installed as uvm_loanzero_object's vmobjlock by uvm_loan_init() */
static krwlock_t uvm_loanzero_lock __cacheline_aligned;
1.40      thorpej   821:
static int
uvm_loanzero(struct uvm_faultinfo *ufi, void ***output, int flags)
{
	struct vm_page *pg;
	struct vm_amap *amap = ufi->entry->aref.ar_amap;

	UVMHIST_FUNC(__func__); UVMHIST_CALLED(loanhist);
again:
	rw_enter(uvm_loanzero_object.vmobjlock, RW_WRITER);

	/*
	 * first, get ahold of our single zero page.
	 */

	pg = uvm_pagelookup(&uvm_loanzero_object, 0);
	if (__predict_false(pg == NULL)) {
		while ((pg = uvm_pagealloc(&uvm_loanzero_object, 0, NULL,
					   UVM_PGA_ZERO)) == NULL) {
			/*
			 * no memory: drop everything, wait for the pagedaemon,
			 * then relock and retry from the top (another thread
			 * may have allocated the page while we slept).
			 */
			rw_exit(uvm_loanzero_object.vmobjlock);
			uvmfault_unlockall(ufi, amap, NULL);
			uvm_wait("loanzero");
			if (!uvmfault_relock(ufi)) {
				return (0);
			}
			if (amap) {
				amap_lock(amap, RW_WRITER);
			}
			goto again;
		}

		/* got a zero'd page. */
		pg->flags &= ~(PG_BUSY|PG_FAKE);
		pg->flags |= PG_RDONLY;
		uvm_pagelock(pg);
		uvm_pageactivate(pg);
		uvm_pagewakeup(pg);
		uvm_pageunlock(pg);
		UVM_PAGE_OWN(pg, NULL);
	}

	if ((flags & UVM_LOAN_TOANON) == 0) {	/* loaning to kernel-page */
		mutex_enter(&pg->interlock);
		pg->loan_count++;
		KASSERT(pg->loan_count > 0);	/* detect wrap-around */
		mutex_exit(&pg->interlock);
		rw_exit(uvm_loanzero_object.vmobjlock);
		**output = pg;
		(*output)++;
		return (1);
	}

	/*
	 * NOTE(review): the loan-to-anon path below is compiled out;
	 * with UVM_LOAN_TOANON set this function always fails with -1.
	 */
#ifdef notdef
	/*
	 * loaning to an anon.  check to see if there is already an anon
	 * associated with this page.  if so, then just return a reference
	 * to this object.
	 */

	if (pg->uanon) {
		anon = pg->uanon;
		rw_enter(&anon->an_lock, RW_WRITER);
		anon->an_ref++;
		rw_exit(&anon->an_lock);
		rw_exit(uvm_loanzero_object.vmobjlock);
		**output = anon;
		(*output)++;
		return (1);
	}

	/*
	 * need to allocate a new anon
	 */

	anon = uvm_analloc();
	if (anon == NULL) {
		/* out of swap causes us to fail */
		rw_exit(uvm_loanzero_object.vmobjlock);
		uvmfault_unlockall(ufi, amap, NULL, NULL);
		return (-1);
	}
	anon->an_page = pg;
	pg->uanon = anon;
	uvm_pagelock(pg);
	pg->loan_count++;
	KASSERT(pg->loan_count > 0);	/* detect wrap-around */
	uvm_pageactivate(pg);
	uvm_pageunlock(pg);
	rw_exit(&anon->an_lock);
	rw_exit(uvm_loanzero_object.vmobjlock);
	**output = anon;
	(*output)++;
	return (1);
#else
	return (-1);
#endif
}
                    918:
                    919:
                    920: /*
                    921:  * uvm_unloananon: kill loans on anons (basically a normal ref drop)
                    922:  *
                    923:  * => we expect all our resources to be unlocked
                    924:  */
                    925:
static void
uvm_unloananon(struct vm_anon **aloans, int nanons)
{
	/*
	 * NOTE(review): the body is compiled out, so this function is
	 * currently a no-op; anon loaning is disabled elsewhere in this
	 * file as well.  the disabled code references "amap", which is
	 * not declared here -- it cannot compile as-is (see the TODO).
	 */
#ifdef notdef
	struct vm_anon *anon, *to_free = NULL;

	/* TODO: locking */
	amap_lock(amap, RW_WRITER);
	while (nanons-- > 0) {
		anon = *aloans++;
		if (--anon->an_ref == 0) {
			uvm_anfree(anon);
		}
	}
	amap_unlock(amap);
#endif /* notdef */
}
                    943:
                    944: /*
                    945:  * uvm_unloanpage: kill loans on pages loaned out to the kernel
                    946:  *
                    947:  * => we expect all our resources to be unlocked
                    948:  */
                    949:
static void
uvm_unloanpage(struct vm_page **ploans, int npages)
{
	struct vm_page *pg;
	krwlock_t *slock;

	while (npages-- > 0) {
		pg = *ploans++;

		/*
		 * do a little dance to acquire the object or anon lock
		 * as appropriate.  we are locking in the wrong order,
		 * so we have to do a try-lock here.
		 */

		mutex_enter(&pg->interlock);
		slock = NULL;
		while (pg->uobject != NULL || pg->uanon != NULL) {
			if (pg->uobject != NULL) {
				slock = pg->uobject->vmobjlock;
			} else {
				slock = pg->uanon->an_lock;
			}
			if (rw_tryenter(slock, RW_WRITER)) {
				break;
			}
			/* XXX Better than yielding but inadequate. */
			/* kpause drops and re-takes the interlock, so the
			 * owner may have changed: re-evaluate from scratch. */
			kpause("livelock", false, 1, &pg->interlock);
			slock = NULL;
		}

		/*
		 * drop our loan.  if page is owned by an anon but
		 * PG_ANON is not set, the page was loaned to the anon
		 * from an object which dropped ownership, so resolve
		 * this by turning the anon's loan into real ownership
		 * (ie. decrement loan_count again and set PG_ANON).
		 * after all this, if there are no loans left, put the
		 * page back a paging queue (if the page is owned by
		 * an anon) or free it (if the page is now unowned).
		 */

		KASSERT(pg->loan_count > 0);
		pg->loan_count--;
		if (pg->uobject == NULL && pg->uanon != NULL &&
		    (pg->flags & PG_ANON) == 0) {
			KASSERT(pg->loan_count > 0);
			pg->loan_count--;
			pg->flags |= PG_ANON;
		}
		mutex_exit(&pg->interlock);
		if (pg->loan_count == 0 && pg->uobject == NULL &&
		    pg->uanon == NULL) {
			/* last loan dropped and nobody owns it: free it */
			KASSERT((pg->flags & PG_BUSY) == 0);
			uvm_pagefree(pg);
		}
		if (slock != NULL) {
			rw_exit(slock);
		}
	}
}
                   1011:
1.33      jdolecek 1012: /*
1.34      chs      1013:  * uvm_unloan: kill loans on pages or anons.
1.33      jdolecek 1014:  */
1.34      chs      1015:
1.33      jdolecek 1016: void
1.34      chs      1017: uvm_unloan(void *v, int npages, int flags)
1.33      jdolecek 1018: {
1.34      chs      1019:        if (flags & UVM_LOAN_TOANON) {
                   1020:                uvm_unloananon(v, npages);
                   1021:        } else {
                   1022:                uvm_unloanpage(v, npages);
                   1023:        }
1.40      thorpej  1024: }
                   1025:
                   1026: /*
1.41      thorpej  1027:  * Minimal pager for uvm_loanzero_object.  We need to provide a "put"
                   1028:  * method, because the page can end up on a paging queue, and the
                   1029:  * page daemon will want to call pgo_put when it encounters the page
                   1030:  * on the inactive list.
                   1031:  */
                   1032:
                   1033: static int
1.62      yamt     1034: ulz_put(struct uvm_object *uobj, voff_t start, voff_t stop, int flags)
1.41      thorpej  1035: {
                   1036:        struct vm_page *pg;
                   1037:
                   1038:        KDASSERT(uobj == &uvm_loanzero_object);
                   1039:
                   1040:        /*
                   1041:         * Don't need to do any work here if we're not freeing pages.
                   1042:         */
                   1043:
                   1044:        if ((flags & PGO_FREE) == 0) {
1.95      ad       1045:                rw_exit(uobj->vmobjlock);
1.41      thorpej  1046:                return 0;
                   1047:        }
                   1048:
                   1049:        /*
                   1050:         * we don't actually want to ever free the uvm_loanzero_page, so
                   1051:         * just reactivate or dequeue it.
                   1052:         */
                   1053:
1.91      ad       1054:        pg = uvm_pagelookup(uobj, 0);
1.41      thorpej  1055:        KASSERT(pg != NULL);
                   1056:
1.93      ad       1057:        uvm_pagelock(pg);
1.91      ad       1058:        if (pg->uanon) {
1.41      thorpej  1059:                uvm_pageactivate(pg);
1.91      ad       1060:        } else {
1.41      thorpej  1061:                uvm_pagedequeue(pg);
1.89      ad       1062:        }
1.93      ad       1063:        uvm_pageunlock(pg);
1.41      thorpej  1064:
1.95      ad       1065:        rw_exit(uobj->vmobjlock);
1.41      thorpej  1066:        return 0;
                   1067: }
                   1068:
/* pager ops for uvm_loanzero_object; only a pgo_put method is needed */
static const struct uvm_pagerops ulz_pager = {
	.pgo_put = ulz_put,
};
                   1072:
                   1073: /*
1.40      thorpej  1074:  * uvm_loan_init(): initialize the uvm_loan() facility.
                   1075:  */
                   1076:
void
uvm_loan_init(void)
{

	/* the lock must exist before it is installed on the object */
	rw_init(&uvm_loanzero_lock);
	uvm_obj_init(&uvm_loanzero_object, &ulz_pager, false, 0);
	uvm_obj_setlock(&uvm_loanzero_object, &uvm_loanzero_lock);

	UVMHIST_INIT(loanhist, 300);
}
                   1087:
                   1088: /*
                   1089:  * uvm_loanbreak: break loan on a uobj page
                   1090:  *
                   1091:  * => called with uobj locked
1.101     ad       1092:  * => the page may be busy; if it's busy, it will be unbusied
1.42      yamt     1093:  * => return value:
                   1094:  *     newly allocated page if succeeded
                   1095:  */
struct vm_page *
uvm_loanbreak(struct vm_page *uobjpage)
{
	struct vm_page *pg;
	struct uvm_object *uobj __diagused = uobjpage->uobject;

	KASSERT(uobj != NULL);
	KASSERT(rw_write_held(uobj->vmobjlock));

	/* alloc new un-owned page */
	pg = uvm_pagealloc(NULL, 0, NULL, 0);
	if (pg == NULL)
		return NULL;

	/*
	 * copy the data from the old page to the new
	 * one and clear the fake flags on the new page (keep it busy).
	 * force a reload of the old page by clearing it from all
	 * pmaps.
	 * then rename the pages.
	 */

	uvm_pagecopy(uobjpage, pg);	/* old -> new */
	pg->flags &= ~PG_FAKE;
	KASSERT(uvm_pagegetdirty(pg) == UVM_PAGE_STATUS_DIRTY);
	pmap_page_protect(uobjpage, VM_PROT_NONE);
	/* uobj still locked */
	/* the old page may arrive busy (see function comment); unbusy it */
	if ((uobjpage->flags & PG_BUSY) != 0) {
		uobjpage->flags &= ~PG_BUSY;
		UVM_PAGE_OWN(uobjpage, NULL);
	}

	/*
	 * if the page is no longer referenced by
	 * an anon (i.e. we are breaking an O->K
	 * loan), then remove it from any pageq's.
	 */

	uvm_pagelock2(uobjpage, pg);
	uvm_pagewakeup(uobjpage);
	if (uobjpage->uanon == NULL)
		uvm_pagedequeue(uobjpage);

	/*
	 * replace uobjpage with new page.
	 */

	uvm_pagereplace(uobjpage, pg);

	/*
	 * at this point we have absolutely no
	 * control over uobjpage
	 */

	uvm_pageactivate(pg);
	uvm_pageunlock2(uobjpage, pg);

	/*
	 * done!  loan is broken and "pg" is
	 * PG_BUSY.   it can now replace uobjpage.
	 */

	return pg;
}
1.75      uebayasi 1160:
                   1161: int
1.76      uebayasi 1162: uvm_loanbreak_anon(struct vm_anon *anon, struct uvm_object *uobj)
1.75      uebayasi 1163: {
1.92      ad       1164:        struct vm_page *newpg, *oldpg;
1.94      ad       1165:        unsigned oldstatus;
1.75      uebayasi 1166:
1.95      ad       1167:        KASSERT(rw_write_held(anon->an_lock));
                   1168:        KASSERT(uobj == NULL || rw_write_held(uobj->vmobjlock));
1.94      ad       1169:        KASSERT(anon->an_page->loan_count > 0);
1.77      uebayasi 1170:
1.75      uebayasi 1171:        /* get new un-owned replacement page */
1.92      ad       1172:        newpg = uvm_pagealloc(NULL, 0, NULL, 0);
                   1173:        if (newpg == NULL) {
1.75      uebayasi 1174:                return ENOMEM;
                   1175:        }
                   1176:
1.92      ad       1177:        oldpg = anon->an_page;
1.93      ad       1178:        /* copy old -> new */
                   1179:        uvm_pagecopy(oldpg, newpg);
1.94      ad       1180:        KASSERT(uvm_pagegetdirty(newpg) == UVM_PAGE_STATUS_DIRTY);
1.93      ad       1181:
                   1182:        /* force reload */
                   1183:        pmap_page_protect(oldpg, VM_PROT_NONE);
1.94      ad       1184:        oldstatus = uvm_pagegetdirty(anon->an_page);
1.93      ad       1185:
                   1186:        uvm_pagelock2(oldpg, newpg);
1.92      ad       1187:        if (uobj == NULL) {
                   1188:                /*
                   1189:                 * we were the lender (A->K); need to remove the page from
                   1190:                 * pageq's.
1.94      ad       1191:                 *
                   1192:                 * PG_ANON is updated by the caller.
1.92      ad       1193:                 */
1.94      ad       1194:                KASSERT((oldpg->flags & PG_ANON) != 0);
                   1195:                oldpg->flags &= ~PG_ANON;
1.92      ad       1196:                uvm_pagedequeue(oldpg);
                   1197:        }
                   1198:        oldpg->uanon = NULL;
1.75      uebayasi 1199:
1.76      uebayasi 1200:        if (uobj) {
1.75      uebayasi 1201:                /* if we were receiver of loan */
1.94      ad       1202:                KASSERT((oldpg->pqflags & PG_ANON) == 0);
1.92      ad       1203:                oldpg->loan_count--;
1.75      uebayasi 1204:        }
                   1205:
                   1206:        /* install new page in anon */
1.92      ad       1207:        anon->an_page = newpg;
                   1208:        newpg->uanon = anon;
                   1209:        newpg->flags |= PG_ANON;
                   1210:
                   1211:        uvm_pageactivate(newpg);
1.93      ad       1212:        uvm_pageunlock2(oldpg, newpg);
1.75      uebayasi 1213:
1.92      ad       1214:        newpg->flags &= ~(PG_BUSY|PG_FAKE);
                   1215:        UVM_PAGE_OWN(newpg, NULL);
1.75      uebayasi 1216:
1.89      ad       1217:        if (uobj) {
1.95      ad       1218:                rw_exit(uobj->vmobjlock);
1.89      ad       1219:        }
                   1220:
1.75      uebayasi 1221:        /* done! */
1.94      ad       1222:        kpreempt_disable();
                   1223:        if (uobj != NULL) {
                   1224:                CPU_COUNT(CPU_COUNT_ANONPAGES, 1);
                   1225:        } else {
                   1226:                CPU_COUNT(CPU_COUNT_ANONUNKNOWN + oldstatus, -1);
                   1227:        }
                   1228:        CPU_COUNT(CPU_COUNT_ANONDIRTY, 1);
                   1229:        kpreempt_enable();
1.75      uebayasi 1230:        return 0;
                   1231: }

CVSweb <webmaster@jp.NetBSD.org>