[BACK] Return to uvm_object.c CVS log · [TXT] [DIR] · Up to [cvs.NetBSD.org] / src / sys / uvm

Annotation of src/sys/uvm/uvm_object.c, Revision 1.19

1.19    ! ad          1: /*     $NetBSD: uvm_object.c,v 1.18 2019/12/15 21:11:35 ad Exp $       */
1.1       yamt        2:
                      3: /*
1.17      ad          4:  * Copyright (c) 2006, 2010, 2019 The NetBSD Foundation, Inc.
1.1       yamt        5:  * All rights reserved.
                      6:  *
1.3       rmind       7:  * This code is derived from software contributed to The NetBSD Foundation
                      8:  * by Mindaugas Rasiukevicius.
                      9:  *
1.1       yamt       10:  * Redistribution and use in source and binary forms, with or without
                     11:  * modification, are permitted provided that the following conditions
                     12:  * are met:
                     13:  * 1. Redistributions of source code must retain the above copyright
                     14:  *    notice, this list of conditions and the following disclaimer.
                     15:  * 2. Redistributions in binary form must reproduce the above copyright
                     16:  *    notice, this list of conditions and the following disclaimer in the
                     17:  *    documentation and/or other materials provided with the distribution.
                     18:  *
                     19:  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
                     20:  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
                     21:  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
                     22:  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
                     23:  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
                     24:  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
                     25:  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
                     26:  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
                     27:  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
                     28:  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
                     29:  * POSSIBILITY OF SUCH DAMAGE.
                     30:  */
                     31:
                     32: /*
                     33:  * uvm_object.c: operate with memory objects
                     34:  *
                     35:  * TODO:
                     36:  *  1. Support PG_RELEASED-using objects
                     37:  */
                     38:
                     39: #include <sys/cdefs.h>
1.19    ! ad         40: __KERNEL_RCSID(0, "$NetBSD: uvm_object.c,v 1.18 2019/12/15 21:11:35 ad Exp $");
1.1       yamt       41:
1.13      pooka      42: #ifdef _KERNEL_OPT
1.7       thorpej    43: #include "opt_ddb.h"
1.13      pooka      44: #endif
1.1       yamt       45:
                     46: #include <sys/param.h>
1.8       rmind      47: #include <sys/mutex.h>
                     48: #include <sys/queue.h>
1.1       yamt       49:
                     50: #include <uvm/uvm.h>
1.7       thorpej    51: #include <uvm/uvm_ddb.h>
1.18      ad         52: #include <uvm/uvm_page_array.h>
1.1       yamt       53:
1.8       rmind      54: /* Page count to fetch per single step. */
                     55: #define        FETCH_PAGECOUNT                 16
                     56:
                     57: /*
                     58:  * uvm_obj_init: initialize UVM memory object.
                     59:  */
                     60: void
                     61: uvm_obj_init(struct uvm_object *uo, const struct uvm_pagerops *ops,
                     62:     bool alock, u_int refs)
                     63: {
                     64:
1.15      mrg        65: #if 0 /* notyet */
1.14      mrg        66:        KASSERT(ops);
1.15      mrg        67: #endif
1.8       rmind      68:        if (alock) {
                     69:                /* Allocate and assign a lock. */
                     70:                uo->vmobjlock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);
                     71:        } else {
                     72:                /* The lock will need to be set via uvm_obj_setlock(). */
                     73:                uo->vmobjlock = NULL;
                     74:        }
                     75:        uo->pgops = ops;
                     76:        LIST_INIT(&uo->uo_ubc);
                     77:        uo->uo_npages = 0;
                     78:        uo->uo_refs = refs;
1.17      ad         79:        radix_tree_init_tree(&uo->uo_pages);
1.8       rmind      80: }
                     81:
                     82: /*
                     83:  * uvm_obj_destroy: destroy UVM memory object.
                     84:  */
                     85: void
                     86: uvm_obj_destroy(struct uvm_object *uo, bool dlock)
                     87: {
                     88:
1.17      ad         89:        KASSERT(radix_tree_empty_tree_p(&uo->uo_pages));
1.8       rmind      90:
1.10      rmind      91:        /* Purge any UBC entries associated with this object. */
                     92:        ubc_purge(uo);
                     93:
1.8       rmind      94:        /* Destroy the lock, if requested. */
                     95:        if (dlock) {
                     96:                mutex_obj_free(uo->vmobjlock);
                     97:        }
1.17      ad         98:        radix_tree_fini_tree(&uo->uo_pages);
1.8       rmind      99: }
                    100:
                    101: /*
                    102:  * uvm_obj_setlock: assign a vmobjlock to the UVM object.
                    103:  *
                    104:  * => Caller is responsible to ensure that UVM objects is not use.
                    105:  * => Only dynamic lock may be previously set.  We drop the reference then.
                    106:  */
                    107: void
                    108: uvm_obj_setlock(struct uvm_object *uo, kmutex_t *lockptr)
                    109: {
                    110:        kmutex_t *olockptr = uo->vmobjlock;
                    111:
                    112:        if (olockptr) {
                    113:                /* Drop the reference on the old lock. */
                    114:                mutex_obj_free(olockptr);
                    115:        }
                    116:        if (lockptr == NULL) {
                    117:                /* If new lock is not passed - allocate default one. */
                    118:                lockptr = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);
                    119:        }
                    120:        uo->vmobjlock = lockptr;
                    121: }
1.1       yamt      122:
/*
 * uvm_obj_wirepages: wire the pages of entire UVM object.
 *
 * => NOTE: this function should only be used for types of objects
 *  where PG_RELEASED flag is never set (aobj objects)
 * => caller must pass page-aligned start and end values
 * => if "list" is non-NULL, every wired page is also appended to it.
 * => returns 0 on success; on pager error, any pages already wired
 *    are unwired again and the error is returned.
 */
int
uvm_obj_wirepages(struct uvm_object *uobj, off_t start, off_t end,
    struct pglist *list)
{
	int i, npages, error;
	struct vm_page *pgs[FETCH_PAGECOUNT], *pg = NULL;
	off_t offset = start, left;

	/* Number of pages remaining to wire. */
	left = (end - start) >> PAGE_SHIFT;

	mutex_enter(uobj->vmobjlock);
	while (left) {

		/* Fetch at most FETCH_PAGECOUNT pages per iteration. */
		npages = MIN(FETCH_PAGECOUNT, left);

		/*
		 * Get the pages.  The pager call consumes vmobjlock:
		 * note the re-enter below and that the error path does
		 * not unlock.
		 */
		memset(pgs, 0, sizeof(pgs));
		error = (*uobj->pgops->pgo_get)(uobj, offset, pgs, &npages, 0,
			VM_PROT_READ | VM_PROT_WRITE, UVM_ADV_SEQUENTIAL,
			PGO_ALLPAGES | PGO_SYNCIO);

		if (error)
			goto error;

		mutex_enter(uobj->vmobjlock);
		for (i = 0; i < npages; i++) {

			KASSERT(pgs[i] != NULL);
			KASSERT(!(pgs[i]->flags & PG_RELEASED));

			/*
			 * Loan break: replace a loaned page with a
			 * private copy.  If uvm_loanbreak() fails to
			 * allocate, drop the lock, wait for free memory
			 * and retry.
			 */
			if (pgs[i]->loan_count) {
				while (pgs[i]->loan_count) {
					pg = uvm_loanbreak(pgs[i]);
					if (!pg) {
						mutex_exit(uobj->vmobjlock);
						uvm_wait("uobjwirepg");
						mutex_enter(uobj->vmobjlock);
						continue;
					}
				}
				pgs[i] = pg;
			}

			/*
			 * For aobj pages: mark dirty and release the
			 * swap slot.
			 * NOTE(review): uao_dropswap() is passed the loop
			 * index "i" rather than (offset >> PAGE_SHIFT) + i;
			 * this looks wrong for start != 0 or objects
			 * larger than FETCH_PAGECOUNT pages -- confirm
			 * against uao_dropswap()'s expected page index.
			 */
			if (pgs[i]->flags & PG_AOBJ) {
				pgs[i]->flags &= ~(PG_CLEAN);
				uao_dropswap(uobj, i);
			}
		}

		/* Wire the pages */
		for (i = 0; i < npages; i++) {
			/* Wiring state is covered by the page lock. */
			uvm_pagelock(pgs[i]);
			uvm_pagewire(pgs[i]);
			uvm_pageunlock(pgs[i]);
			if (list != NULL)
				TAILQ_INSERT_TAIL(list, pgs[i], pageq.queue);
		}

		/* Unbusy the pages */
		uvm_page_unbusy(pgs, npages);

		left -= npages;
		offset += npages << PAGE_SHIFT;
	}
	mutex_exit(uobj->vmobjlock);

	return 0;

error:
	/* Unwire the pages which has been wired */
	uvm_obj_unwirepages(uobj, start, offset);

	return error;
}
                    207:
                    208: /*
1.8       rmind     209:  * uvm_obj_unwirepages: unwire the pages of entire UVM object.
1.1       yamt      210:  *
                    211:  * => NOTE: this function should only be used for types of objects
                    212:  *  where PG_RELEASED flag is never set
                    213:  * => caller must pass page-aligned start and end values
                    214:  */
                    215: void
1.8       rmind     216: uvm_obj_unwirepages(struct uvm_object *uobj, off_t start, off_t end)
1.1       yamt      217: {
                    218:        struct vm_page *pg;
                    219:        off_t offset;
                    220:
1.8       rmind     221:        mutex_enter(uobj->vmobjlock);
1.1       yamt      222:        for (offset = start; offset < end; offset += PAGE_SIZE) {
                    223:                pg = uvm_pagelookup(uobj, offset);
                    224:
                    225:                KASSERT(pg != NULL);
                    226:                KASSERT(!(pg->flags & PG_RELEASED));
                    227:
1.19    ! ad        228:                uvm_pagelock(pg);
1.1       yamt      229:                uvm_pageunwire(pg);
1.19    ! ad        230:                uvm_pageunlock(pg);
1.1       yamt      231:        }
1.8       rmind     232:        mutex_exit(uobj->vmobjlock);
1.1       yamt      233: }
1.7       thorpej   234:
1.12      pooka     235: #if defined(DDB) || defined(DEBUGPRINT)
1.7       thorpej   236:
                    237: /*
                    238:  * uvm_object_printit: actually prints the object
                    239:  */
                    240: void
                    241: uvm_object_printit(struct uvm_object *uobj, bool full,
                    242:     void (*pr)(const char *, ...))
                    243: {
1.18      ad        244:        struct uvm_page_array a;
1.7       thorpej   245:        struct vm_page *pg;
                    246:        int cnt = 0;
1.18      ad        247:        voff_t off;
1.7       thorpej   248:
                    249:        (*pr)("OBJECT %p: locked=%d, pgops=%p, npages=%d, ",
1.8       rmind     250:            uobj, mutex_owned(uobj->vmobjlock), uobj->pgops, uobj->uo_npages);
1.7       thorpej   251:        if (UVM_OBJ_IS_KERN_OBJECT(uobj))
                    252:                (*pr)("refs=<SYSTEM>\n");
                    253:        else
                    254:                (*pr)("refs=%d\n", uobj->uo_refs);
                    255:
                    256:        if (!full) {
                    257:                return;
                    258:        }
                    259:        (*pr)("  PAGES <pg,offset>:\n  ");
1.18      ad        260:        uvm_page_array_init(&a);
                    261:        off = 0;
                    262:        while ((pg = uvm_page_array_fill_and_peek(&a, uobj, off, 0, 0))
                    263:            != NULL) {
1.7       thorpej   264:                cnt++;
                    265:                (*pr)("<%p,0x%llx> ", pg, (long long)pg->offset);
                    266:                if ((cnt % 3) == 0) {
                    267:                        (*pr)("\n  ");
                    268:                }
1.18      ad        269:                off = pg->offset + PAGE_SIZE;
                    270:                uvm_page_array_advance(&a);
1.7       thorpej   271:        }
                    272:        if ((cnt % 3) != 0) {
                    273:                (*pr)("\n");
                    274:        }
1.18      ad        275:        uvm_page_array_fini(&a);
1.7       thorpej   276: }
                    277:
                    278: #endif /* DDB || DEBUGPRINT */

CVSweb <webmaster@jp.NetBSD.org>