
Annotation of src/sys/uvm/uvm_vnode.c, Revision 1.69.10.1

1.69.10.1! elad        1: /*     $NetBSD: uvm_vnode.c,v 1.69 2005/12/11 12:25:29 christos Exp $  */
1.1       mrg         2:
                      3: /*
                      4:  * Copyright (c) 1997 Charles D. Cranor and Washington University.
                      5:  * Copyright (c) 1991, 1993
1.49      chs         6:  *      The Regents of the University of California.
1.1       mrg         7:  * Copyright (c) 1990 University of Utah.
                      8:  *
                      9:  * All rights reserved.
                     10:  *
                     11:  * This code is derived from software contributed to Berkeley by
                     12:  * the Systems Programming Group of the University of Utah Computer
                     13:  * Science Department.
                     14:  *
                     15:  * Redistribution and use in source and binary forms, with or without
                     16:  * modification, are permitted provided that the following conditions
                     17:  * are met:
                     18:  * 1. Redistributions of source code must retain the above copyright
                     19:  *    notice, this list of conditions and the following disclaimer.
                     20:  * 2. Redistributions in binary form must reproduce the above copyright
                     21:  *    notice, this list of conditions and the following disclaimer in the
                     22:  *    documentation and/or other materials provided with the distribution.
                     23:  * 3. All advertising materials mentioning features or use of this software
                     24:  *    must display the following acknowledgement:
                     25:  *      This product includes software developed by Charles D. Cranor,
1.49      chs        26:  *     Washington University, the University of California, Berkeley and
1.1       mrg        27:  *     its contributors.
                     28:  * 4. Neither the name of the University nor the names of its contributors
                     29:  *    may be used to endorse or promote products derived from this software
                     30:  *    without specific prior written permission.
                     31:  *
                     32:  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
                     33:  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
                     34:  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
                     35:  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
                     36:  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
                     37:  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
                     38:  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
                     39:  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
                     40:  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
                     41:  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
                     42:  * SUCH DAMAGE.
                     43:  *
                     44:  *      @(#)vnode_pager.c       8.8 (Berkeley) 2/13/94
1.3       mrg        45:  * from: Id: uvm_vnode.c,v 1.1.2.26 1998/02/02 20:38:07 chuck Exp
1.1       mrg        46:  */
                     47:
1.55      lukem      48: /*
                     49:  * uvm_vnode.c: the vnode pager.
                     50:  */
                     51:
                     52: #include <sys/cdefs.h>
1.69.10.1! elad       53: __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.69 2005/12/11 12:25:29 christos Exp $");
1.55      lukem      54:
1.6       thorpej    55: #include "fs_nfs.h"
1.4       mrg        56: #include "opt_uvmhist.h"
1.67      yamt       57: #include "opt_readahead.h"
1.37      chs        58: #include "opt_ddb.h"
1.1       mrg        59:
                     60: #include <sys/param.h>
                     61: #include <sys/systm.h>
1.37      chs        62: #include <sys/kernel.h>
1.1       mrg        63: #include <sys/proc.h>
                     64: #include <sys/malloc.h>
                     65: #include <sys/vnode.h>
1.13      thorpej    66: #include <sys/disklabel.h>
                     67: #include <sys/ioctl.h>
                     68: #include <sys/fcntl.h>
                     69: #include <sys/conf.h>
1.37      chs        70: #include <sys/pool.h>
                     71: #include <sys/mount.h>
1.13      thorpej    72:
                     73: #include <miscfs/specfs/specdev.h>
1.1       mrg        74:
                     75: #include <uvm/uvm.h>
1.68      yamt       76: #include <uvm/uvm_readahead.h>
1.1       mrg        77:
                     78: /*
                     79:  * functions
                     80:  */
                     81:
1.66      thorpej    82: static void    uvn_detach(struct uvm_object *);
                     83: static int     uvn_get(struct uvm_object *, voff_t, struct vm_page **, int *,
                     84:                        int, vm_prot_t, int, int);
                     85: static int     uvn_put(struct uvm_object *, voff_t, voff_t, int);
                     86: static void    uvn_reference(struct uvm_object *);
1.52      chs        87:
1.66      thorpej    88: static int     uvn_findpage(struct uvm_object *, voff_t, struct vm_page **,
                     89:                             int);
1.1       mrg        90:
                     91: /*
                     92:  * master pager structure
                     93:  */
                     94:
                     95: struct uvm_pagerops uvm_vnodeops = {
1.37      chs        96:        NULL,
1.8       mrg        97:        uvn_reference,
                     98:        uvn_detach,
1.37      chs        99:        NULL,
1.8       mrg       100:        uvn_get,
                    101:        uvn_put,
1.1       mrg       102: };
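
/*
 * Illustrative note (not part of the original source): the six positional
 * initializers above fill in, in order, the pgo_init, pgo_reference,
 * pgo_detach, pgo_fault, pgo_get and pgo_put members of struct
 * uvm_pagerops; the NULL slots mean the vnode pager has no init or fault
 * routine.  Generic UVM code reaches these routines through the object's
 * pgops pointer, roughly:
 *
 *	error = (*uobj->pgops->pgo_get)(uobj, offset, pgs, &npages,
 *	    centeridx, access_type, advice, flags);
 */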
                    103:
                    104: /*
                    105:  * the ops!
                    106:  */
                    107:
                    108: /*
                    109:  * uvn_attach
                    110:  *
                    111:  * attach a vnode structure to a VM object.  if the vnode is already
                    112:  * attached, then just bump the reference count by one and return the
                    113:  * VM object.   if not already attached, attach and return the new VM obj.
                     114:  * the "accessprot" tells the max access the attaching thread wants to
                     115:  * have to our pages.
                    116:  *
                    117:  * => caller must _not_ already be holding the lock on the uvm_object.
                    118:  * => in fact, nothing should be locked so that we can sleep here.
                    119:  * => note that uvm_object is first thing in vnode structure, so their
                    120:  *    pointers are equiv.
                    121:  */
                    122:
1.8       mrg       123: struct uvm_object *
1.65      thorpej   124: uvn_attach(void *arg, vm_prot_t accessprot)
1.8       mrg       125: {
                    126:        struct vnode *vp = arg;
1.52      chs       127:        struct uvm_object *uobj = &vp->v_uobj;
1.8       mrg       128:        struct vattr vattr;
1.59      gehenna   129:        const struct bdevsw *bdev;
1.37      chs       130:        int result;
1.13      thorpej   131:        struct partinfo pi;
1.37      chs       132:        voff_t used_vnode_size;
1.8       mrg       133:        UVMHIST_FUNC("uvn_attach"); UVMHIST_CALLED(maphist);
                    134:
                    135:        UVMHIST_LOG(maphist, "(vn=0x%x)", arg,0,0,0);
1.37      chs       136:        used_vnode_size = (voff_t)0;
1.13      thorpej   137:
1.8       mrg       138:        /*
1.52      chs       139:         * first get a lock on the uobj.
1.8       mrg       140:         */
1.52      chs       141:
                    142:        simple_lock(&uobj->vmobjlock);
                    143:        while (vp->v_flag & VXLOCK) {
                    144:                vp->v_flag |= VXWANT;
1.8       mrg       145:                UVMHIST_LOG(maphist, "  SLEEPING on blocked vn",0,0,0,0);
1.52      chs       146:                UVM_UNLOCK_AND_WAIT(uobj, &uobj->vmobjlock, FALSE,
1.8       mrg       147:                    "uvn_attach", 0);
1.52      chs       148:                simple_lock(&uobj->vmobjlock);
1.8       mrg       149:                UVMHIST_LOG(maphist,"  WOKE UP",0,0,0,0);
                    150:        }
1.1       mrg       151:
1.8       mrg       152:        /*
1.18      bouyer    153:         * if we're mapping a BLK device, make sure it is a disk.
1.13      thorpej   154:         */
1.59      gehenna   155:        if (vp->v_type == VBLK) {
                    156:                bdev = bdevsw_lookup(vp->v_rdev);
                    157:                if (bdev == NULL || bdev->d_type != D_DISK) {
                    158:                        simple_unlock(&uobj->vmobjlock);
                    159:                        UVMHIST_LOG(maphist,"<- done (VBLK not D_DISK!)",
                    160:                                    0,0,0,0);
                    161:                        return(NULL);
                    162:                }
1.13      thorpej   163:        }
1.51      chs       164:        KASSERT(vp->v_type == VREG || vp->v_type == VBLK);
1.37      chs       165:
1.13      thorpej   166:        /*
1.37      chs       167:         * set up our idea of the size
                    168:         * if this hasn't been done already.
1.8       mrg       169:         */
1.52      chs       170:        if (vp->v_size == VSIZENOTSET) {
1.8       mrg       171:
1.52      chs       172:
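	/*
	 * Added note (not in the original): VXLOCK is taken here so that
	 * any other thread entering uvn_attach() on this vnode blocks in
	 * the VXLOCK loop above while we sleep finding out the size.
	 */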
                    173:        vp->v_flag |= VXLOCK;
                    174:        simple_unlock(&uobj->vmobjlock); /* drop lock in case we sleep */
1.8       mrg       175:                /* XXX: curproc? */
1.13      thorpej   176:        if (vp->v_type == VBLK) {
                    177:                /*
                    178:                 * We could implement this as a specfs getattr call, but:
                    179:                 *
                    180:                 *      (1) VOP_GETATTR() would get the file system
                    181:                 *          vnode operation, not the specfs operation.
                    182:                 *
                    183:                 *      (2) All we want is the size, anyhow.
                    184:                 */
1.59      gehenna   185:                bdev = bdevsw_lookup(vp->v_rdev);
                    186:                if (bdev != NULL) {
                    187:                        result = (*bdev->d_ioctl)(vp->v_rdev, DIOCGPART,
1.69      christos  188:                                                  (caddr_t)&pi, FREAD, curlwp);
1.59      gehenna   189:                } else {
                    190:                        result = ENXIO;
                    191:                }
1.13      thorpej   192:                if (result == 0) {
                    193:                        /* XXX should remember blocksize */
1.37      chs       194:                        used_vnode_size = (voff_t)pi.disklab->d_secsize *
                    195:                            (voff_t)pi.part->p_size;
1.13      thorpej   196:                }
                    197:        } else {
1.69.10.1! elad      198:                result = VOP_GETATTR(vp, &vattr, curproc->p_cred, curlwp);
1.13      thorpej   199:                if (result == 0)
                    200:                        used_vnode_size = vattr.va_size;
1.8       mrg       201:        }
1.1       mrg       202:
1.8       mrg       203:        /* relock object */
1.52      chs       204:        simple_lock(&uobj->vmobjlock);
1.37      chs       205:
1.52      chs       206:        if (vp->v_flag & VXWANT) {
                    207:                wakeup(vp);
                    208:        }
                    209:        vp->v_flag &= ~(VXLOCK|VXWANT);
1.1       mrg       210:
1.8       mrg       211:        if (result != 0) {
1.52      chs       212:                simple_unlock(&uobj->vmobjlock);
1.8       mrg       213:                UVMHIST_LOG(maphist,"<- done (VOP_GETATTR FAILED!)", 0,0,0,0);
                    214:                return(NULL);
                    215:        }
1.52      chs       216:        vp->v_size = used_vnode_size;
1.8       mrg       217:
                    218:        }
                    219:
1.52      chs       220:        simple_unlock(&uobj->vmobjlock);
                    221:        UVMHIST_LOG(maphist,"<- done, refcnt=%d", vp->v_usecount,
1.37      chs       222:            0, 0, 0);
1.52      chs       223:        return uobj;
1.1       mrg       224: }
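
/*
 * Illustrative sketch (not part of the original source): a mapping path
 * would typically call uvn_attach() with no locks held, passing the
 * maximum protection it intends to use, and treat a NULL return as an
 * error.  The error code chosen here is only an example:
 *
 *	struct uvm_object *uobj;
 *
 *	uobj = uvn_attach(vp, VM_PROT_READ | VM_PROT_WRITE);
 *	if (uobj == NULL)
 *		return EINVAL;
 *
 * The reference taken by uvn_attach() is later dropped through the
 * pager's detach operation (uvn_detach() below).
 */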
                    225:
                    226:
                    227: /*
                    228:  * uvn_reference
                    229:  *
                    230:  * duplicate a reference to a VM object.  Note that the reference
1.49      chs       231:  * count must already be at least one (the passed in reference) so
1.1       mrg       232:  * there is no chance of the uvn being killed or locked out here.
                    233:  *
1.49      chs       234:  * => caller must call with object unlocked.
1.1       mrg       235:  * => caller must be using the same accessprot as was used at attach time
                    236:  */
                    237:
1.66      thorpej   238: static void
1.65      thorpej   239: uvn_reference(struct uvm_object *uobj)
1.1       mrg       240: {
1.37      chs       241:        VREF((struct vnode *)uobj);
1.1       mrg       242: }
                    243:
1.52      chs       244:
1.1       mrg       245: /*
                    246:  * uvn_detach
                    247:  *
                    248:  * remove a reference to a VM object.
                    249:  *
                    250:  * => caller must call with object unlocked and map locked.
                    251:  */
1.52      chs       252:
1.66      thorpej   253: static void
1.65      thorpej   254: uvn_detach(struct uvm_object *uobj)
1.8       mrg       255: {
1.37      chs       256:        vrele((struct vnode *)uobj);
1.1       mrg       257: }
                    258:
                    259: /*
                    260:  * uvn_put: flush page data to backing store.
                    261:  *
1.53      sommerfe  262:  * => object must be locked on entry!   VOP_PUTPAGES must unlock it.
1.1       mrg       263:  * => flags: PGO_SYNCIO -- use sync. I/O
                    264:  * => note: caller must set PG_CLEAN and pmap_clear_modify (if needed)
                    265:  */
                    266:
1.66      thorpej   267: static int
1.65      thorpej   268: uvn_put(struct uvm_object *uobj, voff_t offlo, voff_t offhi, int flags)
1.1       mrg       269: {
1.37      chs       270:        struct vnode *vp = (struct vnode *)uobj;
                    271:        int error;
1.1       mrg       272:
1.53      sommerfe  273:        LOCK_ASSERT(simple_lock_held(&vp->v_interlock));
1.54      chs       274:        error = VOP_PUTPAGES(vp, offlo, offhi, flags);
1.53      sommerfe  275:        LOCK_ASSERT(!simple_lock_held(&vp->v_interlock));
1.48      chs       276:        return error;
1.1       mrg       277: }
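
/*
 * Illustrative sketch (not part of the original source): a caller such as
 * a sync or pageout path enters the put op with the object locked and
 * relies on it being unlocked on return, e.g. to flush and free a range
 * of pages synchronously:
 *
 *	simple_lock(&uobj->vmobjlock);
 *	error = (*uobj->pgops->pgo_put)(uobj, lo, hi,
 *	    PGO_CLEANIT | PGO_FREE | PGO_SYNCIO);
 *	(vmobjlock has already been released when this returns)
 */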
                    278:
                    279:
                    280: /*
                    281:  * uvn_get: get pages (synchronously) from backing store
                    282:  *
                    283:  * => prefer map unlocked (not required)
                    284:  * => object must be locked!  we will _unlock_ it before starting any I/O.
                    285:  * => flags: PGO_ALLPAGES: get all of the pages
                    286:  *           PGO_LOCKED: fault data structures are locked
                    287:  * => NOTE: offset is the offset of pps[0], _NOT_ pps[centeridx]
                    288:  * => NOTE: caller must check for released pages!!
                    289:  */
1.49      chs       290:
1.66      thorpej   291: static int
1.65      thorpej   292: uvn_get(struct uvm_object *uobj, voff_t offset,
                    293:     struct vm_page **pps /* IN/OUT */,
                    294:     int *npagesp /* IN (OUT if PGO_LOCKED)*/,
                    295:     int centeridx, vm_prot_t access_type, int advice, int flags)
1.8       mrg       296: {
1.37      chs       297:        struct vnode *vp = (struct vnode *)uobj;
                    298:        int error;
1.67      yamt      299: #if defined(READAHEAD_STATS)
                    300:        int orignpages = *npagesp;
                    301: #endif /* defined(READAHEAD_STATS) */
                    302:
1.37      chs       303:        UVMHIST_FUNC("uvn_get"); UVMHIST_CALLED(ubchist);
                    304:
                    305:        UVMHIST_LOG(ubchist, "vp %p off 0x%x", vp, (int)offset, 0,0);
1.68      yamt      306:
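	/*
	 * Added note (not in the original): for read-only requests outside
	 * the PGO_LOCKED fault path, drop the object lock, make sure the
	 * vnode has a read-ahead context, and file a read-ahead request
	 * covering the pages asked for before doing the real VOP_GETPAGES().
	 */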
                    307:        if ((access_type & VM_PROT_WRITE) == 0 && (flags & PGO_LOCKED) == 0) {
                    308:                simple_unlock(&vp->v_interlock);
                    309:                vn_ra_allocctx(vp);
                    310:                uvm_ra_request(vp->v_ractx, advice, uobj, offset,
                    311:                    *npagesp << PAGE_SHIFT);
                    312:                simple_lock(&vp->v_interlock);
                    313:        }
                    314:
1.37      chs       315:        error = VOP_GETPAGES(vp, offset, pps, npagesp, centeridx,
                    316:                             access_type, advice, flags);
1.67      yamt      317:
                    318: #if defined(READAHEAD_STATS)
                    319:        if (((flags & PGO_LOCKED) != 0 && *npagesp > 0) ||
                    320:            ((flags & (PGO_LOCKED|PGO_SYNCIO)) == PGO_SYNCIO && error == 0)) {
                    321:                int i;
                    322:
                    323:                if ((flags & PGO_LOCKED) == 0) {
                    324:                        simple_lock(&uobj->vmobjlock);
                    325:                }
                    326:                for (i = 0; i < orignpages; i++) {
                    327:                        struct vm_page *pg = pps[i];
                    328:
                    329:                        if (pg == NULL || pg == PGO_DONTCARE) {
                    330:                                continue;
                    331:                        }
                    332:                        if ((pg->flags & PG_SPECULATIVE) != 0) {
                    333:                                pg->flags &= ~PG_SPECULATIVE;
                    334:                                uvm_ra_hit.ev_count++;
                    335:                        }
                    336:                }
                    337:                if ((flags & PGO_LOCKED) == 0) {
                    338:                        simple_unlock(&uobj->vmobjlock);
                    339:                }
                    340:        }
                    341: #endif /* defined(READAHEAD_STATS) */
                    342:
1.48      chs       343:        return error;
1.37      chs       344: }
1.8       mrg       345:
                    346:
1.37      chs       347: /*
                    348:  * uvn_findpages:
                     349:  * return the pages for the uobj at the offsets requested, allocating if needed.
                    350:  * => uobj must be locked.
1.52      chs       351:  * => returned pages will be BUSY.
1.37      chs       352:  */
1.1       mrg       353:
1.58      enami     354: int
1.65      thorpej   355: uvn_findpages(struct uvm_object *uobj, voff_t offset, int *npagesp,
                    356:     struct vm_page **pgs, int flags)
1.37      chs       357: {
1.58      enami     358:        int i, count, found, npages, rv;
1.8       mrg       359:
1.58      enami     360:        count = found = 0;
1.37      chs       361:        npages = *npagesp;
1.52      chs       362:        if (flags & UFP_BACKWARD) {
                    363:                for (i = npages - 1; i >= 0; i--, offset -= PAGE_SIZE) {
                    364:                        rv = uvn_findpage(uobj, offset, &pgs[i], flags);
1.58      enami     365:                        if (rv == 0) {
                    366:                                if (flags & UFP_DIRTYONLY)
                    367:                                        break;
                    368:                        } else
                    369:                                found++;
1.52      chs       370:                        count++;
                    371:                }
                    372:        } else {
                    373:                for (i = 0; i < npages; i++, offset += PAGE_SIZE) {
                    374:                        rv = uvn_findpage(uobj, offset, &pgs[i], flags);
1.58      enami     375:                        if (rv == 0) {
                    376:                                if (flags & UFP_DIRTYONLY)
                    377:                                        break;
                    378:                        } else
                    379:                                found++;
1.52      chs       380:                        count++;
                    381:                }
1.37      chs       382:        }
1.52      chs       383:        *npagesp = count;
1.58      enami     384:        return (found);
1.37      chs       385: }
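
/*
 * Illustrative sketch (not part of the original source): genfs-style code
 * might use this to look up (or create) a single busy page.  UFP_ALL is
 * assumed to be the "no restrictions" flag value from uvm_pager.h:
 *
 *	struct vm_page *pg = NULL;
 *	int npages = 1;
 *
 *	simple_lock(&uobj->vmobjlock);
 *	uvn_findpages(uobj, offset, &npages, &pg, UFP_ALL);
 *	simple_unlock(&uobj->vmobjlock);
 *
 * On success pg is returned PG_BUSY and must eventually be unbusied by
 * the caller.
 */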
1.8       mrg       386:
1.66      thorpej   387: static int
1.65      thorpej   388: uvn_findpage(struct uvm_object *uobj, voff_t offset, struct vm_page **pgp,
                    389:     int flags)
1.37      chs       390: {
                    391:        struct vm_page *pg;
1.52      chs       392:        boolean_t dirty;
1.37      chs       393:        UVMHIST_FUNC("uvn_findpage"); UVMHIST_CALLED(ubchist);
                    394:        UVMHIST_LOG(ubchist, "vp %p off 0x%lx", uobj, offset,0,0);
1.8       mrg       395:
1.37      chs       396:        if (*pgp != NULL) {
                    397:                UVMHIST_LOG(ubchist, "dontcare", 0,0,0,0);
                    398:                return 0;
                    399:        }
                    400:        for (;;) {
                    401:                /* look for an existing page */
                    402:                pg = uvm_pagelookup(uobj, offset);
                    403:
1.52      chs       404:                /* nope?  allocate one now */
1.37      chs       405:                if (pg == NULL) {
                    406:                        if (flags & UFP_NOALLOC) {
                    407:                                UVMHIST_LOG(ubchist, "noalloc", 0,0,0,0);
                    408:                                return 0;
                    409:                        }
1.47      chs       410:                        pg = uvm_pagealloc(uobj, offset, NULL, 0);
1.37      chs       411:                        if (pg == NULL) {
                    412:                                if (flags & UFP_NOWAIT) {
                    413:                                        UVMHIST_LOG(ubchist, "nowait",0,0,0,0);
                    414:                                        return 0;
1.8       mrg       415:                                }
1.37      chs       416:                                simple_unlock(&uobj->vmobjlock);
                    417:                                uvm_wait("uvn_fp1");
1.8       mrg       418:                                simple_lock(&uobj->vmobjlock);
1.37      chs       419:                                continue;
1.47      chs       420:                        }
1.52      chs       421:                        UVMHIST_LOG(ubchist, "alloced %p", pg,0,0,0);
1.37      chs       422:                        break;
                    423:                } else if (flags & UFP_NOCACHE) {
                    424:                        UVMHIST_LOG(ubchist, "nocache",0,0,0,0);
                    425:                        return 0;
1.8       mrg       426:                }
                    427:
1.37      chs       428:                /* page is there, see if we need to wait on it */
1.52      chs       429:                if ((pg->flags & PG_BUSY) != 0) {
1.37      chs       430:                        if (flags & UFP_NOWAIT) {
                    431:                                UVMHIST_LOG(ubchist, "nowait",0,0,0,0);
                    432:                                return 0;
                    433:                        }
                    434:                        pg->flags |= PG_WANTED;
1.58      enami     435:                        UVMHIST_LOG(ubchist, "wait %p", pg,0,0,0);
1.37      chs       436:                        UVM_UNLOCK_AND_WAIT(pg, &uobj->vmobjlock, 0,
                    437:                                            "uvn_fp2", 0);
                    438:                        simple_lock(&uobj->vmobjlock);
                    439:                        continue;
1.8       mrg       440:                }
1.49      chs       441:
1.37      chs       442:                /* skip PG_RDONLY pages if requested */
                    443:                if ((flags & UFP_NORDONLY) && (pg->flags & PG_RDONLY)) {
                    444:                        UVMHIST_LOG(ubchist, "nordonly",0,0,0,0);
                    445:                        return 0;
1.8       mrg       446:                }
                    447:
1.52      chs       448:                /* stop on clean pages if requested */
                    449:                if (flags & UFP_DIRTYONLY) {
                    450:                        dirty = pmap_clear_modify(pg) ||
                    451:                                (pg->flags & PG_CLEAN) == 0;
                    452:                        pg->flags |= PG_CLEAN;
                    453:                        if (!dirty) {
1.58      enami     454:                                UVMHIST_LOG(ubchist, "dirtyonly", 0,0,0,0);
1.52      chs       455:                                return 0;
                    456:                        }
                    457:                }
                    458:
1.37      chs       459:                /* mark the page BUSY and we're done. */
                    460:                pg->flags |= PG_BUSY;
                    461:                UVM_PAGE_OWN(pg, "uvn_findpage");
1.52      chs       462:                UVMHIST_LOG(ubchist, "found %p", pg,0,0,0);
1.37      chs       463:                break;
1.8       mrg       464:        }
1.37      chs       465:        *pgp = pg;
                    466:        return 1;
1.1       mrg       467: }
                    468:
                    469: /*
1.52      chs       470:  * uvm_vnp_setsize: grow or shrink a vnode uobj
1.1       mrg       471:  *
                    472:  * grow   => just update size value
                    473:  * shrink => toss un-needed pages
                    474:  *
1.49      chs       475:  * => we assume that the caller has a reference of some sort to the
1.1       mrg       476:  *     vnode in question so that it will not be yanked out from under
                    477:  *     us.
                    478:  */
                    479:
1.8       mrg       480: void
1.65      thorpej   481: uvm_vnp_setsize(struct vnode *vp, voff_t newsize)
1.8       mrg       482: {
1.52      chs       483:        struct uvm_object *uobj = &vp->v_uobj;
1.46      enami     484:        voff_t pgend = round_page(newsize);
1.37      chs       485:        UVMHIST_FUNC("uvm_vnp_setsize"); UVMHIST_CALLED(ubchist);
                    486:
1.52      chs       487:        simple_lock(&uobj->vmobjlock);
                    488:        UVMHIST_LOG(ubchist, "vp %p old 0x%x new 0x%x",
                    489:            vp, vp->v_size, newsize, 0);
1.1       mrg       490:
1.8       mrg       491:        /*
1.37      chs       492:         * now check if the size has changed: if we shrink we had better
                    493:         * toss some pages...
1.8       mrg       494:         */
1.1       mrg       495:
1.52      chs       496:        if (vp->v_size > pgend && vp->v_size != VSIZENOTSET) {
1.57      chs       497:                (void) uvn_put(uobj, pgend, 0, PGO_FREE | PGO_SYNCIO);
1.52      chs       498:        } else {
                    499:                simple_unlock(&uobj->vmobjlock);
1.8       mrg       500:        }
1.52      chs       501:        vp->v_size = newsize;
1.1       mrg       502: }
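
/*
 * Illustrative sketch (not part of the original source): a file system's
 * truncate or extend path keeps the pager's idea of the size in sync by
 * calling this with the new length, e.g.:
 *
 *	uvm_vnp_setsize(vp, (voff_t)new_length);
 *
 * where new_length is the file size the file system is about to expose.
 */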
                    503:
                    504: /*
1.37      chs       505:  * uvm_vnp_zerorange:  set a range of bytes in a file to zero.
1.1       mrg       506:  */
                    507:
1.8       mrg       508: void
1.65      thorpej   509: uvm_vnp_zerorange(struct vnode *vp, off_t off, size_t len)
1.8       mrg       510: {
1.64      chs       511:        void *win;
                    512:        int flags;
                    513:
                    514:        /*
                    515:         * XXXUBC invent kzero() and use it
                    516:         */
1.8       mrg       517:
1.64      chs       518:        while (len) {
                    519:                vsize_t bytelen = len;
                    520:
1.68      yamt      521:                win = ubc_alloc(&vp->v_uobj, off, &bytelen, UVM_ADV_NORMAL,
                    522:                    UBC_WRITE);
1.64      chs       523:                memset(win, 0, bytelen);
                    524:                flags = UBC_WANT_UNMAP(vp) ? UBC_UNMAP : 0;
                    525:                ubc_release(win, flags);
                    526:
                    527:                off += bytelen;
                    528:                len -= bytelen;
                    529:        }
1.1       mrg       530: }
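
/*
 * Illustrative sketch (not part of the original source): a file system
 * might use this to clear the portion of the last block beyond a new,
 * non block-aligned end of file.  "bsize" and "new_size" are assumed
 * names, not part of this file:
 *
 *	off_t boff = new_size & (bsize - 1);
 *
 *	if (boff != 0)
 *		uvm_vnp_zerorange(vp, new_size, (size_t)(bsize - boff));
 */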
