/* Annotation of src/sys/uvm/uvm_vnode.c, Revision 1.90 */
1.90 ! ad 1: /* $NetBSD: uvm_vnode.c,v 1.89 2007/12/01 10:40:28 yamt Exp $ */
1.1 mrg 2:
3: /*
4: * Copyright (c) 1997 Charles D. Cranor and Washington University.
5: * Copyright (c) 1991, 1993
1.49 chs 6: * The Regents of the University of California.
1.1 mrg 7: * Copyright (c) 1990 University of Utah.
8: *
9: * All rights reserved.
10: *
11: * This code is derived from software contributed to Berkeley by
12: * the Systems Programming Group of the University of Utah Computer
13: * Science Department.
14: *
15: * Redistribution and use in source and binary forms, with or without
16: * modification, are permitted provided that the following conditions
17: * are met:
18: * 1. Redistributions of source code must retain the above copyright
19: * notice, this list of conditions and the following disclaimer.
20: * 2. Redistributions in binary form must reproduce the above copyright
21: * notice, this list of conditions and the following disclaimer in the
22: * documentation and/or other materials provided with the distribution.
23: * 3. All advertising materials mentioning features or use of this software
24: * must display the following acknowledgement:
25: * This product includes software developed by Charles D. Cranor,
1.49 chs 26: * Washington University, the University of California, Berkeley and
1.1 mrg 27: * its contributors.
28: * 4. Neither the name of the University nor the names of its contributors
29: * may be used to endorse or promote products derived from this software
30: * without specific prior written permission.
31: *
32: * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
33: * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
34: * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
35: * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
36: * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
37: * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
38: * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
39: * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
40: * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
41: * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
42: * SUCH DAMAGE.
43: *
44: * @(#)vnode_pager.c 8.8 (Berkeley) 2/13/94
1.3 mrg 45: * from: Id: uvm_vnode.c,v 1.1.2.26 1998/02/02 20:38:07 chuck Exp
1.1 mrg 46: */
47:
1.55 lukem 48: /*
49: * uvm_vnode.c: the vnode pager.
50: */
51:
52: #include <sys/cdefs.h>
1.90 ! ad 53: __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.89 2007/12/01 10:40:28 yamt Exp $");
1.55 lukem 54:
1.6 thorpej 55: #include "fs_nfs.h"
1.4 mrg 56: #include "opt_uvmhist.h"
1.37 chs 57: #include "opt_ddb.h"
1.1 mrg 58:
59: #include <sys/param.h>
60: #include <sys/systm.h>
1.37 chs 61: #include <sys/kernel.h>
1.1 mrg 62: #include <sys/proc.h>
63: #include <sys/malloc.h>
64: #include <sys/vnode.h>
1.13 thorpej 65: #include <sys/disklabel.h>
66: #include <sys/ioctl.h>
67: #include <sys/fcntl.h>
68: #include <sys/conf.h>
1.37 chs 69: #include <sys/pool.h>
70: #include <sys/mount.h>
1.13 thorpej 71:
72: #include <miscfs/specfs/specdev.h>
1.1 mrg 73:
74: #include <uvm/uvm.h>
1.68 yamt 75: #include <uvm/uvm_readahead.h>
1.1 mrg 76:
77: /*
78: * functions
79: */
80:
1.66 thorpej 81: static void uvn_detach(struct uvm_object *);
82: static int uvn_get(struct uvm_object *, voff_t, struct vm_page **, int *,
83: int, vm_prot_t, int, int);
84: static int uvn_put(struct uvm_object *, voff_t, voff_t, int);
85: static void uvn_reference(struct uvm_object *);
1.52 chs 86:
1.66 thorpej 87: static int uvn_findpage(struct uvm_object *, voff_t, struct vm_page **,
88: int);
1.1 mrg 89:
90: /*
91: * master pager structure
92: */
93:
const struct uvm_pagerops uvm_vnodeops = {
	.pgo_reference = uvn_reference,	/* add a reference (VREF) */
	.pgo_detach = uvn_detach,	/* drop a reference (vrele) */
	.pgo_get = uvn_get,		/* get pages via VOP_GETPAGES */
	.pgo_put = uvn_put,		/* flush pages via VOP_PUTPAGES */
};
100:
101: /*
102: * the ops!
103: */
104:
105: /*
106: * uvn_reference
107: *
108: * duplicate a reference to a VM object. Note that the reference
1.49 chs 109: * count must already be at least one (the passed in reference) so
1.1 mrg 110: * there is no chance of the uvn being killed or locked out here.
111: *
1.49 chs 112: * => caller must call with object unlocked.
1.1 mrg 113: * => caller must be using the same accessprot as was used at attach time
114: */
115:
static void
uvn_reference(struct uvm_object *uobj)
{
	struct vnode *vp = (struct vnode *)uobj;

	/* a uvn reference is just a vnode reference. */
	VREF(vp);
}
121:
1.52 chs 122:
1.1 mrg 123: /*
124: * uvn_detach
125: *
126: * remove a reference to a VM object.
127: *
128: * => caller must call with object unlocked and map locked.
129: */
1.52 chs 130:
static void
uvn_detach(struct uvm_object *uobj)
{
	struct vnode *vp = (struct vnode *)uobj;

	/* dropping a uvn reference is just dropping a vnode reference. */
	vrele(vp);
}
136:
137: /*
138: * uvn_put: flush page data to backing store.
139: *
1.53 sommerfe 140: * => object must be locked on entry! VOP_PUTPAGES must unlock it.
1.1 mrg 141: * => flags: PGO_SYNCIO -- use sync. I/O
142: * => note: caller must set PG_CLEAN and pmap_clear_modify (if needed)
143: */
144:
1.66 thorpej 145: static int
1.65 thorpej 146: uvn_put(struct uvm_object *uobj, voff_t offlo, voff_t offhi, int flags)
1.1 mrg 147: {
1.37 chs 148: struct vnode *vp = (struct vnode *)uobj;
149: int error;
1.1 mrg 150:
1.90 ! ad 151: KASSERT(mutex_owned(&vp->v_interlock));
1.54 chs 152: error = VOP_PUTPAGES(vp, offlo, offhi, flags);
1.90 ! ad 153:
1.48 chs 154: return error;
1.1 mrg 155: }
156:
157:
158: /*
159: * uvn_get: get pages (synchronously) from backing store
160: *
161: * => prefer map unlocked (not required)
162: * => object must be locked! we will _unlock_ it before starting any I/O.
163: * => flags: PGO_ALLPAGES: get all of the pages
164: * PGO_LOCKED: fault data structures are locked
165: * => NOTE: offset is the offset of pps[0], _NOT_ pps[centeridx]
166: * => NOTE: caller must check for released pages!!
167: */
1.49 chs 168:
static int
uvn_get(struct uvm_object *uobj, voff_t offset,
    struct vm_page **pps /* IN/OUT */,
    int *npagesp /* IN (OUT if PGO_LOCKED)*/,
    int centeridx, vm_prot_t access_type, int advice, int flags)
{
	struct vnode *vp = (struct vnode *)uobj;
	int error;

	UVMHIST_FUNC("uvn_get"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "vp %p off 0x%x", vp, (int)offset, 0,0);

	/*
	 * for unlocked read requests, hand the range to the read-ahead
	 * code so it can schedule I/O ahead of demand.
	 */
	if ((access_type & VM_PROT_WRITE) == 0 && (flags & PGO_LOCKED) == 0) {
		vn_ra_allocctx(vp);
		uvm_ra_request(vp->v_ractx, advice, uobj, offset,
		    *npagesp << PAGE_SHIFT);
	}

	/* the file system does the real work. */
	error = VOP_GETPAGES(vp, offset, pps, npagesp, centeridx,
	    access_type, advice, flags);

	/*
	 * in the PGO_LOCKED case VOP_GETPAGES must return with the
	 * object still locked (the second disjunct is trivially true
	 * otherwise).
	 */
	KASSERT(((flags & PGO_LOCKED) != 0 && mutex_owned(&vp->v_interlock)) ||
	    (flags & PGO_LOCKED) == 0);
	return error;
}
1.8 mrg 195:
196:
1.37 chs 197: /*
198: * uvn_findpages:
199: * return the page for the uobj and offset requested, allocating if needed.
200: * => uobj must be locked.
1.52 chs 201: * => returned pages will be BUSY.
1.37 chs 202: */
1.1 mrg 203:
1.58 enami 204: int
1.65 thorpej 205: uvn_findpages(struct uvm_object *uobj, voff_t offset, int *npagesp,
206: struct vm_page **pgs, int flags)
1.37 chs 207: {
1.58 enami 208: int i, count, found, npages, rv;
1.8 mrg 209:
1.58 enami 210: count = found = 0;
1.37 chs 211: npages = *npagesp;
1.52 chs 212: if (flags & UFP_BACKWARD) {
213: for (i = npages - 1; i >= 0; i--, offset -= PAGE_SIZE) {
214: rv = uvn_findpage(uobj, offset, &pgs[i], flags);
1.58 enami 215: if (rv == 0) {
216: if (flags & UFP_DIRTYONLY)
217: break;
218: } else
219: found++;
1.52 chs 220: count++;
221: }
222: } else {
223: for (i = 0; i < npages; i++, offset += PAGE_SIZE) {
224: rv = uvn_findpage(uobj, offset, &pgs[i], flags);
1.58 enami 225: if (rv == 0) {
226: if (flags & UFP_DIRTYONLY)
227: break;
228: } else
229: found++;
1.52 chs 230: count++;
231: }
1.37 chs 232: }
1.52 chs 233: *npagesp = count;
1.58 enami 234: return (found);
1.37 chs 235: }
1.8 mrg 236:
/*
 * uvn_findpage: find (or allocate) the page at "offset" in "uobj" and
 * mark it BUSY.
 *
 * => uobj must be locked; the lock may be dropped and re-taken here if
 *    we have to sleep (for free memory, or for a busy page) — we retry
 *    the lookup after re-locking.
 * => if *pgp is already non-NULL the slot is skipped and 0 is returned.
 * => returns 1 with the page stored in *pgp on success; 0 means no page
 *    was returned, depending on "flags":
 *    UFP_NOALLOC   - do not allocate a missing page
 *    UFP_NOWAIT    - do not sleep for memory or for a busy page
 *    UFP_NOCACHE   - fail if the page is already cached
 *    UFP_NORDONLY  - skip PG_RDONLY pages
 *    UFP_DIRTYONLY - fail on clean pages (the pmap modified bit is
 *                    cleared and PG_CLEAN set as a side effect)
 */
static int
uvn_findpage(struct uvm_object *uobj, voff_t offset, struct vm_page **pgp,
    int flags)
{
	struct vm_page *pg;
	bool dirty;
	UVMHIST_FUNC("uvn_findpage"); UVMHIST_CALLED(ubchist);
	UVMHIST_LOG(ubchist, "vp %p off 0x%lx", uobj, offset,0,0);

	if (*pgp != NULL) {
		UVMHIST_LOG(ubchist, "dontcare", 0,0,0,0);
		return 0;
	}
	for (;;) {
		/* look for an existing page */
		pg = uvm_pagelookup(uobj, offset);

		/* nope? allocate one now */
		if (pg == NULL) {
			if (flags & UFP_NOALLOC) {
				UVMHIST_LOG(ubchist, "noalloc", 0,0,0,0);
				return 0;
			}
			pg = uvm_pagealloc(uobj, offset, NULL, 0);
			if (pg == NULL) {
				if (flags & UFP_NOWAIT) {
					UVMHIST_LOG(ubchist, "nowait",0,0,0,0);
					return 0;
				}
				/* no memory: unlock, wait, and retry. */
				mutex_exit(&uobj->vmobjlock);
				uvm_wait("uvn_fp1");
				mutex_enter(&uobj->vmobjlock);
				continue;
			}
			UVMHIST_LOG(ubchist, "alloced %p", pg,0,0,0);
			break;
		} else if (flags & UFP_NOCACHE) {
			UVMHIST_LOG(ubchist, "nocache",0,0,0,0);
			return 0;
		}

		/* page is there, see if we need to wait on it */
		if ((pg->flags & PG_BUSY) != 0) {
			if (flags & UFP_NOWAIT) {
				UVMHIST_LOG(ubchist, "nowait",0,0,0,0);
				return 0;
			}
			/* sleep until the page is unbusied, then retry. */
			pg->flags |= PG_WANTED;
			UVMHIST_LOG(ubchist, "wait %p", pg,0,0,0);
			UVM_UNLOCK_AND_WAIT(pg, &uobj->vmobjlock, 0,
					    "uvn_fp2", 0);
			mutex_enter(&uobj->vmobjlock);
			continue;
		}

		/* skip PG_RDONLY pages if requested */
		if ((flags & UFP_NORDONLY) && (pg->flags & PG_RDONLY)) {
			UVMHIST_LOG(ubchist, "nordonly",0,0,0,0);
			return 0;
		}

		/* stop on clean pages if requested */
		if (flags & UFP_DIRTYONLY) {
			/*
			 * the page is dirty if the pmap's modified bit
			 * was set (it is cleared here) or if PG_CLEAN
			 * is not set.
			 */
			dirty = pmap_clear_modify(pg) ||
				(pg->flags & PG_CLEAN) == 0;
			pg->flags |= PG_CLEAN;
			if (!dirty) {
				UVMHIST_LOG(ubchist, "dirtonly", 0,0,0,0);
				return 0;
			}
		}

		/* mark the page BUSY and we're done. */
		pg->flags |= PG_BUSY;
		UVM_PAGE_OWN(pg, "uvn_findpage");
		UVMHIST_LOG(ubchist, "found %p", pg,0,0,0);
		break;
	}
	*pgp = pg;
	return 1;
}
318:
319: /*
1.52 chs 320: * uvm_vnp_setsize: grow or shrink a vnode uobj
1.1 mrg 321: *
322: * grow => just update size value
323: * shrink => toss un-needed pages
324: *
1.49 chs 325: * => we assume that the caller has a reference of some sort to the
1.1 mrg 326: * vnode in question so that it will not be yanked out from under
327: * us.
328: */
329:
void
uvm_vnp_setsize(struct vnode *vp, voff_t newsize)
{
	struct uvm_object *uobj = &vp->v_uobj;
	voff_t pgend = round_page(newsize);	/* new size, page-rounded up */
	voff_t oldsize;
	UVMHIST_FUNC("uvm_vnp_setsize"); UVMHIST_CALLED(ubchist);

	mutex_enter(&uobj->vmobjlock);
	UVMHIST_LOG(ubchist, "vp %p old 0x%x new 0x%x",
	    vp, vp->v_size, newsize, 0);

	/*
	 * now check if the size has changed: if we shrink we had better
	 * toss some pages...
	 */

	KASSERT(newsize != VSIZENOTSET);
	KASSERT(vp->v_size <= vp->v_writesize);
	KASSERT(vp->v_size == vp->v_writesize ||
	    newsize == vp->v_writesize || newsize <= vp->v_size);

	/* shrink relative to the write size, not just v_size. */
	oldsize = vp->v_writesize;
	KASSERT(oldsize != VSIZENOTSET || pgend > oldsize);

	if (oldsize > pgend) {
		/*
		 * shrinking: free the pages past the new end.  uvn_put
		 * (VOP_PUTPAGES) releases the object lock, so re-take it
		 * before updating the size fields.
		 */
		(void) uvn_put(uobj, pgend, 0, PGO_FREE | PGO_SYNCIO);
		mutex_enter(&uobj->vmobjlock);
	}
	vp->v_size = vp->v_writesize = newsize;
	mutex_exit(&uobj->vmobjlock);
}
362:
/*
 * uvm_vnp_setwritesize: update the vnode's write size (v_writesize).
 *
 * => both sizes must already be valid (not VSIZENOTSET), and the new
 *    write size must not be below the current file size (v_size).
 * => the interlock is taken here; caller must not hold it.
 */
void
uvm_vnp_setwritesize(struct vnode *vp, voff_t newsize)
{

	mutex_enter(&vp->v_interlock);
	KASSERT(newsize != VSIZENOTSET);
	KASSERT(vp->v_size != VSIZENOTSET);
	KASSERT(vp->v_writesize != VSIZENOTSET);
	KASSERT(vp->v_size <= vp->v_writesize);
	KASSERT(vp->v_size <= newsize);
	vp->v_writesize = newsize;
	mutex_exit(&vp->v_interlock);
}
376:
1.1 mrg 377: /*
1.37 chs 378: * uvm_vnp_zerorange: set a range of bytes in a file to zero.
1.1 mrg 379: */
380:
1.8 mrg 381: void
1.65 thorpej 382: uvm_vnp_zerorange(struct vnode *vp, off_t off, size_t len)
1.8 mrg 383: {
1.64 chs 384: void *win;
385: int flags;
386:
387: /*
388: * XXXUBC invent kzero() and use it
389: */
1.8 mrg 390:
1.64 chs 391: while (len) {
392: vsize_t bytelen = len;
393:
1.68 yamt 394: win = ubc_alloc(&vp->v_uobj, off, &bytelen, UVM_ADV_NORMAL,
395: UBC_WRITE);
1.64 chs 396: memset(win, 0, bytelen);
397: flags = UBC_WANT_UNMAP(vp) ? UBC_UNMAP : 0;
398: ubc_release(win, flags);
399:
400: off += bytelen;
401: len -= bytelen;
402: }
1.1 mrg 403: }
1.75 yamt 404:
1.79 thorpej 405: bool
1.75 yamt 406: uvn_text_p(struct uvm_object *uobj)
407: {
408: struct vnode *vp = (struct vnode *)uobj;
409:
1.86 ad 410: return (vp->v_iflag & VI_EXECMAP) != 0;
1.75 yamt 411: }
412:
1.79 thorpej 413: bool
1.75 yamt 414: uvn_clean_p(struct uvm_object *uobj)
415: {
416: struct vnode *vp = (struct vnode *)uobj;
417:
1.86 ad 418: return (vp->v_iflag & VI_ONWORKLST) == 0;
1.75 yamt 419: }
420:
1.79 thorpej 421: bool
1.75 yamt 422: uvn_needs_writefault_p(struct uvm_object *uobj)
423: {
424: struct vnode *vp = (struct vnode *)uobj;
425:
426: return uvn_clean_p(uobj) ||
1.86 ad 427: (vp->v_iflag & (VI_WRMAP|VI_WRMAPDIRTY)) == VI_WRMAP;
1.75 yamt 428: }
/* CVSweb <webmaster@jp.NetBSD.org> */