Annotation of src/sys/miscfs/genfs/genfs_io.c, Revision 1.53.2.4
1.53.2.1 yamt 1: /* $NetBSD$ */
1.1 pooka 2:
3: /*
4: * Copyright (c) 1982, 1986, 1989, 1993
5: * The Regents of the University of California. All rights reserved.
6: *
7: * Redistribution and use in source and binary forms, with or without
8: * modification, are permitted provided that the following conditions
9: * are met:
10: * 1. Redistributions of source code must retain the above copyright
11: * notice, this list of conditions and the following disclaimer.
12: * 2. Redistributions in binary form must reproduce the above copyright
13: * notice, this list of conditions and the following disclaimer in the
14: * documentation and/or other materials provided with the distribution.
15: * 3. Neither the name of the University nor the names of its contributors
16: * may be used to endorse or promote products derived from this software
17: * without specific prior written permission.
18: *
19: * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
20: * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21: * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22: * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
23: * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24: * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25: * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26: * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27: * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28: * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29: * SUCH DAMAGE.
30: *
31: */
32:
33: #include <sys/cdefs.h>
1.53.2.1 yamt 34: __KERNEL_RCSID(0, "$NetBSD$");
1.1 pooka 35:
36: #include <sys/param.h>
37: #include <sys/systm.h>
38: #include <sys/proc.h>
39: #include <sys/kernel.h>
40: #include <sys/mount.h>
41: #include <sys/vnode.h>
42: #include <sys/kmem.h>
43: #include <sys/kauth.h>
44: #include <sys/fstrans.h>
1.15 pooka 45: #include <sys/buf.h>
1.53.2.1 yamt 46: #include <sys/radixtree.h>
1.1 pooka 47:
48: #include <miscfs/genfs/genfs.h>
49: #include <miscfs/genfs/genfs_node.h>
50: #include <miscfs/specfs/specdev.h>
1.47 rmind 51: #include <miscfs/syncfs/syncfs.h>
1.1 pooka 52:
53: #include <uvm/uvm.h>
54: #include <uvm/uvm_pager.h>
1.53.2.1 yamt 55: #include <uvm/uvm_page_array.h>
1.1 pooka 56:
57: static int genfs_do_directio(struct vmspace *, vaddr_t, size_t, struct vnode *,
58: off_t, enum uio_rw);
59: static void genfs_dio_iodone(struct buf *);
60:
61: static int genfs_do_io(struct vnode *, off_t, vaddr_t, size_t, int, enum uio_rw,
62: void (*)(struct buf *));
1.38 chs 63: static void genfs_rel_pages(struct vm_page **, int);
64: static void genfs_markdirty(struct vnode *);
1.1 pooka 65:
66: int genfs_maxdio = MAXPHYS;
67:
1.38 chs 68: static void
1.1 pooka 69: genfs_rel_pages(struct vm_page **pgs, int npages)
70: {
71: int i;
72:
73: for (i = 0; i < npages; i++) {
74: struct vm_page *pg = pgs[i];
75:
76: if (pg == NULL || pg == PGO_DONTCARE)
77: continue;
78: if (pg->flags & PG_FAKE) {
79: pg->flags |= PG_RELEASED;
80: }
81: }
1.2 ad 82: mutex_enter(&uvm_pageqlock);
1.1 pooka 83: uvm_page_unbusy(pgs, npages);
1.2 ad 84: mutex_exit(&uvm_pageqlock);
1.1 pooka 85: }
86:
1.38 chs 87: static void
88: genfs_markdirty(struct vnode *vp)
89: {
90:
1.49 rmind 91: KASSERT(mutex_owned(vp->v_interlock));
1.38 chs 92: if ((vp->v_iflag & VI_ONWORKLST) == 0) {
93: vn_syncer_add_to_worklist(vp, filedelay);
94: }
95: if ((vp->v_iflag & (VI_WRMAP|VI_WRMAPDIRTY)) == VI_WRMAP) {
96: vp->v_iflag |= VI_WRMAPDIRTY;
97: }
98: }
99:
1.1 pooka 100: /*
101: * generic VM getpages routine.
102: * Return PG_BUSY pages for the given range,
103: * reading from backing store if necessary.
104: */
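
/*
 * An illustrative sketch (not from this file): callers such as
 * ubc_fault reach this routine via VOP_GETPAGES with the object
 * locked, roughly:
 *
 *	mutex_enter(uobj->vmobjlock);
 *	error = VOP_GETPAGES(vp, trunc_page(offset), pgs, &npages,
 *	    0, VM_PROT_READ, advice, PGO_SYNCIO);
 *
 * on success the requested pages come back PG_BUSY and the object
 * lock has been released.
 */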
105:
106: int
107: genfs_getpages(void *v)
108: {
109: struct vop_getpages_args /* {
110: struct vnode *a_vp;
111: voff_t a_offset;
112: struct vm_page **a_m;
113: int *a_count;
114: int a_centeridx;
115: vm_prot_t a_access_type;
116: int a_advice;
117: int a_flags;
1.22 uebayasi 118: } */ * const ap = v;
1.1 pooka 119:
1.24 uebayasi 120: off_t diskeof, memeof;
1.31 uebayasi 121: int i, error, npages;
1.10 yamt 122: const int flags = ap->a_flags;
1.22 uebayasi 123: struct vnode * const vp = ap->a_vp;
124: struct uvm_object * const uobj = &vp->v_uobj;
1.31 uebayasi 125: kauth_cred_t const cred = curlwp->l_cred; /* XXXUBC curlwp */
1.10 yamt 126: const bool async = (flags & PGO_SYNCIO) == 0;
1.35 uebayasi 127: const bool memwrite = (ap->a_access_type & VM_PROT_WRITE) != 0;
1.10 yamt 128: const bool overwrite = (flags & PGO_OVERWRITE) != 0;
1.35 uebayasi 129: const bool blockalloc = memwrite && (flags & PGO_NOBLOCKALLOC) == 0;
1.40 chs 130: const bool glocked = (flags & PGO_GLOCKHELD) != 0;
1.42 hannken 131: const bool need_wapbl = blockalloc && vp->v_mount->mnt_wapbl;
132: bool has_trans_wapbl = false;
1.1 pooka 133: UVMHIST_FUNC("genfs_getpages"); UVMHIST_CALLED(ubchist);
134:
135: UVMHIST_LOG(ubchist, "vp %p off 0x%x/%x count %d",
136: vp, ap->a_offset >> 32, ap->a_offset, *ap->a_count);
137:
138: KASSERT(vp->v_type == VREG || vp->v_type == VDIR ||
139: vp->v_type == VLNK || vp->v_type == VBLK);
140:
141: startover:
142: error = 0;
1.27 uebayasi 143: const voff_t origvsize = vp->v_size;
144: const off_t origoffset = ap->a_offset;
1.29 uebayasi 145: const int orignpages = *ap->a_count;
1.33 uebayasi 146:
1.1 pooka 147: GOP_SIZE(vp, origvsize, &diskeof, 0);
148: if (flags & PGO_PASTEOF) {
1.24 uebayasi 149: off_t newsize;
1.1 pooka 150: #if defined(DIAGNOSTIC)
151: off_t writeeof;
152: #endif /* defined(DIAGNOSTIC) */
153:
154: newsize = MAX(origvsize,
155: origoffset + (orignpages << PAGE_SHIFT));
156: GOP_SIZE(vp, newsize, &memeof, GOP_SIZE_MEM);
157: #if defined(DIAGNOSTIC)
158: GOP_SIZE(vp, vp->v_writesize, &writeeof, GOP_SIZE_MEM);
159: if (newsize > round_page(writeeof)) {
1.39 pooka 160: panic("%s: past eof: %" PRId64 " vs. %" PRId64,
161: __func__, newsize, round_page(writeeof));
1.1 pooka 162: }
163: #endif /* defined(DIAGNOSTIC) */
164: } else {
165: GOP_SIZE(vp, origvsize, &memeof, GOP_SIZE_MEM);
166: }
167: 	KASSERT(ap->a_centeridx >= 0 && ap->a_centeridx <= orignpages);
168: KASSERT((origoffset & (PAGE_SIZE - 1)) == 0 && origoffset >= 0);
169: KASSERT(orignpages > 0);
170:
171: /*
172: * Bounds-check the request.
173: */
174:
175: if (origoffset + (ap->a_centeridx << PAGE_SHIFT) >= memeof) {
176: if ((flags & PGO_LOCKED) == 0) {
1.49 rmind 177: mutex_exit(uobj->vmobjlock);
1.1 pooka 178: }
179: UVMHIST_LOG(ubchist, "off 0x%x count %d goes past EOF 0x%x",
180: origoffset, *ap->a_count, memeof,0);
181: error = EINVAL;
182: goto out_err;
183: }
184:
185: /* uobj is locked */
186:
187: if ((flags & PGO_NOTIMESTAMP) == 0 &&
188: (vp->v_type != VBLK ||
189: (vp->v_mount->mnt_flag & MNT_NODEVMTIME) == 0)) {
190: int updflags = 0;
191:
192: if ((vp->v_mount->mnt_flag & MNT_NOATIME) == 0) {
193: updflags = GOP_UPDATE_ACCESSED;
194: }
1.35 uebayasi 195: if (memwrite) {
1.1 pooka 196: updflags |= GOP_UPDATE_MODIFIED;
197: }
198: if (updflags != 0) {
199: GOP_MARKUPDATE(vp, updflags);
200: }
201: }
202:
203: /*
204: * For PGO_LOCKED requests, just return whatever's in memory.
205: */
206:
207: if (flags & PGO_LOCKED) {
208: int nfound;
1.31 uebayasi 209: struct vm_page *pg;
1.1 pooka 210:
1.40 chs 211: KASSERT(!glocked);
1.1 pooka 212: npages = *ap->a_count;
213: #if defined(DEBUG)
214: for (i = 0; i < npages; i++) {
215: pg = ap->a_m[i];
216: KASSERT(pg == NULL || pg == PGO_DONTCARE);
217: }
218: #endif /* defined(DEBUG) */
219: nfound = uvn_findpages(uobj, origoffset, &npages,
1.35 uebayasi 220: ap->a_m, UFP_NOWAIT|UFP_NOALLOC|(memwrite ? UFP_NORDONLY : 0));
1.1 pooka 221: KASSERT(npages == *ap->a_count);
222: if (nfound == 0) {
223: error = EBUSY;
224: goto out_err;
225: }
1.23 uebayasi 226: if (!genfs_node_rdtrylock(vp)) {
1.1 pooka 227: genfs_rel_pages(ap->a_m, npages);
228:
229: /*
230: * restore the array.
231: */
232:
233: for (i = 0; i < npages; i++) {
234: pg = ap->a_m[i];
235:
1.41 uebayasi 236: if (pg != NULL && pg != PGO_DONTCARE) {
1.1 pooka 237: ap->a_m[i] = NULL;
238: }
1.46 uebayasi 239: KASSERT(ap->a_m[i] == NULL ||
240: ap->a_m[i] == PGO_DONTCARE);
1.1 pooka 241: }
242: } else {
1.23 uebayasi 243: genfs_node_unlock(vp);
1.1 pooka 244: }
245: error = (ap->a_m[ap->a_centeridx] == NULL ? EBUSY : 0);
1.38 chs 246: if (error == 0 && memwrite) {
1.53.2.1 yamt 247: for (i = 0; i < npages; i++) {
248: pg = ap->a_m[i];
249: if (pg == NULL || pg == PGO_DONTCARE) {
250: continue;
251: }
252: if (uvm_pagegetdirty(pg) ==
253: UVM_PAGE_STATUS_CLEAN) {
254: uvm_pagemarkdirty(pg,
255: UVM_PAGE_STATUS_UNKNOWN);
256: }
257: }
1.38 chs 258: genfs_markdirty(vp);
259: }
1.1 pooka 260: goto out_err;
261: }
1.49 rmind 262: mutex_exit(uobj->vmobjlock);
1.1 pooka 263:
264: /*
265: * find the requested pages and make some simple checks.
266: * leave space in the page array for a whole block.
267: */
268:
1.27 uebayasi 269: const int fs_bshift = (vp->v_type != VBLK) ?
270: vp->v_mount->mnt_fs_bshift : DEV_BSHIFT;
271: const int dev_bshift = (vp->v_type != VBLK) ?
272: vp->v_mount->mnt_dev_bshift : DEV_BSHIFT;
273: const int fs_bsize = 1 << fs_bshift;
1.30 uebayasi 274: #define blk_mask (fs_bsize - 1)
275: #define trunc_blk(x) ((x) & ~blk_mask)
276: #define round_blk(x) (((x) + blk_mask) & ~blk_mask)
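
	/*
	 * e.g. with fs_bsize = 8192, blk_mask = 0x1fff, so
	 * trunc_blk(0x3000) = 0x2000 and round_blk(0x3000) = 0x4000:
	 * the request below is widened to cover whole fs blocks.
	 */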
1.1 pooka 277:
1.29 uebayasi 278: const int orignmempages = MIN(orignpages,
1.1 pooka 279: round_page(memeof - origoffset) >> PAGE_SHIFT);
1.29 uebayasi 280: npages = orignmempages;
1.30 uebayasi 281: const off_t startoffset = trunc_blk(origoffset);
282: const off_t endoffset = MIN(
283: round_page(round_blk(origoffset + (npages << PAGE_SHIFT))),
284: round_page(memeof));
1.31 uebayasi 285: const int ridx = (origoffset - startoffset) >> PAGE_SHIFT;
1.1 pooka 286:
1.33 uebayasi 287: const int pgs_size = sizeof(struct vm_page *) *
1.1 pooka 288: ((endoffset - startoffset) >> PAGE_SHIFT);
1.33 uebayasi 289: struct vm_page **pgs, *pgs_onstack[UBC_MAX_PAGES];
1.31 uebayasi 290:
1.1 pooka 291: if (pgs_size > sizeof(pgs_onstack)) {
292: pgs = kmem_zalloc(pgs_size, async ? KM_NOSLEEP : KM_SLEEP);
293: if (pgs == NULL) {
294: pgs = pgs_onstack;
295: error = ENOMEM;
1.32 uebayasi 296: goto out_err;
1.1 pooka 297: }
298: } else {
1.14 christos 299: pgs = pgs_onstack;
300: (void)memset(pgs, 0, pgs_size);
1.1 pooka 301: }
1.14 christos 302:
1.1 pooka 303: UVMHIST_LOG(ubchist, "ridx %d npages %d startoff %ld endoff %ld",
304: ridx, npages, startoffset, endoffset);
305:
1.42 hannken 306: if (!has_trans_wapbl) {
1.1 pooka 307: fstrans_start(vp->v_mount, FSTRANS_SHARED);
1.42 hannken 308: /*
309: * XXX: This assumes that we come here only via
310: * the mmio path
311: */
312: if (need_wapbl) {
313: error = WAPBL_BEGIN(vp->v_mount);
314: if (error) {
315: fstrans_done(vp->v_mount);
316: goto out_err_free;
317: }
318: }
319: has_trans_wapbl = true;
1.1 pooka 320: }
321:
322: /*
323: * hold g_glock to prevent a race with truncate.
324: *
325: * check if our idea of v_size is still valid.
326: */
327:
1.40 chs 328: KASSERT(!glocked || genfs_node_wrlocked(vp));
329: if (!glocked) {
330: if (blockalloc) {
331: genfs_node_wrlock(vp);
332: } else {
333: genfs_node_rdlock(vp);
334: }
1.1 pooka 335: }
1.49 rmind 336: mutex_enter(uobj->vmobjlock);
1.1 pooka 337: if (vp->v_size < origvsize) {
1.40 chs 338: if (!glocked) {
339: genfs_node_unlock(vp);
340: }
1.1 pooka 341: if (pgs != pgs_onstack)
342: kmem_free(pgs, pgs_size);
343: goto startover;
344: }
345:
346: if (uvn_findpages(uobj, origoffset, &npages, &pgs[ridx],
1.29 uebayasi 347: async ? UFP_NOWAIT : UFP_ALL) != orignmempages) {
1.40 chs 348: if (!glocked) {
349: genfs_node_unlock(vp);
350: }
1.1 pooka 351: KASSERT(async != 0);
1.29 uebayasi 352: genfs_rel_pages(&pgs[ridx], orignmempages);
1.49 rmind 353: mutex_exit(uobj->vmobjlock);
1.1 pooka 354: error = EBUSY;
1.33 uebayasi 355: goto out_err_free;
1.1 pooka 356: }
357:
358: /*
1.53.2.1 yamt 359: * if PGO_OVERWRITE is set, don't bother reading the pages.
1.1 pooka 360: */
361:
1.53.2.1 yamt 362: if (overwrite) {
1.40 chs 363: if (!glocked) {
364: genfs_node_unlock(vp);
365: }
1.53.2.1 yamt 366: UVMHIST_LOG(ubchist, "PGO_OVERWRITE",0,0,0,0);
367:
368: for (i = 0; i < npages; i++) {
369: struct vm_page *pg = pgs[ridx + i];
370:
371: /*
372: * we should not see PG_HOLE pages here as it's a
373: * caller's responsibility to allocate blocks
374: * beforehand for the overwrite case.
375: */
376: KASSERT((pg->flags & PG_HOLE) == 0);
377: pg->flags &= ~PG_RDONLY;
378: /*
379: * mark the page dirty.
380: * otherwise another thread can do putpages and pull
381: * our vnode from syncer's queue before our caller does
382: * ubc_release.
383: */
384: uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_DIRTY);
385: }
1.1 pooka 386: npages += ridx;
387: goto out;
388: }
389:
390: /*
1.53.2.1 yamt 391: * if the pages are already resident, just return them.
1.1 pooka 392: */
393:
1.53.2.1 yamt 394: for (i = 0; i < npages; i++) {
395: struct vm_page *pg = pgs[ridx + i];
396:
397: if ((pg->flags & PG_FAKE) ||
398: (memwrite && (pg->flags & (PG_RDONLY|PG_HOLE)) != 0)) {
399: break;
400: }
401: }
402: if (i == npages) {
1.40 chs 403: if (!glocked) {
404: genfs_node_unlock(vp);
405: }
1.53.2.1 yamt 406: UVMHIST_LOG(ubchist, "returning cached pages", 0,0,0,0);
1.1 pooka 407: npages += ridx;
408: goto out;
409: }
410:
411: /*
412: * the page wasn't resident and we're not overwriting,
413: * so we're going to have to do some i/o.
414: * find any additional pages needed to cover the expanded range.
415: */
416:
417: npages = (endoffset - startoffset) >> PAGE_SHIFT;
1.29 uebayasi 418: if (startoffset != origoffset || npages != orignmempages) {
1.31 uebayasi 419: int npgs;
1.1 pooka 420:
421: /*
422: * we need to avoid deadlocks caused by locking
423: * additional pages at lower offsets than pages we
424: * already have locked. unlock them all and start over.
425: */
426:
1.29 uebayasi 427: genfs_rel_pages(&pgs[ridx], orignmempages);
1.1 pooka 428: memset(pgs, 0, pgs_size);
429:
430: UVMHIST_LOG(ubchist, "reset npages start 0x%x end 0x%x",
431: startoffset, endoffset, 0,0);
432: npgs = npages;
433: if (uvn_findpages(uobj, startoffset, &npgs, pgs,
434: async ? UFP_NOWAIT : UFP_ALL) != npages) {
1.40 chs 435: if (!glocked) {
436: genfs_node_unlock(vp);
437: }
1.1 pooka 438: KASSERT(async != 0);
439: genfs_rel_pages(pgs, npages);
1.49 rmind 440: mutex_exit(uobj->vmobjlock);
1.1 pooka 441: error = EBUSY;
1.33 uebayasi 442: goto out_err_free;
1.1 pooka 443: }
444: }
1.34 uebayasi 445:
1.49 rmind 446: mutex_exit(uobj->vmobjlock);
1.1 pooka 447:
1.34 uebayasi 448: {
449: size_t bytes, iobytes, tailstart, tailbytes, totalbytes, skipbytes;
450: vaddr_t kva;
451: struct buf *bp, *mbp;
452: bool sawhole = false;
453:
1.1 pooka 454: /*
455: * read the desired page(s).
456: */
457:
458: totalbytes = npages << PAGE_SHIFT;
459: bytes = MIN(totalbytes, MAX(diskeof - startoffset, 0));
460: tailbytes = totalbytes - bytes;
461: skipbytes = 0;
462:
463: kva = uvm_pagermapin(pgs, npages,
464: UVMPAGER_MAPIN_READ | UVMPAGER_MAPIN_WAITOK);
465:
1.2 ad 466: mbp = getiobuf(vp, true);
1.1 pooka 467: mbp->b_bufsize = totalbytes;
468: mbp->b_data = (void *)kva;
469: mbp->b_resid = mbp->b_bcount = bytes;
1.2 ad 470: mbp->b_cflags = BC_BUSY;
471: if (async) {
472: mbp->b_flags = B_READ | B_ASYNC;
473: mbp->b_iodone = uvm_aio_biodone;
474: } else {
475: mbp->b_flags = B_READ;
476: mbp->b_iodone = NULL;
1.43 uebayasi 477: }
1.1 pooka 478: if (async)
479: BIO_SETPRIO(mbp, BPRIO_TIMELIMITED);
480: else
481: BIO_SETPRIO(mbp, BPRIO_TIMECRITICAL);
482:
483: /*
484: * if EOF is in the middle of the range, zero the part past EOF.
485: * skip over pages which are not PG_FAKE since in that case they have
486: * valid data that we need to preserve.
487: */
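
	/*
	 * e.g. with PAGE_SIZE = 4096, startoffset = 0 and diskeof = 6144,
	 * a 4-page request gives bytes = 6144 and tailbytes = 10240: the
	 * loop below zeroes the last 2048 bytes of the second page, then
	 * each of the two following pages, skipping any that are not
	 * PG_FAKE.
	 */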
488:
489: tailstart = bytes;
490: while (tailbytes > 0) {
491: const int len = PAGE_SIZE - (tailstart & PAGE_MASK);
492:
493: KASSERT(len <= tailbytes);
494: if ((pgs[tailstart >> PAGE_SHIFT]->flags & PG_FAKE) != 0) {
495: memset((void *)(kva + tailstart), 0, len);
496: UVMHIST_LOG(ubchist, "tailbytes %p 0x%x 0x%x",
497: kva, tailstart, len, 0);
498: }
499: tailstart += len;
500: tailbytes -= len;
501: }
502:
503: /*
504: * now loop over the pages, reading as needed.
505: */
506:
507: bp = NULL;
1.28 uebayasi 508: off_t offset;
509: for (offset = startoffset;
1.1 pooka 510: bytes > 0;
511: offset += iobytes, bytes -= iobytes) {
1.30 uebayasi 512: int run;
1.25 uebayasi 513: daddr_t lbn, blkno;
1.24 uebayasi 514: int pidx;
1.26 uebayasi 515: struct vnode *devvp;
1.1 pooka 516:
517: /*
518: * skip pages which don't need to be read.
519: */
520:
521: pidx = (offset - startoffset) >> PAGE_SHIFT;
522: while ((pgs[pidx]->flags & PG_FAKE) == 0) {
523: size_t b;
524:
525: KASSERT((offset & (PAGE_SIZE - 1)) == 0);
1.53.2.1 yamt 526: if ((pgs[pidx]->flags & PG_HOLE)) {
1.1 pooka 527: sawhole = true;
528: }
529: b = MIN(PAGE_SIZE, bytes);
530: offset += b;
531: bytes -= b;
532: skipbytes += b;
533: pidx++;
534: UVMHIST_LOG(ubchist, "skipping, new offset 0x%x",
535: offset, 0,0,0);
536: if (bytes == 0) {
537: goto loopdone;
538: }
539: }
540:
541: /*
542: * bmap the file to find out the blkno to read from and
543: * how much we can read in one i/o. if bmap returns an error,
544: * skip the rest of the top-level i/o.
545: */
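
		/*
		 * VOP_BMAP(vp, lbn, &devvp, &blkno, &run) translates file
		 * block lbn into device block blkno on devvp; "run" is the
		 * number of additional contiguous blocks after lbn, and
		 * blkno == -1 denotes an unallocated hole.
		 */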
546:
547: lbn = offset >> fs_bshift;
548: error = VOP_BMAP(vp, lbn, &devvp, &blkno, &run);
549: if (error) {
550: UVMHIST_LOG(ubchist, "VOP_BMAP lbn 0x%x -> %d\n",
1.36 uebayasi 551: lbn,error,0,0);
1.1 pooka 552: skipbytes += bytes;
1.36 uebayasi 553: bytes = 0;
1.1 pooka 554: goto loopdone;
555: }
556:
557: /*
558: * see how many pages can be read with this i/o.
559: * reduce the i/o size if necessary to avoid
560: * overwriting pages with valid data.
561: */
562:
563: iobytes = MIN((((off_t)lbn + 1 + run) << fs_bshift) - offset,
564: bytes);
565: if (offset + iobytes > round_page(offset)) {
1.24 uebayasi 566: int pcount;
567:
1.1 pooka 568: pcount = 1;
569: while (pidx + pcount < npages &&
570: pgs[pidx + pcount]->flags & PG_FAKE) {
571: pcount++;
572: }
573: iobytes = MIN(iobytes, (pcount << PAGE_SHIFT) -
574: (offset - trunc_page(offset)));
575: }
576:
577: /*
578: * if this block isn't allocated, zero it instead of
579: * reading it. unless we are going to allocate blocks,
1.53.2.1 yamt 580: * mark the pages we zeroed PG_HOLE.
1.1 pooka 581: */
582:
1.36 uebayasi 583: if (blkno == (daddr_t)-1) {
1.1 pooka 584: int holepages = (round_page(offset + iobytes) -
585: trunc_page(offset)) >> PAGE_SHIFT;
586: UVMHIST_LOG(ubchist, "lbn 0x%x -> HOLE", lbn,0,0,0);
587:
588: sawhole = true;
589: memset((char *)kva + (offset - startoffset), 0,
590: iobytes);
591: skipbytes += iobytes;
592:
1.49 rmind 593: mutex_enter(uobj->vmobjlock);
1.1 pooka 594: for (i = 0; i < holepages; i++) {
1.53.2.1 yamt 595: #if 0
1.35 uebayasi 596: if (memwrite) {
1.53.2.1 yamt 597: uvm_pagemarkdirty(pgs[pidx + i],
598: UVM_PAGE_STATUS_DIRTY);
1.1 pooka 599: }
1.53.2.1 yamt 600: #endif
1.1 pooka 601: if (!blockalloc) {
1.53.2.1 yamt 602: pgs[pidx + i]->flags |= PG_HOLE;
1.1 pooka 603: }
604: }
1.49 rmind 605: mutex_exit(uobj->vmobjlock);
1.1 pooka 606: continue;
607: }
608:
609: /*
610: * allocate a sub-buf for this piece of the i/o
611: * (or just use mbp if there's only 1 piece),
612: * and start it going.
613: */
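
		/*
		 * nestiobuf_setup() makes bp a child of mbp covering iobytes
		 * bytes at the given byte offset within the master transfer;
		 * mbp completes once all children have finished and
		 * nestiobuf_done() has accounted for the skipped bytes.
		 */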
614:
615: if (offset == startoffset && iobytes == bytes) {
616: bp = mbp;
617: } else {
1.36 uebayasi 618: UVMHIST_LOG(ubchist, "vp %p bp %p num now %d",
619: vp, bp, vp->v_numoutput, 0);
1.2 ad 620: bp = getiobuf(vp, true);
1.1 pooka 621: nestiobuf_setup(mbp, bp, offset - startoffset, iobytes);
622: }
623: bp->b_lblkno = 0;
624:
625: /* adjust physical blkno for partial blocks */
626: bp->b_blkno = blkno + ((offset - ((off_t)lbn << fs_bshift)) >>
627: dev_bshift);
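
		/*
		 * e.g. with fs_bshift = 13 and dev_bshift = 9, an offset
		 * 0x1000 bytes into the file block adds 0x1000 >> 9 = 8
		 * device blocks to blkno.
		 */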
628:
629: UVMHIST_LOG(ubchist,
630: "bp %p offset 0x%x bcount 0x%x blkno 0x%x",
1.36 uebayasi 631: bp, offset, bp->b_bcount, bp->b_blkno);
1.1 pooka 632:
633: VOP_STRATEGY(devvp, bp);
634: }
635:
636: loopdone:
637: nestiobuf_done(mbp, skipbytes, error);
638: if (async) {
639: UVMHIST_LOG(ubchist, "returning 0 (async)",0,0,0,0);
1.40 chs 640: if (!glocked) {
641: genfs_node_unlock(vp);
642: }
1.1 pooka 643: error = 0;
1.33 uebayasi 644: goto out_err_free;
1.1 pooka 645: }
646: if (bp != NULL) {
647: error = biowait(mbp);
648: }
649:
1.19 rmind 650: /* Remove the mapping (make KVA available as soon as possible) */
651: uvm_pagermapout(kva, npages);
652:
1.1 pooka 653: /*
654: * if we encountered a hole then we have to do a little more work.
1.53.2.1 yamt 655: * if blockalloc is false, we marked the page PG_HOLE so that future
1.1 pooka 656: * write accesses to the page will fault again.
1.53.2.1 yamt 657: * if blockalloc is true, we must make sure that the backing store for
1.1 pooka 658: * the page is completely allocated while the pages are locked.
659: */
660:
661: if (!error && sawhole && blockalloc) {
1.42 hannken 662: error = GOP_ALLOC(vp, startoffset,
663: npages << PAGE_SHIFT, 0, cred);
1.1 pooka 664: UVMHIST_LOG(ubchist, "gop_alloc off 0x%x/0x%x -> %d",
665: startoffset, npages << PAGE_SHIFT, error,0);
666: if (!error) {
1.49 rmind 667: mutex_enter(uobj->vmobjlock);
1.1 pooka 668: for (i = 0; i < npages; i++) {
1.31 uebayasi 669: struct vm_page *pg = pgs[i];
670:
671: if (pg == NULL) {
1.1 pooka 672: continue;
673: }
1.53.2.1 yamt 674: pg->flags &= ~PG_HOLE;
675: uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_DIRTY);
1.1 pooka 676: UVMHIST_LOG(ubchist, "mark dirty pg %p",
1.31 uebayasi 677: pg,0,0,0);
1.1 pooka 678: }
1.49 rmind 679: mutex_exit(uobj->vmobjlock);
1.1 pooka 680: }
681: }
1.40 chs 682: if (!glocked) {
683: genfs_node_unlock(vp);
684: }
1.18 rmind 685:
686: putiobuf(mbp);
1.34 uebayasi 687: }
1.18 rmind 688:
1.49 rmind 689: mutex_enter(uobj->vmobjlock);
1.1 pooka 690:
691: /*
692: * we're almost done! release the pages...
693: * for errors, we free the pages.
694: * otherwise we activate them and mark them as valid and clean.
695: * also, unbusy pages that were not actually requested.
696: */
697:
698: if (error) {
699: for (i = 0; i < npages; i++) {
1.31 uebayasi 700: struct vm_page *pg = pgs[i];
701:
702: if (pg == NULL) {
1.1 pooka 703: continue;
704: }
705: UVMHIST_LOG(ubchist, "examining pg %p flags 0x%x",
1.31 uebayasi 706: pg, pg->flags, 0,0);
707: if (pg->flags & PG_FAKE) {
708: pg->flags |= PG_RELEASED;
1.1 pooka 709: }
710: }
1.2 ad 711: mutex_enter(&uvm_pageqlock);
1.1 pooka 712: uvm_page_unbusy(pgs, npages);
1.2 ad 713: mutex_exit(&uvm_pageqlock);
1.49 rmind 714: mutex_exit(uobj->vmobjlock);
1.1 pooka 715: UVMHIST_LOG(ubchist, "returning error %d", error,0,0,0);
1.33 uebayasi 716: goto out_err_free;
1.1 pooka 717: }
718:
719: out:
720: UVMHIST_LOG(ubchist, "succeeding, npages %d", npages,0,0,0);
721: error = 0;
1.2 ad 722: mutex_enter(&uvm_pageqlock);
1.1 pooka 723: for (i = 0; i < npages; i++) {
1.31 uebayasi 724: struct vm_page *pg = pgs[i];
1.1 pooka 725: if (pg == NULL) {
726: continue;
727: }
728: UVMHIST_LOG(ubchist, "examining pg %p flags 0x%x",
729: pg, pg->flags, 0,0);
730: if (pg->flags & PG_FAKE && !overwrite) {
1.53.2.1 yamt 731: /*
732: * we've read the page's contents from the backing storage.
733: *
734: * for a read fault, we keep them CLEAN.
735: */
736: KASSERT(uvm_pagegetdirty(pg) == UVM_PAGE_STATUS_CLEAN);
737: pg->flags &= ~PG_FAKE;
1.1 pooka 738: }
1.53.2.1 yamt 739: KASSERT(!blockalloc || (pg->flags & PG_HOLE) == 0);
1.29 uebayasi 740: if (i < ridx || i >= ridx + orignmempages || async) {
1.1 pooka 741: UVMHIST_LOG(ubchist, "unbusy pg %p offset 0x%x",
742: pg, pg->offset,0,0);
1.53.2.1 yamt 743: KASSERT(!overwrite);
1.1 pooka 744: if (pg->flags & PG_WANTED) {
745: wakeup(pg);
746: }
1.53.2.1 yamt 747: if (pg->flags & PG_FAKE && overwrite) {
1.1 pooka 748: uvm_pagezero(pg);
749: }
750: if (pg->flags & PG_RELEASED) {
751: uvm_pagefree(pg);
752: continue;
753: }
754: uvm_pageenqueue(pg);
755: pg->flags &= ~(PG_WANTED|PG_BUSY|PG_FAKE);
756: UVM_PAGE_OWN(pg, NULL);
1.53.2.1 yamt 757: } else if (memwrite && !overwrite &&
758: uvm_pagegetdirty(pg) == UVM_PAGE_STATUS_CLEAN) {
759: /*
760: * for a write fault, start dirtiness tracking of
761: * requested pages.
762: */
763: uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_UNKNOWN);
1.1 pooka 764: }
765: }
1.2 ad 766: mutex_exit(&uvm_pageqlock);
1.38 chs 767: if (memwrite) {
768: genfs_markdirty(vp);
769: }
1.49 rmind 770: mutex_exit(uobj->vmobjlock);
1.1 pooka 771: if (ap->a_m != NULL) {
772: memcpy(ap->a_m, &pgs[ridx],
1.29 uebayasi 773: orignmempages * sizeof(struct vm_page *));
1.1 pooka 774: }
775:
1.33 uebayasi 776: out_err_free:
1.14 christos 777: if (pgs != NULL && pgs != pgs_onstack)
1.1 pooka 778: kmem_free(pgs, pgs_size);
1.33 uebayasi 779: out_err:
1.42 hannken 780: if (has_trans_wapbl) {
781: if (need_wapbl)
782: WAPBL_END(vp->v_mount);
1.1 pooka 783: fstrans_done(vp->v_mount);
1.42 hannken 784: }
1.38 chs 785: return error;
1.1 pooka 786: }
787:
788: /*
789: * generic VM putpages routine.
790: * Write the given range of pages to backing store.
791: *
792: * => "offhi == 0" means flush all pages at or after "offlo".
793: * => object should be locked by caller. we return with the
794: * object unlocked.
795: * => if PGO_CLEANIT or PGO_SYNCIO is set, we may block (due to I/O).
796: * thus, a caller might want to unlock higher level resources
797: * (e.g. vm_map) before calling flush.
798: * => if neither PGO_CLEANIT nor PGO_SYNCIO is set, we will not block
799: * => if PGO_ALLPAGES is set, then all pages in the object will be processed.
800: * => NOTE: we visit pages in order of increasing file offset, using
801: *    batched radix tree lookups, so we can make a complete pass
802: *    through the object in one go; pages entered at offsets we
803: *    have already passed are not revisited.
805: * => NOTE: we are allowed to lock the page queues, so the caller
806: * must not be holding the page queue lock.
807: *
808: * note on "cleaning" object and PG_BUSY pages:
809: * this routine is holding the lock on the object. the only time
810: * that it can run into a PG_BUSY page that it does not own is if
811: * some other process has started I/O on the page (e.g. either
1.53.2.1 yamt 812: * a pagein, or a pageout). if the PG_BUSY page is being paged
813: * in, then it can not be dirty (!UVM_PAGE_STATUS_CLEAN) because no
814: * one has had a chance to modify it yet. if the PG_BUSY page is
815: * being paged out then it means that someone else has already started
816: * cleaning the page for us (how nice!). in this case, if we
1.1 pooka 817: * have syncio specified, then after we make our pass through the
818: * object we need to wait for the other PG_BUSY pages to clear
819: * off (i.e. we need to do an iosync). also note that once a
820: * page is PG_BUSY it must stay in its object until it is un-busyed.
821: *
822: * note on page traversal:
823: * we traverse the pages in the object in order of increasing
824: * offset, batching the radix tree lookups with a uvm_page_array.
825: * when only dirty pages are of interest (neither PGO_DEACTIVATE
826: * nor PGO_FREE is set), the lookups are further restricted to
827: * pages tagged UVM_PAGE_DIRTY_TAG in the tree, so runs of clean
828: * pages are skipped without being visited.
834: */
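
/*
 * An illustrative sketch (not from this file): a typical caller, e.g.
 * a filesystem's fsync routine, flushes a range with the interlock held:
 *
 *	mutex_enter(vp->v_interlock);
 *	error = VOP_PUTPAGES(vp, trunc_page(offlo), round_page(offhi),
 *	    PGO_CLEANIT | PGO_SYNCIO);
 *
 * the interlock is consumed: it has been released by the time this
 * returns.
 */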
835:
836: int
837: genfs_putpages(void *v)
838: {
839: struct vop_putpages_args /* {
840: struct vnode *a_vp;
841: voff_t a_offlo;
842: voff_t a_offhi;
843: int a_flags;
1.22 uebayasi 844: } */ * const ap = v;
1.1 pooka 845:
846: return genfs_do_putpages(ap->a_vp, ap->a_offlo, ap->a_offhi,
847: ap->a_flags, NULL);
848: }
849:
850: int
1.4 yamt 851: genfs_do_putpages(struct vnode *vp, off_t startoff, off_t endoff,
852: int origflags, struct vm_page **busypg)
1.1 pooka 853: {
1.22 uebayasi 854: struct uvm_object * const uobj = &vp->v_uobj;
1.49 rmind 855: kmutex_t * const slock = uobj->vmobjlock;
1.1 pooka 856: off_t off;
857: /* Even for strange MAXPHYS, the shift rounds down to a page */
858: #define maxpages (MAXPHYS >> PAGE_SHIFT)
1.2 ad 859: int i, error, npages, nback;
1.1 pooka 860: int freeflag;
1.53.2.1 yamt 861: struct vm_page *pgs[maxpages], *pg;
1.53.2.3 yamt 862: struct uvm_page_array a;
1.53.2.1 yamt 863: bool wasclean, needs_clean, yld;
1.4 yamt 864: bool async = (origflags & PGO_SYNCIO) == 0;
1.1 pooka 865: bool pagedaemon = curlwp == uvm.pagedaemon_lwp;
1.22 uebayasi 866: struct lwp * const l = curlwp ? curlwp : &lwp0;
1.4 yamt 867: int flags;
1.53.2.2 yamt 868: bool modified; /* if we write out any pages */
1.12 hannken 869: bool need_wapbl;
1.4 yamt 870: bool has_trans;
1.53.2.3 yamt 871: bool tryclean; /* try to pull off from the syncer's list */
1.4 yamt 872: bool onworklst;
1.53.2.1 yamt 873: const bool dirtyonly = (origflags & (PGO_DEACTIVATE|PGO_FREE)) == 0;
1.1 pooka 874:
875: UVMHIST_FUNC("genfs_putpages"); UVMHIST_CALLED(ubchist);
876:
1.4 yamt 877: KASSERT(origflags & (PGO_CLEANIT|PGO_FREE|PGO_DEACTIVATE));
1.1 pooka 878: KASSERT((startoff & PAGE_MASK) == 0 && (endoff & PAGE_MASK) == 0);
879: KASSERT(startoff < endoff || endoff == 0);
880:
881: UVMHIST_LOG(ubchist, "vp %p pages %d off 0x%x len 0x%x",
882: vp, uobj->uo_npages, startoff, endoff - startoff);
883:
1.6 hannken 884: has_trans = false;
1.12 hannken 885: need_wapbl = (!pagedaemon && vp->v_mount && vp->v_mount->mnt_wapbl &&
886: (origflags & PGO_JOURNALLOCKED) == 0);
1.6 hannken 887:
1.4 yamt 888: retry:
889: modified = false;
890: flags = origflags;
1.1 pooka 891: KASSERT((vp->v_iflag & VI_ONWORKLST) != 0 ||
892: (vp->v_iflag & VI_WRMAPDIRTY) == 0);
1.53.2.3 yamt 893:
894: /*
895: * shortcut if we have no pages to process.
896: */
897:
1.53.2.2 yamt 898: if (uobj->uo_npages == 0 || (dirtyonly &&
899: radix_tree_empty_tagged_tree_p(&uobj->uo_pages,
900: UVM_PAGE_DIRTY_TAG))) {
1.1 pooka 901: if (vp->v_iflag & VI_ONWORKLST) {
902: vp->v_iflag &= ~VI_WRMAPDIRTY;
903: if (LIST_FIRST(&vp->v_dirtyblkhd) == NULL)
904: vn_syncer_remove_from_worklist(vp);
905: }
1.12 hannken 906: if (has_trans) {
907: if (need_wapbl)
908: WAPBL_END(vp->v_mount);
1.6 hannken 909: fstrans_done(vp->v_mount);
1.12 hannken 910: }
1.2 ad 911: mutex_exit(slock);
1.1 pooka 912: return (0);
913: }
914:
915: /*
916: * the vnode has pages, set up to process the request.
917: */
918:
1.6 hannken 919: if (!has_trans && (flags & PGO_CLEANIT) != 0) {
1.2 ad 920: mutex_exit(slock);
1.1 pooka 921: if (pagedaemon) {
922: error = fstrans_start_nowait(vp->v_mount, FSTRANS_LAZY);
923: if (error)
924: return error;
925: } else
926: fstrans_start(vp->v_mount, FSTRANS_LAZY);
1.12 hannken 927: if (need_wapbl) {
928: error = WAPBL_BEGIN(vp->v_mount);
929: if (error) {
930: fstrans_done(vp->v_mount);
931: return error;
932: }
933: }
1.1 pooka 934: has_trans = true;
1.2 ad 935: mutex_enter(slock);
1.6 hannken 936: goto retry;
1.1 pooka 937: }
938:
939: error = 0;
940: wasclean = (vp->v_numoutput == 0);
941: off = startoff;
942: if (endoff == 0 || flags & PGO_ALLPAGES) {
943: endoff = trunc_page(LLONG_MAX);
944: }
945:
946: /*
947: * if this vnode is known not to have dirty pages,
948: * don't bother to clean it out.
949: */
950:
951: if ((vp->v_iflag & VI_ONWORKLST) == 0) {
1.48 matt 952: #if !defined(DEBUG)
1.53.2.2 yamt 953: if (dirtyonly) {
1.1 pooka 954: goto skip_scan;
955: }
1.48 matt 956: #endif /* !defined(DEBUG) */
1.1 pooka 957: flags &= ~PGO_CLEANIT;
958: }
959:
960: /*
1.53.2.3 yamt 961: * start the loop.
1.1 pooka 962: */
963:
964: freeflag = pagedaemon ? PG_PAGEOUT : PG_RELEASED;
1.53.2.3 yamt 965: tryclean = true;
966: uvm_page_array_init(&a);
1.53.2.1 yamt 967: for (;;) {
968: bool protected;
969:
1.53.2.4! yamt 970: pg = uvm_page_array_fill_and_peek(&a, uobj, off, 0,
! 971: dirtyonly ? UVM_PAGE_ARRAY_FILL_DIRTYONLY : 0);
1.53.2.1 yamt 972: if (pg == NULL) {
1.53.2.3 yamt 973: break;
1.53.2.1 yamt 974: }
1.1 pooka 975:
976: /*
977: * if the current page is not interesting, move on to the next.
978: */
979:
1.53.2.1 yamt 980: KASSERT(pg->uobject == uobj);
981: KASSERT((pg->flags & (PG_RELEASED|PG_PAGEOUT)) == 0 ||
1.53.2.3 yamt 982: (pg->flags & (PG_BUSY)) != 0);
1.53.2.1 yamt 983: KASSERT(pg->offset >= startoff);
984: KASSERT(pg->offset >= off);
985: KASSERT(!dirtyonly ||
986: uvm_pagegetdirty(pg) != UVM_PAGE_STATUS_CLEAN);
987: if (pg->offset >= endoff) {
988: break;
989: }
990: if (pg->flags & (PG_RELEASED|PG_PAGEOUT)) {
991: KASSERT((pg->flags & PG_BUSY) != 0);
992: wasclean = false;
1.53.2.3 yamt 993: off = pg->offset + PAGE_SIZE;
994: uvm_page_array_advance(&a);
1.1 pooka 995: continue;
996: }
997:
998: /*
999: * if the current page needs to be cleaned and it's busy,
1000: * wait for it to become unbusy.
1001: */
1002:
1003: yld = (l->l_cpu->ci_schedstate.spc_flags &
1004: SPCF_SHOULDYIELD) && !pagedaemon;
1005: if (pg->flags & PG_BUSY || yld) {
1006: UVMHIST_LOG(ubchist, "busy %p", pg,0,0,0);
1007: if (flags & PGO_BUSYFAIL && pg->flags & PG_BUSY) {
1008: UVMHIST_LOG(ubchist, "busyfail %p", pg, 0,0,0);
1009: error = EDEADLK;
1010: if (busypg != NULL)
1011: *busypg = pg;
1012: break;
1013: }
1014: if (pagedaemon) {
1015: /*
1016: * someone has taken the page while we
1017: * dropped the lock for fstrans_start.
1018: */
1019: break;
1020: }
1.53.2.1 yamt 1021: off = pg->offset; /* visit this page again */
1.53.2.3 yamt 1022: if ((pg->flags & PG_BUSY) != 0) {
1.1 pooka 1023: pg->flags |= PG_WANTED;
1024: UVM_UNLOCK_AND_WAIT(pg, slock, 0, "genput", 0);
1.53.2.3 yamt 1025: } else {
1026: KASSERT(yld);
1027: mutex_exit(slock);
1028: preempt();
1.1 pooka 1029: }
1.53.2.1 yamt 1030: /*
1031: * as we dropped the object lock, our cached pages can
1032: * be stale.
1033: */
1.53.2.3 yamt 1034: uvm_page_array_clear(&a);
1.53.2.1 yamt 1035: mutex_enter(slock);
1.1 pooka 1036: continue;
1037: }
1038:
1.53.2.3 yamt 1039: off = pg->offset + PAGE_SIZE;
1040: uvm_page_array_advance(&a);
1.53.2.1 yamt 1041:
1.1 pooka 1042: /*
1043: * if we're freeing, remove all mappings of the page now.
1044: * if we're cleaning, check if the page is needs to be cleaned.
1045: */
1046:
1.53.2.1 yamt 1047: protected = false;
1.1 pooka 1048: if (flags & PGO_FREE) {
1049: pmap_page_protect(pg, VM_PROT_NONE);
1.53.2.1 yamt 1050: protected = true;
1.1 pooka 1051: } else if (flags & PGO_CLEANIT) {
1052:
1053: /*
1054: * if we still have some hope to pull this vnode off
1055: * from the syncer queue, write-protect the page.
1056: */
1057:
1.53.2.3 yamt 1058: if (tryclean && wasclean) {
1.1 pooka 1059:
1060: /*
1061: * uobj pages get wired only by uvm_fault
1062: * where uobj is locked.
1063: */
1064:
1065: if (pg->wire_count == 0) {
1066: pmap_page_protect(pg,
1067: VM_PROT_READ|VM_PROT_EXECUTE);
1.53.2.1 yamt 1068: protected = true;
1.1 pooka 1069: } else {
1.53.2.3 yamt 1070: /*
1071: * give up.
1072: */
1073: tryclean = false;
1.1 pooka 1074: }
1075: }
1076: }
1077:
1078: if (flags & PGO_CLEANIT) {
1.53.2.1 yamt 1079: needs_clean = uvm_pagecheckdirty(pg, protected);
1.1 pooka 1080: } else {
1081: needs_clean = false;
1082: }
1083:
1084: /*
1085: * if we're cleaning, build a cluster.
1.53.2.1 yamt 1086: * the cluster will consist of pages which are currently dirty.
1.1 pooka 1087: * if not cleaning, just operate on the one page.
1088: */
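
		/*
		 * the cluster is capped at maxpages (MAXPHYS >> PAGE_SHIFT,
		 * e.g. 16 pages with the common 64kB MAXPHYS and 4kB pages),
		 * so a single GOP_WRITE never exceeds one maximal physical
		 * transfer.
		 */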
1089:
1090: if (needs_clean) {
1091: KDASSERT((vp->v_iflag & VI_ONWORKLST));
1092: wasclean = false;
1093: memset(pgs, 0, sizeof(pgs));
1094: pg->flags |= PG_BUSY;
1095: UVM_PAGE_OWN(pg, "genfs_putpages");
1096:
1.53.2.4! yamt 1097: #if 1 /* XXX notyet */
1.1 pooka 1098: /*
1099: * first look backward.
1100: */
1101:
1102: npages = MIN(maxpages >> 1, off >> PAGE_SHIFT);
1103: nback = npages;
1104: uvn_findpages(uobj, off - PAGE_SIZE, &nback, &pgs[0],
1105: UFP_NOWAIT|UFP_NOALLOC|UFP_DIRTYONLY|UFP_BACKWARD);
1106: if (nback) {
1107: memmove(&pgs[0], &pgs[npages - nback],
1108: nback * sizeof(pgs[0]));
1109: if (npages - nback < nback)
1110: memset(&pgs[nback], 0,
1111: (npages - nback) * sizeof(pgs[0]));
1112: else
1113: memset(&pgs[npages - nback], 0,
1114: nback * sizeof(pgs[0]));
1115: }
1.53.2.1 yamt 1116: #else
1117: nback = 0;
1118: #endif
1.1 pooka 1119:
1120: /*
1121: * then plug in our page of interest.
1122: */
1123:
1124: pgs[nback] = pg;
1125:
1126: /*
1127: * then look forward to fill in the remaining space in
1128: * the array of pages.
1129: */
1130:
1.53.2.1 yamt 1131: for (npages = 1; npages < maxpages; npages++) {
1132: struct vm_page *nextpg;
1133:
1.53.2.3 yamt 1134: /*
1135: * regardless of the value of dirtyonly,
1136: * we don't need to care about clean pages here
1137: * as we will drop the object lock to call
1138: * GOP_WRITE and thus need to clear the array
1139: * before the next iteration anyway.
1140: */
1141:
1142: nextpg = uvm_page_array_fill_and_peek(&a, uobj,
1.53.2.4! yamt 1143: pgs[npages - 1]->offset + PAGE_SIZE,
! 1144: maxpages - npages,
! 1145: UVM_PAGE_ARRAY_FILL_DIRTYONLY |
! 1146: UVM_PAGE_ARRAY_FILL_DENSE);
1.53.2.1 yamt 1147: if (nextpg == NULL) {
1.53.2.3 yamt 1148: break;
1.53.2.1 yamt 1149: }
1150: KASSERT(nextpg->uobject == pg->uobject);
1151: KASSERT(nextpg->offset > pg->offset);
1152: KASSERT(nextpg->offset >
1153: pgs[npages - 1]->offset);
1154: if (pgs[npages - 1]->offset + PAGE_SIZE !=
1155: nextpg->offset) {
1156: break;
1157: }
1158: if ((nextpg->flags & PG_BUSY) != 0) {
1159: break;
1160: }
1.53.2.3 yamt 1161:
1.53.2.1 yamt 1162: /*
1.53.2.3 yamt 1163: * don't bother to cluster incompatible pages
1164: * together.
1165: *
1.53.2.1 yamt 1166: * XXX hack for nfs
1167: */
1.53.2.3 yamt 1168:
1.53.2.1 yamt 1169: if (((nextpg->flags ^ pgs[npages - 1]->flags) &
1170: PG_PAGER1) != 0) {
1171: break;
1172: }
1173: if (!uvm_pagecheckdirty(nextpg, false)) {
1174: break;
1175: }
1176: nextpg->flags |= PG_BUSY;
1.53.2.2 yamt 1177: UVM_PAGE_OWN(nextpg, "genfs_putpages2");
1.53.2.1 yamt 1178: pgs[npages] = nextpg;
1.53.2.3 yamt 1179: uvm_page_array_advance(&a);
1.53.2.1 yamt 1180: }
1.1 pooka 1181: } else {
1182: pgs[0] = pg;
1183: npages = 1;
1184: nback = 0;
1185: }
1186:
1187: /*
1188: * apply FREE or DEACTIVATE options if requested.
1189: */
1190:
1191: if (flags & (PGO_DEACTIVATE|PGO_FREE)) {
1.2 ad 1192: mutex_enter(&uvm_pageqlock);
1.1 pooka 1193: }
1194: for (i = 0; i < npages; i++) {
1.53.2.1 yamt 1195: struct vm_page *tpg = pgs[i];
1196:
1.1 pooka 1197: KASSERT(tpg->uobject == uobj);
1198: if (tpg->offset < startoff || tpg->offset >= endoff)
1199: continue;
1200: if (flags & PGO_DEACTIVATE && tpg->wire_count == 0) {
1201: uvm_pagedeactivate(tpg);
1202: } else if (flags & PGO_FREE) {
1203: pmap_page_protect(tpg, VM_PROT_NONE);
1204: if (tpg->flags & PG_BUSY) {
1205: tpg->flags |= freeflag;
1206: if (pagedaemon) {
1.2 ad 1207: uvm_pageout_start(1);
1.1 pooka 1208: uvm_pagedequeue(tpg);
1209: }
1210: } else {
1211:
1212: /*
1213: * ``page is not busy''
1214: * implies that npages is 1
1215: * and needs_clean is false.
1216: */
1217:
1.53.2.1 yamt 1218: KASSERT(npages == 1);
1219: KASSERT(!needs_clean);
1220: KASSERT(pg == tpg);
1221: KASSERT(off == tpg->offset + PAGE_SIZE);
1.1 pooka 1222: uvm_pagefree(tpg);
1223: if (pagedaemon)
1224: uvmexp.pdfreed++;
1225: }
1226: }
1227: }
1228: if (flags & (PGO_DEACTIVATE|PGO_FREE)) {
1.2 ad 1229: mutex_exit(&uvm_pageqlock);
1.1 pooka 1230: }
1231: if (needs_clean) {
1.53.2.1 yamt 1232: KASSERT(off == pg->offset + PAGE_SIZE);
1233: off = pg->offset + ((npages - nback) << PAGE_SHIFT);
1234: KASSERT(pgs[nback] == pg);
1235: KASSERT(off == pgs[npages - 1]->offset + PAGE_SIZE);
1236: mutex_exit(slock);
1.1 pooka 1237:
1238: /*
1.53.2.1 yamt 1239: * start the i/o.
1240: *
1241: * as we dropped the object lock, our cached pages can
1242: * be stale.
1.1 pooka 1243: */
1.53.2.1 yamt 1244: modified = true;
1.53.2.3 yamt 1245: uvm_page_array_clear(&a);
1.1 pooka 1246: error = GOP_WRITE(vp, pgs, npages, flags);
1.2 ad 1247: mutex_enter(slock);
1.1 pooka 1248: if (error) {
1249: break;
1250: }
1251: }
1252: }
1.53.2.3 yamt 1253: uvm_page_array_fini(&a);
1.1 pooka 1254:
1.53.2.2 yamt 1255: /*
1256: * update ctime/mtime if the modification we started writing out might
1257: * be from mmap'ed write.
1258: *
1259: * this is necessary when an application keeps a file mmaped and
1260: * repeatedly modifies it via the window. note that, because we
1261: * don't always write-protect pages when cleaning, such modifications
1262: * might not involve any page faults.
1263: */
1264:
1.1 pooka 1265: if (modified && (vp->v_iflag & VI_WRMAPDIRTY) != 0 &&
1266: (vp->v_type != VBLK ||
1267: (vp->v_mount->mnt_flag & MNT_NODEVMTIME) == 0)) {
1268: GOP_MARKUPDATE(vp, GOP_UPDATE_MODIFIED);
1269: }
1270:
1271: /*
1.53.2.3 yamt 1272: * if we no longer have any possibly dirty pages, take us off the
1273: * syncer list.
1.1 pooka 1274: */
1275:
1.53.2.3 yamt 1276: if ((vp->v_iflag & VI_ONWORKLST) != 0 &&
1277: radix_tree_empty_tagged_tree_p(&uobj->uo_pages,
1278: UVM_PAGE_DIRTY_TAG)) {
1.1 pooka 1279: vp->v_iflag &= ~VI_WRMAPDIRTY;
1280: if (LIST_FIRST(&vp->v_dirtyblkhd) == NULL)
1281: vn_syncer_remove_from_worklist(vp);
1282: }
1283:
1284: #if !defined(DEBUG)
1285: skip_scan:
1286: #endif /* !defined(DEBUG) */
1.2 ad 1287:
1.53.2.3 yamt 1288: /*
1289: * if we started any i/o and we're doing sync i/o, wait for all writes
1290: * to finish.
1291: */
1292:
1293: if (!wasclean && !async) {
1.2 ad 1294: while (vp->v_numoutput != 0)
1295: cv_wait(&vp->v_cv, slock);
1.1 pooka 1296: }
1.4 yamt 1297: onworklst = (vp->v_iflag & VI_ONWORKLST) != 0;
1.2 ad 1298: mutex_exit(slock);
1.1 pooka 1299:
1.4 yamt 1300: if ((flags & PGO_RECLAIM) != 0 && onworklst) {
1301: /*
1302: * in the case of PGO_RECLAIM, ensure to make the vnode clean.
1303: * retrying is not a big deal because, in many cases,
1304: * uobj->uo_npages is already 0 here.
1305: */
1306: mutex_enter(slock);
1307: goto retry;
1308: }
1309:
1.12 hannken 1310: if (has_trans) {
1311: if (need_wapbl)
1312: WAPBL_END(vp->v_mount);
1.6 hannken 1313: fstrans_done(vp->v_mount);
1.12 hannken 1314: }
1.6 hannken 1315:
1.1 pooka 1316: return (error);
1317: }
1318:
1319: int
1320: genfs_gop_write(struct vnode *vp, struct vm_page **pgs, int npages, int flags)
1321: {
1322: off_t off;
1323: vaddr_t kva;
1324: size_t len;
1325: int error;
1326: UVMHIST_FUNC(__func__); UVMHIST_CALLED(ubchist);
1327:
1328: UVMHIST_LOG(ubchist, "vp %p pgs %p npages %d flags 0x%x",
1329: vp, pgs, npages, flags);
1330:
1331: off = pgs[0]->offset;
1332: kva = uvm_pagermapin(pgs, npages,
1333: UVMPAGER_MAPIN_WRITE | UVMPAGER_MAPIN_WAITOK);
1334: len = npages << PAGE_SHIFT;
1335:
1336: error = genfs_do_io(vp, off, kva, len, flags, UIO_WRITE,
1337: uvm_aio_biodone);
1338:
1339: return error;
1340: }
1341:
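/*
 * A variant of genfs_gop_write() for callers that need the pages mapped
 * read/write (UVMPAGER_MAPIN_READ) rather than read-only during the
 * write-out, e.g. so that the contents may be modified while the I/O
 * is in flight.
 */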
1.7 reinoud 1342: int
1343: genfs_gop_write_rwmap(struct vnode *vp, struct vm_page **pgs, int npages, int flags)
1344: {
1345: off_t off;
1346: vaddr_t kva;
1347: size_t len;
1348: int error;
1349: UVMHIST_FUNC(__func__); UVMHIST_CALLED(ubchist);
1350:
1351: UVMHIST_LOG(ubchist, "vp %p pgs %p npages %d flags 0x%x",
1352: vp, pgs, npages, flags);
1353:
1354: off = pgs[0]->offset;
1355: kva = uvm_pagermapin(pgs, npages,
1356: UVMPAGER_MAPIN_READ | UVMPAGER_MAPIN_WAITOK);
1357: len = npages << PAGE_SHIFT;
1358:
1359: error = genfs_do_io(vp, off, kva, len, flags, UIO_WRITE,
1360: uvm_aio_biodone);
1361:
1362: return error;
1363: }
1364:
1.1 pooka 1365: /*
1366: * Backend routine for doing I/O to vnode pages. Pages are already locked
1367: * and mapped into kernel memory. Here we just look up the underlying
1368: * device block addresses and call the strategy routine.
1369: */
1370:
1371: static int
1372: genfs_do_io(struct vnode *vp, off_t off, vaddr_t kva, size_t len, int flags,
1373: enum uio_rw rw, void (*iodone)(struct buf *))
1374: {
1.36 uebayasi 1375: int s, error;
1.1 pooka 1376: int fs_bshift, dev_bshift;
1377: off_t eof, offset, startoffset;
1378: size_t bytes, iobytes, skipbytes;
1379: struct buf *mbp, *bp;
1.35 uebayasi 1380: const bool async = (flags & PGO_SYNCIO) == 0;
1381: const bool iowrite = rw == UIO_WRITE;
1382: const int brw = iowrite ? B_WRITE : B_READ;
1.1 pooka 1383: UVMHIST_FUNC(__func__); UVMHIST_CALLED(ubchist);
1384:
1385: UVMHIST_LOG(ubchist, "vp %p kva %p len 0x%x flags 0x%x",
1386: vp, kva, len, flags);
1387:
1388: KASSERT(vp->v_size <= vp->v_writesize);
1389: GOP_SIZE(vp, vp->v_writesize, &eof, 0);
1390: if (vp->v_type != VBLK) {
1391: fs_bshift = vp->v_mount->mnt_fs_bshift;
1392: dev_bshift = vp->v_mount->mnt_dev_bshift;
1393: } else {
1394: fs_bshift = DEV_BSHIFT;
1395: dev_bshift = DEV_BSHIFT;
1396: }
1397: error = 0;
1398: startoffset = off;
1399: bytes = MIN(len, eof - startoffset);
1400: skipbytes = 0;
1401: KASSERT(bytes != 0);
1402:
1.35 uebayasi 1403: if (iowrite) {
1.49 rmind 1404: mutex_enter(vp->v_interlock);
1.1 pooka 1405: vp->v_numoutput += 2;
1.49 rmind 1406: mutex_exit(vp->v_interlock);
1.1 pooka 1407: }
1.2 ad 1408: mbp = getiobuf(vp, true);
1.1 pooka 1409: UVMHIST_LOG(ubchist, "vp %p mbp %p num now %d bytes 0x%x",
1410: vp, mbp, vp->v_numoutput, bytes);
1411: mbp->b_bufsize = len;
1412: mbp->b_data = (void *)kva;
1413: mbp->b_resid = mbp->b_bcount = bytes;
1.2 ad 1414: mbp->b_cflags = BC_BUSY | BC_AGE;
1415: if (async) {
1416: mbp->b_flags = brw | B_ASYNC;
1417: mbp->b_iodone = iodone;
1418: } else {
1419: mbp->b_flags = brw;
1420: mbp->b_iodone = NULL;
1421: }
1.1 pooka 1422: if (curlwp == uvm.pagedaemon_lwp)
1423: BIO_SETPRIO(mbp, BPRIO_TIMELIMITED);
1424: else if (async)
1425: BIO_SETPRIO(mbp, BPRIO_TIMENONCRITICAL);
1426: else
1427: BIO_SETPRIO(mbp, BPRIO_TIMECRITICAL);
1428:
1429: bp = NULL;
1430: for (offset = startoffset;
1431: bytes > 0;
1432: offset += iobytes, bytes -= iobytes) {
1.36 uebayasi 1433: int run;
1434: daddr_t lbn, blkno;
1435: struct vnode *devvp;
1436:
1437: /*
1438: * bmap the file to find out the blkno to read from and
1439: * how much we can read in one i/o. if bmap returns an error,
1440: * skip the rest of the top-level i/o.
1441: */
1442:
1.1 pooka 1443: lbn = offset >> fs_bshift;
1444: error = VOP_BMAP(vp, lbn, &devvp, &blkno, &run);
1445: if (error) {
1.36 uebayasi 1446: UVMHIST_LOG(ubchist, "VOP_BMAP lbn 0x%x -> %d\n",
1447: lbn,error,0,0);
1.1 pooka 1448: skipbytes += bytes;
1449: bytes = 0;
1.36 uebayasi 1450: goto loopdone;
1.1 pooka 1451: }
1452:
1.36 uebayasi 1453: /*
1454: * see how much we can transfer with this i/o.
1455: * the size is limited by the contiguous run of
1456: * blocks returned by VOP_BMAP above.
1457: */
1458:
1.1 pooka 1459: iobytes = MIN((((off_t)lbn + 1 + run) << fs_bshift) - offset,
1460: bytes);
1.36 uebayasi 1461:
1462: /*
1463: * if this block isn't allocated, skip it. if we are
1464: * reading, zero the corresponding part of the buffer
1465: * instead.
1466: */
1467:
1.1 pooka 1468: if (blkno == (daddr_t)-1) {
1.35 uebayasi 1469: if (!iowrite) {
1.1 pooka 1470: memset((char *)kva + (offset - startoffset), 0,
1.36 uebayasi 1471: iobytes);
1.1 pooka 1472: }
1473: skipbytes += iobytes;
1474: continue;
1475: }
1476:
1.36 uebayasi 1477: /*
1478: * allocate a sub-buf for this piece of the i/o
1479: * (or just use mbp if there's only 1 piece),
1480: * and start it going.
1481: */
1482:
1.1 pooka 1483: if (offset == startoffset && iobytes == bytes) {
1484: bp = mbp;
1485: } else {
1486: UVMHIST_LOG(ubchist, "vp %p bp %p num now %d",
1487: vp, bp, vp->v_numoutput, 0);
1.2 ad 1488: bp = getiobuf(vp, true);
1.1 pooka 1489: nestiobuf_setup(mbp, bp, offset - startoffset, iobytes);
1490: }
1491: bp->b_lblkno = 0;
1492:
1493: /* adjust physical blkno for partial blocks */
1494: bp->b_blkno = blkno + ((offset - ((off_t)lbn << fs_bshift)) >>
1495: dev_bshift);
1.36 uebayasi 1496:
1.1 pooka 1497: UVMHIST_LOG(ubchist,
1.36 uebayasi 1498: "bp %p offset 0x%x bcount 0x%x blkno 0x%x",
1499: bp, offset, bp->b_bcount, bp->b_blkno);
1.1 pooka 1500:
1501: VOP_STRATEGY(devvp, bp);
1502: }
1.36 uebayasi 1503:
1504: loopdone:
1.1 pooka 1505: if (skipbytes) {
1506: UVMHIST_LOG(ubchist, "skipbytes %d", skipbytes, 0,0,0);
1507: }
1508: nestiobuf_done(mbp, skipbytes, error);
1509: if (async) {
1510: UVMHIST_LOG(ubchist, "returning 0 (async)", 0,0,0,0);
1511: return (0);
1512: }
1513: UVMHIST_LOG(ubchist, "waiting for mbp %p", mbp,0,0,0);
1514: error = biowait(mbp);
1515: s = splbio();
1516: (*iodone)(mbp);
1517: splx(s);
1518: UVMHIST_LOG(ubchist, "returning, error %d", error,0,0,0);
1519: return (error);
1520: }
1521:
1522: int
1523: genfs_compat_getpages(void *v)
1524: {
1525: struct vop_getpages_args /* {
1526: struct vnode *a_vp;
1527: voff_t a_offset;
1528: struct vm_page **a_m;
1529: int *a_count;
1530: int a_centeridx;
1531: vm_prot_t a_access_type;
1532: int a_advice;
1533: int a_flags;
1534: } */ *ap = v;
1535:
1536: off_t origoffset;
1537: struct vnode *vp = ap->a_vp;
1538: struct uvm_object *uobj = &vp->v_uobj;
1539: struct vm_page *pg, **pgs;
1540: vaddr_t kva;
1541: int i, error, orignpages, npages;
1542: struct iovec iov;
1543: struct uio uio;
1544: kauth_cred_t cred = curlwp->l_cred;
1.35 uebayasi 1545: const bool memwrite = (ap->a_access_type & VM_PROT_WRITE) != 0;
1.1 pooka 1546:
1547: error = 0;
1548: origoffset = ap->a_offset;
1549: orignpages = *ap->a_count;
1550: pgs = ap->a_m;
1551:
1552: if (ap->a_flags & PGO_LOCKED) {
1553: uvn_findpages(uobj, origoffset, ap->a_count, ap->a_m,
1.35 uebayasi 1554: UFP_NOWAIT|UFP_NOALLOC|(memwrite ? UFP_NORDONLY : 0));
1.1 pooka 1555:
1.38 chs 1556: error = ap->a_m[ap->a_centeridx] == NULL ? EBUSY : 0;
1557: if (error == 0 && memwrite) {
1558: genfs_markdirty(vp);
1559: }
1560: return error;
1.1 pooka 1561: }
1562: if (origoffset + (ap->a_centeridx << PAGE_SHIFT) >= vp->v_size) {
1.49 rmind 1563: mutex_exit(uobj->vmobjlock);
1.38 chs 1564: return EINVAL;
1.1 pooka 1565: }
1566: if ((ap->a_flags & PGO_SYNCIO) == 0) {
1.49 rmind 1567: mutex_exit(uobj->vmobjlock);
1.1 pooka 1568: return 0;
1569: }
1570: npages = orignpages;
1571: uvn_findpages(uobj, origoffset, &npages, pgs, UFP_ALL);
1.49 rmind 1572: mutex_exit(uobj->vmobjlock);
1.1 pooka 1573: kva = uvm_pagermapin(pgs, npages,
1574: UVMPAGER_MAPIN_READ | UVMPAGER_MAPIN_WAITOK);
1575: for (i = 0; i < npages; i++) {
1576: pg = pgs[i];
1577: if ((pg->flags & PG_FAKE) == 0) {
1578: continue;
1579: }
1580: iov.iov_base = (char *)kva + (i << PAGE_SHIFT);
1581: iov.iov_len = PAGE_SIZE;
1582: uio.uio_iov = &iov;
1583: uio.uio_iovcnt = 1;
1584: uio.uio_offset = origoffset + (i << PAGE_SHIFT);
1585: uio.uio_rw = UIO_READ;
1586: uio.uio_resid = PAGE_SIZE;
1587: UIO_SETUP_SYSSPACE(&uio);
1588: /* XXX vn_lock */
1589: error = VOP_READ(vp, &uio, 0, cred);
1590: if (error) {
1591: break;
1592: }
1593: if (uio.uio_resid) {
1594: memset(iov.iov_base, 0, uio.uio_resid);
1595: }
1596: }
1597: uvm_pagermapout(kva, npages);
1.49 rmind 1598: mutex_enter(uobj->vmobjlock);
1.2 ad 1599: mutex_enter(&uvm_pageqlock);
1.1 pooka 1600: for (i = 0; i < npages; i++) {
1601: pg = pgs[i];
1602: if (error && (pg->flags & PG_FAKE) != 0) {
1603: pg->flags |= PG_RELEASED;
1604: } else {
1.53.2.1 yamt 1605: uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_UNKNOWN);
1.1 pooka 1606: uvm_pageactivate(pg);
1607: }
1608: }
1609: if (error) {
1610: uvm_page_unbusy(pgs, npages);
1611: }
1.2 ad 1612: mutex_exit(&uvm_pageqlock);
1.38 chs 1613: if (error == 0 && memwrite) {
1614: genfs_markdirty(vp);
1615: }
1.49 rmind 1616: mutex_exit(uobj->vmobjlock);
1.38 chs 1617: return error;
1.1 pooka 1618: }
1619:
1620: int
1621: genfs_compat_gop_write(struct vnode *vp, struct vm_page **pgs, int npages,
1622: int flags)
1623: {
1624: off_t offset;
1625: struct iovec iov;
1626: struct uio uio;
1627: kauth_cred_t cred = curlwp->l_cred;
1628: struct buf *bp;
1629: vaddr_t kva;
1.2 ad 1630: int error;
1.1 pooka 1631:
1632: offset = pgs[0]->offset;
1633: kva = uvm_pagermapin(pgs, npages,
1634: UVMPAGER_MAPIN_WRITE | UVMPAGER_MAPIN_WAITOK);
1635:
1636: iov.iov_base = (void *)kva;
1637: iov.iov_len = npages << PAGE_SHIFT;
1638: uio.uio_iov = &iov;
1639: uio.uio_iovcnt = 1;
1640: uio.uio_offset = offset;
1641: uio.uio_rw = UIO_WRITE;
1642: uio.uio_resid = npages << PAGE_SHIFT;
1643: UIO_SETUP_SYSSPACE(&uio);
1644: /* XXX vn_lock */
1645: error = VOP_WRITE(vp, &uio, 0, cred);
1646:
1.49 rmind 1647: mutex_enter(vp->v_interlock);
1.2 ad 1648: vp->v_numoutput++;
1.49 rmind 1649: mutex_exit(vp->v_interlock);
1.1 pooka 1650:
1.2 ad 1651: bp = getiobuf(vp, true);
1652: bp->b_cflags = BC_BUSY | BC_AGE;
1.1 pooka 1653: bp->b_lblkno = offset >> vp->v_mount->mnt_fs_bshift;
1654: bp->b_data = (char *)kva;
1655: bp->b_bcount = npages << PAGE_SHIFT;
1656: bp->b_bufsize = npages << PAGE_SHIFT;
1657: bp->b_resid = 0;
1658: bp->b_error = error;
1659: uvm_aio_aiodone(bp);
1660: return (error);
1661: }
1662:
1663: /*
1664: * Process a uio using direct I/O. If we reach a part of the request
1665: * which cannot be processed in this fashion for some reason, just return.
1666: * The caller must handle some additional part of the request using
1667: * buffered I/O before trying direct I/O again.
1668: */
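
/*
 * An illustrative sketch (not from this file): a filesystem read/write
 * routine typically tries direct I/O first when IO_DIRECT is requested
 * and handles whatever remains with buffered I/O, roughly:
 *
 *	if (ioflag & IO_DIRECT)
 *		genfs_directio(vp, uio, ioflag);
 *	if (uio->uio_resid == 0)
 *		return 0;
 *	(... continue with buffered I/O ...)
 */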
1669:
1670: void
1671: genfs_directio(struct vnode *vp, struct uio *uio, int ioflag)
1672: {
1673: struct vmspace *vs;
1674: struct iovec *iov;
1675: vaddr_t va;
1676: size_t len;
1677: const int mask = DEV_BSIZE - 1;
1678: int error;
1.16 joerg 1679: bool need_wapbl = (vp->v_mount && vp->v_mount->mnt_wapbl &&
1680: (ioflag & IO_JOURNALLOCKED) == 0);
1.1 pooka 1681:
1682: /*
1683: * We only support direct I/O to user space for now.
1684: */
1685:
1686: if (VMSPACE_IS_KERNEL_P(uio->uio_vmspace)) {
1687: return;
1688: }
1689:
1690: /*
1691: * If the vnode is mapped, we would need to get the getpages lock
1.53 yamt 1692: * to stabilize the bmap, but then we would get into trouble while
1.1 pooka 1693: * locking the pages if the pages belong to this same vnode (or a
1694: * multi-vnode cascade to the same effect). Just fall back to
1695: * buffered I/O if the vnode is mapped to avoid this mess.
1696: */
1697:
1698: if (vp->v_vflag & VV_MAPPED) {
1699: return;
1700: }
1701:
1.16 joerg 1702: if (need_wapbl) {
1.13 hannken 1703: error = WAPBL_BEGIN(vp->v_mount);
1704: if (error)
1705: return;
1706: }
1707:
1.1 pooka 1708: /*
1709: * Do as much of the uio as possible with direct I/O.
1710: */
1711:
1712: vs = uio->uio_vmspace;
1713: while (uio->uio_resid) {
1714: iov = uio->uio_iov;
1715: if (iov->iov_len == 0) {
1716: uio->uio_iov++;
1717: uio->uio_iovcnt--;
1718: continue;
1719: }
1720: va = (vaddr_t)iov->iov_base;
1721: len = MIN(iov->iov_len, genfs_maxdio);
1722: len &= ~mask;
1723:
1724: /*
1725: * If the next chunk is smaller than DEV_BSIZE or extends past
1726: * the current EOF, then fall back to buffered I/O.
1727: */
1728:
1729: if (len == 0 || uio->uio_offset + len > vp->v_size) {
1.13 hannken 1730: break;
1.1 pooka 1731: }
1732:
1733: /*
1734: * Check alignment. The file offset must be at least
1735: * sector-aligned. The exact constraint on memory alignment
1736: * is very hardware-dependent, but requiring sector-aligned
1737: * addresses there too is safe.
1738: */
1739:
1740: if (uio->uio_offset & mask || va & mask) {
1.13 hannken 1741: break;
1.1 pooka 1742: }
1743: error = genfs_do_directio(vs, va, len, vp, uio->uio_offset,
1744: uio->uio_rw);
1745: if (error) {
1746: break;
1747: }
1748: iov->iov_base = (char *)iov->iov_base + len;
1749: iov->iov_len -= len;
1750: uio->uio_offset += len;
1751: uio->uio_resid -= len;
1752: }
1.13 hannken 1753:
1.16 joerg 1754: if (need_wapbl)
1.13 hannken 1755: WAPBL_END(vp->v_mount);
1.1 pooka 1756: }
1757:
1758: /*
1759: * Iodone routine for direct I/O. We don't do much here since the request is
1760: * always synchronous, so the caller will do most of the work after biowait().
1761: */
1762:
1763: static void
1764: genfs_dio_iodone(struct buf *bp)
1765: {
1766:
1767: KASSERT((bp->b_flags & B_ASYNC) == 0);
1.2 ad 1768: if ((bp->b_flags & B_READ) == 0 && (bp->b_cflags & BC_AGE) != 0) {
1769: mutex_enter(bp->b_objlock);
1.1 pooka 1770: vwakeup(bp);
1.2 ad 1771: mutex_exit(bp->b_objlock);
1.1 pooka 1772: }
1773: putiobuf(bp);
1774: }
1775:
1776: /*
1777: * Process one chunk of a direct I/O request.
1778: */
1779:
1780: static int
1781: genfs_do_directio(struct vmspace *vs, vaddr_t uva, size_t len, struct vnode *vp,
1782: off_t off, enum uio_rw rw)
1783: {
1784: struct vm_map *map;
1785: struct pmap *upm, *kpm;
1786: size_t klen = round_page(uva + len) - trunc_page(uva);
1787: off_t spoff, epoff;
1788: vaddr_t kva, puva;
1789: paddr_t pa;
1790: vm_prot_t prot;
1791: int error, rv, poff, koff;
1.13 hannken 1792: const int pgoflags = PGO_CLEANIT | PGO_SYNCIO | PGO_JOURNALLOCKED |
1.1 pooka 1793: (rw == UIO_WRITE ? PGO_FREE : 0);
1794:
1795: /*
1796: * For writes, verify that this range of the file already has fully
1797: * allocated backing store. If there are any holes, just punt and
1798: * make the caller take the buffered write path.
1799: */
1800:
1801: if (rw == UIO_WRITE) {
1802: daddr_t lbn, elbn, blkno;
1803: int bsize, bshift, run;
1804:
1805: bshift = vp->v_mount->mnt_fs_bshift;
1806: bsize = 1 << bshift;
1807: lbn = off >> bshift;
1808: elbn = (off + len + bsize - 1) >> bshift;
1809: while (lbn < elbn) {
1810: error = VOP_BMAP(vp, lbn, NULL, &blkno, &run);
1811: if (error) {
1812: return error;
1813: }
1814: if (blkno == (daddr_t)-1) {
1815: return ENOSPC;
1816: }
1817: lbn += 1 + run;
1818: }
1819: }
1820:
1821: /*
1822: * Flush any cached pages for parts of the file that we're about to
1823: * access. If we're writing, invalidate pages as well.
1824: */
1825:
1826: spoff = trunc_page(off);
1827: epoff = round_page(off + len);
1.49 rmind 1828: mutex_enter(vp->v_interlock);
1.1 pooka 1829: error = VOP_PUTPAGES(vp, spoff, epoff, pgoflags);
1830: if (error) {
1831: return error;
1832: }
1833:
1834: /*
1835: * Wire the user pages and remap them into kernel memory.
1836: */
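
	/*
	 * klen covers the user range rounded out to whole pages, e.g. with
	 * PAGE_SIZE = 4096, uva = 0x1234 and len = 0x3000: trunc_page(uva)
	 * = 0x1000 and round_page(uva + len) = 0x5000, so klen = 0x4000;
	 * the I/O below then starts at koff = uva - trunc_page(uva) = 0x234
	 * within the mapping.
	 */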
1837:
1838: prot = rw == UIO_READ ? VM_PROT_READ | VM_PROT_WRITE : VM_PROT_READ;
1839: error = uvm_vslock(vs, (void *)uva, len, prot);
1840: if (error) {
1841: return error;
1842: }
1843:
1844: map = &vs->vm_map;
1845: upm = vm_map_pmap(map);
1846: kpm = vm_map_pmap(kernel_map);
1847: puva = trunc_page(uva);
1.51 matt 1848: kva = uvm_km_alloc(kernel_map, klen, atop(puva) & uvmexp.colormask,
1849: UVM_KMF_VAONLY | UVM_KMF_WAITVA | UVM_KMF_COLORMATCH);
1.1 pooka 1850: for (poff = 0; poff < klen; poff += PAGE_SIZE) {
1851: rv = pmap_extract(upm, puva + poff, &pa);
1852: KASSERT(rv);
1.51 matt 1853: pmap_kenter_pa(kva + poff, pa, prot, PMAP_WIRED);
1.1 pooka 1854: }
1855: pmap_update(kpm);
1856:
1857: /*
1858: * Do the I/O.
1859: */
1860:
1861: koff = uva - trunc_page(uva);
1862: error = genfs_do_io(vp, off, kva + koff, len, PGO_SYNCIO, rw,
1863: genfs_dio_iodone);
1864:
1865: /*
1866: * Tear down the kernel mapping.
1867: */
1868:
1.51 matt 1869: pmap_kremove(kva, klen);
1.1 pooka 1870: pmap_update(kpm);
1871: uvm_km_free(kernel_map, kva, klen, UVM_KMF_VAONLY);
1872:
1873: /*
1874: * Unwire the user pages.
1875: */
1876:
1877: uvm_vsunlock(vs, (void *)uva, len);
1878: return error;
1879: }