Please note that diffs are not public domain; they are subject to the copyright notices on the relevant files.

===================================================================
RCS file: /ftp/cvs/cvsroot/src/sys/ufs/ffs/ffs_vnops.c,v
retrieving revision 1.18.4.1
retrieving revision 1.77.8.3
diff -u -p -r1.18.4.1 -r1.77.8.3
--- src/sys/ufs/ffs/ffs_vnops.c	1999/06/07 04:25:34	1.18.4.1
+++ src/sys/ufs/ffs/ffs_vnops.c	2006/05/24 10:59:25	1.77.8.3
@@ -1,4 +1,4 @@
-/*	$NetBSD: ffs_vnops.c,v 1.18.4.1 1999/06/07 04:25:34 chs Exp $	*/
+/*	$NetBSD: ffs_vnops.c,v 1.77.8.3 2006/05/24 10:59:25 yamt Exp $	*/
 
 /*
  * Copyright (c) 1982, 1986, 1989, 1993
@@ -12,11 +12,7 @@
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- *    must display the following acknowledgement:
- *	This product includes software developed by the University of
- *	California, Berkeley and its contributors.
- * 4. Neither the name of the University nor the names of its contributors
+ * 3. Neither the name of the University nor the names of its contributors
  *    may be used to endorse or promote products derived from this software
  *    without specific prior written permission.
  *
@@ -35,9 +31,8 @@
  *	@(#)ffs_vnops.c	8.15 (Berkeley) 5/14/95
  */
 
-#if defined(_KERNEL) && !defined(_LKM)
-#include "opt_uvmhist.h"
-#endif
+#include
+__KERNEL_RCSID(0, "$NetBSD: ffs_vnops.c,v 1.77.8.3 2006/05/24 10:59:25 yamt Exp $");
 
 #include
 #include
@@ -46,22 +41,18 @@
 #include
 #include
 #include
+#include
 #include
-#include
 #include
 #include
 #include
 #include
-#include
-
-#include
-#include
+#include
 #include
 #include
 #include
-#include
 #include
 #include
 #include
@@ -70,9 +61,13 @@
 #include
 #include
 
+#include
+
+static int ffs_full_fsync(void *);
+
 /* Global vfs data structures for ufs.
*/ -int (**ffs_vnodeop_p) __P((void *)); -struct vnodeopv_entry_desc ffs_vnodeop_entries[] = { +int (**ffs_vnodeop_p)(void *); +const struct vnodeopv_entry_desc ffs_vnodeop_entries[] = { { &vop_default_desc, vn_default_error }, { &vop_lookup_desc, ufs_lookup }, /* lookup */ { &vop_create_desc, ufs_create }, /* create */ @@ -87,7 +82,9 @@ struct vnodeopv_entry_desc ffs_vnodeop_e { &vop_write_desc, ffs_write }, /* write */ { &vop_lease_desc, ufs_lease_check }, /* lease */ { &vop_ioctl_desc, ufs_ioctl }, /* ioctl */ + { &vop_fcntl_desc, ufs_fcntl }, /* fcntl */ { &vop_poll_desc, ufs_poll }, /* poll */ + { &vop_kqfilter_desc, genfs_kqfilter }, /* kqfilter */ { &vop_revoke_desc, ufs_revoke }, /* revoke */ { &vop_mmap_desc, ufs_mmap }, /* mmap */ { &vop_fsync_desc, ffs_fsync }, /* fsync */ @@ -111,22 +108,22 @@ struct vnodeopv_entry_desc ffs_vnodeop_e { &vop_islocked_desc, ufs_islocked }, /* islocked */ { &vop_pathconf_desc, ufs_pathconf }, /* pathconf */ { &vop_advlock_desc, ufs_advlock }, /* advlock */ - { &vop_blkatoff_desc, ffs_blkatoff }, /* blkatoff */ - { &vop_valloc_desc, ffs_valloc }, /* valloc */ - { &vop_reallocblks_desc, ffs_reallocblks }, /* reallocblks */ - { &vop_vfree_desc, ffs_vfree }, /* vfree */ - { &vop_truncate_desc, ffs_truncate }, /* truncate */ - { &vop_update_desc, ffs_update }, /* update */ { &vop_bwrite_desc, vn_bwrite }, /* bwrite */ { &vop_getpages_desc, ffs_getpages }, /* getpages */ - { &vop_putpages_desc, ffs_putpages }, /* putpages */ + { &vop_putpages_desc, genfs_putpages }, /* putpages */ + { &vop_openextattr_desc, ffs_openextattr }, /* openextattr */ + { &vop_closeextattr_desc, ffs_closeextattr }, /* closeextattr */ + { &vop_getextattr_desc, ffs_getextattr }, /* getextattr */ + { &vop_setextattr_desc, ffs_setextattr }, /* setextattr */ + { &vop_listextattr_desc, ffs_listextattr }, /* listextattr */ + { &vop_deleteextattr_desc, ffs_deleteextattr }, /* deleteextattr */ { NULL, NULL } }; -struct vnodeopv_desc ffs_vnodeop_opv_desc = +const struct vnodeopv_desc ffs_vnodeop_opv_desc = { &ffs_vnodeop_p, ffs_vnodeop_entries }; -int (**ffs_specop_p) __P((void *)); -struct vnodeopv_entry_desc ffs_specop_entries[] = { +int (**ffs_specop_p)(void *); +const struct vnodeopv_entry_desc ffs_specop_entries[] = { { &vop_default_desc, vn_default_error }, { &vop_lookup_desc, spec_lookup }, /* lookup */ { &vop_create_desc, spec_create }, /* create */ @@ -140,7 +137,9 @@ struct vnodeopv_entry_desc ffs_specop_en { &vop_write_desc, ufsspec_write }, /* write */ { &vop_lease_desc, spec_lease_check }, /* lease */ { &vop_ioctl_desc, spec_ioctl }, /* ioctl */ + { &vop_fcntl_desc, ufs_fcntl }, /* fcntl */ { &vop_poll_desc, spec_poll }, /* poll */ + { &vop_kqfilter_desc, spec_kqfilter }, /* kqfilter */ { &vop_revoke_desc, spec_revoke }, /* revoke */ { &vop_mmap_desc, spec_mmap }, /* mmap */ { &vop_fsync_desc, ffs_fsync }, /* fsync */ @@ -164,20 +163,22 @@ struct vnodeopv_entry_desc ffs_specop_en { &vop_islocked_desc, ufs_islocked }, /* islocked */ { &vop_pathconf_desc, spec_pathconf }, /* pathconf */ { &vop_advlock_desc, spec_advlock }, /* advlock */ - { &vop_blkatoff_desc, spec_blkatoff }, /* blkatoff */ - { &vop_valloc_desc, spec_valloc }, /* valloc */ - { &vop_reallocblks_desc, spec_reallocblks }, /* reallocblks */ - { &vop_vfree_desc, ffs_vfree }, /* vfree */ - { &vop_truncate_desc, spec_truncate }, /* truncate */ - { &vop_update_desc, ffs_update }, /* update */ { &vop_bwrite_desc, vn_bwrite }, /* bwrite */ - { (struct vnodeop_desc*)NULL, (int(*) __P((void *)))NULL } + { 
&vop_getpages_desc, spec_getpages }, /* getpages */ + { &vop_putpages_desc, spec_putpages }, /* putpages */ + { &vop_openextattr_desc, ffs_openextattr }, /* openextattr */ + { &vop_closeextattr_desc, ffs_closeextattr }, /* closeextattr */ + { &vop_getextattr_desc, ffs_getextattr }, /* getextattr */ + { &vop_setextattr_desc, ffs_setextattr }, /* setextattr */ + { &vop_listextattr_desc, ffs_listextattr }, /* listextattr */ + { &vop_deleteextattr_desc, ffs_deleteextattr }, /* deleteextattr */ + { NULL, NULL } }; -struct vnodeopv_desc ffs_specop_opv_desc = +const struct vnodeopv_desc ffs_specop_opv_desc = { &ffs_specop_p, ffs_specop_entries }; -int (**ffs_fifoop_p) __P((void *)); -struct vnodeopv_entry_desc ffs_fifoop_entries[] = { +int (**ffs_fifoop_p)(void *); +const struct vnodeopv_entry_desc ffs_fifoop_entries[] = { { &vop_default_desc, vn_default_error }, { &vop_lookup_desc, fifo_lookup }, /* lookup */ { &vop_create_desc, fifo_create }, /* create */ @@ -191,7 +192,9 @@ struct vnodeopv_entry_desc ffs_fifoop_en { &vop_write_desc, ufsfifo_write }, /* write */ { &vop_lease_desc, fifo_lease_check }, /* lease */ { &vop_ioctl_desc, fifo_ioctl }, /* ioctl */ + { &vop_fcntl_desc, ufs_fcntl }, /* fcntl */ { &vop_poll_desc, fifo_poll }, /* poll */ + { &vop_kqfilter_desc, fifo_kqfilter }, /* kqfilter */ { &vop_revoke_desc, fifo_revoke }, /* revoke */ { &vop_mmap_desc, fifo_mmap }, /* mmap */ { &vop_fsync_desc, ffs_fsync }, /* fsync */ @@ -215,40 +218,279 @@ struct vnodeopv_entry_desc ffs_fifoop_en { &vop_islocked_desc, ufs_islocked }, /* islocked */ { &vop_pathconf_desc, fifo_pathconf }, /* pathconf */ { &vop_advlock_desc, fifo_advlock }, /* advlock */ - { &vop_blkatoff_desc, fifo_blkatoff }, /* blkatoff */ - { &vop_valloc_desc, fifo_valloc }, /* valloc */ - { &vop_reallocblks_desc, fifo_reallocblks }, /* reallocblks */ - { &vop_vfree_desc, ffs_vfree }, /* vfree */ - { &vop_truncate_desc, fifo_truncate }, /* truncate */ - { &vop_update_desc, ffs_update }, /* update */ { &vop_bwrite_desc, vn_bwrite }, /* bwrite */ - { (struct vnodeop_desc*)NULL, (int(*) __P((void *)))NULL } + { &vop_putpages_desc, fifo_putpages }, /* putpages */ + { &vop_openextattr_desc, ffs_openextattr }, /* openextattr */ + { &vop_closeextattr_desc, ffs_closeextattr }, /* closeextattr */ + { &vop_getextattr_desc, ffs_getextattr }, /* getextattr */ + { &vop_setextattr_desc, ffs_setextattr }, /* setextattr */ + { &vop_listextattr_desc, ffs_listextattr }, /* listextattr */ + { &vop_deleteextattr_desc, ffs_deleteextattr }, /* deleteextattr */ + { NULL, NULL } }; -struct vnodeopv_desc ffs_fifoop_opv_desc = +const struct vnodeopv_desc ffs_fifoop_opv_desc = { &ffs_fifoop_p, ffs_fifoop_entries }; -int doclusterread = 0; -int doclusterwrite = 0; - #include +int +ffs_fsync(void *v) +{ + struct vop_fsync_args /* { + struct vnode *a_vp; + kauth_cred_t a_cred; + int a_flags; + off_t a_offlo; + off_t a_offhi; + struct lwp *a_l; + } */ *ap = v; + struct buf *bp; + int s, num, error, i; + struct indir ia[NIADDR + 1]; + int bsize; + daddr_t blk_high; + struct vnode *vp; + + /* + * XXX no easy way to sync a range in a file with softdep. + */ + if ((ap->a_offlo == 0 && ap->a_offhi == 0) || DOINGSOFTDEP(ap->a_vp) || + (ap->a_vp->v_type != VREG)) + return ffs_full_fsync(v); + + vp = ap->a_vp; + + bsize = ap->a_vp->v_mount->mnt_stat.f_iosize; + blk_high = ap->a_offhi / bsize; + if (ap->a_offhi % bsize != 0) + blk_high++; + + /* + * First, flush all pages in range. 
+ */ + + simple_lock(&vp->v_interlock); + error = VOP_PUTPAGES(vp, trunc_page(ap->a_offlo), + round_page(ap->a_offhi), PGO_CLEANIT | + ((ap->a_flags & FSYNC_WAIT) ? PGO_SYNCIO : 0)); + if (error) { + return error; + } + + /* + * Then, flush indirect blocks. + */ + + s = splbio(); + if (blk_high >= NDADDR) { + error = ufs_getlbns(vp, blk_high, ia, &num); + if (error) { + splx(s); + return error; + } + for (i = 0; i < num; i++) { + bp = incore(vp, ia[i].in_lbn); + if (bp != NULL) { + simple_lock(&bp->b_interlock); + if (!(bp->b_flags & B_BUSY) && (bp->b_flags & B_DELWRI)) { + bp->b_flags |= B_BUSY | B_VFLUSH; + simple_unlock(&bp->b_interlock); + splx(s); + bawrite(bp); + s = splbio(); + } else { + simple_unlock(&bp->b_interlock); + } + } + } + } + + if (ap->a_flags & FSYNC_WAIT) { + simple_lock(&global_v_numoutput_slock); + while (vp->v_numoutput > 0) { + vp->v_flag |= VBWAIT; + ltsleep(&vp->v_numoutput, PRIBIO + 1, "fsync_range", 0, + &global_v_numoutput_slock); + } + simple_unlock(&global_v_numoutput_slock); + } + splx(s); + + error = ffs_update(vp, NULL, NULL, + ((ap->a_flags & (FSYNC_WAIT | FSYNC_DATAONLY)) == FSYNC_WAIT) + ? UPDATE_WAIT : 0); + + if (error == 0 && ap->a_flags & FSYNC_CACHE) { + int l = 0; + VOP_IOCTL(VTOI(vp)->i_devvp, DIOCCACHESYNC, &l, FWRITE, + ap->a_l->l_proc->p_cred, ap->a_l); + } + + return error; +} + +/* + * Synch an open file. + */ +/* ARGSUSED */ +static int +ffs_full_fsync(void *v) +{ + struct vop_fsync_args /* { + struct vnode *a_vp; + kauth_cred_t a_cred; + int a_flags; + off_t a_offlo; + off_t a_offhi; + struct lwp *a_l; + } */ *ap = v; + struct vnode *vp = ap->a_vp; + struct buf *bp, *nbp; + int s, error, passes, skipmeta, inodedeps_only, waitfor; + + if (vp->v_type == VBLK && + vp->v_specmountpoint != NULL && + (vp->v_specmountpoint->mnt_flag & MNT_SOFTDEP)) + softdep_fsync_mountdev(vp); + + inodedeps_only = DOINGSOFTDEP(vp) && (ap->a_flags & FSYNC_RECLAIM) + && vp->v_uobj.uo_npages == 0 && LIST_EMPTY(&vp->v_dirtyblkhd); + + /* + * Flush all dirty data associated with a vnode. + */ + + if (vp->v_type == VREG || vp->v_type == VBLK) { + simple_lock(&vp->v_interlock); + error = VOP_PUTPAGES(vp, 0, 0, PGO_ALLPAGES | PGO_CLEANIT | + ((ap->a_flags & FSYNC_WAIT) ? PGO_SYNCIO : 0)); + if (error) { + return error; + } + } + + passes = NIADDR + 1; + skipmeta = 0; + if (ap->a_flags & FSYNC_WAIT) + skipmeta = 1; + s = splbio(); + +loop: + LIST_FOREACH(bp, &vp->v_dirtyblkhd, b_vnbufs) + bp->b_flags &= ~B_SCANNED; + for (bp = LIST_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) { + nbp = LIST_NEXT(bp, b_vnbufs); + simple_lock(&bp->b_interlock); + if (bp->b_flags & (B_BUSY | B_SCANNED)) { + simple_unlock(&bp->b_interlock); + continue; + } + if ((bp->b_flags & B_DELWRI) == 0) + panic("ffs_fsync: not dirty"); + if (skipmeta && bp->b_lblkno < 0) { + simple_unlock(&bp->b_interlock); + continue; + } + simple_unlock(&bp->b_interlock); + bp->b_flags |= B_BUSY | B_VFLUSH | B_SCANNED; + splx(s); + /* + * On our final pass through, do all I/O synchronously + * so that we can find out if our flush is failing + * because of write errors. + */ + if (passes > 0 || !(ap->a_flags & FSYNC_WAIT)) + (void) bawrite(bp); + else if ((error = bwrite(bp)) != 0) + return (error); + s = splbio(); + /* + * Since we may have slept during the I/O, we need + * to start from a known point. 
+ */ + nbp = LIST_FIRST(&vp->v_dirtyblkhd); + } + if (skipmeta) { + skipmeta = 0; + goto loop; + } + if (ap->a_flags & FSYNC_WAIT) { + simple_lock(&global_v_numoutput_slock); + while (vp->v_numoutput) { + vp->v_flag |= VBWAIT; + (void) ltsleep(&vp->v_numoutput, PRIBIO + 1, + "ffsfsync", 0, &global_v_numoutput_slock); + } + simple_unlock(&global_v_numoutput_slock); + splx(s); + + /* + * Ensure that any filesystem metadata associated + * with the vnode has been written. + */ + if ((error = softdep_sync_metadata(ap)) != 0) + return (error); + + s = splbio(); + if (!LIST_EMPTY(&vp->v_dirtyblkhd)) { + /* + * Block devices associated with filesystems may + * have new I/O requests posted for them even if + * the vnode is locked, so no amount of trying will + * get them clean. Thus we give block devices a + * good effort, then just give up. For all other file + * types, go around and try again until it is clean. + */ + if (passes > 0) { + passes--; + goto loop; + } +#ifdef DIAGNOSTIC + if (vp->v_type != VBLK) + vprint("ffs_fsync: dirty", vp); +#endif + } + } + splx(s); + + if (inodedeps_only) + waitfor = 0; + else + waitfor = (ap->a_flags & FSYNC_WAIT) ? UPDATE_WAIT : 0; + error = ffs_update(vp, NULL, NULL, waitfor); + + if (error == 0 && ap->a_flags & FSYNC_CACHE) { + int i = 0; + VOP_IOCTL(VTOI(vp)->i_devvp, DIOCCACHESYNC, &i, FWRITE, + ap->a_l->l_proc->p_cred, ap->a_l); + } + + return error; +} + /* * Reclaim an inode so that it can be used for other purposes. */ int -ffs_reclaim(v) - void *v; +ffs_reclaim(void *v) { struct vop_reclaim_args /* { struct vnode *a_vp; - struct proc *a_p; + struct lwp *a_l; } */ *ap = v; - register struct vnode *vp = ap->a_vp; + struct vnode *vp = ap->a_vp; + struct inode *ip = VTOI(vp); + struct ufsmount *ump = ip->i_ump; int error; - if ((error = ufs_reclaim(vp, ap->a_p)) != 0) + if ((error = ufs_reclaim(vp, ap->a_l)) != 0) return (error); - + if (ip->i_din.ffs1_din != NULL) { + if (ump->um_fstype == UFS1) + pool_put(&ffs_dinode1_pool, ip->i_din.ffs1_din); + else + pool_put(&ffs_dinode2_pool, ip->i_din.ffs2_din); + } /* * XXX MFS ends up here, too, to free an inode. Should we create * XXX a separate pool for MFS inodes? @@ -258,311 +500,196 @@ ffs_reclaim(v) return (0); } -#include - int -ffs_getpages(v) - void *v; +ffs_getpages(void *v) { struct vop_getpages_args /* { struct vnode *a_vp; - vaddr_t a_offset; - vm_page_t *a_m; + voff_t a_offset; + struct vm_page **a_m; int *a_count; int a_centeridx; vm_prot_t a_access_type; int a_advice; int a_flags; } */ *ap = v; - - int error, npages, cidx, i; - struct buf tmpbuf, *bp; struct vnode *vp = ap->a_vp; - struct uvm_object *uobj = &vp->v_uvm.u_obj; struct inode *ip = VTOI(vp); struct fs *fs = ip->i_fs; - struct vm_page *pg, *pgs[16]; /* XXX 16 */ - struct ucred *cred = curproc->p_ucred; - off_t offset; - UVMHIST_FUNC("ffs_getpages"); UVMHIST_CALLED(ubchist); - -#ifdef DIAGNOSTIC - if (ap->a_centeridx < 0 || ap->a_centeridx > *ap->a_count) { - panic("ffs_getpages: centeridx %d out of range", - ap->a_centeridx); - } -#endif - - if (ap->a_flags & PGO_LOCKED) { - uvn_findpages(uobj, ap->a_offset, ap->a_count, ap->a_m, - UFP_NOWAIT|UFP_NOALLOC|UFP_NORDONLY); - - /* XXX PGO_ALLPAGES? */ - return ap->a_m[ap->a_centeridx] == NULL ? - VM_PAGER_UNLOCK : VM_PAGER_OK; - } - - /* vnode is VOP_LOCKed, uobj is locked */ - - - /* - * XXX do the findpages for our 1 page first, - * change asyncget to take the one page as an arg and - * pretend that its findpages found it. 
- */ - - /* - * kick off a big read first to get some readahead, then - * get the one page we wanted. - */ - - if ((ap->a_flags & PGO_OVERWRITE) == 0 && - (ap->a_offset & (MAXBSIZE - 1)) == 0) { - /* - * XXX pretty sure unlocking here is wrong. - */ - simple_unlock(&uobj->vmobjlock); - uvm_vnp_asyncget(vp, ap->a_offset, MAXBSIZE, fs->fs_bsize); - simple_lock(&uobj->vmobjlock); - } /* - * the desired page isn't resident, we'll have to read it. + * don't allow a softdep write to create pages for only part of a block. + * the dependency tracking requires that all pages be in memory for + * a block involved in a dependency. */ - offset = ap->a_offset + (ap->a_centeridx << PAGE_SHIFT); - npages = 1; - pg = NULL; - uvn_findpages(uobj, offset, &npages, &pg, 0); - simple_unlock(&uobj->vmobjlock); - - /* - * if the page is already resident, just return it. - */ - - if ((pg->flags & PG_FAKE) == 0 && - !((ap->a_access_type & VM_PROT_WRITE) && (pg->flags & PG_RDONLY))) { - ap->a_m[ap->a_centeridx] = pg; - return VM_PAGER_OK; + if (ap->a_flags & PGO_OVERWRITE && + (blkoff(fs, ap->a_offset) != 0 || + blkoff(fs, *ap->a_count << PAGE_SHIFT) != 0) && + DOINGSOFTDEP(ap->a_vp)) { + if ((ap->a_flags & PGO_LOCKED) == 0) { + simple_unlock(&vp->v_interlock); + } + return EINVAL; } + return genfs_getpages(v); +} - UVMHIST_LOG(ubchist, "pg %p flags 0x%x access_type 0x%x", - pg, (int)pg->flags, (int)ap->a_access_type, 0); +/* + * Return the last logical file offset that should be written for this file + * if we're doing a write that ends at "size". + */ - /* - * ok, really read the desired page. - */ +void +ffs_gop_size(struct vnode *vp, off_t size, off_t *eobp, int flags) +{ + struct inode *ip = VTOI(vp); + struct fs *fs = ip->i_fs; + daddr_t olbn, nlbn; - bp = &tmpbuf; - bzero(bp, sizeof *bp); - bp->b_lblkno = lblkno(fs, offset); - error = VOP_BMAP(vp, bp->b_lblkno, NULL, &bp->b_blkno, NULL); - if (error) { - UVMHIST_LOG(ubchist, "VOP_BMAP lbn 0x%x -> %d\n", - bp->b_lblkno, error,0,0); - goto out; + olbn = lblkno(fs, ip->i_size); + nlbn = lblkno(fs, size); + if (nlbn < NDADDR && olbn <= nlbn) { + *eobp = fragroundup(fs, size); + } else { + *eobp = blkroundup(fs, size); } - if (bp->b_blkno == (daddr_t)-1) { - UVMHIST_LOG(ubchist, "lbn 0x%x -> HOLE", bp->b_lblkno,0,0,0); - - /* - * for read faults, we can skip the block allocation - * by marking the page PG_RDONLY and PG_CLEAN. - */ - - if ((ap->a_access_type & VM_PROT_WRITE) == 0) { - uvm_pagezero(pg); - pg->flags |= PG_CLEAN|PG_RDONLY; - UVMHIST_LOG(ubchist, "setting PG_RDONLY", 0,0,0,0); - goto out; - } +} - /* - * for write faults, we must now allocate the backing store - * and make sure the block is zeroed. - */ +int +ffs_openextattr(void *v) +{ + struct vop_openextattr_args /* { + struct vnode *a_vp; + kauth_cred_t a_cred; + struct proc *a_p; + } */ *ap = v; + struct inode *ip = VTOI(ap->a_vp); + struct fs *fs = ip->i_fs; - error = ffs_balloc(ip, bp->b_lblkno, - blksize(fs, ip, bp->b_lblkno), - cred, NULL, &bp->b_blkno, 0); - if (error) { - UVMHIST_LOG(ubchist, "ffs_balloc lbn 0x%x -> %d", - bp->b_lblkno, error,0,0); - goto out; - } + /* Not supported for UFS1 file systems. 
*/ + if (fs->fs_magic == FS_UFS1_MAGIC) + return (EOPNOTSUPP); - simple_lock(&uobj->vmobjlock); - uvm_pager_dropcluster(uobj, NULL, &pg, &npages, 0, 0); - npages = fs->fs_bsize >> PAGE_SHIFT; - uvn_findpages(uobj, offset & ~((off_t)fs->fs_bsize - 1), - &npages, pgs, 0); - for (i = 0; i < npages; i++) { - uvm_pagezero(pgs[i]); - uvm_pageactivate(pgs[i]); + /* XXX Not implemented for UFS2 file systems. */ + return (EOPNOTSUPP); +} - /* - * don't bother clearing mod/ref, the block is - * being modified anyways. - */ - - pgs[i]->flags &= ~(PG_FAKE|PG_RDONLY); - } - cidx = (offset >> PAGE_SHIFT) - (pgs[0]->offset >> PAGE_SHIFT); - pg = pgs[cidx]; - pgs[cidx] = NULL; - uvm_pager_dropcluster(uobj, NULL, pgs, &npages, 0, 0); - simple_unlock(&uobj->vmobjlock); - UVMHIST_LOG(ubchist, "cleared pages",0,0,0,0); - goto out; - } +int +ffs_closeextattr(void *v) +{ + struct vop_closeextattr_args /* { + struct vnode *a_vp; + int a_commit; + kauth_cred_t a_cred; + struct proc *a_p; + } */ *ap = v; + struct inode *ip = VTOI(ap->a_vp); + struct fs *fs = ip->i_fs; - /* adjust physical blkno for partial blocks */ - bp->b_blkno += (offset - lblktosize(fs, bp->b_lblkno)) >> DEV_BSHIFT; - UVMHIST_LOG(ubchist, "vp %p bp %p lblkno 0x%x blkno 0x%x", - vp, bp, bp->b_lblkno, bp->b_blkno); + /* Not supported for UFS1 file systems. */ + if (fs->fs_magic == FS_UFS1_MAGIC) + return (EOPNOTSUPP); - /* - * don't bother reading the pages if we're just going to - * overwrite them. - */ - if (ap->a_flags & PGO_OVERWRITE) { - UVMHIST_LOG(ubchist, "PGO_OVERWRITE",0,0,0,0); + /* XXX Not implemented for UFS2 file systems. */ + return (EOPNOTSUPP); +} - /* XXX for now, zero the page */ - if (pg->flags & PG_FAKE) { - uvm_pagezero(pg); - } +int +ffs_getextattr(void *v) +{ + struct vop_getextattr_args /* { + struct vnode *a_vp; + int a_attrnamespace; + const char *a_name; + struct uio *a_uio; + size_t *a_size; + kauth_cred_t a_cred; + struct proc *a_p; + } */ *ap = v; + struct inode *ip = VTOI(ap->a_vp); + struct fs *fs = ip->i_fs; - goto out; + if (fs->fs_magic == FS_UFS1_MAGIC) { +#ifdef UFS_EXTATTR + return (ufs_getextattr(ap)); +#else + return (EOPNOTSUPP); +#endif } - bp->b_bufsize = PAGE_SIZE; - bp->b_data = (void *)uvm_pagermapin(&pg, 1, NULL, M_WAITOK); - bp->b_bcount = bp->b_lblkno < 0 ? bp->b_bufsize : - min(bp->b_bufsize, fragroundup(fs, vp->v_uvm.u_size - offset)); - bp->b_vp = vp; - bp->b_flags = B_BUSY|B_READ; - VOP_STRATEGY(bp); - error = biowait(bp); - uvm_pagermapout((vaddr_t)bp->b_data, *ap->a_count); - -out: - if (error) { - simple_lock(&uobj->vmobjlock); - if (pg->flags & PG_WANTED) { - wakeup(pg); - } - UVM_PAGE_OWN(pg, NULL); - uvm_lock_pageq(); - uvm_pagefree(pg); - uvm_unlock_pageq(); - simple_unlock(&uobj->vmobjlock); - } else { - pg->flags &= ~(PG_FAKE); - pmap_clear_modify(PMAP_PGARG(pg)); - pmap_clear_reference(PMAP_PGARG(pg)); - uvm_pageactivate(pg); - ap->a_m[ap->a_centeridx] = pg; - } - UVMHIST_LOG(ubchist, "returning, error %d", error,0,0,0); - return error ? VM_PAGER_ERROR : VM_PAGER_OK; + /* XXX Not implemented for UFS2 file systems. */ + return (EOPNOTSUPP); } -/* - * Vnode op for VM putpages. 
- */ int -ffs_putpages(v) - void *v; +ffs_setextattr(void *v) { - struct vop_putpages_args /* { + struct vop_setextattr_args /* { struct vnode *a_vp; - struct vm_page **a_m; - int a_count; - int a_sync; - int *a_rtvals; + int a_attrnamespace; + const char *a_name; + struct uio *a_uio; + kauth_cred_t a_cred; + struct proc *a_p; } */ *ap = v; - - int s, error; - int bsize; - vaddr_t kva, offset; - struct vm_page *pg; - struct buf tmpbuf, *bp; - struct vnode *vp = ap->a_vp; - struct inode *ip = VTOI(vp); + struct inode *ip = VTOI(ap->a_vp); struct fs *fs = ip->i_fs; - UVMHIST_FUNC("ffs_putpages"); UVMHIST_CALLED(ubchist); - - pg = ap->a_m[0]; -#ifdef DEBUG - /* XXX verify that pages given are in fact physically contiguous. */ + if (fs->fs_magic == FS_UFS1_MAGIC) { +#ifdef UFS_EXTATTR + return (ufs_setextattr(ap)); +#else + return (EOPNOTSUPP); #endif + } - kva = uvm_pagermapin(ap->a_m, ap->a_count, NULL, M_WAITOK); - if (kva == 0) { - return VM_PAGER_AGAIN; - } - - if (ap->a_sync) { - bp = &tmpbuf; - bzero(bp, sizeof *bp); - } - else { - struct uvm_aiobuf *abp = pool_get(uvm_aiobuf_pool, PR_WAITOK); - abp->aio.aiodone = uvm_aio_aiodone; - abp->aio.kva = kva; - abp->aio.npages = ap->a_count; - abp->aio.pd_ptr = abp; - /* XXX pagedaemon */ - abp->aio.flags = (curproc == uvm.pagedaemon_proc) ? - UVM_AIO_PAGEDAEMON : 0; - bp = &abp->buf; - bzero(bp, sizeof *bp); - bp->b_flags = B_CALL|B_ASYNC; - bp->b_iodone = uvm_aio_biodone; - } - - offset = pg->offset; - bp->b_bufsize = ap->a_count << PAGE_SHIFT; - bp->b_data = (void *)kva; - bp->b_lblkno = lblkno(fs, offset); - bp->b_bcount = min(bp->b_bufsize, - fragroundup(fs, vp->v_uvm.u_size - offset)); - bp->b_flags |= B_BUSY|B_WRITE; - bp->b_vp = vp; + /* XXX Not implemented for UFS2 file systems. */ + return (EOPNOTSUPP); +} +int +ffs_listextattr(void *v) +{ + struct vop_listextattr_args /* { + struct vnode *a_vp; + int a_attrnamespace; + struct uio *a_uio; + size_t *a_size; + kauth_cred_t a_cred; + struct proc *a_p; + } */ *ap = v; + struct inode *ip = VTOI(ap->a_vp); + struct fs *fs = ip->i_fs; - bsize = blksize(fs, ip, bp->b_lblkno); - error = VOP_BMAP(vp, bp->b_lblkno, NULL, &bp->b_blkno, NULL); - if (error) { - UVMHIST_LOG(ubchist, "ffs_balloc -> %d", error, 0,0,0); - goto out; - } + /* Not supported for UFS1 file systems. */ + if (fs->fs_magic == FS_UFS1_MAGIC) + return (EOPNOTSUPP); - /* this could be ifdef DIAGNOSTIC, but it's really important */ - if (bp->b_blkno == (daddr_t)-1) { - panic("ffs_putpages: no backing store vp %p lbn 0x%x", - vp, bp->b_lblkno); - } + /* XXX Not implemented for UFS2 file systems. */ + return (EOPNOTSUPP); +} - /* adjust physical blkno for partial blocks */ - bp->b_blkno += (offset - lblktosize(fs, bp->b_lblkno)) >> DEV_BSHIFT; - UVMHIST_LOG(ubchist, "pg %p vp %p lblkno 0x%x blkno 0x%x", - pg, vp, bp->b_lblkno, bp->b_blkno); +int +ffs_deleteextattr(void *v) +{ + struct vop_deleteextattr_args /* { + struct vnode *a_vp; + int a_attrnamespace; + kauth_cred_t a_cred; + struct proc *a_p; + } */ *ap = v; + struct inode *ip = VTOI(ap->a_vp); + struct fs *fs = ip->i_fs; - s = splbio(); - vp->v_numoutput++; - splx(s); - VOP_STRATEGY(bp); - if (!ap->a_sync) { - return VM_PAGER_PEND; + if (fs->fs_magic == FS_UFS1_MAGIC) { +#ifdef UFS_EXTATTR + return (ufs_deleteextattr(ap)); +#else + return (EOPNOTSUPP); +#endif } - error = biowait(bp); - -out: - uvm_pagermapout(kva, ap->a_count); - UVMHIST_LOG(ubchist, "returning, error %d", error,0,0,0); - return error ? 
VM_PAGER_ERROR : VM_PAGER_OK; + /* XXX Not implemented for UFS2 file systems. */ + return (EOPNOTSUPP); }
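
A note on the new fsync interface exercised above: the a_offlo/a_offhi members of vop_fsync_args are what let the rewritten ffs_fsync() flush only part of a file, while softdep mounts, non-regular files, and a zero-length range fall through to ffs_full_fsync(). The caller-side sketch below is illustrative only; it is not part of the diff, sync_byte_range() is a hypothetical helper, and it merely shows how a kernel caller of this era (for example, the fsync_range(2) path) would push one byte range through VOP_FSYNC(), assuming the usual rule that the vnode lock is held across the call.

#include <sys/param.h>
#include <sys/vnode.h>
#include <sys/kauth.h>

/*
 * Hypothetical helper (not from the diff): synchronously flush the
 * byte range [lo, hi) of an FFS vnode through the range-aware
 * ffs_fsync() path shown above.  The argument order of VOP_FSYNC()
 * follows the vop_fsync_args structure in the diff.
 */
static int
sync_byte_range(struct vnode *vp, off_t lo, off_t hi,
    kauth_cred_t cred, struct lwp *l)
{
	int error;

	/* VOP_FSYNC() expects the vnode to be locked by the caller. */
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	error = VOP_FSYNC(vp, cred, FSYNC_WAIT, lo, hi, l);
	VOP_UNLOCK(vp, 0);
	return error;
}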