Annotation of src/sys/fs/tmpfs/tmpfs_vnops.c, Revision 1.136
1.136 ! ad 1: /* $NetBSD: tmpfs_vnops.c,v 1.135 2020/03/14 13:39:36 ad Exp $ */
1.1 jmmv 2:
3: /*
1.45 ad 4: * Copyright (c) 2005, 2006, 2007 The NetBSD Foundation, Inc.
1.1 jmmv 5: * All rights reserved.
6: *
7: * This code is derived from software contributed to The NetBSD Foundation
1.12 jmmv 8: * by Julio M. Merino Vidal, developed as part of Google's Summer of Code
9: * 2005 program.
1.1 jmmv 10: *
11: * Redistribution and use in source and binary forms, with or without
12: * modification, are permitted provided that the following conditions
13: * are met:
14: * 1. Redistributions of source code must retain the above copyright
15: * notice, this list of conditions and the following disclaimer.
16: * 2. Redistributions in binary form must reproduce the above copyright
17: * notice, this list of conditions and the following disclaimer in the
18: * documentation and/or other materials provided with the distribution.
19: *
20: * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21: * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22: * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23: * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24: * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25: * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26: * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27: * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28: * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29: * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30: * POSSIBILITY OF SUCH DAMAGE.
31: */
32:
33: /*
34: * tmpfs vnode interface.
35: */
36:
37: #include <sys/cdefs.h>
1.136 ! ad 38: __KERNEL_RCSID(0, "$NetBSD: tmpfs_vnops.c,v 1.135 2020/03/14 13:39:36 ad Exp $");
1.1 jmmv 39:
40: #include <sys/param.h>
41: #include <sys/dirent.h>
42: #include <sys/fcntl.h>
43: #include <sys/event.h>
44: #include <sys/malloc.h>
45: #include <sys/namei.h>
46: #include <sys/stat.h>
47: #include <sys/uio.h>
48: #include <sys/unistd.h>
49: #include <sys/vnode.h>
1.15 jmmv 50: #include <sys/lockf.h>
1.24 christos 51: #include <sys/kauth.h>
1.111 hannken 52: #include <sys/atomic.h>
1.1 jmmv 53:
54: #include <uvm/uvm.h>
55:
56: #include <miscfs/fifofs/fifo.h>
1.60 elad 57: #include <miscfs/genfs/genfs.h>
1.1 jmmv 58: #include <fs/tmpfs/tmpfs_vnops.h>
59: #include <fs/tmpfs/tmpfs.h>
60:
/*
 * vnode operations vector used for files stored in a tmpfs file system.
 *
 * Unsupported operations (fallocate/fdiscard) are routed to
 * genfs_eopnotsupp(); everything else maps to a tmpfs_* implementation.
 */
int (**tmpfs_vnodeop_p)(void *);
const struct vnodeopv_entry_desc tmpfs_vnodeop_entries[] = {
	{ &vop_default_desc, vn_default_error },
	{ &vop_lookup_desc, tmpfs_lookup },
	{ &vop_create_desc, tmpfs_create },
	{ &vop_mknod_desc, tmpfs_mknod },
	{ &vop_open_desc, tmpfs_open },
	{ &vop_close_desc, tmpfs_close },
	{ &vop_access_desc, tmpfs_access },
	{ &vop_getattr_desc, tmpfs_getattr },
	{ &vop_setattr_desc, tmpfs_setattr },
	{ &vop_read_desc, tmpfs_read },
	{ &vop_write_desc, tmpfs_write },
	{ &vop_fallocate_desc, genfs_eopnotsupp },
	{ &vop_fdiscard_desc, genfs_eopnotsupp },
	{ &vop_ioctl_desc, tmpfs_ioctl },
	{ &vop_fcntl_desc, tmpfs_fcntl },
	{ &vop_poll_desc, tmpfs_poll },
	{ &vop_kqfilter_desc, tmpfs_kqfilter },
	{ &vop_revoke_desc, tmpfs_revoke },
	{ &vop_mmap_desc, tmpfs_mmap },
	{ &vop_fsync_desc, tmpfs_fsync },
	{ &vop_seek_desc, tmpfs_seek },
	{ &vop_remove_desc, tmpfs_remove },
	{ &vop_link_desc, tmpfs_link },
	{ &vop_rename_desc, tmpfs_rename },
	{ &vop_mkdir_desc, tmpfs_mkdir },
	{ &vop_rmdir_desc, tmpfs_rmdir },
	{ &vop_symlink_desc, tmpfs_symlink },
	{ &vop_readdir_desc, tmpfs_readdir },
	{ &vop_readlink_desc, tmpfs_readlink },
	{ &vop_abortop_desc, tmpfs_abortop },
	{ &vop_inactive_desc, tmpfs_inactive },
	{ &vop_reclaim_desc, tmpfs_reclaim },
	{ &vop_lock_desc, tmpfs_lock },
	{ &vop_unlock_desc, tmpfs_unlock },
	{ &vop_bmap_desc, tmpfs_bmap },
	{ &vop_strategy_desc, tmpfs_strategy },
	{ &vop_print_desc, tmpfs_print },
	{ &vop_pathconf_desc, tmpfs_pathconf },
	{ &vop_islocked_desc, tmpfs_islocked },
	{ &vop_advlock_desc, tmpfs_advlock },
	{ &vop_bwrite_desc, tmpfs_bwrite },
	{ &vop_getpages_desc, tmpfs_getpages },
	{ &vop_putpages_desc, tmpfs_putpages },
	{ &vop_whiteout_desc, tmpfs_whiteout },
	{ NULL, NULL }
};

/* Descriptor used by vfs_attach() to register the vector above. */
const struct vnodeopv_desc tmpfs_vnodeop_opv_desc = {
	&tmpfs_vnodeop_p, tmpfs_vnodeop_entries
};
1.1 jmmv 116:
/*
 * tmpfs_lookup: path name traversal routine.
 *
 * Arguments: dvp (directory being searched), vpp (result),
 * cnp (component name - path).
 *
 * => Caller holds a reference and lock on dvp.
 * => We return looked-up vnode (vpp) locked, with a reference held.
 */
int
tmpfs_lookup(void *v)
{
	struct vop_lookup_v2_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
	} */ *ap = v;
	vnode_t *dvp = ap->a_dvp, **vpp = ap->a_vpp;
	struct componentname *cnp = ap->a_cnp;
	const bool lastcn = (cnp->cn_flags & ISLASTCN) != 0;
	tmpfs_node_t *dnode, *tnode;
	tmpfs_dirent_t *de;
	int cachefound, iswhiteout;
	int error;

	KASSERT(VOP_ISLOCKED(dvp));

	dnode = VP_TO_TMPFS_DIR(dvp);
	*vpp = NULL;

	/* Check accessibility of directory. */
	error = VOP_ACCESS(dvp, VEXEC, cnp->cn_cred);
	if (error) {
		goto out;
	}

	/*
	 * If requesting the last path component on a read-only file system
	 * with a write operation, deny it.
	 */
	if (lastcn && (dvp->v_mount->mnt_flag & MNT_RDONLY) != 0 &&
	    (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME)) {
		error = EROFS;
		goto out;
	}

	/*
	 * Avoid doing a linear scan of the directory if the requested
	 * directory/name couple is already in the cache.
	 */
	cachefound = cache_lookup(dvp, cnp->cn_nameptr, cnp->cn_namelen,
				  cnp->cn_nameiop, cnp->cn_flags,
				  &iswhiteout, vpp);
	if (iswhiteout) {
		cnp->cn_flags |= ISWHITEOUT;
	}
	if (cachefound && *vpp == NULLVP) {
		/* Negative cache hit. */
		error = ENOENT;
		goto out;
	} else if (cachefound) {
		/* Positive cache hit: *vpp already set by cache_lookup(). */
		error = 0;
		goto out;
	}

	/*
	 * Treat an unlinked directory as empty (no "." or "..")
	 */
	if (dnode->tn_links == 0) {
		KASSERT(dnode->tn_size == 0);
		error = ENOENT;
		goto out;
	}

	if (cnp->cn_flags & ISDOTDOT) {
		tmpfs_node_t *pnode;

		/*
		 * Lookup of ".." case.
		 */
		if (lastcn && cnp->cn_nameiop == RENAME) {
			error = EINVAL;
			goto out;
		}
		KASSERT(dnode->tn_type == VDIR);
		pnode = dnode->tn_spec.tn_dir.tn_parent;
		if (pnode == NULL) {
			/* Root (or detached) directory has no parent. */
			error = ENOENT;
			goto out;
		}

		error = vcache_get(dvp->v_mount, &pnode, sizeof(pnode), vpp);
		goto out;
	} else if (cnp->cn_namelen == 1 && cnp->cn_nameptr[0] == '.') {
		/*
		 * Lookup of "." case.
		 */
		if (lastcn && cnp->cn_nameiop == RENAME) {
			error = EISDIR;
			goto out;
		}
		/* Return the same (already locked) vnode with a new ref. */
		vref(dvp);
		*vpp = dvp;
		error = 0;
		goto done;
	}

	/*
	 * Other lookup cases: perform directory scan.
	 */
	de = tmpfs_dir_lookup(dnode, cnp);
	if (de == NULL || de->td_node == TMPFS_NODE_WHITEOUT) {
		/*
		 * The entry was not found in the directory. This is valid
		 * if we are creating or renaming an entry and are working
		 * on the last component of the path name.
		 */
		if (lastcn && (cnp->cn_nameiop == CREATE ||
		    cnp->cn_nameiop == RENAME)) {
			error = VOP_ACCESS(dvp, VWRITE, cnp->cn_cred);
			if (error) {
				goto out;
			}
			error = EJUSTRETURN;
		} else {
			error = ENOENT;
		}
		if (de) {
			KASSERT(de->td_node == TMPFS_NODE_WHITEOUT);
			cnp->cn_flags |= ISWHITEOUT;
		}
		goto done;
	}

	tnode = de->td_node;

	/*
	 * If it is not the last path component and found a non-directory
	 * or non-link entry (which may itself be pointing to a directory),
	 * raise an error.
	 */
	if (!lastcn && tnode->tn_type != VDIR && tnode->tn_type != VLNK) {
		error = ENOTDIR;
		goto out;
	}

	/* Check the permissions. */
	if (lastcn && (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME)) {
		error = VOP_ACCESS(dvp, VWRITE, cnp->cn_cred);
		if (error)
			goto out;

		/* Sticky directory: only owner (or root) may remove. */
		if ((dnode->tn_mode & S_ISTXT) != 0) {
			error = kauth_authorize_vnode(cnp->cn_cred,
			    KAUTH_VNODE_DELETE, tnode->tn_vnode,
			    dnode->tn_vnode, genfs_can_sticky(cnp->cn_cred,
			    dnode->tn_uid, tnode->tn_uid));
			if (error) {
				error = EPERM;
				goto out;
			}
		}
	}

	/* Get a vnode for the matching entry. */
	error = vcache_get(dvp->v_mount, &tnode, sizeof(tnode), vpp);
done:
	/*
	 * Cache the result, unless request was for creation (as it does
	 * not improve the performance).
	 */
	if (cnp->cn_nameiop != CREATE) {
		cache_enter(dvp, *vpp, cnp->cn_nameptr, cnp->cn_namelen,
			    cnp->cn_flags);
	}
out:
	KASSERT(VOP_ISLOCKED(dvp));

	return error;
}
297:
298: int
299: tmpfs_create(void *v)
300: {
1.114 hannken 301: struct vop_create_v3_args /* {
1.83 rmind 302: struct vnode *a_dvp;
303: struct vnode **a_vpp;
304: struct componentname *a_cnp;
305: struct vattr *a_vap;
306: } */ *ap = v;
307: vnode_t *dvp = ap->a_dvp, **vpp = ap->a_vpp;
308: struct componentname *cnp = ap->a_cnp;
309: struct vattr *vap = ap->a_vap;
1.1 jmmv 310:
1.83 rmind 311: KASSERT(VOP_ISLOCKED(dvp));
1.1 jmmv 312: KASSERT(vap->va_type == VREG || vap->va_type == VSOCK);
1.107 rmind 313: return tmpfs_construct_node(dvp, vpp, vap, cnp, NULL);
1.1 jmmv 314: }
315:
316: int
317: tmpfs_mknod(void *v)
318: {
1.114 hannken 319: struct vop_mknod_v3_args /* {
1.83 rmind 320: struct vnode *a_dvp;
321: struct vnode **a_vpp;
322: struct componentname *a_cnp;
323: struct vattr *a_vap;
324: } */ *ap = v;
325: vnode_t *dvp = ap->a_dvp, **vpp = ap->a_vpp;
326: struct componentname *cnp = ap->a_cnp;
327: struct vattr *vap = ap->a_vap;
328: enum vtype vt = vap->va_type;
1.1 jmmv 329:
1.83 rmind 330: if (vt != VBLK && vt != VCHR && vt != VFIFO) {
1.119 rmind 331: *vpp = NULL;
1.1 jmmv 332: return EINVAL;
1.54 pooka 333: }
1.107 rmind 334: return tmpfs_construct_node(dvp, vpp, vap, cnp, NULL);
1.1 jmmv 335: }
336:
337: int
338: tmpfs_open(void *v)
339: {
1.83 rmind 340: struct vop_open_args /* {
341: struct vnode *a_vp;
342: int a_mode;
343: kauth_cred_t a_cred;
344: } */ *ap = v;
345: vnode_t *vp = ap->a_vp;
346: mode_t mode = ap->a_mode;
347: tmpfs_node_t *node;
1.1 jmmv 348:
349: KASSERT(VOP_ISLOCKED(vp));
350:
351: node = VP_TO_TMPFS_NODE(vp);
1.32 jmmv 352:
1.1 jmmv 353: /* If the file is marked append-only, deny write requests. */
1.83 rmind 354: if ((node->tn_flags & APPEND) != 0 &&
355: (mode & (FWRITE | O_APPEND)) == FWRITE) {
356: return EPERM;
357: }
358: return 0;
1.1 jmmv 359: }
360:
361: int
362: tmpfs_close(void *v)
363: {
1.83 rmind 364: struct vop_close_args /* {
365: struct vnode *a_vp;
366: int a_fflag;
367: kauth_cred_t a_cred;
368: } */ *ap = v;
1.108 rmind 369: vnode_t *vp __diagused = ap->a_vp;
1.1 jmmv 370:
371: KASSERT(VOP_ISLOCKED(vp));
1.17 yamt 372: return 0;
1.1 jmmv 373: }
374:
/*
 * tmpfs_access: check whether the credentials may access the vnode
 * with the requested mode.  Final decision is delegated to kauth(9).
 */
int
tmpfs_access(void *v)
{
	struct vop_access_args /* {
		struct vnode *a_vp;
		int a_mode;
		kauth_cred_t a_cred;
	} */ *ap = v;
	vnode_t *vp = ap->a_vp;
	mode_t mode = ap->a_mode;
	kauth_cred_t cred = ap->a_cred;
	tmpfs_node_t *node = VP_TO_TMPFS_NODE(vp);
	const bool writing = (mode & VWRITE) != 0;

	KASSERT(VOP_ISLOCKED(vp));

	/* Possible? */
	switch (vp->v_type) {
	case VDIR:
	case VLNK:
	case VREG:
		/* Data-bearing nodes cannot be written on a read-only mount. */
		if (writing && (vp->v_mount->mnt_flag & MNT_RDONLY) != 0) {
			return EROFS;
		}
		break;
	case VBLK:
	case VCHR:
	case VSOCK:
	case VFIFO:
		/* No extra restrictions for special files here. */
		break;
	default:
		return EINVAL;
	}
	/* Immutable files may never be written. */
	if (writing && (node->tn_flags & IMMUTABLE) != 0) {
		return EPERM;
	}

	/* Authorize via kauth(9) with the classic mode-bit check as fallback. */
	return kauth_authorize_vnode(cred, KAUTH_ACCESS_ACTION(mode,
	    vp->v_type, node->tn_mode), vp, NULL, genfs_can_access(vp->v_type,
	    node->tn_mode, node->tn_uid, node->tn_gid, mode, cred));
}
416:
417: int
418: tmpfs_getattr(void *v)
419: {
1.83 rmind 420: struct vop_getattr_args /* {
421: struct vnode *a_vp;
422: struct vattr *a_vap;
423: kauth_cred_t a_cred;
424: } */ *ap = v;
425: vnode_t *vp = ap->a_vp;
426: struct vattr *vap = ap->a_vap;
427: tmpfs_node_t *node = VP_TO_TMPFS_NODE(vp);
1.1 jmmv 428:
1.66 pooka 429: vattr_null(vap);
1.1 jmmv 430:
431: vap->va_type = vp->v_type;
432: vap->va_mode = node->tn_mode;
433: vap->va_nlink = node->tn_links;
434: vap->va_uid = node->tn_uid;
435: vap->va_gid = node->tn_gid;
436: vap->va_fsid = vp->v_mount->mnt_stat.f_fsidx.__fsid_val[0];
437: vap->va_fileid = node->tn_id;
438: vap->va_size = node->tn_size;
439: vap->va_blocksize = PAGE_SIZE;
440: vap->va_atime = node->tn_atime;
441: vap->va_mtime = node->tn_mtime;
442: vap->va_ctime = node->tn_ctime;
443: vap->va_birthtime = node->tn_birthtime;
1.85 rmind 444: vap->va_gen = TMPFS_NODE_GEN(node);
1.1 jmmv 445: vap->va_flags = node->tn_flags;
446: vap->va_rdev = (vp->v_type == VBLK || vp->v_type == VCHR) ?
1.83 rmind 447: node->tn_spec.tn_dev.tn_rdev : VNOVAL;
1.1 jmmv 448: vap->va_bytes = round_page(node->tn_size);
449: vap->va_filerev = VNOVAL;
450: vap->va_vaflags = 0;
451: vap->va_spare = VNOVAL; /* XXX */
452:
453: return 0;
454: }
455:
/*
 * tmpfs_setattr: change node attributes.  Each settable attribute group
 * is handled by its own tmpfs_ch*() helper; the first failure stops
 * further changes (subsequent helpers are skipped via the error check).
 */
int
tmpfs_setattr(void *v)
{
	struct vop_setattr_args /* {
		struct vnode *a_vp;
		struct vattr *a_vap;
		kauth_cred_t a_cred;
	} */ *ap = v;
	vnode_t *vp = ap->a_vp;
	struct vattr *vap = ap->a_vap;
	kauth_cred_t cred = ap->a_cred;
	lwp_t *l = curlwp;
	int error = 0;

	KASSERT(VOP_ISLOCKED(vp));

	/* Abort if any unsettable attribute is given. */
	if (vap->va_type != VNON || vap->va_nlink != VNOVAL ||
	    vap->va_fsid != VNOVAL || vap->va_fileid != VNOVAL ||
	    vap->va_blocksize != VNOVAL || vap->va_ctime.tv_sec != VNOVAL ||
	    vap->va_gen != VNOVAL || vap->va_rdev != VNOVAL ||
	    vap->va_bytes != VNOVAL) {
		return EINVAL;
	}

	if (error == 0 && vap->va_flags != VNOVAL)
		error = tmpfs_chflags(vp, vap->va_flags, cred, l);

	if (error == 0 && vap->va_size != VNOVAL)
		error = tmpfs_chsize(vp, vap->va_size, cred, l);

	if (error == 0 && (vap->va_uid != VNOVAL || vap->va_gid != VNOVAL))
		error = tmpfs_chown(vp, vap->va_uid, vap->va_gid, cred, l);

	if (error == 0 && vap->va_mode != VNOVAL)
		error = tmpfs_chmod(vp, vap->va_mode, cred, l);

	/* Timestamps are changed last, only if everything else succeeded. */
	const bool chsometime =
	    vap->va_atime.tv_sec != VNOVAL ||
	    vap->va_mtime.tv_sec != VNOVAL ||
	    vap->va_birthtime.tv_sec != VNOVAL;
	if (error == 0 && chsometime) {
		error = tmpfs_chtimes(vp, &vap->va_atime, &vap->va_mtime,
		    &vap->va_birthtime, vap->va_vaflags, cred, l);
	}
	return error;
}
503:
/*
 * tmpfs_read: read from a regular file by mapping the backing UVM
 * aobj pages through UBC, one window at a time.
 */
int
tmpfs_read(void *v)
{
	struct vop_read_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int a_ioflag;
		kauth_cred_t a_cred;
	} */ *ap = v;
	vnode_t *vp = ap->a_vp;
	struct uio *uio = ap->a_uio;
	const int ioflag = ap->a_ioflag;
	tmpfs_node_t *node;
	struct uvm_object *uobj;
	int error;

	KASSERT(VOP_ISLOCKED(vp));

	if (vp->v_type == VDIR) {
		return EISDIR;
	}
	if (uio->uio_offset < 0 || vp->v_type != VREG) {
		return EINVAL;
	}

	/* Note: reading zero bytes should not update atime. */
	if (uio->uio_resid == 0) {
		return 0;
	}

	node = VP_TO_TMPFS_NODE(vp);
	uobj = node->tn_spec.tn_reg.tn_aobj;
	error = 0;

	while (error == 0 && uio->uio_resid > 0) {
		vsize_t len;

		/* Stop at end-of-file. */
		if (node->tn_size <= uio->uio_offset) {
			break;
		}
		len = MIN(node->tn_size - uio->uio_offset, uio->uio_resid);
		if (len == 0) {
			break;
		}
		error = ubc_uiomove(uobj, uio, len, IO_ADV_DECODE(ioflag),
		    UBC_READ | UBC_PARTIALOK | UBC_VNODE_FLAGS(vp));
	}

	tmpfs_update(vp, TMPFS_UPDATE_ATIME);
	return error;
}
555:
/*
 * tmpfs_write: write to a regular file.  The file is first grown (if
 * the write extends past EOF), then the data is copied in via UBC; on
 * failure the file is shrunk back to its original size.
 */
int
tmpfs_write(void *v)
{
	struct vop_write_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int a_ioflag;
		kauth_cred_t a_cred;
	} */ *ap = v;
	vnode_t *vp = ap->a_vp;
	struct uio *uio = ap->a_uio;
	const int ioflag = ap->a_ioflag;
	tmpfs_node_t *node;
	struct uvm_object *uobj;
	off_t oldsize;
	int error;

	KASSERT(VOP_ISLOCKED(vp));

	/* Remember the size for the error-path KASSERT and the rollback. */
	node = VP_TO_TMPFS_NODE(vp);
	oldsize = node->tn_size;

	if ((vp->v_mount->mnt_flag & MNT_RDONLY) != 0) {
		error = EROFS;
		goto out;
	}

	if (uio->uio_offset < 0 || vp->v_type != VREG) {
		error = EINVAL;
		goto out;
	}
	if (uio->uio_resid == 0) {
		error = 0;
		goto out;
	}
	/* O_APPEND: always write at the current end-of-file. */
	if (ioflag & IO_APPEND) {
		uio->uio_offset = node->tn_size;
	}

	/* Grow the file first if the write extends beyond EOF. */
	if (uio->uio_offset + uio->uio_resid > node->tn_size) {
		error = tmpfs_reg_resize(vp, uio->uio_offset + uio->uio_resid);
		if (error)
			goto out;
	}

	uobj = node->tn_spec.tn_reg.tn_aobj;
	error = 0;
	while (error == 0 && uio->uio_resid > 0) {
		vsize_t len;

		len = MIN(node->tn_size - uio->uio_offset, uio->uio_resid);
		if (len == 0) {
			break;
		}
		error = ubc_uiomove(uobj, uio, len, IO_ADV_DECODE(ioflag),
		    UBC_WRITE | UBC_VNODE_FLAGS(vp));
	}
	/* Undo the extension on failure. */
	if (error) {
		(void)tmpfs_reg_resize(vp, oldsize);
	}

	tmpfs_update(vp, TMPFS_UPDATE_MTIME | TMPFS_UPDATE_CTIME);
	VN_KNOTE(vp, NOTE_WRITE);
out:
	if (error) {
		KASSERT(oldsize == node->tn_size);
	} else {
		KASSERT(uio->uio_resid == 0);
	}
	return error;
}
627:
628: int
629: tmpfs_fsync(void *v)
630: {
1.83 rmind 631: struct vop_fsync_args /* {
632: struct vnode *a_vp;
633: kauth_cred_t a_cred;
634: int a_flags;
635: off_t a_offlo;
636: off_t a_offhi;
637: struct lwp *a_l;
638: } */ *ap = v;
1.108 rmind 639: vnode_t *vp __diagused = ap->a_vp;
1.1 jmmv 640:
1.107 rmind 641: /* Nothing to do. Should be up to date. */
1.1 jmmv 642: KASSERT(VOP_ISLOCKED(vp));
1.17 yamt 643: return 0;
1.1 jmmv 644: }
645:
/*
 * tmpfs_remove: unlink a file.
 *
 * => Both directory (dvp) and file (vp) are locked.
 * => We unlock and drop the reference on both.
 *
 * NOTE(review): with the vop_remove_v2 protocol only vp is released
 * below; the caller keeps its lock/reference on dvp — confirm the
 * header comment against vnodeops(9).
 */
int
tmpfs_remove(void *v)
{
	struct vop_remove_v2_args /* {
		struct vnode *a_dvp;
		struct vnode *a_vp;
		struct componentname *a_cnp;
	} */ *ap = v;
	vnode_t *dvp = ap->a_dvp, *vp = ap->a_vp;
	tmpfs_node_t *dnode, *node;
	tmpfs_dirent_t *de;
	int error;

	KASSERT(VOP_ISLOCKED(dvp));
	KASSERT(VOP_ISLOCKED(vp));

	/* Directories are removed with rmdir(2), not unlink(2). */
	if (vp->v_type == VDIR) {
		error = EPERM;
		goto out;
	}
	dnode = VP_TO_TMPFS_DIR(dvp);
	node = VP_TO_TMPFS_NODE(vp);

	/*
	 * Files marked as immutable or append-only cannot be deleted.
	 * Likewise, files residing on directories marked as append-only
	 * cannot be deleted.
	 */
	if (node->tn_flags & (IMMUTABLE | APPEND)) {
		error = EPERM;
		goto out;
	}
	if (dnode->tn_flags & APPEND) {
		error = EPERM;
		goto out;
	}

	/* Lookup the directory entry (check the cached hint first). */
	de = tmpfs_dir_cached(node);
	if (de == NULL) {
		struct componentname *cnp = ap->a_cnp;
		de = tmpfs_dir_lookup(dnode, cnp);
	}
	KASSERT(de && de->td_node == node);

	/*
	 * Remove the entry from the directory (drops the link count) and
	 * destroy it or replace with a whiteout.
	 *
	 * Note: the inode referred by it will not be destroyed until the
	 * vnode is reclaimed/recycled.
	 */

	tmpfs_dir_detach(dnode, de);

	if (ap->a_cnp->cn_flags & DOWHITEOUT)
		tmpfs_dir_attach(dnode, de, TMPFS_NODE_WHITEOUT);
	else
		tmpfs_free_dirent(VFS_TO_TMPFS(vp->v_mount), de);

	if (node->tn_links > 0) {
		/* We removed a hard link. */
		tmpfs_update(vp, TMPFS_UPDATE_CTIME);
	}
	tmpfs_update(dvp, TMPFS_UPDATE_MTIME | TMPFS_UPDATE_CTIME);
	error = 0;
out:
	/* Drop the reference and unlock the node. */
	if (dvp == vp) {
		vrele(vp);
	} else {
		vput(vp);
	}
	return error;
}
727:
/*
 * tmpfs_link: create a hard link.
 */
int
tmpfs_link(void *v)
{
	struct vop_link_v2_args /* {
		struct vnode *a_dvp;
		struct vnode *a_vp;
		struct componentname *a_cnp;
	} */ *ap = v;
	vnode_t *dvp = ap->a_dvp;
	vnode_t *vp = ap->a_vp;
	struct componentname *cnp = ap->a_cnp;
	tmpfs_node_t *dnode, *node;
	tmpfs_dirent_t *de;
	int error;

	KASSERT(dvp != vp);
	KASSERT(VOP_ISLOCKED(dvp));
	KASSERT(vp->v_type != VDIR);
	KASSERT(dvp->v_mount == vp->v_mount);

	dnode = VP_TO_TMPFS_DIR(dvp);
	node = VP_TO_TMPFS_NODE(vp);

	/* The target vnode arrives unlocked; lock it for the update. */
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);

	/* Check for maximum number of links limit. */
	if (node->tn_links == LINK_MAX) {
		error = EMLINK;
		goto out;
	}
	KASSERT(node->tn_links < LINK_MAX);

	/* We cannot create links of files marked immutable or append-only. */
	if (node->tn_flags & (IMMUTABLE | APPEND)) {
		error = EPERM;
		goto out;
	}

	/* Allocate a new directory entry to represent the inode. */
	error = tmpfs_alloc_dirent(VFS_TO_TMPFS(vp->v_mount),
	    cnp->cn_nameptr, cnp->cn_namelen, &de);
	if (error) {
		goto out;
	}

	/*
	 * Insert the entry into the directory.
	 * It will increase the inode link count.
	 */
	tmpfs_dir_attach(dnode, de, node);
	tmpfs_update(dvp, TMPFS_UPDATE_MTIME | TMPFS_UPDATE_CTIME);

	/* Update the timestamps and trigger the event. */
	if (node->tn_vnode) {
		VN_KNOTE(node->tn_vnode, NOTE_LINK);
	}
	tmpfs_update(vp, TMPFS_UPDATE_CTIME);
	error = 0;
out:
	VOP_UNLOCK(vp);
	return error;
}
793:
794: int
795: tmpfs_mkdir(void *v)
796: {
1.114 hannken 797: struct vop_mkdir_v3_args /* {
1.83 rmind 798: struct vnode *a_dvp;
799: struct vnode **a_vpp;
800: struct componentname *a_cnp;
801: struct vattr *a_vap;
802: } */ *ap = v;
803: vnode_t *dvp = ap->a_dvp;
804: vnode_t **vpp = ap->a_vpp;
805: struct componentname *cnp = ap->a_cnp;
806: struct vattr *vap = ap->a_vap;
1.1 jmmv 807:
808: KASSERT(vap->va_type == VDIR);
1.107 rmind 809: return tmpfs_construct_node(dvp, vpp, vap, cnp, NULL);
1.1 jmmv 810: }
811:
/*
 * tmpfs_rmdir: remove a directory.  The directory must be empty except
 * for whiteout entries, which are destroyed here as well.
 */
int
tmpfs_rmdir(void *v)
{
	struct vop_rmdir_v2_args /* {
		struct vnode *a_dvp;
		struct vnode *a_vp;
		struct componentname *a_cnp;
	} */ *ap = v;
	vnode_t *dvp = ap->a_dvp;
	vnode_t *vp = ap->a_vp;
	tmpfs_mount_t *tmp = VFS_TO_TMPFS(dvp->v_mount);
	tmpfs_node_t *dnode = VP_TO_TMPFS_DIR(dvp);
	tmpfs_node_t *node = VP_TO_TMPFS_DIR(vp);
	tmpfs_dirent_t *de;
	int error = 0;

	KASSERT(VOP_ISLOCKED(dvp));
	KASSERT(VOP_ISLOCKED(vp));

	/*
	 * Directories with more than two entries ('.' and '..') cannot be
	 * removed. There may be whiteout entries, which we will destroy.
	 */
	if (node->tn_size > 0) {
		/*
		 * If never had whiteout entries, the directory is certainly
		 * not empty. Otherwise, scan for any non-whiteout entry.
		 */
		if ((node->tn_gen & TMPFS_WHITEOUT_BIT) == 0) {
			error = ENOTEMPTY;
			goto out;
		}
		TAILQ_FOREACH(de, &node->tn_spec.tn_dir.tn_dir, td_entries) {
			if (de->td_node != TMPFS_NODE_WHITEOUT) {
				error = ENOTEMPTY;
				goto out;
			}
		}
		KASSERT(error == 0);
	}

	KASSERT(node->tn_spec.tn_dir.tn_parent == dnode);

	/* Lookup the directory entry (check the cached hint first). */
	de = tmpfs_dir_cached(node);
	if (de == NULL) {
		struct componentname *cnp = ap->a_cnp;
		de = tmpfs_dir_lookup(dnode, cnp);
	}
	KASSERT(de && de->td_node == node);

	/* Check flags to see if we are allowed to remove the directory. */
	if (dnode->tn_flags & APPEND || node->tn_flags & (IMMUTABLE | APPEND)) {
		error = EPERM;
		goto out;
	}

	/* Decrement the link count for the virtual '.' entry. */
	node->tn_links--;

	/* Detach the directory entry from the directory. */
	tmpfs_dir_detach(dnode, de);

	/* Purge the cache for parent. */
	cache_purge(dvp);

	/*
	 * Destroy the directory entry or replace it with a whiteout.
	 *
	 * Note: the inode referred by it will not be destroyed until the
	 * vnode is reclaimed.
	 */
	if (ap->a_cnp->cn_flags & DOWHITEOUT)
		tmpfs_dir_attach(dnode, de, TMPFS_NODE_WHITEOUT);
	else
		tmpfs_free_dirent(tmp, de);

	/* Destroy the whiteout entries from the node. */
	while ((de = TAILQ_FIRST(&node->tn_spec.tn_dir.tn_dir)) != NULL) {
		KASSERT(de->td_node == TMPFS_NODE_WHITEOUT);
		tmpfs_dir_detach(node, de);
		tmpfs_free_dirent(tmp, de);
	}
	tmpfs_update(dvp, TMPFS_UPDATE_MTIME | TMPFS_UPDATE_CTIME);

	KASSERT(node->tn_size == 0);
	KASSERT(node->tn_links == 0);
out:
	/* Release the node. */
	KASSERT(dvp != vp);
	vput(vp);
	return error;
}
905:
906: int
907: tmpfs_symlink(void *v)
908: {
1.114 hannken 909: struct vop_symlink_v3_args /* {
1.83 rmind 910: struct vnode *a_dvp;
911: struct vnode **a_vpp;
912: struct componentname *a_cnp;
913: struct vattr *a_vap;
914: char *a_target;
915: } */ *ap = v;
916: vnode_t *dvp = ap->a_dvp;
917: vnode_t **vpp = ap->a_vpp;
918: struct componentname *cnp = ap->a_cnp;
919: struct vattr *vap = ap->a_vap;
920: char *target = ap->a_target;
1.1 jmmv 921:
922: KASSERT(vap->va_type == VLNK);
1.107 rmind 923: return tmpfs_construct_node(dvp, vpp, vap, cnp, target);
1.1 jmmv 924: }
925:
/*
 * tmpfs_readdir: read directory entries, optionally producing the NFS
 * cookie array by replaying the sequence numbers that
 * tmpfs_dir_getdents() just consumed.
 */
int
tmpfs_readdir(void *v)
{
	struct vop_readdir_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		kauth_cred_t a_cred;
		int *a_eofflag;
		off_t **a_cookies;
		int *ncookies;
	} */ *ap = v;
	vnode_t *vp = ap->a_vp;
	struct uio *uio = ap->a_uio;
	int *eofflag = ap->a_eofflag;
	off_t **cookies = ap->a_cookies;
	int *ncookies = ap->a_ncookies;
	off_t startoff, cnt;
	tmpfs_node_t *node;
	int error;

	KASSERT(VOP_ISLOCKED(vp));

	/* This operation only makes sense on directory nodes. */
	if (vp->v_type != VDIR) {
		return ENOTDIR;
	}
	node = VP_TO_TMPFS_DIR(vp);
	startoff = uio->uio_offset;
	cnt = 0;

	/*
	 * Retrieve the directory entries, unless it is being destroyed.
	 */
	if (node->tn_links) {
		error = tmpfs_dir_getdents(node, uio, &cnt);
	} else {
		error = 0;
	}

	if (eofflag != NULL) {
		*eofflag = !error && uio->uio_offset == TMPFS_DIRSEQ_EOF;
	}
	if (error || cookies == NULL || ncookies == NULL) {
		return error;
	}

	/* Update NFS-related variables, if any. */
	tmpfs_dirent_t *de = NULL;
	off_t i, off = startoff;

	*cookies = malloc(cnt * sizeof(off_t), M_TEMP, M_WAITOK);
	*ncookies = cnt;

	/* Walk the sequence numbers again, one cookie per entry returned. */
	for (i = 0; i < cnt; i++) {
		KASSERT(off != TMPFS_DIRSEQ_EOF);
		if (off != TMPFS_DIRSEQ_DOT) {
			if (off == TMPFS_DIRSEQ_DOTDOT) {
				de = TAILQ_FIRST(&node->tn_spec.tn_dir.tn_dir);
			} else if (de != NULL) {
				de = TAILQ_NEXT(de, td_entries);
			} else {
				/* Resume mid-directory: find entry by seq. */
				de = tmpfs_dir_lookupbyseq(node, off);
				KASSERT(de != NULL);
				de = TAILQ_NEXT(de, td_entries);
			}
			if (de == NULL) {
				off = TMPFS_DIRSEQ_EOF;
			} else {
				off = tmpfs_dir_getseq(node, de);
			}
		} else {
			off = TMPFS_DIRSEQ_DOTDOT;
		}
		(*cookies)[i] = off;
	}
	KASSERT(uio->uio_offset == off);
	return error;
}
1004:
1005: int
1006: tmpfs_readlink(void *v)
1007: {
1.83 rmind 1008: struct vop_readlink_args /* {
1009: struct vnode *a_vp;
1010: struct uio *a_uio;
1011: kauth_cred_t a_cred;
1012: } */ *ap = v;
1013: vnode_t *vp = ap->a_vp;
1014: struct uio *uio = ap->a_uio;
1.104 rmind 1015: tmpfs_node_t *node = VP_TO_TMPFS_NODE(vp);
1.1 jmmv 1016: int error;
1017:
1018: KASSERT(VOP_ISLOCKED(vp));
1019: KASSERT(uio->uio_offset == 0);
1020: KASSERT(vp->v_type == VLNK);
1021:
1.105 rmind 1022: /* Note: readlink(2) returns the path without NUL terminator. */
1023: if (node->tn_size > 0) {
1024: error = uiomove(node->tn_spec.tn_lnk.tn_link,
1.121 gson 1025: MIN(node->tn_size, uio->uio_resid), uio);
1.105 rmind 1026: } else {
1027: error = 0;
1028: }
1.107 rmind 1029: tmpfs_update(vp, TMPFS_UPDATE_ATIME);
1.1 jmmv 1030:
1031: return error;
1032: }
1033:
/*
 * tmpfs_inactive: called when the vnode's use count drops to zero.
 * If the inode has no links left (the file was unlinked while open),
 * mark it dead, release its data pages and ask VFS to recycle the vnode.
 */
int
tmpfs_inactive(void *v)
{
	struct vop_inactive_v2_args /* {
		struct vnode *a_vp;
		bool *a_recycle;
	} */ *ap = v;
	vnode_t *vp = ap->a_vp;
	tmpfs_node_t *node;
	int error = 0;

	KASSERT(VOP_ISLOCKED(vp));

	node = VP_TO_TMPFS_NODE(vp);
	if (node->tn_links == 0) {
		/*
		 * Mark node as dead by setting its generation to zero.
		 */
		atomic_and_32(&node->tn_gen, ~TMPFS_NODE_GEN_MASK);

		/*
		 * If the file has been deleted, truncate it, otherwise VFS
		 * will quite rightly try to write back dirty data, which in
		 * the case of tmpfs/UAO means needless page deactivations.
		 */
		if (vp->v_type == VREG) {
			error = tmpfs_reg_resize(vp, 0);
		}
		/* Request recycling; tmpfs_reclaim() will free the inode. */
		*ap->a_recycle = true;
	} else {
		/* Still referenced by the namespace: keep the vnode cached. */
		*ap->a_recycle = false;
	}

	return error;
}
1069:
1070: int
1071: tmpfs_reclaim(void *v)
1072: {
1.133 riastrad 1073: struct vop_reclaim_v2_args /* {
1.82 rmind 1074: struct vnode *a_vp;
1075: } */ *ap = v;
1.83 rmind 1076: vnode_t *vp = ap->a_vp;
1077: tmpfs_mount_t *tmp = VFS_TO_TMPFS(vp->v_mount);
1078: tmpfs_node_t *node = VP_TO_TMPFS_NODE(vp);
1.1 jmmv 1079:
1.133 riastrad 1080: /* Unlock vnode. We still have exclusive access to it. */
1081: VOP_UNLOCK(vp);
1082:
1.82 rmind 1083: /* Disassociate inode from vnode. */
1.85 rmind 1084: node->tn_vnode = NULL;
1085: vp->v_data = NULL;
1.109 rmind 1086:
1087: /* If inode is not referenced, i.e. no links, then destroy it. */
1.123 hannken 1088: if (node->tn_links == 0)
1.1 jmmv 1089: tmpfs_free_node(tmp, node);
1090: return 0;
1091: }
1092:
1093: int
1094: tmpfs_pathconf(void *v)
1095: {
1.83 rmind 1096: struct vop_pathconf_args /* {
1097: struct vnode *a_vp;
1098: int a_name;
1099: register_t *a_retval;
1100: } */ *ap = v;
1101: const int name = ap->a_name;
1102: register_t *retval = ap->a_retval;
1103: int error = 0;
1.1 jmmv 1104:
1105: switch (name) {
1106: case _PC_LINK_MAX:
1107: *retval = LINK_MAX;
1108: break;
1109: case _PC_NAME_MAX:
1.92 christos 1110: *retval = TMPFS_MAXNAMLEN;
1.1 jmmv 1111: break;
1112: case _PC_PATH_MAX:
1113: *retval = PATH_MAX;
1114: break;
1115: case _PC_PIPE_BUF:
1116: *retval = PIPE_BUF;
1117: break;
1118: case _PC_CHOWN_RESTRICTED:
1119: *retval = 1;
1120: break;
1121: case _PC_NO_TRUNC:
1122: *retval = 1;
1123: break;
1124: case _PC_SYNC_IO:
1125: *retval = 1;
1126: break;
1127: case _PC_FILESIZEBITS:
1.85 rmind 1128: *retval = sizeof(off_t) * CHAR_BIT;
1.1 jmmv 1129: break;
1130: default:
1131: error = EINVAL;
1132: }
1133: return error;
1134: }
1135:
1136: int
1.15 jmmv 1137: tmpfs_advlock(void *v)
1138: {
1.83 rmind 1139: struct vop_advlock_args /* {
1140: struct vnode *a_vp;
1141: void * a_id;
1142: int a_op;
1143: struct flock *a_fl;
1144: int a_flags;
1145: } */ *ap = v;
1146: vnode_t *vp = ap->a_vp;
1147: tmpfs_node_t *node = VP_TO_TMPFS_NODE(vp);
1.15 jmmv 1148:
1149: return lf_advlock(v, &node->tn_lockf, node->tn_size);
1150: }
1151:
1152: int
1.1 jmmv 1153: tmpfs_getpages(void *v)
1154: {
1.78 rmind 1155: struct vop_getpages_args /* {
1156: struct vnode *a_vp;
1157: voff_t a_offset;
1158: struct vm_page **a_m;
1159: int *a_count;
1160: int a_centeridx;
1161: vm_prot_t a_access_type;
1162: int a_advice;
1163: int a_flags;
1164: } */ * const ap = v;
1.83 rmind 1165: vnode_t *vp = ap->a_vp;
1.78 rmind 1166: const voff_t offset = ap->a_offset;
1167: struct vm_page **pgs = ap->a_m;
1168: const int centeridx = ap->a_centeridx;
1169: const vm_prot_t access_type = ap->a_access_type;
1170: const int advice = ap->a_advice;
1171: const int flags = ap->a_flags;
1172: int error, npages = *ap->a_count;
1.83 rmind 1173: tmpfs_node_t *node;
1.6 yamt 1174: struct uvm_object *uobj;
1.1 jmmv 1175:
1.6 yamt 1176: KASSERT(vp->v_type == VREG);
1.134 ad 1177: KASSERT(rw_lock_held(vp->v_uobj.vmobjlock));
1.1 jmmv 1178:
1.78 rmind 1179: /*
1180: * Currently, PGO_PASTEOF is not supported.
1181: */
1.9 yamt 1182: if (vp->v_size <= offset + (centeridx << PAGE_SHIFT)) {
1183: if ((flags & PGO_LOCKED) == 0)
1.134 ad 1184: rw_exit(vp->v_uobj.vmobjlock);
1.9 yamt 1185: return EINVAL;
1186: }
1187:
1188: if (vp->v_size < offset + (npages << PAGE_SHIFT)) {
1189: npages = (round_page(vp->v_size) - offset) >> PAGE_SHIFT;
1190: }
1191:
1.7 jmmv 1192: if ((flags & PGO_LOCKED) != 0)
1.6 yamt 1193: return EBUSY;
1.1 jmmv 1194:
1.134 ad 1195: mutex_enter(vp->v_interlock);
1196: error = vdead_check(vp, VDEAD_NOWAIT);
1197: mutex_exit(vp->v_interlock);
1198: if (error != 0)
1.130 hannken 1199: return ENOENT;
1200:
1201: node = VP_TO_TMPFS_NODE(vp);
1202: uobj = node->tn_spec.tn_reg.tn_aobj;
1203:
1.6 yamt 1204: if ((flags & PGO_NOTIMESTAMP) == 0) {
1.107 rmind 1205: u_int tflags = 0;
1206:
1.7 jmmv 1207: if ((vp->v_mount->mnt_flag & MNT_NOATIME) == 0)
1.107 rmind 1208: tflags |= TMPFS_UPDATE_ATIME;
1.7 jmmv 1209:
1.93 christos 1210: if ((access_type & VM_PROT_WRITE) != 0) {
1.107 rmind 1211: tflags |= TMPFS_UPDATE_MTIME;
1.93 christos 1212: if (vp->v_mount->mnt_flag & MNT_RELATIME)
1.107 rmind 1213: tflags |= TMPFS_UPDATE_ATIME;
1.93 christos 1214: }
1.107 rmind 1215: tmpfs_update(vp, tflags);
1.1 jmmv 1216: }
1217:
1.28 jmmv 1218: /*
1.78 rmind 1219: * Invoke the pager.
1.49 jmmv 1220: *
1.78 rmind 1221: * Clean the array of pages before. XXX: PR/32166
1222: * Note that vnode lock is shared with underlying UVM object.
1.28 jmmv 1223: */
1.78 rmind 1224: if (pgs) {
1225: memset(pgs, 0, sizeof(struct vm_pages *) * npages);
1226: }
1.134 ad 1227: KASSERT(vp->v_uobj.vmobjlock == uobj->vmobjlock);
1.87 rmind 1228:
1.78 rmind 1229: error = (*uobj->pgops->pgo_get)(uobj, offset, pgs, &npages, centeridx,
1.28 jmmv 1230: access_type, advice, flags | PGO_ALLPAGES);
1.78 rmind 1231:
1.28 jmmv 1232: #if defined(DEBUG)
1.78 rmind 1233: if (!error && pgs) {
1.80 matt 1234: for (int i = 0; i < npages; i++) {
1.78 rmind 1235: KASSERT(pgs[i] != NULL);
1236: }
1.28 jmmv 1237: }
1238: #endif
1.6 yamt 1239: return error;
1240: }
1241:
1242: int
1243: tmpfs_putpages(void *v)
1244: {
1.78 rmind 1245: struct vop_putpages_args /* {
1246: struct vnode *a_vp;
1247: voff_t a_offlo;
1248: voff_t a_offhi;
1249: int a_flags;
1250: } */ * const ap = v;
1.83 rmind 1251: vnode_t *vp = ap->a_vp;
1.78 rmind 1252: const voff_t offlo = ap->a_offlo;
1253: const voff_t offhi = ap->a_offhi;
1254: const int flags = ap->a_flags;
1.83 rmind 1255: tmpfs_node_t *node;
1.6 yamt 1256: struct uvm_object *uobj;
1.78 rmind 1257: int error;
1.6 yamt 1258:
1.134 ad 1259: KASSERT(rw_write_held(vp->v_uobj.vmobjlock));
1.7 jmmv 1260:
1.6 yamt 1261: if (vp->v_type != VREG) {
1.134 ad 1262: rw_exit(vp->v_uobj.vmobjlock);
1.6 yamt 1263: return 0;
1.1 jmmv 1264: }
1265:
1.87 rmind 1266: node = VP_TO_TMPFS_NODE(vp);
1.21 jmmv 1267: uobj = node->tn_spec.tn_reg.tn_aobj;
1.6 yamt 1268:
1.134 ad 1269: KASSERT(vp->v_uobj.vmobjlock == uobj->vmobjlock);
1.7 jmmv 1270: error = (*uobj->pgops->pgo_put)(uobj, offlo, offhi, flags);
1.6 yamt 1271:
1272: /* XXX mtime */
1.1 jmmv 1273:
1274: return error;
1275: }
1.76 pooka 1276:
1277: int
1278: tmpfs_whiteout(void *v)
1279: {
1.83 rmind 1280: struct vop_whiteout_args /* {
1281: struct vnode *a_dvp;
1282: struct componentname *a_cnp;
1283: int a_flags;
1284: } */ *ap = v;
1285: vnode_t *dvp = ap->a_dvp;
1286: struct componentname *cnp = ap->a_cnp;
1287: const int flags = ap->a_flags;
1288: tmpfs_mount_t *tmp = VFS_TO_TMPFS(dvp->v_mount);
1.106 rmind 1289: tmpfs_node_t *dnode = VP_TO_TMPFS_DIR(dvp);
1.83 rmind 1290: tmpfs_dirent_t *de;
1.76 pooka 1291: int error;
1292:
1293: switch (flags) {
1294: case LOOKUP:
1295: break;
1296: case CREATE:
1.85 rmind 1297: error = tmpfs_alloc_dirent(tmp, cnp->cn_nameptr,
1298: cnp->cn_namelen, &de);
1.76 pooka 1299: if (error)
1300: return error;
1.106 rmind 1301: tmpfs_dir_attach(dnode, de, TMPFS_NODE_WHITEOUT);
1.76 pooka 1302: break;
1303: case DELETE:
1304: cnp->cn_flags &= ~DOWHITEOUT; /* when in doubt, cargo cult */
1.106 rmind 1305: de = tmpfs_dir_lookup(dnode, cnp);
1.76 pooka 1306: if (de == NULL)
1307: return ENOENT;
1.106 rmind 1308: tmpfs_dir_detach(dnode, de);
1.85 rmind 1309: tmpfs_free_dirent(tmp, de);
1.76 pooka 1310: break;
1311: }
1.107 rmind 1312: tmpfs_update(dvp, TMPFS_UPDATE_MTIME | TMPFS_UPDATE_CTIME);
1.83 rmind 1313: return 0;
1314: }
1.76 pooka 1315:
1.83 rmind 1316: int
1317: tmpfs_print(void *v)
1318: {
1319: struct vop_print_args /* {
1320: struct vnode *a_vp;
1321: } */ *ap = v;
1322: vnode_t *vp = ap->a_vp;
1323: tmpfs_node_t *node = VP_TO_TMPFS_NODE(vp);
1324:
1325: printf("tag VT_TMPFS, tmpfs_node %p, flags 0x%x, links %d\n"
1.107 rmind 1326: "\tmode 0%o, owner %d, group %d, size %" PRIdMAX,
1.83 rmind 1327: node, node->tn_flags, node->tn_links, node->tn_mode, node->tn_uid,
1.107 rmind 1328: node->tn_gid, (uintmax_t)node->tn_size);
1.83 rmind 1329: if (vp->v_type == VFIFO) {
1330: VOCALL(fifo_vnodeop_p, VOFFSET(vop_print), v);
1331: }
1332: printf("\n");
1.76 pooka 1333: return 0;
1334: }
CVSweb <webmaster@jp.NetBSD.org>