Annotation of src/sys/nfs/nfs_node.c, Revision 1.115.2.1
1.115.2.1! cherry 1: /* $NetBSD: nfs_node.c,v 1.116 2011/06/12 03:35:59 rmind Exp $ */
1.12 cgd 2:
1.1 cgd 3: /*
1.9 mycroft 4: * Copyright (c) 1989, 1993
5: * The Regents of the University of California. All rights reserved.
1.1 cgd 6: *
7: * This code is derived from software contributed to Berkeley by
8: * Rick Macklem at The University of Guelph.
9: *
10: * Redistribution and use in source and binary forms, with or without
11: * modification, are permitted provided that the following conditions
12: * are met:
13: * 1. Redistributions of source code must retain the above copyright
14: * notice, this list of conditions and the following disclaimer.
15: * 2. Redistributions in binary form must reproduce the above copyright
16: * notice, this list of conditions and the following disclaimer in the
17: * documentation and/or other materials provided with the distribution.
1.70 agc 18: * 3. Neither the name of the University nor the names of its contributors
1.1 cgd 19: * may be used to endorse or promote products derived from this software
20: * without specific prior written permission.
21: *
22: * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23: * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24: * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25: * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26: * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27: * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28: * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29: * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30: * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31: * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32: * SUCH DAMAGE.
33: *
1.16 fvdl 34: * @(#)nfs_node.c 8.6 (Berkeley) 5/22/95
1.1 cgd 35: */
1.47 lukem 36:
37: #include <sys/cdefs.h>
1.115.2.1! cherry 38: __KERNEL_RCSID(0, "$NetBSD: nfs_node.c,v 1.116 2011/06/12 03:35:59 rmind Exp $");
1.1 cgd 39:
1.107 ad 40: #ifdef _KERNEL_OPT
1.35 bjh21 41: #include "opt_nfs.h"
1.107 ad 42: #endif
1.16 fvdl 43:
1.4 mycroft 44: #include <sys/param.h>
45: #include <sys/systm.h>
46: #include <sys/proc.h>
47: #include <sys/mount.h>
48: #include <sys/namei.h>
49: #include <sys/vnode.h>
50: #include <sys/kernel.h>
1.28 thorpej 51: #include <sys/pool.h>
1.22 fvdl 52: #include <sys/lock.h>
1.48 lukem 53: #include <sys/hash.h>
1.84 elad 54: #include <sys/kauth.h>
1.1 cgd 55:
1.9 mycroft 56: #include <nfs/rpcv2.h>
1.16 fvdl 57: #include <nfs/nfsproto.h>
1.4 mycroft 58: #include <nfs/nfs.h>
59: #include <nfs/nfsnode.h>
60: #include <nfs/nfsmount.h>
1.15 christos 61: #include <nfs/nfs_var.h>
1.1 cgd 62:
1.104 pooka 63: struct pool nfs_node_pool;
64: struct pool nfs_vattr_pool;
1.108 ad 65: static struct workqueue *nfs_sillyworkq;
1.28 thorpej 66:
1.41 tsutsui 67: extern int prtactive;
1.1 cgd 68:
1.108 ad 69: static void nfs_gop_size(struct vnode *, off_t, off_t *, int);
70: static int nfs_gop_alloc(struct vnode *, off_t, off_t, int, kauth_cred_t);
71: static int nfs_gop_write(struct vnode *, struct vm_page **, int, int);
72: static void nfs_sillyworker(struct work *, void *);
1.46 chs 73:
1.80 yamt 74: static const struct genfs_ops nfs_genfsops = {
75: .gop_size = nfs_gop_size,
76: .gop_alloc = nfs_gop_alloc,
77: .gop_write = nfs_gop_write,
1.46 chs 78: };
79:
/*
 * Initialize the NFS node subsystem: the pools used to allocate
 * nfsnode and vattr structures, and the worker queue that finishes
 * silly-renamed file removals (see nfs_sillyworker()).
 */
void
nfs_node_init(void)
{

	pool_init(&nfs_node_pool, sizeof(struct nfsnode), 0, 0, 0, "nfsnodepl",
	    &pool_allocator_nointr, IPL_NONE);
	pool_init(&nfs_vattr_pool, sizeof(struct vattr), 0, 0, 0, "nfsvapl",
	    &pool_allocator_nointr, IPL_NONE);
	/* Cannot continue without the silly-rename workqueue. */
	if (workqueue_create(&nfs_sillyworkq, "nfssilly", nfs_sillyworker,
	    NULL, PRI_NONE, IPL_NONE, 0) != 0) {
		panic("nfs_node_init");
	}
}
96:
/*
 * Free resources previously allocated in nfs_node_init().
 */
void
nfs_node_done(void)
{

	pool_destroy(&nfs_node_pool);
	pool_destroy(&nfs_vattr_pool);
	workqueue_destroy(nfs_sillyworkq);
}
108:
/*
 * Search key for looking up an nfsnode by NFS file handle in the
 * per-mount red-black tree (see nfs_compare_node_fh()).
 */
struct fh_match {
	nfsfh_t *fhm_fhp;	/* file handle bytes */
	size_t fhm_fhsize;	/* length of the file handle */
	size_t fhm_fhoffset;	/* XXX not referenced in the code visible here */
};
114:
115: static int
1.114 rmind 116: nfs_compare_nodes(void *ctx, const void *parent, const void *node)
1.105 matt 117: {
1.114 rmind 118: const struct nfsnode * const pnp = parent;
119: const struct nfsnode * const np = node;
1.105 matt 120:
121: if (pnp->n_fhsize != np->n_fhsize)
122: return np->n_fhsize - pnp->n_fhsize;
1.45 chs 123:
1.105 matt 124: return memcmp(np->n_fhp, pnp->n_fhp, np->n_fhsize);
125: }
126:
127: static int
1.114 rmind 128: nfs_compare_node_fh(void *ctx, const void *b, const void *key)
1.45 chs 129: {
1.114 rmind 130: const struct nfsnode * const pnp = b;
1.105 matt 131: const struct fh_match * const fhm = key;
1.45 chs 132:
1.105 matt 133: if (pnp->n_fhsize != fhm->fhm_fhsize)
134: return fhm->fhm_fhsize - pnp->n_fhsize;
1.79 perry 135:
1.105 matt 136: return memcmp(fhm->fhm_fhp, pnp->n_fhp, pnp->n_fhsize);
1.45 chs 137: }
138:
/* rb_tree ops for the per-mount tree of nfsnodes, keyed by file handle. */
static const rb_tree_ops_t nfs_node_rbtree_ops = {
	.rbto_compare_nodes = nfs_compare_nodes,
	.rbto_compare_key = nfs_compare_node_fh,
	.rbto_node_offset = offsetof(struct nfsnode, n_rbnode),
	.rbto_context = NULL
};
/*
 * Initialize a mount point's red-black tree of nfsnodes.
 * Called when the nfsmount is set up; nm_rbtlock protection is
 * presumably established by the caller — TODO confirm.
 */
void
nfs_rbtinit(struct nfsmount *nmp)
{

	rb_tree_init(&nmp->nm_rbtree, &nfs_node_rbtree_ops);
}
1.104 pooka 152:
/*
 * Look up a vnode/nfsnode by file handle.
 * Callers must check for mount points!!
 * In all cases, a pointer to a
 * nfsnode structure is returned.
 *
 * The node is looked up in (or inserted into) the per-mount rb-tree,
 * keyed by the file handle.  On success the vnode is returned locked
 * (LK_EXCLUSIVE, modified by lkflags) with a reference held.
 */
int
nfs_nget1(struct mount *mntp, nfsfh_t *fhp, int fhsize, struct nfsnode **npp,
    int lkflags)
{
	struct nfsnode *np;
	struct vnode *vp;
	struct nfsmount *nmp = VFSTONFS(mntp);
	int error;
	struct fh_match fhm;

	fhm.fhm_fhp = fhp;
	fhm.fhm_fhsize = fhsize;

loop:
	rw_enter(&nmp->nm_rbtlock, RW_READER);
	np = rb_tree_find_node(&nmp->nm_rbtree, &fhm);
	if (np != NULL) {
		vp = NFSTOV(np);
		/*
		 * Take the vnode interlock before dropping the tree lock
		 * so the vnode cannot be reclaimed out from under us;
		 * vget() consumes the interlock.
		 */
		mutex_enter(vp->v_interlock);
		rw_exit(&nmp->nm_rbtlock);
		error = vget(vp, LK_EXCLUSIVE | lkflags);
		/* EBUSY is passed through (e.g. for non-blocking lkflags). */
		if (error == EBUSY)
			return error;
		/* Any other failure: the node may be gone; retry lookup. */
		if (error)
			goto loop;
		*npp = np;
		return(0);
	}
	rw_exit(&nmp->nm_rbtlock);

	/* Not found: allocate a new vnode and nfsnode. */
	error = getnewvnode(VT_NFS, mntp, nfsv2_vnodeop_p, NULL, &vp);
	if (error) {
		*npp = 0;
		return (error);
	}
	np = pool_get(&nfs_node_pool, PR_WAITOK);
	memset(np, 0, sizeof *np);
	np->n_vnode = vp;

	/*
	 * Insert the nfsnode in the hash queue for its new file handle
	 */

	/* Large file handles need a separate allocation. */
	if (fhsize > NFS_SMALLFH) {
		np->n_fhp = kmem_alloc(fhsize, KM_SLEEP);
	} else
		np->n_fhp = &np->n_fh;
	memcpy(np->n_fhp, fhp, fhsize);
	np->n_fhsize = fhsize;
	np->n_accstamp = -1;
	np->n_vattr = pool_get(&nfs_vattr_pool, PR_WAITOK);

	/*
	 * We may have slept in the allocations above, so another thread
	 * could have inserted the same file handle meanwhile.  Re-check
	 * under the writer lock and back out everything if we lost.
	 */
	rw_enter(&nmp->nm_rbtlock, RW_WRITER);
	if (NULL != rb_tree_find_node(&nmp->nm_rbtree, &fhm)) {
		rw_exit(&nmp->nm_rbtlock);
		if (fhsize > NFS_SMALLFH) {
			kmem_free(np->n_fhp, fhsize);
		}
		pool_put(&nfs_vattr_pool, np->n_vattr);
		pool_put(&nfs_node_pool, np);
		ungetnewvnode(vp);
		goto loop;
	}
	vp->v_data = np;
	genfs_node_init(vp, &nfs_genfsops);
	/*
	 * Initialize read/write creds to useful values. VOP_OPEN will
	 * overwrite these.
	 */
	np->n_rcred = curlwp->l_cred;
	kauth_cred_hold(np->n_rcred);
	np->n_wcred = curlwp->l_cred;
	kauth_cred_hold(np->n_wcred);
	VOP_LOCK(vp, LK_EXCLUSIVE);
	NFS_INVALIDATE_ATTRCACHE(np);
	uvm_vnp_setsize(vp, 0);
	(void)rb_tree_insert_node(&nmp->nm_rbtree, np);
	rw_exit(&nmp->nm_rbtlock);

	*npp = np;
	return (0);
}
241:
/*
 * Last reference to an nfsnode has been released: flush any pending
 * silly-rename, reset cached state, and tell the caller whether the
 * vnode should be recycled.  Called with the vnode locked; unlocks it.
 */
int
nfs_inactive(void *v)
{
	struct vop_inactive_args /* {
		struct vnode *a_vp;
		bool *a_recycle;
	} */ *ap = v;
	struct nfsnode *np;
	struct sillyrename *sp;
	struct vnode *vp = ap->a_vp;

	np = VTONFS(vp);
	/* Directories never carry a silly-rename. */
	if (vp->v_type != VDIR) {
		sp = np->n_sillyrename;
		np->n_sillyrename = (struct sillyrename *)0;
	} else
		sp = NULL;
	/* Discard dirty buffers before the silly file is removed. */
	if (sp != NULL)
		nfs_vinvalbuf(vp, 0, sp->s_cred, curlwp, 1);
	/* Recycle immediately if the server-side file was removed. */
	*ap->a_recycle = (np->n_flag & NREMOVED) != 0;
	/* Keep only the flags that remain meaningful across reactivation. */
	np->n_flag &=
	    (NMODIFIED | NFLUSHINPROG | NFLUSHWANT | NEOFVALID | NTRUNCDELAYED);

	if (vp->v_type == VDIR && np->n_dircache)
		nfs_invaldircache(vp,
		    NFS_INVALDIRCACHE_FORCE | NFS_INVALDIRCACHE_KEEPEOF);

	VOP_UNLOCK(vp);

	/*
	 * Remove the silly file asynchronously; the worker needs to lock
	 * the parent directory, which we must not do here.
	 */
	if (sp != NULL) {
		workqueue_enqueue(nfs_sillyworkq, &sp->s_work, NULL);
	}

	return (0);
}
277:
/*
 * Reclaim an nfsnode so that it can be used for other purposes.
 * Unhooks the node from the per-mount tree and releases every
 * resource attached to it (directory cache, file handle, vattr,
 * credentials, commit lock, genfs state).
 */
int
nfs_reclaim(void *v)
{
	struct vop_reclaim_args /* {
		struct vnode *a_vp;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct nfsnode *np = VTONFS(vp);
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);

	if (prtactive && vp->v_usecount > 1)
		vprint("nfs_reclaim: pushing active", vp);

	rw_enter(&nmp->nm_rbtlock, RW_WRITER);
	rb_tree_remove_node(&nmp->nm_rbtree, np);
	rw_exit(&nmp->nm_rbtlock);

	/*
	 * Free up any directory cookie structures and
	 * large file handle structures that might be associated with
	 * this nfs node.
	 */
	if (vp->v_type == VDIR && np->n_dircache != NULL) {
		nfs_invaldircache(vp, NFS_INVALDIRCACHE_FORCE);
		hashdone(np->n_dircache, HASH_LIST, nfsdirhashmask);
	}
	KASSERT(np->n_dirgens == NULL);

	/* Only large handles were separately allocated in nfs_nget1(). */
	if (np->n_fhsize > NFS_SMALLFH)
		kmem_free(np->n_fhp, np->n_fhsize);

	pool_put(&nfs_vattr_pool, np->n_vattr);
	if (np->n_rcred)
		kauth_cred_free(np->n_rcred);

	if (np->n_wcred)
		kauth_cred_free(np->n_wcred);

	/* The commit lock is only initialized for regular files. */
	if (vp->v_type == VREG) {
		mutex_destroy(&np->n_commitlock);
	}
	genfs_node_destroy(vp);
	pool_put(&nfs_node_pool, np);
	vp->v_data = NULL;
	return (0);
}
327:
328: void
1.87 yamt 329: nfs_gop_size(struct vnode *vp, off_t size, off_t *eobp, int flags)
1.46 chs 330: {
1.83 yamt 331:
1.46 chs 332: *eobp = MAX(size, vp->v_size);
333: }
334:
335: int
1.87 yamt 336: nfs_gop_alloc(struct vnode *vp, off_t off, off_t len, int flags,
337: kauth_cred_t cred)
1.46 chs 338: {
1.87 yamt 339:
1.46 chs 340: return 0;
1.53 chs 341: }
342:
/*
 * genfs gop_write hook: write-protect the pages before handing them
 * to genfs_gop_write(), presumably so that any modification during or
 * after the write re-faults and re-dirties the page — TODO confirm.
 */
int
nfs_gop_write(struct vnode *vp, struct vm_page **pgs, int npages, int flags)
{
	int i;

	/* Page protection changes are made under the vnode interlock. */
	mutex_enter(vp->v_interlock);
	for (i = 0; i < npages; i++) {
		pmap_page_protect(pgs[i], VM_PROT_READ);
	}
	mutex_exit(vp->v_interlock);

	return genfs_gop_write(vp, pgs, npages, flags);
}
1.108 ad 356:
/*
 * Remove a silly file that was rename'd earlier
 *
 * Workqueue handler enqueued from nfs_inactive(); the work item is
 * embedded at the start of the sillyrename structure.  Frees the
 * sillyrename (and its credential) in all cases.
 */
static void
nfs_sillyworker(struct work *work, void *arg)
{
	struct sillyrename *sp;
	int error;

	sp = (struct sillyrename *)work;
	error = vn_lock(sp->s_dvp, LK_EXCLUSIVE);
	if (error || sp->s_dvp->v_data == NULL) {
		/* XXX should recover */
		printf("%s: vp=%p error=%d\n", __func__, sp->s_dvp, error);
		/* vput() if we hold the lock, plain vrele() otherwise. */
		if (error == 0) {
			vput(sp->s_dvp);
		} else {
			vrele(sp->s_dvp);
		}
	} else {
		nfs_removeit(sp);
		vput(sp->s_dvp);
	}
	kauth_cred_free(sp->s_cred);
	kmem_free(sp, sizeof(*sp));
}
CVSweb <webmaster@jp.NetBSD.org>