version 1.117, 2014/02/27 16:51:38 |
version 1.117.2.1, 2014/08/10 06:56:45 |
Line 106 nfs_node_done(void) |
|
Line 106 nfs_node_done(void) |
|
workqueue_destroy(nfs_sillyworkq); |
workqueue_destroy(nfs_sillyworkq); |
} |
} |
|
|
/*
 * Search key for looking up an nfsnode in the per-mount red-black
 * tree by NFS file handle (see nfs_compare_node_fh()).
 */
struct fh_match {

	nfsfh_t *fhm_fhp;	/* file handle bytes to match */

	size_t fhm_fhsize;	/* length of the file handle, in bytes */

	size_t fhm_fhoffset;	/* NOTE(review): unused in the code visible here — confirm */

};
|
|
|
static int |
|
nfs_compare_nodes(void *ctx, const void *parent, const void *node) |
|
{ |
|
const struct nfsnode * const pnp = parent; |
|
const struct nfsnode * const np = node; |
|
|
|
if (pnp->n_fhsize != np->n_fhsize) |
|
return np->n_fhsize - pnp->n_fhsize; |
|
|
|
return memcmp(np->n_fhp, pnp->n_fhp, np->n_fhsize); |
|
} |
|
|
|
static int |
|
nfs_compare_node_fh(void *ctx, const void *b, const void *key) |
|
{ |
|
const struct nfsnode * const pnp = b; |
|
const struct fh_match * const fhm = key; |
|
|
|
if (pnp->n_fhsize != fhm->fhm_fhsize) |
|
return fhm->fhm_fhsize - pnp->n_fhsize; |
|
|
|
return memcmp(fhm->fhm_fhp, pnp->n_fhp, pnp->n_fhsize); |
|
} |
|
|
|
/*
 * rb_tree glue: comparators for node-vs-node and node-vs-key lookups,
 * plus the offset of the embedded rb_node inside struct nfsnode.
 */
static const rb_tree_ops_t nfs_node_rbtree_ops = {

	.rbto_compare_nodes = nfs_compare_nodes,

	.rbto_compare_key = nfs_compare_node_fh,

	.rbto_node_offset = offsetof(struct nfsnode, n_rbnode),

	.rbto_context = NULL

};
|
|
|
/*
 * Initialize the per-mount red-black tree that indexes this mount's
 * nfsnodes by file handle.
 */
void

nfs_rbtinit(struct nfsmount *nmp)

{



	rb_tree_init(&nmp->nm_rbtree, &nfs_node_rbtree_ops);

}
|
|
|
/* |
/* |
* Look up a vnode/nfsnode by file handle. |
* Initialize this vnode / nfs node pair. |
* Callers must check for mount points!! |
* Caller assures no other thread will try to load this node. |
* In all cases, a pointer to a |
|
* nfsnode structure is returned. |
|
*/ |
*/ |
int |
int |
nfs_nget1(struct mount *mntp, nfsfh_t *fhp, int fhsize, struct nfsnode **npp, |
nfs_loadvnode(struct mount *mp, struct vnode *vp, |
int lkflags) |
const void *key, size_t key_len, const void **new_key) |
{ |
{ |
|
int fhsize = key_len; |
|
const nfsfh_t *fhp = key; |
struct nfsnode *np; |
struct nfsnode *np; |
struct vnode *vp; |
|
struct nfsmount *nmp = VFSTONFS(mntp); |
|
int error; |
|
struct fh_match fhm; |
|
|
|
fhm.fhm_fhp = fhp; |
/* Aloocate and initialize the nfsnode. */ |
fhm.fhm_fhsize = fhsize; |
|
|
|
loop: |
|
rw_enter(&nmp->nm_rbtlock, RW_READER); |
|
np = rb_tree_find_node(&nmp->nm_rbtree, &fhm); |
|
if (np != NULL) { |
|
vp = NFSTOV(np); |
|
mutex_enter(vp->v_interlock); |
|
rw_exit(&nmp->nm_rbtlock); |
|
error = vget(vp, LK_EXCLUSIVE | lkflags); |
|
if (error == EBUSY) |
|
return error; |
|
if (error) |
|
goto loop; |
|
*npp = np; |
|
return(0); |
|
} |
|
rw_exit(&nmp->nm_rbtlock); |
|
|
|
error = getnewvnode(VT_NFS, mntp, nfsv2_vnodeop_p, NULL, &vp); |
|
if (error) { |
|
*npp = 0; |
|
return (error); |
|
} |
|
np = pool_get(&nfs_node_pool, PR_WAITOK); |
np = pool_get(&nfs_node_pool, PR_WAITOK); |
memset(np, 0, sizeof *np); |
memset(np, 0, sizeof *np); |
np->n_vnode = vp; |
|
|
|
/* |
|
* Insert the nfsnode in the hash queue for its new file handle |
|
*/ |
|
|
|
if (fhsize > NFS_SMALLFH) { |
if (fhsize > NFS_SMALLFH) { |
np->n_fhp = kmem_alloc(fhsize, KM_SLEEP); |
np->n_fhp = kmem_alloc(fhsize, KM_SLEEP); |
} else |
} else |
np->n_fhp = &np->n_fh; |
np->n_fhp = &np->n_fh; |
|
vp->v_tag = VT_NFS; |
|
vp->v_type = VNON; |
|
vp->v_op = nfsv2_vnodeop_p; |
|
vp->v_data = np; |
memcpy(np->n_fhp, fhp, fhsize); |
memcpy(np->n_fhp, fhp, fhsize); |
np->n_fhsize = fhsize; |
np->n_fhsize = fhsize; |
np->n_accstamp = -1; |
np->n_accstamp = -1; |
np->n_vattr = pool_get(&nfs_vattr_pool, PR_WAITOK); |
np->n_vattr = pool_get(&nfs_vattr_pool, PR_WAITOK); |
|
np->n_vnode = vp; |
|
|
rw_enter(&nmp->nm_rbtlock, RW_WRITER); |
/* Initialize genfs node. */ |
if (NULL != rb_tree_find_node(&nmp->nm_rbtree, &fhm)) { |
|
rw_exit(&nmp->nm_rbtlock); |
|
if (fhsize > NFS_SMALLFH) { |
|
kmem_free(np->n_fhp, fhsize); |
|
} |
|
pool_put(&nfs_vattr_pool, np->n_vattr); |
|
pool_put(&nfs_node_pool, np); |
|
ungetnewvnode(vp); |
|
goto loop; |
|
} |
|
vp->v_data = np; |
|
genfs_node_init(vp, &nfs_genfsops); |
genfs_node_init(vp, &nfs_genfsops); |
/* |
/* |
* Initalize read/write creds to useful values. VOP_OPEN will |
* Initalize read/write creds to useful values. VOP_OPEN will |
|
|
kauth_cred_hold(np->n_rcred); |
kauth_cred_hold(np->n_rcred); |
np->n_wcred = curlwp->l_cred; |
np->n_wcred = curlwp->l_cred; |
kauth_cred_hold(np->n_wcred); |
kauth_cred_hold(np->n_wcred); |
error = VOP_LOCK(vp, LK_EXCLUSIVE); |
|
KASSERT(error == 0); |
|
NFS_INVALIDATE_ATTRCACHE(np); |
NFS_INVALIDATE_ATTRCACHE(np); |
uvm_vnp_setsize(vp, 0); |
uvm_vnp_setsize(vp, 0); |
(void)rb_tree_insert_node(&nmp->nm_rbtree, np); |
*new_key = np->n_fhp; |
rw_exit(&nmp->nm_rbtlock); |
return 0; |
|
} |
|
|
*npp = np; |
/* |
return (0); |
* Look up a vnode/nfsnode by file handle. |
|
* Callers must check for mount points!! |
|
* In all cases, a pointer to a |
|
* nfsnode structure is returned. |
|
*/ |
|
int |
|
nfs_nget1(struct mount *mntp, nfsfh_t *fhp, int fhsize, struct nfsnode **npp, |
|
int lkflags) |
|
{ |
|
int error; |
|
struct vnode *vp; |
|
|
|
error = vcache_get(mntp, fhp, fhsize, &vp); |
|
if (error) |
|
return error; |
|
error = vn_lock(vp, LK_EXCLUSIVE | lkflags); |
|
if (error) { |
|
vrele(vp); |
|
return error; |
|
} |
|
*npp = VTONFS(vp); |
|
return 0; |
} |
} |
|
|
int |
int |
Line 287 nfs_reclaim(void *v) |
|
Line 223 nfs_reclaim(void *v) |
|
} */ *ap = v; |
} */ *ap = v; |
struct vnode *vp = ap->a_vp; |
struct vnode *vp = ap->a_vp; |
struct nfsnode *np = VTONFS(vp); |
struct nfsnode *np = VTONFS(vp); |
struct nfsmount *nmp = VFSTONFS(vp->v_mount); |
|
|
|
if (prtactive && vp->v_usecount > 1) |
if (prtactive && vp->v_usecount > 1) |
vprint("nfs_reclaim: pushing active", vp); |
vprint("nfs_reclaim: pushing active", vp); |
|
|
rw_enter(&nmp->nm_rbtlock, RW_WRITER); |
vcache_remove(vp->v_mount, np->n_fhp, np->n_fhsize); |
rb_tree_remove_node(&nmp->nm_rbtree, np); |
|
rw_exit(&nmp->nm_rbtlock); |
|
|
|
/* |
/* |
* Free up any directory cookie structures and |
* Free up any directory cookie structures and |