/*
 * Provenance (from cvs.NetBSD.org): src/sys/kern/vnode_if.c
 *
 * Revision 1.75, Wed Jan  2 11:51:17 2008 UTC, by "ad".
 * Branch: MAIN
 * CVS tags: matt-armv6-base, bouyer-xeni386-nbase, bouyer-xeni386-base
 * Changes since 1.74: +5 -3 lines
 *
 * Commit message: "Regen."
 */

/*	$NetBSD: vnode_if.c,v 1.75 2008/01/02 11:51:17 ad Exp $	*/

/*
 * Warning: DO NOT EDIT! This file is automatically generated!
 * (Modifications made here may easily be lost!)
 *
 * Created from the file:
 *	NetBSD: vnode_if.src,v 1.56 2008/01/02 11:48:58 ad Exp
 * by the script:
 *	NetBSD: vnode_if.sh,v 1.48 2008/01/02 11:48:57 ad Exp
 */

/*
 * Copyright (c) 1992, 1993, 1994, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vnode_if.c,v 1.75 2008/01/02 11:51:17 ad Exp $");


#include "opt_vnode_lockdebug.h"
#include "opt_multiprocessor.h"

#include <sys/param.h>
#include <sys/mount.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/lock.h>

/*
 * Default vnode operation descriptor: offset 0, name "default", no
 * flags, no vnode-pointer offset table, and all argument offsets set
 * to VDESC_NO_OFFSET (no vpp/cred/componentname arguments).
 */
const struct vnodeop_desc vop_default_desc = {
	0,
	"default",
	0,
	NULL,
	VDESC_NO_OFFSET,
	VDESC_NO_OFFSET,
	VDESC_NO_OFFSET,
	NULL,
};


/* Special cases: */

/*
 * vop_bwrite is a special case: it takes a struct buf *, not a vnode,
 * so there are no vnode-pointer argument offsets.
 */
const int vop_bwrite_vp_offsets[] = {
	VDESC_NO_OFFSET
};
/* Operation descriptor for vop_bwrite. */
const struct vnodeop_desc vop_bwrite_desc = {
	VOP_BWRITE_DESCOFFSET,
	"vop_bwrite",
	0,
	vop_bwrite_vp_offsets,
	VDESC_NO_OFFSET,
	VDESC_NO_OFFSET,
	VDESC_NO_OFFSET,
	NULL,
};
/*
 * VOP_BWRITE: dispatch the buffer-write operation through the
 * operation vector of the buffer's vnode (bp->b_vp) via VCALL().
 * Returns the error from the underlying file system operation.
 */
int
VOP_BWRITE(struct buf *bp)
{
	int error;
	bool mpsafe;
	struct vop_bwrite_args a;
#ifdef VNODE_LOCKDEBUG
#endif
	a.a_desc = VDESC(vop_bwrite);
	a.a_bp = bp;
	/* Non-MP-safe file systems are called under the big kernel lock. */
	mpsafe = (bp->b_vp->v_vflag & VV_MPSAFE);
	if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
	error = (VCALL(bp->b_vp, VOFFSET(vop_bwrite), &a));
	if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
	return error;
}

/* End of special cases */

/* Offsets of vnode-pointer arguments in vop_lookup_args (VDESC_NO_OFFSET ends the list). */
const int vop_lookup_vp_offsets[] = {
	VOPARG_OFFSETOF(struct vop_lookup_args,a_dvp),
	VDESC_NO_OFFSET
};
/* Operation descriptor for vop_lookup (records a_vpp and a_cnp offsets). */
const struct vnodeop_desc vop_lookup_desc = {
	VOP_LOOKUP_DESCOFFSET,
	"vop_lookup",
	0,
	vop_lookup_vp_offsets,
	VOPARG_OFFSETOF(struct vop_lookup_args, a_vpp),
	VDESC_NO_OFFSET,
	VOPARG_OFFSETOF(struct vop_lookup_args, a_cnp),
	NULL,
};
/*
 * VOP_LOOKUP: dispatch the name-lookup operation through dvp's
 * operation vector.  On success the resulting vnode is returned via
 * *vpp.  Returns the error from the underlying operation.
 */
int
VOP_LOOKUP(struct vnode *dvp,
    struct vnode **vpp,
    struct componentname *cnp)
{
	int error;
	bool mpsafe;
	struct vop_lookup_args a;
#ifdef VNODE_LOCKDEBUG
#endif
	a.a_desc = VDESC(vop_lookup);
	a.a_dvp = dvp;
	a.a_vpp = vpp;
	a.a_cnp = cnp;
	/* Non-MP-safe file systems are called under the big kernel lock. */
	mpsafe = (dvp->v_vflag & VV_MPSAFE);
	if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
	error = (VCALL(dvp, VOFFSET(vop_lookup), &a));
	if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
#ifdef DIAGNOSTIC
	/* On success the returned vnode must have its sizes set. */
	if (error == 0)
		KASSERT((*vpp)->v_size != VSIZENOTSET
		    && (*vpp)->v_writesize != VSIZENOTSET);
#endif /* DIAGNOSTIC */
	return error;
}

/* Offsets of vnode-pointer arguments in vop_create_args (VDESC_NO_OFFSET ends the list). */
const int vop_create_vp_offsets[] = {
	VOPARG_OFFSETOF(struct vop_create_args,a_dvp),
	VDESC_NO_OFFSET
};
/* Operation descriptor for vop_create; dvp is released on return (VDESC_VP0_WILLPUT). */
const struct vnodeop_desc vop_create_desc = {
	VOP_CREATE_DESCOFFSET,
	"vop_create",
	0 | VDESC_VP0_WILLPUT,
	vop_create_vp_offsets,
	VOPARG_OFFSETOF(struct vop_create_args, a_vpp),
	VDESC_NO_OFFSET,
	VOPARG_OFFSETOF(struct vop_create_args, a_cnp),
	NULL,
};
/*
 * VOP_CREATE: dispatch the file-create operation through dvp's
 * operation vector.  Caller must hold dvp exclusively locked (checked
 * under VNODE_LOCKDEBUG).  The new vnode is returned via *vpp.
 */
int
VOP_CREATE(struct vnode *dvp,
    struct vnode **vpp,
    struct componentname *cnp,
    struct vattr *vap)
{
	int error;
	bool mpsafe;
	struct vop_create_args a;
#ifdef VNODE_LOCKDEBUG
	int islocked_dvp;
#endif
	a.a_desc = VDESC(vop_create);
	a.a_dvp = dvp;
#ifdef VNODE_LOCKDEBUG
	/* dvp must be exclusively locked (when its locks are functional). */
	islocked_dvp = (dvp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(dvp) == LK_EXCLUSIVE) : 1;
	if (islocked_dvp != 1)
		panic("vop_create: dvp: locked %d, expected %d", islocked_dvp, 1);
#endif
	a.a_vpp = vpp;
	a.a_cnp = cnp;
	a.a_vap = vap;
	/* Non-MP-safe file systems are called under the big kernel lock. */
	mpsafe = (dvp->v_vflag & VV_MPSAFE);
	if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
	error = (VCALL(dvp, VOFFSET(vop_create), &a));
	if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
#ifdef DIAGNOSTIC
	/* On success the returned vnode must have its sizes set. */
	if (error == 0)
		KASSERT((*vpp)->v_size != VSIZENOTSET
		    && (*vpp)->v_writesize != VSIZENOTSET);
#endif /* DIAGNOSTIC */
	return error;
}

/* Offsets of vnode-pointer arguments in vop_mknod_args (VDESC_NO_OFFSET ends the list). */
const int vop_mknod_vp_offsets[] = {
	VOPARG_OFFSETOF(struct vop_mknod_args,a_dvp),
	VDESC_NO_OFFSET
};
/* Operation descriptor for vop_mknod; dvp is released on return (VDESC_VP0_WILLPUT). */
const struct vnodeop_desc vop_mknod_desc = {
	VOP_MKNOD_DESCOFFSET,
	"vop_mknod",
	0 | VDESC_VP0_WILLPUT,
	vop_mknod_vp_offsets,
	VOPARG_OFFSETOF(struct vop_mknod_args, a_vpp),
	VDESC_NO_OFFSET,
	VOPARG_OFFSETOF(struct vop_mknod_args, a_cnp),
	NULL,
};
/*
 * VOP_MKNOD: dispatch the make-special-node operation through dvp's
 * operation vector.  Caller must hold dvp exclusively locked (checked
 * under VNODE_LOCKDEBUG).  The new vnode is returned via *vpp.
 */
int
VOP_MKNOD(struct vnode *dvp,
    struct vnode **vpp,
    struct componentname *cnp,
    struct vattr *vap)
{
	int error;
	bool mpsafe;
	struct vop_mknod_args a;
#ifdef VNODE_LOCKDEBUG
	int islocked_dvp;
#endif
	a.a_desc = VDESC(vop_mknod);
	a.a_dvp = dvp;
#ifdef VNODE_LOCKDEBUG
	/* dvp must be exclusively locked (when its locks are functional). */
	islocked_dvp = (dvp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(dvp) == LK_EXCLUSIVE) : 1;
	if (islocked_dvp != 1)
		panic("vop_mknod: dvp: locked %d, expected %d", islocked_dvp, 1);
#endif
	a.a_vpp = vpp;
	a.a_cnp = cnp;
	a.a_vap = vap;
	/* Non-MP-safe file systems are called under the big kernel lock. */
	mpsafe = (dvp->v_vflag & VV_MPSAFE);
	if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
	error = (VCALL(dvp, VOFFSET(vop_mknod), &a));
	if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
#ifdef DIAGNOSTIC
	/* On success the returned vnode must have its sizes set. */
	if (error == 0)
		KASSERT((*vpp)->v_size != VSIZENOTSET
		    && (*vpp)->v_writesize != VSIZENOTSET);
#endif /* DIAGNOSTIC */
	return error;
}

/* Offsets of vnode-pointer arguments in vop_open_args (VDESC_NO_OFFSET ends the list). */
const int vop_open_vp_offsets[] = {
	VOPARG_OFFSETOF(struct vop_open_args,a_vp),
	VDESC_NO_OFFSET
};
/* Operation descriptor for vop_open (records a_cred offset). */
const struct vnodeop_desc vop_open_desc = {
	VOP_OPEN_DESCOFFSET,
	"vop_open",
	0,
	vop_open_vp_offsets,
	VDESC_NO_OFFSET,
	VOPARG_OFFSETOF(struct vop_open_args, a_cred),
	VDESC_NO_OFFSET,
	NULL,
};
/*
 * VOP_OPEN: dispatch the open operation through vp's operation vector.
 * Caller must hold vp exclusively locked (checked under VNODE_LOCKDEBUG).
 */
int
VOP_OPEN(struct vnode *vp,
    int mode,
    kauth_cred_t cred)
{
	int error;
	bool mpsafe;
	struct vop_open_args a;
#ifdef VNODE_LOCKDEBUG
	int islocked_vp;
#endif
	a.a_desc = VDESC(vop_open);
	a.a_vp = vp;
#ifdef VNODE_LOCKDEBUG
	/* vp must be exclusively locked (when its locks are functional). */
	islocked_vp = (vp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(vp) == LK_EXCLUSIVE) : 1;
	if (islocked_vp != 1)
		panic("vop_open: vp: locked %d, expected %d", islocked_vp, 1);
#endif
	a.a_mode = mode;
	a.a_cred = cred;
	/* Non-MP-safe file systems are called under the big kernel lock. */
	mpsafe = (vp->v_vflag & VV_MPSAFE);
	if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
	error = (VCALL(vp, VOFFSET(vop_open), &a));
	if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
	return error;
}

/* Offsets of vnode-pointer arguments in vop_close_args (VDESC_NO_OFFSET ends the list). */
const int vop_close_vp_offsets[] = {
	VOPARG_OFFSETOF(struct vop_close_args,a_vp),
	VDESC_NO_OFFSET
};
/* Operation descriptor for vop_close (records a_cred offset). */
const struct vnodeop_desc vop_close_desc = {
	VOP_CLOSE_DESCOFFSET,
	"vop_close",
	0,
	vop_close_vp_offsets,
	VDESC_NO_OFFSET,
	VOPARG_OFFSETOF(struct vop_close_args, a_cred),
	VDESC_NO_OFFSET,
	NULL,
};
/*
 * VOP_CLOSE: dispatch the close operation through vp's operation vector.
 * Caller must hold vp exclusively locked (checked under VNODE_LOCKDEBUG).
 */
int
VOP_CLOSE(struct vnode *vp,
    int fflag,
    kauth_cred_t cred)
{
	int error;
	bool mpsafe;
	struct vop_close_args a;
#ifdef VNODE_LOCKDEBUG
	int islocked_vp;
#endif
	a.a_desc = VDESC(vop_close);
	a.a_vp = vp;
#ifdef VNODE_LOCKDEBUG
	/* vp must be exclusively locked (when its locks are functional). */
	islocked_vp = (vp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(vp) == LK_EXCLUSIVE) : 1;
	if (islocked_vp != 1)
		panic("vop_close: vp: locked %d, expected %d", islocked_vp, 1);
#endif
	a.a_fflag = fflag;
	a.a_cred = cred;
	/* Non-MP-safe file systems are called under the big kernel lock. */
	mpsafe = (vp->v_vflag & VV_MPSAFE);
	if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
	error = (VCALL(vp, VOFFSET(vop_close), &a));
	if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
	return error;
}

/* Offsets of vnode-pointer arguments in vop_access_args (VDESC_NO_OFFSET ends the list). */
const int vop_access_vp_offsets[] = {
	VOPARG_OFFSETOF(struct vop_access_args,a_vp),
	VDESC_NO_OFFSET
};
/* Operation descriptor for vop_access (records a_cred offset). */
const struct vnodeop_desc vop_access_desc = {
	VOP_ACCESS_DESCOFFSET,
	"vop_access",
	0,
	vop_access_vp_offsets,
	VDESC_NO_OFFSET,
	VOPARG_OFFSETOF(struct vop_access_args, a_cred),
	VDESC_NO_OFFSET,
	NULL,
};
/*
 * VOP_ACCESS: dispatch the access-check operation through vp's
 * operation vector.  Caller must hold vp exclusively locked (checked
 * under VNODE_LOCKDEBUG).
 */
int
VOP_ACCESS(struct vnode *vp,
    int mode,
    kauth_cred_t cred)
{
	int error;
	bool mpsafe;
	struct vop_access_args a;
#ifdef VNODE_LOCKDEBUG
	int islocked_vp;
#endif
	a.a_desc = VDESC(vop_access);
	a.a_vp = vp;
#ifdef VNODE_LOCKDEBUG
	/* vp must be exclusively locked (when its locks are functional). */
	islocked_vp = (vp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(vp) == LK_EXCLUSIVE) : 1;
	if (islocked_vp != 1)
		panic("vop_access: vp: locked %d, expected %d", islocked_vp, 1);
#endif
	a.a_mode = mode;
	a.a_cred = cred;
	/* Non-MP-safe file systems are called under the big kernel lock. */
	mpsafe = (vp->v_vflag & VV_MPSAFE);
	if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
	error = (VCALL(vp, VOFFSET(vop_access), &a));
	if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
	return error;
}

/* Offsets of vnode-pointer arguments in vop_getattr_args (VDESC_NO_OFFSET ends the list). */
const int vop_getattr_vp_offsets[] = {
	VOPARG_OFFSETOF(struct vop_getattr_args,a_vp),
	VDESC_NO_OFFSET
};
/* Operation descriptor for vop_getattr (records a_cred offset). */
const struct vnodeop_desc vop_getattr_desc = {
	VOP_GETATTR_DESCOFFSET,
	"vop_getattr",
	0,
	vop_getattr_vp_offsets,
	VDESC_NO_OFFSET,
	VOPARG_OFFSETOF(struct vop_getattr_args, a_cred),
	VDESC_NO_OFFSET,
	NULL,
};
/*
 * VOP_GETATTR: dispatch the get-attributes operation through vp's
 * operation vector; attributes are filled into *vap.  No lock
 * assertion is generated for this operation.
 */
int
VOP_GETATTR(struct vnode *vp,
    struct vattr *vap,
    kauth_cred_t cred)
{
	int error;
	bool mpsafe;
	struct vop_getattr_args a;
#ifdef VNODE_LOCKDEBUG
#endif
	a.a_desc = VDESC(vop_getattr);
	a.a_vp = vp;
	a.a_vap = vap;
	a.a_cred = cred;
	/* Non-MP-safe file systems are called under the big kernel lock. */
	mpsafe = (vp->v_vflag & VV_MPSAFE);
	if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
	error = (VCALL(vp, VOFFSET(vop_getattr), &a));
	if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
	return error;
}

/* Offsets of vnode-pointer arguments in vop_setattr_args (VDESC_NO_OFFSET ends the list). */
const int vop_setattr_vp_offsets[] = {
	VOPARG_OFFSETOF(struct vop_setattr_args,a_vp),
	VDESC_NO_OFFSET
};
/* Operation descriptor for vop_setattr (records a_cred offset). */
const struct vnodeop_desc vop_setattr_desc = {
	VOP_SETATTR_DESCOFFSET,
	"vop_setattr",
	0,
	vop_setattr_vp_offsets,
	VDESC_NO_OFFSET,
	VOPARG_OFFSETOF(struct vop_setattr_args, a_cred),
	VDESC_NO_OFFSET,
	NULL,
};
/*
 * VOP_SETATTR: dispatch the set-attributes operation through vp's
 * operation vector.  Caller must hold vp exclusively locked (checked
 * under VNODE_LOCKDEBUG).
 */
int
VOP_SETATTR(struct vnode *vp,
    struct vattr *vap,
    kauth_cred_t cred)
{
	int error;
	bool mpsafe;
	struct vop_setattr_args a;
#ifdef VNODE_LOCKDEBUG
	int islocked_vp;
#endif
	a.a_desc = VDESC(vop_setattr);
	a.a_vp = vp;
#ifdef VNODE_LOCKDEBUG
	/* vp must be exclusively locked (when its locks are functional). */
	islocked_vp = (vp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(vp) == LK_EXCLUSIVE) : 1;
	if (islocked_vp != 1)
		panic("vop_setattr: vp: locked %d, expected %d", islocked_vp, 1);
#endif
	a.a_vap = vap;
	a.a_cred = cred;
	/* Non-MP-safe file systems are called under the big kernel lock. */
	mpsafe = (vp->v_vflag & VV_MPSAFE);
	if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
	error = (VCALL(vp, VOFFSET(vop_setattr), &a));
	if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
	return error;
}

/* Offsets of vnode-pointer arguments in vop_read_args (VDESC_NO_OFFSET ends the list). */
const int vop_read_vp_offsets[] = {
	VOPARG_OFFSETOF(struct vop_read_args,a_vp),
	VDESC_NO_OFFSET
};
/* Operation descriptor for vop_read (records a_cred offset). */
const struct vnodeop_desc vop_read_desc = {
	VOP_READ_DESCOFFSET,
	"vop_read",
	0,
	vop_read_vp_offsets,
	VDESC_NO_OFFSET,
	VOPARG_OFFSETOF(struct vop_read_args, a_cred),
	VDESC_NO_OFFSET,
	NULL,
};
/*
 * VOP_READ: dispatch the read operation through vp's operation vector;
 * data is transferred as described by uio.  Caller must hold vp
 * exclusively locked (checked under VNODE_LOCKDEBUG).
 */
int
VOP_READ(struct vnode *vp,
    struct uio *uio,
    int ioflag,
    kauth_cred_t cred)
{
	int error;
	bool mpsafe;
	struct vop_read_args a;
#ifdef VNODE_LOCKDEBUG
	int islocked_vp;
#endif
	a.a_desc = VDESC(vop_read);
	a.a_vp = vp;
#ifdef VNODE_LOCKDEBUG
	/* vp must be exclusively locked (when its locks are functional). */
	islocked_vp = (vp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(vp) == LK_EXCLUSIVE) : 1;
	if (islocked_vp != 1)
		panic("vop_read: vp: locked %d, expected %d", islocked_vp, 1);
#endif
	a.a_uio = uio;
	a.a_ioflag = ioflag;
	a.a_cred = cred;
	/* Non-MP-safe file systems are called under the big kernel lock. */
	mpsafe = (vp->v_vflag & VV_MPSAFE);
	if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
	error = (VCALL(vp, VOFFSET(vop_read), &a));
	if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
	return error;
}

/* Offsets of vnode-pointer arguments in vop_write_args (VDESC_NO_OFFSET ends the list). */
const int vop_write_vp_offsets[] = {
	VOPARG_OFFSETOF(struct vop_write_args,a_vp),
	VDESC_NO_OFFSET
};
/* Operation descriptor for vop_write (records a_cred offset). */
const struct vnodeop_desc vop_write_desc = {
	VOP_WRITE_DESCOFFSET,
	"vop_write",
	0,
	vop_write_vp_offsets,
	VDESC_NO_OFFSET,
	VOPARG_OFFSETOF(struct vop_write_args, a_cred),
	VDESC_NO_OFFSET,
	NULL,
};
/*
 * VOP_WRITE: dispatch the write operation through vp's operation
 * vector; data is transferred as described by uio.  Caller must hold
 * vp exclusively locked (checked under VNODE_LOCKDEBUG).
 */
int
VOP_WRITE(struct vnode *vp,
    struct uio *uio,
    int ioflag,
    kauth_cred_t cred)
{
	int error;
	bool mpsafe;
	struct vop_write_args a;
#ifdef VNODE_LOCKDEBUG
	int islocked_vp;
#endif
	a.a_desc = VDESC(vop_write);
	a.a_vp = vp;
#ifdef VNODE_LOCKDEBUG
	/* vp must be exclusively locked (when its locks are functional). */
	islocked_vp = (vp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(vp) == LK_EXCLUSIVE) : 1;
	if (islocked_vp != 1)
		panic("vop_write: vp: locked %d, expected %d", islocked_vp, 1);
#endif
	a.a_uio = uio;
	a.a_ioflag = ioflag;
	a.a_cred = cred;
	/* Non-MP-safe file systems are called under the big kernel lock. */
	mpsafe = (vp->v_vflag & VV_MPSAFE);
	if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
	error = (VCALL(vp, VOFFSET(vop_write), &a));
	if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
	return error;
}

/* Offsets of vnode-pointer arguments in vop_ioctl_args (VDESC_NO_OFFSET ends the list). */
const int vop_ioctl_vp_offsets[] = {
	VOPARG_OFFSETOF(struct vop_ioctl_args,a_vp),
	VDESC_NO_OFFSET
};
/* Operation descriptor for vop_ioctl (records a_cred offset). */
const struct vnodeop_desc vop_ioctl_desc = {
	VOP_IOCTL_DESCOFFSET,
	"vop_ioctl",
	0,
	vop_ioctl_vp_offsets,
	VDESC_NO_OFFSET,
	VOPARG_OFFSETOF(struct vop_ioctl_args, a_cred),
	VDESC_NO_OFFSET,
	NULL,
};
/*
 * VOP_IOCTL: dispatch the ioctl operation through vp's operation
 * vector.  vp is expected to be UNLOCKED here (checked under
 * VNODE_LOCKDEBUG).
 */
int
VOP_IOCTL(struct vnode *vp,
    u_long command,
    void *data,
    int fflag,
    kauth_cred_t cred)
{
	int error;
	bool mpsafe;
	struct vop_ioctl_args a;
#ifdef VNODE_LOCKDEBUG
	int islocked_vp;
#endif
	a.a_desc = VDESC(vop_ioctl);
	a.a_vp = vp;
#ifdef VNODE_LOCKDEBUG
	/* vp must NOT be exclusively locked here. */
	islocked_vp = (vp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(vp) == LK_EXCLUSIVE) : 0;
	if (islocked_vp != 0)
		panic("vop_ioctl: vp: locked %d, expected %d", islocked_vp, 0);
#endif
	a.a_command = command;
	a.a_data = data;
	a.a_fflag = fflag;
	a.a_cred = cred;
	/* Non-MP-safe file systems are called under the big kernel lock. */
	mpsafe = (vp->v_vflag & VV_MPSAFE);
	if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
	error = (VCALL(vp, VOFFSET(vop_ioctl), &a));
	if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
	return error;
}

/* Offsets of vnode-pointer arguments in vop_fcntl_args (VDESC_NO_OFFSET ends the list). */
const int vop_fcntl_vp_offsets[] = {
	VOPARG_OFFSETOF(struct vop_fcntl_args,a_vp),
	VDESC_NO_OFFSET
};
/* Operation descriptor for vop_fcntl (records a_cred offset). */
const struct vnodeop_desc vop_fcntl_desc = {
	VOP_FCNTL_DESCOFFSET,
	"vop_fcntl",
	0,
	vop_fcntl_vp_offsets,
	VDESC_NO_OFFSET,
	VOPARG_OFFSETOF(struct vop_fcntl_args, a_cred),
	VDESC_NO_OFFSET,
	NULL,
};
/*
 * VOP_FCNTL: dispatch the fcntl operation through vp's operation
 * vector.  vp is expected to be UNLOCKED here (checked under
 * VNODE_LOCKDEBUG).
 */
int
VOP_FCNTL(struct vnode *vp,
    u_int command,
    void *data,
    int fflag,
    kauth_cred_t cred)
{
	int error;
	bool mpsafe;
	struct vop_fcntl_args a;
#ifdef VNODE_LOCKDEBUG
	int islocked_vp;
#endif
	a.a_desc = VDESC(vop_fcntl);
	a.a_vp = vp;
#ifdef VNODE_LOCKDEBUG
	/* vp must NOT be exclusively locked here. */
	islocked_vp = (vp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(vp) == LK_EXCLUSIVE) : 0;
	if (islocked_vp != 0)
		panic("vop_fcntl: vp: locked %d, expected %d", islocked_vp, 0);
#endif
	a.a_command = command;
	a.a_data = data;
	a.a_fflag = fflag;
	a.a_cred = cred;
	/* Non-MP-safe file systems are called under the big kernel lock. */
	mpsafe = (vp->v_vflag & VV_MPSAFE);
	if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
	error = (VCALL(vp, VOFFSET(vop_fcntl), &a));
	if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
	return error;
}

/* Offsets of vnode-pointer arguments in vop_poll_args (VDESC_NO_OFFSET ends the list). */
const int vop_poll_vp_offsets[] = {
	VOPARG_OFFSETOF(struct vop_poll_args,a_vp),
	VDESC_NO_OFFSET
};
/* Operation descriptor for vop_poll (no cred/componentname arguments). */
const struct vnodeop_desc vop_poll_desc = {
	VOP_POLL_DESCOFFSET,
	"vop_poll",
	0,
	vop_poll_vp_offsets,
	VDESC_NO_OFFSET,
	VDESC_NO_OFFSET,
	VDESC_NO_OFFSET,
	NULL,
};
/*
 * VOP_POLL: dispatch the poll operation through vp's operation vector.
 * vp is expected to be UNLOCKED here (checked under VNODE_LOCKDEBUG).
 */
int
VOP_POLL(struct vnode *vp,
    int events)
{
	int error;
	bool mpsafe;
	struct vop_poll_args a;
#ifdef VNODE_LOCKDEBUG
	int islocked_vp;
#endif
	a.a_desc = VDESC(vop_poll);
	a.a_vp = vp;
#ifdef VNODE_LOCKDEBUG
	/* vp must NOT be exclusively locked here. */
	islocked_vp = (vp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(vp) == LK_EXCLUSIVE) : 0;
	if (islocked_vp != 0)
		panic("vop_poll: vp: locked %d, expected %d", islocked_vp, 0);
#endif
	a.a_events = events;
	/* Non-MP-safe file systems are called under the big kernel lock. */
	mpsafe = (vp->v_vflag & VV_MPSAFE);
	if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
	error = (VCALL(vp, VOFFSET(vop_poll), &a));
	if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
	return error;
}

/* Offsets of vnode-pointer arguments in vop_kqfilter_args (VDESC_NO_OFFSET ends the list). */
const int vop_kqfilter_vp_offsets[] = {
	VOPARG_OFFSETOF(struct vop_kqfilter_args,a_vp),
	VDESC_NO_OFFSET
};
/* Operation descriptor for vop_kqfilter (no cred/componentname arguments). */
const struct vnodeop_desc vop_kqfilter_desc = {
	VOP_KQFILTER_DESCOFFSET,
	"vop_kqfilter",
	0,
	vop_kqfilter_vp_offsets,
	VDESC_NO_OFFSET,
	VDESC_NO_OFFSET,
	VDESC_NO_OFFSET,
	NULL,
};
/*
 * VOP_KQFILTER: dispatch the kqueue-filter attach operation through
 * vp's operation vector.  vp is expected to be UNLOCKED here (checked
 * under VNODE_LOCKDEBUG).
 */
int
VOP_KQFILTER(struct vnode *vp,
    struct knote *kn)
{
	int error;
	bool mpsafe;
	struct vop_kqfilter_args a;
#ifdef VNODE_LOCKDEBUG
	int islocked_vp;
#endif
	a.a_desc = VDESC(vop_kqfilter);
	a.a_vp = vp;
#ifdef VNODE_LOCKDEBUG
	/* vp must NOT be exclusively locked here. */
	islocked_vp = (vp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(vp) == LK_EXCLUSIVE) : 0;
	if (islocked_vp != 0)
		panic("vop_kqfilter: vp: locked %d, expected %d", islocked_vp, 0);
#endif
	a.a_kn = kn;
	/* Non-MP-safe file systems are called under the big kernel lock. */
	mpsafe = (vp->v_vflag & VV_MPSAFE);
	if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
	error = (VCALL(vp, VOFFSET(vop_kqfilter), &a));
	if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
	return error;
}

/* Offsets of vnode-pointer arguments in vop_revoke_args (VDESC_NO_OFFSET ends the list). */
const int vop_revoke_vp_offsets[] = {
	VOPARG_OFFSETOF(struct vop_revoke_args,a_vp),
	VDESC_NO_OFFSET
};
/* Operation descriptor for vop_revoke (no cred/componentname arguments). */
const struct vnodeop_desc vop_revoke_desc = {
	VOP_REVOKE_DESCOFFSET,
	"vop_revoke",
	0,
	vop_revoke_vp_offsets,
	VDESC_NO_OFFSET,
	VDESC_NO_OFFSET,
	VDESC_NO_OFFSET,
	NULL,
};
/*
 * VOP_REVOKE: dispatch the revoke operation through vp's operation
 * vector.  vp is expected to be UNLOCKED here (checked under
 * VNODE_LOCKDEBUG).
 */
int
VOP_REVOKE(struct vnode *vp,
    int flags)
{
	int error;
	bool mpsafe;
	struct vop_revoke_args a;
#ifdef VNODE_LOCKDEBUG
	int islocked_vp;
#endif
	a.a_desc = VDESC(vop_revoke);
	a.a_vp = vp;
#ifdef VNODE_LOCKDEBUG
	/* vp must NOT be exclusively locked here. */
	islocked_vp = (vp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(vp) == LK_EXCLUSIVE) : 0;
	if (islocked_vp != 0)
		panic("vop_revoke: vp: locked %d, expected %d", islocked_vp, 0);
#endif
	a.a_flags = flags;
	/* Non-MP-safe file systems are called under the big kernel lock. */
	mpsafe = (vp->v_vflag & VV_MPSAFE);
	if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
	error = (VCALL(vp, VOFFSET(vop_revoke), &a));
	if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
	return error;
}

/* Offsets of vnode-pointer arguments in vop_mmap_args (VDESC_NO_OFFSET ends the list). */
const int vop_mmap_vp_offsets[] = {
	VOPARG_OFFSETOF(struct vop_mmap_args,a_vp),
	VDESC_NO_OFFSET
};
/* Operation descriptor for vop_mmap (records a_cred offset). */
const struct vnodeop_desc vop_mmap_desc = {
	VOP_MMAP_DESCOFFSET,
	"vop_mmap",
	0,
	vop_mmap_vp_offsets,
	VDESC_NO_OFFSET,
	VOPARG_OFFSETOF(struct vop_mmap_args, a_cred),
	VDESC_NO_OFFSET,
	NULL,
};
/*
 * VOP_MMAP: dispatch the mmap notification through vp's operation
 * vector.  No lock assertion is generated for this operation.
 */
int
VOP_MMAP(struct vnode *vp,
    vm_prot_t prot,
    kauth_cred_t cred)
{
	int error;
	bool mpsafe;
	struct vop_mmap_args a;
#ifdef VNODE_LOCKDEBUG
#endif
	a.a_desc = VDESC(vop_mmap);
	a.a_vp = vp;
	a.a_prot = prot;
	a.a_cred = cred;
	/* Non-MP-safe file systems are called under the big kernel lock. */
	mpsafe = (vp->v_vflag & VV_MPSAFE);
	if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
	error = (VCALL(vp, VOFFSET(vop_mmap), &a));
	if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
	return error;
}

/* Offsets of vnode-pointer arguments in vop_fsync_args (VDESC_NO_OFFSET ends the list). */
const int vop_fsync_vp_offsets[] = {
	VOPARG_OFFSETOF(struct vop_fsync_args,a_vp),
	VDESC_NO_OFFSET
};
/* Operation descriptor for vop_fsync (records a_cred offset). */
const struct vnodeop_desc vop_fsync_desc = {
	VOP_FSYNC_DESCOFFSET,
	"vop_fsync",
	0,
	vop_fsync_vp_offsets,
	VDESC_NO_OFFSET,
	VOPARG_OFFSETOF(struct vop_fsync_args, a_cred),
	VDESC_NO_OFFSET,
	NULL,
};
/*
 * VOP_FSYNC: dispatch the fsync operation over the byte range
 * [offlo, offhi] through vp's operation vector.  Caller must hold vp
 * exclusively locked (checked under VNODE_LOCKDEBUG).
 */
int
VOP_FSYNC(struct vnode *vp,
    kauth_cred_t cred,
    int flags,
    off_t offlo,
    off_t offhi)
{
	int error;
	bool mpsafe;
	struct vop_fsync_args a;
#ifdef VNODE_LOCKDEBUG
	int islocked_vp;
#endif
	a.a_desc = VDESC(vop_fsync);
	a.a_vp = vp;
#ifdef VNODE_LOCKDEBUG
	/* vp must be exclusively locked (when its locks are functional). */
	islocked_vp = (vp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(vp) == LK_EXCLUSIVE) : 1;
	if (islocked_vp != 1)
		panic("vop_fsync: vp: locked %d, expected %d", islocked_vp, 1);
#endif
	a.a_cred = cred;
	a.a_flags = flags;
	a.a_offlo = offlo;
	a.a_offhi = offhi;
	/* Non-MP-safe file systems are called under the big kernel lock. */
	mpsafe = (vp->v_vflag & VV_MPSAFE);
	if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
	error = (VCALL(vp, VOFFSET(vop_fsync), &a));
	if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
	return error;
}

/* Offsets of vnode-pointer arguments in vop_seek_args (VDESC_NO_OFFSET ends the list). */
const int vop_seek_vp_offsets[] = {
	VOPARG_OFFSETOF(struct vop_seek_args,a_vp),
	VDESC_NO_OFFSET
};
/* Operation descriptor for vop_seek (records a_cred offset). */
const struct vnodeop_desc vop_seek_desc = {
	VOP_SEEK_DESCOFFSET,
	"vop_seek",
	0,
	vop_seek_vp_offsets,
	VDESC_NO_OFFSET,
	VOPARG_OFFSETOF(struct vop_seek_args, a_cred),
	VDESC_NO_OFFSET,
	NULL,
};
/*
 * VOP_SEEK: dispatch the seek-validation operation through vp's
 * operation vector.  No lock assertion is generated for this operation.
 */
int
VOP_SEEK(struct vnode *vp,
    off_t oldoff,
    off_t newoff,
    kauth_cred_t cred)
{
	int error;
	bool mpsafe;
	struct vop_seek_args a;
#ifdef VNODE_LOCKDEBUG
#endif
	a.a_desc = VDESC(vop_seek);
	a.a_vp = vp;
	a.a_oldoff = oldoff;
	a.a_newoff = newoff;
	a.a_cred = cred;
	/* Non-MP-safe file systems are called under the big kernel lock. */
	mpsafe = (vp->v_vflag & VV_MPSAFE);
	if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
	error = (VCALL(vp, VOFFSET(vop_seek), &a));
	if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
	return error;
}

/* Offsets of vnode-pointer arguments in vop_remove_args (VDESC_NO_OFFSET ends the list). */
const int vop_remove_vp_offsets[] = {
	VOPARG_OFFSETOF(struct vop_remove_args,a_dvp),
	VOPARG_OFFSETOF(struct vop_remove_args,a_vp),
	VDESC_NO_OFFSET
};
/* Operation descriptor for vop_remove; both dvp and vp are released on return. */
const struct vnodeop_desc vop_remove_desc = {
	VOP_REMOVE_DESCOFFSET,
	"vop_remove",
	0 | VDESC_VP0_WILLPUT | VDESC_VP1_WILLPUT,
	vop_remove_vp_offsets,
	VDESC_NO_OFFSET,
	VDESC_NO_OFFSET,
	VOPARG_OFFSETOF(struct vop_remove_args, a_cnp),
	NULL,
};
/*
 * VOP_REMOVE: dispatch the file-remove operation through dvp's
 * operation vector.  Caller must hold both dvp and vp exclusively
 * locked (checked under VNODE_LOCKDEBUG).
 */
int
VOP_REMOVE(struct vnode *dvp,
    struct vnode *vp,
    struct componentname *cnp)
{
	int error;
	bool mpsafe;
	struct vop_remove_args a;
#ifdef VNODE_LOCKDEBUG
	int islocked_dvp;
	int islocked_vp;
#endif
	a.a_desc = VDESC(vop_remove);
	a.a_dvp = dvp;
#ifdef VNODE_LOCKDEBUG
	/* dvp must be exclusively locked (when its locks are functional). */
	islocked_dvp = (dvp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(dvp) == LK_EXCLUSIVE) : 1;
	if (islocked_dvp != 1)
		panic("vop_remove: dvp: locked %d, expected %d", islocked_dvp, 1);
#endif
	a.a_vp = vp;
#ifdef VNODE_LOCKDEBUG
	/* vp must be exclusively locked as well. */
	islocked_vp = (vp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(vp) == LK_EXCLUSIVE) : 1;
	if (islocked_vp != 1)
		panic("vop_remove: vp: locked %d, expected %d", islocked_vp, 1);
#endif
	a.a_cnp = cnp;
	/* Non-MP-safe file systems are called under the big kernel lock. */
	mpsafe = (dvp->v_vflag & VV_MPSAFE);
	if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
	error = (VCALL(dvp, VOFFSET(vop_remove), &a));
	if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
	return error;
}

/* Offsets of vnode-pointer arguments in vop_link_args (VDESC_NO_OFFSET ends the list). */
const int vop_link_vp_offsets[] = {
	VOPARG_OFFSETOF(struct vop_link_args,a_dvp),
	VOPARG_OFFSETOF(struct vop_link_args,a_vp),
	VDESC_NO_OFFSET
};
/* Operation descriptor for vop_link; dvp is released on return (VDESC_VP0_WILLPUT). */
const struct vnodeop_desc vop_link_desc = {
	VOP_LINK_DESCOFFSET,
	"vop_link",
	0 | VDESC_VP0_WILLPUT,
	vop_link_vp_offsets,
	VDESC_NO_OFFSET,
	VDESC_NO_OFFSET,
	VOPARG_OFFSETOF(struct vop_link_args, a_cnp),
	NULL,
};
/*
 * VOP_LINK: dispatch the hard-link operation through dvp's operation
 * vector.  Caller must hold dvp exclusively locked and vp UNLOCKED
 * (checked under VNODE_LOCKDEBUG).
 */
int
VOP_LINK(struct vnode *dvp,
    struct vnode *vp,
    struct componentname *cnp)
{
	int error;
	bool mpsafe;
	struct vop_link_args a;
#ifdef VNODE_LOCKDEBUG
	int islocked_dvp;
	int islocked_vp;
#endif
	a.a_desc = VDESC(vop_link);
	a.a_dvp = dvp;
#ifdef VNODE_LOCKDEBUG
	/* dvp must be exclusively locked (when its locks are functional). */
	islocked_dvp = (dvp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(dvp) == LK_EXCLUSIVE) : 1;
	if (islocked_dvp != 1)
		panic("vop_link: dvp: locked %d, expected %d", islocked_dvp, 1);
#endif
	a.a_vp = vp;
#ifdef VNODE_LOCKDEBUG
	/* vp must NOT be exclusively locked here. */
	islocked_vp = (vp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(vp) == LK_EXCLUSIVE) : 0;
	if (islocked_vp != 0)
		panic("vop_link: vp: locked %d, expected %d", islocked_vp, 0);
#endif
	a.a_cnp = cnp;
	/* Non-MP-safe file systems are called under the big kernel lock. */
	mpsafe = (dvp->v_vflag & VV_MPSAFE);
	if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
	error = (VCALL(dvp, VOFFSET(vop_link), &a));
	if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
	return error;
}

/* Offsets of vnode-pointer arguments in vop_rename_args (VDESC_NO_OFFSET ends the list). */
const int vop_rename_vp_offsets[] = {
	VOPARG_OFFSETOF(struct vop_rename_args,a_fdvp),
	VOPARG_OFFSETOF(struct vop_rename_args,a_fvp),
	VOPARG_OFFSETOF(struct vop_rename_args,a_tdvp),
	VOPARG_OFFSETOF(struct vop_rename_args,a_tvp),
	VDESC_NO_OFFSET
};
/*
 * Operation descriptor for vop_rename: fdvp/fvp are vrele'd and
 * tdvp/tvp are vput on return, per the WILLRELE/WILLPUT flags.
 */
const struct vnodeop_desc vop_rename_desc = {
	VOP_RENAME_DESCOFFSET,
	"vop_rename",
	0 | VDESC_VP0_WILLRELE | VDESC_VP1_WILLRELE | VDESC_VP2_WILLPUT | VDESC_VP3_WILLPUT,
	vop_rename_vp_offsets,
	VDESC_NO_OFFSET,
	VDESC_NO_OFFSET,
	VOPARG_OFFSETOF(struct vop_rename_args, a_fcnp),
	NULL,
};
/*
 * VOP_RENAME: dispatch the rename operation through fdvp's operation
 * vector.  Lock expectations (checked under VNODE_LOCKDEBUG): fdvp and
 * fvp UNLOCKED, tdvp exclusively locked; tvp is not checked.
 */
int
VOP_RENAME(struct vnode *fdvp,
    struct vnode *fvp,
    struct componentname *fcnp,
    struct vnode *tdvp,
    struct vnode *tvp,
    struct componentname *tcnp)
{
	int error;
	bool mpsafe;
	struct vop_rename_args a;
#ifdef VNODE_LOCKDEBUG
	int islocked_fdvp;
	int islocked_fvp;
	int islocked_tdvp;
#endif
	a.a_desc = VDESC(vop_rename);
	a.a_fdvp = fdvp;
#ifdef VNODE_LOCKDEBUG
	/* fdvp must NOT be exclusively locked here. */
	islocked_fdvp = (fdvp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(fdvp) == LK_EXCLUSIVE) : 0;
	if (islocked_fdvp != 0)
		panic("vop_rename: fdvp: locked %d, expected %d", islocked_fdvp, 0);
#endif
	a.a_fvp = fvp;
#ifdef VNODE_LOCKDEBUG
	/* fvp must NOT be exclusively locked here. */
	islocked_fvp = (fvp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(fvp) == LK_EXCLUSIVE) : 0;
	if (islocked_fvp != 0)
		panic("vop_rename: fvp: locked %d, expected %d", islocked_fvp, 0);
#endif
	a.a_fcnp = fcnp;
	a.a_tdvp = tdvp;
#ifdef VNODE_LOCKDEBUG
	/* tdvp must be exclusively locked (when its locks are functional). */
	islocked_tdvp = (tdvp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(tdvp) == LK_EXCLUSIVE) : 1;
	if (islocked_tdvp != 1)
		panic("vop_rename: tdvp: locked %d, expected %d", islocked_tdvp, 1);
#endif
	a.a_tvp = tvp;
	a.a_tcnp = tcnp;
	/* Non-MP-safe file systems are called under the big kernel lock. */
	mpsafe = (fdvp->v_vflag & VV_MPSAFE);
	if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
	error = (VCALL(fdvp, VOFFSET(vop_rename), &a));
	if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
	return error;
}

/* Offsets of vnode-pointer arguments in vop_mkdir_args (VDESC_NO_OFFSET ends the list). */
const int vop_mkdir_vp_offsets[] = {
	VOPARG_OFFSETOF(struct vop_mkdir_args,a_dvp),
	VDESC_NO_OFFSET
};
/* Operation descriptor for vop_mkdir; dvp is released on return (VDESC_VP0_WILLPUT). */
const struct vnodeop_desc vop_mkdir_desc = {
	VOP_MKDIR_DESCOFFSET,
	"vop_mkdir",
	0 | VDESC_VP0_WILLPUT,
	vop_mkdir_vp_offsets,
	VOPARG_OFFSETOF(struct vop_mkdir_args, a_vpp),
	VDESC_NO_OFFSET,
	VOPARG_OFFSETOF(struct vop_mkdir_args, a_cnp),
	NULL,
};
/*
 * VOP_MKDIR: dispatch the make-directory operation through dvp's
 * operation vector.  Caller must hold dvp exclusively locked (checked
 * under VNODE_LOCKDEBUG).  The new vnode is returned via *vpp.
 */
int
VOP_MKDIR(struct vnode *dvp,
    struct vnode **vpp,
    struct componentname *cnp,
    struct vattr *vap)
{
	int error;
	bool mpsafe;
	struct vop_mkdir_args a;
#ifdef VNODE_LOCKDEBUG
	int islocked_dvp;
#endif
	a.a_desc = VDESC(vop_mkdir);
	a.a_dvp = dvp;
#ifdef VNODE_LOCKDEBUG
	/* dvp must be exclusively locked (when its locks are functional). */
	islocked_dvp = (dvp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(dvp) == LK_EXCLUSIVE) : 1;
	if (islocked_dvp != 1)
		panic("vop_mkdir: dvp: locked %d, expected %d", islocked_dvp, 1);
#endif
	a.a_vpp = vpp;
	a.a_cnp = cnp;
	a.a_vap = vap;
	/* Non-MP-safe file systems are called under the big kernel lock. */
	mpsafe = (dvp->v_vflag & VV_MPSAFE);
	if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
	error = (VCALL(dvp, VOFFSET(vop_mkdir), &a));
	if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
#ifdef DIAGNOSTIC
	/* On success the returned vnode must have its sizes set. */
	if (error == 0)
		KASSERT((*vpp)->v_size != VSIZENOTSET
		    && (*vpp)->v_writesize != VSIZENOTSET);
#endif /* DIAGNOSTIC */
	return error;
}

/* Offsets of vnode-pointer arguments in vop_rmdir_args (VDESC_NO_OFFSET ends the list). */
const int vop_rmdir_vp_offsets[] = {
	VOPARG_OFFSETOF(struct vop_rmdir_args,a_dvp),
	VOPARG_OFFSETOF(struct vop_rmdir_args,a_vp),
	VDESC_NO_OFFSET
};
/* Operation descriptor for vop_rmdir; both dvp and vp are released on return. */
const struct vnodeop_desc vop_rmdir_desc = {
	VOP_RMDIR_DESCOFFSET,
	"vop_rmdir",
	0 | VDESC_VP0_WILLPUT | VDESC_VP1_WILLPUT,
	vop_rmdir_vp_offsets,
	VDESC_NO_OFFSET,
	VDESC_NO_OFFSET,
	VOPARG_OFFSETOF(struct vop_rmdir_args, a_cnp),
	NULL,
};
/*
 * VOP_RMDIR: dispatch the remove-directory operation through dvp's
 * operation vector.  Caller must hold both dvp and vp exclusively
 * locked (checked under VNODE_LOCKDEBUG).
 */
int
VOP_RMDIR(struct vnode *dvp,
    struct vnode *vp,
    struct componentname *cnp)
{
	int error;
	bool mpsafe;
	struct vop_rmdir_args a;
#ifdef VNODE_LOCKDEBUG
	int islocked_dvp;
	int islocked_vp;
#endif
	a.a_desc = VDESC(vop_rmdir);
	a.a_dvp = dvp;
#ifdef VNODE_LOCKDEBUG
	/* dvp must be exclusively locked (when its locks are functional). */
	islocked_dvp = (dvp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(dvp) == LK_EXCLUSIVE) : 1;
	if (islocked_dvp != 1)
		panic("vop_rmdir: dvp: locked %d, expected %d", islocked_dvp, 1);
#endif
	a.a_vp = vp;
#ifdef VNODE_LOCKDEBUG
	/* vp must be exclusively locked as well. */
	islocked_vp = (vp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(vp) == LK_EXCLUSIVE) : 1;
	if (islocked_vp != 1)
		panic("vop_rmdir: vp: locked %d, expected %d", islocked_vp, 1);
#endif
	a.a_cnp = cnp;
	/* Non-MP-safe file systems are called under the big kernel lock. */
	mpsafe = (dvp->v_vflag & VV_MPSAFE);
	if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
	error = (VCALL(dvp, VOFFSET(vop_rmdir), &a));
	if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
	return error;
}

/* Offsets of vnode-pointer arguments in vop_symlink_args (VDESC_NO_OFFSET ends the list). */
const int vop_symlink_vp_offsets[] = {
	VOPARG_OFFSETOF(struct vop_symlink_args,a_dvp),
	VDESC_NO_OFFSET
};
/* Operation descriptor for vop_symlink; dvp is released on return (VDESC_VP0_WILLPUT). */
const struct vnodeop_desc vop_symlink_desc = {
	VOP_SYMLINK_DESCOFFSET,
	"vop_symlink",
	0 | VDESC_VP0_WILLPUT,
	vop_symlink_vp_offsets,
	VOPARG_OFFSETOF(struct vop_symlink_args, a_vpp),
	VDESC_NO_OFFSET,
	VOPARG_OFFSETOF(struct vop_symlink_args, a_cnp),
	NULL,
};
/*
 * VOP_SYMLINK: dispatch the symlink-create operation through dvp's
 * operation vector.  Caller must hold dvp exclusively locked (checked
 * under VNODE_LOCKDEBUG).  The new vnode is returned via *vpp.
 */
int
VOP_SYMLINK(struct vnode *dvp,
    struct vnode **vpp,
    struct componentname *cnp,
    struct vattr *vap,
    char *target)
{
	int error;
	bool mpsafe;
	struct vop_symlink_args a;
#ifdef VNODE_LOCKDEBUG
	int islocked_dvp;
#endif
	a.a_desc = VDESC(vop_symlink);
	a.a_dvp = dvp;
#ifdef VNODE_LOCKDEBUG
	/* dvp must be exclusively locked (when its locks are functional). */
	islocked_dvp = (dvp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(dvp) == LK_EXCLUSIVE) : 1;
	if (islocked_dvp != 1)
		panic("vop_symlink: dvp: locked %d, expected %d", islocked_dvp, 1);
#endif
	a.a_vpp = vpp;
	a.a_cnp = cnp;
	a.a_vap = vap;
	a.a_target = target;
	/* Non-MP-safe file systems are called under the big kernel lock. */
	mpsafe = (dvp->v_vflag & VV_MPSAFE);
	if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
	error = (VCALL(dvp, VOFFSET(vop_symlink), &a));
	if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
#ifdef DIAGNOSTIC
	/* On success the returned vnode must have its sizes set. */
	if (error == 0)
		KASSERT((*vpp)->v_size != VSIZENOTSET
		    && (*vpp)->v_writesize != VSIZENOTSET);
#endif /* DIAGNOSTIC */
	return error;
}

/* Offsets of vnode-pointer arguments in vop_readdir_args (VDESC_NO_OFFSET ends the list). */
const int vop_readdir_vp_offsets[] = {
	VOPARG_OFFSETOF(struct vop_readdir_args,a_vp),
	VDESC_NO_OFFSET
};
/* Operation descriptor for vop_readdir (records a_cred offset). */
const struct vnodeop_desc vop_readdir_desc = {
	VOP_READDIR_DESCOFFSET,
	"vop_readdir",
	0,
	vop_readdir_vp_offsets,
	VDESC_NO_OFFSET,
	VOPARG_OFFSETOF(struct vop_readdir_args, a_cred),
	VDESC_NO_OFFSET,
	NULL,
};
/*
 * VOP_READDIR: dispatch the read-directory operation through vp's
 * operation vector.  Caller must hold vp exclusively locked (checked
 * under VNODE_LOCKDEBUG).
 */
int
VOP_READDIR(struct vnode *vp,
    struct uio *uio,
    kauth_cred_t cred,
    int *eofflag,
    off_t **cookies,
    int *ncookies)
{
	int error;
	bool mpsafe;
	struct vop_readdir_args a;
#ifdef VNODE_LOCKDEBUG
	int islocked_vp;
#endif
	a.a_desc = VDESC(vop_readdir);
	a.a_vp = vp;
#ifdef VNODE_LOCKDEBUG
	/* vp must be exclusively locked (when its locks are functional). */
	islocked_vp = (vp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(vp) == LK_EXCLUSIVE) : 1;
	if (islocked_vp != 1)
		panic("vop_readdir: vp: locked %d, expected %d", islocked_vp, 1);
#endif
	a.a_uio = uio;
	a.a_cred = cred;
	a.a_eofflag = eofflag;
	a.a_cookies = cookies;
	a.a_ncookies = ncookies;
	/* Non-MP-safe file systems are called under the big kernel lock. */
	mpsafe = (vp->v_vflag & VV_MPSAFE);
	if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
	error = (VCALL(vp, VOFFSET(vop_readdir), &a));
	if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
	return error;
}

/* Offsets of vnode-pointer arguments in vop_readlink_args (VDESC_NO_OFFSET ends the list). */
const int vop_readlink_vp_offsets[] = {
	VOPARG_OFFSETOF(struct vop_readlink_args,a_vp),
	VDESC_NO_OFFSET
};
/* Operation descriptor for vop_readlink (records a_cred offset). */
const struct vnodeop_desc vop_readlink_desc = {
	VOP_READLINK_DESCOFFSET,
	"vop_readlink",
	0,
	vop_readlink_vp_offsets,
	VDESC_NO_OFFSET,
	VOPARG_OFFSETOF(struct vop_readlink_args, a_cred),
	VDESC_NO_OFFSET,
	NULL,
};
/*
 * VOP_READLINK: dispatch the read-symlink operation through vp's
 * operation vector.  Caller must hold vp exclusively locked (checked
 * under VNODE_LOCKDEBUG).
 */
int
VOP_READLINK(struct vnode *vp,
    struct uio *uio,
    kauth_cred_t cred)
{
	int error;
	bool mpsafe;
	struct vop_readlink_args a;
#ifdef VNODE_LOCKDEBUG
	int islocked_vp;
#endif
	a.a_desc = VDESC(vop_readlink);
	a.a_vp = vp;
#ifdef VNODE_LOCKDEBUG
	/* vp must be exclusively locked (when its locks are functional). */
	islocked_vp = (vp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(vp) == LK_EXCLUSIVE) : 1;
	if (islocked_vp != 1)
		panic("vop_readlink: vp: locked %d, expected %d", islocked_vp, 1);
#endif
	a.a_uio = uio;
	a.a_cred = cred;
	/* Non-MP-safe file systems are called under the big kernel lock. */
	mpsafe = (vp->v_vflag & VV_MPSAFE);
	if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
	error = (VCALL(vp, VOFFSET(vop_readlink), &a));
	if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
	return error;
}

/* Offsets of vnode-pointer arguments in vop_abortop_args (VDESC_NO_OFFSET ends the list). */
const int vop_abortop_vp_offsets[] = {
	VOPARG_OFFSETOF(struct vop_abortop_args,a_dvp),
	VDESC_NO_OFFSET
};
/* Operation descriptor for vop_abortop (records a_cnp offset). */
const struct vnodeop_desc vop_abortop_desc = {
	VOP_ABORTOP_DESCOFFSET,
	"vop_abortop",
	0,
	vop_abortop_vp_offsets,
	VDESC_NO_OFFSET,
	VDESC_NO_OFFSET,
	VOPARG_OFFSETOF(struct vop_abortop_args, a_cnp),
	NULL,
};
/*
 * VOP_ABORTOP: dispatch the abort-operation cleanup through dvp's
 * operation vector.  No lock assertion is generated for this operation.
 */
int
VOP_ABORTOP(struct vnode *dvp,
    struct componentname *cnp)
{
	int error;
	bool mpsafe;
	struct vop_abortop_args a;
#ifdef VNODE_LOCKDEBUG
#endif
	a.a_desc = VDESC(vop_abortop);
	a.a_dvp = dvp;
	a.a_cnp = cnp;
	/* Non-MP-safe file systems are called under the big kernel lock. */
	mpsafe = (dvp->v_vflag & VV_MPSAFE);
	if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
	error = (VCALL(dvp, VOFFSET(vop_abortop), &a));
	if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
	return error;
}

/* Offsets of vnode-pointer arguments in vop_inactive_args (VDESC_NO_OFFSET ends the list). */
const int vop_inactive_vp_offsets[] = {
	VOPARG_OFFSETOF(struct vop_inactive_args,a_vp),
	VDESC_NO_OFFSET
};
/* Operation descriptor for vop_inactive; vp is unlocked on return (VDESC_VP0_WILLUNLOCK). */
const struct vnodeop_desc vop_inactive_desc = {
	VOP_INACTIVE_DESCOFFSET,
	"vop_inactive",
	0 | VDESC_VP0_WILLUNLOCK,
	vop_inactive_vp_offsets,
	VDESC_NO_OFFSET,
	VDESC_NO_OFFSET,
	VDESC_NO_OFFSET,
	NULL,
};
/*
 * VOP_INACTIVE: dispatch the inactive (last-reference) operation
 * through vp's operation vector.  Caller must hold vp exclusively
 * locked (checked under VNODE_LOCKDEBUG); *recycle is the file
 * system's recycle hint.
 */
int
VOP_INACTIVE(struct vnode *vp,
    bool *recycle)
{
	int error;
	bool mpsafe;
	struct vop_inactive_args a;
#ifdef VNODE_LOCKDEBUG
	int islocked_vp;
#endif
	a.a_desc = VDESC(vop_inactive);
	a.a_vp = vp;
#ifdef VNODE_LOCKDEBUG
	/* vp must be exclusively locked (when its locks are functional). */
	islocked_vp = (vp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(vp) == LK_EXCLUSIVE) : 1;
	if (islocked_vp != 1)
		panic("vop_inactive: vp: locked %d, expected %d", islocked_vp, 1);
#endif
	a.a_recycle = recycle;
	/* Non-MP-safe file systems are called under the big kernel lock. */
	mpsafe = (vp->v_vflag & VV_MPSAFE);
	if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
	error = (VCALL(vp, VOFFSET(vop_inactive), &a));
	if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
	return error;
}

/* Offsets of the vnode argument(s) within struct vop_reclaim_args. */
const int vop_reclaim_vp_offsets[] = {
	VOPARG_OFFSETOF(struct vop_reclaim_args,a_vp),
	VDESC_NO_OFFSET
};
/*
 * Descriptor for the reclaim vnode operation (per-argument offsets for
 * table-driven layered filesystems; VDESC_NO_OFFSET when absent).
 */
const struct vnodeop_desc vop_reclaim_desc = {
	VOP_RECLAIM_DESCOFFSET,
	"vop_reclaim",
	0,	/* flags */
	vop_reclaim_vp_offsets,
	VDESC_NO_OFFSET,	/* no vnode** argument */
	VDESC_NO_OFFSET,	/* no cred argument */
	VDESC_NO_OFFSET,	/* no componentname argument */
	NULL,
};
/*
 * Generated wrapper: marshal the arguments into a vop_reclaim_args
 * structure and dispatch through vp's operations vector via VCALL.
 */
int
VOP_RECLAIM(struct vnode *vp)
{
	int error;
	bool mpsafe;
	struct vop_reclaim_args a;
#ifdef VNODE_LOCKDEBUG
	int islocked_vp;
#endif
	a.a_desc = VDESC(vop_reclaim);
	a.a_vp = vp;
#ifdef VNODE_LOCKDEBUG
	/* vp must be UNLOCKED on entry (unless !VV_LOCKSWORK). */
	islocked_vp = (vp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(vp) == LK_EXCLUSIVE) : 0;
	if (islocked_vp != 0)
		panic("vop_reclaim: vp: locked %d, expected %d", islocked_vp, 0);
#endif
	/* Non-MP-safe filesystems are called under the big kernel lock. */
	mpsafe = (vp->v_vflag & VV_MPSAFE);
	if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
	error = (VCALL(vp, VOFFSET(vop_reclaim), &a));
	if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
	return error;
}

/* Offsets of the vnode argument(s) within struct vop_lock_args. */
const int vop_lock_vp_offsets[] = {
	VOPARG_OFFSETOF(struct vop_lock_args,a_vp),
	VDESC_NO_OFFSET
};
/*
 * Descriptor for the lock vnode operation (per-argument offsets for
 * table-driven layered filesystems; VDESC_NO_OFFSET when absent).
 */
const struct vnodeop_desc vop_lock_desc = {
	VOP_LOCK_DESCOFFSET,
	"vop_lock",
	0,	/* flags */
	vop_lock_vp_offsets,
	VDESC_NO_OFFSET,	/* no vnode** argument */
	VDESC_NO_OFFSET,	/* no cred argument */
	VDESC_NO_OFFSET,	/* no componentname argument */
	NULL,
};
/*
 * Generated wrapper: marshal the arguments into a vop_lock_args
 * structure and dispatch through vp's operations vector via VCALL.
 */
int
VOP_LOCK(struct vnode *vp,
    int flags)
{
	int error;
	bool mpsafe;
	struct vop_lock_args a;
#ifdef VNODE_LOCKDEBUG
	int islocked_vp;
#endif
	a.a_desc = VDESC(vop_lock);
	a.a_vp = vp;
#ifdef VNODE_LOCKDEBUG
	/* vp must be UNLOCKED on entry (unless !VV_LOCKSWORK). */
	islocked_vp = (vp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(vp) == LK_EXCLUSIVE) : 0;
	if (islocked_vp != 0)
		panic("vop_lock: vp: locked %d, expected %d", islocked_vp, 0);
#endif
	a.a_flags = flags;
	/* Non-MP-safe filesystems are called under the big kernel lock. */
	mpsafe = (vp->v_vflag & VV_MPSAFE);
	if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
	error = (VCALL(vp, VOFFSET(vop_lock), &a));
	if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
	return error;
}

/* Offsets of the vnode argument(s) within struct vop_unlock_args. */
const int vop_unlock_vp_offsets[] = {
	VOPARG_OFFSETOF(struct vop_unlock_args,a_vp),
	VDESC_NO_OFFSET
};
/*
 * Descriptor for the unlock vnode operation (per-argument offsets for
 * table-driven layered filesystems; VDESC_NO_OFFSET when absent).
 */
const struct vnodeop_desc vop_unlock_desc = {
	VOP_UNLOCK_DESCOFFSET,
	"vop_unlock",
	0,	/* flags */
	vop_unlock_vp_offsets,
	VDESC_NO_OFFSET,	/* no vnode** argument */
	VDESC_NO_OFFSET,	/* no cred argument */
	VDESC_NO_OFFSET,	/* no componentname argument */
	NULL,
};
/*
 * Generated wrapper: marshal the arguments into a vop_unlock_args
 * structure and dispatch through vp's operations vector via VCALL.
 */
int
VOP_UNLOCK(struct vnode *vp,
    int flags)
{
	int error;
	bool mpsafe;
	struct vop_unlock_args a;
#ifdef VNODE_LOCKDEBUG
	int islocked_vp;
#endif
	a.a_desc = VDESC(vop_unlock);
	a.a_vp = vp;
#ifdef VNODE_LOCKDEBUG
	/* vp must be exclusively locked on entry (unless !VV_LOCKSWORK). */
	islocked_vp = (vp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(vp) == LK_EXCLUSIVE) : 1;
	if (islocked_vp != 1)
		panic("vop_unlock: vp: locked %d, expected %d", islocked_vp, 1);
#endif
	a.a_flags = flags;
	/* Non-MP-safe filesystems are called under the big kernel lock. */
	mpsafe = (vp->v_vflag & VV_MPSAFE);
	if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
	error = (VCALL(vp, VOFFSET(vop_unlock), &a));
	if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
	return error;
}

/* Offsets of the vnode argument(s) within struct vop_bmap_args. */
const int vop_bmap_vp_offsets[] = {
	VOPARG_OFFSETOF(struct vop_bmap_args,a_vp),
	VDESC_NO_OFFSET
};
/*
 * Descriptor for the bmap vnode operation; notes the location of the
 * vnode** result argument (a_vpp) for table-driven layers.
 */
const struct vnodeop_desc vop_bmap_desc = {
	VOP_BMAP_DESCOFFSET,
	"vop_bmap",
	0,	/* flags */
	vop_bmap_vp_offsets,
	VOPARG_OFFSETOF(struct vop_bmap_args, a_vpp),	/* vnode** argument */
	VDESC_NO_OFFSET,	/* no cred argument */
	VDESC_NO_OFFSET,	/* no componentname argument */
	NULL,
};
/*
 * Generated wrapper: marshal the arguments into a vop_bmap_args
 * structure and dispatch through vp's operations vector via VCALL.
 * No lock-state assertion is generated for this operation.
 */
int
VOP_BMAP(struct vnode *vp,
    daddr_t bn,
    struct vnode **vpp,
    daddr_t *bnp,
    int *runp)
{
	int error;
	bool mpsafe;
	struct vop_bmap_args a;
#ifdef VNODE_LOCKDEBUG
#endif
	a.a_desc = VDESC(vop_bmap);
	a.a_vp = vp;
	a.a_bn = bn;
	a.a_vpp = vpp;
	a.a_bnp = bnp;
	a.a_runp = runp;
	/* Non-MP-safe filesystems are called under the big kernel lock. */
	mpsafe = (vp->v_vflag & VV_MPSAFE);
	if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
	error = (VCALL(vp, VOFFSET(vop_bmap), &a));
	if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
	return error;
}

/* Offsets of the vnode argument(s) within struct vop_strategy_args. */
const int vop_strategy_vp_offsets[] = {
	VOPARG_OFFSETOF(struct vop_strategy_args,a_vp),
	VDESC_NO_OFFSET
};
/*
 * Descriptor for the strategy vnode operation (per-argument offsets
 * for table-driven layered filesystems; VDESC_NO_OFFSET when absent).
 */
const struct vnodeop_desc vop_strategy_desc = {
	VOP_STRATEGY_DESCOFFSET,
	"vop_strategy",
	0,	/* flags */
	vop_strategy_vp_offsets,
	VDESC_NO_OFFSET,	/* no vnode** argument */
	VDESC_NO_OFFSET,	/* no cred argument */
	VDESC_NO_OFFSET,	/* no componentname argument */
	NULL,
};
/*
 * Generated wrapper: marshal the arguments into a vop_strategy_args
 * structure and dispatch through vp's operations vector via VCALL.
 * No lock-state assertion is generated for this operation.
 */
int
VOP_STRATEGY(struct vnode *vp,
    struct buf *bp)
{
	int error;
	bool mpsafe;
	struct vop_strategy_args a;
#ifdef VNODE_LOCKDEBUG
#endif
	a.a_desc = VDESC(vop_strategy);
	a.a_vp = vp;
	a.a_bp = bp;
	/* Non-MP-safe filesystems are called under the big kernel lock. */
	mpsafe = (vp->v_vflag & VV_MPSAFE);
	if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
	error = (VCALL(vp, VOFFSET(vop_strategy), &a));
	if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
	return error;
}

/* Offsets of the vnode argument(s) within struct vop_print_args. */
const int vop_print_vp_offsets[] = {
	VOPARG_OFFSETOF(struct vop_print_args,a_vp),
	VDESC_NO_OFFSET
};
/*
 * Descriptor for the print vnode operation (per-argument offsets for
 * table-driven layered filesystems; VDESC_NO_OFFSET when absent).
 */
const struct vnodeop_desc vop_print_desc = {
	VOP_PRINT_DESCOFFSET,
	"vop_print",
	0,	/* flags */
	vop_print_vp_offsets,
	VDESC_NO_OFFSET,	/* no vnode** argument */
	VDESC_NO_OFFSET,	/* no cred argument */
	VDESC_NO_OFFSET,	/* no componentname argument */
	NULL,
};
/*
 * Generated wrapper: marshal the argument into a vop_print_args
 * structure and dispatch through vp's operations vector via VCALL.
 * No lock-state assertion is generated for this operation.
 */
int
VOP_PRINT(struct vnode *vp)
{
	int error;
	bool mpsafe;
	struct vop_print_args a;
#ifdef VNODE_LOCKDEBUG
#endif
	a.a_desc = VDESC(vop_print);
	a.a_vp = vp;
	/* Non-MP-safe filesystems are called under the big kernel lock. */
	mpsafe = (vp->v_vflag & VV_MPSAFE);
	if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
	error = (VCALL(vp, VOFFSET(vop_print), &a));
	if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
	return error;
}

/* Offsets of the vnode argument(s) within struct vop_islocked_args. */
const int vop_islocked_vp_offsets[] = {
	VOPARG_OFFSETOF(struct vop_islocked_args,a_vp),
	VDESC_NO_OFFSET
};
/*
 * Descriptor for the islocked vnode operation (per-argument offsets
 * for table-driven layered filesystems; VDESC_NO_OFFSET when absent).
 */
const struct vnodeop_desc vop_islocked_desc = {
	VOP_ISLOCKED_DESCOFFSET,
	"vop_islocked",
	0,	/* flags */
	vop_islocked_vp_offsets,
	VDESC_NO_OFFSET,	/* no vnode** argument */
	VDESC_NO_OFFSET,	/* no cred argument */
	VDESC_NO_OFFSET,	/* no componentname argument */
	NULL,
};
/*
 * Generated wrapper: marshal the argument into a vop_islocked_args
 * structure and dispatch through vp's operations vector via VCALL.
 * No lock-state assertion is generated for this operation.
 */
int
VOP_ISLOCKED(struct vnode *vp)
{
	int error;
	bool mpsafe;
	struct vop_islocked_args a;
#ifdef VNODE_LOCKDEBUG
#endif
	a.a_desc = VDESC(vop_islocked);
	a.a_vp = vp;
	/* Non-MP-safe filesystems are called under the big kernel lock. */
	mpsafe = (vp->v_vflag & VV_MPSAFE);
	if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
	error = (VCALL(vp, VOFFSET(vop_islocked), &a));
	if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
	return error;
}

/* Offsets of the vnode argument(s) within struct vop_pathconf_args. */
const int vop_pathconf_vp_offsets[] = {
	VOPARG_OFFSETOF(struct vop_pathconf_args,a_vp),
	VDESC_NO_OFFSET
};
/*
 * Descriptor for the pathconf vnode operation (per-argument offsets
 * for table-driven layered filesystems; VDESC_NO_OFFSET when absent).
 */
const struct vnodeop_desc vop_pathconf_desc = {
	VOP_PATHCONF_DESCOFFSET,
	"vop_pathconf",
	0,	/* flags */
	vop_pathconf_vp_offsets,
	VDESC_NO_OFFSET,	/* no vnode** argument */
	VDESC_NO_OFFSET,	/* no cred argument */
	VDESC_NO_OFFSET,	/* no componentname argument */
	NULL,
};
/*
 * Generated wrapper: marshal the arguments into a vop_pathconf_args
 * structure and dispatch through vp's operations vector via VCALL.
 */
int
VOP_PATHCONF(struct vnode *vp,
    int name,
    register_t *retval)
{
	int error;
	bool mpsafe;
	struct vop_pathconf_args a;
#ifdef VNODE_LOCKDEBUG
	int islocked_vp;
#endif
	a.a_desc = VDESC(vop_pathconf);
	a.a_vp = vp;
#ifdef VNODE_LOCKDEBUG
	/* vp must be exclusively locked on entry (unless !VV_LOCKSWORK). */
	islocked_vp = (vp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(vp) == LK_EXCLUSIVE) : 1;
	if (islocked_vp != 1)
		panic("vop_pathconf: vp: locked %d, expected %d", islocked_vp, 1);
#endif
	a.a_name = name;
	a.a_retval = retval;
	/* Non-MP-safe filesystems are called under the big kernel lock. */
	mpsafe = (vp->v_vflag & VV_MPSAFE);
	if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
	error = (VCALL(vp, VOFFSET(vop_pathconf), &a));
	if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
	return error;
}

/* Offsets of the vnode argument(s) within struct vop_advlock_args. */
const int vop_advlock_vp_offsets[] = {
	VOPARG_OFFSETOF(struct vop_advlock_args,a_vp),
	VDESC_NO_OFFSET
};
/*
 * Descriptor for the advlock vnode operation (per-argument offsets
 * for table-driven layered filesystems; VDESC_NO_OFFSET when absent).
 */
const struct vnodeop_desc vop_advlock_desc = {
	VOP_ADVLOCK_DESCOFFSET,
	"vop_advlock",
	0,	/* flags */
	vop_advlock_vp_offsets,
	VDESC_NO_OFFSET,	/* no vnode** argument */
	VDESC_NO_OFFSET,	/* no cred argument */
	VDESC_NO_OFFSET,	/* no componentname argument */
	NULL,
};
/*
 * Generated wrapper: marshal the arguments into a vop_advlock_args
 * structure and dispatch through vp's operations vector via VCALL.
 */
int
VOP_ADVLOCK(struct vnode *vp,
    void *id,
    int op,
    struct flock *fl,
    int flags)
{
	int error;
	bool mpsafe;
	struct vop_advlock_args a;
#ifdef VNODE_LOCKDEBUG
	int islocked_vp;
#endif
	a.a_desc = VDESC(vop_advlock);
	a.a_vp = vp;
#ifdef VNODE_LOCKDEBUG
	/* vp must be UNLOCKED on entry (unless !VV_LOCKSWORK). */
	islocked_vp = (vp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(vp) == LK_EXCLUSIVE) : 0;
	if (islocked_vp != 0)
		panic("vop_advlock: vp: locked %d, expected %d", islocked_vp, 0);
#endif
	a.a_id = id;
	a.a_op = op;
	a.a_fl = fl;
	a.a_flags = flags;
	/* Non-MP-safe filesystems are called under the big kernel lock. */
	mpsafe = (vp->v_vflag & VV_MPSAFE);
	if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
	error = (VCALL(vp, VOFFSET(vop_advlock), &a));
	if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
	return error;
}

/* Offsets of the vnode argument(s) within struct vop_lease_args. */
const int vop_lease_vp_offsets[] = {
	VOPARG_OFFSETOF(struct vop_lease_args,a_vp),
	VDESC_NO_OFFSET
};
/*
 * Descriptor for the lease vnode operation; notes the location of the
 * credential argument for table-driven layers.
 */
const struct vnodeop_desc vop_lease_desc = {
	VOP_LEASE_DESCOFFSET,
	"vop_lease",
	0,	/* flags */
	vop_lease_vp_offsets,
	VDESC_NO_OFFSET,	/* no vnode** argument */
	VOPARG_OFFSETOF(struct vop_lease_args, a_cred),	/* cred argument */
	VDESC_NO_OFFSET,	/* no componentname argument */
	NULL,
};
/*
 * Generated wrapper: marshal the arguments into a vop_lease_args
 * structure and dispatch through vp's operations vector via VCALL.
 * No lock-state assertion is generated for this operation.
 */
int
VOP_LEASE(struct vnode *vp,
    kauth_cred_t cred,
    int flag)
{
	int error;
	bool mpsafe;
	struct vop_lease_args a;
#ifdef VNODE_LOCKDEBUG
#endif
	a.a_desc = VDESC(vop_lease);
	a.a_vp = vp;
	a.a_cred = cred;
	a.a_flag = flag;
	/* Non-MP-safe filesystems are called under the big kernel lock. */
	mpsafe = (vp->v_vflag & VV_MPSAFE);
	if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
	error = (VCALL(vp, VOFFSET(vop_lease), &a));
	if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
	return error;
}

/* Offsets of the vnode argument(s) within struct vop_whiteout_args. */
const int vop_whiteout_vp_offsets[] = {
	VOPARG_OFFSETOF(struct vop_whiteout_args,a_dvp),
	VDESC_NO_OFFSET
};
/*
 * Descriptor for the whiteout vnode operation; notes the location of
 * the componentname argument for table-driven layers.
 */
const struct vnodeop_desc vop_whiteout_desc = {
	VOP_WHITEOUT_DESCOFFSET,
	"vop_whiteout",
	0,	/* flags */
	vop_whiteout_vp_offsets,
	VDESC_NO_OFFSET,	/* no vnode** argument */
	VDESC_NO_OFFSET,	/* no cred argument */
	VOPARG_OFFSETOF(struct vop_whiteout_args, a_cnp),	/* componentname */
	NULL,
};
/*
 * Generated wrapper: marshal the arguments into a vop_whiteout_args
 * structure and dispatch through dvp's operations vector via VCALL.
 */
int
VOP_WHITEOUT(struct vnode *dvp,
    struct componentname *cnp,
    int flags)
{
	int error;
	bool mpsafe;
	struct vop_whiteout_args a;
#ifdef VNODE_LOCKDEBUG
	int islocked_dvp;
#endif
	a.a_desc = VDESC(vop_whiteout);
	a.a_dvp = dvp;
#ifdef VNODE_LOCKDEBUG
	/* dvp must be exclusively locked on entry (unless !VV_LOCKSWORK). */
	islocked_dvp = (dvp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(dvp) == LK_EXCLUSIVE) : 1;
	if (islocked_dvp != 1)
		panic("vop_whiteout: dvp: locked %d, expected %d", islocked_dvp, 1);
#endif
	a.a_cnp = cnp;
	a.a_flags = flags;
	/* Non-MP-safe filesystems are called under the big kernel lock. */
	mpsafe = (dvp->v_vflag & VV_MPSAFE);
	if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
	error = (VCALL(dvp, VOFFSET(vop_whiteout), &a));
	if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
	return error;
}

/* Offsets of the vnode argument(s) within struct vop_getpages_args. */
const int vop_getpages_vp_offsets[] = {
	VOPARG_OFFSETOF(struct vop_getpages_args,a_vp),
	VDESC_NO_OFFSET
};
/*
 * Descriptor for the getpages vnode operation (per-argument offsets
 * for table-driven layered filesystems; VDESC_NO_OFFSET when absent).
 */
const struct vnodeop_desc vop_getpages_desc = {
	VOP_GETPAGES_DESCOFFSET,
	"vop_getpages",
	0,	/* flags */
	vop_getpages_vp_offsets,
	VDESC_NO_OFFSET,	/* no vnode** argument */
	VDESC_NO_OFFSET,	/* no cred argument */
	VDESC_NO_OFFSET,	/* no componentname argument */
	NULL,
};
/*
 * Generated wrapper: marshal the arguments into a vop_getpages_args
 * structure and dispatch through vp's operations vector via VCALL.
 * No lock-state assertion is generated for this operation.
 */
int
VOP_GETPAGES(struct vnode *vp,
    voff_t offset,
    struct vm_page **m,
    int *count,
    int centeridx,
    vm_prot_t access_type,
    int advice,
    int flags)
{
	int error;
	bool mpsafe;
	struct vop_getpages_args a;
#ifdef VNODE_LOCKDEBUG
#endif
	a.a_desc = VDESC(vop_getpages);
	a.a_vp = vp;
	a.a_offset = offset;
	a.a_m = m;
	a.a_count = count;
	a.a_centeridx = centeridx;
	a.a_access_type = access_type;
	a.a_advice = advice;
	a.a_flags = flags;
	/* Non-MP-safe filesystems are called under the big kernel lock. */
	mpsafe = (vp->v_vflag & VV_MPSAFE);
	if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
	error = (VCALL(vp, VOFFSET(vop_getpages), &a));
	if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
	return error;
}

/* Offsets of the vnode argument(s) within struct vop_putpages_args. */
const int vop_putpages_vp_offsets[] = {
	VOPARG_OFFSETOF(struct vop_putpages_args,a_vp),
	VDESC_NO_OFFSET
};
/*
 * Descriptor for the putpages vnode operation (per-argument offsets
 * for table-driven layered filesystems; VDESC_NO_OFFSET when absent).
 */
const struct vnodeop_desc vop_putpages_desc = {
	VOP_PUTPAGES_DESCOFFSET,
	"vop_putpages",
	0,	/* flags */
	vop_putpages_vp_offsets,
	VDESC_NO_OFFSET,	/* no vnode** argument */
	VDESC_NO_OFFSET,	/* no cred argument */
	VDESC_NO_OFFSET,	/* no componentname argument */
	NULL,
};
/*
 * Generated wrapper: marshal the arguments into a vop_putpages_args
 * structure and dispatch through vp's operations vector via VCALL.
 * No lock-state assertion is generated for this operation.
 */
int
VOP_PUTPAGES(struct vnode *vp,
    voff_t offlo,
    voff_t offhi,
    int flags)
{
	int error;
	bool mpsafe;
	struct vop_putpages_args a;
#ifdef VNODE_LOCKDEBUG
#endif
	a.a_desc = VDESC(vop_putpages);
	a.a_vp = vp;
	a.a_offlo = offlo;
	a.a_offhi = offhi;
	a.a_flags = flags;
	/* Non-MP-safe filesystems are called under the big kernel lock. */
	mpsafe = (vp->v_vflag & VV_MPSAFE);
	if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
	error = (VCALL(vp, VOFFSET(vop_putpages), &a));
	if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
	return error;
}

/* Offsets of the vnode argument(s) within struct vop_closeextattr_args. */
const int vop_closeextattr_vp_offsets[] = {
	VOPARG_OFFSETOF(struct vop_closeextattr_args,a_vp),
	VDESC_NO_OFFSET
};
/*
 * Descriptor for the closeextattr vnode operation; notes the location
 * of the credential argument for table-driven layers.
 */
const struct vnodeop_desc vop_closeextattr_desc = {
	VOP_CLOSEEXTATTR_DESCOFFSET,
	"vop_closeextattr",
	0,	/* flags */
	vop_closeextattr_vp_offsets,
	VDESC_NO_OFFSET,	/* no vnode** argument */
	VOPARG_OFFSETOF(struct vop_closeextattr_args, a_cred),	/* cred */
	VDESC_NO_OFFSET,	/* no componentname argument */
	NULL,
};
/*
 * Generated wrapper: marshal the arguments into a vop_closeextattr_args
 * structure and dispatch through vp's operations vector via VCALL.
 */
int
VOP_CLOSEEXTATTR(struct vnode *vp,
    int commit,
    kauth_cred_t cred)
{
	int error;
	bool mpsafe;
	struct vop_closeextattr_args a;
#ifdef VNODE_LOCKDEBUG
	int islocked_vp;
#endif
	a.a_desc = VDESC(vop_closeextattr);
	a.a_vp = vp;
#ifdef VNODE_LOCKDEBUG
	/* vp must be exclusively locked on entry (unless !VV_LOCKSWORK). */
	islocked_vp = (vp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(vp) == LK_EXCLUSIVE) : 1;
	if (islocked_vp != 1)
		panic("vop_closeextattr: vp: locked %d, expected %d", islocked_vp, 1);
#endif
	a.a_commit = commit;
	a.a_cred = cred;
	/* Non-MP-safe filesystems are called under the big kernel lock. */
	mpsafe = (vp->v_vflag & VV_MPSAFE);
	if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
	error = (VCALL(vp, VOFFSET(vop_closeextattr), &a));
	if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
	return error;
}

/* Offsets of the vnode argument(s) within struct vop_getextattr_args. */
const int vop_getextattr_vp_offsets[] = {
	VOPARG_OFFSETOF(struct vop_getextattr_args,a_vp),
	VDESC_NO_OFFSET
};
/*
 * Descriptor for the getextattr vnode operation; notes the location
 * of the credential argument for table-driven layers.
 */
const struct vnodeop_desc vop_getextattr_desc = {
	VOP_GETEXTATTR_DESCOFFSET,
	"vop_getextattr",
	0,	/* flags */
	vop_getextattr_vp_offsets,
	VDESC_NO_OFFSET,	/* no vnode** argument */
	VOPARG_OFFSETOF(struct vop_getextattr_args, a_cred),	/* cred */
	VDESC_NO_OFFSET,	/* no componentname argument */
	NULL,
};
/*
 * Generated wrapper: marshal the arguments into a vop_getextattr_args
 * structure and dispatch through vp's operations vector via VCALL.
 */
int
VOP_GETEXTATTR(struct vnode *vp,
    int attrnamespace,
    const char *name,
    struct uio *uio,
    size_t *size,
    kauth_cred_t cred)
{
	int error;
	bool mpsafe;
	struct vop_getextattr_args a;
#ifdef VNODE_LOCKDEBUG
	int islocked_vp;
#endif
	a.a_desc = VDESC(vop_getextattr);
	a.a_vp = vp;
#ifdef VNODE_LOCKDEBUG
	/* vp must be exclusively locked on entry (unless !VV_LOCKSWORK). */
	islocked_vp = (vp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(vp) == LK_EXCLUSIVE) : 1;
	if (islocked_vp != 1)
		panic("vop_getextattr: vp: locked %d, expected %d", islocked_vp, 1);
#endif
	a.a_attrnamespace = attrnamespace;
	a.a_name = name;
	a.a_uio = uio;
	a.a_size = size;
	a.a_cred = cred;
	/* Non-MP-safe filesystems are called under the big kernel lock. */
	mpsafe = (vp->v_vflag & VV_MPSAFE);
	if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
	error = (VCALL(vp, VOFFSET(vop_getextattr), &a));
	if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
	return error;
}

/* Offsets of the vnode argument(s) within struct vop_listextattr_args. */
const int vop_listextattr_vp_offsets[] = {
	VOPARG_OFFSETOF(struct vop_listextattr_args,a_vp),
	VDESC_NO_OFFSET
};
/*
 * Descriptor for the listextattr vnode operation; notes the location
 * of the credential argument for table-driven layers.
 */
const struct vnodeop_desc vop_listextattr_desc = {
	VOP_LISTEXTATTR_DESCOFFSET,
	"vop_listextattr",
	0,	/* flags */
	vop_listextattr_vp_offsets,
	VDESC_NO_OFFSET,	/* no vnode** argument */
	VOPARG_OFFSETOF(struct vop_listextattr_args, a_cred),	/* cred */
	VDESC_NO_OFFSET,	/* no componentname argument */
	NULL,
};
/*
 * Generated wrapper: marshal the arguments into a vop_listextattr_args
 * structure and dispatch through vp's operations vector via VCALL.
 */
int
VOP_LISTEXTATTR(struct vnode *vp,
    int attrnamespace,
    struct uio *uio,
    size_t *size,
    kauth_cred_t cred)
{
	int error;
	bool mpsafe;
	struct vop_listextattr_args a;
#ifdef VNODE_LOCKDEBUG
	int islocked_vp;
#endif
	a.a_desc = VDESC(vop_listextattr);
	a.a_vp = vp;
#ifdef VNODE_LOCKDEBUG
	/* vp must be exclusively locked on entry (unless !VV_LOCKSWORK). */
	islocked_vp = (vp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(vp) == LK_EXCLUSIVE) : 1;
	if (islocked_vp != 1)
		panic("vop_listextattr: vp: locked %d, expected %d", islocked_vp, 1);
#endif
	a.a_attrnamespace = attrnamespace;
	a.a_uio = uio;
	a.a_size = size;
	a.a_cred = cred;
	/* Non-MP-safe filesystems are called under the big kernel lock. */
	mpsafe = (vp->v_vflag & VV_MPSAFE);
	if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
	error = (VCALL(vp, VOFFSET(vop_listextattr), &a));
	if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
	return error;
}

/* Offsets of the vnode argument(s) within struct vop_openextattr_args. */
const int vop_openextattr_vp_offsets[] = {
	VOPARG_OFFSETOF(struct vop_openextattr_args,a_vp),
	VDESC_NO_OFFSET
};
/*
 * Descriptor for the openextattr vnode operation; notes the location
 * of the credential argument for table-driven layers.
 */
const struct vnodeop_desc vop_openextattr_desc = {
	VOP_OPENEXTATTR_DESCOFFSET,
	"vop_openextattr",
	0,	/* flags */
	vop_openextattr_vp_offsets,
	VDESC_NO_OFFSET,	/* no vnode** argument */
	VOPARG_OFFSETOF(struct vop_openextattr_args, a_cred),	/* cred */
	VDESC_NO_OFFSET,	/* no componentname argument */
	NULL,
};
/*
 * Generated wrapper: marshal the arguments into a vop_openextattr_args
 * structure and dispatch through vp's operations vector via VCALL.
 */
int
VOP_OPENEXTATTR(struct vnode *vp,
    kauth_cred_t cred)
{
	int error;
	bool mpsafe;
	struct vop_openextattr_args a;
#ifdef VNODE_LOCKDEBUG
	int islocked_vp;
#endif
	a.a_desc = VDESC(vop_openextattr);
	a.a_vp = vp;
#ifdef VNODE_LOCKDEBUG
	/* vp must be exclusively locked on entry (unless !VV_LOCKSWORK). */
	islocked_vp = (vp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(vp) == LK_EXCLUSIVE) : 1;
	if (islocked_vp != 1)
		panic("vop_openextattr: vp: locked %d, expected %d", islocked_vp, 1);
#endif
	a.a_cred = cred;
	/* Non-MP-safe filesystems are called under the big kernel lock. */
	mpsafe = (vp->v_vflag & VV_MPSAFE);
	if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
	error = (VCALL(vp, VOFFSET(vop_openextattr), &a));
	if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
	return error;
}

/* Offsets of the vnode argument(s) within struct vop_deleteextattr_args. */
const int vop_deleteextattr_vp_offsets[] = {
	VOPARG_OFFSETOF(struct vop_deleteextattr_args,a_vp),
	VDESC_NO_OFFSET
};
/*
 * Descriptor for the deleteextattr vnode operation; notes the location
 * of the credential argument for table-driven layers.
 */
const struct vnodeop_desc vop_deleteextattr_desc = {
	VOP_DELETEEXTATTR_DESCOFFSET,
	"vop_deleteextattr",
	0,	/* flags */
	vop_deleteextattr_vp_offsets,
	VDESC_NO_OFFSET,	/* no vnode** argument */
	VOPARG_OFFSETOF(struct vop_deleteextattr_args, a_cred),	/* cred */
	VDESC_NO_OFFSET,	/* no componentname argument */
	NULL,
};
/*
 * Generated wrapper: marshal the arguments into a vop_deleteextattr_args
 * structure and dispatch through vp's operations vector via VCALL.
 */
int
VOP_DELETEEXTATTR(struct vnode *vp,
    int attrnamespace,
    const char *name,
    kauth_cred_t cred)
{
	int error;
	bool mpsafe;
	struct vop_deleteextattr_args a;
#ifdef VNODE_LOCKDEBUG
	int islocked_vp;
#endif
	a.a_desc = VDESC(vop_deleteextattr);
	a.a_vp = vp;
#ifdef VNODE_LOCKDEBUG
	/* vp must be exclusively locked on entry (unless !VV_LOCKSWORK). */
	islocked_vp = (vp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(vp) == LK_EXCLUSIVE) : 1;
	if (islocked_vp != 1)
		panic("vop_deleteextattr: vp: locked %d, expected %d", islocked_vp, 1);
#endif
	a.a_attrnamespace = attrnamespace;
	a.a_name = name;
	a.a_cred = cred;
	/* Non-MP-safe filesystems are called under the big kernel lock. */
	mpsafe = (vp->v_vflag & VV_MPSAFE);
	if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
	error = (VCALL(vp, VOFFSET(vop_deleteextattr), &a));
	if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
	return error;
}

/* Offsets of the vnode argument(s) within struct vop_setextattr_args. */
const int vop_setextattr_vp_offsets[] = {
	VOPARG_OFFSETOF(struct vop_setextattr_args,a_vp),
	VDESC_NO_OFFSET
};
/*
 * Descriptor for the setextattr vnode operation; notes the location
 * of the credential argument for table-driven layers.
 */
const struct vnodeop_desc vop_setextattr_desc = {
	VOP_SETEXTATTR_DESCOFFSET,
	"vop_setextattr",
	0,	/* flags */
	vop_setextattr_vp_offsets,
	VDESC_NO_OFFSET,	/* no vnode** argument */
	VOPARG_OFFSETOF(struct vop_setextattr_args, a_cred),	/* cred */
	VDESC_NO_OFFSET,	/* no componentname argument */
	NULL,
};
/*
 * Generated wrapper: marshal the arguments into a vop_setextattr_args
 * structure and dispatch through vp's operations vector via VCALL.
 */
int
VOP_SETEXTATTR(struct vnode *vp,
    int attrnamespace,
    const char *name,
    struct uio *uio,
    kauth_cred_t cred)
{
	int error;
	bool mpsafe;
	struct vop_setextattr_args a;
#ifdef VNODE_LOCKDEBUG
	int islocked_vp;
#endif
	a.a_desc = VDESC(vop_setextattr);
	a.a_vp = vp;
#ifdef VNODE_LOCKDEBUG
	/* vp must be exclusively locked on entry (unless !VV_LOCKSWORK). */
	islocked_vp = (vp->v_vflag & VV_LOCKSWORK) ? (VOP_ISLOCKED(vp) == LK_EXCLUSIVE) : 1;
	if (islocked_vp != 1)
		panic("vop_setextattr: vp: locked %d, expected %d", islocked_vp, 1);
#endif
	a.a_attrnamespace = attrnamespace;
	a.a_name = name;
	a.a_uio = uio;
	a.a_cred = cred;
	/* Non-MP-safe filesystems are called under the big kernel lock. */
	mpsafe = (vp->v_vflag & VV_MPSAFE);
	if (!mpsafe) { KERNEL_LOCK(1, curlwp); }
	error = (VCALL(vp, VOFFSET(vop_setextattr), &a));
	if (!mpsafe) { KERNEL_UNLOCK_ONE(curlwp); }
	return error;
}

/* End of special cases. */

/*
 * NULL-terminated table of pointers to every vnode operation
 * descriptor.  The default descriptor must remain the first entry;
 * ordering of the rest matches the generated descriptor offsets.
 */
const struct vnodeop_desc * const vfs_op_descs[] = {
	&vop_default_desc,	/* MUST BE FIRST */
	&vop_bwrite_desc,	/* XXX: SPECIAL CASE */

	&vop_lookup_desc,
	&vop_create_desc,
	&vop_mknod_desc,
	&vop_open_desc,
	&vop_close_desc,
	&vop_access_desc,
	&vop_getattr_desc,
	&vop_setattr_desc,
	&vop_read_desc,
	&vop_write_desc,
	&vop_ioctl_desc,
	&vop_fcntl_desc,
	&vop_poll_desc,
	&vop_kqfilter_desc,
	&vop_revoke_desc,
	&vop_mmap_desc,
	&vop_fsync_desc,
	&vop_seek_desc,
	&vop_remove_desc,
	&vop_link_desc,
	&vop_rename_desc,
	&vop_mkdir_desc,
	&vop_rmdir_desc,
	&vop_symlink_desc,
	&vop_readdir_desc,
	&vop_readlink_desc,
	&vop_abortop_desc,
	&vop_inactive_desc,
	&vop_reclaim_desc,
	&vop_lock_desc,
	&vop_unlock_desc,
	&vop_bmap_desc,
	&vop_strategy_desc,
	&vop_print_desc,
	&vop_islocked_desc,
	&vop_pathconf_desc,
	&vop_advlock_desc,
	&vop_lease_desc,
	&vop_whiteout_desc,
	&vop_getpages_desc,
	&vop_putpages_desc,
	&vop_closeextattr_desc,
	&vop_getextattr_desc,
	&vop_listextattr_desc,
	&vop_openextattr_desc,
	&vop_deleteextattr_desc,
	&vop_setextattr_desc,
	NULL	/* terminator */
};