/*	$NetBSD$	*/

/*
 * NOTE(review): this header was reconstructed from a garbled side-by-side
 * diff of revisions 1.5.2.5 (2011/05/30) and 1.5.2.6 (2011/05/31); the
 * newer revision's text is retained below.  The original license/copyright
 * block was lost in extraction — restore it from the repository.
 */
|
|
/*
 * The vnode cache subsystem.
 *
 * Life-cycle
 *
 * Normally, there are two points where new vnodes are created:
 * VOP_CREATE(9) and VOP_LOOKUP(9).  The life-cycle of a vnode
 * starts in one of the following ways:
 *
 * - Allocation, via getnewvnode(9) and/or vnalloc(9).
 * - Recycle from a free list, via getnewvnode(9) -> getcleanvnode(9).
 * - Reclamation of inactive vnode, via vget(9).
 *
 * The life-cycle ends when the last reference is dropped, usually
 * in VOP_REMOVE(9).  In such case, VOP_INACTIVE(9) is called to inform
 * the file system that vnode is inactive.  Via this call, file system
 * indicates whether vnode should be recycled (usually, count of links
 * is checked i.e. whether file was removed).
 *
 * Depending on indication, vnode can be put into a free list (cache),
 * or cleaned via vclean(9), which calls VOP_RECLAIM(9) to disassociate
 * underlying file system from the vnode, and finally destroyed.
 *
 * Reference counting
 *
 * Vnode is considered active, if reference count (vnode_t::v_usecount)
 * is non-zero.  It is maintained using: vref(9) and vrele(9), as well
 * as vput(9), routines.  Common points holding references are e.g.
 * file openings, current working directory, mount points, etc.
 *
 * Note on v_usecount and its locking
 *
 * At nearly all points it is known that v_usecount could be zero,
 * the vnode_t::v_interlock will be held.  To change v_usecount away
 * from zero, the interlock must be held.  To change from a non-zero
 * value to zero, again the interlock must be held.
 *
 * There is a flag bit, VC_XLOCK, embedded in v_usecount.  To raise
 * v_usecount, if the VC_XLOCK bit is set in it, the interlock must
 * be held.  To modify the VC_XLOCK bit, the interlock must be held.
 * We always keep the usecount (v_usecount & VC_MASK) non-zero while
 * the VC_XLOCK bit is set.
 *
 * Unless the VC_XLOCK bit is set, changing the usecount from a non-zero
 * value to a non-zero value can safely be done using atomic operations,
 * without the interlock held.
 *
 * Even if the VC_XLOCK bit is set, decreasing the usecount to a non-zero
 * value can be done using atomic operations, without the interlock held.
 *
 * Note: if VI_CLEAN is set, vnode_t::v_interlock will be released while
 * mntvnode_lock is still held.
 */
|
|
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD$");

#include <uvm/uvm.h>
#include <uvm/uvm_readahead.h>
|
u_int numvnodes; |
u_int numvnodes __cacheline_aligned; |
|
|
static pool_cache_t vnode_cache; |
static pool_cache_t vnode_cache __read_mostly; |
static kmutex_t vnode_free_list_lock; |
static kmutex_t vnode_free_list_lock __cacheline_aligned; |
|
|
static vnodelst_t vnode_free_list; |
static vnodelst_t vnode_free_list __cacheline_aligned; |
static vnodelst_t vnode_hold_list; |
static vnodelst_t vnode_hold_list __cacheline_aligned; |
static vnodelst_t vrele_list; |
static vnodelst_t vrele_list __cacheline_aligned; |
|
|
static kmutex_t vrele_lock; |
static kmutex_t vrele_lock __cacheline_aligned; |
static kcondvar_t vrele_cv; |
static kcondvar_t vrele_cv __cacheline_aligned; |
static lwp_t * vrele_lwp; |
static lwp_t * vrele_lwp __cacheline_aligned; |
static int vrele_pending; |
static int vrele_pending __cacheline_aligned; |
static int vrele_gen; |
static int vrele_gen __cacheline_aligned; |
|
|
static vnode_t * getcleanvnode(void); |
static vnode_t * getcleanvnode(void); |
static void vrele_thread(void *); |
static void vrele_thread(void *); |
Line 1016 vclean(vnode_t *vp, int flags) |
|
Line 1046 vclean(vnode_t *vp, int flags) |
|
vpanic(vp, "vclean: cannot reclaim"); |
vpanic(vp, "vclean: cannot reclaim"); |
} |
} |
|
|
|
KASSERT(vp->v_data == NULL); |
KASSERT(vp->v_uobj.uo_npages == 0); |
KASSERT(vp->v_uobj.uo_npages == 0); |
|
|
if (vp->v_type == VREG && vp->v_ractx != NULL) { |
if (vp->v_type == VREG && vp->v_ractx != NULL) { |
uvm_ra_freectx(vp->v_ractx); |
uvm_ra_freectx(vp->v_ractx); |
vp->v_ractx = NULL; |
vp->v_ractx = NULL; |
} |
} |
|
|
|
/* Purge name cache. */ |
cache_purge(vp); |
cache_purge(vp); |
|
|
/* Done with purge, notify sleepers of the grim news. */ |
/* Done with purge, notify sleepers of the grim news. */ |