version 1.48, 2005/12/24 19:12:23 |
version 1.48.6.1, 2006/06/01 22:38:10 |
Line 46 __KERNEL_RCSID(0, "$NetBSD$"); |
|
Line 46 __KERNEL_RCSID(0, "$NetBSD$"); |
|
#include <sys/pool.h> |
#include <sys/pool.h> |
#include <sys/fcntl.h> |
#include <sys/fcntl.h> |
#include <sys/lockf.h> |
#include <sys/lockf.h> |
|
#include <sys/kauth.h> |
|
|
POOL_INIT(lockfpool, sizeof(struct lockf), 0, 0, 0, "lockfpl", |
/*
 * The lockf structure is a kernel structure which contains the information
 * associated with a byte range lock.  The lockf structures are linked into
 * the inode structure.  Locks are sorted by the starting byte of the lock for
 * efficiency.
 *
 * lf_next is used for two purposes, depending on whether the lock is
 * being held, or is in conflict with an existing lock.  If this lock
 * is held, it indicates the next lock on the same vnode.
 * For pending locks, if lock->lf_next is non-NULL, then lock->lf_block
 * must be queued on the lf_blkhd TAILQ of lock->lf_next.
 */
|
|
|
/* Head type for a queue of blocked lock requests (used as lf_blkhd below). */
TAILQ_HEAD(locklist, lockf); |
|
|
|
/*
 * One advisory byte-range lock record.  Instances come from lockfpool and
 * are kept on a per-vnode list sorted by lf_start; see the lockf overview
 * comment earlier in this file for the two roles of lf_next.
 */
struct lockf { |

short lf_flags; /* Lock semantics: F_POSIX, F_FLOCK, F_WAIT */ |

short lf_type; /* Lock type: F_RDLCK, F_WRLCK */ |

off_t lf_start; /* The byte # of the start of the lock */ |

off_t lf_end; /* The byte # of the end of the lock (-1=EOF)*/ |

void *lf_id; /* process or file description holding lock */ |

struct lockf **lf_head; /* Back pointer to the head of lockf list */ |

struct lockf *lf_next; /* Next lock on this vnode, or blocking lock */ |

struct locklist lf_blkhd; /* List of requests blocked on this lock */ |

TAILQ_ENTRY(lockf) lf_block;/* A request waiting for a lock */ |

uid_t lf_uid; /* User ID responsible */ |

}; |
|
|
|
/*
 * Maximum length of sleep chains to traverse to try and detect deadlock.
 * Bounds the blocking-owner walk in lf_setlock (while (i++ < maxlockdepth));
 * chains longer than this are simply not diagnosed as deadlocks.
 */

#define MAXDEPTH 50 |
|
|
|
static POOL_INIT(lockfpool, sizeof(struct lockf), 0, 0, 0, "lockfpl", |
&pool_allocator_nointr); |
&pool_allocator_nointr); |
|
|
/* |
/* |
Line 60 int maxlockdepth = MAXDEPTH; |
|
Line 92 int maxlockdepth = MAXDEPTH; |
|
int lockf_debug = 0; |
int lockf_debug = 0; |
#endif |
#endif |
|
|
#define NOLOCKF (struct lockf *)0 |
|
#define SELF 0x1 |
#define SELF 0x1 |
#define OTHERS 0x2 |
#define OTHERS 0x2 |
|
|
Line 202 lf_findoverlap(struct lockf *lf, struct |
|
Line 233 lf_findoverlap(struct lockf *lf, struct |
|
off_t start, end; |
off_t start, end; |
|
|
*overlap = lf; |
*overlap = lf; |
if (lf == NOLOCKF) |
if (lf == NULL) |
return 0; |
return 0; |
#ifdef LOCKF_DEBUG |
#ifdef LOCKF_DEBUG |
if (lockf_debug & 2) |
if (lockf_debug & 2) |
Line 210 lf_findoverlap(struct lockf *lf, struct |
|
Line 241 lf_findoverlap(struct lockf *lf, struct |
|
#endif /* LOCKF_DEBUG */ |
#endif /* LOCKF_DEBUG */ |
start = lock->lf_start; |
start = lock->lf_start; |
end = lock->lf_end; |
end = lock->lf_end; |
while (lf != NOLOCKF) { |
while (lf != NULL) { |
if (((type == SELF) && lf->lf_id != lock->lf_id) || |
if (((type == SELF) && lf->lf_id != lock->lf_id) || |
((type == OTHERS) && lf->lf_id == lock->lf_id)) { |
((type == OTHERS) && lf->lf_id == lock->lf_id)) { |
*prev = &lf->lf_next; |
*prev = &lf->lf_next; |
Line 355 lf_wakelock(struct lockf *listhead) |
|
Line 386 lf_wakelock(struct lockf *listhead) |
|
while ((wakelock = TAILQ_FIRST(&listhead->lf_blkhd))) { |
while ((wakelock = TAILQ_FIRST(&listhead->lf_blkhd))) { |
KASSERT(wakelock->lf_next == listhead); |
KASSERT(wakelock->lf_next == listhead); |
TAILQ_REMOVE(&listhead->lf_blkhd, wakelock, lf_block); |
TAILQ_REMOVE(&listhead->lf_blkhd, wakelock, lf_block); |
wakelock->lf_next = NOLOCKF; |
wakelock->lf_next = NULL; |
#ifdef LOCKF_DEBUG |
#ifdef LOCKF_DEBUG |
if (lockf_debug & 2) |
if (lockf_debug & 2) |
lf_print("lf_wakelock: awakening", wakelock); |
lf_print("lf_wakelock: awakening", wakelock); |
Line 378 lf_clearlock(struct lockf *unlock, struc |
|
Line 409 lf_clearlock(struct lockf *unlock, struc |
|
struct lockf *overlap, **prev; |
struct lockf *overlap, **prev; |
int ovcase; |
int ovcase; |
|
|
if (lf == NOLOCKF) |
if (lf == NULL) |
return 0; |
return 0; |
#ifdef LOCKF_DEBUG |
#ifdef LOCKF_DEBUG |
if (unlock->lf_type != F_UNLCK) |
if (unlock->lf_type != F_UNLCK) |
Line 457 lf_getblock(struct lockf *lock) |
|
Line 488 lf_getblock(struct lockf *lock) |
|
*/ |
*/ |
lf = overlap->lf_next; |
lf = overlap->lf_next; |
} |
} |
return NOLOCKF; |
return NULL; |
} |
} |
|
|
/* |
/* |
Line 511 lf_setlock(struct lockf *lock, struct lo |
|
Line 542 lf_setlock(struct lockf *lock, struct lo |
|
struct lwp *wlwp; |
struct lwp *wlwp; |
volatile const struct lockf *waitblock; |
volatile const struct lockf *waitblock; |
int i = 0; |
int i = 0; |
|
struct proc *p; |
|
|
/* |
p = (struct proc *)block->lf_id; |
* The block is waiting on something. if_lwp will be |
KASSERT(p != NULL); |
* 0 once the lock is granted, so we terminate the |
while (i++ < maxlockdepth) { |
* loop if we find this. |
simple_lock(&p->p_lock); |
*/ |
if (p->p_nlwps > 1) { |
wlwp = block->lf_lwp; |
simple_unlock(&p->p_lock); |
while (wlwp && (i++ < maxlockdepth)) { |
break; |
|
} |
|
wlwp = LIST_FIRST(&p->p_lwps); |
|
if (wlwp->l_wmesg != lockstr) { |
|
simple_unlock(&p->p_lock); |
|
break; |
|
} |
|
simple_unlock(&p->p_lock); |
waitblock = wlwp->l_wchan; |
waitblock = wlwp->l_wchan; |
|
if (waitblock == NULL) { |
|
/* |
|
* this lwp just got up but |
|
* not returned from ltsleep yet. |
|
*/ |
|
break; |
|
} |
/* Get the owner of the blocking lock */ |
/* Get the owner of the blocking lock */ |
waitblock = waitblock->lf_next; |
waitblock = waitblock->lf_next; |
if ((waitblock->lf_flags & F_POSIX) == 0) |
if ((waitblock->lf_flags & F_POSIX) == 0) |
break; |
break; |
wlwp = waitblock->lf_lwp; |
p = (struct proc *)waitblock->lf_id; |
if (wlwp == lock->lf_lwp) { |
if (p == curproc) { |
lf_free(lock); |
lf_free(lock); |
return EDEADLK; |
return EDEADLK; |
} |
} |
Line 571 lf_setlock(struct lockf *lock, struct lo |
|
Line 617 lf_setlock(struct lockf *lock, struct lo |
|
* blocked list) and/or by another process |
* blocked list) and/or by another process |
* releasing a lock (in which case we have already |
* releasing a lock (in which case we have already |
* been removed from the blocked list and our |
* been removed from the blocked list and our |
* lf_next field set to NOLOCKF). |
* lf_next field set to NULL). |
*/ |
*/ |
if (lock->lf_next != NOLOCKF) { |
if (lock->lf_next != NULL) { |
TAILQ_REMOVE(&lock->lf_next->lf_blkhd, lock, lf_block); |
TAILQ_REMOVE(&lock->lf_next->lf_blkhd, lock, lf_block); |
lock->lf_next = NOLOCKF; |
lock->lf_next = NULL; |
} |
} |
if (error) { |
if (error) { |
lf_free(lock); |
lf_free(lock); |
Line 590 lf_setlock(struct lockf *lock, struct lo |
|
Line 636 lf_setlock(struct lockf *lock, struct lo |
|
* Skip over locks owned by other processes. |
* Skip over locks owned by other processes. |
* Handle any locks that overlap and are owned by ourselves. |
* Handle any locks that overlap and are owned by ourselves. |
*/ |
*/ |
lock->lf_lwp = 0; |
|
prev = head; |
prev = head; |
block = *head; |
block = *head; |
needtolink = 1; |
needtolink = 1; |
Line 795 lf_advlock(struct vop_advlock_args *ap, |
|
Line 840 lf_advlock(struct vop_advlock_args *ap, |
|
/* |
/* |
* byte-range lock might need one more lock. |
* byte-range lock might need one more lock. |
*/ |
*/ |
sparelock = lf_alloc(p->p_ucred->cr_uid, 0); |
sparelock = lf_alloc(kauth_cred_geteuid(p->p_cred), 0); |
if (sparelock == NULL) { |
if (sparelock == NULL) { |
error = ENOMEM; |
error = ENOMEM; |
goto quit; |
goto quit; |
Line 812 lf_advlock(struct vop_advlock_args *ap, |
|
Line 857 lf_advlock(struct vop_advlock_args *ap, |
|
return EINVAL; |
return EINVAL; |
} |
} |
|
|
lock = lf_alloc(p->p_ucred->cr_uid, ap->a_op != F_UNLCK ? 1 : 2); |
lock = lf_alloc(kauth_cred_geteuid(p->p_cred), ap->a_op != F_UNLCK ? 1 : 2); |
if (lock == NULL) { |
if (lock == NULL) { |
error = ENOMEM; |
error = ENOMEM; |
goto quit; |
goto quit; |
Line 855 lf_advlock(struct vop_advlock_args *ap, |
|
Line 900 lf_advlock(struct vop_advlock_args *ap, |
|
KASSERT(curproc == (struct proc *)ap->a_id); |
KASSERT(curproc == (struct proc *)ap->a_id); |
} |
} |
lock->lf_id = (struct proc *)ap->a_id; |
lock->lf_id = (struct proc *)ap->a_id; |
lock->lf_lwp = curlwp; |
|
|
|
/* |
/* |
* Do the requested operation. |
* Do the requested operation. |