/*	version 1.5.2.9, 2007/10/18 15:47:34	*/

/*
 * Basic lock debugging code shared among lock primitives.
 */

#include <sys/cdefs.h>

__KERNEL_RCSID(0, "$NetBSD$");

#include "opt_multiprocessor.h" |
#include "opt_multiprocessor.h" |
#include "opt_ddb.h" |
#include "opt_ddb.h" |
|
|
|
#include <sys/cdefs.h> |
|
__KERNEL_RCSID(0, "$NetBSD$"); |
|
|
#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/lock.h>
#include <sys/lockdebug.h>
#include <sys/sleepq.h>
#include <sys/cpu.h>

#ifdef LOCKDEBUG

/* ... */

#define	LD_LOCKED	0x01
#define	LD_SLEEPER	0x02
#define	LD_MLOCKS	8
#define	LD_MLISTS	8192

#define	LD_NOID		(LD_MAX_LOCKS + 1)
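
/*
 * Both table sizes are powers of two: lockdebug_mhash() below reduces
 * the page-number hash with "hash & (LD_MLOCKS - 1)" and
 * "hash & (LD_MLISTS - 1)", which is only a valid modulus when the
 * size is a power of two.
 */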
|
|
/* ... typedef union lockdebuglk ... */

typedef struct lockdebug {
	_TAILQ_ENTRY(struct lockdebug, volatile) ld_chain;	/* free/sleeper/spinner list */
	_TAILQ_ENTRY(struct lockdebug, volatile) ld_achain;	/* secondary chain (not used in this excerpt) */
	_TAILQ_ENTRY(struct lockdebug, volatile) ld_mchain;	/* memory-hash chain */
	volatile void	*ld_lock;	/* address of the lock itself */
	lockops_t	*ld_lockops;	/* type/operations of the lock */
	struct lwp	*ld_lwp;	/* owning LWP, for sleep locks */
	uintptr_t	ld_locked;	/* address of last lock site */
	uintptr_t	ld_unlocked;	/* address of last unlock site */
	uintptr_t	ld_initaddr;	/* address of initialization site */
	u_int		ld_id;		/* debug ID from lockdebug_alloc() */
	uint16_t	ld_shares;	/* current shared (read) holds */
	uint16_t	ld_cpu;		/* CPU that last acquired the lock */

/* ... typedef _TAILQ_HEAD(lockdebuglist, ...) ... */

lockdebuglk_t	ld_sleeper_lk;
lockdebuglk_t	ld_spinner_lk;
lockdebuglk_t	ld_free_lk;
lockdebuglk_t	ld_mem_lk[LD_MLOCKS];

lockdebuglist_t	ld_mem_list[LD_MLISTS];
lockdebuglist_t	ld_sleepers;
lockdebuglist_t	ld_spinners;
lockdebuglist_t	ld_free;

/* ... lockdebug_t *ld_table[...] ... */

lockdebug_t	ld_prime[LD_BATCH];

static void	lockdebug_abort1(lockdebug_t *, lockdebuglk_t *lk,
		    const char *, const char *, bool);
static void	lockdebug_more(void);
static void	lockdebug_init(void);

/* ... in lockdebug_lock(lockdebuglk_t *lk): */
{
	int s;

	/*
	 * Block interrupts before spinning: an interrupt handler that
	 * itself takes a lockdebug lock could otherwise deadlock against
	 * us on this CPU.
	 */
	s = splhigh();
	__cpu_simple_lock(&lk->lk_lock);
	lk->lk_oldspl = s;
}

/* ... in lockdebug_unlock(lockdebuglk_t *lk): */

	splx(s);
}
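
/*
 * The fragment above is only the tail of lockdebug_unlock(); the
 * elided lines presumably restore s from lk->lk_oldspl and
 * __cpu_simple_unlock() the lock before the splx(s) seen here,
 * mirroring lockdebug_lock().
 */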
|
|
static inline void
lockdebug_mhash(volatile void *addr, lockdebuglk_t **lk, lockdebuglist_t **head)
{
	u_int hash;

	hash = (uintptr_t)addr >> PGSHIFT;
	*lk = &ld_mem_lk[hash & (LD_MLOCKS - 1)];
	*head = &ld_mem_list[hash & (LD_MLISTS - 1)];
	lockdebug_lock(*lk);
}
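
/*
 * Worked example of the hash above (assuming 4 KB pages, PGSHIFT = 12;
 * the real value is machine-dependent): for a lock at 0xdeadb000,
 * hash = 0xdeadb000 >> 12 = 0xdeadb, so the lock sits on
 * ld_mem_list[0xdeadb & 8191] = ld_mem_list[0xadb], guarded by
 * ld_mem_lk[0xdeadb & 7] = ld_mem_lk[3].  Hashing the page number means
 * every lock within one page lands on the same list, which is what lets
 * lockdebug_mem_check() below scan a freed page with a single lookup.
 */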
|
|
|
/*
 * lockdebug_lookup:
 *
 * ...
 */

/* ... in lockdebug_init(void): */
	}
	ld_freeptr = 1;
	ld_nfree = LD_BATCH - 1;

	/* Prime the memory-hash buckets used by lockdebug_mem_check(). */
	for (i = 0; i < LD_MLOCKS; i++)
		__cpu_simple_lock_init(&ld_mem_lk[i].lk_lock);
	for (i = 0; i < LD_MLISTS; i++)
		TAILQ_INIT(&ld_mem_list[i]);
}
|
|
/*
 * ...
 * structure.
 */
u_int
lockdebug_alloc(volatile void *lock, lockops_t *lo, uintptr_t initaddr)
{
#if 0
	lockdebuglist_t *head;
	lockdebuglk_t *lk;
#endif
	struct cpu_info *ci;
	lockdebug_t *ld;

	/* ... */

	/*
	 * Pinch a new debug structure.  We may recurse because we call
	 * kmem_alloc(), which may need to initialize new locks somewhere
	 * down the path.  If not recursing, we try to maintain at least
	 * LD_SLOP structures free, which should hopefully be enough to
	 * satisfy kmem_alloc().  If we can't provide a structure, not to
	 * worry: we'll just mark the lock as not having an ID.
	 */

	/* ... */
	ld->ld_locked = 0;
	ld->ld_unlocked = 0;
	ld->ld_lwp = NULL;
	ld->ld_initaddr = initaddr;

	if (lo->lo_sleeplock) {
		ld->ld_flags = LD_SLEEPER;

	/* ... */

		lockdebug_unlock(&ld_spinner_lk);
	}
|
|
#if 0
	/* Insert into address hash. */
	lockdebug_mhash(lock, &lk, &head);
	TAILQ_INSERT_HEAD(head, ld, ld_mchain);
	lockdebug_unlock(lk);
#endif

	return ld->ld_id;
}
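
/*
 * Usage sketch (hypothetical names, not from this file): a lock
 * primitive allocates its debug record at initialization time, keeps
 * the returned ID with the lock, and passes its caller's address so
 * the "initialized" line in lockdebug_dump() points at the init site.
 */
#if 0
void
example_lock_init(struct example_lock *el)
{
	el->el_id = lockdebug_alloc(el, &example_lockops,
	    (uintptr_t)__builtin_return_address(0));
}
#endif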
|
|
/* ... */

void
lockdebug_free(volatile void *lock, u_int id)
{
#if 0
	lockdebuglist_t *head;
#endif
	lockdebug_t *ld;
	lockdebuglk_t *lk;

	/* ... */
	if (ld->ld_lock != lock) {
		panic("lockdebug_free: destroying uninitialized lock %p"
		    "(ld_id=%d ld_lock=%p)", lock, id, ld->ld_lock);
		lockdebug_abort1(ld, lk, __func__, "lock record follows",
		    true);
	}
	if ((ld->ld_flags & LD_LOCKED) != 0 || ld->ld_shares != 0)
		lockdebug_abort1(ld, lk, __func__, "is locked", true);

	ld->ld_lock = NULL;

	/* ... */

	TAILQ_INSERT_TAIL(&ld_free, ld, ld_chain);
	ld_nfree++;
	lockdebug_unlock(&ld_free_lk);

#if 0
	/* Remove from address hash. */
	lockdebug_mhash(lock, &lk, &head);
	TAILQ_REMOVE(head, ld, ld_mchain);
	lockdebug_unlock(lk);
#endif
}
|
|
/*
 * ...
 */

/* ... in lockdebug_wantlock(u_int id, uintptr_t w...): */
		recurse = true;
	}

	/*
	 * Taking a sleep lock from interrupt context is always an
	 * error: interrupt handlers cannot sleep.
	 */
	if (cpu_intr_p()) {
		if ((ld->ld_flags & LD_SLEEPER) != 0)
			lockdebug_abort1(ld, lk, __func__,
			    "acquiring sleep lock from interrupt context",
			    true);
	}

	if (shared)
		ld->ld_shwant++;
	else
		ld->ld_exwant++;

	if (recurse)
		lockdebug_abort1(ld, lk, __func__, "locking against myself",
		    true);

	lockdebug_unlock(lk);
}
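
/*
 * Lifecycle, as visible in these fragments: a lock primitive calls
 * lockdebug_wantlock() before waiting for a lock, lockdebug_locked()
 * once it owns it, and lockdebug_unlocked() on release; ld_shwant and
 * ld_exwant count waiters, while LD_LOCKED and ld_shares track
 * ownership.
 */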

/* ... in lockdebug_locked(u_int id, uintptr_t whe...): */

	} else {
		if ((ld->ld_flags & LD_LOCKED) != 0)
			lockdebug_abort1(ld, lk, __func__,
			    "already locked", true);

		ld->ld_flags |= LD_LOCKED;
		ld->ld_locked = where;

/* ... in lockdebug_unlocked(u_int id, uintptr_t w...): */

	if (shared) {
		if (l->l_shlocks == 0)
			lockdebug_abort1(ld, lk, __func__,
			    "no shared locks held by LWP", true);
		if (ld->ld_shares == 0)
			lockdebug_abort1(ld, lk, __func__,
			    "no shared holds on this lock", true);
		l->l_shlocks--;
		ld->ld_shares--;
	} else {
		if ((ld->ld_flags & LD_LOCKED) == 0)
			lockdebug_abort1(ld, lk, __func__, "not locked",
			    true);

		if ((ld->ld_flags & LD_SLEEPER) != 0) {
			if (ld->ld_lwp != curlwp)
				lockdebug_abort1(ld, lk, __func__,
				    "not held by current LWP", true);
			ld->ld_flags &= ~LD_LOCKED;
			ld->ld_unlocked = where;
			ld->ld_lwp = NULL;

			/* ... */

		} else {
			if (ld->ld_cpu != (uint16_t)cpu_number())
				lockdebug_abort1(ld, lk, __func__,
				    "not held by current CPU", true);
			ld->ld_flags &= ~LD_LOCKED;
			ld->ld_unlocked = where;
			ld->ld_lwp = NULL;

/* ... in lockdebug_barrier(volatile void *spinloc...): */

			if (ld->ld_cpu != cpuno)
				lockdebug_abort1(ld, &ld_spinner_lk,
				    __func__,
				    "not held by current CPU", true);
			continue;
		}
		if (ld->ld_cpu == cpuno && (l->l_pflag & LP_INTR) == 0)
			lockdebug_abort1(ld, &ld_spinner_lk,
			    __func__, "spin lock held", true);
	}
	lockdebug_unlock(&ld_spinner_lk);
	}

	/* ... */

		TAILQ_FOREACH(ld, &ld_sleepers, ld_chain) {
			if (ld->ld_lwp == l)
				lockdebug_abort1(ld, &ld_sleeper_lk,
				    __func__, "sleep lock held", true);
		}
		lockdebug_unlock(&ld_sleeper_lk);
	}

	/* ... */
}
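
/*
 * Taken together, the barrier fragments above assert that the current
 * CPU holds no spin locks (interrupt LWPs excepted via LP_INTR) and,
 * optionally, that the current LWP holds no sleep locks; presumably it
 * is called on paths that are about to sleep or context switch.
 */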
|
|
/*
 * lockdebug_mem_check:
 *
 *	Check for in-use locks within a memory region that is
 *	being freed. We only check for active locks within the
 *	first page of the allocation.
 */
void
lockdebug_mem_check(const char *func, void *base, size_t sz)
{
#if 0
	lockdebuglist_t *head;
	lockdebuglk_t *lk;
	lockdebug_t *ld;
	uintptr_t sa, ea, la;

	sa = (uintptr_t)base;
	ea = sa + sz;

	lockdebug_mhash(base, &lk, &head);
	TAILQ_FOREACH(ld, head, ld_mchain) {
		la = (uintptr_t)ld->ld_lock;
		if (la >= sa && la < ea) {
			lockdebug_abort1(ld, lk, func,
			    "allocation contains active lock", !cold);
			return;
		}
	}
	lockdebug_unlock(lk);
#endif
}
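
/*
 * Note that this body, like the hash insert/remove in lockdebug_alloc()
 * and lockdebug_free(), is compiled out with #if 0: the memory-hash
 * machinery is scaffolded here but not yet enabled.  The !cold argument
 * means a hit found after early boot panics, while one found while the
 * system is still cold only prints the lock record.
 */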
|
|
|
/*
 * lockdebug_dump:
 *
 *	Dump information about a lock on panic, or for DDB.
 */

/* ... in lockdebug_dump(lockdebug_t *ld, void (*p...): */
"shares wanted: %18u exclusive: %18u\n" |
"shares wanted: %18u exclusive: %18u\n" |
"current cpu : %18u last held: %18u\n" |
"current cpu : %18u last held: %18u\n" |
"current lwp : %#018lx last held: %#018lx\n" |
"current lwp : %#018lx last held: %#018lx\n" |
"last locked : %#018lx unlocked : %#018lx\n" |
"last locked : %#018lx unlocked : %#018lx\n", |
"initialized : %#018lx\n", |
|
(long)ld->ld_lock, (sleeper ? "sleep/adaptive" : "spin"), |
(long)ld->ld_lock, (sleeper ? "sleep/adaptive" : "spin"), |
(unsigned)ld->ld_shares, ((ld->ld_flags & LD_LOCKED) != 0), |
(unsigned)ld->ld_shares, ((ld->ld_flags & LD_LOCKED) != 0), |
(unsigned)ld->ld_shwant, (unsigned)ld->ld_exwant, |
(unsigned)ld->ld_shwant, (unsigned)ld->ld_exwant, |
(unsigned)cpu_number(), (unsigned)ld->ld_cpu, |
(unsigned)cpu_number(), (unsigned)ld->ld_cpu, |
(long)curlwp, (long)ld->ld_lwp, |
(long)curlwp, (long)ld->ld_lwp, |
(long)ld->ld_locked, (long)ld->ld_unlocked, |
(long)ld->ld_locked, (long)ld->ld_unlocked); |
(long)ld->ld_initaddr); |
|
|
|
if (ld->ld_lockops->lo_dump != NULL) |
if (ld->ld_lockops->lo_dump != NULL) |
(*ld->ld_lockops->lo_dump)(ld->ld_lock); |
(*ld->ld_lockops->lo_dump)(ld->ld_lock); |

/* ... */

static void
lockdebug_abort1(lockdebug_t *ld, lockdebuglk_t *lk, const char *func,
		 const char *msg, bool dopanic)
{
|
|
	printf_nolog("%s error: %s: %s\n\n", ld->ld_lockops->lo_name,
	    /* ... */
	lockdebug_dump(ld, printf_nolog);
	lockdebug_unlock(lk);
	printf_nolog("\n");
	/*
	 * dopanic is false only for the early-boot case: in this
	 * excerpt lockdebug_mem_check() passes !cold; every other
	 * caller passes true.
	 */
	if (dopanic)
		panic("LOCKDEBUG");
}

#endif	/* LOCKDEBUG */

/* ... in lockdebug_abort(u_int id, volatile void ...): */

	lockdebuglk_t *lk;

	if ((ld = lockdebug_lookup(id, &lk)) != NULL) {
		lockdebug_abort1(ld, lk, func, msg, true);
		/* NOTREACHED */
	}
#endif	/* LOCKDEBUG */