Please note that diffs are not public domain; they are subject to the copyright notices on the relevant files.

===================================================================
RCS file: /ftp/cvs/cvsroot/src/sys/kern/kern_lock.c,v
rcsdiff: /ftp/cvs/cvsroot/src/sys/kern/kern_lock.c,v: warning: Unknown phrases like `commitid ...;' are present.
retrieving revision 1.134
retrieving revision 1.134.6.3
diff -u -p -r1.134 -r1.134.6.3
--- src/sys/kern/kern_lock.c	2008/01/30 14:54:26	1.134
+++ src/sys/kern/kern_lock.c	2008/06/29 09:33:14	1.134.6.3
@@ -1,4 +1,4 @@
-/*	$NetBSD: kern_lock.c,v 1.134 2008/01/30 14:54:26 ad Exp $	*/
+/*	$NetBSD: kern_lock.c,v 1.134.6.3 2008/06/29 09:33:14 mjf Exp $	*/
 
 /*-
  * Copyright (c) 2002, 2006, 2007, 2008 The NetBSD Foundation, Inc.
@@ -16,13 +16,6 @@
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- *    must display the following acknowledgement:
- *        This product includes software developed by the NetBSD
- *        Foundation, Inc. and its contributors.
- * 4. Neither the name of The NetBSD Foundation nor the names of its
- *    contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
  *
  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
@@ -38,9 +31,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_lock.c,v 1.134 2008/01/30 14:54:26 ad Exp $");
-
-#include "opt_multiprocessor.h"
+__KERNEL_RCSID(0, "$NetBSD: kern_lock.c,v 1.134.6.3 2008/06/29 09:33:14 mjf Exp $");
 
 #include <sys/param.h>
 #include <sys/proc.h>
@@ -64,29 +55,37 @@ bool	kernel_lock_dodebug;
 
 __cpu_simple_lock_t kernel_lock[CACHE_LINE_SIZE / sizeof(__cpu_simple_lock_t)]
     __aligned(CACHE_LINE_SIZE);
 
-#if defined(LOCKDEBUG)
+#if defined(DEBUG) || defined(LKM)
 void
-assert_sleepable(struct simplelock *interlock, const char *msg)
+assert_sleepable(void)
 {
+	const char *reason;
 
-	if (panicstr != NULL)
+	if (panicstr != NULL) {
 		return;
+	}
+
+	LOCKDEBUG_BARRIER(kernel_lock, 1);
+
+	reason = NULL;
 	if (CURCPU_IDLE_P() && !cold) {
-		panic("assert_sleepable: idle");
+		reason = "idle";
+	}
+	if (cpu_intr_p()) {
+		reason = "interrupt";
+	}
+	if ((curlwp->l_pflag & LP_INTR) != 0) {
+		reason = "softint";
+	}
+
+	if (reason) {
+		panic("%s: %s caller=%p", __func__, reason,
+		    (void *)RETURN_ADDRESS);
 	}
 }
-#endif
+#endif	/* defined(DEBUG) || defined(LKM) */
 
 /*
- * rump doesn't need the kernel lock so force it out.  We cannot
- * currently easily include it for compilation because of
- * a) SPINLOCK_* b) membar_producer().  They are defined in different
- * places / way for each arch, so just simply do not bother to
- * fight a lot for no gain (i.e. pain but still no gain).
- */
-#ifndef _RUMPKERNEL
-/*
  * Functions for manipulating the kernel_lock.  We put them here
  * so that they show up in profiles.
  */
@@ -108,7 +107,7 @@ void	_kernel_lock_dump(volatile void *);
 
 lockops_t _kernel_lock_ops = {
 	"Kernel lock",
-	0,
+	LOCKOPS_SPIN,
 	_kernel_lock_dump
 };
 
@@ -144,37 +143,36 @@ _kernel_lock_dump(volatile void *junk)
  * acquisition is from process context.
  */
 void
-_kernel_lock(int nlocks, struct lwp *l)
+_kernel_lock(int nlocks)
 {
-	struct cpu_info *ci = curcpu();
+	struct cpu_info *ci;
 	LOCKSTAT_TIMER(spintime);
 	LOCKSTAT_FLAG(lsflag);
 	struct lwp *owant;
 	u_int spins;
 	int s;
+	struct lwp *l = curlwp;
 
-	if (nlocks == 0)
-		return;
 	_KERNEL_LOCK_ASSERT(nlocks > 0);
 
-	l = curlwp;
-
+	s = splvm();
+	ci = curcpu();
 	if (ci->ci_biglock_count != 0) {
 		_KERNEL_LOCK_ASSERT(__SIMPLELOCK_LOCKED_P(kernel_lock));
 		ci->ci_biglock_count += nlocks;
 		l->l_blcnt += nlocks;
+		splx(s);
 		return;
 	}
 
 	_KERNEL_LOCK_ASSERT(l->l_blcnt == 0);
 	LOCKDEBUG_WANTLOCK(kernel_lock_dodebug, kernel_lock, RETURN_ADDRESS,
-	    0);
+	    false, false);
 
-	s = splvm();
 	if (__cpu_simple_lock_try(kernel_lock)) {
 		ci->ci_biglock_count = nlocks;
 		l->l_blcnt = nlocks;
-		LOCKDEBUG_LOCKED(kernel_lock_dodebug, kernel_lock,
+		LOCKDEBUG_LOCKED(kernel_lock_dodebug, kernel_lock, NULL,
 		    RETURN_ADDRESS, 0);
 		splx(s);
 		return;
@@ -204,7 +202,9 @@ _kernel_lock(int nlocks, struct lwp *l)
 		splx(s);
 		while (__SIMPLELOCK_LOCKED_P(kernel_lock)) {
 			if (SPINLOCK_SPINOUT(spins)) {
-				_KERNEL_LOCK_ABORT("spinout");
+				extern int start_init_exec;
+				if (!start_init_exec)
+					_KERNEL_LOCK_ABORT("spinout");
 			}
 			SPINLOCK_BACKOFF_HOOK;
 			SPINLOCK_SPIN_HOOK;
@@ -215,7 +215,8 @@ _kernel_lock(int nlocks, struct lwp *l)
 	ci->ci_biglock_count = nlocks;
 	l->l_blcnt = nlocks;
 	LOCKSTAT_STOP_TIMER(lsflag, spintime);
-	LOCKDEBUG_LOCKED(kernel_lock_dodebug, kernel_lock, RETURN_ADDRESS, 0);
+	LOCKDEBUG_LOCKED(kernel_lock_dodebug, kernel_lock, NULL,
+	    RETURN_ADDRESS, 0);
 	if (owant == NULL) {
 		LOCKSTAT_EVENT_RA(lsflag, kernel_lock, LB_KERNEL_LOCK | LB_SPIN,
 		    1, spintime, RETURN_ADDRESS);
@@ -249,13 +250,12 @@ _kernel_lock(int nlocks, struct lwp *l)
 * all holds.  If 'l' is non-null, the release is from process context.
 */
 void
-_kernel_unlock(int nlocks, struct lwp *l, int *countp)
+_kernel_unlock(int nlocks, int *countp)
 {
-	struct cpu_info *ci = curcpu();
+	struct cpu_info *ci;
 	u_int olocks;
 	int s;
-
-	l = curlwp;
+	struct lwp *l = curlwp;
 
 	_KERNEL_LOCK_ASSERT(nlocks < 2);
 
@@ -276,21 +276,24 @@ _kernel_unlock(int nlocks, struct lwp *l
 		nlocks = 1;
 		_KERNEL_LOCK_ASSERT(olocks == 1);
 	}
-
+	s = splvm();
+	ci = curcpu();
 	_KERNEL_LOCK_ASSERT(ci->ci_biglock_count >= l->l_blcnt);
-
-	l->l_blcnt -= nlocks;
 	if (ci->ci_biglock_count == nlocks) {
-		s = splvm();
 		LOCKDEBUG_UNLOCKED(kernel_lock_dodebug, kernel_lock,
 		    RETURN_ADDRESS, 0);
 		ci->ci_biglock_count = 0;
 		__cpu_simple_unlock(kernel_lock);
+		l->l_blcnt -= nlocks;
 		splx(s);
-	} else
+		if (l->l_dopreempt)
+			kpreempt(0);
+	} else {
 		ci->ci_biglock_count -= nlocks;
+		l->l_blcnt -= nlocks;
+		splx(s);
+	}
 
 	if (countp != NULL)
 		*countp = olocks;
 }
-#endif	/* !_RUMPKERNEL */
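
For orientation, here is a minimal caller sketch against the interfaces as they look after this diff; the sketch is not part of the patch. It assumes the prototypes are picked up from <sys/systm.h> as usual, and the function name example_biglock_section() is made up for illustration; in-tree code would normally go through the KERNEL_LOCK()/KERNEL_UNLOCK_*() wrapper macros rather than calling these functions directly.

#include <sys/param.h>
#include <sys/systm.h>	/* assumed to declare _kernel_lock() and friends */

static void
example_biglock_section(void)
{

	/* The sleepability check no longer takes interlock/message args. */
	assert_sleepable();

	/* Take one hold of the big lock; curlwp is now implied. */
	_kernel_lock(1);

	/* ... code that still relies on the big lock ... */

	/* Drop the single hold taken above; no struct lwp * here either. */
	_kernel_unlock(1, NULL);
}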