
Annotation of src/sys/kern/kern_lock.c, Revision 1.181

1.181   ! riastrad    1: /*     $NetBSD: kern_lock.c,v 1.180 2022/09/13 09:28:05 riastradh Exp $        */
1.19      thorpej     2:
                      3: /*-
1.165     ad          4:  * Copyright (c) 2002, 2006, 2007, 2008, 2009, 2020 The NetBSD Foundation, Inc.
1.19      thorpej     5:  * All rights reserved.
                      6:  *
                      7:  * This code is derived from software contributed to The NetBSD Foundation
                      8:  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
1.105     ad          9:  * NASA Ames Research Center, and by Andrew Doran.
1.19      thorpej    10:  *
                     11:  * Redistribution and use in source and binary forms, with or without
                     12:  * modification, are permitted provided that the following conditions
                     13:  * are met:
                     14:  * 1. Redistributions of source code must retain the above copyright
                     15:  *    notice, this list of conditions and the following disclaimer.
                     16:  * 2. Redistributions in binary form must reproduce the above copyright
                     17:  *    notice, this list of conditions and the following disclaimer in the
                     18:  *    documentation and/or other materials provided with the distribution.
                     19:  *
                     20:  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
                     21:  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
                     22:  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
                     23:  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
                     24:  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
                     25:  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
                     26:  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
                     27:  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
                     28:  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
                     29:  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
                     30:  * POSSIBILITY OF SUCH DAMAGE.
                     31:  */
1.2       fvdl       32:
1.60      lukem      33: #include <sys/cdefs.h>
1.181   ! riastrad   34: __KERNEL_RCSID(0, "$NetBSD: kern_lock.c,v 1.180 2022/09/13 09:28:05 riastradh Exp $");
1.168     ad         35:
                     36: #ifdef _KERNEL_OPT
                     37: #include "opt_lockdebug.h"
                     38: #endif
1.105     ad         39:
1.1       fvdl       40: #include <sys/param.h>
                     41: #include <sys/proc.h>
                     42: #include <sys/lock.h>
1.2       fvdl       43: #include <sys/systm.h>
1.125     ad         44: #include <sys/kernel.h>
1.105     ad         45: #include <sys/lockdebug.h>
1.122     ad         46: #include <sys/cpu.h>
                     47: #include <sys/syslog.h>
1.128     ad         48: #include <sys/atomic.h>
1.148     ad         49: #include <sys/lwp.h>
1.160     ozaki-r    50: #include <sys/pserialize.h>
1.105     ad         51:
1.168     ad         52: #if defined(DIAGNOSTIC) && !defined(LOCKDEBUG)
                     53: #include <sys/ksyms.h>
                     54: #endif
                     55:
1.131     ad         56: #include <machine/lock.h>
1.1       fvdl       57:
1.98      ad         58: #include <dev/lockstat.h>
                     59:
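                         /*
                          * A note on the macro below (assumption from how it is used
                          * here): the caller's return address, recorded by LOCKDEBUG
                          * and lockstat as the site of each lock operation.
                          */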
1.134     ad         60: #define        RETURN_ADDRESS  (uintptr_t)__builtin_return_address(0)
1.25      thorpej    61:
1.127     yamt       62: bool   kernel_lock_dodebug;
1.132     ad         63:
                     64: __cpu_simple_lock_t kernel_lock[CACHE_LINE_SIZE / sizeof(__cpu_simple_lock_t)]
1.153     matt       65:     __cacheline_aligned;
1.1       fvdl       66:
1.96      yamt       67: void
1.135     yamt       68: assert_sleepable(void)
1.96      yamt       69: {
1.135     yamt       70:        const char *reason;
1.148     ad         71:        uint64_t pctr;
                     72:        bool idle;
1.96      yamt       73:
1.135     yamt       74:        if (panicstr != NULL) {
1.117     ad         75:                return;
1.135     yamt       76:        }
                     77:
1.132     ad         78:        LOCKDEBUG_BARRIER(kernel_lock, 1);
1.135     yamt       79:
1.148     ad         80:        /*
                     81:         * Avoid disabling/re-enabling preemption here since this
1.149     dyoung     82:         * routine may be called in delicate situations.
1.148     ad         83:         */
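                                /*
                                 * The loop re-samples lwp_pctr(), this LWP's context
                                 * switch counter: if it changed, the LWP may have been
                                 * preempted and migrated to another CPU between the
                                 * two samples, so the CURCPU_IDLE_P() result cannot be
                                 * trusted and the check is retried.
                                 */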
                     84:        do {
                     85:                pctr = lwp_pctr();
1.164     riastrad   86:                __insn_barrier();
1.148     ad         87:                idle = CURCPU_IDLE_P();
1.164     riastrad   88:                __insn_barrier();
1.148     ad         89:        } while (pctr != lwp_pctr());
                     90:
1.135     yamt       91:        reason = NULL;
1.173     skrll      92:        if (idle && !cold) {
1.135     yamt       93:                reason = "idle";
                     94:        }
                     95:        if (cpu_intr_p()) {
                     96:                reason = "interrupt";
1.97      yamt       97:        }
1.148     ad         98:        if (cpu_softintr_p()) {
1.135     yamt       99:                reason = "softint";
                    100:        }
1.160     ozaki-r   101:        if (!pserialize_not_in_read_section()) {
                    102:                reason = "pserialize";
                    103:        }
1.135     yamt      104:
                    105:        if (reason) {
                    106:                panic("%s: %s caller=%p", __func__, reason,
                    107:                    (void *)RETURN_ADDRESS);
                    108:        }
1.96      yamt      109: }
1.105     ad        110:
1.62      thorpej   111: /*
                    112:  * Functions for manipulating the kernel_lock.  We put them here
                    113:  * so that they show up in profiles.
                    114:  */
                    115:
1.105     ad        116: #define        _KERNEL_LOCK_ABORT(msg)                                         \
1.158     christos  117:     LOCKDEBUG_ABORT(__func__, __LINE__, kernel_lock, &_kernel_lock_ops, msg)
1.105     ad        118:
                    119: #ifdef LOCKDEBUG
                    120: #define        _KERNEL_LOCK_ASSERT(cond)                                       \
                    121: do {                                                                   \
                    122:        if (!(cond))                                                    \
                    123:                _KERNEL_LOCK_ABORT("assertion failed: " #cond);         \
                    124: } while (/* CONSTCOND */ 0)
                    125: #else
                    126: #define        _KERNEL_LOCK_ASSERT(cond)       /* nothing */
                    127: #endif
                    128:
1.163     ozaki-r   129: static void    _kernel_lock_dump(const volatile void *, lockop_printer_t);
1.105     ad        130:
                    131: lockops_t _kernel_lock_ops = {
1.161     ozaki-r   132:        .lo_name = "Kernel lock",
                    133:        .lo_type = LOCKOPS_SPIN,
                    134:        .lo_dump = _kernel_lock_dump,
1.105     ad        135: };
                    136:
1.174     riastrad  137: #ifdef LOCKDEBUG
                    138:
                    139: #include <ddb/ddb.h>
                    140:
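                         /*
                          * LOCKDEBUG diagnostic for a spun-out kernel lock: the CPU
                          * that timed out sends this IPI to the CPU recorded as the
                          * holder, which reports who is hogging the lock and prints a
                          * ddb stack trace before the spinner aborts below.
                          */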
                    141: static void
                    142: kernel_lock_trace_ipi(void *cookie)
                    143: {
                    144:
                    145:        printf("%s[%d %s]: hogging kernel lock\n", cpu_name(curcpu()),
                    146:            curlwp->l_lid,
                    147:            curlwp->l_name ? curlwp->l_name : curproc->p_comm);
                    148:        db_stacktrace();
                    149: }
                    150:
                    151: #endif
                    152:
1.85      yamt      153: /*
1.105     ad        154:  * Initialize the kernel lock.
1.85      yamt      155:  */
1.62      thorpej   156: void
1.122     ad        157: kernel_lock_init(void)
1.62      thorpej   158: {
                    159:
1.132     ad        160:        __cpu_simple_lock_init(kernel_lock);
                    161:        kernel_lock_dodebug = LOCKDEBUG_ALLOC(kernel_lock, &_kernel_lock_ops,
1.122     ad        162:            RETURN_ADDRESS);
1.62      thorpej   163: }
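                         /*
                          * Compile-time check that the padded kernel_lock array above
                          * contains at least one lock word.
                          */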
1.155     martin    164: CTASSERT(CACHE_LINE_SIZE >= sizeof(__cpu_simple_lock_t));
1.62      thorpej   165:
                    166: /*
1.105     ad        167:  * Print debugging information about the kernel lock.
1.62      thorpej   168:  */
1.162     ozaki-r   169: static void
1.163     ozaki-r   170: _kernel_lock_dump(const volatile void *junk, lockop_printer_t pr)
1.62      thorpej   171: {
1.85      yamt      172:        struct cpu_info *ci = curcpu();
1.62      thorpej   173:
1.105     ad        174:        (void)junk;
1.85      yamt      175:
1.163     ozaki-r   176:        pr("curcpu holds : %18d wanted by: %#018lx\n",
1.105     ad        177:            ci->ci_biglock_count, (long)ci->ci_biglock_wanted);
1.62      thorpej   178: }
                    179:
1.105     ad        180: /*
1.150     mrg       181:  * Acquire 'nlocks' holds on the kernel lock.
1.167     ad        182:  *
                    183:  * Although it may not look it, this is one of the most central, intricate
                    184:  * routines in the kernel, and tons of code elsewhere depends on its exact
                    185:  * behaviour.  If you change something in here, expect it to bite you in the
                    186:  * rear.
1.105     ad        187:  */
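                         /*
                          * Most code takes and releases the lock through the wrapper
                          * macros in <sys/systm.h> rather than calling this directly;
                          * a minimal sketch:
                          *
                          *      KERNEL_LOCK(1, NULL);           take one hold
                          *      ...                             serialized work
                          *      KERNEL_UNLOCK_ONE(NULL);        drop that hold
                          *
                          * Holds nest: an LWP that already holds the lock just bumps
                          * the per-CPU and per-LWP counts on the fast path below.
                          */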
1.62      thorpej   188: void
1.137     drochner  189: _kernel_lock(int nlocks)
1.62      thorpej   190: {
1.138     ad        191:        struct cpu_info *ci;
1.105     ad        192:        LOCKSTAT_TIMER(spintime);
                    193:        LOCKSTAT_FLAG(lsflag);
                    194:        struct lwp *owant;
1.165     ad        195: #ifdef LOCKDEBUG
1.174     riastrad  196:        static struct cpu_info *kernel_lock_holder;
1.165     ad        197:        u_int spins = 0;
1.180     riastrad  198:        u_int starttime = getticks();
1.165     ad        199: #endif
1.85      yamt      200:        int s;
1.137     drochner  201:        struct lwp *l = curlwp;
1.85      yamt      202:
1.105     ad        203:        _KERNEL_LOCK_ASSERT(nlocks > 0);
1.62      thorpej   204:
1.138     ad        205:        s = splvm();
                    206:        ci = curcpu();
1.105     ad        207:        if (ci->ci_biglock_count != 0) {
1.132     ad        208:                _KERNEL_LOCK_ASSERT(__SIMPLELOCK_LOCKED_P(kernel_lock));
1.105     ad        209:                ci->ci_biglock_count += nlocks;
1.122     ad        210:                l->l_blcnt += nlocks;
1.138     ad        211:                splx(s);
1.105     ad        212:                return;
                    213:        }
                    214:
1.122     ad        215:        _KERNEL_LOCK_ASSERT(l->l_blcnt == 0);
1.132     ad        216:        LOCKDEBUG_WANTLOCK(kernel_lock_dodebug, kernel_lock, RETURN_ADDRESS,
1.154     mlelstv   217:            0);
1.107     ad        218:
1.165     ad        219:        if (__predict_true(__cpu_simple_lock_try(kernel_lock))) {
1.177     riastrad  220: #ifdef LOCKDEBUG
1.176     riastrad  221:                kernel_lock_holder = curcpu();
1.177     riastrad  222: #endif
1.105     ad        223:                ci->ci_biglock_count = nlocks;
1.122     ad        224:                l->l_blcnt = nlocks;
1.144     ad        225:                LOCKDEBUG_LOCKED(kernel_lock_dodebug, kernel_lock, NULL,
1.127     yamt      226:                    RETURN_ADDRESS, 0);
1.105     ad        227:                splx(s);
                    228:                return;
                    229:        }
                    230:
1.132     ad        231:        /*
                    232:         * To remove the ordering constraint between adaptive mutexes
                    233:         * and kernel_lock we must make it appear as if this thread is
                    234:         * blocking.  For non-interlocked mutex release, a store fence
                    235:         * is required to ensure that the result of any mutex_exit()
                    236:         * by the current LWP becomes visible on the bus before the set
                    237:         * of ci->ci_biglock_wanted becomes visible.
                    238:         */
                    239:        membar_producer();
                    240:        owant = ci->ci_biglock_wanted;
1.167     ad        241:        ci->ci_biglock_wanted = l;
1.168     ad        242: #if defined(DIAGNOSTIC) && !defined(LOCKDEBUG)
                    243:        l->l_ld_wanted = __builtin_return_address(0);
                    244: #endif
1.105     ad        245:
                    246:        /*
1.167     ad        247:         * Spin until we acquire the lock.  Once we have it, record the
                    248:         * time spent with lockstat.
1.105     ad        249:         */
1.132     ad        250:        LOCKSTAT_ENTER(lsflag);
                    251:        LOCKSTAT_START_TIMER(lsflag, spintime);
1.105     ad        252:
                    253:        do {
1.122     ad        254:                splx(s);
1.132     ad        255:                while (__SIMPLELOCK_LOCKED_P(kernel_lock)) {
1.165     ad        256: #ifdef LOCKDEBUG
1.180     riastrad  257:                        if (SPINLOCK_SPINOUT(spins) && start_init_exec &&
                    258:                            (getticks() - starttime) > 10*hz) {
1.174     riastrad  259:                                ipi_msg_t msg = {
                    260:                                        .func = kernel_lock_trace_ipi,
                    261:                                };
1.175     riastrad  262:                                kpreempt_disable();
1.174     riastrad  263:                                ipi_unicast(&msg, kernel_lock_holder);
                    264:                                ipi_wait(&msg);
1.175     riastrad  265:                                kpreempt_enable();
1.178     riastrad  266:                                _KERNEL_LOCK_ABORT("spinout");
1.132     ad        267:                        }
1.179     riastrad  268: #endif
1.169     christos  269:                        SPINLOCK_BACKOFF_HOOK;
                    270:                        SPINLOCK_SPIN_HOOK;
1.105     ad        271:                }
1.132     ad        272:                s = splvm();
                    273:        } while (!__cpu_simple_lock_try(kernel_lock));
1.105     ad        274:
1.122     ad        275:        ci->ci_biglock_count = nlocks;
                    276:        l->l_blcnt = nlocks;
1.107     ad        277:        LOCKSTAT_STOP_TIMER(lsflag, spintime);
1.144     ad        278:        LOCKDEBUG_LOCKED(kernel_lock_dodebug, kernel_lock, NULL,
                    279:            RETURN_ADDRESS, 0);
1.132     ad        280:        if (owant == NULL) {
                    281:                LOCKSTAT_EVENT_RA(lsflag, kernel_lock,
                    282:                    LB_KERNEL_LOCK | LB_SPIN, 1, spintime, RETURN_ADDRESS);
                    283:        }
                    284:        LOCKSTAT_EXIT(lsflag);
1.167     ad        285:        splx(s);
1.105     ad        286:
                    287:        /*
1.132     ad        288:         * Now that we have kernel_lock, reset ci_biglock_wanted.  This
                    289:         * store must be unbuffered (immediately visible on the bus) in
1.157     skrll     290:         * order for non-interlocked mutex release to work correctly.
1.132     ad        291:         * It must be visible before a mutex_exit() can execute on this
                    292:         * processor.
                    293:         *
                    294:         * Note: only where CAS is available in hardware will this be
                    295:         * an unbuffered write, but non-interlocked release cannot be
                    296:         * done on CPUs without CAS in hardware.
1.105     ad        297:         */
1.132     ad        298:        (void)atomic_swap_ptr(&ci->ci_biglock_wanted, owant);
                    299:
                    300:        /*
                    301:         * Issue a memory barrier as we have acquired a lock.  This also
                    302:         * prevents stores from a following mutex_exit() being reordered
                    303:         * to occur before our store to ci_biglock_wanted above.
                    304:         */
1.165     ad        305: #ifndef __HAVE_ATOMIC_AS_MEMBAR
1.132     ad        306:        membar_enter();
1.165     ad        307: #endif
1.174     riastrad  308:
                    309: #ifdef LOCKDEBUG
                    310:        kernel_lock_holder = curcpu();
                    311: #endif
1.62      thorpej   312: }
                    313:
                    314: /*
1.105     ad        315:  * Release 'nlocks' holds on the kernel lock.  If 'nlocks' is zero, release
1.150     mrg       316:  * all holds; if it is -1, release a single hold, asserting that exactly
                          * one is held.
1.62      thorpej   317:  */
                    318: void
1.137     drochner  319: _kernel_unlock(int nlocks, int *countp)
1.62      thorpej   320: {
1.138     ad        321:        struct cpu_info *ci;
1.105     ad        322:        u_int olocks;
                    323:        int s;
1.137     drochner  324:        struct lwp *l = curlwp;
1.62      thorpej   325:
1.105     ad        326:        _KERNEL_LOCK_ASSERT(nlocks < 2);
1.62      thorpej   327:
1.122     ad        328:        olocks = l->l_blcnt;
1.77      yamt      329:
1.105     ad        330:        if (olocks == 0) {
                    331:                _KERNEL_LOCK_ASSERT(nlocks <= 0);
                    332:                if (countp != NULL)
                    333:                        *countp = 0;
                    334:                return;
                    335:        }
1.77      yamt      336:
1.132     ad        337:        _KERNEL_LOCK_ASSERT(__SIMPLELOCK_LOCKED_P(kernel_lock));
1.85      yamt      338:
1.105     ad        339:        if (nlocks == 0)
                    340:                nlocks = olocks;
                    341:        else if (nlocks == -1) {
                    342:                nlocks = 1;
                    343:                _KERNEL_LOCK_ASSERT(olocks == 1);
                    344:        }
1.138     ad        345:        s = splvm();
                    346:        ci = curcpu();
1.122     ad        347:        _KERNEL_LOCK_ASSERT(ci->ci_biglock_count >= l->l_blcnt);
                    348:        if (ci->ci_biglock_count == nlocks) {
1.132     ad        349:                LOCKDEBUG_UNLOCKED(kernel_lock_dodebug, kernel_lock,
1.127     yamt      350:                    RETURN_ADDRESS, 0);
1.122     ad        351:                ci->ci_biglock_count = 0;
1.132     ad        352:                __cpu_simple_unlock(kernel_lock);
1.138     ad        353:                l->l_blcnt -= nlocks;
1.122     ad        354:                splx(s);
1.139     ad        355:                if (l->l_dopreempt)
                    356:                        kpreempt(0);
1.138     ad        357:        } else {
1.122     ad        358:                ci->ci_biglock_count -= nlocks;
1.138     ad        359:                l->l_blcnt -= nlocks;
                    360:                splx(s);
                    361:        }
1.77      yamt      362:
1.105     ad        363:        if (countp != NULL)
                    364:                *countp = olocks;
1.77      yamt      365: }
1.152     jmcneill  366:
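                         /*
                          * Return true if the kernel lock is currently held by anyone,
                          * on any CPU.  This tests only the lock word; it does not
                          * establish that the caller is the holder.
                          */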
                    367: bool
                    368: _kernel_locked_p(void)
                    369: {
                    370:        return __SIMPLELOCK_LOCKED_P(kernel_lock);
                    371: }
