[BACK]Return to kern_softint.c CVS log [TXT][DIR] Up to [cvs.NetBSD.org] / src / sys / kern

Annotation of src/sys/kern/kern_softint.c, Revision 1.56.2.3

1.56.2.3! ad          1: /*     $NetBSD: kern_softint.c,v 1.61 2020/02/17 21:44:42 ad Exp $     */
1.2       ad          2:
                      3: /*-
1.56.2.2  ad          4:  * Copyright (c) 2007, 2008, 2019, 2020 The NetBSD Foundation, Inc.
1.2       ad          5:  * All rights reserved.
                      6:  *
                      7:  * This code is derived from software contributed to The NetBSD Foundation
                      8:  * by Andrew Doran.
                      9:  *
                     10:  * Redistribution and use in source and binary forms, with or without
                     11:  * modification, are permitted provided that the following conditions
                     12:  * are met:
                     13:  * 1. Redistributions of source code must retain the above copyright
                     14:  *    notice, this list of conditions and the following disclaimer.
                     15:  * 2. Redistributions in binary form must reproduce the above copyright
                     16:  *    notice, this list of conditions and the following disclaimer in the
                     17:  *    documentation and/or other materials provided with the distribution.
                     18:  *
                     19:  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
                     20:  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
                     21:  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
                     22:  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
                     23:  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
                     24:  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
                     25:  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
                     26:  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
                     27:  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
                     28:  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
                     29:  * POSSIBILITY OF SUCH DAMAGE.
                     30:  */
                     31:
                     32: /*
1.5       ad         33:  * Generic software interrupt framework.
                     34:  *
                     35:  * Overview
                     36:  *
                     37:  *     The soft interrupt framework provides a mechanism to schedule a
                     38:  *     low priority callback that runs with thread context.  It allows
                     39:  *     for dynamic registration of software interrupts, and for fair
                     40:  *     queueing and prioritization of those interrupts.  The callbacks
                     41:  *     can be scheduled to run from nearly any point in the kernel: by
                     42:  *     code running with thread context, by code running from a
                     43:  *     hardware interrupt handler, and at any interrupt priority
                     44:  *     level.
                     45:  *
                     46:  * Priority levels
                     47:  *
                     48:  *     Since soft interrupt dispatch can be tied to the underlying
                     49:  *     architecture's interrupt dispatch code, it can be limited
                     50:  *     both by the capabilities of the hardware and the capabilities
                     51:  *     of the interrupt dispatch code itself.  The number of priority
                     52:  *     levels is restricted to four.  In order of priority (lowest to
                     53:  *     highest) the levels are: clock, bio, net, serial.
                     54:  *
                     55:  *     The names are symbolic and in isolation do not have any direct
                     56:  *     connection with a particular kind of device activity: they are
                     57:  *     only meant as a guide.
                     58:  *
                     59:  *     The four priority levels map directly to scheduler priority
                     60:  *     levels, and where the architecture implements 'fast' software
                     61:  *     interrupts, they also map onto interrupt priorities.  The
                     62:  *     interrupt priorities are intended to be hidden from machine
                     63:  *     independent code, which should use thread-safe mechanisms to
                     64:  *     synchronize with software interrupts (for example: mutexes).
                     65:  *
                     66:  * Capabilities
                     67:  *
                     68:  *     Software interrupts run with limited machine context.  In
                      69:  *     particular, they do not possess any address space context.  They
                     70:  *     should not try to operate on user space addresses, or to use
                     71:  *     virtual memory facilities other than those noted as interrupt
                     72:  *     safe.
                     73:  *
                     74:  *     Unlike hardware interrupts, software interrupts do have thread
                     75:  *     context.  They may block on synchronization objects, sleep, and
                     76:  *     resume execution at a later time.
                     77:  *
                     78:  *     Since software interrupts are a limited resource and run with
                     79:  *     higher priority than most other LWPs in the system, all
                     80:  *     block-and-resume activity by a software interrupt must be kept
                      81:  *     short to allow further processing at that level to continue.  By
                     82:  *     extension, code running with process context must take care to
                     83:  *     ensure that any lock that may be taken from a software interrupt
                     84:  *     can not be held for more than a short period of time.
                     85:  *
                     86:  *     The kernel does not allow software interrupts to use facilities
                     87:  *     or perform actions that may block for a significant amount of
                     88:  *     time.  This means that it's not valid for a software interrupt
1.10      ad         89:  *     to sleep on condition variables or wait for resources to become
                     90:  *     available (for example, memory).
1.5       ad         91:  *
                     92:  * Per-CPU operation
                     93:  *
                     94:  *     If a soft interrupt is triggered on a CPU, it can only be
                     95:  *     dispatched on the same CPU.  Each LWP dedicated to handling a
                     96:  *     soft interrupt is bound to its home CPU, so if the LWP blocks
                     97:  *     and needs to run again, it can only run there.  Nearly all data
                     98:  *     structures used to manage software interrupts are per-CPU.
                     99:  *
                    100:  *     The per-CPU requirement is intended to reduce "ping-pong" of
                    101:  *     cache lines between CPUs: lines occupied by data structures
                    102:  *     used to manage the soft interrupts, and lines occupied by data
                    103:  *     items being passed down to the soft interrupt.  As a positive
                    104:  *     side effect, this also means that the soft interrupt dispatch
                     105:  *     code does not need to use spinlocks to synchronize.
                    106:  *
                    107:  * Generic implementation
                    108:  *
                    109:  *     A generic, low performance implementation is provided that
                    110:  *     works across all architectures, with no machine-dependent
                    111:  *     modifications needed.  This implementation uses the scheduler,
                    112:  *     and so has a number of restrictions:
                    113:  *
                    114:  *     1) The software interrupts are not currently preemptive, so
                    115:  *     must wait for the currently executing LWP to yield the CPU.
                    116:  *     This can introduce latency.
                    117:  *
                    118:  *     2) An expensive context switch is required for a software
                    119:  *     interrupt to be handled.
                    120:  *
                    121:  * 'Fast' software interrupts
                    122:  *
                     123:  *     If an architecture defines __HAVE_FAST_SOFTINTS, it implements
                    124:  *     the fast mechanism.  Threads running either in the kernel or in
                    125:  *     userspace will be interrupted, but will not be preempted.  When
                    126:  *     the soft interrupt completes execution, the interrupted LWP
                    127:  *     is resumed.  Interrupt dispatch code must provide the minimum
                    128:  *     level of context necessary for the soft interrupt to block and
                    129:  *     be resumed at a later time.  The machine-dependent dispatch
                    130:  *     path looks something like the following:
                    131:  *
                    132:  *     softintr()
                    133:  *     {
                    134:  *             go to IPL_HIGH if necessary for switch;
                    135:  *             save any necessary registers in a format that can be
                    136:  *                 restored by cpu_switchto if the softint blocks;
                    137:  *             arrange for cpu_switchto() to restore into the
                    138:  *                 trampoline function;
                    139:  *             identify LWP to handle this interrupt;
                    140:  *             switch to the LWP's stack;
                    141:  *             switch register stacks, if necessary;
                    142:  *             assign new value of curlwp;
                    143:  *             call MI softint_dispatch, passing old curlwp and IPL
                    144:  *                 to execute interrupt at;
                    145:  *             switch back to old stack;
                    146:  *             switch back to old register stack, if necessary;
                    147:  *             restore curlwp;
                    148:  *             return to interrupted LWP;
                    149:  *     }
                    150:  *
                    151:  *     If the soft interrupt blocks, a trampoline function is returned
                    152:  *     to in the context of the interrupted LWP, as arranged for by
                    153:  *     softint():
                    154:  *
                    155:  *     softint_ret()
                    156:  *     {
                    157:  *             unlock soft interrupt LWP;
                    158:  *             resume interrupt processing, likely returning to
                    159:  *                 interrupted LWP or dispatching another, different
                    160:  *                 interrupt;
                    161:  *     }
                    162:  *
                    163:  *     Once the soft interrupt has fired (and even if it has blocked),
                    164:  *     no further soft interrupts at that level will be triggered by
                    165:  *     MI code until the soft interrupt handler has ceased execution.
                    166:  *     If a soft interrupt handler blocks and is resumed, it resumes
                    167:  *     execution as a normal LWP (kthread) and gains VM context.  Only
                    168:  *     when it has completed and is ready to fire again will it
                    169:  *     interrupt other threads.
1.2       ad        170:  */
                    171:
                    172: #include <sys/cdefs.h>
1.56.2.3! ad        173: __KERNEL_RCSID(0, "$NetBSD: kern_softint.c,v 1.61 2020/02/17 21:44:42 ad Exp $");
1.2       ad        174:
                    175: #include <sys/param.h>
1.5       ad        176: #include <sys/proc.h>
1.2       ad        177: #include <sys/intr.h>
1.41      rmind     178: #include <sys/ipi.h>
1.56.2.3! ad        179: #include <sys/lock.h>
1.5       ad        180: #include <sys/mutex.h>
1.45      msaitoh   181: #include <sys/kernel.h>
1.5       ad        182: #include <sys/kthread.h>
                    183: #include <sys/evcnt.h>
                    184: #include <sys/cpu.h>
1.24      ad        185: #include <sys/xcall.h>
1.5       ad        186:
                    187: #include <net/netisr.h>
                    188:
                    189: #include <uvm/uvm_extern.h>
                    190:
/* This could overlap with signal info in struct lwp. */
typedef struct softint {
	SIMPLEQ_HEAD(, softhand) si_q;		/* pending handlers, FIFO order */
	struct lwp		*si_lwp;	/* dedicated handler LWP */
	struct cpu_info		*si_cpu;	/* home CPU; softints never migrate */
	uintptr_t		si_machdep;	/* MD cookie from softint_init_md() */
	struct evcnt		si_evcnt;	/* dispatch event counter */
	struct evcnt		si_evcnt_block;	/* counter for blocked dispatches */
	int			si_active;	/* non-zero while dispatch pending/running */
	char			si_name[8];	/* evcnt group name, e.g. "net/0" */
	char			si_name_block[8+6];	/* evcnt name, e.g. "net block/0" */
} softint_t;
                    203:
typedef struct softhand {
	SIMPLEQ_ENTRY(softhand) sh_q;		/* entry on softint_t::si_q */
	void			(*sh_func)(void *);	/* handler; NULL marks a free slot */
	void			*sh_arg;	/* opaque argument for sh_func */
	softint_t		*sh_isr;	/* per-CPU softint for this level */
	u_int			sh_flags;	/* SOFTINT_* incl. PENDING/ACTIVE state */
	u_int			sh_ipi_id;	/* IPI id if SOFTINT_RCPU, else 0 */
} softhand_t;
                    212:
typedef struct softcpu {
	struct cpu_info		*sc_cpu;	/* back pointer to owning CPU */
	softint_t		sc_int[SOFTINT_COUNT];	/* one softint per level */
	softhand_t		sc_hand[1];	/* handler table; real size set at
						 * runtime (softint_max entries) */
} softcpu_t;
                    218:
static void	softint_thread(void *);

/* Bytes allocated per CPU for softcpu_t + handler table; rounded to a page. */
u_int		softint_bytes = 32768;
/* NOTE(review): consumer of softint_timing is not visible in this chunk. */
u_int		softint_timing;
static u_int	softint_max;		/* handler slots per CPU */
static kmutex_t	softint_lock;		/* serializes establish/disestablish */
static void	*softint_netisrs[NETISR_MAX];	/* legacy netisr handles */
1.2       ad        226:
/*
 * softint_init_isr:
 *
 *	Initialize a single interrupt level for a single CPU: create the
 *	dedicated handler LWP, attach event counters, and hand the level
 *	to machine-dependent code.
 */
static void
softint_init_isr(softcpu_t *sc, const char *desc, pri_t pri, u_int level)
{
	struct cpu_info *ci;
	softint_t *si;
	int error;

	si = &sc->sc_int[level];
	ci = sc->sc_cpu;
	si->si_cpu = ci;

	SIMPLEQ_INIT(&si->si_q);

	/*
	 * Handler LWP is bound to this CPU (ci) and starts idle; it is
	 * made runnable only when the level is triggered.
	 */
	error = kthread_create(pri, KTHREAD_MPSAFE | KTHREAD_INTR |
	    KTHREAD_IDLE, ci, softint_thread, si, &si->si_lwp,
	    "soft%s/%u", desc, ci->ci_index);
	if (error != 0)
		panic("softint_init_isr: error %d", error);

	/* Per-CPU evcnt names, e.g. "net/0" and "net block/0". */
	snprintf(si->si_name, sizeof(si->si_name), "%s/%u", desc,
	    ci->ci_index);
	evcnt_attach_dynamic(&si->si_evcnt, EVCNT_TYPE_MISC, NULL,
	   "softint", si->si_name);
	snprintf(si->si_name_block, sizeof(si->si_name_block), "%s block/%u",
	    desc, ci->ci_index);
	evcnt_attach_dynamic(&si->si_evcnt_block, EVCNT_TYPE_MISC, NULL,
	   "softint", si->si_name_block);

	/* Let the thread find its softint_t, then do MD setup. */
	si->si_lwp->l_private = si;
	softint_init_md(si->si_lwp, level, &si->si_machdep);
}
1.37      uebayasi  263:
1.2       ad        264: /*
                    265:  * softint_init:
                    266:  *
                    267:  *     Initialize per-CPU data structures.  Called from mi_cpu_attach().
                    268:  */
                    269: void
                    270: softint_init(struct cpu_info *ci)
                    271: {
1.5       ad        272:        static struct cpu_info *first;
                    273:        softcpu_t *sc, *scfirst;
                    274:        softhand_t *sh, *shmax;
                    275:
                    276:        if (first == NULL) {
                    277:                /* Boot CPU. */
                    278:                first = ci;
                    279:                mutex_init(&softint_lock, MUTEX_DEFAULT, IPL_NONE);
                    280:                softint_bytes = round_page(softint_bytes);
                    281:                softint_max = (softint_bytes - sizeof(softcpu_t)) /
                    282:                    sizeof(softhand_t);
                    283:        }
1.2       ad        284:
1.37      uebayasi  285:        /* Use uvm_km(9) for persistent, page-aligned allocation. */
                    286:        sc = (softcpu_t *)uvm_km_alloc(kernel_map, softint_bytes, 0,
                    287:            UVM_KMF_WIRED | UVM_KMF_ZERO);
1.5       ad        288:        if (sc == NULL)
                    289:                panic("softint_init_cpu: cannot allocate memory");
                    290:
                    291:        ci->ci_data.cpu_softcpu = sc;
                    292:        ci->ci_data.cpu_softints = 0;
                    293:        sc->sc_cpu = ci;
                    294:
                    295:        softint_init_isr(sc, "net", PRI_SOFTNET, SOFTINT_NET);
                    296:        softint_init_isr(sc, "bio", PRI_SOFTBIO, SOFTINT_BIO);
                    297:        softint_init_isr(sc, "clk", PRI_SOFTCLOCK, SOFTINT_CLOCK);
                    298:        softint_init_isr(sc, "ser", PRI_SOFTSERIAL, SOFTINT_SERIAL);
                    299:
                    300:        if (first != ci) {
                    301:                mutex_enter(&softint_lock);
                    302:                scfirst = first->ci_data.cpu_softcpu;
                    303:                sh = sc->sc_hand;
                    304:                memcpy(sh, scfirst->sc_hand, sizeof(*sh) * softint_max);
                    305:                /* Update pointers for this CPU. */
                    306:                for (shmax = sh + softint_max; sh < shmax; sh++) {
                    307:                        if (sh->sh_func == NULL)
                    308:                                continue;
                    309:                        sh->sh_isr =
                    310:                            &sc->sc_int[sh->sh_flags & SOFTINT_LVLMASK];
                    311:                }
                    312:                mutex_exit(&softint_lock);
                    313:        } else {
                    314:                /*
                    315:                 * Establish handlers for legacy net interrupts.
                    316:                 * XXX Needs to go away.
                    317:                 */
                    318: #define DONETISR(n, f)                                                 \
1.16      ad        319:     softint_netisrs[(n)] = softint_establish(SOFTINT_NET|SOFTINT_MPSAFE,\
                    320:         (void (*)(void *))(f), NULL)
1.5       ad        321: #include <net/netisr_dispatch.h>
                    322:        }
1.2       ad        323: }
                    324:
/*
 * softint_establish:
 *
 *	Register a software interrupt handler.  Returns an opaque handle
 *	(a byte offset into each CPU's softcpu_t, so it is valid on every
 *	CPU) or NULL if the handler table is full or IPI registration
 *	fails.
 */
void *
softint_establish(u_int flags, void (*func)(void *), void *arg)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	softcpu_t *sc;
	softhand_t *sh;
	u_int level, index;
	u_int ipi_id = 0;
	void *sih;

	level = (flags & SOFTINT_LVLMASK);
	KASSERT(level < SOFTINT_COUNT);
	KASSERT((flags & SOFTINT_IMPMASK) == 0);

	mutex_enter(&softint_lock);

	/* Find a free slot.  Slot 0 is never used, so a handle is non-zero. */
	sc = curcpu()->ci_data.cpu_softcpu;
	for (index = 1; index < softint_max; index++) {
		if (sc->sc_hand[index].sh_func == NULL)
			break;
	}
	if (index == softint_max) {
		mutex_exit(&softint_lock);
		printf("WARNING: softint_establish: table full, "
		    "increase softint_bytes\n");
		return NULL;
	}
	/* Handle = offset of the slot within softcpu_t; same on all CPUs. */
	sih = (void *)((uint8_t *)&sc->sc_hand[index] - (uint8_t *)sc);

	/* Remote-CPU scheduling requested: register an IPI for the handle. */
	if (flags & SOFTINT_RCPU) {
		if ((ipi_id = ipi_register(softint_schedule, sih)) == 0) {
			mutex_exit(&softint_lock);
			return NULL;
		}
	}

	/* Set up the handler on each CPU. */
	if (ncpu < 2) {
		/* XXX hack for machines with no CPU_INFO_FOREACH() early on */
		sc = curcpu()->ci_data.cpu_softcpu;
		sh = &sc->sc_hand[index];
		sh->sh_isr = &sc->sc_int[level];
		sh->sh_func = func;
		sh->sh_arg = arg;
		sh->sh_flags = flags;
		sh->sh_ipi_id = ipi_id;
	} else for (CPU_INFO_FOREACH(cii, ci)) {
		sc = ci->ci_data.cpu_softcpu;
		sh = &sc->sc_hand[index];
		sh->sh_isr = &sc->sc_int[level];
		sh->sh_func = func;
		sh->sh_arg = arg;
		sh->sh_flags = flags;
		sh->sh_ipi_id = ipi_id;
	}
	mutex_exit(&softint_lock);

	return sih;
}
                    391:
/*
 * softint_disestablish:
 *
 *	Unregister a software interrupt handler.  The soft interrupt could
 *	still be active at this point, but the caller commits not to try
 *	and trigger it again once this call is made.  The caller must not
 *	hold any locks that could be taken from soft interrupt context,
 *	because we will wait for the softint to complete if it's still
 *	running.
 */
void
softint_disestablish(void *arg)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	softcpu_t *sc;
	softhand_t *sh;
	uintptr_t offset;
	u_int flags;

	/* The handle is a byte offset into each CPU's softcpu_t. */
	offset = (uintptr_t)arg;
	KASSERTMSG(offset != 0 && offset < softint_bytes, "%"PRIuPTR" %u",
	    offset, softint_bytes);

	/*
	 * Unregister an IPI handler if there is any.  Note: there is
	 * no need to disable preemption here - ID is stable.
	 */
	sc = curcpu()->ci_data.cpu_softcpu;
	sh = (softhand_t *)((uint8_t *)sc + offset);
	if (sh->sh_ipi_id) {
		ipi_unregister(sh->sh_ipi_id);
	}

	/*
	 * Run a cross call so we see up to date values of sh_flags from
	 * all CPUs.  Once softint_disestablish() is called, the caller
	 * commits to not trigger the interrupt and set SOFTINT_ACTIVE on
	 * it again.  So, we are only looking for handler records with
	 * SOFTINT_ACTIVE already set.
	 */
	if (__predict_true(mp_online)) {
		xc_barrier(0);
	}

	/* Drain: poll until no CPU still has the handler marked active. */
	for (;;) {
		/* Collect flag values from each CPU. */
		flags = 0;
		for (CPU_INFO_FOREACH(cii, ci)) {
			sc = ci->ci_data.cpu_softcpu;
			sh = (softhand_t *)((uint8_t *)sc + offset);
			KASSERT(sh->sh_func != NULL);
			flags |= sh->sh_flags;
		}
		/* Inactive on all CPUs? */
		if ((flags & SOFTINT_ACTIVE) == 0) {
			break;
		}
		/* Oops, still active.  Wait for it to clear. */
		(void)kpause("softdis", false, 1, NULL);
	}

	/* Clear the handler on each CPU. */
	mutex_enter(&softint_lock);
	for (CPU_INFO_FOREACH(cii, ci)) {
		sc = ci->ci_data.cpu_softcpu;
		sh = (softhand_t *)((uint8_t *)sc + offset);
		KASSERT(sh->sh_func != NULL);
		sh->sh_func = NULL;
	}
	mutex_exit(&softint_lock);
}
                    464:
/*
 * softint_schedule:
 *
 *	Trigger a software interrupt.  Must be called from a hardware
 *	interrupt handler, or with preemption disabled (since we are
 *	using the value of curcpu()).
 */
void
softint_schedule(void *arg)
{
	softhand_t *sh;
	softint_t *si;
	uintptr_t offset;
	int s;

	KASSERT(kpreempt_disabled());

	/* Find the handler record for this CPU. */
	offset = (uintptr_t)arg;
	KASSERTMSG(offset != 0 && offset < softint_bytes, "%"PRIuPTR" %u",
	    offset, softint_bytes);
	sh = (softhand_t *)((uint8_t *)curcpu()->ci_data.cpu_softcpu + offset);

	/*
	 * If it's already pending there's nothing to do.  Cheap unlocked
	 * check; the flag is re-tested under splhigh below before the
	 * queue insert.
	 */
	if ((sh->sh_flags & SOFTINT_PENDING) != 0) {
		return;
	}

	/*
	 * Enqueue the handler into the LWP's pending list.
	 * If the LWP is completely idle, then make it run.
	 */
	s = splhigh();
	if ((sh->sh_flags & SOFTINT_PENDING) == 0) {
		si = sh->sh_isr;
		sh->sh_flags |= SOFTINT_PENDING;
		SIMPLEQ_INSERT_TAIL(&si->si_q, sh, sh_q);
		if (si->si_active == 0) {
			si->si_active = 1;
			softint_trigger(si->si_machdep);
		}
	}
	splx(s);
}
                    509:
                    510: /*
1.41      rmind     511:  * softint_schedule_cpu:
                    512:  *
                    513:  *     Trigger a software interrupt on a target CPU.  This invokes
                    514:  *     softint_schedule() for the local CPU or send an IPI to invoke
                    515:  *     this routine on the remote CPU.  Preemption must be disabled.
                    516:  */
                    517: void
                    518: softint_schedule_cpu(void *arg, struct cpu_info *ci)
                    519: {
                    520:        KASSERT(kpreempt_disabled());
                    521:
                    522:        if (curcpu() != ci) {
                    523:                const softcpu_t *sc = ci->ci_data.cpu_softcpu;
                    524:                const uintptr_t offset = (uintptr_t)arg;
                    525:                const softhand_t *sh;
                    526:
                    527:                sh = (const softhand_t *)((const uint8_t *)sc + offset);
                    528:                KASSERT((sh->sh_flags & SOFTINT_RCPU) != 0);
                    529:                ipi_trigger(sh->sh_ipi_id, ci);
                    530:                return;
                    531:        }
                    532:
                    533:        /* Just a local CPU. */
                    534:        softint_schedule(arg);
                    535: }
                    536:
/*
 * softint_execute:
 *
 *	Invoke handlers for the specified soft interrupt.
 *	Must be entered at splhigh.  Will drop the priority
 *	to the level specified ('s') while each handler runs, but
 *	returns back at splhigh.
 */
static inline void
softint_execute(softint_t *si, lwp_t *l, int s)
{
	softhand_t *sh;

#ifdef __HAVE_FAST_SOFTINTS
	KASSERT(si->si_lwp == curlwp);
#else
	/* May be running in user context. */
#endif
	KASSERT(si->si_cpu == curcpu());
	KASSERT(si->si_lwp->l_wchan == NULL);
	KASSERT(si->si_active);

	/*
	 * Note: due to priority inheritance we may have interrupted a
	 * higher priority LWP.  Since the soft interrupt must be quick
	 * and is non-preemptable, we don't bother yielding.
	 */

	while (!SIMPLEQ_EMPTY(&si->si_q)) {
		/*
		 * Pick the longest waiting handler to run.  We block
		 * interrupts but do not lock in order to do this, as
		 * we are protecting against the local CPU only.
		 */
		sh = SIMPLEQ_FIRST(&si->si_q);
		SIMPLEQ_REMOVE_HEAD(&si->si_q, sh_q);
		KASSERT((sh->sh_flags & SOFTINT_PENDING) != 0);
		KASSERT((sh->sh_flags & SOFTINT_ACTIVE) == 0);
		/* One store flips PENDING off and ACTIVE on. */
		sh->sh_flags ^= (SOFTINT_PENDING | SOFTINT_ACTIVE);
		splx(s);

		/* Run the handler; non-MPSAFE handlers take the big lock. */
		if (__predict_true((sh->sh_flags & SOFTINT_MPSAFE) != 0)) {
			(*sh->sh_func)(sh->sh_arg);
		} else {
			KERNEL_LOCK(1, l);
			(*sh->sh_func)(sh->sh_arg);
			KERNEL_UNLOCK_ONE(l);
		}

		/* Diagnostic: check that spin-locks have not leaked. */
		KASSERTMSG(curcpu()->ci_mtx_count == 0,
		    "%s: ci_mtx_count (%d) != 0, sh_func %p\n",
		    __func__, curcpu()->ci_mtx_count, sh->sh_func);
		/* Diagnostic: check that psrefs have not leaked. */
		KASSERTMSG(l->l_psrefs == 0, "%s: l_psrefs=%d, sh_func=%p\n",
		    __func__, l->l_psrefs, sh->sh_func);

		/* Back at splhigh before touching the queue/flags again. */
		(void)splhigh();
		KASSERT((sh->sh_flags & SOFTINT_ACTIVE) != 0);
		sh->sh_flags ^= SOFTINT_ACTIVE;
	}

	PSREF_DEBUG_BARRIER();

	CPU_COUNT(CPU_COUNT_NSOFT, 1);

	KASSERT(si->si_cpu == curcpu());
	KASSERT(si->si_lwp->l_wchan == NULL);
	KASSERT(si->si_active);
	si->si_evcnt.ev_count++;
	si->si_active = 0;
}
                    609:
                    610: /*
                    611:  * softint_block:
                    612:  *
                    613:  *     Update statistics when the soft interrupt blocks.
                    614:  */
                    615: void
                    616: softint_block(lwp_t *l)
                    617: {
1.5       ad        618:        softint_t *si = l->l_private;
                    619:
                    620:        KASSERT((l->l_pflag & LP_INTR) != 0);
                    621:        si->si_evcnt_block.ev_count++;
                    622: }
                    623:
                    624: /*
                    625:  * schednetisr:
                    626:  *
                    627:  *     Trigger a legacy network interrupt.  XXX Needs to go away.
                    628:  */
                    629: void
                    630: schednetisr(int isr)
                    631: {
                    632:
                    633:        softint_schedule(softint_netisrs[isr]);
                    634: }
                    635:
                    636: #ifndef __HAVE_FAST_SOFTINTS
                    637:
1.19      ad        638: #ifdef __HAVE_PREEMPTION
                    639: #error __HAVE_PREEMPTION requires __HAVE_FAST_SOFTINTS
1.17      ad        640: #endif
                    641:
/*
 * softint_init_md:
 *
 *	Slow path: perform machine-dependent initialization.
 *	Records a per-level trigger bit and makes the soft interrupt
 *	LWP runnable so it can enter softint_thread()'s dispatch loop.
 */
void
softint_init_md(lwp_t *l, u_int level, uintptr_t *machdep)
{
	struct proc *p;
	softint_t *si;

	/* One bit per software interrupt level (see softint_trigger()). */
	*machdep = (1 << level);
	si = l->l_private;
	p = l->l_proc;

	/* Lock order: proc lock first, then the LWP lock. */
	mutex_enter(p->p_lock);
	lwp_lock(l);
	/* Cheat and make the KASSERT in softint_thread() happy. */
	si->si_active = 1;
	setrunnable(l);
	/* LWP now unlocked */
	mutex_exit(p->p_lock);
}
                    665:
/*
 * softint_trigger:
 *
 *	Slow path: cause a soft interrupt handler to begin executing.
 *	Called at IPL_HIGH.
 */
void
softint_trigger(uintptr_t machdep)
{
	struct cpu_info *ci;
	lwp_t *l;

	ci = curcpu();
	/* Mark the level pending; cleared again in softint_thread(). */
	ci->ci_data.cpu_softints |= machdep;
	l = ci->ci_onproc;
	if (l == ci->ci_data.cpu_idlelwp) {
		/*
		 * CPU is idle: request a reschedule so the idle loop
		 * will switch away and the softint LWP can be picked.
		 */
		atomic_or_uint(&ci->ci_want_resched,
		    RESCHED_IDLE | RESCHED_UPREEMPT);
	} else {
		/* MI equivalent of aston() */
		cpu_signotify(l);
	}
}
                    689:
/*
 * softint_thread:
 *
 *	Slow path: MI software interrupt dispatch.  Body of the
 *	per-level soft interrupt LWP: run pending handlers, then
 *	switch away until triggered again.
 */
void
softint_thread(void *cookie)
{
	softint_t *si;
	lwp_t *l;
	int s;

	l = curlwp;
	si = l->l_private;

	for (;;) {
		/*
		 * Clear pending status and run it.  We must drop the
		 * spl before mi_switch(), since IPL_HIGH may be higher
		 * than IPL_SCHED (and it is not safe to switch at a
		 * higher level).
		 */
		s = splhigh();
		l->l_cpu->ci_data.cpu_softints &= ~si->si_machdep;
		softint_execute(si, l, s);
		splx(s);

		/* All handlers done: go idle until the next trigger. */
		lwp_lock(l);
		l->l_stat = LSIDL;
		spc_lock(l->l_cpu);
		mi_switch(l);
	}
}
1.4       ad        723:
                    724: /*
                    725:  * softint_picklwp:
                    726:  *
                    727:  *     Slow path: called from mi_switch() to pick the highest priority
                    728:  *     soft interrupt LWP that needs to run.
                    729:  */
                    730: lwp_t *
                    731: softint_picklwp(void)
                    732: {
1.5       ad        733:        struct cpu_info *ci;
                    734:        u_int mask;
                    735:        softint_t *si;
                    736:        lwp_t *l;
                    737:
                    738:        ci = curcpu();
                    739:        si = ((softcpu_t *)ci->ci_data.cpu_softcpu)->sc_int;
                    740:        mask = ci->ci_data.cpu_softints;
                    741:
                    742:        if ((mask & (1 << SOFTINT_SERIAL)) != 0) {
                    743:                l = si[SOFTINT_SERIAL].si_lwp;
                    744:        } else if ((mask & (1 << SOFTINT_NET)) != 0) {
                    745:                l = si[SOFTINT_NET].si_lwp;
                    746:        } else if ((mask & (1 << SOFTINT_BIO)) != 0) {
                    747:                l = si[SOFTINT_BIO].si_lwp;
                    748:        } else if ((mask & (1 << SOFTINT_CLOCK)) != 0) {
                    749:                l = si[SOFTINT_CLOCK].si_lwp;
                    750:        } else {
                    751:                panic("softint_picklwp");
                    752:        }
1.4       ad        753:
1.5       ad        754:        return l;
1.4       ad        755: }
                    756:
/*
 * softint_overlay:
 *
 *	Slow path: called from lwp_userret() to run a soft interrupt
 *	within the context of a user thread.
 */
void
softint_overlay(void)
{
	struct cpu_info *ci;
	u_int softints, oflag;
	softint_t *si;
	pri_t obase;
	lwp_t *l;
	int s;

	l = curlwp;
	KASSERT((l->l_pflag & LP_INTR) == 0);

	/*
	 * Arrange to elevate priority if the LWP blocks.  Also, bind LWP
	 * to the CPU.  Note: disable kernel preemption before doing that.
	 */
	s = splhigh();
	ci = l->l_cpu;
	si = ((softcpu_t *)ci->ci_data.cpu_softcpu)->sc_int;

	/* Save base priority and flags; restored before returning. */
	obase = l->l_kpribase;
	l->l_kpribase = PRI_KERNEL_RT;
	oflag = l->l_pflag;
	l->l_pflag = oflag | LP_INTR | LP_BOUND;

	/* Drain pending levels, highest priority first each pass. */
	while ((softints = ci->ci_data.cpu_softints) != 0) {
		if ((softints & (1 << SOFTINT_SERIAL)) != 0) {
			ci->ci_data.cpu_softints &= ~(1 << SOFTINT_SERIAL);
			softint_execute(&si[SOFTINT_SERIAL], l, s);
			continue;
		}
		if ((softints & (1 << SOFTINT_NET)) != 0) {
			ci->ci_data.cpu_softints &= ~(1 << SOFTINT_NET);
			softint_execute(&si[SOFTINT_NET], l, s);
			continue;
		}
		if ((softints & (1 << SOFTINT_BIO)) != 0) {
			ci->ci_data.cpu_softints &= ~(1 << SOFTINT_BIO);
			softint_execute(&si[SOFTINT_BIO], l, s);
			continue;
		}
		if ((softints & (1 << SOFTINT_CLOCK)) != 0) {
			ci->ci_data.cpu_softints &= ~(1 << SOFTINT_CLOCK);
			softint_execute(&si[SOFTINT_CLOCK], l, s);
			continue;
		}
	}

	/* Restore the saved priority base and flags. */
	l->l_pflag = oflag;
	l->l_kpribase = obase;
	splx(s);
}
1.5       ad        815:
                    816: #else  /*  !__HAVE_FAST_SOFTINTS */
                    817:
/*
 * softint_thread:
 *
 *	Fast path: the LWP is switched to without restoring any state,
 *	so we should not arrive here - there is a direct handoff between
 *	the interrupt stub and softint_dispatch().
 */
void
softint_thread(void *cookie)
{

	/* Unreachable with fast softints; getting here is a bug. */
	panic("softint_thread");
}
                    831:
/*
 * softint_dispatch:
 *
 *	Fast path: entry point from machine-dependent code.  Runs the
 *	pending handlers in the context of the soft interrupt LWP,
 *	then either returns to the pinned (interrupted) LWP or, if a
 *	handler blocked, switches away via mi_switch().
 */
void
softint_dispatch(lwp_t *pinned, int s)
{
	struct bintime now;
	softint_t *si;
	u_int timing;
	lwp_t *l;

#ifdef DIAGNOSTIC
	/* Sanity-check the handoff state before touching anything. */
	if ((pinned->l_pflag & LP_RUNNING) == 0 || curlwp->l_stat != LSIDL) {
		struct lwp *onproc = curcpu()->ci_onproc;
		int s2 = splhigh();
		printf("curcpu=%d, spl=%d curspl=%d\n"
			"onproc=%p => l_stat=%d l_flag=%08x l_cpu=%d\n"
			"curlwp=%p => l_stat=%d l_flag=%08x l_cpu=%d\n"
			"pinned=%p => l_stat=%d l_flag=%08x l_cpu=%d\n",
			cpu_index(curcpu()), s, s2, onproc, onproc->l_stat,
			onproc->l_flag, cpu_index(onproc->l_cpu), curlwp,
			curlwp->l_stat, curlwp->l_flag,
			cpu_index(curlwp->l_cpu), pinned, pinned->l_stat,
			pinned->l_flag, cpu_index(pinned->l_cpu));
		splx(s2);
		panic("softint screwup");
	}
#endif

	l = curlwp;
	si = l->l_private;

	/*
	 * Note the interrupted LWP, and mark the current LWP as running
	 * before proceeding.  Although this must as a rule be done with
	 * the LWP locked, at this point no external agents will want to
	 * modify the interrupt LWP's state.
	 */
	timing = softint_timing;
	l->l_switchto = pinned;
	l->l_stat = LSONPROC;

	/*
	 * Dispatch the interrupt.  If softints are being timed, charge
	 * for it.
	 */
	if (timing) {
		binuptime(&l->l_stime);
		membar_producer();	/* for calcru */
		l->l_pflag |= LP_TIMEINTR;
	}
	l->l_pflag |= LP_RUNNING;
	softint_execute(si, l, s);
	if (timing) {
		binuptime(&now);
		updatertime(l, &now);
		l->l_pflag &= ~LP_TIMEINTR;
	}

	/* XXX temporary */
	kernel_lock_plug_leak();

	/*
	 * If we blocked while handling the interrupt, the pinned LWP is
	 * gone so switch to the idle LWP.  It will select a new LWP to
	 * run.
	 *
	 * We must drop the priority level as switching at IPL_HIGH could
	 * deadlock the system.  We have already set si->si_active = 0,
	 * which means another interrupt at this level can be triggered.
	 * That's not a problem: we are lowering to level 's' which will
	 * prevent softint_dispatch() from being reentered at level 's',
	 * until the priority is finally dropped to IPL_NONE on entry to
	 * the LWP chosen by mi_switch().
	 */
	l->l_stat = LSIDL;
	if (l->l_switchto == NULL) {
		/* A handler blocked: the pinned LWP is gone, switch away. */
		splx(s);
		lwp_lock(l);
		spc_lock(l->l_cpu);
		mi_switch(l);
		/* NOTREACHED */
	}
	/* Normal return path: hand control back to the pinned LWP. */
	l->l_switchto = NULL;
	l->l_pflag &= ~LP_RUNNING;
}
                    920:
                    921: #endif /* !__HAVE_FAST_SOFTINTS */

CVSweb <webmaster@jp.NetBSD.org>