
Annotation of src/sys/kern/kern_softint.c, Revision 1.23

1.23    ! pooka       1: /*     $NetBSD: kern_softint.c,v 1.22 2008/05/31 21:26:01 ad Exp $     */
1.2       ad          2:
                      3: /*-
1.10      ad          4:  * Copyright (c) 2007, 2008 The NetBSD Foundation, Inc.
1.2       ad          5:  * All rights reserved.
                      6:  *
                      7:  * This code is derived from software contributed to The NetBSD Foundation
                      8:  * by Andrew Doran.
                      9:  *
                     10:  * Redistribution and use in source and binary forms, with or without
                     11:  * modification, are permitted provided that the following conditions
                     12:  * are met:
                     13:  * 1. Redistributions of source code must retain the above copyright
                     14:  *    notice, this list of conditions and the following disclaimer.
                     15:  * 2. Redistributions in binary form must reproduce the above copyright
                     16:  *    notice, this list of conditions and the following disclaimer in the
                     17:  *    documentation and/or other materials provided with the distribution.
                     18:  *
                     19:  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
                     20:  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
                     21:  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
                     22:  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
                     23:  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
                     24:  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
                     25:  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
                     26:  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
                     27:  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
                     28:  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
                     29:  * POSSIBILITY OF SUCH DAMAGE.
                     30:  */
                     31:
                     32: /*
1.5       ad         33:  * Generic software interrupt framework.
                     34:  *
                     35:  * Overview
                     36:  *
                     37:  *     The soft interrupt framework provides a mechanism to schedule a
                     38:  *     low priority callback that runs with thread context.  It allows
                     39:  *     for dynamic registration of software interrupts, and for fair
                     40:  *     queueing and prioritization of those interrupts.  The callbacks
                     41:  *     can be scheduled to run from nearly any point in the kernel: by
                     42:  *     code running with thread context, by code running from a
                     43:  *     hardware interrupt handler, and at any interrupt priority
                     44:  *     level.
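                          *
                          *     For example, a hypothetical driver "foo" (a sketch only;
                          *     the foo names do not appear in this file) registers a
                          *     handler once and then triggers it whenever work arrives:
                          *
                          *             foo_sih = softint_establish(
                          *                 SOFTINT_NET | SOFTINT_MPSAFE,
                          *                 foo_intr_soft, foo_sc);
                          *             ...
                          *             softint_schedule(foo_sih);  <- e.g. from foo's
                          *                 hardware interrupt handler
                          *             ...
                          *             softint_disestablish(foo_sih);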
                     45:  *
                     46:  * Priority levels
                     47:  *
                     48:  *     Since soft interrupt dispatch can be tied to the underlying
                     49:  *     architecture's interrupt dispatch code, it can be limited
                     50:  *     both by the capabilities of the hardware and the capabilities
                     51:  *     of the interrupt dispatch code itself.  The number of priority
                     52:  *     levels is restricted to four.  In order of priority (lowest to
                     53:  *     highest) the levels are: clock, bio, net, serial.
                     54:  *
                     55:  *     The names are symbolic and in isolation do not have any direct
                     56:  *     connection with a particular kind of device activity: they are
                     57:  *     only meant as a guide.
                     58:  *
                     59:  *     The four priority levels map directly to scheduler priority
                     60:  *     levels, and where the architecture implements 'fast' software
                     61:  *     interrupts, they also map onto interrupt priorities.  The
                     62:  *     interrupt priorities are intended to be hidden from machine
                     63:  *     independent code, which should use thread-safe mechanisms to
                     64:  *     synchronize with software interrupts (for example: mutexes).
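                          *
                          *     For example (a sketch; foo_lock is hypothetical), data
                          *     shared between thread context and a handler established
                          *     at SOFTINT_NET can be guarded by a mutex initialized at
                          *     the matching soft IPL, with both sides simply using
                          *     mutex_enter()/mutex_exit():
                          *
                          *             mutex_init(&foo_lock, MUTEX_DEFAULT, IPL_SOFTNET);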
                     65:  *
                     66:  * Capabilities
                     67:  *
                     68:  *     Software interrupts run with limited machine context.  In
                      69:  *     particular, they do not possess any address space context.  They
                     70:  *     should not try to operate on user space addresses, or to use
                     71:  *     virtual memory facilities other than those noted as interrupt
                     72:  *     safe.
                     73:  *
                     74:  *     Unlike hardware interrupts, software interrupts do have thread
                     75:  *     context.  They may block on synchronization objects, sleep, and
                     76:  *     resume execution at a later time.
                     77:  *
                     78:  *     Since software interrupts are a limited resource and run with
                     79:  *     higher priority than most other LWPs in the system, all
                     80:  *     block-and-resume activity by a software interrupt must be kept
                      81:  *     short to allow further processing at that level to continue.  By
                     82:  *     extension, code running with process context must take care to
                     83:  *     ensure that any lock that may be taken from a software interrupt
                     84:  *     can not be held for more than a short period of time.
                     85:  *
                     86:  *     The kernel does not allow software interrupts to use facilities
                     87:  *     or perform actions that may block for a significant amount of
                     88:  *     time.  This means that it's not valid for a software interrupt
1.10      ad         89:  *     to sleep on condition variables or wait for resources to become
                     90:  *     available (for example, memory).
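                          *
                          *     For example (a sketch; the foo names are hypothetical),
                          *     a handler that needs memory should make a non-sleeping
                          *     allocation, e.g. with pool_cache(9), and drop the work
                          *     item on failure rather than wait:
                          *
                          *             m = pool_cache_get(foo_cache, PR_NOWAIT);
                          *             if (m == NULL)
                          *                     return;     <- drop; retry when next scheduled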
1.5       ad         91:  *
                     92:  * Per-CPU operation
                     93:  *
                     94:  *     If a soft interrupt is triggered on a CPU, it can only be
                     95:  *     dispatched on the same CPU.  Each LWP dedicated to handling a
                     96:  *     soft interrupt is bound to its home CPU, so if the LWP blocks
                     97:  *     and needs to run again, it can only run there.  Nearly all data
                     98:  *     structures used to manage software interrupts are per-CPU.
                     99:  *
                    100:  *     The per-CPU requirement is intended to reduce "ping-pong" of
                    101:  *     cache lines between CPUs: lines occupied by data structures
                    102:  *     used to manage the soft interrupts, and lines occupied by data
                    103:  *     items being passed down to the soft interrupt.  As a positive
                    104:  *     side effect, this also means that the soft interrupt dispatch
                     105:  *     code does not need to use spinlocks to synchronize.
                    106:  *
                    107:  * Generic implementation
                    108:  *
                    109:  *     A generic, low performance implementation is provided that
                    110:  *     works across all architectures, with no machine-dependent
                    111:  *     modifications needed.  This implementation uses the scheduler,
                    112:  *     and so has a number of restrictions:
                    113:  *
                     114:  *     1) The software interrupts are not currently preemptive, so they
                    115:  *     must wait for the currently executing LWP to yield the CPU.
                    116:  *     This can introduce latency.
                    117:  *
                    118:  *     2) An expensive context switch is required for a software
                    119:  *     interrupt to be handled.
                    120:  *
                    121:  * 'Fast' software interrupts
                    122:  *
                     123:  *     If an architecture defines __HAVE_FAST_SOFTINTS, it implements
                    124:  *     the fast mechanism.  Threads running either in the kernel or in
                    125:  *     userspace will be interrupted, but will not be preempted.  When
                    126:  *     the soft interrupt completes execution, the interrupted LWP
                    127:  *     is resumed.  Interrupt dispatch code must provide the minimum
                    128:  *     level of context necessary for the soft interrupt to block and
                    129:  *     be resumed at a later time.  The machine-dependent dispatch
                    130:  *     path looks something like the following:
                    131:  *
                    132:  *     softintr()
                    133:  *     {
                    134:  *             go to IPL_HIGH if necessary for switch;
                    135:  *             save any necessary registers in a format that can be
                    136:  *                 restored by cpu_switchto if the softint blocks;
                    137:  *             arrange for cpu_switchto() to restore into the
                    138:  *                 trampoline function;
                    139:  *             identify LWP to handle this interrupt;
                    140:  *             switch to the LWP's stack;
                    141:  *             switch register stacks, if necessary;
                    142:  *             assign new value of curlwp;
                    143:  *             call MI softint_dispatch, passing old curlwp and IPL
                    144:  *                 to execute interrupt at;
                    145:  *             switch back to old stack;
                    146:  *             switch back to old register stack, if necessary;
                    147:  *             restore curlwp;
                    148:  *             return to interrupted LWP;
                    149:  *     }
                    150:  *
                    151:  *     If the soft interrupt blocks, a trampoline function is returned
                    152:  *     to in the context of the interrupted LWP, as arranged for by
                     153:  *     softintr():
                    154:  *
                    155:  *     softint_ret()
                    156:  *     {
                    157:  *             unlock soft interrupt LWP;
                    158:  *             resume interrupt processing, likely returning to
                    159:  *                 interrupted LWP or dispatching another, different
                    160:  *                 interrupt;
                    161:  *     }
                    162:  *
                    163:  *     Once the soft interrupt has fired (and even if it has blocked),
                    164:  *     no further soft interrupts at that level will be triggered by
                    165:  *     MI code until the soft interrupt handler has ceased execution.
                    166:  *     If a soft interrupt handler blocks and is resumed, it resumes
                    167:  *     execution as a normal LWP (kthread) and gains VM context.  Only
                    168:  *     when it has completed and is ready to fire again will it
                    169:  *     interrupt other threads.
                    170:  *
                    171:  * Future directions
                    172:  *
                    173:  *     Provide a cheap way to direct software interrupts to remote
                    174:  *     CPUs.  Provide a way to enqueue work items into the handler
                    175:  *     record, removing additional spl calls (see subr_workqueue.c).
1.2       ad        176:  */
                    177:
                    178: #include <sys/cdefs.h>
1.23    ! pooka     179: __KERNEL_RCSID(0, "$NetBSD: kern_softint.c,v 1.22 2008/05/31 21:26:01 ad Exp $");
1.2       ad        180:
                    181: #include <sys/param.h>
1.5       ad        182: #include <sys/malloc.h>
                    183: #include <sys/proc.h>
1.2       ad        184: #include <sys/intr.h>
1.5       ad        185: #include <sys/mutex.h>
                    186: #include <sys/kthread.h>
                    187: #include <sys/evcnt.h>
                    188: #include <sys/cpu.h>
                    189:
                    190: #include <net/netisr.h>
                    191:
                    192: #include <uvm/uvm_extern.h>
                    193:
                    194: /* This could overlap with signal info in struct lwp. */
                     195: typedef struct softint {
                     196:        SIMPLEQ_HEAD(, softhand) si_q;          /* queue of pending handlers */
                     197:        struct lwp              *si_lwp;        /* LWP dedicated to this level */
                     198:        struct cpu_info         *si_cpu;        /* home CPU; never changes */
                     199:        uintptr_t               si_machdep;     /* MD cookie for softint_trigger() */
                     200:        struct evcnt            si_evcnt;       /* count of dispatches */
                     201:        struct evcnt            si_evcnt_block; /* count of times handlers blocked */
                     202:        int                     si_active;      /* non-zero while being dispatched */
                     203:        char                    si_name[8];     /* evcnt name, e.g. "net/0" */
                     204:        char                    si_name_block[8+6]; /* evcnt name for block counter */
                     205: } softint_t;
                     206:
                     207: typedef struct softhand {
                     208:        SIMPLEQ_ENTRY(softhand) sh_q;           /* entry on si_q while pending */
                     209:        void                    (*sh_func)(void *); /* handler function */
                     210:        void                    *sh_arg;        /* argument passed to sh_func */
                     211:        softint_t               *sh_isr;        /* this CPU's softint at our level */
                     212:        u_int                   sh_pending;     /* non-zero if queued on si_q */
                     213:        u_int                   sh_flags;       /* SOFTINT_* flags from establish */
                     214: } softhand_t;
                     215:
                     216: typedef struct softcpu {
                     217:        struct cpu_info         *sc_cpu;        /* back pointer to the CPU */
                     218:        softint_t               sc_int[SOFTINT_COUNT];  /* one per priority level */
                     219:        softhand_t              sc_hand[1];     /* handler table; actually larger */
                     220: } softcpu_t;
                    221:
                    222: static void    softint_thread(void *);
                    223:
                    224: u_int          softint_bytes = 8192;
                    225: u_int          softint_timing;
                    226: static u_int   softint_max;
                    227: static kmutex_t        softint_lock;
1.23    ! pooka     228: static void    *softint_netisrs[NETISR_MAX];
1.2       ad        229:
1.5       ad        230: /*
                    231:  * softint_init_isr:
                    232:  *
                    233:  *     Initialize a single interrupt level for a single CPU.
                    234:  */
                    235: static void
                    236: softint_init_isr(softcpu_t *sc, const char *desc, pri_t pri, u_int level)
                    237: {
                    238:        struct cpu_info *ci;
                    239:        softint_t *si;
                    240:        int error;
                    241:
                    242:        si = &sc->sc_int[level];
                    243:        ci = sc->sc_cpu;
                    244:        si->si_cpu = ci;
                    245:
                    246:        SIMPLEQ_INIT(&si->si_q);
                    247:
                    248:        error = kthread_create(pri, KTHREAD_MPSAFE | KTHREAD_INTR |
                    249:            KTHREAD_IDLE, ci, softint_thread, si, &si->si_lwp,
1.12      martin    250:            "soft%s/%u", desc, ci->ci_index);
1.5       ad        251:        if (error != 0)
                    252:                panic("softint_init_isr: error %d", error);
                    253:
1.12      martin    254:        snprintf(si->si_name, sizeof(si->si_name), "%s/%u", desc,
                    255:            ci->ci_index);
1.20      ad        256:        evcnt_attach_dynamic(&si->si_evcnt, EVCNT_TYPE_MISC, NULL,
1.5       ad        257:           "softint", si->si_name);
1.12      martin    258:        snprintf(si->si_name_block, sizeof(si->si_name_block), "%s block/%u",
                    259:            desc, ci->ci_index);
1.20      ad        260:        evcnt_attach_dynamic(&si->si_evcnt_block, EVCNT_TYPE_MISC, NULL,
1.5       ad        261:           "softint", si->si_name_block);
1.3       ad        262:
1.5       ad        263:        si->si_lwp->l_private = si;
                    264:        softint_init_md(si->si_lwp, level, &si->si_machdep);
                    265: }
1.2       ad        266: /*
                    267:  * softint_init:
                    268:  *
                    269:  *     Initialize per-CPU data structures.  Called from mi_cpu_attach().
                    270:  */
                    271: void
                    272: softint_init(struct cpu_info *ci)
                    273: {
1.5       ad        274:        static struct cpu_info *first;
                    275:        softcpu_t *sc, *scfirst;
                    276:        softhand_t *sh, *shmax;
                    277:
                    278:        if (first == NULL) {
                    279:                /* Boot CPU. */
                    280:                first = ci;
                    281:                mutex_init(&softint_lock, MUTEX_DEFAULT, IPL_NONE);
                    282:                softint_bytes = round_page(softint_bytes);
                    283:                softint_max = (softint_bytes - sizeof(softcpu_t)) /
                    284:                    sizeof(softhand_t);
                    285:        }
1.2       ad        286:
1.5       ad        287:        sc = (softcpu_t *)uvm_km_alloc(kernel_map, softint_bytes, 0,
                    288:            UVM_KMF_WIRED | UVM_KMF_ZERO);
                    289:        if (sc == NULL)
                     290:                panic("softint_init: cannot allocate memory");
                    291:
                    292:        ci->ci_data.cpu_softcpu = sc;
                    293:        ci->ci_data.cpu_softints = 0;
                    294:        sc->sc_cpu = ci;
                    295:
                    296:        softint_init_isr(sc, "net", PRI_SOFTNET, SOFTINT_NET);
                    297:        softint_init_isr(sc, "bio", PRI_SOFTBIO, SOFTINT_BIO);
                    298:        softint_init_isr(sc, "clk", PRI_SOFTCLOCK, SOFTINT_CLOCK);
                    299:        softint_init_isr(sc, "ser", PRI_SOFTSERIAL, SOFTINT_SERIAL);
                    300:
                    301:        if (first != ci) {
                    302:                mutex_enter(&softint_lock);
                    303:                scfirst = first->ci_data.cpu_softcpu;
                    304:                sh = sc->sc_hand;
                    305:                memcpy(sh, scfirst->sc_hand, sizeof(*sh) * softint_max);
                    306:                /* Update pointers for this CPU. */
                    307:                for (shmax = sh + softint_max; sh < shmax; sh++) {
                    308:                        if (sh->sh_func == NULL)
                    309:                                continue;
                    310:                        sh->sh_isr =
                    311:                            &sc->sc_int[sh->sh_flags & SOFTINT_LVLMASK];
                    312:                }
                    313:                mutex_exit(&softint_lock);
                    314:        } else {
                    315:                /*
                    316:                 * Establish handlers for legacy net interrupts.
                    317:                 * XXX Needs to go away.
                    318:                 */
                    319: #define DONETISR(n, f)                                                 \
1.16      ad        320:     softint_netisrs[(n)] = softint_establish(SOFTINT_NET|SOFTINT_MPSAFE,\
                    321:         (void (*)(void *))(f), NULL)
1.5       ad        322: #include <net/netisr_dispatch.h>
                    323:        }
1.2       ad        324: }
                    325:
                    326: /*
                    327:  * softint_establish:
                    328:  *
                    329:  *     Register a software interrupt handler.
                    330:  */
                    331: void *
                    332: softint_establish(u_int flags, void (*func)(void *), void *arg)
                    333: {
1.5       ad        334:        CPU_INFO_ITERATOR cii;
                    335:        struct cpu_info *ci;
                    336:        softcpu_t *sc;
                    337:        softhand_t *sh;
                    338:        u_int level, index;
1.2       ad        339:
                    340:        level = (flags & SOFTINT_LVLMASK);
                    341:        KASSERT(level < SOFTINT_COUNT);
                    342:
1.5       ad        343:        mutex_enter(&softint_lock);
                    344:
                    345:        /* Find a free slot. */
                    346:        sc = curcpu()->ci_data.cpu_softcpu;
                    347:        for (index = 1; index < softint_max; index++)
                    348:                if (sc->sc_hand[index].sh_func == NULL)
                    349:                        break;
                    350:        if (index == softint_max) {
                    351:                mutex_exit(&softint_lock);
                    352:                printf("WARNING: softint_establish: table full, "
                    353:                    "increase softint_bytes\n");
                    354:                return NULL;
                    355:        }
                    356:
                    357:        /* Set up the handler on each CPU. */
1.8       ad        358:        if (ncpu < 2) {
1.7       ad        359:                /* XXX hack for machines with no CPU_INFO_FOREACH() early on */
                    360:                sc = curcpu()->ci_data.cpu_softcpu;
                    361:                sh = &sc->sc_hand[index];
                    362:                sh->sh_isr = &sc->sc_int[level];
                    363:                sh->sh_func = func;
                    364:                sh->sh_arg = arg;
                    365:                sh->sh_flags = flags;
                    366:                sh->sh_pending = 0;
                    367:        } else for (CPU_INFO_FOREACH(cii, ci)) {
1.5       ad        368:                sc = ci->ci_data.cpu_softcpu;
                    369:                sh = &sc->sc_hand[index];
                    370:                sh->sh_isr = &sc->sc_int[level];
                    371:                sh->sh_func = func;
                    372:                sh->sh_arg = arg;
                    373:                sh->sh_flags = flags;
                    374:                sh->sh_pending = 0;
1.2       ad        375:        }
                    376:
1.5       ad        377:        mutex_exit(&softint_lock);
                    378:
                    379:        return (void *)((uint8_t *)&sc->sc_hand[index] - (uint8_t *)sc);
1.2       ad        380: }
                    381:
                    382: /*
                    383:  * softint_disestablish:
                    384:  *
                    385:  *     Unregister a software interrupt handler.
                    386:  */
                    387: void
                    388: softint_disestablish(void *arg)
                    389: {
1.5       ad        390:        CPU_INFO_ITERATOR cii;
                    391:        struct cpu_info *ci;
                    392:        softcpu_t *sc;
                    393:        softhand_t *sh;
                    394:        uintptr_t offset;
                    395:
                    396:        offset = (uintptr_t)arg;
                    397:        KASSERT(offset != 0 && offset < softint_bytes);
                    398:
                    399:        mutex_enter(&softint_lock);
                    400:
                    401:        /* Clear the handler on each CPU. */
                    402:        for (CPU_INFO_FOREACH(cii, ci)) {
                    403:                sc = ci->ci_data.cpu_softcpu;
                    404:                sh = (softhand_t *)((uint8_t *)sc + offset);
                    405:                KASSERT(sh->sh_func != NULL);
                    406:                KASSERT(sh->sh_pending == 0);
                    407:                sh->sh_func = NULL;
                    408:        }
1.2       ad        409:
1.5       ad        410:        mutex_exit(&softint_lock);
1.2       ad        411: }
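
                          /*
                           * Example (a sketch; the foo names are hypothetical): the
                           * KASSERTs above require that the handler no longer be
                           * pending on any CPU, so a driver must first quiesce
                           * whatever schedules it:
                           *
                           *      foo_quiesce(sc);        <- stop triggering the softint
                           *      softint_disestablish(sc->sc_sih);
                           *      sc->sc_sih = NULL;
                           */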
                    412:
                    413: /*
                    414:  * softint_schedule:
                    415:  *
                    416:  *     Trigger a software interrupt.  Must be called from a hardware
                    417:  *     interrupt handler, or with preemption disabled (since we are
                    418:  *     using the value of curcpu()).
                    419:  */
                    420: void
                    421: softint_schedule(void *arg)
                    422: {
1.5       ad        423:        softhand_t *sh;
                    424:        softint_t *si;
                    425:        uintptr_t offset;
                    426:        int s;
                    427:
1.17      ad        428:        KASSERT(kpreempt_disabled());
                    429:
1.5       ad        430:        /* Find the handler record for this CPU. */
                    431:        offset = (uintptr_t)arg;
                    432:        KASSERT(offset != 0 && offset < softint_bytes);
                    433:        sh = (softhand_t *)((uint8_t *)curcpu()->ci_data.cpu_softcpu + offset);
                    434:
                    435:        /* If it's already pending there's nothing to do. */
                    436:        if (sh->sh_pending)
                    437:                return;
                    438:
                    439:        /*
                    440:         * Enqueue the handler into the LWP's pending list.
                    441:         * If the LWP is completely idle, then make it run.
                    442:         */
                    443:        s = splhigh();
                    444:        if (!sh->sh_pending) {
                    445:                si = sh->sh_isr;
                    446:                sh->sh_pending = 1;
                    447:                SIMPLEQ_INSERT_TAIL(&si->si_q, sh, sh_q);
                    448:                if (si->si_active == 0) {
                    449:                        si->si_active = 1;
                    450:                        softint_trigger(si->si_machdep);
                    451:                }
                    452:        }
                    453:        splx(s);
                    454: }
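
                          /*
                           * Example (a sketch; "sih" is the cookie returned by
                           * softint_establish()): code running with thread context and
                           * kernel preemption enabled must pin itself to the CPU
                           * around the call, since the cookie is resolved against
                           * curcpu():
                           *
                           *      kpreempt_disable();
                           *      softint_schedule(sih);
                           *      kpreempt_enable();
                           */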
                    455:
                    456: /*
                    457:  * softint_execute:
                    458:  *
                    459:  *     Invoke handlers for the specified soft interrupt.
                    460:  *     Must be entered at splhigh.  Will drop the priority
                    461:  *     to the level specified, but returns back at splhigh.
                    462:  */
                    463: static inline void
                    464: softint_execute(softint_t *si, lwp_t *l, int s)
                    465: {
                    466:        softhand_t *sh;
                    467:        bool havelock;
                    468:
                    469: #ifdef __HAVE_FAST_SOFTINTS
                    470:        KASSERT(si->si_lwp == curlwp);
                    471: #else
                    472:        /* May be running in user context. */
                    473: #endif
                    474:        KASSERT(si->si_cpu == curcpu());
                    475:        KASSERT(si->si_lwp->l_wchan == NULL);
                    476:        KASSERT(si->si_active);
                    477:
                    478:        havelock = false;
                    479:
                    480:        /*
                    481:         * Note: due to priority inheritance we may have interrupted a
                    482:         * higher priority LWP.  Since the soft interrupt must be quick
                    483:         * and is non-preemptable, we don't bother yielding.
                    484:         */
                    485:
                    486:        while (!SIMPLEQ_EMPTY(&si->si_q)) {
                    487:                /*
                    488:                 * Pick the longest waiting handler to run.  We block
                    489:                 * interrupts but do not lock in order to do this, as
                    490:                 * we are protecting against the local CPU only.
                    491:                 */
                    492:                sh = SIMPLEQ_FIRST(&si->si_q);
                    493:                SIMPLEQ_REMOVE_HEAD(&si->si_q, sh_q);
                    494:                sh->sh_pending = 0;
                    495:                splx(s);
                    496:
                    497:                /* Run the handler. */
                    498:                if ((sh->sh_flags & SOFTINT_MPSAFE) == 0 && !havelock) {
                    499:                        KERNEL_LOCK(1, l);
                    500:                        havelock = true;
                    501:                }
                    502:                (*sh->sh_func)(sh->sh_arg);
                    503:
                    504:                (void)splhigh();
                    505:        }
1.2       ad        506:
1.5       ad        507:        if (havelock) {
                    508:                KERNEL_UNLOCK_ONE(l);
                    509:        }
                    510:
                    511:        /*
                    512:         * Unlocked, but only for statistics.
                    513:         * Should be per-CPU to prevent cache ping-pong.
                    514:         */
                    515:        uvmexp.softs++;
                    516:
1.13      ad        517:        KASSERT(si->si_cpu == curcpu());
                    518:        KASSERT(si->si_lwp->l_wchan == NULL);
                    519:        KASSERT(si->si_active);
1.5       ad        520:        si->si_evcnt.ev_count++;
                    521:        si->si_active = 0;
1.2       ad        522: }
                    523:
                    524: /*
                    525:  * softint_block:
                    526:  *
                    527:  *     Update statistics when the soft interrupt blocks.
                    528:  */
                    529: void
                    530: softint_block(lwp_t *l)
                    531: {
1.5       ad        532:        softint_t *si = l->l_private;
                    533:
                    534:        KASSERT((l->l_pflag & LP_INTR) != 0);
                    535:        si->si_evcnt_block.ev_count++;
                    536: }
                    537:
                    538: /*
                    539:  * schednetisr:
                    540:  *
                    541:  *     Trigger a legacy network interrupt.  XXX Needs to go away.
                    542:  */
                    543: void
                    544: schednetisr(int isr)
                    545: {
                    546:
                    547:        softint_schedule(softint_netisrs[isr]);
                    548: }
                    549:
                    550: #ifndef __HAVE_FAST_SOFTINTS
                    551:
1.19      ad        552: #ifdef __HAVE_PREEMPTION
                    553: #error __HAVE_PREEMPTION requires __HAVE_FAST_SOFTINTS
1.17      ad        554: #endif
                    555:
1.5       ad        556: /*
                    557:  * softint_init_md:
                    558:  *
                    559:  *     Slow path: perform machine-dependent initialization.
                    560:  */
                    561: void
                    562: softint_init_md(lwp_t *l, u_int level, uintptr_t *machdep)
                    563: {
                    564:        softint_t *si;
                    565:
                    566:        *machdep = (1 << level);
                    567:        si = l->l_private;
                    568:
                    569:        lwp_lock(l);
                    570:        lwp_unlock_to(l, l->l_cpu->ci_schedstate.spc_mutex);
                    571:        lwp_lock(l);
                    572:        /* Cheat and make the KASSERT in softint_thread() happy. */
                    573:        si->si_active = 1;
                    574:        l->l_stat = LSRUN;
                    575:        sched_enqueue(l, false);
                    576:        lwp_unlock(l);
                    577: }
                    578:
                    579: /*
                    580:  * softint_trigger:
                    581:  *
                    582:  *     Slow path: cause a soft interrupt handler to begin executing.
                    583:  *     Called at IPL_HIGH.
                    584:  */
                    585: void
                    586: softint_trigger(uintptr_t machdep)
                    587: {
                    588:        struct cpu_info *ci;
                    589:        lwp_t *l;
1.2       ad        590:
1.5       ad        591:        l = curlwp;
                    592:        ci = l->l_cpu;
                    593:        ci->ci_data.cpu_softints |= machdep;
                    594:        if (l == ci->ci_data.cpu_idlelwp) {
                    595:                cpu_need_resched(ci, 0);
                    596:        } else {
                    597:                /* MI equivalent of aston() */
                    598:                cpu_signotify(l);
                    599:        }
                    600: }
                    601:
                    602: /*
                    603:  * softint_thread:
                    604:  *
                    605:  *     Slow path: MI software interrupt dispatch.
                    606:  */
                    607: void
                    608: softint_thread(void *cookie)
                    609: {
                    610:        softint_t *si;
                    611:        lwp_t *l;
                    612:        int s;
                    613:
                    614:        l = curlwp;
                    615:        si = l->l_private;
                    616:
                    617:        for (;;) {
                    618:                /*
                    619:                 * Clear pending status and run it.  We must drop the
                    620:                 * spl before mi_switch(), since IPL_HIGH may be higher
                    621:                 * than IPL_SCHED (and it is not safe to switch at a
                    622:                 * higher level).
                    623:                 */
                    624:                s = splhigh();
                    625:                l->l_cpu->ci_data.cpu_softints &= ~si->si_machdep;
                    626:                softint_execute(si, l, s);
                    627:                splx(s);
                    628:
                    629:                lwp_lock(l);
                    630:                l->l_stat = LSIDL;
                    631:                mi_switch(l);
                    632:        }
1.2       ad        633: }
1.4       ad        634:
                    635: /*
                    636:  * softint_picklwp:
                    637:  *
                    638:  *     Slow path: called from mi_switch() to pick the highest priority
                    639:  *     soft interrupt LWP that needs to run.
                    640:  */
                    641: lwp_t *
                    642: softint_picklwp(void)
                    643: {
1.5       ad        644:        struct cpu_info *ci;
                    645:        u_int mask;
                    646:        softint_t *si;
                    647:        lwp_t *l;
                    648:
                    649:        ci = curcpu();
                    650:        si = ((softcpu_t *)ci->ci_data.cpu_softcpu)->sc_int;
                    651:        mask = ci->ci_data.cpu_softints;
                    652:
                    653:        if ((mask & (1 << SOFTINT_SERIAL)) != 0) {
                    654:                l = si[SOFTINT_SERIAL].si_lwp;
                    655:        } else if ((mask & (1 << SOFTINT_NET)) != 0) {
                    656:                l = si[SOFTINT_NET].si_lwp;
                    657:        } else if ((mask & (1 << SOFTINT_BIO)) != 0) {
                    658:                l = si[SOFTINT_BIO].si_lwp;
                    659:        } else if ((mask & (1 << SOFTINT_CLOCK)) != 0) {
                    660:                l = si[SOFTINT_CLOCK].si_lwp;
                    661:        } else {
                    662:                panic("softint_picklwp");
                    663:        }
1.4       ad        664:
1.5       ad        665:        return l;
1.4       ad        666: }
                    667:
                    668: /*
                    669:  * softint_overlay:
                    670:  *
                    671:  *     Slow path: called from lwp_userret() to run a soft interrupt
1.6       ad        672:  *     within the context of a user thread.
1.4       ad        673:  */
                    674: void
                    675: softint_overlay(void)
                    676: {
1.5       ad        677:        struct cpu_info *ci;
1.14      ad        678:        u_int softints, oflag;
1.5       ad        679:        softint_t *si;
1.6       ad        680:        pri_t obase;
1.5       ad        681:        lwp_t *l;
                    682:        int s;
                    683:
                    684:        l = curlwp;
                    685:        ci = l->l_cpu;
                    686:        si = ((softcpu_t *)ci->ci_data.cpu_softcpu)->sc_int;
                    687:
                    688:        KASSERT((l->l_pflag & LP_INTR) == 0);
                    689:
1.6       ad        690:        /* Arrange to elevate priority if the LWP blocks. */
1.14      ad        691:        s = splhigh();
1.6       ad        692:        obase = l->l_kpribase;
                    693:        l->l_kpribase = PRI_KERNEL_RT;
1.14      ad        694:        oflag = l->l_pflag;
                    695:        l->l_pflag = oflag | LP_INTR | LP_BOUND;
1.5       ad        696:        while ((softints = ci->ci_data.cpu_softints) != 0) {
                    697:                if ((softints & (1 << SOFTINT_SERIAL)) != 0) {
                    698:                        ci->ci_data.cpu_softints &= ~(1 << SOFTINT_SERIAL);
                    699:                        softint_execute(&si[SOFTINT_SERIAL], l, s);
                    700:                        continue;
                    701:                }
                    702:                if ((softints & (1 << SOFTINT_NET)) != 0) {
                    703:                        ci->ci_data.cpu_softints &= ~(1 << SOFTINT_NET);
                    704:                        softint_execute(&si[SOFTINT_NET], l, s);
                    705:                        continue;
                    706:                }
                    707:                if ((softints & (1 << SOFTINT_BIO)) != 0) {
                    708:                        ci->ci_data.cpu_softints &= ~(1 << SOFTINT_BIO);
                    709:                        softint_execute(&si[SOFTINT_BIO], l, s);
                    710:                        continue;
                    711:                }
                    712:                if ((softints & (1 << SOFTINT_CLOCK)) != 0) {
                    713:                        ci->ci_data.cpu_softints &= ~(1 << SOFTINT_CLOCK);
                    714:                        softint_execute(&si[SOFTINT_CLOCK], l, s);
                    715:                        continue;
                    716:                }
                    717:        }
1.15      ad        718:        l->l_pflag = oflag;
1.14      ad        719:        l->l_kpribase = obase;
1.5       ad        720:        splx(s);
1.4       ad        721: }
1.5       ad        722:
                     723: #else  /* __HAVE_FAST_SOFTINTS */
                    724:
                    725: /*
                    726:  * softint_thread:
                    727:  *
                    728:  *     Fast path: the LWP is switched to without restoring any state,
                    729:  *     so we should not arrive here - there is a direct handoff between
                    730:  *     the interrupt stub and softint_dispatch().
                    731:  */
                    732: void
                    733: softint_thread(void *cookie)
                    734: {
                    735:
                    736:        panic("softint_thread");
                    737: }
                    738:
                    739: /*
                    740:  * softint_dispatch:
                    741:  *
                    742:  *     Fast path: entry point from machine-dependent code.
                    743:  */
                    744: void
                    745: softint_dispatch(lwp_t *pinned, int s)
                    746: {
1.9       yamt      747:        struct bintime now;
1.5       ad        748:        softint_t *si;
                    749:        u_int timing;
                    750:        lwp_t *l;
                    751:
                    752:        l = curlwp;
                    753:        si = l->l_private;
                    754:
                    755:        /*
                    756:         * Note the interrupted LWP, and mark the current LWP as running
                    757:         * before proceeding.  Although this must as a rule be done with
                    758:         * the LWP locked, at this point no external agents will want to
                    759:         * modify the interrupt LWP's state.
                    760:         */
1.22      ad        761:        timing = (softint_timing ? LP_TIMEINTR : 0);
1.5       ad        762:        l->l_switchto = pinned;
                    763:        l->l_stat = LSONPROC;
1.22      ad        764:        l->l_pflag |= (LP_RUNNING | timing);
1.5       ad        765:
                    766:        /*
                    767:         * Dispatch the interrupt.  If softints are being timed, charge
                    768:         * for it.
                    769:         */
                    770:        if (timing)
1.11      yamt      771:                binuptime(&l->l_stime);
1.5       ad        772:        softint_execute(si, l, s);
                    773:        if (timing) {
1.11      yamt      774:                binuptime(&now);
1.5       ad        775:                updatertime(l, &now);
1.22      ad        776:                l->l_pflag &= ~LP_TIMEINTR;
1.5       ad        777:        }
                    778:
                    779:        /*
                    780:         * If we blocked while handling the interrupt, the pinned LWP is
                    781:         * gone so switch to the idle LWP.  It will select a new LWP to
                    782:         * run.
                    783:         *
                    784:         * We must drop the priority level as switching at IPL_HIGH could
                    785:         * deadlock the system.  We have already set si->si_active = 0,
                    786:         * which means another interrupt at this level can be triggered.
                     787:         * That's not a problem: we are lowering to level 's' which will
                    788:         * prevent softint_dispatch() from being reentered at level 's',
                    789:         * until the priority is finally dropped to IPL_NONE on entry to
1.21      ad        790:         * the LWP chosen by lwp_exit_switchaway().
1.5       ad        791:         */
                    792:        l->l_stat = LSIDL;
                    793:        if (l->l_switchto == NULL) {
                    794:                splx(s);
                    795:                pmap_deactivate(l);
                    796:                lwp_exit_switchaway(l);
                    797:                /* NOTREACHED */
                    798:        }
                    799:        l->l_switchto = NULL;
1.22      ad        800:        l->l_pflag &= ~LP_RUNNING;
1.5       ad        801: }
                    802:
                    803: #endif /* !__HAVE_FAST_SOFTINTS */
